From 71850e52935dd8a294b45f294183fac58f13db2e Mon Sep 17 00:00:00 2001 From: Neelanjan Manna Date: Sat, 24 Dec 2022 17:14:11 +0530 Subject: [PATCH] feat(scaler): Adds ArangoDB Scaler (#4028) * chore(scaler): add arangodb scaler Signed-off-by: Neelanjan Manna * updated helm install command Signed-off-by: Neelanjan Manna * adds helper function to wait for all pods in the namespace to be running Signed-off-by: Neelanjan Manna * fixes username as base64 input Signed-off-by: Neelanjan Manna * fix helper for deleting arango deployment Signed-off-by: Neelanjan Manna * updates changelog Signed-off-by: Neelanjan Manna * updates changelog Signed-off-by: Neelanjan Manna * resolves ci checks and comments Signed-off-by: Neelanjan Manna * adds function comment Signed-off-by: Neelanjan Manna * removes unnecessary leading newline Signed-off-by: Neelanjan Manna * adds feature for executing any query that returns a float value Signed-off-by: Neelanjan Manna * resolves pr comments Signed-off-by: Neelanjan Manna Signed-off-by: Neelanjan Manna --- CHANGELOG.md | 2 +- go.mod | 5 +- go.sum | 16 +- pkg/metricsservice/api/metrics.pb.go | 2 +- pkg/metricsservice/api/metrics_grpc.pb.go | 2 +- pkg/scalers/arangodb_scaler.go | 281 ++++ pkg/scalers/arangodb_scaler_test.go | 129 ++ .../externalscaler/externalscaler.pb.go | 2 +- .../externalscaler/externalscaler_grpc.pb.go | 2 +- pkg/scalers/liiklus/LiiklusService.pb.go | 2 +- pkg/scalers/liiklus/LiiklusService_grpc.pb.go | 2 +- pkg/scaling/scale_handler.go | 2 + tests/helper/helper.go | 26 + tests/scalers/arangodb/arangodb_test.go | 276 ++++ tests/scalers/arangodb/helper.go | 118 ++ vendor/github.com/arangodb/go-driver/.envrc | 8 + .../github.com/arangodb/go-driver/.gitignore | 15 + .../arangodb/go-driver/.golangci.yaml | 52 + .../github.com/arangodb/go-driver/.travis.yml | 45 + .../arangodb/go-driver/CHANGELOG.md | 78 ++ .../arangodb/go-driver/CONTRIBUTING.md | 63 + .../arangodb/go-driver/Dockerfile.debug | 11 + 
vendor/github.com/arangodb/go-driver/HEADER | 18 + vendor/github.com/arangodb/go-driver/LICENSE | 202 +++ .../arangodb/go-driver/MAINTAINERS.md | 27 + vendor/github.com/arangodb/go-driver/Makefile | 565 ++++++++ .../github.com/arangodb/go-driver/README.md | 37 + vendor/github.com/arangodb/go-driver/VERSION | 1 + .../arangodb/go-driver/authentication.go | 114 ++ .../github.com/arangodb/go-driver/client.go | 124 ++ .../arangodb/go-driver/client_admin_backup.go | 149 +++ .../go-driver/client_admin_backup_impl.go | 305 +++++ .../arangodb/go-driver/client_cluster.go | 33 + .../arangodb/go-driver/client_cluster_impl.go | 46 + .../arangodb/go-driver/client_databases.go | 85 ++ .../go-driver/client_databases_impl.go | 154 +++ .../arangodb/go-driver/client_foxx.go | 47 + .../arangodb/go-driver/client_foxx_impl.go | 28 + .../arangodb/go-driver/client_impl.go | 153 +++ .../arangodb/go-driver/client_replication.go | 29 + .../go-driver/client_replication_impl.go | 28 + .../arangodb/go-driver/client_server_admin.go | 204 +++ .../go-driver/client_server_admin_impl.go | 243 ++++ .../arangodb/go-driver/client_server_info.go | 61 + .../go-driver/client_server_info_impl.go | 152 +++ .../arangodb/go-driver/client_users.go | 52 + .../arangodb/go-driver/client_users_impl.go | 144 ++ .../github.com/arangodb/go-driver/cluster.go | 357 +++++ .../arangodb/go-driver/cluster/cluster.go | 356 +++++ .../arangodb/go-driver/cluster/doc.go | 26 + .../arangodb/go-driver/cluster_impl.go | 489 +++++++ .../arangodb/go-driver/collection.go | 316 +++++ .../go-driver/collection_document_impl.go | 677 ++++++++++ .../go-driver/collection_documents.go | 178 +++ .../arangodb/go-driver/collection_impl.go | 356 +++++ .../arangodb/go-driver/collection_indexes.go | 302 +++++ .../go-driver/collection_indexes_impl.go | 388 ++++++ .../arangodb/go-driver/connection.go | 172 +++ .../arangodb/go-driver/content_type.go | 46 + .../github.com/arangodb/go-driver/context.go | 543 ++++++++ 
.../github.com/arangodb/go-driver/cursor.go | 91 ++ .../arangodb/go-driver/cursor_impl.go | 345 +++++ .../github.com/arangodb/go-driver/database.go | 129 ++ .../database_arangosearch_analyzers.go | 61 + .../database_arangosearch_analyzers_impl.go | 184 +++ .../go-driver/database_collections.go | 224 ++++ .../go-driver/database_collections_impl.go | 190 +++ .../go-driver/database_collections_schema.go | 54 + .../arangodb/go-driver/database_graphs.go | 92 ++ .../go-driver/database_graphs_impl.go | 270 ++++ .../arangodb/go-driver/database_impl.go | 250 ++++ .../arangodb/go-driver/database_pregel.go | 182 +++ .../go-driver/database_pregel_impl.go | 116 ++ .../go-driver/database_transactions.go | 76 ++ .../go-driver/database_transactions_impl.go | 100 ++ .../arangodb/go-driver/database_views.go | 58 + .../arangodb/go-driver/database_views_impl.go | 202 +++ vendor/github.com/arangodb/go-driver/doc.go | 44 + vendor/github.com/arangodb/go-driver/edge.go | 31 + .../edge_collection_documents_impl.go | 596 +++++++++ .../go-driver/edge_collection_impl.go | 178 +++ .../go-driver/edge_collection_indexes_impl.go | 148 ++ .../arangodb/go-driver/encode-go_1_8.go | 39 + .../github.com/arangodb/go-driver/encode.go | 39 + vendor/github.com/arangodb/go-driver/error.go | 297 +++++ vendor/github.com/arangodb/go-driver/foxx.go | 85 ++ vendor/github.com/arangodb/go-driver/graph.go | 80 ++ .../go-driver/graph_edge_collections.go | 66 + .../go-driver/graph_edge_collections_impl.go | 239 ++++ .../arangodb/go-driver/graph_impl.go | 139 ++ .../go-driver/graph_vertex_collections.go | 53 + .../graph_vertex_collections_impl.go | 176 +++ .../arangodb/go-driver/http/authentication.go | 279 ++++ .../arangodb/go-driver/http/connection.go | 496 +++++++ .../go-driver/http/connection_wrapper.go | 92 ++ .../github.com/arangodb/go-driver/http/doc.go | 69 + .../arangodb/go-driver/http/mergeObject.go | 84 ++ .../arangodb/go-driver/http/request_json.go | 369 +++++ .../arangodb/go-driver/http/request_vpack.go 
| 144 ++ .../arangodb/go-driver/http/response_json.go | 206 +++ .../go-driver/http/response_json_element.go | 113 ++ .../arangodb/go-driver/http/response_vpack.go | 151 +++ .../go-driver/http/response_vpack_element.go | 124 ++ vendor/github.com/arangodb/go-driver/id.go | 98 ++ vendor/github.com/arangodb/go-driver/index.go | 102 ++ .../arangodb/go-driver/index_impl.go | 287 ++++ .../github.com/arangodb/go-driver/jwt/doc.go | 57 + .../github.com/arangodb/go-driver/jwt/jwt.go | 84 ++ vendor/github.com/arangodb/go-driver/meta.go | 69 + .../github.com/arangodb/go-driver/protocol.go | 56 + vendor/github.com/arangodb/go-driver/query.go | 297 +++++ .../arangodb/go-driver/replication.go | 68 + .../arangodb/go-driver/replication_impl.go | 165 +++ .../github.com/arangodb/go-driver/revision.go | 273 ++++ .../arangodb/go-driver/transaction.go | 80 ++ vendor/github.com/arangodb/go-driver/user.go | 128 ++ .../arangodb/go-driver/user_impl.go | 401 ++++++ .../github.com/arangodb/go-driver/util/doc.go | 26 + .../arangodb/go-driver/util/endpoints.go | 38 + .../github.com/arangodb/go-driver/version.go | 109 ++ .../vertex_collection_documents_impl.go | 562 ++++++++ .../go-driver/vertex_collection_impl.go | 178 +++ .../vertex_collection_indexes_impl.go | 148 ++ vendor/github.com/arangodb/go-driver/view.go | 55 + .../arangodb/go-driver/view_arangosearch.go | 493 +++++++ .../go-driver/view_arangosearch_alias.go | 54 + .../go-driver/view_arangosearch_alias_impl.go | 78 ++ .../go-driver/view_arangosearch_impl.go | 74 + .../arangodb/go-driver/view_impl.go | 139 ++ .../github.com/arangodb/go-velocypack/.envrc | 8 + .../arangodb/go-velocypack/.gitignore | 4 + .../arangodb/go-velocypack/.travis.yml | 8 + .../github.com/arangodb/go-velocypack/LICENSE | 202 +++ .../arangodb/go-velocypack/Makefile | 56 + .../arangodb/go-velocypack/README.md | 7 + .../arangodb/go-velocypack/array_iterator.go | 91 ++ .../go-velocypack/attribute_translator.go | 51 + .../arangodb/go-velocypack/builder.go | 1186 
+++++++++++++++++ .../arangodb/go-velocypack/builder_buffer.go | 131 ++ .../go-velocypack/builder_index_vector.go | 57 + .../go-velocypack/builder_sort_entry.go | 82 ++ .../arangodb/go-velocypack/builder_stack.go | 73 + .../arangodb/go-velocypack/decoder.go | 1031 ++++++++++++++ .../github.com/arangodb/go-velocypack/doc.go | 26 + .../arangodb/go-velocypack/dumper.go | 381 ++++++ .../arangodb/go-velocypack/encoder.go | 670 ++++++++++ .../arangodb/go-velocypack/encoder_field.go | 330 +++++ .../arangodb/go-velocypack/encoder_fold.go | 168 +++ .../arangodb/go-velocypack/encoder_tags.go | 89 ++ .../arangodb/go-velocypack/error.go | 231 ++++ .../arangodb/go-velocypack/object_iterator.go | 114 ++ .../arangodb/go-velocypack/parser.go | 151 +++ .../arangodb/go-velocypack/raw_slice.go | 50 + .../arangodb/go-velocypack/slice.go | 927 +++++++++++++ .../arangodb/go-velocypack/slice_factory.go | 69 + .../arangodb/go-velocypack/slice_merge.go | 99 ++ .../arangodb/go-velocypack/slice_reader.go | 197 +++ .../arangodb/go-velocypack/slice_type.go | 135 ++ .../github.com/arangodb/go-velocypack/util.go | 202 +++ .../arangodb/go-velocypack/value.go | 199 +++ .../arangodb/go-velocypack/value_length.go | 53 + .../arangodb/go-velocypack/value_type.go | 382 ++++++ vendor/github.com/golang-jwt/jwt/.gitignore | 4 + vendor/github.com/golang-jwt/jwt/LICENSE | 9 + .../golang-jwt/jwt/MIGRATION_GUIDE.md | 22 + vendor/github.com/golang-jwt/jwt/README.md | 113 ++ .../golang-jwt/jwt/VERSION_HISTORY.md | 131 ++ vendor/github.com/golang-jwt/jwt/claims.go | 146 ++ vendor/github.com/golang-jwt/jwt/doc.go | 4 + vendor/github.com/golang-jwt/jwt/ecdsa.go | 142 ++ .../github.com/golang-jwt/jwt/ecdsa_utils.go | 69 + vendor/github.com/golang-jwt/jwt/ed25519.go | 81 ++ .../golang-jwt/jwt/ed25519_utils.go | 64 + vendor/github.com/golang-jwt/jwt/errors.go | 59 + vendor/github.com/golang-jwt/jwt/hmac.go | 95 ++ .../github.com/golang-jwt/jwt/map_claims.go | 120 ++ vendor/github.com/golang-jwt/jwt/none.go | 52 + 
vendor/github.com/golang-jwt/jwt/parser.go | 148 ++ vendor/github.com/golang-jwt/jwt/rsa.go | 101 ++ vendor/github.com/golang-jwt/jwt/rsa_pss.go | 142 ++ vendor/github.com/golang-jwt/jwt/rsa_utils.go | 101 ++ .../golang-jwt/jwt/signing_method.go | 35 + vendor/github.com/golang-jwt/jwt/token.go | 104 ++ vendor/github.com/mattn/go-ieproxy/README.md | 6 +- vendor/github.com/mattn/go-ieproxy/ieproxy.go | 5 - .../mattn/go-ieproxy/ieproxy_darwin.go | 123 -- .../mattn/go-ieproxy/ieproxy_unix.go | 6 +- .../mattn/go-ieproxy/ieproxy_windows.go | 8 +- .../github.com/mattn/go-ieproxy/pac_darwin.go | 141 -- .../github.com/mattn/go-ieproxy/pac_unix.go | 2 +- .../go-ieproxy/proxy_middleman_darwin.go | 43 - .../mattn/go-ieproxy/proxy_middleman_unix.go | 2 +- vendor/modules.txt | 15 +- 193 files changed, 28636 insertions(+), 343 deletions(-) create mode 100644 pkg/scalers/arangodb_scaler.go create mode 100644 pkg/scalers/arangodb_scaler_test.go create mode 100644 tests/scalers/arangodb/arangodb_test.go create mode 100644 tests/scalers/arangodb/helper.go create mode 100644 vendor/github.com/arangodb/go-driver/.envrc create mode 100644 vendor/github.com/arangodb/go-driver/.gitignore create mode 100644 vendor/github.com/arangodb/go-driver/.golangci.yaml create mode 100644 vendor/github.com/arangodb/go-driver/.travis.yml create mode 100644 vendor/github.com/arangodb/go-driver/CHANGELOG.md create mode 100644 vendor/github.com/arangodb/go-driver/CONTRIBUTING.md create mode 100644 vendor/github.com/arangodb/go-driver/Dockerfile.debug create mode 100644 vendor/github.com/arangodb/go-driver/HEADER create mode 100644 vendor/github.com/arangodb/go-driver/LICENSE create mode 100644 vendor/github.com/arangodb/go-driver/MAINTAINERS.md create mode 100644 vendor/github.com/arangodb/go-driver/Makefile create mode 100644 vendor/github.com/arangodb/go-driver/README.md create mode 100644 vendor/github.com/arangodb/go-driver/VERSION create mode 100644 
vendor/github.com/arangodb/go-driver/authentication.go create mode 100644 vendor/github.com/arangodb/go-driver/client.go create mode 100644 vendor/github.com/arangodb/go-driver/client_admin_backup.go create mode 100644 vendor/github.com/arangodb/go-driver/client_admin_backup_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/client_cluster.go create mode 100644 vendor/github.com/arangodb/go-driver/client_cluster_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/client_databases.go create mode 100644 vendor/github.com/arangodb/go-driver/client_databases_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/client_foxx.go create mode 100644 vendor/github.com/arangodb/go-driver/client_foxx_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/client_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/client_replication.go create mode 100644 vendor/github.com/arangodb/go-driver/client_replication_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/client_server_admin.go create mode 100644 vendor/github.com/arangodb/go-driver/client_server_admin_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/client_server_info.go create mode 100644 vendor/github.com/arangodb/go-driver/client_server_info_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/client_users.go create mode 100644 vendor/github.com/arangodb/go-driver/client_users_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/cluster.go create mode 100644 vendor/github.com/arangodb/go-driver/cluster/cluster.go create mode 100644 vendor/github.com/arangodb/go-driver/cluster/doc.go create mode 100644 vendor/github.com/arangodb/go-driver/cluster_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/collection.go create mode 100644 vendor/github.com/arangodb/go-driver/collection_document_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/collection_documents.go create mode 100644 
vendor/github.com/arangodb/go-driver/collection_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/collection_indexes.go create mode 100644 vendor/github.com/arangodb/go-driver/collection_indexes_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/connection.go create mode 100644 vendor/github.com/arangodb/go-driver/content_type.go create mode 100644 vendor/github.com/arangodb/go-driver/context.go create mode 100644 vendor/github.com/arangodb/go-driver/cursor.go create mode 100644 vendor/github.com/arangodb/go-driver/cursor_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/database.go create mode 100644 vendor/github.com/arangodb/go-driver/database_arangosearch_analyzers.go create mode 100644 vendor/github.com/arangodb/go-driver/database_arangosearch_analyzers_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/database_collections.go create mode 100644 vendor/github.com/arangodb/go-driver/database_collections_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/database_collections_schema.go create mode 100644 vendor/github.com/arangodb/go-driver/database_graphs.go create mode 100644 vendor/github.com/arangodb/go-driver/database_graphs_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/database_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/database_pregel.go create mode 100644 vendor/github.com/arangodb/go-driver/database_pregel_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/database_transactions.go create mode 100644 vendor/github.com/arangodb/go-driver/database_transactions_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/database_views.go create mode 100644 vendor/github.com/arangodb/go-driver/database_views_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/doc.go create mode 100644 vendor/github.com/arangodb/go-driver/edge.go create mode 100644 vendor/github.com/arangodb/go-driver/edge_collection_documents_impl.go create 
mode 100644 vendor/github.com/arangodb/go-driver/edge_collection_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/edge_collection_indexes_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/encode-go_1_8.go create mode 100644 vendor/github.com/arangodb/go-driver/encode.go create mode 100644 vendor/github.com/arangodb/go-driver/error.go create mode 100644 vendor/github.com/arangodb/go-driver/foxx.go create mode 100644 vendor/github.com/arangodb/go-driver/graph.go create mode 100644 vendor/github.com/arangodb/go-driver/graph_edge_collections.go create mode 100644 vendor/github.com/arangodb/go-driver/graph_edge_collections_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/graph_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/graph_vertex_collections.go create mode 100644 vendor/github.com/arangodb/go-driver/graph_vertex_collections_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/http/authentication.go create mode 100644 vendor/github.com/arangodb/go-driver/http/connection.go create mode 100644 vendor/github.com/arangodb/go-driver/http/connection_wrapper.go create mode 100644 vendor/github.com/arangodb/go-driver/http/doc.go create mode 100644 vendor/github.com/arangodb/go-driver/http/mergeObject.go create mode 100644 vendor/github.com/arangodb/go-driver/http/request_json.go create mode 100644 vendor/github.com/arangodb/go-driver/http/request_vpack.go create mode 100644 vendor/github.com/arangodb/go-driver/http/response_json.go create mode 100644 vendor/github.com/arangodb/go-driver/http/response_json_element.go create mode 100644 vendor/github.com/arangodb/go-driver/http/response_vpack.go create mode 100644 vendor/github.com/arangodb/go-driver/http/response_vpack_element.go create mode 100644 vendor/github.com/arangodb/go-driver/id.go create mode 100644 vendor/github.com/arangodb/go-driver/index.go create mode 100644 vendor/github.com/arangodb/go-driver/index_impl.go create mode 100644 
vendor/github.com/arangodb/go-driver/jwt/doc.go create mode 100644 vendor/github.com/arangodb/go-driver/jwt/jwt.go create mode 100644 vendor/github.com/arangodb/go-driver/meta.go create mode 100644 vendor/github.com/arangodb/go-driver/protocol.go create mode 100644 vendor/github.com/arangodb/go-driver/query.go create mode 100644 vendor/github.com/arangodb/go-driver/replication.go create mode 100644 vendor/github.com/arangodb/go-driver/replication_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/revision.go create mode 100644 vendor/github.com/arangodb/go-driver/transaction.go create mode 100644 vendor/github.com/arangodb/go-driver/user.go create mode 100644 vendor/github.com/arangodb/go-driver/user_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/util/doc.go create mode 100644 vendor/github.com/arangodb/go-driver/util/endpoints.go create mode 100644 vendor/github.com/arangodb/go-driver/version.go create mode 100644 vendor/github.com/arangodb/go-driver/vertex_collection_documents_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/vertex_collection_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/vertex_collection_indexes_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/view.go create mode 100644 vendor/github.com/arangodb/go-driver/view_arangosearch.go create mode 100644 vendor/github.com/arangodb/go-driver/view_arangosearch_alias.go create mode 100644 vendor/github.com/arangodb/go-driver/view_arangosearch_alias_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/view_arangosearch_impl.go create mode 100644 vendor/github.com/arangodb/go-driver/view_impl.go create mode 100644 vendor/github.com/arangodb/go-velocypack/.envrc create mode 100644 vendor/github.com/arangodb/go-velocypack/.gitignore create mode 100644 vendor/github.com/arangodb/go-velocypack/.travis.yml create mode 100644 vendor/github.com/arangodb/go-velocypack/LICENSE create mode 100644 
vendor/github.com/arangodb/go-velocypack/Makefile create mode 100644 vendor/github.com/arangodb/go-velocypack/README.md create mode 100644 vendor/github.com/arangodb/go-velocypack/array_iterator.go create mode 100644 vendor/github.com/arangodb/go-velocypack/attribute_translator.go create mode 100644 vendor/github.com/arangodb/go-velocypack/builder.go create mode 100644 vendor/github.com/arangodb/go-velocypack/builder_buffer.go create mode 100644 vendor/github.com/arangodb/go-velocypack/builder_index_vector.go create mode 100644 vendor/github.com/arangodb/go-velocypack/builder_sort_entry.go create mode 100644 vendor/github.com/arangodb/go-velocypack/builder_stack.go create mode 100644 vendor/github.com/arangodb/go-velocypack/decoder.go create mode 100644 vendor/github.com/arangodb/go-velocypack/doc.go create mode 100644 vendor/github.com/arangodb/go-velocypack/dumper.go create mode 100644 vendor/github.com/arangodb/go-velocypack/encoder.go create mode 100644 vendor/github.com/arangodb/go-velocypack/encoder_field.go create mode 100644 vendor/github.com/arangodb/go-velocypack/encoder_fold.go create mode 100644 vendor/github.com/arangodb/go-velocypack/encoder_tags.go create mode 100644 vendor/github.com/arangodb/go-velocypack/error.go create mode 100644 vendor/github.com/arangodb/go-velocypack/object_iterator.go create mode 100644 vendor/github.com/arangodb/go-velocypack/parser.go create mode 100644 vendor/github.com/arangodb/go-velocypack/raw_slice.go create mode 100644 vendor/github.com/arangodb/go-velocypack/slice.go create mode 100644 vendor/github.com/arangodb/go-velocypack/slice_factory.go create mode 100644 vendor/github.com/arangodb/go-velocypack/slice_merge.go create mode 100644 vendor/github.com/arangodb/go-velocypack/slice_reader.go create mode 100644 vendor/github.com/arangodb/go-velocypack/slice_type.go create mode 100644 vendor/github.com/arangodb/go-velocypack/util.go create mode 100644 vendor/github.com/arangodb/go-velocypack/value.go create mode 100644 
vendor/github.com/arangodb/go-velocypack/value_length.go create mode 100644 vendor/github.com/arangodb/go-velocypack/value_type.go create mode 100644 vendor/github.com/golang-jwt/jwt/.gitignore create mode 100644 vendor/github.com/golang-jwt/jwt/LICENSE create mode 100644 vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md create mode 100644 vendor/github.com/golang-jwt/jwt/README.md create mode 100644 vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md create mode 100644 vendor/github.com/golang-jwt/jwt/claims.go create mode 100644 vendor/github.com/golang-jwt/jwt/doc.go create mode 100644 vendor/github.com/golang-jwt/jwt/ecdsa.go create mode 100644 vendor/github.com/golang-jwt/jwt/ecdsa_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/ed25519.go create mode 100644 vendor/github.com/golang-jwt/jwt/ed25519_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/errors.go create mode 100644 vendor/github.com/golang-jwt/jwt/hmac.go create mode 100644 vendor/github.com/golang-jwt/jwt/map_claims.go create mode 100644 vendor/github.com/golang-jwt/jwt/none.go create mode 100644 vendor/github.com/golang-jwt/jwt/parser.go create mode 100644 vendor/github.com/golang-jwt/jwt/rsa.go create mode 100644 vendor/github.com/golang-jwt/jwt/rsa_pss.go create mode 100644 vendor/github.com/golang-jwt/jwt/rsa_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/signing_method.go create mode 100644 vendor/github.com/golang-jwt/jwt/token.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/pac_darwin.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a7dc15e5db..81529861906 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,7 +48,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio Here is an overview of all **stable** additions: -- **General**: TODO 
([#TODO](https://github.com/kedacore/keda/issues/TODO)) +- **General**: Introduce new ArangoDB Scaler ([#4000](https://github.com/kedacore/keda/issues/4000)) Here is an overview of all new **experimental** features: diff --git a/go.mod b/go.mod index 2c51ca8dfb5..46b4f6b7187 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( github.com/DataDog/datadog-api-client-go v1.16.0 github.com/Huawei/gophercloud v1.0.21 github.com/Shopify/sarama v1.37.2 + github.com/arangodb/go-driver v1.4.0 github.com/aws/aws-sdk-go v1.44.150 github.com/denisenkom/go-mssqldb v0.12.3 github.com/dysnix/predictkube-libs v0.0.4-0.20220717101015-44c816c4fb9c @@ -124,6 +125,7 @@ require ( github.com/DataDog/zstd v1.5.2 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/andybalholm/brotli v1.0.4 // indirect + github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect @@ -157,6 +159,7 @@ require ( github.com/gobuffalo/flect v0.2.5 // indirect github.com/gofrs/uuid v4.2.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang-jwt/jwt/v4 v4.4.2 // indirect github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect @@ -207,7 +210,7 @@ require ( github.com/leodido/go-urn v1.2.1 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-ieproxy v0.0.7 // indirect + github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect diff --git a/go.sum b/go.sum index 
1e2306ea97f..ed5f922fe67 100644 --- a/go.sum +++ b/go.sum @@ -155,6 +155,10 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/arangodb/go-driver v1.4.0 h1:uNCbVYkr5ZP3hIVUP6wqjOVyhMYOL9NDmR762tIeYP0= +github.com/arangodb/go-driver v1.4.0/go.mod h1:5GAx3XvK72DJPhJgyjZOtYAGc4SpY7rZDb3LyhCvLcQ= +github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e h1:Xg+hGrY2LcQBbxd0ZFdbGSyRKTYMZCfBbw/pMJFOk1g= +github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e/go.mod h1:mq7Shfa/CaixoDxiyAAc5jZ6CVBAyPaNQCGS7mkj4Ho= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -203,8 +207,10 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coreos/go-iptables v0.4.3/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 
v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 h1:rtAn27wIbmOGUs7RIbVgPEjb31ehTVniDwPGXyMxm5U= github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -214,6 +220,7 @@ github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dchest/uniuri v0.0.0-20160212164326-8902c56451e9/go.mod h1:GgB8SF9nRG+GqaDtLcwJZsQFhcogVCJ79j4EdT0c2V4= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= @@ -372,6 +379,7 @@ github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= @@ -642,9 +650,8 @@ github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-colorable 
v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-ieproxy v0.0.7 h1:d2hBmNUJOAf2aGgzMQtz1wBByJQvRk72/1TXBiCVHXU= -github.com/mattn/go-ieproxy v0.0.7/go.mod h1:6ZpRmhBaYuBX1U2za+9rC9iCGLsSp2tftelZne7CPko= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= @@ -797,6 +804,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= @@ -1039,7 +1048,6 @@ golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220513224357-95641704303c/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1129,7 +1137,6 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220110181412-a018aaa089fe/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1171,6 +1178,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/pkg/metricsservice/api/metrics.pb.go b/pkg/metricsservice/api/metrics.pb.go index 2ec2d937352..f3b6c4441e5 100644 --- a/pkg/metricsservice/api/metrics.pb.go +++ b/pkg/metricsservice/api/metrics.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc v3.21.11 // source: metrics.proto package api diff --git a/pkg/metricsservice/api/metrics_grpc.pb.go b/pkg/metricsservice/api/metrics_grpc.pb.go index 01c0490e7fa..a0424ee349c 100644 --- a/pkg/metricsservice/api/metrics_grpc.pb.go +++ b/pkg/metricsservice/api/metrics_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.9 +// - protoc v3.21.11 // source: metrics.proto package api diff --git a/pkg/scalers/arangodb_scaler.go b/pkg/scalers/arangodb_scaler.go new file mode 100644 index 00000000000..9154bd73827 --- /dev/null +++ b/pkg/scalers/arangodb_scaler.go @@ -0,0 +1,281 @@ +package scalers + +import ( + "context" + "crypto/tls" + "fmt" + "strconv" + "strings" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/http" + "github.com/arangodb/go-driver/jwt" + "github.com/go-logr/logr" + v2 "k8s.io/api/autoscaling/v2" + "k8s.io/metrics/pkg/apis/external_metrics" + + "github.com/kedacore/keda/v2/pkg/scalers/authentication" +) + +type arangoDBScaler struct { + metricType v2.MetricTargetType + metadata *arangoDBMetadata + client driver.Client + logger logr.Logger +} + +type dbResult struct { + Value float64 `json:"value"` +} + +// arangoDBMetadata specify arangoDB scaler params. 
+type arangoDBMetadata struct { + // Specify arangoDB server endpoint URL or comma separated URL endpoints of all the coordinators. + // +required + endpoints string + // Authentication parameters for connecting to the database + // +required + arangoDBAuth *authentication.AuthMeta + // Specify the unique arangoDB server ID. Only required if bearer JWT is being used. + // +optional + serverID string + + // The name of the database to be queried. + // +required + dbName string + // The name of the collection to be queried. + // +required + collection string + // The arangoDB query to be executed. + // +required + query string + // A threshold that is used as targetAverageValue in HPA. + // +required + queryValue float64 + // A threshold that is used to check if scaler is active. + // +optional + activationQueryValue float64 + // Specify whether to verify the server's certificate chain and host name. + // +optional + unsafeSsl bool + // Specify the max size of the active connection pool. + // +optional + connectionLimit int64 + + // The index of the scaler inside the ScaledObject + // +internal + scalerIndex int +} + +// NewArangoDBScaler creates a new arangodbScaler +func NewArangoDBScaler(config *ScalerConfig) (Scaler, error) { + metricType, err := GetMetricTargetType(config) + if err != nil { + return nil, fmt.Errorf("error getting scaler metric type: %w", err) + } + + meta, err := parseArangoDBMetadata(config) + if err != nil { + return nil, fmt.Errorf("error parsing arangoDB metadata: %w", err) + } + + client, err := getNewArangoDBClient(meta) + if err != nil { + return nil, err + } + + return &arangoDBScaler{ + metricType: metricType, + metadata: meta, + client: client, + logger: InitializeLogger(config, "arangodb_scaler"), + }, nil +} + +func getNewArangoDBClient(meta *arangoDBMetadata) (driver.Client, error) { + var auth driver.Authentication + + conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: strings.Split(meta.endpoints, ","), + 
TLSConfig: &tls.Config{ + MinVersion: tls.VersionTLS13, + InsecureSkipVerify: meta.unsafeSsl, + }, + }) + if err != nil { + return nil, fmt.Errorf("failed to create a new http connection, %w", err) + } + + if meta.arangoDBAuth.EnableBasicAuth { + auth = driver.BasicAuthentication(meta.arangoDBAuth.Username, meta.arangoDBAuth.Password) + } else if meta.arangoDBAuth.EnableBearerAuth { + hdr, err := jwt.CreateArangodJwtAuthorizationHeader(meta.arangoDBAuth.BearerToken, meta.serverID) + if err != nil { + return nil, fmt.Errorf("failed to create bearer token authorization header, %w", err) + } + auth = driver.RawAuthentication(hdr) + } + + client, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, + Authentication: auth, + }) + + if err != nil { + return nil, fmt.Errorf("failed to initialize a new client, %w", err) + } + + return client, nil +} + +func parseArangoDBMetadata(config *ScalerConfig) (*arangoDBMetadata, error) { + // setting default metadata + meta := arangoDBMetadata{} + + // parse metaData from ScaledJob config + endpoints, err := GetFromAuthOrMeta(config, "endpoints") + if err != nil { + return nil, err + } + meta.endpoints = endpoints + + if val, ok := config.TriggerMetadata["collection"]; ok { + meta.collection = val + } else { + return nil, fmt.Errorf("no collection given") + } + + if val, ok := config.TriggerMetadata["query"]; ok { + meta.query = val + } else { + return nil, fmt.Errorf("no query given") + } + + if val, ok := config.TriggerMetadata["queryValue"]; ok { + queryValue, err := strconv.ParseFloat(val, 64) + if err != nil { + return nil, fmt.Errorf("failed to convert queryValue to int, %w", err) + } + meta.queryValue = queryValue + } else { + return nil, fmt.Errorf("no queryValue given") + } + + meta.activationQueryValue = 0 + if val, ok := config.TriggerMetadata["activationQueryValue"]; ok { + activationQueryValue, err := strconv.ParseFloat(val, 64) + if err != nil { + return nil, fmt.Errorf("failed to convert 
activationQueryValue to int, %w", err) + } + meta.activationQueryValue = activationQueryValue + } + + dbName, err := GetFromAuthOrMeta(config, "dbName") + if err != nil { + return nil, err + } + meta.dbName = dbName + + meta.unsafeSsl = false + if val, ok := config.TriggerMetadata["unsafeSsl"]; ok && val != "" { + unsafeSslValue, err := strconv.ParseBool(val) + if err != nil { + return nil, fmt.Errorf("failed to parse unsafeSsl, %w", err) + } + meta.unsafeSsl = unsafeSslValue + } + + if val, ok := config.TriggerMetadata["connectionLimit"]; ok { + connectionLimit, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to convert connectionLimit to int, %w", err) + } + meta.connectionLimit = connectionLimit + } + + // parse auth configs from ScalerConfig + arangoDBAuth, err := authentication.GetAuthConfigs(config.TriggerMetadata, config.AuthParams) + if err != nil { + return nil, err + } + meta.arangoDBAuth = arangoDBAuth + + meta.scalerIndex = config.ScalerIndex + return &meta, nil +} + +// Close disposes of arangoDB connections +func (s *arangoDBScaler) Close(ctx context.Context) error { + return nil +} + +func (s *arangoDBScaler) getQueryResult(ctx context.Context) (float64, error) { + dbExists, err := s.client.DatabaseExists(ctx, s.metadata.dbName) + if err != nil { + return -1, fmt.Errorf("failed to check if %s database exists, %w", s.metadata.dbName, err) + } + + if !dbExists { + return -1, fmt.Errorf("%s database not found", s.metadata.dbName) + } + + db, err := s.client.Database(ctx, s.metadata.dbName) + if err != nil { + return -1, fmt.Errorf("failed to connect to %s db, %w", s.metadata.dbName, err) + } + + collectionExists, err := db.CollectionExists(ctx, s.metadata.collection) + if err != nil { + return -1, fmt.Errorf("failed to check if %s collection exists, %w", s.metadata.collection, err) + } + + if !collectionExists { + return -1, fmt.Errorf("%s collection not found in %s database", s.metadata.collection, 
s.metadata.dbName) + } + + ctx = driver.WithQueryCount(ctx) + + cursor, err := db.Query(ctx, s.metadata.query, nil) + if err != nil { + return -1, fmt.Errorf("failed to execute the query, %w", err) + } + + defer cursor.Close() + + if cursor.Count() != 1 { + return -1, fmt.Errorf("more than one values received, please check the query, %w", err) + } + + var result dbResult + if _, err = cursor.ReadDocument(ctx, &result); err != nil { + return -1, fmt.Errorf("query result is not in the specified format, pleast check the query, %w", err) + } + + return result.Value, nil +} + +// GetMetricsAndActivity query from arangoDB, and return to external metrics and activity +func (s *arangoDBScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + num, err := s.getQueryResult(ctx) + if err != nil { + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("failed to inspect arangoDB, %w", err) + } + + metric := GenerateMetricInMili(metricName, num) + + return append([]external_metrics.ExternalMetricValue{}, metric), num > s.metadata.activationQueryValue, nil +} + +// GetMetricSpecForScaling get the query value for scaling +func (s *arangoDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { + externalMetric := &v2.ExternalMetricSource{ + Metric: v2.MetricIdentifier{ + Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, "arangodb"), + }, + Target: GetMetricTargetMili(s.metricType, s.metadata.queryValue), + } + metricSpec := v2.MetricSpec{ + External: externalMetric, Type: externalMetricType, + } + return []v2.MetricSpec{metricSpec} +} diff --git a/pkg/scalers/arangodb_scaler_test.go b/pkg/scalers/arangodb_scaler_test.go new file mode 100644 index 00000000000..3b4057b2f23 --- /dev/null +++ b/pkg/scalers/arangodb_scaler_test.go @@ -0,0 +1,129 @@ +package scalers + +import ( + "context" + "strings" + "testing" + + "github.com/go-logr/logr" +) + +type parseArangoDBMetadataTestData 
struct { + metadata map[string]string + authParams map[string]string + raisesError bool +} + +var testArangoDBMetadata = []parseArangoDBMetadataTestData{ + // No metadata + { + metadata: map[string]string{}, + authParams: map[string]string{}, + raisesError: true, + }, + // missing query + { + metadata: map[string]string{"endpoints": "https://localhost:8529", "collection": "demo", "queryValue": "12", "dbName": "test"}, + authParams: map[string]string{}, + raisesError: true, + }, + // with metric name + { + metadata: map[string]string{"endpoints": "https://localhost:8529", "query": `FOR t IN testCollection FILTER t.cook_time == '3 hours' RETURN t`, "collection": "demo", "queryValue": "12", "dbName": "test"}, + authParams: map[string]string{}, + raisesError: false, + }, + // from trigger auth + { + metadata: map[string]string{"endpoints": "https://localhost:8529", "query": `FOR t IN testCollection FILTER t.cook_time == '3 hours' RETURN t`, "collection": "demo", "queryValue": "12"}, + authParams: map[string]string{"dbName": "test", "username": "sample", "password": "secure"}, + raisesError: false, + }, + // wrong activationQueryValue + { + metadata: map[string]string{"endpoints": "https://localhost:8529", "query": `FOR t IN testCollection FILTER t.cook_time == '3 hours' RETURN t`, "collection": "demo", "queryValue": "12", "activationQueryValue": "aa", "dbName": "test"}, + authParams: map[string]string{}, + raisesError: true, + }, +} + +type arangoDBAuthMetadataTestData struct { + metadata map[string]string + authParams map[string]string + raisesError bool +} + +var testArangoDBAuthMetadata = []arangoDBAuthMetadataTestData{ + // success bearer default + {map[string]string{"endpoints": "https://http://34.162.13.192:8529,https://34.162.13.193:8529", "collection": "demo", "query": "FOR d IN myCollection RETURN d", "queryValue": "1", "dbName": "testdb", "authModes": "bearer"}, map[string]string{"bearerToken": "dummy-token"}, false}, + // fail bearerAuth with no token + 
{map[string]string{"endpoints": "https://http://34.162.13.192:8529,https://34.162.13.193:8529", "collection": "demo", "query": "FOR d IN myCollection RETURN d", "queryValue": "1", "dbName": "testdb", "authModes": "bearer"}, map[string]string{}, true}, + // success basicAuth + {map[string]string{"endpoints": "https://http://34.162.13.192:8529,https://34.162.13.193:8529", "collection": "demo", "query": "FOR d IN myCollection RETURN d", "queryValue": "1", "dbName": "testdb", "authModes": "basic"}, map[string]string{"username": "user", "password": "pass"}, false}, + // fail basicAuth with no username + {map[string]string{"endpoints": "https://http://34.162.13.192:8529,https://34.162.13.193:8529", "collection": "demo", "query": "FOR d IN myCollection RETURN d", "queryValue": "1", "dbName": "testdb", "authModes": "basic"}, map[string]string{}, true}, + // success basicAuth with no password + {map[string]string{"endpoints": "https://http://34.162.13.192:8529,https://34.162.13.193:8529", "collection": "demo", "query": "FOR d IN myCollection RETURN d", "queryValue": "1", "dbName": "testdb", "authModes": "basic"}, map[string]string{"username": "user"}, false}, +} + +type arangoDBMetricIdentifier struct { + metadataTestData *parseArangoDBMetadataTestData + scalerIndex int + name string +} + +var arangoDBMetricIdentifiers = []arangoDBMetricIdentifier{ + {metadataTestData: &testArangoDBMetadata[2], scalerIndex: 0, name: "s0-arangodb"}, + {metadataTestData: &testArangoDBMetadata[2], scalerIndex: 1, name: "s1-arangodb"}, +} + +func TestParseArangoDBMetadata(t *testing.T) { + for _, testData := range testArangoDBMetadata { + _, err := parseArangoDBMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) + if err != nil && !testData.raisesError { + t.Error("Expected success but got error:", err) + } + if err == nil && testData.raisesError { + t.Error("Expected error but got success") + } + } +} + +func TestArangoDBScalerAuthParams(t *testing.T) { 
+ for _, testData := range testArangoDBAuthMetadata { + meta, err := parseArangoDBMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) + + if err != nil && !testData.raisesError { + t.Error("Expected success but got error", err) + } + if testData.raisesError && err == nil { + t.Error("Expected error but got success") + } + + if err == nil { + if meta.arangoDBAuth.EnableBasicAuth && !strings.Contains(testData.metadata["authModes"], "basic") { + t.Error("wrong auth mode detected") + } + } + } +} + +func TestArangoDBGetMetricSpecForScaling(t *testing.T) { + for _, testData := range arangoDBMetricIdentifiers { + meta, err := parseArangoDBMetadata(&ScalerConfig{ + AuthParams: testData.metadataTestData.authParams, + TriggerMetadata: testData.metadataTestData.metadata, + ScalerIndex: testData.scalerIndex, + }) + if err != nil { + t.Fatal("Could not parse metadata:", err) + } + mockArangoDBScaler := arangoDBScaler{"", meta, nil, logr.Discard()} + + metricSpec := mockArangoDBScaler.GetMetricSpecForScaling(context.Background()) + metricName := metricSpec[0].External.Metric.Name + if metricName != testData.name { + t.Error("Wrong External metric source name:", metricName) + } + } +} diff --git a/pkg/scalers/externalscaler/externalscaler.pb.go b/pkg/scalers/externalscaler/externalscaler.pb.go index 20ee1054af4..95b5f44e540 100644 --- a/pkg/scalers/externalscaler/externalscaler.pb.go +++ b/pkg/scalers/externalscaler/externalscaler.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc v3.21.11 // source: externalscaler.proto package externalscaler diff --git a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go index 89ad8b78112..34e85c3c83f 100644 --- a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go +++ b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.9 +// - protoc v3.21.11 // source: externalscaler.proto package externalscaler diff --git a/pkg/scalers/liiklus/LiiklusService.pb.go b/pkg/scalers/liiklus/LiiklusService.pb.go index b101d7cc6fc..00bb5fb9e33 100644 --- a/pkg/scalers/liiklus/LiiklusService.pb.go +++ b/pkg/scalers/liiklus/LiiklusService.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc v3.21.11 // source: LiiklusService.proto package liiklus diff --git a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go index d05170338af..4edb5e16529 100644 --- a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go +++ b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.9 +// - protoc v3.21.11 // source: LiiklusService.proto package liiklus diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go index 39879cf743c..765a97dd6fc 100644 --- a/pkg/scaling/scale_handler.go +++ b/pkg/scaling/scale_handler.go @@ -565,6 +565,8 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, switch triggerType { case "activemq": return scalers.NewActiveMQScaler(config) + case "arangodb": + return scalers.NewArangoDBScaler(config) case "artemis-queue": return scalers.NewArtemisQueueScaler(config) case "aws-cloudwatch": diff --git a/tests/helper/helper.go b/tests/helper/helper.go index 1b25db557bf..630a4e570e3 100644 --- a/tests/helper/helper.go +++ b/tests/helper/helper.go @@ -318,6 +318,32 @@ func WaitForPodCountInNamespace(t *testing.T, kc *kubernetes.Clientset, namespac return false } +// Waits until all the pods in the namespace have a running status. +func WaitForAllPodRunningInNamespace(t *testing.T, kc *kubernetes.Clientset, namespace string, iterations, intervalSeconds int) bool { + for i := 0; i < iterations; i++ { + runningCount := 0 + pods, _ := kc.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{}) + + for _, pod := range pods.Items { + if pod.Status.Phase != corev1.PodRunning { + break + } + runningCount++ + } + + t.Logf("Waiting for pods in namespace to be in 'Running' status. Namespace - %s, Current - %d, Target - %d", + namespace, runningCount, len(pods.Items)) + + if runningCount == len(pods.Items) { + return true + } + + time.Sleep(time.Duration(intervalSeconds) * time.Second) + } + + return false +} + // Waits until deployment ready replica count hits target or number of iterations are done. 
func WaitForDeploymentReplicaReadyCount(t *testing.T, kc *kubernetes.Clientset, name, namespace string, target, iterations, intervalSeconds int) bool { diff --git a/tests/scalers/arangodb/arangodb_test.go b/tests/scalers/arangodb/arangodb_test.go new file mode 100644 index 00000000000..1b301df5821 --- /dev/null +++ b/tests/scalers/arangodb/arangodb_test.go @@ -0,0 +1,276 @@ +//go:build e2e +// +build e2e + +package arangodb_test + +import ( + "fmt" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . "github.com/kedacore/keda/v2/tests/helper" + "github.com/kedacore/keda/v2/tests/scalers/arangodb" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "arangodb-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + secretName = fmt.Sprintf("%s-secret", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + + arangoDBUsername = "cm9vdA==" + arangoDBName = "test" + arangoDBCollection = "test" + + minReplicaCount = 0 + maxReplicaCount = 2 +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + Database string + Collection string + TriggerAuthName string + Username string + SecretName string + MinReplicaCount int + MaxReplicaCount int +} + +const ( + deploymentTemplate = `apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: test-app + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 0 + selector: + matchLabels: + app: test-app + template: + metadata: + labels: + app: test-app + type: keda-testing + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 +` + + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + username: 
{{.Username}} +` + + triggerAuthTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthName}} + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: username + name: {{.SecretName}} + key: username +` + + scaledObjectTemplate = `apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + pollingInterval: 3 + cooldownPeriod: 1 + triggers: + - type: arangodb + metadata: + endpoints: https://example-arangodb-cluster-int.{{.TestNamespace}}.svc.cluster.local:8529 + queryValue: '3' + activationQueryValue: '3' + dbName: {{.Database}} + collection: {{.Collection}} + unsafeSsl: "true" + query: FOR doc IN {{.Collection}} COLLECT WITH COUNT INTO length RETURN {"value":length} + authModes: "basic" + authenticationRef: + name: {{.TriggerAuthName}} +` + + generateLowLevelDataJobTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: generate-low-level-data-job + namespace: {{.TestNamespace}} +spec: + template: + spec: + containers: + - image: nginx:stable + name: test + command: ["/bin/sh"] + args: ["-c", "curl --location --request POST 'https://example-arangodb-cluster-ea.{{.TestNamespace}}.svc.cluster.local:8529/_db/{{.Database}}/_api/document/{{.Collection}}' --header 'Authorization: Basic cm9vdDo=' --data-raw '[{\"Hi\": \"Nathan\"}, {\"Hi\": \"Laura\"}]' -k"] + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + activeDeadlineSeconds: 100 + backoffLimit: 2 +` + + generateDataJobTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: generate-data-job + namespace: {{.TestNamespace}} +spec: + template: + spec: + containers: + - image: nginx:stable + name: test + command: ["/bin/sh"] + args: ["-c", "curl 
--location --request POST 'https://example-arangodb-cluster-ea.{{.TestNamespace}}.svc.cluster.local:8529/_db/{{.Database}}/_api/document/{{.Collection}}' --header 'Authorization: Basic cm9vdDo=' --data-raw '[{\"Hi\": \"Harry\"}, {\"Hi\": \"Neha\"}]' -k"] + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + activeDeadlineSeconds: 100 + backoffLimit: 2 +` + + deleteDataJobTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: delete-data-job + namespace: {{.TestNamespace}} +spec: + template: + spec: + containers: + - image: nginx:stable + name: test + command: ["/bin/sh"] + args: ["-c", "curl --location --request POST 'https://example-arangodb-cluster-ea.{{.TestNamespace}}.svc.cluster.local:8529/_db/{{.Database}}/_api/cursor' --header 'Authorization: Basic cm9vdDo=' --data-raw '{\"query\": \"FOR doc in {{.Collection}} REMOVE doc in {{.Collection}}\"}' -k"] + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + activeDeadlineSeconds: 100 + backoffLimit: 2 +` +) + +func TestArangoDBScaler(t *testing.T) { + // Create kubernetes resources + kc := GetKubernetesClient(t) + + CreateNamespace(t, kc, testNamespace) + arangodb.InstallArangoDB(t, kc, testNamespace) + arangodb.SetupArangoDB(t, kc, testNamespace, arangoDBName, arangoDBCollection, arangoDBUsername) + + data, templates := getTemplateData() + KubectlApplyMultipleWithTemplate(t, data, templates) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", minReplicaCount) + + testActivation(t, kc, data) + testScaleOut(t, kc, data) + testScaleIn(t, kc, data) + + // cleanup + KubectlDeleteMultipleWithTemplate(t, data, templates) + arangodb.UninstallArangoDB(t, kc, testNamespace) + + DeleteNamespace(t, kc, 
testNamespace) + WaitForNamespaceDeletion(t, kc, testNamespace) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + MinReplicaCount: minReplicaCount, + MaxReplicaCount: maxReplicaCount, + Database: arangoDBName, + Collection: arangoDBCollection, + TriggerAuthName: triggerAuthName, + SecretName: secretName, + Username: arangoDBUsername, + }, []Template{ + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "secretTemplate", Config: secretTemplate}, + {Name: "triggerAuthTemplate", Config: triggerAuthTemplate}, + {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, + } +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing activation ---") + + KubectlApplyWithTemplate(t, data, "generateLowLevelDataJobTemplate", generateLowLevelDataJobTemplate) + assert.True(t, WaitForJobSuccess(t, kc, "generate-low-level-data-job", testNamespace, 5, 60), "test activation job failed") + + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) +} + +func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scale out ---") + + KubectlApplyWithTemplate(t, data, "generateDataJobTemplate", generateDataJobTemplate) + assert.True(t, WaitForJobSuccess(t, kc, "generate-data-job", testNamespace, 5, 60), "test scale-out job failed") + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", maxReplicaCount) +} + +func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scale in ---") + + KubectlApplyWithTemplate(t, data, "deleteDataJobTemplate", deleteDataJobTemplate) + assert.True(t, WaitForJobSuccess(t, kc, "delete-data-job", testNamespace, 5, 60), 
"test scale-in job failed") + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 5), + "replica count should be %d after 5 minutes", minReplicaCount) +} diff --git a/tests/scalers/arangodb/helper.go b/tests/scalers/arangodb/helper.go new file mode 100644 index 00000000000..b09253edbac --- /dev/null +++ b/tests/scalers/arangodb/helper.go @@ -0,0 +1,118 @@ +//go:build e2e +// +build e2e + +package arangodb + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + "github.com/kedacore/keda/v2/tests/helper" +) + +type templateData struct { + Namespace string + Database string + Collection string +} + +const ( + arangoDeploymentTemplate = `apiVersion: "database.arangodb.com/v1" +kind: "ArangoDeployment" +metadata: + name: "example-arangodb-cluster" + namespace: {{.Namespace}} +spec: + architectures: + - arm64 + - amd64 + mode: Cluster + image: "arangodb/arangodb:3.10.1" +` + + createDatabaseTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: create-db + namespace: {{.Namespace}} +spec: + template: + spec: + containers: + - image: nginx:stable + name: alpine + command: ["/bin/sh"] + args: ["-c", "curl -H 'Authorization: Basic cm9vdDo=' --location --request POST 'https://example-arangodb-cluster-ea.{{.Namespace}}.svc.cluster.local:8529/_api/database' --data-raw '{\"name\": \"{{.Database}}\"}' -k"] + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + activeDeadlineSeconds: 100 + backoffLimit: 2 +` + + createCollectionTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: create-arangodb-collection + namespace: {{.Namespace}} +spec: + template: + spec: + containers: + - image: nginx:stable + name: alpine + command: ["/bin/sh"] + args: ["-c", "curl -H 'Authorization: Basic cm9vdDo=' --location --request POST 
'https://example-arangodb-cluster-ea.{{.Namespace}}.svc.cluster.local:8529/_db/{{.Database}}/_api/collection' --data-raw '{\"name\": \"{{.Collection}}\"}' -k"] + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + activeDeadlineSeconds: 100 + backoffLimit: 2 +` +) + +func InstallArangoDB(t *testing.T, kc *kubernetes.Clientset, testNamespace string) { + t.Log("installing arangodb crds") + _, err := helper.ExecuteCommand(fmt.Sprintf("helm install arangodb-crds https://github.com/arangodb/kube-arangodb/releases/download/1.2.20/kube-arangodb-crd-1.2.20.tgz --namespace=%s --wait", testNamespace)) + assert.NoErrorf(t, err, "cannot install crds - %s", err) + + t.Log("installing arangodb operator") + _, err = helper.ExecuteCommand(fmt.Sprintf("helm install arangodb https://github.com/arangodb/kube-arangodb/releases/download/1.2.20/kube-arangodb-1.2.20.tgz --set 'operator.architectures={arm64,amd64}' --namespace=%s --wait", testNamespace)) + assert.NoErrorf(t, err, "cannot create operator deployment - %s", err) + + t.Log("creating arangodeployment resource") + helper.KubectlApplyWithTemplate(t, templateData{Namespace: testNamespace}, "arangoDeploymentTemplate", arangoDeploymentTemplate) + assert.True(t, helper.WaitForPodCountInNamespace(t, kc, testNamespace, 11, 5, 20), "pod count should be 11") + assert.True(t, helper.WaitForAllPodRunningInNamespace(t, kc, testNamespace, 5, 20), "all pods should be running") +} + +func SetupArangoDB(t *testing.T, kc *kubernetes.Clientset, testNamespace, arangoDBName, arangoDBCollection, arangoDBUsername string) { + helper.KubectlApplyWithTemplate(t, templateData{Namespace: testNamespace, Database: arangoDBName}, "createDatabaseTemplate", createDatabaseTemplate) + assert.True(t, helper.WaitForJobSuccess(t, kc, "create-db", testNamespace, 5, 10), "create database job failed") + + helper.KubectlApplyWithTemplate(t, templateData{Namespace: 
testNamespace, Database: arangoDBName, Collection: arangoDBCollection}, "createCollectionTemplate", createCollectionTemplate) + assert.True(t, helper.WaitForJobSuccess(t, kc, "create-arangodb-collection", testNamespace, 5, 10), "create collection job failed") +} + +func UninstallArangoDB(t *testing.T, kc *kubernetes.Clientset, namespace string) { + helper.KubectlDeleteMultipleWithTemplate(t, templateData{Namespace: namespace}, []helper.Template{{Name: "arangoDeploymentTemplate", Config: arangoDeploymentTemplate}}) + + _, err := helper.ExecuteCommand(fmt.Sprintf("helm uninstall arangodb --namespace=%s --wait", namespace)) + assert.NoErrorf(t, err, "cannot uninstall arangodb operator - %s", err) + + _, err = helper.ExecuteCommand(fmt.Sprintf("helm uninstall arangodb-crds --namespace=%s --wait", namespace)) + assert.NoErrorf(t, err, "cannot uninstall arangodb crds - %s", err) +} diff --git a/vendor/github.com/arangodb/go-driver/.envrc b/vendor/github.com/arangodb/go-driver/.envrc new file mode 100644 index 00000000000..a1a8fa6e212 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/.envrc @@ -0,0 +1,8 @@ +export GOBUILDDIR=$(pwd)/.gobuild +export GOPATH=$GOBUILDDIR:$GOPATH +PATH_add $GOBUILDDIR/bin + +if [ ! -e ${GOBUILDDIR} ]; then + mkdir -p ${GOBUILDDIR}/src/github.com/arangodb/ + ln -s ../../../.. 
${GOBUILDDIR}/src/github.com/arangodb/go-driver +fi \ No newline at end of file diff --git a/vendor/github.com/arangodb/go-driver/.gitignore b/vendor/github.com/arangodb/go-driver/.gitignore new file mode 100644 index 00000000000..99346b8671b --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/.gitignore @@ -0,0 +1,15 @@ +#Go packages +.gobuild + +#Temporary tests files +.tmp + +#IDE's files +.idea + +#Vendor files +vendor + +# Helper files +debug/ +*.log diff --git a/vendor/github.com/arangodb/go-driver/.golangci.yaml b/vendor/github.com/arangodb/go-driver/.golangci.yaml new file mode 100644 index 00000000000..6854ac3f3c2 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/.golangci.yaml @@ -0,0 +1,52 @@ +--- + +run: + issues-exit-code: 3 + timeout: 30m + skip-dirs: + - vendor + +linters: + fast: false + enable-all: false + disable-all: false + presets: + - performance + - format + - complexity + - bugs + - unused + disable: + - staticcheck + - errcheck + - govet + - gosec + - ineffassign + - noctx + - contextcheck + - unparam + - scopelint + - exhaustive + - cyclop + - errorlint + - errchkjson + - nestif + - prealloc + - maligned + - funlen + - typecheck + - deadcode + - unused + - maintidx + - varcheck + - gocognit + - gofumpt + - gocyclo + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/arangodb) + - prefix(github.com/arangodb/go-driver) diff --git a/vendor/github.com/arangodb/go-driver/.travis.yml b/vendor/github.com/arangodb/go-driver/.travis.yml new file mode 100644 index 00000000000..571b6c2bc01 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/.travis.yml @@ -0,0 +1,45 @@ +sudo: required + +before_install: + - | + mkdir -p $HOME/resources + for i in {0..3} + do + + if ! 
[ -f "$HOME/resources/itzpapalotl-v1.2.0.zip" ]; then + curl -L0 -o $HOME/resources/itzpapalotl-v1.2.0.zip "https://github.com/arangodb-foxx/demo-itzpapalotl/archive/v1.2.0.zip" + fi + + SHA=$(sha256sum $HOME/resources/itzpapalotl-v1.2.0.zip | cut -f 1 -d " ") + if [ "${SHA}" = "86117db897efe86cbbd20236abba127a08c2bdabbcd63683567ee5e84115d83a" ]; then + break + fi + + $HOME/resources/itzpapalotl-v1.2.0.zip + done + - | + if ! [ -f "$HOME/resources/itzpapalotl-v1.2.0.zip" ]; then + travis_terminate 1 + fi + +services: + - docker + +language: go + +env: + - TEST_SUITE=run-unit-tests GOIMAGE=gcr.io/gcr-for-testing/golang:1.17.6 ALWAYS=1 + - TEST_SUITE=run-tests-single GOIMAGE=gcr.io/gcr-for-testing/golang:1.17.6 STARTER=gcr.io/gcr-for-testing/arangodb/arangodb-starter:latest ALPINE_IMAGE=gcr.io/gcr-for-testing/alpine:3.4 ARANGODB=eu.gcr.io/arangodb-ci/official/arangodb/arangodb:3.8.5.1 + - TEST_SUITE=run-tests-single GOIMAGE=gcr.io/gcr-for-testing/golang:1.17.6 STARTER=gcr.io/gcr-for-testing/arangodb/arangodb-starter:latest ALPINE_IMAGE=gcr.io/gcr-for-testing/alpine:3.4 ARANGODB=eu.gcr.io/arangodb-ci/official/arangodb/arangodb:3.9.0 + - TEST_SUITE=run-tests-cluster GOIMAGE=gcr.io/gcr-for-testing/golang:1.17.6 STARTER=gcr.io/gcr-for-testing/arangodb/arangodb-starter:latest ALPINE_IMAGE=gcr.io/gcr-for-testing/alpine:3.4 ARANGODB=gcr.io/gcr-for-testing/arangodb/arangodb-preview:3.10.0-rc.1 + - TEST_SUITE=run-tests-single GOIMAGE=gcr.io/gcr-for-testing/golang:1.17.6 STARTER=gcr.io/gcr-for-testing/arangodb/arangodb-starter:latest ALPINE_IMAGE=gcr.io/gcr-for-testing/alpine:3.4 ARANGODB=gcr.io/gcr-for-testing/arangodb/arangodb-preview:3.10.0-rc.1 TEST_DISALLOW_UNKNOWN_FIELDS=false ALWAYS=1 + - TEST_SUITE=run-v2-tests-single GOIMAGE=gcr.io/gcr-for-testing/golang:1.17.6 STARTER=gcr.io/gcr-for-testing/arangodb/arangodb-starter:latest ALPINE_IMAGE=gcr.io/gcr-for-testing/alpine:3.4 ARANGODB=eu.gcr.io/arangodb-ci/official/arangodb/arangodb:3.8.5.1 + - 
TEST_SUITE=run-v2-tests-single GOIMAGE=gcr.io/gcr-for-testing/golang:1.17.6 STARTER=gcr.io/gcr-for-testing/arangodb/arangodb-starter:latest ALPINE_IMAGE=gcr.io/gcr-for-testing/alpine:3.4 ARANGODB=eu.gcr.io/arangodb-ci/official/arangodb/arangodb:3.9.0 + - TEST_SUITE=run-v2-tests-single GOIMAGE=gcr.io/gcr-for-testing/golang:1.17.6 STARTER=gcr.io/gcr-for-testing/arangodb/arangodb-starter:latest ALPINE_IMAGE=gcr.io/gcr-for-testing/alpine:3.4 ARANGODB=gcr.io/gcr-for-testing/arangodb/arangodb-preview:3.10.0-rc.1 ALWAYS=1 + +script: + - | + if [ "$TRAVIS_PULL_REQUEST" != "false" ] || [ ! -z "$ALWAYS" ]; then + make linter + make $TEST_SUITE TEST_RESOURCES="$HOME/resources/" VERBOSE=1; + fi diff --git a/vendor/github.com/arangodb/go-driver/CHANGELOG.md b/vendor/github.com/arangodb/go-driver/CHANGELOG.md new file mode 100644 index 00000000000..9d99609a269 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/CHANGELOG.md @@ -0,0 +1,78 @@ +# Change Log + +## [master](https://github.com/arangodb/go-driver/tree/master) (N/A) + +## [1.4.0](https://github.com/arangodb/go-driver/tree/v1.4.0) (2022-10-04) +- Add `hex` property to analyzer's properties +- Add support for `computedValues` +- Optional `computeOn` field in `computedValues` +- Add support for `computedValues` into collection inventory +- Update the structures to align them with the ArangoDB 3.10 release +- Add `IsNotFoundGeneral` and `IsDataSourceOrDocumentNotFound` methods - deprecate `IsNotFound` +- Add support for optimizer rules (AQL query) +- New `LegacyPolygons` parameter for Geo Indexes +- New parameters (`cacheEnabled` and `storedValues`) for Persistent Indexes +- New analyzers: `classification`, `nearest neighbors`, `minhash` +- Add support for Inverted index +- Deprecate fulltext index +- Add support for Pregel API +- Add tests to check support for Enterprise Graphs +- Search View v2 (`search-alias`) +- Add Rename View support +- Add support for `Metrics` + +## 
[1.3.3](https://github.com/arangodb/go-driver/tree/v1.3.3) (2022-07-27) +- Fix `lastValue` field type +- Setup Go-lang linter with minimal configuration +- Use Go 1.17.6 +- Add missing `deduplicate` param to PersistentIndex + +## [1.3.2](https://github.com/arangodb/go-driver/tree/v1.3.2) (2022-05-16) +- Fix selectivityEstimate Index field type + +## [1.3.1](https://github.com/arangodb/go-driver/tree/v1.3.1) (2022-03-23) +- Add support for `exclusive` field for transaction options +- Fix cursor executionTime statistics getter +- Fix cursor warnings field type +- Fix for DocumentMeta name field overrides name field + +## [1.3.0](https://github.com/arangodb/go-driver/tree/v1.3.0) (2022-03-17) +- Disallow unknown fields feature +- inBackground parameter in ArangoSearch links +- ZKD indexes +- Hybrid SmartGraphs +- Segmentation and Collation Analyzers +- Bypass caching for specific collections +- Overload Control +- [V2] Add support for streaming the response body by the caller. +- [V2] Bugfix with escaping the URL path twice. +- Bugfix for the satellites' collection shard info. +- [V2] Support for satellites' collections. + +## [1.2.1](https://github.com/arangodb/go-driver/tree/v1.2.1) (2021-09-21) +- Add support for fetching shards' info by the given collection name. +- Change versioning to be go mod compatible +- Add support for ForceOneShardAttributeValue in Query + +## [1.2.0](https://github.com/arangodb/go-driver/tree/1.2.0) (2021-08-04) +- Add support for AQL, Pipeline, Stopwords, GeoJSON and GeoPoint Arango Search analyzers. +- Add `estimates` field to indexes properties. +- Add tests for 3.8 ArangoDB and remove tests for 3.5. +- Add Plan support in Query execution. +- Change Golang version from 1.13.4 to 1.16.6. +- Add graceful shutdown for the coordinators. 
+- Replace 'github.com/dgrijalva/jwt-go' with 'github.com/golang-jwt/jwt' + +## [1.1.1](https://github.com/arangodb/go-driver/tree/1.1.1) (2020-11-13) +- Add Driver V2 in Alpha version +- Add HTTP2 support for V1 and V2 +- Don't omit the `stopwords` field. The field is mandatory in 3.6 ArangoDB + +## [1.1.0](https://github.com/arangodb/go-driver/tree/1.1.0) (2020-08-11) +- Use internal coordinator communication for cursors if specified coordinator was not found on endpoint list +- Add support for Overwrite Mode (ArangoDB 3.7) +- Add support for Schema Collection options (ArangoDB 3.7) +- Add support for Disjoint and Satellite Graphs options (ArangoDB 3.7) + +## [1.0.0](https://github.com/arangodb/go-driver/tree/1.0.0) (N/A) +- Enable proper CHANGELOG and versioning diff --git a/vendor/github.com/arangodb/go-driver/CONTRIBUTING.md b/vendor/github.com/arangodb/go-driver/CONTRIBUTING.md new file mode 100644 index 00000000000..6a0faffc235 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/CONTRIBUTING.md @@ -0,0 +1,63 @@ +Contributing +============ + +We welcome bug fixes and patches from 3rd party contributors. Please +see the [Contributor Agreement](https://www.arangodb.com/community#contribute) +for details. + +Please follow these guidelines if you want to contribute to ArangoDB: + +Reporting Bugs +-------------- + +When reporting bugs, please use our issue tracker on GitHub. Please make sure +to include the version number of ArangoDB and the commit hash of the go-driver in your bug report, along with the +platform you are using (e.g. `Linux OpenSuSE x86_64`). Please also include the +ArangoDB startup mode (daemon, console, supervisor mode), type of connection used +towards ArangoDB plus any special configuration. +This will help us reproducing and finding bugs. + +Please also take the time to check there are no similar/identical issues open +yet. 
+ +Contributing features, documentation, tests +------------------------------------------- + +* Create a new branch in your fork, based on the **master** branch + +* Develop and test your modifications there + +* Commit as you like, but preferably in logical chunks. Use meaningful commit + messages and make sure you do not commit unnecessary files (e.g. object + files). It is normally a good idea to reference the issue number from the + commit message so the issues will get updated automatically with comments. + +* If the modifications change any documented behavior or add new features, + document the changes and provide application tests in the `test` folder. + All documentation should be written in American English (AE). + +* When done, run the complete test suite (`make run-tests`) and make sure all tests pass. + +* When finished, push the changes to your GitHub repository and send a pull + request from your fork to the ArangoDB repository. Please make sure to select + the appropriate branches there. This will most likely be **master**. + +* You must use the Apache License for your changes and have signed our + [CLA](https://www.arangodb.com/documents/cla.pdf). We cannot accept pull requests + from contributors that didn't sign the CLA. + +* Please let us know if you plan to work on a ticket. This way we can make sure + redundant work is avoided. 
+ + +Additional Resources +-------------------- + +* [ArangoDB website](https://www.arangodb.com/) + +* [ArangoDB on Twitter](https://twitter.com/arangodb) + +* [General GitHub documentation](https://help.github.com/) + +* [GitHub pull request documentation](https://help.github.com/send-pull-requests/) + diff --git a/vendor/github.com/arangodb/go-driver/Dockerfile.debug b/vendor/github.com/arangodb/go-driver/Dockerfile.debug new file mode 100644 index 00000000000..cb13e63175a --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/Dockerfile.debug @@ -0,0 +1,11 @@ +FROM golang:1.17.6 as builder + +ARG TESTS_DIRECTORY +ARG TESTS_ROOT_PATH="." + +RUN go install github.com/go-delve/delve/cmd/dlv@latest + +WORKDIR /app/ +ADD . /app/ + +RUN cd $TESTS_ROOT_PATH && go test -gcflags "all=-N -l" -c -o /test_debug.test $TESTS_DIRECTORY diff --git a/vendor/github.com/arangodb/go-driver/HEADER b/vendor/github.com/arangodb/go-driver/HEADER new file mode 100644 index 00000000000..f5c2dabfd27 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/HEADER @@ -0,0 +1,18 @@ + +DISCLAIMER + +Copyright 2020 ArangoDB GmbH, Cologne, Germany + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +Copyright holder is ArangoDB GmbH, Cologne, Germany \ No newline at end of file diff --git a/vendor/github.com/arangodb/go-driver/LICENSE b/vendor/github.com/arangodb/go-driver/LICENSE new file mode 100644 index 00000000000..b8ff39b5ad4 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 ArangoDB GmbH + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/arangodb/go-driver/MAINTAINERS.md b/vendor/github.com/arangodb/go-driver/MAINTAINERS.md new file mode 100644 index 00000000000..5677dab0a6e --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/MAINTAINERS.md @@ -0,0 +1,27 @@ +# Maintainer Instructions + +- Always preserve backward compatibility +- Build using `make clean && make` +- After merging PR, always run `make changelog` and commit changes +- Set ArangoDB docker container (used for testing) using `export ARANGODB=` +- Run tests using: + - `make run-tests-single` + - `make run-tests-resilientsingle` + - `make run-tests-cluster`. 
+- The test can be launched with the flag `RACE=on` which means that test will be performed with the race detector, e.g: + - `RACE=on make run-tests-single` +- Always create changes in a PR + + +# Change Golang version + +- Edit the .travis file and change all occurrences of `golang:x.y.z-stretch` to the appropriate version. + +- Edit the Makefile and change the line `GOVERSION ?= 1.16.6` into the required version. + +## Debugging with DLV + +To attach DLV debugger run tests with `DEBUG=true` flag e.g.: +```shell +DEBUG=true TESTOPTIONS="-test.run TestResponseHeader -test.v" make run-tests-single-json-with-auth +``` diff --git a/vendor/github.com/arangodb/go-driver/Makefile b/vendor/github.com/arangodb/go-driver/Makefile new file mode 100644 index 00000000000..17a76344608 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/Makefile @@ -0,0 +1,565 @@ +PROJECT := go-driver +SCRIPTDIR := $(shell pwd) + +CURR=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) +ROOTDIR:=$(CURR) + +GOVERSION ?= 1.17.6 +GOIMAGE ?= golang:$(GOVERSION) +GOV2IMAGE ?= $(GOIMAGE) +ALPINE_IMAGE ?= alpine:3.14 +TMPDIR := ${SCRIPTDIR}/.tmp + +DOCKER_CMD:=docker run + +GOBUILDTAGS:=$(TAGS) +GOBUILDTAGSOPT=-tags "$(GOBUILDTAGS)" + +ARANGODB ?= arangodb/arangodb:latest +STARTER ?= arangodb/arangodb-starter:latest + +ifndef TESTOPTIONS + TESTOPTIONS := +endif +ifdef VERBOSE + TESTVERBOSEOPTIONS := -v +endif + +CGO_ENABLED=0 +ifdef RACE + TESTVERBOSEOPTIONS += -race + CGO_ENABLED=1 +endif + + +ORGPATH := github.com/arangodb +REPONAME := $(PROJECT) +REPODIR := $(ORGDIR)/$(REPONAME) +REPOPATH := $(ORGPATH)/$(REPONAME) + +SOURCES_EXCLUDE:=vendor +SOURCES := $(shell find "$(ROOTDIR)" $(foreach SOURCE,$(SOURCES_EXCLUDE),-not -path '$(ROOTDIR)/$(SOURCE)/*') -name '*.go') + +# Test variables + +ifndef TESTCONTAINER + TESTCONTAINER := $(PROJECT)-test +endif +ifndef DBCONTAINER + DBCONTAINER := $(TESTCONTAINER)-db +endif + +ifeq ("$(TEST_AUTH)", "none") + ARANGOENV := -e ARANGO_NO_AUTH=1 + 
TEST_AUTHENTICATION := + TESTS := $(REPOPATH) $(REPOPATH)/test +else ifeq ("$(TEST_AUTH)", "rootpw") + ARANGOENV := -e ARANGO_ROOT_PASSWORD=rootpw + TEST_AUTHENTICATION := basic:root:rootpw + GOBUILDTAGS += auth + TESTS := $(REPOPATH)/test +else ifeq ("$(TEST_AUTH)", "jwt") + ARANGOENV := -e ARANGO_ROOT_PASSWORD=rootpw + TEST_AUTHENTICATION := jwt:root:rootpw + GOBUILDTAGS += auth + TESTS := $(REPOPATH)/test + JWTSECRET := testing + JWTSECRETFILE := "${TMPDIR}/${TESTCONTAINER}-jwtsecret" + ARANGOVOL := -v "$(JWTSECRETFILE):/jwtsecret" + ARANGOARGS := --server.jwt-secret=/jwtsecret +endif + +TEST_NET := --net=container:$(TESTCONTAINER)-ns +TEST_ENDPOINTS := http://localhost:7001 +TESTS := $(REPOPATH)/test +ifeq ("$(TEST_AUTH)", "rootpw") + CLUSTERENV := JWTSECRET=testing + TEST_JWTSECRET := testing + TEST_AUTHENTICATION := basic:root: +endif +ifeq ("$(TEST_AUTH)", "jwt") + CLUSTERENV := JWTSECRET=testing + TEST_JWTSECRET := testing + TEST_AUTHENTICATION := jwt:root: +endif +ifeq ("$(TEST_AUTH)", "jwtsuper") + CLUSTERENV := JWTSECRET=testing + TEST_JWTSECRET := testing + TEST_AUTHENTICATION := super:testing +endif +ifeq ("$(TEST_SSL)", "auto") + CLUSTERENV := SSL=auto $(CLUSTERENV) + TEST_ENDPOINTS = https://localhost:7001 +endif + +ifeq ("$(TEST_CONNECTION)", "vst") + TESTS := $(REPOPATH)/test +ifndef TEST_CONTENT_TYPE + TEST_CONTENT_TYPE := vpack +endif +endif + +ifeq ("$(TEST_BENCHMARK)", "true") + TAGS := -bench=. 
-run=notests -cpu=1,2,4 + TESTS := $(REPOPATH)/test +endif + +ifdef TEST_ENDPOINTS_OVERRIDE + TEST_NET := --net=host + TEST_ENDPOINTS := $(TEST_ENDPOINTS_OVERRIDE) +endif + +ifdef TEST_NET_OVERRIDE + TEST_NET := $(TEST_NET_OVERRIDE) +endif + +ifdef ENABLE_VST11 + VST11_SINGLE_TESTS := run-tests-single-vst-1.1 + VST11_RESILIENTSINGLE_TESTS := run-tests-resilientsingle-vst-1.1 + VST11_CLUSTER_TESTS := run-tests-cluster-vst-1.1 +endif + +TEST_RESOURCES_VOLUME := +ifdef TEST_RESOURCES + TEST_RESOURCES_VOLUME := -v ${TEST_RESOURCES}:/tmp/resources +endif + +ifeq ("$(DEBUG)", "true") + GOIMAGE := go-driver-tests:debug + DOCKER_DEBUG_ARGS := --security-opt=seccomp:unconfined + DEBUG_PORT := 2345 + + DOCKER_RUN_CMD := $(DOCKER_DEBUG_ARGS) $(GOIMAGE) /go/bin/dlv --listen=:$(DEBUG_PORT) --headless=true --api-version=2 exec /test_debug.test -- $(TESTOPTIONS) + DOCKER_V2_RUN_CMD := $(DOCKER_RUN_CMD) +else + DOCKER_RUN_CMD := $(GOIMAGE) go test $(GOBUILDTAGSOPT) $(TESTOPTIONS) $(TESTVERBOSEOPTIONS) $(TESTS) + DOCKER_V2_RUN_CMD := $(GOV2IMAGE) go test $(GOBUILDTAGSOPT) $(TESTOPTIONS) $(TESTVERBOSEOPTIONS) ./tests +endif + +.PHONY: all build clean linter run-tests + +all: build + +build: $(SOURCES) + go build -v $(REPOPATH) $(REPOPATH)/http $(REPOPATH)/vst $(REPOPATH)/agency $(REPOPATH)/jwt + +clean: + @rm -rf "${TMPDIR}" + +.PHONY: changelog +changelog: + @$(DOCKER_CMD) --rm \ + -e CHANGELOG_GITHUB_TOKEN=$(shell cat ~/.arangodb/github-token) \ + -v "${ROOTDIR}":/usr/local/src/your-app \ + ferrarimarco/github-changelog-generator \ + --user arangodb \ + --project go-driver \ + --no-author \ + --unreleased-label "Master" + +run-tests: run-unit-tests run-tests-single run-tests-resilientsingle run-tests-cluster + +# The below rule exists only for backward compatibility. 
+run-tests-http: run-unit-tests + +run-unit-tests: run-v2-unit-tests + @$(DOCKER_CMD) \ + --rm \ + -v "${ROOTDIR}":/usr/code \ + -e CGO_ENABLED=$(CGO_ENABLED) \ + -w /usr/code/ \ + $(GOIMAGE) \ + go test $(TESTOPTIONS) $(REPOPATH)/http $(REPOPATH)/agency + +run-v2-unit-tests: + @$(DOCKER_CMD) \ + --rm \ + -v "${ROOTDIR}"/v2:/usr/code \ + -e CGO_ENABLED=$(CGO_ENABLED) \ + -w /usr/code/ \ + $(GOIMAGE) \ + go test $(TESTOPTIONS) $(REPOPATH)/v2/connection + +# Single server tests +run-tests-single: run-tests-single-json run-tests-single-vpack run-tests-single-vst-1.0 $(VST11_SINGLE_TESTS) + +run-tests-single-json: run-tests-single-json-with-auth run-tests-single-json-no-auth run-tests-single-json-jwt-super run-tests-single-json-ssl + +run-tests-single-vpack: run-tests-single-vpack-with-auth run-tests-single-vpack-no-auth run-tests-single-vpack-ssl + +run-tests-single-vst-1.0: run-tests-single-vst-1.0-with-auth run-tests-single-vst-1.0-no-auth run-tests-single-vst-1.0-ssl + +run-tests-single-vst-1.1: run-tests-single-vst-1.1-with-auth run-tests-single-vst-1.1-jwt-auth run-tests-single-vst-1.1-no-auth run-tests-single-vst-1.1-ssl run-tests-single-vst-1.1-jwt-ssl + +run-tests-single-json-no-auth: + @echo "Single server, HTTP+JSON, no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-single-vpack-no-auth: + @echo "Single server, HTTP+Velocypack, no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-single-vst-1.0-no-auth: + @echo "Single server, Velocystream 1.0, no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-single-vst-1.1-no-auth: + @echo "Single server, Velocystream 1.1, no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-single-json-with-auth: + @echo "Single server, HTTP+JSON, 
with authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-single-json-http2-with-auth: + @echo "Single server, HTTP+JSON, with authentication" + @${MAKE} TEST_MODE="single" TAGS="http2" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-single-vpack-with-auth: + @echo "Single server, HTTP+Velocypack, with authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-single-vst-1.0-with-auth: + @echo "Single server, Velocystream 1.0, with authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-single-vst-1.1-with-auth: + @echo "Single server, Velocystream 1.1, with authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-single-vst-1.1-jwt-auth: + @echo "Single server, Velocystream 1.1, JWT authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="jwt" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-single-json-jwt-super: + @echo "Single server, HTTP+JSON, JWT super authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="jwtsuper" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-single-json-ssl: + @echo "Single server, HTTP+JSON, with authentication, SSL" + @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-single-vpack-ssl: + @echo "Single server, HTTP+Velocypack, with authentication, SSL" + @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-single-vst-1.0-ssl: + @echo "Single server, Velocystream 1.0, with authentication, SSL" + @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-single-vst-1.1-ssl: + @echo "Single server, Velocystream 1.1, with authentication, SSL" 
+ @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-single-vst-1.1-jwt-ssl: + @echo "Single server, Velocystream 1.1, JWT authentication, SSL" + @${MAKE} TEST_MODE="single" TEST_AUTH="jwt" TEST_SSL="auto" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +# ResilientSingle server tests +run-tests-resilientsingle: run-tests-resilientsingle-json run-tests-resilientsingle-vpack run-tests-resilientsingle-vst-1.0 $(VST11_RESILIENTSINGLE_TESTS) + +run-tests-resilientsingle-json: run-tests-resilientsingle-json-with-auth run-tests-resilientsingle-json-no-auth + +run-tests-resilientsingle-vpack: run-tests-resilientsingle-vpack-with-auth run-tests-resilientsingle-vpack-no-auth + +run-tests-resilientsingle-vst-1.0: run-tests-resilientsingle-vst-1.0-with-auth run-tests-resilientsingle-vst-1.0-no-auth + +run-tests-resilientsingle-vst-1.1: run-tests-resilientsingle-vst-1.1-with-auth run-tests-resilientsingle-vst-1.1-jwt-auth run-tests-resilientsingle-vst-1.1-no-auth + +run-tests-resilientsingle-json-no-auth: + @echo "Resilient Single server, HTTP+JSON, no authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="none" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-resilientsingle-vpack-no-auth: + @echo "Resilient Single server, HTTP+Velocypack, no authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="none" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-resilientsingle-vst-1.0-no-auth: + @echo "Resilient Single server, Velocystream 1.0, no authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-resilientsingle-vst-1.1-no-auth: + @echo "Resilient Single server, Velocystream 1.1, no authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-resilientsingle-json-with-auth: + @echo "Resilient Single server, 
HTTP+JSON, with authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-resilientsingle-vpack-with-auth: + @echo "Resilient Single server, HTTP+Velocypack, with authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-resilientsingle-vst-1.0-with-auth: + @echo "Resilient Single server, Velocystream 1.0, with authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-resilientsingle-vst-1.1-with-auth: + @echo "Resilient Single server, Velocystream 1.1, with authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-resilientsingle-vst-1.1-jwt-auth: + @echo "Resilient Single server, Velocystream 1.1, JWT authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="jwt" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +# Cluster mode tests +run-tests-cluster: run-tests-cluster-json run-tests-cluster-vpack run-tests-cluster-vst-1.0 $(VST11_CLUSTER_TESTS) + +run-tests-cluster-json: run-tests-cluster-json-no-auth run-tests-cluster-json-with-auth run-tests-cluster-json-ssl + +run-tests-cluster-vpack: run-tests-cluster-vpack-no-auth run-tests-cluster-vpack-with-auth run-tests-cluster-vpack-ssl + +run-tests-cluster-vst-1.0: run-tests-cluster-vst-1.0-no-auth run-tests-cluster-vst-1.0-with-auth run-tests-cluster-vst-1.0-ssl + +run-tests-cluster-vst-1.1: run-tests-cluster-vst-1.1-no-auth run-tests-cluster-vst-1.1-with-auth run-tests-cluster-vst-1.1-ssl + +run-tests-cluster-json-no-auth: + @echo "Cluster server, JSON, no authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="none" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-cluster-vpack-no-auth: + @echo "Cluster server, Velocypack, no authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="none" 
TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-cluster-vst-1.0-no-auth: + @echo "Cluster server, Velocystream 1.0, no authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-cluster-vst-1.1-no-auth: + @echo "Cluster server, Velocystream 1.1, no authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-cluster-json-with-auth: + @echo "Cluster server, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-cluster-json-jwt-super: + @echo "Cluster server, HTTP+JSON, JWT super authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="jwtsuper" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-cluster-vpack-with-auth: + @echo "Cluster server, Velocypack, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-cluster-vst-1.0-with-auth: + @echo "Cluster server, Velocystream 1.0, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-cluster-vst-1.1-with-auth: + @echo "Cluster server, Velocystream 1.1, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-cluster-json-ssl: + @echo "Cluster server, SSL, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-cluster-vpack-ssl: + @echo "Cluster server, Velocypack, SSL, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-cluster-vst-1.0-ssl: + @echo "Cluster server, Velocystream 1.0, SSL, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONNECTION="vst" TEST_CVERSION="1.0" 
__run_tests + +run-tests-cluster-vst-1.1-ssl: + @echo "Cluster server, Velocystream 1.1, SSL, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +# Internal test tasks +__run_tests: __test_debug__ __test_prepare __test_go_test __test_cleanup + +__test_go_test: + $(DOCKER_CMD) \ + --name=$(TESTCONTAINER) \ + $(TEST_NET) \ + -v "${ROOTDIR}":/usr/code ${TEST_RESOURCES_VOLUME} \ + -e TEST_ENDPOINTS=$(TEST_ENDPOINTS) \ + -e TEST_NOT_WAIT_UNTIL_READY=$(TEST_NOT_WAIT_UNTIL_READY) \ + -e TEST_AUTHENTICATION=$(TEST_AUTHENTICATION) \ + -e TEST_JWTSECRET=$(TEST_JWTSECRET) \ + -e TEST_CONNECTION=$(TEST_CONNECTION) \ + -e TEST_CVERSION=$(TEST_CVERSION) \ + -e TEST_CONTENT_TYPE=$(TEST_CONTENT_TYPE) \ + -e TEST_PPROF=$(TEST_PPROF) \ + -e TEST_MODE=$(TEST_MODE) \ + -e TEST_BACKUP_REMOTE_REPO=$(TEST_BACKUP_REMOTE_REPO) \ + -e TEST_BACKUP_REMOTE_CONFIG='$(TEST_BACKUP_REMOTE_CONFIG)' \ + -e TEST_DEBUG='$(TEST_DEBUG)' \ + -e TEST_ENABLE_SHUTDOWN=$(TEST_ENABLE_SHUTDOWN) \ + -e TEST_REQUEST_LOG=$(TEST_REQUEST_LOG) \ + -e TEST_DISALLOW_UNKNOWN_FIELDS=$(TEST_DISALLOW_UNKNOWN_FIELDS) \ + -e GODEBUG=tls13=1 \ + -e CGO_ENABLED=$(CGO_ENABLED) \ + -w /usr/code/ \ + $(DOCKER_RUN_CMD) && echo "success!" || \ + { echo "failure! 
\n\nARANGODB-STARTER logs:"; docker logs ${TESTCONTAINER}-s; \ + echo "\nARANGODB logs:"; docker ps -f name=${TESTCONTAINER}-s- -q | xargs -L 1 docker logs; exit 1; } + +# Internal test tasks +__run_v2_tests: __test_v2_debug__ __test_prepare __test_v2_go_test __test_cleanup + +__test_v2_go_test: + $(DOCKER_CMD) \ + --name=$(TESTCONTAINER) \ + $(TEST_NET) \ + -v "${ROOTDIR}":/usr/code:ro ${TEST_RESOURCES_VOLUME} \ + -e TEST_ENDPOINTS=$(TEST_ENDPOINTS) \ + -e TEST_NOT_WAIT_UNTIL_READY=$(TEST_NOT_WAIT_UNTIL_READY) \ + -e TEST_AUTHENTICATION=$(TEST_AUTHENTICATION) \ + -e TEST_JWTSECRET=$(TEST_JWTSECRET) \ + -e TEST_MODE=$(TEST_MODE) \ + -e TEST_BACKUP_REMOTE_REPO=$(TEST_BACKUP_REMOTE_REPO) \ + -e TEST_BACKUP_REMOTE_CONFIG='$(TEST_BACKUP_REMOTE_CONFIG)' \ + -e TEST_DEBUG='$(TEST_DEBUG)' \ + -e TEST_ENABLE_SHUTDOWN=$(TEST_ENABLE_SHUTDOWN) \ + -e GODEBUG=tls13=1 \ + -e CGO_ENABLED=$(CGO_ENABLED) \ + -w /usr/code/v2/ \ + $(DOCKER_V2_RUN_CMD) && echo "success!" || \ + { echo "failure! \n\nARANGODB-STARTER logs:"; docker logs ${TESTCONTAINER}-s; \ + echo "\nARANGODB logs:"; docker ps -f name=${TESTCONTAINER}-s- -q | xargs -L 1 docker logs; exit 1; } + +__test_debug__: +ifeq ("$(DEBUG)", "true") + @docker build -f Dockerfile.debug --build-arg "TESTS_DIRECTORY=./test" -t $(GOIMAGE) . +endif + +__test_v2_debug__: +ifeq ("$(DEBUG)", "true") + @docker build -f Dockerfile.debug --build-arg "TESTS_DIRECTORY=./tests" --build-arg "TESTS_ROOT_PATH=v2" -t $(GOIMAGE) . 
+endif + +__test_prepare: +ifdef TEST_ENDPOINTS_OVERRIDE + @-docker rm -f -v $(TESTCONTAINER) &> /dev/null + @sleep 3 +else +ifdef JWTSECRET + echo "$JWTSECRET" > "${JWTSECRETFILE}" +endif + @-docker rm -f -v $(TESTCONTAINER) &> /dev/null + @mkdir -p "${TMPDIR}" + @echo "${TMPDIR}" + @TESTCONTAINER=$(TESTCONTAINER) ARANGODB=$(ARANGODB) ALPINE_IMAGE=$(ALPINE_IMAGE) ENABLE_BACKUP=$(ENABLE_BACKUP) ARANGO_LICENSE_KEY=$(ARANGO_LICENSE_KEY) STARTER=$(STARTER) STARTERMODE=$(TEST_MODE) TMPDIR="${TMPDIR}" DEBUG_PORT=$(DEBUG_PORT) $(CLUSTERENV) "${ROOTDIR}/test/cluster.sh" start +endif + +__test_cleanup: +ifdef TESTCONTAINER + @TESTCONTAINERS=$$(docker ps -a -q --filter="name=$(TESTCONTAINER)") + @if [ -n "$$TESTCONTAINERS" ]; then docker rm -f -v $$(docker ps -a -q --filter="name=$(TESTCONTAINER)"); fi +endif +ifndef TEST_ENDPOINTS_OVERRIDE + @TESTCONTAINER=$(TESTCONTAINER) ARANGODB=$(ARANGODB) ALPINE_IMAGE=$(ALPINE_IMAGE) STARTER=$(STARTER) STARTERMODE=$(TEST_MODE) "${ROOTDIR}/test/cluster.sh" cleanup +else + @-docker rm -f -v $(TESTCONTAINER) &> /dev/null +endif + @sleep 3 + + +run-tests-cluster-failover: + # Note that we use 127.0.0.1:7001.. as endpoints, so we force using IPv4 + # This is essential since we only block IPv4 ports in the test. 
+ @echo "Cluster server, failover, no authentication" + @TESTCONTAINER=$(TESTCONTAINER) ARANGODB=$(ARANGODB) ALPINE_IMAGE=$(ALPINE_IMAGE) "${ROOTDIR}/test/cluster.sh" start + go get github.com/coreos/go-iptables/iptables + $(DOCKER_CMD) \ + --rm \ + $(TEST_NET) \ + --privileged \ + -v "${ROOTDIR}":/usr/code \ + -e TEST_ENDPOINTS=http://127.0.0.1:7001,http://127.0.0.1:7006,http://127.0.0.1:7011 \ + -e TEST_NOT_WAIT_UNTIL_READY=$(TEST_NOT_WAIT_UNTIL_READY) \ + -e TEST_AUTHENTICATION=basic:root: \ + -e GODEBUG=tls13=1 \ + -w /usr/code/ \ + golang:$(GOVERSION) \ + go test -run ".*Failover.*" -tags failover $(TESTOPTIONS) $(REPOPATH)/test + @TESTCONTAINER=$(TESTCONTAINER) ARANGODB=$(ARANGODB) ALPINE_IMAGE=$(ALPINE_IMAGE) "${ROOTDIR}/test/cluster.sh" cleanup + +run-tests-cluster-cleanup: + @TESTCONTAINER=$(TESTCONTAINER) ARANGODB=$(ARANGODB) ALPINE_IMAGE=$(ALPINE_IMAGE) "${ROOTDIR}/test/cluster.sh" cleanup + +# Benchmarks +run-benchmarks-single-json-no-auth: + @echo "Benchmarks: Single server, JSON no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONTENT_TYPE="json" TEST_BENCHMARK="true" __run_tests + +run-benchmarks-single-vpack-no-auth: + @echo "Benchmarks: Single server, Velocypack, no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONTENT_TYPE="vpack" TEST_BENCHMARK="true" __run_tests + +## Lint + +.PHONY: tools +tools: + @echo ">> Fetching golangci-lint linter" + @GOBIN=$(TMPDIR)/bin go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.46.2 + @echo ">> Fetching goimports" + @go get -mod 'readonly' golang.org/x/tools/cmd/goimports + @echo ">> Fetching license check" + @go get -mod 'readonly' github.com/google/addlicense + +.PHONY: license +license: + @echo ">> Ensuring license of files" + @go run -mod 'readonly' github.com/google/addlicense -f "$(ROOTDIR)/HEADER" $(SOURCES) + +.PHONY: license-verify +license-verify: + @echo ">> Verify license of files" + @go run -mod 'readonly' github.com/google/addlicense -f 
"$(ROOTDIR)/HEADER" -check $(SOURCES) + +.PHONY: fmt +fmt: + @echo ">> Ensuring style of files" + @go run -mod 'readonly' golang.org/x/tools/cmd/goimports -w $(SOURCES) + +.PHONY: fmt-verify +fmt-verify: license-verify + @echo ">> Verify files style" + @if [ X"$$(go run -mod 'readonly' golang.org/x/tools/cmd/goimports -l $(SOURCES) | wc -l)" != X"0" ]; then echo ">> Style errors"; go run -mod 'readonly' golang.org/x/tools/cmd/goimports -l $(SOURCES); exit 1; fi + +.PHONY: linter +linter: fmt + $(TMPDIR)/bin/golangci-lint run ./... + +# V2 + +v2-%: + @(cd "$(ROOTDIR)/v2"; make) + +run-v2-tests: run-v2-tests-single run-v2-tests-cluster run-v2-tests-resilientsingle + +run-v2-tests-cluster: run-v2-tests-cluster-with-basic-auth run-v2-tests-cluster-without-ssl run-v2-tests-cluster-without-auth run-v2-tests-cluster-with-jwt-auth + +run-v2-tests-cluster-with-basic-auth: + @echo "Cluster server, with basic authentication, v2" + @${MAKE} TEST_MODE="cluster" TEST_SSL="auto" TEST_AUTH="rootpw" __run_v2_tests + +run-v2-tests-cluster-with-jwt-auth: + @echo "Cluster server, with JWT authentication, v2" + @${MAKE} TEST_MODE="cluster" TEST_SSL="auto" TEST_AUTH="jwt" __run_v2_tests + +run-v2-tests-cluster-without-auth: + @echo "Cluster server, without authentication, v2" + @${MAKE} TEST_MODE="cluster" TEST_SSL="auto" TEST_AUTH="none" __run_v2_tests + +run-v2-tests-cluster-without-ssl: + @echo "Cluster server, without authentication and SSL, v2" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="none" __run_v2_tests + +run-v2-tests-single: run-v2-tests-single-without-auth run-v2-tests-single-with-auth + +run-v2-tests-single-without-auth: + @echo "Single server, without authentication, v2" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" __run_v2_tests + +run-v2-tests-single-with-auth: + @echo "Single server, with authentication, v2" + @${MAKE} TEST_MODE="single" TEST_SSL="auto" TEST_AUTH="rootpw" __run_v2_tests + +run-v2-tests-resilientsingle: run-v2-tests-resilientsingle-with-auth + 
+run-v2-tests-resilientsingle-with-auth: + @echo "Resilient Single, with authentication, v2" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="rootpw" __run_v2_tests \ No newline at end of file diff --git a/vendor/github.com/arangodb/go-driver/README.md b/vendor/github.com/arangodb/go-driver/README.md new file mode 100644 index 00000000000..ab14ae11d9f --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/README.md @@ -0,0 +1,37 @@ +![ArangoDB-Logo](https://www.arangodb.com/docs/assets/arangodb_logo_2016_inverted.png) + +# ArangoDB GO Driver + +This project contains the official Go driver for the [ArangoDB database](https://arangodb.com). + +[![Build Status](https://travis-ci.org/arangodb/go-driver.svg?branch=master)](https://travis-ci.org/arangodb/go-driver) +[![GoDoc](https://godoc.org/github.com/arangodb/go-driver?status.svg)](http://godoc.org/github.com/arangodb/go-driver) + + +- [Getting Started](https://www.arangodb.com/docs/stable/drivers/go-getting-started.html) +- [Example Requests](https://www.arangodb.com/docs/stable/drivers/go-example-requests.html) +- [Connection Management](https://www.arangodb.com/docs/stable/drivers/go-connection-management.html) +- [Reference](https://godoc.org/github.com/arangodb/go-driver) + +# Supported Go Versions + +| | Go 1.13 | Go 1.14 | Go 1.16 | +|---------------|---------|---------|---------| +| `1.0.0-1.3.0` | ✓ | ✓ | ✓ | +| `master` | ✓ | ✓ | ✓ | + +# Supported Versions + +| | < ArangoDB 3.6 | ArangoDB 3.6 | ArangoDB 3.7 | ArangoDB 3.8 | ArangoDB 3.9 | +|----------|----------------|--------------|--------------|--------------|--------------| +| `1.0.0` | ✓ | ✓ | - | - | - | +| `1.1.0` | + | + | ✓ | - | - | +| `1.2.1` | + | + | ✓ | ✓ | - | +| `1.3.0` | + | + | ✓ | ✓ | ✓ | +| `master` | + | + | + | + | + | + +Key: + +* `✓` Exactly the same features in both driver and the ArangoDB version. +* `+` Features included in driver may be not present in the ArangoDB API. 
Calls to the ArangoDB may result in unexpected responses (404). +* `-` The ArangoDB has features which are not supported by driver. diff --git a/vendor/github.com/arangodb/go-driver/VERSION b/vendor/github.com/arangodb/go-driver/VERSION new file mode 100644 index 00000000000..6085e946503 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/VERSION @@ -0,0 +1 @@ +1.2.1 diff --git a/vendor/github.com/arangodb/go-driver/authentication.go b/vendor/github.com/arangodb/go-driver/authentication.go new file mode 100644 index 00000000000..44c379b1166 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/authentication.go @@ -0,0 +1,114 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +type AuthenticationType int + +const ( + // AuthenticationTypeBasic uses username+password basic authentication + AuthenticationTypeBasic AuthenticationType = iota + // AuthenticationTypeJWT uses username+password JWT token based authentication + AuthenticationTypeJWT + // AuthenticationTypeRaw uses a raw value for the Authorization header + AuthenticationTypeRaw +) + +// Authentication implements a kind of authentication. +type Authentication interface { + // Returns the type of authentication + Type() AuthenticationType + // Get returns a configuration property of the authentication. 
+ // Supported properties depend on type of authentication. + Get(property string) string +} + +// BasicAuthentication creates an authentication implementation based on the given username & password. +func BasicAuthentication(userName, password string) Authentication { + return &userNameAuthentication{ + authType: AuthenticationTypeBasic, + userName: userName, + password: password, + } +} + +// JWTAuthentication creates a JWT token authentication implementation based on the given username & password. +func JWTAuthentication(userName, password string) Authentication { + return &userNameAuthentication{ + authType: AuthenticationTypeJWT, + userName: userName, + password: password, + } +} + +// basicAuthentication implements HTTP Basic authentication. +type userNameAuthentication struct { + authType AuthenticationType + userName string + password string +} + +// Returns the type of authentication +func (a *userNameAuthentication) Type() AuthenticationType { + return a.authType +} + +// Get returns a configuration property of the authentication. +// Supported properties depend on type of authentication. +func (a *userNameAuthentication) Get(property string) string { + switch property { + case "username": + return a.userName + case "password": + return a.password + default: + return "" + } +} + +// RawAuthentication creates a raw authentication implementation based on the given value for the Authorization header. +func RawAuthentication(value string) Authentication { + return &rawAuthentication{ + value: value, + } +} + +// rawAuthentication implements Raw authentication. +type rawAuthentication struct { + value string +} + +// Returns the type of authentication +func (a *rawAuthentication) Type() AuthenticationType { + return AuthenticationTypeRaw +} + +// Get returns a configuration property of the authentication. +// Supported properties depend on type of authentication. 
+func (a *rawAuthentication) Get(property string) string { + switch property { + case "value": + return a.value + default: + return "" + } +} diff --git a/vendor/github.com/arangodb/go-driver/client.go b/vendor/github.com/arangodb/go-driver/client.go new file mode 100644 index 00000000000..49a5ab8c618 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client.go @@ -0,0 +1,124 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "fmt" + "sort" + "strings" + "time" +) + +// Client provides access to a single ArangoDB database server, or an entire cluster of ArangoDB servers. +type Client interface { + // SynchronizeEndpoints fetches all endpoints from an ArangoDB cluster and updates the + // connection to use those endpoints. + // When this client is connected to a single server, nothing happens. + // When this client is connected to a cluster of servers, the connection will be updated to reflect + // the layout of the cluster. + // This function requires ArangoDB 3.1.15 or up. + SynchronizeEndpoints(ctx context.Context) error + + // SynchronizeEndpoints2 fetches all endpoints from an ArangoDB cluster and updates the + // connection to use those endpoints. + // When this client is connected to a single server, nothing happens. 
+ // When this client is connected to a cluster of servers, the connection will be updated to reflect + // the layout of the cluster. + // Compared to SynchronizeEndpoints, this function expects a database name as additional parameter. + // This database name is used to call `_db//_api/cluster/endpoints`. SynchronizeEndpoints uses + // the default database, i.e. `_system`. In the case the user does not have access to `_system`, + // SynchronizeEndpoints does not work with earlier versions of arangodb. + SynchronizeEndpoints2(ctx context.Context, dbname string) error + + // Connection returns the connection used by this client + Connection() Connection + + // Database functions + ClientDatabases + + // User functions + ClientUsers + + // Cluster functions + ClientCluster + + // Individual server information functions + ClientServerInfo + + // Server/cluster administration functions + ClientServerAdmin + + // Replication functions + ClientReplication + + // Backup functions + ClientAdminBackup + + ClientFoxx +} + +// ClientConfig contains all settings needed to create a client. +type ClientConfig struct { + // Connection is the actual server/cluster connection. + // See http.NewConnection. + Connection Connection + // Authentication implements authentication on the server. + Authentication Authentication + // SynchronizeEndpointsInterval is the interval between automatic synchronization of endpoints. + // If this value is 0, no automatic synchronization is performed. + // If this value is > 0, automatic synchronization is started on a go routine. + // This feature requires ArangoDB 3.1.15 or up. + SynchronizeEndpointsInterval time.Duration +} + +// VersionInfo describes the version of a database server. +type VersionInfo struct { + // This will always contain "arango" + Server string `json:"server,omitempty"` + // The server version string. The string has the format "major.minor.sub". 
+ // Major and minor will be numeric, and sub may contain a number or a textual version. + Version Version `json:"version,omitempty"` + // Type of license of the server + License string `json:"license,omitempty"` + // Optional additional details. This is returned only if the context is configured using WithDetails. + Details map[string]interface{} `json:"details,omitempty"` +} + +func (v *VersionInfo) IsEnterprise() bool { + return v.License == "enterprise" +} + +// String creates a string representation of the given VersionInfo. +func (v VersionInfo) String() string { + result := fmt.Sprintf("%s, version %s, license %s", v.Server, v.Version, v.License) + if len(v.Details) > 0 { + lines := make([]string, 0, len(v.Details)) + for k, v := range v.Details { + lines = append(lines, fmt.Sprintf("%s: %v", k, v)) + } + sort.Strings(lines) + result = result + "\n" + strings.Join(lines, "\n") + } + return result +} diff --git a/vendor/github.com/arangodb/go-driver/client_admin_backup.go b/vendor/github.com/arangodb/go-driver/client_admin_backup.go new file mode 100644 index 00000000000..0503210fabd --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_admin_backup.go @@ -0,0 +1,149 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Lars Maier +// + +package driver + +import ( + "context" + "time" +) + +// BackupMeta provides meta data of a backup +type BackupMeta struct { + ID BackupID `json:"id,omitempty"` + Version string `json:"version,omitempty"` + DateTime time.Time `json:"datetime,omitempty"` + NumberOfFiles uint `json:"nrFiles,omitempty"` + NumberOfDBServers uint `json:"nrDBServers,omitempty"` + SizeInBytes uint64 `json:"sizeInBytes,omitempty"` + PotentiallyInconsistent bool `json:"potentiallyInconsistent,omitempty"` + Available bool `json:"available,omitempty"` + NumberOfPiecesPresent uint `json:"nrPiecesPresent,omitempty"` + Keys []BackupMetaSha256 `json:"keys,omitempty"` +} + +// BackupMetaSha256 backup sha details +type BackupMetaSha256 struct { + SHA256 string `json:"sha256"` +} + +// BackupRestoreOptions provides options for Restore +type BackupRestoreOptions struct { + // do not version check when doing a restore (expert only) + IgnoreVersion bool `json:"ignoreVersion,omitempty"` +} + +// BackupListOptions provides options for List +type BackupListOptions struct { + // Only receive meta data about a specific id + ID BackupID `json:"id,omitempty"` +} + +// BackupCreateOptions provides options for Create +type BackupCreateOptions struct { + Label string `json:"label,omitempty"` + AllowInconsistent bool `json:"allowInconsistent,omitempty"` + Timeout time.Duration `json:"timeout,omitempty"` +} + +// BackupTransferStatus represents all possible states a transfer job can be in +type BackupTransferStatus string + +const ( + TransferAcknowledged BackupTransferStatus = "ACK" + TransferStarted BackupTransferStatus = "STARTED" + TransferCompleted BackupTransferStatus = "COMPLETED" + TransferFailed BackupTransferStatus = "FAILED" + TransferCancelled BackupTransferStatus = "CANCELLED" +) + +// BackupTransferReport provides progress information of a backup transfer job for a single dbserver +type BackupTransferReport 
struct { + Status BackupTransferStatus `json:"Status,omitempty"` + Error int `json:"Error,omitempty"` + ErrorMessage string `json:"ErrorMessage,omitempty"` + Progress struct { + Total int `json:"Total,omitempty"` + Done int `json:"Done,omitempty"` + Timestamp string `json:"Timestamp,omitempty"` + } `json:"Progress,omitempty"` +} + +// BackupTransferProgressReport provides progress information for a backup transfer job +type BackupTransferProgressReport struct { + BackupID BackupID `json:"BackupID,omitempty"` + Cancelled bool `json:"Cancelled,omitempty"` + Timestamp string `json:"Timestamp,omitempty"` + DBServers map[string]BackupTransferReport `json:"DBServers,omitempty"` +} + +// BackupTransferJobID represents a Transfer (upload/download) job +type BackupTransferJobID string + +// BackupID identifies a backup +type BackupID string + +// ClientAdminBackup provides access to the Backup API via the Client interface +type ClientAdminBackup interface { + Backup() ClientBackup +} + +// BackupCreateResponse contains information about a newly created backup +type BackupCreateResponse struct { + NumberOfFiles uint + NumberOfDBServers uint + SizeInBytes uint64 + PotentiallyInconsistent bool + CreationTime time.Time +} + +// ClientBackup provides access to server/cluster backup functions of an arangodb database server +// or an entire cluster of arangodb servers. 
+type ClientBackup interface { + // Create creates a new backup and returns its id + Create(ctx context.Context, opt *BackupCreateOptions) (BackupID, BackupCreateResponse, error) + + // Delete deletes the backup with given id + Delete(ctx context.Context, id BackupID) error + + // Restore restores the backup with given id + Restore(ctx context.Context, id BackupID, opt *BackupRestoreOptions) error + + // List returns meta data about some/all backups available + List(ctx context.Context, opt *BackupListOptions) (map[BackupID]BackupMeta, error) + + // only enterprise version + + // Upload triggers an upload to the remote repository of backup with id using the given config + // and returns the job id. + Upload(ctx context.Context, id BackupID, remoteRepository string, config interface{}) (BackupTransferJobID, error) + + // Download triggers an download to the remote repository of backup with id using the given config + // and returns the job id. + Download(ctx context.Context, id BackupID, remoteRepository string, config interface{}) (BackupTransferJobID, error) + + // Progress returns the progress state of the given Transfer job + Progress(ctx context.Context, job BackupTransferJobID) (BackupTransferProgressReport, error) + + // Abort aborts the Transfer job if possible + Abort(ctx context.Context, job BackupTransferJobID) error +} diff --git a/vendor/github.com/arangodb/go-driver/client_admin_backup_impl.go b/vendor/github.com/arangodb/go-driver/client_admin_backup_impl.go new file mode 100644 index 00000000000..956d474e932 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_admin_backup_impl.go @@ -0,0 +1,305 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Lars Maier +// + +package driver + +import ( + "context" + "time" +) + +type clientBackup struct { + conn Connection +} + +func (c *client) Backup() ClientBackup { + return &clientBackup{ + conn: c.conn, + } +} + +// Create creates a new backup and returns its id +func (c *clientBackup) Create(ctx context.Context, opt *BackupCreateOptions) (BackupID, BackupCreateResponse, error) { + req, err := c.conn.NewRequest("POST", "_admin/backup/create") + if err != nil { + return "", BackupCreateResponse{}, WithStack(err) + } + applyContextSettings(ctx, req) + if opt != nil { + body := struct { + Label string `json:"label,omitempty"` + AllowInconsistent bool `json:"allowInconsistent,omitempty"` + Timeout float64 `json:"timeout,omitempty"` + }{ + Label: opt.Label, + AllowInconsistent: opt.AllowInconsistent, + Timeout: opt.Timeout.Seconds(), + } + req, err = req.SetBody(body) + if err != nil { + return "", BackupCreateResponse{}, WithStack(err) + } + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return "", BackupCreateResponse{}, WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return "", BackupCreateResponse{}, WithStack(err) + } + var result struct { + ID BackupID `json:"id,omitempty"` + PotentiallyInconsistent bool `json:"potentiallyInconsistent,omitempty"` + NumberOfFiles uint `json:"nrFiles,omitempty"` + NumberOfDBServers uint `json:"nrDBServers,omitempty"` + SizeInBytes uint64 `json:"sizeInBytes,omitempty"` + CreationTime time.Time 
`json:"datetime,omitempty"` + } + if err := resp.ParseBody("result", &result); err != nil { + return "", BackupCreateResponse{}, WithStack(err) + } + return result.ID, BackupCreateResponse{ + PotentiallyInconsistent: result.PotentiallyInconsistent, + NumberOfFiles: result.NumberOfFiles, + NumberOfDBServers: result.NumberOfDBServers, + SizeInBytes: result.SizeInBytes, + CreationTime: result.CreationTime, + }, nil +} + +// Delete deletes the backup with given id +func (c *clientBackup) Delete(ctx context.Context, id BackupID) error { + req, err := c.conn.NewRequest("POST", "_admin/backup/delete") + if err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + body := struct { + ID BackupID `json:"id,omitempty"` + }{ + ID: id, + } + req, err = req.SetBody(body) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// Restore restores the backup with given id +func (c *clientBackup) Restore(ctx context.Context, id BackupID, opt *BackupRestoreOptions) error { + req, err := c.conn.NewRequest("POST", "_admin/backup/restore") + if err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + body := struct { + ID BackupID `json:"id,omitempty"` + IgnoreVersion bool `json:"ignoreVersion,omitempty"` + }{ + ID: id, + } + if opt != nil { + body.IgnoreVersion = opt.IgnoreVersion + } + req, err = req.SetBody(body) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + // THIS SHOULD BE 202 ACCEPTED and not OK, because it is not completed when returns (at least for single server) + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// List returns meta data about some/all backups available +func (c *clientBackup) List(ctx context.Context, opt *BackupListOptions) 
(map[BackupID]BackupMeta, error) { + req, err := c.conn.NewRequest("POST", "_admin/backup/list") + if err != nil { + return nil, WithStack(err) + } + applyContextSettings(ctx, req) + if opt != nil { + req, err = req.SetBody(opt) + if err != nil { + return nil, WithStack(err) + } + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var result struct { + List map[BackupID]BackupMeta `json:"list,omitempty"` + } + if err := resp.ParseBody("result", &result); err != nil { + return nil, WithStack(err) + } + return result.List, nil +} + +// Upload triggers an upload to the remote repository of backup with id using the given config +// and returns the job id. +func (c *clientBackup) Upload(ctx context.Context, id BackupID, remoteRepository string, config interface{}) (BackupTransferJobID, error) { + req, err := c.conn.NewRequest("POST", "_admin/backup/upload") + if err != nil { + return "", WithStack(err) + } + applyContextSettings(ctx, req) + body := struct { + ID BackupID `json:"id,omitempty"` + RemoteRepo string `json:"remoteRepository,omitempty"` + Config interface{} `json:"config,omitempty"` + }{ + ID: id, + RemoteRepo: remoteRepository, + Config: config, + } + req, err = req.SetBody(body) + if err != nil { + return "", WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return "", WithStack(err) + } + if err := resp.CheckStatus(202); err != nil { + return "", WithStack(err) + } + var result struct { + UploadID BackupTransferJobID `json:"uploadId,omitempty"` + } + if err := resp.ParseBody("result", &result); err != nil { + return "", WithStack(err) + } + return result.UploadID, nil +} + +// Download triggers an download to the remote repository of backup with id using the given config +// and returns the job id. 
+func (c *clientBackup) Download(ctx context.Context, id BackupID, remoteRepository string, config interface{}) (BackupTransferJobID, error) { + req, err := c.conn.NewRequest("POST", "_admin/backup/download") + if err != nil { + return "", WithStack(err) + } + applyContextSettings(ctx, req) + body := struct { + ID BackupID `json:"id,omitempty"` + RemoteRepo string `json:"remoteRepository,omitempty"` + Config interface{} `json:"config,omitempty"` + }{ + ID: id, + RemoteRepo: remoteRepository, + Config: config, + } + + req, err = req.SetBody(body) + if err != nil { + return "", WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return "", WithStack(err) + } + if err := resp.CheckStatus(202); err != nil { + return "", WithStack(err) + } + var result struct { + DownloadID BackupTransferJobID `json:"downloadId,omitempty"` + } + if err := resp.ParseBody("result", &result); err != nil { + return "", WithStack(err) + } + return result.DownloadID, nil +} + +// Progress returns the progress state of the given Transfer job +func (c *clientBackup) Progress(ctx context.Context, job BackupTransferJobID) (result BackupTransferProgressReport, error error) { + req, err := c.conn.NewRequest("POST", "_admin/backup/upload") + if err != nil { + return BackupTransferProgressReport{}, WithStack(err) + } + applyContextSettings(ctx, req) + body := struct { + ID BackupTransferJobID `json:"uploadId,omitempty"` + }{ + ID: job, + } + req, err = req.SetBody(body) + if err != nil { + return BackupTransferProgressReport{}, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return BackupTransferProgressReport{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return BackupTransferProgressReport{}, WithStack(err) + } + if err := resp.ParseBody("result", &result); err != nil { + return BackupTransferProgressReport{}, WithStack(err) + } + return result, nil +} + +// Abort aborts the Transfer job if possible +func (c *clientBackup) 
Abort(ctx context.Context, job BackupTransferJobID) error { + req, err := c.conn.NewRequest("POST", "_admin/backup/upload") + if err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + body := struct { + ID BackupTransferJobID `json:"uploadId,omitempty"` + Abort bool `json:"abort,omitempty"` + }{ + ID: job, + Abort: true, + } + req, err = req.SetBody(body) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(202); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/client_cluster.go b/vendor/github.com/arangodb/go-driver/client_cluster.go new file mode 100644 index 00000000000..a6c879d2cc3 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_cluster.go @@ -0,0 +1,33 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// ClientCluster provides methods needed to access cluster functionality from a client. +type ClientCluster interface { + // Cluster provides access to cluster wide specific operations. + // To use this interface, an ArangoDB cluster is required. + // If this method is a called without a cluster, a PreconditionFailed error is returned. 
+ Cluster(ctx context.Context) (Cluster, error) +} diff --git a/vendor/github.com/arangodb/go-driver/client_cluster_impl.go b/vendor/github.com/arangodb/go-driver/client_cluster_impl.go new file mode 100644 index 00000000000..5ac0ee62a7a --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_cluster_impl.go @@ -0,0 +1,46 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" +) + +// Cluster provides access to cluster wide specific operations. +// To use this interface, an ArangoDB cluster is required. +// If this method is a called without a cluster, a PreconditionFailed error is returned. 
+func (c *client) Cluster(ctx context.Context) (Cluster, error) { + role, err := c.ServerRole(ctx) + if err != nil { + return nil, WithStack(err) + } + if role == ServerRoleSingle || role == ServerRoleSingleActive || role == ServerRoleSinglePassive { + // Standalone server, this is wrong + return nil, WithStack(newArangoError(412, 0, "Cluster expected, found SINGLE server")) + } + cl, err := newCluster(c.conn) + if err != nil { + return nil, WithStack(err) + } + return cl, nil +} diff --git a/vendor/github.com/arangodb/go-driver/client_databases.go b/vendor/github.com/arangodb/go-driver/client_databases.go new file mode 100644 index 00000000000..715d5ddfd98 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_databases.go @@ -0,0 +1,85 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +type DatabaseSharding string + +const ( + DatabaseShardingSingle DatabaseSharding = "single" + DatabaseShardingNone DatabaseSharding = "" +) + +// ClientDatabases provides access to the databases in a single arangodb database server, or an entire cluster of arangodb servers. +type ClientDatabases interface { + // Database opens a connection to an existing database. + // If no database with given name exists, an NotFoundError is returned. 
+ Database(ctx context.Context, name string) (Database, error) + + // DatabaseExists returns true if a database with given name exists. + DatabaseExists(ctx context.Context, name string) (bool, error) + + // Databases returns a list of all databases found by the client. + Databases(ctx context.Context) ([]Database, error) + + // AccessibleDatabases returns a list of all databases that can be accessed by the authenticated user. + AccessibleDatabases(ctx context.Context) ([]Database, error) + + // CreateDatabase creates a new database with given name and opens a connection to it. + // If the a database with given name already exists, a DuplicateError is returned. + CreateDatabase(ctx context.Context, name string, options *CreateDatabaseOptions) (Database, error) +} + +// CreateDatabaseOptions contains options that customize the creating of a database. +type CreateDatabaseOptions struct { + // List of users to initially create for the new database. User information will not be changed for users that already exist. + // If users is not specified or does not contain any users, a default user root will be created with an empty string password. + // This ensures that the new database will be accessible after it is created. + Users []CreateDatabaseUserOptions `json:"users,omitempty"` + + // Options database defaults + Options CreateDatabaseDefaultOptions `json:"options,omitempty"` +} + +// CreateDatabaseDefaultOptions contains options that change defaults for collections +type CreateDatabaseDefaultOptions struct { + // Default replication factor for collections in database + ReplicationFactor int `json:"replicationFactor,omitempty"` + // Default write concern for collections in database + WriteConcern int `json:"writeConcern,omitempty"` + // Default sharding for collections in database + Sharding DatabaseSharding `json:"sharding,omitempty"` +} + +// CreateDatabaseUserOptions contains options for creating a single user for a database. 
+type CreateDatabaseUserOptions struct { + // Loginname of the user to be created + UserName string `json:"user,omitempty"` + // The user password as a string. If not specified, it will default to an empty string. + Password string `json:"passwd,omitempty"` + // A flag indicating whether the user account should be activated or not. The default value is true. If set to false, the user won't be able to log into the database. + Active *bool `json:"active,omitempty"` + // A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB. + Extra interface{} `json:"extra,omitempty"` +} diff --git a/vendor/github.com/arangodb/go-driver/client_databases_impl.go b/vendor/github.com/arangodb/go-driver/client_databases_impl.go new file mode 100644 index 00000000000..27d2b04b6ce --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_databases_impl.go @@ -0,0 +1,154 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// Database opens a connection to an existing database. +// If no database with given name exists, an NotFoundError is returned. 
+func (c *client) Database(ctx context.Context, name string) (Database, error) { + escapedName := pathEscape(name) + req, err := c.conn.NewRequest("GET", path.Join("_db", escapedName, "_api/database/current")) + if err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + db, err := newDatabase(name, c.conn) + if err != nil { + return nil, WithStack(err) + } + return db, nil +} + +// DatabaseExists returns true if a database with given name exists. +func (c *client) DatabaseExists(ctx context.Context, name string) (bool, error) { + escapedName := pathEscape(name) + req, err := c.conn.NewRequest("GET", path.Join("_db", escapedName, "_api/database/current")) + if err != nil { + return false, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err == nil { + return true, nil + } else if IsNotFound(err) { + return false, nil + } else { + return false, WithStack(err) + } +} + +type getDatabaseResponse struct { + Result []string `json:"result,omitempty"` + ArangoError +} + +// Databases returns a list of all databases found by the client. +func (c *client) Databases(ctx context.Context) ([]Database, error) { + result, err := listDatabases(ctx, c.conn, path.Join("/_db/_system/_api/database")) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// AccessibleDatabases returns a list of all databases that can be accessed by the authenticated user. +func (c *client) AccessibleDatabases(ctx context.Context) ([]Database, error) { + result, err := listDatabases(ctx, c.conn, path.Join("/_db/_system/_api/database/user")) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// listDatabases returns a list of databases using a GET to the given path. 
+func listDatabases(ctx context.Context, conn Connection, path string) ([]Database, error) { + req, err := conn.NewRequest("GET", path) + if err != nil { + return nil, WithStack(err) + } + resp, err := conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data getDatabaseResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]Database, 0, len(data.Result)) + for _, name := range data.Result { + db, err := newDatabase(name, conn) + if err != nil { + return nil, WithStack(err) + } + result = append(result, db) + } + return result, nil +} + +// CreateDatabase creates a new database with given name and opens a connection to it. +// If the a database with given name already exists, a DuplicateError is returned. +func (c *client) CreateDatabase(ctx context.Context, name string, options *CreateDatabaseOptions) (Database, error) { + input := struct { + CreateDatabaseOptions + Name string `json:"name"` + }{ + Name: name, + } + if options != nil { + input.CreateDatabaseOptions = *options + } + req, err := c.conn.NewRequest("POST", path.Join("_db/_system/_api/database")) + if err != nil { + return nil, WithStack(err) + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return nil, WithStack(err) + } + db, err := newDatabase(name, c.conn) + if err != nil { + return nil, WithStack(err) + } + return db, nil +} diff --git a/vendor/github.com/arangodb/go-driver/client_foxx.go b/vendor/github.com/arangodb/go-driver/client_foxx.go new file mode 100644 index 00000000000..2201111bba4 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_foxx.go @@ -0,0 +1,47 @@ +// +// DISCLAIMER +// +// Copyright 2020 ArangoDB GmbH, Cologne, Germany +// +// Licensed 
// --- vendor/github.com/arangodb/go-driver/client_foxx.go ---

package driver

import (
	"context"
)

// FoxxCreateOptions contains options for installing a Foxx service.
type FoxxCreateOptions struct {
	// Mount is the mount path for the service — presumably the URL path it is
	// served under; confirm against the Foxx HTTP API.
	Mount string
}

// FoxxDeleteOptions contains options for uninstalling a Foxx service.
type FoxxDeleteOptions struct {
	// Mount is the mount path of the service to remove.
	Mount string
	// Teardown presumably controls whether the service's teardown routine runs
	// on removal — TODO confirm against the Foxx HTTP API.
	Teardown bool
}

// ClientFoxx provides access to Foxx-service operations of a client.
type ClientFoxx interface {
	// Foxx returns the FoxxService interface of this client.
	Foxx() FoxxService
}

// FoxxService provides install/uninstall operations for Foxx services.
type FoxxService interface {
	// InstallFoxxService installs a new service at a given mount path.
	InstallFoxxService(ctx context.Context, zipFile string, options FoxxCreateOptions) error
	// UninstallFoxxService uninstalls service at a given mount path.
	UninstallFoxxService(ctx context.Context, options FoxxDeleteOptions) error
}

// --- vendor/github.com/arangodb/go-driver/client_foxx_impl.go ---

package driver

// Foxx provides access to foxx services specific operations.
// The client itself implements FoxxService, so it returns itself.
func (c *client) Foxx() FoxxService {
	return c
}

// --- vendor/github.com/arangodb/go-driver/client_impl.go ---

package driver

import (
	"context"
	"path"
	"time"

	"github.com/arangodb/go-driver/util"
)

// NewClient creates a new Client based on the given config setting.
+func NewClient(config ClientConfig) (Client, error) { + if config.Connection == nil { + return nil, WithStack(InvalidArgumentError{Message: "Connection is not set"}) + } + conn := config.Connection + if config.Authentication != nil { + var err error + conn, err = conn.SetAuthentication(config.Authentication) + if err != nil { + return nil, WithStack(err) + } + } + c := &client{ + conn: conn, + } + if config.SynchronizeEndpointsInterval > 0 { + go c.autoSynchronizeEndpoints(config.SynchronizeEndpointsInterval) + } + return c, nil +} + +// client implements the Client interface. +type client struct { + conn Connection +} + +// Connection returns the connection used by this client +func (c *client) Connection() Connection { + return c.conn +} + +// SynchronizeEndpoints fetches all endpoints from an ArangoDB cluster and updates the +// connection to use those endpoints. +// When this client is connected to a single server, nothing happens. +// When this client is connected to a cluster of servers, the connection will be updated to reflect +// the layout of the cluster. +func (c *client) SynchronizeEndpoints(ctx context.Context) error { + return c.SynchronizeEndpoints2(ctx, "") +} + +// SynchronizeEndpoints2 fetches all endpoints from an ArangoDB cluster and updates the +// connection to use those endpoints. +// When this client is connected to a single server, nothing happens. +// When this client is connected to a cluster of servers, the connection will be updated to reflect +// the layout of the cluster. +// Compared to SynchronizeEndpoints, this function expects a database name as additional parameter. +// This database name is used to call `_db//_api/cluster/endpoints`. SynchronizeEndpoints uses +// the default database, i.e. `_system`. In the case the user does not have access to `_system`, +// SynchronizeEndpoints does not work with earlier versions of arangodb. 
+func (c *client) SynchronizeEndpoints2(ctx context.Context, dbname string) error { + // Cluster mode, fetch endpoints + cep, err := c.clusterEndpoints(ctx, dbname) + if err != nil { + // ignore Forbidden: automatic failover is not enabled errors + if !IsArangoErrorWithErrorNum(err, ErrHttpForbidden, ErrHttpInternal, 0, ErrNotImplemented, ErrForbidden) { + // 3.2 returns no error code, thus check for 0 + // 501 with ErrorNum 9 is in there since 3.7, earlier versions returned 403 and ErrorNum 11. + return WithStack(err) + } + + return nil + } + var endpoints []string + for _, ep := range cep.Endpoints { + endpoints = append(endpoints, util.FixupEndpointURLScheme(ep.Endpoint)) + } + + // Update connection + if err := c.conn.UpdateEndpoints(endpoints); err != nil { + return WithStack(err) + } + + return nil +} + +// autoSynchronizeEndpoints performs automatic endpoint synchronization. +func (c *client) autoSynchronizeEndpoints(interval time.Duration) { + for { + // SynchronizeEndpoints endpoints + c.SynchronizeEndpoints(nil) + + // Wait a bit + time.Sleep(interval) + } +} + +type clusterEndpointsResponse struct { + Endpoints []clusterEndpoint `json:"endpoints,omitempty"` +} + +type clusterEndpoint struct { + Endpoint string `json:"endpoint,omitempty"` +} + +// clusterEndpoints returns the endpoints of a cluster. 
+func (c *client) clusterEndpoints(ctx context.Context, dbname string) (clusterEndpointsResponse, error) { + var url string + if dbname == "" { + url = "_api/cluster/endpoints" + } else { + url = path.Join("_db", pathEscape(dbname), "_api/cluster/endpoints") + } + req, err := c.conn.NewRequest("GET", url) + if err != nil { + return clusterEndpointsResponse{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return clusterEndpointsResponse{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return clusterEndpointsResponse{}, WithStack(err) + } + var data clusterEndpointsResponse + if err := resp.ParseBody("", &data); err != nil { + return clusterEndpointsResponse{}, WithStack(err) + } + return data, nil +} diff --git a/vendor/github.com/arangodb/go-driver/client_replication.go b/vendor/github.com/arangodb/go-driver/client_replication.go new file mode 100644 index 00000000000..bf6e7fea5bb --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_replication.go @@ -0,0 +1,29 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +// ClientReplication provides methods needed to access replication functionality from a client. 
type ClientReplication interface {
	// Replication provides access to replication specific operations.
	Replication() Replication
}

// --- vendor/github.com/arangodb/go-driver/client_replication_impl.go ---

package driver

// Replication provides access to replication specific operations.
// The client itself implements Replication, so it returns itself.
func (c *client) Replication() Replication {
	return c
}
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// ClientServerAdmin provides access to server administrations functions of an arangodb database server +// or an entire cluster of arangodb servers. +type ClientServerAdmin interface { + // ServerMode returns the current mode in which the server/cluster is operating. + // This call needs ArangoDB 3.3 and up. + ServerMode(ctx context.Context) (ServerMode, error) + // SetServerMode changes the current mode in which the server/cluster is operating. + // This call needs a client that uses JWT authentication. + // This call needs ArangoDB 3.3 and up. + SetServerMode(ctx context.Context, mode ServerMode) error + + // Shutdown a specific server, optionally removing it from its cluster. + Shutdown(ctx context.Context, removeFromCluster bool) error + + // Metrics returns the metrics of the server in Prometheus format. + // List of metrics: https://www.arangodb.com/docs/devel/http/administration-and-monitoring-metrics.html + // You can parse it using Prometheus client: + /* + var parser expfmt.TextParser + metricsProm, err := parser.TextToMetricFamilies(strings.NewReader(string(metrics))) + */ + Metrics(ctx context.Context) ([]byte, error) + + // MetricsForSingleServer returns the metrics of the specific server in Prometheus format. + // This parameter 'serverID' is only meaningful on Coordinators. 
+ // List of metrics: https://www.arangodb.com/docs/devel/http/administration-and-monitoring-metrics.html + // You can parse it using Prometheus client: + /* + var parser expfmt.TextParser + metricsProm, err := parser.TextToMetricFamilies(strings.NewReader(string(metrics))) + */ + MetricsForSingleServer(ctx context.Context, serverID string) ([]byte, error) + + // Deprecated: Use Metrics instead. + // Statistics queries statistics from a specific server + Statistics(ctx context.Context) (ServerStatistics, error) + + // ShutdownV2 shuts down a specific coordinator, optionally removing it from the cluster with a graceful manner. + ShutdownV2(ctx context.Context, removeFromCluster, graceful bool) error + + // ShutdownInfoV2 queries information about shutdown progress. + ShutdownInfoV2(ctx context.Context) (ShutdownInfo, error) + + // Logs retrieve logs from server in ArangoDB 3.8.0+ format + Logs(ctx context.Context) (ServerLogs, error) +} + +type ServerLogs struct { + Total int `json:"total"` + Messages []ServerLogMessage `json:"messages,omitempty"` +} + +type ServerLogMessage struct { + ID int `json:"id"` + Topic string `json:"topic"` + Level string `json:"level"` + Date string `json:"date"` + Message string `json:"message"` +} + +type ServerMode string + +// ServerStatistics contains statistical data about the server as a whole. +type ServerStatistics struct { + Time float64 `json:"time"` + Enabled bool `json:"enabled"` + System SystemStats `json:"system"` + Client ClientStats `json:"client"` + ClientUser ClientStats `json:"clientUser,omitempty"` + HTTP HTTPStats `json:"http"` + Server ServerStats `json:"server"` + ArangoError +} + +// SystemStats contains statistical data about the system, this is part of +// ServerStatistics. 
+type SystemStats struct { + MinorPageFaults int64 `json:"minorPageFaults"` + MajorPageFaults int64 `json:"majorPageFaults"` + UserTime float64 `json:"userTime"` + SystemTime float64 `json:"systemTime"` + NumberOfThreads int64 `json:"numberOfThreads"` + ResidentSize int64 `json:"residentSize"` + ResidentSizePercent float64 `json:"residentSizePercent"` + VirtualSize int64 `json:"virtualSize"` +} + +// Stats is used for various time-related statistics. +type Stats struct { + Sum float64 `json:"sum"` + Count int64 `json:"count"` + Counts []int64 `json:"counts"` +} + +type ClientStats struct { + HTTPConnections int64 `json:"httpConnections"` + ConnectionTime Stats `json:"connectionTime"` + TotalTime Stats `json:"totalTime"` + RequestTime Stats `json:"requestTime"` + QueueTime Stats `json:"queueTime"` + IoTime Stats `json:"ioTime"` + BytesSent Stats `json:"bytesSent"` + BytesReceived Stats `json:"bytesReceived"` +} + +// HTTPStats contains statistics about the HTTP traffic. +type HTTPStats struct { + RequestsTotal int64 `json:"requestsTotal"` + RequestsAsync int64 `json:"requestsAsync"` + RequestsGet int64 `json:"requestsGet"` + RequestsHead int64 `json:"requestsHead"` + RequestsPost int64 `json:"requestsPost"` + RequestsPut int64 `json:"requestsPut"` + RequestsPatch int64 `json:"requestsPatch"` + RequestsDelete int64 `json:"requestsDelete"` + RequestsOptions int64 `json:"requestsOptions"` + RequestsOther int64 `json:"requestsOther"` + RequestsSuperuser int64 `json:"requestsSuperuser,omitempty"` + RequestsUser int64 `json:"requestsUser,omitempty"` +} + +// TransactionStats contains statistics about transactions. +type TransactionStats struct { + Started int64 `json:"started"` + Aborted int64 `json:"aborted"` + Committed int64 `json:"committed"` + IntermediateCommits int64 `json:"intermediateCommits"` + ReadOnly int64 `json:"readOnly,omitempty"` + DirtyReadOnly int64 `json:"dirtyReadOnly,omitempty"` +} + +// MemoryStats contains statistics about memory usage. 
+type MemoryStats struct { + ContextID int64 `json:"contextId"` + TMax float64 `json:"tMax"` + CountOfTimes int64 `json:"countOfTimes"` + HeapMax int64 `json:"heapMax"` + HeapMin int64 `json:"heapMin"` + Invocations int64 `json:"invocations,omitempty"` +} + +// V8ContextStats contains statistics about V8 contexts. +type V8ContextStats struct { + Available int64 `json:"available"` + Busy int64 `json:"busy"` + Dirty int64 `json:"dirty"` + Free int64 `json:"free"` + Min int64 `json:"min,omitempty"` + Max int64 `json:"max"` + Memory []MemoryStats `json:"memory"` +} + +// ThreadsStats contains statistics about threads. +type ThreadStats struct { + SchedulerThreads int64 `json:"scheduler-threads"` + Blocked int64 `json:"blocked"` + Queued int64 `json:"queued"` + InProgress int64 `json:"in-progress"` + DirectExec int64 `json:"direct-exec"` +} + +// ServerStats contains statistics about the server. +type ServerStats struct { + Uptime float64 `json:"uptime"` + PhysicalMemory int64 `json:"physicalMemory"` + Transactions TransactionStats `json:"transactions"` + V8Context V8ContextStats `json:"v8Context"` + Threads ThreadStats `json:"threads"` +} + +const ( + // ServerModeDefault is the normal mode of the database in which read and write requests + // are allowed. + ServerModeDefault ServerMode = "default" + // ServerModeReadOnly is the mode in which all modifications to th database are blocked. + // Behavior is the same as user that has read-only access to all databases & collections. 
+ ServerModeReadOnly ServerMode = "readonly" +) diff --git a/vendor/github.com/arangodb/go-driver/client_server_admin_impl.go b/vendor/github.com/arangodb/go-driver/client_server_admin_impl.go new file mode 100644 index 00000000000..c8fead958ef --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_server_admin_impl.go @@ -0,0 +1,243 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" +) + +type serverModeResponse struct { + Mode ServerMode `json:"mode"` + ArangoError +} + +type serverModeRequest struct { + Mode ServerMode `json:"mode"` +} + +// ShutdownInfo stores information about shutdown of the coordinator. +type ShutdownInfo struct { + // AQLCursors stores a number of AQL cursors that are still active. + AQLCursors int `json:"AQLcursors"` + // Transactions stores a number of ongoing transactions. + Transactions int `json:"transactions"` + // PendingJobs stores a number of ongoing asynchronous requests. + PendingJobs int `json:"pendingJobs"` + // DoneJobs stores a number of finished asynchronous requests, whose result has not yet been collected. + DoneJobs int `json:"doneJobs"` + // PregelConductors stores a number of ongoing Pregel jobs. 
+ PregelConductors int `json:"pregelConductors"` + // LowPrioOngoingRequests stores a number of ongoing low priority requests. + LowPrioOngoingRequests int `json:"lowPrioOngoingRequests"` + // LowPrioQueuedRequests stores a number of queued low priority requests. + LowPrioQueuedRequests int `json:"lowPrioQueuedRequests"` + // AllClear is set if all operations are closed. + AllClear bool `json:"allClear"` + // SoftShutdownOngoing describes whether a soft shutdown of the Coordinator is in progress. + SoftShutdownOngoing bool `json:"softShutdownOngoing"` +} + +// ServerMode returns the current mode in which the server/cluster is operating. +// This call needs ArangoDB 3.3 and up. +func (c *client) ServerMode(ctx context.Context) (ServerMode, error) { + req, err := c.conn.NewRequest("GET", "_admin/server/mode") + if err != nil { + return "", WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return "", WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return "", WithStack(err) + } + var result serverModeResponse + if err := resp.ParseBody("", &result); err != nil { + return "", WithStack(err) + } + return result.Mode, nil +} + +// SetServerMode changes the current mode in which the server/cluster is operating. +// This call needs a client that uses JWT authentication. +// This call needs ArangoDB 3.3 and up. 
+func (c *client) SetServerMode(ctx context.Context, mode ServerMode) error { + req, err := c.conn.NewRequest("PUT", "_admin/server/mode") + if err != nil { + return WithStack(err) + } + input := serverModeRequest{ + Mode: mode, + } + req, err = req.SetBody(input) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// Logs retrieve logs from server in ArangoDB 3.8.0+ format +func (c *client) Logs(ctx context.Context) (ServerLogs, error) { + req, err := c.conn.NewRequest("GET", "_admin/log/entries") + if err != nil { + return ServerLogs{}, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return ServerLogs{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return ServerLogs{}, WithStack(err) + } + var data ServerLogs + if err := resp.ParseBody("", &data); err != nil { + return ServerLogs{}, WithStack(err) + } + return data, nil +} + +// Shutdown a specific server, optionally removing it from its cluster. +func (c *client) Shutdown(ctx context.Context, removeFromCluster bool) error { + req, err := c.conn.NewRequest("DELETE", "_admin/shutdown") + if err != nil { + return WithStack(err) + } + if removeFromCluster { + req.SetQuery("remove_from_cluster", "1") + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// Metrics returns the metrics of the server in Prometheus format. +func (c *client) Metrics(ctx context.Context) ([]byte, error) { + return c.getMetrics(ctx, "") +} + +// MetricsForSingleServer returns the metrics of the specific server in Prometheus format. +// This parameter 'serverID' is only meaningful on Coordinators. 
+func (c *client) MetricsForSingleServer(ctx context.Context, serverID string) ([]byte, error) { + return c.getMetrics(ctx, serverID) +} + +// Metrics returns the metrics of the server in Prometheus format. +func (c *client) getMetrics(ctx context.Context, serverID string) ([]byte, error) { + var rawResponse []byte + ctx = WithRawResponse(ctx, &rawResponse) + + req, err := c.conn.NewRequest("GET", "_admin/metrics/v2") + if err != nil { + return rawResponse, WithStack(err) + } + + if serverID != "" { + req.SetQuery("serverId", serverID) + } + + resp, err := c.conn.Do(ctx, req) + if err != nil { + return rawResponse, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return rawResponse, WithStack(err) + } + return rawResponse, nil +} + +// Statistics queries statistics from a specific server. +func (c *client) Statistics(ctx context.Context) (ServerStatistics, error) { + req, err := c.conn.NewRequest("GET", "_admin/statistics") + if err != nil { + return ServerStatistics{}, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return ServerStatistics{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return ServerStatistics{}, WithStack(err) + } + var data ServerStatistics + if err := resp.ParseBody("", &data); err != nil { + return ServerStatistics{}, WithStack(err) + } + return data, nil +} + +// ShutdownV2 shuts down a specific coordinator, optionally removing it from the cluster with a graceful manner. +// When `graceful` is true then run soft shutdown process and the `ShutdownInfoV2` can be used to check the progress. +// It is available since versions: v3.7.12, v3.8.1, v3.9.0. 
+func (c *client) ShutdownV2(ctx context.Context, removeFromCluster, graceful bool) error { + req, err := c.conn.NewRequest("DELETE", "_admin/shutdown") + if err != nil { + return WithStack(err) + } + if removeFromCluster { + req.SetQuery("remove_from_cluster", "1") + } + if graceful { + req.SetQuery("soft", "true") + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// ShutdownInfoV2 returns information about shutdown progress. +// It is available since versions: v3.7.12, v3.8.1, v3.9.0. +func (c *client) ShutdownInfoV2(ctx context.Context) (ShutdownInfo, error) { + req, err := c.conn.NewRequest("GET", "_admin/shutdown") + if err != nil { + return ShutdownInfo{}, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return ShutdownInfo{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return ShutdownInfo{}, WithStack(err) + } + data := ShutdownInfo{} + if err := resp.ParseBody("", &data); err != nil { + return ShutdownInfo{}, WithStack(err) + } + return data, nil +} diff --git a/vendor/github.com/arangodb/go-driver/client_server_info.go b/vendor/github.com/arangodb/go-driver/client_server_info.go new file mode 100644 index 00000000000..30fe0ccc7aa --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_server_info.go @@ -0,0 +1,61 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// ClientServerInfo provides access to information about a single ArangoDB server. +// When your client uses multiple endpoints, it is undefined which server +// will respond to requests of this interface. +type ClientServerInfo interface { + // Version returns version information from the connected database server. + // Use WithDetails to configure a context that will include additional details in the return VersionInfo. + Version(ctx context.Context) (VersionInfo, error) + + // ServerRole returns the role of the server that answers the request. + ServerRole(ctx context.Context) (ServerRole, error) + + // Gets the ID of this server in the cluster. + // An error is returned when calling this to a server that is not part of a cluster. + ServerID(ctx context.Context) (string, error) +} + +// ServerRole is the role of an arangod server +type ServerRole string + +const ( + // ServerRoleSingle indicates that the server is a single-server instance + ServerRoleSingle ServerRole = "Single" + // ServerRoleSingleActive indicates that the server is a the leader of a single-server resilient pair + ServerRoleSingleActive ServerRole = "SingleActive" + // ServerRoleSinglePassive indicates that the server is a a follower of a single-server resilient pair + ServerRoleSinglePassive ServerRole = "SinglePassive" + // ServerRoleDBServer indicates that the server is a dbserver within a cluster + ServerRoleDBServer ServerRole = "DBServer" + // ServerRoleCoordinator indicates that the server is a coordinator within a cluster + ServerRoleCoordinator ServerRole = "Coordinator" + // ServerRoleAgent indicates that the server is an agent within a cluster + ServerRoleAgent ServerRole = "Agent" + // ServerRoleUndefined indicates that the role of the server 
cannot be determined + ServerRoleUndefined ServerRole = "Undefined" +) diff --git a/vendor/github.com/arangodb/go-driver/client_server_info_impl.go b/vendor/github.com/arangodb/go-driver/client_server_info_impl.go new file mode 100644 index 00000000000..b0917d5d8f4 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_server_info_impl.go @@ -0,0 +1,152 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" +) + +// Version returns version information from the connected database server. +func (c *client) Version(ctx context.Context) (VersionInfo, error) { + req, err := c.conn.NewRequest("GET", "_api/version") + if err != nil { + return VersionInfo{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return VersionInfo{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return VersionInfo{}, WithStack(err) + } + var data VersionInfo + if err := resp.ParseBody("", &data); err != nil { + return VersionInfo{}, WithStack(err) + } + return data, nil +} + +// roleResponse contains the response body of the `/admin/server/role` api. 
+type roleResponse struct { + // Role of the server within a cluster + Role string `json:"role,omitempty"` + Mode string `json:"mode,omitempty"` + ArangoError +} + +// asServerRole converts the response into a ServerRole +func (r roleResponse) asServerRole(ctx context.Context, c *client) (ServerRole, error) { + switch r.Role { + case "SINGLE": + switch r.Mode { + case "resilient": + if err := c.echo(ctx); IsNoLeader(err) { + return ServerRoleSinglePassive, nil + } else if err != nil { + return ServerRoleUndefined, WithStack(err) + } + return ServerRoleSingleActive, nil + default: + return ServerRoleSingle, nil + } + case "PRIMARY": + return ServerRoleDBServer, nil + case "COORDINATOR": + return ServerRoleCoordinator, nil + case "AGENT": + return ServerRoleAgent, nil + case "UNDEFINED": + return ServerRoleUndefined, nil + default: + return ServerRoleUndefined, nil + } +} + +// ServerRole returns the role of the server that answers the request. +func (c *client) ServerRole(ctx context.Context) (ServerRole, error) { + req, err := c.conn.NewRequest("GET", "_admin/server/role") + if err != nil { + return ServerRoleUndefined, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return ServerRoleUndefined, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return ServerRoleUndefined, WithStack(err) + } + var data roleResponse + if err := resp.ParseBody("", &data); err != nil { + return ServerRoleUndefined, WithStack(err) + } + role, err := data.asServerRole(ctx, c) + if err != nil { + return ServerRoleUndefined, WithStack(err) + } + return role, nil +} + +type idResponse struct { + ID string `json:"id,omitempty"` +} + +// Gets the ID of this server in the cluster. +// An error is returned when calling this to a server that is not part of a cluster. 
+func (c *client) ServerID(ctx context.Context) (string, error) { + req, err := c.conn.NewRequest("GET", "_admin/server/id") + if err != nil { + return "", WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return "", WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return "", WithStack(err) + } + var data idResponse + if err := resp.ParseBody("", &data); err != nil { + return "", WithStack(err) + } + return data.ID, nil +} + +// clusterEndpoints returns the endpoints of a cluster. +func (c *client) echo(ctx context.Context) error { + req, err := c.conn.NewRequest("GET", "_admin/echo") + if err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/client_users.go b/vendor/github.com/arangodb/go-driver/client_users.go new file mode 100644 index 00000000000..b891b44caf8 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_users.go @@ -0,0 +1,52 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// ClientUsers provides access to the users in a single arangodb database server, or an entire cluster of arangodb servers. +type ClientUsers interface { + // User opens a connection to an existing user. + // If no user with given name exists, an NotFoundError is returned. + User(ctx context.Context, name string) (User, error) + + // UserExists returns true if a user with given name exists. + UserExists(ctx context.Context, name string) (bool, error) + + // Users returns a list of all users found by the client. + Users(ctx context.Context) ([]User, error) + + // CreateUser creates a new user with given name and opens a connection to it. + // If a user with given name already exists, a Conflict error is returned. + CreateUser(ctx context.Context, name string, options *UserOptions) (User, error) +} + +// UserOptions contains options for creating a new user, updating or replacing a user. +type UserOptions struct { + // The user password as a string. If not specified, it will default to an empty string. + Password string `json:"passwd,omitempty"` + // A flag indicating whether the user account should be activated or not. The default value is true. If set to false, the user won't be able to log into the database. + Active *bool `json:"active,omitempty"` + // A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB. 
+ Extra interface{} `json:"extra,omitempty"` +} diff --git a/vendor/github.com/arangodb/go-driver/client_users_impl.go b/vendor/github.com/arangodb/go-driver/client_users_impl.go new file mode 100644 index 00000000000..0a17194da74 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/client_users_impl.go @@ -0,0 +1,144 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// User opens a connection to an existing user. +// If no user with given name exists, an NotFoundError is returned. +func (c *client) User(ctx context.Context, name string) (User, error) { + escapedName := pathEscape(name) + req, err := c.conn.NewRequest("GET", path.Join("_api/user", escapedName)) + if err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data userData + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + u, err := newUser(data, c.conn) + if err != nil { + return nil, WithStack(err) + } + return u, nil +} + +// UserExists returns true if a database with given name exists. 
+func (c *client) UserExists(ctx context.Context, name string) (bool, error) { + escapedName := pathEscape(name) + req, err := c.conn.NewRequest("GET", path.Join("_api", "user", escapedName)) + if err != nil { + return false, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err == nil { + return true, nil + } else if IsNotFound(err) { + return false, nil + } else { + return false, WithStack(err) + } +} + +type listUsersResponse struct { + Result []userData `json:"result,omitempty"` + ArangoError +} + +// Users returns a list of all users found by the client. +func (c *client) Users(ctx context.Context) ([]User, error) { + req, err := c.conn.NewRequest("GET", "/_api/user") + if err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data listUsersResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]User, 0, len(data.Result)) + for _, userData := range data.Result { + u, err := newUser(userData, c.conn) + if err != nil { + return nil, WithStack(err) + } + result = append(result, u) + } + return result, nil +} + +// CreateUser creates a new user with given name and opens a connection to it. +// If a user with given name already exists, a DuplicateError is returned. 
+func (c *client) CreateUser(ctx context.Context, name string, options *UserOptions) (User, error) { + input := struct { + UserOptions + Name string `json:"user"` + }{ + Name: name, + } + if options != nil { + input.UserOptions = *options + } + req, err := c.conn.NewRequest("POST", path.Join("_api/user")) + if err != nil { + return nil, WithStack(err) + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return nil, WithStack(err) + } + var data userData + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + u, err := newUser(data, c.conn) + if err != nil { + return nil, WithStack(err) + } + return u, nil +} diff --git a/vendor/github.com/arangodb/go-driver/cluster.go b/vendor/github.com/arangodb/go-driver/cluster.go new file mode 100644 index 00000000000..69222e584c2 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/cluster.go @@ -0,0 +1,357 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "time" +) + +// Cluster provides access to cluster wide specific operations. 
+// To use this interface, an ArangoDB cluster is required. +type Cluster interface { + // Get the cluster configuration & health + Health(ctx context.Context) (ClusterHealth, error) + + // Get the inventory of the cluster containing all collections (with entire details) of a database. + DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error) + + // MoveShard moves a single shard of the given collection from server `fromServer` to + // server `toServer`. + MoveShard(ctx context.Context, col Collection, shard ShardID, fromServer, toServer ServerID) error + + // CleanOutServer triggers activities to clean out a DBServer. + CleanOutServer(ctx context.Context, serverID string) error + + // ResignServer triggers activities to let a DBServer resign for all shards. + ResignServer(ctx context.Context, serverID string) error + + // IsCleanedOut checks if the dbserver with given ID has been cleaned out. + IsCleanedOut(ctx context.Context, serverID string) (bool, error) + + // RemoveServer is a low-level option to remove a server from a cluster. + // This function is suitable for servers of type coordinator or dbserver. + // The use of `ClientServerAdmin.Shutdown` is highly recommended above this function. + RemoveServer(ctx context.Context, serverID ServerID) error +} + +// ServerID identifies an arangod server in a cluster. +type ServerID string + +// ClusterHealth contains health information for all servers in a cluster. +type ClusterHealth struct { + // Unique identifier of the entire cluster. + // This ID is created when the cluster was first created. 
+ ID string `json:"ClusterId"` + // Health per server + Health map[ServerID]ServerHealth `json:"Health"` +} + +// ServerSyncStatus describes the servers sync status +type ServerSyncStatus string + +const ( + ServerSyncStatusUnknown ServerSyncStatus = "UNKNOWN" + ServerSyncStatusUndefined ServerSyncStatus = "UNDEFINED" + ServerSyncStatusStartup ServerSyncStatus = "STARTUP" + ServerSyncStatusStopping ServerSyncStatus = "STOPPING" + ServerSyncStatusStopped ServerSyncStatus = "STOPPED" + ServerSyncStatusServing ServerSyncStatus = "SERVING" + ServerSyncStatusShutdown ServerSyncStatus = "SHUTDOWN" +) + +// ServerHealth contains health information of a single server in a cluster. +type ServerHealth struct { + Endpoint string `json:"Endpoint"` + LastHeartbeatAcked time.Time `json:"LastHeartbeatAcked"` + LastHeartbeatSent time.Time `json:"LastHeartbeatSent"` + LastHeartbeatStatus string `json:"LastHeartbeatStatus"` + Role ServerRole `json:"Role"` + ShortName string `json:"ShortName"` + Status ServerStatus `json:"Status"` + CanBeDeleted bool `json:"CanBeDeleted"` + HostID string `json:"Host,omitempty"` + Version Version `json:"Version,omitempty"` + Engine EngineType `json:"Engine,omitempty"` + SyncStatus ServerSyncStatus `json:"SyncStatus,omitempty"` + + // Only for Coordinators + AdvertisedEndpoint *string `json:"AdvertisedEndpoint,omitempty"` + + // Only for Agents + Leader *string `json:"Leader,omitempty"` + Leading *bool `json:"Leading,omitempty"` +} + +// ServerStatus describes the health status of a server +type ServerStatus string + +const ( + // ServerStatusGood indicates server is in good state + ServerStatusGood ServerStatus = "GOOD" + // ServerStatusBad indicates server has missed 1 heartbeat + ServerStatusBad ServerStatus = "BAD" + // ServerStatusFailed indicates server has been declared failed by the supervision, this happens after about 15s being bad. 
+ ServerStatusFailed ServerStatus = "FAILED" +) + +// DatabaseInventory describes a detailed state of the collections & shards of a specific database within a cluster. +type DatabaseInventory struct { + // Details of database, this is present since ArangoDB 3.6 + Info DatabaseInfo `json:"properties,omitempty"` + // Details of all collections + Collections []InventoryCollection `json:"collections,omitempty"` + // Details of all views + Views []InventoryView `json:"views,omitempty"` + State State `json:"state,omitempty"` + Tick string `json:"tick,omitempty"` +} + +type State struct { + Running bool `json:"running,omitempty"` + LastLogTick string `json:"lastLogTick,omitempty"` + LastUncommittedLogTick string `json:"lastUncommittedLogTick,omitempty"` + TotalEvents int64 `json:"totalEvents,omitempty"` + Time time.Time `json:"time,omitempty"` +} + +// UnmarshalJSON marshals State to arangodb json representation +func (s *State) UnmarshalJSON(d []byte) error { + var internal interface{} + + if err := json.Unmarshal(d, &internal); err != nil { + return err + } + + if val, ok := internal.(string); ok { + if val != "unused" { + fmt.Printf("unrecognized State value: %s\n", val) + } + *s = State{} + return nil + } else { + type Alias State + out := Alias{} + + if err := json.Unmarshal(d, &out); err != nil { + return &json.UnmarshalTypeError{ + Value: string(d), + Type: reflect.TypeOf(s).Elem(), + } + } + *s = State(out) + } + + return nil +} + +// IsReady returns true if the IsReady flag of all collections is set. +func (i DatabaseInventory) IsReady() bool { + for _, c := range i.Collections { + if !c.IsReady { + return false + } + } + return true +} + +// PlanVersion returns the plan version of the first collection in the given inventory. +func (i DatabaseInventory) PlanVersion() int64 { + if len(i.Collections) == 0 { + return 0 + } + return i.Collections[0].PlanVersion +} + +// CollectionByName returns the InventoryCollection with given name. +// Return false if not found. 
+func (i DatabaseInventory) CollectionByName(name string) (InventoryCollection, bool) { + for _, c := range i.Collections { + if c.Parameters.Name == name { + return c, true + } + } + return InventoryCollection{}, false +} + +// ViewByName returns the InventoryView with given name. +// Return false if not found. +func (i DatabaseInventory) ViewByName(name string) (InventoryView, bool) { + for _, v := range i.Views { + if v.Name == name { + return v, true + } + } + return InventoryView{}, false +} + +// InventoryCollection is a single element of a DatabaseInventory, containing all information +// of a specific collection. +type InventoryCollection struct { + Parameters InventoryCollectionParameters `json:"parameters"` + Indexes []InventoryIndex `json:"indexes,omitempty"` + PlanVersion int64 `json:"planVersion,omitempty"` + IsReady bool `json:"isReady,omitempty"` + AllInSync bool `json:"allInSync,omitempty"` +} + +// IndexByFieldsAndType returns the InventoryIndex with given fields & type. +// Return false if not found. +func (i InventoryCollection) IndexByFieldsAndType(fields []string, indexType string) (InventoryIndex, bool) { + for _, idx := range i.Indexes { + if idx.Type == indexType && idx.FieldsEqual(fields) { + return idx, true + } + } + return InventoryIndex{}, false +} + +// InventoryCollectionParameters contains all configuration parameters of a collection in a database inventory. +type InventoryCollectionParameters struct { + // Available from 3.7 ArangoD version. + CacheEnabled bool `json:"cacheEnabled,omitempty"` + Deleted bool `json:"deleted,omitempty"` + DistributeShardsLike string `json:"distributeShardsLike,omitempty"` + // Deprecated: since 3.7 version. It is related only to MMFiles. + DoCompact bool `json:"doCompact,omitempty"` + // Available from 3.7 ArangoD version. + GloballyUniqueId string `json:"globallyUniqueId,omitempty"` + ID string `json:"id,omitempty"` + // Deprecated: since 3.7 version. It is related only to MMFiles. 
+ IndexBuckets int `json:"indexBuckets,omitempty"` + Indexes []InventoryIndex `json:"indexes,omitempty"` + // Available from 3.9 ArangoD version. + InternalValidatorType int `json:"internalValidatorType,omitempty"` + // Available from 3.7 ArangoD version. + IsDisjoint bool `json:"isDisjoint,omitempty"` + IsSmart bool `json:"isSmart,omitempty"` + // Available from 3.7 ArangoD version. + IsSmartChild bool `json:"isSmartChild,omitempty"` + IsSystem bool `json:"isSystem,omitempty"` + // Deprecated: since 3.7 version. It is related only to MMFiles. + IsVolatile bool `json:"isVolatile,omitempty"` + // Deprecated: since 3.7 version. It is related only to MMFiles. + JournalSize int64 `json:"journalSize,omitempty"` + KeyOptions struct { + AllowUserKeys bool `json:"allowUserKeys,omitempty"` + // Deprecated: this field has wrong type and will be removed in the future. It is not used anymore since it can cause parsing issues. + LastValue int64 `json:"-"` + LastValueV2 uint64 `json:"lastValue,omitempty"` + Type string `json:"type,omitempty"` + } `json:"keyOptions"` + // Deprecated: use 'WriteConcern' instead. + MinReplicationFactor int `json:"minReplicationFactor,omitempty"` + Name string `json:"name,omitempty"` + NumberOfShards int `json:"numberOfShards,omitempty"` + // Deprecated: since 3.7 ArangoD version. + Path string `json:"path,omitempty"` + PlanID string `json:"planId,omitempty"` + ReplicationFactor int `json:"replicationFactor,omitempty"` + // Schema for collection validation. + Schema *CollectionSchemaOptions `json:"schema,omitempty"` + ShadowCollections []int `json:"shadowCollections,omitempty"` + ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"` + ShardKeys []string `json:"shardKeys,omitempty"` + Shards map[ShardID][]ServerID `json:"shards,omitempty"` + // Optional only for some collections. + SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"` + // Optional only for some collections. 
+ SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"` + Status CollectionStatus `json:"status,omitempty"` + // Available from 3.7 ArangoD version. + SyncByRevision bool `json:"syncByRevision,omitempty"` + Type CollectionType `json:"type,omitempty"` + // Available from 3.7 ArangoD version. + UsesRevisionsAsDocumentIds bool `json:"usesRevisionsAsDocumentIds,omitempty"` + WaitForSync bool `json:"waitForSync,omitempty"` + // Available from 3.6 ArangoD version. + WriteConcern int `json:"writeConcern,omitempty"` + // Available from 3.10 ArangoD version. + ComputedValues []ComputedValue `json:"computedValues,omitempty"` +} + +// IsSatellite returns true if the collection is a satellite collection +func (icp *InventoryCollectionParameters) IsSatellite() bool { + return icp.ReplicationFactor == ReplicationFactorSatellite +} + +// ShardID is an internal identifier of a specific shard +type ShardID string + +// InventoryIndex contains all configuration parameters of a single index of a collection in a database inventory. +type InventoryIndex struct { + ID string `json:"id,omitempty"` + Type string `json:"type,omitempty"` + Fields []string `json:"fields,omitempty"` + Unique bool `json:"unique"` + Sparse bool `json:"sparse"` + Deduplicate bool `json:"deduplicate"` + MinLength int `json:"minLength,omitempty"` + GeoJSON bool `json:"geoJson,omitempty"` + Name string `json:"name,omitempty"` + ExpireAfter int `json:"expireAfter,omitempty"` + Estimates bool `json:"estimates,omitempty"` + FieldValueTypes string `json:"fieldValueTypes,omitempty"` + CacheEnabled *bool `json:"cacheEnabled,omitempty"` +} + +// FieldsEqual returns true when the given fields list equals the +// Fields list in the InventoryIndex. +// The order of fields is irrelevant. 
+func (i InventoryIndex) FieldsEqual(fields []string) bool { + return stringSliceEqualsIgnoreOrder(i.Fields, fields) +} + +// InventoryView is a single element of a DatabaseInventory, containing all information +// of a specific view. +type InventoryView struct { + Name string `json:"name,omitempty"` + Deleted bool `json:"deleted,omitempty"` + ID string `json:"id,omitempty"` + IsSystem bool `json:"isSystem,omitempty"` + PlanID string `json:"planId,omitempty"` + Type ViewType `json:"type,omitempty"` + // Include all properties from an arangosearch view. + ArangoSearchViewProperties +} + +// stringSliceEqualsIgnoreOrder returns true when the given lists contain the same elements. +// The order of elements is irrelevant. +func stringSliceEqualsIgnoreOrder(a, b []string) bool { + if len(a) != len(b) { + return false + } + bMap := make(map[string]struct{}) + for _, x := range b { + bMap[x] = struct{}{} + } + for _, x := range a { + if _, found := bMap[x]; !found { + return false + } + } + return true +} diff --git a/vendor/github.com/arangodb/go-driver/cluster/cluster.go b/vendor/github.com/arangodb/go-driver/cluster/cluster.go new file mode 100644 index 00000000000..1eb5dff4a73 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/cluster/cluster.go @@ -0,0 +1,356 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package cluster + +import ( + "context" + "math" + "net/http" + "sort" + "strings" + "sync" + "time" + + driver "github.com/arangodb/go-driver" +) + +const ( + keyFollowLeaderRedirect driver.ContextKey = "arangodb-followLeaderRedirect" +) + +// ConnectionConfig provides all configuration options for a cluster connection. +type ConnectionConfig struct { + // DefaultTimeout is the timeout used by requests that have no timeout set in the given context. + DefaultTimeout time.Duration +} + +// ServerConnectionBuilder specifies a function called by the cluster connection when it +// needs to create an underlying connection to a specific endpoint. +type ServerConnectionBuilder func(endpoint string) (driver.Connection, error) + +// NewConnection creates a new cluster connection to a cluster of servers. +// The given connections are existing connections to each of the servers. +func NewConnection(config ConnectionConfig, connectionBuilder ServerConnectionBuilder, endpoints []string) (driver.Connection, error) { + if connectionBuilder == nil { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "Must a connection builder"}) + } + if len(endpoints) == 0 { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "Must provide at least 1 endpoint"}) + } + if config.DefaultTimeout == 0 { + config.DefaultTimeout = defaultTimeout + } + cConn := &clusterConnection{ + connectionBuilder: connectionBuilder, + defaultTimeout: config.DefaultTimeout, + } + // Initialize endpoints + if err := cConn.UpdateEndpoints(endpoints); err != nil { + return nil, driver.WithStack(err) + } + return cConn, nil +} + +const ( + defaultTimeout = 9 * time.Minute + keyEndpoint driver.ContextKey = "arangodb-endpoint" +) + +type clusterConnection struct { + connectionBuilder ServerConnectionBuilder + servers []driver.Connection + endpoints []string + current int + mutex sync.RWMutex + 
defaultTimeout time.Duration + auth driver.Authentication +} + +// NewRequest creates a new request with given method and path. +func (c *clusterConnection) NewRequest(method, path string) (driver.Request, error) { + c.mutex.RLock() + servers := c.servers + c.mutex.RUnlock() + + // It is assumed that all servers used the same protocol. + if len(servers) > 0 { + return servers[0].NewRequest(method, path) + } + return nil, driver.WithStack(driver.ArangoError{ + HasError: true, + Code: http.StatusServiceUnavailable, + ErrorMessage: "no servers available", + }) +} + +// Do performs a given request, returning its response. +func (c *clusterConnection) Do(ctx context.Context, req driver.Request) (driver.Response, error) { + followLeaderRedirect := true + if ctx == nil { + ctx = context.Background() + } else { + if v := ctx.Value(keyFollowLeaderRedirect); v != nil { + if on, ok := v.(bool); ok { + followLeaderRedirect = on + } + } + } + // Timeout management. + // We take the given timeout and divide it in 3 so we allow for other servers + // to give it a try if an earlier server fails. 
+ deadline, hasDeadline := ctx.Deadline() + var timeout time.Duration + if hasDeadline { + timeout = deadline.Sub(time.Now()) + } else { + timeout = c.defaultTimeout + } + + var server driver.Connection + var serverCount int + var durationPerRequest time.Duration + + if v := ctx.Value(keyEndpoint); v != nil { + if endpoint, ok := v.(string); ok { + // Override pool to only specific server if it is found + if s, ok := c.getSpecificServer(endpoint); ok { + server = s + durationPerRequest = timeout + serverCount = 1 + } + } + } + + if server == nil { + server, serverCount = c.getCurrentServer() + timeoutDivider := math.Max(1.0, math.Min(3.0, float64(serverCount))) + durationPerRequest = time.Duration(float64(timeout) / timeoutDivider) + } + + attempt := 1 + for { + // Send request to specific endpoint with a 1/3 timeout (so we get 3 attempts) + serverCtx, cancel := context.WithTimeout(ctx, durationPerRequest) + resp, err := server.Do(serverCtx, req) + cancel() + + isNoLeaderResponse := false + if err == nil && resp.StatusCode() == 503 { + // Service unavailable, parse the body, perhaps this is a "no leader" + // case where we have to failover. + var aerr driver.ArangoError + if perr := resp.ParseBody("", &aerr); perr == nil && aerr.HasError { + if driver.IsNoLeader(aerr) { + isNoLeaderResponse = true + // Save error in case we have no more servers + err = aerr + } + } + } + + if !isNoLeaderResponse || !followLeaderRedirect { + if err == nil { + // We're done + return resp, nil + } + // No success yet + if driver.IsCanceled(err) { + // Request was cancelled, we return directly. + return nil, driver.WithStack(err) + } + // If we've completely written the request, we return the error, + // otherwise we'll failover to a new server. + if req.Written() { + // Request has been written to network, do not failover + if driver.IsArangoError(err) { + // ArangoError, so we got an error response from server. 
+ return nil, driver.WithStack(err) + } + // Not an ArangoError, so it must be some kind of timeout, network ... error. + return nil, driver.WithStack(&driver.ResponseError{Err: err}) + } + } + + // Failed, try next server + attempt++ + if attempt > serverCount { + // A specific server was specified, no failover. + // or + // We've tried all servers. Giving up. + return nil, driver.WithStack(err) + } + server = c.getNextServer() + } +} + +/*func printError(err error, indent string) { + if err == nil { + return + } + fmt.Printf("%sGot %T %+v\n", indent, err, err) + if xerr, ok := err.(*os.SyscallError); ok { + printError(xerr.Err, indent+" ") + } else if xerr, ok := err.(*net.OpError); ok { + printError(xerr.Err, indent+" ") + } else if xerr, ok := err.(*url.Error); ok { + printError(xerr.Err, indent+" ") + } +}*/ + +// Unmarshal unmarshals the given raw object into the given result interface. +func (c *clusterConnection) Unmarshal(data driver.RawObject, result interface{}) error { + c.mutex.RLock() + servers := c.servers + c.mutex.RUnlock() + + if len(servers) > 0 { + if err := c.servers[0].Unmarshal(data, result); err != nil { + return driver.WithStack(err) + } + return nil + } + return driver.WithStack(driver.ArangoError{ + HasError: true, + Code: http.StatusServiceUnavailable, + ErrorMessage: "no servers available", + }) +} + +// Endpoints returns the endpoints used by this connection. +func (c *clusterConnection) Endpoints() []string { + c.mutex.RLock() + defer c.mutex.RUnlock() + + var result []string + for _, s := range c.servers { + result = append(result, s.Endpoints()...) + } + return result +} + +// UpdateEndpoints reconfigures the connection to use the given endpoints. 
+func (c *clusterConnection) UpdateEndpoints(endpoints []string) error { + if len(endpoints) == 0 { + return driver.WithStack(driver.InvalidArgumentError{Message: "Must provide at least 1 endpoint"}) + } + sort.Strings(endpoints) + if strings.Join(endpoints, ",") == strings.Join(c.endpoints, ",") { + // No changes + return nil + } + + // Create new connections + servers := make([]driver.Connection, 0, len(endpoints)) + for _, ep := range endpoints { + conn, err := c.connectionBuilder(ep) + if err != nil { + return driver.WithStack(err) + } + if c.auth != nil { + conn, err = conn.SetAuthentication(c.auth) + if err != nil { + return driver.WithStack(err) + } + } + servers = append(servers, conn) + } + + // Swap connections + c.mutex.Lock() + defer c.mutex.Unlock() + c.servers = servers + c.endpoints = endpoints + c.current = 0 + + return nil +} + +// Configure the authentication used for this connection. +func (c *clusterConnection) SetAuthentication(auth driver.Authentication) (driver.Connection, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + // Configure underlying servers + newServerConnections := make([]driver.Connection, len(c.servers)) + for i, s := range c.servers { + authConn, err := s.SetAuthentication(auth) + if err != nil { + return nil, driver.WithStack(err) + } + newServerConnections[i] = authConn + } + + // Save authentication + c.auth = auth + c.servers = newServerConnections + + return c, nil +} + +// Protocols returns all protocols used by this connection. +func (c *clusterConnection) Protocols() driver.ProtocolSet { + c.mutex.RLock() + defer c.mutex.RUnlock() + + var result driver.ProtocolSet + for _, s := range c.servers { + for _, p := range s.Protocols() { + if !result.Contains(p) { + result = append(result, p) + } + } + } + return result +} + +// getCurrentServer returns the currently used server and number of servers. 
+func (c *clusterConnection) getCurrentServer() (driver.Connection, int) { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.servers[c.current], len(c.servers) +} + +// getSpecificServer returns the server with the given endpoint. +func (c *clusterConnection) getSpecificServer(endpoint string) (driver.Connection, bool) { + c.mutex.RLock() + defer c.mutex.RUnlock() + + for _, s := range c.servers { + for _, x := range s.Endpoints() { + if x == endpoint { + return s, true + } + } + } + + // If endpoint is not found allow to use default connection pool - request will be routed thru coordinators + return nil, false +} + +// getNextServer changes the currently used server and returns the new server. +func (c *clusterConnection) getNextServer() driver.Connection { + c.mutex.Lock() + defer c.mutex.Unlock() + c.current = (c.current + 1) % len(c.servers) + return c.servers[c.current] +} diff --git a/vendor/github.com/arangodb/go-driver/cluster/doc.go b/vendor/github.com/arangodb/go-driver/cluster/doc.go new file mode 100644 index 00000000000..53abf292071 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/cluster/doc.go @@ -0,0 +1,26 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +/* +Package cluster implements a driver.Connection that provides cluster failover support (it is not intended to be used directly). +*/ +package cluster diff --git a/vendor/github.com/arangodb/go-driver/cluster_impl.go b/vendor/github.com/arangodb/go-driver/cluster_impl.go new file mode 100644 index 00000000000..8028c4ed50b --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/cluster_impl.go @@ -0,0 +1,489 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "encoding/json" + "path" + "reflect" +) + +// newCluster creates a new Cluster implementation. 
+func newCluster(conn Connection) (Cluster, error) { + if conn == nil { + return nil, WithStack(InvalidArgumentError{Message: "conn is nil"}) + } + return &cluster{ + conn: conn, + }, nil +} + +type cluster struct { + conn Connection +} + +// Health returns the state of the cluster +func (c *cluster) Health(ctx context.Context) (ClusterHealth, error) { + req, err := c.conn.NewRequest("GET", "_admin/cluster/health") + if err != nil { + return ClusterHealth{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return ClusterHealth{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return ClusterHealth{}, WithStack(err) + } + var result ClusterHealth + if err := resp.ParseBody("", &result); err != nil { + return ClusterHealth{}, WithStack(err) + } + return result, nil +} + +// DatabaseInventory Get the inventory of the cluster containing all collections (with entire details) of a database. +func (c *cluster) DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error) { + req, err := c.conn.NewRequest("GET", path.Join("_db", db.Name(), "_api/replication/clusterInventory")) + if err != nil { + return DatabaseInventory{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DatabaseInventory{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return DatabaseInventory{}, WithStack(err) + } + var result DatabaseInventory + if err := resp.ParseBody("", &result); err != nil { + return DatabaseInventory{}, WithStack(err) + } + return result, nil +} + +type moveShardRequest struct { + Database string `json:"database"` + Collection string `json:"collection"` + Shard ShardID `json:"shard"` + FromServer ServerID `json:"fromServer"` + ToServer ServerID `json:"toServer"` +} + +// MoveShard moves a single shard of the given collection from server `fromServer` to +// server `toServer`. 
+func (c *cluster) MoveShard(ctx context.Context, col Collection, shard ShardID, fromServer, toServer ServerID) error {
+ req, err := c.conn.NewRequest("POST", "_admin/cluster/moveShard")
+ if err != nil {
+ return WithStack(err)
+ }
+ input := moveShardRequest{
+ Database: col.Database().Name(),
+ Collection: col.Name(),
+ Shard: shard,
+ FromServer: fromServer,
+ ToServer: toServer,
+ }
+ if _, err := req.SetBody(input); err != nil {
+ return WithStack(err)
+ }
+ cs := applyContextSettings(ctx, req)
+ resp, err := c.conn.Do(ctx, req)
+ if err != nil {
+ return WithStack(err)
+ }
+ if err := resp.CheckStatus(202); err != nil {
+ return WithStack(err)
+ }
+ var result jobIDResponse
+ if err := resp.ParseBody("", &result); err != nil {
+ return WithStack(err)
+ }
+ if cs.JobIDResponse != nil {
+ *cs.JobIDResponse = result.JobID
+ }
+ return nil
+}
+
+type cleanOutServerRequest struct {
+ Server string `json:"server"`
+}
+
+type jobIDResponse struct {
+ JobID string `json:"id"`
+}
+
+// CleanOutServer triggers activities to clean out a DBServer.
+func (c *cluster) CleanOutServer(ctx context.Context, serverID string) error {
+ req, err := c.conn.NewRequest("POST", "_admin/cluster/cleanOutServer")
+ if err != nil {
+ return WithStack(err)
+ }
+ input := cleanOutServerRequest{
+ Server: serverID,
+ }
+ if _, err := req.SetBody(input); err != nil {
+ return WithStack(err)
+ }
+ cs := applyContextSettings(ctx, req)
+ resp, err := c.conn.Do(ctx, req)
+ if err != nil {
+ return WithStack(err)
+ }
+ if err := resp.CheckStatus(200, 202); err != nil {
+ return WithStack(err)
+ }
+ var result jobIDResponse
+ if err := resp.ParseBody("", &result); err != nil {
+ return WithStack(err)
+ }
+ if cs.JobIDResponse != nil {
+ *cs.JobIDResponse = result.JobID
+ }
+ return nil
+}
+
+// ResignServer triggers activities to let a DBServer resign for all shards.
+func (c *cluster) ResignServer(ctx context.Context, serverID string) error { + req, err := c.conn.NewRequest("POST", "_admin/cluster/resignLeadership") + if err != nil { + return WithStack(err) + } + input := cleanOutServerRequest{ + Server: serverID, + } + if _, err := req.SetBody(input); err != nil { + return WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200, 202); err != nil { + return WithStack(err) + } + var result jobIDResponse + if err := resp.ParseBody("", &result); err != nil { + return WithStack(err) + } + if cs.JobIDResponse != nil { + *cs.JobIDResponse = result.JobID + } + return nil +} + +// IsCleanedOut checks if the dbserver with given ID has been cleaned out. +func (c *cluster) IsCleanedOut(ctx context.Context, serverID string) (bool, error) { + r, err := c.NumberOfServers(ctx) + if err != nil { + return false, WithStack(err) + } + for _, id := range r.CleanedServerIDs { + if id == serverID { + return true, nil + } + } + return false, nil +} + +// NumberOfServersResponse holds the data returned from a NumberOfServer request. +type NumberOfServersResponse struct { + NoCoordinators int `json:"numberOfCoordinators,omitempty"` + NoDBServers int `json:"numberOfDBServers,omitempty"` + CleanedServerIDs []string `json:"cleanedServers,omitempty"` +} + +// NumberOfServers returns the number of coordinator & dbservers in a clusters and the +// ID's of cleaned out servers. 
+func (c *cluster) NumberOfServers(ctx context.Context) (NumberOfServersResponse, error) { + req, err := c.conn.NewRequest("GET", "_admin/cluster/numberOfServers") + if err != nil { + return NumberOfServersResponse{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return NumberOfServersResponse{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return NumberOfServersResponse{}, WithStack(err) + } + var result NumberOfServersResponse + if err := resp.ParseBody("", &result); err != nil { + return NumberOfServersResponse{}, WithStack(err) + } + return result, nil +} + +// RemoveServer is a low-level option to remove a server from a cluster. +// This function is suitable for servers of type coordinator or dbserver. +// The use of `ClientServerAdmin.Shutdown` is highly recommended above this function. +func (c *cluster) RemoveServer(ctx context.Context, serverID ServerID) error { + req, err := c.conn.NewRequest("POST", "_admin/cluster/removeServer") + if err != nil { + return WithStack(err) + } + if _, err := req.SetBody(serverID); err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200, 202); err != nil { + return WithStack(err) + } + return nil +} + +// replicationFactor represents the replication factor of a collection +// Has special value ReplicationFactorSatellite for satellite collections +type replicationFactor int + +type inventoryCollectionParametersInternal struct { + // Available from 3.7 ArangoD version. + CacheEnabled bool `json:"cacheEnabled,omitempty"` + Deleted bool `json:"deleted,omitempty"` + DistributeShardsLike string `json:"distributeShardsLike,omitempty"` + DoCompact bool `json:"doCompact,omitempty"` + // Available from 3.7 ArangoD version. 
+ GloballyUniqueId string `json:"globallyUniqueId,omitempty"` + ID string `json:"id,omitempty"` + IndexBuckets int `json:"indexBuckets,omitempty"` + Indexes []InventoryIndex `json:"indexes,omitempty"` + // Available from 3.9 ArangoD version. + InternalValidatorType int `json:"internalValidatorType,omitempty"` + // Available from 3.7 ArangoD version. + IsDisjoint bool `json:"isDisjoint,omitempty"` + IsSmart bool `json:"isSmart,omitempty"` + // Available from 3.7 ArangoD version. + IsSmartChild bool `json:"isSmartChild,omitempty"` + IsSystem bool `json:"isSystem,omitempty"` + // Deprecated: since 3.7 version. It is related only to MMFiles. + IsVolatile bool `json:"isVolatile,omitempty"` + // Deprecated: since 3.7 version. It is related only to MMFiles. + JournalSize int64 `json:"journalSize,omitempty"` + KeyOptions struct { + AllowUserKeys bool `json:"allowUserKeys,omitempty"` + LastValue uint64 `json:"lastValue,omitempty"` + Type string `json:"type,omitempty"` + } `json:"keyOptions"` + // Deprecated: use 'WriteConcern' instead + MinReplicationFactor int `json:"minReplicationFactor,omitempty"` + Name string `json:"name,omitempty"` + NumberOfShards int `json:"numberOfShards,omitempty"` + Path string `json:"path,omitempty"` + PlanID string `json:"planId,omitempty"` + ReplicationFactor replicationFactor `json:"replicationFactor,omitempty"` + // Schema for collection validation + Schema *CollectionSchemaOptions `json:"schema,omitempty"` + ShadowCollections []int `json:"shadowCollections,omitempty"` + ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"` + ShardKeys []string `json:"shardKeys,omitempty"` + Shards map[ShardID][]ServerID `json:"shards,omitempty"` + // Optional only for some collections. + SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"` + // Optional only for some collections. 
+ SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"` + Status CollectionStatus `json:"status,omitempty"` + // Available from 3.7 ArangoD version + SyncByRevision bool `json:"syncByRevision,omitempty"` + Type CollectionType `json:"type,omitempty"` + // Available from 3.7 ArangoD version + UsesRevisionsAsDocumentIds bool `json:"usesRevisionsAsDocumentIds,omitempty"` + WaitForSync bool `json:"waitForSync,omitempty"` + // Available from 3.6 ArangoD version. + WriteConcern int `json:"writeConcern,omitempty"` + // Available from 3.10 ArangoD version. + ComputedValues []ComputedValue `json:"computedValues,omitempty"` +} + +func (p *InventoryCollectionParameters) asInternal() inventoryCollectionParametersInternal { + lastValue := p.KeyOptions.LastValueV2 + if lastValue == 0 && p.KeyOptions.LastValue != 0 { + lastValue = uint64(p.KeyOptions.LastValue) + } + + return inventoryCollectionParametersInternal{ + CacheEnabled: p.CacheEnabled, + Deleted: p.Deleted, + DistributeShardsLike: p.DistributeShardsLike, + DoCompact: p.DoCompact, + GloballyUniqueId: p.GloballyUniqueId, + ID: p.ID, + IndexBuckets: p.IndexBuckets, + Indexes: p.Indexes, + InternalValidatorType: p.InternalValidatorType, + IsDisjoint: p.IsDisjoint, + IsSmart: p.IsSmart, + IsSmartChild: p.IsSmartChild, + IsSystem: p.IsSystem, + IsVolatile: p.IsVolatile, + JournalSize: p.JournalSize, + KeyOptions: struct { + AllowUserKeys bool `json:"allowUserKeys,omitempty"` + LastValue uint64 `json:"lastValue,omitempty"` + Type string `json:"type,omitempty"` + }{ + p.KeyOptions.AllowUserKeys, + lastValue, + p.KeyOptions.Type}, + MinReplicationFactor: p.MinReplicationFactor, + Name: p.Name, + NumberOfShards: p.NumberOfShards, + Path: p.Path, + PlanID: p.PlanID, + ReplicationFactor: replicationFactor(p.ReplicationFactor), + Schema: p.Schema, + ShadowCollections: p.ShadowCollections, + ShardingStrategy: p.ShardingStrategy, + ShardKeys: p.ShardKeys, + Shards: p.Shards, + SmartGraphAttribute: p.SmartGraphAttribute, + 
SmartJoinAttribute: p.SmartJoinAttribute, + Status: p.Status, + SyncByRevision: p.SyncByRevision, + Type: p.Type, + UsesRevisionsAsDocumentIds: p.UsesRevisionsAsDocumentIds, + WaitForSync: p.WaitForSync, + WriteConcern: p.WriteConcern, + ComputedValues: p.ComputedValues, + } +} + +func (p *InventoryCollectionParameters) fromInternal(i inventoryCollectionParametersInternal) { + *p = i.asExternal() +} + +func (p *inventoryCollectionParametersInternal) asExternal() InventoryCollectionParameters { + return InventoryCollectionParameters{ + CacheEnabled: p.CacheEnabled, + Deleted: p.Deleted, + DistributeShardsLike: p.DistributeShardsLike, + DoCompact: p.DoCompact, + GloballyUniqueId: p.GloballyUniqueId, + ID: p.ID, + IndexBuckets: p.IndexBuckets, + Indexes: p.Indexes, + InternalValidatorType: p.InternalValidatorType, + IsDisjoint: p.IsDisjoint, + IsSmart: p.IsSmart, + IsSmartChild: p.IsSmartChild, + IsSystem: p.IsSystem, + IsVolatile: p.IsVolatile, + JournalSize: p.JournalSize, + KeyOptions: struct { + AllowUserKeys bool `json:"allowUserKeys,omitempty"` + LastValue int64 `json:"-"` + LastValueV2 uint64 `json:"lastValue,omitempty"` + Type string `json:"type,omitempty"` + }{ + p.KeyOptions.AllowUserKeys, + // cast to int64 to keep backwards compatibility for most cases + int64(p.KeyOptions.LastValue), + p.KeyOptions.LastValue, + p.KeyOptions.Type}, + MinReplicationFactor: p.MinReplicationFactor, + Name: p.Name, + NumberOfShards: p.NumberOfShards, + Path: p.Path, + PlanID: p.PlanID, + ReplicationFactor: int(p.ReplicationFactor), + Schema: p.Schema, + ShadowCollections: p.ShadowCollections, + ShardingStrategy: p.ShardingStrategy, + ShardKeys: p.ShardKeys, + Shards: p.Shards, + SmartGraphAttribute: p.SmartGraphAttribute, + SmartJoinAttribute: p.SmartJoinAttribute, + Status: p.Status, + SyncByRevision: p.SyncByRevision, + Type: p.Type, + UsesRevisionsAsDocumentIds: p.UsesRevisionsAsDocumentIds, + WaitForSync: p.WaitForSync, + WriteConcern: p.WriteConcern, + ComputedValues: 
p.ComputedValues, + } +} + +// MarshalJSON converts InventoryCollectionParameters into json +func (p *InventoryCollectionParameters) MarshalJSON() ([]byte, error) { + return json.Marshal(p.asInternal()) +} + +// UnmarshalJSON loads InventoryCollectionParameters from json +func (p *InventoryCollectionParameters) UnmarshalJSON(d []byte) error { + var internal inventoryCollectionParametersInternal + if err := json.Unmarshal(d, &internal); err != nil { + return err + } + + p.fromInternal(internal) + return nil +} + +const ( + replicationFactorSatelliteString string = "satellite" +) + +// MarshalJSON marshals InventoryCollectionParameters to arangodb json representation +func (r replicationFactor) MarshalJSON() ([]byte, error) { + var replicationFactor interface{} + + if int(r) == ReplicationFactorSatellite { + replicationFactor = replicationFactorSatelliteString + } else { + replicationFactor = int(r) + } + + return json.Marshal(replicationFactor) +} + +// UnmarshalJSON marshals InventoryCollectionParameters to arangodb json representation +func (r *replicationFactor) UnmarshalJSON(d []byte) error { + var internal interface{} + + if err := json.Unmarshal(d, &internal); err != nil { + return err + } + + if i, ok := internal.(float64); ok { + *r = replicationFactor(i) + return nil + } else if str, ok := internal.(string); ok { + if ok && str == replicationFactorSatelliteString { + *r = replicationFactor(ReplicationFactorSatellite) + return nil + } + } + + return &json.UnmarshalTypeError{ + Value: string(d), + Type: reflect.TypeOf(r).Elem(), + } +} diff --git a/vendor/github.com/arangodb/go-driver/collection.go b/vendor/github.com/arangodb/go-driver/collection.go new file mode 100644 index 00000000000..038e60c3e17 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/collection.go @@ -0,0 +1,316 @@ +// +// DISCLAIMER +// +// Copyright 2017-2021 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// Author Tomasz Mielech +// + +package driver + +import ( + "context" + "time" +) + +// Collection provides access to the information of a single collection, all its documents and all its indexes. +type Collection interface { + // Name returns the name of the collection. + Name() string + + // Database returns the database containing the collection. + Database() Database + + // Status fetches the current status of the collection. + Status(ctx context.Context) (CollectionStatus, error) + + // Count fetches the number of document in the collection. + Count(ctx context.Context) (int64, error) + + // Statistics returns the number of documents and additional statistical information about the collection. + Statistics(ctx context.Context) (CollectionStatistics, error) + + // Revision fetches the revision ID of the collection. + // The revision ID is a server-generated string that clients can use to check whether data + // in a collection has changed since the last revision check. + Revision(ctx context.Context) (string, error) + + // Properties fetches extended information about the collection. + Properties(ctx context.Context) (CollectionProperties, error) + + // SetProperties changes properties of the collection. + SetProperties(ctx context.Context, options SetCollectionPropertiesOptions) error + + // Shards fetches shards information of the collection. 
+ Shards(ctx context.Context, details bool) (CollectionShards, error) + + // Load the collection into memory. + Load(ctx context.Context) error + + // Unload unloads the collection from memory. + Unload(ctx context.Context) error + + // Remove removes the entire collection. + // If the collection does not exist, a NotFoundError is returned. + Remove(ctx context.Context) error + + // Truncate removes all documents from the collection, but leaves the indexes intact. + Truncate(ctx context.Context) error + + // All index functions + CollectionIndexes + + // All document functions + CollectionDocuments +} + +// CollectionInfo contains information about a collection +type CollectionInfo struct { + // The identifier of the collection. + ID string `json:"id,omitempty"` + // The name of the collection. + Name string `json:"name,omitempty"` + // The status of the collection + Status CollectionStatus `json:"status,omitempty"` + // StatusString represents status as a string. + StatusString string `json:"statusString,omitempty"` + // The type of the collection + Type CollectionType `json:"type,omitempty"` + // If true then the collection is a system collection. + IsSystem bool `json:"isSystem,omitempty"` + // Global unique name for the collection + GloballyUniqueId string `json:"globallyUniqueId,omitempty"` +} + +// CollectionProperties contains extended information about a collection. +type CollectionProperties struct { + CollectionInfo + ArangoError + + // WaitForSync; If true then creating, changing or removing documents will wait until the data has been synchronized to disk. + WaitForSync bool `json:"waitForSync,omitempty"` + // DoCompact specifies whether or not the collection will be compacted. + DoCompact bool `json:"doCompact,omitempty"` + // JournalSize is the maximal size setting for journals / datafiles in bytes. 
+ JournalSize int64 `json:"journalSize,omitempty"` + // CacheEnabled set cacheEnabled option in collection properties + CacheEnabled bool `json:"cacheEnabled,omitempty"` + // ComputedValues let configure collections to generate document attributes when documents are created or modified, using an AQL expression + ComputedValues []ComputedValue `json:"computedValues,omitempty"` + // KeyOptions + KeyOptions struct { + // Type specifies the type of the key generator. The currently available generators are traditional and autoincrement. + Type KeyGeneratorType `json:"type,omitempty"` + // AllowUserKeys; if set to true, then it is allowed to supply own key values in the _key attribute of a document. + // If set to false, then the key generator is solely responsible for generating keys and supplying own key values in + // the _key attribute of documents is considered an error. + AllowUserKeys bool `json:"allowUserKeys,omitempty"` + LastValue uint64 `json:"lastValue,omitempty"` + } `json:"keyOptions,omitempty"` + // NumberOfShards is the number of shards of the collection. + // Only available in cluster setup. + NumberOfShards int `json:"numberOfShards,omitempty"` + // ShardKeys contains the names of document attributes that are used to determine the target shard for documents. + // Only available in cluster setup. + ShardKeys []string `json:"shardKeys,omitempty"` + // ReplicationFactor contains how many copies of each shard are kept on different DBServers. + // Only available in cluster setup. + ReplicationFactor int `json:"-"` + // Deprecated: use 'WriteConcern' instead + MinReplicationFactor int `json:"minReplicationFactor,omitempty"` + // WriteConcern contains how many copies must be available before a collection can be written. + // It is required that 1 <= WriteConcern <= ReplicationFactor. + // Default is 1. Not available for satellite collections. + // Available from 3.6 arangod version. 
+ WriteConcern int `json:"writeConcern,omitempty"` + // SmartJoinAttribute + // See documentation for smart joins. + // This requires ArangoDB Enterprise Edition. + SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"` + // This attribute specifies the name of the sharding strategy to use for the collection. + // Can not be changed after creation. + ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"` + // This attribute specifies that the sharding of a collection follows that of another + // one. + DistributeShardsLike string `json:"distributeShardsLike,omitempty"` + // This attribute specifies if the new format introduced in 3.7 is used for this + // collection. + UsesRevisionsAsDocumentIds bool `json:"usesRevisionsAsDocumentIds,omitempty"` + // The following attribute specifies if the new MerkleTree based sync protocol + // can be used on the collection. + SyncByRevision bool `json:"syncByRevision,omitempty"` + // Schema for collection validation + Schema *CollectionSchemaOptions `json:"schema,omitempty"` + + Revision string `json:"revision,omitempty"` + + // IsDisjoint set isDisjoint flag for Graph. Required ArangoDB 3.7+ + IsDisjoint bool `json:"isDisjoint,omitempty"` + + IsSmartChild bool `json:"isSmartChild,omitempty"` + + InternalValidatorType *int `json:"internalValidatorType, omitempty"` + + // Set to create a smart edge or vertex collection. + // This requires ArangoDB Enterprise Edition. + IsSmart bool `json:"isSmart,omitempty"` + + // StatusString represents status as a string. 
+ StatusString string `json:"statusString,omitempty"` + + TempObjectId string `json:"tempObjectId,omitempty"` + + ObjectId string `json:"objectId,omitempty"` +} + +const ( + // ReplicationFactorSatellite represents a satellite collection's replication factor + ReplicationFactorSatellite int = -1 +) + +// IsSatellite returns true if the collection is a satellite collection +func (p *CollectionProperties) IsSatellite() bool { + return p.ReplicationFactor == ReplicationFactorSatellite +} + +// SetCollectionPropertiesOptions contains data for Collection.SetProperties. +type SetCollectionPropertiesOptions struct { + // If true then creating or changing a document will wait until the data has been synchronized to disk. + WaitForSync *bool `json:"waitForSync,omitempty"` + // The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MB). Note that when changing the journalSize value, it will only have an effect for additional journals or datafiles that are created. Already existing journals or datafiles will not be affected. + JournalSize int64 `json:"journalSize,omitempty"` + // ReplicationFactor contains how many copies of each shard are kept on different DBServers. + // Only available in cluster setup. + ReplicationFactor int `json:"replicationFactor,omitempty"` + // Deprecated: use 'WriteConcern' instead + MinReplicationFactor int `json:"minReplicationFactor,omitempty"` + // WriteConcern contains how many copies must be available before a collection can be written. + // Available from 3.6 arangod version. 
+ WriteConcern int `json:"writeConcern,omitempty"` + // CacheEnabled set cacheEnabled option in collection properties + CacheEnabled *bool `json:"cacheEnabled,omitempty"` + // Schema for collection validation + Schema *CollectionSchemaOptions `json:"schema,omitempty"` + // ComputedValues let configure collections to generate document attributes when documents are created or modified, using an AQL expression + ComputedValues []ComputedValue `json:"computedValues,omitempty"` +} + +// CollectionStatus indicates the status of a collection. +type CollectionStatus int + +const ( + CollectionStatusNewBorn = CollectionStatus(1) + CollectionStatusUnloaded = CollectionStatus(2) + CollectionStatusLoaded = CollectionStatus(3) + CollectionStatusUnloading = CollectionStatus(4) + CollectionStatusDeleted = CollectionStatus(5) + CollectionStatusLoading = CollectionStatus(6) +) + +// CollectionStatistics contains the number of documents and additional statistical information about a collection. +type CollectionStatistics struct { + ArangoError + CollectionProperties + + //The number of documents currently present in the collection. + Count int64 `json:"count,omitempty"` + // The maximal size of a journal or datafile in bytes. + JournalSize int64 `json:"journalSize,omitempty"` + Figures struct { + DataFiles struct { + // The number of datafiles. + Count int64 `json:"count,omitempty"` + // The total filesize of datafiles (in bytes). + FileSize int64 `json:"fileSize,omitempty"` + } `json:"datafiles"` + // The number of markers in the write-ahead log for this collection that have not been transferred to journals or datafiles. + UncollectedLogfileEntries int64 `json:"uncollectedLogfileEntries,omitempty"` + // The number of references to documents in datafiles that JavaScript code currently holds. This information can be used for debugging compaction and unload issues. 
+ DocumentReferences int64 `json:"documentReferences,omitempty"` + CompactionStatus struct { + // The action that was performed when the compaction was last run for the collection. This information can be used for debugging compaction issues. + Message string `json:"message,omitempty"` + // The point in time the compaction for the collection was last executed. This information can be used for debugging compaction issues. + Time time.Time `json:"time,omitempty"` + } `json:"compactionStatus"` + Compactors struct { + // The number of compactor files. + Count int64 `json:"count,omitempty"` + // The total filesize of all compactor files (in bytes). + FileSize int64 `json:"fileSize,omitempty"` + } `json:"compactors"` + Dead struct { + // The number of dead documents. This includes document versions that have been deleted or replaced by a newer version. Documents deleted or replaced that are contained the write-ahead log only are not reported in this figure. + Count int64 `json:"count,omitempty"` + // The total number of deletion markers. Deletion markers only contained in the write-ahead log are not reporting in this figure. + Deletion int64 `json:"deletion,omitempty"` + // The total size in bytes used by all dead documents. + Size int64 `json:"size,omitempty"` + } `json:"dead"` + Indexes struct { + // The total number of indexes defined for the collection, including the pre-defined indexes (e.g. primary index). + Count int64 `json:"count,omitempty"` + // The total memory allocated for indexes in bytes. + Size int64 `json:"size,omitempty"` + } `json:"indexes"` + ReadCache struct { + // The number of revisions of this collection stored in the document revisions cache. + Count int64 `json:"count,omitempty"` + // The memory used for storing the revisions of this collection in the document revisions cache (in bytes). This figure does not include the document data but only mappings from document revision ids to cache entry locations. 
+ Size int64 `json:"size,omitempty"` + } `json:"readcache"` + // An optional string value that contains information about which object type is at the head of the collection's cleanup queue. This information can be used for debugging compaction and unload issues. + WaitingFor string `json:"waitingFor,omitempty"` + Alive struct { + // The number of currently active documents in all datafiles and journals of the collection. Documents that are contained in the write-ahead log only are not reported in this figure. + Count int64 `json:"count,omitempty"` + // The total size in bytes used by all active documents of the collection. Documents that are contained in the write-ahead log only are not reported in this figure. + Size int64 `json:"size,omitempty"` + } `json:"alive"` + // The tick of the last marker that was stored in a journal of the collection. This might be 0 if the collection does not yet have a journal. + LastTick int64 `json:"lastTick,omitempty"` + Journals struct { + // The number of journal files. + Count int64 `json:"count,omitempty"` + // The total filesize of all journal files (in bytes). + FileSize int64 `json:"fileSize,omitempty"` + } `json:"journals"` + Revisions struct { + // The number of revisions of this collection managed by the storage engine. + Count int64 `json:"count,omitempty"` + // The memory used for storing the revisions of this collection in the storage engine (in bytes). This figure does not include the document data but only mappings from document revision ids to storage engine datafile positions. + Size int64 `json:"size,omitempty"` + } `json:"revisions"` + + DocumentsSize *int64 `json:"documentsSize,omitempty"` + + // RocksDB cache statistics + CacheInUse *bool `json:"cacheInUse,omitempty"` + CacheSize *int64 `json:"cacheSize,omitempty"` + CacheUsage *int64 `json:"cacheUsage,omitempty"` + } `json:"figures"` +} + +// CollectionShards contains shards information about a collection. 
+type CollectionShards struct { + CollectionProperties + + // Shards is a list of shards that belong to the collection. + // Each shard contains a list of DB servers where the first one is the leader and the rest are followers. + Shards map[ShardID][]ServerID `json:"shards,omitempty"` +} diff --git a/vendor/github.com/arangodb/go-driver/collection_document_impl.go b/vendor/github.com/arangodb/go-driver/collection_document_impl.go new file mode 100644 index 00000000000..450741cbb55 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/collection_document_impl.go @@ -0,0 +1,677 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "fmt" + "path" + "reflect" +) + +// DocumentExists checks if a document with given key exists in the collection. 
+func (c *collection) DocumentExists(ctx context.Context, key string) (bool, error) { + if err := validateKey(key); err != nil { + return false, WithStack(err) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("HEAD", path.Join(c.relPath("document"), escapedKey)) + if err != nil { + return false, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + found := resp.StatusCode() == 200 + return found, nil +} + +// ReadDocument reads a single document with given key from the collection. +// The document data is stored into result, the document meta data is returned. +// If no document exists with given key, a NotFoundError is returned. +func (c *collection) ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, WithStack(err) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("GET", path.Join(c.relPath("document"), escapedKey)) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + // This line introduces a lot of side effects. In particular If-Match headers are now set (which is a bugfix) + // and invalid query parameters like waitForSync (which is potentially breaking change) + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return DocumentMeta{}, WithStack(err) + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("", &meta); err != nil { + return DocumentMeta{}, WithStack(err) + } + // load context response values + loadContextResponseValues(cs, resp) + // Parse result + if result != nil { + if err := resp.ParseBody("", result); err != nil { + return meta, WithStack(err) + } + } + return meta, nil +} + +// ReadDocuments reads multiple documents with given keys from the collection. 
+// The documents data is stored into elements of the given results slice, +// the documents meta data is returned. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *collection) ReadDocuments(ctx context.Context, keys []string, results interface{}) (DocumentMetaSlice, ErrorSlice, error) { + resultsVal := reflect.ValueOf(results) + switch resultsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("results data must be of kind Array, got %s", resultsVal.Kind())}) + } + if keys == nil { + return nil, nil, WithStack(InvalidArgumentError{Message: "keys nil"}) + } + resultCount := resultsVal.Len() + if len(keys) != resultCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", resultCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + req, err := c.conn.NewRequest("PUT", c.relPath("document")) + if err != nil { + return nil, nil, WithStack(err) + } + req = req.SetQuery("onlyget", "1") + cs := applyContextSettings(ctx, req) + if _, err := req.SetBodyArray(keys, nil); err != nil { + return nil, nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, nil, WithStack(err) + } + // load context response values + loadContextResponseValues(cs, resp) + // Parse response array + metas, errs, err := parseResponseArray(resp, resultCount, cs, results) + if err != nil { + return nil, nil, WithStack(err) + } + return metas, errs, nil + +} + +// CreateDocument creates a single document in the collection. +// The document data is loaded from the given document, the document meta data is returned. 
+// If the document data already contains a `_key` field, this will be used as key of the new document, +// otherwise a unique key is created. +// A ConflictError is returned when a `_key` field contains a duplicate key, other any other field violates an index constraint. +// To return the NEW document, prepare a context with `WithReturnNew`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +func (c *collection) CreateDocument(ctx context.Context, document interface{}) (DocumentMeta, error) { + if document == nil { + return DocumentMeta{}, WithStack(InvalidArgumentError{Message: "document nil"}) + } + req, err := c.conn.NewRequest("POST", c.relPath("document")) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if _, err := req.SetBody(document); err != nil { + return DocumentMeta{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return DocumentMeta{}, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("", &meta); err != nil { + return DocumentMeta{}, WithStack(err) + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, WithStack(err) + } + } + return meta, nil +} + +// CreateDocuments creates multiple documents in the collection. +// The document data is loaded from the given documents slice, the documents meta data is returned. +// If a documents element already contains a `_key` field, this will be used as key of the new document, +// otherwise a unique key is created. 
+// If a documents element contains a `_key` field with a duplicate key, other any other field violates an index constraint, +// a ConflictError is returned in its inded in the errors slice. +// To return the NEW documents, prepare a context with `WithReturnNew`. The data argument passed to `WithReturnNew` must be +// a slice with the same number of entries as the `documents` slice. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If the create request itself fails or one of the arguments is invalid, an error is returned. +func (c *collection) CreateDocuments(ctx context.Context, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + req, err := c.conn.NewRequest("POST", c.relPath("document")) + if err != nil { + return nil, nil, WithStack(err) + } + if _, err := req.SetBody(documents); err != nil { + return nil, nil, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, nil, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return nil, nil, nil + } + // Parse response array + metas, errs, err := parseResponseArray(resp, documentCount, cs, nil) + if err != nil { + return nil, nil, WithStack(err) + } + return metas, errs, nil +} + +// UpdateDocument updates a single document with given key in the collection. +// The document meta data is returned. +// To return the NEW document, prepare a context with `WithReturnNew`. +// To return the OLD document, prepare a context with `WithReturnOld`. 
+// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. +func (c *collection) UpdateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, WithStack(err) + } + if update == nil { + return DocumentMeta{}, WithStack(InvalidArgumentError{Message: "update nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PATCH", path.Join(c.relPath("document"), escapedKey)) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if _, err := req.SetBody(update); err != nil { + return DocumentMeta{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return DocumentMeta{}, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("", &meta); err != nil { + return DocumentMeta{}, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, WithStack(err) + } + } + return meta, nil +} + +// UpdateDocuments updates multiple document with given keys in the collection. +// The updates are loaded from the given updates slice, the documents meta data are returned. +// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. 
+// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *collection) UpdateDocuments(ctx context.Context, keys []string, updates interface{}) (DocumentMetaSlice, ErrorSlice, error) { + updatesVal := reflect.ValueOf(updates) + switch updatesVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("updates data must be of kind Array, got %s", updatesVal.Kind())}) + } + updateCount := updatesVal.Len() + if keys != nil { + if len(keys) != updateCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", updateCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + req, err := c.conn.NewRequest("PATCH", c.relPath("document")) + if err != nil { + return nil, nil, WithStack(err) + } + cs := applyContextSettings(ctx, req) + mergeArray, err := createMergeArray(keys, cs.Revisions) + if err != nil { + return nil, nil, WithStack(err) + } + if _, err := req.SetBodyArray(updates, mergeArray); err != nil { + return nil, nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, nil, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return nil, nil, nil + } + // Parse response array + metas, errs, err := parseResponseArray(resp, updateCount, cs, nil) + if err != nil { + return nil, nil, WithStack(err) + } + return metas, errs, nil +} + +// ReplaceDocument replaces a single document with given key in the collection with the document given in the document argument. +// The document meta data is returned. +// To return the NEW document, prepare a context with `WithReturnNew`. 
+// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. +func (c *collection) ReplaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, WithStack(err) + } + if document == nil { + return DocumentMeta{}, WithStack(InvalidArgumentError{Message: "document nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath("document"), escapedKey)) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if _, err := req.SetBody(document); err != nil { + return DocumentMeta{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return DocumentMeta{}, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("", &meta); err != nil { + return DocumentMeta{}, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, WithStack(err) + } + } + return meta, nil +} + +// ReplaceDocuments replaces multiple documents with given keys in the collection with the documents given in the documents argument. +// The replacements are loaded from the given documents slice, the documents meta data are returned. +// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. 
+// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *collection) ReplaceDocuments(ctx context.Context, keys []string, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + if keys != nil { + if len(keys) != documentCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", documentCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + req, err := c.conn.NewRequest("PUT", c.relPath("document")) + if err != nil { + return nil, nil, WithStack(err) + } + cs := applyContextSettings(ctx, req) + mergeArray, err := createMergeArray(keys, cs.Revisions) + if err != nil { + return nil, nil, WithStack(err) + } + if _, err := req.SetBodyArray(documents, mergeArray); err != nil { + return nil, nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, nil, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return nil, nil, nil + } + // Parse response array + metas, errs, err := parseResponseArray(resp, documentCount, cs, nil) + if err != nil { + return nil, nil, WithStack(err) + } + return metas, errs, nil +} + +// RemoveDocument removes a single document with given key from the collection. +// The document meta data is returned. 
+// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. +func (c *collection) RemoveDocument(ctx context.Context, key string) (DocumentMeta, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, WithStack(err) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("DELETE", path.Join(c.relPath("document"), escapedKey)) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if err := resp.CheckStatus(200, 202); err != nil { + return DocumentMeta{}, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("", &meta); err != nil { + return DocumentMeta{}, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, WithStack(err) + } + } + return meta, nil +} + +// RemoveDocuments removes multiple documents with given keys from the collection. +// The document meta data are returned. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. 
+func (c *collection) RemoveDocuments(ctx context.Context, keys []string) (DocumentMetaSlice, ErrorSlice, error) { + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + keyCount := len(keys) + req, err := c.conn.NewRequest("DELETE", c.relPath("document")) + if err != nil { + return nil, nil, WithStack(err) + } + cs := applyContextSettings(ctx, req) + metaArray, err := createMergeArray(keys, cs.Revisions) + if err != nil { + return nil, nil, WithStack(err) + } + if _, err := req.SetBodyArray(metaArray, nil); err != nil { + return nil, nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, nil, WithStack(err) + } + if err := resp.CheckStatus(200, 202); err != nil { + return nil, nil, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return nil, nil, nil + } + // Parse response array + metas, errs, err := parseResponseArray(resp, keyCount, cs, nil) + if err != nil { + return nil, nil, WithStack(err) + } + return metas, errs, nil +} + +// ImportDocuments imports one or more documents into the collection. +// The document data is loaded from the given documents argument, statistics are returned. +// The documents argument can be one of the following: +// - An array of structs: All structs will be imported as individual documents. +// - An array of maps: All maps will be imported as individual documents. +// To wait until all documents have been synced to disk, prepare a context with `WithWaitForSync`. +// To return details about documents that could not be imported, prepare a context with `WithImportDetails`. 
+func (c *collection) ImportDocuments(ctx context.Context, documents interface{}, options *ImportDocumentOptions) (ImportDocumentStatistics, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return ImportDocumentStatistics{}, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + req, err := c.conn.NewRequest("POST", path.Join(c.db.relPath(), "_api/import")) + if err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + req.SetQuery("collection", c.name) + req.SetQuery("type", "documents") + if options != nil { + if v := options.FromPrefix; v != "" { + req.SetQuery("fromPrefix", v) + } + if v := options.ToPrefix; v != "" { + req.SetQuery("toPrefix", v) + } + if v := options.Overwrite; v { + req.SetQuery("overwrite", "true") + } + if v := options.OnDuplicate; v != "" { + req.SetQuery("onDuplicate", string(v)) + } + if v := options.Complete; v { + req.SetQuery("complete", "true") + } + } + if _, err := req.SetBodyImportArray(documents); err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + // Parse response + var data ImportDocumentStatistics + if err := resp.ParseBody("", &data); err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + // Import details (if needed) + if details := cs.ImportDetails; details != nil { + if err := resp.ParseBody("details", details); err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + } + return data, nil +} + +// createMergeArray returns an array of metadata maps with `_key` and/or `_rev` elements. 
+func createMergeArray(keys, revs []string) ([]map[string]interface{}, error) { + if keys == nil && revs == nil { + return nil, nil + } + if revs == nil { + mergeArray := make([]map[string]interface{}, len(keys)) + for i, k := range keys { + mergeArray[i] = map[string]interface{}{ + "_key": k, + } + } + return mergeArray, nil + } + if keys == nil { + mergeArray := make([]map[string]interface{}, len(revs)) + for i, r := range revs { + mergeArray[i] = map[string]interface{}{ + "_rev": r, + } + } + return mergeArray, nil + } + if len(keys) != len(revs) { + return nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("#keys must be equal to #revs, got %d, %d", len(keys), len(revs))}) + } + mergeArray := make([]map[string]interface{}, len(keys)) + for i, k := range keys { + mergeArray[i] = map[string]interface{}{ + "_key": k, + "_rev": revs[i], + } + } + return mergeArray, nil + +} + +// parseResponseArray parses an array response in the given response +func parseResponseArray(resp Response, count int, cs contextSettings, results interface{}) (DocumentMetaSlice, ErrorSlice, error) { + resps, err := resp.ParseArrayBody() + if err != nil { + return nil, nil, WithStack(err) + } + metas := make(DocumentMetaSlice, count) + errs := make(ErrorSlice, count) + returnOldVal := reflect.ValueOf(cs.ReturnOld) + returnNewVal := reflect.ValueOf(cs.ReturnNew) + resultsVal := reflect.ValueOf(results) + for i := 0; i < count; i++ { + resp := resps[i] + var meta DocumentMeta + if err := resp.CheckStatus(200, 201, 202); err != nil { + errs[i] = err + } else { + if err := resp.ParseBody("", &meta); err != nil { + errs[i] = err + } else { + metas[i] = meta + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + returnOldEntryVal := returnOldVal.Index(i).Addr() + if err := resp.ParseBody("old", returnOldEntryVal.Interface()); err != nil { + errs[i] = err + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + returnNewEntryVal := returnNewVal.Index(i).Addr() + if err := 
resp.ParseBody("new", returnNewEntryVal.Interface()); err != nil { + errs[i] = err + } + } + } + if results != nil { + // Parse compare result document + resultsEntryVal := resultsVal.Index(i).Addr() + if err := resp.ParseBody("", resultsEntryVal.Interface()); err != nil { + errs[i] = err + } + } + } + } + return metas, errs, nil +} diff --git a/vendor/github.com/arangodb/go-driver/collection_documents.go b/vendor/github.com/arangodb/go-driver/collection_documents.go new file mode 100644 index 00000000000..e1212feff8c --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/collection_documents.go @@ -0,0 +1,178 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// CollectionDocuments provides access to the documents in a single collection. +type CollectionDocuments interface { + // DocumentExists checks if a document with given key exists in the collection. + DocumentExists(ctx context.Context, key string) (bool, error) + + // ReadDocument reads a single document with given key from the collection. + // The document data is stored into result, the document meta data is returned. + // If no document exists with given key, a NotFoundError is returned. 
+ ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error) + + // ReadDocuments reads multiple documents with given keys from the collection. + // The documents data is stored into elements of the given results slice, + // the documents meta data is returned. + // If no document exists with a given key, a NotFoundError is returned at its errors index. + ReadDocuments(ctx context.Context, keys []string, results interface{}) (DocumentMetaSlice, ErrorSlice, error) + + // CreateDocument creates a single document in the collection. + // The document data is loaded from the given document, the document meta data is returned. + // If the document data already contains a `_key` field, this will be used as key of the new document, + // otherwise a unique key is created. + // A ConflictError is returned when a `_key` field contains a duplicate key, other any other field violates an index constraint. + // To return the NEW document, prepare a context with `WithReturnNew`. + // To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. + CreateDocument(ctx context.Context, document interface{}) (DocumentMeta, error) + + // CreateDocuments creates multiple documents in the collection. + // The document data is loaded from the given documents slice, the documents meta data is returned. + // If a documents element already contains a `_key` field, this will be used as key of the new document, + // otherwise a unique key is created. + // If a documents element contains a `_key` field with a duplicate key, other any other field violates an index constraint, + // a ConflictError is returned in its index in the errors slice. + // To return the NEW documents, prepare a context with `WithReturnNew`. The data argument passed to `WithReturnNew` must be + // a slice with the same number of entries as the `documents` slice. + // To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. 
+ // If the create request itself fails or one of the arguments is invalid, an error is returned. + CreateDocuments(ctx context.Context, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) + + // UpdateDocument updates a single document with given key in the collection. + // The document meta data is returned. + // To return the NEW document, prepare a context with `WithReturnNew`. + // To return the OLD document, prepare a context with `WithReturnOld`. + // To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. + // If no document exists with given key, a NotFoundError is returned. + UpdateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, error) + + // UpdateDocuments updates multiple document with given keys in the collection. + // The updates are loaded from the given updates slice, the documents meta data are returned. + // To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. + // To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. + // To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. + // If no document exists with a given key, a NotFoundError is returned at its errors index. + // If keys is nil, each element in the updates slice must contain a `_key` field. + UpdateDocuments(ctx context.Context, keys []string, updates interface{}) (DocumentMetaSlice, ErrorSlice, error) + + // ReplaceDocument replaces a single document with given key in the collection with the document given in the document argument. + // The document meta data is returned. + // To return the NEW document, prepare a context with `WithReturnNew`. + // To return the OLD document, prepare a context with `WithReturnOld`. + // To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. + // If no document exists with given key, a NotFoundError is returned. 
+ ReplaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, error) + + // ReplaceDocuments replaces multiple documents with given keys in the collection with the documents given in the documents argument. + // The replacements are loaded from the given documents slice, the documents meta data are returned. + // To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. + // To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. + // To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. + // If no document exists with a given key, a NotFoundError is returned at its errors index. + // If keys is nil, each element in the documents slice must contain a `_key` field. + ReplaceDocuments(ctx context.Context, keys []string, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) + + // RemoveDocument removes a single document with given key from the collection. + // The document meta data is returned. + // To return the OLD document, prepare a context with `WithReturnOld`. + // To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. + // If no document exists with given key, a NotFoundError is returned. + RemoveDocument(ctx context.Context, key string) (DocumentMeta, error) + + // RemoveDocuments removes multiple documents with given keys from the collection. + // The document meta data are returned. + // To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. + // To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. + // If no document exists with a given key, a NotFoundError is returned at its errors index. + RemoveDocuments(ctx context.Context, keys []string) (DocumentMetaSlice, ErrorSlice, error) + + // ImportDocuments imports one or more documents into the collection. 
+ // The document data is loaded from the given documents argument, statistics are returned. + // The documents argument can be one of the following: + // - An array of structs: All structs will be imported as individual documents. + // - An array of maps: All maps will be imported as individual documents. + // To wait until all documents have been synced to disk, prepare a context with `WithWaitForSync`. + // To return details about documents that could not be imported, prepare a context with `WithImportDetails`. + ImportDocuments(ctx context.Context, documents interface{}, options *ImportDocumentOptions) (ImportDocumentStatistics, error) +} + +// ImportDocumentOptions holds optional options that control the import document process. +type ImportDocumentOptions struct { + // FromPrefix is an optional prefix for the values in _from attributes. If specified, the value is automatically + // prepended to each _from input value. This allows specifying just the keys for _from. + FromPrefix string `json:"fromPrefix,omitempty"` + // ToPrefix is an optional prefix for the values in _to attributes. If specified, the value is automatically + // prepended to each _to input value. This allows specifying just the keys for _to. + ToPrefix string `json:"toPrefix,omitempty"` + // Overwrite is a flag that if set, then all data in the collection will be removed prior to the import. + // Note that any existing index definitions will be preseved. + Overwrite bool `json:"overwrite,omitempty"` + // OnDuplicate controls what action is carried out in case of a unique key constraint violation. + // Possible values are: + // - ImportOnDuplicateError + // - ImportOnDuplicateUpdate + // - ImportOnDuplicateReplace + // - ImportOnDuplicateIgnore + OnDuplicate ImportOnDuplicate `json:"onDuplicate,omitempty"` + // Complete is a flag that if set, will make the whole import fail if any error occurs. + // Otherwise the import will continue even if some documents cannot be imported. 
+ Complete bool `json:"complete,omitempty"` +} + +// ImportOnDuplicate is a type to control what action is carried out in case of a unique key constraint violation. +type ImportOnDuplicate string + +const ( + // ImportOnDuplicateError will not import the current document because of the unique key constraint violation. + // This is the default setting. + ImportOnDuplicateError = ImportOnDuplicate("error") + // ImportOnDuplicateUpdate will update an existing document in the database with the data specified in the request. + // Attributes of the existing document that are not present in the request will be preserved. + ImportOnDuplicateUpdate = ImportOnDuplicate("update") + // ImportOnDuplicateReplace will replace an existing document in the database with the data specified in the request. + ImportOnDuplicateReplace = ImportOnDuplicate("replace") + // ImportOnDuplicateIgnore will not update an existing document and simply ignore the error caused by a unique key constraint violation. + ImportOnDuplicateIgnore = ImportOnDuplicate("ignore") +) + +// ImportDocumentStatistics holds statistics of an import action. +type ImportDocumentStatistics struct { + // Created holds the number of documents imported. + Created int64 `json:"created,omitempty"` + // Errors holds the number of documents that were not imported due to an error. + Errors int64 `json:"errors,omitempty"` + // Empty holds the number of empty lines found in the input (will only contain a value greater zero for types documents or auto). + Empty int64 `json:"empty,omitempty"` + // Updated holds the number of updated/replaced documents (in case onDuplicate was set to either update or replace). + Updated int64 `json:"updated,omitempty"` + // Ignored holds the number of failed but ignored insert operations (in case onDuplicate was set to ignore). 
+ Ignored int64 `json:"ignored,omitempty"` + // if query parameter details is set to true, the result will contain a details attribute which is an array + // with more detailed information about which documents could not be inserted. + Details []string + + ArangoError +} diff --git a/vendor/github.com/arangodb/go-driver/collection_impl.go b/vendor/github.com/arangodb/go-driver/collection_impl.go new file mode 100644 index 00000000000..c0b913ead1c --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/collection_impl.go @@ -0,0 +1,356 @@ +// +// DISCLAIMER +// +// Copyright 2017-2021 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// Author Tomasz Mielech +// + +package driver + +import ( + "context" + "encoding/json" + "path" +) + +// newCollection creates a new Collection implementation. 
+func newCollection(name string, db *database) (Collection, error) { + if name == "" { + return nil, WithStack(InvalidArgumentError{Message: "name is empty"}) + } + if db == nil { + return nil, WithStack(InvalidArgumentError{Message: "db is nil"}) + } + return &collection{ + name: name, + db: db, + conn: db.conn, + }, nil +} + +type collection struct { + name string + db *database + conn Connection +} + +// relPath creates the relative path to this collection (`_db//_api//`) +func (c *collection) relPath(apiName string) string { + escapedName := pathEscape(c.name) + return path.Join(c.db.relPath(), "_api", apiName, escapedName) +} + +// Name returns the name of the collection. +func (c *collection) Name() string { + return c.name +} + +// Database returns the database containing the collection. +func (c *collection) Database() Database { + return c.db +} + +// Status fetches the current status of the collection. +func (c *collection) Status(ctx context.Context) (CollectionStatus, error) { + req, err := c.conn.NewRequest("GET", c.relPath("collection")) + if err != nil { + return CollectionStatus(0), WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return CollectionStatus(0), WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return CollectionStatus(0), WithStack(err) + } + var data CollectionInfo + if err := resp.ParseBody("", &data); err != nil { + return CollectionStatus(0), WithStack(err) + } + return data.Status, nil +} + +// Count fetches the number of document in the collection. 
+func (c *collection) Count(ctx context.Context) (int64, error) { + req, err := c.conn.NewRequest("GET", path.Join(c.relPath("collection"), "count")) + if err != nil { + return 0, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return 0, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return 0, WithStack(err) + } + var data struct { + Count int64 `json:"count,omitempty"` + } + if err := resp.ParseBody("", &data); err != nil { + return 0, WithStack(err) + } + return data.Count, nil +} + +// Statistics returns the number of documents and additional statistical information about the collection. +func (c *collection) Statistics(ctx context.Context) (CollectionStatistics, error) { + req, err := c.conn.NewRequest("GET", path.Join(c.relPath("collection"), "figures")) + if err != nil { + return CollectionStatistics{}, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return CollectionStatistics{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return CollectionStatistics{}, WithStack(err) + } + var data CollectionStatistics + if err := resp.ParseBody("", &data); err != nil { + return CollectionStatistics{}, WithStack(err) + } + return data, nil +} + +// Revision fetches the revision ID of the collection. +// The revision ID is a server-generated string that clients can use to check whether data +// in a collection has changed since the last revision check. 
+func (c *collection) Revision(ctx context.Context) (string, error) { + req, err := c.conn.NewRequest("GET", path.Join(c.relPath("collection"), "revision")) + if err != nil { + return "", WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return "", WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return "", WithStack(err) + } + var data CollectionProperties + if err := resp.ParseBody("", &data); err != nil { + return "", WithStack(err) + } + return data.Revision, nil +} + +// Properties fetches extended information about the collection. +func (c *collection) Properties(ctx context.Context) (CollectionProperties, error) { + req, err := c.conn.NewRequest("GET", path.Join(c.relPath("collection"), "properties")) + if err != nil { + return CollectionProperties{}, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return CollectionProperties{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return CollectionProperties{}, WithStack(err) + } + + var data struct { + CollectionProperties + ReplicationFactorV2 replicationFactor `json:"replicationFactor,omitempty"` + } + + if err := resp.ParseBody("", &data); err != nil { + return CollectionProperties{}, WithStack(err) + } + data.ReplicationFactor = int(data.ReplicationFactorV2) + + return data.CollectionProperties, nil +} + +// SetProperties changes properties of the collection. +func (c *collection) SetProperties(ctx context.Context, options SetCollectionPropertiesOptions) error { + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath("collection"), "properties")) + if err != nil { + return WithStack(err) + } + if _, err := req.SetBody(options.asInternal()); err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// Shards fetches shards information of the collection. 
+func (c *collection) Shards(ctx context.Context, details bool) (CollectionShards, error) { + req, err := c.conn.NewRequest("GET", path.Join(c.relPath("collection"), "shards")) + if err != nil { + return CollectionShards{}, WithStack(err) + } + if details { + req.SetQuery("details", "true") + } + + resp, err := c.conn.Do(ctx, req) + if err != nil { + return CollectionShards{}, WithStack(err) + } + + if err := resp.CheckStatus(200); err != nil { + return CollectionShards{}, WithStack(err) + } + + var data struct { + CollectionShards + ReplicationFactorV2 replicationFactor `json:"replicationFactor,omitempty"` + } + + if err := resp.ParseBody("", &data); err != nil { + return CollectionShards{}, WithStack(err) + } + data.ReplicationFactor = int(data.ReplicationFactorV2) + + return data.CollectionShards, nil +} + +// Load the collection into memory. +func (c *collection) Load(ctx context.Context) error { + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath("collection"), "load")) + if err != nil { + return WithStack(err) + } + opts := struct { + Count bool `json:"count"` + }{ + Count: false, + } + if _, err := req.SetBody(opts); err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// UnLoad the collection from memory. +func (c *collection) Unload(ctx context.Context) error { + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath("collection"), "unload")) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil + +} + +// Remove removes the entire collection. +// If the collection does not exist, a NotFoundError is returned. 
+func (c *collection) Remove(ctx context.Context) error { + req, err := c.conn.NewRequest("DELETE", c.relPath("collection")) + if err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// Truncate removes all documents from the collection, but leaves the indexes intact. +func (c *collection) Truncate(ctx context.Context) error { + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath("collection"), "truncate")) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +type setCollectionPropertiesOptionsInternal struct { + WaitForSync *bool `json:"waitForSync,omitempty"` + JournalSize int64 `json:"journalSize,omitempty"` + ReplicationFactor replicationFactor `json:"replicationFactor,omitempty"` + CacheEnabled *bool `json:"cacheEnabled,omitempty"` + ComputedValues []ComputedValue `json:"computedValues,omitempty"` + // Deprecated: use 'WriteConcern' instead + MinReplicationFactor int `json:"minReplicationFactor,omitempty"` + // Available from 3.6 arangod version. 
+ WriteConcern int `json:"writeConcern,omitempty"` + Schema *CollectionSchemaOptions `json:"schema,omitempty"` +} + +func (p *SetCollectionPropertiesOptions) asInternal() setCollectionPropertiesOptionsInternal { + return setCollectionPropertiesOptionsInternal{ + WaitForSync: p.WaitForSync, + JournalSize: p.JournalSize, + CacheEnabled: p.CacheEnabled, + ComputedValues: p.ComputedValues, + ReplicationFactor: replicationFactor(p.ReplicationFactor), + MinReplicationFactor: p.MinReplicationFactor, + WriteConcern: p.WriteConcern, + Schema: p.Schema, + } +} + +func (p *SetCollectionPropertiesOptions) fromInternal(i *setCollectionPropertiesOptionsInternal) { + p.WaitForSync = i.WaitForSync + p.JournalSize = i.JournalSize + p.CacheEnabled = i.CacheEnabled + p.ReplicationFactor = int(i.ReplicationFactor) + p.MinReplicationFactor = i.MinReplicationFactor + p.WriteConcern = i.WriteConcern + p.Schema = i.Schema +} + +// MarshalJSON converts SetCollectionPropertiesOptions into json +func (p *SetCollectionPropertiesOptions) MarshalJSON() ([]byte, error) { + return json.Marshal(p.asInternal()) +} + +// UnmarshalJSON loads SetCollectionPropertiesOptions from json +func (p *SetCollectionPropertiesOptions) UnmarshalJSON(d []byte) error { + var internal setCollectionPropertiesOptionsInternal + if err := json.Unmarshal(d, &internal); err != nil { + return err + } + + p.fromInternal(&internal) + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/collection_indexes.go b/vendor/github.com/arangodb/go-driver/collection_indexes.go new file mode 100644 index 00000000000..4a0b15017e1 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/collection_indexes.go @@ -0,0 +1,302 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import "context"

// CollectionIndexes provides access to the indexes in a single collection.
type CollectionIndexes interface {
	// Index opens a connection to an existing index within the collection.
	// If no index with given name exists, a NotFoundError is returned.
	Index(ctx context.Context, name string) (Index, error)

	// IndexExists returns true if an index with given name exists within the collection.
	IndexExists(ctx context.Context, name string) (bool, error)

	// Indexes returns a list of all indexes in the collection.
	Indexes(ctx context.Context) ([]Index, error)

	// Deprecated: since 3.10 version. Use ArangoSearch view instead.
	// EnsureFullTextIndex creates a fulltext index in the collection, if it does not already exist.
	// Fields is a slice of attribute names. Currently, the slice is limited to exactly one attribute.
	// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
	EnsureFullTextIndex(ctx context.Context, fields []string, options *EnsureFullTextIndexOptions) (Index, bool, error)

	// EnsureGeoIndex creates a geo index in the collection, if it does not already exist.
	// Fields is a slice with one or two attribute paths. If it is a slice with one attribute path location,
	// then a geo-spatial index on all documents is created using location as path to the coordinates.
	// The value of the attribute must be a slice with at least two double values. The slice must contain the latitude (first value)
	// and the longitude (second value). All documents, which do not have the attribute path or with value that are not suitable, are ignored.
	// If it is a slice with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created
	// using latitude and longitude as paths to the latitude and the longitude. The value of the attribute latitude and of the
	// attribute longitude must be a double. All documents, which do not have the attribute paths or which values are not suitable, are ignored.
	// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
	EnsureGeoIndex(ctx context.Context, fields []string, options *EnsureGeoIndexOptions) (Index, bool, error)

	// EnsureHashIndex creates a hash index in the collection, if it does not already exist.
	// Fields is a slice of attribute paths.
	// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
	EnsureHashIndex(ctx context.Context, fields []string, options *EnsureHashIndexOptions) (Index, bool, error)

	// EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist.
	// Fields is a slice of attribute paths.
	// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
	EnsurePersistentIndex(ctx context.Context, fields []string, options *EnsurePersistentIndexOptions) (Index, bool, error)

	// EnsureSkipListIndex creates a skiplist index in the collection, if it does not already exist.
	// Fields is a slice of attribute paths.
	// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
	EnsureSkipListIndex(ctx context.Context, fields []string, options *EnsureSkipListIndexOptions) (Index, bool, error)

	// EnsureTTLIndex creates a TTL index, if it does not already exist.
	// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
	EnsureTTLIndex(ctx context.Context, field string, expireAfter int, options *EnsureTTLIndexOptions) (Index, bool, error)

	// EnsureZKDIndex creates a ZKD multi-dimensional index for the collection, if it does not already exist.
	// Note that zkd indexes are an experimental feature in ArangoDB 3.9.
	EnsureZKDIndex(ctx context.Context, fields []string, options *EnsureZKDIndexOptions) (Index, bool, error)

	// EnsureInvertedIndex creates an inverted index in the collection, if it does not already exist.
	// Available in ArangoDB 3.10 and later.
	EnsureInvertedIndex(ctx context.Context, options *InvertedIndexOptions) (Index, bool, error)
}

// Deprecated: since 3.10 version. Use ArangoSearch view instead.
// EnsureFullTextIndexOptions contains specific options for creating a full text index.
type EnsureFullTextIndexOptions struct {
	// MinLength is the minimum character length of words to index. Will default to a server-defined
	// value if unspecified (0). It is thus recommended to set this value explicitly when creating the index.
	MinLength int
	// InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
	InBackground bool
	// Name optional user defined name used for hints in AQL queries
	Name string
	// Estimates determines if the to-be-created index should maintain selectivity estimates or not.
	Estimates *bool
}

// EnsureGeoIndexOptions contains specific options for creating a geo index.
type EnsureGeoIndexOptions struct {
	// If a geo-spatial index on a location is constructed and GeoJSON is true, then the order within the array
	// is longitude followed by latitude. This corresponds to the format described in http://geojson.org/geojson-spec.html#positions
	GeoJSON bool
	// InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
	InBackground bool
	// Name optional user defined name used for hints in AQL queries
	Name string
	// Estimates determines if the to-be-created index should maintain selectivity estimates or not.
	Estimates *bool
	// LegacyPolygons determines if the to-be-created index should use legacy polygons or not.
	// It is relevant for those that have geoJson set to true only.
	// Old geo indexes from versions from below 3.10 will always implicitly have the legacyPolygons option set to true.
	// Newly generated geo indexes from 3.10 on will have the legacyPolygons option by default set to false,
	// however, it can still be explicitly overwritten with true to create a legacy index but is not recommended.
	LegacyPolygons bool
}

// EnsureHashIndexOptions contains specific options for creating a hash index.
// Note: "hash" and "skiplist" are only aliases for "persistent" with the RocksDB storage engine which is only storage engine since 3.7
type EnsureHashIndexOptions struct {
	// If true, then create a unique index.
	Unique bool
	// If true, then create a sparse index.
	Sparse bool
	// If true, de-duplication of array-values, before being added to the index, will be turned off.
	// This flag requires ArangoDB 3.2.
	// Note: this setting is only relevant for indexes with array fields (e.g. "fieldName[*]")
	NoDeduplicate bool
	// InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
	InBackground bool
	// Name optional user defined name used for hints in AQL queries
	Name string
	// Estimates determines if the to-be-created index should maintain selectivity estimates or not.
	Estimates *bool
}

// EnsurePersistentIndexOptions contains specific options for creating a persistent index.
// Note: "hash" and "skiplist" are only aliases for "persistent" with the RocksDB storage engine which is only storage engine since 3.7
type EnsurePersistentIndexOptions struct {
	// If true, then create a unique index.
	Unique bool
	// If true, then create a sparse index.
	Sparse bool
	// If true, de-duplication of array-values, before being added to the index, will be turned off.
	// This flag requires ArangoDB 3.2.
	// Note: this setting is only relevant for indexes with array fields (e.g. "fieldName[*]")
	NoDeduplicate bool
	// InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
	InBackground bool
	// Name optional user defined name used for hints in AQL queries
	Name string
	// Estimates determines if the to-be-created index should maintain selectivity estimates or not.
	Estimates *bool
	// CacheEnabled if true, then the index will be cached in memory. Caching is turned off by default.
	CacheEnabled bool
	// StoredValues if set, then the additional attributes will be included.
	// These additional attributes cannot be used for index lookups or sorts, but they can be used for projections.
	// There must be no overlap of attribute paths between `fields` and `storedValues`. The maximum number of values is 32.
	StoredValues []string
}

// EnsureSkipListIndexOptions contains specific options for creating a skip-list index.
// Note: "hash" and "skiplist" are only aliases for "persistent" with the RocksDB storage engine which is only storage engine since 3.7
type EnsureSkipListIndexOptions struct {
	// If true, then create a unique index.
	Unique bool
	// If true, then create a sparse index.
	Sparse bool
	// If true, de-duplication of array-values, before being added to the index, will be turned off.
	// This flag requires ArangoDB 3.2.
	// Note: this setting is only relevant for indexes with array fields (e.g. "fieldName[*]")
	NoDeduplicate bool
	// InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
	InBackground bool
	// Name optional user defined name used for hints in AQL queries
	Name string
	// Estimates determines if the to-be-created index should maintain selectivity estimates or not.
	Estimates *bool
}

// EnsureTTLIndexOptions provides specific options for creating a TTL index
type EnsureTTLIndexOptions struct {
	// InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
	InBackground bool
	// Name optional user defined name used for hints in AQL queries
	Name string
	// Estimates determines if the to-be-created index should maintain selectivity estimates or not.
	Estimates *bool
}

// EnsureZKDIndexOptions provides specific options for creating a ZKD index
type EnsureZKDIndexOptions struct {
	// If true, then create a unique index.
	Unique bool
	// InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
	InBackground bool
	// Name optional user defined name used for hints in AQL queries
	Name string
	// FieldValueTypes is required and the only allowed value is "double". Future extensions of the index will allow other types.
	FieldValueTypes string

	// If true, then create a sparse index.
	// TODO: The sparse property is not supported yet
	// Sparse bool
}

// InvertedIndexOptions provides specific options for creating an inverted index
// Available since ArangoDB 3.10
type InvertedIndexOptions struct {
	// Name optional user defined name used for hints in AQL queries
	Name string `json:"name"`
	// InBackground This attribute can be set to true to create the index in the background,
	// not write-locking the underlying collection for as long as if the index is built in the foreground.
	// The default value is false.
	InBackground bool `json:"inBackground,omitempty"`
	// IsNewlyCreated is set by the server in responses: true when the index was just created.
	IsNewlyCreated bool `json:"isNewlyCreated,omitempty"`

	// Parallelism The number of threads to use for indexing the fields. Default: 2
	Parallelism int `json:"parallelism,omitempty"`
	// PrimarySort You can define a primary sort order to enable an AQL optimization.
	// If a query iterates over all documents of a collection, wants to sort them by attribute values, and the (left-most) fields to sort by,
	// as well as their sorting direction, match with the primarySort definition, then the SORT operation is optimized away.
	PrimarySort InvertedIndexPrimarySort `json:"primarySort,omitempty"`
	// StoredValues The optional storedValues attribute can contain an array of paths to additional attributes to store in the index.
	// These additional attributes cannot be used for index lookups or for sorting, but they can be used for projections.
	// This allows an index to fully cover more queries and avoid extra document lookups.
	StoredValues []StoredValue `json:"storedValues,omitempty"`
	// Analyzer The name of an Analyzer to use by default. This Analyzer is applied to the values of the indexed fields for which you don't define Analyzers explicitly.
	Analyzer string `json:"analyzer,omitempty"`
	// Features list of analyzer features, default []
	Features []ArangoSearchAnalyzerFeature `json:"features,omitempty"`
	// IncludeAllFields If set to true, all fields of this element will be indexed. Defaults to false.
	IncludeAllFields bool `json:"includeAllFields,omitempty"`
	// TrackListPositions If set to true, values in a list are treated as separate values. Defaults to false.
	TrackListPositions bool `json:"trackListPositions,omitempty"`
	// SearchField This option only applies if you use the inverted index in a search-alias Views.
	// You can set the option to true to get the same behavior as with arangosearch Views regarding the indexing of array values as the default.
	// If enabled, both, array and primitive values (strings, numbers, etc.) are accepted. Every element of an array is indexed according to the trackListPositions option.
	// If set to false, it depends on the attribute path. If it explicitly expands an array ([*]), then the elements are indexed separately.
	// Otherwise, the array is indexed as a whole, but only geopoint and aql Analyzers accept array inputs.
	// You cannot use an array expansion if searchField is enabled.
	SearchField bool `json:"searchField,omitempty"`
	// Fields contains the properties for individual fields of the element.
	// The key of the map are field names.
	Fields []InvertedIndexField `json:"fields,omitempty"`
	// ConsolidationIntervalMsec Wait at least this many milliseconds between applying 'consolidationPolicy' to consolidate View data store
	// and possibly release space on the filesystem (default: 1000, to disable use: 0).
	ConsolidationIntervalMsec *int64 `json:"consolidationIntervalMsec,omitempty"`
	// CommitIntervalMsec Wait at least this many milliseconds between committing View data store changes and making
	// documents visible to queries (default: 1000, to disable use: 0).
	CommitIntervalMsec *int64 `json:"commitIntervalMsec,omitempty"`
	// CleanupIntervalStep Wait at least this many commits between removing unused files in the ArangoSearch data directory
	// (default: 2, to disable use: 0).
	CleanupIntervalStep *int64 `json:"cleanupIntervalStep,omitempty"`
	// ConsolidationPolicy The consolidation policy to apply for selecting which segments should be merged (default: {}).
	ConsolidationPolicy *ArangoSearchConsolidationPolicy `json:"consolidationPolicy,omitempty"`
	// WriteBufferIdle Maximum number of writers (segments) cached in the pool (default: 64, use 0 to disable)
	WriteBufferIdle *int64 `json:"writebufferIdle,omitempty"`
	// WriteBufferActive Maximum number of concurrent active writers (segments) that perform a transaction.
	// Other writers (segments) wait till current active writers (segments) finish (default: 0, use 0 to disable)
	WriteBufferActive *int64 `json:"writebufferActive,omitempty"`
	// WriteBufferSizeMax Maximum memory byte size per writer (segment) before a writer (segment) flush is triggered.
	// 0 value turns off this limit for any writer (buffer) and data will be flushed periodically based on the value defined for the flush thread (ArangoDB server startup option).
	// 0 value should be used carefully due to high potential memory consumption (default: 33554432, use 0 to disable)
	WriteBufferSizeMax *int64 `json:"writebufferSizeMax,omitempty"`
}

// InvertedIndexPrimarySort defines compression and list of fields to be sorted.
type InvertedIndexPrimarySort struct {
	Fields []ArangoSearchPrimarySortEntry `json:"fields,omitempty"`
	// Compression optional
	Compression PrimarySortCompression `json:"compression,omitempty"`
}

// InvertedIndexField contains configuration for indexing of the field
type InvertedIndexField struct {
	// Name An attribute path. The . character denotes sub-attributes.
	Name string `json:"name"`
	// Analyzer indicating the name of an analyzer instance
	// Default: the value defined by the top-level analyzer option, or if not set, the default identity Analyzer.
	Analyzer string `json:"analyzer,omitempty"`
	// IncludeAllFields This option only applies if you use the inverted index in a search-alias Views.
	// If set to true, then all sub-attributes of this field are indexed, excluding any sub-attributes that are configured separately by other elements in the fields array (and their sub-attributes). The analyzer and features properties apply to the sub-attributes.
	// If set to false, then sub-attributes are ignored. The default value is defined by the top-level includeAllFields option, or false if not set.
	IncludeAllFields bool `json:"includeAllFields,omitempty"`
	// SearchField This option only applies if you use the inverted index in a search-alias Views.
	// You can set the option to true to get the same behavior as with arangosearch Views regarding the indexing of array values for this field. If enabled, both, array and primitive values (strings, numbers, etc.) are accepted. Every element of an array is indexed according to the trackListPositions option.
	// If set to false, it depends on the attribute path. If it explicitly expands an array ([*]), then the elements are indexed separately. Otherwise, the array is indexed as a whole, but only geopoint and aql Analyzers accept array inputs. You cannot use an array expansion if searchField is enabled.
	// Default: the value defined by the top-level searchField option, or false if not set.
	SearchField bool `json:"searchField,omitempty"`
	// TrackListPositions This option only applies if you use the inverted index in a search-alias Views.
	// If set to true, then track the value position in arrays for array values. For example, when querying a document like { attr: [ "valueX", "valueY", "valueZ" ] }, you need to specify the array element, e.g. doc.attr[1] == "valueY".
	// If set to false, all values in an array are treated as equal alternatives. You don't specify an array element in queries, e.g. doc.attr == "valueY", and all elements are searched for a match.
	// Default: the value defined by the top-level trackListPositions option, or false if not set.
	TrackListPositions bool `json:"trackListPositions,omitempty"`
	// Features A list of Analyzer features to use for this field. They define what features are enabled for the analyzer
	Features []ArangoSearchAnalyzerFeature `json:"features,omitempty"`
	// Nested - Index the specified sub-objects that are stored in an array.
	// Other than with the fields property, the values get indexed in a way that lets you query for co-occurring values.
	// For example, you can search the sub-objects and all the conditions need to be met by a single sub-object instead of across all of them.
	// Enterprise-only feature
	Nested []InvertedIndexField `json:"nested,omitempty"`
}
diff --git a/vendor/github.com/arangodb/go-driver/collection_indexes_impl.go b/vendor/github.com/arangodb/go-driver/collection_indexes_impl.go
new file mode 100644
index 00000000000..fb29a6dcda9
--- /dev/null
+++ b/vendor/github.com/arangodb/go-driver/collection_indexes_impl.go
@@ -0,0 +1,388 @@
+//
+// DISCLAIMER
+//
+// Copyright 2017 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import (
	"context"
	"encoding/json"
	"path"
)

// indexData is the wire format shared by the create/read endpoints of the
// _api/index API; pointer fields are omitted from the request body when nil.
type indexData struct {
	ID                  string   `json:"id,omitempty"`
	Type                string   `json:"type"`
	Fields              []string `json:"fields,omitempty"`
	Unique              *bool    `json:"unique,omitempty"`
	Deduplicate         *bool    `json:"deduplicate,omitempty"`
	Sparse              *bool    `json:"sparse,omitempty"`
	GeoJSON             *bool    `json:"geoJson,omitempty"`
	InBackground        *bool    `json:"inBackground,omitempty"`
	Estimates           *bool    `json:"estimates,omitempty"`
	MaxNumCoverCells    int      `json:"maxNumCoverCells,omitempty"`
	MinLength           int      `json:"minLength,omitempty"`
	ExpireAfter         int      `json:"expireAfter,omitempty"`
	Name                string   `json:"name,omitempty"`
	FieldValueTypes     string   `json:"fieldValueTypes,omitempty"`
	IsNewlyCreated      *bool    `json:"isNewlyCreated,omitempty"`
	SelectivityEstimate float64  `json:"selectivityEstimate,omitempty"`
	BestIndexedLevel    int      `json:"bestIndexedLevel,omitempty"`
	WorstIndexedLevel   int      `json:"worstIndexedLevel,omitempty"`
	LegacyPolygons      *bool    `json:"legacyPolygons,omitempty"`
	CacheEnabled        *bool    `json:"cacheEnabled,omitempty"`
	StoredValues        []string `json:"storedValues,omitempty"`

	ArangoError `json:",inline"`
}

// indexListResponse is the response body of the list-indexes endpoint.
type indexListResponse struct {
	Indexes []json.RawMessage `json:"indexes,omitempty"`
	ArangoError
}

// Index opens a connection to an existing index within the collection.
// If no index with given name exists, a NotFoundError is returned.
func (c *collection) Index(ctx context.Context, name string) (Index, error) {
	req, err := c.conn.NewRequest("GET", path.Join(c.relPath("index"), name))
	if err != nil {
		return nil, WithStack(err)
	}
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return nil, WithStack(err)
	}
	if err := resp.CheckStatus(200); err != nil {
		return nil, WithStack(err)
	}
	var data indexData
	if err := resp.ParseBody("", &data); err != nil {
		return nil, WithStack(err)
	}
	idx, err := newIndex(data, c)
	if err != nil {
		return nil, WithStack(err)
	}
	return idx, nil
}

// IndexExists returns true if an index with given name exists within the collection.
func (c *collection) IndexExists(ctx context.Context, name string) (bool, error) {
	req, err := c.conn.NewRequest("GET", path.Join(c.relPath("index"), name))
	if err != nil {
		return false, WithStack(err)
	}
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return false, WithStack(err)
	}
	// A 200 means the index exists; a not-found status maps to (false, nil).
	if err := resp.CheckStatus(200); err == nil {
		return true, nil
	} else if IsNotFound(err) {
		return false, nil
	} else {
		return false, WithStack(err)
	}
}

// Indexes returns a list of all indexes in the collection.
func (c *collection) Indexes(ctx context.Context) ([]Index, error) {
	req, err := c.conn.NewRequest("GET", path.Join(c.db.relPath(), "_api", "index"))
	if err != nil {
		return nil, WithStack(err)
	}
	req.SetQuery("collection", c.name)
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return nil, WithStack(err)
	}
	if err := resp.CheckStatus(200); err != nil {
		return nil, WithStack(err)
	}
	var data indexListResponse
	if err := resp.ParseBody("", &data); err != nil {
		return nil, WithStack(err)
	}
	result := make([]Index, 0, len(data.Indexes))
	for _, x := range data.Indexes {
		idx, err := newIndexFromMap(x, c)
		if err != nil {
			return nil, WithStack(err)
		}
		result = append(result, idx)
	}
	return result, nil
}

// Deprecated: since 3.10 version. Use ArangoSearch view instead.
// EnsureFullTextIndex creates a fulltext index in the collection, if it does not already exist.
//
// Fields is a slice of attribute names. Currently, the slice is limited to exactly one attribute.
// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
func (c *collection) EnsureFullTextIndex(ctx context.Context, fields []string, options *EnsureFullTextIndexOptions) (Index, bool, error) {
	input := indexData{
		Type:   string(FullTextIndex),
		Fields: fields,
	}
	if options != nil {
		input.InBackground = &options.InBackground
		input.Name = options.Name
		input.MinLength = options.MinLength
		input.Estimates = options.Estimates
	}
	idx, created, err := c.ensureIndex(ctx, input)
	if err != nil {
		return nil, false, WithStack(err)
	}
	return idx, created, nil
}

// EnsureGeoIndex creates a geo index in the collection, if it does not already exist.
//
// Fields is a slice with one or two attribute paths. If it is a slice with one attribute path location,
// then a geo-spatial index on all documents is created using location as path to the coordinates.
// The value of the attribute must be a slice with at least two double values. The slice must contain the latitude (first value)
// and the longitude (second value). All documents, which do not have the attribute path or with value that are not suitable, are ignored.
// If it is a slice with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created
// using latitude and longitude as paths to the latitude and the longitude. The value of the attribute latitude and of the
// attribute longitude must be a double. All documents, which do not have the attribute paths or which values are not suitable, are ignored.
// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
func (c *collection) EnsureGeoIndex(ctx context.Context, fields []string, options *EnsureGeoIndexOptions) (Index, bool, error) {
	input := indexData{
		Type:   string(GeoIndex),
		Fields: fields,
	}
	if options != nil {
		input.InBackground = &options.InBackground
		input.Name = options.Name
		input.GeoJSON = &options.GeoJSON
		input.Estimates = options.Estimates
		// Only send legacyPolygons when explicitly requested; otherwise let the server default apply.
		if options.LegacyPolygons {
			input.LegacyPolygons = &options.LegacyPolygons
		}
	}
	idx, created, err := c.ensureIndex(ctx, input)
	if err != nil {
		return nil, false, WithStack(err)
	}
	return idx, created, nil
}

// EnsureHashIndex creates a hash index in the collection, if it does not already exist.
// Fields is a slice of attribute paths.
// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
func (c *collection) EnsureHashIndex(ctx context.Context, fields []string, options *EnsureHashIndexOptions) (Index, bool, error) {
	input := indexData{
		Type:   string(HashIndex),
		Fields: fields,
	}
	// NoDeduplicate=true is transmitted as deduplicate=false on the wire.
	off := false
	if options != nil {
		input.InBackground = &options.InBackground
		input.Name = options.Name
		input.Unique = &options.Unique
		input.Sparse = &options.Sparse
		input.Estimates = options.Estimates
		if options.NoDeduplicate {
			input.Deduplicate = &off
		}
	}
	idx, created, err := c.ensureIndex(ctx, input)
	if err != nil {
		return nil, false, WithStack(err)
	}
	return idx, created, nil
}

// EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist.
// Fields is a slice of attribute paths.
// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
func (c *collection) EnsurePersistentIndex(ctx context.Context, fields []string, options *EnsurePersistentIndexOptions) (Index, bool, error) {
	input := indexData{
		Type:   string(PersistentIndex),
		Fields: fields,
	}
	// NoDeduplicate=true maps to deduplicate=false; CacheEnabled=true maps to cacheEnabled=true.
	off := false
	on := true
	if options != nil {
		input.InBackground = &options.InBackground
		input.Name = options.Name
		input.Unique = &options.Unique
		input.Sparse = &options.Sparse
		input.Estimates = options.Estimates
		if options.NoDeduplicate {
			input.Deduplicate = &off
		}
		if options.CacheEnabled {
			input.CacheEnabled = &on
		}
		if options.StoredValues != nil {
			input.StoredValues = options.StoredValues
		}
	}
	idx, created, err := c.ensureIndex(ctx, input)
	if err != nil {
		return nil, false, WithStack(err)
	}
	return idx, created, nil
}

// EnsureSkipListIndex creates a skiplist index in the collection, if it does not already exist.
// Fields is a slice of attribute paths.
// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
func (c *collection) EnsureSkipListIndex(ctx context.Context, fields []string, options *EnsureSkipListIndexOptions) (Index, bool, error) {
	input := indexData{
		Type:   string(SkipListIndex),
		Fields: fields,
	}
	// NoDeduplicate=true is transmitted as deduplicate=false on the wire.
	off := false
	if options != nil {
		input.InBackground = &options.InBackground
		input.Name = options.Name
		input.Unique = &options.Unique
		input.Sparse = &options.Sparse
		input.Estimates = options.Estimates
		if options.NoDeduplicate {
			input.Deduplicate = &off
		}
	}
	idx, created, err := c.ensureIndex(ctx, input)
	if err != nil {
		return nil, false, WithStack(err)
	}
	return idx, created, nil
}

// EnsureTTLIndex creates a TTL index in the collection, if it does not already exist.
// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
func (c *collection) EnsureTTLIndex(ctx context.Context, field string, expireAfter int, options *EnsureTTLIndexOptions) (Index, bool, error) {
	input := indexData{
		Type:        string(TTLIndex),
		Fields:      []string{field},
		ExpireAfter: expireAfter,
	}
	if options != nil {
		input.InBackground = &options.InBackground
		input.Name = options.Name
		input.Estimates = options.Estimates
	}
	idx, created, err := c.ensureIndex(ctx, input)
	if err != nil {
		return nil, false, WithStack(err)
	}
	return idx, created, nil
}

// EnsureZKDIndex creates a ZKD index in the collection, if it does not already exist.
// Fields is a slice of attribute paths.
// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
func (c *collection) EnsureZKDIndex(ctx context.Context, fields []string, options *EnsureZKDIndexOptions) (Index, bool, error) {
	input := indexData{
		Type:   string(ZKDIndex),
		Fields: fields,
		// fieldValueTypes is required and the only allowed value is "double". Future extensions of the index will allow other types.
		FieldValueTypes: "double",
	}
	if options != nil {
		input.InBackground = &options.InBackground
		input.Name = options.Name
		input.Unique = &options.Unique
		// Sparse is not supported by the server for zkd indexes yet.
		//input.Sparse = &options.Sparse
	}
	idx, created, err := c.ensureIndex(ctx, input)
	if err != nil {
		return nil, false, WithStack(err)
	}
	return idx, created, nil
}

// invertedIndexData is the wire format for creating/reading an inverted index.
type invertedIndexData struct {
	InvertedIndexOptions
	Type string `json:"type"`
	ID   string `json:"id,omitempty"`

	ArangoError `json:",inline"`
}

// EnsureInvertedIndex creates an inverted index in the collection, if it does not already exist.
// Available in ArangoDB 3.10 and later.
func (c *collection) EnsureInvertedIndex(ctx context.Context, options *InvertedIndexOptions) (Index, bool, error) {
	req, err := c.conn.NewRequest("POST", path.Join(c.db.relPath(), "_api/index"))
	if err != nil {
		return nil, false, WithStack(err)
	}
	if options == nil {
		options = &InvertedIndexOptions{}
	}
	req.SetQuery("collection", c.name)
	if _, err := req.SetBody(invertedIndexData{InvertedIndexOptions: *options, Type: string(InvertedIndex)}); err != nil {
		return nil, false, WithStack(err)
	}
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return nil, false, WithStack(err)
	}
	if err := resp.CheckStatus(200, 201); err != nil {
		return nil, false, WithStack(err)
	}
	// 201 means the index was newly created; 200 means it already existed.
	created := resp.StatusCode() == 201

	var data invertedIndexData
	if err := resp.ParseBody("", &data); err != nil {
		return nil, false, WithStack(err)
	}
	idx, err := newInvertedIndex(data, c)
	if err != nil {
		return nil, false, WithStack(err)
	}
	return idx, created, nil
}

// ensureIndex creates an index of the type given in options in the collection, if it does not already exist.
// Fields is a slice of attribute paths.
// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
func (c *collection) ensureIndex(ctx context.Context, options indexData) (Index, bool, error) {
	req, err := c.conn.NewRequest("POST", path.Join(c.db.relPath(), "_api/index"))
	if err != nil {
		return nil, false, WithStack(err)
	}
	req.SetQuery("collection", c.name)
	if _, err := req.SetBody(options); err != nil {
		return nil, false, WithStack(err)
	}
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return nil, false, WithStack(err)
	}
	if err := resp.CheckStatus(200, 201); err != nil {
		return nil, false, WithStack(err)
	}
	// 201 means the index was newly created; 200 means it already existed.
	created := resp.StatusCode() == 201
	var data indexData
	if err := resp.ParseBody("", &data); err != nil {
		return nil, false, WithStack(err)
	}
	idx, err := newIndex(data, c)
	if err != nil {
		return nil, false, WithStack(err)
	}
	return idx, created, nil
}
diff --git a/vendor/github.com/arangodb/go-driver/connection.go b/vendor/github.com/arangodb/go-driver/connection.go
new file mode 100644
index 00000000000..2f625505f8a
--- /dev/null
+++ b/vendor/github.com/arangodb/go-driver/connection.go
@@ -0,0 +1,172 @@
+//
+// DISCLAIMER
+//
+// Copyright 2017 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import (
	"context"
	"encoding/json"
	"errors"

	velocypack "github.com/arangodb/go-velocypack"
)

// Connection is a connection to a database server using a specific protocol.
type Connection interface {
	// NewRequest creates a new request with given method and path.
	NewRequest(method, path string) (Request, error)

	// Do performs a given request, returning its response.
	Do(ctx context.Context, req Request) (Response, error)

	// Unmarshal unmarshals the given raw object into the given result interface.
	Unmarshal(data RawObject, result interface{}) error

	// Endpoints returns the endpoints used by this connection.
	Endpoints() []string

	// UpdateEndpoints reconfigures the connection to use the given endpoints.
	UpdateEndpoints(endpoints []string) error

	// SetAuthentication configures the authentication used for this connection.
	SetAuthentication(Authentication) (Connection, error)

	// Protocols returns all protocols used by this connection.
	Protocols() ProtocolSet
}

// Request represents the input to a request on the server.
type Request interface {
	// SetQuery sets a single query argument of the request.
	// Any existing query argument with the same key is overwritten.
	SetQuery(key, value string) Request
	// SetBody sets the content of the request.
	// The protocol of the connection determines what kinds of marshalling is taking place.
	// When multiple bodies are given, they are merged, with fields in the first document prevailing.
	SetBody(body ...interface{}) (Request, error)
	// SetBodyArray sets the content of the request as an array.
	// If the given mergeArray is not nil, its elements are merged with the elements in the body array (mergeArray data overrides bodyArray data).
	// The merge is NOT recursive.
	// The protocol of the connection determines what kinds of marshalling is taking place.
	SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) (Request, error)
	// SetBodyImportArray sets the content of the request as an array formatted for importing documents.
	// The protocol of the connection determines what kinds of marshalling is taking place.
	SetBodyImportArray(bodyArray interface{}) (Request, error)
	// SetHeader sets a single header argument of the request.
	// Any existing header argument with the same key is overwritten.
	SetHeader(key, value string) Request
	// Written returns true as soon as this request has been written completely to the network.
	// This does not guarantee that the server has received or processed the request.
	Written() bool
	// Clone creates a new request containing the same data as this request
	Clone() Request
	// Path returns the Request path
	Path() string
	// Method returns the Request method
	Method() string
}

// BodyBuilder builds the payload of a request for a specific wire protocol.
type BodyBuilder interface {
	// GetBody returns data which are generated by the body builder
	GetBody() []byte
	// SetBody sets the content of the request.
	// The protocol of the connection determines what kinds of marshalling is taking place.
	// When multiple bodies are given, they are merged, with fields in the first document prevailing.
	SetBody(body ...interface{}) error
	// SetBodyArray sets the content of the request as an array.
	// If the given mergeArray is not nil, its elements are merged with the elements in the body array (mergeArray data overrides bodyArray data).
	// The merge is NOT recursive.
	// The protocol of the connection determines what kinds of marshalling is taking place.
	SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) error
	// SetBodyImportArray sets the content of the request as an array formatted for importing documents.
	// The protocol of the connection determines what kinds of marshalling is taking place.
	SetBodyImportArray(bodyArray interface{}) error
	// GetContentType returns the type of the data in a body
	GetContentType() string
	// Clone creates new Body builder
	Clone() BodyBuilder
}

// Response represents the response from the server on a given request.
type Response interface {
	// StatusCode returns an HTTP compatible status code of the response.
	StatusCode() int
	// Endpoint returns the endpoint that handled the request.
	Endpoint() string
	// CheckStatus checks if the status of the response equals to one of the given status codes.
	// If so, nil is returned.
	// If not, an attempt is made to parse an error response in the body and an error is returned.
	CheckStatus(validStatusCodes ...int) error
	// Header returns the value of a response header with given key.
	// If no such header is found, an empty string is returned.
	// On nested Response's, this function will always return an empty string.
	Header(key string) string
	// ParseBody performs protocol specific unmarshalling of the response data into the given result.
	// If the given field is non-empty, the contents of that field will be parsed into the given result.
	// This can only be used for requests that return a single object.
	ParseBody(field string, result interface{}) error
	// ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects.
	// This can only be used for requests that return an array of objects.
	ParseArrayBody() ([]Response, error)
}

// RawObject is a raw encoded object.
// Connection implementations must be able to unmarshal *RawObject into Go objects.
type RawObject []byte

// MarshalJSON returns *r as the JSON encoding of r.
func (r *RawObject) MarshalJSON() ([]byte, error) {
	return *r, nil
}

// UnmarshalJSON sets *r to a copy of data.
+func (r *RawObject) UnmarshalJSON(data []byte) error { + if r == nil { + return errors.New("RawObject: UnmarshalJSON on nil pointer") + } + *r = append((*r)[0:0], data...) + return nil +} + +// Ensure RawObject implements json.Marshaler & json.Unmarshaler +var _ json.Marshaler = (*RawObject)(nil) +var _ json.Unmarshaler = (*RawObject)(nil) + +// MarshalVPack returns m as the Velocypack encoding of m. +func (r RawObject) MarshalVPack() (velocypack.Slice, error) { + if r == nil { + return velocypack.NullSlice(), nil + } + return velocypack.Slice(r), nil +} + +// UnmarshalVPack sets *m to a copy of data. +func (r *RawObject) UnmarshalVPack(data velocypack.Slice) error { + if r == nil { + return errors.New("velocypack.RawSlice: UnmarshalVPack on nil pointer") + } + *r = append((*r)[0:0], data...) + return nil +} + +var _ velocypack.Marshaler = (*RawObject)(nil) +var _ velocypack.Unmarshaler = (*RawObject)(nil) diff --git a/vendor/github.com/arangodb/go-driver/content_type.go b/vendor/github.com/arangodb/go-driver/content_type.go new file mode 100644 index 00000000000..fe4b0ada776 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/content_type.go @@ -0,0 +1,46 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "fmt" + +// ContentType identifies the type of encoding to use for the data. +type ContentType int + +const ( + // ContentTypeJSON encodes data as json + ContentTypeJSON ContentType = iota + // ContentTypeVelocypack encodes data as Velocypack + ContentTypeVelocypack +) + +func (ct ContentType) String() string { + switch ct { + case ContentTypeJSON: + return "application/json" + case ContentTypeVelocypack: + return "application/x-velocypack" + default: + panic(fmt.Sprintf("Unknown content type %d", int(ct))) + } +} diff --git a/vendor/github.com/arangodb/go-driver/context.go b/vendor/github.com/arangodb/go-driver/context.go new file mode 100644 index 00000000000..8789828e719 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/context.go @@ -0,0 +1,543 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// Author Tomasz Mielech +// + +package driver + +import ( + "context" + "fmt" + "reflect" + "strconv" + "time" + + "github.com/arangodb/go-driver/util" +) + +// ContextKey is an internal type used for holding values in a `context.Context` +// do not use!. 
+type ContextKey string + +const ( + keyRevision ContextKey = "arangodb-revision" + keyRevisions ContextKey = "arangodb-revisions" + keyReturnNew ContextKey = "arangodb-returnNew" + keyReturnOld ContextKey = "arangodb-returnOld" + keySilent ContextKey = "arangodb-silent" + keyWaitForSync ContextKey = "arangodb-waitForSync" + keyDetails ContextKey = "arangodb-details" + keyKeepNull ContextKey = "arangodb-keepNull" + keyMergeObjects ContextKey = "arangodb-mergeObjects" + keyRawResponse ContextKey = "arangodb-rawResponse" + keyImportDetails ContextKey = "arangodb-importDetails" + keyResponse ContextKey = "arangodb-response" + keyEndpoint ContextKey = "arangodb-endpoint" + keyIsRestore ContextKey = "arangodb-isRestore" + keyIsSystem ContextKey = "arangodb-isSystem" + keyIgnoreRevs ContextKey = "arangodb-ignoreRevs" + keyEnforceReplicationFactor ContextKey = "arangodb-enforceReplicationFactor" + keyConfigured ContextKey = "arangodb-configured" + keyFollowLeaderRedirect ContextKey = "arangodb-followLeaderRedirect" + keyDBServerID ContextKey = "arangodb-dbserverID" + keyBatchID ContextKey = "arangodb-batchID" + keyJobIDResponse ContextKey = "arangodb-jobIDResponse" + keyAllowDirtyReads ContextKey = "arangodb-allowDirtyReads" + keyTransactionID ContextKey = "arangodb-transactionID" + keyOverwriteMode ContextKey = "arangodb-overwriteMode" + keyOverwrite ContextKey = "arangodb-overwrite" + keyUseQueueTimeout ContextKey = "arangodb-use-queue-timeout" + keyMaxQueueTime ContextKey = "arangodb-max-queue-time-seconds" +) + +type OverwriteMode string + +const ( + OverwriteModeIgnore OverwriteMode = "ignore" + OverwriteModeReplace OverwriteMode = "replace" + OverwriteModeUpdate OverwriteMode = "update" + OverwriteModeConflict OverwriteMode = "conflict" +) + +// WithRevision is used to configure a context to make document +// functions specify an explicit revision of the document using an `If-Match` condition. 
+func WithRevision(parent context.Context, revision string) context.Context { + return context.WithValue(contextOrBackground(parent), keyRevision, revision) +} + +// WithRevisions is used to configure a context to make multi-document +// functions specify explicit revisions of the documents. +func WithRevisions(parent context.Context, revisions []string) context.Context { + return context.WithValue(contextOrBackground(parent), keyRevisions, revisions) +} + +// WithReturnNew is used to configure a context to make create, update & replace document +// functions return the new document into the given result. +func WithReturnNew(parent context.Context, result interface{}) context.Context { + return context.WithValue(contextOrBackground(parent), keyReturnNew, result) +} + +// WithReturnOld is used to configure a context to make update & replace document +// functions return the old document into the given result. +func WithReturnOld(parent context.Context, result interface{}) context.Context { + return context.WithValue(contextOrBackground(parent), keyReturnOld, result) +} + +// WithDetails is used to configure a context to make Client.Version return additional details. +// You can pass a single (optional) boolean. If that is set to false, you explicitly ask to not provide details. +func WithDetails(parent context.Context, value ...bool) context.Context { + v := true + if len(value) == 1 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyDetails, v) +} + +// WithEndpoint is used to configure a context that forces a request to be executed on a specific endpoint. +// If you specify an endpoint like this, failover is disabled. +// If you specify an unknown endpoint, an InvalidArgumentError is returned from requests. 
+func WithEndpoint(parent context.Context, endpoint string) context.Context { + endpoint = util.FixupEndpointURLScheme(endpoint) + return context.WithValue(contextOrBackground(parent), keyEndpoint, endpoint) +} + +// WithKeepNull is used to configure a context to make update functions keep null fields (value==true) +// or remove fields with null values (value==false). +func WithKeepNull(parent context.Context, value bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyKeepNull, value) +} + +// WithMergeObjects is used to configure a context to make update functions merge objects present in both +// the existing document and the patch document (value==true) or overwrite objects in the existing document +// with objects found in the patch document (value==false) +func WithMergeObjects(parent context.Context, value bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyMergeObjects, value) +} + +// WithSilent is used to configure a context to make functions return an empty result (silent==true), +// instead of a metadata result (silent==false, default). +// You can pass a single (optional) boolean. If that is set to false, you explicitly ask to return metadata result. +func WithSilent(parent context.Context, value ...bool) context.Context { + v := true + if len(value) == 1 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keySilent, v) +} + +// WithWaitForSync is used to configure a context to make modification +// functions wait until the data has been synced to disk (or not). +// You can pass a single (optional) boolean. If that is set to false, you explicitly do not wait for +// data to be synced to disk. 
+func WithWaitForSync(parent context.Context, value ...bool) context.Context { + v := true + if len(value) == 1 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyWaitForSync, v) +} + +// WithAllowDirtyReads is used in an active failover deployment to allow reads from the follower. +// You can pass a reference to a boolean that will set according to whether a potentially dirty read +// happened or not. nil is allowed. +// This is valid for document reads, aql queries, gharial vertex and edge reads. +// Since 3.10 This feature is available in the Enterprise Edition for cluster deployments as well +func WithAllowDirtyReads(parent context.Context, wasDirtyRead *bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyAllowDirtyReads, wasDirtyRead) +} + +// WithArangoQueueTimeout is used to enable Queue timeout on the server side. +// If WithArangoQueueTime is used then its value takes precedence in other case value of ctx.Deadline will be taken +func WithArangoQueueTimeout(parent context.Context, useQueueTimeout bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyUseQueueTimeout, useQueueTimeout) +} + +// WithArangoQueueTime defines max queue timeout on the server side. +func WithArangoQueueTime(parent context.Context, duration time.Duration) context.Context { + return context.WithValue(contextOrBackground(parent), keyMaxQueueTime, duration) +} + +// WithRawResponse is used to configure a context that will make all functions store the raw response into a +// buffer. +func WithRawResponse(parent context.Context, value *[]byte) context.Context { + return context.WithValue(contextOrBackground(parent), keyRawResponse, value) +} + +// WithResponse is used to configure a context that will make all functions store the response into the given value. 
+func WithResponse(parent context.Context, value *Response) context.Context { + return context.WithValue(contextOrBackground(parent), keyResponse, value) +} + +// WithImportDetails is used to configure a context that will make import document requests return +// details about documents that could not be imported. +func WithImportDetails(parent context.Context, value *[]string) context.Context { + return context.WithValue(contextOrBackground(parent), keyImportDetails, value) +} + +// WithIsRestore is used to configure a context to make insert functions use the "isRestore=" +// setting. +// Note: This function is intended for internal (replication) use. It is NOT intended to +// be used by normal client. This CAN screw up your database. +func WithIsRestore(parent context.Context, value bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyIsRestore, value) +} + +// WithIsSystem is used to configure a context to make insert functions use the "isSystem=" +// setting. +func WithIsSystem(parent context.Context, value bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyIsSystem, value) +} + +// WithIgnoreRevisions is used to configure a context to make modification +// functions ignore revisions in the update. +// Do not use in combination with WithRevision or WithRevisions. +func WithIgnoreRevisions(parent context.Context, value ...bool) context.Context { + v := true + if len(value) == 1 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyIgnoreRevs, v) +} + +// WithEnforceReplicationFactor is used to configure a context to make adding collections +// fail if the replication factor is too high (default or true) or +// silently accept (false). 
+func WithEnforceReplicationFactor(parent context.Context, value bool) context.Context {
+	return context.WithValue(contextOrBackground(parent), keyEnforceReplicationFactor, value)
+}
+
+// WithConfigured is used to configure a context to return the configured value of
+// a user grant instead of the effective grant.
+func WithConfigured(parent context.Context, value ...bool) context.Context {
+	v := true
+	if len(value) == 1 {
+		v = value[0]
+	}
+	return context.WithValue(contextOrBackground(parent), keyConfigured, v)
+}
+
+// WithFollowLeaderRedirect is used to configure a context to turn on/off
+// following redirection responses from the server when the request is answered by a follower.
+// Default behavior is "on".
+func WithFollowLeaderRedirect(parent context.Context, value bool) context.Context {
+	return context.WithValue(contextOrBackground(parent), keyFollowLeaderRedirect, value)
+}
+
+// WithDBServerID is used to configure a context that includes an ID of a specific DBServer.
+func WithDBServerID(parent context.Context, id string) context.Context {
+	return context.WithValue(contextOrBackground(parent), keyDBServerID, id)
+}
+
+// WithBatchID is used to configure a context that includes an ID of a Batch.
+// This is used in replication functions.
+func WithBatchID(parent context.Context, id string) context.Context {
+	return context.WithValue(contextOrBackground(parent), keyBatchID, id)
+}
+
+// WithJobIDResponse is used to configure a context that includes a reference to a JobID
+// that is filled on an error-free response.
+// This is used in cluster functions.
+func WithJobIDResponse(parent context.Context, jobID *string) context.Context { + return context.WithValue(contextOrBackground(parent), keyJobIDResponse, jobID) +} + +// WithTransactionID is used to bind a request to a specific transaction +func WithTransactionID(parent context.Context, tid TransactionID) context.Context { + return context.WithValue(contextOrBackground(parent), keyTransactionID, tid) +} + +// WithOverwriteMode is used to configure a context to instruct how a document should be overwritten. +func WithOverwriteMode(parent context.Context, mode OverwriteMode) context.Context { + return context.WithValue(contextOrBackground(parent), keyOverwriteMode, mode) +} + +// WithOverwrite is used to configure a context to instruct if a document should be overwritten. +func WithOverwrite(parent context.Context) context.Context { + return context.WithValue(contextOrBackground(parent), keyOverwrite, true) +} + +type contextSettings struct { + Silent bool + WaitForSync bool + ReturnOld interface{} + ReturnNew interface{} + Revision string + Revisions []string + ImportDetails *[]string + IsRestore bool + IsSystem bool + AllowDirtyReads bool + DirtyReadFlag *bool + IgnoreRevs *bool + EnforceReplicationFactor *bool + Configured *bool + FollowLeaderRedirect *bool + DBServerID string + BatchID string + JobIDResponse *string + OverwriteMode OverwriteMode + Overwrite bool + QueueTimeout bool + MaxQueueTime time.Duration +} + +// loadContextResponseValue loads generic values from the response and puts it into variables specified +// via context values. 
+func loadContextResponseValues(cs contextSettings, resp Response) { + // Parse potential dirty read + if cs.DirtyReadFlag != nil { + if dirtyRead := resp.Header("X-Arango-Potential-Dirty-Read"); dirtyRead != "" { + *cs.DirtyReadFlag = true // The documentation does not say anything about the actual value (dirtyRead == "true") + } else { + *cs.DirtyReadFlag = false + } + } +} + +// setDirtyReadFlagIfRequired is a helper function that sets the bool reference for allowDirtyReads to the +// specified value, if required and reference is not nil. +func setDirtyReadFlagIfRequired(ctx context.Context, wasDirty bool) { + if v := ctx.Value(keyAllowDirtyReads); v != nil { + if ref, ok := v.(*bool); ok && ref != nil { + *ref = wasDirty + } + } +} + +// applyContextSettings returns the settings configured in the context in the given request. +// It then returns information about the applied settings that may be needed later in API implementation functions. +func applyContextSettings(ctx context.Context, req Request) contextSettings { + result := contextSettings{} + if ctx == nil { + return result + } + // Details + if v := ctx.Value(keyDetails); v != nil { + if details, ok := v.(bool); ok { + req.SetQuery("details", strconv.FormatBool(details)) + } + } + // KeepNull + if v := ctx.Value(keyKeepNull); v != nil { + if keepNull, ok := v.(bool); ok { + req.SetQuery("keepNull", strconv.FormatBool(keepNull)) + } + } + // MergeObjects + if v := ctx.Value(keyMergeObjects); v != nil { + if mergeObjects, ok := v.(bool); ok { + req.SetQuery("mergeObjects", strconv.FormatBool(mergeObjects)) + } + } + // Silent + if v := ctx.Value(keySilent); v != nil { + if silent, ok := v.(bool); ok { + req.SetQuery("silent", strconv.FormatBool(silent)) + result.Silent = silent + } + } + // WaitForSync + if v := ctx.Value(keyWaitForSync); v != nil { + if waitForSync, ok := v.(bool); ok { + req.SetQuery("waitForSync", strconv.FormatBool(waitForSync)) + result.WaitForSync = waitForSync + } + } + // 
AllowDirtyReads + if v := ctx.Value(keyAllowDirtyReads); v != nil { + req.SetHeader("x-arango-allow-dirty-read", "true") + result.AllowDirtyReads = true + if dirtyReadFlag, ok := v.(*bool); ok { + result.DirtyReadFlag = dirtyReadFlag + } + } + + // Enable Queue timeout + if v := ctx.Value(keyUseQueueTimeout); v != nil { + if useQueueTimeout, ok := v.(bool); ok && useQueueTimeout { + result.QueueTimeout = useQueueTimeout + if v := ctx.Value(keyMaxQueueTime); v != nil { + if timeout, ok := v.(time.Duration); ok { + result.MaxQueueTime = timeout + req.SetHeader("x-arango-queue-time-seconds", fmt.Sprint(timeout.Seconds())) + } + } else if deadline, ok := ctx.Deadline(); ok { + timeout := deadline.Sub(time.Now()) + req.SetHeader("x-arango-queue-time-seconds", fmt.Sprint(timeout.Seconds())) + } + } + } + + // TransactionID + if v := ctx.Value(keyTransactionID); v != nil { + req.SetHeader("x-arango-trx-id", string(v.(TransactionID))) + } + // ReturnOld + if v := ctx.Value(keyReturnOld); v != nil { + req.SetQuery("returnOld", "true") + result.ReturnOld = v + } + // ReturnNew + if v := ctx.Value(keyReturnNew); v != nil { + req.SetQuery("returnNew", "true") + result.ReturnNew = v + } + // If-Match + if v := ctx.Value(keyRevision); v != nil { + if rev, ok := v.(string); ok { + req.SetHeader("If-Match", rev) + result.Revision = rev + } + } + // Revisions + if v := ctx.Value(keyRevisions); v != nil { + if revs, ok := v.([]string); ok { + req.SetQuery("ignoreRevs", "false") + result.Revisions = revs + } + } + // ImportDetails + if v := ctx.Value(keyImportDetails); v != nil { + if details, ok := v.(*[]string); ok { + req.SetQuery("details", "true") + result.ImportDetails = details + } + } + // IsRestore + if v := ctx.Value(keyIsRestore); v != nil { + if isRestore, ok := v.(bool); ok { + req.SetQuery("isRestore", strconv.FormatBool(isRestore)) + result.IsRestore = isRestore + } + } + // IsSystem + if v := ctx.Value(keyIsSystem); v != nil { + if isSystem, ok := v.(bool); ok { + 
req.SetQuery("isSystem", strconv.FormatBool(isSystem)) + result.IsSystem = isSystem + } + } + // IgnoreRevs + if v := ctx.Value(keyIgnoreRevs); v != nil { + if ignoreRevs, ok := v.(bool); ok { + req.SetQuery("ignoreRevs", strconv.FormatBool(ignoreRevs)) + result.IgnoreRevs = &ignoreRevs + } + } + // EnforeReplicationFactor + if v := ctx.Value(keyEnforceReplicationFactor); v != nil { + if enforceReplicationFactor, ok := v.(bool); ok { + req.SetQuery("enforceReplicationFactor", strconv.FormatBool(enforceReplicationFactor)) + result.EnforceReplicationFactor = &enforceReplicationFactor + } + } + // Configured + if v := ctx.Value(keyConfigured); v != nil { + if configured, ok := v.(bool); ok { + req.SetQuery("configured", strconv.FormatBool(configured)) + result.Configured = &configured + } + } + // FollowLeaderRedirect + if v := ctx.Value(keyFollowLeaderRedirect); v != nil { + if followLeaderRedirect, ok := v.(bool); ok { + result.FollowLeaderRedirect = &followLeaderRedirect + } + } + // DBServerID + if v := ctx.Value(keyDBServerID); v != nil { + if id, ok := v.(string); ok { + req.SetQuery("DBserver", id) + result.DBServerID = id + } + } + // BatchID + if v := ctx.Value(keyBatchID); v != nil { + if id, ok := v.(string); ok { + req.SetQuery("batchId", id) + result.BatchID = id + } + } + // JobIDResponse + if v := ctx.Value(keyJobIDResponse); v != nil { + if idRef, ok := v.(*string); ok { + result.JobIDResponse = idRef + } + } + // OverwriteMode + if v := ctx.Value(keyOverwriteMode); v != nil { + if mode, ok := v.(OverwriteMode); ok { + req.SetQuery("overwriteMode", string(mode)) + result.OverwriteMode = mode + } + } + + if v := ctx.Value(keyOverwrite); v != nil { + if overwrite, ok := v.(bool); ok && overwrite { + req.SetQuery("overwrite", "true") + result.Overwrite = true + } + } + + return result +} + +// contextOrBackground returns the given context if it is not nil. +// Returns context.Background() otherwise. 
+func contextOrBackground(ctx context.Context) context.Context { + if ctx != nil { + return ctx + } + return context.Background() +} + +// withDocumentAt returns a context derived from the given parent context to be used in multi-document options +// that needs a client side "loop" implementation. +// It handle: +// - WithRevisions +// - WithReturnNew +// - WithReturnOld +func withDocumentAt(ctx context.Context, index int) (context.Context, error) { + if ctx == nil { + return nil, nil + } + // Revisions + if v := ctx.Value(keyRevisions); v != nil { + if revs, ok := v.([]string); ok { + if index >= len(revs) { + return nil, WithStack(InvalidArgumentError{Message: "Index out of range: revisions"}) + } + ctx = WithRevision(ctx, revs[index]) + } + } + // ReturnOld + if v := ctx.Value(keyReturnOld); v != nil { + val := reflect.ValueOf(v) + ctx = WithReturnOld(ctx, val.Index(index).Addr().Interface()) + } + // ReturnNew + if v := ctx.Value(keyReturnNew); v != nil { + val := reflect.ValueOf(v) + ctx = WithReturnNew(ctx, val.Index(index).Addr().Interface()) + } + + return ctx, nil +} diff --git a/vendor/github.com/arangodb/go-driver/cursor.go b/vendor/github.com/arangodb/go-driver/cursor.go new file mode 100644 index 00000000000..2a8d362437d --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/cursor.go @@ -0,0 +1,91 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package driver
+
+import (
+	"context"
+	"io"
+	"time"
+)
+
+// QueryExtra holds Query extra information
+type QueryExtra interface {
+	// GetStatistics returns Query statistics
+	GetStatistics() QueryStatistics
+
+	// GetProfileRaw returns raw profile information in json
+	GetProfileRaw() ([]byte, bool, error)
+
+	// PlanRaw returns raw plan
+	GetPlanRaw() ([]byte, bool, error)
+}
+
+// Statistics returned with the query cursor
+type QueryStatistics interface {
+	// the total number of data-modification operations successfully executed.
+	WritesExecuted() int64
+	// The total number of data-modification operations that were unsuccessful
+	WritesIgnored() int64
+	// The total number of documents iterated over when scanning a collection without an index.
+	ScannedFull() int64
+	// The total number of documents iterated over when scanning a collection using an index.
+	ScannedIndex() int64
+	// the total number of documents that were removed after executing a filter condition in a FilterNode
+	Filtered() int64
+	// Returns the number of results before the last LIMIT in the query was applied.
+	// A valid return value is only available when the cursor has been created with a context that was
+	// prepared with `WithFullCount`. Additionally this will also not return a valid value if
+	// the context was prepared with `WithStream`.
+	FullCount() int64
+	// Execution time of the query (wall-clock time). value will be set from the outside
+	ExecutionTime() time.Duration
+}
+
+// Cursor is returned from a query, used to iterate over a list of documents.
+// Note that a Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed.
+type Cursor interface {
+	io.Closer
+
+	// HasMore returns true if the next call to ReadDocument does not return a NoMoreDocuments error.
+ HasMore() bool + + // ReadDocument reads the next document from the cursor. + // The document data is stored into result, the document meta data is returned. + // If the cursor has no more documents, a NoMoreDocuments error is returned. + // Note: If the query (resulting in this cursor) does not return documents, + // then the returned DocumentMeta will be empty. + ReadDocument(ctx context.Context, result interface{}) (DocumentMeta, error) + + // Count returns the total number of result documents available. + // A valid return value is only available when the cursor has been created with a context that was + // prepared with `WithQueryCount` and not with `WithQueryStream`. + Count() int64 + + // Statistics returns the query execution statistics for this cursor. + // This might not be valid if the cursor has been created with a context that was + // prepared with `WithQueryStream` + Statistics() QueryStatistics + + // Extra returns the query extras for this cursor. + Extra() QueryExtra +} diff --git a/vendor/github.com/arangodb/go-driver/cursor_impl.go b/vendor/github.com/arangodb/go-driver/cursor_impl.go new file mode 100644 index 00000000000..a1014d0d071 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/cursor_impl.go @@ -0,0 +1,345 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "encoding/json" + "path" + "reflect" + "sync" + "sync/atomic" + "time" +) + +// newCursor creates a new Cursor implementation. +func newCursor(data cursorData, endpoint string, db *database, allowDirtyReads bool) (Cursor, error) { + if db == nil { + return nil, WithStack(InvalidArgumentError{Message: "db is nil"}) + } + return &cursor{ + cursorData: data, + endpoint: endpoint, + db: db, + conn: db.conn, + allowDirtyReads: allowDirtyReads, + }, nil +} + +type cursor struct { + cursorData + endpoint string + resultIndex int + db *database + conn Connection + closed int32 + closeMutex sync.Mutex + allowDirtyReads bool + lastReadWasDirty bool +} + +// CursorStats TODO: all these int64 should be changed into uint64 +type cursorStats struct { + // The total number of data-modification operations successfully executed. + WritesExecutedInt int64 `json:"writesExecuted,omitempty"` + // The total number of data-modification operations that were unsuccessful + WritesIgnoredInt int64 `json:"writesIgnored,omitempty"` + // The total number of documents iterated over when scanning a collection without an index. + ScannedFullInt int64 `json:"scannedFull,omitempty"` + // The total number of documents iterated over when scanning a collection using an index. + ScannedIndexInt int64 `json:"scannedIndex,omitempty"` + // The total number of documents that were removed after executing a filter condition in a FilterNode + FilteredInt int64 `json:"filtered,omitempty"` + // The total number of documents that matched the search condition if the query's final LIMIT statement were not present. + FullCountInt int64 `json:"fullCount,omitempty"` + // Query execution time (wall-clock time). 
value will be set from the outside + ExecutionTimeInt float64 `json:"executionTime,omitempty"` + Nodes []cursorPlanNodes `json:"nodes,omitempty"` + HttpRequests int64 `json:"httpRequests,omitempty"` + PeakMemoryUsage int64 `json:"peakMemoryUsage,omitempty"` + + // CursorsCreated the total number of cursor objects created during query execution. Cursor objects are created for index lookups. + CursorsCreated uint64 `json:"cursorsCreated,omitempty"` + // CursorsRearmed the total number of times an existing cursor object was repurposed. + // Repurposing an existing cursor object is normally more efficient compared to destroying an existing cursor object + // and creating a new one from scratch. + CursorsRearmed uint64 `json:"cursorsRearmed,omitempty"` + // CacheHits the total number of index entries read from in-memory caches for indexes of type edge or persistent. + // This value will only be non-zero when reading from indexes that have an in-memory cache enabled, + // and when the query allows using the in-memory cache (i.e. using equality lookups on all index attributes). + CacheHits uint64 `json:"cacheHits,omitempty"` + // CacheMisses the total number of cache read attempts for index entries that could not be served from in-memory caches for indexes of type edge or persistent. + // This value will only be non-zero when reading from indexes that have an in-memory cache enabled, + // the query allows using the in-memory cache (i.e. using equality lookups on all index attributes) and the looked up values are not present in the cache. 
+ CacheMisses uint64 `json:"cacheMisses,omitempty"` +} + +type cursorPlan struct { + Nodes []cursorPlanNodes `json:"nodes,omitempty"` + Rules []string `json:"rules,omitempty"` + Collections []cursorPlanCollection `json:"collections,omitempty"` + Variables []cursorPlanVariable `json:"variables,omitempty"` + EstimatedCost float64 `json:"estimatedCost,omitempty"` + EstimatedNrItems int `json:"estimatedNrItems,omitempty"` + IsModificationQuery bool `json:"isModificationQuery,omitempty"` +} + +type cursorExtra struct { + Stats cursorStats `json:"stats,omitempty"` + Profile cursorProfile `json:"profile,omitempty"` + Plan *cursorPlan `json:"plan,omitempty"` + Warnings []warn `json:"warnings,omitempty"` +} + +type warn struct { + Code int `json:"code"` + Message string `json:"message"` +} + +func (c cursorExtra) GetStatistics() QueryStatistics { + return c.Stats +} + +func (c cursorExtra) GetProfileRaw() ([]byte, bool, error) { + if c.Profile == nil { + return nil, false, nil + } + + d, err := json.Marshal(c.Profile) + if err != nil { + return nil, true, err + } + + return d, true, nil +} + +func (c cursorExtra) GetPlanRaw() ([]byte, bool, error) { + if c.Plan == nil { + return nil, false, nil + } + + d, err := json.Marshal(c.Plan) + if err != nil { + return nil, true, err + } + + return d, true, nil +} + +type cursorPlanVariable struct { + ID int `json:"id"` + Name string `json:"name"` + IsDataFromCollection bool `json:"isDataFromCollection"` + IsFullDocumentFromCollection bool `json:"isFullDocumentFromCollection"` +} + +type cursorPlanCollection struct { + Name string `json:"name"` + Type string `json:"type"` +} + +type cursorPlanNodes map[string]interface{} + +type cursorProfile map[string]interface{} + +type cursorData struct { + Key string `json:"_key,omitempty"` + Count int64 `json:"count,omitempty"` // the total number of result documents available (only available if the query was executed with the count attribute set) + ID string `json:"id"` // id of temporary 
cursor created on the server (optional, see above) + Result []*RawObject `json:"result,omitempty"` // an array of result documents (might be empty if query has no results) + HasMore bool `json:"hasMore,omitempty"` // A boolean indicator whether there are more results available for the cursor on the server + Extra cursorExtra `json:"extra"` + Cached bool `json:"cached,omitempty"` + ArangoError +} + +// relPath creates the relative path to this cursor (`_db//_api/cursor`) +func (c *cursor) relPath() string { + return path.Join(c.db.relPath(), "_api", "cursor") +} + +// Name returns the name of the collection. +func (c *cursor) HasMore() bool { + return c.resultIndex < len(c.Result) || c.cursorData.HasMore +} + +// Count returns the total number of result documents available. +// A valid return value is only available when the cursor has been created with a context that was +// prepare with `WithQueryCount`. +func (c *cursor) Count() int64 { + return c.cursorData.Count +} + +// Close deletes the cursor and frees the resources associated with it. +func (c *cursor) Close() error { + if c == nil { + // Avoid panics in the case that someone defer's a close before checking that the cursor is not nil. + return nil + } + if c := atomic.LoadInt32(&c.closed); c != 0 { + return nil + } + c.closeMutex.Lock() + defer c.closeMutex.Unlock() + if c.closed == 0 { + if c.cursorData.ID != "" { + // Force use of initial endpoint + ctx := WithEndpoint(nil, c.endpoint) + + req, err := c.conn.NewRequest("DELETE", path.Join(c.relPath(), c.cursorData.ID)) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(202); err != nil { + return WithStack(err) + } + } + atomic.StoreInt32(&c.closed, 1) + } + return nil +} + +// ReadDocument reads the next document from the cursor. +// The document data is stored into result, the document meta data is returned. 
+// If the cursor has no more documents, a NoMoreDocuments error is returned. +func (c *cursor) ReadDocument(ctx context.Context, result interface{}) (DocumentMeta, error) { + // Force use of initial endpoint + ctx = WithEndpoint(ctx, c.endpoint) + + if c.resultIndex >= len(c.Result) && c.cursorData.HasMore { + // This is required since we are interested if this was a dirty read + // but we do not want to trash the users bool reference. + var wasDirtyRead bool + fetchctx := ctx + if c.allowDirtyReads { + fetchctx = WithAllowDirtyReads(ctx, &wasDirtyRead) + } + + // Fetch next batch + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath(), c.cursorData.ID)) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + cs := applyContextSettings(fetchctx, req) + resp, err := c.conn.Do(fetchctx, req) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return DocumentMeta{}, WithStack(err) + } + loadContextResponseValues(cs, resp) + var data cursorData + if err := resp.ParseBody("", &data); err != nil { + return DocumentMeta{}, WithStack(err) + } + c.cursorData = data + c.resultIndex = 0 + c.lastReadWasDirty = wasDirtyRead + } + // ReadDocument should act as if it would actually do a read + // hence update the bool reference + if c.allowDirtyReads { + setDirtyReadFlagIfRequired(ctx, c.lastReadWasDirty) + } + + index := c.resultIndex + if index >= len(c.Result) { + // Out of data + return DocumentMeta{}, WithStack(NoMoreDocumentsError{}) + } + c.resultIndex++ + var meta DocumentMeta + resultPtr := c.Result[index] + if resultPtr == nil { + // Got NULL result + rv := reflect.ValueOf(result) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return DocumentMeta{}, WithStack(&json.InvalidUnmarshalError{Type: reflect.TypeOf(result)}) + } + e := rv.Elem() + e.Set(reflect.Zero(e.Type())) + } else { + if err := c.conn.Unmarshal(*resultPtr, &meta); err != nil { + // If a cursor returns something other than a 
document, this will fail. + // Just ignore it. + } + if err := c.conn.Unmarshal(*resultPtr, result); err != nil { + return DocumentMeta{}, WithStack(err) + } + } + return meta, nil +} + +// Return execution statistics for this cursor. This might not +// be valid if the cursor has been created with a context that was +// prepared with `WithStream` +func (c *cursor) Statistics() QueryStatistics { + return c.cursorData.Extra.Stats +} + +func (c *cursor) Extra() QueryExtra { + return c.cursorData.Extra +} + +// the total number of data-modification operations successfully executed. +func (cs cursorStats) WritesExecuted() int64 { + return cs.WritesExecutedInt +} + +// The total number of data-modification operations that were unsuccessful +func (cs cursorStats) WritesIgnored() int64 { + return cs.WritesIgnoredInt +} + +// The total number of documents iterated over when scanning a collection without an index. +func (cs cursorStats) ScannedFull() int64 { + return cs.ScannedFullInt +} + +// The total number of documents iterated over when scanning a collection using an index. +func (cs cursorStats) ScannedIndex() int64 { + return cs.ScannedIndexInt +} + +// the total number of documents that were removed after executing a filter condition in a FilterNode +func (cs cursorStats) Filtered() int64 { + return cs.FilteredInt +} + +// Returns the numer of results before the last LIMIT in the query was applied. +// A valid return value is only available when the has been created with a context that was +// prepared with `WithFullCount`. Additionally this will also not return a valid value if +// the context was prepared with `WithStream`. +func (cs cursorStats) FullCount() int64 { + return cs.FullCountInt +} + +// query execution time (wall-clock time). 
value will be set from the outside +func (cs cursorStats) ExecutionTime() time.Duration { + return time.Duration(cs.ExecutionTimeInt * float64(time.Second)) +} diff --git a/vendor/github.com/arangodb/go-driver/database.go b/vendor/github.com/arangodb/go-driver/database.go new file mode 100644 index 00000000000..024c7d5d096 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database.go @@ -0,0 +1,129 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// Database provides access to all collections & graphs in a single database. +type Database interface { + // Name returns the name of the database. + Name() string + + // Info fetches information about the database. + Info(ctx context.Context) (DatabaseInfo, error) + + // EngineInfo returns information about the database engine being used. + // Note: When your cluster has multiple endpoints (cluster), you will get information + // from the server that is currently being used. + // If you want to know exactly which server the information is from, use a client + // with only a single endpoint and avoid automatic synchronization of endpoints. + EngineInfo(ctx context.Context) (EngineInfo, error) + + // Remove removes the entire database. + // If the database does not exist, a NotFoundError is returned. 
+ Remove(ctx context.Context) error + + // Collection functions + DatabaseCollections + + // View functions + DatabaseViews + + // Graph functions + DatabaseGraphs + + // Pregel functions + DatabasePregels + + // Streaming Transactions functions + DatabaseStreamingTransactions + + // ArangoSearch Analyzers API + DatabaseArangoSearchAnalyzers + + // Query performs an AQL query, returning a cursor used to iterate over the returned documents. + // Note that the returned Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed. + Query(ctx context.Context, query string, bindVars map[string]interface{}) (Cursor, error) + + // ValidateQuery validates an AQL query. + // When the query is valid, nil returned, otherwise an error is returned. + // The query is not executed. + ValidateQuery(ctx context.Context, query string) error + + // OptimizerRulesForQueries returns the available optimizer rules for AQL queries + // returns an array of objects that contain the name of each available rule and its respective flags. + OptimizerRulesForQueries(ctx context.Context) ([]QueryRule, error) + + // Transaction performs a javascript transaction. The result of the transaction function is returned. + Transaction(ctx context.Context, action string, options *TransactionOptions) (interface{}, error) +} + +// DatabaseInfo contains information about a database +type DatabaseInfo struct { + // The identifier of the database. + ID string `json:"id,omitempty"` + // The name of the database. + Name string `json:"name,omitempty"` + // The filesystem path of the database. + Path string `json:"path,omitempty"` + // If true then the database is the _system database. 
+ IsSystem bool `json:"isSystem,omitempty"` + // Default replication factor for collections in database + ReplicationFactor int `json:"replicationFactor,omitempty"` + // Default write concern for collections in database + WriteConcern int `json:"writeConcern,omitempty"` + // Default sharding for collections in database + Sharding DatabaseSharding `json:"sharding,omitempty"` +} + +// EngineType indicates type of database engine being used. +type EngineType string + +const ( + EngineTypeMMFiles = EngineType("mmfiles") + EngineTypeRocksDB = EngineType("rocksdb") +) + +func (t EngineType) String() string { + return string(t) +} + +// EngineInfo contains information about the database engine being used. +type EngineInfo struct { + Type EngineType `json:"name"` + Supports map[string]interface{} `json:"supports,omitempty"` +} + +type QueryRule struct { + Name string `json:"name"` + Flags QueryFlags `json:"flags,omitempty"` +} + +type QueryFlags struct { + Hidden bool `json:"hidden,omitempty"` + ClusterOnly bool `json:"clusterOnly,omitempty"` + CanBeDisabled bool `json:"canBeDisabled,omitempty"` + CanCreateAdditionalPlans bool `json:"canCreateAdditionalPlans,omitempty"` + DisabledByDefault bool `json:"disabledByDefault,omitempty"` + EnterpriseOnly bool `json:"enterpriseOnly,omitempty"` +} diff --git a/vendor/github.com/arangodb/go-driver/database_arangosearch_analyzers.go b/vendor/github.com/arangodb/go-driver/database_arangosearch_analyzers.go new file mode 100644 index 00000000000..c2fa463faa3 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_arangosearch_analyzers.go @@ -0,0 +1,61 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Lars Maier +// + +package driver + +import "context" + +type ArangoSearchAnalyzer interface { + // Name returns the analyzer name + Name() string + + // Type returns the analyzer type + Type() ArangoSearchAnalyzerType + + // UniqueName returns the unique name: :: + UniqueName() string + + // Definition returns the analyzer definition + Definition() ArangoSearchAnalyzerDefinition + + // Properties returns the analyzer properties + Properties() ArangoSearchAnalyzerProperties + + // Database returns the database of this analyzer + Database() Database + + // Removes the analyzers + Remove(ctx context.Context, force bool) error +} + +type DatabaseArangoSearchAnalyzers interface { + + // Ensure ensures that the given analyzer exists. If it does not exist it is created. + // The function returns whether the analyzer already existed or an error. 
+ EnsureAnalyzer(ctx context.Context, analyzer ArangoSearchAnalyzerDefinition) (bool, ArangoSearchAnalyzer, error) + + // Get returns the analyzer definition for the given analyzer or returns an error + Analyzer(ctx context.Context, name string) (ArangoSearchAnalyzer, error) + + // List returns a list of all analyzers + Analyzers(ctx context.Context) ([]ArangoSearchAnalyzer, error) +} diff --git a/vendor/github.com/arangodb/go-driver/database_arangosearch_analyzers_impl.go b/vendor/github.com/arangodb/go-driver/database_arangosearch_analyzers_impl.go new file mode 100644 index 00000000000..a832595ed07 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_arangosearch_analyzers_impl.go @@ -0,0 +1,184 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Lars Maier +// + +package driver + +import ( + "context" + "path" + "strings" +) + +type analyzer struct { + definition ArangoSearchAnalyzerDefinition + db *database +} + +// Name returns the analyzer name +func (a *analyzer) Name() string { + split := strings.Split(a.definition.Name, "::") + return split[len(split)-1] +} + +// UniqueName returns the unique name: :: +func (a *analyzer) UniqueName() string { + return a.definition.Name +} + +// Type returns the analyzer type +func (a *analyzer) Type() ArangoSearchAnalyzerType { + return a.definition.Type +} + +// Definition returns the analyzer definition +func (a *analyzer) Definition() ArangoSearchAnalyzerDefinition { + return a.definition +} + +// Properties returns the analyzer properties +func (a *analyzer) Properties() ArangoSearchAnalyzerProperties { + return a.definition.Properties +} + +// Removes the analyzers +func (a *analyzer) Remove(ctx context.Context, force bool) error { + req, err := a.db.conn.NewRequest("DELETE", path.Join(a.db.relPath(), "_api/analyzer/", a.Name())) + if err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + payload := struct { + Force bool `json:"force,omitempty"` + }{ + Force: force, + } + req, err = req.SetBody(payload) + if err != nil { + return WithStack(err) + } + resp, err := a.db.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + var actualDef ArangoSearchAnalyzerDefinition + if err := resp.ParseBody("", &actualDef); err != nil { + return WithStack(err) + } + return nil +} + +// Database returns the database of this analyzer +func (a *analyzer) Database() Database { + return a.db +} + +// Ensure ensures that the given analyzer exists. If it does not exist it is created. +// The function returns whether the analyzer already existed or an error. 
+func (d *database) EnsureAnalyzer(ctx context.Context, definition ArangoSearchAnalyzerDefinition) (bool, ArangoSearchAnalyzer, error) { + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/analyzer")) + if err != nil { + return false, nil, WithStack(err) + } + applyContextSettings(ctx, req) + req, err = req.SetBody(definition) + if err != nil { + return false, nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return false, nil, WithStack(err) + } + if err := resp.CheckStatus(201, 200); err != nil { + return false, nil, WithStack(err) + } + found := resp.StatusCode() == 200 + var actualDef ArangoSearchAnalyzerDefinition + if err := resp.ParseBody("", &actualDef); err != nil { + return false, nil, WithStack(err) + } + return found, &analyzer{ + db: d, + definition: actualDef, + }, nil +} + +// Get returns the analyzer definition for the given analyzer or returns an error +func (d *database) Analyzer(ctx context.Context, name string) (ArangoSearchAnalyzer, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/analyzer/", name)) + if err != nil { + return nil, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var actualDef ArangoSearchAnalyzerDefinition + if err := resp.ParseBody("", &actualDef); err != nil { + return nil, WithStack(err) + } + return &analyzer{ + db: d, + definition: actualDef, + }, nil +} + +type analyzerListResponse struct { + Analyzer []ArangoSearchAnalyzerDefinition `json:"result,omitempty"` + ArangoError +} + +// List returns a list of all analyzers +func (d *database) Analyzers(ctx context.Context) ([]ArangoSearchAnalyzer, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/analyzer")) + if err != nil { + return nil, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := 
d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var response analyzerListResponse + if err := resp.ParseBody("", &response); err != nil { + return nil, WithStack(err) + } + + result := make([]ArangoSearchAnalyzer, 0, len(response.Analyzer)) + for _, a := range response.Analyzer { + result = append(result, &analyzer{ + db: d, + definition: a, + }) + } + + return result, nil +} diff --git a/vendor/github.com/arangodb/go-driver/database_collections.go b/vendor/github.com/arangodb/go-driver/database_collections.go new file mode 100644 index 00000000000..0f9a821b1c5 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_collections.go @@ -0,0 +1,224 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// DatabaseCollections provides access to all collections in a single database. +type DatabaseCollections interface { + // Collection opens a connection to an existing collection within the database. + // If no collection with given name exists, an NotFoundError is returned. + Collection(ctx context.Context, name string) (Collection, error) + + // CollectionExists returns true if a collection with given name exists within the database. 
+ CollectionExists(ctx context.Context, name string) (bool, error) + + // Collections returns a list of all collections in the database. + Collections(ctx context.Context) ([]Collection, error) + + // CreateCollection creates a new collection with given name and options, and opens a connection to it. + // If a collection with given name already exists within the database, a DuplicateError is returned. + CreateCollection(ctx context.Context, name string, options *CreateCollectionOptions) (Collection, error) +} + +// CreateCollectionOptions contains options that customize the creating of a collection. +type CreateCollectionOptions struct { + // CacheEnabled set cacheEnabled option in collection properties + CacheEnabled *bool `json:"cacheEnabled,omitempty"` + // ComputedValues let configure collections to generate document attributes when documents are created or modified, using an AQL expression + ComputedValues []ComputedValue `json:"computedValues,omitempty"` + // This field is used for internal purposes only. DO NOT USE. + DistributeShardsLike string `json:"distributeShardsLike,omitempty"` + // DoCompact checks if the collection will be compacted (default is true) + DoCompact *bool `json:"doCompact,omitempty"` + // The number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power + // of 2 and less than or equal to 1024. For very large collections one should increase this to avoid long pauses when the hash + // table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. + // For example, 64 might be a sensible value for a collection with 100 000 000 documents. + // Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. + // Changes are applied when the collection is loaded the next time. + IndexBuckets int `json:"indexBuckets,omitempty"` + // Available from 3.9 ArangoD version. 
+ InternalValidatorType int `json:"internalValidatorType,omitempty"` + // IsDisjoint set isDisjoint flag for Graph. Required ArangoDB 3.7+ + IsDisjoint bool `json:"isDisjoint,omitempty"` + // Set to create a smart edge or vertex collection. + // This requires ArangoDB Enterprise Edition. + IsSmart bool `json:"isSmart,omitempty"` + // If true, create a system collection. In this case collection-name should start with an underscore. + // End users should normally create non-system collections only. API implementors may be required to create system + // collections in very special occasions, but normally a regular collection will do. (The default is false) + IsSystem bool `json:"isSystem,omitempty"` + // If true then the collection data is kept in-memory only and not made persistent. + // Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also + // cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster + // than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any + // CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, + // and not for data that cannot be re-created otherwise. (The default is false) + IsVolatile bool `json:"isVolatile,omitempty"` + // The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MiB). (The default is a configuration parameter) + JournalSize int `json:"journalSize,omitempty"` + // Specifies how keys in the collection are created. + KeyOptions *CollectionKeyOptions `json:"keyOptions,omitempty"` + // Deprecated: use 'WriteConcern' instead + MinReplicationFactor int `json:"minReplicationFactor,omitempty"` + // In a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless. 
(default is 1) + NumberOfShards int `json:"numberOfShards,omitempty"` + // ReplicationFactor in a cluster (default is 1), this attribute determines how many copies of each shard are kept on different DBServers. + // The value 1 means that only one copy (no synchronous replication) is kept. + // A value of k means that k-1 replicas are kept. Any two copies reside on different DBServers. + // Replication between them is synchronous, that is, every write operation to the "leader" copy will be replicated to all "follower" replicas, + // before the write operation is reported successful. If a server fails, this is detected automatically + // and one of the servers holding copies take over, usually without an error being reported. + ReplicationFactor int `json:"replicationFactor,omitempty"` + // Schema for collection validation + Schema *CollectionSchemaOptions `json:"schema,omitempty"` + // This attribute specifies the name of the sharding strategy to use for the collection. + // Must be one of ShardingStrategy* values. + ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"` + // In a cluster, this attribute determines which document attributes are used to + // determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. + // The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. + // Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup. + // The default is []string{"_key"}. + ShardKeys []string `json:"shardKeys,omitempty"` + // This field must be set to the attribute that will be used for sharding or smart graphs. + // All vertices are required to have this attribute set. Edges derive the attribute from their connected vertices. + // This requires ArangoDB Enterprise Edition. 
+ SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"` + // SmartJoinAttribute + // In the specific case that the two collections have the same number of shards, the data of the two collections can + // be co-located on the same server for the same shard key values. In this case the extra hop via the coordinator will not be necessary. + // See documentation for smart joins. + // This requires ArangoDB Enterprise Edition. + SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"` + // Available from 3.7 ArangoDB version + SyncByRevision bool `json:"syncByRevision,omitempty"` + // The type of the collection to create. (default is CollectionTypeDocument) + Type CollectionType `json:"type,omitempty"` + // If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false) + WaitForSync bool `json:"waitForSync,omitempty"` + // WriteConcern contains how many copies must be available before a collection can be written. + // It is required that 1 <= WriteConcern <= ReplicationFactor. + // Default is 1. Not available for satellite collections. + // Available from 3.6 ArangoDB version. + WriteConcern int `json:"writeConcern,omitempty"` +} + +// Init translate deprecated fields into current one for backward compatibility +func (c *CreateCollectionOptions) Init() { + if c == nil { + return + } + + c.KeyOptions.Init() +} + +// CollectionType is the type of a collection. +type CollectionType int + +const ( + // CollectionTypeDocument specifies a document collection + CollectionTypeDocument = CollectionType(2) + // CollectionTypeEdge specifies an edges collection + CollectionTypeEdge = CollectionType(3) +) + +type ComputedValue struct { + // The name of the target attribute. Can only be a top-level attribute, but you + // may return a nested object. Cannot be `_key`, `_id`, `_rev`, `_from`, `_to`, + // or a shard key attribute. 
+ Name string `json:"name"` + // An AQL `RETURN` operation with an expression that computes the desired value. + Expression string `json:"expression"` + // An array of strings to define on which write operations the value shall be + // computed. The possible values are `"insert"`, `"update"`, and `"replace"`. + // The default is `["insert", "update", "replace"]`. + ComputeOn []ComputeOn `json:"computeOn,omitempty"` + // Whether the computed value shall take precedence over a user-provided or existing attribute. + Overwrite bool `json:"overwrite"` + // Whether to let the write operation fail if the expression produces a warning. The default is false. + FailOnWarning *bool `json:"failOnWarning,omitempty"` + // Whether the result of the expression shall be stored if it evaluates to `null`. + // This can be used to skip the value computation if any pre-conditions are not met. + KeepNull *bool `json:"keepNull,omitempty"` +} + +type ComputeOn string + +const ( + ComputeOnInsert ComputeOn = "insert" + ComputeOnUpdate ComputeOn = "update" + ComputeOnReplace ComputeOn = "replace" +) + +// CollectionKeyOptions specifies ways for creating keys of a collection. +type CollectionKeyOptions struct { + // If set to true, then it is allowed to supply own key values in the _key attribute of a document. + // If set to false, then the key generator will solely be responsible for generating keys and supplying own + // key values in the _key attribute of documents is considered an error. + // Deprecated: Use AllowUserKeysPtr instead + AllowUserKeys bool `json:"-"` + // If set to true, then it is allowed to supply own key values in the _key attribute of a document. + // If set to false, then the key generator will solely be responsible for generating keys and supplying own + // key values in the _key attribute of documents is considered an error. + AllowUserKeysPtr *bool `json:"allowUserKeys,omitempty"` + // Specifies the type of the key generator. 
The currently available generators are traditional and autoincrement. + Type KeyGeneratorType `json:"type,omitempty"` + // increment value for autoincrement key generator. Not used for other key generator types. + Increment int `json:"increment,omitempty"` + // Initial offset value for autoincrement key generator. Not used for other key generator types. + Offset int `json:"offset,omitempty"` +} + +// Init translate deprecated fields into current one for backward compatibility +func (c *CollectionKeyOptions) Init() { + if c == nil { + return + } + + if c.AllowUserKeysPtr == nil { + if c.AllowUserKeys { + c.AllowUserKeysPtr = &c.AllowUserKeys + } + } +} + +// KeyGeneratorType is a type of key generated, used in `CollectionKeyOptions`. +type KeyGeneratorType string + +const ( + KeyGeneratorTraditional = KeyGeneratorType("traditional") + KeyGeneratorAutoIncrement = KeyGeneratorType("autoincrement") +) + +// ShardingStrategy describes the sharding strategy of a collection +type ShardingStrategy string + +const ( + ShardingStrategyCommunityCompat ShardingStrategy = "community-compat" + ShardingStrategyEnterpriseCompat ShardingStrategy = "enterprise-compat" + ShardingStrategyEnterpriseSmartEdgeCompat ShardingStrategy = "enterprise-smart-edge-compat" + ShardingStrategyHash ShardingStrategy = "hash" + ShardingStrategyEnterpriseHashSmartEdge ShardingStrategy = "enterprise-hash-smart-edge" +) diff --git a/vendor/github.com/arangodb/go-driver/database_collections_impl.go b/vendor/github.com/arangodb/go-driver/database_collections_impl.go new file mode 100644 index 00000000000..dbcd3be3040 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_collections_impl.go @@ -0,0 +1,190 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// Collection opens a connection to an existing collection within the database. +// If no collection with given name exists, an NotFoundError is returned. +func (d *database) Collection(ctx context.Context, name string) (Collection, error) { + escapedName := pathEscape(name) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/collection", escapedName)) + if err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + coll, err := newCollection(name, d) + if err != nil { + return nil, WithStack(err) + } + return coll, nil +} + +// CollectionExists returns true if a collection with given name exists within the database. 
+func (d *database) CollectionExists(ctx context.Context, name string) (bool, error) { + escapedName := pathEscape(name) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/collection", escapedName)) + if err != nil { + return false, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err == nil { + return true, nil + } else if IsNotFound(err) { + return false, nil + } else { + return false, WithStack(err) + } +} + +type getCollectionResponse struct { + Result []CollectionInfo `json:"result,omitempty"` +} + +// Collections returns a list of all collections in the database. +func (d *database) Collections(ctx context.Context) ([]Collection, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/collection")) + if err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data getCollectionResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]Collection, 0, len(data.Result)) + for _, info := range data.Result { + col, err := newCollection(info.Name, d) + if err != nil { + return nil, WithStack(err) + } + result = append(result, col) + } + return result, nil +} + +type createCollectionOptionsInternal struct { + CacheEnabled *bool `json:"cacheEnabled,omitempty"` + ComputedValues []ComputedValue `json:"computedValues,omitempty"` + DistributeShardsLike string `json:"distributeShardsLike,omitempty"` + DoCompact *bool `json:"doCompact,omitempty"` + IndexBuckets int `json:"indexBuckets,omitempty"` + InternalValidatorType int `json:"internalValidatorType,omitempty"` + IsDisjoint bool `json:"isDisjoint,omitempty"` + IsSmart bool `json:"isSmart,omitempty"` + IsSystem bool `json:"isSystem,omitempty"` + IsVolatile bool 
`json:"isVolatile,omitempty"` + JournalSize int `json:"journalSize,omitempty"` + KeyOptions *CollectionKeyOptions `json:"keyOptions,omitempty"` + // Deprecated: use 'WriteConcern' instead + MinReplicationFactor int `json:"minReplicationFactor,omitempty"` + Name string `json:"name"` + NumberOfShards int `json:"numberOfShards,omitempty"` + ReplicationFactor replicationFactor `json:"replicationFactor,omitempty"` + Schema *CollectionSchemaOptions `json:"schema,omitempty"` + ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"` + ShardKeys []string `json:"shardKeys,omitempty"` + SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"` + SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"` + SyncByRevision bool `json:"syncByRevision,omitempty"` + Type CollectionType `json:"type,omitempty"` + WaitForSync bool `json:"waitForSync,omitempty"` + WriteConcern int `json:"writeConcern,omitempty"` +} + +// CreateCollection creates a new collection with given name and options, and opens a connection to it. +// If a collection with given name already exists within the database, a DuplicateError is returned. 
+func (d *database) CreateCollection(ctx context.Context, name string, options *CreateCollectionOptions) (Collection, error) { + options.Init() + input := createCollectionOptionsInternal{ + Name: name, + } + if options != nil { + input.fromExternal(options) + } + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/collection")) + if err != nil { + return nil, WithStack(err) + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + col, err := newCollection(name, d) + if err != nil { + return nil, WithStack(err) + } + return col, nil +} + +func (p *createCollectionOptionsInternal) fromExternal(i *CreateCollectionOptions) { + p.CacheEnabled = i.CacheEnabled + p.ComputedValues = i.ComputedValues + p.DistributeShardsLike = i.DistributeShardsLike + p.DoCompact = i.DoCompact + p.IndexBuckets = i.IndexBuckets + p.InternalValidatorType = i.InternalValidatorType + p.IsDisjoint = i.IsDisjoint + p.IsSmart = i.IsSmart + p.IsSystem = i.IsSystem + p.IsVolatile = i.IsVolatile + p.JournalSize = i.JournalSize + p.KeyOptions = i.KeyOptions + p.MinReplicationFactor = i.MinReplicationFactor + p.NumberOfShards = i.NumberOfShards + p.ReplicationFactor = replicationFactor(i.ReplicationFactor) + p.Schema = i.Schema + p.ShardingStrategy = i.ShardingStrategy + p.ShardKeys = i.ShardKeys + p.SmartGraphAttribute = i.SmartGraphAttribute + p.SmartJoinAttribute = i.SmartJoinAttribute + p.SyncByRevision = i.SyncByRevision + p.Type = i.Type + p.WaitForSync = i.WaitForSync + p.WriteConcern = i.WriteConcern +} diff --git a/vendor/github.com/arangodb/go-driver/database_collections_schema.go b/vendor/github.com/arangodb/go-driver/database_collections_schema.go new file mode 100644 index 00000000000..a022a27e912 --- /dev/null +++ 
b/vendor/github.com/arangodb/go-driver/database_collections_schema.go @@ -0,0 +1,54 @@ +// +// DISCLAIMER +// +// Copyright 2020 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Adam Janikowski +// + +package driver + +import ( + "encoding/json" +) + +type CollectionSchemaLevel string + +const ( + CollectionSchemaLevelNone CollectionSchemaLevel = "none" + CollectionSchemaLevelNew CollectionSchemaLevel = "new" + CollectionSchemaLevelModerate CollectionSchemaLevel = "moderate" + CollectionSchemaLevelStrict CollectionSchemaLevel = "strict" +) + +type CollectionSchemaOptions struct { + Rule interface{} `json:"rule,omitempty"` + Level CollectionSchemaLevel `json:"level,omitempty"` + Message string `json:"message,omitempty"` + Type string `json:"type,omitempty"` +} + +func (d *CollectionSchemaOptions) LoadRule(data []byte) error { + var rule interface{} + + if err := json.Unmarshal(data, &rule); err != nil { + return err + } + + d.Rule = rule + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/database_graphs.go b/vendor/github.com/arangodb/go-driver/database_graphs.go new file mode 100644 index 00000000000..c5b1cdbd6eb --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_graphs.go @@ -0,0 +1,92 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +const ( + SatelliteGraph = -100 +) + +// DatabaseGraphs provides access to all graphs in a single database. +type DatabaseGraphs interface { + // Graph opens a connection to an existing graph within the database. + // If no graph with given name exists, an NotFoundError is returned. + Graph(ctx context.Context, name string) (Graph, error) + + // GraphExists returns true if a graph with given name exists within the database. + GraphExists(ctx context.Context, name string) (bool, error) + + // Graphs returns a list of all graphs in the database. + Graphs(ctx context.Context) ([]Graph, error) + + // CreateGraph creates a new graph with given name and options, and opens a connection to it. + // If a graph with given name already exists within the database, a DuplicateError is returned. + // Deprecated: since ArangoDB 3.9 - please use CreateGraphV2 instead + CreateGraph(ctx context.Context, name string, options *CreateGraphOptions) (Graph, error) + + // CreateGraphV2 creates a new graph with given name and options, and opens a connection to it. + // If a graph with given name already exists within the database, a DuplicateError is returned. + CreateGraphV2(ctx context.Context, name string, options *CreateGraphOptions) (Graph, error) +} + +// CreateGraphOptions contains options that customize the creating of a graph. 
+type CreateGraphOptions struct { + // OrphanVertexCollections is an array of additional vertex collections used in the graph. + // These are vertices for which there are no edges linking these vertices with anything. + OrphanVertexCollections []string + // EdgeDefinitions is an array of edge definitions for the graph. + EdgeDefinitions []EdgeDefinition + // IsSmart defines if the created graph should be smart. + // This only has effect in Enterprise Edition. + IsSmart bool + // SmartGraphAttribute is the attribute name that is used to smartly shard the vertices of a graph. + // Every vertex in this Graph has to have this attribute. + // Cannot be modified later. + SmartGraphAttribute string + // NumberOfShards is the number of shards that is used for every collection within this graph. + // Cannot be modified later. + NumberOfShards int + // ReplicationFactor is the number of replication factor that is used for every collection within this graph. + // Cannot be modified later. + ReplicationFactor int + // WriteConcern is the number of min replication factor that is used for every collection within this graph. + // Cannot be modified later. + WriteConcern int + // IsDisjoint set isDisjoint flag for Graph. Required ArangoDB 3.7+ + IsDisjoint bool + // Satellites contains an array of collection names that will be used to create SatelliteCollections for a Hybrid (Disjoint) SmartGraph (Enterprise Edition only) + Satellites []string `json:"satellites,omitempty"` +} + +// EdgeDefinition contains all information needed to define a single edge in a graph. +type EdgeDefinition struct { + // The name of the edge collection to be used. + Collection string `json:"collection"` + // To contains the names of one or more vertex collections that can contain target vertices. + To []string `json:"to"` + // From contains the names of one or more vertex collections that can contain source vertices. 
+ From []string `json:"from"` + // Options contains optional parameters + Options CreateEdgeCollectionOptions `json:"options,omitempty"` +} diff --git a/vendor/github.com/arangodb/go-driver/database_graphs_impl.go b/vendor/github.com/arangodb/go-driver/database_graphs_impl.go new file mode 100644 index 00000000000..c89d5f23a6e --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_graphs_impl.go @@ -0,0 +1,270 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "encoding/json" + "path" + + "github.com/pkg/errors" +) + +// Graph opens a connection to an existing graph within the database. +// If no graph with given name exists, an NotFoundError is returned. 
+func (d *database) Graph(ctx context.Context, name string) (Graph, error) { + escapedName := pathEscape(name) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/gharial", escapedName)) + if err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data getGraphResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + g, err := newGraph(data.Graph, d) + if err != nil { + return nil, WithStack(err) + } + return g, nil +} + +// GraphExists returns true if a graph with given name exists within the database. +func (d *database) GraphExists(ctx context.Context, name string) (bool, error) { + escapedName := pathEscape(name) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/gharial", escapedName)) + if err != nil { + return false, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err == nil { + return true, nil + } else if IsNotFound(err) { + return false, nil + } else { + return false, WithStack(err) + } +} + +type getGraphsResponse struct { + Graphs []graphDefinition `json:"graphs,omitempty"` + ArangoError +} + +// Graphs returns a list of all graphs in the database. 
+func (d *database) Graphs(ctx context.Context) ([]Graph, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/gharial")) + if err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data getGraphsResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]Graph, 0, len(data.Graphs)) + for _, info := range data.Graphs { + g, err := newGraph(info, d) + if err != nil { + return nil, WithStack(err) + } + result = append(result, g) + } + return result, nil +} + +type createGraphOptions struct { + Name string `json:"name"` + OrphanVertexCollections []string `json:"orphanCollections,omitempty"` + EdgeDefinitions []EdgeDefinition `json:"edgeDefinitions,omitempty"` + IsSmart bool `json:"isSmart,omitempty"` + Options *createGraphAdditionalOptions `json:"options,omitempty"` +} + +type graphReplicationFactor int + +func (g graphReplicationFactor) MarshalJSON() ([]byte, error) { + switch g { + case SatelliteGraph: + return json.Marshal(replicationFactorSatelliteString) + default: + return json.Marshal(int(g)) + } +} + +func (g *graphReplicationFactor) UnmarshalJSON(data []byte) error { + var d int + + if err := json.Unmarshal(data, &d); err == nil { + *g = graphReplicationFactor(d) + return nil + } + + var s string + + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + switch s { + case replicationFactorSatelliteString: + *g = graphReplicationFactor(SatelliteGraph) + return nil + default: + return errors.Errorf("Unsupported type %s", s) + } +} + +type createGraphAdditionalOptions struct { + // SmartGraphAttribute is the attribute name that is used to smartly shard the vertices of a graph. + // Every vertex in this Graph has to have this attribute. + // Cannot be modified later. 
+ SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"` + // NumberOfShards is the number of shards that is used for every collection within this graph. + // Cannot be modified later. + NumberOfShards int `json:"numberOfShards,omitempty"` + // ReplicationFactor is the number of replication factor that is used for every collection within this graph. + // Cannot be modified later. + ReplicationFactor graphReplicationFactor `json:"replicationFactor,omitempty"` + // WriteConcern is the number of min replication factor that is used for every collection within this graph. + // Cannot be modified later. + WriteConcern int `json:"writeConcern,omitempty"` + // IsDisjoint set isDisjoint flag for Graph. Required ArangoDB 3.7+ + IsDisjoint bool `json:"isDisjoint,omitempty"` + // Satellites contains an array of collection names that will be used to create SatelliteCollections for a Hybrid (Disjoint) SmartGraph (Enterprise Edition only) + Satellites []string `json:"satellites,omitempty"` +} + +// CreateGraph creates a new graph with given name and options, and opens a connection to it. +// If a graph with given name already exists within the database, a DuplicateError is returned. 
+// Deprecated: since ArangoDB 3.9 - please use CreateGraphV2 instead +func (d *database) CreateGraph(ctx context.Context, name string, options *CreateGraphOptions) (Graph, error) { + input := createGraphOptions{ + Name: name, + } + if options != nil { + input.OrphanVertexCollections = options.OrphanVertexCollections + input.EdgeDefinitions = options.EdgeDefinitions + input.IsSmart = options.IsSmart + if options.ReplicationFactor == SatelliteGraph { + input.Options = &createGraphAdditionalOptions{ + SmartGraphAttribute: options.SmartGraphAttribute, + ReplicationFactor: graphReplicationFactor(options.ReplicationFactor), + IsDisjoint: options.IsDisjoint, + Satellites: options.Satellites, + } + } else if options.SmartGraphAttribute != "" || options.NumberOfShards != 0 { + input.Options = &createGraphAdditionalOptions{ + SmartGraphAttribute: options.SmartGraphAttribute, + NumberOfShards: options.NumberOfShards, + ReplicationFactor: graphReplicationFactor(options.ReplicationFactor), + WriteConcern: options.WriteConcern, + IsDisjoint: options.IsDisjoint, + Satellites: options.Satellites, + } + } + } + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/gharial")) + if err != nil { + return nil, WithStack(err) + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, WithStack(err) + } + var data getGraphResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + g, err := newGraph(data.Graph, d) + if err != nil { + return nil, WithStack(err) + } + return g, nil +} + +// CreateGraphV2 creates a new graph with given name and options, and opens a connection to it. +// If a graph with given name already exists within the database, a DuplicateError is returned. 
+func (d *database) CreateGraphV2(ctx context.Context, name string, options *CreateGraphOptions) (Graph, error) { + input := createGraphOptions{ + Name: name, + } + if options != nil { + input.OrphanVertexCollections = options.OrphanVertexCollections + input.EdgeDefinitions = options.EdgeDefinitions + input.IsSmart = options.IsSmart + input.Options = &createGraphAdditionalOptions{ + SmartGraphAttribute: options.SmartGraphAttribute, + NumberOfShards: options.NumberOfShards, + ReplicationFactor: graphReplicationFactor(options.ReplicationFactor), + WriteConcern: options.WriteConcern, + IsDisjoint: options.IsDisjoint, + Satellites: options.Satellites, + } + } + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/gharial")) + if err != nil { + return nil, WithStack(err) + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, WithStack(err) + } + var data getGraphResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + g, err := newGraph(data.Graph, d) + if err != nil { + return nil, WithStack(err) + } + return g, nil +} diff --git a/vendor/github.com/arangodb/go-driver/database_impl.go b/vendor/github.com/arangodb/go-driver/database_impl.go new file mode 100644 index 00000000000..1dae347d5a2 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_impl.go @@ -0,0 +1,250 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "net/http" + "path" +) + +// newDatabase creates a new Database implementation. +func newDatabase(name string, conn Connection) (Database, error) { + if name == "" { + return nil, WithStack(InvalidArgumentError{Message: "name is empty"}) + } + if conn == nil { + return nil, WithStack(InvalidArgumentError{Message: "conn is nil"}) + } + return &database{ + name: name, + conn: conn, + }, nil +} + +// database implements the Database interface. +type database struct { + name string + conn Connection +} + +// relPath creates the relative path to this database (`_db/`) +func (d *database) relPath() string { + escapedName := pathEscape(d.name) + return path.Join("_db", escapedName) +} + +// Name returns the name of the database. +func (d *database) Name() string { + return d.name +} + +// Info fetches information about the database. 
+func (d *database) Info(ctx context.Context) (DatabaseInfo, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/database/current")) + if err != nil { + return DatabaseInfo{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return DatabaseInfo{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return DatabaseInfo{}, WithStack(err) + } + var data DatabaseInfo + if err := resp.ParseBody("result", &data); err != nil { + return DatabaseInfo{}, WithStack(err) + } + return data, nil +} + +// EngineInfo returns information about the database engine being used. +// Note: When your cluster has multiple endpoints (cluster), you will get information +// from the server that is currently being used. +// If you want to know exactly which server the information is from, use a client +// with only a single endpoint and avoid automatic synchronization of endpoints. +func (d *database) EngineInfo(ctx context.Context) (EngineInfo, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/engine")) + if err != nil { + return EngineInfo{}, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return EngineInfo{}, WithStack(err) + } + if err := resp.CheckStatus(200, 404); err != nil { + return EngineInfo{}, WithStack(err) + } + if resp.StatusCode() == 404 { + // On version 3.1, this endpoint is not yet supported + return EngineInfo{Type: EngineTypeMMFiles}, nil + } + var data EngineInfo + if err := resp.ParseBody("", &data); err != nil { + return EngineInfo{}, WithStack(err) + } + return data, nil +} + +// Remove removes the entire database. +// If the database does not exist, a NotFoundError is returned. 
+func (d *database) Remove(ctx context.Context) error { + req, err := d.conn.NewRequest("DELETE", path.Join("_db/_system/_api/database", pathEscape(d.name))) + if err != nil { + return WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// Query performs an AQL query, returning a cursor used to iterate over the returned documents. +func (d *database) Query(ctx context.Context, query string, bindVars map[string]interface{}) (Cursor, error) { + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/cursor")) + if err != nil { + return nil, WithStack(err) + } + input := queryRequest{ + Query: query, + BindVars: bindVars, + } + input.applyContextSettings(ctx) + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return nil, WithStack(err) + } + var data cursorData + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + col, err := newCursor(data, resp.Endpoint(), d, cs.AllowDirtyReads) + if err != nil { + return nil, WithStack(err) + } + return col, nil +} + +// ValidateQuery validates an AQL query. +// When the query is valid, nil returned, otherwise an error is returned. +// The query is not executed. 
+func (d *database) ValidateQuery(ctx context.Context, query string) error { + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/query")) + if err != nil { + return WithStack(err) + } + input := parseQueryRequest{ + Query: query, + } + if _, err := req.SetBody(input); err != nil { + return WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// OptimizerRulesForQueries returns the available optimizer rules for AQL query +// returns an array of objects that contain the name of each available rule and its respective flags. +func (d *database) OptimizerRulesForQueries(ctx context.Context) ([]QueryRule, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/query/rules")) + if err != nil { + return []QueryRule{}, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return []QueryRule{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return []QueryRule{}, WithStack(err) + } + + var data []QueryRule + responses, err := resp.ParseArrayBody() + if err != nil { + return []QueryRule{}, WithStack(err) + } + + for _, response := range responses { + var rule QueryRule + if err := response.ParseBody("", &rule); err != nil { + return []QueryRule{}, WithStack(err) + } + data = append(data, rule) + } + return data, nil +} + +func (d *database) Transaction(ctx context.Context, action string, options *TransactionOptions) (interface{}, error) { + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/transaction")) + if err != nil { + return nil, WithStack(err) + } + input := transactionRequest{Action: action} + if options != nil { + input.MaxTransactionSize = options.MaxTransactionSize + input.LockTimeout = options.LockTimeout + input.WaitForSync = options.WaitForSync + input.IntermediateCommitCount = options.IntermediateCommitCount + input.Params = 
options.Params + input.IntermediateCommitSize = options.IntermediateCommitSize + input.Collections.Read = options.ReadCollections + input.Collections.Write = options.WriteCollections + input.Collections.Exclusive = options.ExclusiveCollections + } + if _, err = req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err = resp.CheckStatus(http.StatusOK); err != nil { + return nil, WithStack(err) + } + + output := &transactionResponse{} + if err = resp.ParseBody("", output); err != nil { + return nil, WithStack(err) + } + + return output.Result, nil +} diff --git a/vendor/github.com/arangodb/go-driver/database_pregel.go b/vendor/github.com/arangodb/go-driver/database_pregel.go new file mode 100644 index 00000000000..caeafd972ee --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_pregel.go @@ -0,0 +1,182 @@ +// +// DISCLAIMER +// +// Copyright 2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package driver + +import ( + "context" + "time" +) + +// DatabasePregels provides access to all Pregel Jobs in a single database. 
+type DatabasePregels interface { + // StartJob - Start the execution of a Pregel algorithm + StartJob(ctx context.Context, options PregelJobOptions) (string, error) + // GetJob - Get the status of a Pregel execution + GetJob(ctx context.Context, id string) (*PregelJob, error) + // GetJobs - Returns a list of currently running and recently finished Pregel jobs without retrieving their results. + GetJobs(ctx context.Context) ([]*PregelJob, error) + // CancelJob - Cancel an ongoing Pregel execution + CancelJob(ctx context.Context, id string) error +} + +type PregelAlgorithm string + +const ( + PregelAlgorithmPageRank PregelAlgorithm = "pagerank" + PregelAlgorithmSingleSourceShortestPath PregelAlgorithm = "sssp" + PregelAlgorithmConnectedComponents PregelAlgorithm = "connectedcomponents" + PregelAlgorithmWeaklyConnectedComponents PregelAlgorithm = "wcc" + PregelAlgorithmStronglyConnectedComponents PregelAlgorithm = "scc" + PregelAlgorithmHyperlinkInducedTopicSearch PregelAlgorithm = "hits" + PregelAlgorithmEffectiveCloseness PregelAlgorithm = "effectivecloseness" + PregelAlgorithmLineRank PregelAlgorithm = "linerank" + PregelAlgorithmLabelPropagation PregelAlgorithm = "labelpropagation" + PregelAlgorithmSpeakerListenerLabelPropagation PregelAlgorithm = "slpa" +) + +type PregelJobOptions struct { + // Name of the algorithm + Algorithm PregelAlgorithm `json:"algorithm"` + // Name of a graph. Either this or the parameters VertexCollections and EdgeCollections are required. + // Please note that there are special sharding requirements for graphs in order to be used with Pregel. + GraphName string `json:"graphName,optional"` + // List of vertex collection names. Please note that there are special sharding requirements for collections in order to be used with Pregel. + VertexCollections []string `json:"vertexCollections,optional"` + // List of edge collection names. Please note that there are special sharding requirements for collections in order to be used with Pregel. 
+ EdgeCollections []string `json:"edgeCollections,optional"` + // General as well as algorithm-specific options. + Params map[string]interface{} `json:"params,optional"` +} + +type PregelJobState string + +const ( + // PregelJobStateNone - The Pregel run did not yet start. + PregelJobStateNone PregelJobState = "none" + // PregelJobStateLoading - The graph is loaded from the database into memory before the execution of the algorithm. + PregelJobStateLoading PregelJobState = "loading" + // PregelJobStateRunning - The algorithm is executing normally. + PregelJobStateRunning PregelJobState = "running" + // PregelJobStateStoring - The algorithm finished, but the results are still being written back into the collections. Occurs only if the store parameter is set to true. + PregelJobStateStoring PregelJobState = "storing" + // PregelJobStateDone - The execution is done. In version 3.7.1 and later, this means that storing is also done. + // In earlier versions, the results may not be written back into the collections yet. This event is announced in the server log (requires at least info log level for the pregel log topic). + PregelJobStateDone PregelJobState = "done" + // PregelJobStateCanceled - The execution was permanently canceled, either by the user or by an error. + PregelJobStateCanceled PregelJobState = "canceled" + // PregelJobStateFatalError - The execution has failed and cannot recover. + PregelJobStateFatalError PregelJobState = "fatal error" + // PregelJobStateInError - The execution is in an error state. This can be caused by DB-Servers being not reachable or being non-responsive. + // The execution might recover later, or switch to "canceled" if it was not able to recover successfully. + PregelJobStateInError PregelJobState = "in error" + // PregelJobStateRecovering - (currently unused): The execution is actively recovering and switches back to running if the recovery is successful. 
+ PregelJobStateRecovering PregelJobState = "recovering" +) + +type PregelJob struct { + // The ID of the Pregel job, as a string. + ID string `json:"id"` + // The algorithm used by the job. + Algorithm PregelAlgorithm `json:"algorithm,omitempty"` + // The date and time when the job was created. + Created time.Time `json:"created,omitempty"` + // The date and time when the job results expire. + // The expiration date is only meaningful for jobs that were completed, canceled or resulted in an error. + // Such jobs are cleaned up by the garbage collection when they reach their expiration date/time. + Started time.Time `json:"started,omitempty"` + // The TTL (time to live) value for the job results, specified in seconds. The TTL is used to calculate the expiration date for the job’s results. + TTL uint64 `json:"ttl,omitempty"` + // The state of the execution. + State PregelJobState `json:"state,omitempty"` + // The number of global supersteps executed. + Gss uint64 `json:"gss,omitempty"` + // The total runtime of the execution up to now (if the execution is still ongoing). + TotalRuntime float64 `json:"totalRuntime,omitempty"` + // The startup runtime of the execution. The startup time includes the data loading time and can be substantial. + StartupTime float64 `json:"startupTime,omitempty"` + // The algorithm execution time. Is shown when the computation started. + ComputationTime float64 `json:"computationTime,omitempty"` + // The time for storing the results if the job includes results storage. Is shown when the storing started. + StorageTime float64 `json:"storageTime,omitempty"` + // Computation time of each global super step. Is shown when the computation started. + GSSTimes []float64 `json:"gssTimes,omitempty"` + // This attribute is used by Programmable Pregel Algorithms (air, experimental). The value is only populated once the algorithm has finished. + Reports []map[string]interface{} `json:"reports,omitempty"` + // The total number of vertices processed. 
+ VertexCount uint64 `json:"vertexCount,omitempty"` + // The total number of edges processed. + EdgeCount uint64 `json:"edgeCount,omitempty"` + // UseMemoryMaps + UseMemoryMaps *bool `json:"useMemoryMaps,omitempty"` + // The Pregel run details. + // Available from 3.10 arangod version. + Detail *PregelRunDetails `json:"detail,omitempty"` +} + +// PregelRunDetails - The Pregel run details. +// Available from 3.10 arangod version. +type PregelRunDetails struct { + // The aggregated details of the full Pregel run. The values are totals of all the DB-Server. + AggregatedStatus *AggregatedStatus `json:"aggregatedStatus,omitempty"` + // The details of the Pregel for every DB-Server. Each object key is a DB-Server ID, and each value is a nested object similar to the aggregatedStatus attribute. + // In a single server deployment, there is only a single entry with an empty string as key. + WorkerStatus map[string]*AggregatedStatus `json:"workerStatus,omitempty"` +} + +// AggregatedStatus The aggregated details of the full Pregel run. The values are totals of all the DB-Server. +type AggregatedStatus struct { + // The time at which the status was measured. + TimeStamp time.Time `json:"timeStamp,omitempty"` + // The status of the in memory graph. + GraphStoreStatus *GraphStoreStatus `json:"graphStoreStatus,omitempty"` + // Information about the global supersteps. + AllGSSStatus *AllGSSStatus `json:"allGssStatus,omitempty"` +} + +// GraphStoreStatus The status of the in memory graph. +type GraphStoreStatus struct { + // The number of vertices that are loaded from the database into memory. + VerticesLoaded uint64 `json:"verticesLoaded,omitempty"` + // The number of edges that are loaded from the database into memory. + EdgesLoaded uint64 `json:"edgesLoaded,omitempty"` + // The number of bytes used in-memory for the loaded graph. 
+ MemoryBytesUsed uint64 `json:"memoryBytesUsed,omitempty"` + // The number of vertices that are written back to the database after the Pregel computation finished. It is only set if the store parameter is set to true. + VerticesStored uint64 `json:"verticesStored,omitempty"` +} + +// AllGSSStatus Information about the global supersteps. +type AllGSSStatus struct { + // A list of objects with details for each global superstep. + Items []GSSStatus `json:"items,omitempty"` +} + +// GSSStatus Information about the global superstep +type GSSStatus struct { + // The number of vertices that have been processed in this step. + VerticesProcessed uint64 `json:"verticesProcessed,omitempty"` + // The number of messages sent in this step. + MessagesSent uint64 `json:"messagesSent,omitempty"` + // The number of messages received in this step. + MessagesReceived uint64 `json:"messagesReceived,omitempty"` + // The number of bytes used in memory for the messages in this step. + MemoryBytesUsedForMessages uint64 `json:"memoryBytesUsedForMessages,omitempty"` +} diff --git a/vendor/github.com/arangodb/go-driver/database_pregel_impl.go b/vendor/github.com/arangodb/go-driver/database_pregel_impl.go new file mode 100644 index 00000000000..897da5d48a4 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_pregel_impl.go @@ -0,0 +1,116 @@ +// +// DISCLAIMER +// +// Copyright 2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package driver + +import ( + "context" + "path" + "strings" +) + +func (d *database) StartJob(ctx context.Context, options PregelJobOptions) (string, error) { + id := "" + + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/control_pregel")) + if err != nil { + return id, WithStack(err) + } + if _, err := req.SetBody(options); err != nil { + return id, WithStack(err) + } + + var rawResponse []byte + ctx = WithRawResponse(ctx, &rawResponse) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return id, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return id, WithStack(err) + } + + return strings.Trim(string(rawResponse), "\""), nil +} + +func (d *database) GetJob(ctx context.Context, id string) (*PregelJob, error) { + escapedId := pathEscape(id) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/control_pregel", escapedId)) + if err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data PregelJob + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + return &data, nil +} + +func (d *database) GetJobs(ctx context.Context) ([]*PregelJob, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/control_pregel")) + if err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + + var data []*PregelJob + responses, err := resp.ParseArrayBody() + if err != nil { + return nil, WithStack(err) + } + + for _, response := range responses { + var job PregelJob + if err := response.ParseBody("", &job); err != nil { + return nil, WithStack(err) + } + data = append(data, &job) + } + return 
data, nil +} + +func (d *database) CancelJob(ctx context.Context, id string) error { + escapedId := pathEscape(id) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/control_pregel", escapedId)) + if err != nil { + return WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/database_transactions.go b/vendor/github.com/arangodb/go-driver/database_transactions.go new file mode 100644 index 00000000000..7fed317bf3d --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_transactions.go @@ -0,0 +1,76 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Lars Maier
//

package driver

import (
	"context"
	"time"
)

// BeginTransactionOptions provides options for BeginTransaction call
type BeginTransactionOptions struct {
	// WaitForSync forces the transaction to be synchronized to disk before returning.
	WaitForSync bool
	// AllowImplicit allows reading from undeclared collections.
	AllowImplicit bool
	// LockTimeout is the maximum time to wait for required locks.
	LockTimeout time.Duration
	// MaxTransactionSize is the transaction size limit in bytes (RocksDB only).
	MaxTransactionSize uint64
}

// TransactionCollections is used to specify which collections are accessed by
// a transaction and how
type TransactionCollections struct {
	Read      []string `json:"read,omitempty"`
	Write     []string `json:"write,omitempty"`
	Exclusive []string `json:"exclusive,omitempty"`
}

// CommitTransactionOptions provides options for CommitTransaction. Currently unused
type CommitTransactionOptions struct{}

// AbortTransactionOptions provides options for AbortTransaction. Currently unused
type AbortTransactionOptions struct{}

// TransactionID identifies a transaction
type TransactionID string

// TransactionStatus describes the status of a transaction
type TransactionStatus string

const (
	TransactionRunning   TransactionStatus = "running"
	TransactionCommitted TransactionStatus = "committed"
	TransactionAborted   TransactionStatus = "aborted"
)

// TransactionStatusRecord provides insight about the status of transaction
type TransactionStatusRecord struct {
	Status TransactionStatus
}

// DatabaseStreamingTransactions provides access to the Streaming Transactions API
type DatabaseStreamingTransactions interface {
	BeginTransaction(ctx context.Context, cols TransactionCollections, opts *BeginTransactionOptions) (TransactionID, error)
	CommitTransaction(ctx context.Context, tid TransactionID, opts *CommitTransactionOptions) error
	AbortTransaction(ctx context.Context, tid TransactionID, opts *AbortTransactionOptions) error

	TransactionStatus(ctx context.Context, tid TransactionID) (TransactionStatusRecord, error)
}
diff --git
a/vendor/github.com/arangodb/go-driver/database_transactions_impl.go b/vendor/github.com/arangodb/go-driver/database_transactions_impl.go new file mode 100644 index 00000000000..813db1d58e6 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_transactions_impl.go @@ -0,0 +1,100 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Lars Maier +// + +package driver + +import ( + "context" + "path" +) + +type beginTransactionRequest struct { + WaitForSync bool `json:"waitForSync,omitempty"` + AllowImplicit bool `json:"allowImplicit,omitempty"` + LockTimeout float64 `json:"lockTimeout,omitempty"` + MaxTransactionSize uint64 `json:"maxTransactionSize,omitempty"` + Collections TransactionCollections `json:"collections,omitempty"` +} + +func (d *database) BeginTransaction(ctx context.Context, cols TransactionCollections, opts *BeginTransactionOptions) (TransactionID, error) { + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/transaction/begin")) + if err != nil { + return "", WithStack(err) + } + var reqBody beginTransactionRequest + if opts != nil { + reqBody.WaitForSync = opts.WaitForSync + reqBody.AllowImplicit = opts.AllowImplicit + reqBody.LockTimeout = opts.LockTimeout.Seconds() + } + reqBody.Collections = cols + if _, err := req.SetBody(reqBody); err != nil { + return "", 
WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return "", WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return "", WithStack(err) + } + var result struct { + TransactionID TransactionID `json:"id,omitempty"` + } + if err := resp.ParseBody("result", &result); err != nil { + return "", WithStack(err) + } + return result.TransactionID, nil +} + +func (d *database) requestForTransaction(ctx context.Context, tid TransactionID, method string) (TransactionStatusRecord, error) { + req, err := d.conn.NewRequest(method, path.Join(d.relPath(), "_api/transaction/", string(tid))) + if err != nil { + return TransactionStatusRecord{}, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return TransactionStatusRecord{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return TransactionStatusRecord{}, WithStack(err) + } + var result TransactionStatusRecord + if err := resp.ParseBody("result", &result); err != nil { + return TransactionStatusRecord{}, WithStack(err) + } + return result, nil +} + +func (d *database) CommitTransaction(ctx context.Context, tid TransactionID, opts *CommitTransactionOptions) error { + _, err := d.requestForTransaction(ctx, tid, "PUT") + return err +} + +func (d *database) AbortTransaction(ctx context.Context, tid TransactionID, opts *AbortTransactionOptions) error { + _, err := d.requestForTransaction(ctx, tid, "DELETE") + return err +} + +func (d *database) TransactionStatus(ctx context.Context, tid TransactionID) (TransactionStatusRecord, error) { + return d.requestForTransaction(ctx, tid, "GET") +} diff --git a/vendor/github.com/arangodb/go-driver/database_views.go b/vendor/github.com/arangodb/go-driver/database_views.go new file mode 100644 index 00000000000..83da7420a2b --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/database_views.go @@ -0,0 +1,58 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under 
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import "context"

// DatabaseViews provides access to all views in a single database.
// Views are only available in ArangoDB 3.4 and higher.
type DatabaseViews interface {
	// View opens a connection to an existing view within the database.
	// If no view with given name exists, a NotFoundError is returned.
	View(ctx context.Context, name string) (View, error)

	// ViewExists returns true if a view with given name exists within the database.
	ViewExists(ctx context.Context, name string) (bool, error)

	// Views returns a list of all views in the database.
	Views(ctx context.Context) ([]View, error)

	// CreateArangoSearchView creates a new view of type ArangoSearch,
	// with given name and options, and opens a connection to it.
	// If a view with given name already exists within the database, a ConflictError is returned.
	CreateArangoSearchView(ctx context.Context, name string, options *ArangoSearchViewProperties) (ArangoSearchView, error)

	// CreateArangoSearchAliasView creates ArangoSearch alias view with given name and options, and opens a connection to it.
	// If a view with given name already exists within the database, a ConflictError is returned.
	CreateArangoSearchAliasView(ctx context.Context, name string, options *ArangoSearchAliasViewProperties) (ArangoSearchViewAlias, error)
}

// ViewType is the type of view.
type ViewType string

const (
	// ViewTypeArangoSearch specifies an ArangoSearch view type.
	ViewTypeArangoSearch = ViewType("arangosearch")
	// ViewTypeArangoSearchAlias specifies an ArangoSearch view type alias.
	ViewTypeArangoSearchAlias = ViewType("search-alias")
)
diff --git a/vendor/github.com/arangodb/go-driver/database_views_impl.go b/vendor/github.com/arangodb/go-driver/database_views_impl.go
new file mode 100644
index 00000000000..2a0217d877c
--- /dev/null
+++ b/vendor/github.com/arangodb/go-driver/database_views_impl.go
@@ -0,0 +1,202 @@
//
// DISCLAIMER
//
// Copyright 2018 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import (
	"context"
	"path"
)

// viewInfo is the wire representation of a single view description.
type viewInfo struct {
	Name string   `json:"name,omitempty"`
	Type ViewType `json:"type,omitempty"`
	ArangoID
	ArangoError
}

// getViewResponse is the wire representation of GET /_api/view.
type getViewResponse struct {
	Result []viewInfo `json:"result,omitempty"`

	ArangoError
}

// View opens a connection to an existing view within the database.
// If no view with given name exists, a NotFoundError is returned.
+func (d *database) View(ctx context.Context, name string) (View, error) { + escapedName := pathEscape(name) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/view", escapedName)) + if err != nil { + return nil, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data viewInfo + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + view, err := newView(name, data.Type, d) + if err != nil { + return nil, WithStack(err) + } + return view, nil +} + +// ViewExists returns true if a view with given name exists within the database. +func (d *database) ViewExists(ctx context.Context, name string) (bool, error) { + escapedName := pathEscape(name) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/view", escapedName)) + if err != nil { + return false, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err == nil { + return true, nil + } else if IsNotFound(err) { + return false, nil + } else { + return false, WithStack(err) + } +} + +// Views returns a list of all views in the database. 
+func (d *database) Views(ctx context.Context) ([]View, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/view")) + if err != nil { + return nil, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data getViewResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]View, 0, len(data.Result)) + for _, info := range data.Result { + view, err := newView(info.Name, info.Type, d) + if err != nil { + return nil, WithStack(err) + } + result = append(result, view) + } + return result, nil +} + +// CreateArangoSearchView creates a new view of type ArangoSearch, +// with given name and options, and opens a connection to it. +// If a view with given name already exists within the database, a ConflictError is returned. +func (d *database) CreateArangoSearchView(ctx context.Context, name string, options *ArangoSearchViewProperties) (ArangoSearchView, error) { + input := struct { + Name string `json:"name"` + Type ViewType `json:"type"` + ArangoSearchViewProperties // `json:"properties"` + }{ + Name: name, + Type: ViewTypeArangoSearch, + } + if options != nil { + input.ArangoSearchViewProperties = *options + } + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/view")) + if err != nil { + return nil, WithStack(err) + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return nil, WithStack(err) + } + view, err := newView(name, input.Type, d) + if err != nil { + return nil, WithStack(err) + } + result, err := view.ArangoSearchView() + if err != nil { + return nil, WithStack(err) + } + + return result, nil +} + +// 
CreateArangoSearchAliasView creates a new view of type search-alias, +// with given name and options, and opens a connection to it. +// If a view with given name already exists within the database, a ConflictError is returned. +func (d *database) CreateArangoSearchAliasView(ctx context.Context, name string, options *ArangoSearchAliasViewProperties) (ArangoSearchViewAlias, error) { + input := struct { + Name string `json:"name"` + Type ViewType `json:"type"` + ArangoSearchAliasViewProperties + }{ + Name: name, + Type: ViewTypeArangoSearchAlias, + } + if options != nil { + input.ArangoSearchAliasViewProperties = *options + } + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/view")) + if err != nil { + return nil, WithStack(err) + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return nil, WithStack(err) + } + view, err := newView(name, input.Type, d) + if err != nil { + return nil, WithStack(err) + } + result, err := view.ArangoSearchViewAlias() + if err != nil { + return nil, WithStack(err) + } + + return result, nil +} diff --git a/vendor/github.com/arangodb/go-driver/doc.go b/vendor/github.com/arangodb/go-driver/doc.go new file mode 100644 index 00000000000..2e3f4851e3c --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/doc.go @@ -0,0 +1,44 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

/*
Package driver implements a Go driver for the ArangoDB database.

To get started, create a connection to the database and wrap a client around it.

	// Create an HTTP connection to the database
	conn, err := http.NewConnection(http.ConnectionConfig{
		Endpoints: []string{"http://localhost:8529"},
	})
	if err != nil {
		// Handle error
	}
	// Create a client
	c, err := driver.NewClient(driver.ClientConfig{
		Connection: conn,
	})
	if err != nil {
		// Handle error
	}
*/
package driver
diff --git a/vendor/github.com/arangodb/go-driver/edge.go b/vendor/github.com/arangodb/go-driver/edge.go
new file mode 100644
index 00000000000..5f82fd500c2
--- /dev/null
+++ b/vendor/github.com/arangodb/go-driver/edge.go
@@ -0,0 +1,31 @@
//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +// EdgeDocument is a minimal document for use in edge collection. +// You can use this in your own edge document structures completely use your own. +// If you use your own, make sure to include a `_from` and `_to` field. +type EdgeDocument struct { + From DocumentID `json:"_from,omitempty"` + To DocumentID `json:"_to,omitempty"` +} diff --git a/vendor/github.com/arangodb/go-driver/edge_collection_documents_impl.go b/vendor/github.com/arangodb/go-driver/edge_collection_documents_impl.go new file mode 100644 index 00000000000..b92ab6d1f82 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/edge_collection_documents_impl.go @@ -0,0 +1,596 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "fmt" + "path" + "reflect" + "strings" +) + +// DocumentExists checks if a document with given key exists in the collection. +func (c *edgeCollection) DocumentExists(ctx context.Context, key string) (bool, error) { + if result, err := c.rawCollection().DocumentExists(ctx, key); err != nil { + return false, WithStack(err) + } else { + return result, nil + } +} + +// ReadDocument reads a single document with given key from the collection. 
// The document data is stored into result, the document meta data is returned.
// If no document exists with given key, a NotFoundError is returned.
func (c *edgeCollection) ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error) {
	meta, _, err := c.readDocument(ctx, key, result)
	if err != nil {
		return DocumentMeta{}, WithStack(err)
	}
	return meta, nil
}

// readDocument fetches a single edge document by key and parses both its
// metadata and (optionally) its content from the "edge" field of the response.
// It also returns the context settings that were applied to the request so
// callers (e.g. ReadDocuments) can inspect flags such as Silent.
func (c *edgeCollection) readDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, contextSettings, error) {
	if err := validateKey(key); err != nil {
		return DocumentMeta{}, contextSettings{}, WithStack(err)
	}
	escapedKey := pathEscape(key)
	req, err := c.conn.NewRequest("GET", path.Join(c.relPath(), escapedKey))
	if err != nil {
		return DocumentMeta{}, contextSettings{}, WithStack(err)
	}
	cs := applyContextSettings(ctx, req)
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return DocumentMeta{}, contextSettings{}, WithStack(err)
	}
	if err := resp.CheckStatus(200); err != nil {
		return DocumentMeta{}, contextSettings{}, WithStack(err)
	}
	// Concerns: ReadDocuments reads multiple documents via multiple calls to readDocument (this function).
	// Currently with AllowDirtyReads the wasDirtyFlag is only set according to the last read request.
	loadContextResponseValues(cs, resp)
	// Parse metadata
	var meta DocumentMeta
	if err := resp.ParseBody("edge", &meta); err != nil {
		return DocumentMeta{}, contextSettings{}, WithStack(err)
	}
	// Parse result
	if result != nil {
		if err := resp.ParseBody("edge", result); err != nil {
			return meta, contextSettings{}, WithStack(err)
		}
	}
	return meta, cs, nil
}

// ReadDocuments reads multiple documents with given keys from the collection.
// The documents data is stored into elements of the given results slice,
// the documents meta data is returned.
// If no document exists with a given key, a NotFoundError is returned at its errors index.
+func (c *edgeCollection) ReadDocuments(ctx context.Context, keys []string, results interface{}) (DocumentMetaSlice, ErrorSlice, error) { + resultsVal := reflect.ValueOf(results) + switch resultsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("results data must be of kind Array, got %s", resultsVal.Kind())}) + } + if keys == nil { + return nil, nil, WithStack(InvalidArgumentError{Message: "keys nil"}) + } + resultCount := resultsVal.Len() + if len(keys) != resultCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", resultCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + metas := make(DocumentMetaSlice, resultCount) + errs := make(ErrorSlice, resultCount) + silent := false + for i := 0; i < resultCount; i++ { + result := resultsVal.Index(i).Addr() + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + key := keys[i] + meta, cs, err := c.readDocument(ctx, key, result.Interface()) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// CreateDocument creates a single document in the collection. +// The document data is loaded from the given document, the document meta data is returned. +// If the document data already contains a `_key` field, this will be used as key of the new document, +// otherwise a unique key is created. +// A ConflictError is returned when a `_key` field contains a duplicate key, other any other field violates an index constraint. +// To return the NEW document, prepare a context with `WithReturnNew`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. 
+func (c *edgeCollection) CreateDocument(ctx context.Context, document interface{}) (DocumentMeta, error) { + meta, _, err := c.createDocument(ctx, document) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *edgeCollection) createDocument(ctx context.Context, document interface{}) (DocumentMeta, contextSettings, error) { + if document == nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "document nil"}) + } + req, err := c.conn.NewRequest("POST", c.relPath()) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if _, err := req.SetBody(document); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("edge", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// CreateDocuments creates multiple documents in the collection. +// The document data is loaded from the given documents slice, the documents meta data is returned. +// If a documents element already contains a `_key` field, this will be used as key of the new document, +// otherwise a unique key is created. +// If a documents element contains a `_key` field with a duplicate key, other any other field violates an index constraint, +// a ConflictError is returned at its index in the errors slice. +// To return the NEW documents, prepare a context with `WithReturnNew`. 
The data argument passed to `WithReturnNew` must be +// a slice with the same number of entries as the `documents` slice. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If the create request itself fails or one of the arguments is invalid, an error is returned. +func (c *edgeCollection) CreateDocuments(ctx context.Context, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + metas := make(DocumentMetaSlice, documentCount) + errs := make(ErrorSlice, documentCount) + silent := false + for i := 0; i < documentCount; i++ { + doc := documentsVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + meta, cs, err := c.createDocument(ctx, doc.Interface()) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// UpdateDocument updates a single document with given key in the collection. +// The document meta data is returned. +// To return the NEW document, prepare a context with `WithReturnNew`. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. 
+func (c *edgeCollection) UpdateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, error) { + meta, _, err := c.updateDocument(ctx, key, update) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *edgeCollection) updateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if update == nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "update nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PATCH", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if _, err := req.SetBody(update); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(200, 201, 202); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("edge", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// UpdateDocuments updates multiple document with given keys in the collection. +// The updates are loaded from the given updates slice, the documents meta data are returned. 
+// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *edgeCollection) UpdateDocuments(ctx context.Context, keys []string, updates interface{}) (DocumentMetaSlice, ErrorSlice, error) { + updatesVal := reflect.ValueOf(updates) + switch updatesVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("updates data must be of kind Array, got %s", updatesVal.Kind())}) + } + updateCount := updatesVal.Len() + if keys != nil { + if len(keys) != updateCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", updateCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + metas := make(DocumentMetaSlice, updateCount) + errs := make(ErrorSlice, updateCount) + silent := false + for i := 0; i < updateCount; i++ { + update := updatesVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + var key string + if keys != nil { + key = keys[i] + } else { + var err error + key, err = getKeyFromDocument(update) + if err != nil { + errs[i] = err + continue + } + } + meta, cs, err := c.updateDocument(ctx, key, update.Interface()) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// ReplaceDocument replaces a single document with given key in the collection with the document given in the document argument. +// The document meta data is returned. 
+// To return the NEW document, prepare a context with `WithReturnNew`. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. +func (c *edgeCollection) ReplaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, error) { + meta, _, err := c.replaceDocument(ctx, key, document) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *edgeCollection) replaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if document == nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "document nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if _, err := req.SetBody(document); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("edge", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err 
!= nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// ReplaceDocuments replaces multiple documents with given keys in the collection with the documents given in the documents argument. +// The replacements are loaded from the given documents slice, the documents meta data are returned. +// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *edgeCollection) ReplaceDocuments(ctx context.Context, keys []string, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + if keys != nil { + if len(keys) != documentCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", documentCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + metas := make(DocumentMetaSlice, documentCount) + errs := make(ErrorSlice, documentCount) + silent := false + for i := 0; i < documentCount; i++ { + doc := documentsVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + var key string + if keys != nil { + key = keys[i] + } else { + var err error + key, err = getKeyFromDocument(doc) + if err != nil { + errs[i] = err + continue + } + } + meta, cs, err := c.replaceDocument(ctx, key, doc.Interface()) + if cs.Silent { + silent = 
true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// RemoveDocument removes a single document with given key from the collection. +// The document meta data is returned. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. +func (c *edgeCollection) RemoveDocument(ctx context.Context, key string) (DocumentMeta, error) { + meta, _, err := c.removeDocument(ctx, key) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *edgeCollection) removeDocument(ctx context.Context, key string) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("DELETE", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + if cs.ReturnOld != nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "ReturnOld is not supported when removing edges"}) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(200, 202); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("edge", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// RemoveDocuments removes multiple 
documents with given keys from the collection. +// The document meta data are returned. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *edgeCollection) RemoveDocuments(ctx context.Context, keys []string) (DocumentMetaSlice, ErrorSlice, error) { + keyCount := len(keys) + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + metas := make(DocumentMetaSlice, keyCount) + errs := make(ErrorSlice, keyCount) + silent := false + for i := 0; i < keyCount; i++ { + key := keys[i] + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + meta, cs, err := c.removeDocument(ctx, key) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// ImportDocuments imports one or more documents into the collection. +// The document data is loaded from the given documents argument, statistics are returned. +// The documents argument can be one of the following: +// - An array of structs: All structs will be imported as individual documents. +// - An array of maps: All maps will be imported as individual documents. +// To wait until all documents have been synced to disk, prepare a context with `WithWaitForSync`. +// To return details about documents that could not be imported, prepare a context with `WithImportDetails`. 
+func (c *edgeCollection) ImportDocuments(ctx context.Context, documents interface{}, options *ImportDocumentOptions) (ImportDocumentStatistics, error) { + stats, err := c.rawCollection().ImportDocuments(ctx, documents, options) + if err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + return stats, nil +} + +// getKeyFromDocument looks for a `_key` document in the given document and returns it. +func getKeyFromDocument(doc reflect.Value) (string, error) { + if doc.IsNil() { + return "", WithStack(InvalidArgumentError{Message: "Document is nil"}) + } + if doc.Kind() == reflect.Ptr { + doc = doc.Elem() + } + switch doc.Kind() { + case reflect.Struct: + structType := doc.Type() + fieldCount := structType.NumField() + for i := 0; i < fieldCount; i++ { + f := structType.Field(i) + tagParts := strings.Split(f.Tag.Get("json"), ",") + if tagParts[0] == "_key" { + // We found the _key field + keyVal := doc.Field(i) + return keyVal.String(), nil + } + } + return "", WithStack(InvalidArgumentError{Message: "Document contains no '_key' field"}) + case reflect.Map: + keyVal := doc.MapIndex(reflect.ValueOf("_key")) + if keyVal.IsNil() { + return "", WithStack(InvalidArgumentError{Message: "Document contains no '_key' entry"}) + } + return keyVal.String(), nil + default: + return "", WithStack(InvalidArgumentError{Message: fmt.Sprintf("Document must be struct or map. Got %s", doc.Kind())}) + } +} diff --git a/vendor/github.com/arangodb/go-driver/edge_collection_impl.go b/vendor/github.com/arangodb/go-driver/edge_collection_impl.go new file mode 100644 index 00000000000..d851825c487 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/edge_collection_impl.go @@ -0,0 +1,178 @@ +// +// DISCLAIMER +// +// Copyright 2017-2021 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// Author Tomasz Mielech +// + +package driver + +import ( + "context" + "path" +) + +// newEdgeCollection creates a new EdgeCollection implementation. +func newEdgeCollection(name string, g *graph) (Collection, error) { + if name == "" { + return nil, WithStack(InvalidArgumentError{Message: "name is empty"}) + } + if g == nil { + return nil, WithStack(InvalidArgumentError{Message: "g is nil"}) + } + return &edgeCollection{ + name: name, + g: g, + conn: g.db.conn, + }, nil +} + +type edgeCollection struct { + name string + g *graph + conn Connection +} + +// relPath creates the relative path to this edge collection (`_db//_api/gharial//edge/`) +func (c *edgeCollection) relPath() string { + escapedName := pathEscape(c.name) + return path.Join(c.g.relPath(), "edge", escapedName) +} + +// Name returns the name of the edge collection. +func (c *edgeCollection) Name() string { + return c.name +} + +// Database returns the database containing the collection. +func (c *edgeCollection) Database() Database { + return c.g.db +} + +// rawCollection returns a standard document implementation of Collection +// for this edge collection. +func (c *edgeCollection) rawCollection() Collection { + result, _ := newCollection(c.name, c.g.db) + return result +} + +// Status fetches the current status of the collection. 
+func (c *edgeCollection) Status(ctx context.Context) (CollectionStatus, error) { + result, err := c.rawCollection().Status(ctx) + if err != nil { + return CollectionStatus(0), WithStack(err) + } + return result, nil +} + +// Count fetches the number of document in the collection. +func (c *edgeCollection) Count(ctx context.Context) (int64, error) { + result, err := c.rawCollection().Count(ctx) + if err != nil { + return 0, WithStack(err) + } + return result, nil +} + +// Statistics returns the number of documents and additional statistical information about the collection. +func (c *edgeCollection) Statistics(ctx context.Context) (CollectionStatistics, error) { + result, err := c.rawCollection().Statistics(ctx) + if err != nil { + return CollectionStatistics{}, WithStack(err) + } + return result, nil +} + +// Revision fetches the revision ID of the collection. +// The revision ID is a server-generated string that clients can use to check whether data +// in a collection has changed since the last revision check. +func (c *edgeCollection) Revision(ctx context.Context) (string, error) { + result, err := c.rawCollection().Revision(ctx) + if err != nil { + return "", WithStack(err) + } + return result, nil +} + +// Properties fetches extended information about the collection. +func (c *edgeCollection) Properties(ctx context.Context) (CollectionProperties, error) { + result, err := c.rawCollection().Properties(ctx) + if err != nil { + return CollectionProperties{}, WithStack(err) + } + return result, nil +} + +// SetProperties changes properties of the collection. +func (c *edgeCollection) SetProperties(ctx context.Context, options SetCollectionPropertiesOptions) error { + if err := c.rawCollection().SetProperties(ctx, options); err != nil { + return WithStack(err) + } + return nil +} + +// Shards fetches shards information of the collection. 
+func (c *edgeCollection) Shards(ctx context.Context, details bool) (CollectionShards, error) { + result, err := c.rawCollection().Shards(ctx, details) + if err != nil { + return result, WithStack(err) + } + return result, nil +} + +// Load the collection into memory. +func (c *edgeCollection) Load(ctx context.Context) error { + if err := c.rawCollection().Load(ctx); err != nil { + return WithStack(err) + } + return nil +} + +// UnLoad the collection from memory. +func (c *edgeCollection) Unload(ctx context.Context) error { + if err := c.rawCollection().Unload(ctx); err != nil { + return WithStack(err) + } + return nil +} + +// Remove removes the entire collection. +// If the collection does not exist, a NotFoundError is returned. +func (c *edgeCollection) Remove(ctx context.Context) error { + req, err := c.conn.NewRequest("DELETE", c.relPath()) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return WithStack(err) + } + return nil +} + +// Truncate removes all documents from the collection, but leaves the indexes intact. +func (c *edgeCollection) Truncate(ctx context.Context) error { + if err := c.rawCollection().Truncate(ctx); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/edge_collection_indexes_impl.go b/vendor/github.com/arangodb/go-driver/edge_collection_indexes_impl.go new file mode 100644 index 00000000000..bbd2f84296e --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/edge_collection_indexes_impl.go @@ -0,0 +1,148 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// Index opens a connection to an existing index within the collection. +// If no index with given name exists, an NotFoundError is returned. +func (c *edgeCollection) Index(ctx context.Context, name string) (Index, error) { + result, err := c.rawCollection().Index(ctx, name) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// IndexExists returns true if an index with given name exists within the collection. +func (c *edgeCollection) IndexExists(ctx context.Context, name string) (bool, error) { + result, err := c.rawCollection().IndexExists(ctx, name) + if err != nil { + return false, WithStack(err) + } + return result, nil +} + +// Indexes returns a list of all indexes in the collection. +func (c *edgeCollection) Indexes(ctx context.Context) ([]Index, error) { + result, err := c.rawCollection().Indexes(ctx) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// Deprecated: since 3.10 version. Use ArangoSearch view instead. +// EnsureFullTextIndex creates a fulltext index in the collection, if it does not already exist. +// +// Fields is a slice of attribute names. Currently, the slice is limited to exactly one attribute. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). 
+func (c *edgeCollection) EnsureFullTextIndex(ctx context.Context, fields []string, options *EnsureFullTextIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureFullTextIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureGeoIndex creates a hash index in the collection, if it does not already exist. +// +// Fields is a slice with one or two attribute paths. If it is a slice with one attribute path location, +// then a geo-spatial index on all documents is created using location as path to the coordinates. +// The value of the attribute must be a slice with at least two double values. The slice must contain the latitude (first value) +// and the longitude (second value). All documents, which do not have the attribute path or with value that are not suitable, are ignored. +// If it is a slice with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created +// using latitude and longitude as paths the latitude and the longitude. The value of the attribute latitude and of the +// attribute longitude must a double. All documents, which do not have the attribute paths or which values are not suitable, are ignored. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *edgeCollection) EnsureGeoIndex(ctx context.Context, fields []string, options *EnsureGeoIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureGeoIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureHashIndex creates a hash index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). 
+func (c *edgeCollection) EnsureHashIndex(ctx context.Context, fields []string, options *EnsureHashIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureHashIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *edgeCollection) EnsurePersistentIndex(ctx context.Context, fields []string, options *EnsurePersistentIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsurePersistentIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureSkipListIndex creates a skiplist index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *edgeCollection) EnsureSkipListIndex(ctx context.Context, fields []string, options *EnsureSkipListIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureSkipListIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureTTLIndex creates a TLL collection, if it does not already exist. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). 
+func (c *edgeCollection) EnsureTTLIndex(ctx context.Context, field string, expireAfter int, options *EnsureTTLIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureTTLIndex(ctx, field, expireAfter, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureZKDIndex creates a ZKD index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *edgeCollection) EnsureZKDIndex(ctx context.Context, fields []string, options *EnsureZKDIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureZKDIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureInvertedIndex creates an inverted index in the collection, if it does not already exist. +// Available in ArangoDB 3.10 and later. +func (c *edgeCollection) EnsureInvertedIndex(ctx context.Context, options *InvertedIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureInvertedIndex(ctx, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} diff --git a/vendor/github.com/arangodb/go-driver/encode-go_1_8.go b/vendor/github.com/arangodb/go-driver/encode-go_1_8.go new file mode 100644 index 00000000000..a8f8f30095f --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/encode-go_1_8.go @@ -0,0 +1,39 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +//go:build go1.8 +// +build go1.8 + +package driver + +import "net/url" + +// pathEscape the given value for use in a URL path. +func pathEscape(s string) string { + return url.PathEscape(s) +} + +// pathUnescape unescapes the given value for use in a URL path. +func pathUnescape(s string) string { + r, _ := url.PathUnescape(s) + return r +} diff --git a/vendor/github.com/arangodb/go-driver/encode.go b/vendor/github.com/arangodb/go-driver/encode.go new file mode 100644 index 00000000000..a539eea8aa5 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/encode.go @@ -0,0 +1,39 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +//go:build !go1.8 +// +build !go1.8 + +package driver + +import "net/url" + +// Escape the given value for use in a URL path. 
+func pathEscape(s string) string { + return url.QueryEscape(s) +} + +// pathUnescape unescapes the given value for use in a URL path. +func pathUnescape(s string) string { + r, _ := url.QueryUnescape(s) + return r +} diff --git a/vendor/github.com/arangodb/go-driver/error.go b/vendor/github.com/arangodb/go-driver/error.go new file mode 100644 index 00000000000..576d89047ab --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/error.go @@ -0,0 +1,297 @@ +// +// DISCLAIMER +// +// Copyright 2017-2021 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "fmt" + "net" + "net/http" + "net/url" + "os" +) + +const ( + // general errors + ErrNotImplemented = 9 + ErrForbidden = 11 + ErrDisabled = 36 + + // HTTP error status codes + ErrHttpForbidden = 403 + ErrHttpInternal = 501 + + // Internal ArangoDB storage errors + ErrArangoReadOnly = 1004 + + // General ArangoDB storage errors + ErrArangoConflict = 1200 + ErrArangoDocumentNotFound = 1202 + ErrArangoDataSourceNotFound = 1203 + ErrArangoUniqueConstraintViolated = 1210 + ErrArangoDatabaseNameInvalid = 1229 + + // ArangoDB cluster errors + ErrClusterLeadershipChallengeOngoing = 1495 + ErrClusterNotLeader = 1496 + + // User management errors + ErrUserDuplicate = 1702 +) + +// ArangoError is a Go error with arangodb specific error information. 
+type ArangoError struct { + HasError bool `json:"error"` + Code int `json:"code"` + ErrorNum int `json:"errorNum"` + ErrorMessage string `json:"errorMessage"` +} + +// Error returns the error message of an ArangoError. +func (ae ArangoError) Error() string { + if ae.ErrorMessage != "" { + return ae.ErrorMessage + } + return fmt.Sprintf("ArangoError: Code %d, ErrorNum %d", ae.Code, ae.ErrorNum) +} + +// Timeout returns true when the given error is a timeout error. +func (ae ArangoError) Timeout() bool { + return ae.HasError && (ae.Code == http.StatusRequestTimeout || ae.Code == http.StatusGatewayTimeout) +} + +// Temporary returns true when the given error is a temporary error. +func (ae ArangoError) Temporary() bool { + return ae.HasError && ae.Code == http.StatusServiceUnavailable +} + +// newArangoError creates a new ArangoError with given values. +func newArangoError(code, errorNum int, errorMessage string) error { + return ArangoError{ + HasError: true, + Code: code, + ErrorNum: errorNum, + ErrorMessage: errorMessage, + } +} + +// IsArangoError returns true when the given error is an ArangoError. +func IsArangoError(err error) bool { + ae, ok := Cause(err).(ArangoError) + return ok && ae.HasError +} + +// AsArangoError returns true when the given error is an ArangoError together with an object. +func AsArangoError(err error) (ArangoError, bool) { + ae, ok := Cause(err).(ArangoError) + if ok { + return ae, true + } else { + return ArangoError{}, false + } +} + +// IsArangoErrorWithCode returns true when the given error is an ArangoError and its Code field is equal to the given code. +func IsArangoErrorWithCode(err error, code int) bool { + ae, ok := Cause(err).(ArangoError) + return ok && ae.Code == code +} + +// IsArangoErrorWithErrorNum returns true when the given error is an ArangoError and its ErrorNum field is equal to one of the given numbers. 
+func IsArangoErrorWithErrorNum(err error, errorNum ...int) bool { + ae, ok := Cause(err).(ArangoError) + if !ok { + return false + } + for _, x := range errorNum { + if ae.ErrorNum == x { + return true + } + } + return false +} + +// IsInvalidRequest returns true if the given error is an ArangoError with code 400, indicating an invalid request. +func IsInvalidRequest(err error) bool { + return IsArangoErrorWithCode(err, http.StatusBadRequest) + +} + +// IsUnauthorized returns true if the given error is an ArangoError with code 401, indicating an unauthorized request. +func IsUnauthorized(err error) bool { + return IsArangoErrorWithCode(err, http.StatusUnauthorized) +} + +// IsForbidden returns true if the given error is an ArangoError with code 403, indicating a forbidden request. +func IsForbidden(err error) bool { + return IsArangoErrorWithCode(err, http.StatusForbidden) +} + +// Deprecated: Use IsNotFoundGeneral instead. +// For ErrArangoDocumentNotFound error there is a chance that we get a different HTTP code if the API requires an existing document as input, which is not found. +// +// IsNotFound returns true if the given error is an ArangoError with code 404, indicating a object not found. +func IsNotFound(err error) bool { + return IsArangoErrorWithCode(err, http.StatusNotFound) || + IsArangoErrorWithErrorNum(err, ErrArangoDocumentNotFound, ErrArangoDataSourceNotFound) +} + +// IsNotFoundGeneral returns true if the given error is an ArangoError with code 404, indicating an object is not found. +func IsNotFoundGeneral(err error) bool { + return IsArangoErrorWithCode(err, http.StatusNotFound) +} + +// IsDataSourceOrDocumentNotFound returns true if the given error is an Arango storage error, indicating an object is not found. 
+func IsDataSourceOrDocumentNotFound(err error) bool { + return IsArangoErrorWithCode(err, http.StatusNotFound) && + IsArangoErrorWithErrorNum(err, ErrArangoDocumentNotFound, ErrArangoDataSourceNotFound) +} + +// IsConflict returns true if the given error is an ArangoError with code 409, indicating a conflict. +func IsConflict(err error) bool { + return IsArangoErrorWithCode(err, http.StatusConflict) || IsArangoErrorWithErrorNum(err, ErrUserDuplicate) +} + +// IsPreconditionFailed returns true if the given error is an ArangoError with code 412, indicating a failed precondition. +func IsPreconditionFailed(err error) bool { + return IsArangoErrorWithCode(err, http.StatusPreconditionFailed) || + IsArangoErrorWithErrorNum(err, ErrArangoConflict, ErrArangoUniqueConstraintViolated) +} + +// IsNoLeader returns true if the given error is an ArangoError with code 503 error number 1496. +func IsNoLeader(err error) bool { + return IsArangoErrorWithCode(err, http.StatusServiceUnavailable) && IsArangoErrorWithErrorNum(err, ErrClusterNotLeader) +} + +// IsNoLeaderOrOngoing return true if the given error is an ArangoError with code 503 and error number 1496 or 1495 +func IsNoLeaderOrOngoing(err error) bool { + return IsArangoErrorWithCode(err, http.StatusServiceUnavailable) && + IsArangoErrorWithErrorNum(err, ErrClusterLeadershipChallengeOngoing, ErrClusterNotLeader) +} + +// InvalidArgumentError is returned when a go function argument is invalid. +type InvalidArgumentError struct { + Message string +} + +// Error implements the error interface for InvalidArgumentError. +func (e InvalidArgumentError) Error() string { + return e.Message +} + +// IsInvalidArgument returns true if the given error is an InvalidArgumentError. +func IsInvalidArgument(err error) bool { + _, ok := Cause(err).(InvalidArgumentError) + return ok +} + +// NoMoreDocumentsError is returned by Cursor's, when an attempt is made to read documents when there are no more. 
+type NoMoreDocumentsError struct{} + +// Error implements the error interface for NoMoreDocumentsError. +func (e NoMoreDocumentsError) Error() string { + return "no more documents" +} + +// IsNoMoreDocuments returns true if the given error is an NoMoreDocumentsError. +func IsNoMoreDocuments(err error) bool { + _, ok := Cause(err).(NoMoreDocumentsError) + return ok +} + +// A ResponseError is returned when a request was completely written to a server, but +// the server did not respond, or some kind of network error occurred during the response. +type ResponseError struct { + Err error +} + +// Error returns the Error() result of the underlying error. +func (e *ResponseError) Error() string { + return e.Err.Error() +} + +// IsResponse returns true if the given error is (or is caused by) a ResponseError. +func IsResponse(err error) bool { + return isCausedBy(err, func(e error) bool { _, ok := e.(*ResponseError); return ok }) +} + +// IsCanceled returns true if the given error is the result on a cancelled context. +func IsCanceled(err error) bool { + return isCausedBy(err, func(e error) bool { return e == context.Canceled }) +} + +// IsTimeout returns true if the given error is the result on a deadline that has been exceeded. +func IsTimeout(err error) bool { + return isCausedBy(err, func(e error) bool { return e == context.DeadlineExceeded }) +} + +// isCausedBy returns true if the given error returns true on the given predicate, +// unwrapping various standard library error wrappers. 
+func isCausedBy(err error, p func(error) bool) bool { + if p(err) { + return true + } + err = Cause(err) + for { + if p(err) { + return true + } else if err == nil { + return false + } + if xerr, ok := err.(*ResponseError); ok { + err = xerr.Err + } else if xerr, ok := err.(*url.Error); ok { + err = xerr.Err + } else if xerr, ok := err.(*net.OpError); ok { + err = xerr.Err + } else if xerr, ok := err.(*os.SyscallError); ok { + err = xerr.Err + } else { + return false + } + } +} + +var ( + // WithStack is called on every return of an error to add stacktrace information to the error. + // When setting this function, also set the Cause function. + // The interface of this function is compatible with functions in github.com/pkg/errors. + WithStack = func(err error) error { return err } + // Cause is used to get the root cause of the given error. + // The interface of this function is compatible with functions in github.com/pkg/errors. + Cause = func(err error) error { return err } +) + +// ErrorSlice is a slice of errors +type ErrorSlice []error + +// FirstNonNil returns the first error in the slice that is not nil. +// If all errors in the slice are nil, nil is returned. +func (l ErrorSlice) FirstNonNil() error { + for _, e := range l { + if e != nil { + return e + } + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/foxx.go b/vendor/github.com/arangodb/go-driver/foxx.go new file mode 100644 index 00000000000..0ea203c5c58 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/foxx.go @@ -0,0 +1,85 @@ +// +// DISCLAIMER +// +// Copyright 2020 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Tomasz Mielech +// + +package driver + +import ( + "context" + "io/ioutil" + "net/http" + "strconv" +) + +// InstallFoxxService installs a new service at a given mount path. +func (c *client) InstallFoxxService(ctx context.Context, zipFile string, options FoxxCreateOptions) error { + + req, err := c.conn.NewRequest("POST", "_api/foxx") + if err != nil { + return WithStack(err) + } + + req.SetHeader("Content-Type", "application/zip") + req.SetQuery("mount", options.Mount) + + bytes, err := ioutil.ReadFile(zipFile) + if err != nil { + return WithStack(err) + } + + _, err = req.SetBody(bytes) + if err != nil { + return WithStack(err) + } + + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + + if err := resp.CheckStatus(http.StatusCreated); err != nil { + return WithStack(err) + } + + return nil +} + +// UninstallFoxxService uninstalls service at a given mount path. 
+func (c *client) UninstallFoxxService(ctx context.Context, options FoxxDeleteOptions) error { + req, err := c.conn.NewRequest("DELETE", "_api/foxx/service") + if err != nil { + return WithStack(err) + } + + req.SetQuery("mount", options.Mount) + req.SetQuery("teardown", strconv.FormatBool(options.Teardown)) + + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + + if err := resp.CheckStatus(http.StatusNoContent); err != nil { + return WithStack(err) + } + + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/graph.go b/vendor/github.com/arangodb/go-driver/graph.go new file mode 100644 index 00000000000..7816ae03faf --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/graph.go @@ -0,0 +1,80 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// Graph provides access to all edge & vertex collections of a single graph in a database. +type Graph interface { + // Name returns the name of the graph. + Name() string + + // Remove removes the entire graph. + // If the graph does not exist, a NotFoundError is returned. + Remove(ctx context.Context) error + + // IsSmart returns true of smart is smart. 
In case of Community Edition it is always false + IsSmart() bool + + // IsSatellite returns true of smart is satellite. In case of Community Edition it is always false + IsSatellite() bool + + // IsDisjoint return information if graph have isDisjoint flag set to true + IsDisjoint() bool + + // GraphEdgeCollections Edge collection functions + GraphEdgeCollections + + // GraphVertexCollections Vertex collection functions + GraphVertexCollections + + // ID returns the id of the graph. + ID() string + + // Key returns the key of the graph. + Key() DocumentID + + // Rev returns the revision of the graph. + Rev() string + + // EdgeDefinitions returns the edge definitions of the graph. + EdgeDefinitions() []EdgeDefinition + + // SmartGraphAttribute returns the attributes of a smart graph if there are any. + SmartGraphAttribute() string + + // MinReplicationFactor returns the minimum replication factor for the graph. + MinReplicationFactor() int + + // NumberOfShards returns the number of shards for the graph. + NumberOfShards() int + + // OrphanCollections returns the orphan collcetions of the graph. + OrphanCollections() []string + + // ReplicationFactor returns the current replication factor. + ReplicationFactor() int + + // WriteConcern returns the write concern setting of the graph. + WriteConcern() int +} diff --git a/vendor/github.com/arangodb/go-driver/graph_edge_collections.go b/vendor/github.com/arangodb/go-driver/graph_edge_collections.go new file mode 100644 index 00000000000..928fd135f05 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/graph_edge_collections.go @@ -0,0 +1,66 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// GraphEdgeCollections provides access to all edge collections of a single graph in a database. +type GraphEdgeCollections interface { + // EdgeCollection opens a connection to an existing edge-collection within the graph. + // If no edge-collection with given name exists, an NotFoundError is returned. + // Note: When calling Remove on the returned Collection, the collection is removed from the graph. Not from the database. + EdgeCollection(ctx context.Context, name string) (Collection, VertexConstraints, error) + + // EdgeCollectionExists returns true if an edge-collection with given name exists within the graph. + EdgeCollectionExists(ctx context.Context, name string) (bool, error) + + // EdgeCollections returns all edge collections of this graph + // Note: When calling Remove on any of the returned Collection's, the collection is removed from the graph. Not from the database. + EdgeCollections(ctx context.Context) ([]Collection, []VertexConstraints, error) + + // CreateEdgeCollection creates an edge collection in the graph. + // collection: The name of the edge collection to be used. + // constraints.From: contains the names of one or more vertex collections that can contain source vertices. + // constraints.To: contains the names of one or more edge collections that can contain target vertices. 
+ CreateEdgeCollection(ctx context.Context, collection string, constraints VertexConstraints) (Collection, error) + + // CreateEdgeCollectionWithOptions creates an edge collection in the graph with additional options + CreateEdgeCollectionWithOptions(ctx context.Context, collection string, constraints VertexConstraints, options CreateEdgeCollectionOptions) (Collection, error) + + // SetVertexConstraints modifies the vertex constraints of an existing edge collection in the graph. + SetVertexConstraints(ctx context.Context, collection string, constraints VertexConstraints) error +} + +// VertexConstraints limit the vertex collection you can use in an edge. +type VertexConstraints struct { + // From contains names of vertex collection that are allowed to be used in the From part of an edge. + From []string + // To contains names of vertex collection that are allowed to be used in the To part of an edge. + To []string +} + +// CreateEdgeCollectionOptions contains optional parameters for creating a new edge collection +type CreateEdgeCollectionOptions struct { + // Satellites contains an array of collection names that will be used to create SatelliteCollections for a Hybrid (Disjoint) SmartGraph (Enterprise Edition only) + Satellites []string `json:"satellites,omitempty"` +} diff --git a/vendor/github.com/arangodb/go-driver/graph_edge_collections_impl.go b/vendor/github.com/arangodb/go-driver/graph_edge_collections_impl.go new file mode 100644 index 00000000000..81076f217d4 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/graph_edge_collections_impl.go @@ -0,0 +1,239 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +type graphDefinition struct { + Name string `json:"name"` + IsSmart bool `json:"isSmart"` + IsSatellite bool `json:"isSatellite"` + IsDisjoint bool `json:"isDisjoint,omitempty"` + + EdgeDefinitions []EdgeDefinition `json:"edgeDefinitions,omitempty"` + + NumberOfShards int `json:"numberOfShards,omitempty"` + OrphanCollections []string `json:"orphanCollections,omitempty"` + + // Deprecated: use 'WriteConcern' instead. + MinReplicationFactor int `json:"minReplicationFactor,omitempty"` + WriteConcern int `json:"writeConcern,omitempty"` + + // ReplicationFactor is the number of replication factor that is used for every collection within this graph. + // Cannot be modified later. + ReplicationFactor graphReplicationFactor `json:"replicationFactor,omitempty"` + + // This field must be set to the attribute that will be used for sharding or smart graphs. + // All vertices are required to have this attribute set. Edges derive the attribute from their connected vertices. + // This requires ArangoDB Enterprise Edition. 
+ SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"` + + Initial *string `json:"initial,omitempty"` + InitialCid int `json:"initialCid,omitempty"` + ID string `json:"_id"` + Key DocumentID `json:"_key"` + Rev string `json:"_rev"` +} + +type getGraphResponse struct { + Graph graphDefinition `json:"graph"` + ArangoError +} + +// EdgeCollection opens a connection to an existing edge-collection within the graph. +// If no edge-collection with given name exists, an NotFoundError is returned. +func (g *graph) EdgeCollection(ctx context.Context, name string) (Collection, VertexConstraints, error) { + req, err := g.conn.NewRequest("GET", g.relPath()) + if err != nil { + return nil, VertexConstraints{}, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, VertexConstraints{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, VertexConstraints{}, WithStack(err) + } + var data getGraphResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, VertexConstraints{}, WithStack(err) + } + for _, n := range data.Graph.EdgeDefinitions { + if n.Collection == name { + ec, err := newEdgeCollection(name, g) + if err != nil { + return nil, VertexConstraints{}, WithStack(err) + } + constraints := VertexConstraints{ + From: n.From, + To: n.To, + } + return ec, constraints, nil + } + } + return nil, VertexConstraints{}, WithStack(newArangoError(404, 0, "not found")) +} + +// EdgeCollectionExists returns true if an edge-collection with given name exists within the graph. 
+func (g *graph) EdgeCollectionExists(ctx context.Context, name string) (bool, error) { + req, err := g.conn.NewRequest("GET", g.relPath()) + if err != nil { + return false, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return false, WithStack(err) + } + var data getGraphResponse + if err := resp.ParseBody("", &data); err != nil { + return false, WithStack(err) + } + for _, n := range data.Graph.EdgeDefinitions { + if n.Collection == name { + return true, nil + } + } + return false, nil +} + +// EdgeCollections returns all edge collections of this graph +func (g *graph) EdgeCollections(ctx context.Context) ([]Collection, []VertexConstraints, error) { + req, err := g.conn.NewRequest("GET", g.relPath()) + if err != nil { + return nil, nil, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, nil, WithStack(err) + } + var data getGraphResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, nil, WithStack(err) + } + result := make([]Collection, 0, len(data.Graph.EdgeDefinitions)) + constraints := make([]VertexConstraints, 0, len(data.Graph.EdgeDefinitions)) + for _, n := range data.Graph.EdgeDefinitions { + ec, err := newEdgeCollection(n.Collection, g) + if err != nil { + return nil, nil, WithStack(err) + } + result = append(result, ec) + constraints = append(constraints, VertexConstraints{ + From: n.From, + To: n.To, + }) + } + return result, constraints, nil +} + +// collection: The name of the edge collection to be used. +// from: contains the names of one or more vertex collections that can contain source vertices. +// to: contains the names of one or more edge collections that can contain target vertices. 
+func (g *graph) CreateEdgeCollection(ctx context.Context, collection string, constraints VertexConstraints) (Collection, error) { + req, err := g.conn.NewRequest("POST", path.Join(g.relPath(), "edge")) + if err != nil { + return nil, WithStack(err) + } + input := EdgeDefinition{ + Collection: collection, + From: constraints.From, + To: constraints.To, + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, WithStack(err) + } + ec, err := newEdgeCollection(collection, g) + if err != nil { + return nil, WithStack(err) + } + return ec, nil +} + +// CreateEdgeCollectionWithOptions creates an edge collection in the graph with additional options +func (g *graph) CreateEdgeCollectionWithOptions(ctx context.Context, collection string, constraints VertexConstraints, options CreateEdgeCollectionOptions) (Collection, error) { + req, err := g.conn.NewRequest("POST", path.Join(g.relPath(), "edge")) + if err != nil { + return nil, WithStack(err) + } + input := EdgeDefinition{ + Collection: collection, + From: constraints.From, + To: constraints.To, + Options: options, + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, WithStack(err) + } + ec, err := newEdgeCollection(collection, g) + if err != nil { + return nil, WithStack(err) + } + return ec, nil +} + +// SetVertexConstraints modifies the vertex constraints of an existing edge collection in the graph. 
+func (g *graph) SetVertexConstraints(ctx context.Context, collection string, constraints VertexConstraints) error { + req, err := g.conn.NewRequest("PUT", path.Join(g.relPath(), "edge", collection)) + if err != nil { + return WithStack(err) + } + input := EdgeDefinition{ + Collection: collection, + From: constraints.From, + To: constraints.To, + } + if _, err := req.SetBody(input); err != nil { + return WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/graph_impl.go b/vendor/github.com/arangodb/go-driver/graph_impl.go new file mode 100644 index 00000000000..2f1ec81511d --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/graph_impl.go @@ -0,0 +1,139 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// newGraph creates a new Graph implementation. 
+func newGraph(input graphDefinition, db *database) (Graph, error) { + if input.Name == "" { + return nil, WithStack(InvalidArgumentError{Message: "name is empty"}) + } + if db == nil { + return nil, WithStack(InvalidArgumentError{Message: "db is nil"}) + } + return &graph{ + input: input, + db: db, + conn: db.conn, + }, nil +} + +type graph struct { + input graphDefinition + db *database + conn Connection +} + +func (g *graph) IsSmart() bool { + return g.input.IsSmart +} + +func (g *graph) IsDisjoint() bool { + return g.input.IsDisjoint +} + +func (g *graph) IsSatellite() bool { + return g.input.IsSatellite +} + +// relPath creates the relative path to this graph (`_db//_api/gharial/`) +func (g *graph) relPath() string { + escapedName := pathEscape(g.Name()) + return path.Join(g.db.relPath(), "_api", "gharial", escapedName) +} + +// Name returns the name of the graph. +func (g *graph) Name() string { + return g.input.Name +} + +// ID returns the id of the graph. +func (g *graph) ID() string { + return g.input.ID +} + +// Key returns the key of the graph. +func (g *graph) Key() DocumentID { + return g.input.Key +} + +// Key returns the key of the graph. +func (g *graph) Rev() string { + return g.input.Rev +} + +// EdgeDefinitions returns the edge definitions of the graph. +func (g *graph) EdgeDefinitions() []EdgeDefinition { + return g.input.EdgeDefinitions +} + +// IsSmart returns the isSmart setting of the graph. +func (g *graph) SmartGraphAttribute() string { + return g.input.SmartGraphAttribute +} + +// MinReplicationFactor returns the minimum replication factor for the graph. +func (g *graph) MinReplicationFactor() int { + return g.input.MinReplicationFactor +} + +// NumberOfShards returns the number of shards for the graph. +func (g *graph) NumberOfShards() int { + return g.input.NumberOfShards +} + +// OrphanCollections returns the orphan collcetions of the graph. 
+func (g *graph) OrphanCollections() []string { + return g.input.OrphanCollections +} + +// ReplicationFactor returns the current replication factor. +func (g *graph) ReplicationFactor() int { + return int(g.input.ReplicationFactor) +} + +// WriteConcern returns the write concern setting of the graph. +func (g *graph) WriteConcern() int { + return g.input.WriteConcern +} + +// Remove removes the entire graph. +// If the graph does not exist, a NotFoundError is returned. +func (g *graph) Remove(ctx context.Context) error { + req, err := g.conn.NewRequest("DELETE", g.relPath()) + if err != nil { + return WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/graph_vertex_collections.go b/vendor/github.com/arangodb/go-driver/graph_vertex_collections.go new file mode 100644 index 00000000000..94a9872058c --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/graph_vertex_collections.go @@ -0,0 +1,53 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// GraphVertexCollections provides access to all vertex collections of a single graph in a database. 
+type GraphVertexCollections interface { + // VertexCollection opens a connection to an existing vertex-collection within the graph. + // If no vertex-collection with given name exists, an NotFoundError is returned. + // Note: When calling Remove on the returned Collection, the collection is removed from the graph. Not from the database. + VertexCollection(ctx context.Context, name string) (Collection, error) + + // VertexCollectionExists returns true if an vertex-collection with given name exists within the graph. + VertexCollectionExists(ctx context.Context, name string) (bool, error) + + // VertexCollections returns all vertex collections of this graph + // Note: When calling Remove on any of the returned Collection's, the collection is removed from the graph. Not from the database. + VertexCollections(ctx context.Context) ([]Collection, error) + + // CreateVertexCollection creates a vertex collection in the graph. + // collection: The name of the vertex collection to be used. + CreateVertexCollection(ctx context.Context, collection string) (Collection, error) + + // CreateVertexCollectionWithOptions creates a vertex collection in the graph + CreateVertexCollectionWithOptions(ctx context.Context, collection string, options CreateVertexCollectionOptions) (Collection, error) +} + +// CreateVertexCollectionOptions contains optional parameters for creating a new vertex collection +type CreateVertexCollectionOptions struct { + // Satellites contains an array of collection names that will be used to create SatelliteCollections for a Hybrid (Disjoint) SmartGraph (Enterprise Edition only) + Satellites []string `json:"satellites,omitempty"` +} diff --git a/vendor/github.com/arangodb/go-driver/graph_vertex_collections_impl.go b/vendor/github.com/arangodb/go-driver/graph_vertex_collections_impl.go new file mode 100644 index 00000000000..03f4cf7cb29 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/graph_vertex_collections_impl.go @@ -0,0 +1,176 @@ +// +// DISCLAIMER 
+//
+// Copyright 2017 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package driver
+
+import (
+	"context"
+	"path"
+)
+
+type listVertexCollectionResponse struct {
+	Collections []string `json:"collections,omitempty"`
+	ArangoError
+}
+
+// VertexCollection opens a connection to an existing vertex-collection within the graph.
+// If no vertex-collection with given name exists, a NotFoundError is returned.
+func (g *graph) VertexCollection(ctx context.Context, name string) (Collection, error) {
+	req, err := g.conn.NewRequest("GET", path.Join(g.relPath(), "vertex"))
+	if err != nil {
+		return nil, WithStack(err)
+	}
+	resp, err := g.conn.Do(ctx, req)
+	if err != nil {
+		return nil, WithStack(err)
+	}
+	if err := resp.CheckStatus(200); err != nil {
+		return nil, WithStack(err)
+	}
+	var data listVertexCollectionResponse
+	if err := resp.ParseBody("", &data); err != nil {
+		return nil, WithStack(err)
+	}
+	for _, n := range data.Collections {
+		if n == name {
+			ec, err := newVertexCollection(name, g)
+			if err != nil {
+				return nil, WithStack(err)
+			}
+			return ec, nil
+		}
+	}
+	return nil, WithStack(newArangoError(404, 0, "not found"))
+}
+
+// VertexCollectionExists returns true if a vertex-collection with given name exists within the graph.
+func (g *graph) VertexCollectionExists(ctx context.Context, name string) (bool, error) {
+	req, err := g.conn.NewRequest("GET", path.Join(g.relPath(), "vertex"))
+	if err != nil {
+		return false, WithStack(err)
+	}
+	resp, err := g.conn.Do(ctx, req)
+	if err != nil {
+		return false, WithStack(err)
+	}
+	if err := resp.CheckStatus(200); err != nil {
+		return false, WithStack(err)
+	}
+	var data listVertexCollectionResponse
+	if err := resp.ParseBody("", &data); err != nil {
+		return false, WithStack(err)
+	}
+	for _, n := range data.Collections {
+		if n == name {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// VertexCollections returns all vertex collections of this graph
+func (g *graph) VertexCollections(ctx context.Context) ([]Collection, error) {
+	req, err := g.conn.NewRequest("GET", path.Join(g.relPath(), "vertex"))
+	if err != nil {
+		return nil, WithStack(err)
+	}
+	resp, err := g.conn.Do(ctx, req)
+	if err != nil {
+		return nil, WithStack(err)
+	}
+	if err := resp.CheckStatus(200); err != nil {
+		return nil, WithStack(err)
+	}
+	var data listVertexCollectionResponse
+	if err := resp.ParseBody("", &data); err != nil {
+		return nil, WithStack(err)
+	}
+	result := make([]Collection, 0, len(data.Collections))
+	for _, name := range data.Collections {
+		ec, err := newVertexCollection(name, g)
+		if err != nil {
+			return nil, WithStack(err)
+		}
+		result = append(result, ec)
+	}
+	return result, nil
+}
+
+// CreateVertexCollection creates a vertex collection in the graph.
+// collection: The name of the vertex collection to be used.
+// The collection is added as a vertex collection of the graph.
+func (g *graph) CreateVertexCollection(ctx context.Context, collection string) (Collection, error) { + req, err := g.conn.NewRequest("POST", path.Join(g.relPath(), "vertex")) + if err != nil { + return nil, WithStack(err) + } + input := struct { + Collection string `json:"collection,omitempty"` + }{ + Collection: collection, + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, WithStack(err) + } + ec, err := newVertexCollection(collection, g) + if err != nil { + return nil, WithStack(err) + } + return ec, nil +} + +// CreateVertexCollectionWithOptions creates a vertex collection in the graph +func (g *graph) CreateVertexCollectionWithOptions(ctx context.Context, collection string, options CreateVertexCollectionOptions) (Collection, error) { + req, err := g.conn.NewRequest("POST", path.Join(g.relPath(), "vertex")) + if err != nil { + return nil, WithStack(err) + } + input := struct { + Collection string `json:"collection,omitempty"` + Options CreateVertexCollectionOptions `json:"options,omitempty"` + }{ + Collection: collection, + Options: options, + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, WithStack(err) + } + ec, err := newVertexCollection(collection, g) + if err != nil { + return nil, WithStack(err) + } + return ec, nil +} diff --git a/vendor/github.com/arangodb/go-driver/http/authentication.go b/vendor/github.com/arangodb/go-driver/http/authentication.go new file mode 100644 index 00000000000..71b1c806c45 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/http/authentication.go @@ -0,0 +1,279 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "sync" + "sync/atomic" + + driver "github.com/arangodb/go-driver" +) + +// ErrAuthenticationNotChanged is returned when authentication is not changed. +var ErrAuthenticationNotChanged = errors.New("authentication not changed") + +// Authentication implements a kind of authentication. +type httpAuthentication interface { + // Prepare is called before the first request of the given connection is made. + Prepare(ctx context.Context, conn driver.Connection) error + + // Configure is called for every request made on a connection. + Configure(req driver.Request) error +} + +// IsAuthenticationTheSame checks whether two authentications are the same. 
+func IsAuthenticationTheSame(auth1, auth2 driver.Authentication) bool { + + if auth1 == nil && auth2 == nil { + return true + } + + if auth1 == nil || auth2 == nil { + return false + } + + if auth1.Type() != auth2.Type() { + return false + } + + if auth1.Type() == driver.AuthenticationTypeRaw { + if auth1.Get("value") != auth2.Get("value") { + return false + } + } else { + if auth1.Get("username") != auth2.Get("username") || + auth1.Get("password") != auth2.Get("password") { + return false + } + } + + return true +} + +// newBasicAuthentication creates an authentication implementation based on the given username & password. +func newBasicAuthentication(userName, password string) httpAuthentication { + auth := fmt.Sprintf("%s:%s", userName, password) + encoded := base64.StdEncoding.EncodeToString([]byte(auth)) + return &basicAuthentication{ + authorizationValue: "Basic " + encoded, + } +} + +// newJWTAuthentication creates a JWT token authentication implementation based on the given username & password. +func newJWTAuthentication(userName, password string) httpAuthentication { + return &jwtAuthentication{ + userName: userName, + password: password, + } +} + +// newRawAuthentication creates a Raw authentication implementation based on the given value. +func newRawAuthentication(value string) httpAuthentication { + return &basicAuthentication{ + authorizationValue: value, + } +} + +// basicAuthentication implements HTTP Basic authentication. +type basicAuthentication struct { + authorizationValue string +} + +// Prepare is called before the first request of the given connection is made. +func (a *basicAuthentication) Prepare(ctx context.Context, conn driver.Connection) error { + // No need to do anything here + return nil +} + +// Configure is called for every request made on a connection. 
+func (a *basicAuthentication) Configure(req driver.Request) error { + req.SetHeader("Authorization", a.authorizationValue) + return nil +} + +// jwtAuthentication implements JWT token authentication. +type jwtAuthentication struct { + userName string + password string + token string +} + +type jwtOpenRequest struct { + UserName string `json:"username"` + Password string `json:"password"` +} + +type jwtOpenResponse struct { + Token string `json:"jwt"` + MustChangePassword bool `json:"must_change_password,omitempty"` +} + +// Prepare is called before the first request of the given connection is made. +func (a *jwtAuthentication) Prepare(ctx context.Context, conn driver.Connection) error { + // Prepare request + r, err := conn.NewRequest("POST", "/_open/auth") + if err != nil { + return driver.WithStack(err) + } + r.SetBody(jwtOpenRequest{ + UserName: a.userName, + Password: a.password, + }) + + // Perform request + resp, err := conn.Do(ctx, r) + if err != nil { + return driver.WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return driver.WithStack(err) + } + + // Parse response + var data jwtOpenResponse + if err := resp.ParseBody("", &data); err != nil { + return driver.WithStack(err) + } + + // Store token + a.token = data.Token + + // Ok + return nil +} + +// Configure is called for every request made on a connection. +func (a *jwtAuthentication) Configure(req driver.Request) error { + req.SetHeader("Authorization", "bearer "+a.token) + return nil +} + +// newAuthenticatedConnection creates a Connection that applies the given connection on the given underlying connection. 
+func newAuthenticatedConnection(conn driver.Connection, auth httpAuthentication) (driver.Connection, error) { + if conn == nil { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "conn is nil"}) + } + if auth == nil { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "auth is nil"}) + } + return &authenticatedConnection{ + conn: conn, + auth: auth, + }, nil +} + +// authenticatedConnection implements authentication behavior for connections. +type authenticatedConnection struct { + conn driver.Connection // Un-authenticated connection + auth httpAuthentication + prepareMutex sync.Mutex + prepared int32 +} + +// NewRequest creates a new request with given method and path. +func (c *authenticatedConnection) NewRequest(method, path string) (driver.Request, error) { + r, err := c.conn.NewRequest(method, path) + if err != nil { + return nil, driver.WithStack(err) + } + return r, nil +} + +// Do performs a given request, returning its response. +func (c *authenticatedConnection) Do(ctx context.Context, req driver.Request) (driver.Response, error) { + if atomic.LoadInt32(&c.prepared) == 0 { + // Probably we're not yet prepared + if err := c.prepare(ctx); err != nil { + // Authentication failed + return nil, driver.WithStack(err) + } + } + // Configure the request for authentication. + if err := c.auth.Configure(req); err != nil { + // Failed to configure request for authentication + return nil, driver.WithStack(err) + } + // Do the authenticated request + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, driver.WithStack(err) + } + return resp, nil +} + +// Unmarshal unmarshals the given raw object into the given result interface. +func (c *authenticatedConnection) Unmarshal(data driver.RawObject, result interface{}) error { + if err := c.conn.Unmarshal(data, result); err != nil { + return driver.WithStack(err) + } + return nil +} + +// Endpoints returns the endpoints used by this connection. 
+func (c *authenticatedConnection) Endpoints() []string { + return c.conn.Endpoints() +} + +// UpdateEndpoints reconfigures the connection to use the given endpoints. +func (c *authenticatedConnection) UpdateEndpoints(endpoints []string) error { + if err := c.conn.UpdateEndpoints(endpoints); err != nil { + return driver.WithStack(err) + } + return nil +} + +// Configure the authentication used for this connection. +func (c *authenticatedConnection) SetAuthentication(auth driver.Authentication) (driver.Connection, error) { + result, err := c.conn.SetAuthentication(auth) + if err != nil { + return nil, driver.WithStack(err) + } + return result, nil +} + +// Protocols returns all protocols used by this connection. +func (c *authenticatedConnection) Protocols() driver.ProtocolSet { + return c.conn.Protocols() +} + +// prepare calls Authentication.Prepare if needed. +func (c *authenticatedConnection) prepare(ctx context.Context) error { + c.prepareMutex.Lock() + defer c.prepareMutex.Unlock() + if c.prepared == 0 { + // We need to prepare first + if err := c.auth.Prepare(ctx, c.conn); err != nil { + // Authentication failed + return driver.WithStack(err) + } + // We're now prepared + atomic.StoreInt32(&c.prepared, 1) + } else { + // We're already prepared, do nothing + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/http/connection.go b/vendor/github.com/arangodb/go-driver/http/connection.go new file mode 100644 index 00000000000..df1a3438196 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/http/connection.go @@ -0,0 +1,496 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "sync" + "time" + + velocypack "github.com/arangodb/go-velocypack" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/cluster" + "github.com/arangodb/go-driver/util" +) + +const ( + DefaultMaxIdleConnsPerHost = 64 + DefaultConnLimit = 32 + + keyRawResponse driver.ContextKey = "arangodb-rawResponse" + keyResponse driver.ContextKey = "arangodb-response" +) + +// ConnectionConfig provides all configuration options for a HTTP connection. +type ConnectionConfig struct { + // Endpoints holds 1 or more URL's used to connect to the database. + // In case of a connection to an ArangoDB cluster, you must provide the URL's of all coordinators. + Endpoints []string + // TLSConfig holds settings used to configure a TLS (HTTPS) connection. + // This is only used for endpoints using the HTTPS scheme. + TLSConfig *tls.Config + // Transport allows the use of a custom round tripper. + // If Transport is not of type `*http.Transport`, the `TLSConfig` property is not used. + // Otherwise a `TLSConfig` property other than `nil` will overwrite the `TLSClientConfig` + // property of `Transport`. 
+ // + // When using a custom `http.Transport`, make sure to set the `MaxIdleConnsPerHost` field at least as + // high as the maximum number of concurrent requests you will make to your database. + // A lower number will cause the golang runtime to create additional connections and close them + // directly after use, resulting in a large number of connections in `TIME_WAIT` state. + // When this value is not set, the driver will set it to 64 automatically. + Transport http.RoundTripper + // DontFollowRedirect; if set, redirect will not be followed, response from the initial request will be returned without an error + // DontFollowRedirect takes precendance over FailOnRedirect. + DontFollowRedirect bool + // FailOnRedirect; if set, redirect will not be followed, instead the status code is returned as error + FailOnRedirect bool + // Cluster configuration settings + cluster.ConnectionConfig + // ContentType specified type of content encoding to use. + ContentType driver.ContentType + // ConnLimit is the upper limit to the number of connections to a single server. + // The default is 32 (DefaultConnLimit). + // Set this value to -1 if you do not want any upper limit. + ConnLimit int +} + +// NewConnection creates a new HTTP connection based on the given configuration settings. +func NewConnection(config ConnectionConfig) (driver.Connection, error) { + c, err := cluster.NewConnection(config.ConnectionConfig, func(endpoint string) (driver.Connection, error) { + conn, err := newHTTPConnection(endpoint, config) + if err != nil { + return nil, driver.WithStack(err) + } + return conn, nil + }, config.Endpoints) + if err != nil { + return nil, driver.WithStack(err) + } + return c, nil +} + +// newHTTPConnection creates a new HTTP connection for a single endpoint and the remainder of the given configuration settings. 
+func newHTTPConnection(endpoint string, config ConnectionConfig) (driver.Connection, error) { + if config.ConnLimit == 0 { + config.ConnLimit = DefaultConnLimit + } + endpoint = util.FixupEndpointURLScheme(endpoint) + u, err := url.Parse(endpoint) + if err != nil { + return nil, driver.WithStack(err) + } + var httpTransport *http.Transport + if config.Transport != nil { + httpTransport, _ = config.Transport.(*http.Transport) + } else { + httpTransport = &http.Transport{ + // Copy default values from http.DefaultTransport + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } + config.Transport = httpTransport + } + if httpTransport != nil { + if httpTransport.MaxIdleConnsPerHost == 0 { + // Raise the default number of idle connections per host since in a database application + // it is very likely that you want more than 2 concurrent connections to a host. + // We raise it to avoid the extra concurrent connections being closed directly + // after use, resulting in a lot of connection in `TIME_WAIT` state. + httpTransport.MaxIdleConnsPerHost = DefaultMaxIdleConnsPerHost + } + defaultMaxIdleConns := 3 * DefaultMaxIdleConnsPerHost + if httpTransport.MaxIdleConns > 0 && httpTransport.MaxIdleConns < defaultMaxIdleConns { + // For a cluster scenario we assume the use of 3 coordinators (don't know the exact number here) + // and derive the maximum total number of idle connections from that. 
+ httpTransport.MaxIdleConns = defaultMaxIdleConns + } + if config.TLSConfig != nil { + httpTransport.TLSClientConfig = config.TLSConfig + } + } + httpClient := &http.Client{ + Transport: config.Transport, + } + if config.DontFollowRedirect { + httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse // Do not wrap, standard library will not understand + } + } else if config.FailOnRedirect { + httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return driver.ArangoError{ + HasError: true, + Code: http.StatusFound, + ErrorNum: 0, + ErrorMessage: "Redirect not allowed", + } + } + } + var connPool chan int + if config.ConnLimit > 0 { + connPool = make(chan int, config.ConnLimit) + // Fill with available tokens + for i := 0; i < config.ConnLimit; i++ { + connPool <- i + } + } + c := &httpConnection{ + endpoint: *u, + contentType: config.ContentType, + client: httpClient, + connPool: connPool, + } + return c, nil +} + +// httpConnection implements an HTTP + JSON connection to an arangodb server. +type httpConnection struct { + endpoint url.URL + contentType driver.ContentType + client *http.Client + connPool chan int +} + +// String returns the endpoint as string +func (c *httpConnection) String() string { + return c.endpoint.String() +} + +// NewRequest creates a new request with given method and path. +func (c *httpConnection) NewRequest(method, path string) (driver.Request, error) { + switch method { + case "GET", "POST", "DELETE", "HEAD", "PATCH", "PUT", "OPTIONS": + // Ok + default: + return nil, driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("Invalid method '%s'", method)}) + } + + ct := c.contentType + if ct != driver.ContentTypeJSON && strings.Contains(path, "_api/gharial") { + // Currently (3.1.18) calls to this API do not work well with vpack. 
+ ct = driver.ContentTypeJSON + } + + r := &httpRequest{ + method: method, + path: path, + } + + switch ct { + case driver.ContentTypeJSON: + r.bodyBuilder = NewJsonBodyBuilder() + return r, nil + case driver.ContentTypeVelocypack: + r.bodyBuilder = NewVelocyPackBodyBuilder() + r.velocyPack = true + return r, nil + default: + return nil, driver.WithStack(fmt.Errorf("Unsupported content type %d", int(c.contentType))) + } +} + +// Do performs a given request, returning its response. +func (c *httpConnection) Do(ctx context.Context, req driver.Request) (driver.Response, error) { + request, ok := req.(*httpRequest) + if !ok { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "request is not a httpRequest type"}) + } + + r, err := request.createHTTPRequest(c.endpoint) + rctx := ctx + if rctx == nil { + rctx = context.Background() + } + rctx = httptrace.WithClientTrace(rctx, &httptrace.ClientTrace{ + WroteRequest: func(info httptrace.WroteRequestInfo) { + request.WroteRequest(info) + }, + }) + r = r.WithContext(rctx) + if err != nil { + return nil, driver.WithStack(err) + } + + // Block on too many concurrent connections + if c.connPool != nil { + select { + case t := <-c.connPool: + // Ok, we're allowed to continue + defer func() { + // Give back token + c.connPool <- t + }() + case <-rctx.Done(): + // Context cancelled or expired + return nil, driver.WithStack(rctx.Err()) + } + } + + resp, err := c.client.Do(r) + if err != nil { + return nil, driver.WithStack(err) + } + var rawResponse *[]byte + useRawResponse := false + if ctx != nil { + if v := ctx.Value(keyRawResponse); v != nil { + useRawResponse = true + if buf, ok := v.(*[]byte); ok { + rawResponse = buf + } + } + } + + // Read response body + body, err := readBody(resp) + if err != nil { + return nil, driver.WithStack(err) + } + if rawResponse != nil { + *rawResponse = body + } + + ct := resp.Header.Get("Content-Type") + var httpResp driver.Response + switch strings.Split(ct, ";")[0] { + case 
"application/json", "application/x-arango-dump": + httpResp = &httpJSONResponse{resp: resp, rawResponse: body} + case "application/x-velocypack": + httpResp = &httpVPackResponse{resp: resp, rawResponse: body} + default: + if resp.StatusCode == http.StatusUnauthorized { + // When unauthorized the server sometimes return a `text/plain` response. + return nil, driver.WithStack(driver.ArangoError{ + HasError: true, + Code: resp.StatusCode, + ErrorMessage: string(body), + }) + } + // Handle empty 'text/plain' body as empty JSON object + if len(body) == 0 { + body = []byte("{}") + if rawResponse != nil { + *rawResponse = body + } + httpResp = &httpJSONResponse{resp: resp, rawResponse: body} + } else if useRawResponse { + httpResp = &httpJSONResponse{resp: resp, rawResponse: body} + } else { + return nil, driver.WithStack(fmt.Errorf("Unsupported content type '%s' with status %d and content '%s'", ct, resp.StatusCode, string(body))) + } + } + if ctx != nil { + if v := ctx.Value(keyResponse); v != nil { + if respPtr, ok := v.(*driver.Response); ok { + *respPtr = httpResp + } + } + } + return httpResp, nil +} + +// readBody reads the body of the given response into a byte slice. +func readBody(resp *http.Response) ([]byte, error) { + defer resp.Body.Close() + contentLength := resp.ContentLength + if contentLength < 0 { + // Don't know the content length, do it the slowest way + result, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, driver.WithStack(err) + } + return result, nil + } + buf := &bytes.Buffer{} + if int64(int(contentLength)) == contentLength { + // contentLength is an int64. If we can safely cast to int, use Grow. + buf.Grow(int(contentLength)) + } + if _, err := buf.ReadFrom(resp.Body); err != nil { + return nil, driver.WithStack(err) + } + return buf.Bytes(), nil +} + +// Unmarshal unmarshals the given raw object into the given result interface. 
+func (c *httpConnection) Unmarshal(data driver.RawObject, result interface{}) error { + ct := c.contentType + if ct == driver.ContentTypeVelocypack && len(data) >= 2 { + // Poor mans auto detection of json + l := len(data) + if (data[0] == '{' && data[l-1] == '}') || (data[0] == '[' && data[l-1] == ']') { + ct = driver.ContentTypeJSON + } + } + switch ct { + case driver.ContentTypeJSON: + if err := json.Unmarshal(data, result); err != nil { + return driver.WithStack(err) + } + case driver.ContentTypeVelocypack: + //panic(velocypack.Slice(data)) + if err := velocypack.Unmarshal(velocypack.Slice(data), result); err != nil { + return driver.WithStack(err) + } + default: + return driver.WithStack(fmt.Errorf("Unsupported content type %d", int(c.contentType))) + } + return nil +} + +// Endpoints returns the endpoints used by this connection. +func (c *httpConnection) Endpoints() []string { + return []string{c.endpoint.String()} +} + +// UpdateEndpoints reconfigures the connection to use the given endpoints. +func (c *httpConnection) UpdateEndpoints(endpoints []string) error { + // Do nothing here. + // The real updating is done in cluster Connection. + return nil +} + +// Configure the authentication used for this connection. 
+func (c *httpConnection) SetAuthentication(auth driver.Authentication) (driver.Connection, error) { + var httpAuth httpAuthentication + switch auth.Type() { + case driver.AuthenticationTypeBasic: + userName := auth.Get("username") + password := auth.Get("password") + httpAuth = newBasicAuthentication(userName, password) + case driver.AuthenticationTypeJWT: + userName := auth.Get("username") + password := auth.Get("password") + httpAuth = newJWTAuthentication(userName, password) + case driver.AuthenticationTypeRaw: + value := auth.Get("value") + httpAuth = newRawAuthentication(value) + default: + return nil, driver.WithStack(fmt.Errorf("Unsupported authentication type %d", int(auth.Type()))) + } + + result, err := newAuthenticatedConnection(c, httpAuth) + if err != nil { + return nil, driver.WithStack(err) + } + return result, nil +} + +// Protocols returns all protocols used by this connection. +func (c *httpConnection) Protocols() driver.ProtocolSet { + return driver.ProtocolSet{driver.ProtocolHTTP} +} + +// RequestRepeater creates possibility to send the request many times. +type RequestRepeater interface { + Repeat(conn driver.Connection, resp driver.Response, err error) bool +} + +// RepeatConnection is responsible for sending request until request repeater gives up. +type RepeatConnection struct { + mutex sync.Mutex + auth driver.Authentication + conn driver.Connection + repeat RequestRepeater +} + +func NewRepeatConnection(conn driver.Connection, repeat RequestRepeater) driver.Connection { + return &RepeatConnection{ + conn: conn, + repeat: repeat, + } +} + +// NewRequest creates a new request with given method and path. +func (h *RepeatConnection) NewRequest(method, path string) (driver.Request, error) { + return h.conn.NewRequest(method, path) +} + +// Do performs a given request, returning its response. Repeats requests until repeat function gives up. 
+func (h *RepeatConnection) Do(ctx context.Context, req driver.Request) (driver.Response, error) { + for { + resp, err := h.conn.Do(ctx, req.Clone()) + + if !h.repeat.Repeat(h, resp, err) { + return resp, err + } + } +} + +// Unmarshal unmarshals the given raw object into the given result interface. +func (h *RepeatConnection) Unmarshal(data driver.RawObject, result interface{}) error { + return h.conn.Unmarshal(data, result) +} + +// Endpoints returns the endpoints used by this connection. +func (h *RepeatConnection) Endpoints() []string { + return h.conn.Endpoints() +} + +// UpdateEndpoints reconfigures the connection to use the given endpoints. +func (h *RepeatConnection) UpdateEndpoints(endpoints []string) error { + return h.conn.UpdateEndpoints(endpoints) +} + +// Configure the authentication used for this connection. +// Returns ErrAuthenticationNotChanged in when the authentication is not changed. +func (h *RepeatConnection) SetAuthentication(authentication driver.Authentication) (driver.Connection, error) { + h.mutex.Lock() + defer h.mutex.Unlock() + + if IsAuthenticationTheSame(h.auth, authentication) { + return h, ErrAuthenticationNotChanged + } + + _, err := h.conn.SetAuthentication(authentication) + if err != nil { + return nil, driver.WithStack(err) + } + h.auth = authentication + + return h, nil +} + +// Protocols returns all protocols used by this connection. 
+func (h RepeatConnection) Protocols() driver.ProtocolSet { + return h.conn.Protocols() +} diff --git a/vendor/github.com/arangodb/go-driver/http/connection_wrapper.go b/vendor/github.com/arangodb/go-driver/http/connection_wrapper.go new file mode 100644 index 00000000000..08c5686e689 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/http/connection_wrapper.go @@ -0,0 +1,92 @@ +package http + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + + "github.com/arangodb/go-velocypack" + + "github.com/arangodb/go-driver" +) + +type connectionDebugWrapper struct { + driver.Connection + ct driver.ContentType +} + +func NewConnectionDebugWrapper(conn driver.Connection, ct driver.ContentType) driver.Connection { + return &connectionDebugWrapper{conn, ct} +} + +func (c *connectionDebugWrapper) Do(ctx context.Context, req driver.Request) (driver.Response, error) { + if c.ct == driver.ContentTypeJSON { + resp, err := c.Connection.Do(ctx, req) + if err != nil { + return resp, err + } + + httpResponse, ok := resp.(*httpJSONResponse) + if !ok { + panic("can not cast response to the httpJSONResponse type!") + } + + return &responseDebugWrapper{httpResponse}, err + + } + return c.Connection.Do(ctx, req) +} + +func (c *connectionDebugWrapper) Unmarshal(data driver.RawObject, result interface{}) error { + ct := c.ct + if ct == driver.ContentTypeVelocypack && len(data) >= 2 { + // Poor mans auto detection of json + l := len(data) + if (data[0] == '{' && data[l-1] == '}') || (data[0] == '[' && data[l-1] == ']') { + ct = driver.ContentTypeJSON + } + } + switch ct { + case driver.ContentTypeJSON: + decoder := json.NewDecoder(strings.NewReader(string(data))) + decoder.DisallowUnknownFields() + + if err := decoder.Decode(result); err != nil { + return driver.WithStack(err) + } + + if err := json.Unmarshal(data, result); err != nil { + fmt.Printf("Struct: %s \n", reflect.TypeOf(result).String()) + fmt.Printf("Response: %s \n\n", string(data)) + return 
driver.WithStack(errors.New(fmt.Sprintf("Struct: %s, Error: %s", reflect.TypeOf(result).String(), err.Error()))) + } + case driver.ContentTypeVelocypack: + if err := velocypack.Unmarshal(velocypack.Slice(data), result); err != nil { + return driver.WithStack(err) + } + default: + return driver.WithStack(fmt.Errorf("unsupported content type %d", int(c.ct))) + } + return nil +} + +type responseDebugWrapper struct { + *httpJSONResponse +} + +func (r *responseDebugWrapper) ParseBody(field string, result interface{}) error { + if field == "" { + decoder := json.NewDecoder(strings.NewReader(string(r.httpJSONResponse.rawResponse))) + decoder.DisallowUnknownFields() + + if err := decoder.Decode(result); err != nil { + fmt.Printf("Struct: %s \n", reflect.TypeOf(result).String()) + fmt.Printf("Response: %s \n\n", string(r.httpJSONResponse.rawResponse)) + return driver.WithStack(errors.New(fmt.Sprintf("Struct: %s, Error: %s", reflect.TypeOf(result).String(), err.Error()))) + } + } + return r.httpJSONResponse.ParseBody(field, result) +} diff --git a/vendor/github.com/arangodb/go-driver/http/doc.go b/vendor/github.com/arangodb/go-driver/http/doc.go new file mode 100644 index 00000000000..0cffc2b0114 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/http/doc.go @@ -0,0 +1,69 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +/* +Package http implements driver.Connection using an HTTP connection. + +This connection uses HTTP or HTTPS to connect to the ArangoDB database and +encodes its content as JSON or Velocypack, depending on the value +of the `ContentType` fields in the `http.ConnectionConfig`. + +Creating an Insecure Connection + +To create an HTTP connection, use code like this. + + // Create an HTTP connection to the database + conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"http://localhost:8529"}, + }) + if err != nil { + // Handle error + } + +The resulting connection is used to create a client which you will use +for normal database requests. + + // Create a client + c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, + }) + if err != nil { + // Handle error + } + +Creating a Secure Connection + +To create a secure HTTPS connection, use code like this. + + // Create an HTTPS connection to the database + conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"https://localhost:8529"}, + TLSConfig: &tls.Config{ + InsecureSkipVerify: trueWhenUsingNonPublicCertificates, + }, + }) + if err != nil { + // Handle error + } + +*/ +package http diff --git a/vendor/github.com/arangodb/go-driver/http/mergeObject.go b/vendor/github.com/arangodb/go-driver/http/mergeObject.go new file mode 100644 index 00000000000..c11dda23aee --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/http/mergeObject.go @@ -0,0 +1,84 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package http
+
+import (
+	"encoding/json"
+
+	driver "github.com/arangodb/go-driver"
+)
+
+// mergeObject is a helper used to merge 2 objects into JSON.
+type mergeObject struct {
+	Object interface{}
+	Merge  interface{}
+}
+
+func (m mergeObject) MarshalJSON() ([]byte, error) {
+	m1, err := toMap(m.Object)
+	if err != nil {
+		return nil, driver.WithStack(err)
+	}
+	m2, err := toMap(m.Merge)
+	if err != nil {
+		return nil, driver.WithStack(err)
+	}
+	var merged map[string]interface{}
+	// Is m1 an empty object?
+ if len(m1) == 0 { + merged = m2 + } else if len(m2) == 0 { + merged = m1 + } else { + // Merge + merged = make(map[string]interface{}) + for k, v := range m1 { + merged[k] = v + } + for k, v := range m2 { + merged[k] = v + } + } + // Marshal merged map + data, err := json.Marshal(merged) + if err != nil { + return nil, driver.WithStack(err) + } + return data, nil +} + +// toMap converts the given object to a map (using JSON marshal/unmarshal when needed) +func toMap(object interface{}) (map[string]interface{}, error) { + if m, ok := object.(map[string]interface{}); ok { + return m, nil + } + data, err := json.Marshal(object) + if err != nil { + return nil, driver.WithStack(err) + } + var m map[string]interface{} + if err := json.Unmarshal(data, &m); err != nil { + return nil, driver.WithStack(err) + } + return m, nil +} diff --git a/vendor/github.com/arangodb/go-driver/http/request_json.go b/vendor/github.com/arangodb/go-driver/http/request_json.go new file mode 100644 index 00000000000..e2727b99f9e --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/http/request_json.go @@ -0,0 +1,369 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptrace" + "net/url" + "reflect" + "strconv" + "strings" + + driver "github.com/arangodb/go-driver" +) + +// httpRequest implements driver.Request using standard golang http requests. +type httpRequest struct { + method string + path string + q url.Values + hdr map[string]string + written bool + bodyBuilder driver.BodyBuilder + velocyPack bool +} + +// Path returns the Request path +func (r *httpRequest) Path() string { + return r.path +} + +// Method returns the Request method +func (r *httpRequest) Method() string { + return r.method +} + +// Clone creates a new request containing the same data as this request +func (r *httpRequest) Clone() driver.Request { + clone := *r + clone.q = url.Values{} + for k, v := range r.q { + for _, x := range v { + clone.q.Add(k, x) + } + } + if clone.hdr != nil { + clone.hdr = make(map[string]string) + for k, v := range r.hdr { + clone.hdr[k] = v + } + } + + clone.bodyBuilder = r.bodyBuilder.Clone() + return &clone +} + +// SetQuery sets a single query argument of the request. +// Any existing query argument with the same key is overwritten. +func (r *httpRequest) SetQuery(key, value string) driver.Request { + if r.q == nil { + r.q = url.Values{} + } + r.q.Set(key, value) + return r +} + +// SetBody sets the content of the request. +// The protocol of the connection determines what kinds of marshalling is taking place. +func (r *httpRequest) SetBody(body ...interface{}) (driver.Request, error) { + return r, r.bodyBuilder.SetBody(body...) +} + +// SetBodyArray sets the content of the request as an array. +// If the given mergeArray is not nil, its elements are merged with the elements in the body array (mergeArray data overrides bodyArray data). +// The protocol of the connection determines what kinds of marshalling is taking place. 
+func (r *httpRequest) SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) (driver.Request, error) { + return r, r.bodyBuilder.SetBodyArray(bodyArray, mergeArray) +} + +// SetBodyImportArray sets the content of the request as an array formatted for importing documents. +// The protocol of the connection determines what kinds of marshalling is taking place. +func (r *httpRequest) SetBodyImportArray(bodyArray interface{}) (driver.Request, error) { + err := r.bodyBuilder.SetBodyImportArray(bodyArray) + if err == nil { + if r.velocyPack { + r.SetQuery("type", "list") + } + } + + return r, err +} + +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + return v.IsNil() + default: + return false + } +} + +// SetHeader sets a single header arguments of the request. +// Any existing header argument with the same key is overwritten. +func (r *httpRequest) SetHeader(key, value string) driver.Request { + if r.hdr == nil { + r.hdr = make(map[string]string) + } + + if strings.EqualFold(key, "Content-Type") { + switch strings.ToLower(value) { + case "application/octet-stream": + case "application/zip": + r.bodyBuilder = NewBinaryBodyBuilder(strings.ToLower(value)) + } + } + + r.hdr[key] = value + return r +} + +// Written returns true as soon as this request has been written completely to the network. +// This does not guarantee that the server has received or processed the request. +func (r *httpRequest) Written() bool { + return r.written +} + +// WroteRequest implements the WroteRequest function of an httptrace. +// It sets written to true. +func (r *httpRequest) WroteRequest(httptrace.WroteRequestInfo) { + r.written = true +} + +// createHTTPRequest creates a golang http.Request based on the configured arguments. 
+func (r *httpRequest) createHTTPRequest(endpoint url.URL) (*http.Request, error) { + r.written = false + u := endpoint + u.Path = "" + url := u.String() + if !strings.HasSuffix(url, "/") { + url = url + "/" + } + p := r.path + if strings.HasPrefix(p, "/") { + p = p[1:] + } + url = url + p + if r.q != nil { + q := r.q.Encode() + if len(q) > 0 { + url = url + "?" + q + } + } + + var bodyReader io.Reader + body := r.bodyBuilder.GetBody() + if body != nil { + bodyReader = bytes.NewReader(body) + } + + req, err := http.NewRequest(r.method, url, bodyReader) + if err != nil { + return nil, driver.WithStack(err) + } + + if r.hdr != nil { + for k, v := range r.hdr { + req.Header.Set(k, v) + } + } + + if r.velocyPack { + req.Header.Set("Accept", "application/x-velocypack") + } + + if body != nil { + req.Header.Set("Content-Length", strconv.Itoa(len(body))) + req.Header.Set("Content-Type", r.bodyBuilder.GetContentType()) + } + return req, nil +} + +type jsonBody struct { + body []byte +} + +func NewJsonBodyBuilder() *jsonBody { + return &jsonBody{} +} + +// SetBody sets the content of the request. +// The protocol of the connection determines what kinds of marshalling is taking place. +func (b *jsonBody) SetBody(body ...interface{}) error { + switch len(body) { + case 0: + return driver.WithStack(fmt.Errorf("Must provide at least 1 body")) + case 1: + if data, err := json.Marshal(body[0]); err != nil { + return driver.WithStack(err) + } else { + b.body = data + } + return nil + case 2: + mo := mergeObject{Object: body[1], Merge: body[0]} + if data, err := json.Marshal(mo); err != nil { + return driver.WithStack(err) + } else { + b.body = data + } + return nil + default: + return driver.WithStack(fmt.Errorf("Must provide at most 2 bodies")) + } + +} + +// SetBodyArray sets the content of the request as an array. +// If the given mergeArray is not nil, its elements are merged with the elements in the body array (mergeArray data overrides bodyArray data). 
+// The protocol of the connection determines what kinds of marshalling is taking place. +func (b *jsonBody) SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) error { + bodyArrayVal := reflect.ValueOf(bodyArray) + switch bodyArrayVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("bodyArray must be slice, got %s", bodyArrayVal.Kind())}) + } + if mergeArray == nil { + // Simple case; just marshal bodyArray directly. + if data, err := json.Marshal(bodyArray); err != nil { + return driver.WithStack(err) + } else { + b.body = data + } + return nil + } + // Complex case, mergeArray is not nil + elementCount := bodyArrayVal.Len() + mergeObjects := make([]mergeObject, elementCount) + for i := 0; i < elementCount; i++ { + mergeObjects[i] = mergeObject{ + Object: bodyArrayVal.Index(i).Interface(), + Merge: mergeArray[i], + } + } + // Now marshal merged array + if data, err := json.Marshal(mergeObjects); err != nil { + return driver.WithStack(err) + } else { + b.body = data + } + return nil +} + +// SetBodyImportArray sets the content of the request as an array formatted for importing documents. +// The protocol of the connection determines what kinds of marshalling is taking place. 
+func (b *jsonBody) SetBodyImportArray(bodyArray interface{}) error { + bodyArrayVal := reflect.ValueOf(bodyArray) + switch bodyArrayVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("bodyArray must be slice, got %s", bodyArrayVal.Kind())}) + } + // Render elements + elementCount := bodyArrayVal.Len() + buf := &bytes.Buffer{} + encoder := json.NewEncoder(buf) + for i := 0; i < elementCount; i++ { + entryVal := bodyArrayVal.Index(i) + if isNil(entryVal) { + buf.WriteString("\n") + } else { + if err := encoder.Encode(entryVal.Interface()); err != nil { + return driver.WithStack(err) + } + } + } + b.body = buf.Bytes() + return nil +} + +func (b *jsonBody) GetBody() []byte { + return b.body +} + +func (b *jsonBody) GetContentType() string { + return "application/json" +} + +func (b *jsonBody) Clone() driver.BodyBuilder { + return &jsonBody{ + body: b.GetBody(), + } +} + +type binaryBody struct { + body []byte + contentType string +} + +func NewBinaryBodyBuilder(contentType string) *binaryBody { + b := binaryBody{ + contentType: contentType, + } + return &b +} + +// SetBody sets the content of the request. +// The protocol of the connection determines what kinds of marshalling is taking place. 
+func (b *binaryBody) SetBody(body ...interface{}) error { + if len(body) == 0 { + return driver.WithStack(fmt.Errorf("must provide at least 1 body")) + } + + if data, ok := body[0].([]byte); ok { + b.body = data + return nil + } + + return driver.WithStack(fmt.Errorf("must provide body as a []byte type")) +} + +func (b *binaryBody) SetBodyArray(_ interface{}, _ []map[string]interface{}) error { + return nil +} + +func (b *binaryBody) SetBodyImportArray(_ interface{}) error { + return nil +} + +func (b *binaryBody) GetBody() []byte { + return b.body +} + +func (b *binaryBody) GetContentType() string { + return b.contentType +} + +func (b *binaryBody) Clone() driver.BodyBuilder { + return &binaryBody{ + body: b.GetBody(), + contentType: b.GetContentType(), + } +} diff --git a/vendor/github.com/arangodb/go-driver/http/request_vpack.go b/vendor/github.com/arangodb/go-driver/http/request_vpack.go new file mode 100644 index 00000000000..659107f5970 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/http/request_vpack.go @@ -0,0 +1,144 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "bytes" + "fmt" + "reflect" + + velocypack "github.com/arangodb/go-velocypack" + + driver "github.com/arangodb/go-driver" +) + +type velocyPackBody struct { + body []byte +} + +func NewVelocyPackBodyBuilder() *velocyPackBody { + return &velocyPackBody{} +} + +// SetBody sets the content of the request. +// The protocol of the connection determines what kinds of marshalling is taking place. +func (b *velocyPackBody) SetBody(body ...interface{}) error { + + switch len(body) { + case 0: + return driver.WithStack(fmt.Errorf("Must provide at least 1 body")) + case 1: + if data, err := velocypack.Marshal(body[0]); err != nil { + return driver.WithStack(err) + } else { + b.body = data + } + return nil + case 2: + mo := mergeObject{Object: body[1], Merge: body[0]} + if data, err := velocypack.Marshal(mo); err != nil { + return driver.WithStack(err) + } else { + b.body = data + } + return nil + default: + return driver.WithStack(fmt.Errorf("Must provide at most 2 bodies")) + } + + return nil +} + +// SetBodyArray sets the content of the request as an array. +// If the given mergeArray is not nil, its elements are merged with the elements in the body array (mergeArray data overrides bodyArray data). +// The protocol of the connection determines what kinds of marshalling is taking place. +func (b *velocyPackBody) SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) error { + + bodyArrayVal := reflect.ValueOf(bodyArray) + switch bodyArrayVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("bodyArray must be slice, got %s", bodyArrayVal.Kind())}) + } + if mergeArray == nil { + // Simple case; just marshal bodyArray directly. 
+ if data, err := velocypack.Marshal(bodyArray); err != nil { + return driver.WithStack(err) + } else { + b.body = data + } + return nil + } + // Complex case, mergeArray is not nil + elementCount := bodyArrayVal.Len() + mergeObjects := make([]mergeObject, elementCount) + for i := 0; i < elementCount; i++ { + mergeObjects[i] = mergeObject{ + Object: bodyArrayVal.Index(i).Interface(), + Merge: mergeArray[i], + } + } + // Now marshal merged array + if data, err := velocypack.Marshal(mergeObjects); err != nil { + return driver.WithStack(err) + } else { + b.body = data + } + return nil +} + +// SetBodyImportArray sets the content of the request as an array formatted for importing documents. +// The protocol of the connection determines what kinds of marshalling is taking place. +func (b *velocyPackBody) SetBodyImportArray(bodyArray interface{}) error { + bodyArrayVal := reflect.ValueOf(bodyArray) + + switch bodyArrayVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("bodyArray must be slice, got %s", bodyArrayVal.Kind())}) + } + // Render elements + buf := &bytes.Buffer{} + encoder := velocypack.NewEncoder(buf) + if err := encoder.Encode(bodyArray); err != nil { + return driver.WithStack(err) + } + b.body = buf.Bytes() + return nil +} + +func (b *velocyPackBody) GetBody() []byte { + return b.body +} + +func (b *velocyPackBody) GetContentType() string { + return "application/x-velocypack" +} + +func (b *velocyPackBody) Clone() driver.BodyBuilder { + return &velocyPackBody{ + body: b.GetBody(), + } +} diff --git a/vendor/github.com/arangodb/go-driver/http/response_json.go b/vendor/github.com/arangodb/go-driver/http/response_json.go new file mode 100644 index 00000000000..cb86058b323 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/http/response_json.go @@ -0,0 +1,206 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" + + driver "github.com/arangodb/go-driver" +) + +// httpJSONResponse implements driver.Response for standard golang JSON encoded http responses. +type httpJSONResponse struct { + resp *http.Response + rawResponse []byte + bodyObject map[string]*json.RawMessage + bodyArray []map[string]*json.RawMessage +} + +// StatusCode returns an HTTP compatible status code of the response. +func (r *httpJSONResponse) StatusCode() int { + return r.resp.StatusCode +} + +// Endpoint returns the endpoint that handled the request. +func (r *httpJSONResponse) Endpoint() string { + u := *r.resp.Request.URL + u.Path = "" + u.RawQuery = "" + return u.String() +} + +// CheckStatus checks if the status of the response equals to one of the given status codes. +// If so, nil is returned. +// If not, an attempt is made to parse an error response in the body and an error is returned. +func (r *httpJSONResponse) CheckStatus(validStatusCodes ...int) error { + for _, x := range validStatusCodes { + if x == r.resp.StatusCode { + // Found valid status code + return nil + } + } + // Invalid status code, try to parse arango error response. + var aerr driver.ArangoError + if err := r.ParseBody("", &aerr); err == nil && aerr.HasError { + // Found correct arango error. 
+ return aerr + } + + // We do not have a valid error code, so we can only create one based on the HTTP status code. + return driver.ArangoError{ + HasError: true, + Code: r.resp.StatusCode, + ErrorMessage: fmt.Sprintf("Unexpected status code %d", r.resp.StatusCode), + } +} + +// Header returns the value of a response header with given key. +// If no such header is found, an empty string is returned. +func (r *httpJSONResponse) Header(key string) string { + return r.resp.Header.Get(key) +} + +// ParseBody performs protocol specific unmarshalling of the response data into the given result. +// If the given field is non-empty, the contents of that field will be parsed into the given result. +func (r *httpJSONResponse) ParseBody(field string, result interface{}) error { + if r.bodyObject == nil { + bodyMap := make(map[string]*json.RawMessage) + if err := json.Unmarshal(r.rawResponse, &bodyMap); err != nil { + return driver.WithStack(err) + } + r.bodyObject = bodyMap + } + if result != nil { + if err := parseBody(r.bodyObject, field, result); err != nil { + return driver.WithStack(err) + } + } + return nil +} + +// ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects. +// This can only be used for requests that return an array of objects. 
+func (r *httpJSONResponse) ParseArrayBody() ([]driver.Response, error) {
+	if r.bodyArray == nil {
+		var bodyArray []map[string]*json.RawMessage
+		if err := json.Unmarshal(r.rawResponse, &bodyArray); err != nil {
+			return nil, driver.WithStack(err)
+		}
+		r.bodyArray = bodyArray
+	}
+	resps := make([]driver.Response, len(r.bodyArray))
+	for i, x := range r.bodyArray {
+		resps[i] = &httpJSONResponseElement{bodyObject: x}
+	}
+	return resps, nil
+}
+
+func parseBody(bodyObject map[string]*json.RawMessage, field string, result interface{}) error {
+	if field != "" {
+		// Unmarshal only a specific field
+		raw, ok := bodyObject[field]
+		if !ok || raw == nil {
+			// Field not found, silently ignored
+			return nil
+		}
+		// Unmarshal field
+		if err := json.Unmarshal(*raw, result); err != nil {
+			return driver.WithStack(err)
+		}
+		return nil
+	}
+	// Unmarshal entire body
+	rv := reflect.ValueOf(result)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return &json.InvalidUnmarshalError{Type: reflect.TypeOf(result)}
+	}
+	objValue := rv.Elem()
+	switch objValue.Kind() {
+	case reflect.Struct:
+		if err := decodeObjectFields(objValue, bodyObject); err != nil {
+			return driver.WithStack(err)
+		}
+	case reflect.Map:
+		if err := decodeMapFields(objValue, bodyObject); err != nil {
+			return driver.WithStack(err)
+		}
+	default:
+		return &json.InvalidUnmarshalError{Type: reflect.TypeOf(result)}
+	}
+	return nil
+}
+
+// decodeObjectFields decodes fields from the given body into an objValue of kind struct.
+func decodeObjectFields(objValue reflect.Value, body map[string]*json.RawMessage) error { + objValueType := objValue.Type() + for i := 0; i != objValue.NumField(); i++ { + f := objValueType.Field(i) + if f.Anonymous && f.Type.Kind() == reflect.Struct { + // Recurse into fields of anonymous field + if err := decodeObjectFields(objValue.Field(i), body); err != nil { + return driver.WithStack(err) + } + } else { + // Decode individual field + jsonName := strings.Split(f.Tag.Get("json"), ",")[0] + if jsonName == "" { + jsonName = f.Name + } else if jsonName == "-" { + continue + } + raw, ok := body[jsonName] + if ok && raw != nil { + field := objValue.Field(i) + if err := json.Unmarshal(*raw, field.Addr().Interface()); err != nil { + return driver.WithStack(err) + } + } + } + } + return nil +} + +// decodeMapFields decodes fields from the given body into a mapValue of kind map. +func decodeMapFields(val reflect.Value, body map[string]*json.RawMessage) error { + mapVal := val + if mapVal.IsNil() { + valType := val.Type() + mapType := reflect.MapOf(valType.Key(), valType.Elem()) + mapVal = reflect.MakeMap(mapType) + } + for jsonName, raw := range body { + var value interface{} + if raw != nil { + if err := json.Unmarshal(*raw, &value); err != nil { + return driver.WithStack(err) + } + } + mapVal.SetMapIndex(reflect.ValueOf(jsonName), reflect.ValueOf(value)) + } + val.Set(mapVal) + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/http/response_json_element.go b/vendor/github.com/arangodb/go-driver/http/response_json_element.go new file mode 100644 index 00000000000..17b50e1fb2c --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/http/response_json_element.go @@ -0,0 +1,113 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "encoding/json" + "fmt" + + driver "github.com/arangodb/go-driver" +) + +// httpJSONResponseElement implements driver.Response for an entry of an array response. +type httpJSONResponseElement struct { + statusCode *int + bodyObject map[string]*json.RawMessage +} + +// StatusCode returns an HTTP compatible status code of the response. +func (r *httpJSONResponseElement) StatusCode() int { + if r.statusCode == nil { + statusCode := 200 + // Look for "error" field + if errorFieldJSON, found := r.bodyObject["error"]; found { + var hasError bool + if err := json.Unmarshal(*errorFieldJSON, &hasError); err == nil && hasError { + // We have an error, look for code field + statusCode = 500 + if codeFieldJSON, found := r.bodyObject["code"]; found { + var code int + if err := json.Unmarshal(*codeFieldJSON, &code); err == nil { + statusCode = code + } + } + } + } + r.statusCode = &statusCode + } + return *r.statusCode +} + +// Endpoint returns the endpoint that handled the request. +func (r *httpJSONResponseElement) Endpoint() string { + return "" +} + +// CheckStatus checks if the status of the response equals to one of the given status codes. +// If so, nil is returned. +// If not, an attempt is made to parse an error response in the body and an error is returned. 
+func (r *httpJSONResponseElement) CheckStatus(validStatusCodes ...int) error { + statusCode := r.StatusCode() + for _, x := range validStatusCodes { + if x == statusCode { + // Found valid status code + return nil + } + } + // Invalid status code, try to parse arango error response. + var aerr driver.ArangoError + if err := r.ParseBody("", &aerr); err == nil && aerr.HasError { + // Found correct arango error. + return aerr + } + + // We do not have a valid error code, so we can only create one based on the HTTP status code. + return driver.ArangoError{ + HasError: true, + Code: statusCode, + ErrorMessage: fmt.Sprintf("Unexpected status code %d", statusCode), + } +} + +// Header returns the value of a response header with given key. +// If no such header is found, an empty string is returned. +func (r *httpJSONResponseElement) Header(key string) string { + return "" +} + +// ParseBody performs protocol specific unmarshalling of the response data into the given result. +// If the given field is non-empty, the contents of that field will be parsed into the given result. +func (r *httpJSONResponseElement) ParseBody(field string, result interface{}) error { + if result != nil { + if err := parseBody(r.bodyObject, field, result); err != nil { + return driver.WithStack(err) + } + } + return nil +} + +// ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects. +// This can only be used for requests that return an array of objects. 
+func (r *httpJSONResponseElement) ParseArrayBody() ([]driver.Response, error) { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "ParseArrayBody not allowed"}) +} diff --git a/vendor/github.com/arangodb/go-driver/http/response_vpack.go b/vendor/github.com/arangodb/go-driver/http/response_vpack.go new file mode 100644 index 00000000000..ec65242e6c2 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/http/response_vpack.go @@ -0,0 +1,151 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "fmt" + "net/http" + + velocypack "github.com/arangodb/go-velocypack" + + driver "github.com/arangodb/go-driver" +) + +// httpVPackResponse implements driver.Response for standard golang Velocypack encoded http responses. +type httpVPackResponse struct { + resp *http.Response + rawResponse []byte + slice velocypack.Slice + bodyArray []driver.Response +} + +// StatusCode returns an HTTP compatible status code of the response. +func (r *httpVPackResponse) StatusCode() int { + return r.resp.StatusCode +} + +// Endpoint returns the endpoint that handled the request. 
+func (r *httpVPackResponse) Endpoint() string { + u := *r.resp.Request.URL + u.Path = "" + u.RawQuery = "" + return u.String() +} + +// CheckStatus checks if the status of the response equals to one of the given status codes. +// If so, nil is returned. +// If not, an attempt is made to parse an error response in the body and an error is returned. +func (r *httpVPackResponse) CheckStatus(validStatusCodes ...int) error { + for _, x := range validStatusCodes { + if x == r.resp.StatusCode { + // Found valid status code + return nil + } + } + // Invalid status code, try to parse arango error response. + var aerr driver.ArangoError + if err := r.ParseBody("", &aerr); err == nil && aerr.HasError { + // Found correct arango error. + return aerr + } + + // We do not have a valid error code, so we can only create one based on the HTTP status code. + return driver.ArangoError{ + HasError: true, + Code: r.resp.StatusCode, + ErrorMessage: fmt.Sprintf("Unexpected status code %d", r.resp.StatusCode), + } +} + +// Header returns the value of a response header with given key. +// If no such header is found, an empty string is returned. +func (r *httpVPackResponse) Header(key string) string { + return r.resp.Header.Get(key) +} + +// ParseBody performs protocol specific unmarshalling of the response data into the given result. +// If the given field is non-empty, the contents of that field will be parsed into the given result. 
+func (r *httpVPackResponse) ParseBody(field string, result interface{}) error { + slice, err := r.getSlice() + if err != nil { + return driver.WithStack(err) + } + if field != "" { + var err error + slice, err = slice.Get(field) + if err != nil { + return driver.WithStack(err) + } + if slice.IsNone() { + // Field not found + return nil + } + } + if result != nil { + if err := velocypack.Unmarshal(slice, result); err != nil { + return driver.WithStack(err) + } + } + return nil +} + +// ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects. +// This can only be used for requests that return an array of objects. +func (r *httpVPackResponse) ParseArrayBody() ([]driver.Response, error) { + if r.bodyArray == nil { + slice, err := r.getSlice() + if err != nil { + return nil, driver.WithStack(err) + } + l, err := slice.Length() + if err != nil { + return nil, driver.WithStack(err) + } + + bodyArray := make([]driver.Response, 0, l) + it, err := velocypack.NewArrayIterator(slice) + if err != nil { + return nil, driver.WithStack(err) + } + for it.IsValid() { + v, err := it.Value() + if err != nil { + return nil, driver.WithStack(err) + } + bodyArray = append(bodyArray, &httpVPackResponseElement{slice: v}) + it.Next() + } + r.bodyArray = bodyArray + } + + return r.bodyArray, nil +} + +// getSlice reads the slice from the response if needed. 
+func (r *httpVPackResponse) getSlice() (velocypack.Slice, error) { + if r.slice == nil { + r.slice = velocypack.Slice(r.rawResponse) + //fmt.Println(r.slice) + } + return r.slice, nil +} diff --git a/vendor/github.com/arangodb/go-driver/http/response_vpack_element.go b/vendor/github.com/arangodb/go-driver/http/response_vpack_element.go new file mode 100644 index 00000000000..9f2222a6d13 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/http/response_vpack_element.go @@ -0,0 +1,124 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "fmt" + + "github.com/arangodb/go-velocypack" + + "github.com/arangodb/go-driver" +) + +// httpVPackResponseElement implements driver.Response for an entry of an array response. +type httpVPackResponseElement struct { + statusCode *int + slice velocypack.Slice +} + +// StatusCode returns an HTTP compatible status code of the response. 
+func (r *httpVPackResponseElement) StatusCode() int { + if r.statusCode == nil { + statusCode := 200 + // Look for "error" field + if errorFieldSlice, _ := r.slice.Get("error"); !errorFieldSlice.IsNone() { + if hasError, err := errorFieldSlice.GetBool(); err == nil && hasError { + // We have an error, look for code field + statusCode = 500 + if codeFieldSlice, _ := r.slice.Get("code"); !codeFieldSlice.IsNone() { + if code, err := codeFieldSlice.GetInt(); err == nil { + statusCode = int(code) + } + } + } + } + r.statusCode = &statusCode + } + return *r.statusCode +} + +// Endpoint returns the endpoint that handled the request. +func (r *httpVPackResponseElement) Endpoint() string { + return "" +} + +// CheckStatus checks if the status of the response equals to one of the given status codes. +// If so, nil is returned. +// If not, an attempt is made to parse an error response in the body and an error is returned. +func (r *httpVPackResponseElement) CheckStatus(validStatusCodes ...int) error { + statusCode := r.StatusCode() + for _, x := range validStatusCodes { + if x == statusCode { + // Found valid status code + return nil + } + } + // Invalid status code, try to parse arango error response. + var aerr driver.ArangoError + if err := r.ParseBody("", &aerr); err == nil && aerr.HasError { + // Found correct arango error. + return aerr + } + + // We do not have a valid error code, so we can only create one based on the HTTP status code. + return driver.ArangoError{ + HasError: true, + Code: statusCode, + ErrorMessage: fmt.Sprintf("Unexpected status code %d", statusCode), + } +} + +// Header returns the value of a response header with given key. +// If no such header is found, an empty string is returned. +func (r *httpVPackResponseElement) Header(key string) string { + return "" +} + +// ParseBody performs protocol specific unmarshalling of the response data into the given result. 
+// If the given field is non-empty, the contents of that field will be parsed into the given result. +func (r *httpVPackResponseElement) ParseBody(field string, result interface{}) error { + slice := r.slice + if field != "" { + var err error + slice, err = slice.Get(field) + if err != nil { + return driver.WithStack(err) + } + if slice.IsNone() { + // Field not found + return nil + } + } + if result != nil { + if err := velocypack.Unmarshal(slice, result); err != nil { + return driver.WithStack(err) + } + } + return nil +} + +// ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects. +// This can only be used for requests that return an array of objects. +func (r *httpVPackResponseElement) ParseArrayBody() ([]driver.Response, error) { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "ParseArrayBody not allowed"}) +} diff --git a/vendor/github.com/arangodb/go-driver/id.go b/vendor/github.com/arangodb/go-driver/id.go new file mode 100644 index 00000000000..a57b8376377 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/id.go @@ -0,0 +1,98 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "fmt" + "strings" +) + +// ArangoID is a generic Arango ID struct representation +type ArangoID struct { + ID string `json:"id,omitempty"` + GloballyUniqueId string `json:"globallyUniqueId,omitempty"` +} + +// DocumentID references a document in a collection. +// Format: collection/_key +type DocumentID string + +// String returns a string representation of the document ID. +func (id DocumentID) String() string { + return string(id) +} + +// Validate validates the given id. +func (id DocumentID) Validate() error { + if id == "" { + return WithStack(fmt.Errorf("DocumentID is empty")) + } + parts := strings.Split(string(id), "/") + if len(parts) != 2 { + return WithStack(fmt.Errorf("Expected 'collection/key', got '%s'", string(id))) + } + if parts[0] == "" { + return WithStack(fmt.Errorf("Collection part of '%s' is empty", string(id))) + } + if parts[1] == "" { + return WithStack(fmt.Errorf("Key part of '%s' is empty", string(id))) + } + return nil +} + +// ValidateOrEmpty validates the given id unless it is empty. +// In case of empty, nil is returned. +func (id DocumentID) ValidateOrEmpty() error { + if id == "" { + return nil + } + if err := id.Validate(); err != nil { + return WithStack(err) + } + return nil +} + +// IsEmpty returns true if the given ID is empty, false otherwise. +func (id DocumentID) IsEmpty() bool { + return id == "" +} + +// Collection returns the collection part of the ID. +func (id DocumentID) Collection() string { + parts := strings.Split(string(id), "/") + return pathUnescape(parts[0]) +} + +// Key returns the key part of the ID. +func (id DocumentID) Key() string { + parts := strings.Split(string(id), "/") + if len(parts) == 2 { + return pathUnescape(parts[1]) + } + return "" +} + +// NewDocumentID creates a new document ID from the given collection, key pair. 
+func NewDocumentID(collection, key string) DocumentID { + return DocumentID(pathEscape(collection) + "/" + pathEscape(key)) +} diff --git a/vendor/github.com/arangodb/go-driver/index.go b/vendor/github.com/arangodb/go-driver/index.go new file mode 100644 index 00000000000..ad3ca3e9bf2 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/index.go @@ -0,0 +1,102 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// IndexType represents a index type as string +type IndexType string + +// Symbolic constants for index types +const ( + PrimaryIndex = IndexType("primary") + FullTextIndex = IndexType("fulltext") // Deprecated: since 3.10 version. Use ArangoSearch view instead. + HashIndex = IndexType("hash") + SkipListIndex = IndexType("skiplist") + PersistentIndex = IndexType("persistent") + GeoIndex = IndexType("geo") + EdgeIndex = IndexType("edge") + TTLIndex = IndexType("ttl") + ZKDIndex = IndexType("zkd") + InvertedIndex = IndexType("inverted") +) + +// Index provides access to a single index in a single collection. +type Index interface { + // Name returns the collection specific ID of the index. This value should be used for all functions + // the require a index _name_. + Name() string + + // ID returns the ID of the index. 
Effectively this is `/`. + ID() string + + // UserName returns the user provided name of the index or empty string if non is provided. This _name_ + // is used in query to provide hints for the optimizer about preferred indexes. + UserName() string + + // Type returns the type of the index + Type() IndexType + + // Remove removes the entire index. + // If the index does not exist, a NotFoundError is returned. + Remove(ctx context.Context) error + + // Fields returns a list of attributes of this index. + Fields() []string + + // Unique returns if this index is unique. + Unique() bool + + // Deduplicate returns deduplicate setting of this index. + Deduplicate() bool + + // Sparse returns if this is a sparse index or not. + Sparse() bool + + // GeoJSON returns if geo json was set for this index or not. + GeoJSON() bool + + // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only). + InBackground() bool + + // Estimates determines if the to-be-created index should maintain selectivity estimates or not. + Estimates() bool + + // MinLength returns min length for this index if set. + MinLength() int + + // ExpireAfter returns an expire after for this index if set. 
+ ExpireAfter() int + + // LegacyPolygons determines if the index uses legacy polygons or not - GeoIndex only + LegacyPolygons() bool + + // CacheEnabled returns if the index is enabled for caching or not - PersistentIndex only + CacheEnabled() bool + + // StoredValues returns a list of stored values for this index - PersistentIndex only + StoredValues() []string + + // InvertedIndexOptions returns the inverted index options for this index - InvertedIndex only + InvertedIndexOptions() InvertedIndexOptions +} diff --git a/vendor/github.com/arangodb/go-driver/index_impl.go b/vendor/github.com/arangodb/go-driver/index_impl.go new file mode 100644 index 00000000000..3b9da50dd0e --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/index_impl.go @@ -0,0 +1,287 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "encoding/json" + "path" + "strings" +) + +// indexStringToType converts a string representation of an index to IndexType +func indexStringToType(indexTypeString string) (IndexType, error) { + switch indexTypeString { + case string(FullTextIndex): + return FullTextIndex, nil + case string(HashIndex): + return HashIndex, nil + case string(SkipListIndex): + return SkipListIndex, nil + case string(PrimaryIndex): + return PrimaryIndex, nil + case string(PersistentIndex): + return PersistentIndex, nil + case string(GeoIndex), "geo1", "geo2": + return GeoIndex, nil + case string(EdgeIndex): + return EdgeIndex, nil + case string(TTLIndex): + return TTLIndex, nil + case string(ZKDIndex): + return ZKDIndex, nil + case string(InvertedIndex): + return InvertedIndex, nil + default: + return "", WithStack(InvalidArgumentError{Message: "unknown index type"}) + } +} + +// newIndex creates a new Index implementation. +func newIndex(data indexData, col *collection) (Index, error) { + if data.ID == "" { + return nil, WithStack(InvalidArgumentError{Message: "id is empty"}) + } + parts := strings.Split(data.ID, "/") + if len(parts) != 2 { + return nil, WithStack(InvalidArgumentError{Message: "id must be `collection/name`"}) + } + if col == nil { + return nil, WithStack(InvalidArgumentError{Message: "col is nil"}) + } + indexType, err := indexStringToType(data.Type) + if err != nil { + return nil, WithStack(err) + } + return &index{ + indexData: data, + indexType: indexType, + col: col, + db: col.db, + conn: col.conn, + }, nil +} + +// newIndex creates a new Index implementation. 
+func newInvertedIndex(data invertedIndexData, col *collection) (Index, error) { + if data.ID == "" { + return nil, WithStack(InvalidArgumentError{Message: "id is empty"}) + } + parts := strings.Split(data.ID, "/") + if len(parts) != 2 { + return nil, WithStack(InvalidArgumentError{Message: "id must be `collection/name`"}) + } + if col == nil { + return nil, WithStack(InvalidArgumentError{Message: "col is nil"}) + } + indexType, err := indexStringToType(data.Type) + if err != nil { + return nil, WithStack(err) + } + + dataIndex := indexData{ + ID: data.ID, + Type: data.Type, + InBackground: &data.InvertedIndexOptions.InBackground, + IsNewlyCreated: &data.InvertedIndexOptions.IsNewlyCreated, + Name: data.InvertedIndexOptions.Name, + ArangoError: data.ArangoError, + } + return &index{ + indexData: dataIndex, + invertedDataIndex: data, + indexType: indexType, + col: col, + db: col.db, + conn: col.conn, + }, nil +} + +// newIndexFrom map returns Index implementation based on index type extracted from rawData +func newIndexFromMap(rawData json.RawMessage, col *collection) (Index, error) { + type generalIndexData struct { + Type string `json:"type"` + } + var gen generalIndexData + err := json.Unmarshal(rawData, &gen) + if err != nil { + return nil, WithStack(err) + } + + if IndexType(gen.Type) == InvertedIndex { + var idxData invertedIndexData + err = json.Unmarshal(rawData, &idxData) + if err != nil { + return nil, WithStack(err) + } + return newInvertedIndex(idxData, col) + } + + var idxData indexData + err = json.Unmarshal(rawData, &idxData) + if err != nil { + return nil, WithStack(err) + } + return newIndex(idxData, col) +} + +type index struct { + indexData + invertedDataIndex invertedIndexData + indexType IndexType + db *database + col *collection + conn Connection +} + +// relPath creates the relative path to this index (`_db//_api/index`) +func (i *index) relPath() string { + return path.Join(i.db.relPath(), "_api", "index") +} + +// Name returns the name of 
the index. +func (i *index) Name() string { + parts := strings.Split(i.indexData.ID, "/") + return parts[1] +} + +// ID returns the ID of the index. +func (i *index) ID() string { + return i.indexData.ID +} + +// UserName returns the user provided name of the index or empty string if non is provided. +func (i *index) UserName() string { + return i.indexData.Name +} + +// Type returns the type of the index +func (i *index) Type() IndexType { + return i.indexType +} + +// Fields returns a list of attributes of this index. +func (i *index) Fields() []string { + return i.indexData.Fields +} + +// Unique returns if this index is unique. +func (i *index) Unique() bool { + if i.indexData.Unique == nil { + return false + } + return *i.indexData.Unique +} + +// Deduplicate returns deduplicate setting of this index. +func (i *index) Deduplicate() bool { + if i.indexData.Deduplicate == nil { + return false + } + return *i.indexData.Deduplicate +} + +// Sparse returns if this is a sparse index or not. +func (i *index) Sparse() bool { + if i.indexData.Sparse == nil { + return false + } + return *i.indexData.Sparse +} + +// GeoJSON returns if geo json was set for this index or not. +func (i *index) GeoJSON() bool { + if i.indexData.GeoJSON == nil { + return false + } + return *i.indexData.GeoJSON +} + +// InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only). +func (i *index) InBackground() bool { + if i.indexData.InBackground == nil { + return false + } + return *i.indexData.InBackground +} + +// Estimates determines if the to-be-created index should maintain selectivity estimates or not. +func (i *index) Estimates() bool { + if i.indexData.Estimates == nil { + return false + } + return *i.indexData.Estimates +} + +// MinLength returns min length for this index if set. +func (i *index) MinLength() int { + return i.indexData.MinLength +} + +// ExpireAfter returns an expire after for this index if set. 
+func (i *index) ExpireAfter() int { + return i.indexData.ExpireAfter +} + +// LegacyPolygons determines if the index uses legacy polygons or not - GeoIndex only +func (i *index) LegacyPolygons() bool { + if i.indexData.LegacyPolygons == nil { + return false + } + return *i.indexData.LegacyPolygons +} + +// CacheEnabled returns if the index is enabled for caching or not - PersistentIndex only +func (i *index) CacheEnabled() bool { + if i.indexData.CacheEnabled == nil { + return false + } + return *i.indexData.CacheEnabled +} + +// StoredValues returns a list of stored values for this index - PersistentIndex only +func (i *index) StoredValues() []string { + return i.indexData.StoredValues +} + +// InvertedIndexOptions returns the inverted index options for this index - InvertedIndex only +func (i *index) InvertedIndexOptions() InvertedIndexOptions { + return i.invertedDataIndex.InvertedIndexOptions +} + +// Remove removes the entire index. +// If the index does not exist, a NotFoundError is returned. +func (i *index) Remove(ctx context.Context) error { + req, err := i.conn.NewRequest("DELETE", path.Join(i.relPath(), i.indexData.ID)) + if err != nil { + return WithStack(err) + } + resp, err := i.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/jwt/doc.go b/vendor/github.com/arangodb/go-driver/jwt/doc.go new file mode 100644 index 00000000000..7d226ee3780 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/jwt/doc.go @@ -0,0 +1,57 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +/* +Package jwt provides a helper function used to access ArangoDB +servers using a JWT secret. + +Authenticating with a JWT secret results in "super-user" access +to the database. + +To use a JWT secret to access your database, use code like this: + + // Create an HTTP connection to the database + conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"http://localhost:8529"}, + }) + if err != nil { + // Handle error + } + + // Prepare authentication + hdr, err := CreateArangodJwtAuthorizationHeader("yourJWTSecret", "yourUniqueServerID") + if err != nil { + // Handle error + } + auth := driver.RawAuthentication(hdr) + + // Create a client + c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, + Authentication: auth, + }) + if err != nil { + // Handle error + } + +*/ +package jwt diff --git a/vendor/github.com/arangodb/go-driver/jwt/jwt.go b/vendor/github.com/arangodb/go-driver/jwt/jwt.go new file mode 100644 index 00000000000..305737917ef --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/jwt/jwt.go @@ -0,0 +1,84 @@ +// +// DISCLAIMER +// +// Copyright 2018-2021 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// Author Tomasz Mielech +// + +package jwt + +import ( + "github.com/golang-jwt/jwt" + + driver "github.com/arangodb/go-driver" +) + +const ( + issArangod = "arangodb" +) + +// CreateArangodJwtAuthorizationHeader calculates a JWT authorization header, for authorization +// of a request to an arangod server, based on the given secret. +// If the secret is empty, nothing is done. +// Use the result of this function as input for driver.RawAuthentication. +func CreateArangodJwtAuthorizationHeader(jwtSecret, serverID string) (string, error) { + if jwtSecret == "" || serverID == "" { + return "", nil + } + // Create a new token object, specifying signing method and the claims + // you would like it to contain. + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "iss": issArangod, + "server_id": serverID, + }) + + // Sign and get the complete encoded token as a string using the secret + signedToken, err := token.SignedString([]byte(jwtSecret)) + if err != nil { + return "", driver.WithStack(err) + } + + return "bearer " + signedToken, nil +} + +// CreateArangodJwtAuthorizationHeaderAllowedPaths calculates a JWT authorization header, for authorization +// of a request to an arangod server, based on the given secret. +// If the secret is empty, nothing is done. +// Use the result of this function as input for driver.RawAuthentication. 
+// Additionally allowed paths can be specified +func CreateArangodJwtAuthorizationHeaderAllowedPaths(jwtSecret, serverID string, paths []string) (string, error) { + if jwtSecret == "" || serverID == "" { + return "", nil + } + // Create a new token object, specifying signing method and the claims + // you would like it to contain. + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "iss": issArangod, + "server_id": serverID, + "allowed_paths": paths, + }) + + // Sign and get the complete encoded token as a string using the secret + signedToken, err := token.SignedString([]byte(jwtSecret)) + if err != nil { + return "", driver.WithStack(err) + } + + return "bearer " + signedToken, nil +} diff --git a/vendor/github.com/arangodb/go-driver/meta.go b/vendor/github.com/arangodb/go-driver/meta.go new file mode 100644 index 00000000000..f87fc3a4a08 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/meta.go @@ -0,0 +1,69 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +// DocumentMeta contains all meta data used to identifier a document. 
+type DocumentMeta struct { + Key string `json:"_key,omitempty"` + ID DocumentID `json:"_id,omitempty"` + Rev string `json:"_rev,omitempty"` + OldRev string `json:"_oldRev,omitempty"` +} + +// validateKey returns an error if the given key is empty otherwise invalid. +func validateKey(key string) error { + if key == "" { + return WithStack(InvalidArgumentError{Message: "key is empty"}) + } + return nil +} + +// DocumentMetaSlice is a slice of DocumentMeta elements +type DocumentMetaSlice []DocumentMeta + +// Keys returns the keys of all elements. +func (l DocumentMetaSlice) Keys() []string { + keys := make([]string, len(l)) + for i, m := range l { + keys[i] = m.Key + } + return keys +} + +// Revs returns the revisions of all elements. +func (l DocumentMetaSlice) Revs() []string { + revs := make([]string, len(l)) + for i, m := range l { + revs[i] = m.Rev + } + return revs +} + +// IDs returns the ID's of all elements. +func (l DocumentMetaSlice) IDs() []DocumentID { + ids := make([]DocumentID, len(l)) + for i, m := range l { + ids[i] = m.ID + } + return ids +} diff --git a/vendor/github.com/arangodb/go-driver/protocol.go b/vendor/github.com/arangodb/go-driver/protocol.go new file mode 100644 index 00000000000..62702b26be0 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/protocol.go @@ -0,0 +1,56 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +type Protocol int + +const ( + ProtocolHTTP Protocol = iota + ProtocolVST1_0 + ProtocolVST1_1 +) + +// ProtocolSet is a set of protocols. +type ProtocolSet []Protocol + +// Contains returns true if the given protocol is contained in the given set, false otherwise. +func (ps ProtocolSet) Contains(p Protocol) bool { + for _, x := range ps { + if x == p { + return true + } + } + return false +} + +// ContainsAny returns true if any of the given protocols is contained in the given set, false otherwise. +func (ps ProtocolSet) ContainsAny(p ...Protocol) bool { + for _, x := range ps { + for _, y := range p { + if x == y { + return true + } + } + } + return false +} diff --git a/vendor/github.com/arangodb/go-driver/query.go b/vendor/github.com/arangodb/go-driver/query.go new file mode 100644 index 00000000000..df4d68d8d19 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/query.go @@ -0,0 +1,297 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "time" +) + +const ( + keyQueryCount = "arangodb-query-count" + keyQueryBatchSize = "arangodb-query-batchSize" + keyQueryCache = "arangodb-query-cache" + keyQueryMemoryLimit = "arangodb-query-memoryLimit" + keyQueryForceOneShardAttributeValue = "arangodb-query-forceOneShardAttributeValue" + keyQueryTTL = "arangodb-query-ttl" + keyQueryOptSatSyncWait = "arangodb-query-opt-satSyncWait" + keyQueryOptFullCount = "arangodb-query-opt-fullCount" + keyQueryOptStream = "arangodb-query-opt-stream" + keyQueryOptProfile = "arangodb-query-opt-profile" + keyQueryOptMaxRuntime = "arangodb-query-opt-maxRuntime" + keyQueryShardIds = "arangodb-query-opt-shardIds" + keyFillBlockCache = "arangodb-query-opt-fillBlockCache" +) + +// WithQueryCount is used to configure a context that will set the Count of a query request, +// If value is not given it defaults to true. +func WithQueryCount(parent context.Context, value ...bool) context.Context { + v := true + if len(value) > 0 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyQueryCount, v) +} + +// WithQueryBatchSize is used to configure a context that will set the BatchSize of a query request, +func WithQueryBatchSize(parent context.Context, value int) context.Context { + return context.WithValue(contextOrBackground(parent), keyQueryBatchSize, value) +} + +// WithQuerySharIds is used to configure a context that will set the ShardIds of a query request, +func WithQueryShardIds(parent context.Context, value []string) context.Context { + return context.WithValue(contextOrBackground(parent), keyQueryShardIds, value) +} + +// WithQueryCache is used to configure a context that will set the Cache of a query request, +// If value is not given it defaults to true. 
+func WithQueryCache(parent context.Context, value ...bool) context.Context { + v := true + if len(value) > 0 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyQueryCache, v) +} + +// WithQueryMemoryLimit is used to configure a context that will set the MemoryList of a query request, +func WithQueryMemoryLimit(parent context.Context, value int64) context.Context { + return context.WithValue(contextOrBackground(parent), keyQueryMemoryLimit, value) +} + +// WithQueryForceOneShardAttributeValue is used to configure a context that will set the ForceOneShardAttributeValue of a query request, +func WithQueryForceOneShardAttributeValue(parent context.Context, value string) context.Context { + return context.WithValue(contextOrBackground(parent), keyQueryForceOneShardAttributeValue, value) +} + +// WithQueryTTL is used to configure a context that will set the TTL of a query request, +func WithQueryTTL(parent context.Context, value time.Duration) context.Context { + return context.WithValue(contextOrBackground(parent), keyQueryTTL, value) +} + +// WithQuerySatelliteSyncWait sets the satelliteSyncWait query value on the query cursor request +func WithQuerySatelliteSyncWait(parent context.Context, value time.Duration) context.Context { + return context.WithValue(contextOrBackground(parent), keyQueryOptSatSyncWait, value) +} + +// WithQueryFullCount is used to configure whether the query returns the full count of results +// before the last LIMIT statement +func WithQueryFullCount(parent context.Context, value ...bool) context.Context { + v := true + if len(value) > 0 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyQueryOptFullCount, v) +} + +// WithQueryStream is used to configure whether this becomes a stream query. +// A stream query is not executed right away, but continually evaluated +// when the client is requesting more results. Should the cursor expire +// the query transaction is canceled. 
This means for writing queries clients +// have to read the query-cursor until the HasMore() method returns false. +func WithQueryStream(parent context.Context, value ...bool) context.Context { + v := true + if len(value) > 0 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyQueryOptStream, v) +} + +// WithQueryProfile is used to configure whether Query should be profiled. +func WithQueryProfile(parent context.Context, value ...int) context.Context { + v := 1 + if len(value) > 0 { + v = value[0] + } + + if v < 0 { + v = 0 + } else if v > 2 { + v = 2 + } + + return context.WithValue(contextOrBackground(parent), keyQueryOptProfile, v) +} + +func WithQueryMaxRuntime(parent context.Context, value ...float64) context.Context { + v := 0.0 + if len(value) > 0 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyQueryOptMaxRuntime, v) +} + +// WithQueryFillBlockCache if is set to true or not specified, this will make the query store the data it reads via the RocksDB storage engine in the RocksDB block cache. +// This is usually the desired behavior. The option can be set to false for queries that are known to either read a lot of data which would thrash the block cache, +// or for queries that read data which are known to be outside of the hot set. By setting the option to false, data read by the query will not make it into +// the RocksDB block cache if not already in there, thus leaving more room for the actual hot set. +func WithQueryFillBlockCache(parent context.Context, value ...bool) context.Context { + v := true + if len(value) > 0 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyFillBlockCache, v) +} + +type queryRequest struct { + // indicates whether the number of documents in the result set should be returned in the "count" attribute of the result. 
+ // Calculating the "count" attribute might have a performance impact for some queries in the future so this option is + // turned off by default, and "count" is only returned when requested. + Count bool `json:"count,omitempty"` + // maximum number of result documents to be transferred from the server to the client in one roundtrip. + // If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 is disallowed. + BatchSize int `json:"batchSize,omitempty"` + // flag to determine whether the AQL query cache shall be used. If set to false, then any query cache lookup + // will be skipped for the query. If set to true, it will lead to the query cache being checked for the query + // if the query cache mode is either on or demand. + Cache bool `json:"cache,omitempty"` + // the maximum number of memory (measured in bytes) that the query is allowed to use. If set, then the query will fail + // with error "resource limit exceeded" in case it allocates too much memory. A value of 0 indicates that there is no memory limit. + MemoryLimit int64 `json:"memoryLimit,omitempty"` + // The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified + // amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. + // If not set, a server-defined value will be used. + TTL float64 `json:"ttl,omitempty"` + // contains the query string to be executed + Query string `json:"query"` + // key/value pairs representing the bind parameters. + BindVars map[string]interface{} `json:"bindVars,omitempty"` + Options struct { + // ShardId query option + ShardIds []string `json:"shardIds,omitempty"` + // Profile If set to true or 1, then the additional query profiling information will be returned in the sub-attribute profile of the extra return attribute, + // if the query result is not served from the query cache. 
Set to 2 the query will include execution stats per query plan node in + // sub-attribute stats.nodes of the extra return attribute. Additionally the query plan is returned in the sub-attribute extra.plan. + Profile int `json:"profile,omitempty"` + // A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the optimizer to include or exclude specific rules. + // To disable a rule, prefix its name with a -, to enable a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules. + OptimizerRules string `json:"optimizer.rules,omitempty"` + // This Enterprise Edition parameter allows to configure how long a DBServer will have time to bring the satellite collections + // involved in the query into sync. The default value is 60.0 (seconds). When the max time has been reached the query will be stopped. + SatelliteSyncWait float64 `json:"satelliteSyncWait,omitempty"` + // if set to true and the query contains a LIMIT clause, then the result will have an extra attribute with the sub-attributes + // stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } } }. The fullCount attribute will contain the number + // of documents in the result before the last LIMIT in the query was applied. It can be used to count the number of documents + // that match certain filter criteria, but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. + // Note that setting the option will disable a few LIMIT optimizations and may lead to more documents being processed, and + // thus make queries run longer. Note that the fullCount attribute will only be present in the result if the query has a LIMIT clause + // and the LIMIT clause is actually used in the query. + FullCount bool `json:"fullCount,omitempty"` + // Limits the maximum number of plans that are created by the AQL query optimizer. 
+ MaxPlans int `json:"maxPlans,omitempty"` + // Specify true and the query will be executed in a streaming fashion. The query result is not stored on + // the server, but calculated on the fly. Beware: long-running queries will need to hold the collection + // locks for as long as the query cursor exists. When set to false a query will be executed right away in + // its entirety. + Stream bool `json:"stream,omitempty"` + // MaxRuntime specify the timeout which can be used to kill a query on the server after the specified + // amount in time. The timeout value is specified in seconds. A value of 0 means no timeout will be enforced. + MaxRuntime float64 `json:"maxRuntime,omitempty"` + // ForceOneShardAttributeValue This query option can be used in complex queries in case the query optimizer cannot + // automatically detect that the query can be limited to only a single server (e.g. in a disjoint smart graph case). + ForceOneShardAttributeValue *string `json:"forceOneShardAttributeValue,omitempty"` + // FillBlockCache if is set to true or not specified, this will make the query store the data it reads via the RocksDB storage engine in the RocksDB block cache. + // This is usually the desired behavior. The option can be set to false for queries that are known to either read a lot of data which would thrash the block cache, + // or for queries that read data which are known to be outside of the hot set. By setting the option to false, data read by the query will not make it into + // the RocksDB block cache if not already in there, thus leaving more room for the actual hot set. + FillBlockCache bool `json:"fillBlockCache,omitempty"` + } `json:"options,omitempty"` +} + +// applyContextSettings fills fields in the queryRequest from the given context. 
+func (q *queryRequest) applyContextSettings(ctx context.Context) { + if ctx == nil { + return + } + if rawValue := ctx.Value(keyQueryCount); rawValue != nil { + if value, ok := rawValue.(bool); ok { + q.Count = value + } + } + if rawValue := ctx.Value(keyQueryBatchSize); rawValue != nil { + if value, ok := rawValue.(int); ok { + q.BatchSize = value + } + } + if rawValue := ctx.Value(keyQueryShardIds); rawValue != nil { + if value, ok := rawValue.([]string); ok { + q.Options.ShardIds = value + } + } + if rawValue := ctx.Value(keyQueryCache); rawValue != nil { + if value, ok := rawValue.(bool); ok { + q.Cache = value + } + } + if rawValue := ctx.Value(keyQueryMemoryLimit); rawValue != nil { + if value, ok := rawValue.(int64); ok { + q.MemoryLimit = value + } + } + if rawValue := ctx.Value(keyQueryForceOneShardAttributeValue); rawValue != nil { + if value, ok := rawValue.(string); ok { + q.Options.ForceOneShardAttributeValue = &value + } + } + if rawValue := ctx.Value(keyQueryTTL); rawValue != nil { + if value, ok := rawValue.(time.Duration); ok { + q.TTL = value.Seconds() + } + } + if rawValue := ctx.Value(keyQueryOptSatSyncWait); rawValue != nil { + if value, ok := rawValue.(time.Duration); ok { + q.Options.SatelliteSyncWait = value.Seconds() + } + } + if rawValue := ctx.Value(keyQueryOptFullCount); rawValue != nil { + if value, ok := rawValue.(bool); ok { + q.Options.FullCount = value + } + } + if rawValue := ctx.Value(keyQueryOptStream); rawValue != nil { + if value, ok := rawValue.(bool); ok { + q.Options.Stream = value + } + } + if rawValue := ctx.Value(keyQueryOptProfile); rawValue != nil { + if _, ok := rawValue.(bool); ok { + q.Options.Profile = 1 + } else if value, ok := rawValue.(int); ok { + q.Options.Profile = value + } + } + if rawValue := ctx.Value(keyQueryOptMaxRuntime); rawValue != nil { + if value, ok := rawValue.(float64); ok { + q.Options.MaxRuntime = value + } + } + if rawValue := ctx.Value(keyFillBlockCache); rawValue != nil { + if value, ok := 
rawValue.(bool); ok { + q.Options.FillBlockCache = value + } + } +} + +type parseQueryRequest struct { + // contains the query string to be executed + Query string `json:"query"` +} diff --git a/vendor/github.com/arangodb/go-driver/replication.go b/vendor/github.com/arangodb/go-driver/replication.go new file mode 100644 index 00000000000..5f0f8d26bc7 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/replication.go @@ -0,0 +1,68 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "time" +) + +// Tick is represent a place in either the Write-Ahead Log, +// journals and datafiles value reported by the server +type Tick string + +// Batch represents state on the server used during +// certain replication operations to keep state required +// by the client (such as Write-Ahead Log, inventory and data-files) +type Batch interface { + // id of this batch + BatchID() string + // LastTick reported by the server for this batch + LastTick() Tick + // Extend the lifetime of an existing batch on the server + Extend(ctx context.Context, ttl time.Duration) error + // DeleteBatch deletes an existing batch on the server + Delete(ctx context.Context) error +} + +// Replication provides access to replication related operations. 
+type Replication interface {
+	// CreateBatch creates a "batch" to prevent removal of state required for replication
+	CreateBatch(ctx context.Context, db Database, serverID int64, ttl time.Duration) (Batch, error)
+
+	// Get the inventory of the server containing all collections (with entire details) of a database.
+	// When this function is called on a coordinator in a cluster, an ID of a DBServer must be provided
+	// using a context that is prepared with `WithDBServerID`.
+	DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error)
+
+	// GetRevisionTree retrieves the Revision tree (Merkle tree) associated with the collection.
+	GetRevisionTree(ctx context.Context, db Database, batchId, collection string) (RevisionTree, error)
+
+	// GetRevisionsByRanges retrieves the revision IDs of documents within requested ranges.
+	GetRevisionsByRanges(ctx context.Context, db Database, batchId, collection string, minMaxRevision []RevisionMinMax,
+		resume RevisionUInt64) (RevisionRanges, error)
+
+	// GetRevisionDocuments retrieves documents by revision.
+	GetRevisionDocuments(ctx context.Context, db Database, batchId, collection string,
+		revisions Revisions) ([]map[string]interface{}, error)
+}
diff --git a/vendor/github.com/arangodb/go-driver/replication_impl.go b/vendor/github.com/arangodb/go-driver/replication_impl.go
new file mode 100644
index 00000000000..30ac34d5f80
--- /dev/null
+++ b/vendor/github.com/arangodb/go-driver/replication_impl.go
@@ -0,0 +1,165 @@
+//
+// DISCLAIMER
+//
+// Copyright 2018-2021 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// Author Tomasz Mielech +// + +package driver + +import ( + "context" + "errors" + "path" + "strconv" + "sync/atomic" + "time" +) + +// Content of the create batch resp +type batchMetadata struct { + // ID of the batch + ID string `json:"id"` + // Last Tick reported by the server + LastTickInt Tick `json:"lastTick,omitempty"` + + cl *client + serverID int64 + database string + closed int32 +} + +// ErrBatchClosed occurs when there is an attempt closing or prolonging closed batch +var ErrBatchClosed = errors.New("Batch already closed") + +// CreateBatch creates a "batch" to prevent WAL file removal and to take a snapshot +func (c *client) CreateBatch(ctx context.Context, db Database, serverID int64, ttl time.Duration) (Batch, error) { + req, err := c.conn.NewRequest("POST", path.Join("_db", db.Name(), "_api/replication/batch")) + if err != nil { + return nil, WithStack(err) + } + req = req.SetQuery("serverId", strconv.FormatInt(serverID, 10)) + params := struct { + TTL float64 `json:"ttl"` + }{TTL: ttl.Seconds()} // just use a default ttl value + req, err = req.SetBody(params) + if err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var batch batchMetadata + if err := resp.ParseBody("", &batch); err != nil { + return nil, WithStack(err) + } + batch.cl = c + batch.serverID = serverID + batch.database 
= db.Name() + return &batch, nil +} + +// Get the inventory of a server containing all collections (with entire details) of a database. +func (c *client) DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error) { + req, err := c.conn.NewRequest("GET", path.Join("_db", db.Name(), "_api/replication/inventory")) + if err != nil { + return DatabaseInventory{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DatabaseInventory{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return DatabaseInventory{}, WithStack(err) + } + var result DatabaseInventory + if err := resp.ParseBody("", &result); err != nil { + return DatabaseInventory{}, WithStack(err) + } + return result, nil +} + +// BatchID reported by the server +// The receiver is pointer because this struct contains the field `closed` and it can not be copied +// because race detector will complain. +func (b *batchMetadata) BatchID() string { + return b.ID +} + +// LastTick reported by the server for this batch +// The receiver is pointer because this struct contains the field `closed` and it can not be copied +// because race detector will complain. 
+func (b *batchMetadata) LastTick() Tick { + return b.LastTickInt +} + +// Extend the lifetime of an existing batch on the server +func (b *batchMetadata) Extend(ctx context.Context, ttl time.Duration) error { + if !atomic.CompareAndSwapInt32(&b.closed, 0, 0) { + return WithStack(ErrBatchClosed) + } + + req, err := b.cl.conn.NewRequest("PUT", path.Join("_db", b.database, "_api/replication/batch", b.ID)) + if err != nil { + return WithStack(err) + } + req = req.SetQuery("serverId", strconv.FormatInt(b.serverID, 10)) + input := struct { + TTL int64 `json:"ttl"` + }{ + TTL: int64(ttl.Seconds()), + } + req, err = req.SetBody(input) + if err != nil { + return WithStack(err) + } + resp, err := b.cl.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(204); err != nil { + return WithStack(err) + } + return nil +} + +// Delete an existing dump batch +func (b *batchMetadata) Delete(ctx context.Context) error { + if !atomic.CompareAndSwapInt32(&b.closed, 0, 1) { + return WithStack(ErrBatchClosed) + } + + req, err := b.cl.conn.NewRequest("DELETE", path.Join("_db", b.database, "_api/replication/batch", b.ID)) + if err != nil { + return WithStack(err) + } + resp, err := b.cl.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(204); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/revision.go b/vendor/github.com/arangodb/go-driver/revision.go new file mode 100644 index 00000000000..6c834499c93 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/revision.go @@ -0,0 +1,273 @@ +// +// DISCLAIMER +// +// Copyright 2020 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Adam Janikowski +// + +package driver + +import ( + "context" + "path" + + "github.com/arangodb/go-velocypack" +) + +// RevisionUInt64 is representation of '_rev' string value as an uint64 number +type RevisionUInt64 uint64 + +// RevisionMinMax is an array of two Revisions which create range of them +type RevisionMinMax [2]RevisionUInt64 + +// Revisions is a slice of Revisions +type Revisions []RevisionUInt64 + +type RevisionRanges struct { + Ranges []Revisions `json:"ranges"` + Resume RevisionUInt64 `json:"resume,string" velocypack:"resume"` +} + +// RevisionTreeNode is a leaf in Merkle tree with hashed Revisions and with count of documents in the leaf +type RevisionTreeNode struct { + Hash uint64 `json:"hash"` + Count uint64 `json:"count,int"` +} + +// RevisionTree is a list of Revisions in a Merkle tree +type RevisionTree struct { + Version int `json:"version"` + MaxDepth int `json:"maxDepth"` + RangeMin RevisionUInt64 `json:"rangeMin,string" velocypack:"rangeMin"` + RangeMax RevisionUInt64 `json:"rangeMax,string" velocypack:"rangeMax"` + InitialRangeMin RevisionUInt64 `json:"initialRangeMin,string" velocypack:"initialRangeMin"` + Count uint64 `json:"count,int"` + Hash uint64 `json:"hash"` + Nodes []RevisionTreeNode `json:"nodes"` +} + +var ( + revisionEncodingTable = [64]byte{'-', '_', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', + 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', + 
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9'} + revisionDecodingTable = [256]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0 - 15 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16 - 31 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 32 - 47 (here is the '-' on 45 place) + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 0, 0, 0, 0, 0, 0, // 48 - 63 + 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, // 64 - 79 + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 0, 0, 0, 0, 1, // 80 - 95 + 0, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, // 96 - 111 + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 0, 0, 0, 0, 0, // 112 - 127 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 128 - 143 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 144 - 159 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 160 - 175 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 176 - 191 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 192 - 207 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 208 - 223 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 224 - 239 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 240 - 255 + } +) + +func decodeRevision(revision []byte) RevisionUInt64 { + var t RevisionUInt64 + + for _, s := range revision { + t = t*64 + RevisionUInt64(revisionDecodingTable[s]) + } + + return t +} + +func encodeRevision(revision RevisionUInt64) []byte { + if revision == 0 { + return []byte{} + } + + var result [12]byte + index := cap(result) + + for revision > 0 { + index-- + result[index] = revisionEncodingTable[uint8(revision&0x3f)] + revision >>= 6 + } + + return result[index:] +} + +// UnmarshalJSON parses string revision document into RevisionUInt64 number +func (n *RevisionUInt64) UnmarshalJSON(revision []byte) (err error) { + length := len(revision) + + if length > 2 { + *n = decodeRevision(revision[1 : length-1]) + } else { + // it can be 
only empty json string "" + *n = 0 + } + + return nil +} + +// MarshalJSON converts RevisionUInt64 into string revision +func (n *RevisionUInt64) MarshalJSON() ([]byte, error) { + if *n == 0 { + return []byte{'"', '"'}, nil // return an empty string + } + + value := make([]byte, 0, 16) + r := encodeRevision(*n) + value = append(value, '"') + value = append(value, r...) + value = append(value, '"') + return value, nil +} + +// UnmarshalVPack parses string revision document into RevisionUInt64 number +func (n *RevisionUInt64) UnmarshalVPack(slice velocypack.Slice) error { + source, err := slice.GetString() + if err != nil { + return err + } + + *n = decodeRevision([]byte(source)) + return nil +} + +// MarshalVPack converts RevisionUInt64 into string revision +func (n *RevisionUInt64) MarshalVPack() (velocypack.Slice, error) { + var b velocypack.Builder + + value := velocypack.NewStringValue(string(encodeRevision(*n))) + if err := b.AddValue(value); err != nil { + return nil, err + } + + return b.Slice() +} + +// GetRevisionTree retrieves the Revision tree (Merkel tree) associated with the collection. +func (c *client) GetRevisionTree(ctx context.Context, db Database, batchId, collection string) (RevisionTree, error) { + + req, err := c.conn.NewRequest("GET", path.Join("_db", db.Name(), "_api/replication/revisions/tree")) + if err != nil { + return RevisionTree{}, WithStack(err) + } + + req = req.SetQuery("batchId", batchId) + req = req.SetQuery("collection", collection) + + resp, err := c.conn.Do(ctx, req) + if err != nil { + return RevisionTree{}, WithStack(err) + } + + if err := resp.CheckStatus(200); err != nil { + return RevisionTree{}, WithStack(err) + } + + var tree RevisionTree + if err := resp.ParseBody("", &tree); err != nil { + return RevisionTree{}, WithStack(err) + } + + return tree, nil +} + +// GetRevisionsByRanges retrieves the revision IDs of documents within requested ranges. 
+func (c *client) GetRevisionsByRanges(ctx context.Context, db Database, batchId, collection string, + minMaxRevision []RevisionMinMax, resume RevisionUInt64) (RevisionRanges, error) { + + req, err := c.conn.NewRequest("PUT", path.Join("_db", db.Name(), "_api/replication/revisions/ranges")) + if err != nil { + return RevisionRanges{}, WithStack(err) + } + + req = req.SetQuery("batchId", batchId) + req = req.SetQuery("collection", collection) + if resume > 0 { + req = req.SetQuery("resume", string(encodeRevision(resume))) + } + + req, err = req.SetBodyArray(minMaxRevision, nil) + if err != nil { + return RevisionRanges{}, WithStack(err) + } + + resp, err := c.conn.Do(ctx, req) + if err != nil { + return RevisionRanges{}, WithStack(err) + } + + if err := resp.CheckStatus(200); err != nil { + return RevisionRanges{}, WithStack(err) + } + + var ranges RevisionRanges + if err := resp.ParseBody("", &ranges); err != nil { + return RevisionRanges{}, WithStack(err) + } + + return ranges, nil +} + +// GetRevisionDocuments retrieves documents by revision. 
+func (c *client) GetRevisionDocuments(ctx context.Context, db Database, batchId, collection string, + revisions Revisions) ([]map[string]interface{}, error) { + + req, err := c.conn.NewRequest("PUT", path.Join("_db", db.Name(), "_api/replication/revisions/documents")) + if err != nil { + return nil, WithStack(err) + } + + req = req.SetQuery("batchId", batchId) + req = req.SetQuery("collection", collection) + + req, err = req.SetBody(revisions) + if err != nil { + return nil, WithStack(err) + } + + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + + arrayResponse, err := resp.ParseArrayBody() + if err != nil { + return nil, WithStack(err) + } + + documents := make([]map[string]interface{}, 0, len(arrayResponse)) + for _, a := range arrayResponse { + document := map[string]interface{}{} + if err = a.ParseBody("", &document); err != nil { + return nil, WithStack(err) + } + documents = append(documents, document) + } + + return documents, nil +} diff --git a/vendor/github.com/arangodb/go-driver/transaction.go b/vendor/github.com/arangodb/go-driver/transaction.go new file mode 100644 index 00000000000..d145d2d3259 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/transaction.go @@ -0,0 +1,80 @@ +// +// DISCLAIMER +// +// Copyright 2020 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Adam Janikowski
+//
+
+package driver
+
+// TransactionOptions contains options that customize the transaction.
+type TransactionOptions struct {
+	// Transaction size limit in bytes. Honored by the RocksDB storage engine only.
+	MaxTransactionSize int
+
+	// An optional numeric value that can be used to set a timeout for waiting on collection
+	// locks. If not specified, a default value will be used.
+	// Setting lockTimeout to 0 will make ArangoDB not time out waiting for a lock.
+	LockTimeout *int
+
+	// An optional boolean flag that, if set, will force the transaction to write
+	// all data to disk before returning.
+	WaitForSync bool
+
+	// Maximum number of operations after which an intermediate commit is performed
+	// automatically. Honored by the RocksDB storage engine only.
+	IntermediateCommitCount *int
+
+	// Optional arguments passed to action.
+	Params []interface{}
+
+	// Maximum total size of operations after which an intermediate commit is
+	// performed automatically. Honored by the RocksDB storage engine only.
+	IntermediateCommitSize *int
+
+	// ReadCollections Collections that the transaction reads from.
+	ReadCollections []string
+
+	// WriteCollections Collections that the transaction writes to.
+	WriteCollections []string
+
+	// ExclusiveCollections Collections that the transaction writes exclusively to.
+ ExclusiveCollections []string +} + +type transactionRequest struct { + MaxTransactionSize int `json:"maxTransactionSize"` + LockTimeout *int `json:"lockTimeout,omitempty"` + WaitForSync bool `json:"waitForSync"` + IntermediateCommitCount *int `json:"intermediateCommitCount,omitempty"` + Params []interface{} `json:"params"` + IntermediateCommitSize *int `json:"intermediateCommitSize,omitempty"` + Action string `json:"action"` + Collections transactionCollectionsRequest `json:"collections"` +} + +type transactionCollectionsRequest struct { + Read []string `json:"read,omitempty"` + Write []string `json:"write,omitempty"` + Exclusive []string `json:"exclusive,omitempty"` +} + +type transactionResponse struct { + ArangoError + Result interface{} `json:"result"` +} diff --git a/vendor/github.com/arangodb/go-driver/user.go b/vendor/github.com/arangodb/go-driver/user.go new file mode 100644 index 00000000000..987628a9762 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/user.go @@ -0,0 +1,128 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// User provides access to a single user of a single server / cluster of servers. +type User interface { + // Name returns the name of the user. + Name() string + + // Is this an active user? 
+ IsActive() bool + + // Is a password change for this user needed? + IsPasswordChangeNeeded() bool + + // Get extra information about this user that was passed during its creation/update/replacement + Extra(result interface{}) error + + // Remove removes the user. + // If the user does not exist, a NotFoundError is returned. + Remove(ctx context.Context) error + + // Update updates individual properties of the user. + // If the user does not exist, a NotFoundError is returned. + Update(ctx context.Context, options UserOptions) error + + // Replace replaces all properties of the user. + // If the user does not exist, a NotFoundError is returned. + Replace(ctx context.Context, options UserOptions) error + + // AccessibleDatabases returns a list of all databases that can be accessed (read/write or read-only) by this user. + AccessibleDatabases(ctx context.Context) ([]Database, error) + + // SetDatabaseAccess sets the access this user has to the given database. + // Pass a `nil` database to set the default access this user has to any new database. + // This function requires ArangoDB 3.2 and up for access value `GrantReadOnly`. + SetDatabaseAccess(ctx context.Context, db Database, access Grant) error + + // GetDatabaseAccess gets the access rights for this user to the given database. + // Pass a `nil` database to get the default access this user has to any new database. + // This function requires ArangoDB 3.2 and up. + // By default this function returns the "effective" grant. + // To return the "configured" grant, pass a context configured with `WithConfigured`. + // This distinction is only relevant in ArangoDB 3.3 in the context of a readonly database. + GetDatabaseAccess(ctx context.Context, db Database) (Grant, error) + + // RemoveDatabaseAccess removes the access this user has to the given database. + // As a result the users access falls back to its default access. 
+ // If you remove default access (db==`nil`) for a user (and there are no specific access + // rules for a database), the user's access falls back to no-access. + // Pass a `nil` database to set the default access this user has to any new database. + // This function requires ArangoDB 3.2 and up. + RemoveDatabaseAccess(ctx context.Context, db Database) error + + // SetCollectionAccess sets the access this user has to a collection. + // If you pass a `Collection`, it will set access for that collection. + // If you pass a `Database`, it will set the default collection access for that database. + // If you pass `nil`, it will set the default collection access for the default database. + // This function requires ArangoDB 3.2 and up. + SetCollectionAccess(ctx context.Context, col AccessTarget, access Grant) error + + // GetCollectionAccess gets the access rights for this user to the given collection. + // If you pass a `Collection`, it will get access for that collection. + // If you pass a `Database`, it will get the default collection access for that database. + // If you pass `nil`, it will get the default collection access for the default database. + // By default this function returns the "effective" grant. + // To return the "configured" grant, pass a context configured with `WithConfigured`. + // This distinction is only relevant in ArangoDB 3.3 in the context of a readonly database. + GetCollectionAccess(ctx context.Context, col AccessTarget) (Grant, error) + + // RemoveCollectionAccess removes the access this user has to a collection. + // If you pass a `Collection`, it will removes access for that collection. + // If you pass a `Database`, it will removes the default collection access for that database. + // If you pass `nil`, it will removes the default collection access for the default database. + // This function requires ArangoDB 3.2 and up. 
+ RemoveCollectionAccess(ctx context.Context, col AccessTarget) error + + // GrantReadWriteAccess grants this user read/write access to the given database. + // + // Deprecated: use GrantDatabaseReadWriteAccess instead. + GrantReadWriteAccess(ctx context.Context, db Database) error + + // RevokeAccess revokes this user access to the given database. + // + // Deprecated: use `SetDatabaseAccess(ctx, db, GrantNone)` instead. + RevokeAccess(ctx context.Context, db Database) error +} + +// Grant specifies access rights for an object +type Grant string + +const ( + // GrantReadWrite indicates read/write access to an object + GrantReadWrite Grant = "rw" + // GrantReadOnly indicates read-only access to an object + GrantReadOnly Grant = "ro" + // GrantNone indicates no access to an object + GrantNone Grant = "none" +) + +// AccessTarget is implemented by Database & Collection and it used to +// get/set/remove collection permissions. +type AccessTarget interface { + // Name returns the name of the database/collection. + Name() string +} diff --git a/vendor/github.com/arangodb/go-driver/user_impl.go b/vendor/github.com/arangodb/go-driver/user_impl.go new file mode 100644 index 00000000000..243081f64b5 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/user_impl.go @@ -0,0 +1,401 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// newUser creates a new User implementation. +func newUser(data userData, conn Connection) (User, error) { + if data.Name == "" { + return nil, WithStack(InvalidArgumentError{Message: "data.Name is empty"}) + } + if conn == nil { + return nil, WithStack(InvalidArgumentError{Message: "conn is nil"}) + } + return &user{ + data: data, + conn: conn, + }, nil +} + +type user struct { + data userData + conn Connection +} + +type userData struct { + Name string `json:"user,omitempty"` + Active bool `json:"active,omitempty"` + Extra *RawObject `json:"extra,omitempty"` + ChangePassword bool `json:"changePassword,omitempty"` + ArangoError +} + +// relPath creates the relative path to this index (`_api/user/`) +func (u *user) relPath() string { + escapedName := pathEscape(u.data.Name) + return path.Join("_api", "user", escapedName) +} + +// Name returns the name of the user. +func (u *user) Name() string { + return u.data.Name +} + +// Is this an active user? +func (u *user) IsActive() bool { + return u.data.Active +} + +// Is a password change for this user needed? +func (u *user) IsPasswordChangeNeeded() bool { + return u.data.ChangePassword +} + +// Get extra information about this user that was passed during its creation/update/replacement +func (u *user) Extra(result interface{}) error { + if u.data.Extra == nil { + return nil + } + if err := u.conn.Unmarshal(*u.data.Extra, result); err != nil { + return WithStack(err) + } + return nil +} + +// Remove removes the entire user. +// If the user does not exist, a NotFoundError is returned. 
+func (u *user) Remove(ctx context.Context) error { + req, err := u.conn.NewRequest("DELETE", u.relPath()) + if err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(202); err != nil { + return WithStack(err) + } + return nil +} + +// Update updates individual properties of the user. +// If the user does not exist, a NotFoundError is returned. +func (u *user) Update(ctx context.Context, options UserOptions) error { + req, err := u.conn.NewRequest("PATCH", u.relPath()) + if err != nil { + return WithStack(err) + } + if _, err := req.SetBody(options); err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + var data userData + if err := resp.ParseBody("", &data); err != nil { + return WithStack(err) + } + u.data = data + return nil +} + +// Replace replaces all properties of the user. +// If the user does not exist, a NotFoundError is returned. +func (u *user) Replace(ctx context.Context, options UserOptions) error { + req, err := u.conn.NewRequest("PUT", u.relPath()) + if err != nil { + return WithStack(err) + } + if _, err := req.SetBody(options); err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + var data userData + if err := resp.ParseBody("", &data); err != nil { + return WithStack(err) + } + u.data = data + return nil +} + +type userAccessibleDatabasesResponse struct { + Result map[string]string `json:"result"` +} + +// AccessibleDatabases returns a list of all databases that can be accessed by this user. 
+func (u *user) AccessibleDatabases(ctx context.Context) ([]Database, error) { + req, err := u.conn.NewRequest("GET", path.Join(u.relPath(), "database")) + if err != nil { + return nil, WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data userAccessibleDatabasesResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]Database, 0, len(data.Result)) + for name := range data.Result { + db, err := newDatabase(name, u.conn) + if err != nil { + return nil, WithStack(err) + } + result = append(result, db) + } + return result, nil +} + +// SetDatabaseAccess sets the access this user has to the given database. +// Pass a `nil` database to set the default access this user has to any new database. +// This function requires ArangoDB 3.2 and up for access value `GrantReadOnly`. +func (u *user) SetDatabaseAccess(ctx context.Context, db Database, access Grant) error { + dbName, _, err := getDatabaseAndCollectionName(db) + if err != nil { + return WithStack(err) + } + escapedDbName := pathEscape(dbName) + req, err := u.conn.NewRequest("PUT", path.Join(u.relPath(), "database", escapedDbName)) + if err != nil { + return WithStack(err) + } + input := struct { + Grant Grant `json:"grant"` + }{ + Grant: access, + } + if _, err := req.SetBody(input); err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +type getAccessResponse struct { + Result string `json:"result"` +} + +// GetDatabaseAccess gets the access rights for this user to the given database. +// Pass a `nil` database to get the default access this user has to any new database. +// This function requires ArangoDB 3.2 and up. 
+func (u *user) GetDatabaseAccess(ctx context.Context, db Database) (Grant, error) { + dbName, _, err := getDatabaseAndCollectionName(db) + if err != nil { + return GrantNone, WithStack(err) + } + escapedDbName := pathEscape(dbName) + req, err := u.conn.NewRequest("GET", path.Join(u.relPath(), "database", escapedDbName)) + if err != nil { + return GrantNone, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := u.conn.Do(ctx, req) + if err != nil { + return GrantNone, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return GrantNone, WithStack(err) + } + + var data getAccessResponse + if err := resp.ParseBody("", &data); err != nil { + return GrantNone, WithStack(err) + } + return Grant(data.Result), nil +} + +// RemoveDatabaseAccess removes the access this user has to the given database. +// As a result the users access falls back to its default access. +// If you remove default access (db==`nil`) for a user (and there are no specific access +// rules for a database), the user's access falls back to no-access. +// Pass a `nil` database to set the default access this user has to any new database. +// This function requires ArangoDB 3.2 and up. +func (u *user) RemoveDatabaseAccess(ctx context.Context, db Database) error { + dbName, _, err := getDatabaseAndCollectionName(db) + if err != nil { + return WithStack(err) + } + escapedDbName := pathEscape(dbName) + req, err := u.conn.NewRequest("DELETE", path.Join(u.relPath(), "database", escapedDbName)) + if err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200, 202); err != nil { + return WithStack(err) + } + return nil +} + +// SetCollectionAccess sets the access this user has to a collection. +// If you pass a `Collection`, it will set access for that collection. +// If you pass a `Database`, it will set the default collection access for that database. 
+// If you pass `nil`, it will set the default collection access for the default database. +// This function requires ArangoDB 3.2 and up. +func (u *user) SetCollectionAccess(ctx context.Context, col AccessTarget, access Grant) error { + dbName, colName, err := getDatabaseAndCollectionName(col) + if err != nil { + return WithStack(err) + } + escapedDbName := pathEscape(dbName) + escapedColName := pathEscape(colName) + req, err := u.conn.NewRequest("PUT", path.Join(u.relPath(), "database", escapedDbName, escapedColName)) + if err != nil { + return WithStack(err) + } + input := struct { + Grant Grant `json:"grant"` + }{ + Grant: access, + } + if _, err := req.SetBody(input); err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// GetCollectionAccess gets the access rights for this user to the given collection. +// If you pass a `Collection`, it will get access for that collection. +// If you pass a `Database`, it will get the default collection access for that database. +// If you pass `nil`, it will get the default collection access for the default database. 
+func (u *user) GetCollectionAccess(ctx context.Context, col AccessTarget) (Grant, error) { + dbName, colName, err := getDatabaseAndCollectionName(col) + if err != nil { + return GrantNone, WithStack(err) + } + escapedDbName := pathEscape(dbName) + escapedColName := pathEscape(colName) + req, err := u.conn.NewRequest("GET", path.Join(u.relPath(), "database", escapedDbName, escapedColName)) + if err != nil { + return GrantNone, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := u.conn.Do(ctx, req) + if err != nil { + return GrantNone, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return GrantNone, WithStack(err) + } + + var data getAccessResponse + if err := resp.ParseBody("", &data); err != nil { + return GrantNone, WithStack(err) + } + return Grant(data.Result), nil +} + +// RemoveCollectionAccess removes the access this user has to a collection. +// If you pass a `Collection`, it will removes access for that collection. +// If you pass a `Database`, it will removes the default collection access for that database. +// If you pass `nil`, it will removes the default collection access for the default database. +// This function requires ArangoDB 3.2 and up. +func (u *user) RemoveCollectionAccess(ctx context.Context, col AccessTarget) error { + dbName, colName, err := getDatabaseAndCollectionName(col) + if err != nil { + return WithStack(err) + } + escapedDbName := pathEscape(dbName) + escapedColName := pathEscape(colName) + req, err := u.conn.NewRequest("DELETE", path.Join(u.relPath(), "database", escapedDbName, escapedColName)) + if err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200, 202); err != nil { + return WithStack(err) + } + return nil +} + +// getDatabaseAndCollectionName returns database-name, collection-name from given access target. 
+func getDatabaseAndCollectionName(col AccessTarget) (string, string, error) { + if col == nil { + return "*", "*", nil + } + if x, ok := col.(Collection); ok { + return x.Database().Name(), x.Name(), nil + } + if x, ok := col.(Database); ok { + return x.Name(), "*", nil + } + return "", "", WithStack(InvalidArgumentError{"Need Collection or Database or nil"}) +} + +// GrantReadWriteAccess grants this user read/write access to the given database. +// +// Deprecated: use GrantDatabaseReadWriteAccess instead. +func (u *user) GrantReadWriteAccess(ctx context.Context, db Database) error { + if err := u.SetDatabaseAccess(ctx, db, GrantReadWrite); err != nil { + return WithStack(err) + } + return nil +} + +// RevokeAccess revokes this user access to the given database. +// +// Deprecated: use `SetDatabaseAccess(ctx, db, GrantNone)` instead. +func (u *user) RevokeAccess(ctx context.Context, db Database) error { + if err := u.SetDatabaseAccess(ctx, db, GrantNone); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/util/doc.go b/vendor/github.com/arangodb/go-driver/util/doc.go new file mode 100644 index 00000000000..b16feff9f90 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/util/doc.go @@ -0,0 +1,26 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +/* +Package util provides some helper methods for the go-driver (it is unlikely that you need this package directly). +*/ +package util diff --git a/vendor/github.com/arangodb/go-driver/util/endpoints.go b/vendor/github.com/arangodb/go-driver/util/endpoints.go new file mode 100644 index 00000000000..aa2263249e0 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/util/endpoints.go @@ -0,0 +1,38 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package util + +import "strings" + +var ( + urlFixer = strings.NewReplacer( + "tcp://", "http://", + "ssl://", "https://", + ) +) + +// FixupEndpointURLScheme changes endpoint URL schemes used by arangod to ones used by go. +// E.g. 
"tcp://localhost:8529" -> "http://localhost:8529" +func FixupEndpointURLScheme(u string) string { + return urlFixer.Replace(u) +} diff --git a/vendor/github.com/arangodb/go-driver/version.go b/vendor/github.com/arangodb/go-driver/version.go new file mode 100644 index 00000000000..f52e85daebc --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/version.go @@ -0,0 +1,109 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "strconv" + "strings" +) + +// Version holds a server version string. The string has the format "major.minor.sub". +// Major and minor will be numeric, and sub may contain a number or a textual version. +type Version string + +// Major returns the major part of the version +// E.g. "3.1.7" -> 3 +func (v Version) Major() int { + parts := strings.Split(string(v), ".") + result, _ := strconv.Atoi(parts[0]) + return result +} + +// Minor returns the minor part of the version. +// E.g. "3.1.7" -> 1 +func (v Version) Minor() int { + parts := strings.Split(string(v), ".") + if len(parts) >= 2 { + result, _ := strconv.Atoi(parts[1]) + return result + } + return 0 +} + +// Sub returns the sub part of the version. +// E.g. 
"3.1.7" -> "7" +func (v Version) Sub() string { + parts := strings.SplitN(string(v), ".", 3) + if len(parts) == 3 { + return parts[2] + } + return "" +} + +// SubInt returns the sub part of the version as integer. +// The bool return value indicates if the sub part is indeed a number. +// E.g. "3.1.7" -> 7, true +// E.g. "3.1.foo" -> 0, false +func (v Version) SubInt() (int, bool) { + result, err := strconv.Atoi(v.Sub()) + return result, err == nil +} + +// CompareTo returns an integer comparing two version. +// The result will be 0 if v==other, -1 if v < other, and +1 if v > other. +// If major & minor parts are equal and sub part is not a number, +// the sub part will be compared using lexicographical string comparison. +func (v Version) CompareTo(other Version) int { + a := v.Major() + b := other.Major() + if a < b { + return -1 + } + if a > b { + return 1 + } + + a = v.Minor() + b = other.Minor() + if a < b { + return -1 + } + if a > b { + return 1 + } + + a, aIsInt := v.SubInt() + b, bIsInt := other.SubInt() + + if !aIsInt || !bIsInt { + // Do a string comparison + return strings.Compare(v.Sub(), other.Sub()) + } + if a < b { + return -1 + } + if a > b { + return 1 + } + return 0 +} diff --git a/vendor/github.com/arangodb/go-driver/vertex_collection_documents_impl.go b/vendor/github.com/arangodb/go-driver/vertex_collection_documents_impl.go new file mode 100644 index 00000000000..9a32ee311fe --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/vertex_collection_documents_impl.go @@ -0,0 +1,562 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "fmt" + "path" + "reflect" +) + +// DocumentExists checks if a document with given key exists in the collection. +func (c *vertexCollection) DocumentExists(ctx context.Context, key string) (bool, error) { + if result, err := c.rawCollection().DocumentExists(ctx, key); err != nil { + return false, WithStack(err) + } else { + return result, nil + } +} + +// ReadDocument reads a single document with given key from the collection. +// The document data is stored into result, the document meta data is returned. +// If no document exists with given key, a NotFoundError is returned. 
+func (c *vertexCollection) ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error) { + meta, _, err := c.readDocument(ctx, key, result) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *vertexCollection) readDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("GET", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + // Concerns: ReadDocuments reads multiple documents via multiple calls to readDocument (this function). + // Currently with AllowDirtyReads the wasDirtyFlag is only set according to the last read request. + loadContextResponseValues(cs, resp) + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("vertex", &meta); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + // Parse result + if result != nil { + if err := resp.ParseBody("vertex", result); err != nil { + return meta, contextSettings{}, WithStack(err) + } + } + return meta, cs, nil +} + +// ReadDocuments reads multiple documents with given keys from the collection. +// The documents data is stored into elements of the given results slice, +// the documents meta data is returned. +// If no document exists with a given key, a NotFoundError is returned at its errors index. 
+func (c *vertexCollection) ReadDocuments(ctx context.Context, keys []string, results interface{}) (DocumentMetaSlice, ErrorSlice, error) { + resultsVal := reflect.ValueOf(results) + switch resultsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("results data must be of kind Array, got %s", resultsVal.Kind())}) + } + if keys == nil { + return nil, nil, WithStack(InvalidArgumentError{Message: "keys nil"}) + } + resultCount := resultsVal.Len() + if len(keys) != resultCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", resultCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + metas := make(DocumentMetaSlice, resultCount) + errs := make(ErrorSlice, resultCount) + silent := false + for i := 0; i < resultCount; i++ { + result := resultsVal.Index(i).Addr() + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + key := keys[i] + meta, cs, err := c.readDocument(ctx, key, result.Interface()) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// CreateDocument creates a single document in the collection. +// The document data is loaded from the given document, the document meta data is returned. +// If the document data already contains a `_key` field, this will be used as key of the new document, +// otherwise a unique key is created. +// A ConflictError is returned when a `_key` field contains a duplicate key, other any other field violates an index constraint. +// To return the NEW document, prepare a context with `WithReturnNew`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. 
+func (c *vertexCollection) CreateDocument(ctx context.Context, document interface{}) (DocumentMeta, error) { + meta, _, err := c.createDocument(ctx, document) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *vertexCollection) createDocument(ctx context.Context, document interface{}) (DocumentMeta, contextSettings, error) { + if document == nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "document nil"}) + } + req, err := c.conn.NewRequest("POST", c.relPath()) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if _, err := req.SetBody(document); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("vertex", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// CreateDocuments creates multiple documents in the collection. +// The document data is loaded from the given documents slice, the documents meta data is returned. +// If a documents element already contains a `_key` field, this will be used as key of the new document, +// otherwise a unique key is created. +// If a documents element contains a `_key` field with a duplicate key, other any other field violates an index constraint, +// a ConflictError is returned in its inded in the errors slice. +// To return the NEW documents, prepare a context with `WithReturnNew`. 
The data argument passed to `WithReturnNew` must be +// a slice with the same number of entries as the `documents` slice. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If the create request itself fails or one of the arguments is invalid, an error is returned. +func (c *vertexCollection) CreateDocuments(ctx context.Context, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + metas := make(DocumentMetaSlice, documentCount) + errs := make(ErrorSlice, documentCount) + silent := false + for i := 0; i < documentCount; i++ { + doc := documentsVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + meta, cs, err := c.createDocument(ctx, doc.Interface()) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// UpdateDocument updates a single document with given key in the collection. +// The document meta data is returned. +// To return the NEW document, prepare a context with `WithReturnNew`. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. 
+func (c *vertexCollection) UpdateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, error) { + meta, _, err := c.updateDocument(ctx, key, update) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *vertexCollection) updateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if update == nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "update nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PATCH", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if _, err := req.SetBody(update); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(200, 201, 202); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("vertex", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// UpdateDocuments updates multiple document with given keys in the collection. +// The updates are loaded from the given updates slice, the documents meta data are returned. 
+// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *vertexCollection) UpdateDocuments(ctx context.Context, keys []string, updates interface{}) (DocumentMetaSlice, ErrorSlice, error) { + updatesVal := reflect.ValueOf(updates) + switch updatesVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("updates data must be of kind Array, got %s", updatesVal.Kind())}) + } + updateCount := updatesVal.Len() + if keys != nil { + if len(keys) != updateCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", updateCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + metas := make(DocumentMetaSlice, updateCount) + errs := make(ErrorSlice, updateCount) + silent := false + for i := 0; i < updateCount; i++ { + update := updatesVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + var key string + if keys != nil { + key = keys[i] + } else { + var err error + key, err = getKeyFromDocument(update) + if err != nil { + errs[i] = err + continue + } + } + meta, cs, err := c.updateDocument(ctx, key, update.Interface()) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// ReplaceDocument replaces a single document with given key in the collection with the document given in the document argument. +// The document meta data is returned. 
+// To return the NEW document, prepare a context with `WithReturnNew`. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. +func (c *vertexCollection) ReplaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, error) { + meta, _, err := c.replaceDocument(ctx, key, document) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *vertexCollection) replaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if document == nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "document nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if _, err := req.SetBody(document); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("vertex", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); 
err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// ReplaceDocuments replaces multiple documents with given keys in the collection with the documents given in the documents argument. +// The replacements are loaded from the given documents slice, the documents meta data are returned. +// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *vertexCollection) ReplaceDocuments(ctx context.Context, keys []string, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + if keys != nil { + if len(keys) != documentCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", documentCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + metas := make(DocumentMetaSlice, documentCount) + errs := make(ErrorSlice, documentCount) + silent := false + for i := 0; i < documentCount; i++ { + doc := documentsVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + var key string + if keys != nil { + key = keys[i] + } else { + var err error + key, err = getKeyFromDocument(doc) + if err != nil { + errs[i] = err + continue + } + } + meta, cs, err := c.replaceDocument(ctx, key, doc.Interface()) + if cs.Silent { + 
silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// RemoveDocument removes a single document with given key from the collection. +// The document meta data is returned. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. +func (c *vertexCollection) RemoveDocument(ctx context.Context, key string) (DocumentMeta, error) { + meta, _, err := c.removeDocument(ctx, key) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *vertexCollection) removeDocument(ctx context.Context, key string) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("DELETE", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + if cs.ReturnOld != nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "ReturnOld is not support when removing vertices"}) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(200, 202); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("vertex", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// RemoveDocuments 
removes multiple documents with given keys from the collection. +// The document meta data are returned. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *vertexCollection) RemoveDocuments(ctx context.Context, keys []string) (DocumentMetaSlice, ErrorSlice, error) { + keyCount := len(keys) + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + metas := make(DocumentMetaSlice, keyCount) + errs := make(ErrorSlice, keyCount) + silent := false + for i := 0; i < keyCount; i++ { + key := keys[i] + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + meta, cs, err := c.removeDocument(ctx, key) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// ImportDocuments imports one or more documents into the collection. +// The document data is loaded from the given documents argument, statistics are returned. +// The documents argument can be one of the following: +// - An array of structs: All structs will be imported as individual documents. +// - An array of maps: All maps will be imported as individual documents. +// To wait until all documents have been synced to disk, prepare a context with `WithWaitForSync`. +// To return details about documents that could not be imported, prepare a context with `WithImportDetails`. 
+func (c *vertexCollection) ImportDocuments(ctx context.Context, documents interface{}, options *ImportDocumentOptions) (ImportDocumentStatistics, error) { + stats, err := c.rawCollection().ImportDocuments(ctx, documents, options) + if err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + return stats, nil +} diff --git a/vendor/github.com/arangodb/go-driver/vertex_collection_impl.go b/vendor/github.com/arangodb/go-driver/vertex_collection_impl.go new file mode 100644 index 00000000000..b6c8c7bb39f --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/vertex_collection_impl.go @@ -0,0 +1,178 @@ +// +// DISCLAIMER +// +// Copyright 2017-2021 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// Author Tomasz Mielech +// + +package driver + +import ( + "context" + "path" +) + +// newVertexCollection creates a new Vertex Collection implementation. 
+func newVertexCollection(name string, g *graph) (Collection, error) { + if name == "" { + return nil, WithStack(InvalidArgumentError{Message: "name is empty"}) + } + if g == nil { + return nil, WithStack(InvalidArgumentError{Message: "g is nil"}) + } + return &vertexCollection{ + name: name, + g: g, + conn: g.db.conn, + }, nil +} + +type vertexCollection struct { + name string + g *graph + conn Connection +} + +// relPath creates the relative path to this edge collection (`_db//_api/gharial//vertex/`) +func (c *vertexCollection) relPath() string { + escapedName := pathEscape(c.name) + return path.Join(c.g.relPath(), "vertex", escapedName) +} + +// Name returns the name of the edge collection. +func (c *vertexCollection) Name() string { + return c.name +} + +// Database returns the database containing the collection. +func (c *vertexCollection) Database() Database { + return c.g.db +} + +// rawCollection returns a standard document implementation of Collection +// for this vertex collection. +func (c *vertexCollection) rawCollection() Collection { + result, _ := newCollection(c.name, c.g.db) + return result +} + +// Status fetches the current status of the collection. +func (c *vertexCollection) Status(ctx context.Context) (CollectionStatus, error) { + result, err := c.rawCollection().Status(ctx) + if err != nil { + return CollectionStatus(0), WithStack(err) + } + return result, nil +} + +// Count fetches the number of document in the collection. +func (c *vertexCollection) Count(ctx context.Context) (int64, error) { + result, err := c.rawCollection().Count(ctx) + if err != nil { + return 0, WithStack(err) + } + return result, nil +} + +// Statistics returns the number of documents and additional statistical information about the collection. 
+func (c *vertexCollection) Statistics(ctx context.Context) (CollectionStatistics, error) { + result, err := c.rawCollection().Statistics(ctx) + if err != nil { + return CollectionStatistics{}, WithStack(err) + } + return result, nil +} + +// Revision fetches the revision ID of the collection. +// The revision ID is a server-generated string that clients can use to check whether data +// in a collection has changed since the last revision check. +func (c *vertexCollection) Revision(ctx context.Context) (string, error) { + result, err := c.rawCollection().Revision(ctx) + if err != nil { + return "", WithStack(err) + } + return result, nil +} + +// Properties fetches extended information about the collection. +func (c *vertexCollection) Properties(ctx context.Context) (CollectionProperties, error) { + result, err := c.rawCollection().Properties(ctx) + if err != nil { + return CollectionProperties{}, WithStack(err) + } + return result, nil +} + +// SetProperties changes properties of the collection. +func (c *vertexCollection) SetProperties(ctx context.Context, options SetCollectionPropertiesOptions) error { + if err := c.rawCollection().SetProperties(ctx, options); err != nil { + return WithStack(err) + } + return nil +} + +// Shards fetches shards information of the collection. +func (c *vertexCollection) Shards(ctx context.Context, details bool) (CollectionShards, error) { + result, err := c.rawCollection().Shards(ctx, details) + if err != nil { + return result, WithStack(err) + } + return result, nil +} + +// Load the collection into memory. +func (c *vertexCollection) Load(ctx context.Context) error { + if err := c.rawCollection().Load(ctx); err != nil { + return WithStack(err) + } + return nil +} + +// Unload unloads the collection from memory. +func (c *vertexCollection) Unload(ctx context.Context) error { + if err := c.rawCollection().Unload(ctx); err != nil { + return WithStack(err) + } + return nil +} + +// Remove removes the entire collection. 
+// If the collection does not exist, a NotFoundError is returned. +func (c *vertexCollection) Remove(ctx context.Context) error { + req, err := c.conn.NewRequest("DELETE", c.relPath()) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return WithStack(err) + } + return nil +} + +// Truncate removes all documents from the collection, but leaves the indexes intact. +func (c *vertexCollection) Truncate(ctx context.Context) error { + if err := c.rawCollection().Truncate(ctx); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/vertex_collection_indexes_impl.go b/vendor/github.com/arangodb/go-driver/vertex_collection_indexes_impl.go new file mode 100644 index 00000000000..8cc08539f96 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/vertex_collection_indexes_impl.go @@ -0,0 +1,148 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// Index opens a connection to an existing index within the collection. +// If no index with given name exists, an NotFoundError is returned. 
+func (c *vertexCollection) Index(ctx context.Context, name string) (Index, error) { + result, err := c.rawCollection().Index(ctx, name) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// IndexExists returns true if an index with given name exists within the collection. +func (c *vertexCollection) IndexExists(ctx context.Context, name string) (bool, error) { + result, err := c.rawCollection().IndexExists(ctx, name) + if err != nil { + return false, WithStack(err) + } + return result, nil +} + +// Indexes returns a list of all indexes in the collection. +func (c *vertexCollection) Indexes(ctx context.Context) ([]Index, error) { + result, err := c.rawCollection().Indexes(ctx) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// Deprecated: since 3.10 version. Use ArangoSearch view instead. +// EnsureFullTextIndex creates a fulltext index in the collection, if it does not already exist. +// +// Fields is a slice of attribute names. Currently, the slice is limited to exactly one attribute. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *vertexCollection) EnsureFullTextIndex(ctx context.Context, fields []string, options *EnsureFullTextIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureFullTextIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureGeoIndex creates a hash index in the collection, if it does not already exist. +// +// Fields is a slice with one or two attribute paths. If it is a slice with one attribute path location, +// then a geo-spatial index on all documents is created using location as path to the coordinates. +// The value of the attribute must be a slice with at least two double values. The slice must contain the latitude (first value) +// and the longitude (second value). 
All documents, which do not have the attribute path or with value that are not suitable, are ignored. +// If it is a slice with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created +// using latitude and longitude as paths the latitude and the longitude. The value of the attribute latitude and of the +// attribute longitude must a double. All documents, which do not have the attribute paths or which values are not suitable, are ignored. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *vertexCollection) EnsureGeoIndex(ctx context.Context, fields []string, options *EnsureGeoIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureGeoIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureHashIndex creates a hash index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *vertexCollection) EnsureHashIndex(ctx context.Context, fields []string, options *EnsureHashIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureHashIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). 
+func (c *vertexCollection) EnsurePersistentIndex(ctx context.Context, fields []string, options *EnsurePersistentIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsurePersistentIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureSkipListIndex creates a skiplist index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *vertexCollection) EnsureSkipListIndex(ctx context.Context, fields []string, options *EnsureSkipListIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureSkipListIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureTTLIndex creates a TLL collection, if it does not already exist. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *vertexCollection) EnsureTTLIndex(ctx context.Context, field string, expireAfter int, options *EnsureTTLIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureTTLIndex(ctx, field, expireAfter, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureZKDIndex creates a ZKD index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). 
+func (c *vertexCollection) EnsureZKDIndex(ctx context.Context, fields []string, options *EnsureZKDIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureZKDIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureInvertedIndex creates an inverted index in the collection, if it does not already exist. +// Available in ArangoDB 3.10 and later. +func (c *vertexCollection) EnsureInvertedIndex(ctx context.Context, options *InvertedIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureInvertedIndex(ctx, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} diff --git a/vendor/github.com/arangodb/go-driver/view.go b/vendor/github.com/arangodb/go-driver/view.go new file mode 100644 index 00000000000..0e703132634 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/view.go @@ -0,0 +1,55 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" +) + +// View provides access to the information of a view. +// Views are only available in ArangoDB 3.4 and higher. +type View interface { + // Name returns the name of the view. + Name() string + + // Type returns the type of this view. 
+ Type() ViewType + + // ArangoSearchView returns this view as an ArangoSearch view. + // When the type of the view is not ArangoSearch, an error is returned. + ArangoSearchView() (ArangoSearchView, error) + + // ArangoSearchViewAlias returns this view as an ArangoSearch view alias. + // When the type of the view is not ArangoSearch alias, an error is returned. + ArangoSearchViewAlias() (ArangoSearchViewAlias, error) + + // Database returns the database containing the view. + Database() Database + + // Rename renames the view (SINGLE server only). + Rename(ctx context.Context, newName string) error + + // Remove removes the entire view. + // If the view does not exist, a NotFoundError is returned. + Remove(ctx context.Context) error +} diff --git a/vendor/github.com/arangodb/go-driver/view_arangosearch.go b/vendor/github.com/arangodb/go-driver/view_arangosearch.go new file mode 100644 index 00000000000..92a79e7d4f4 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/view_arangosearch.go @@ -0,0 +1,493 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" +) + +// ArangoSearchView provides access to the information of a view. +// Views are only available in ArangoDB 3.4 and higher. 
+type ArangoSearchView interface { + // View Includes generic View functions + View + + // Properties fetches extended information about the view. + Properties(ctx context.Context) (ArangoSearchViewProperties, error) + + // SetProperties changes properties of the view. + SetProperties(ctx context.Context, options ArangoSearchViewProperties) error +} + +// ArangoSearchAnalyzerType specifies type of analyzer +type ArangoSearchAnalyzerType string + +const ( + // ArangoSearchAnalyzerTypeIdentity treat value as atom (no transformation) + ArangoSearchAnalyzerTypeIdentity ArangoSearchAnalyzerType = "identity" + // ArangoSearchAnalyzerTypeDelimiter split into tokens at user-defined character + ArangoSearchAnalyzerTypeDelimiter ArangoSearchAnalyzerType = "delimiter" + // ArangoSearchAnalyzerTypeStem apply stemming to the value as a whole + ArangoSearchAnalyzerTypeStem ArangoSearchAnalyzerType = "stem" + // ArangoSearchAnalyzerTypeNorm apply normalization to the value as a whole + ArangoSearchAnalyzerTypeNorm ArangoSearchAnalyzerType = "norm" + // ArangoSearchAnalyzerTypeNGram create n-grams from value with user-defined lengths + ArangoSearchAnalyzerTypeNGram ArangoSearchAnalyzerType = "ngram" + // ArangoSearchAnalyzerTypeText tokenize into words, optionally with stemming, normalization and stop-word filtering + ArangoSearchAnalyzerTypeText ArangoSearchAnalyzerType = "text" + // ArangoSearchAnalyzerTypeAQL an Analyzer capable of running a restricted AQL query to perform data manipulation / filtering. + ArangoSearchAnalyzerTypeAQL ArangoSearchAnalyzerType = "aql" + // ArangoSearchAnalyzerTypePipeline an Analyzer capable of chaining effects of multiple Analyzers into one. The pipeline is a list of Analyzers, where the output of an Analyzer is passed to the next for further processing. The final token value is determined by last Analyzer in the pipeline. 
+ ArangoSearchAnalyzerTypePipeline ArangoSearchAnalyzerType = "pipeline" + // ArangoSearchAnalyzerTypeStopwords an Analyzer capable of removing specified tokens from the input. + ArangoSearchAnalyzerTypeStopwords ArangoSearchAnalyzerType = "stopwords" + // ArangoSearchAnalyzerTypeGeoJSON an Analyzer capable of breaking up a GeoJSON object into a set of indexable tokens for further usage with ArangoSearch Geo functions. + ArangoSearchAnalyzerTypeGeoJSON ArangoSearchAnalyzerType = "geojson" + // ArangoSearchAnalyzerTypeGeoPoint an Analyzer capable of breaking up JSON object describing a coordinate into a set of indexable tokens for further usage with ArangoSearch Geo functions. + ArangoSearchAnalyzerTypeGeoPoint ArangoSearchAnalyzerType = "geopoint" + // ArangoSearchAnalyzerTypeSegmentation an Analyzer capable of breaking up the input text into tokens in a language-agnostic manner + ArangoSearchAnalyzerTypeSegmentation ArangoSearchAnalyzerType = "segmentation" + // ArangoSearchAnalyzerTypeCollation an Analyzer capable of converting the input into a set of language-specific tokens + ArangoSearchAnalyzerTypeCollation ArangoSearchAnalyzerType = "collation" + // ArangoSearchAnalyzerTypeClassification An Analyzer capable of classifying tokens in the input text. (EE only) + ArangoSearchAnalyzerTypeClassification ArangoSearchAnalyzerType = "classification" + // ArangoSearchAnalyzerTypeNearestNeighbors An Analyzer capable of finding nearest neighbors of tokens in the input. (EE only) + ArangoSearchAnalyzerTypeNearestNeighbors ArangoSearchAnalyzerType = "nearest_neighbors" + // ArangoSearchAnalyzerTypeMinhash an analyzer which is capable of evaluating so called MinHash signatures as a stream of tokens. 
(EE only) + ArangoSearchAnalyzerTypeMinhash ArangoSearchAnalyzerType = "minhash" +) + +// ArangoSearchAnalyzerFeature specifies a feature to an analyzer +type ArangoSearchAnalyzerFeature string + +const ( + // ArangoSearchAnalyzerFeatureFrequency how often a term is seen, required for PHRASE() + ArangoSearchAnalyzerFeatureFrequency ArangoSearchAnalyzerFeature = "frequency" + // ArangoSearchAnalyzerFeatureNorm the field normalization factor + ArangoSearchAnalyzerFeatureNorm ArangoSearchAnalyzerFeature = "norm" + // ArangoSearchAnalyzerFeaturePosition sequentially increasing term position, required for PHRASE(). If present then the frequency feature is also required + ArangoSearchAnalyzerFeaturePosition ArangoSearchAnalyzerFeature = "position" + // ArangoSearchAnalyzerFeatureOffset can be specified if 'position' feature is set + ArangoSearchAnalyzerFeatureOffset ArangoSearchAnalyzerFeature = "offset" +) + +type ArangoSearchCaseType string + +const ( + // ArangoSearchCaseUpper to convert to all lower-case characters + ArangoSearchCaseUpper ArangoSearchCaseType = "upper" + // ArangoSearchCaseLower to convert to all upper-case characters + ArangoSearchCaseLower ArangoSearchCaseType = "lower" + // ArangoSearchCaseNone to not change character case (default) + ArangoSearchCaseNone ArangoSearchCaseType = "none" +) + +type ArangoSearchBreakType string + +const ( + // ArangoSearchBreakTypeAll to return all tokens + ArangoSearchBreakTypeAll ArangoSearchBreakType = "all" + // ArangoSearchBreakTypeAlpha to return tokens composed of alphanumeric characters only (default) + ArangoSearchBreakTypeAlpha ArangoSearchBreakType = "alpha" + // ArangoSearchBreakTypeGraphic to return tokens composed of non-whitespace characters only + ArangoSearchBreakTypeGraphic ArangoSearchBreakType = "graphic" +) + +type ArangoSearchNGramStreamType string + +const ( + // ArangoSearchNGramStreamBinary used by NGram. 
Default value + ArangoSearchNGramStreamBinary ArangoSearchNGramStreamType = "binary" + // ArangoSearchNGramStreamUTF8 used by NGram + ArangoSearchNGramStreamUTF8 ArangoSearchNGramStreamType = "utf8" +) + +// ArangoSearchEdgeNGram specifies options for the edgeNGram text analyzer. +// More information can be found here: https://www.arangodb.com/docs/stable/arangosearch-analyzers.html#text +type ArangoSearchEdgeNGram struct { + // Min used by Text + Min *int64 `json:"min,omitempty"` + // Max used by Text + Max *int64 `json:"max,omitempty"` + // PreserveOriginal used by Text + PreserveOriginal *bool `json:"preserveOriginal,omitempty"` +} + +// ArangoSearchAnalyzerProperties specifies options for the analyzer. Which fields are required and +// respected depends on the analyzer type. +// more information can be found here: https://www.arangodb.com/docs/stable/arangosearch-analyzers.html#analyzer-properties +type ArangoSearchAnalyzerProperties struct { + // Locale used by Stem, Norm, Text + Locale string `json:"locale,omitempty"` + // Delimiter used by Delimiter + Delimiter string `json:"delimiter,omitempty"` + // Accent used by Norm, Text + Accent *bool `json:"accent,omitempty"` + // Case used by Norm, Text, Segmentation + Case ArangoSearchCaseType `json:"case,omitempty"` + + // EdgeNGram used by Text + EdgeNGram *ArangoSearchEdgeNGram `json:"edgeNgram,omitempty"` + + // Min used by NGram + Min *int64 `json:"min,omitempty"` + // Max used by NGram + Max *int64 `json:"max,omitempty"` + // PreserveOriginal used by NGram + PreserveOriginal *bool `json:"preserveOriginal,omitempty"` + + // StartMarker used by NGram + StartMarker *string `json:"startMarker,omitempty"` + // EndMarker used by NGram + EndMarker *string `json:"endMarker,omitempty"` + // StreamType used by NGram + StreamType *ArangoSearchNGramStreamType `json:"streamType,omitempty"` + + // Stemming used by Text + Stemming *bool `json:"stemming,omitempty"` + // Stopword used by Text and Stopwords. 
This field is not mandatory since version 3.7 of arangod so it can not be omitted in 3.6. + Stopwords []string `json:"stopwords"` + // StopwordsPath used by Text + StopwordsPath []string `json:"stopwordsPath,omitempty"` + + // QueryString used by AQL. + QueryString string `json:"queryString,omitempty"` + // CollapsePositions used by AQL. + CollapsePositions *bool `json:"collapsePositions,omitempty"` + // KeepNull used by AQL. + KeepNull *bool `json:"keepNull,omitempty"` + // BatchSize used by AQL. + BatchSize *int `json:"batchSize,omitempty"` + // MemoryLimit used by AQL. + MemoryLimit *int `json:"memoryLimit,omitempty"` + // ReturnType used by AQL. + ReturnType *ArangoSearchAnalyzerAQLReturnType `json:"returnType,omitempty"` + + // Pipeline used by Pipeline. + Pipeline []ArangoSearchAnalyzerPipeline `json:"pipeline,omitempty"` + + // Type used by GeoJSON. + Type *ArangoSearchAnalyzerGeoJSONType `json:"type,omitempty"` + + // Options used by GeoJSON and GeoPoint + Options *ArangoSearchAnalyzerGeoOptions `json:"options,omitempty"` + + // Latitude used by GetPoint. + Latitude []string `json:"latitude,omitempty"` + // Longitude used by GetPoint. + Longitude []string `json:"longitude,omitempty"` + + // Break used by Segmentation + Break ArangoSearchBreakType `json:"break,omitempty"` + + // Hex used by stopwords. + // If false then each string in stopwords is used verbatim. + // If true, then each string in stopwords needs to be hex-encoded. + Hex *bool `json:"hex,omitempty"` + + // ModelLocation used by Classification, NearestNeighbors + // The on-disk path to the trained fastText supervised model. + // Note: if you are running this in an ArangoDB cluster, this model must exist on every machine in the cluster. 
+ ModelLocation string `json:"model_location,omitempty"` + // TopK used by Classification, NearestNeighbors + // The number of class labels that will be produced per input (default: 1) + TopK *uint64 `json:"top_k,omitempty"` + // Threshold used by Classification + // The probability threshold for which a label will be assigned to an input. + // A fastText model produces a probability per class label, and this is what will be filtered (default: 0.99). + Threshold *float64 `json:"threshold,omitempty"` + + // Analyzer used by Minhash + // Definition of inner analyzer to use for incoming data. In case if omitted field or empty object falls back to 'identity' analyzer. + Analyzer *ArangoSearchAnalyzerDefinition `json:"analyzer,omitempty"` + // NumHashes used by Minhash + // Size of min hash signature. Must be greater or equal to 1. + NumHashes *uint64 `json:"numHashes,omitempty"` +} + +// ArangoSearchAnalyzerGeoJSONType GeoJSON Type parameter. +type ArangoSearchAnalyzerGeoJSONType string + +// New returns pointer to selected return type +func (a ArangoSearchAnalyzerGeoJSONType) New() *ArangoSearchAnalyzerGeoJSONType { + return &a +} + +const ( + // ArangoSearchAnalyzerGeoJSONTypeShape define index all GeoJSON geometry types (Point, Polygon etc.). (default) + ArangoSearchAnalyzerGeoJSONTypeShape ArangoSearchAnalyzerGeoJSONType = "shape" + // ArangoSearchAnalyzerGeoJSONTypeCentroid define compute and only index the centroid of the input geometry. + ArangoSearchAnalyzerGeoJSONTypeCentroid ArangoSearchAnalyzerGeoJSONType = "centroid" + // ArangoSearchAnalyzerGeoJSONTypePoint define only index GeoJSON objects of type Point, ignore all other geometry types. + ArangoSearchAnalyzerGeoJSONTypePoint ArangoSearchAnalyzerGeoJSONType = "point" +) + +// ArangoSearchAnalyzerGeoOptions for fine-tuning geo queries. These options should generally remain unchanged. +type ArangoSearchAnalyzerGeoOptions struct { + // MaxCells define maximum number of S2 cells. 
+ MaxCells *int `json:"maxCells,omitempty"` + // MinLevel define the least precise S2 level. + MinLevel *int `json:"minLevel,omitempty"` + // MaxLevel define the most precise S2 level + MaxLevel *int `json:"maxLevel,omitempty"` +} + +type ArangoSearchAnalyzerAQLReturnType string + +const ( + ArangoSearchAnalyzerAQLReturnTypeString ArangoSearchAnalyzerAQLReturnType = "string" + ArangoSearchAnalyzerAQLReturnTypeNumber ArangoSearchAnalyzerAQLReturnType = "number" + ArangoSearchAnalyzerAQLReturnTypeBool ArangoSearchAnalyzerAQLReturnType = "bool" +) + +// New returns pointer to selected return type +func (a ArangoSearchAnalyzerAQLReturnType) New() *ArangoSearchAnalyzerAQLReturnType { + return &a +} + +// ArangoSearchAnalyzerPipeline provides object definition for Pipeline array parameter +type ArangoSearchAnalyzerPipeline struct { + // Type of the Pipeline Analyzer + Type ArangoSearchAnalyzerType `json:"type"` + // Properties of the Pipeline Analyzer + Properties ArangoSearchAnalyzerProperties `json:"properties,omitempty"` +} + +// ArangoSearchAnalyzerDefinition provides definition of an analyzer +type ArangoSearchAnalyzerDefinition struct { + Name string `json:"name,omitempty"` + Type ArangoSearchAnalyzerType `json:"type,omitempty"` + Properties ArangoSearchAnalyzerProperties `json:"properties,omitempty"` + Features []ArangoSearchAnalyzerFeature `json:"features,omitempty"` + ArangoError +} + +type ArangoSearchViewBase struct { + Type ViewType `json:"type,omitempty"` + Name string `json:"name,omitempty"` + ArangoID + ArangoError +} + +// ArangoSearchViewProperties contains properties on an ArangoSearch view. +type ArangoSearchViewProperties struct { + // CleanupIntervalStep specifies the minimum number of commits to wait between + // removing unused files in the data directory. + // Defaults to 10. + // Use 0 to disable waiting. + // For the case where the consolidation policies merge segments often + // (i.e. 
a lot of commit+consolidate), a lower value will cause a lot of + // disk space to be wasted. + // For the case where the consolidation policies rarely merge segments + // (i.e. few inserts/deletes), a higher value will impact performance + // without any added benefits. + CleanupIntervalStep *int64 `json:"cleanupIntervalStep,omitempty"` + // ConsolidationInterval specifies the minimum number of milliseconds that must be waited + // between committing index data changes and making them visible to queries. + // Defaults to 60000. + // Use 0 to disable. + // For the case where there are a lot of inserts/updates, a lower value, + // until commit, will cause the index not to account for them and memory usage + // would continue to grow. + // For the case where there are a few inserts/updates, a higher value will + // impact performance and waste disk space for each commit call without + // any added benefits. + ConsolidationInterval *int64 `json:"consolidationIntervalMsec,omitempty"` + // ConsolidationPolicy specifies thresholds for consolidation. + ConsolidationPolicy *ArangoSearchConsolidationPolicy `json:"consolidationPolicy,omitempty"` + + // CommitInterval ArangoSearch waits at least this many milliseconds between committing view data store changes and making documents visible to queries + CommitInterval *int64 `json:"commitIntervalMsec,omitempty"` + + // WriteBufferIdle specifies the maximum number of writers (segments) cached in the pool. + // 0 value turns off caching, default value is 64. + WriteBufferIdel *int64 `json:"writebufferIdle,omitempty"` + + // WriteBufferActive specifies the maximum number of concurrent active writers (segments) performs (a transaction). + // Other writers (segments) are wait till current active writers (segments) finish. + // 0 value turns off this limit and used by default. 
+ WriteBufferActive *int64 `json:"writebufferActive,omitempty"` + + // WriteBufferSizeMax specifies maximum memory byte size per writer (segment) before a writer (segment) flush is triggered. + // 0 value turns off this limit fon any writer (buffer) and will be flushed only after a period defined for special thread during ArangoDB server startup. + // 0 value should be used with carefully due to high potential memory consumption. + WriteBufferSizeMax *int64 `json:"writebufferSizeMax,omitempty"` + + // Links contains the properties for how individual collections + // are indexed in the view. + // The key of the map are collection names. + Links ArangoSearchLinks `json:"links,omitempty"` + + // PrimarySort describes how individual fields are sorted + PrimarySort []ArangoSearchPrimarySortEntry `json:"primarySort,omitempty"` + + // PrimarySortCompression Defines how to compress the primary sort data (introduced in v3.7.1). + // ArangoDB v3.5 and v3.6 always compress the index using LZ4. This option is immutable. + PrimarySortCompression PrimarySortCompression `json:"primarySortCompression,omitempty"` + + // StoredValues An array of objects to describe which document attributes to store in the View index (introduced in v3.7.1). + // It can then cover search queries, which means the data can be taken from the index directly and accessing the storage engine can be avoided. + // This option is immutable. + StoredValues []StoredValue `json:"storedValues,omitempty"` + + ArangoSearchViewBase +} + +// PrimarySortCompression Defines how to compress the primary sort data (introduced in v3.7.1) +type PrimarySortCompression string + +const ( + // PrimarySortCompressionLz4 (default): use LZ4 fast compression. + PrimarySortCompressionLz4 PrimarySortCompression = "lz4" + // PrimarySortCompressionNone disable compression to trade space for speed. 
+ PrimarySortCompressionNone PrimarySortCompression = "none" +) + +type StoredValue struct { + Fields []string `json:"fields,omitempty"` + Compression PrimarySortCompression `json:"compression,omitempty"` +} + +// ArangoSearchSortDirection describes the sorting direction +type ArangoSearchSortDirection string + +const ( + // ArangoSearchSortDirectionAsc sort ascending + ArangoSearchSortDirectionAsc ArangoSearchSortDirection = "ASC" + // ArangoSearchSortDirectionDesc sort descending + ArangoSearchSortDirectionDesc ArangoSearchSortDirection = "DESC" +) + +// ArangoSearchPrimarySortEntry describes an entry for the primarySort list +type ArangoSearchPrimarySortEntry struct { + Field string `json:"field,omitempty"` + Ascending *bool `json:"asc,omitempty"` + Direction *ArangoSearchSortDirection `json:"direction,omitempty"` +} + +// GetDirection returns the sort direction or empty string if not set +func (pse ArangoSearchPrimarySortEntry) GetDirection() ArangoSearchSortDirection { + if pse.Direction != nil { + return *pse.Direction + } + + return ArangoSearchSortDirection("") +} + +// GetAscending returns the value of Ascending or false if not set +func (pse ArangoSearchPrimarySortEntry) GetAscending() bool { + if pse.Ascending != nil { + return *pse.Ascending + } + + return false +} + +// ArangoSearchConsolidationPolicyType strings for consolidation types +type ArangoSearchConsolidationPolicyType string + +const ( + // ArangoSearchConsolidationPolicyTypeTier consolidate based on segment byte size and live document count as dictated by the customization attributes. + ArangoSearchConsolidationPolicyTypeTier ArangoSearchConsolidationPolicyType = "tier" + // ArangoSearchConsolidationPolicyTypeBytesAccum consolidate if and only if ({threshold} range [0.0, 1.0]) + // {threshold} > (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes, + // i.e. 
the sum of all candidate segment's byte size is less than the total segment byte size multiplied by the {threshold}. + ArangoSearchConsolidationPolicyTypeBytesAccum ArangoSearchConsolidationPolicyType = "bytes_accum" +) + +// ArangoSearchConsolidationPolicy holds threshold values specifying when to +// consolidate view data. +// Semantics of the values depend on where they are used. +type ArangoSearchConsolidationPolicy struct { + // Type returns the type of the ConsolidationPolicy. This interface can then be casted to the corresponding ArangoSearchConsolidationPolicy* struct. + Type ArangoSearchConsolidationPolicyType `json:"type,omitempty"` + + ArangoSearchConsolidationPolicyBytesAccum + ArangoSearchConsolidationPolicyTier +} + +// ArangoSearchConsolidationPolicyBytesAccum contains fields used for ArangoSearchConsolidationPolicyTypeBytesAccum +type ArangoSearchConsolidationPolicyBytesAccum struct { + // Threshold, see ArangoSearchConsolidationTypeBytesAccum + Threshold *float64 `json:"threshold,omitempty"` +} + +// ArangoSearchConsolidationPolicyTier contains fields used for ArangoSearchConsolidationPolicyTypeTier +type ArangoSearchConsolidationPolicyTier struct { + MinScore *int64 `json:"minScore,omitempty"` + // MinSegments specifies the minimum number of segments that will be evaluated as candidates for consolidation. + MinSegments *int64 `json:"segmentsMin,omitempty"` + // MaxSegments specifies the maximum number of segments that will be evaluated as candidates for consolidation. + MaxSegments *int64 `json:"segmentsMax,omitempty"` + // SegmentsBytesMax specifies the maxinum allowed size of all consolidated segments in bytes. + SegmentsBytesMax *int64 `json:"segmentsBytesMax,omitempty"` + // SegmentsBytesFloor defines the value (in bytes) to treat all smaller segments as equal for consolidation selection. 
+ SegmentsBytesFloor *int64 `json:"segmentsBytesFloor,omitempty"` + // Lookahead specifies the number of additionally searched tiers except initially chosen candidated based on min_segments, + // max_segments, segments_bytes_max, segments_bytes_floor with respect to defined values. + // Default value falls to integer_traits::const_max (in C++ source code). + Lookahead *int64 `json:"lookahead,omitempty"` +} + +// ArangoSearchLinks is a strongly typed map containing links between a +// collection and a view. +// The keys in the map are collection names. +type ArangoSearchLinks map[string]ArangoSearchElementProperties + +// ArangoSearchFields is a strongly typed map containing properties per field. +// The keys in the map are field names. +type ArangoSearchFields map[string]ArangoSearchElementProperties + +// ArangoSearchElementProperties contains properties that specify how an element +// is indexed in an ArangoSearch view. +// Note that this structure is recursive. Settings not specified (nil) +// at a given level will inherit their setting from a lower level. +type ArangoSearchElementProperties struct { + AnalyzerDefinitions []ArangoSearchAnalyzerDefinition `json:"analyzerDefinitions,omitempty"` + // The list of analyzers to be used for indexing of string values. Defaults to ["identify"]. + Analyzers []string `json:"analyzers,omitempty"` + // If set to true, all fields of this element will be indexed. Defaults to false. + IncludeAllFields *bool `json:"includeAllFields,omitempty"` + // If set to true, values in a listed are treated as separate values. Defaults to false. + TrackListPositions *bool `json:"trackListPositions,omitempty"` + // This values specifies how the view should track values. + StoreValues ArangoSearchStoreValues `json:"storeValues,omitempty"` + // Fields contains the properties for individual fields of the element. + // The key of the map are field names. 
+ Fields ArangoSearchFields `json:"fields,omitempty"` + // If set to true, then no exclusive lock is used on the source collection during View index creation, + // so that it remains basically available. inBackground is an option that can be set when adding links. + // It does not get persisted as it is not a View property, but only a one-off option + InBackground *bool `json:"inBackground,omitempty"` +} + +// ArangoSearchStoreValues is the type of the StoreValues option of an ArangoSearch element. +type ArangoSearchStoreValues string + +const ( + // ArangoSearchStoreValuesNone specifies that a view should not store values. + ArangoSearchStoreValuesNone ArangoSearchStoreValues = "none" + // ArangoSearchStoreValuesID specifies that a view should only store + // information about value presence, to allow use of the EXISTS() function. + ArangoSearchStoreValuesID ArangoSearchStoreValues = "id" +) diff --git a/vendor/github.com/arangodb/go-driver/view_arangosearch_alias.go b/vendor/github.com/arangodb/go-driver/view_arangosearch_alias.go new file mode 100644 index 00000000000..0e701196798 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/view_arangosearch_alias.go @@ -0,0 +1,54 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" +) + +// ArangoSearchViewAlias provides access to the information of a view alias +// Views aliases are only available in ArangoDB 3.10 and higher. +type ArangoSearchViewAlias interface { + // View Includes generic View functions + View + + // Properties fetches extended information about the view. + Properties(ctx context.Context) (ArangoSearchAliasViewProperties, error) + + // SetProperties changes properties of the view. + SetProperties(ctx context.Context, options ArangoSearchAliasViewProperties) (ArangoSearchAliasViewProperties, error) +} + +type ArangoSearchAliasViewProperties struct { + ArangoSearchViewBase + + // Indexes A list of inverted indexes to add to the View. + Indexes []ArangoSearchAliasIndex `json:"indexes,omitempty"` +} + +type ArangoSearchAliasIndex struct { + // Collection The name of a collection. + Collection string `json:"collection"` + // Index The name of an inverted index of the collection. + Index string `json:"index"` +} diff --git a/vendor/github.com/arangodb/go-driver/view_arangosearch_alias_impl.go b/vendor/github.com/arangodb/go-driver/view_arangosearch_alias_impl.go new file mode 100644 index 00000000000..af82e18e508 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/view_arangosearch_alias_impl.go @@ -0,0 +1,78 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// viewArangoSearchAlias implements ArangoSearchViewAlias +type viewArangoSearchAlias struct { + view +} + +// Properties fetches extended information about the view. +func (v *viewArangoSearchAlias) Properties(ctx context.Context) (ArangoSearchAliasViewProperties, error) { + req, err := v.conn.NewRequest("GET", path.Join(v.relPath(), "properties")) + if err != nil { + return ArangoSearchAliasViewProperties{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := v.conn.Do(ctx, req) + if err != nil { + return ArangoSearchAliasViewProperties{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return ArangoSearchAliasViewProperties{}, WithStack(err) + } + var data ArangoSearchAliasViewProperties + if err := resp.ParseBody("", &data); err != nil { + return ArangoSearchAliasViewProperties{}, WithStack(err) + } + return data, nil +} + +// SetProperties changes properties of the view. 
+func (v *viewArangoSearchAlias) SetProperties(ctx context.Context, options ArangoSearchAliasViewProperties) (ArangoSearchAliasViewProperties, error) { + req, err := v.conn.NewRequest("PUT", path.Join(v.relPath(), "properties")) + if err != nil { + return ArangoSearchAliasViewProperties{}, WithStack(err) + } + if _, err := req.SetBody(options); err != nil { + return ArangoSearchAliasViewProperties{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := v.conn.Do(ctx, req) + if err != nil { + return ArangoSearchAliasViewProperties{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return ArangoSearchAliasViewProperties{}, WithStack(err) + } + var data ArangoSearchAliasViewProperties + if err := resp.ParseBody("", &data); err != nil { + return ArangoSearchAliasViewProperties{}, WithStack(err) + } + return data, nil +} diff --git a/vendor/github.com/arangodb/go-driver/view_arangosearch_impl.go b/vendor/github.com/arangodb/go-driver/view_arangosearch_impl.go new file mode 100644 index 00000000000..90da75e6227 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/view_arangosearch_impl.go @@ -0,0 +1,74 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// viewArangoSearch implements ArangoSearchView +type viewArangoSearch struct { + view +} + +// Properties fetches extended information about the view. +func (v *viewArangoSearch) Properties(ctx context.Context) (ArangoSearchViewProperties, error) { + req, err := v.conn.NewRequest("GET", path.Join(v.relPath(), "properties")) + if err != nil { + return ArangoSearchViewProperties{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := v.conn.Do(ctx, req) + if err != nil { + return ArangoSearchViewProperties{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return ArangoSearchViewProperties{}, WithStack(err) + } + var data ArangoSearchViewProperties + if err := resp.ParseBody("", &data); err != nil { + return ArangoSearchViewProperties{}, WithStack(err) + } + return data, nil +} + +// SetProperties changes properties of the view. 
+func (v *viewArangoSearch) SetProperties(ctx context.Context, options ArangoSearchViewProperties) error { + req, err := v.conn.NewRequest("PUT", path.Join(v.relPath(), "properties")) + if err != nil { + return WithStack(err) + } + if _, err := req.SetBody(options); err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := v.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-driver/view_impl.go b/vendor/github.com/arangodb/go-driver/view_impl.go new file mode 100644 index 00000000000..153e69262b2 --- /dev/null +++ b/vendor/github.com/arangodb/go-driver/view_impl.go @@ -0,0 +1,139 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "fmt" + "net/http" + "path" +) + +// newView creates a new View implementation. 
+func newView(name string, viewType ViewType, db *database) (View, error) { + if name == "" { + return nil, WithStack(InvalidArgumentError{Message: "name is empty"}) + } + if viewType == "" { + return nil, WithStack(InvalidArgumentError{Message: "viewType is empty"}) + } + if db == nil { + return nil, WithStack(InvalidArgumentError{Message: "db is nil"}) + } + return &view{ + name: name, + viewType: viewType, + db: db, + conn: db.conn, + }, nil +} + +type view struct { + name string + viewType ViewType + db *database + conn Connection +} + +// relPath creates the relative path to this view (`_db//_api/view/`) +func (v *view) relPath() string { + escapedName := pathEscape(v.name) + return path.Join(v.db.relPath(), "_api", "view", escapedName) +} + +// Name returns the name of the view. +func (v *view) Name() string { + return v.name +} + +// Type returns the type of this view. +func (v *view) Type() ViewType { + return v.viewType +} + +// ArangoSearchView returns this view as an ArangoSearch view. +// When the type of the view is not ArangoSearch, an error is returned. +func (v *view) ArangoSearchView() (ArangoSearchView, error) { + if v.viewType != ViewTypeArangoSearch { + return nil, WithStack(newArangoError(http.StatusConflict, 0, fmt.Sprintf("Type must be '%s', got '%s'", ViewTypeArangoSearch, v.viewType))) + } + return &viewArangoSearch{view: *v}, nil +} + +func (v *view) ArangoSearchViewAlias() (ArangoSearchViewAlias, error) { + if v.viewType != ViewTypeArangoSearchAlias { + return nil, WithStack(newArangoError(http.StatusConflict, 0, fmt.Sprintf("Type must be '%s', got '%s'", ViewTypeArangoSearchAlias, v.viewType))) + } + return &viewArangoSearchAlias{view: *v}, nil +} + +// Database returns the database containing the view. 
+func (v *view) Database() Database { + return v.db +} + +func (v *view) Rename(ctx context.Context, newName string) error { + if newName == "" { + return WithStack(InvalidArgumentError{Message: "newName is empty"}) + } + req, err := v.conn.NewRequest("PUT", path.Join(v.relPath(), "rename")) + if err != nil { + return WithStack(err) + } + input := struct { + Name string `json:"name"` + }{ + Name: newName, + } + if _, err := req.SetBody(input); err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := v.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + v.name = newName + return nil +} + +// Remove removes the entire view. +// If the view does not exist, a NotFoundError is returned. +func (v *view) Remove(ctx context.Context) error { + req, err := v.conn.NewRequest("DELETE", v.relPath()) + if err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := v.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-velocypack/.envrc b/vendor/github.com/arangodb/go-velocypack/.envrc new file mode 100644 index 00000000000..143b7823339 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/.envrc @@ -0,0 +1,8 @@ +export GOBUILDDIR=$(pwd)/.gobuild +export GOPATH=$GOBUILDDIR:$GOPATH +PATH_add $GOBUILDDIR/bin + +if [ ! -e ${GOBUILDDIR} ]; then + mkdir -p ${GOBUILDDIR}/src/github.com/arangodb/ + ln -s ../../../.. 
${GOBUILDDIR}/src/github.com/arangodb/go-velocypack +fi \ No newline at end of file diff --git a/vendor/github.com/arangodb/go-velocypack/.gitignore b/vendor/github.com/arangodb/go-velocypack/.gitignore new file mode 100644 index 00000000000..7e340153324 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/.gitignore @@ -0,0 +1,4 @@ +.gobuild +coverage.out +cpu.out +test.test diff --git a/vendor/github.com/arangodb/go-velocypack/.travis.yml b/vendor/github.com/arangodb/go-velocypack/.travis.yml new file mode 100644 index 00000000000..60975a7e78b --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/.travis.yml @@ -0,0 +1,8 @@ +sudo: required + +services: + - docker + +language: go + +script: make run-tests diff --git a/vendor/github.com/arangodb/go-velocypack/LICENSE b/vendor/github.com/arangodb/go-velocypack/LICENSE new file mode 100644 index 00000000000..b8ff39b5ad4 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2017 ArangoDB GmbH + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/arangodb/go-velocypack/Makefile b/vendor/github.com/arangodb/go-velocypack/Makefile new file mode 100644 index 00000000000..ef353e19812 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/Makefile @@ -0,0 +1,56 @@ +PROJECT := go-velocypack +SCRIPTDIR := $(shell pwd) +ROOTDIR := $(shell cd $(SCRIPTDIR) && pwd) + +TESTOPTIONS := +ifdef VERBOSE + TESTOPTIONS := -v +endif + +ORGPATH := github.com/arangodb +REPONAME := $(PROJECT) +REPOPATH := $(ORGPATH)/$(REPONAME) + +SOURCES := $(shell find . -name '*.go') + +.PHONY: all build clean run-tests show-coverage + +all: build + +build: $(SOURCES) + go build -v github.com/arangodb/go-velocypack + +# All unit tests +run-tests: + @go get github.com/stretchr/testify/assert + @go test -v $(REPOPATH)/test/runtime + @go test $(TESTOPTIONS) $(REPOPATH) + @go test -cover -coverpkg $(REPOPATH) -coverprofile=coverage.out $(TESTOPTIONS) $(REPOPATH)/test + +# All benchmarks +run-benchmarks: + @go get github.com/stretchr/testify/assert + @go test $(TESTOPTIONS) -bench=. -run=notests -cpu=1,2,4 $(REPOPATH)/test + +# All benchmarks using local profiling +run-benchmarks-prof: $(GOBUILDDIR) + @go get github.com/stretchr/testify/assert + @go test $(TESTOPTIONS) -bench=. 
-run=notests -cpu=1,2,4 -cpuprofile=cpu.out $(REPOPATH)/test + @echo Now profile using: go tool pprof test.test cpu.out + +# All unit tests using local Go tools +run-tests-local: $(GOBUILDDIR) + @go get github.com/stretchr/testify/assert + @go test -v $(REPOPATH)/test/runtime + @go test $(TESTOPTIONS) $(REPOPATH) + @go test -cover -coverpkg $(REPOPATH) -coverprofile=coverage.out $(TESTOPTIONS) $(REPOPATH)/test + +# All (except large memory) unit tests using local Go tools +run-tests-local-nolarge: $(GOBUILDDIR) + @go get github.com/stretchr/testify/assert + @go test -tags nolarge -v $(REPOPATH)/test/runtime + @go test -tags nolarge $(TESTOPTIONS) $(REPOPATH) + @go test -tags nolarge -cover -coverpkg $(REPOPATH) -coverprofile=coverage.out $(TESTOPTIONS) $(REPOPATH)/test + +show-coverage: run-tests + go tool cover -html coverage.out diff --git a/vendor/github.com/arangodb/go-velocypack/README.md b/vendor/github.com/arangodb/go-velocypack/README.md new file mode 100644 index 00000000000..208321c2a7f --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/README.md @@ -0,0 +1,7 @@ +# ArangoDB VelocyPack Go implementation. + + +[![Build Status](https://travis-ci.org/arangodb/go-velocypack.svg?branch=master)](https://travis-ci.org/arangodb/go-velocypack) +[![GoDoc](https://godoc.org/github.com/arangodb/go-velocypack?status.svg)](http://godoc.org/github.com/arangodb/go-velocypack) + +NOTE: THIS IS WORK IN PROGRESS. diff --git a/vendor/github.com/arangodb/go-velocypack/array_iterator.go b/vendor/github.com/arangodb/go-velocypack/array_iterator.go new file mode 100644 index 00000000000..a078746fcf4 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/array_iterator.go @@ -0,0 +1,91 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +type ArrayIterator struct { + s Slice + position ValueLength + size ValueLength + current Slice +} + +// NewArrayIterator initializes an iterator at position 0 of the given object slice. +func NewArrayIterator(s Slice) (*ArrayIterator, error) { + if !s.IsArray() { + return nil, InvalidTypeError{"Expected Array slice"} + } + size, err := s.Length() + if err != nil { + return nil, WithStack(err) + } + i := &ArrayIterator{ + s: s, + position: 0, + size: size, + } + if size > 0 { + i.current, err = s.At(0) + if err != nil { + return nil, WithStack(err) + } + } + return i, nil +} + +// IsValid returns true if the given position of the iterator is valid. +func (i *ArrayIterator) IsValid() bool { + return i.position < i.size +} + +// IsFirst returns true if the current position is 0. +func (i *ArrayIterator) IsFirst() bool { + return i.position == 0 +} + +// Value returns the value of the current position of the iterator +func (i *ArrayIterator) Value() (Slice, error) { + if i.position >= i.size { + return nil, WithStack(IndexOutOfBoundsError) + } + if current := i.current; current != nil { + return current, nil + } + value, err := i.s.At(i.position) + return value, WithStack(err) +} + +// Next moves to the next position. 
+func (i *ArrayIterator) Next() error { + i.position++ + if i.position < i.size && i.current != nil { + var err error + // skip over entry + i.current, err = i.current.Next() + if err != nil { + return WithStack(err) + } + } else { + i.current = nil + } + return nil +} diff --git a/vendor/github.com/arangodb/go-velocypack/attribute_translator.go b/vendor/github.com/arangodb/go-velocypack/attribute_translator.go new file mode 100644 index 00000000000..7d062cad9fe --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/attribute_translator.go @@ -0,0 +1,51 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import "strconv" + +var attributeTranslator attributeIDTranslator = &arangoAttributeIDTranslator{} + +// attributeIDTranslator is used to translation integer style object keys to strings. 
+type attributeIDTranslator interface { + IDToString(id uint64) string +} + +type arangoAttributeIDTranslator struct{} + +func (t *arangoAttributeIDTranslator) IDToString(id uint64) string { + switch id { + case 1: + return "_key" + case 2: + return "_rev" + case 3: + return "_id" + case 4: + return "_from" + case 5: + return "_to" + default: + return strconv.FormatUint(id, 10) + } +} diff --git a/vendor/github.com/arangodb/go-velocypack/builder.go b/vendor/github.com/arangodb/go-velocypack/builder.go new file mode 100644 index 00000000000..40e0d9a507f --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/builder.go @@ -0,0 +1,1186 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "reflect" +) + +// BuilderOptions contains options that influence how Builder builds slices. +type BuilderOptions struct { + BuildUnindexedArrays bool + BuildUnindexedObjects bool + CheckAttributeUniqueness bool +} + +// Builder is used to build VPack structures. 
+type Builder struct { + BuilderOptions + buf builderBuffer + stack builderStack + index []indexVector + keyWritten bool +} + +func NewBuilder(capacity uint) *Builder { + b := &Builder{ + buf: make(builderBuffer, 0, capacity), + } + return b +} + +// Clear and start from scratch: +func (b *Builder) Clear() { + b.buf = nil + b.stack.Clear() + b.keyWritten = false +} + +// Bytes return the generated bytes. +// The returned slice is shared with the builder itself, so you must not modify it. +// When the builder is not closed, an error is returned. +func (b *Builder) Bytes() ([]byte, error) { + if !b.IsClosed() { + return nil, WithStack(BuilderNotClosedError) + } + return b.buf, nil +} + +// Slice returns a slice of the result. +func (b *Builder) Slice() (Slice, error) { + if b.buf.IsEmpty() { + return Slice{}, nil + } + bytes, err := b.Bytes() + return bytes, WithStack(err) +} + +// WriteTo writes the generated bytes to the given writer. +// When the builder is not closed, an error is returned. +func (b *Builder) WriteTo(w io.Writer) (int64, error) { + if !b.IsClosed() { + return 0, WithStack(BuilderNotClosedError) + } + if n, err := w.Write(b.buf); err != nil { + return 0, WithStack(err) + } else { + return int64(n), nil + } +} + +// Size returns the actual size of the generated slice. +// Returns an error when builder is not closed. +func (b *Builder) Size() (ValueLength, error) { + if !b.IsClosed() { + return 0, WithStack(BuilderNotClosedError) + } + return b.buf.Len(), nil +} + +// IsEmpty returns true when no bytes have been generated yet. +func (b *Builder) IsEmpty() bool { + return b.buf.IsEmpty() +} + +// IsOpenObject returns true when the builder has an open object at the top of the stack. +func (b *Builder) IsOpenObject() bool { + if b.stack.IsEmpty() { + return false + } + tos, _ := b.stack.Tos() + h := b.buf[tos] + return h == 0x0b || h == 0x014 +} + +// IsOpenArray returns true when the builder has an open array at the top of the stack. 
+func (b *Builder) IsOpenArray() bool { + if b.stack.IsEmpty() { + return false + } + tos, _ := b.stack.Tos() + h := b.buf[tos] + return h == 0x06 || h == 0x013 +} + +// OpenObject starts a new object. +// This must be closed using Close. +func (b *Builder) OpenObject(unindexed ...bool) error { + var vType byte + if optionalBool(unindexed, false) { + vType = 0x14 + } else { + vType = 0x0b + } + return WithStack(b.openCompoundValue(vType)) +} + +// OpenArray starts a new array. +// This must be closed using Close. +func (b *Builder) OpenArray(unindexed ...bool) error { + var vType byte + if optionalBool(unindexed, false) { + vType = 0x13 + } else { + vType = 0x06 + } + return WithStack(b.openCompoundValue(vType)) +} + +// Close ends an open object or array. +func (b *Builder) Close() error { + if b.IsClosed() { + return WithStack(BuilderNeedOpenCompoundError) + } + tos, _ := b.stack.Tos() + head := b.buf[tos] + + vpackAssert(head == 0x06 || head == 0x0b || head == 0x13 || head == 0x14) + + isArray := (head == 0x06 || head == 0x13) + index := b.index[b.stack.Len()-1] + + if index.IsEmpty() { + b.closeEmptyArrayOrObject(tos, isArray) + return nil + } + + // From now on index.size() > 0 + vpackAssert(len(index) > 0) + + // check if we can use the compact Array / Object format + if head == 0x13 || head == 0x14 || + (head == 0x06 && b.BuilderOptions.BuildUnindexedArrays) || + (head == 0x0b && (b.BuilderOptions.BuildUnindexedObjects || len(index) == 1)) { + if b.closeCompactArrayOrObject(tos, isArray, index) { + return nil + } + // This might fall through, if closeCompactArrayOrObject gave up! 
+ } + + if isArray { + b.closeArray(tos, index) + return nil + } + + // From now on we're closing an object + + // fix head byte in case a compact Array / Object was originally requested + b.buf[tos] = 0x0b + + // First determine byte length and its format: + offsetSize := uint(8) + // can be 1, 2, 4 or 8 for the byte width of the offsets, + // the byte length and the number of subvalues: + if b.buf.Len()-tos+ValueLength(len(index))-6 <= 0xff { + // We have so far used _pos - tos bytes, including the reserved 8 + // bytes for byte length and number of subvalues. In the 1-byte number + // case we would win back 6 bytes but would need one byte per subvalue + // for the index table + offsetSize = 1 + + // Maybe we need to move down data: + targetPos := ValueLength(3) + if b.buf.Len() > (tos + 9) { + _len := ValueLength(b.buf.Len() - (tos + 9)) + checkOverflow(_len) + src := b.buf[tos+9:] + copy(b.buf[tos+targetPos:], src[:_len]) + } + diff := ValueLength(9 - targetPos) + b.buf.Shrink(uint(diff)) + n := len(index) + for i := 0; i < n; i++ { + index[i] -= diff + } + + // One could move down things in the offsetSize == 2 case as well, + // since we only need 4 bytes in the beginning. However, saving these + // 4 bytes has been sacrificed on the Altar of Performance. 
+ } else if b.buf.Len()-tos+2*ValueLength(len(index)) <= 0xffff { + offsetSize = 2 + } else if b.buf.Len()-tos+4*ValueLength(len(index)) <= 0xffffffff { + offsetSize = 4 + } + + // Now build the table: + extraSpace := offsetSize * uint(len(index)) + if offsetSize == 8 { + extraSpace += 8 + } + b.buf.ReserveSpace(extraSpace) + tableBase := b.buf.Len() + b.buf.Grow(offsetSize * uint(len(index))) + // Object + if len(index) >= 2 { + if err := b.sortObjectIndex(b.buf[tos:], index); err != nil { + return WithStack(err) + } + } + for i := uint(0); i < uint(len(index)); i++ { + indexBase := tableBase + ValueLength(offsetSize*i) + x := uint64(index[i]) + for j := uint(0); j < offsetSize; j++ { + b.buf[indexBase+ValueLength(j)] = byte(x & 0xff) + x >>= 8 + } + } + // Finally fix the byte width in the type byte: + if offsetSize > 1 { + if offsetSize == 2 { + b.buf[tos] += 1 + } else if offsetSize == 4 { + b.buf[tos] += 2 + } else { // offsetSize == 8 + b.buf[tos] += 3 + b.appendLength(ValueLength(len(index)), 8) + } + } + + // Fix the byte length in the beginning: + x := ValueLength(b.buf.Len() - tos) + for i := uint(1); i <= offsetSize; i++ { + b.buf[tos+ValueLength(i)] = byte(x & 0xff) + x >>= 8 + } + + if offsetSize < 8 { + x := len(index) + for i := uint(offsetSize + 1); i <= 2*offsetSize; i++ { + b.buf[tos+ValueLength(i)] = byte(x & 0xff) + x >>= 8 + } + } + + // And, if desired, check attribute uniqueness: + if b.BuilderOptions.CheckAttributeUniqueness && len(index) > 1 { + // check uniqueness of attribute names + if err := b.checkAttributeUniqueness(Slice(b.buf[tos:])); err != nil { + return WithStack(err) + } + } + + // Now the array or object is complete, we pop a ValueLength off the _stack: + b.stack.Pop() + // Intentionally leave _index[depth] intact to avoid future allocs! + return nil +} + +// IsClosed returns true if there are no more open objects or arrays. 
+func (b *Builder) IsClosed() bool { + return b.stack.IsEmpty() +} + +// HasKey checks whether an Object value has a specific key attribute. +func (b *Builder) HasKey(key string) (bool, error) { + if b.stack.IsEmpty() { + return false, WithStack(BuilderNeedOpenObjectError) + } + tos, _ := b.stack.Tos() + h := b.buf[tos] + if h != 0x0b && h != 0x14 { + return false, WithStack(BuilderNeedOpenObjectError) + } + index := b.index[b.stack.Len()-1] + if index.IsEmpty() { + return false, nil + } + for _, idx := range index { + s := Slice(b.buf[tos+idx:]) + k, err := s.makeKey() + if err != nil { + return false, WithStack(err) + } + if eq, err := k.IsEqualString(key); err != nil { + return false, WithStack(err) + } else if eq { + return true, nil + } + } + return false, nil +} + +// GetKey returns the value for a specific key of an Object value. +// Returns Slice of type None when key is not found. +func (b *Builder) GetKey(key string) (Slice, error) { + if b.stack.IsEmpty() { + return nil, WithStack(BuilderNeedOpenObjectError) + } + tos, _ := b.stack.Tos() + h := b.buf[tos] + if h != 0x0b && h != 0x14 { + return nil, WithStack(BuilderNeedOpenObjectError) + } + index := b.index[b.stack.Len()-1] + if index.IsEmpty() { + return nil, nil + } + for _, idx := range index { + s := Slice(b.buf[tos+idx:]) + k, err := s.makeKey() + if err != nil { + return nil, WithStack(err) + } + if eq, err := k.IsEqualString(key); err != nil { + return nil, WithStack(err) + } else if eq { + value, err := s.Next() + if err != nil { + return nil, WithStack(err) + } + return value, nil + } + } + return nil, nil +} + +// RemoveLast removes last subvalue written to an (unclosed) object or array. 
+func (b *Builder) RemoveLast() error { + if b.stack.IsEmpty() { + return WithStack(BuilderNeedOpenCompoundError) + } + tos, _ := b.stack.Tos() + index := &b.index[b.stack.Len()-1] + if index.IsEmpty() { + return WithStack(BuilderNeedSubValueError) + } + newLength := tos + (*index)[len(*index)-1] + lastSize := b.buf.Len() - newLength + b.buf.Shrink(uint(lastSize)) + index.RemoveLast() + return nil +} + +// addNull adds a null value to the buffer. +func (b *Builder) addNull() { + b.buf.WriteByte(0x18) +} + +// addFalse adds a bool false value to the buffer. +func (b *Builder) addFalse() { + b.buf.WriteByte(0x19) +} + +// addTrue adds a bool true value to the buffer. +func (b *Builder) addTrue() { + b.buf.WriteByte(0x1a) +} + +// addBool adds a bool value to the buffer. +func (b *Builder) addBool(v bool) { + if v { + b.addTrue() + } else { + b.addFalse() + } +} + +// addDouble adds a double value to the buffer. +func (b *Builder) addDouble(v float64) { + bits := math.Float64bits(v) + b.buf.ReserveSpace(9) + b.buf.WriteByte(0x1b) + binary.LittleEndian.PutUint64(b.buf.Grow(8), bits) +} + +// addInt adds an int value to the buffer. +func (b *Builder) addInt(v int64) { + if v >= 0 && v <= 9 { + b.buf.WriteByte(0x30 + byte(v)) + } else if v < 0 && v >= -6 { + b.buf.WriteByte(byte(0x40 + int(v))) + } else { + b.appendInt(v, 0x1f) + } +} + +// addUInt adds an uint value to the buffer. +func (b *Builder) addUInt(v uint64) { + if v <= 9 { + b.buf.WriteByte(0x30 + byte(v)) + } else { + b.appendUInt(v, 0x27) + } +} + +// addUTCDate adds an UTC date value to the buffer. +func (b *Builder) addUTCDate(v int64) { + x := toUInt64(v) + dst := b.buf.Grow(9) + dst[0] = 0x1c + setLength(dst[1:], ValueLength(x), 8) +} + +// addString adds a string value to the buffer. 
+func (b *Builder) addString(v string) { + strLen := uint(len(v)) + if strLen > 126 { + // long string + dst := b.buf.Grow(1 + 8 + strLen) + dst[0] = 0xbf + setLength(dst[1:], ValueLength(strLen), 8) // string length + copy(dst[9:], v) // string data + } else { + dst := b.buf.Grow(1 + strLen) + dst[0] = byte(0x40 + strLen) // short string (with length) + copy(dst[1:], v) // string data + } +} + +// addBinary adds a binary value to the buffer. +func (b *Builder) addBinary(v []byte) { + l := uint(len(v)) + b.buf.ReserveSpace(1 + 8 + l) + b.appendUInt(uint64(l), 0xbf) // data length + b.buf.Write(v) // data +} + +// addIllegal adds an Illegal value to the buffer. +func (b *Builder) addIllegal() { + b.buf.WriteByte(0x17) +} + +// addMinKey adds a MinKey value to the buffer. +func (b *Builder) addMinKey() { + b.buf.WriteByte(0x1e) +} + +// addMaxKey adds a MaxKey value to the buffer. +func (b *Builder) addMaxKey() { + b.buf.WriteByte(0x1f) +} + +// Add adds a raw go value value to an array/raw value/object. +func (b *Builder) Add(v interface{}) error { + if it, ok := v.(*ObjectIterator); ok { + return WithStack(b.AddKeyValuesFromIterator(it)) + } + if it, ok := v.(*ArrayIterator); ok { + return WithStack(b.AddValuesFromIterator(it)) + } + value := NewValue(v) + if value.IsIllegal() { + return WithStack(BuilderUnexpectedTypeError{fmt.Sprintf("Cannot convert value of type %s", reflect.TypeOf(v).Name())}) + } + if err := b.addInternal(value); err != nil { + return WithStack(err) + } + return nil +} + +// AddValue adds a value to an array/raw value/object. +func (b *Builder) AddValue(v Value) error { + if err := b.addInternal(v); err != nil { + return WithStack(err) + } + return nil +} + +// AddKeyValue adds a key+value to an open object. +func (b *Builder) AddKeyValue(key string, v Value) error { + if err := b.addInternalKeyValue(key, v); err != nil { + return WithStack(err) + } + return nil +} + +// AddValuesFromIterator adds values to an array from the given iterator. 
+// The array must be opened before a call to this function and the array is left open Intentionally. +func (b *Builder) AddValuesFromIterator(it *ArrayIterator) error { + if b.stack.IsEmpty() { + return WithStack(BuilderNeedOpenArrayError) + } + tos, _ := b.stack.Tos() + h := b.buf[tos] + if h != 0x06 && h != 0x13 { + return WithStack(BuilderNeedOpenArrayError) + } + for it.IsValid() { + v, err := it.Value() + if err != nil { + return WithStack(err) + } + if err := b.addInternal(NewSliceValue(v)); err != nil { + return WithStack(err) + } + if err := it.Next(); err != nil { + return WithStack(err) + } + } + return nil +} + +// AddKeyValuesFromIterator adds values to an object from the given iterator. +// The object must be opened before a call to this function and the object is left open Intentionally. +func (b *Builder) AddKeyValuesFromIterator(it *ObjectIterator) error { + if b.stack.IsEmpty() { + return WithStack(BuilderNeedOpenObjectError) + } + tos, _ := b.stack.Tos() + h := b.buf[tos] + if h != 0x0b && h != 0x14 { + return WithStack(BuilderNeedOpenObjectError) + } + if b.keyWritten { + return WithStack(BuilderKeyAlreadyWrittenError) + } + for it.IsValid() { + k, err := it.Key(true) + if err != nil { + return WithStack(err) + } + key, err := k.GetString() + if err != nil { + return WithStack(err) + } + v, err := it.Value() + if err != nil { + return WithStack(err) + } + if err := b.addInternalKeyValue(key, NewSliceValue(v)); err != nil { + return WithStack(err) + } + if err := it.Next(); err != nil { + return WithStack(err) + } + } + return nil +} + +// returns number of bytes required to store the value in 2s-complement +func intLength(value int64) uint { + if value >= -0x80 && value <= 0x7f { + // shortcut for the common case + return 1 + } + var x uint64 + if value >= 0 { + x = uint64(value) + } else { + x = uint64(-(value + 1)) + } + xSize := uint(0) + for { + xSize++ + x >>= 8 + if x < 0x80 { + return xSize + 1 + } + } +} + +func (b *Builder) appendInt(v 
int64, base uint) { + vSize := intLength(v) + var x uint64 + if vSize == 8 { + x = toUInt64(v) + } else { + shift := int64(1) << (vSize*8 - 1) // will never overflow! + if v >= 0 { + x = uint64(v) + } else { + x = uint64(v+shift) + uint64(shift) + } + // x = v >= 0 ? static_cast(v) + // : static_cast(v + shift) + shift; + } + dst := b.buf.Grow(1 + vSize) + dst[0] = byte(base + vSize) + off := 1 + for ; vSize > 0; vSize-- { + dst[off] = byte(x & 0xff) + x >>= 8 + off++ + } +} + +func (b *Builder) appendUInt(v uint64, base uint) { + b.buf.ReserveSpace(9) + save := b.buf.Len() + b.buf.WriteByte(0) // Will be overwritten at end of function. + vSize := uint(0) + for { + vSize++ + b.buf.WriteByte(byte(v & 0xff)) + v >>= 8 + if v == 0 { + break + } + } + b.buf[save] = byte(base + vSize) +} + +func (b *Builder) appendLength(v ValueLength, n uint) { + dst := b.buf.Grow(n) + setLength(dst, v, n) +} + +func setLength(dst []byte, v ValueLength, n uint) { + for i := uint(0); i < n; i++ { + dst[i] = byte(v & 0xff) + v >>= 8 + } +} + +// openCompoundValue opens an array/object, checking the context. +func (b *Builder) openCompoundValue(vType byte) error { + //haveReported := false + tos, stackLen := b.stack.Tos() + if stackLen > 0 { + h := b.buf[tos] + if !b.keyWritten { + if h != 0x06 && h != 0x13 { + return WithStack(BuilderNeedOpenArrayError) + } + b.reportAdd() + //haveReported = true + } else { + b.keyWritten = false + } + } + b.addCompoundValue(vType) + // if err && haveReported { b.cleanupAdd() } + return nil +} + +// addCompoundValue adds the start of a component value to the stream & stack. 
+func (b *Builder) addCompoundValue(vType byte) { + pos := b.buf.Len() + b.stack.Push(pos) + stackLen := b.stack.Len() + toAdd := stackLen - len(b.index) + for toAdd > 0 { + newIndex := make(indexVector, 0, 16) // Pre-allocate 16 entries so we don't have to allocate memory for the first 16 entries + b.index = append(b.index, newIndex) + toAdd-- + } + b.index[stackLen-1].Clear() + b.buf.Write([]byte{vType, 0, 0, 0, 0, 0, 0, 0, 0}) +} + +// closeEmptyArrayOrObject closes an empty array/object, removing the pre-allocated length space. +func (b *Builder) closeEmptyArrayOrObject(tos ValueLength, isArray bool) { + // empty Array or Object + if isArray { + b.buf[tos] = 0x01 + } else { + b.buf[tos] = 0x0a + } + vpackAssert(b.buf.Len() == tos+9) + b.buf.Shrink(8) + b.stack.Pop() +} + +// closeCompactArrayOrObject tries to close an array/object using compact notation. +// Returns true when a compact notation was possible, false otherwise. +func (b *Builder) closeCompactArrayOrObject(tos ValueLength, isArray bool, index indexVector) bool { + // use compact notation + nrItems := len(index) + nrItemsLen := getVariableValueLength(ValueLength(nrItems)) + vpackAssert(nrItemsLen > 0) + + byteSize := b.buf.Len() - (tos + 8) + nrItemsLen + vpackAssert(byteSize > 0) + + byteSizeLen := getVariableValueLength(byteSize) + byteSize += byteSizeLen + if getVariableValueLength(byteSize) != byteSizeLen { + byteSize++ + byteSizeLen++ + } + + if byteSizeLen < 9 { + // can only use compact notation if total byte length is at most 8 bytes long + if isArray { + b.buf[tos] = 0x13 + } else { + b.buf[tos] = 0x14 + } + + valuesLen := b.buf.Len() - (tos + 9) // Amount of bytes taken up by array/object values. + if valuesLen > 0 && byteSizeLen < 8 { + // We have array/object values and our byteSize needs less than the pre-allocated 8 bytes. + // So we move the array/object values back. 
+ checkOverflow(valuesLen) + src := b.buf[tos+9:] + copy(b.buf[tos+1+byteSizeLen:], src[:valuesLen]) + } + // Shrink buffer, removing unused space allocated for byteSize. + b.buf.Shrink(uint(8 - byteSizeLen)) + + // store byte length + vpackAssert(byteSize > 0) + storeVariableValueLength(b.buf, tos+1, byteSize, false) + + // store nrItems + b.buf.Grow(uint(nrItemsLen)) + storeVariableValueLength(b.buf, tos+byteSize-1, ValueLength(len(index)), true) + + b.stack.Pop() + return true + } + return false +} + +// checkAttributeUniqueness checks the given slice for duplicate keys. +// It returns an error when duplicate keys are found, nil otherwise. +func (b *Builder) checkAttributeUniqueness(obj Slice) error { + vpackAssert(b.BuilderOptions.CheckAttributeUniqueness) + n, err := obj.Length() + if err != nil { + return WithStack(err) + } + + if obj.IsSorted() { + // object attributes are sorted + previous, err := obj.KeyAt(0) + if err != nil { + return WithStack(err) + } + p, err := previous.GetString() + if err != nil { + return WithStack(err) + } + + // compare each two adjacent attribute names + for i := ValueLength(1); i < n; i++ { + current, err := obj.KeyAt(i) + if err != nil { + return WithStack(err) + } + // keyAt() guarantees a string as returned type + vpackAssert(current.IsString()) + + q, err := current.GetString() + if err != nil { + return WithStack(err) + } + + if p == q { + // identical key + return WithStack(DuplicateAttributeNameError) + } + // re-use already calculated values for next round + p = q + } + } else { + keys := make(map[string]struct{}) + + for i := ValueLength(0); i < n; i++ { + // note: keyAt() already translates integer attributes + key, err := obj.KeyAt(i) + if err != nil { + return WithStack(err) + } + // keyAt() guarantees a string as returned type + vpackAssert(key.IsString()) + + k, err := key.GetString() + if err != nil { + return WithStack(err) + } + if _, found := keys[k]; found { + return WithStack(DuplicateAttributeNameError) + } 
+ keys[k] = struct{}{} + } + } + return nil +} + +func findAttrName(base []byte) ([]byte, error) { + b := base[0] + if b >= 0x40 && b <= 0xbe { + // short UTF-8 string + l := b - 0x40 + return base[1 : 1+l], nil + } + if b == 0xbf { + // long UTF-8 string + l := uint(0) + // read string length + for i := 8; i >= 1; i-- { + l = (l << 8) + uint(base[i]) + } + return base[1+8 : 1+8+l], nil + } + + // translate attribute name + key, err := Slice(base).makeKey() + if err != nil { + return nil, WithStack(err) + } + return findAttrName(key) +} + +func (b *Builder) sortObjectIndex(objBase []byte, offsets []ValueLength) error { + list := make(sortEntries, len(offsets)) + for i, off := range offsets { + name, err := findAttrName(objBase[off:]) + if err != nil { + return WithStack(err) + } + list[i] = sortEntry{ + Offset: off, + Name: name, + } + } + list.Sort() + //sort.Sort(list) + for i, entry := range list { + offsets[i] = entry.Offset + } + return nil +} + +func (b *Builder) closeArray(tos ValueLength, index []ValueLength) { + // fix head byte in case a compact Array was originally requested: + b.buf[tos] = 0x06 + + needIndexTable := true + needNrSubs := true + if len(index) == 1 { + needIndexTable = false + needNrSubs = false + } else if (b.buf.Len()-tos)-index[0] == ValueLength(len(index))*(index[1]-index[0]) { + // In this case it could be that all entries have the same length + // and we do not need an offset table at all: + noTable := true + subLen := index[1] - index[0] + if (b.buf.Len()-tos)-index[len(index)-1] != subLen { + noTable = false + } else { + for i := 1; i < len(index)-1; i++ { + if index[i+1]-index[i] != subLen { + noTable = false + break + } + } + } + if noTable { + needIndexTable = false + needNrSubs = false + } + } + + // First determine byte length and its format: + var offsetSize uint + // can be 1, 2, 4 or 8 for the byte width of the offsets, + // the byte length and the number of subvalues: + var indexLenIfNeeded ValueLength + if needIndexTable 
{ + indexLenIfNeeded = ValueLength(len(index)) + } + nrSubsLenIfNeeded := ValueLength(7) + if needNrSubs { + nrSubsLenIfNeeded = 6 + } + if b.buf.Len()-tos+(indexLenIfNeeded)-(nrSubsLenIfNeeded) <= 0xff { + // We have so far used _pos - tos bytes, including the reserved 8 + // bytes for byte length and number of subvalues. In the 1-byte number + // case we would win back 6 bytes but would need one byte per subvalue + // for the index table + offsetSize = 1 + } else if b.buf.Len()-tos+(indexLenIfNeeded*2) <= 0xffff { + offsetSize = 2 + } else if b.buf.Len()-tos+(indexLenIfNeeded*4) <= 0xffffffff { + offsetSize = 4 + } else { + offsetSize = 8 + } + + // Maybe we need to move down data: + if offsetSize == 1 { + targetPos := ValueLength(3) + if !needIndexTable { + targetPos = 2 + } + if b.buf.Len() > (tos + 9) { + _len := ValueLength(b.buf.Len() - (tos + 9)) + checkOverflow(_len) + src := b.buf[tos+9:] + copy(b.buf[tos+targetPos:], src[:_len]) + } + diff := ValueLength(9 - targetPos) + b.buf.Shrink(uint(diff)) + if needIndexTable { + n := len(index) + for i := 0; i < n; i++ { + index[i] -= diff + } + } // Note: if !needIndexTable the index array is now wrong! + } + // One could move down things in the offsetSize == 2 case as well, + // since we only need 4 bytes in the beginning. However, saving these + // 4 bytes has been sacrificed on the Altar of Performance. 
+ + // Now build the table: + if needIndexTable { + extraSpaceNeeded := offsetSize * uint(len(index)) + if offsetSize == 8 { + extraSpaceNeeded += 8 + } + b.buf.ReserveSpace(extraSpaceNeeded) + tableBase := b.buf.Grow(offsetSize * uint(len(index))) + for i := uint(0); i < uint(len(index)); i++ { + x := uint64(index[i]) + for j := uint(0); j < offsetSize; j++ { + tableBase[offsetSize*i+j] = byte(x & 0xff) + x >>= 8 + } + } + } else { // no index table + b.buf[tos] = 0x02 + } + // Finally fix the byte width in the type byte: + if offsetSize > 1 { + if offsetSize == 2 { + b.buf[tos] += 1 + } else if offsetSize == 4 { + b.buf[tos] += 2 + } else { // offsetSize == 8 + b.buf[tos] += 3 + if needNrSubs { + b.appendLength(ValueLength(len(index)), 8) + } + } + } + + // Fix the byte length in the beginning: + x := ValueLength(b.buf.Len() - tos) + for i := uint(1); i <= offsetSize; i++ { + b.buf[tos+ValueLength(i)] = byte(x & 0xff) + x >>= 8 + } + + if offsetSize < 8 && needNrSubs { + x = ValueLength(len(index)) + for i := offsetSize + 1; i <= 2*offsetSize; i++ { + b.buf[tos+ValueLength(i)] = byte(x & 0xff) + x >>= 8 + } + } + + // Now the array or object is complete, we pop a ValueLength + // off the _stack: + b.stack.Pop() + // Intentionally leave _index[depth] intact to avoid future allocs! 
+} + +func (b *Builder) cleanupAdd() { + depth := b.stack.Len() - 1 + b.index[depth].RemoveLast() +} + +func (b *Builder) reportAdd() { + tos, stackLen := b.stack.Tos() + depth := stackLen - 1 + b.index[depth].Add(b.buf.Len() - tos) +} + +func (b *Builder) addArray(unindexed ...bool) { + h := byte(0x06) + if optionalBool(unindexed, false) { + h = 0x13 + } + b.addCompoundValue(h) +} + +func (b *Builder) addObject(unindexed ...bool) { + h := byte(0x0b) + if optionalBool(unindexed, false) { + h = 0x14 + } + b.addCompoundValue(h) +} + +func (b *Builder) addInternal(v Value) error { + haveReported := false + if !b.stack.IsEmpty() { + if !b.keyWritten { + b.reportAdd() + haveReported = true + } + } + if err := b.set(v); err != nil { + if haveReported { + b.cleanupAdd() + } + return WithStack(err) + } + return nil +} + +func (b *Builder) addInternalKeyValue(attrName string, v Value) error { + haveReported, err := b.addInternalKey(attrName) + if err != nil { + return WithStack(err) + } + if err := b.set(v); err != nil { + if haveReported { + b.cleanupAdd() + } + return WithStack(err) + } + return nil +} + +func (b *Builder) addInternalKey(attrName string) (haveReported bool, err error) { + haveReported = false + tos, stackLen := b.stack.Tos() + if stackLen > 0 { + h := b.buf[tos] + if h != 0x0b && h != 0x14 { + return haveReported, WithStack(BuilderNeedOpenObjectError) + } + if b.keyWritten { + return haveReported, WithStack(BuilderKeyAlreadyWrittenError) + } + b.reportAdd() + haveReported = true + } + + onError := func() { + if haveReported { + b.cleanupAdd() + haveReported = false + } + } + + if err := b.set(NewStringValue(attrName)); err != nil { + onError() + return haveReported, WithStack(err) + } + b.keyWritten = true + return haveReported, nil +} + +func (b *Builder) checkKeyIsString(isString bool) error { + tos, stackLen := b.stack.Tos() + if stackLen > 0 { + h := b.buf[tos] + if h == 0x0b || h == 0x14 { + if !b.keyWritten { + if isString { + b.keyWritten = true + 
} else { + return WithStack(BuilderKeyMustBeStringError) + } + } else { + b.keyWritten = false + } + } + } + return nil +} + +func (b *Builder) set(item Value) error { + //oldPos := b.buf.Len() + //ctype := item.vt + + if err := b.checkKeyIsString(item.vt == String); err != nil { + return WithStack(err) + } + + if item.IsSlice() { + switch item.vt { + case None: + return WithStack(BuilderUnexpectedTypeError{"Cannot set a ValueType::None"}) + case External: + return fmt.Errorf("External not supported") + case Custom: + return WithStack(fmt.Errorf("Cannot set a ValueType::Custom with this method")) + } + s := item.sliceValue() + // Determine length of slice + l, err := s.ByteSize() + if err != nil { + return WithStack(err) + } + b.buf.Write(s[:l]) + return nil + } + + // This method builds a single further VPack item at the current + // append position. If this is an array or object, then an index + // table is created and a new ValueLength is pushed onto the stack. + switch item.vt { + case None: + return WithStack(BuilderUnexpectedTypeError{"Cannot set a ValueType::None"}) + case Null: + b.addNull() + case Bool: + b.addBool(item.boolValue()) + case Double: + b.addDouble(item.doubleValue()) + case External: + return fmt.Errorf("External not supported") + /*if (options->disallowExternals) { + // External values explicitly disallowed as a security + // precaution + throw Exception(Exception::BuilderExternalsDisallowed); + } + if (ctype != Value::CType::VoidPtr) { + throw Exception(Exception::BuilderUnexpectedValue, + "Must give void pointer for ValueType::External"); + } + reserveSpace(1 + sizeof(void*)); + // store pointer. 
this doesn't need to be portable + _start[_pos++] = 0x1d; + void const* value = item.getExternal(); + memcpy(_start + _pos, &value, sizeof(void*)); + _pos += sizeof(void*); + break; + }*/ + case SmallInt: + b.addInt(item.intValue()) + case Int: + b.addInt(item.intValue()) + case UInt: + b.addUInt(item.uintValue()) + case UTCDate: + b.addUTCDate(item.utcDateValue()) + case String: + b.addString(item.stringValue()) + case Array: + b.addArray(item.unindexed) + case Object: + b.addObject(item.unindexed) + case Binary: + b.addBinary(item.binaryValue()) + case Illegal: + b.addIllegal() + case MinKey: + b.addMinKey() + case MaxKey: + b.addMaxKey() + case BCD: + return WithStack(fmt.Errorf("Not implemented")) + case Custom: + return WithStack(fmt.Errorf("Cannot set a ValueType::Custom with this method")) + } + return nil +} diff --git a/vendor/github.com/arangodb/go-velocypack/builder_buffer.go b/vendor/github.com/arangodb/go-velocypack/builder_buffer.go new file mode 100644 index 00000000000..fe900f13b6e --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/builder_buffer.go @@ -0,0 +1,131 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +// builderBuffer is a byte slice used for building slices. 
+type builderBuffer []byte + +const ( + minGrowDelta = 128 // Minimum amount of extra bytes to add to a buffer when growing + maxGrowDelta = 1024 * 1024 // Maximum amount of extra bytes to add to a buffer when growing +) + +// IsEmpty returns 0 if there are no values in the buffer. +func (b builderBuffer) IsEmpty() bool { + l := len(b) + return l == 0 +} + +// Len returns the length of the buffer. +func (b builderBuffer) Len() ValueLength { + l := len(b) + return ValueLength(l) +} + +// Bytes returns the bytes written to the buffer. +// The returned slice is only valid until the next modification. +func (b *builderBuffer) Bytes() []byte { + return *b +} + +// WriteByte appends a single byte to the buffer. +func (b *builderBuffer) WriteByte(v byte) { + off := len(*b) + b.growCapacity(1) + *b = (*b)[:off+1] + (*b)[off] = v +} + +// WriteBytes appends a series of identical bytes to the buffer. +func (b *builderBuffer) WriteBytes(v byte, count uint) { + if count == 0 { + return + } + off := uint(len(*b)) + b.growCapacity(count) + *b = (*b)[:off+count] + for i := uint(0); i < count; i++ { + (*b)[off+i] = v + } +} + +// Write appends a series of bytes to the buffer. +func (b *builderBuffer) Write(v []byte) { + l := uint(len(v)) + if l > 0 { + off := uint(len(*b)) + b.growCapacity(l) + *b = (*b)[:off+l] + copy((*b)[off:], v) + } +} + +// ReserveSpace ensures that at least n bytes can be added to the buffer without allocating new memory. +func (b *builderBuffer) ReserveSpace(n uint) { + if n > 0 { + b.growCapacity(n) + } +} + +// Shrink reduces the length of the buffer by n elements (removing the last elements). +func (b *builderBuffer) Shrink(n uint) { + if n > 0 { + newLen := uint(len(*b)) - n + if newLen < 0 { + newLen = 0 + } + *b = (*b)[:newLen] + } +} + +// Grow adds n elements to the buffer, returning a slice where the added elements start. 
+func (b *builderBuffer) Grow(n uint) []byte { + l := uint(len(*b)) + if n > 0 { + b.growCapacity(n) + *b = (*b)[:l+n] + } + return (*b)[l:] +} + +// growCapacity ensures that there is enough capacity in the buffer to add n elements. +func (b *builderBuffer) growCapacity(n uint) { + _b := *b + curLen := uint(len(_b)) + curCap := uint(cap(_b)) + newCap := curLen + n + if newCap <= curCap { + // No need to do anything + return + } + // Increase the capacity + extra := newCap // Grow a bit more to avoid copying all the time + if extra < minGrowDelta { + extra = minGrowDelta + } else if extra > maxGrowDelta { + extra = maxGrowDelta + } + newBuffer := make(builderBuffer, curLen, newCap+extra) + copy(newBuffer, _b) + *b = newBuffer +} diff --git a/vendor/github.com/arangodb/go-velocypack/builder_index_vector.go b/vendor/github.com/arangodb/go-velocypack/builder_index_vector.go new file mode 100644 index 00000000000..770d490ca3c --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/builder_index_vector.go @@ -0,0 +1,57 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +const ( + minIndexVectorGrowDelta = 32 + maxIndexVectorGrowDelta = 1024 +) + +// indexVector is a list of index of positions. 
+type indexVector []ValueLength + +// Add an index position to the end of the list. +func (iv *indexVector) Add(v ValueLength) { + *iv = append(*iv, v) +} + +// RemoveLast removes the last index position from the end of the list. +func (iv *indexVector) RemoveLast() { + l := len(*iv) + if l > 0 { + *iv = (*iv)[:l-1] + } +} + +// Clear removes all entries +func (iv *indexVector) Clear() { + if len(*iv) > 0 { + *iv = (*iv)[0:0] + } +} + +// IsEmpty returns true if there are no values on the vector. +func (iv indexVector) IsEmpty() bool { + l := len(iv) + return l == 0 +} diff --git a/vendor/github.com/arangodb/go-velocypack/builder_sort_entry.go b/vendor/github.com/arangodb/go-velocypack/builder_sort_entry.go new file mode 100644 index 00000000000..fbf19454e4f --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/builder_sort_entry.go @@ -0,0 +1,82 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "bytes" + "sort" +) + +type sortEntry struct { + Offset ValueLength + Name []byte +} + +type sortEntries []sortEntry + +// Len is the number of elements in the collection. +func (l sortEntries) Len() int { return len(l) } + +// Less reports whether the element with +// index i should sort before the element with index j. 
+func (l sortEntries) Less(i, j int) bool { return bytes.Compare(l[i].Name, l[j].Name) < 0 } + +// Swap swaps the elements with indexes i and j. +func (l sortEntries) Swap(i, j int) { l[i], l[j] = l[j], l[i] } + +// partition picks the last element as a pivot and reorders the array so that +// all elements with values less than the pivot come before the pivot and all +// elements with values greater than the pivot come after it. +func partition(s sortEntries) int { + hi := len(s) - 1 + pivot := s[hi] + i := 0 + for j := 0; j < hi; j++ { + r := bytes.Compare(s[j].Name, pivot.Name) + if r <= 0 { + s[i], s[j] = s[j], s[i] + i++ + } + } + s[i], s[hi] = s[hi], s[i] + return i +} + +// Sort sorts the slice in ascending order. +func (l sortEntries) qSort() { + if len(l) > 1 { + p := partition(l) + l[:p].qSort() + l[p+1:].qSort() + } +} + +// Sort sorts the slice in ascending order. +func (l sortEntries) Sort() { + x := len(l) + if x > 16 { + sort.Sort(l) + } else if len(l) > 1 { + l.qSort() + } +} diff --git a/vendor/github.com/arangodb/go-velocypack/builder_stack.go b/vendor/github.com/arangodb/go-velocypack/builder_stack.go new file mode 100644 index 00000000000..2bdf9e6cbfb --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/builder_stack.go @@ -0,0 +1,73 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +// builderStack is a stack of positions. +type builderStack struct { + stack []ValueLength + bootstrap [4]ValueLength +} + +// Push the given value on top of the stack +func (s *builderStack) Push(v ValueLength) { + if s.stack == nil { + s.stack = s.bootstrap[0:1] + s.stack[0] = v + } else { + s.stack = append(s.stack, v) + } +} + +// Pop removes the top of the stack. +func (s *builderStack) Pop() { + l := len(s.stack) + if l > 0 { + s.stack = s.stack[:l-1] + } +} + +func (s *builderStack) Clear() { + s.stack = nil +} + +// Tos returns the value at the top of the stack. +// Returns , +func (s builderStack) Tos() (ValueLength, int) { + // _s := *s + l := len(s.stack) + if l > 0 { + return (s.stack)[l-1], l + } + return 0, 0 +} + +// IsEmpty returns true if there are no values on the stack. +func (s builderStack) IsEmpty() bool { + l := len(s.stack) + return l == 0 +} + +// Len returns the number of elements of the stack. +func (s builderStack) Len() int { + return len(s.stack) +} diff --git a/vendor/github.com/arangodb/go-velocypack/decoder.go b/vendor/github.com/arangodb/go-velocypack/decoder.go new file mode 100644 index 00000000000..3bc2fcc6ffb --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/decoder.go @@ -0,0 +1,1031 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// This code is heavily inspired by the Go sources. +// See https://golang.org/src/encoding/json/ + +package velocypack + +import ( + "bytes" + "encoding" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "reflect" + "runtime" + "strconv" +) + +// A Decoder decodes velocypack values into Go structures. +type Decoder struct { + r io.Reader +} + +// Unmarshaler is implemented by types that can convert themselves from Velocypack. +type Unmarshaler interface { + UnmarshalVPack(Slice) error +} + +// NewDecoder creates a new Decoder that reads data from the given reader. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + } +} + +// Unmarshal reads v from the given Velocypack encoded data slice. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal VelocyPack into a pointer, Unmarshal first handles the case of +// the VelocyPack being the VelocyPack literal Null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the VelocyPack into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal VelocyPack into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalVPack method, including +// when the input is a VelocyPack Null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a VelocyPack quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. 
+// +// To unmarshal VelocyPack into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal VelocyPack into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for VelocyPack Bool's +// float64 for VelocyPack Double's +// uint64 for VelocyPack UInt's +// int64 for VelocyPack Int's +// string, for VelocyPack String's +// []interface{}, for VelocyPack Array's +// map[string]interface{}, for VelocyPack Object's +// nil for VelocyPack Null. +// []byte for VelocyPack Binary. +// +// To unmarshal a VelocyPack array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty VelocyPack array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a VelocyPack array into a Go array, Unmarshal decodes +// VelocyPack array elements into corresponding Go array elements. +// If the Go array is smaller than the VelocyPack array, +// the additional VelocyPack array elements are discarded. +// If the VelocyPack array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a VelocyPack object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the VelocyPack object into the map. The map's key type must +// either be a string, an integer, or implement encoding.TextUnmarshaler. 
+// +// If a VelocyPack value is not appropriate for a given target type, +// or if a VelocyPack number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The VelocyPack Null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in VelocyPack to mean +// ``not present,'' unmarshaling a VelocyPack Null into any other Go type has no effect +// on the value and produces no error. +// +func Unmarshal(data Slice, v interface{}) error { + if err := unmarshalSlice(data, v); err != nil { + return WithStack(err) + } + return nil +} + +// Decode reads v from the decoder stream. +func (e *Decoder) Decode(v interface{}) error { + s, err := SliceFromReader(e.r) + if err != nil { + return WithStack(err) + } + if err := unmarshalSlice(s, v); err != nil { + return WithStack(err) + } + return nil +} + +// unmarshalSlice reads v from the given slice. +func unmarshalSlice(data Slice, v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d := &decodeState{} + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.unmarshalValue(data, rv) + return d.savedError +} + +var ( + textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() + numberType = reflect.TypeOf(json.Number("")) +) + +type decodeState struct { + useNumber bool + errorContext struct { // provides context for type errors + Struct string + Field string + } + savedError error +} + +// error aborts the decoding by panicking with err. 
+func (d *decodeState) error(err error) { + panic(d.addErrorContext(err)) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext.Struct != "" || d.errorContext.Field != "" { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct + err.Field = d.errorContext.Field + return err + } + } + return err +} + +// unmarshalValue unmarshals any slice into given v. +func (d *decodeState) unmarshalValue(data Slice, v reflect.Value) { + if !v.IsValid() { + return + } + + switch data.Type() { + case Array: + d.unmarshalArray(data, v) + case Object: + d.unmarshalObject(data, v) + case Bool, Int, SmallInt, UInt, Double, Binary, BCD, String: + d.unmarshalLiteral(data, v) + } +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, nil, reflect.Value{} + } + if u, ok := v.Interface().(json.Unmarshaler); ok { + return nil, u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, nil, u, reflect.Value{} + } + } + } + v = v.Elem() + } + return nil, nil, nil, v +} + +// unmarshalArray unmarshals an array slice into given v. +func (d *decodeState) unmarshalArray(data Slice, v reflect.Value) { + // Check for unmarshaler. + u, ju, ut, pv := d.indirect(v, false) + if u != nil { + if err := u.UnmarshalVPack(data); err != nil { + d.error(err) + } + return + } + if ju != nil { + json, err := data.JSONString() + if err != nil { + d.error(err) + } else { + if err := ju.UnmarshalJSON([]byte(json)); err != nil { + d.error(err) + } + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type()}) + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface(data))) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type()}) + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + it, err := NewArrayIterator(data) + if err != nil { + d.error(err) + } + for it.IsValid() { + value, err := it.Value() + if err != nil { + d.error(err) + } + + // Get element of array, growing if necessary. 
+ if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.unmarshalValue(value, v.Index(i)) + } + i++ + if err := it.Next(); err != nil { + d.error(err) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +// unmarshalObject unmarshals an object slice into given v. +func (d *decodeState) unmarshalObject(data Slice, v reflect.Value) { + // Check for unmarshaler. + u, ju, ut, pv := d.indirect(v, false) + if u != nil { + if err := u.UnmarshalVPack(data); err != nil { + d.error(err) + } + return + } + if ju != nil { + json, err := data.JSONString() + if err != nil { + d.error(err) + } else { + if err := ju.UnmarshalJSON([]byte(json)); err != nil { + d.error(err) + } + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type()}) + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface(data))) + return + } + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. 
+ t := v.Type() + switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type()}) + return + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + // ok + default: + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type()}) + return + } + + var mapElem reflect.Value + + it, err := NewObjectIterator(data) + if err != nil { + d.error(err) + } + for it.IsValid() { + key, err := it.Key(true) + if err != nil { + d.error(err) + } + keyUTF8, err := key.GetStringUTF8() + if err != nil { + d.error(err) + } + value, err := it.Value() + if err != nil { + d.error(err) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, keyUTF8) { + f = ff + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + d.errorContext.Field = f.name + d.errorContext.Struct = v.Type().Name() + } + } + + if destring { + // Value should be a string that we'll decode as JSON + valueUTF8, err := value.GetStringUTF8() + if err != nil { + d.saveError(fmt.Errorf("json: invalid use of ,string 
struct tag, expected string, got %s in %v (%v)", value.Type(), subv.Type(), err)) + } + v, err := ParseJSONFromUTF8(valueUTF8) + if err != nil { + d.saveError(err) + } else { + d.unmarshalValue(v, subv) + } + } else { + d.unmarshalValue(value, subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kt := v.Type().Key() + var kv reflect.Value + switch { + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(keyUTF8).Convert(kt) + case reflect.PtrTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(v.Type().Key()) + d.literalStore(key, kv, true) + kv = kv.Elem() + default: + keyStr := string(keyUTF8) + switch kt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(keyStr, 10, 64) + if err != nil || reflect.Zero(kt).OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + keyStr, Type: kt}) + return + } + kv = reflect.ValueOf(n).Convert(kt) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(keyStr, 10, 64) + if err != nil || reflect.Zero(kt).OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + keyStr, Type: kt}) + return + } + kv = reflect.ValueOf(n).Convert(kt) + default: + panic("json: Unexpected key type") // should never occur + } + } + v.SetMapIndex(kv, subv) + } + + d.errorContext.Struct = "" + d.errorContext.Field = "" + + if err := it.Next(); err != nil { + d.error(err) + } + } +} + +// unmarshalLiteral unmarshals a literal slice into given v. +func (d *decodeState) unmarshalLiteral(data Slice, v reflect.Value) { + d.literalStore(data, v, false) +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. 
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface(data Slice) interface{} {
+	// Dispatch on the VPack slice type: compound types get dedicated
+	// builders; every other type is decoded as a literal.
+	switch data.Type() {
+	case Array:
+		return d.arrayInterface(data)
+	case Object:
+		return d.objectInterface(data)
+	default:
+		return d.literalInterface(data)
+	}
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface(data Slice) []interface{} {
+	l, err := data.Length()
+	if err != nil {
+		d.error(err)
+	}
+	// Pre-size the result to the reported array length to avoid growth copies.
+	v := make([]interface{}, 0, l)
+	it, err := NewArrayIterator(data)
+	if err != nil {
+		d.error(err)
+	}
+	for it.IsValid() {
+		value, err := it.Value()
+		if err != nil {
+			d.error(err)
+		}
+
+		// Recursively decode each element into an interface value.
+		v = append(v, d.valueInterface(value))
+
+		// Move to next field
+		if err := it.Next(); err != nil {
+			d.error(err)
+		}
+	}
+	return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface(data Slice) map[string]interface{} {
+	m := make(map[string]interface{})
+	it, err := NewObjectIterator(data)
+	if err != nil {
+		d.error(err)
+	}
+	for it.IsValid() {
+		key, err := it.Key(true)
+		if err != nil {
+			d.error(err)
+		}
+		keyStr, err := key.GetString()
+		if err != nil {
+			d.error(err)
+		}
+		value, err := it.Value()
+		if err != nil {
+			d.error(err)
+		}
+
+		// Read value.
+		m[keyStr] = d.valueInterface(value)
+
+		// Move to next field
+		if err := it.Next(); err != nil {
+			d.error(err)
+		}
+	}
+	return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface(data Slice) interface{} {
+	// Decode a single literal slice into the most natural Go value for
+	// its VPack type. Errors from the slice accessors are routed through
+	// d.error, which does not return.
+	switch data.Type() {
+	case Null:
+		return nil
+
+	case Bool:
+		v, err := data.GetBool()
+		if err != nil {
+			d.error(err)
+		}
+		return v
+
+	case String:
+		v, err := data.GetString()
+		if err != nil {
+			d.error(err)
+		}
+		return v
+
+	case Double:
+		v, err := data.GetDouble()
+		if err != nil {
+			d.error(err)
+		}
+		return v
+
+	case Int, SmallInt:
+		v, err := data.GetInt()
+		if err != nil {
+			d.error(err)
+		}
+		// Prefer a plain int when the value round-trips without loss;
+		// otherwise keep the full int64.
+		intV := int(v)
+		if int64(intV) == v {
+			// Value fits in int
+			return intV
+		}
+		return v
+
+	case UInt:
+		v, err := data.GetUInt()
+		if err != nil {
+			d.error(err)
+		}
+		return v
+
+	case Binary:
+		v, err := data.GetBinary()
+		if err != nil {
+			d.error(err)
+		}
+		return v
+
+	default: // ??
+		// No other literal type is expected here; report and return nil.
+		d.error(fmt.Errorf("unknown literal type: %s", data.Type()))
+		return nil
+	}
+}
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. This is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item Slice, v reflect.Value, fromQuoted bool) {
+	// Check for unmarshaler.
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal empty slice into %v", v.Type())) + return + } + isNull := item.IsNull() // null + u, ju, ut, pv := d.indirect(v, isNull) + if u != nil { + if err := u.UnmarshalVPack(item); err != nil { + d.error(err) + } + return + } + if ju != nil { + json, err := item.JSONString() + if err != nil { + d.error(err) + } else { + if err := ju.UnmarshalJSON([]byte(json)); err != nil { + d.error(err) + } + } + return + } + if ut != nil { + if !item.IsString() { + //if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal Slice of type %s into %v", item.Type(), v.Type())) + } else { + val := item.Type().String() + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type()}) + } + return + } + s, err := item.GetStringUTF8() + if err != nil { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal slice of type %s into %v", item.Type(), v.Type())) + } else { + d.error(InternalError) // Out of sync + } + } + if err := ut.UnmarshalText(s); err != nil { + d.error(err) + } + return + } + + v = pv + + switch item.Type() { + case Null: // null + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted /*&& string(item) != "null"*/ { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case Bool: // true, false + value, err := item.GetBool() + if err != nil { + d.error(err) + } + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. 
+ if fromQuoted /*&& string(item) != "true" && string(item) != "false"*/ { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type()}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type()}) + } + } + + case String: // string + s, err := item.GetString() + if err != nil { + d.error(err) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type()}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type()}) + break + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type()}) + } + } + + case Double: + value, err := item.GetDouble() + if err != nil { + d.error(err) + } + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + s, err := item.JSONString() + if err != nil { + d.error(err) + } + v.SetString(s) + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{Value: "number", Type: v.Type()}) + } + case reflect.Interface: + n, err := d.convertNumber(value) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: 
"number", Type: v.Type()}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := int64(value) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n := uint64(value) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n := value + v.SetFloat(n) + } + + case Int, SmallInt: + value, err := item.GetInt() + if err != nil { + d.error(err) + } + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + s, err := item.JSONString() + if err != nil { + d.error(err) + } + v.SetString(s) + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{Value: "number", Type: v.Type()}) + } + case reflect.Interface: + var n interface{} + intValue := int(value) + if int64(intValue) == value { + // When the value fits in an int, use int type. 
+ n, err = d.convertNumber(intValue) + } else { + n, err = d.convertNumber(value) + } + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type()}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := value + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n := uint64(value) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n := float64(value) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetFloat(n) + } + + case UInt: + value, err := item.GetUInt() + if err != nil { + d.error(err) + } + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + s, err := item.JSONString() + if err != nil { + d.error(err) + } + v.SetString(s) + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{Value: "number", Type: v.Type()}) + } + case reflect.Interface: + n, err := d.convertNumber(value) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type()}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := int64(value) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + 
v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n := value + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n := float64(value) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetFloat(n) + } + + case Binary: + value, err := item.GetBinary() + if err != nil { + d.error(err) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type()}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "binary", Type: v.Type()}) + break + } + v.SetBytes(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "binary", Type: v.Type()}) + } + } + + default: // number + d.error(fmt.Errorf("Unknown type %s", item.Type())) + } +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s interface{}) (interface{}, error) { + if d.useNumber { + return json.Number(fmt.Sprintf("%v", s)), nil + } + return s, nil +} diff --git a/vendor/github.com/arangodb/go-velocypack/doc.go b/vendor/github.com/arangodb/go-velocypack/doc.go new file mode 100644 index 00000000000..2f7596f98d9 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/doc.go @@ -0,0 +1,26 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +/* +Velocypack implementation for Go. +*/ +package velocypack diff --git a/vendor/github.com/arangodb/go-velocypack/dumper.go b/vendor/github.com/arangodb/go-velocypack/dumper.go new file mode 100644 index 00000000000..51bd1c36d51 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/dumper.go @@ -0,0 +1,381 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "fmt" + "io" + "strconv" +) + +type DumperOptions struct { + // EscapeUnicode turns on escapping multi-byte Unicode characters when dumping them to JSON (creates \uxxxx sequences). + EscapeUnicode bool + // EscapeForwardSlashes turns on escapping forward slashes when serializing VPack values into JSON. 
+ EscapeForwardSlashes bool + UnsupportedTypeBehavior UnsupportedTypeBehavior +} + +type UnsupportedTypeBehavior int + +const ( + NullifyUnsupportedType UnsupportedTypeBehavior = iota + ConvertUnsupportedType + FailOnUnsupportedType +) + +type Dumper struct { + w io.Writer + indentation uint + options DumperOptions +} + +// NewDumper creates a new dumper around the given writer, with an optional options. +func NewDumper(w io.Writer, options *DumperOptions) *Dumper { + d := &Dumper{ + w: w, + } + if options != nil { + d.options = *options + } + return d +} + +func (d *Dumper) Append(s Slice) error { + w := d.w + switch s.Type() { + case Null: + if _, err := w.Write([]byte("null")); err != nil { + return WithStack(err) + } + return nil + case Bool: + if v, err := s.GetBool(); err != nil { + return WithStack(err) + } else if v { + if _, err := w.Write([]byte("true")); err != nil { + return WithStack(err) + } + } else { + if _, err := w.Write([]byte("false")); err != nil { + return WithStack(err) + } + } + return nil + case Double: + if v, err := s.GetDouble(); err != nil { + return WithStack(err) + } else if err := d.appendDouble(v); err != nil { + return WithStack(err) + } + return nil + case Int, SmallInt: + if v, err := s.GetInt(); err != nil { + return WithStack(err) + } else if err := d.appendInt(v); err != nil { + return WithStack(err) + } + return nil + case UInt: + if v, err := s.GetUInt(); err != nil { + return WithStack(err) + } else if err := d.appendUInt(v); err != nil { + return WithStack(err) + } + return nil + case String: + if v, err := s.GetString(); err != nil { + return WithStack(err) + } else if err := d.appendString(v); err != nil { + return WithStack(err) + } + return nil + case Array: + if err := d.appendArray(s); err != nil { + return WithStack(err) + } + return nil + case Object: + if err := d.appendObject(s); err != nil { + return WithStack(err) + } + return nil + default: + switch d.options.UnsupportedTypeBehavior { + case 
NullifyUnsupportedType: + if _, err := w.Write([]byte("null")); err != nil { + return WithStack(err) + } + case ConvertUnsupportedType: + msg := fmt.Sprintf("(non-representable type %s)", s.Type().String()) + if err := d.appendString(msg); err != nil { + return WithStack(err) + } + default: + return WithStack(NoJSONEquivalentError) + } + } + + return nil +} + +var ( + doubleQuoteSeq = []byte{'"'} + escapeTable = [256]byte{ + // 0 1 2 3 4 5 6 7 8 9 A B C D E + // F + 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'b', 't', 'n', 'u', 'f', 'r', + 'u', + 'u', // 00 + 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', + 'u', + 'u', // 10 + 0, 0, '"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + '/', // 20 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + 0, // 30~4F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + '\\', 0, 0, 0, // 50 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + 0, // 60~FF + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0} +) + +func (d *Dumper) appendUInt(v uint64) error { + s := strconv.FormatUint(v, 10) + if _, err := d.w.Write([]byte(s)); err != nil { + return WithStack(err) + } + return nil +} + +func (d *Dumper) appendInt(v int64) error { + s := strconv.FormatInt(v, 10) + if _, err := d.w.Write([]byte(s)); err != nil { + return WithStack(err) + } + return nil +} + +func formatDouble(v float64) string { + return strconv.FormatFloat(v, 'g', -1, 64) +} + +func (d *Dumper) appendDouble(v float64) error { + s := formatDouble(v) + if _, err := d.w.Write([]byte(s)); err != nil { + return WithStack(err) + 
} + return nil +} + +func (d *Dumper) appendString(v string) error { + p := []byte(v) + e := len(p) + buf := make([]byte, 0, 16) + if _, err := d.w.Write(doubleQuoteSeq); err != nil { + return WithStack(err) + } + for i := 0; i < e; i++ { + buf = buf[0:0] + c := p[i] + if (c & 0x80) == 0 { + // check for control characters + esc := escapeTable[c] + + if esc != 0 { + if c != '/' || d.options.EscapeForwardSlashes { + // escape forward slashes only when requested + buf = append(buf, '\\') + } + buf = append(buf, esc) + + if esc == 'u' { + i1 := ((uint(c)) & 0xf0) >> 4 + i2 := ((uint(c)) & 0x0f) + + buf = append(buf, '0', '0', hexChar(i1), hexChar(i2)) + } + } else { + buf = append(buf, c) + } + } else if (c & 0xe0) == 0xc0 { + // two-byte sequence + if i+1 >= e { + return WithStack(InvalidUtf8SequenceError) + } + + if d.options.EscapeUnicode { + value := ((uint(p[i]) & 0x1f) << 6) | (uint(p[i+1]) & 0x3f) + buf = dumpUnicodeCharacter(buf, value) + } else { + buf = append(buf, p[i:i+2]...) + } + i++ + } else if (c & 0xf0) == 0xe0 { + // three-byte sequence + if i+2 >= e { + return WithStack(InvalidUtf8SequenceError) + } + + if d.options.EscapeUnicode { + value := (((uint(p[i]) & 0x0f) << 12) | ((uint(p[i+1]) & 0x3f) << 6) | (uint(p[i + +2]) & 0x3f)) + buf = dumpUnicodeCharacter(buf, value) + } else { + buf = append(buf, p[i:i+3]...) + } + i += 2 + } else if (c & 0xf8) == 0xf0 { + // four-byte sequence + if i+3 >= e { + return WithStack(InvalidUtf8SequenceError) + } + + if d.options.EscapeUnicode { + value := (((uint(p[i]) & 0x0f) << 18) | ((uint(p[i+1]) & 0x3f) << 12) | ((uint(p[i+2]) & 0x3f) << 6) | (uint(p[i+3]) & 0x3f)) + // construct the surrogate pairs + value -= 0x10000 + high := (((value & 0xffc00) >> 10) + 0xd800) + buf = dumpUnicodeCharacter(buf, high) + low := (value & 0x3ff) + 0xdc00 + buf = dumpUnicodeCharacter(buf, low) + } else { + buf = append(buf, p[i:i+4]...) 
+ } + i += 3 + } + if _, err := d.w.Write(buf); err != nil { + return WithStack(err) + } + } + if _, err := d.w.Write(doubleQuoteSeq); err != nil { + return WithStack(err) + } + return nil +} + +func (d *Dumper) appendArray(v Slice) error { + w := d.w + it, err := NewArrayIterator(v) + if err != nil { + return WithStack(err) + } + if _, err := w.Write([]byte{'['}); err != nil { + return WithStack(err) + } + for it.IsValid() { + if !it.IsFirst() { + if _, err := w.Write([]byte{','}); err != nil { + return WithStack(err) + } + } + if value, err := it.Value(); err != nil { + return WithStack(err) + } else if err := d.Append(value); err != nil { + return WithStack(err) + } + if err := it.Next(); err != nil { + return WithStack(err) + } + } + if _, err := w.Write([]byte{']'}); err != nil { + return WithStack(err) + } + return nil +} + +func (d *Dumper) appendObject(v Slice) error { + w := d.w + it, err := NewObjectIterator(v) + if err != nil { + return WithStack(err) + } + if _, err := w.Write([]byte{'{'}); err != nil { + return WithStack(err) + } + for it.IsValid() { + if !it.IsFirst() { + if _, err := w.Write([]byte{','}); err != nil { + return WithStack(err) + } + } + if key, err := it.Key(true); err != nil { + return WithStack(err) + } else if err := d.Append(key); err != nil { + return WithStack(err) + } + if _, err := w.Write([]byte{':'}); err != nil { + return WithStack(err) + } + if value, err := it.Value(); err != nil { + return WithStack(err) + } else if err := d.Append(value); err != nil { + return WithStack(err) + } + if err := it.Next(); err != nil { + return WithStack(err) + } + } + if _, err := w.Write([]byte{'}'}); err != nil { + return WithStack(err) + } + return nil +} + +func dumpUnicodeCharacter(dst []byte, value uint) []byte { + dst = append(dst, '\\', 'u') + + mask := uint(0xf000) + shift := uint(12) + for i := 3; i >= 0; i-- { + p := (value & mask) >> shift + dst = append(dst, hexChar(p)) + if i > 0 { + mask = mask >> 4 + shift -= 4 + } + } + 
return dst +} + +func hexChar(v uint) byte { + v = v & uint(0x0f) + if v < 10 { + return byte('0' + v) + } + return byte('A' + v - 10) +} diff --git a/vendor/github.com/arangodb/go-velocypack/encoder.go b/vendor/github.com/arangodb/go-velocypack/encoder.go new file mode 100644 index 00000000000..1a4de737a10 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/encoder.go @@ -0,0 +1,670 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// This code is heavily inspired by the Go sources. +// See https://golang.org/src/encoding/json/ + +package velocypack + +import ( + "bytes" + "encoding" + "encoding/json" + "io" + "reflect" + "runtime" + "sort" + "strconv" + "sync" +) + +// An Encoder encodes Go structures into velocypack values written to an output stream. +type Encoder struct { + b Builder + w io.Writer +} + +// Marshaler is implemented by types that can convert themselves into Velocypack. +type Marshaler interface { + MarshalVPack() (Slice, error) +} + +// NewEncoder creates a new Encoder that writes output to the given writer. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + } +} + +// Marshal writes the Velocypack encoding of v to a buffer and returns that buffer. +// +// Marshal traverses the value v recursively. 
+// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalVPack method +// to produce Velocypack. +// If an encountered value implements the json.Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON and converts the resulting JSON to VelocyPack. +// If no MarshalVPack or MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a Velocypack string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalVPack. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as Velocypack booleans. +// +// Floating point, integer, and Number values encode as Velocypack Int's, UInt's and Double's. +// +// String values encode as Velocypack strings. +// +// Array and slice values encode as Velocypack arrays, except that +// []byte encodes as Velocypack Binary data, and a nil slice +// encodes as the Null Velocypack value. +// +// Struct values encode as Velocypack objects. +// The encoding follows the same rules as specified for json.Marshal. +// This means that all `json` tags are fully supported. +// +// Map values encode as Velocypack objects. +// The encoding follows the same rules as specified for json.Marshal. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the Null Velocypack value. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the Null Velocypack value. +// +// Channel, complex, and function values cannot be encoded in Velocypack. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// Velocypack cannot represent cyclic data structures and Marshal does not +// handle them. 
Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) (result Slice, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + if s, ok := r.(string); ok { + panic(s) + } + err = r.(error) + } + }() + var b Builder + reflectValue(&b, reflect.ValueOf(v), encoderOptions{}) + return b.Slice() +} + +// Encode writes the Velocypack encoding of v to the stream. +func (e *Encoder) Encode(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + if s, ok := r.(string); ok { + panic(s) + } + err = r.(error) + } + }() + e.b.Clear() + reflectValue(&e.b, reflect.ValueOf(v), encoderOptions{}) + if _, err := e.b.WriteTo(e.w); err != nil { + return WithStack(err) + } + return nil +} + +// Builder returns a reference to the builder used in the given encoder. +func (e *Encoder) Builder() *Builder { + return &e.b +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func reflectValue(b *Builder, v reflect.Value, options encoderOptions) { + valueEncoder(v)(b, v, options) +} + +type encoderOptions struct { + quoted bool +} + +type encoderFunc func(b *Builder, v reflect.Value, options encoderOptions) + +var encoderCache struct { + sync.RWMutex + m map[reflect.Type]encoderFunc +} + +func valueEncoder(v reflect.Value) encoderFunc { + if !v.IsValid() { + return invalidValueEncoder + } + return 
typeEncoder(v.Type()) +} + +var ( + marshalerType = reflect.TypeOf(new(Marshaler)).Elem() + jsonMarshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() + textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() + nullValue = NewNullValue() +) + +func typeEncoder(t reflect.Type) encoderFunc { + encoderCache.RLock() + f := encoderCache.m[t] + encoderCache.RUnlock() + if f != nil { + return f + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + encoderCache.Lock() + if encoderCache.m == nil { + encoderCache.m = make(map[reflect.Type]encoderFunc) + } + var wg sync.WaitGroup + wg.Add(1) + encoderCache.m[t] = func(b *Builder, v reflect.Value, options encoderOptions) { + wg.Wait() + f(b, v, options) + } + encoderCache.Unlock() + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = newTypeEncoder(t, true) + wg.Done() + encoderCache.Lock() + encoderCache.m[t] = f + encoderCache.Unlock() + return f +} + +// newTypeEncoder constructs an encoderFunc for a type. +// The returned encoder only checks CanAddr when allowAddr is true. 
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { + if t.Implements(marshalerType) { + return marshalerEncoder + } + if t.Implements(jsonMarshalerType) { + return jsonMarshalerEncoder + } + if t.Kind() != reflect.Ptr && allowAddr { + if reflect.PtrTo(t).Implements(marshalerType) { + return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false)) + } + if reflect.PtrTo(t).Implements(jsonMarshalerType) { + return newCondAddrEncoder(addrJSONMarshalerEncoder, newTypeEncoder(t, false)) + } + } + + if t.Implements(textMarshalerType) { + return textMarshalerEncoder + } + if t.Kind() != reflect.Ptr && allowAddr { + if reflect.PtrTo(t).Implements(textMarshalerType) { + return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false)) + } + } + + switch t.Kind() { + case reflect.Bool: + return boolEncoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intEncoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintEncoder + case reflect.Float32, reflect.Float64: + return doubleEncoder + case reflect.String: + return stringEncoder + case reflect.Interface: + return interfaceEncoder + case reflect.Struct: + return newStructEncoder(t) + case reflect.Map: + return newMapEncoder(t) + case reflect.Slice: + return newSliceEncoder(t) + case reflect.Array: + return newArrayEncoder(t) + case reflect.Ptr: + return newPtrEncoder(t) + default: + return unsupportedTypeEncoder + } +} + +func invalidValueEncoder(b *Builder, v reflect.Value, options encoderOptions) { + b.addInternal(nullValue) +} + +func marshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if v.Kind() == reflect.Ptr && v.IsNil() { + b.addInternal(nullValue) + return + } + m, ok := v.Interface().(Marshaler) + if !ok { + b.addInternal(nullValue) + return + } + if vpack, err := m.MarshalVPack(); err != nil { + panic(&MarshalerError{v.Type(), err}) + } else { + 
b.addInternal(NewSliceValue(vpack)) + } +} + +func jsonMarshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if v.Kind() == reflect.Ptr && v.IsNil() { + b.addInternal(nullValue) + return + } + m, ok := v.Interface().(json.Marshaler) + if !ok { + b.addInternal(nullValue) + return + } + if json, err := m.MarshalJSON(); err != nil { + panic(&MarshalerError{v.Type(), err}) + } else { + // Convert JSON to vpack + if slice, err := ParseJSON(bytes.NewReader(json)); err != nil { + panic(&MarshalerError{v.Type(), err}) + } else { + b.addInternal(NewSliceValue(slice)) + } + } +} + +func addrMarshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + va := v.Addr() + if va.IsNil() { + b.addInternal(nullValue) + return + } + m := va.Interface().(Marshaler) + if vpack, err := m.MarshalVPack(); err != nil { + panic(&MarshalerError{Type: v.Type(), Err: err}) + } else { + if err = b.AddValue(NewSliceValue(vpack)); err != nil { + panic(&MarshalerError{Type: v.Type(), Err: err}) + } + } +} + +func addrJSONMarshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + va := v.Addr() + if va.IsNil() { + b.addInternal(nullValue) + return + } + m := va.Interface().(json.Marshaler) + if json, err := m.MarshalJSON(); err != nil { + panic(&MarshalerError{Type: v.Type(), Err: err}) + } else { + if slice, err := ParseJSON(bytes.NewReader(json)); err != nil { + panic(&MarshalerError{v.Type(), err}) + } else { + // copy VPack into buffer, checking validity. 
+ b.buf.Write(slice) + } + } +} + +func textMarshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if v.Kind() == reflect.Ptr && v.IsNil() { + b.addInternal(nullValue) + return + } + m := v.Interface().(encoding.TextMarshaler) + text, err := m.MarshalText() + if err != nil { + panic(&MarshalerError{v.Type(), err}) + } + b.addInternal(NewStringValue(string(text))) +} + +func addrTextMarshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + va := v.Addr() + if va.IsNil() { + b.addInternal(nullValue) + return + } + m := va.Interface().(encoding.TextMarshaler) + text, err := m.MarshalText() + if err != nil { + panic(&MarshalerError{v.Type(), err}) + } + b.addInternal(NewStringValue(string(text))) +} + +func boolEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if options.quoted { + b.addInternal(NewStringValue(strconv.FormatBool(v.Bool()))) + } else { + b.addInternal(NewBoolValue(v.Bool())) + } +} + +func intEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if options.quoted { + b.addInternal(NewStringValue(strconv.FormatInt(v.Int(), 10))) + } else { + b.addInternal(NewIntValue(v.Int())) + } +} + +func uintEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if options.quoted { + b.addInternal(NewStringValue(strconv.FormatUint(v.Uint(), 10))) + } else { + b.addInternal(NewUIntValue(v.Uint())) + } +} + +func doubleEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if options.quoted { + b.addInternal(NewStringValue(formatDouble(v.Float()))) + } else { + b.addInternal(NewDoubleValue(v.Float())) + } +} + +func stringEncoder(b *Builder, v reflect.Value, options encoderOptions) { + s := v.String() + if options.quoted { + raw, _ := json.Marshal(s) + s = string(raw) + } + b.addInternal(NewStringValue(s)) +} + +func interfaceEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if v.IsNil() { + b.addInternal(nullValue) + return + } + vElem := v.Elem() + valueEncoder(vElem)(b, 
vElem, options) +} + +func unsupportedTypeEncoder(b *Builder, v reflect.Value, options encoderOptions) { + panic(&UnsupportedTypeError{v.Type()}) +} + +type structEncoder struct { + fields []field + fieldEncs []encoderFunc +} + +func (se *structEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if err := b.OpenObject(); err != nil { + panic(err) + } + for i, f := range se.fields { + fv := fieldByIndex(v, f.index) + if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) { + continue + } + // Key + _, err := b.addInternalKey(f.name) + if err != nil { + panic(err) + } + // Value + options.quoted = f.quoted + se.fieldEncs[i](b, fv, options) + } + if err := b.Close(); err != nil { + panic(err) + } +} + +func newStructEncoder(t reflect.Type) encoderFunc { + fields := cachedTypeFields(t) + se := &structEncoder{ + fields: fields, + fieldEncs: make([]encoderFunc, len(fields)), + } + for i, f := range fields { + se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index)) + } + return se.encode +} + +type mapEncoder struct { + elemEnc encoderFunc +} + +func (e *mapEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if v.IsNil() { + b.addInternal(nullValue) + return + } + if err := b.OpenObject(); err != nil { + panic(err) + } + + // Extract and sort the keys. 
+ keys := v.MapKeys() + sv := make(reflectWithStringSlice, len(keys)) + for i, v := range keys { + sv[i].v = v + if err := sv[i].resolve(); err != nil { + panic(&MarshalerError{v.Type(), err}) + } + } + sort.Sort(sv) + + for _, kv := range sv { + // Key + _, err := b.addInternalKey(kv.s) + if err != nil { + panic(err) + } + // Value + e.elemEnc(b, v.MapIndex(kv.v), options) + } + if err := b.Close(); err != nil { + panic(err) + } +} + +func newMapEncoder(t reflect.Type) encoderFunc { + switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !t.Key().Implements(textMarshalerType) { + return unsupportedTypeEncoder + } + } + me := &mapEncoder{typeEncoder(t.Elem())} + return me.encode +} + +func encodeByteSlice(b *Builder, v reflect.Value, options encoderOptions) { + if v.IsNil() { + b.addInternal(nullValue) + return + } + b.addInternal(NewBinaryValue(v.Bytes())) +} + +// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil. +type sliceEncoder struct { + arrayEnc encoderFunc +} + +func (se *sliceEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if v.IsNil() { + b.addInternal(nullValue) + return + } + se.arrayEnc(b, v, options) +} + +func newSliceEncoder(t reflect.Type) encoderFunc { + // Byte slices get special treatment; arrays don't. 
+ if t.Elem().Kind() == reflect.Uint8 { + p := reflect.PtrTo(t.Elem()) + if !p.Implements(marshalerType) && !p.Implements(jsonMarshalerType) && !p.Implements(textMarshalerType) { + return encodeByteSlice + } + } + enc := &sliceEncoder{newArrayEncoder(t)} + return enc.encode +} + +type arrayEncoder struct { + elemEnc encoderFunc +} + +func (ae *arrayEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if err := b.OpenArray(); err != nil { + panic(err) + } + n := v.Len() + for i := 0; i < n; i++ { + ae.elemEnc(b, v.Index(i), options) + } + if err := b.Close(); err != nil { + panic(err) + } +} + +func newArrayEncoder(t reflect.Type) encoderFunc { + enc := &arrayEncoder{typeEncoder(t.Elem())} + return enc.encode +} + +type ptrEncoder struct { + elemEnc encoderFunc +} + +func (pe *ptrEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if v.IsNil() { + b.addInternal(nullValue) + return + } + pe.elemEnc(b, v.Elem(), options) +} + +func newPtrEncoder(t reflect.Type) encoderFunc { + enc := &ptrEncoder{typeEncoder(t.Elem())} + return enc.encode +} + +type condAddrEncoder struct { + canAddrEnc, elseEnc encoderFunc +} + +func (ce *condAddrEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if v.CanAddr() { + ce.canAddrEnc(b, v, options) + } else { + ce.elseEnc(b, v, options) + } +} + +// newCondAddrEncoder returns an encoder that checks whether its value +// CanAddr and delegates to canAddrEnc if so, else to elseEnc. 
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc { + enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} + return enc.encode +} + +type reflectWithString struct { + v reflect.Value + s string +} + +func (w *reflectWithString) resolve() error { + if w.v.Kind() == reflect.String { + w.s = w.v.String() + return nil + } + if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + w.s = string(buf) + return err + } + switch w.v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + w.s = strconv.FormatInt(w.v.Int(), 10) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + w.s = strconv.FormatUint(w.v.Uint(), 10) + return nil + } + panic("unexpected map key type") +} + +type reflectWithStringSlice []reflectWithString + +// Len is the number of elements in the collection. +func (l reflectWithStringSlice) Len() int { + return len(l) +} + +// Less reports whether the element with +// index i should sort before the element with index j. +func (l reflectWithStringSlice) Less(i, j int) bool { + return l[i].s < l[j].s +} + +// Swap swaps the elements with indexes i and j. +func (l reflectWithStringSlice) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/arangodb/go-velocypack/encoder_field.go b/vendor/github.com/arangodb/go-velocypack/encoder_field.go new file mode 100644 index 00000000000..9ebf176de46 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/encoder_field.go @@ -0,0 +1,330 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// This code is (mostly) taken for the Go sources. +// See https://golang.org/src/encoding/json/ +// +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package velocypack + +import ( + "reflect" + "sort" + "sync" + "sync/atomic" +) + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fieldByIndex(v reflect.Value, index []int) reflect.Value { + for _, i := range index { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + } + v = v.Field(i) + } + return v +} + +func typeByIndex(t reflect.Type, index []int) reflect.Type { + for _, i := range index { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + t = t.Field(i).Type + } + return t +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byIndex sorts field by index sequence. 
+type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// sort field by name, breaking ties with depth, then +// breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byNameIndexlenTag []field + +func (x byNameIndexlenTag) Len() int { return len(x) } + +func (x byNameIndexlenTag) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byNameIndexlenTag) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + + tag := sf.Tag.Get("velocypack") + if len(tag) == 0 { + tag = sf.Tag.Get("json") + } + + if tag == "-" { + continue + } + + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Only strings, floats, integers, and booleans can be quoted. + quoted := false + if opts.Contains("string") { + switch ft.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.String: + quoted = true + } + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: quoted, + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byNameIndexlenTag(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. 
+ + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. 
+ if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + value atomic.Value // map[reflect.Type][]field + mu sync.Mutex // used only by writers +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + m, _ := fieldCache.value.Load().(map[reflect.Type][]field) + f := m[t] + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.mu.Lock() + m, _ = fieldCache.value.Load().(map[reflect.Type][]field) + newM := make(map[reflect.Type][]field, len(m)+1) + for k, v := range m { + newM[k] = v + } + newM[t] = f + fieldCache.value.Store(newM) + fieldCache.mu.Unlock() + return f +} diff --git a/vendor/github.com/arangodb/go-velocypack/encoder_fold.go b/vendor/github.com/arangodb/go-velocypack/encoder_fold.go new file mode 100644 index 00000000000..a32f5f2e3e6 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/encoder_fold.go @@ -0,0 +1,168 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// This code is (mostly) taken for the Go sources. 
+// See https://golang.org/src/encoding/json/ +// +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package velocypack + +import ( + "bytes" + "unicode/utf8" +) + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See https://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. 
+func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. 
+func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} diff --git a/vendor/github.com/arangodb/go-velocypack/encoder_tags.go b/vendor/github.com/arangodb/go-velocypack/encoder_tags.go new file mode 100644 index 00000000000..0efeb6a6372 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/encoder_tags.go @@ -0,0 +1,89 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// This code is (mostly) taken for the Go sources. +// See https://golang.org/src/encoding/json/ +// +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package velocypack + +import ( + "strings" + "unicode" +) + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. 
+ default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/arangodb/go-velocypack/error.go b/vendor/github.com/arangodb/go-velocypack/error.go new file mode 100644 index 00000000000..51b6c6e2afc --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/error.go @@ -0,0 +1,231 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "errors" + "reflect" +) + +// InvalidTypeError is returned when a Slice getter is called on a slice of a different type. +type InvalidTypeError struct { + Message string +} + +// Error implements the error interface for InvalidTypeError. +func (e InvalidTypeError) Error() string { + return e.Message +} + +// IsInvalidType returns true if the given error is an InvalidTypeError. +func IsInvalidType(err error) bool { + _, ok := Cause(err).(InvalidTypeError) + return ok +} + +var ( + // NumberOutOfRangeError indicates an out of range error. + NumberOutOfRangeError = errors.New("number out of range") + // IsNumberOutOfRange returns true if the given error is an NumberOutOfRangeError. + IsNumberOutOfRange = isCausedByFunc(NumberOutOfRangeError) + // IndexOutOfBoundsError indicates an index outside of array/object bounds. + IndexOutOfBoundsError = errors.New("index out of range") + // IsIndexOutOfBounds returns true if the given error is an IndexOutOfBoundsError. + IsIndexOutOfBounds = isCausedByFunc(IndexOutOfBoundsError) + // NeedAttributeTranslatorError indicates a lack of object key translator (smallint|uint -> string). + NeedAttributeTranslatorError = errors.New("need attribute translator") + // IsNeedAttributeTranslator returns true if the given error is an NeedAttributeTranslatorError. + IsNeedAttributeTranslator = isCausedByFunc(NeedAttributeTranslatorError) + // InternalError indicates an error that the client cannot prevent. + InternalError = errors.New("internal") + // IsInternal returns true if the given error is an InternalError. + IsInternal = isCausedByFunc(InternalError) + // BuilderNeedOpenArrayError indicates an (invalid) attempt to open an array/object when that is not allowed. 
+ BuilderNeedOpenArrayError = errors.New("builder need open array") + // IsBuilderNeedOpenArray returns true if the given error is an BuilderNeedOpenArrayError. + IsBuilderNeedOpenArray = isCausedByFunc(BuilderNeedOpenArrayError) + // BuilderNeedOpenObjectError indicates an (invalid) attempt to open an array/object when that is not allowed. + BuilderNeedOpenObjectError = errors.New("builder need open object") + // IsBuilderNeedOpenObject returns true if the given error is an BuilderNeedOpenObjectError. + IsBuilderNeedOpenObject = isCausedByFunc(BuilderNeedOpenObjectError) + // BuilderNeedOpenCompoundError indicates an (invalid) attempt to close an array/object that is already closed. + BuilderNeedOpenCompoundError = errors.New("builder need open array or object") + // IsBuilderNeedOpenCompound returns true if the given error is an BuilderNeedOpenCompoundError. + IsBuilderNeedOpenCompound = isCausedByFunc(BuilderNeedOpenCompoundError) + DuplicateAttributeNameError = errors.New("duplicate key name") + // IsDuplicateAttributeName returns true if the given error is an DuplicateAttributeNameError. + IsDuplicateAttributeName = isCausedByFunc(DuplicateAttributeNameError) + // BuilderNotClosedError is returned when a call is made to Builder.Bytes without being closed. + BuilderNotClosedError = errors.New("builder not closed") + // IsBuilderNotClosed returns true if the given error is an BuilderNotClosedError. + IsBuilderNotClosed = isCausedByFunc(BuilderNotClosedError) + // BuilderKeyAlreadyWrittenError is returned when a call is made to Builder.Bytes without being closed. + BuilderKeyAlreadyWrittenError = errors.New("builder key already written") + // IsBuilderKeyAlreadyWritten returns true if the given error is an BuilderKeyAlreadyWrittenError. + IsBuilderKeyAlreadyWritten = isCausedByFunc(BuilderKeyAlreadyWrittenError) + // BuilderKeyMustBeStringError is returned when a key is not of type string. 
+ BuilderKeyMustBeStringError = errors.New("builder key must be string") + // IsBuilderKeyMustBeString returns true if the given error is an BuilderKeyMustBeStringError. + IsBuilderKeyMustBeString = isCausedByFunc(BuilderKeyMustBeStringError) + // BuilderNeedSubValueError is returned when a RemoveLast is called without any value in an object/array. + BuilderNeedSubValueError = errors.New("builder need sub value") + // IsBuilderNeedSubValue returns true if the given error is an BuilderNeedSubValueError. + IsBuilderNeedSubValue = isCausedByFunc(BuilderNeedSubValueError) + // InvalidUtf8SequenceError indicates an invalid UTF8 (string) sequence. + InvalidUtf8SequenceError = errors.New("invalid utf8 sequence") + // IsInvalidUtf8Sequence returns true if the given error is an InvalidUtf8SequenceError. + IsInvalidUtf8Sequence = isCausedByFunc(InvalidUtf8SequenceError) + // NoJSONEquivalentError is returned when a Velocypack type cannot be converted to JSON. + NoJSONEquivalentError = errors.New("no JSON equivalent") + // IsNoJSONEquivalent returns true if the given error is an NoJSONEquivalentError. + IsNoJSONEquivalent = isCausedByFunc(NoJSONEquivalentError) +) + +// isCausedByFunc creates an error test function. +func isCausedByFunc(cause error) func(err error) bool { + return func(err error) bool { + return Cause(err) == cause + } +} + +// BuilderUnexpectedTypeError is returned when a Builder function received an invalid type. +type BuilderUnexpectedTypeError struct { + Message string +} + +// Error implements the error interface for BuilderUnexpectedTypeError. +func (e BuilderUnexpectedTypeError) Error() string { + return e.Message +} + +// IsBuilderUnexpectedType returns true if the given error is an BuilderUnexpectedTypeError. +func IsBuilderUnexpectedType(err error) bool { + _, ok := Cause(err).(BuilderUnexpectedTypeError) + return ok +} + +// MarshalerError is returned when a custom VPack Marshaler returns an error. 
+type MarshalerError struct { + Type reflect.Type + Err error +} + +// Error implements the error interface for MarshalerError. +func (e MarshalerError) Error() string { + return "error calling MarshalVPack for type " + e.Type.String() + ": " + e.Err.Error() +} + +// IsMarshaler returns true if the given error is an MarshalerError. +func IsMarshaler(err error) bool { + _, ok := Cause(err).(MarshalerError) + return ok +} + +// UnsupportedTypeError is returned when a type is marshaled that cannot be marshaled. +type UnsupportedTypeError struct { + Type reflect.Type +} + +// Error implements the error interface for UnsupportedTypeError. +func (e UnsupportedTypeError) Error() string { + return "unsupported type " + e.Type.String() +} + +// IsUnsupportedType returns true if the given error is an UnsupportedTypeError. +func IsUnsupportedType(err error) bool { + _, ok := Cause(err).(UnsupportedTypeError) + return ok +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +// IsInvalidUnmarshal returns true if the given error is an InvalidUnmarshalError. +func IsInvalidUnmarshal(err error) bool { + _, ok := Cause(err).(*InvalidUnmarshalError) + return ok +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. 
+type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Struct string // name of the struct type containing the field + Field string // name of the field holding the Go value +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// IsUnmarshalType returns true if the given error is an UnmarshalTypeError. +func IsUnmarshalType(err error) bool { + _, ok := Cause(err).(*UnmarshalTypeError) + return ok +} + +// An ParseError is returned when JSON cannot be parsed correctly. +type ParseError struct { + msg string + Offset int64 +} + +func (e *ParseError) Error() string { + return e.msg +} + +// IsParse returns true if the given error is a ParseError. +func IsParse(err error) bool { + _, ok := Cause(err).(*ParseError) + return ok +} + +var ( + // WithStack is called on every return of an error to add stacktrace information to the error. + // When setting this function, also set the Cause function. + // The interface of this function is compatible with functions in github.com/pkg/errors. + // WithStack(nil) must return nil. + WithStack = func(err error) error { return err } + // Cause is used to get the root cause of the given error. + // The interface of this function is compatible with functions in github.com/pkg/errors. + // Cause(nil) must return nil. 
+ Cause = func(err error) error { return err } +) diff --git a/vendor/github.com/arangodb/go-velocypack/object_iterator.go b/vendor/github.com/arangodb/go-velocypack/object_iterator.go new file mode 100644 index 00000000000..187e6379f65 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/object_iterator.go @@ -0,0 +1,114 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +type ObjectIterator struct { + s Slice + position ValueLength + size ValueLength + current Slice +} + +// NewObjectIterator initializes an iterator at position 0 of the given object slice. +func NewObjectIterator(s Slice, allowRandomIteration ...bool) (*ObjectIterator, error) { + if !s.IsObject() { + return nil, InvalidTypeError{"Expected Object slice"} + } + size, err := s.Length() + if err != nil { + return nil, WithStack(err) + } + i := &ObjectIterator{ + s: s, + position: 0, + size: size, + } + if size > 0 { + if h := s.head(); h == 0x14 { + i.current, err = s.KeyAt(0, false) + } else if optionalBool(allowRandomIteration, false) { + i.current = s[s.findDataOffset(h):] + } + } + return i, nil +} + +// IsValid returns true if the given position of the iterator is valid. 
+func (i *ObjectIterator) IsValid() bool { + return i.position < i.size +} + +// IsFirst returns true if the current position is 0. +func (i *ObjectIterator) IsFirst() bool { + return i.position == 0 +} + +// Key returns the key of the current position of the iterator +func (i *ObjectIterator) Key(translate bool) (Slice, error) { + if i.position >= i.size { + return nil, WithStack(IndexOutOfBoundsError) + } + if current := i.current; current != nil { + if translate { + key, err := current.makeKey() + return key, WithStack(err) + } + return current, nil + } + key, err := i.s.getNthKey(i.position, translate) + return key, WithStack(err) +} + +// Value returns the value of the current position of the iterator +func (i *ObjectIterator) Value() (Slice, error) { + if i.position >= i.size { + return nil, WithStack(IndexOutOfBoundsError) + } + if current := i.current; current != nil { + value, err := current.Next() + return value, WithStack(err) + } + value, err := i.s.getNthValue(i.position) + return value, WithStack(err) +} + +// Next moves to the next position. +func (i *ObjectIterator) Next() error { + i.position++ + if i.position < i.size && i.current != nil { + var err error + // skip over key + i.current, err = i.current.Next() + if err != nil { + return WithStack(err) + } + // skip over value + i.current, err = i.current.Next() + if err != nil { + return WithStack(err) + } + } else { + i.current = nil + } + return nil +} diff --git a/vendor/github.com/arangodb/go-velocypack/parser.go b/vendor/github.com/arangodb/go-velocypack/parser.go new file mode 100644 index 00000000000..55b45414419 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/parser.go @@ -0,0 +1,151 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "bytes" + "encoding/json" + "io" + "strconv" + "strings" +) + +// ParserOptions controls how the Parser builds Velocypack. +type ParserOptions struct { + // If set, all Array's will be unindexed. + BuildUnindexedArrays bool + // If set, all Objects's will be unindexed. + BuildUnindexedObjects bool +} + +// Parser is used to build VPack structures from JSON. +type Parser struct { + options ParserOptions + decoder *json.Decoder + builder *Builder +} + +// ParseJSON parses JSON from the given reader and returns the +// VPack equivalent. +func ParseJSON(r io.Reader, options ...ParserOptions) (Slice, error) { + builder := &Builder{} + p := NewParser(r, builder, options...) + if err := p.Parse(); err != nil { + return nil, WithStack(err) + } + slice, err := builder.Slice() + if err != nil { + return nil, WithStack(err) + } + return slice, nil +} + +// ParseJSONFromString parses the given JSON string and returns the +// VPack equivalent. +func ParseJSONFromString(json string, options ...ParserOptions) (Slice, error) { + return ParseJSON(strings.NewReader(json), options...) +} + +// ParseJSONFromUTF8 parses the given JSON string and returns the +// VPack equivalent. +func ParseJSONFromUTF8(json []byte, options ...ParserOptions) (Slice, error) { + return ParseJSON(bytes.NewReader(json), options...) 
+} + +// NewParser initializes a new Parser with JSON from the given reader and +// it will store the parsers output in the given builder. +func NewParser(r io.Reader, builder *Builder, options ...ParserOptions) *Parser { + d := json.NewDecoder(r) + d.UseNumber() + p := &Parser{ + decoder: d, + builder: builder, + } + if len(options) > 0 { + p.options = options[0] + } + return p +} + +// Parse JSON from the parsers reader and build VPack structures in the +// parsers builder. +func (p *Parser) Parse() error { + for { + t, err := p.decoder.Token() + if err == io.EOF { + break + } else if serr, ok := err.(*json.SyntaxError); ok { + return WithStack(&ParseError{msg: err.Error(), Offset: serr.Offset}) + } else if err != nil { + return WithStack(&ParseError{msg: err.Error()}) + } + switch x := t.(type) { + case nil: + if err := p.builder.AddValue(NewNullValue()); err != nil { + return WithStack(err) + } + case bool: + if err := p.builder.AddValue(NewBoolValue(x)); err != nil { + return WithStack(err) + } + case json.Number: + if xu, err := strconv.ParseUint(string(x), 10, 64); err == nil { + if err := p.builder.AddValue(NewUIntValue(xu)); err != nil { + return WithStack(err) + } + } else if xi, err := x.Int64(); err == nil { + if err := p.builder.AddValue(NewIntValue(xi)); err != nil { + return WithStack(err) + } + } else { + if xf, err := x.Float64(); err == nil { + if err := p.builder.AddValue(NewDoubleValue(xf)); err != nil { + return WithStack(err) + } + } else { + return WithStack(&ParseError{msg: err.Error()}) + } + } + case string: + if err := p.builder.AddValue(NewStringValue(x)); err != nil { + return WithStack(err) + } + case json.Delim: + switch x { + case '[': + if err := p.builder.OpenArray(p.options.BuildUnindexedArrays); err != nil { + return WithStack(err) + } + case '{': + if err := p.builder.OpenObject(p.options.BuildUnindexedObjects); err != nil { + return WithStack(err) + } + case ']', '}': + if err := p.builder.Close(); err != nil { + return 
WithStack(err) + } + } + } + } + return nil +} diff --git a/vendor/github.com/arangodb/go-velocypack/raw_slice.go b/vendor/github.com/arangodb/go-velocypack/raw_slice.go new file mode 100644 index 00000000000..6f7b37c4ef8 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/raw_slice.go @@ -0,0 +1,50 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import "errors" + +// RawSlice is a raw encoded Velocypack value. +// It implements Marshaler and Unmarshaler and can +// be used to delay Velocypack decoding or precompute a Velocypack encoding. +type RawSlice []byte + +// MarshalVPack returns m as the Velocypack encoding of m. +func (m RawSlice) MarshalVPack() (Slice, error) { + if m == nil { + return NullSlice(), nil + } + return Slice(m), nil +} + +// UnmarshalVPack sets *m to a copy of data. +func (m *RawSlice) UnmarshalVPack(data Slice) error { + if m == nil { + return errors.New("velocypack.RawSlice: UnmarshalVPack on nil pointer") + } + *m = append((*m)[0:0], data...) 
+ return nil +} + +var _ Marshaler = (*RawSlice)(nil) +var _ Unmarshaler = (*RawSlice)(nil) diff --git a/vendor/github.com/arangodb/go-velocypack/slice.go b/vendor/github.com/arangodb/go-velocypack/slice.go new file mode 100644 index 00000000000..0814bb043c7 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/slice.go @@ -0,0 +1,927 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "math" + "time" +) + +// Slice provides read only access to a VPack value +type Slice []byte + +// SliceFromHex creates a Slice by decoding the given hex string into a Slice. +// If decoding fails, nil is returned. +func SliceFromHex(v string) Slice { + if bytes, err := hex.DecodeString(v); err != nil { + return nil + } else { + return Slice(bytes) + } +} + +// String returns a HEX representation of the slice. +func (s Slice) String() string { + return hex.EncodeToString(s) +} + +// JSONString converts the contents of the slice to JSON. 
+func (s Slice) JSONString(options ...DumperOptions) (string, error) { + buf := &bytes.Buffer{} + var opt *DumperOptions + if len(options) > 0 { + opt = &options[0] + } + d := NewDumper(buf, opt) + if err := d.Append(s); err != nil { + return "", WithStack(err) + } + return buf.String(), nil +} + +// head returns the first element of the slice or 0 if the slice is empty. +func (s Slice) head() byte { + if len(s) > 0 { + return s[0] + } + return 0 +} + +// ByteSize returns the total byte size for the slice, including the head byte +func (s Slice) ByteSize() (ValueLength, error) { + h := s.head() + // check if the type has a fixed length first + l := fixedTypeLengths[h] + if l != 0 { + // return fixed length + return ValueLength(l), nil + } + + // types with dynamic lengths need special treatment: + switch s.Type() { + case Array, Object: + if h == 0x13 || h == 0x14 { + // compact Array or Object + return readVariableValueLength(s, 1, false), nil + } + + vpackAssert(h > 0x00 && h <= 0x0e) + return ValueLength(readIntegerNonEmpty(s[1:], widthMap[h])), nil + + case String: + vpackAssert(h == 0xbf) + // long UTF-8 String + return ValueLength(1 + 8 + readIntegerFixed(s[1:], 8)), nil + + case Binary: + vpackAssert(h >= 0xc0 && h <= 0xc7) + return ValueLength(1 + ValueLength(h) - 0xbf + ValueLength(readIntegerNonEmpty(s[1:], uint(h)-0xbf))), nil + + case BCD: + if h <= 0xcf { + // positive BCD + vpackAssert(h >= 0xc8 && h < 0xcf) + return ValueLength(1 + ValueLength(h) - 0xc7 + ValueLength(readIntegerNonEmpty(s[1:], uint(h)-0xc7))), nil + } + + // negative BCD + vpackAssert(h >= 0xd0 && h < 0xd7) + return ValueLength(1 + ValueLength(h) - 0xcf + ValueLength(readIntegerNonEmpty(s[1:], uint(h)-0xcf))), nil + + case Custom: + vpackAssert(h >= 0xf4) + switch h { + case 0xf4, 0xf5, 0xf6: + return ValueLength(2 + readIntegerFixed(s[1:], 1)), nil + case 0xf7, 0xf8, 0xf9: + return ValueLength(3 + readIntegerFixed(s[1:], 2)), nil + case 0xfa, 0xfb, 0xfc: + return ValueLength(5 + 
readIntegerFixed(s[1:], 4)), nil + case 0xfd, 0xfe, 0xff: + return ValueLength(9 + readIntegerFixed(s[1:], 8)), nil + } + } + + return 0, WithStack(InternalError) +} + +// Next returns the Slice that directly follows the given slice. +// Same as s[s.ByteSize:] +func (s Slice) Next() (Slice, error) { + size, err := s.ByteSize() + if err != nil { + return nil, WithStack(err) + } + return Slice(s[size:]), nil +} + +// GetBool returns a boolean value from the slice. +// Returns an error if slice is not of type Bool. +func (s Slice) GetBool() (bool, error) { + if err := s.AssertType(Bool); err != nil { + return false, WithStack(err) + } + return s.IsTrue(), nil +} + +// GetDouble returns a Double value from the slice. +// Returns an error if slice is not of type Double. +func (s Slice) GetDouble() (float64, error) { + if err := s.AssertType(Double); err != nil { + return 0.0, WithStack(err) + } + bits := binary.LittleEndian.Uint64(s[1:]) + return math.Float64frombits(bits), nil +} + +// GetInt returns a Int value from the slice. +// Returns an error if slice is not of type Int. +func (s Slice) GetInt() (int64, error) { + h := s.head() + + if h >= 0x20 && h <= 0x27 { + // Int T + v := readIntegerNonEmpty(s[1:], uint(h)-0x1f) + if h == 0x27 { + return toInt64(v), nil + } else { + vv := int64(v) + shift := int64(1) << ((h-0x1f)*8 - 1) + if vv < shift { + return vv, nil + } else { + return vv - (shift << 1), nil + } + } + } + + if h >= 0x28 && h <= 0x2f { + // UInt + v, err := s.GetUInt() + if err != nil { + return 0, WithStack(err) + } + if v > math.MaxInt64 { + return 0, WithStack(NumberOutOfRangeError) + } + return int64(v), nil + } + + if h >= 0x30 && h <= 0x3f { + // SmallInt + return s.GetSmallInt() + } + + return 0, WithStack(InvalidTypeError{"Expecting type Int"}) +} + +// GetUInt returns a UInt value from the slice. +// Returns an error if slice is not of type UInt. 
+func (s Slice) GetUInt() (uint64, error) { + h := s.head() + + if h == 0x28 { + // single byte integer + return uint64(s[1]), nil + } + + if h >= 0x29 && h <= 0x2f { + // UInt + return readIntegerNonEmpty(s[1:], uint(h)-0x27), nil + } + + if h >= 0x20 && h <= 0x27 { + // Int + v, err := s.GetInt() + if err != nil { + return 0, WithStack(err) + } + if v < 0 { + return 0, WithStack(NumberOutOfRangeError) + } + return uint64(v), nil + } + + if h >= 0x30 && h <= 0x39 { + // Smallint >= 0 + return uint64(h - 0x30), nil + } + + if h >= 0x3a && h <= 0x3f { + // Smallint < 0 + return 0, WithStack(NumberOutOfRangeError) + } + + return 0, WithStack(InvalidTypeError{"Expecting type UInt"}) +} + +// GetSmallInt returns a SmallInt value from the slice. +// Returns an error if slice is not of type SmallInt. +func (s Slice) GetSmallInt() (int64, error) { + h := s.head() + + if h >= 0x30 && h <= 0x39 { + // Smallint >= 0 + return int64(h - 0x30), nil + } + + if h >= 0x3a && h <= 0x3f { + // Smallint < 0 + return int64(h-0x3a) - 6, nil + } + + if (h >= 0x20 && h <= 0x27) || (h >= 0x28 && h <= 0x2f) { + // Int and UInt + // we'll leave it to the compiler to detect the two ranges above are + // adjacent + return s.GetInt() + } + + return 0, InvalidTypeError{"Expecting type SmallInt"} +} + +// GetUTCDate return the value for an UTCDate object +func (s Slice) GetUTCDate() (time.Time, error) { + if !s.IsUTCDate() { + return time.Time{}, InvalidTypeError{"Expecting type UTCDate"} + } + v := toInt64(readIntegerFixed(s[1:], 8)) // milliseconds since epoch + sec := v / 1000 + nsec := (v % 1000) * 1000000 + return time.Unix(sec, nsec).UTC(), nil +} + +// GetStringUTF8 return the value for a String object as a []byte with UTF-8 values. +// This function is a bit faster than GetString, since the conversion from +// []byte to string needs a memory allocation. 
+func (s Slice) GetStringUTF8() ([]byte, error) { + h := s.head() + if h >= 0x40 && h <= 0xbe { + // short UTF-8 String + length := h - 0x40 + result := s[1 : 1+length] + return result, nil + } + + if h == 0xbf { + // long UTF-8 String + length := readIntegerFixed(s[1:], 8) + if err := checkOverflow(ValueLength(length)); err != nil { + return nil, WithStack(err) + } + result := s[1+8 : 1+8+length] + return result, nil + } + + return nil, InvalidTypeError{"Expecting type String"} +} + +// GetString return the value for a String object +// This function is a bit slower than GetStringUTF8, since the conversion from +// []byte to string needs a memory allocation. +func (s Slice) GetString() (string, error) { + bytes, err := s.GetStringUTF8() + if err != nil { + return "", WithStack(err) + } + return string(bytes), nil +} + +// GetStringLength return the length for a String object +func (s Slice) GetStringLength() (ValueLength, error) { + h := s.head() + if h >= 0x40 && h <= 0xbe { + // short UTF-8 String + length := h - 0x40 + return ValueLength(length), nil + } + + if h == 0xbf { + // long UTF-8 String + length := readIntegerFixed(s[1:], 8) + if err := checkOverflow(ValueLength(length)); err != nil { + return 0, WithStack(err) + } + return ValueLength(length), nil + } + + return 0, InvalidTypeError{"Expecting type String"} +} + +// CompareString compares the string value in the slice with the given string. +// s == value -> 0 +// s < value -> -1 +// s > value -> 1 +func (s Slice) CompareString(value string) (int, error) { + k, err := s.GetStringUTF8() + if err != nil { + return 0, WithStack(err) + } + return bytes.Compare(k, []byte(value)), nil +} + +// IsEqualString compares the string value in the slice with the given string for equivalence. 
+func (s Slice) IsEqualString(value string) (bool, error) { + k, err := s.GetStringUTF8() + if err != nil { + return false, WithStack(err) + } + rc := bytes.Compare(k, []byte(value)) + return rc == 0, nil +} + +// GetBinary return the value for a Binary object +func (s Slice) GetBinary() ([]byte, error) { + if !s.IsBinary() { + return nil, InvalidTypeError{"Expecting type Binary"} + } + + h := s.head() + vpackAssert(h >= 0xc0 && h <= 0xc7) + + lengthSize := uint(h - 0xbf) + length := readIntegerNonEmpty(s[1:], lengthSize) + checkOverflow(ValueLength(length)) + return s[1+lengthSize : 1+uint64(lengthSize)+length], nil +} + +// GetBinaryLength return the length for a Binary object +func (s Slice) GetBinaryLength() (ValueLength, error) { + if !s.IsBinary() { + return 0, InvalidTypeError{"Expecting type Binary"} + } + + h := s.head() + vpackAssert(h >= 0xc0 && h <= 0xc7) + + lengthSize := uint(h - 0xbf) + length := readIntegerNonEmpty(s[1:], lengthSize) + return ValueLength(length), nil +} + +// Length return the number of members for an Array or Object object +func (s Slice) Length() (ValueLength, error) { + if !s.IsArray() && !s.IsObject() { + return 0, InvalidTypeError{"Expecting type Array or Object"} + } + + h := s.head() + if h == 0x01 || h == 0x0a { + // special case: empty! 
+ return 0, nil + } + + if h == 0x13 || h == 0x14 { + // compact Array or Object + end := readVariableValueLength(s, 1, false) + return readVariableValueLength(s, end-1, true), nil + } + + offsetSize := indexEntrySize(h) + vpackAssert(offsetSize > 0) + end := readIntegerNonEmpty(s[1:], offsetSize) + + // find number of items + if h <= 0x05 { // No offset table or length, need to compute: + firstSubOffset := s.findDataOffset(h) + first := s[firstSubOffset:] + s, err := first.ByteSize() + if err != nil { + return 0, WithStack(err) + } + if s == 0 { + return 0, WithStack(InternalError) + } + return (ValueLength(end) - firstSubOffset) / s, nil + } else if offsetSize < 8 { + return ValueLength(readIntegerNonEmpty(s[offsetSize+1:], offsetSize)), nil + } + + return ValueLength(readIntegerNonEmpty(s[end-uint64(offsetSize):], offsetSize)), nil +} + +// At extracts the array value at the specified index. +func (s Slice) At(index ValueLength) (Slice, error) { + if !s.IsArray() { + return nil, InvalidTypeError{"Expecting type Array"} + } + + if result, err := s.getNth(index); err != nil { + return nil, WithStack(err) + } else { + return result, nil + } +} + +// KeyAt extracts a key from an Object at the specified index. 
+func (s Slice) KeyAt(index ValueLength, translate ...bool) (Slice, error) { + if !s.IsObject() { + return nil, InvalidTypeError{"Expecting type Object"} + } + + return s.getNthKey(index, optionalBool(translate, true)) +} + +// ValueAt extracts a value from an Object at the specified index +func (s Slice) ValueAt(index ValueLength) (Slice, error) { + if !s.IsObject() { + return nil, InvalidTypeError{"Expecting type Object"} + } + + key, err := s.getNthKey(index, false) + if err != nil { + return nil, WithStack(err) + } + byteSize, err := key.ByteSize() + if err != nil { + return nil, WithStack(err) + } + return Slice(key[byteSize:]), nil +} + +func indexEntrySize(head byte) uint { + vpackAssert(head > 0x00 && head <= 0x12) + return widthMap[head] +} + +// Get looks for the specified attribute path inside an Object +// returns a Slice(ValueType::None) if not found +func (s Slice) Get(attributePath ...string) (Slice, error) { + result := s + parent := s + for _, a := range attributePath { + var err error + result, err = parent.get(a) + if err != nil { + return nil, WithStack(err) + } + if result.IsNone() { + return result, nil + } + parent = result + } + return result, nil +} + +// Get looks for the specified attribute inside an Object +// returns a Slice(ValueType::None) if not found +func (s Slice) get(attribute string) (Slice, error) { + if !s.IsObject() { + return nil, InvalidTypeError{"Expecting Object"} + } + + h := s.head() + if h == 0x0a { + // special case, empty object + return nil, nil + } + + if h == 0x14 { + // compact Object + value, err := s.getFromCompactObject(attribute) + return value, WithStack(err) + } + + offsetSize := indexEntrySize(h) + vpackAssert(offsetSize > 0) + end := ValueLength(readIntegerNonEmpty(s[1:], offsetSize)) + + // read number of items + var n ValueLength + var ieBase ValueLength + if offsetSize < 8 { + n = ValueLength(readIntegerNonEmpty(s[1+offsetSize:], offsetSize)) + ieBase = end - n*ValueLength(offsetSize) + } else { + n = 
ValueLength(readIntegerNonEmpty(s[end-ValueLength(offsetSize):], offsetSize)) + ieBase = end - n*ValueLength(offsetSize) - ValueLength(offsetSize) + } + + if n == 1 { + // Just one attribute, there is no index table! + key := Slice(s[s.findDataOffset(h):]) + + if key.IsString() { + if eq, err := key.IsEqualString(attribute); err != nil { + return nil, WithStack(err) + } else if eq { + value, err := key.Next() + return value, WithStack(err) + } + // fall through to returning None Slice below + } else if key.IsSmallInt() || key.IsUInt() { + // translate key + if attributeTranslator == nil { + return nil, WithStack(NeedAttributeTranslatorError) + } + if eq, err := key.translateUnchecked().IsEqualString(attribute); err != nil { + return nil, WithStack(err) + } else if eq { + value, err := key.Next() + return value, WithStack(err) + } + } + + // no match or invalid key type + return nil, nil + } + + // only use binary search for attributes if we have at least this many entries + // otherwise we'll always use the linear search + const SortedSearchEntriesThreshold = ValueLength(4) + + // bool const isSorted = (h >= 0x0b && h <= 0x0e); + if n >= SortedSearchEntriesThreshold && (h >= 0x0b && h <= 0x0e) { + // This means, we have to handle the special case n == 1 only + // in the linear search! + switch offsetSize { + case 1: + result, err := s.searchObjectKeyBinary(attribute, ieBase, n, 1) + return result, WithStack(err) + case 2: + result, err := s.searchObjectKeyBinary(attribute, ieBase, n, 2) + return result, WithStack(err) + case 4: + result, err := s.searchObjectKeyBinary(attribute, ieBase, n, 4) + return result, WithStack(err) + case 8: + result, err := s.searchObjectKeyBinary(attribute, ieBase, n, 8) + return result, WithStack(err) + } + } + + result, err := s.searchObjectKeyLinear(attribute, ieBase, ValueLength(offsetSize), n) + return result, WithStack(err) +} + +// HasKey returns true if the slice is an object that has a given key path. 
+func (s Slice) HasKey(keyPath ...string) (bool, error) { + if result, err := s.Get(keyPath...); err != nil { + return false, WithStack(err) + } else { + return !result.IsNone(), nil + } +} + +func (s Slice) getFromCompactObject(attribute string) (Slice, error) { + it, err := NewObjectIterator(s) + if err != nil { + return nil, WithStack(err) + } + for it.IsValid() { + key, err := it.Key(false) + if err != nil { + return nil, WithStack(err) + } + k, err := key.makeKey() + if err != nil { + return nil, WithStack(err) + } + if eq, err := k.IsEqualString(attribute); err != nil { + return nil, WithStack(err) + } else if eq { + value, err := key.Next() + return value, WithStack(err) + } + + if err := it.Next(); err != nil { + return nil, WithStack(err) + } + } + // not found + return nil, nil +} + +func (s Slice) findDataOffset(head byte) ValueLength { + // Must be called for a nonempty array or object at start(): + vpackAssert(head <= 0x12) + fsm := firstSubMap[head] + if fsm <= 2 && s[2] != 0 { + return 2 + } + if fsm <= 3 && s[3] != 0 { + return 3 + } + if fsm <= 5 && s[5] != 0 { + return 5 + } + return 9 +} + +// get the offset for the nth member from an Array or Object type +func (s Slice) getNthOffset(index ValueLength) (ValueLength, error) { + vpackAssert(s.IsArray() || s.IsObject()) + + h := s.head() + + if h == 0x13 || h == 0x14 { + // compact Array or Object + l, err := s.getNthOffsetFromCompact(index) + if err != nil { + return 0, WithStack(err) + } + return l, nil + } + + if h == 0x01 || h == 0x0a { + // special case: empty Array or empty Object + return 0, WithStack(IndexOutOfBoundsError) + } + + offsetSize := indexEntrySize(h) + end := ValueLength(readIntegerNonEmpty(s[1:], offsetSize)) + + dataOffset := ValueLength(0) + + // find the number of items + var n ValueLength + if h <= 0x05 { // No offset table or length, need to compute: + dataOffset = s.findDataOffset(h) + first := Slice(s[dataOffset:]) + s, err := first.ByteSize() + if err != nil { + return 
0, WithStack(err) + } + if s == 0 { + return 0, WithStack(InternalError) + } + n = (end - dataOffset) / s + } else if offsetSize < 8 { + n = ValueLength(readIntegerNonEmpty(s[1+offsetSize:], offsetSize)) + } else { + n = ValueLength(readIntegerNonEmpty(s[end-ValueLength(offsetSize):], offsetSize)) + } + + if index >= n { + return 0, WithStack(IndexOutOfBoundsError) + } + + // empty array case was already covered + vpackAssert(n > 0) + + if h <= 0x05 || n == 1 { + // no index table, but all array items have the same length + // now fetch first item and determine its length + if dataOffset == 0 { + dataOffset = s.findDataOffset(h) + } + sliceAtDataOffset := Slice(s[dataOffset:]) + sliceAtDataOffsetByteSize, err := sliceAtDataOffset.ByteSize() + if err != nil { + return 0, WithStack(err) + } + return dataOffset + index*sliceAtDataOffsetByteSize, nil + } + + offsetSize8Or0 := ValueLength(0) + if offsetSize == 8 { + offsetSize8Or0 = 8 + } + ieBase := end - n*ValueLength(offsetSize) + index*ValueLength(offsetSize) - (offsetSize8Or0) + return ValueLength(readIntegerNonEmpty(s[ieBase:], offsetSize)), nil +} + +// get the offset for the nth member from a compact Array or Object type +func (s Slice) getNthOffsetFromCompact(index ValueLength) (ValueLength, error) { + end := ValueLength(readVariableValueLength(s, 1, false)) + n := ValueLength(readVariableValueLength(s, end-1, true)) + if index >= n { + return 0, WithStack(IndexOutOfBoundsError) + } + + h := s.head() + offset := ValueLength(1 + getVariableValueLength(end)) + current := ValueLength(0) + for current != index { + sliceAtOffset := Slice(s[offset:]) + sliceAtOffsetByteSize, err := sliceAtOffset.ByteSize() + if err != nil { + return 0, WithStack(err) + } + offset += sliceAtOffsetByteSize + if h == 0x14 { + sliceAtOffset := Slice(s[offset:]) + sliceAtOffsetByteSize, err := sliceAtOffset.ByteSize() + if err != nil { + return 0, WithStack(err) + } + offset += sliceAtOffsetByteSize + } + current++ + } + return offset, 
nil +} + +// extract the nth member from an Array +func (s Slice) getNth(index ValueLength) (Slice, error) { + vpackAssert(s.IsArray()) + + offset, err := s.getNthOffset(index) + if err != nil { + return nil, WithStack(err) + } + return Slice(s[offset:]), nil +} + +// getNthKey extract the nth member from an Object +func (s Slice) getNthKey(index ValueLength, translate bool) (Slice, error) { + vpackAssert(s.Type() == Object) + + offset, err := s.getNthOffset(index) + if err != nil { + return nil, WithStack(err) + } + result := Slice(s[offset:]) + if translate { + result, err = result.makeKey() + if err != nil { + return nil, WithStack(err) + } + } + return result, nil +} + +// getNthValue extract the nth value from an Object +func (s Slice) getNthValue(index ValueLength) (Slice, error) { + key, err := s.getNthKey(index, false) + if err != nil { + return nil, WithStack(err) + } + value, err := key.Next() + return value, WithStack(err) +} + +func (s Slice) makeKey() (Slice, error) { + if s.IsString() { + return s, nil + } + if s.IsSmallInt() || s.IsUInt() { + if attributeTranslator == nil { + return nil, WithStack(NeedAttributeTranslatorError) + } + return s.translateUnchecked(), nil + } + + return nil, InvalidTypeError{"Cannot translate key of this type"} +} + +// perform a linear search for the specified attribute inside an Object +func (s Slice) searchObjectKeyLinear(attribute string, ieBase, offsetSize, n ValueLength) (Slice, error) { + useTranslator := attributeTranslator != nil + + for index := ValueLength(0); index < n; index++ { + offset := ValueLength(ieBase + index*offsetSize) + key := Slice(s[readIntegerNonEmpty(s[offset:], uint(offsetSize)):]) + + if key.IsString() { + if eq, err := key.IsEqualString(attribute); err != nil { + return nil, WithStack(err) + } else if !eq { + continue + } + } else if key.IsSmallInt() || key.IsUInt() { + // translate key + if !useTranslator { + // no attribute translator + return nil, WithStack(NeedAttributeTranslatorError) + 
} + if eq, err := key.translateUnchecked().IsEqualString(attribute); err != nil { + return nil, WithStack(err) + } else if !eq { + continue + } + } else { + // invalid key type + return nil, nil + } + + // key is identical. now return value + value, err := key.Next() + return value, WithStack(err) + } + + // nothing found + return nil, nil +} + +// perform a binary search for the specified attribute inside an Object +//template +func (s Slice) searchObjectKeyBinary(attribute string, ieBase ValueLength, n ValueLength, offsetSize ValueLength) (Slice, error) { + useTranslator := attributeTranslator != nil + vpackAssert(n > 0) + + l := ValueLength(0) + r := ValueLength(n - 1) + index := ValueLength(r / 2) + + for { + offset := ValueLength(ieBase + index*offsetSize) + key := Slice(s[readIntegerFixed(s[offset:], uint(offsetSize)):]) + + var res int + var err error + if key.IsString() { + res, err = key.CompareString(attribute) + if err != nil { + return nil, WithStack(err) + } + } else if key.IsSmallInt() || key.IsUInt() { + // translate key + if !useTranslator { + // no attribute translator + return nil, WithStack(NeedAttributeTranslatorError) + } + res, err = key.translateUnchecked().CompareString(attribute) + if err != nil { + return nil, WithStack(err) + } + } else { + // invalid key + return nil, nil + } + + if res == 0 { + // found. 
now return a Slice pointing at the value + keySize, err := key.ByteSize() + if err != nil { + return nil, WithStack(err) + } + return Slice(key[keySize:]), nil + } + + if res > 0 { + if index == 0 { + return nil, nil + } + r = index - 1 + } else { + l = index + 1 + } + if r < l { + return nil, nil + } + + // determine new midpoint + index = l + ((r - l) / 2) + } +} + +// translates an integer key into a string +func (s Slice) translate() (Slice, error) { + if !s.IsSmallInt() && !s.IsUInt() { + return nil, WithStack(InvalidTypeError{"Cannot translate key of this type"}) + } + if attributeTranslator == nil { + return nil, WithStack(NeedAttributeTranslatorError) + } + return s.translateUnchecked(), nil +} + +// return the value for a UInt object, without checks! +// returns 0 for invalid values/types +func (s Slice) getUIntUnchecked() uint64 { + h := s.head() + if h >= 0x28 && h <= 0x2f { + // UInt + return readIntegerNonEmpty(s[1:], uint(h-0x27)) + } + + if h >= 0x30 && h <= 0x39 { + // Smallint >= 0 + return uint64(h - 0x30) + } + return 0 +} + +// translates an integer key into a string, without checks +func (s Slice) translateUnchecked() Slice { + id := s.getUIntUnchecked() + key := attributeTranslator.IDToString(id) + if key == "" { + return nil + } + return StringSlice(key) +} diff --git a/vendor/github.com/arangodb/go-velocypack/slice_factory.go b/vendor/github.com/arangodb/go-velocypack/slice_factory.go new file mode 100644 index 00000000000..39d4d4f42ca --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/slice_factory.go @@ -0,0 +1,69 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License} +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import "encoding/binary" + +// NoneSlice creates a slice of type None +func NoneSlice() Slice { return Slice{0x00} } + +// IllegalSlice creates a slice of type Illegal +func IllegalSlice() Slice { return Slice{0x17} } + +// NullSlice creates a slice of type Null +func NullSlice() Slice { return Slice{0x18} } + +// FalseSlice creates a slice of type Boolean with false value +func FalseSlice() Slice { return Slice{0x19} } + +// TrueSlice creates a slice of type Boolean with true value +func TrueSlice() Slice { return Slice{0x1a} } + +// ZeroSlice creates a slice of type Smallint(0) +func ZeroSlice() Slice { return Slice{0x30} } + +// EmptyArraySlice creates a slice of type Array, empty +func EmptyArraySlice() Slice { return Slice{0x01} } + +// EmptyObjectSlice creates a slice of type Object, empty +func EmptyObjectSlice() Slice { return Slice{0x0a} } + +// MinKeySlice creates a slice of type MinKey +func MinKeySlice() Slice { return Slice{0x1e} } + +// MaxKeySlice creates a slice of type MaxKey +func MaxKeySlice() Slice { return Slice{0x1f} } + +// StringSlice creates a slice of type String with given string value +func StringSlice(s string) Slice { + raw := []byte(s) + l := len(raw) + if l <= 126 { + return Slice(append([]byte{byte(0x40 + l)}, raw...)) + } + buf := make([]byte, 1+8+l) + buf[0] = 0xbf + binary.LittleEndian.PutUint64(buf[1:], uint64(l)) + copy(buf[1+8:], raw) + return buf +} diff --git 
a/vendor/github.com/arangodb/go-velocypack/slice_merge.go b/vendor/github.com/arangodb/go-velocypack/slice_merge.go new file mode 100644 index 00000000000..be9a7757888 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/slice_merge.go @@ -0,0 +1,99 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +// Merge creates a slice that contains all fields from all given slices. +// When a field exists (with same name) in an earlier slice, it is ignored. +// All slices must be objects. +func Merge(slices ...Slice) (Slice, error) { + // Calculate overall length + l := ValueLength(0) + for _, s := range slices { + if err := s.AssertType(Object); err != nil { + return nil, WithStack(err) + } + byteSize, err := s.ByteSize() + if err != nil { + return nil, WithStack(err) + } + l += byteSize + } + + if len(slices) == 1 { + // Fast path, only 1 slice + return slices[0], nil + } + + // Create a buffer to hold all slices. 
+ b := NewBuilder(uint(l)) + keys := make(map[string]struct{}) + if err := b.OpenObject(); err != nil { + return nil, WithStack(err) + } + for _, s := range slices { + it, err := NewObjectIterator(s, true) + if err != nil { + return nil, WithStack(err) + } + for it.IsValid() { + keySlice, err := it.Key(true) + if err != nil { + return nil, WithStack(err) + } + key, err := keySlice.GetString() + if err != nil { + return nil, WithStack(err) + } + if _, found := keys[key]; !found { + // Record key + keys[key] = struct{}{} + + // Fetch value + value, err := it.Value() + if err != nil { + return nil, WithStack(err) + } + + // Add key,value + if err := b.addInternalKeyValue(key, NewSliceValue(value)); err != nil { + return nil, WithStack(err) + } + } + + // Move to next field + if err := it.Next(); err != nil { + return nil, WithStack(err) + } + } + } + if err := b.Close(); err != nil { + return nil, WithStack(err) + } + + // Return slice + result, err := b.Slice() + if err != nil { + return nil, WithStack(err) + } + return result, nil +} diff --git a/vendor/github.com/arangodb/go-velocypack/slice_reader.go b/vendor/github.com/arangodb/go-velocypack/slice_reader.go new file mode 100644 index 00000000000..7baa365c9d8 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/slice_reader.go @@ -0,0 +1,197 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "bufio" + "io" +) + +const ( + maxByteSizeBytes = 16 +) + +// SliceFromReader reads a slice from the given reader. +func SliceFromReader(r io.Reader) (Slice, error) { + if r, ok := r.(*bufio.Reader); ok { + // Buffered reader can use faster path. + return sliceFromBufReader(r) + } + hdr := make(Slice, 1, maxByteSizeBytes) + // Read first byte + if err := readBytes(hdr, r); err != nil { + if Cause(err) == io.EOF { + // Empty slice + return nil, nil + } + return nil, WithStack(err) + } + // Lookup first size + // check if the type has a fixed length first + l := fixedTypeLengths[hdr[0]] + if l != 0 { + // Found fixed length, read it (minus byte already read) + s := make(Slice, l) + s[0] = hdr[0] + if err := readBytes(s[1:], r); err != nil { + return nil, WithStack(err) + } + return s, nil + } + + readRemaining := func(prefix Slice, l ValueLength) (Slice, error) { + s := make(Slice, l) + copy(s, prefix) + if err := readBytes(s[len(prefix):], r); err != nil { + return nil, WithStack(err) + } + return s, nil + } + + // types with dynamic lengths need special treatment: + h := hdr[0] + switch hdr.Type() { + case Array, Object: + if h == 0x13 || h == 0x14 { + // compact Array or Object + l, bytes, err := readVariableValueLengthFromReader(r, false) + if err != nil { + return nil, WithStack(err) + } + return readRemaining(append(hdr, bytes...), l) + } + + vpackAssert(h > 0x00 && h <= 0x0e) + l, bytes, err := readIntegerNonEmptyFromReader(r, widthMap[h]) + if err != nil { + return nil, WithStack(err) + } + return readRemaining(append(hdr, bytes...), ValueLength(l)) + + case String: + vpackAssert(h == 0xbf) + + // long UTF-8 String + l, bytes, err := readIntegerFixedFromReader(r, 8) + if err != nil { + return nil, WithStack(err) + } + return readRemaining(append(hdr, bytes...), ValueLength(l+1+8)) + + case Binary: + vpackAssert(h >= 0xc0 && h <= 
0xc7) + x, bytes, err := readIntegerNonEmptyFromReader(r, uint(h)-0xbf) + if err != nil { + return nil, WithStack(err) + } + l := ValueLength(1 + ValueLength(h) - 0xbf + ValueLength(x)) + return readRemaining(append(hdr, bytes...), l) + + case BCD: + if h <= 0xcf { + // positive BCD + vpackAssert(h >= 0xc8 && h < 0xcf) + x, bytes, err := readIntegerNonEmptyFromReader(r, uint(h)-0xc7) + if err != nil { + return nil, WithStack(err) + } + l := ValueLength(1 + ValueLength(h) - 0xc7 + ValueLength(x)) + return readRemaining(append(hdr, bytes...), l) + } + + // negative BCD + vpackAssert(h >= 0xd0 && h < 0xd7) + x, bytes, err := readIntegerNonEmptyFromReader(r, uint(h)-0xcf) + if err != nil { + return nil, WithStack(err) + } + l := ValueLength(1 + ValueLength(h) - 0xcf + ValueLength(x)) + return readRemaining(append(hdr, bytes...), l) + + case Custom: + vpackAssert(h >= 0xf4) + switch h { + case 0xf4, 0xf5, 0xf6: + x, bytes, err := readIntegerFixedFromReader(r, 1) + if err != nil { + return nil, WithStack(err) + } + l := ValueLength(2 + x) + return readRemaining(append(hdr, bytes...), l) + case 0xf7, 0xf8, 0xf9: + x, bytes, err := readIntegerFixedFromReader(r, 2) + if err != nil { + return nil, WithStack(err) + } + l := ValueLength(3 + x) + return readRemaining(append(hdr, bytes...), l) + case 0xfa, 0xfb, 0xfc: + x, bytes, err := readIntegerFixedFromReader(r, 4) + if err != nil { + return nil, WithStack(err) + } + l := ValueLength(5 + x) + return readRemaining(append(hdr, bytes...), l) + case 0xfd, 0xfe, 0xff: + x, bytes, err := readIntegerFixedFromReader(r, 8) + if err != nil { + return nil, WithStack(err) + } + l := ValueLength(9 + x) + return readRemaining(append(hdr, bytes...), l) + } + } + + return nil, WithStack(InternalError) +} + +// sliceFromBufReader reads a slice from the given buffered reader. 
+func sliceFromBufReader(r *bufio.Reader) (Slice, error) { + // ByteSize is always found within first 16 bytes + hdr, err := r.Peek(maxByteSizeBytes) + if len(hdr) == 0 && err != nil { + if Cause(err) == io.EOF { + // Empty slice + return nil, nil + } + return nil, WithStack(err) + } + s := Slice(hdr) + size, err := s.ByteSize() + if err != nil { + return nil, WithStack(err) + } + // Now that we know the size, read the entire slice + buf := make(Slice, size) + offset := 0 + bytesRead := 0 + for ValueLength(bytesRead) < size { + n, err := r.Read(buf[offset:]) + bytesRead += n + offset += n + if err != nil && ValueLength(bytesRead) < size { + return nil, WithStack(err) + } + } + return buf, nil +} diff --git a/vendor/github.com/arangodb/go-velocypack/slice_type.go b/vendor/github.com/arangodb/go-velocypack/slice_type.go new file mode 100644 index 00000000000..a49b2fc08cf --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/slice_type.go @@ -0,0 +1,135 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import "fmt" + +// Type returns the vpack type of the slice +func (s Slice) Type() ValueType { + return typeMap[s.head()] +} + +// IsType returns true when the vpack type of the slice is equal to the given type. +// Returns false otherwise. 
+func (s Slice) IsType(t ValueType) bool { + return typeMap[s.head()] == t +} + +// AssertType returns an error when the vpack type of the slice different from the given type. +// Returns nil otherwise. +func (s Slice) AssertType(t ValueType) error { + if found := typeMap[s.head()]; found != t { + return WithStack(InvalidTypeError{Message: fmt.Sprintf("expected type '%s', got '%s'", t, found)}) + } + return nil +} + +// AssertTypeAny returns an error when the vpack type of the slice different from all of the given types. +// Returns nil otherwise. +func (s Slice) AssertTypeAny(t ...ValueType) error { + found := typeMap[s.head()] + for _, x := range t { + if x == found { + return nil + } + } + return WithStack(InvalidTypeError{Message: fmt.Sprintf("expected types '%q', got '%s'", t, found)}) +} + +// IsNone returns true if slice is a None object +func (s Slice) IsNone() bool { return s.IsType(None) } + +// IsIllegal returns true if slice is an Illegal object +func (s Slice) IsIllegal() bool { return s.IsType(Illegal) } + +// IsNull returns true if slice is a Null object +func (s Slice) IsNull() bool { return s.IsType(Null) } + +// IsBool returns true if slice is a Bool object +func (s Slice) IsBool() bool { return s.IsType(Bool) } + +// IsTrue returns true if slice is the Boolean value true +func (s Slice) IsTrue() bool { return s.head() == 0x1a } + +// IsFalse returns true if slice is the Boolean value false +func (s Slice) IsFalse() bool { return s.head() == 0x19 } + +// IsArray returns true if slice is an Array object +func (s Slice) IsArray() bool { return s.IsType(Array) } + +// IsEmptyArray tests whether the Slice is an empty array +func (s Slice) IsEmptyArray() bool { return s.head() == 0x01 } + +// IsObject returns true if slice is an Object object +func (s Slice) IsObject() bool { return s.IsType(Object) } + +// IsEmptyObject tests whether the Slice is an empty object +func (s Slice) IsEmptyObject() bool { return s.head() == 0x0a } + +// IsDouble returns 
true if slice is a Double object +func (s Slice) IsDouble() bool { return s.IsType(Double) } + +// IsUTCDate returns true if slice is a UTCDate object +func (s Slice) IsUTCDate() bool { return s.IsType(UTCDate) } + +// IsExternal returns true if slice is an External object +func (s Slice) IsExternal() bool { return s.IsType(External) } + +// IsMinKey returns true if slice is a MinKey object +func (s Slice) IsMinKey() bool { return s.IsType(MinKey) } + +// IsMaxKey returns true if slice is a MaxKey object +func (s Slice) IsMaxKey() bool { return s.IsType(MaxKey) } + +// IsInt returns true if slice is an Int object +func (s Slice) IsInt() bool { return s.IsType(Int) } + +// IsUInt returns true if slice is a UInt object +func (s Slice) IsUInt() bool { return s.IsType(UInt) } + +// IsSmallInt returns true if slice is a SmallInt object +func (s Slice) IsSmallInt() bool { return s.IsType(SmallInt) } + +// IsString returns true if slice is a String object +func (s Slice) IsString() bool { return s.IsType(String) } + +// IsBinary returns true if slice is a Binary object +func (s Slice) IsBinary() bool { return s.IsType(Binary) } + +// IsBCD returns true if slice is a BCD +func (s Slice) IsBCD() bool { return s.IsType(BCD) } + +// IsCustom returns true if slice is a Custom type +func (s Slice) IsCustom() bool { return s.IsType(Custom) } + +// IsInteger returns true if a slice is any decimal number type +func (s Slice) IsInteger() bool { return s.IsInt() || s.IsUInt() || s.IsSmallInt() } + +// IsNumber returns true if slice is any Number-type object +func (s Slice) IsNumber() bool { return s.IsInteger() || s.IsDouble() } + +// IsSorted returns true if slice is an object with table offsets, sorted by attribute name +func (s Slice) IsSorted() bool { + h := s.head() + return (h >= 0x0b && h <= 0x0e) +} diff --git a/vendor/github.com/arangodb/go-velocypack/util.go b/vendor/github.com/arangodb/go-velocypack/util.go new file mode 100644 index 00000000000..b3dcbc1fa94 --- /dev/null 
+++ b/vendor/github.com/arangodb/go-velocypack/util.go @@ -0,0 +1,202 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "fmt" + "io" +) + +// vpackAssert panics if v is false. +func vpackAssert(v bool) { + if !v { + panic("VELOCYPACK_ASSERT failed") + } +} + +// readBytes reads bytes from the given reader until the given slice is full. 
+func readBytes(dst []byte, r io.Reader) error { + offset := 0 + l := len(dst) + for { + n, err := r.Read(dst[offset:]) + offset += n + l -= n + if l == 0 { + // We're done + return nil + } + if err != nil { + return WithStack(err) + } + } +} + +// read an unsigned little endian integer value of the +// specified length, starting at the specified byte offset +func readIntegerFixed(start []byte, length uint) uint64 { + return readIntegerNonEmpty(start, length) +} + +// read an unsigned little endian integer value of the +// specified length, starting at the specified byte offset +func readIntegerFixedFromReader(r io.Reader, length uint) (uint64, []byte, error) { + buf := make([]byte, length) + if err := readBytes(buf, r); err != nil { + return 0, nil, WithStack(err) + } + return readIntegerFixed(buf, length), buf, nil +} + +// read an unsigned little endian integer value of the +// specified length, starting at the specified byte offset +func readIntegerNonEmpty(s []byte, length uint) uint64 { + x := uint(0) + v := uint64(0) + for i := uint(0); i < length; i++ { + v += uint64(s[i]) << x + x += 8 + } + return v +} + +// read an unsigned little endian integer value of the +// specified length, starting at the specified byte offset +func readIntegerNonEmptyFromReader(r io.Reader, length uint) (uint64, []byte, error) { + buf := make([]byte, length) + if err := readBytes(buf, r); err != nil { + return 0, nil, WithStack(err) + } + return readIntegerNonEmpty(buf, length), buf, nil +} + +func toInt64(v uint64) int64 { + shift2 := uint64(1) << 63 + shift := int64(shift2 - 1) + if v >= shift2 { + return (int64(v-shift2) - shift) - 1 + } else { + return int64(v) + } +} + +func toUInt64(v int64) uint64 { + // If v is negative, we need to add 2^63 to make it positive, + // before we can cast it to an uint64_t: + if v >= 0 { + return uint64(v) + } + shift2 := uint64(1) << 63 + shift := int64(shift2 - 1) + return uint64((v+shift)+1) + shift2 + // return v >= 0 ? 
static_cast(v) + // : static_cast((v + shift) + 1) + shift2; + // Note that g++ and clang++ with -O3 compile this away to + // nothing. Further note that a plain cast from int64_t to + // uint64_t is not guaranteed to work for negative values! +} + +// read a variable length integer in unsigned LEB128 format +func readVariableValueLength(source []byte, offset ValueLength, reverse bool) ValueLength { + length := ValueLength(0) + p := uint(0) + for { + v := ValueLength(source[offset]) + length += (v & 0x7f) << p + p += 7 + if reverse { + offset-- + } else { + offset++ + } + if v&0x80 == 0 { + break + } + } + return length +} + +// read a variable length integer in unsigned LEB128 format +func readVariableValueLengthFromReader(r io.Reader, reverse bool) (ValueLength, []byte, error) { + if reverse { + return 0, nil, WithStack(fmt.Errorf("reverse is not supported")) + } + length := ValueLength(0) + p := uint(0) + buf := make([]byte, 1) + bytes := make([]byte, 0, 8) + for { + if n, err := r.Read(buf); n != 1 { + if err != nil { + return 0, nil, WithStack(err) + } else { + return 0, nil, WithStack(fmt.Errorf("failed to read 1 byte")) + } + } + bytes = append(bytes, buf[0]) + v := ValueLength(buf[0]) + length += (v & 0x7f) << p + p += 7 + if v&0x80 == 0 { + break + } + } + return length, bytes, nil +} + +// store a variable length integer in unsigned LEB128 format +func storeVariableValueLength(dst []byte, offset, value ValueLength, reverse bool) { + vpackAssert(value > 0) + + idx := offset + if reverse { + for value >= 0x80 { + dst[idx] = byte(value | 0x80) + idx-- + value >>= 7 + } + dst[idx] = byte(value & 0x7f) + } else { + for value >= 0x80 { + dst[idx] = byte(value | 0x80) + idx++ + value >>= 7 + } + dst[idx] = byte(value & 0x7f) + } +} + +// optionalBool returns the first arg element if available, otherwise returns defaultValue. 
+func optionalBool(arg []bool, defaultValue bool) bool { + if len(arg) == 0 { + return defaultValue + } + return arg[0] +} + +// alignAt returns the first number >= value that is aligned at the given alignment. +// alignment must be a power of 2. +func alignAt(value, alignment uint) uint { + mask := ^(alignment - 1) + return (value + alignment - 1) & mask +} diff --git a/vendor/github.com/arangodb/go-velocypack/value.go b/vendor/github.com/arangodb/go-velocypack/value.go new file mode 100644 index 00000000000..6cb748a0c6f --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/value.go @@ -0,0 +1,199 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "reflect" + "time" +) + +// Value is a helper structure used to build VPack structures. +// It holds a single data value with a specific type. +type Value struct { + vt ValueType + data interface{} + unindexed bool +} + +// NewValue creates a new Value with type derived from Go type of given value. +// If the given value is not a supported type, a Value of type Illegal is returned. +func NewValue(value interface{}) Value { + v := reflect.ValueOf(value) + return NewReflectValue(v) +} + +// NewReflectValue creates a new Value with type derived from Go type of given reflect value. 
+// If the given value is not a supported type, a Value of type Illegal is returned. +func NewReflectValue(v reflect.Value) Value { + vt := v.Type() + switch vt.Kind() { + case reflect.Bool: + return NewBoolValue(v.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return NewIntValue(v.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return NewUIntValue(v.Uint()) + case reflect.Float32, reflect.Float64: + return NewDoubleValue(v.Float()) + case reflect.String: + return NewStringValue(v.String()) + case reflect.Slice: + if vt.Elem().Kind() == reflect.Uint8 { + } + } + if v.CanInterface() { + raw := v.Interface() + if v, ok := raw.([]byte); ok { + return NewBinaryValue(v) + } + if v, ok := raw.(Slice); ok { + return NewSliceValue(v) + } + if v, ok := raw.(time.Time); ok { + return NewUTCDateValue(v) + } + if v, ok := raw.(Value); ok { + return v + } + } + return Value{Illegal, nil, false} +} + +// NewBoolValue creates a new Value of type Bool with given value. +func NewBoolValue(value bool) Value { + return Value{Bool, value, false} +} + +// NewIntValue creates a new Value of type Int with given value. +func NewIntValue(value int64) Value { + if value >= -6 && value <= 9 { + return Value{SmallInt, value, false} + } + return Value{Int, value, false} +} + +// NewUIntValue creates a new Value of type UInt with given value. +func NewUIntValue(value uint64) Value { + return Value{UInt, value, false} +} + +// NewDoubleValue creates a new Value of type Double with given value. +func NewDoubleValue(value float64) Value { + return Value{Double, value, false} +} + +// NewStringValue creates a new Value of type String with given value. +func NewStringValue(value string) Value { + return Value{String, value, false} +} + +// NewBinaryValue creates a new Value of type Binary with given value. 
+func NewBinaryValue(value []byte) Value { + return Value{Binary, value, false} +} + +// NewUTCDateValue creates a new Value of type UTCDate with given value. +func NewUTCDateValue(value time.Time) Value { + return Value{UTCDate, value, false} +} + +// NewSliceValue creates a new Value of from the given slice. +func NewSliceValue(value Slice) Value { + return Value{value.Type(), value, false} +} + +// NewObjectValue creates a new Value that opens a new object. +func NewObjectValue(unindexed ...bool) Value { + return Value{Object, nil, optionalBool(unindexed, false)} +} + +// NewArrayValue creates a new Value that opens a new array. +func NewArrayValue(unindexed ...bool) Value { + return Value{Array, nil, optionalBool(unindexed, false)} +} + +// NewNullValue creates a new Value of type Null. +func NewNullValue() Value { + return Value{Null, nil, false} +} + +// NewMinKeyValue creates a new Value of type MinKey. +func NewMinKeyValue() Value { + return Value{MinKey, nil, false} +} + +// NewMaxKeyValue creates a new Value of type MaxKey. +func NewMaxKeyValue() Value { + return Value{MaxKey, nil, false} +} + +// Type returns the ValueType of this value. +func (v Value) Type() ValueType { + return v.vt +} + +// IsSlice returns true when the value already contains a slice. +func (v Value) IsSlice() bool { + _, ok := v.data.(Slice) + return ok +} + +// IsIllegal returns true if the type of value is Illegal. 
+func (v Value) IsIllegal() bool { + return v.vt == Illegal +} + +func (v Value) boolValue() bool { + return v.data.(bool) +} + +func (v Value) intValue() int64 { + return v.data.(int64) +} + +func (v Value) uintValue() uint64 { + return v.data.(uint64) +} + +func (v Value) doubleValue() float64 { + return v.data.(float64) +} + +func (v Value) stringValue() string { + return v.data.(string) +} + +func (v Value) binaryValue() []byte { + return v.data.([]byte) +} + +func (v Value) utcDateValue() int64 { + time := v.data.(time.Time) + sec := time.Unix() + nsec := int64(time.Nanosecond()) + return sec*1000 + nsec/1000000 +} + +func (v Value) sliceValue() Slice { + return v.data.(Slice) +} diff --git a/vendor/github.com/arangodb/go-velocypack/value_length.go b/vendor/github.com/arangodb/go-velocypack/value_length.go new file mode 100644 index 00000000000..dd17edd61cd --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/value_length.go @@ -0,0 +1,53 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "fmt" + "strconv" +) + +type ValueLength uint64 + +func (s ValueLength) String() string { + return strconv.FormatInt(int64(s), 10) +} + +// getVariableValueLength calculates the length of a variable length integer in unsigned LEB128 format +func getVariableValueLength(value ValueLength) ValueLength { + l := ValueLength(1) + for value >= 0x80 { + value >>= 7 + l++ + } + return l +} + +// check if the length is beyond the size of a SIZE_MAX on this platform +func checkOverflow(length ValueLength) error { + if length < 0 { + return fmt.Errorf("Negative length") + } + // TODO + return nil +} diff --git a/vendor/github.com/arangodb/go-velocypack/value_type.go b/vendor/github.com/arangodb/go-velocypack/value_type.go new file mode 100644 index 00000000000..ac2b846c159 --- /dev/null +++ b/vendor/github.com/arangodb/go-velocypack/value_type.go @@ -0,0 +1,382 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +type ValueType int + +const ( + None ValueType = iota // not yet initialized + Illegal // illegal value + Null // JSON null + Bool + Array + Object + Double + UTCDate + External + MinKey + MaxKey + Int + UInt + SmallInt + String + Binary + BCD + Custom +) + +// String returns a string representation of the given type. +func (vt ValueType) String() string { + return typeNames[vt] +} + +var typeNames = [...]string{ + "None", + "Illegal", + "Null", + "Bool", + "Array", + "Object", + "Double", + "UTCDate", + "External", + "MinKey", + "MaxKey", + "Int", + "UInt", + "SmallInt", + "String", + "Binary", + "BCD", + "Custom", +} + +var typeMap = [256]ValueType{ + /* 0x00 */ None /* 0x01 */, Array, + /* 0x02 */ Array /* 0x03 */, Array, + /* 0x04 */ Array /* 0x05 */, Array, + /* 0x06 */ Array /* 0x07 */, Array, + /* 0x08 */ Array /* 0x09 */, Array, + /* 0x0a */ Object /* 0x0b */, Object, + /* 0x0c */ Object /* 0x0d */, Object, + /* 0x0e */ Object /* 0x0f */, Object, + /* 0x10 */ Object /* 0x11 */, Object, + /* 0x12 */ Object /* 0x13 */, Array, + /* 0x14 */ Object /* 0x15 */, None, + /* 0x16 */ None /* 0x17 */, Illegal, + /* 0x18 */ Null /* 0x19 */, Bool, + /* 0x1a */ Bool /* 0x1b */, Double, + /* 0x1c */ UTCDate /* 0x1d */, External, + /* 0x1e */ MinKey /* 0x1f */, MaxKey, + /* 0x20 */ Int /* 0x21 */, Int, + /* 0x22 */ Int /* 0x23 */, Int, + /* 0x24 */ Int /* 0x25 */, Int, + /* 0x26 */ Int /* 0x27 */, Int, + /* 0x28 */ UInt /* 0x29 */, UInt, + /* 0x2a */ UInt /* 0x2b */, UInt, + /* 0x2c */ UInt /* 0x2d */, UInt, + /* 0x2e */ UInt /* 0x2f */, UInt, + /* 0x30 */ SmallInt /* 0x31 */, SmallInt, + /* 0x32 */ SmallInt /* 0x33 */, SmallInt, + /* 0x34 */ SmallInt /* 0x35 */, SmallInt, + /* 0x36 */ SmallInt /* 0x37 */, SmallInt, + /* 0x38 */ SmallInt /* 0x39 */, SmallInt, + /* 0x3a */ SmallInt /* 0x3b */, SmallInt, + /* 0x3c */ SmallInt /* 0x3d */, SmallInt, + /* 0x3e 
*/ SmallInt /* 0x3f */, SmallInt, + /* 0x40 */ String /* 0x41 */, String, + /* 0x42 */ String /* 0x43 */, String, + /* 0x44 */ String /* 0x45 */, String, + /* 0x46 */ String /* 0x47 */, String, + /* 0x48 */ String /* 0x49 */, String, + /* 0x4a */ String /* 0x4b */, String, + /* 0x4c */ String /* 0x4d */, String, + /* 0x4e */ String /* 0x4f */, String, + /* 0x50 */ String /* 0x51 */, String, + /* 0x52 */ String /* 0x53 */, String, + /* 0x54 */ String /* 0x55 */, String, + /* 0x56 */ String /* 0x57 */, String, + /* 0x58 */ String /* 0x59 */, String, + /* 0x5a */ String /* 0x5b */, String, + /* 0x5c */ String /* 0x5d */, String, + /* 0x5e */ String /* 0x5f */, String, + /* 0x60 */ String /* 0x61 */, String, + /* 0x62 */ String /* 0x63 */, String, + /* 0x64 */ String /* 0x65 */, String, + /* 0x66 */ String /* 0x67 */, String, + /* 0x68 */ String /* 0x69 */, String, + /* 0x6a */ String /* 0x6b */, String, + /* 0x6c */ String /* 0x6d */, String, + /* 0x6e */ String /* 0x6f */, String, + /* 0x70 */ String /* 0x71 */, String, + /* 0x72 */ String /* 0x73 */, String, + /* 0x74 */ String /* 0x75 */, String, + /* 0x76 */ String /* 0x77 */, String, + /* 0x78 */ String /* 0x79 */, String, + /* 0x7a */ String /* 0x7b */, String, + /* 0x7c */ String /* 0x7d */, String, + /* 0x7e */ String /* 0x7f */, String, + /* 0x80 */ String /* 0x81 */, String, + /* 0x82 */ String /* 0x83 */, String, + /* 0x84 */ String /* 0x85 */, String, + /* 0x86 */ String /* 0x87 */, String, + /* 0x88 */ String /* 0x89 */, String, + /* 0x8a */ String /* 0x8b */, String, + /* 0x8c */ String /* 0x8d */, String, + /* 0x8e */ String /* 0x8f */, String, + /* 0x90 */ String /* 0x91 */, String, + /* 0x92 */ String /* 0x93 */, String, + /* 0x94 */ String /* 0x95 */, String, + /* 0x96 */ String /* 0x97 */, String, + /* 0x98 */ String /* 0x99 */, String, + /* 0x9a */ String /* 0x9b */, String, + /* 0x9c */ String /* 0x9d */, String, + /* 0x9e */ String /* 0x9f */, String, + /* 0xa0 */ String /* 0xa1 */, String, + /* 
0xa2 */ String /* 0xa3 */, String, + /* 0xa4 */ String /* 0xa5 */, String, + /* 0xa6 */ String /* 0xa7 */, String, + /* 0xa8 */ String /* 0xa9 */, String, + /* 0xaa */ String /* 0xab */, String, + /* 0xac */ String /* 0xad */, String, + /* 0xae */ String /* 0xaf */, String, + /* 0xb0 */ String /* 0xb1 */, String, + /* 0xb2 */ String /* 0xb3 */, String, + /* 0xb4 */ String /* 0xb5 */, String, + /* 0xb6 */ String /* 0xb7 */, String, + /* 0xb8 */ String /* 0xb9 */, String, + /* 0xba */ String /* 0xbb */, String, + /* 0xbc */ String /* 0xbd */, String, + /* 0xbe */ String /* 0xbf */, String, + /* 0xc0 */ Binary /* 0xc1 */, Binary, + /* 0xc2 */ Binary /* 0xc3 */, Binary, + /* 0xc4 */ Binary /* 0xc5 */, Binary, + /* 0xc6 */ Binary /* 0xc7 */, Binary, + /* 0xc8 */ BCD /* 0xc9 */, BCD, + /* 0xca */ BCD /* 0xcb */, BCD, + /* 0xcc */ BCD /* 0xcd */, BCD, + /* 0xce */ BCD /* 0xcf */, BCD, + /* 0xd0 */ BCD /* 0xd1 */, BCD, + /* 0xd2 */ BCD /* 0xd3 */, BCD, + /* 0xd4 */ BCD /* 0xd5 */, BCD, + /* 0xd6 */ BCD /* 0xd7 */, BCD, + /* 0xd8 */ None /* 0xd9 */, None, + /* 0xda */ None /* 0xdb */, None, + /* 0xdc */ None /* 0xdd */, None, + /* 0xde */ None /* 0xdf */, None, + /* 0xe0 */ None /* 0xe1 */, None, + /* 0xe2 */ None /* 0xe3 */, None, + /* 0xe4 */ None /* 0xe5 */, None, + /* 0xe6 */ None /* 0xe7 */, None, + /* 0xe8 */ None /* 0xe9 */, None, + /* 0xea */ None /* 0xeb */, None, + /* 0xec */ None /* 0xed */, None, + /* 0xee */ None /* 0xef */, None, + /* 0xf0 */ Custom /* 0xf1 */, Custom, + /* 0xf2 */ Custom /* 0xf3 */, Custom, + /* 0xf4 */ Custom /* 0xf5 */, Custom, + /* 0xf6 */ Custom /* 0xf7 */, Custom, + /* 0xf8 */ Custom /* 0xf9 */, Custom, + /* 0xfa */ Custom /* 0xfb */, Custom, + /* 0xfc */ Custom /* 0xfd */, Custom, + /* 0xfe */ Custom /* 0xff */, Custom} + +const ( + doubleLength = 8 + int64Length = 8 + charPtrLength = 8 +) + +var fixedTypeLengths = [256]int{ + /* 0x00 */ 1 /* 0x01 */, 1, + /* 0x02 */ 0 /* 0x03 */, 0, + /* 0x04 */ 0 /* 0x05 */, 0, + /* 0x06 */ 0 /* 0x07 
*/, 0, + /* 0x08 */ 0 /* 0x09 */, 0, + /* 0x0a */ 1 /* 0x0b */, 0, + /* 0x0c */ 0 /* 0x0d */, 0, + /* 0x0e */ 0 /* 0x0f */, 0, + /* 0x10 */ 0 /* 0x11 */, 0, + /* 0x12 */ 0 /* 0x13 */, 0, + /* 0x14 */ 0 /* 0x15 */, 0, + /* 0x16 */ 0 /* 0x17 */, 1, + /* 0x18 */ 1 /* 0x19 */, 1, + /* 0x1a */ 1 /* 0x1b */, 1 + doubleLength, /*sizeof(double)*/ + /* 0x1c */ 1 + int64Length /*sizeof(int64_t)*/ /* 0x1d */, 1 + charPtrLength, /* sizeof(char*)*/ + /* 0x1e */ 1 /* 0x1f */, 1, + /* 0x20 */ 2 /* 0x21 */, 3, + /* 0x22 */ 4 /* 0x23 */, 5, + /* 0x24 */ 6 /* 0x25 */, 7, + /* 0x26 */ 8 /* 0x27 */, 9, + /* 0x28 */ 2 /* 0x29 */, 3, + /* 0x2a */ 4 /* 0x2b */, 5, + /* 0x2c */ 6 /* 0x2d */, 7, + /* 0x2e */ 8 /* 0x2f */, 9, + /* 0x30 */ 1 /* 0x31 */, 1, + /* 0x32 */ 1 /* 0x33 */, 1, + /* 0x34 */ 1 /* 0x35 */, 1, + /* 0x36 */ 1 /* 0x37 */, 1, + /* 0x38 */ 1 /* 0x39 */, 1, + /* 0x3a */ 1 /* 0x3b */, 1, + /* 0x3c */ 1 /* 0x3d */, 1, + /* 0x3e */ 1 /* 0x3f */, 1, + /* 0x40 */ 1 /* 0x41 */, 2, + /* 0x42 */ 3 /* 0x43 */, 4, + /* 0x44 */ 5 /* 0x45 */, 6, + /* 0x46 */ 7 /* 0x47 */, 8, + /* 0x48 */ 9 /* 0x49 */, 10, + /* 0x4a */ 11 /* 0x4b */, 12, + /* 0x4c */ 13 /* 0x4d */, 14, + /* 0x4e */ 15 /* 0x4f */, 16, + /* 0x50 */ 17 /* 0x51 */, 18, + /* 0x52 */ 19 /* 0x53 */, 20, + /* 0x54 */ 21 /* 0x55 */, 22, + /* 0x56 */ 23 /* 0x57 */, 24, + /* 0x58 */ 25 /* 0x59 */, 26, + /* 0x5a */ 27 /* 0x5b */, 28, + /* 0x5c */ 29 /* 0x5d */, 30, + /* 0x5e */ 31 /* 0x5f */, 32, + /* 0x60 */ 33 /* 0x61 */, 34, + /* 0x62 */ 35 /* 0x63 */, 36, + /* 0x64 */ 37 /* 0x65 */, 38, + /* 0x66 */ 39 /* 0x67 */, 40, + /* 0x68 */ 41 /* 0x69 */, 42, + /* 0x6a */ 43 /* 0x6b */, 44, + /* 0x6c */ 45 /* 0x6d */, 46, + /* 0x6e */ 47 /* 0x6f */, 48, + /* 0x70 */ 49 /* 0x71 */, 50, + /* 0x72 */ 51 /* 0x73 */, 52, + /* 0x74 */ 53 /* 0x75 */, 54, + /* 0x76 */ 55 /* 0x77 */, 56, + /* 0x78 */ 57 /* 0x79 */, 58, + /* 0x7a */ 59 /* 0x7b */, 60, + /* 0x7c */ 61 /* 0x7d */, 62, + /* 0x7e */ 63 /* 0x7f */, 64, + /* 0x80 */ 65 /* 0x81 */, 66, + 
/* 0x82 */ 67 /* 0x83 */, 68, + /* 0x84 */ 69 /* 0x85 */, 70, + /* 0x86 */ 71 /* 0x87 */, 72, + /* 0x88 */ 73 /* 0x89 */, 74, + /* 0x8a */ 75 /* 0x8b */, 76, + /* 0x8c */ 77 /* 0x8d */, 78, + /* 0x8e */ 79 /* 0x8f */, 80, + /* 0x90 */ 81 /* 0x91 */, 82, + /* 0x92 */ 83 /* 0x93 */, 84, + /* 0x94 */ 85 /* 0x95 */, 86, + /* 0x96 */ 87 /* 0x97 */, 88, + /* 0x98 */ 89 /* 0x99 */, 90, + /* 0x9a */ 91 /* 0x9b */, 92, + /* 0x9c */ 93 /* 0x9d */, 94, + /* 0x9e */ 95 /* 0x9f */, 96, + /* 0xa0 */ 97 /* 0xa1 */, 98, + /* 0xa2 */ 99 /* 0xa3 */, 100, + /* 0xa4 */ 101 /* 0xa5 */, 102, + /* 0xa6 */ 103 /* 0xa7 */, 104, + /* 0xa8 */ 105 /* 0xa9 */, 106, + /* 0xaa */ 107 /* 0xab */, 108, + /* 0xac */ 109 /* 0xad */, 110, + /* 0xae */ 111 /* 0xaf */, 112, + /* 0xb0 */ 113 /* 0xb1 */, 114, + /* 0xb2 */ 115 /* 0xb3 */, 116, + /* 0xb4 */ 117 /* 0xb5 */, 118, + /* 0xb6 */ 119 /* 0xb7 */, 120, + /* 0xb8 */ 121 /* 0xb9 */, 122, + /* 0xba */ 123 /* 0xbb */, 124, + /* 0xbc */ 125 /* 0xbd */, 126, + /* 0xbe */ 127 /* 0xbf */, 0, + /* 0xc0 */ 0 /* 0xc1 */, 0, + /* 0xc2 */ 0 /* 0xc3 */, 0, + /* 0xc4 */ 0 /* 0xc5 */, 0, + /* 0xc6 */ 0 /* 0xc7 */, 0, + /* 0xc8 */ 0 /* 0xc9 */, 0, + /* 0xca */ 0 /* 0xcb */, 0, + /* 0xcc */ 0 /* 0xcd */, 0, + /* 0xce */ 0 /* 0xcf */, 0, + /* 0xd0 */ 0 /* 0xd1 */, 0, + /* 0xd2 */ 0 /* 0xd3 */, 0, + /* 0xd4 */ 0 /* 0xd5 */, 0, + /* 0xd6 */ 0 /* 0xd7 */, 0, + /* 0xd8 */ 0 /* 0xd9 */, 0, + /* 0xda */ 0 /* 0xdb */, 0, + /* 0xdc */ 0 /* 0xdd */, 0, + /* 0xde */ 0 /* 0xdf */, 0, + /* 0xe0 */ 0 /* 0xe1 */, 0, + /* 0xe2 */ 0 /* 0xe3 */, 0, + /* 0xe4 */ 0 /* 0xe5 */, 0, + /* 0xe6 */ 0 /* 0xe7 */, 0, + /* 0xe8 */ 0 /* 0xe9 */, 0, + /* 0xea */ 0 /* 0xeb */, 0, + /* 0xec */ 0 /* 0xed */, 0, + /* 0xee */ 0 /* 0xef */, 0, + /* 0xf0 */ 2 /* 0xf1 */, 3, + /* 0xf2 */ 5 /* 0xf3 */, 9, + /* 0xf4 */ 0 /* 0xf5 */, 0, + /* 0xf6 */ 0 /* 0xf7 */, 0, + /* 0xf8 */ 0 /* 0xf9 */, 0, + /* 0xfa */ 0 /* 0xfb */, 0, + /* 0xfc */ 0 /* 0xfd */, 0, + /* 0xfe */ 0 /* 0xff */, 0} + +var widthMap = 
[32]uint{ + 0, // 0x00, None + 1, // 0x01, empty array + 1, // 0x02, array without index table + 2, // 0x03, array without index table + 4, // 0x04, array without index table + 8, // 0x05, array without index table + 1, // 0x06, array with index table + 2, // 0x07, array with index table + 4, // 0x08, array with index table + 8, // 0x09, array with index table + 1, // 0x0a, empty object + 1, // 0x0b, object with sorted index table + 2, // 0x0c, object with sorted index table + 4, // 0x0d, object with sorted index table + 8, // 0x0e, object with sorted index table + 1, // 0x0f, object with unsorted index table + 2, // 0x10, object with unsorted index table + 4, // 0x11, object with unsorted index table + 8, // 0x12, object with unsorted index table + 0} + +var firstSubMap = [32]int{ + 0, // 0x00, None + 1, // 0x01, empty array + 2, // 0x02, array without index table + 3, // 0x03, array without index table + 5, // 0x04, array without index table + 9, // 0x05, array without index table + 3, // 0x06, array with index table + 5, // 0x07, array with index table + 9, // 0x08, array with index table + 9, // 0x09, array with index table + 1, // 0x0a, empty object + 3, // 0x0b, object with sorted index table + 5, // 0x0c, object with sorted index table + 9, // 0x0d, object with sorted index table + 9, // 0x0e, object with sorted index table + 3, // 0x0f, object with unsorted index table + 5, // 0x10, object with unsorted index table + 9, // 0x11, object with unsorted index table + 9, // 0x12, object with unsorted index table + 0} diff --git a/vendor/github.com/golang-jwt/jwt/.gitignore b/vendor/github.com/golang-jwt/jwt/.gitignore new file mode 100644 index 00000000000..09573e0169c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin +.idea/ + diff --git a/vendor/github.com/golang-jwt/jwt/LICENSE b/vendor/github.com/golang-jwt/jwt/LICENSE new file mode 100644 index 00000000000..35dbc252041 --- /dev/null +++ 
b/vendor/github.com/golang-jwt/jwt/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2012 Dave Grijalva +Copyright (c) 2021 golang-jwt maintainers + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md new file mode 100644 index 00000000000..c4efbd2a8c5 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md @@ -0,0 +1,22 @@ +## Migration Guide (v3.2.1) + +Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1]), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path. 
+ +### go.mod replacement + +In a first step, the easiest way is to use `go mod edit` to issue a replacement. + +``` +go mod edit -replace github.com/dgrijalva/jwt-go=github.com/golang-jwt/jwt@v3.2.1+incompatible +go mod tidy +``` + +This will still keep the old import path in your code but replace it with the new package and also introduce a new indirect dependency to `github.com/golang-jwt/jwt`. Try to compile your project; it should still work. + +### Cleanup + +If your code still consistently builds, you can replace all occurences of `github.com/dgrijalva/jwt-go` with `github.com/golang-jwt/jwt`, either manually or by using tools such as `sed`. Finally, the `replace` directive in the `go.mod` file can be removed. + +## Older releases (before v3.2.0) + +The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. \ No newline at end of file diff --git a/vendor/github.com/golang-jwt/jwt/README.md b/vendor/github.com/golang-jwt/jwt/README.md new file mode 100644 index 00000000000..9b653e46b01 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/README.md @@ -0,0 +1,113 @@ +# jwt-go + +[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) +[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519). + +**IMPORT PATH CHANGE:** Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. 
See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic. + +Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path. + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. + +### Supported Go versions + +Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy). +So we will support a major version of Go until there are two newer major releases. +We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities +which will not be fixed. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. 
It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Examples + +See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage: + +* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) +* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) +* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. + +Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RTF 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. 
+ +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/golang-jwt/jwt.v3`. It will do the right thing WRT semantic versioning. + +**BREAKING CHANGES:*** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data: + +* The author of the token was in the possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. 
The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. 
+ +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +### Troubleshooting + +This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types. + +## More + +Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md new file mode 100644 index 00000000000..637f2ba616a --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md @@ -0,0 +1,131 @@ +## `jwt-go` Version History + +#### 3.2.2 + +* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. 
By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)). +* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)). +* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)). +* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)). + +#### 3.2.1 + +* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code + * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt` +* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160 + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. 
+ * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. 
The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. 
+ +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. 
No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods diff --git a/vendor/github.com/golang-jwt/jwt/claims.go b/vendor/github.com/golang-jwt/jwt/claims.go new file mode 100644 index 00000000000..f1dba3cb916 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/claims.go @@ -0,0 +1,146 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. 
+ if !c.VerifyExpiresAt(now, false) { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud([]string{c.Audience}, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud []string, cmp string, required bool) bool { + if len(aud) == 0 { + return !required + } + // use a var here to keep constant time compare when looping over a number of claims + result := false + + var stringClaims string + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + result = true + } + stringClaims = stringClaims + a + } + + // case where "" is sent in one or many aud claims + if len(stringClaims) == 0 { + return !required + } + + return result +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/golang-jwt/jwt/doc.go b/vendor/github.com/golang-jwt/jwt/doc.go new file mode 100644 index 00000000000..a86dc1a3b34 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. 
+package jwt diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa.go b/vendor/github.com/golang-jwt/jwt/ecdsa.go new file mode 100644 index 00000000000..15e23435df6 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/ecdsa.go @@ -0,0 +1,142 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k 
:= key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { + return nil + } + + return ErrECDSAVerification +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays + // padded with zeros on the left to make sure the sizes work out. + // Output must be 2*keyBytes long. + out := make([]byte, 2*keyBytes) + r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. + s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output. 
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go new file mode 100644 index 00000000000..db9f4be7d8e --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/ed25519.go 
b/vendor/github.com/golang-jwt/jwt/ed25519.go new file mode 100644 index 00000000000..a2f8ddbe9bb --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/ed25519.go @@ -0,0 +1,81 @@ +package jwt + +import ( + "errors" + + "crypto/ed25519" +) + +var ( + ErrEd25519Verification = errors.New("ed25519: verification error") +) + +// Implements the EdDSA family +// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification +type SigningMethodEd25519 struct{} + +// Specific instance for EdDSA +var ( + SigningMethodEdDSA *SigningMethodEd25519 +) + +func init() { + SigningMethodEdDSA = &SigningMethodEd25519{} + RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { + return SigningMethodEdDSA + }) +} + +func (m *SigningMethodEd25519) Alg() string { + return "EdDSA" +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ed25519.PublicKey +func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error { + var err error + var ed25519Key ed25519.PublicKey + var ok bool + + if ed25519Key, ok = key.(ed25519.PublicKey); !ok { + return ErrInvalidKeyType + } + + if len(ed25519Key) != ed25519.PublicKeySize { + return ErrInvalidKey + } + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Verify the signature + if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { + return ErrEd25519Verification + } + + return nil +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ed25519.PrivateKey +func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) { + var ed25519Key ed25519.PrivateKey + var ok bool + + if ed25519Key, ok = key.(ed25519.PrivateKey); !ok { + return "", ErrInvalidKeyType + } + + // ed25519.Sign panics if private key not equal to ed25519.PrivateKeySize + // this allows to avoid recover usage + if len(ed25519Key) != 
ed25519.PrivateKeySize { + return "", ErrInvalidKey + } + + // Sign the string and return the encoded result + sig := ed25519.Sign(ed25519Key, []byte(signingString)) + return EncodeSegment(sig), nil +} diff --git a/vendor/github.com/golang-jwt/jwt/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/ed25519_utils.go new file mode 100644 index 00000000000..c6357275efc --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/ed25519_utils.go @@ -0,0 +1,64 @@ +package jwt + +import ( + "crypto" + "crypto/ed25519" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotEdPrivateKey = errors.New("Key is not a valid Ed25519 private key") + ErrNotEdPublicKey = errors.New("Key is not a valid Ed25519 public key") +) + +// Parse PEM-encoded Edwards curve private key +func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PrivateKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok { + return nil, ErrNotEdPrivateKey + } + + return pkey, nil +} + +// Parse PEM-encoded Edwards curve public key +func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PublicKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { + return nil, ErrNotEdPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/errors.go b/vendor/github.com/golang-jwt/jwt/errors.go new file mode 100644 index 
00000000000..1c93024aad2 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... 
constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/vendor/github.com/golang-jwt/jwt/hmac.go b/vendor/github.com/golang-jwt/jwt/hmac.go new file mode 100644 index 00000000000..addbe5d4018 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. 
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. +// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/golang-jwt/jwt/map_claims.go b/vendor/github.com/golang-jwt/jwt/map_claims.go new file mode 100644 index 00000000000..72c79f92e55 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/map_claims.go @@ -0,0 +1,120 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// VerifyAudience Compares the aud claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + var aud []string + switch v := m["aud"].(type) { + case string: + aud = append(aud, v) + case []string: + aud = v + case []interface{}: + for _, a := range v { + vs, ok := a.(string) + if !ok { + return false + } + aud = append(aud, vs) + } + } + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + exp, ok := m["exp"] + if !ok { + return !req + } + switch expType := exp.(type) { + case float64: + return verifyExp(int64(expType), cmp, req) + case json.Number: + v, _ := expType.Int64() + return verifyExp(v, cmp, req) + } + return false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + iat, ok := m["iat"] + if !ok { + return !req + } + switch iatType := iat.(type) { + case float64: + return verifyIat(int64(iatType), cmp, req) + case json.Number: + v, _ := iatType.Int64() + return verifyIat(v, cmp, req) + } + return false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + nbf, ok := m["nbf"] + if !ok { + return !req + } + switch nbfType := nbf.(type) { + case float64: + return verifyNbf(int64(nbfType), cmp, req) + case json.Number: + v, _ := nbfType.Int64() + return verifyNbf(v, cmp, req) + } + return false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if !m.VerifyExpiresAt(now, false) { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if !m.VerifyIssuedAt(now, false) { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !m.VerifyNotBefore(now, false) { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/golang-jwt/jwt/none.go b/vendor/github.com/golang-jwt/jwt/none.go new file mode 100644 index 00000000000..f04d189d067 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. 
+var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. 
+ return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/golang-jwt/jwt/parser.go b/vendor/github.com/golang-jwt/jwt/parser.go new file mode 100644 index 00000000000..d6901d9adb5 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. 
short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. 
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. 
Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/rsa.go b/vendor/github.com/golang-jwt/jwt/rsa.go new file mode 100644 index 00000000000..e4caf1ca4a1 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m 
*SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be an *rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/rsa_pss.go new file mode 100644 index 00000000000..c0147086480 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/rsa_pss.go @@ -0,0 +1,142 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions + // VerifyOptions is 
optional. If set overrides Options for rsa.VerifyPPS. + // Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow + // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. + // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. + VerifyOptions *rsa.PSSOptions +} + +// Specific instances for RS/PS and company. +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode 
the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + opts := m.Options + if m.VerifyOptions != nil { + opts = m.VerifyOptions + } + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/rsa_utils.go new file mode 100644 index 00000000000..14c78c292a9 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/rsa_utils.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // 
Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil 
+} diff --git a/vendor/github.com/golang-jwt/jwt/signing_method.go b/vendor/github.com/golang-jwt/jwt/signing_method.go new file mode 100644 index 00000000000..ed1f212b21e --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/golang-jwt/jwt/token.go b/vendor/github.com/golang-jwt/jwt/token.go new file mode 100644 index 00000000000..6b30ced1200 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/token.go @@ -0,0 +1,104 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. 
+var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. 
+func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return base64.RawURLEncoding.EncodeToString(seg) +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + return base64.RawURLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/mattn/go-ieproxy/README.md b/vendor/github.com/mattn/go-ieproxy/README.md index 3e3b4759cf7..fbc801ae5d1 100644 --- a/vendor/github.com/mattn/go-ieproxy/README.md +++ b/vendor/github.com/mattn/go-ieproxy/README.md @@ -1,10 +1,8 @@ # ieproxy -Go package to detect the proxy settings on Windows platform, and MacOS. +Go package to detect the proxy settings on Windows platform. -On Windows, the settings are initially attempted to be read from the [`WinHttpGetIEProxyConfigForCurrentUser` DLL call](https://docs.microsoft.com/en-us/windows/desktop/api/winhttp/nf-winhttp-winhttpgetieproxyconfigforcurrentuser), but falls back to the registry (`CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Internet Settings`) in the event the DLL call fails. 
- -On MacOS, the settings are read from [`CFNetworkCopySystemProxySettings` method of CFNetwork](https://developer.apple.com/documentation/cfnetwork/1426754-cfnetworkcopysystemproxysettings?language=objc). +The settings are initially attempted to be read from the [`WinHttpGetIEProxyConfigForCurrentUser` DLL call](https://docs.microsoft.com/en-us/windows/desktop/api/winhttp/nf-winhttp-winhttpgetieproxyconfigforcurrentuser), but falls back to the registry (`CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Internet Settings`) in the event the DLL call fails. For more information, take a look at the [documentation](https://godoc.org/github.com/mattn/go-ieproxy) diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy.go b/vendor/github.com/mattn/go-ieproxy/ieproxy.go index 0b5460bb828..51fe18e3dbe 100644 --- a/vendor/github.com/mattn/go-ieproxy/ieproxy.go +++ b/vendor/github.com/mattn/go-ieproxy/ieproxy.go @@ -36,11 +36,6 @@ func GetConf() ProxyConf { return getConf() } -// ReloadConf reloads the proxy configuration -func ReloadConf() ProxyConf { - return reloadConf() -} - // OverrideEnvWithStaticProxy writes new values to the // `http_proxy`, `https_proxy` and `no_proxy` environment variables. 
// The values are taken from the Windows Regedit (should be called in `init()` function - see example) diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go deleted file mode 100644 index 5d53555708b..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go +++ /dev/null @@ -1,123 +0,0 @@ -package ieproxy - -/* -#cgo LDFLAGS: -framework CoreFoundation -#cgo LDFLAGS: -framework CFNetwork -#include -#include -*/ -import "C" - -import ( - "fmt" - "strings" - "sync" - "unsafe" -) - -var once sync.Once -var darwinProxyConf ProxyConf - -// GetConf retrieves the proxy configuration from the Windows Regedit -func getConf() ProxyConf { - once.Do(writeConf) - return darwinProxyConf -} - -// reloadConf forces a reload of the proxy configuration. -func reloadConf() ProxyConf { - writeConf() - return getConf() -} - -func cfStringGetGoString(cfStr C.CFStringRef) string { - retCString := (*C.char)(C.calloc(C.ulong(uint(128)), 1)) - defer C.free(unsafe.Pointer(retCString)) - - C.CFStringGetCString(cfStr, retCString, C.long(128), C.kCFStringEncodingUTF8) - return C.GoString(retCString) -} - -func cfNumberGetGoInt(cfNum C.CFNumberRef) int { - ret := 0 - C.CFNumberGetValue(cfNum, C.kCFNumberIntType, unsafe.Pointer(&ret)) - return ret -} - -func cfArrayGetGoStrings(cfArray C.CFArrayRef) []string { - var ret []string - for i := 0; i < int(C.CFArrayGetCount(cfArray)); i++ { - cfStr := C.CFStringRef(C.CFArrayGetValueAtIndex(cfArray, C.long(i))) - if unsafe.Pointer(cfStr) != C.NULL { - ret = append(ret, cfStringGetGoString(cfStr)) - } - } - return ret -} - -func writeConf() { - cfDictProxy := C.CFDictionaryRef(C.CFNetworkCopySystemProxySettings()) - defer C.CFRelease(C.CFTypeRef(cfDictProxy)) - darwinProxyConf = ProxyConf{} - - cfNumHttpEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPEnable))) - if unsafe.Pointer(cfNumHttpEnable) != C.NULL && 
cfNumberGetGoInt(cfNumHttpEnable) > 0 { - darwinProxyConf.Static.Active = true - if darwinProxyConf.Static.Protocols == nil { - darwinProxyConf.Static.Protocols = make(map[string]string) - } - httpHost := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPProxy))) - httpPort := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPPort))) - - httpProxy := fmt.Sprintf("%s:%d", cfStringGetGoString(httpHost), cfNumberGetGoInt(httpPort)) - darwinProxyConf.Static.Protocols["http"] = httpProxy - } - - cfNumHttpsEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSEnable))) - if unsafe.Pointer(cfNumHttpsEnable) != C.NULL && cfNumberGetGoInt(cfNumHttpsEnable) > 0 { - darwinProxyConf.Static.Active = true - if darwinProxyConf.Static.Protocols == nil { - darwinProxyConf.Static.Protocols = make(map[string]string) - } - httpsHost := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSProxy))) - httpsPort := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSPort))) - - httpProxy := fmt.Sprintf("%s:%d", cfStringGetGoString(httpsHost), cfNumberGetGoInt(httpsPort)) - darwinProxyConf.Static.Protocols["https"] = httpProxy - } - - if darwinProxyConf.Static.Active { - cfArrayExceptionList := C.CFArrayRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesExceptionsList))) - if unsafe.Pointer(cfArrayExceptionList) != C.NULL { - exceptionList := cfArrayGetGoStrings(cfArrayExceptionList) - darwinProxyConf.Static.NoProxy = strings.Join(exceptionList, ",") - } - } - - cfNumPacEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesProxyAutoConfigEnable))) - if unsafe.Pointer(cfNumPacEnable) != C.NULL && cfNumberGetGoInt(cfNumPacEnable) > 0 { - cfStringPac := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, 
unsafe.Pointer(C.kCFNetworkProxiesProxyAutoConfigURLString))) - if unsafe.Pointer(cfStringPac) != C.NULL { - pac := cfStringGetGoString(cfStringPac) - darwinProxyConf.Automatic.PreConfiguredURL = pac - darwinProxyConf.Automatic.Active = true - } - } -} - -// OverrideEnvWithStaticProxy writes new values to the -// http_proxy, https_proxy and no_proxy environment variables. -// The values are taken from the MacOS System Preferences. -func overrideEnvWithStaticProxy(conf ProxyConf, setenv envSetter) { - if conf.Static.Active { - for _, scheme := range []string{"http", "https"} { - url := conf.Static.Protocols[scheme] - if url != "" { - setenv(scheme+"_proxy", url) - } - } - if conf.Static.NoProxy != "" { - setenv("no_proxy", conf.Static.NoProxy) - } - } -} diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go index 3594b975abe..dc2bccfc297 100644 --- a/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go +++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go @@ -1,4 +1,4 @@ -// +build !windows,!darwin +// +build !windows package ieproxy @@ -6,9 +6,5 @@ func getConf() ProxyConf { return ProxyConf{} } -func reloadConf() ProxyConf { - return getConf() -} - func overrideEnvWithStaticProxy(pc ProxyConf, setenv envSetter) { } diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go index 7fd375017f6..c7b29c0b356 100644 --- a/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go +++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go @@ -24,12 +24,6 @@ func getConf() ProxyConf { return windowsProxyConf } -// reloadConf forces a reload of the proxy configuration from the Windows registry -func reloadConf() ProxyConf { - writeConf() - return getConf() -} - func writeConf() { proxy := "" proxyByPass := "" @@ -48,7 +42,7 @@ func writeConf() { autoDetect = ieCfg.fAutoDetect } - if proxy == "" && !autoDetect { + if proxy == "" && !autoDetect{ // Try 
WinHTTP default proxy. if defaultCfg, err := getDefaultProxyConfiguration(); err == nil { defer globalFreeWrapper(defaultCfg.lpszProxy) diff --git a/vendor/github.com/mattn/go-ieproxy/pac_darwin.go b/vendor/github.com/mattn/go-ieproxy/pac_darwin.go deleted file mode 100644 index a8bf90e94d7..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/pac_darwin.go +++ /dev/null @@ -1,141 +0,0 @@ -package ieproxy - -/* -#cgo LDFLAGS: -framework CoreFoundation -#cgo LDFLAGS: -framework CFNetwork -#include -#include - -#define STR_LEN 128 - -void proxyAutoConfCallback(void* client, CFArrayRef proxies, CFErrorRef error) { - CFTypeRef* result_ptr = (CFTypeRef*)client; - if (error != NULL) { - *result_ptr = CFRetain(error); - } else { - *result_ptr = CFRetain(proxies); - } - CFRunLoopStop(CFRunLoopGetCurrent()); -} - -int intCFNumber(CFNumberRef num) { - int ret; - CFNumberGetValue(num, kCFNumberIntType, &ret); - return ret; -} - -char* _getProxyUrlFromPac(char* pac, char* reqCs) { - char* retCString = (char*)calloc(STR_LEN, sizeof(char)); - - CFStringRef reqStr = CFStringCreateWithCString(NULL, reqCs, kCFStringEncodingUTF8); - CFStringRef pacStr = CFStringCreateWithCString(NULL, pac, kCFStringEncodingUTF8); - CFURLRef pacUrl = CFURLCreateWithString(NULL, pacStr, NULL); - CFURLRef reqUrl = CFURLCreateWithString(NULL, reqStr, NULL); - - CFTypeRef result = NULL; - CFStreamClientContext context = { 0, &result, NULL, NULL, NULL }; - CFRunLoopSourceRef runloop_src = CFNetworkExecuteProxyAutoConfigurationURL(pacUrl, reqUrl, proxyAutoConfCallback, &context); - - if (runloop_src) { - const CFStringRef private_runloop_mode = CFSTR("go-ieproxy"); - CFRunLoopAddSource(CFRunLoopGetCurrent(), runloop_src, private_runloop_mode); - CFRunLoopRunInMode(private_runloop_mode, DBL_MAX, false); - CFRunLoopRemoveSource(CFRunLoopGetCurrent(), runloop_src, kCFRunLoopCommonModes); - - if (CFGetTypeID(result) == CFArrayGetTypeID()) { - CFArrayRef resultArray = (CFTypeRef)result; - if 
(CFArrayGetCount(resultArray) > 0) { - CFDictionaryRef pxy = (CFDictionaryRef)CFArrayGetValueAtIndex(resultArray, 0); - CFStringRef pxyType = CFDictionaryGetValue(pxy, kCFProxyTypeKey); - - if (CFEqual(pxyType, kCFProxyTypeNone)) { - // noop - } - - if (CFEqual(pxyType, kCFProxyTypeHTTP)) { - CFStringRef host = (CFStringRef)CFDictionaryGetValue(pxy, kCFProxyHostNameKey); - CFNumberRef port = (CFNumberRef)CFDictionaryGetValue(pxy, kCFProxyPortNumberKey); - - char host_str[STR_LEN - 16]; - CFStringGetCString(host, host_str, STR_LEN - 16, kCFStringEncodingUTF8); - - int port_int = 80; - if (port) { - CFNumberGetValue(port, kCFNumberIntType, &port_int); - } - - sprintf(retCString, "%s:%d", host_str, port_int); - } - } - } else { - // error - } - } - - CFRelease(result); - CFRelease(reqStr); - CFRelease(reqUrl); - CFRelease(pacStr); - CFRelease(pacUrl); - return retCString; -} - -char* _getPacUrl() { - char* retCString = (char*)calloc(STR_LEN, sizeof(char)); - CFDictionaryRef proxyDict = CFNetworkCopySystemProxySettings(); - CFNumberRef pacEnable = (CFNumberRef)CFDictionaryGetValue(proxyDict, kCFNetworkProxiesProxyAutoConfigEnable); - - if (pacEnable && intCFNumber(pacEnable)) { - CFStringRef pacUrlStr = (CFStringRef)CFDictionaryGetValue(proxyDict, kCFNetworkProxiesProxyAutoConfigURLString); - if (pacUrlStr) { - CFStringGetCString(pacUrlStr, retCString, STR_LEN, kCFStringEncodingUTF8); - } - } - - CFRelease(proxyDict); - return retCString; -} - -*/ -import "C" -import ( - "net/url" - "unsafe" -) - -func (psc *ProxyScriptConf) findProxyForURL(URL string) string { - if !psc.Active { - return "" - } - proxy := getProxyForURL(psc.PreConfiguredURL, URL) - return proxy -} - -func getProxyForURL(pacFileURL, targetURL string) string { - if pacFileURL == "" { - pacFileURL = getPacUrl() - } - if pacFileURL == "" { - return "" - } - if u, err := url.Parse(pacFileURL); err != nil || u.Scheme == "" { - return "" - } - - csUrl := C.CString(targetURL) - csPac := C.CString(pacFileURL) 
- csRet := C._getProxyUrlFromPac(csPac, csUrl) - - defer C.free(unsafe.Pointer(csUrl)) - defer C.free(unsafe.Pointer(csPac)) - defer C.free(unsafe.Pointer(csRet)) - - return C.GoString(csRet) -} - -func getPacUrl() string { - csRet := C._getPacUrl() - - defer C.free(unsafe.Pointer(csRet)) - return C.GoString(csRet) -} diff --git a/vendor/github.com/mattn/go-ieproxy/pac_unix.go b/vendor/github.com/mattn/go-ieproxy/pac_unix.go index e04c9fbbb2c..d44ec3cca24 100644 --- a/vendor/github.com/mattn/go-ieproxy/pac_unix.go +++ b/vendor/github.com/mattn/go-ieproxy/pac_unix.go @@ -1,4 +1,4 @@ -// +build !windows,!darwin +// +build !windows package ieproxy diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go deleted file mode 100644 index a89948dca65..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go +++ /dev/null @@ -1,43 +0,0 @@ -package ieproxy - -import ( - "net/http" - "net/url" - - "golang.org/x/net/http/httpproxy" -) - -func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { - // Get the proxy configuration - conf := GetConf() - envCfg := httpproxy.FromEnvironment() - - if envCfg.HTTPProxy != "" || envCfg.HTTPSProxy != "" { - // If the user manually specifies environment variables, prefer those over the MacOS config. - return http.ProxyFromEnvironment - } - - return func(req *http.Request) (i *url.URL, e error) { - if conf.Automatic.Active { - host := conf.Automatic.FindProxyForURL(req.URL.String()) - if host != "" { - return &url.URL{Host: host}, nil - } - } - if conf.Static.Active { - return staticProxy(conf, req) - } - // Should return no proxy; fallthrough. 
- return http.ProxyFromEnvironment(req) - } -} - -func staticProxy(conf ProxyConf, req *http.Request) (i *url.URL, e error) { - // If static proxy obtaining is specified - proxy := httpproxy.Config{ - HTTPSProxy: conf.Static.Protocols["https"], - HTTPProxy: conf.Static.Protocols["http"], - NoProxy: conf.Static.NoProxy, - } - return proxy.ProxyFunc()(req.URL) -} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go index 16b4eba48c6..d0b16ec288c 100644 --- a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go +++ b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go @@ -1,4 +1,4 @@ -// +build !windows,!darwin +// +build !windows package ieproxy diff --git a/vendor/modules.txt b/vendor/modules.txt index a7d04462ab7..70397d4aaf3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -214,6 +214,16 @@ github.com/Shopify/sarama # github.com/andybalholm/brotli v1.0.4 ## explicit; go 1.12 github.com/andybalholm/brotli +# github.com/arangodb/go-driver v1.4.0 +## explicit; go 1.16 +github.com/arangodb/go-driver +github.com/arangodb/go-driver/cluster +github.com/arangodb/go-driver/http +github.com/arangodb/go-driver/jwt +github.com/arangodb/go-driver/util +# github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e +## explicit; go 1.12 +github.com/arangodb/go-velocypack # github.com/armon/go-metrics v0.4.1 ## explicit; go 1.12 github.com/armon/go-metrics @@ -450,6 +460,9 @@ github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys +# github.com/golang-jwt/jwt v3.2.2+incompatible +## explicit +github.com/golang-jwt/jwt # github.com/golang-jwt/jwt/v4 v4.4.2 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 @@ -746,7 +759,7 @@ github.com/mailru/easyjson/jwriter # github.com/mattn/go-colorable v0.1.13 ## explicit; go 1.15 github.com/mattn/go-colorable -# 
github.com/mattn/go-ieproxy v0.0.7 +# github.com/mattn/go-ieproxy v0.0.1 ## explicit; go 1.14 github.com/mattn/go-ieproxy # github.com/mattn/go-isatty v0.0.16