diff --git a/apps/docs/errors/unkey/data/migration_not_found.mdx b/apps/docs/errors/unkey/data/migration_not_found.mdx new file mode 100644 index 0000000000..1e1812dd29 --- /dev/null +++ b/apps/docs/errors/unkey/data/migration_not_found.mdx @@ -0,0 +1,51 @@ +--- +title: "migration_not_found" +description: "The requested Key Migration was not found" +--- + +err:unkey:data:migration_not_found + +```json Example +{ + "meta": { + "requestId": "req_2c9a0jf23l4k567" + }, + "error": { + "detail": "The requested Migration could not be found", + "status": 404, + "title": "Not Found", + "type": "https://unkey.com/docs/api-reference/errors-v2/unkey/data/migration_not_found" + } +} +``` + +## What Happened? + +This error occurs when you're trying to migrate api keys for a migration that doesn't exist in the Unkey system. + +Common scenarios that trigger this error: + +- Using an incorrect or expired migrationId +- The migration was deleted +- The migration belongs to a different workspace +- Typos in the migrationId + +Here's an example of a request that would trigger this error: + +```bash +# Attempting to migrate keys with a non-existent migrationId +curl -X POST https://api.unkey.com/v2/keys.migrateKeys \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer " \ + -d '{ + "apiId": "api_123", + "migrationId": "migration_456", + "keys": [{ "hash": "deadbeef" }] + }' +``` + +## How To Fix + + + If you’re unsure about your migrationId or setup, contact support@unkey.com. + diff --git a/go/Makefile b/go/Makefile index ad10d9ebc5..a3176ae3e1 100644 --- a/go/Makefile +++ b/go/Makefile @@ -51,6 +51,7 @@ build: generate: buf generate go generate ./... + go fmt ./... 
generate-builder: buf generate --path proto/deploy/builderd diff --git a/go/apps/api/openapi/config.yaml b/go/apps/api/openapi/config.yaml index 48787eec13..9728bf4d77 100644 --- a/go/apps/api/openapi/config.yaml +++ b/go/apps/api/openapi/config.yaml @@ -3,7 +3,6 @@ package: openapi output: ./gen.go generate: models: true - output-options: nullable-type: true overlay: diff --git a/go/apps/api/openapi/gen.go b/go/apps/api/openapi/gen.go index 2390dff60e..360a2a0c71 100644 --- a/go/apps/api/openapi/gen.go +++ b/go/apps/api/openapi/gen.go @@ -963,6 +963,101 @@ type V2KeysGetKeyResponseBody struct { Meta Meta `json:"meta"` } +// V2KeysMigrateKeyData defines model for V2KeysMigrateKeyData. +type V2KeysMigrateKeyData struct { + // Credits Credit configuration and remaining balance for this key. + Credits *KeyCreditsData `json:"credits,omitempty"` + + // Enabled Controls whether the key is active immediately upon creation. + // When set to `false`, the key exists but all verification attempts fail with `code=DISABLED`. + // Useful for pre-creating keys that will be activated later or for keys requiring manual approval. + // Most keys should be created with `enabled=true` for immediate use. + Enabled *bool `json:"enabled,omitempty"` + + // Expires Sets when this key automatically expires as a Unix timestamp in milliseconds. + // Verification fails with code=EXPIRED immediately after this time passes. + // Omitting this field creates a permanent key that never expires. + // + // Avoid setting timestamps in the past as they immediately invalidate the key. + // Keys expire based on server time, not client time, which prevents timezone-related issues. + // Essential for trial periods, temporary access, and security compliance requiring key rotation. + Expires *int64 `json:"expires,omitempty"` + + // ExternalId Links this key to a user or entity in your system using your own identifier. 
+ // Returned during verification to identify the key owner without additional database lookups. + // Essential for user-specific analytics, billing, and multi-tenant key management. + // Use your primary user ID, organization ID, or tenant ID for best results. + // Accepts letters, numbers, underscores, dots, and hyphens for flexible identifier formats. + ExternalId *string `json:"externalId,omitempty"` + + // Hash The current hash of the key on your side + Hash string `json:"hash"` + + // Meta Stores arbitrary JSON metadata returned during key verification for contextual information. + // Eliminates additional database lookups during verification, improving performance for stateless services. + // Avoid storing sensitive data here as it's returned in verification responses. + // Large metadata objects increase verification latency and should stay under 10KB total size. + Meta *map[string]interface{} `json:"meta,omitempty"` + + // Name Sets a human-readable identifier for internal organization and dashboard display. + // Never exposed to end users, only visible in management interfaces and API responses. + // Avoid generic names like "API Key" when managing multiple keys for the same user or service. + Name *string `json:"name,omitempty"` + + // Permissions Grants specific permissions directly to this key without requiring role membership. + // Wildcard permissions like `documents.*` grant access to all sub-permissions including `documents.read` and `documents.write`. + // Direct permissions supplement any permissions inherited from assigned roles. + Permissions *[]string `json:"permissions,omitempty"` + + // Ratelimits Defines time-based rate limits that protect against abuse by controlling request frequency. + // Unlike credits which track total usage, rate limits reset automatically after each window expires. + // Multiple rate limits can control different operation types with separate thresholds and windows. 
+ // Essential for preventing API abuse while maintaining good performance for legitimate usage. + Ratelimits *[]RatelimitRequest `json:"ratelimits,omitempty"` + + // Roles Assigns existing roles to this key for permission management through role-based access control. + // Roles must already exist in your workspace before assignment. + // During verification, all permissions from assigned roles are checked against requested permissions. + // Roles provide a convenient way to group permissions and apply consistent access patterns across multiple keys. + Roles *[]string `json:"roles,omitempty"` +} + +// V2KeysMigrateKeysMigration defines model for V2KeysMigrateKeysMigration. +type V2KeysMigrateKeysMigration struct { + // Hash The hash provided in the migration request + Hash string `json:"hash"` + + // KeyId The unique identifier for this key in Unkey's system. This is NOT the actual API key, but a reference ID used for management operations like updating or deleting the key. Store this ID in your database to reference the key later. This ID is not sensitive and can be logged or displayed in dashboards. + KeyId string `json:"keyId"` +} + +// V2KeysMigrateKeysRequestBody defines model for V2KeysMigrateKeysRequestBody. +type V2KeysMigrateKeysRequestBody struct { + // ApiId The ID of the API that the keys should be inserted into + ApiId string `json:"apiId"` + Keys []V2KeysMigrateKeyData `json:"keys"` + + // MigrationId Identifier of the configured migration provider/strategy to use (e.g., "your_company"). + MigrationId string `json:"migrationId"` +} + +// V2KeysMigrateKeysResponseBody defines model for V2KeysMigrateKeysResponseBody. +type V2KeysMigrateKeysResponseBody struct { + Data V2KeysMigrateKeysResponseData `json:"data"` + + // Meta Metadata object included in every API response. This provides context about the request and is essential for debugging, audit trails, and support inquiries. 
The `requestId` is particularly important when troubleshooting issues with the Unkey support team. + Meta Meta `json:"meta"` +} + +// V2KeysMigrateKeysResponseData defines model for V2KeysMigrateKeysResponseData. +type V2KeysMigrateKeysResponseData struct { + // Failed Hashes that could not be migrated (e.g., already exist in the system) + Failed []string `json:"failed"` + + // Migrated Successfully migrated keys with their hash and generated keyId + Migrated []V2KeysMigrateKeysMigration `json:"migrated"` +} + // V2KeysRemovePermissionsRequestBody defines model for V2KeysRemovePermissionsRequestBody. type V2KeysRemovePermissionsRequestBody struct { // KeyId Specifies which key to remove permissions from using the database identifier returned from `keys.createKey`. @@ -1317,6 +1412,9 @@ type V2KeysVerifyKeyRequestBody struct { // Include any prefix - even small changes will cause verification to fail. Key string `json:"key"` + // MigrationId Migrate keys on demand from your previous system. Reach out for migration support at support@unkey.dev + MigrationId *string `json:"migrationId,omitempty"` + // Permissions Checks if the key has the specified permission(s) using a query syntax. // Supports single permissions, logical operators (AND, OR), and parentheses for grouping. // Examples: @@ -2026,6 +2124,9 @@ type DeleteKeyJSONRequestBody = V2KeysDeleteKeyRequestBody // GetKeyJSONRequestBody defines body for GetKey for application/json ContentType. type GetKeyJSONRequestBody = V2KeysGetKeyRequestBody +// MigrateKeysJSONRequestBody defines body for MigrateKeys for application/json ContentType. +type MigrateKeysJSONRequestBody = V2KeysMigrateKeysRequestBody + // RemovePermissionsJSONRequestBody defines body for RemovePermissions for application/json ContentType. 
type RemovePermissionsJSONRequestBody = V2KeysRemovePermissionsRequestBody diff --git a/go/apps/api/openapi/openapi-generated.yaml b/go/apps/api/openapi/openapi-generated.yaml index ff5134dfef..0ac4463b54 100644 --- a/go/apps/api/openapi/openapi-generated.yaml +++ b/go/apps/api/openapi/openapi-generated.yaml @@ -615,7 +615,7 @@ components: name: type: string minLength: 1 - maxLength: 200 + maxLength: 255 description: | Sets a human-readable identifier for internal organization and dashboard display. Never exposed to end users, only visible in management interfaces and API responses. @@ -841,6 +841,41 @@ components: "$ref": "#/components/schemas/Meta" data: "$ref": "#/components/schemas/KeyResponseData" + V2KeysMigrateKeysRequestBody: + type: object + properties: + migrationId: + type: string + minLength: 3 + maxLength: 255 + description: Identifier of the configured migration provider/strategy to use (e.g., "your_company"). + example: your_company + apiId: + type: string + minLength: 3 + maxLength: 255 + description: The ID of the API that the keys should be inserted into + example: api_123456789 + keys: + type: array + minItems: 1 + items: + $ref: "#/components/schemas/V2KeysMigrateKeyData" + additionalProperties: false + required: + - migrationId + - apiId + - keys + V2KeysMigrateKeysResponseBody: + type: object + required: + - meta + - data + properties: + meta: + "$ref": "#/components/schemas/Meta" + data: + "$ref": "#/components/schemas/V2KeysMigrateKeysResponseData" V2KeysRemovePermissionsRequestBody: type: object required: @@ -1316,6 +1351,11 @@ components: Omitting this field skips rate limit checks entirely, relying only on configured key rate limits. Multiple rate limits can be checked simultaneously, each with different costs and temporary overrides. Rate limit checks are optimized for performance but may allow brief bursts during high concurrency. 
+ migrationId: + type: string + maxLength: 256 + description: Migrate keys on demand from your previous system. Reach out for migration support at support@unkey.dev + example: "m_1234abcd" V2KeysVerifyKeyResponseBody: type: object required: @@ -2513,6 +2553,165 @@ components: required: - keyId - key + V2KeysMigrateKeyData: + type: object + properties: + hash: + type: string + minLength: 3 + description: The current hash of the key on your side + example: qwerty123 + name: + type: string + minLength: 1 + maxLength: 255 + description: | + Sets a human-readable identifier for internal organization and dashboard display. + Never exposed to end users, only visible in management interfaces and API responses. + Avoid generic names like "API Key" when managing multiple keys for the same user or service. + example: Payment Service Production Key + externalId: + type: string + minLength: 1 + maxLength: 255 + description: | + Links this key to a user or entity in your system using your own identifier. + Returned during verification to identify the key owner without additional database lookups. + Essential for user-specific analytics, billing, and multi-tenant key management. + Use your primary user ID, organization ID, or tenant ID for best results. + Accepts letters, numbers, underscores, dots, and hyphens for flexible identifier formats. + example: user_1234abcd + meta: + type: object + additionalProperties: true + maxProperties: 100 + description: | + Stores arbitrary JSON metadata returned during key verification for contextual information. + Eliminates additional database lookups during verification, improving performance for stateless services. + Avoid storing sensitive data here as it's returned in verification responses. + Large metadata objects increase verification latency and should stay under 10KB total size. 
+ example: + plan: enterprise + featureFlags: + betaAccess: true + concurrentConnections: 10 + customerName: Acme Corp + billing: + tier: premium + renewal: "2024-12-31" + roles: + type: array + maxItems: 100 + items: + type: string + minLength: 1 + maxLength: 100 + description: | + Assigns existing roles to this key for permission management through role-based access control. + Roles must already exist in your workspace before assignment. + During verification, all permissions from assigned roles are checked against requested permissions. + Roles provide a convenient way to group permissions and apply consistent access patterns across multiple keys. + example: + - api_admin + - billing_reader + permissions: + type: array + maxItems: 1000 + items: + type: string + minLength: 1 + maxLength: 100 + description: | + Grants specific permissions directly to this key without requiring role membership. + Wildcard permissions like `documents.*` grant access to all sub-permissions including `documents.read` and `documents.write`. + Direct permissions supplement any permissions inherited from assigned roles. + example: + - documents.read + - documents.write + - settings.view + expires: + type: integer + format: int64 + minimum: 0 + maximum: 4102444800000 + description: | + Sets when this key automatically expires as a Unix timestamp in milliseconds. + Verification fails with code=EXPIRED immediately after this time passes. + Omitting this field creates a permanent key that never expires. + + Avoid setting timestamps in the past as they immediately invalidate the key. + Keys expire based on server time, not client time, which prevents timezone-related issues. + Essential for trial periods, temporary access, and security compliance requiring key rotation. + enabled: + type: boolean + default: true + description: | + Controls whether the key is active immediately upon creation. + When set to `false`, the key exists but all verification attempts fail with `code=DISABLED`. 
+ Useful for pre-creating keys that will be activated later or for keys requiring manual approval. + Most keys should be created with `enabled=true` for immediate use. + example: true + credits: + "$ref": "#/components/schemas/KeyCreditsData" + description: | + Controls usage-based limits through credit consumption with optional automatic refills. + Unlike rate limits which control frequency, credits control total usage with global consistency. + Essential for implementing usage-based pricing, subscription tiers, and hard usage quotas. + Omitting this field creates unlimited usage, while setting null is not allowed during creation. + ratelimits: + type: array + maxItems: 50 + items: + "$ref": "#/components/schemas/RatelimitRequest" + description: | + Defines time-based rate limits that protect against abuse by controlling request frequency. + Unlike credits which track total usage, rate limits reset automatically after each window expires. + Multiple rate limits can control different operation types with separate thresholds and windows. + Essential for preventing API abuse while maintaining good performance for legitimate usage. 
+ example: + - name: requests + limit: 100 + duration: 60000 + autoApply: true + - name: heavy_operations + limit: 10 + duration: 3600000 + autoApply: false + additionalProperties: false + required: + - hash + V2KeysMigrateKeysResponseData: + type: object + required: + - migrated + - failed + properties: + migrated: + type: array + description: Successfully migrated keys with their hash and generated keyId + items: + "$ref": "#/components/schemas/V2KeysMigrateKeysMigration" + failed: + type: array + description: Hashes that could not be migrated (e.g., already exist in the system) + items: + type: string + description: The hash that failed to migrate + example: sha256_ghi789jkl012 + V2KeysMigrateKeysMigration: + type: object + required: + - hash + - keyId + properties: + hash: + type: string + description: The hash provided in the migration request + example: sha256_abc123def456 + keyId: + type: string + description: The unique identifier for this key in Unkey's system. This is NOT the actual API key, but a reference ID used for management operations like updating or deleting the key. Store this ID in your database to reference the key later. This ID is not sensitive and can be logged or displayed in dashboards. + example: key_2cGKbMxRyIzhCxo1Idjz8q V2KeysRemovePermissionsResponseData: type: array description: |- @@ -4291,6 +4490,66 @@ paths: tags: - keys x-speakeasy-name-override: getKey + /v2/keys.migrateKeys: + post: + description: | + Returns HTTP 200 even on partial success; hashes that could not be migrated are listed under `data.failed`. + Example usage provided in the docs and tests. 
+ + **Required Permissions** + Your root key must have one of the following permissions for basic key information: + - `api.*.create_key` (to migrate keys to any API) + - `api..create_key` (to migrate keys to a specific API) + operationId: migrateKeys + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/V2KeysMigrateKeysRequestBody' + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/V2KeysMigrateKeysResponseBody' + description: Successfully migrated keys. + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/BadRequestErrorResponse' + description: Bad request + "401": + content: + application/json: + schema: + $ref: '#/components/schemas/UnauthorizedErrorResponse' + description: Unauthorized + "403": + content: + application/json: + schema: + $ref: '#/components/schemas/ForbiddenErrorResponse' + description: Forbidden + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/NotFoundErrorResponse' + description: Not found + "500": + content: + application/json: + schema: + $ref: '#/components/schemas/InternalServerErrorResponse' + description: Internal server error + security: + - rootKey: [] + summary: Migrate API key(s) + tags: + - keys + x-speakeasy-name-override: migrateKeys /v2/keys.removePermissions: post: description: | diff --git a/go/apps/api/openapi/openapi-split.yaml b/go/apps/api/openapi/openapi-split.yaml index 1ded55ff29..9b586b0841 100644 --- a/go/apps/api/openapi/openapi-split.yaml +++ b/go/apps/api/openapi/openapi-split.yaml @@ -168,6 +168,8 @@ paths: $ref: "./spec/paths/v2/keys/whoami/index.yaml" /v2/keys.verifyKey: $ref: "./spec/paths/v2/keys/verifyKey/index.yaml" + /v2/keys.migrateKeys: + $ref: "./spec/paths/v2/keys/migrateKeys/index.yaml" # Ratelimit Endpoints /v2/ratelimit.limit: diff --git a/go/apps/api/openapi/spec/paths/v2/keys/createKey/V2KeysCreateKeyRequestBody.yaml 
b/go/apps/api/openapi/spec/paths/v2/keys/createKey/V2KeysCreateKeyRequestBody.yaml index 09d89085a4..27088810a8 100644 --- a/go/apps/api/openapi/spec/paths/v2/keys/createKey/V2KeysCreateKeyRequestBody.yaml +++ b/go/apps/api/openapi/spec/paths/v2/keys/createKey/V2KeysCreateKeyRequestBody.yaml @@ -24,7 +24,7 @@ properties: name: type: string minLength: 1 - maxLength: 200 # Human-readable names should be concise but descriptive + maxLength: 255 description: | Sets a human-readable identifier for internal organization and dashboard display. Never exposed to end users, only visible in management interfaces and API responses. diff --git a/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeyData.yaml b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeyData.yaml new file mode 100644 index 0000000000..1bec0a73dc --- /dev/null +++ b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeyData.yaml @@ -0,0 +1,131 @@ +type: object +properties: + hash: + type: string + minLength: 3 + description: The current hash of the key on your side + example: qwerty123 + name: + type: string + minLength: 1 + maxLength: 255 + description: | + Sets a human-readable identifier for internal organization and dashboard display. + Never exposed to end users, only visible in management interfaces and API responses. + Avoid generic names like "API Key" when managing multiple keys for the same user or service. + example: Payment Service Production Key + externalId: + type: string + minLength: 1 + maxLength: 255 + description: | + Links this key to a user or entity in your system using your own identifier. + Returned during verification to identify the key owner without additional database lookups. + Essential for user-specific analytics, billing, and multi-tenant key management. + Use your primary user ID, organization ID, or tenant ID for best results. + Accepts letters, numbers, underscores, dots, and hyphens for flexible identifier formats. 
+ example: user_1234abcd + meta: + type: object + additionalProperties: true + maxProperties: 100 # Prevent DoS while allowing rich metadata + description: | + Stores arbitrary JSON metadata returned during key verification for contextual information. + Eliminates additional database lookups during verification, improving performance for stateless services. + Avoid storing sensitive data here as it's returned in verification responses. + Large metadata objects increase verification latency and should stay under 10KB total size. + example: + plan: enterprise + featureFlags: + betaAccess: true + concurrentConnections: 10 + customerName: Acme Corp + billing: + tier: premium + renewal: "2024-12-31" + roles: + type: array + maxItems: 100 # Reasonable limit for role assignments per key + items: + type: string + minLength: 1 + maxLength: 100 # Keep role names concise and readable + description: | + Assigns existing roles to this key for permission management through role-based access control. + Roles must already exist in your workspace before assignment. + During verification, all permissions from assigned roles are checked against requested permissions. + Roles provide a convenient way to group permissions and apply consistent access patterns across multiple keys. + example: + - api_admin + - billing_reader + permissions: + type: array + maxItems: 1000 # Allow extensive permission sets for complex applications + items: + type: string + minLength: 1 + maxLength: 100 # Keep permission names concise and readable + description: | + Grants specific permissions directly to this key without requiring role membership. + Wildcard permissions like `documents.*` grant access to all sub-permissions including `documents.read` and `documents.write`. + Direct permissions supplement any permissions inherited from assigned roles. 
+ example: + - documents.read + - documents.write + - settings.view + expires: + type: integer + format: int64 + minimum: 0 + maximum: 4102444800000 # January 1, 2100 - reasonable future limit + description: | + Sets when this key automatically expires as a Unix timestamp in milliseconds. + Verification fails with code=EXPIRED immediately after this time passes. + Omitting this field creates a permanent key that never expires. + + Avoid setting timestamps in the past as they immediately invalidate the key. + Keys expire based on server time, not client time, which prevents timezone-related issues. + Essential for trial periods, temporary access, and security compliance requiring key rotation. + enabled: + type: boolean + default: true + description: | + Controls whether the key is active immediately upon creation. + When set to `false`, the key exists but all verification attempts fail with `code=DISABLED`. + Useful for pre-creating keys that will be activated later or for keys requiring manual approval. + Most keys should be created with `enabled=true` for immediate use. + example: true + credits: + "$ref": "../../../../common/KeyCreditsData.yaml" + description: | + Controls usage-based limits through credit consumption with optional automatic refills. + Unlike rate limits which control frequency, credits control total usage with global consistency. + Essential for implementing usage-based pricing, subscription tiers, and hard usage quotas. + Omitting this field creates unlimited usage, while setting null is not allowed during creation. + ratelimits: + type: array + maxItems: 50 # Reasonable limit for rate limit configurations per identity + items: + "$ref": "../../../../common/RatelimitRequest.yaml" + description: | + Defines time-based rate limits that protect against abuse by controlling request frequency. + Unlike credits which track total usage, rate limits reset automatically after each window expires. 
+ Multiple rate limits can control different operation types with separate thresholds and windows. + Essential for preventing API abuse while maintaining good performance for legitimate usage. + example: + - name: requests + limit: 100 + duration: 60000 + autoApply: true + - name: heavy_operations + limit: 10 + duration: 3600000 + autoApply: false +additionalProperties: false +required: + - hash +examples: + basicKey: + summary: Basic key + description: Migrate this basic key + value: diff --git a/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysMigration.yaml b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysMigration.yaml new file mode 100644 index 0000000000..6edf0c83ec --- /dev/null +++ b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysMigration.yaml @@ -0,0 +1,17 @@ +type: object +required: + - hash + - keyId +properties: + hash: + type: string + description: The hash provided in the migration request + example: sha256_abc123def456 + keyId: + type: string + description: The unique identifier for this key in Unkey's system. This + is NOT the actual API key, but a reference ID used for management operations + like updating or deleting the key. Store this ID in your database to reference + the key later. This ID is not sensitive and can be logged or displayed + in dashboards. + example: key_2cGKbMxRyIzhCxo1Idjz8q \ No newline at end of file diff --git a/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysRequestBody.yaml b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysRequestBody.yaml new file mode 100644 index 0000000000..b6888a732e --- /dev/null +++ b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysRequestBody.yaml @@ -0,0 +1,32 @@ +type: object +properties: + migrationId: + type: string + minLength: 3 + maxLength: 255 + description: Identifier of the configured migration provider/strategy to use (e.g., "your_company"). 
+ example: your_company + apiId: + type: string + minLength: 3 + maxLength: 255 + description: The ID of the API that the keys should be inserted into + example: api_123456789 + keys: + type: array + minItems: 1 + items: + $ref: "./V2KeysMigrateKeyData.yaml" +additionalProperties: false +required: + - migrationId + - apiId + - keys +examples: + basic: + summary: Basic migration example + description: A simple migration example with a single key + value: + migrationId: your_company + apiId: api_123456789 + keys: [] diff --git a/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysResponseBody.yaml b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysResponseBody.yaml new file mode 100644 index 0000000000..8fa652ef28 --- /dev/null +++ b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysResponseBody.yaml @@ -0,0 +1,9 @@ +type: object +required: + - meta + - data +properties: + meta: + "$ref": "../../../../common/Meta.yaml" + data: + "$ref": "./V2KeysMigrateKeysResponseData.yaml" diff --git a/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysResponseData.yaml b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysResponseData.yaml new file mode 100644 index 0000000000..69cc1537de --- /dev/null +++ b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/V2KeysMigrateKeysResponseData.yaml @@ -0,0 +1,17 @@ +type: object +required: + - migrated + - failed +properties: + migrated: + type: array + description: Successfully migrated keys with their hash and generated keyId + items: + "$ref": "./V2KeysMigrateKeysMigration.yaml" + failed: + type: array + description: Hashes that could not be migrated (e.g., already exist in the system) + items: + type: string + description: The hash that failed to migrate + example: sha256_ghi789jkl012 diff --git a/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/index.yaml b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/index.yaml new file mode 100644 index 
0000000000..315308acfe --- /dev/null +++ b/go/apps/api/openapi/spec/paths/v2/keys/migrateKeys/index.yaml @@ -0,0 +1,59 @@ +post: + tags: + - keys + summary: Migrate API key(s) + description: | + Returns HTTP 200 even on partial success; hashes that could not be migrated are listed under `data.failed`. + Example usage provided in the docs and tests. + + **Required Permissions** + Your root key must have one of the following permissions for basic key information: + - `api.*.create_key` (to migrate keys to any API) + - `api..create_key` (to migrate keys to a specific API) + operationId: migrateKeys + x-speakeasy-name-override: migrateKeys + security: + - rootKey: [] + requestBody: + content: + application/json: + schema: + "$ref": "./V2KeysMigrateKeysRequestBody.yaml" + required: true + responses: + "200": + content: + application/json: + schema: + "$ref": "./V2KeysMigrateKeysResponseBody.yaml" + description: Successfully migrated keys. + "400": + description: Bad request + content: + application/json: + schema: + "$ref": "../../../../error/BadRequestErrorResponse.yaml" + "401": + description: Unauthorized + content: + application/json: + schema: + "$ref": "../../../../error/UnauthorizedErrorResponse.yaml" + "403": + description: Forbidden + content: + application/json: + schema: + "$ref": "../../../../error/ForbiddenErrorResponse.yaml" + "404": + description: Not found + content: + application/json: + schema: + "$ref": "../../../../error/NotFoundErrorResponse.yaml" + "500": + description: Internal server error + content: + application/json: + schema: + "$ref": "../../../../error/InternalServerErrorResponse.yaml" diff --git a/go/apps/api/openapi/spec/paths/v2/keys/verifyKey/V2KeysVerifyKeyRequestBody.yaml b/go/apps/api/openapi/spec/paths/v2/keys/verifyKey/V2KeysVerifyKeyRequestBody.yaml index 21c4ab6be8..8071a0b333 100644 --- a/go/apps/api/openapi/spec/paths/v2/keys/verifyKey/V2KeysVerifyKeyRequestBody.yaml +++ 
b/go/apps/api/openapi/spec/paths/v2/keys/verifyKey/V2KeysVerifyKeyRequestBody.yaml @@ -68,3 +68,8 @@ properties: Omitting this field skips rate limit checks entirely, relying only on configured key rate limits. Multiple rate limits can be checked simultaneously, each with different costs and temporary overrides. Rate limit checks are optimized for performance but may allow brief bursts during high concurrency. + migrationId: + type: string + maxLength: 256 + description: Migrate keys on demand from your previous system. Reach out for migration support at support@unkey.dev + example: "m_1234abcd" diff --git a/go/apps/api/routes/register.go b/go/apps/api/routes/register.go index 5e3ac1a17a..7143699d77 100644 --- a/go/apps/api/routes/register.go +++ b/go/apps/api/routes/register.go @@ -40,6 +40,7 @@ import ( v2KeysCreateKey "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_create_key" v2KeysDeleteKey "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_delete_key" v2KeysGetKey "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_get_key" + v2KeysMigrateKeys "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_migrate_keys" v2KeysRemovePermissions "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_remove_permissions" v2KeysRemoveRoles "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_remove_roles" v2KeysRerollKey "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_reroll_key" @@ -376,6 +377,18 @@ func Register(srv *zen.Server, svc *Services) { }, ) + // v2/keys.migrateKeys + srv.RegisterRoute( + defaultMiddlewares, + &v2KeysMigrateKeys.Handler{ + Logger: svc.Logger, + ApiCache: svc.Caches.LiveApiByID, + DB: svc.Database, + Auditlogs: svc.Auditlogs, + Keys: svc.Keys, + }, + ) + // v2/keys.createKey srv.RegisterRoute( defaultMiddlewares, @@ -385,6 +398,7 @@ func Register(srv *zen.Server, svc *Services) { Keys: svc.Keys, Auditlogs: svc.Auditlogs, Vault: svc.Vault, + ApiCache: svc.Caches.LiveApiByID, }, ) diff --git 
a/go/apps/api/routes/v2_keys_create_key/200_test.go b/go/apps/api/routes/v2_keys_create_key/200_test.go index 018e4eedf8..eaaf699eae 100644 --- a/go/apps/api/routes/v2_keys_create_key/200_test.go +++ b/go/apps/api/routes/v2_keys_create_key/200_test.go @@ -26,6 +26,7 @@ func TestCreateKeySuccess(t *testing.T) { Keys: h.Keys, Auditlogs: h.Auditlogs, Vault: h.Vault, + ApiCache: h.Caches.LiveApiByID, } h.Register(route) @@ -77,6 +78,7 @@ func TestCreateKeyWithOptionalFields(t *testing.T) { Logger: h.Logger, Auditlogs: h.Auditlogs, Vault: h.Vault, + ApiCache: h.Caches.LiveApiByID, } h.Register(route) @@ -140,6 +142,7 @@ func TestCreateKeyWithEncryption(t *testing.T) { Logger: h.Logger, Auditlogs: h.Auditlogs, Vault: h.Vault, + ApiCache: h.Caches.LiveApiByID, } h.Register(route) diff --git a/go/apps/api/routes/v2_keys_create_key/400_test.go b/go/apps/api/routes/v2_keys_create_key/400_test.go index 98fc8e791c..eb222eb1f9 100644 --- a/go/apps/api/routes/v2_keys_create_key/400_test.go +++ b/go/apps/api/routes/v2_keys_create_key/400_test.go @@ -24,6 +24,7 @@ func TestCreateKeyBadRequest(t *testing.T) { Logger: h.Logger, Auditlogs: h.Auditlogs, Vault: h.Vault, + ApiCache: h.Caches.LiveApiByID, } h.Register(route) diff --git a/go/apps/api/routes/v2_keys_create_key/401_test.go b/go/apps/api/routes/v2_keys_create_key/401_test.go index 8ccd2b9846..8d5b0d7434 100644 --- a/go/apps/api/routes/v2_keys_create_key/401_test.go +++ b/go/apps/api/routes/v2_keys_create_key/401_test.go @@ -22,6 +22,7 @@ func TestCreateKeyUnauthorized(t *testing.T) { Logger: h.Logger, Auditlogs: h.Auditlogs, Vault: h.Vault, + ApiCache: h.Caches.LiveApiByID, } h.Register(route) diff --git a/go/apps/api/routes/v2_keys_create_key/403_test.go b/go/apps/api/routes/v2_keys_create_key/403_test.go index 08a20a4cfc..a87fbf0774 100644 --- a/go/apps/api/routes/v2_keys_create_key/403_test.go +++ b/go/apps/api/routes/v2_keys_create_key/403_test.go @@ -28,6 +28,7 @@ func TestCreateKeyForbidden(t *testing.T) { Logger: 
h.Logger, Auditlogs: h.Auditlogs, Vault: h.Vault, + ApiCache: h.Caches.LiveApiByID, } h.Register(route) diff --git a/go/apps/api/routes/v2_keys_create_key/404_test.go b/go/apps/api/routes/v2_keys_create_key/404_test.go index bcbeba7692..b9a2d5688c 100644 --- a/go/apps/api/routes/v2_keys_create_key/404_test.go +++ b/go/apps/api/routes/v2_keys_create_key/404_test.go @@ -22,6 +22,7 @@ func TestCreateKeyNotFound(t *testing.T) { Logger: h.Logger, Auditlogs: h.Auditlogs, Vault: h.Vault, + ApiCache: h.Caches.LiveApiByID, } h.Register(route) @@ -43,7 +44,7 @@ func TestCreateKeyNotFound(t *testing.T) { res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) require.Equal(t, 404, res.Status) require.NotNil(t, res.Body) - require.Contains(t, res.Body.Error.Detail, "The specified API was not found") + require.Contains(t, res.Body.Error.Detail, "The requested API does not exist or has been deleted.") }) t.Run("api with valid format but invalid id", func(t *testing.T) { @@ -56,7 +57,7 @@ func TestCreateKeyNotFound(t *testing.T) { res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) require.Equal(t, 404, res.Status) require.NotNil(t, res.Body) - require.Contains(t, res.Body.Error.Detail, "The specified API was not found") + require.Contains(t, res.Body.Error.Detail, "The requested API does not exist or has been deleted.") }) t.Run("api from different workspace", func(t *testing.T) { @@ -84,7 +85,7 @@ func TestCreateKeyNotFound(t *testing.T) { res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, otherHeaders, req) require.Equal(t, 404, res.Status) require.NotNil(t, res.Body) - require.Contains(t, res.Body.Error.Detail, "The specified API was not found") + require.Contains(t, res.Body.Error.Detail, "The requested API does not exist or has been deleted.") }) t.Run("api with minimum valid length but nonexistent", func(t *testing.T) { @@ -97,7 +98,7 @@ func 
TestCreateKeyNotFound(t *testing.T) { res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) require.Equal(t, 404, res.Status) require.NotNil(t, res.Body) - require.Contains(t, res.Body.Error.Detail, "The specified API was not found") + require.Contains(t, res.Body.Error.Detail, "The requested API does not exist or has been deleted.") }) t.Run("deleted api", func(t *testing.T) { @@ -111,7 +112,7 @@ func TestCreateKeyNotFound(t *testing.T) { res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) require.Equal(t, 404, res.Status) require.NotNil(t, res.Body) - require.Contains(t, res.Body.Error.Detail, "The specified API was not found") + require.Contains(t, res.Body.Error.Detail, "The requested API does not exist or has been deleted.") }) } diff --git a/go/apps/api/routes/v2_keys_create_key/412_test.go b/go/apps/api/routes/v2_keys_create_key/412_test.go index a0266f8dcd..aeae3cbd93 100644 --- a/go/apps/api/routes/v2_keys_create_key/412_test.go +++ b/go/apps/api/routes/v2_keys_create_key/412_test.go @@ -27,6 +27,7 @@ func TestPreconditionError(t *testing.T) { Keys: h.Keys, Vault: h.Vault, Auditlogs: h.Auditlogs, + ApiCache: h.Caches.LiveApiByID, } h.Register(route) diff --git a/go/apps/api/routes/v2_keys_create_key/handler.go b/go/apps/api/routes/v2_keys_create_key/handler.go index 904bb06a14..b1425a9efb 100644 --- a/go/apps/api/routes/v2_keys_create_key/handler.go +++ b/go/apps/api/routes/v2_keys_create_key/handler.go @@ -12,9 +12,11 @@ import ( "github.com/unkeyed/unkey/go/apps/api/openapi" "github.com/unkeyed/unkey/go/internal/services/auditlogs" + "github.com/unkeyed/unkey/go/internal/services/caches" "github.com/unkeyed/unkey/go/internal/services/keys" "github.com/unkeyed/unkey/go/pkg/auditlog" + "github.com/unkeyed/unkey/go/pkg/cache" "github.com/unkeyed/unkey/go/pkg/codes" "github.com/unkeyed/unkey/go/pkg/db" dbtype "github.com/unkeyed/unkey/go/pkg/db/types" @@ -38,6 +40,7 @@ 
type Handler struct { Keys keys.KeyService Auditlogs auditlogs.AuditLogService Vault *vault.Service + ApiCache cache.Cache[string, db.FindLiveApiByIDRow] } // Method returns the HTTP method this route responds to @@ -54,20 +57,17 @@ func (h *Handler) Path() string { func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { h.Logger.Debug("handling request", "requestId", s.RequestID(), "path", "/v2/keys.createKey") - // 1. Authentication auth, emit, err := h.Keys.GetRootKey(ctx, s) defer emit() if err != nil { return err } - // 2. Request validation req, err := zen.BindBody[Request](s) if err != nil { return err } - // 3. Permission check err = auth.VerifyRootKey(ctx, keys.WithPermissions(rbac.Or( rbac.T(rbac.Tuple{ ResourceType: rbac.Api, @@ -84,40 +84,31 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { return err } - api, err := db.Query.FindApiByID(ctx, h.DB.RO(), req.ApiId) + api, hit, err := h.ApiCache.SWR(ctx, req.ApiId, func(ctx context.Context) (db.FindLiveApiByIDRow, error) { + return db.Query.FindLiveApiByID(ctx, h.DB.RO(), req.ApiId) + }, caches.DefaultFindFirstOp) if err != nil { if db.IsNotFound(err) { - return fault.New("api not found", + return fault.Wrap( + err, fault.Code(codes.Data.Api.NotFound.URN()), - fault.Internal("api not found"), fault.Public("The specified API was not found."), + fault.Internal("api does not exist"), + fault.Public("The requested API does not exist or has been deleted."), ) } return fault.Wrap(err, fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("database error"), fault.Public("Failed to retrieve API."), + fault.Internal("database error"), + fault.Public("Failed to retrieve API information."), ) } - if api.WorkspaceID != auth.AuthorizedWorkspaceID { + if hit == cache.Null { return fault.New("api not found", fault.Code(codes.Data.Api.NotFound.URN()), - fault.Internal("api belongs to different workspace"), fault.Public("The specified API was not found."), - ) - } - 
- keyAuth, err := db.Query.FindKeyringByID(ctx, h.DB.RO(), api.KeyAuthID.String) - if err != nil { - if db.IsNotFound(err) { - return fault.New("api not set up for keys", - fault.Code(codes.App.Precondition.PreconditionFailed.URN()), - fault.Internal("api not set up for keys, keyauth not found"), fault.Public("The requested API is not set up to handle keys."), - ) - } - - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("database error"), fault.Public("Failed to retrieve API information."), + fault.Internal("api not found"), + fault.Public("The requested API does not exist or has been deleted."), ) } @@ -157,7 +148,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { return err } - if !keyAuth.StoreEncryptedKeys { + if !api.KeyAuth.StoreEncryptedKeys { return fault.New("api not set up for key encryption", fault.Code(codes.App.Precondition.PreconditionFailed.URN()), fault.Internal("api not set up for key encryption"), fault.Public("This API does not support key encryption."), @@ -239,6 +230,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { fault.Public("Failed to create identity."), ) } + insertKeyParams.IdentityID = sql.NullString{Valid: true, String: identityID} } else { // Use existing identity @@ -326,9 +318,8 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { if req.Ratelimits != nil && len(*req.Ratelimits) > 0 { ratelimitsToInsert := make([]db.InsertKeyRatelimitParams, len(*req.Ratelimits)) for i, ratelimit := range *req.Ratelimits { - ratelimitID := uid.New(uid.RatelimitPrefix) ratelimitsToInsert[i] = db.InsertKeyRatelimitParams{ - ID: ratelimitID, + ID: uid.New(uid.RatelimitPrefix), WorkspaceID: auth.AuthorizedWorkspaceID, KeyID: sql.NullString{String: keyID, Valid: true}, Name: ratelimit.Name, diff --git a/go/apps/api/routes/v2_keys_migrate_keys/200_test.go b/go/apps/api/routes/v2_keys_migrate_keys/200_test.go new file mode 100644 index 
0000000000..edb3481939 --- /dev/null +++ b/go/apps/api/routes/v2_keys_migrate_keys/200_test.go @@ -0,0 +1,231 @@ +package handler_test + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" + + "github.com/oapi-codegen/nullable" + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_migrate_keys" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/prefixedapikey" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" +) + +func TestMigrateKeysSuccess(t *testing.T) { + t.Parallel() + + h := testutil.NewHarness(t) + ctx := context.Background() + + route := &handler.Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + Auditlogs: h.Auditlogs, + ApiCache: h.Caches.LiveApiByID, + } + + h.Register(route) + + // Create API using testutil helper + api := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: h.Resources().UserWorkspace.ID, + }) + + err := db.Query.InsertKeyMigration(ctx, h.DB.RW(), db.InsertKeyMigrationParams{ + ID: "unkeyed", + WorkspaceID: h.Resources().UserWorkspace.ID, + Algorithm: db.KeyMigrationsAlgorithmGithubcomSeamapiPrefixedApiKey, + }) + require.NoError(t, err) + + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.create_key") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + generatedKey, err := prefixedapikey.GenerateAPIKey(&prefixedapikey.GenerateAPIKeyOptions{ + KeyPrefix: "unkeyed", + }) + require.NoError(t, err) + + keyToMigrate := openapi.V2KeysMigrateKeyData{ + Hash: generatedKey.LongTokenHash, + Credits: &openapi.KeyCreditsData{ + Remaining: nullable.Nullable[int64]{}, + }, + Enabled: ptr.P(false), + Expires: nil, + ExternalId: ptr.P("ext_123"), + Meta: ptr.P(map[string]interface{}{ + "key": "value", + }), + Name: 
ptr.P("Migration-Key"), + Permissions: ptr.P([]string{"test"}), + Ratelimits: &[]openapi.RatelimitRequest{ + { + AutoApply: true, + Duration: time.Hour.Milliseconds(), + Limit: 100, + Name: "default", + }, + }, + Roles: ptr.P([]string{"admin"}), + } + + t.Run("basic migration", func(t *testing.T) { + req := handler.Request{ + ApiId: api.ID, + MigrationId: "unkeyed", + Keys: []openapi.V2KeysMigrateKeyData{keyToMigrate}, + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.Empty(t, res.Body.Data.Failed) + require.NotEmpty(t, res.Body.Data.Migrated) + require.Equal(t, res.Body.Data.Migrated[0].Hash, generatedKey.LongTokenHash) + + // Verify key was created in database + key, err := db.Query.FindLiveKeyByID(ctx, h.DB.RO(), res.Body.Data.Migrated[0].KeyId) + require.NoError(t, err) + + keydata := db.ToKeyData(key) + + require.Equal(t, res.Body.Data.Migrated[0].KeyId, key.ID) + require.Equal(t, generatedKey.LongTokenHash, key.Hash) + require.Empty(t, keydata.Key.Start) + require.False(t, keydata.Key.Enabled) + require.NotNil(t, keydata.Identity) + require.NotEmpty(t, keydata.Identity.ID) + require.NotEmpty(t, keydata.Key.Name.String) + require.NotEmpty(t, keydata.Key.Meta.String) + require.Len(t, keydata.Permissions, 1) + require.Len(t, keydata.Roles, 1) + require.Len(t, keydata.RolePermissions, 0) + require.Len(t, keydata.Ratelimits, 1) + }) + + t.Run("Finds the correct id's and doesnt double insert", func(t *testing.T) { + // Generate a new key hash for this test + otherGeneratedKey, err := prefixedapikey.GenerateAPIKey(&prefixedapikey.GenerateAPIKeyOptions{ + KeyPrefix: "unkeyed", + }) + require.NoError(t, err) + + // Create a new key with the same identity, permissions, and roles + keyToMigrate2 := openapi.V2KeysMigrateKeyData{ + Hash: otherGeneratedKey.LongTokenHash, + Credits: keyToMigrate.Credits, + Enabled: keyToMigrate.Enabled, + Expires: 
keyToMigrate.Expires, + ExternalId: keyToMigrate.ExternalId, // Same external ID + Meta: keyToMigrate.Meta, + Name: ptr.P("Migration-Key-2"), + Permissions: keyToMigrate.Permissions, // Same permissions + Ratelimits: keyToMigrate.Ratelimits, + Roles: keyToMigrate.Roles, // Same roles + } + + req := handler.Request{ + ApiId: api.ID, + MigrationId: "unkeyed", + Keys: []openapi.V2KeysMigrateKeyData{keyToMigrate2}, + } + + // First, verify the identity, permission, and role exist from the first migration + identity, err := db.Query.FindIdentitiesByExternalId(ctx, h.DB.RO(), db.FindIdentitiesByExternalIdParams{ + WorkspaceID: h.Resources().UserWorkspace.ID, + ExternalIds: []string{"ext_123"}, + }) + require.NoError(t, err, "Identity should exist from first migration") + require.Len(t, identity, 1, "Identity should exist from first migration") + + permissions, err := db.Query.FindPermissionsBySlugs(ctx, h.DB.RO(), db.FindPermissionsBySlugsParams{ + WorkspaceID: h.Resources().UserWorkspace.ID, + Slugs: []string{"test"}, + }) + require.NoError(t, err) + require.Len(t, permissions, 1, "Permission should exist from first migration") + + roles, err := db.Query.FindRolesByNames(ctx, h.DB.RO(), db.FindRolesByNamesParams{ + WorkspaceID: h.Resources().UserWorkspace.ID, + Names: []string{"admin"}, + }) + require.NoError(t, err) + require.Len(t, roles, 1, "Role should exist from first migration") + + // Perform the second migration + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.Empty(t, res.Body.Data.Failed) + require.NotEmpty(t, res.Body.Data.Migrated) + require.Equal(t, res.Body.Data.Migrated[0].Hash, otherGeneratedKey.LongTokenHash) + + // Verify the new key was created with the same identity + key, err := db.Query.FindLiveKeyByID(ctx, h.DB.RO(), res.Body.Data.Migrated[0].KeyId) + require.NoError(t, err) + keydata := db.ToKeyData(key) + + require.NotNil(t, 
keydata.Identity) + require.Equal(t, identity[0].ID, keydata.Identity.ID, "Should reuse existing identity") + require.Equal(t, "ext_123", keydata.Identity.ExternalID) + + // Verify no duplicate identities were created + identities, err := db.Query.FindIdentitiesByExternalId(ctx, h.DB.RO(), db.FindIdentitiesByExternalIdParams{ + WorkspaceID: h.Resources().UserWorkspace.ID, + ExternalIds: []string{"ext_123"}, + Deleted: false, + }) + require.NoError(t, err) + require.Len(t, identities, 1, "Should not create duplicate identities") + + // Verify no duplicate permissions were created + allPermissions, err := db.Query.FindPermissionsBySlugs(ctx, h.DB.RO(), db.FindPermissionsBySlugsParams{ + WorkspaceID: h.Resources().UserWorkspace.ID, + Slugs: []string{"test"}, + }) + require.NoError(t, err) + require.Len(t, allPermissions, 1, "Should not create duplicate permissions") + + // Verify no duplicate roles were created + allRoles, err := db.Query.FindRolesByNames(ctx, h.DB.RO(), db.FindRolesByNamesParams{ + WorkspaceID: h.Resources().UserWorkspace.ID, + Names: []string{"admin"}, + }) + require.NoError(t, err) + require.Len(t, allRoles, 1, "Should not create duplicate roles") + + // Verify the key has the correct permission and role associations + require.Len(t, keydata.Permissions, 1, "Key should have one permission") + require.Equal(t, permissions[0].ID, keydata.Permissions[0].ID) + require.Len(t, keydata.Roles, 1, "Key should have one role") + require.Equal(t, roles[0].ID, keydata.Roles[0].ID) + }) + + t.Run("Fail duplicate hashes", func(t *testing.T) { + req := handler.Request{ + ApiId: api.ID, + MigrationId: "unkeyed", + Keys: []openapi.V2KeysMigrateKeyData{keyToMigrate}, + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.NotEmpty(t, res.Body.Data.Failed) + require.Contains(t, res.Body.Data.Failed, keyToMigrate.Hash, "Hash has to be in failed array") + 
require.Empty(t, res.Body.Data.Migrated) + }) +} diff --git a/go/apps/api/routes/v2_keys_migrate_keys/400_test.go b/go/apps/api/routes/v2_keys_migrate_keys/400_test.go new file mode 100644 index 0000000000..d6e21db54a --- /dev/null +++ b/go/apps/api/routes/v2_keys_migrate_keys/400_test.go @@ -0,0 +1,188 @@ +package handler_test + +import ( + "fmt" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_migrate_keys" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func TestMigrateKeysBadRequest(t *testing.T) { + h := testutil.NewHarness(t) + + route := &handler.Handler{ + DB: h.DB, + Keys: h.Keys, + Logger: h.Logger, + Auditlogs: h.Auditlogs, + ApiCache: h.Caches.LiveApiByID, + } + + h.Register(route) + + // Create API using testutil helper + api := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: h.Resources().UserWorkspace.ID, + }) + + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.create_key") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + t.Run("missing everything", func(t *testing.T) { + req := handler.Request{ + // Missing everything + } + + res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("empty apiId", func(t *testing.T) { + req := handler.Request{ + ApiId: "", + } + + res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("empty migrationId", func(t *testing.T) { + req := handler.Request{ + ApiId: api.ID, + MigrationId: "", + } + + res := 
testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("no keys", func(t *testing.T) { + req := handler.Request{ + ApiId: api.ID, + MigrationId: uid.New(""), + Keys: []openapi.V2KeysMigrateKeyData{}, + } + + res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("empty hash", func(t *testing.T) { + req := handler.Request{ + ApiId: api.ID, + MigrationId: uid.New(""), + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: "", + }, + }, + } + + res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("negative expires timestamp", func(t *testing.T) { + invalidExpires := int64(-1) + req := handler.Request{ + ApiId: api.ID, + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: uid.New("prefix Prefix"), + Expires: &invalidExpires, + }, + }, + } + + res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("empty permission in list", func(t *testing.T) { + emptyPermissions := []string{""} + req := handler.Request{ + ApiId: api.ID, + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: uid.New("prefix Prefix"), + Permissions: &emptyPermissions, + }, + }, + } + + res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("empty role in list", func(t *testing.T) { + emptyRoles := []string{""} + req := handler.Request{ + ApiId: api.ID, + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: uid.New("prefix Prefix"), + Roles: &emptyRoles, + }, + }, + } + + res := 
testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("permission too long", func(t *testing.T) { + // Create a permission string that's longer than 512 characters + longPermission := strings.Repeat("a", 513) + longPermissions := []string{longPermission} + req := handler.Request{ + ApiId: api.ID, + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: uid.New("prefix Prefix"), + Permissions: &longPermissions, + }, + }, + } + + res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("role too long", func(t *testing.T) { + // Create a role string that's longer than 512 characters + req := handler.Request{ + ApiId: api.ID, + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: uid.New("prefix Prefix"), + Roles: ptr.P([]string{strings.Repeat("a", 513)}), + }, + }, + } + + res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + }) +} diff --git a/go/apps/api/routes/v2_keys_migrate_keys/401_test.go b/go/apps/api/routes/v2_keys_migrate_keys/401_test.go new file mode 100644 index 0000000000..b71a96b8c3 --- /dev/null +++ b/go/apps/api/routes/v2_keys_migrate_keys/401_test.go @@ -0,0 +1,87 @@ +package handler_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_migrate_keys" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func TestMigrateKeysUnauthorized(t *testing.T) { + h := testutil.NewHarness(t) + ctx := t.Context() + + route := &handler.Handler{ + 
DB: h.DB, + Keys: h.Keys, + Logger: h.Logger, + Auditlogs: h.Auditlogs, + ApiCache: h.Caches.LiveApiByID, + } + + h.Register(route) + + api := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: h.Resources().UserWorkspace.ID, + }) + + migrationID := uid.New("") + err := db.Query.InsertKeyMigration(ctx, h.DB.RW(), db.InsertKeyMigrationParams{ + ID: migrationID, + WorkspaceID: h.Resources().UserWorkspace.ID, + Algorithm: db.KeyMigrationsAlgorithmGithubcomSeamapiPrefixedApiKey, + }) + require.NoError(t, err) + + // Basic request body + req := handler.Request{ + ApiId: api.ID, + MigrationId: migrationID, + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: uid.New(""), + }, + }, + } + + t.Run("invalid bearer token", func(t *testing.T) { + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {"Bearer invalid_key_12345"}, + } + + res := testutil.CallRoute[handler.Request, openapi.UnauthorizedErrorResponse](h, route, headers, req) + require.Equal(t, 401, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("nonexistent key", func(t *testing.T) { + nonexistentKey := uid.New(uid.KeyPrefix) + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", nonexistentKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.UnauthorizedErrorResponse](h, route, headers, req) + require.Equal(t, 401, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("bearer with extra spaces", func(t *testing.T) { + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {"Bearer invalid_key_with_spaces "}, + } + + res := testutil.CallRoute[handler.Request, openapi.UnauthorizedErrorResponse](h, route, headers, req) + require.Equal(t, 401, res.Status) + require.NotNil(t, res.Body) + }) +} diff --git a/go/apps/api/routes/v2_keys_migrate_keys/403_test.go b/go/apps/api/routes/v2_keys_migrate_keys/403_test.go new file mode 100644 index 0000000000..8f223cf68a --- /dev/null +++ 
b/go/apps/api/routes/v2_keys_migrate_keys/403_test.go @@ -0,0 +1,177 @@ +package handler_test + +import ( + "context" + "database/sql" + "fmt" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_migrate_keys" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func TestMigrateKeysForbidden(t *testing.T) { + h := testutil.NewHarness(t) + ctx := context.Background() + + route := &handler.Handler{ + DB: h.DB, + Keys: h.Keys, + Logger: h.Logger, + Auditlogs: h.Auditlogs, + ApiCache: h.Caches.LiveApiByID, + } + + h.Register(route) + + // Create API for testing + keyAuthID := uid.New(uid.KeyAuthPrefix) + err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ + ID: keyAuthID, + WorkspaceID: h.Resources().UserWorkspace.ID, + CreatedAtM: time.Now().UnixMilli(), + DefaultPrefix: sql.NullString{Valid: false, String: ""}, + DefaultBytes: sql.NullInt32{Valid: false, Int32: 0}, + }) + require.NoError(t, err) + + err = db.Query.UpdateKeyringKeyEncryption(ctx, h.DB.RW(), db.UpdateKeyringKeyEncryptionParams{ + ID: keyAuthID, + StoreEncryptedKeys: true, + }) + require.NoError(t, err) + + apiID := uid.New(uid.APIPrefix) + err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ + ID: apiID, + Name: "test-api", + WorkspaceID: h.Resources().UserWorkspace.ID, + AuthType: db.NullApisAuthType{Valid: true, ApisAuthType: db.ApisAuthTypeKey}, + KeyAuthID: sql.NullString{Valid: true, String: keyAuthID}, + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + // Create another API for cross-API testing + otherKeyAuthID := uid.New(uid.KeyAuthPrefix) + err = db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ + ID: otherKeyAuthID, + WorkspaceID: h.Resources().UserWorkspace.ID, + CreatedAtM: time.Now().UnixMilli(), + DefaultPrefix: 
sql.NullString{Valid: false, String: ""}, + DefaultBytes: sql.NullInt32{Valid: false, Int32: 0}, + }) + require.NoError(t, err) + + otherApiID := uid.New(uid.APIPrefix) + err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ + ID: otherApiID, + Name: "other-api", + WorkspaceID: h.Resources().UserWorkspace.ID, + AuthType: db.NullApisAuthType{Valid: true, ApisAuthType: db.ApisAuthTypeKey}, + KeyAuthID: sql.NullString{Valid: true, String: otherKeyAuthID}, + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + req := handler.Request{ + ApiId: apiID, + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: uid.New(""), + }, + }, + MigrationId: uid.New(""), + } + + t.Run("no permissions", func(t *testing.T) { + // Create root key with no permissions + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID) + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + require.Equal(t, 403, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("wrong permission - has read but not create", func(t *testing.T) { + // Create root key with read permission instead of create + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.read_key") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + require.Equal(t, 403, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("permission for different API", func(t *testing.T) { + // Create root key with create permission for other API + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, fmt.Sprintf("api.%s.create_key", otherApiID)) + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", 
rootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + require.Equal(t, 403, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("permission for specific API but requesting different API", func(t *testing.T) { + // Create root key with create permission for specific API + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, fmt.Sprintf("api.%s.create_key", otherApiID)) + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + // Try to create key for different API + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + require.Equal(t, 403, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("unrelated permission", func(t *testing.T) { + // Create root key with completely unrelated permission + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "workspace.read") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + require.Equal(t, 403, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("partial permission match", func(t *testing.T) { + // Create root key with permission that partially matches but isn't sufficient + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.create") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + require.Equal(t, 403, res.Status) + require.NotNil(t, res.Body) + }) +} diff --git a/go/apps/api/routes/v2_keys_migrate_keys/404_test.go b/go/apps/api/routes/v2_keys_migrate_keys/404_test.go new file mode 100644 index 0000000000..f4a07adb81 --- /dev/null +++ 
b/go/apps/api/routes/v2_keys_migrate_keys/404_test.go @@ -0,0 +1,187 @@ +package handler_test + +import ( + "database/sql" + "fmt" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_migrate_keys" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func TestMigrateKeysNotFound(t *testing.T) { + h := testutil.NewHarness(t) + ctx := t.Context() + + route := &handler.Handler{ + DB: h.DB, + Logger: h.Logger, + Keys: h.Keys, + Auditlogs: h.Auditlogs, + ApiCache: h.Caches.LiveApiByID, + } + + h.Register(route) + + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.create_key") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + api := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: h.Resources().UserWorkspace.ID, + IpWhitelist: "", + EncryptedKeys: false, + Name: nil, + CreatedAt: nil, + DefaultPrefix: nil, + DefaultBytes: nil, + }) + + t.Run("nonexistent api", func(t *testing.T) { + // Use a valid API ID format but one that doesn't exist + nonexistentApiID := uid.New(uid.APIPrefix) + req := handler.Request{ + ApiId: nonexistentApiID, + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: uid.New(""), + }, + }, + MigrationId: uid.New(""), + } + + res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) + require.Equal(t, 404, res.Status) + require.NotNil(t, res.Body) + require.Contains(t, res.Body.Error.Detail, "The requested API does not exist or has been deleted.") + }) + + t.Run("api with valid format but invalid id", func(t *testing.T) { + // Create a syntactically valid but non-existent API ID + fakeApiID := "api_1234567890abcdef" + req := handler.Request{ + ApiId: 
fakeApiID, + MigrationId: "unkeyed", + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: uid.New(""), + }, + }, + } + + res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) + require.Equal(t, 404, res.Status) + require.NotNil(t, res.Body) + require.Contains(t, res.Body.Error.Detail, "The requested API does not exist or has been deleted.") + }) + + t.Run("api from different workspace", func(t *testing.T) { + // Create a different workspace to test cross-workspace isolation + otherWorkspace := h.CreateWorkspace() + + // Create root key for the other workspace with proper permissions + otherRootKey := h.CreateRootKey(otherWorkspace.ID, "api.*.create_key") + + otherApi := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: h.Resources().UserWorkspace.ID, + IpWhitelist: "", + EncryptedKeys: false, + Name: nil, + CreatedAt: nil, + DefaultPrefix: nil, + DefaultBytes: nil, + }) + + req := handler.Request{ + ApiId: otherApi.ID, + MigrationId: "unkeyed", + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: uid.New(""), + }, + }, + } + + otherHeaders := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", otherRootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, otherHeaders, req) + require.Equal(t, 404, res.Status) + require.NotNil(t, res.Body) + require.Contains(t, res.Body.Error.Detail, "The requested API does not exist or has been deleted.") + }) + + t.Run("api with minimum valid length but nonexistent", func(t *testing.T) { + // Test with minimum valid API ID length (3 chars as per validation) + minimalApiID := "api" + req := handler.Request{ + ApiId: minimalApiID, + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: uid.New(""), + }, + }, + MigrationId: uid.New(""), + } + + res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) + require.Equal(t, 404, res.Status) + require.NotNil(t, res.Body) + 
require.Contains(t, res.Body.Error.Detail, "The requested API does not exist or has been deleted.") + }) + + t.Run("deleted api", func(t *testing.T) { + deletedApi := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: h.Resources().UserWorkspace.ID, + IpWhitelist: "", + EncryptedKeys: false, + Name: nil, + CreatedAt: nil, + DefaultPrefix: nil, + DefaultBytes: nil, + }) + + db.Query.SoftDeleteApi(ctx, h.DB.RW(), db.SoftDeleteApiParams{ + Now: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, + ApiID: deletedApi.ID, + }) + + req := handler.Request{ + ApiId: deletedApi.ID, + Keys: []openapi.V2KeysMigrateKeyData{{ + Hash: uid.New(""), + }}, + MigrationId: uid.New(""), + } + + res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) + require.Equal(t, 404, res.Status) + require.NotNil(t, res.Body) + require.Contains(t, res.Body.Error.Detail, "The requested API does not exist or has been deleted.") + }) + + t.Run("migration doesn't exist", func(t *testing.T) { + req := handler.Request{ + ApiId: api.ID, + Keys: []openapi.V2KeysMigrateKeyData{{Hash: uid.New("")}}, + MigrationId: uid.New("some_migration_id"), + } + + res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) + require.Equal(t, 404, res.Status) + require.NotNil(t, res.Body) + require.Contains(t, res.Body.Error.Detail, "The requested Migration does not exist or has been deleted.") + }) +} diff --git a/go/apps/api/routes/v2_keys_migrate_keys/handler.go b/go/apps/api/routes/v2_keys_migrate_keys/handler.go new file mode 100644 index 0000000000..fd5cad4d9e --- /dev/null +++ b/go/apps/api/routes/v2_keys_migrate_keys/handler.go @@ -0,0 +1,659 @@ +package handler + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/unkeyed/unkey/go/apps/api/openapi" + "github.com/unkeyed/unkey/go/internal/services/auditlogs" + "github.com/unkeyed/unkey/go/internal/services/caches" + 
"github.com/unkeyed/unkey/go/internal/services/keys" + + "github.com/unkeyed/unkey/go/pkg/auditlog" + "github.com/unkeyed/unkey/go/pkg/cache" + "github.com/unkeyed/unkey/go/pkg/codes" + "github.com/unkeyed/unkey/go/pkg/db" + dbtype "github.com/unkeyed/unkey/go/pkg/db/types" + "github.com/unkeyed/unkey/go/pkg/fault" + "github.com/unkeyed/unkey/go/pkg/otel/logging" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/rbac" + "github.com/unkeyed/unkey/go/pkg/uid" + "github.com/unkeyed/unkey/go/pkg/zen" +) + +type ( + Request = openapi.V2KeysMigrateKeysRequestBody + Response = openapi.V2KeysMigrateKeysResponseBody +) + +const ( + ChunkSize = 1_000 +) + +type Handler struct { + Logger logging.Logger + DB db.Database + Keys keys.KeyService + Auditlogs auditlogs.AuditLogService + ApiCache cache.Cache[string, db.FindLiveApiByIDRow] +} + +// Method returns the HTTP method this route responds to +func (h *Handler) Method() string { + return "POST" +} + +// Path returns the URL path pattern this route matches +func (h *Handler) Path() string { + return "/v2/keys.migrateKeys" +} + +// Handle processes the HTTP request +func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { + h.Logger.Debug("handling request", "requestId", s.RequestID(), "path", "/v2/keys.migrateKeys") + + auth, emit, err := h.Keys.GetRootKey(ctx, s) + defer emit() + if err != nil { + return err + } + + req, err := zen.BindBody[Request](s) + if err != nil { + return err + } + + err = auth.VerifyRootKey(ctx, keys.WithPermissions(rbac.Or( + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: req.ApiId, + Action: rbac.CreateKey, + }), + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: "*", + Action: rbac.CreateKey, + }), + ))) + if err != nil { + return err + } + + api, hit, err := h.ApiCache.SWR(ctx, req.ApiId, func(ctx context.Context) (db.FindLiveApiByIDRow, error) { + return db.Query.FindLiveApiByID(ctx, h.DB.RO(), req.ApiId) + }, caches.DefaultFindFirstOp) 
+ if err != nil { + if db.IsNotFound(err) { + return fault.Wrap( + err, + fault.Code(codes.Data.Api.NotFound.URN()), + fault.Internal("api does not exist"), + fault.Public("The requested API does not exist or has been deleted."), + ) + } + + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), + fault.Public("Failed to retrieve API information."), + ) + } + + if hit == cache.Null { + return fault.New("api not found", + fault.Code(codes.Data.Api.NotFound.URN()), + fault.Internal("api not found"), + fault.Public("The requested API does not exist or has been deleted."), + ) + } + + // Check if API belongs to the authorized workspace + if api.WorkspaceID != auth.AuthorizedWorkspaceID { + return fault.New("wrong workspace", + fault.Code(codes.Data.Api.NotFound.URN()), + fault.Internal("wrong workspace, masking as 404"), + fault.Public("The requested API does not exist or has been deleted."), + ) + } + + migration, err := db.Query.FindKeyMigrationByID(ctx, h.DB.RO(), db.FindKeyMigrationByIDParams{ID: req.MigrationId, WorkspaceID: auth.AuthorizedWorkspaceID}) + if err != nil { + if db.IsNotFound(err) { + return fault.Wrap( + err, + fault.Code(codes.Data.Migration.NotFound.URN()), + fault.Internal("migration does not exist"), + fault.Public("The requested Migration does not exist or has been deleted."), + ) + } + + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), + fault.Public("Failed to retrieve migration information."), + ) + } + + now := time.Now().UnixMilli() + + var hashes []string + var identitiesToFind []string + var permissionsToFind []string + var rolesToFind []string + + var keysArray []db.InsertKeyParams + var ratelimitsToInsert []db.InsertKeyRatelimitParams + var identitiesToInsert []db.InsertIdentityParams + var keyRolesToInsert []db.InsertKeyRoleParams + var keyPermissionsToInsert []db.InsertKeyPermissionParams + var 
rolesToInsert []db.InsertRoleParams + var permissionsToInsert []db.InsertPermissionParams + + var keysToInsert = make(map[string]db.InsertKeyParams) + var externalIdToIdentityId = make(map[string]*string) + var permissionSlugToPermissionId = make(map[string]*string) + var roleNameToRoleId = make(map[string]*string) + + var auditLogs []auditlog.AuditLog + var failedHashes = make([]string, 0) + + for _, key := range req.Keys { + hashes = append(hashes, key.Hash) + name := ptr.SafeDeref(key.Name) + + newKey := db.InsertKeyParams{ + ID: uid.New(uid.KeyPrefix), + Hash: key.Hash, + KeyringID: api.KeyAuth.ID, + Start: "", // Unknown at this point + WorkspaceID: auth.AuthorizedWorkspaceID, + Name: sql.NullString{Valid: name != "", String: name}, + Meta: sql.NullString{Valid: false, String: ""}, + PendingMigrationID: sql.NullString{Valid: true, String: migration.ID}, + ForWorkspaceID: sql.NullString{Valid: false, String: ""}, + IdentityID: sql.NullString{Valid: false, String: ""}, + Expires: sql.NullTime{Valid: false, Time: time.Time{}}, + CreatedAtM: now, + Enabled: ptr.SafeDeref(key.Enabled, true), + RemainingRequests: sql.NullInt32{Valid: false, Int32: 0}, + RefillDay: sql.NullInt16{Valid: false, Int16: 0}, + RefillAmount: sql.NullInt32{Valid: false, Int32: 0}, + } // nolint:exhaustruct + + if key.Meta != nil { + metaBytes, marshalErr := json.Marshal(*key.Meta) + if marshalErr != nil { + return fault.Wrap(marshalErr, + fault.Code(codes.App.Validation.InvalidInput.URN()), + fault.Internal("failed to marshal meta"), fault.Public("Invalid metadata format."), + ) + } + + newKey.Meta = sql.NullString{String: string(metaBytes), Valid: true} + } + + if key.Expires != nil { + newKey.Expires = sql.NullTime{Time: time.UnixMilli(*key.Expires), Valid: true} + } + + if key.Credits != nil { + if key.Credits.Remaining.IsSpecified() { + newKey.RemainingRequests = sql.NullInt32{ + Int32: int32(key.Credits.Remaining.MustGet()), // nolint:gosec + Valid: true, + } + } + + if 
key.Credits.Refill != nil { + newKey.RefillAmount = sql.NullInt32{ + Int32: int32(key.Credits.Refill.Amount), // nolint:gosec + Valid: true, + } + + if key.Credits.Refill.Interval == openapi.Monthly { + if key.Credits.Refill.RefillDay == nil { + return fault.New("missing refillDay", + fault.Code(codes.App.Validation.InvalidInput.URN()), + fault.Internal("refillDay required for monthly interval"), + fault.Public("`refillDay` must be provided when the refill interval is `monthly`."), + ) + } + + newKey.RefillDay = sql.NullInt16{ + Int16: int16(*key.Credits.Refill.RefillDay), // nolint:gosec + Valid: true, + } + } + } + } + + if key.ExternalId != nil { + identitiesToFind = append(identitiesToFind, *key.ExternalId) + + externalIdToIdentityId[*key.ExternalId] = nil + } + + if key.Permissions != nil { + permissionsToFind = append(permissionsToFind, *key.Permissions...) + + for _, permission := range *key.Permissions { + permissionSlugToPermissionId[permission] = nil + } + } + + if key.Roles != nil { + rolesToFind = append(rolesToFind, *key.Roles...) + + for _, role := range *key.Roles { + roleNameToRoleId[role] = nil + } + } + + // Any other data of the key will be set later down the line. + keysToInsert[key.Hash] = newKey + } + + hashes = deduplicate(hashes) + identitiesToFind = deduplicate(identitiesToFind) + permissionsToFind = deduplicate(permissionsToFind) + rolesToFind = deduplicate(rolesToFind) + + err = db.Tx(ctx, h.DB.RW(), func(ctx context.Context, tx db.DBTX) error { + usedHashes, err := db.Query.FindKeysByHash(ctx, h.DB.RO(), hashes) + if err != nil && !db.IsNotFound(err) { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), + fault.Public("Failed to check for duplicate keys."), + ) + } + + // Respond with that in the response, so the customer knows which keys were already in use. + // and can contact us maybe we can get rid of them. 
+ for _, hash := range usedHashes { + delete(keysToInsert, hash.Hash) + failedHashes = append(failedHashes, hash.Hash) + } + + if len(identitiesToFind) > 0 { + identities, err := db.Query.FindIdentitiesByExternalId(ctx, tx, db.FindIdentitiesByExternalIdParams{ + WorkspaceID: auth.AuthorizedWorkspaceID, + ExternalIds: identitiesToFind, + Deleted: false, + }) + if err != nil && !db.IsNotFound(err) { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), + fault.Public("Failed to check for duplicate identities."), + ) + } + + for _, identity := range identities { + externalIdToIdentityId[identity.ExternalID] = &identity.ID + } + } + + if len(permissionsToFind) > 0 { + permissions, err := db.Query.FindPermissionsBySlugs(ctx, tx, db.FindPermissionsBySlugsParams{ + WorkspaceID: auth.AuthorizedWorkspaceID, + Slugs: permissionsToFind, + }) + if err != nil && !db.IsNotFound(err) { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), + fault.Public("Failed to check for duplicate permissions."), + ) + } + + for _, permission := range permissions { + permissionSlugToPermissionId[permission.Slug] = &permission.ID + } + } + + if len(rolesToFind) > 0 { + roles, err := db.Query.FindRolesByNames(ctx, tx, db.FindRolesByNamesParams{ + WorkspaceID: auth.AuthorizedWorkspaceID, + Names: rolesToFind, + }) + if err != nil && !db.IsNotFound(err) { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), + fault.Public("Failed to check for duplicate roles."), + ) + } + + for _, role := range roles { + roleNameToRoleId[role.Name] = &role.ID + } + } + + // We found the stuff that we could find, now we can just upsert everything that doesn't exist. 
+ for externalId, identityId := range externalIdToIdentityId { + if identityId != nil { + continue + } + + id := uid.New(uid.IdentityPrefix) + identitiesToInsert = append(identitiesToInsert, db.InsertIdentityParams{ + ID: id, + ExternalID: externalId, + WorkspaceID: auth.AuthorizedWorkspaceID, + Environment: "default", + CreatedAt: now, + Meta: []byte("{}"), + }) + + externalIdToIdentityId[externalId] = &id + } + + for slug, permissionId := range permissionSlugToPermissionId { + if permissionId != nil { + continue + } + + id := uid.New(uid.PermissionPrefix) + permissionsToInsert = append(permissionsToInsert, db.InsertPermissionParams{ + PermissionID: id, + WorkspaceID: auth.AuthorizedWorkspaceID, + Name: slug, + Slug: slug, + Description: dbtype.NullString{Valid: false, String: ""}, + CreatedAtM: now, + }) + + permissionSlugToPermissionId[slug] = &id + } + + for name, roleId := range roleNameToRoleId { + if roleId != nil { + continue + } + + id := uid.New(uid.RolePrefix) + rolesToInsert = append(rolesToInsert, db.InsertRoleParams{ + RoleID: id, + WorkspaceID: auth.AuthorizedWorkspaceID, + Name: name, + Description: sql.NullString{Valid: false, String: ""}, + CreatedAt: now, + }) + + roleNameToRoleId[name] = &id + } + + // Now the fun begins. 
+ for _, key := range req.Keys { + keyParams, ok := keysToInsert[key.Hash] + if !ok { + continue + } + + if key.Ratelimits != nil { + for _, ratelimit := range *key.Ratelimits { + ratelimitsToInsert = append(ratelimitsToInsert, db.InsertKeyRatelimitParams{ + ID: uid.New(uid.RatelimitPrefix), + WorkspaceID: auth.AuthorizedWorkspaceID, + KeyID: sql.NullString{String: keyParams.ID, Valid: true}, + Name: ratelimit.Name, + Limit: int32(ratelimit.Limit), // nolint:gosec + Duration: ratelimit.Duration, + CreatedAt: now, + AutoApply: ratelimit.AutoApply, + }) + } + } + + if key.ExternalId != nil { + identityID, ok := externalIdToIdentityId[*key.ExternalId] + if ok { + keyParams.IdentityID = sql.NullString{Valid: true, String: *identityID} + } + } + + if key.Permissions != nil { + for _, permission := range *key.Permissions { + permissionID, ok := permissionSlugToPermissionId[permission] + if !ok { + continue + } + + keyPermissionsToInsert = append(keyPermissionsToInsert, db.InsertKeyPermissionParams{ + KeyID: keyParams.ID, + PermissionID: *permissionID, + WorkspaceID: auth.AuthorizedWorkspaceID, + CreatedAt: now, + }) + + auditLogs = append(auditLogs, auditlog.AuditLog{ + WorkspaceID: auth.AuthorizedWorkspaceID, + Event: auditlog.AuthConnectPermissionKeyEvent, + ActorType: auditlog.RootKeyActor, + ActorID: auth.Key.ID, + ActorName: "root key", + ActorMeta: map[string]any{}, + Display: fmt.Sprintf("Added permission %s to key %s", permission, keyParams.ID), + RemoteIP: s.Location(), + UserAgent: s.UserAgent(), + Resources: []auditlog.AuditLogResource{ + { + Type: auditlog.KeyResourceType, + ID: keyParams.ID, + Name: keyParams.Name.String, + DisplayName: keyParams.Name.String, + Meta: map[string]any{}, + }, + { + Type: auditlog.PermissionResourceType, + ID: *permissionID, + Name: permission, + DisplayName: permission, + Meta: map[string]any{}, + }, + }, + }) + } + } + + if key.Roles != nil { + for _, role := range *key.Roles { + roleID, ok := roleNameToRoleId[role] + if !ok { 
+ continue + } + + keyRolesToInsert = append(keyRolesToInsert, db.InsertKeyRoleParams{ + KeyID: keyParams.ID, + RoleID: *roleID, + WorkspaceID: auth.AuthorizedWorkspaceID, + CreatedAtM: now, + }) + + auditLogs = append(auditLogs, auditlog.AuditLog{ + WorkspaceID: auth.AuthorizedWorkspaceID, + Event: auditlog.AuthConnectRoleKeyEvent, + ActorType: auditlog.RootKeyActor, + ActorID: auth.Key.ID, + ActorName: "root key", + ActorMeta: map[string]any{}, + Display: fmt.Sprintf("Connected role %s to key %s", role, keyParams.ID), + RemoteIP: s.Location(), + UserAgent: s.UserAgent(), + Resources: []auditlog.AuditLogResource{ + { + Type: auditlog.KeyResourceType, + ID: keyParams.ID, + Name: keyParams.Name.String, + DisplayName: keyParams.Name.String, + Meta: map[string]any{}, + }, + { + Type: auditlog.RoleResourceType, + ID: *roleID, + DisplayName: role, + Name: role, + Meta: map[string]any{}, + }, + }, + }) + } + } + + auditLogs = append(auditLogs, auditlog.AuditLog{ + WorkspaceID: auth.AuthorizedWorkspaceID, + Event: auditlog.KeyCreateEvent, + ActorType: auditlog.RootKeyActor, + ActorID: auth.Key.ID, + ActorName: "root key", + ActorMeta: map[string]any{}, + Display: fmt.Sprintf("Created key %s in migration %s", keyParams.ID, migration.ID), + RemoteIP: s.Location(), + UserAgent: s.UserAgent(), + Resources: []auditlog.AuditLogResource{ + { + Type: auditlog.KeyResourceType, + ID: keyParams.ID, + DisplayName: keyParams.Name.String, + Name: keyParams.Name.String, + Meta: map[string]any{}, + }, + { + Type: auditlog.APIResourceType, + ID: req.ApiId, + DisplayName: api.Name, + Name: api.Name, + Meta: map[string]any{}, + }, + }, + }) + + keysArray = append(keysArray, keyParams) + } + + if len(permissionsToInsert) > 0 { + chunks := chunk(permissionsToInsert, ChunkSize) + for _, chunk := range chunks { + if err := db.BulkQuery.InsertPermissions(ctx, tx, chunk); err != nil { + return err + } + } + } + + if len(identitiesToInsert) > 0 { + chunks := chunk(identitiesToInsert, ChunkSize) + 
for _, chunk := range chunks { + if err := db.BulkQuery.InsertIdentities(ctx, tx, chunk); err != nil { + return err + } + } + } + + if len(rolesToInsert) > 0 { + chunks := chunk(rolesToInsert, ChunkSize) + for _, chunk := range chunks { + if err := db.BulkQuery.InsertRoles(ctx, tx, chunk); err != nil { + return err + } + } + } + + if len(keysArray) > 0 { + chunks := chunk(keysArray, ChunkSize) + for _, chunk := range chunks { + if err := db.BulkQuery.InsertKeys(ctx, tx, chunk); err != nil { + return err + } + } + } + + if len(keyRolesToInsert) > 0 { + chunks := chunk(keyRolesToInsert, ChunkSize) + for _, chunk := range chunks { + if err := db.BulkQuery.InsertKeyRoles(ctx, tx, chunk); err != nil { + return err + } + } + } + + if len(keyPermissionsToInsert) > 0 { + chunks := chunk(keyPermissionsToInsert, ChunkSize) + for _, chunk := range chunks { + if err := db.BulkQuery.InsertKeyPermissions(ctx, tx, chunk); err != nil { + return err + } + } + } + + if len(ratelimitsToInsert) > 0 { + chunks := chunk(ratelimitsToInsert, ChunkSize) + for _, chunk := range chunks { + if err := db.BulkQuery.InsertKeyRatelimits(ctx, tx, chunk); err != nil { + return err + } + } + } + + err = h.Auditlogs.Insert(ctx, tx, auditLogs) + if err != nil { + return err + } + + return nil + }) + if err != nil { + return err + } + + // Build the response with migrated keys and failed hashes + migratedKeys := []openapi.V2KeysMigrateKeysMigration{} + for _, key := range keysArray { + migratedKeys = append(migratedKeys, openapi.V2KeysMigrateKeysMigration{ + Hash: key.Hash, + KeyId: key.ID, + }) + } + + return s.JSON(http.StatusOK, Response{ + Meta: openapi.Meta{ + RequestId: s.RequestID(), + }, + Data: openapi.V2KeysMigrateKeysResponseData{ + Migrated: migratedKeys, + Failed: failedHashes, + }, + }) +} + +func deduplicate[T comparable](items []T) []T { + seen := make(map[T]bool) + result := []T{} + + for _, item := range items { + if !seen[item] { + seen[item] = true + result = append(result, item) + 
} + } + + return result +} + +func chunk[T any](items []T, size int) [][]T { + var chunks [][]T + for i := 0; i < len(items); i += size { + end := i + size + if end > len(items) { + end = len(items) + } + chunks = append(chunks, items[i:end]) + } + + return chunks +} diff --git a/go/apps/api/routes/v2_keys_verify_key/handler.go b/go/apps/api/routes/v2_keys_verify_key/handler.go index 4c3a68f7c5..d435ca35b6 100644 --- a/go/apps/api/routes/v2_keys_verify_key/handler.go +++ b/go/apps/api/routes/v2_keys_verify_key/handler.go @@ -15,6 +15,7 @@ import ( "github.com/unkeyed/unkey/go/pkg/codes" "github.com/unkeyed/unkey/go/pkg/db" "github.com/unkeyed/unkey/go/pkg/fault" + "github.com/unkeyed/unkey/go/pkg/hash" "github.com/unkeyed/unkey/go/pkg/otel/logging" "github.com/unkeyed/unkey/go/pkg/ptr" "github.com/unkeyed/unkey/go/pkg/rbac" @@ -61,10 +62,19 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { return err } - key, emit, err := h.Keys.Get(ctx, s, req.Key) + key, emit, err := h.Keys.Get(ctx, s, hash.Sha256(req.Key)) if err != nil { + return err } + if key.Status == keys.StatusNotFound && req.MigrationId != nil { + + key, emit, err = h.Keys.GetMigrated(ctx, s, req.Key, ptr.SafeDeref(req.MigrationId)) + if err != nil { + return err + } + + } // Validate key belongs to authorized workspace if key.Key.WorkspaceID != auth.AuthorizedWorkspaceID { diff --git a/go/apps/api/routes/v2_keys_verify_key/migration_test.go b/go/apps/api/routes/v2_keys_verify_key/migration_test.go new file mode 100644 index 0000000000..4c61e94cbb --- /dev/null +++ b/go/apps/api/routes/v2_keys_verify_key/migration_test.go @@ -0,0 +1,125 @@ +package handler_test + +import ( + "context" + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + migrateHandler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_migrate_keys" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_verify_key" + 
"github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/hash" + "github.com/unkeyed/unkey/go/pkg/prefixedapikey" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func TestKeyVerificationWithMigration(t *testing.T) { + ctx := context.Background() + h := testutil.NewHarness(t) + + migrateRoute := &migrateHandler.Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + Auditlogs: h.Auditlogs, + ApiCache: h.Caches.LiveApiByID, + } + + verifyRoute := &handler.Handler{ + DB: h.DB, + Keys: h.Keys, + Logger: h.Logger, + Auditlogs: h.Auditlogs, + ClickHouse: h.ClickHouse, + } + + h.Register(verifyRoute) + h.Register(migrateRoute) + + // Create a workspace + workspace := h.Resources().UserWorkspace + + // Create a root key with appropriate permissions + rootKey := h.CreateRootKey(workspace.ID, "api.*.verify_key", "api.*.create_key") + + api := h.CreateApi(seed.CreateApiRequest{WorkspaceID: workspace.ID}) + + // Set up request headers + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + t.Run("verifies key with migration ID", func(t *testing.T) { + // Create a migration + migrationID := uid.New("migration") + + // Insert migration directly to database + err := db.Query.InsertKeyMigration(ctx, h.DB.RW(), db.InsertKeyMigrationParams{ + ID: migrationID, + WorkspaceID: workspace.ID, + Algorithm: db.KeyMigrationsAlgorithmGithubcomSeamapiPrefixedApiKey, + }) + require.NoError(t, err, "Failed to insert migration") + + resendKey, err := prefixedapikey.GenerateAPIKey(&prefixedapikey.GenerateAPIKeyOptions{ + KeyPrefix: "re", + }) + require.NoError(t, err) + + migrateReq := migrateHandler.Request{ + ApiId: api.ID, + MigrationId: migrationID, + Keys: []openapi.V2KeysMigrateKeyData{ + { + Hash: resendKey.LongTokenHash, + Enabled: ptr.P(true), + }, + }, + } + + 
migrateRes := testutil.CallRoute[migrateHandler.Request, migrateHandler.Response](h, migrateRoute, headers, migrateReq) + require.Equal(t, 200, migrateRes.Status, "expected 200, received: %#v", migrateRes) + require.Len(t, migrateRes.Body.Data.Failed, 0, "No keys should fail migration") + require.Len(t, migrateRes.Body.Data.Migrated, 1, "One key should be migrated") + keyID := migrateRes.Body.Data.Migrated[0].KeyId + + req := handler.Request{ + Key: resendKey.Token, + MigrationId: ptr.P(migrationID), + } + + res1 := testutil.CallRoute[handler.Request, handler.Response](h, verifyRoute, headers, req) + + require.Equal(t, 200, res1.Status, "expected 200, received: %#v", res1) + require.NotNil(t, res1.Body) + require.Equal(t, openapi.VALID, res1.Body.Data.Code, "Key should be valid but got %s", res1.Body.Data.Code) + require.True(t, res1.Body.Data.Valid, "Key should be valid but got %t", res1.Body.Data.Valid) + + // Now we should be able to verify the key without the migration ID + req = handler.Request{ + Key: resendKey.Token, + } + + res2 := testutil.CallRoute[handler.Request, handler.Response](h, verifyRoute, headers, req) + require.Equal(t, 200, res2.Status, "expected 200, received: %#v", res2) + require.NotNil(t, res2.Body) + require.Equal(t, openapi.VALID, res2.Body.Data.Code, "Key should be valid but got %s", res2.Body.Data.Code) + require.True(t, res2.Body.Data.Valid, "Key should be valid but got %t", res2.Body.Data.Valid) + + // The migration ID should be removed from the key and the hash updated + key, err := db.Query.FindKeyByID(ctx, h.DB.RW(), keyID) + require.NoError(t, err) + require.False(t, key.PendingMigrationID.Valid) + require.Empty(t, key.PendingMigrationID.String) + require.NotEqual(t, resendKey.LongTokenHash, key.Hash, "Hash should be different after migration") + require.Equal(t, hash.Sha256(resendKey.Token), key.Hash) + require.Equal(t, resendKey.Token[:7], key.Start, "start should match first 6 chars of raw key after migration") + }) +} diff 
// TestResendDemo is a narrative, end-to-end walkthrough of how a customer
// (here: Resend) would migrate existing prefixed-api-key hashes to Unkey and
// then verify them. The numbered comments mirror the steps the customer
// performs; the key row is seeded directly via the DB instead of the
// migrateKeys endpoint to keep the demo focused on verification.
func TestResendDemo(t *testing.T) {
	ctx := context.Background()
	h := testutil.NewHarness(t)

	route := &handler.Handler{
		DB:         h.DB,
		Keys:       h.Keys,
		Logger:     h.Logger,
		Auditlogs:  h.Auditlogs,
		ClickHouse: h.ClickHouse,
	}

	h.Register(route)

	// Create a workspace
	workspace := h.Resources().UserWorkspace

	// Create a root key with appropriate permissions
	rootKey := h.CreateRootKey(workspace.ID, "api.*.verify_key")

	api := h.CreateApi(seed.CreateApiRequest{WorkspaceID: workspace.ID})

	// Set up request headers
	headers := http.Header{
		"Content-Type":  {"application/json"},
		"Authorization": {fmt.Sprintf("Bearer %s", rootKey)},
	}

	t.Run("verifies key with migration ID", func(t *testing.T) {

		// 1. Create a migration
		// This will be done by us, no need to think about it.

		// Insert migration directly to database
		err := db.Query.InsertKeyMigration(ctx, h.DB.RW(), db.InsertKeyMigrationParams{
			ID:          "resend",
			WorkspaceID: workspace.ID,
			Algorithm:   db.KeyMigrationsAlgorithmGithubcomSeamapiPrefixedApiKey,
		})
		require.NoError(t, err, "Failed to insert migration")

		// 2. Get an existing key.
		//
		// In the future you'd use unkey to issue new keys, but for your existing ones,
		// we'll create one using your library.
		//
		//
		// ```js
		// import { generateAPIKey } from "prefixed-api-key"
		//
		// const key = await generateAPIKey({ keyPrefix: 'resend' })
		//
		// console.log(key)
		// /*
		// {
		//   shortToken: "2aGwhSYz",
		//   longToken: "GEbTboUygK1ixefLDTUM5wf7",
		//   longTokenHash: "c4fbfe7c69a067cb0841dea343346a750a69908a08ea9656d2a8c19fb0823c64",
		//   token: "resend_2aGwhSYz_GEbTboUygK1ixefLDTUM5wf7",
		// }
		// */
		// ```

		// When migrating keys to unkey, you just need to give us the longTokenHash
		// and optional user id etc to link them together so you can later query all
		// keys for a specific user.
		// longTokenHash := "f8d7af831e76a886cb225e56d0750a54efab6f89c036e01b2ca1f52203425c72"

		// Unkey doesn't store this token, we just use it below to run a demo
		// verification.
		// token := "re_QgLu9m3D_FMbosT9oDBP3D8RkTu6p24wT"
		resendKey, err := prefixedapikey.GenerateAPIKey(&prefixedapikey.GenerateAPIKeyOptions{
			KeyPrefix: "re",
		})
		require.NoError(t, err)

		// 3. Migrate existing keys to unkey
		//
		// We'll give you an api endpoint to send your existing hashes to.
		// (This demo seeds the row directly; PendingMigrationID marks the key
		// as still hashed with the customer's algorithm.)
		err = db.Query.InsertKey(ctx, h.DB.RW(), db.InsertKeyParams{
			ID:                 uid.New(uid.KeyPrefix),
			KeyringID:          api.KeyAuthID.String,
			WorkspaceID:        workspace.ID,
			CreatedAtM:         time.Now().UnixMilli(),
			Hash:               resendKey.LongTokenHash,
			Enabled:            true,
			PendingMigrationID: sql.NullString{Valid: true, String: "resend"},
		})
		require.NoError(t, err)

		// 4. Now we're ready to verify keys.
		// You'll grab the key from the request against your api and then make a call to unkey
		//
		// You need to send the key and the preshared constant migration ID,

		res1 := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, handler.Request{
			Key:         resendKey.Token,
			MigrationId: ptr.P("resend"),
		})

		require.Equal(t, 200, res1.Status)
		require.True(t, res1.Body.Data.Valid)

		// During the first verification, we look up the key using the algorithm from
		// your library and then rehash it to use unkey's default algorithm.
		// Now this key is fully migrated and just like any other unkey key.
		// Sending the migration ID along for this key is no longer necessary, but doesn't hurt either.
		// Since you don't know before hand if the key is migrated or not, you can always send the migration ID along with the key and we will handle it accordingly.
		res2 := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, handler.Request{
			Key: resendKey.Token,
		})

		require.Equal(t, 200, res2.Status)
		require.True(t, res2.Body.Data.Valid)
	})
}
-func (s *service) Get(ctx context.Context, sess *zen.Session, rawKey string) (*KeyVerifier, func(), error) { +func (s *service) Get(ctx context.Context, sess *zen.Session, sha256Hash string) (*KeyVerifier, func(), error) { ctx, span := tracing.Start(ctx, "keys.Get") defer span.End() - err := assert.NotEmpty(rawKey) + err := assert.NotEmpty(sha256Hash) if err != nil { return nil, emptyLog, fault.Wrap(err, fault.Internal("rawKey is empty")) } - h := hash.Sha256(rawKey) - key, hit, err := s.keyCache.SWR(ctx, h, func(ctx context.Context) (db.FindKeyForVerificationRow, error) { + key, hit, err := s.keyCache.SWR(ctx, sha256Hash, func(ctx context.Context) (db.FindKeyForVerificationRow, error) { // Use database retry with exponential backoff, skipping non-transient errors return db.WithRetry(func() (db.FindKeyForVerificationRow, error) { - return db.Query.FindKeyForVerification(ctx, s.db.RO(), h) + return db.Query.FindKeyForVerification(ctx, s.db.RO(), sha256Hash) }) }, caches.DefaultFindFirstOp) diff --git a/go/internal/services/keys/get_migrated.go b/go/internal/services/keys/get_migrated.go new file mode 100644 index 0000000000..faade1a4e2 --- /dev/null +++ b/go/internal/services/keys/get_migrated.go @@ -0,0 +1,138 @@ +package keys + +import ( + "context" + "crypto/sha256" + "database/sql" + "encoding/hex" + "fmt" + "strings" + "time" + + "github.com/unkeyed/unkey/go/pkg/assert" + "github.com/unkeyed/unkey/go/pkg/codes" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/fault" + "github.com/unkeyed/unkey/go/pkg/hash" + "github.com/unkeyed/unkey/go/pkg/otel/tracing" + "github.com/unkeyed/unkey/go/pkg/zen" +) + +// GetMigrated uses a special hashing algorithm to retrieve a key from the database and +// migrates the key to our own hashing algorithm. 
+func (s *service) GetMigrated(ctx context.Context, sess *zen.Session, rawKey string, migrationID string) (*KeyVerifier, func(), error) { + ctx, span := tracing.Start(ctx, "keys.GetMigrated") + defer span.End() + + err := assert.NotEmpty(rawKey) + if err != nil { + return nil, emptyLog, fault.Wrap(err, fault.Internal("rawKey is empty")) + } + + migration, err := db.Query.FindKeyMigrationByID(ctx, s.db.RO(), db.FindKeyMigrationByIDParams{ + ID: migrationID, + WorkspaceID: sess.AuthorizedWorkspaceID(), + }) + if err != nil { + if db.IsNotFound(err) { + // nolint:exhaustruct + return &KeyVerifier{ + Status: StatusNotFound, + message: "migration does not exist", + }, emptyLog, nil + } + + return nil, emptyLog, fault.Wrap( + err, + fault.Internal("unable to load migration"), + fault.Public("We could not load the requested migration."), + ) + } + + // h is the result of whatever algorithm we should use. + // The section below is expected to populate this and we can use it to look up a key in the db + var h string + + switch migration.Algorithm { + case db.KeyMigrationsAlgorithmGithubcomSeamapiPrefixedApiKey: + { + parts := strings.Split(rawKey, "_") + err = assert.Equal(len(parts), 3, "Expected prefixed api keys to have 3 segments") + if err != nil { + return nil, emptyLog, fault.Wrap( + err, + fault.Code(codes.URN(codes.Auth.Authentication.Malformed.URN())), + fault.Public("Invalid key format"), + ) + } + + b := sha256.Sum256([]byte(parts[2])) + h = hex.EncodeToString(b[:]) + } + default: + return nil, emptyLog, fault.New( + fmt.Sprintf("unsupported migration algorithm %s", migration.Algorithm), + fault.Public(fmt.Sprintf("We could not load the requested migration for algorithm %s.", migration.Algorithm)), + ) + } + + key, log, err := s.Get(ctx, sess, h) + if err != nil { + return nil, log, err + } + + if key.Key.PendingMigrationID.Valid && key.Key.PendingMigrationID.String == migrationID { + newHash := hash.Sha256(rawKey) + err = db.Query.UpdateKeyHashAndMigration(ctx, 
s.db.RW(), db.UpdateKeyHashAndMigrationParams{ + ID: key.Key.ID, + Hash: newHash, + Start: extractStart(rawKey), + PendingMigrationID: sql.NullString{Valid: false, String: ""}, + UpdatedAtM: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, + }) + if err != nil { + return nil, log, fault.Wrap( + err, + fault.Code(codes.App.Internal.UnexpectedError.URN()), + fault.Public("We could not update the key hash and migration id"), + ) + } + + s.keyCache.Remove( + ctx, + h, + newHash, + ) + } + + return key, log, nil +} + +// extractStart extracts the start value from a key, handling both prefixed and non-prefixed keys +func extractStart(key string) string { + // Check if the key has a prefix (format: prefix_actualkey) + parts := strings.Split(key, "_") + + // If there are 2 or more parts, it's a prefixed key + if len(parts) >= 2 { + // Extract the prefix + prefix := parts[0] + // Get the actual key part (everything after first underscore) + actualKey := strings.Join(parts[1:], "_") + + if len(actualKey) >= 4 { + return fmt.Sprintf("%s_%s", prefix, actualKey[:4]) + } + + // If actual key is shorter than 4 chars, use what we have + // this should never happen, but just in case + return fmt.Sprintf("%s_%s", prefix, actualKey) + } + + // No prefix, just return first 4 characters + if len(key) >= 4 { + return key[:4] + } + + return key +} diff --git a/go/internal/services/keys/interface.go b/go/internal/services/keys/interface.go index 40ea578300..0244a898a0 100644 --- a/go/internal/services/keys/interface.go +++ b/go/internal/services/keys/interface.go @@ -12,6 +12,10 @@ type KeyService interface { // Get retrieves a key and returns a KeyVerifier for validation Get(ctx context.Context, sess *zen.Session, hash string) (*KeyVerifier, func(), error) + // GetMigrated retrieves a key using rawKey and migrationID + // If migration is pending, it performs on-demand migration and returns a KeyVerifier for further validation. 
+ GetMigrated(ctx context.Context, sess *zen.Session, rawKey string, migrationID string) (*KeyVerifier, func(), error) + // GetRootKey retrieves and validates a root key from the session GetRootKey(ctx context.Context, sess *zen.Session) (*KeyVerifier, func(), error) diff --git a/go/pkg/codes/constants_gen.go b/go/pkg/codes/constants_gen.go index 86b1fe0c2f..29b23b0b34 100644 --- a/go/pkg/codes/constants_gen.go +++ b/go/pkg/codes/constants_gen.go @@ -61,6 +61,11 @@ const ( // NotFound indicates the requested API was not found. UnkeyDataErrorsApiNotFound URN = "err:unkey:data:api_not_found" + // Migration + + // NotFound indicates the requested migration was not found. + UnkeyDataErrorsMigrationNotFound URN = "err:unkey:data:migration_not_found" + // Permission // Duplicate indicates the requested permission already exists. diff --git a/go/pkg/codes/unkey_data.go b/go/pkg/codes/unkey_data.go index 5d21739314..c8b0f3db86 100644 --- a/go/pkg/codes/unkey_data.go +++ b/go/pkg/codes/unkey_data.go @@ -20,6 +20,12 @@ type dataApi struct { NotFound Code } +// dataMigration defines errors related to migration operations. +type dataMigration struct { + // NotFound indicates the requested migration was not found. + NotFound Code +} + // dataPermission defines errors related to permission operations. type dataPermission struct { // Duplicate indicates the requested permission already exists. 
@@ -80,6 +86,7 @@ type UnkeyDataErrors struct { Key dataKey Workspace dataWorkspace Api dataApi + Migration dataMigration Permission dataPermission Role dataRole KeyAuth dataKeyAuth @@ -105,6 +112,10 @@ var Data = UnkeyDataErrors{ NotFound: Code{SystemUnkey, CategoryUnkeyData, "api_not_found"}, }, + Migration: dataMigration{ + NotFound: Code{SystemUnkey, CategoryUnkeyData, "migration_not_found"}, + }, + Permission: dataPermission{ NotFound: Code{SystemUnkey, CategoryUnkeyData, "permission_not_found"}, Duplicate: Code{SystemUnkey, CategoryUnkeyData, "permission_already_exists"}, diff --git a/go/pkg/db/bulk_key_insert.sql.go b/go/pkg/db/bulk_key_insert.sql.go index cfffaa2138..e98f05c95d 100644 --- a/go/pkg/db/bulk_key_insert.sql.go +++ b/go/pkg/db/bulk_key_insert.sql.go @@ -9,7 +9,7 @@ import ( ) // bulkInsertKey is the base query for bulk insert -const bulkInsertKey = `INSERT INTO ` + "`" + `keys` + "`" + ` ( id, key_auth_id, hash, start, workspace_id, for_workspace_id, name, owner_id, identity_id, meta, expires, created_at_m, enabled, remaining_requests, refill_day, refill_amount ) VALUES %s` +const bulkInsertKey = `INSERT INTO ` + "`" + `keys` + "`" + ` ( id, key_auth_id, hash, start, workspace_id, for_workspace_id, name, owner_id, identity_id, meta, expires, created_at_m, enabled, remaining_requests, refill_day, refill_amount, pending_migration_id ) VALUES %s` // InsertKeys performs bulk insert in a single query func (q *BulkQueries) InsertKeys(ctx context.Context, db DBTX, args []InsertKeyParams) error { @@ -21,7 +21,7 @@ func (q *BulkQueries) InsertKeys(ctx context.Context, db DBTX, args []InsertKeyP // Build the bulk insert query valueClauses := make([]string, len(args)) for i := range args { - valueClauses[i] = "( ?, ?, ?, ?, ?, ?, ?, null, ?, ?, ?, ?, ?, ?, ?, ? )" + valueClauses[i] = "( ?, ?, ?, ?, ?, ?, ?, null, ?, ?, ?, ?, ?, ?, ?, ?, ? 
)" } bulkQuery := fmt.Sprintf(bulkInsertKey, strings.Join(valueClauses, ", ")) @@ -44,6 +44,7 @@ func (q *BulkQueries) InsertKeys(ctx context.Context, db DBTX, args []InsertKeyP allArgs = append(allArgs, arg.RemainingRequests) allArgs = append(allArgs, arg.RefillDay) allArgs = append(allArgs, arg.RefillAmount) + allArgs = append(allArgs, arg.PendingMigrationID) } // Execute the bulk insert diff --git a/go/pkg/db/bulk_key_migration_insert.sql.go b/go/pkg/db/bulk_key_migration_insert.sql.go new file mode 100644 index 0000000000..4784f699fa --- /dev/null +++ b/go/pkg/db/bulk_key_migration_insert.sql.go @@ -0,0 +1,40 @@ +// Code generated by sqlc bulk insert plugin. DO NOT EDIT. + +package db + +import ( + "context" + "fmt" + "strings" +) + +// bulkInsertKeyMigration is the base query for bulk insert +const bulkInsertKeyMigration = `INSERT INTO key_migrations ( id, workspace_id, algorithm ) VALUES %s` + +// InsertKeyMigrations performs bulk insert in a single query +func (q *BulkQueries) InsertKeyMigrations(ctx context.Context, db DBTX, args []InsertKeyMigrationParams) error { + + if len(args) == 0 { + return nil + } + + // Build the bulk insert query + valueClauses := make([]string, len(args)) + for i := range args { + valueClauses[i] = "( ?, ?, ? )" + } + + bulkQuery := fmt.Sprintf(bulkInsertKeyMigration, strings.Join(valueClauses, ", ")) + + // Collect all arguments + var allArgs []any + for _, arg := range args { + allArgs = append(allArgs, arg.ID) + allArgs = append(allArgs, arg.WorkspaceID) + allArgs = append(allArgs, arg.Algorithm) + } + + // Execute the bulk insert + _, err := db.ExecContext(ctx, bulkQuery, allArgs...) + return err +} diff --git a/go/pkg/db/identity_find_many_by_external_id.sql_generated.go b/go/pkg/db/identity_find_many_by_external_id.sql_generated.go new file mode 100644 index 0000000000..cf7e1a2ea7 --- /dev/null +++ b/go/pkg/db/identity_find_many_by_external_id.sql_generated.go @@ -0,0 +1,72 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: identity_find_many_by_external_id.sql + +package db + +import ( + "context" + "strings" +) + +const findIdentitiesByExternalId = `-- name: FindIdentitiesByExternalId :many +SELECT id, external_id, workspace_id, environment, meta, deleted, created_at, updated_at +FROM identities +WHERE workspace_id = ? AND external_id IN (/*SLICE:externalIds*/?) AND deleted = ? +` + +type FindIdentitiesByExternalIdParams struct { + WorkspaceID string `db:"workspace_id"` + ExternalIds []string `db:"externalIds"` + Deleted bool `db:"deleted"` +} + +// FindIdentitiesByExternalId +// +// SELECT id, external_id, workspace_id, environment, meta, deleted, created_at, updated_at +// FROM identities +// WHERE workspace_id = ? AND external_id IN (/*SLICE:externalIds*/?) AND deleted = ? +func (q *Queries) FindIdentitiesByExternalId(ctx context.Context, db DBTX, arg FindIdentitiesByExternalIdParams) ([]Identity, error) { + query := findIdentitiesByExternalId + var queryParams []interface{} + queryParams = append(queryParams, arg.WorkspaceID) + if len(arg.ExternalIds) > 0 { + for _, v := range arg.ExternalIds { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:externalIds*/?", strings.Repeat(",?", len(arg.ExternalIds))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:externalIds*/?", "NULL", 1) + } + queryParams = append(queryParams, arg.Deleted) + rows, err := db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []Identity + for rows.Next() { + var i Identity + if err := rows.Scan( + &i.ID, + &i.ExternalID, + &i.WorkspaceID, + &i.Environment, + &i.Meta, + &i.Deleted, + &i.CreatedAt, + &i.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/go/pkg/db/key_find_by_id.sql_generated.go b/go/pkg/db/key_find_by_id.sql_generated.go index adc96c0a42..f53851fee2 100644 --- a/go/pkg/db/key_find_by_id.sql_generated.go +++ b/go/pkg/db/key_find_by_id.sql_generated.go @@ -10,13 +10,13 @@ import ( ) const findKeyByID = `-- name: FindKeyByID :one -SELECT id, key_auth_id, hash, start, workspace_id, for_workspace_id, name, owner_id, identity_id, meta, expires, created_at_m, updated_at_m, deleted_at_m, refill_day, refill_amount, last_refill_at, enabled, remaining_requests, ratelimit_async, ratelimit_limit, ratelimit_duration, environment FROM ` + "`" + `keys` + "`" + ` k +SELECT id, key_auth_id, hash, start, workspace_id, for_workspace_id, name, owner_id, identity_id, meta, expires, created_at_m, updated_at_m, deleted_at_m, refill_day, refill_amount, last_refill_at, enabled, remaining_requests, ratelimit_async, ratelimit_limit, ratelimit_duration, environment, pending_migration_id FROM ` + "`" + `keys` + "`" + ` k WHERE k.id = ? 
` // FindKeyByID // -// SELECT id, key_auth_id, hash, start, workspace_id, for_workspace_id, name, owner_id, identity_id, meta, expires, created_at_m, updated_at_m, deleted_at_m, refill_day, refill_amount, last_refill_at, enabled, remaining_requests, ratelimit_async, ratelimit_limit, ratelimit_duration, environment FROM `keys` k +// SELECT id, key_auth_id, hash, start, workspace_id, for_workspace_id, name, owner_id, identity_id, meta, expires, created_at_m, updated_at_m, deleted_at_m, refill_day, refill_amount, last_refill_at, enabled, remaining_requests, ratelimit_async, ratelimit_limit, ratelimit_duration, environment, pending_migration_id FROM `keys` k // WHERE k.id = ? func (q *Queries) FindKeyByID(ctx context.Context, db DBTX, id string) (Key, error) { row := db.QueryRowContext(ctx, findKeyByID, id) @@ -45,6 +45,7 @@ func (q *Queries) FindKeyByID(ctx context.Context, db DBTX, id string) (Key, err &i.RatelimitLimit, &i.RatelimitDuration, &i.Environment, + &i.PendingMigrationID, ) return i, err } diff --git a/go/pkg/db/key_find_for_verification.sql_generated.go b/go/pkg/db/key_find_for_verification.sql_generated.go index 1093c89159..3005c5fe54 100644 --- a/go/pkg/db/key_find_for_verification.sql_generated.go +++ b/go/pkg/db/key_find_for_verification.sql_generated.go @@ -24,11 +24,13 @@ select k.id, k.last_refill_at, k.enabled, k.remaining_requests, + k.pending_migration_id, a.ip_whitelist, a.workspace_id as api_workspace_id, a.id as api_id, a.deleted_at_m as api_deleted_at_m, + COALESCE( (SELECT JSON_ARRAYAGG(name) FROM (SELECT name @@ -103,6 +105,7 @@ type FindKeyForVerificationRow struct { LastRefillAt sql.NullTime `db:"last_refill_at"` Enabled bool `db:"enabled"` RemainingRequests sql.NullInt32 `db:"remaining_requests"` + PendingMigrationID sql.NullString `db:"pending_migration_id"` IpWhitelist sql.NullString `db:"ip_whitelist"` ApiWorkspaceID string `db:"api_workspace_id"` ApiID string `db:"api_id"` @@ -133,11 +136,13 @@ type FindKeyForVerificationRow struct 
{ // k.last_refill_at, // k.enabled, // k.remaining_requests, +// k.pending_migration_id, // a.ip_whitelist, // a.workspace_id as api_workspace_id, // a.id as api_id, // a.deleted_at_m as api_deleted_at_m, // +// // COALESCE( // (SELECT JSON_ARRAYAGG(name) // FROM (SELECT name @@ -213,6 +218,7 @@ func (q *Queries) FindKeyForVerification(ctx context.Context, db DBTX, hash stri &i.LastRefillAt, &i.Enabled, &i.RemainingRequests, + &i.PendingMigrationID, &i.IpWhitelist, &i.ApiWorkspaceID, &i.ApiID, diff --git a/go/pkg/db/key_find_live_by_hash.sql_generated.go b/go/pkg/db/key_find_live_by_hash.sql_generated.go index 1788c94135..06e1b616d0 100644 --- a/go/pkg/db/key_find_live_by_hash.sql_generated.go +++ b/go/pkg/db/key_find_live_by_hash.sql_generated.go @@ -12,7 +12,7 @@ import ( const findLiveKeyByHash = `-- name: FindLiveKeyByHash :one SELECT - k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, + k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, a.id, a.name, a.workspace_id, a.ip_whitelist, a.auth_type, a.key_auth_id, a.created_at_m, a.updated_at_m, a.deleted_at_m, a.delete_protection, ka.id, ka.workspace_id, ka.created_at_m, ka.updated_at_m, ka.deleted_at_m, ka.store_encrypted_keys, ka.default_prefix, ka.default_bytes, ka.size_approx, ka.size_last_updated_at, ws.id, ws.org_id, ws.name, ws.slug, ws.partition_id, ws.plan, ws.tier, ws.stripe_customer_id, ws.stripe_subscription_id, 
ws.beta_features, ws.features, ws.subscriptions, ws.enabled, ws.delete_protection, ws.created_at_m, ws.updated_at_m, ws.deleted_at_m, @@ -125,6 +125,7 @@ type FindLiveKeyByHashRow struct { RatelimitLimit sql.NullInt32 `db:"ratelimit_limit"` RatelimitDuration sql.NullInt64 `db:"ratelimit_duration"` Environment sql.NullString `db:"environment"` + PendingMigrationID sql.NullString `db:"pending_migration_id"` Api Api `db:"api"` KeyAuth KeyAuth `db:"key_auth"` Workspace Workspace `db:"workspace"` @@ -142,7 +143,7 @@ type FindLiveKeyByHashRow struct { // FindLiveKeyByHash // // SELECT -// k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, +// k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, // a.id, a.name, a.workspace_id, a.ip_whitelist, a.auth_type, a.key_auth_id, a.created_at_m, a.updated_at_m, a.deleted_at_m, a.delete_protection, // ka.id, ka.workspace_id, ka.created_at_m, ka.updated_at_m, ka.deleted_at_m, ka.store_encrypted_keys, ka.default_prefix, ka.default_bytes, ka.size_approx, ka.size_last_updated_at, // ws.id, ws.org_id, ws.name, ws.slug, ws.partition_id, ws.plan, ws.tier, ws.stripe_customer_id, ws.stripe_subscription_id, ws.beta_features, ws.features, ws.subscriptions, ws.enabled, ws.delete_protection, ws.created_at_m, ws.updated_at_m, ws.deleted_at_m, @@ -256,6 +257,7 @@ func (q *Queries) FindLiveKeyByHash(ctx context.Context, db DBTX, hash string) ( &i.RatelimitLimit, 
&i.RatelimitDuration, &i.Environment, + &i.PendingMigrationID, &i.Api.ID, &i.Api.Name, &i.Api.WorkspaceID, diff --git a/go/pkg/db/key_find_live_by_id.sql_generated.go b/go/pkg/db/key_find_live_by_id.sql_generated.go index db6a9d0b7e..b410859ee0 100644 --- a/go/pkg/db/key_find_live_by_id.sql_generated.go +++ b/go/pkg/db/key_find_live_by_id.sql_generated.go @@ -12,7 +12,7 @@ import ( const findLiveKeyByID = `-- name: FindLiveKeyByID :one SELECT - k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, + k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, a.id, a.name, a.workspace_id, a.ip_whitelist, a.auth_type, a.key_auth_id, a.created_at_m, a.updated_at_m, a.deleted_at_m, a.delete_protection, ka.id, ka.workspace_id, ka.created_at_m, ka.updated_at_m, ka.deleted_at_m, ka.store_encrypted_keys, ka.default_prefix, ka.default_bytes, ka.size_approx, ka.size_last_updated_at, ws.id, ws.org_id, ws.name, ws.slug, ws.partition_id, ws.plan, ws.tier, ws.stripe_customer_id, ws.stripe_subscription_id, ws.beta_features, ws.features, ws.subscriptions, ws.enabled, ws.delete_protection, ws.created_at_m, ws.updated_at_m, ws.deleted_at_m, @@ -126,6 +126,7 @@ type FindLiveKeyByIDRow struct { RatelimitLimit sql.NullInt32 `db:"ratelimit_limit"` RatelimitDuration sql.NullInt64 `db:"ratelimit_duration"` Environment sql.NullString `db:"environment"` + PendingMigrationID sql.NullString `db:"pending_migration_id"` Api Api 
`db:"api"` KeyAuth KeyAuth `db:"key_auth"` Workspace Workspace `db:"workspace"` @@ -143,7 +144,7 @@ type FindLiveKeyByIDRow struct { // FindLiveKeyByID // // SELECT -// k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, +// k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, // a.id, a.name, a.workspace_id, a.ip_whitelist, a.auth_type, a.key_auth_id, a.created_at_m, a.updated_at_m, a.deleted_at_m, a.delete_protection, // ka.id, ka.workspace_id, ka.created_at_m, ka.updated_at_m, ka.deleted_at_m, ka.store_encrypted_keys, ka.default_prefix, ka.default_bytes, ka.size_approx, ka.size_last_updated_at, // ws.id, ws.org_id, ws.name, ws.slug, ws.partition_id, ws.plan, ws.tier, ws.stripe_customer_id, ws.stripe_subscription_id, ws.beta_features, ws.features, ws.subscriptions, ws.enabled, ws.delete_protection, ws.created_at_m, ws.updated_at_m, ws.deleted_at_m, @@ -258,6 +259,7 @@ func (q *Queries) FindLiveKeyByID(ctx context.Context, db DBTX, id string) (Find &i.RatelimitLimit, &i.RatelimitDuration, &i.Environment, + &i.PendingMigrationID, &i.Api.ID, &i.Api.Name, &i.Api.WorkspaceID, diff --git a/go/pkg/db/key_find_many_by_hash.sql_generated.go b/go/pkg/db/key_find_many_by_hash.sql_generated.go new file mode 100644 index 0000000000..f9fb954483 --- /dev/null +++ b/go/pkg/db/key_find_many_by_hash.sql_generated.go @@ -0,0 +1,56 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: key_find_many_by_hash.sql + +package db + +import ( + "context" + "strings" +) + +const findKeysByHash = `-- name: FindKeysByHash :many +SELECT id, hash FROM ` + "`" + `keys` + "`" + ` WHERE hash IN (/*SLICE:hashes*/?) +` + +type FindKeysByHashRow struct { + ID string `db:"id"` + Hash string `db:"hash"` +} + +// FindKeysByHash +// +// SELECT id, hash FROM `keys` WHERE hash IN (/*SLICE:hashes*/?) +func (q *Queries) FindKeysByHash(ctx context.Context, db DBTX, hashes []string) ([]FindKeysByHashRow, error) { + query := findKeysByHash + var queryParams []interface{} + if len(hashes) > 0 { + for _, v := range hashes { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:hashes*/?", strings.Repeat(",?", len(hashes))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:hashes*/?", "NULL", 1) + } + rows, err := db.QueryContext(ctx, query, queryParams...) + if err != nil { + return nil, err + } + defer rows.Close() + var items []FindKeysByHashRow + for rows.Next() { + var i FindKeysByHashRow + if err := rows.Scan(&i.ID, &i.Hash); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/go/pkg/db/key_insert.sql_generated.go b/go/pkg/db/key_insert.sql_generated.go index e6ef9391d7..960cff37b0 100644 --- a/go/pkg/db/key_insert.sql_generated.go +++ b/go/pkg/db/key_insert.sql_generated.go @@ -27,7 +27,8 @@ INSERT INTO ` + "`" + `keys` + "`" + ` ( enabled, remaining_requests, refill_day, - refill_amount + refill_amount, + pending_migration_id ) VALUES ( ?, ?, @@ -44,26 +45,28 @@ INSERT INTO ` + "`" + `keys` + "`" + ` ( ?, ?, ?, + ?, ? 
) ` type InsertKeyParams struct { - ID string `db:"id"` - KeyringID string `db:"keyring_id"` - Hash string `db:"hash"` - Start string `db:"start"` - WorkspaceID string `db:"workspace_id"` - ForWorkspaceID sql.NullString `db:"for_workspace_id"` - Name sql.NullString `db:"name"` - IdentityID sql.NullString `db:"identity_id"` - Meta sql.NullString `db:"meta"` - Expires sql.NullTime `db:"expires"` - CreatedAtM int64 `db:"created_at_m"` - Enabled bool `db:"enabled"` - RemainingRequests sql.NullInt32 `db:"remaining_requests"` - RefillDay sql.NullInt16 `db:"refill_day"` - RefillAmount sql.NullInt32 `db:"refill_amount"` + ID string `db:"id"` + KeyringID string `db:"keyring_id"` + Hash string `db:"hash"` + Start string `db:"start"` + WorkspaceID string `db:"workspace_id"` + ForWorkspaceID sql.NullString `db:"for_workspace_id"` + Name sql.NullString `db:"name"` + IdentityID sql.NullString `db:"identity_id"` + Meta sql.NullString `db:"meta"` + Expires sql.NullTime `db:"expires"` + CreatedAtM int64 `db:"created_at_m"` + Enabled bool `db:"enabled"` + RemainingRequests sql.NullInt32 `db:"remaining_requests"` + RefillDay sql.NullInt16 `db:"refill_day"` + RefillAmount sql.NullInt32 `db:"refill_amount"` + PendingMigrationID sql.NullString `db:"pending_migration_id"` } // InsertKey @@ -84,7 +87,8 @@ type InsertKeyParams struct { // enabled, // remaining_requests, // refill_day, -// refill_amount +// refill_amount, +// pending_migration_id // ) VALUES ( // ?, // ?, @@ -101,6 +105,7 @@ type InsertKeyParams struct { // ?, // ?, // ?, +// ?, // ? 
// ) func (q *Queries) InsertKey(ctx context.Context, db DBTX, arg InsertKeyParams) error { @@ -120,6 +125,7 @@ func (q *Queries) InsertKey(ctx context.Context, db DBTX, arg InsertKeyParams) e arg.RemainingRequests, arg.RefillDay, arg.RefillAmount, + arg.PendingMigrationID, ) return err } diff --git a/go/pkg/db/key_list_by_key_auth_id.sql_generated.go b/go/pkg/db/key_list_by_key_auth_id.sql_generated.go index 87c1ef15cf..c7c0bc19a7 100644 --- a/go/pkg/db/key_list_by_key_auth_id.sql_generated.go +++ b/go/pkg/db/key_list_by_key_auth_id.sql_generated.go @@ -12,7 +12,7 @@ import ( const listKeysByKeyAuthID = `-- name: ListKeysByKeyAuthID :many SELECT - k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, + k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, i.id as identity_id, i.external_id as external_id, i.meta as identity_meta, @@ -49,7 +49,7 @@ type ListKeysByKeyAuthIDRow struct { // ListKeysByKeyAuthID // // SELECT -// k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, +// k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, 
k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, // i.id as identity_id, // i.external_id as external_id, // i.meta as identity_meta, @@ -104,6 +104,7 @@ func (q *Queries) ListKeysByKeyAuthID(ctx context.Context, db DBTX, arg ListKeys &i.Key.RatelimitLimit, &i.Key.RatelimitDuration, &i.Key.Environment, + &i.Key.PendingMigrationID, &i.IdentityID, &i.ExternalID, &i.IdentityMeta, diff --git a/go/pkg/db/key_list_live_by_auth_id.sql_generated.go b/go/pkg/db/key_list_live_by_auth_id.sql_generated.go index 2f7bceb0af..7d0e77122d 100644 --- a/go/pkg/db/key_list_live_by_auth_id.sql_generated.go +++ b/go/pkg/db/key_list_live_by_auth_id.sql_generated.go @@ -11,7 +11,7 @@ import ( ) const listLiveKeysByKeyAuthID = `-- name: ListLiveKeysByKeyAuthID :many -SELECT k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, +SELECT k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, i.id as identity_table_id, i.external_id as identity_external_id, i.meta as identity_meta, @@ -137,6 +137,7 @@ type ListLiveKeysByKeyAuthIDRow struct { RatelimitLimit sql.NullInt32 `db:"ratelimit_limit"` RatelimitDuration sql.NullInt64 `db:"ratelimit_duration"` Environment sql.NullString `db:"environment"` + PendingMigrationID sql.NullString 
`db:"pending_migration_id"` IdentityTableID sql.NullString `db:"identity_table_id"` IdentityExternalID sql.NullString `db:"identity_external_id"` IdentityMeta []byte `db:"identity_meta"` @@ -150,7 +151,7 @@ type ListLiveKeysByKeyAuthIDRow struct { // ListLiveKeysByKeyAuthID // -// SELECT k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, +// SELECT k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, // i.id as identity_table_id, // i.external_id as identity_external_id, // i.meta as identity_meta, @@ -283,6 +284,7 @@ func (q *Queries) ListLiveKeysByKeyAuthID(ctx context.Context, db DBTX, arg List &i.RatelimitLimit, &i.RatelimitDuration, &i.Environment, + &i.PendingMigrationID, &i.IdentityTableID, &i.IdentityExternalID, &i.IdentityMeta, diff --git a/go/pkg/db/key_migration_find_by_id.sql_generated.go b/go/pkg/db/key_migration_find_by_id.sql_generated.go new file mode 100644 index 0000000000..4bfb5d40aa --- /dev/null +++ b/go/pkg/db/key_migration_find_by_id.sql_generated.go @@ -0,0 +1,41 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: key_migration_find_by_id.sql + +package db + +import ( + "context" +) + +const findKeyMigrationByID = `-- name: FindKeyMigrationByID :one +SELECT + id, + workspace_id, + algorithm +FROM key_migrations +WHERE id = ? +and workspace_id = ? 
+` + +type FindKeyMigrationByIDParams struct { + ID string `db:"id"` + WorkspaceID string `db:"workspace_id"` +} + +// FindKeyMigrationByID +// +// SELECT +// id, +// workspace_id, +// algorithm +// FROM key_migrations +// WHERE id = ? +// and workspace_id = ? +func (q *Queries) FindKeyMigrationByID(ctx context.Context, db DBTX, arg FindKeyMigrationByIDParams) (KeyMigration, error) { + row := db.QueryRowContext(ctx, findKeyMigrationByID, arg.ID, arg.WorkspaceID) + var i KeyMigration + err := row.Scan(&i.ID, &i.WorkspaceID, &i.Algorithm) + return i, err +} diff --git a/go/pkg/db/key_migration_insert.sql_generated.go b/go/pkg/db/key_migration_insert.sql_generated.go new file mode 100644 index 0000000000..1c0e6fb6d6 --- /dev/null +++ b/go/pkg/db/key_migration_insert.sql_generated.go @@ -0,0 +1,44 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: key_migration_insert.sql + +package db + +import ( + "context" +) + +const insertKeyMigration = `-- name: InsertKeyMigration :exec +INSERT INTO key_migrations ( + id, + workspace_id, + algorithm +) VALUES ( + ?, + ?, + ? +) +` + +type InsertKeyMigrationParams struct { + ID string `db:"id"` + WorkspaceID string `db:"workspace_id"` + Algorithm KeyMigrationsAlgorithm `db:"algorithm"` +} + +// InsertKeyMigration +// +// INSERT INTO key_migrations ( +// id, +// workspace_id, +// algorithm +// ) VALUES ( +// ?, +// ?, +// ? +// ) +func (q *Queries) InsertKeyMigration(ctx context.Context, db DBTX, arg InsertKeyMigrationParams) error { + _, err := db.ExecContext(ctx, insertKeyMigration, arg.ID, arg.WorkspaceID, arg.Algorithm) + return err +} diff --git a/go/pkg/db/key_update_hash_and_migration.sql_generated.go b/go/pkg/db/key_update_hash_and_migration.sql_generated.go new file mode 100644 index 0000000000..e824833931 --- /dev/null +++ b/go/pkg/db/key_update_hash_and_migration.sql_generated.go @@ -0,0 +1,49 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: key_update_hash_and_migration.sql + +package db + +import ( + "context" + "database/sql" +) + +const updateKeyHashAndMigration = `-- name: UpdateKeyHashAndMigration :exec +UPDATE ` + "`" + `keys` + "`" + ` +SET + hash = ?, + pending_migration_id = ?, + start = ?, + updated_at_m = ? +WHERE id = ? +` + +type UpdateKeyHashAndMigrationParams struct { + Hash string `db:"hash"` + PendingMigrationID sql.NullString `db:"pending_migration_id"` + Start string `db:"start"` + UpdatedAtM sql.NullInt64 `db:"updated_at_m"` + ID string `db:"id"` +} + +// UpdateKeyHashAndMigration +// +// UPDATE `keys` +// SET +// hash = ?, +// pending_migration_id = ?, +// start = ?, +// updated_at_m = ? +// WHERE id = ? +func (q *Queries) UpdateKeyHashAndMigration(ctx context.Context, db DBTX, arg UpdateKeyHashAndMigrationParams) error { + _, err := db.ExecContext(ctx, updateKeyHashAndMigration, + arg.Hash, + arg.PendingMigrationID, + arg.Start, + arg.UpdatedAtM, + arg.ID, + ) + return err +} diff --git a/go/pkg/db/models_generated.go b/go/pkg/db/models_generated.go index c4f20fc5ce..74458ab5c1 100644 --- a/go/pkg/db/models_generated.go +++ b/go/pkg/db/models_generated.go @@ -278,6 +278,47 @@ func (ns NullDomainsType) Value() (driver.Value, error) { return string(ns.DomainsType), nil } +type KeyMigrationsAlgorithm string + +const ( + KeyMigrationsAlgorithmGithubcomSeamapiPrefixedApiKey KeyMigrationsAlgorithm = "github.com/seamapi/prefixed-api-key" +) + +func (e *KeyMigrationsAlgorithm) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = KeyMigrationsAlgorithm(s) + case string: + *e = KeyMigrationsAlgorithm(s) + default: + return fmt.Errorf("unsupported scan type for KeyMigrationsAlgorithm: %T", src) + } + return nil +} + +type NullKeyMigrationsAlgorithm struct { + KeyMigrationsAlgorithm KeyMigrationsAlgorithm + Valid bool // Valid is true if KeyMigrationsAlgorithm is not NULL +} + +// Scan implements the Scanner interface. 
+func (ns *NullKeyMigrationsAlgorithm) Scan(value interface{}) error { + if value == nil { + ns.KeyMigrationsAlgorithm, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.KeyMigrationsAlgorithm.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullKeyMigrationsAlgorithm) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.KeyMigrationsAlgorithm), nil +} + type RatelimitOverridesSharding string const ( @@ -596,29 +637,30 @@ type Identity struct { } type Key struct { - ID string `db:"id"` - KeyAuthID string `db:"key_auth_id"` - Hash string `db:"hash"` - Start string `db:"start"` - WorkspaceID string `db:"workspace_id"` - ForWorkspaceID sql.NullString `db:"for_workspace_id"` - Name sql.NullString `db:"name"` - OwnerID sql.NullString `db:"owner_id"` - IdentityID sql.NullString `db:"identity_id"` - Meta sql.NullString `db:"meta"` - Expires sql.NullTime `db:"expires"` - CreatedAtM int64 `db:"created_at_m"` - UpdatedAtM sql.NullInt64 `db:"updated_at_m"` - DeletedAtM sql.NullInt64 `db:"deleted_at_m"` - RefillDay sql.NullInt16 `db:"refill_day"` - RefillAmount sql.NullInt32 `db:"refill_amount"` - LastRefillAt sql.NullTime `db:"last_refill_at"` - Enabled bool `db:"enabled"` - RemainingRequests sql.NullInt32 `db:"remaining_requests"` - RatelimitAsync sql.NullBool `db:"ratelimit_async"` - RatelimitLimit sql.NullInt32 `db:"ratelimit_limit"` - RatelimitDuration sql.NullInt64 `db:"ratelimit_duration"` - Environment sql.NullString `db:"environment"` + ID string `db:"id"` + KeyAuthID string `db:"key_auth_id"` + Hash string `db:"hash"` + Start string `db:"start"` + WorkspaceID string `db:"workspace_id"` + ForWorkspaceID sql.NullString `db:"for_workspace_id"` + Name sql.NullString `db:"name"` + OwnerID sql.NullString `db:"owner_id"` + IdentityID sql.NullString `db:"identity_id"` + Meta sql.NullString `db:"meta"` + Expires sql.NullTime `db:"expires"` + CreatedAtM int64 `db:"created_at_m"` + UpdatedAtM 
sql.NullInt64 `db:"updated_at_m"` + DeletedAtM sql.NullInt64 `db:"deleted_at_m"` + RefillDay sql.NullInt16 `db:"refill_day"` + RefillAmount sql.NullInt32 `db:"refill_amount"` + LastRefillAt sql.NullTime `db:"last_refill_at"` + Enabled bool `db:"enabled"` + RemainingRequests sql.NullInt32 `db:"remaining_requests"` + RatelimitAsync sql.NullBool `db:"ratelimit_async"` + RatelimitLimit sql.NullInt32 `db:"ratelimit_limit"` + RatelimitDuration sql.NullInt64 `db:"ratelimit_duration"` + Environment sql.NullString `db:"environment"` + PendingMigrationID sql.NullString `db:"pending_migration_id"` } type KeyAuth struct { @@ -634,6 +676,12 @@ type KeyAuth struct { SizeLastUpdatedAt int64 `db:"size_last_updated_at"` } +type KeyMigration struct { + ID string `db:"id"` + WorkspaceID string `db:"workspace_id"` + Algorithm KeyMigrationsAlgorithm `db:"algorithm"` +} + type KeyMigrationError struct { ID string `db:"id"` MigrationID string `db:"migration_id"` diff --git a/go/pkg/db/querier_bulk_generated.go b/go/pkg/db/querier_bulk_generated.go index 5d6483117b..8a52ea023b 100644 --- a/go/pkg/db/querier_bulk_generated.go +++ b/go/pkg/db/querier_bulk_generated.go @@ -19,6 +19,7 @@ type BulkQuerier interface { InsertKeyEncryptions(ctx context.Context, db DBTX, args []InsertKeyEncryptionParams) error InsertKeys(ctx context.Context, db DBTX, args []InsertKeyParams) error InsertKeyRatelimits(ctx context.Context, db DBTX, args []InsertKeyRatelimitParams) error + InsertKeyMigrations(ctx context.Context, db DBTX, args []InsertKeyMigrationParams) error InsertKeyPermissions(ctx context.Context, db DBTX, args []InsertKeyPermissionParams) error InsertKeyRoles(ctx context.Context, db DBTX, args []InsertKeyRoleParams) error InsertKeyrings(ctx context.Context, db DBTX, args []InsertKeyringParams) error diff --git a/go/pkg/db/querier_generated.go b/go/pkg/db/querier_generated.go index 8abfe5fccb..d04176b8db 100644 --- a/go/pkg/db/querier_generated.go +++ b/go/pkg/db/querier_generated.go @@ -183,6 
+183,12 @@ type Querier interface { // WHERE deployment_id = ? // ORDER BY created_at ASC FindDomainsByDeploymentId(ctx context.Context, db DBTX, deploymentID sql.NullString) ([]FindDomainsByDeploymentIdRow, error) + //FindIdentitiesByExternalId + // + // SELECT id, external_id, workspace_id, environment, meta, deleted, created_at, updated_at + // FROM identities + // WHERE workspace_id = ? AND external_id IN (/*SLICE:externalIds*/?) AND deleted = ? + FindIdentitiesByExternalId(ctx context.Context, db DBTX, arg FindIdentitiesByExternalIdParams) ([]Identity, error) //FindIdentity // // SELECT id, external_id, workspace_id, environment, meta, deleted, created_at, updated_at @@ -193,7 +199,7 @@ type Querier interface { FindIdentity(ctx context.Context, db DBTX, arg FindIdentityParams) (Identity, error) //FindKeyByID // - // SELECT id, key_auth_id, hash, start, workspace_id, for_workspace_id, name, owner_id, identity_id, meta, expires, created_at_m, updated_at_m, deleted_at_m, refill_day, refill_amount, last_refill_at, enabled, remaining_requests, ratelimit_async, ratelimit_limit, ratelimit_duration, environment FROM `keys` k + // SELECT id, key_auth_id, hash, start, workspace_id, for_workspace_id, name, owner_id, identity_id, meta, expires, created_at_m, updated_at_m, deleted_at_m, refill_day, refill_amount, last_refill_at, enabled, remaining_requests, ratelimit_async, ratelimit_limit, ratelimit_duration, environment, pending_migration_id FROM `keys` k // WHERE k.id = ? FindKeyByID(ctx context.Context, db DBTX, id string) (Key, error) //FindKeyCredits @@ -219,11 +225,13 @@ type Querier interface { // k.last_refill_at, // k.enabled, // k.remaining_requests, + // k.pending_migration_id, // a.ip_whitelist, // a.workspace_id as api_workspace_id, // a.id as api_id, // a.deleted_at_m as api_deleted_at_m, // + // // COALESCE( // (SELECT JSON_ARRAYAGG(name) // FROM (SELECT name @@ -283,6 +291,16 @@ type Querier interface { // where k.hash = ? 
// and k.deleted_at_m is null FindKeyForVerification(ctx context.Context, db DBTX, hash string) (FindKeyForVerificationRow, error) + //FindKeyMigrationByID + // + // SELECT + // id, + // workspace_id, + // algorithm + // FROM key_migrations + // WHERE id = ? + // and workspace_id = ? + FindKeyMigrationByID(ctx context.Context, db DBTX, arg FindKeyMigrationByIDParams) (KeyMigration, error) //FindKeyRoleByKeyAndRoleID // // SELECT key_id, role_id, workspace_id, created_at_m, updated_at_m @@ -294,6 +312,10 @@ type Querier interface { // // SELECT id, workspace_id, created_at_m, updated_at_m, deleted_at_m, store_encrypted_keys, default_prefix, default_bytes, size_approx, size_last_updated_at FROM `key_auth` WHERE id = ? FindKeyringByID(ctx context.Context, db DBTX, id string) (KeyAuth, error) + //FindKeysByHash + // + // SELECT id, hash FROM `keys` WHERE hash IN (/*SLICE:hashes*/?) + FindKeysByHash(ctx context.Context, db DBTX, hashes []string) ([]FindKeysByHashRow, error) //FindLiveApiByID // // SELECT apis.id, apis.name, apis.workspace_id, apis.ip_whitelist, apis.auth_type, apis.key_auth_id, apis.created_at_m, apis.updated_at_m, apis.deleted_at_m, apis.delete_protection, ka.id, ka.workspace_id, ka.created_at_m, ka.updated_at_m, ka.deleted_at_m, ka.store_encrypted_keys, ka.default_prefix, ka.default_bytes, ka.size_approx, ka.size_last_updated_at @@ -307,7 +329,7 @@ type Querier interface { //FindLiveKeyByHash // // SELECT - // k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, + // k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, 
k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, // a.id, a.name, a.workspace_id, a.ip_whitelist, a.auth_type, a.key_auth_id, a.created_at_m, a.updated_at_m, a.deleted_at_m, a.delete_protection, // ka.id, ka.workspace_id, ka.created_at_m, ka.updated_at_m, ka.deleted_at_m, ka.store_encrypted_keys, ka.default_prefix, ka.default_bytes, ka.size_approx, ka.size_last_updated_at, // ws.id, ws.org_id, ws.name, ws.slug, ws.partition_id, ws.plan, ws.tier, ws.stripe_customer_id, ws.stripe_subscription_id, ws.beta_features, ws.features, ws.subscriptions, ws.enabled, ws.delete_protection, ws.created_at_m, ws.updated_at_m, ws.deleted_at_m, @@ -398,7 +420,7 @@ type Querier interface { //FindLiveKeyByID // // SELECT - // k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, + // k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, // a.id, a.name, a.workspace_id, a.ip_whitelist, a.auth_type, a.key_auth_id, a.created_at_m, a.updated_at_m, a.deleted_at_m, a.delete_protection, // ka.id, ka.workspace_id, ka.created_at_m, ka.updated_at_m, ka.deleted_at_m, ka.store_encrypted_keys, ka.default_prefix, ka.default_bytes, ka.size_approx, ka.size_last_updated_at, // ws.id, ws.org_id, ws.name, ws.slug, ws.partition_id, ws.plan, ws.tier, ws.stripe_customer_id, ws.stripe_subscription_id, ws.beta_features, ws.features, 
ws.subscriptions, ws.enabled, ws.delete_protection, ws.created_at_m, ws.updated_at_m, ws.deleted_at_m, @@ -962,7 +984,8 @@ type Querier interface { // enabled, // remaining_requests, // refill_day, - // refill_amount + // refill_amount, + // pending_migration_id // ) VALUES ( // ?, // ?, @@ -979,6 +1002,7 @@ type Querier interface { // ?, // ?, // ?, + // ?, // ? // ) InsertKey(ctx context.Context, db DBTX, arg InsertKeyParams) error @@ -988,6 +1012,18 @@ type Querier interface { // (workspace_id, key_id, encrypted, encryption_key_id, created_at) // VALUES (?, ?, ?, ?, ?) InsertKeyEncryption(ctx context.Context, db DBTX, arg InsertKeyEncryptionParams) error + //InsertKeyMigration + // + // INSERT INTO key_migrations ( + // id, + // workspace_id, + // algorithm + // ) VALUES ( + // ?, + // ?, + // ? + // ) + InsertKeyMigration(ctx context.Context, db DBTX, arg InsertKeyMigrationParams) error //InsertKeyPermission // // INSERT INTO `keys_permissions` ( @@ -1251,7 +1287,7 @@ type Querier interface { //ListKeysByKeyAuthID // // SELECT - // k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, + // k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, // i.id as identity_id, // i.external_id as external_id, // i.meta as identity_meta, @@ -1270,7 +1306,7 @@ type Querier interface { ListKeysByKeyAuthID(ctx context.Context, db DBTX, arg ListKeysByKeyAuthIDParams) ([]ListKeysByKeyAuthIDRow, error) 
//ListLiveKeysByKeyAuthID // - // SELECT k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, + // SELECT k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, k.pending_migration_id, // i.id as identity_table_id, // i.external_id as identity_external_id, // i.meta as identity_meta, @@ -1655,6 +1691,16 @@ type Querier interface { // SET remaining_requests = ? // WHERE id = ? UpdateKeyCreditsSet(ctx context.Context, db DBTX, arg UpdateKeyCreditsSetParams) error + //UpdateKeyHashAndMigration + // + // UPDATE `keys` + // SET + // hash = ?, + // pending_migration_id = ?, + // start = ?, + // updated_at_m = ? + // WHERE id = ? + UpdateKeyHashAndMigration(ctx context.Context, db DBTX, arg UpdateKeyHashAndMigrationParams) error //UpdateKeyringKeyEncryption // // UPDATE `key_auth` SET store_encrypted_keys = ? WHERE id = ? diff --git a/go/pkg/db/queries/identity_find_many_by_external_id.sql b/go/pkg/db/queries/identity_find_many_by_external_id.sql new file mode 100644 index 0000000000..e4c1f8689d --- /dev/null +++ b/go/pkg/db/queries/identity_find_many_by_external_id.sql @@ -0,0 +1,4 @@ +-- name: FindIdentitiesByExternalId :many +SELECT * +FROM identities +WHERE workspace_id = ? 
AND external_id IN (sqlc.slice('externalIds')) AND deleted = ?; diff --git a/go/pkg/db/queries/key_find_for_verification.sql b/go/pkg/db/queries/key_find_for_verification.sql index d649de3567..dcc7d566b6 100644 --- a/go/pkg/db/queries/key_find_for_verification.sql +++ b/go/pkg/db/queries/key_find_for_verification.sql @@ -12,11 +12,13 @@ select k.id, k.last_refill_at, k.enabled, k.remaining_requests, + k.pending_migration_id, a.ip_whitelist, a.workspace_id as api_workspace_id, a.id as api_id, a.deleted_at_m as api_deleted_at_m, + COALESCE( (SELECT JSON_ARRAYAGG(name) FROM (SELECT name diff --git a/go/pkg/db/queries/key_find_many_by_hash.sql b/go/pkg/db/queries/key_find_many_by_hash.sql new file mode 100644 index 0000000000..631aa50528 --- /dev/null +++ b/go/pkg/db/queries/key_find_many_by_hash.sql @@ -0,0 +1,2 @@ +-- name: FindKeysByHash :many +SELECT id, hash FROM `keys` WHERE hash IN (sqlc.slice(hashes)); diff --git a/go/pkg/db/queries/key_insert.sql b/go/pkg/db/queries/key_insert.sql index 072e9fea12..5fb9fc1d24 100644 --- a/go/pkg/db/queries/key_insert.sql +++ b/go/pkg/db/queries/key_insert.sql @@ -15,7 +15,8 @@ INSERT INTO `keys` ( enabled, remaining_requests, refill_day, - refill_amount + refill_amount, + pending_migration_id ) VALUES ( sqlc.arg(id), sqlc.arg(keyring_id), @@ -32,5 +33,6 @@ INSERT INTO `keys` ( sqlc.arg(enabled), sqlc.arg(remaining_requests), sqlc.arg(refill_day), - sqlc.arg(refill_amount) + sqlc.arg(refill_amount), + sqlc.arg(pending_migration_id) ); diff --git a/go/pkg/db/queries/key_migration_find_by_id.sql b/go/pkg/db/queries/key_migration_find_by_id.sql new file mode 100644 index 0000000000..5273873895 --- /dev/null +++ b/go/pkg/db/queries/key_migration_find_by_id.sql @@ -0,0 +1,8 @@ +-- name: FindKeyMigrationByID :one +SELECT + id, + workspace_id, + algorithm +FROM key_migrations +WHERE id = sqlc.arg(id) +and workspace_id = sqlc.arg(workspace_id); diff --git a/go/pkg/db/queries/key_migration_insert.sql 
b/go/pkg/db/queries/key_migration_insert.sql new file mode 100644 index 0000000000..b9c127e38a --- /dev/null +++ b/go/pkg/db/queries/key_migration_insert.sql @@ -0,0 +1,10 @@ +-- name: InsertKeyMigration :exec +INSERT INTO key_migrations ( + id, + workspace_id, + algorithm +) VALUES ( + sqlc.arg(id), + sqlc.arg(workspace_id), + sqlc.arg(algorithm) +); \ No newline at end of file diff --git a/go/pkg/db/queries/key_update_hash_and_migration.sql b/go/pkg/db/queries/key_update_hash_and_migration.sql new file mode 100644 index 0000000000..69e8080968 --- /dev/null +++ b/go/pkg/db/queries/key_update_hash_and_migration.sql @@ -0,0 +1,8 @@ +-- name: UpdateKeyHashAndMigration :exec +UPDATE `keys` +SET + hash = sqlc.arg(hash), + pending_migration_id = sqlc.arg(pending_migration_id), + start = sqlc.arg(start), + updated_at_m = sqlc.arg(updated_at_m) +WHERE id = sqlc.arg(id); diff --git a/go/pkg/db/schema.sql b/go/pkg/db/schema.sql index 9a67348e57..9a8226386b 100644 --- a/go/pkg/db/schema.sql +++ b/go/pkg/db/schema.sql @@ -95,6 +95,13 @@ CREATE TABLE `encrypted_keys` ( CONSTRAINT `key_id_idx` UNIQUE(`key_id`) ); +CREATE TABLE `key_migrations` ( + `id` varchar(255) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `algorithm` enum('github.com/seamapi/prefixed-api-key') NOT NULL, + CONSTRAINT `key_migrations_id_workspace_id_pk` PRIMARY KEY(`id`,`workspace_id`) +); + CREATE TABLE `keys` ( `id` varchar(256) NOT NULL, `key_auth_id` varchar(256) NOT NULL, @@ -119,6 +126,7 @@ CREATE TABLE `keys` ( `ratelimit_limit` int, `ratelimit_duration` bigint, `environment` varchar(256), + `pending_migration_id` varchar(256), CONSTRAINT `keys_id` PRIMARY KEY(`id`), CONSTRAINT `hash_idx` UNIQUE(`hash`) ); @@ -391,6 +399,7 @@ CREATE TABLE `acme_challenges` ( CREATE INDEX `workspace_id_idx` ON `apis` (`workspace_id`); CREATE INDEX `workspace_id_idx` ON `roles` (`workspace_id`); CREATE INDEX `key_auth_id_deleted_at_idx` ON `keys` (`key_auth_id`,`deleted_at_m`); +CREATE INDEX 
`pending_migration_id_idx` ON `keys` (`pending_migration_id`); CREATE INDEX `idx_keys_on_for_workspace_id` ON `keys` (`for_workspace_id`); CREATE INDEX `idx_keys_on_workspace_id` ON `keys` (`workspace_id`); CREATE INDEX `owner_id_idx` ON `keys` (`owner_id`); diff --git a/go/pkg/prefixedapikey/LICENSE b/go/pkg/prefixedapikey/LICENSE new file mode 100644 index 0000000000..95aa932ddc --- /dev/null +++ b/go/pkg/prefixedapikey/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Seam + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/go/pkg/prefixedapikey/prefixedapikey.go b/go/pkg/prefixedapikey/prefixedapikey.go new file mode 100644 index 0000000000..6052341371 --- /dev/null +++ b/go/pkg/prefixedapikey/prefixedapikey.go @@ -0,0 +1,177 @@ +package prefixedapikey + +// This Go package is a port of the https://github.com/seamapi/prefixed-api-key, licensed under MIT. +// See License for copyright and license information. 
+ +import ( + "crypto/rand" + "crypto/sha256" + "crypto/subtle" + "encoding/hex" + "fmt" + "strings" + + "github.com/btcsuite/btcutil/base58" +) + +// GenerateAPIKeyOptions holds the options for generating an API key +type GenerateAPIKeyOptions struct { + KeyPrefix string + ShortTokenPrefix string + ShortTokenLength int + LongTokenLength int +} + +// APIKey represents the generated API key components +type APIKey struct { + ShortToken string + LongToken string + LongTokenHash string + Token string +} + +// hashLongTokenToBytes hashes a long token using SHA256 and returns the bytes +func hashLongTokenToBytes(longToken string) []byte { + hash := sha256.Sum256([]byte(longToken)) + return hash[:] +} + +// HashLongToken hashes a long token using SHA256 and returns hex string +func HashLongToken(longToken string) string { + return hex.EncodeToString(hashLongTokenToBytes(longToken)) +} + +// padStart pads a string with a character to reach the specified length +func padStart(str string, length int, padChar string) string { + if len(str) >= length { + return str + } + padding := strings.Repeat(padChar, length-len(str)) + return padding + str +} + +// GenerateAPIKey generates a new API key with the given options +func GenerateAPIKey(opts *GenerateAPIKeyOptions) (*APIKey, error) { + // Set default values if not provided + if opts == nil { + opts = &GenerateAPIKeyOptions{} + } + if opts.KeyPrefix == "" { + return &APIKey{}, nil + } + + if opts.ShortTokenPrefix == "" { + opts.ShortTokenPrefix = "" + } + + if opts.ShortTokenLength == 0 { + opts.ShortTokenLength = 8 + } + + if opts.LongTokenLength == 0 { + opts.LongTokenLength = 24 + } + + // Generate random bytes for tokens + shortTokenBytes := make([]byte, opts.ShortTokenLength) + longTokenBytes := make([]byte, opts.LongTokenLength) + + if _, err := rand.Read(shortTokenBytes); err != nil { + return nil, fmt.Errorf("failed to generate short token: %w", err) + } + if _, err := rand.Read(longTokenBytes); err != nil { + return 
nil, fmt.Errorf("failed to generate long token: %w", err) + } + + // Encode tokens using base58 + shortToken := padStart( + base58.Encode(shortTokenBytes), + opts.ShortTokenLength, + "0", + ) + if len(shortToken) > opts.ShortTokenLength { + shortToken = shortToken[:opts.ShortTokenLength] + } + + longToken := padStart( + base58.Encode(longTokenBytes), + opts.LongTokenLength, + "0", + ) + if len(longToken) > opts.LongTokenLength { + longToken = longToken[:opts.LongTokenLength] + } + + // Hash the long token + longTokenHash := HashLongToken(longToken) + + // Add prefix to short token and trim if necessary + shortToken = (opts.ShortTokenPrefix + shortToken) + if len(shortToken) > opts.ShortTokenLength { + shortToken = shortToken[:opts.ShortTokenLength] + } + + // Construct the full token + token := fmt.Sprintf("%s_%s_%s", opts.KeyPrefix, shortToken, longToken) + + return &APIKey{ + ShortToken: shortToken, + LongToken: longToken, + LongTokenHash: longTokenHash, + Token: token, + }, nil +} + +// ExtractLongToken extracts the long token from a full API key +func ExtractLongToken(token string) string { + parts := strings.Split(token, "_") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return "" +} + +// ExtractShortToken extracts the short token from a full API key +func ExtractShortToken(token string) string { + parts := strings.Split(token, "_") + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// ExtractLongTokenHash extracts and hashes the long token from a full API key +func ExtractLongTokenHash(token string) string { + return HashLongToken(ExtractLongToken(token)) +} + +// TokenComponents represents the components of an API key +type TokenComponents struct { + LongToken string + ShortToken string + LongTokenHash string + Token string +} + +// GetTokenComponents extracts all components from a full API key +func GetTokenComponents(token string) *TokenComponents { + return &TokenComponents{ + LongToken: ExtractLongToken(token), + ShortToken: 
ExtractShortToken(token), + LongTokenHash: HashLongToken(ExtractLongToken(token)), + Token: token, + } +} + +// CheckAPIKey verifies if a token matches the expected long token hash using constant-time comparison +func CheckAPIKey(token string, expectedLongTokenHash string) bool { + expectedLongTokenHashBytes, err := hex.DecodeString(expectedLongTokenHash) + if err != nil { + return false + } + + inputLongTokenHashBytes := hashLongTokenToBytes(ExtractLongToken(token)) + + // Use constant-time comparison to prevent timing attacks + return subtle.ConstantTimeCompare(expectedLongTokenHashBytes, inputLongTokenHashBytes) == 1 +} diff --git a/go/pkg/prefixedapikey/prefixedapikey_test.go b/go/pkg/prefixedapikey/prefixedapikey_test.go new file mode 100644 index 0000000000..0e7770b3e9 --- /dev/null +++ b/go/pkg/prefixedapikey/prefixedapikey_test.go @@ -0,0 +1,370 @@ +package prefixedapikey + +import ( + "testing" +) + +// exampleKey represents a known test key for consistent testing +var exampleKey = &APIKey{ + ShortToken: "12345678", + LongToken: "abcdefghijklmnopqrstuvwx", + LongTokenHash: HashLongToken("abcdefghijklmnopqrstuvwx"), + Token: "test_12345678_abcdefghijklmnopqrstuvwx", +} + +func TestHashLongToken(t *testing.T) { + result := HashLongToken(exampleKey.LongToken) + expected := exampleKey.LongTokenHash + + if result != expected { + t.Errorf("HashLongToken() = %v, want %v", result, expected) + } +} + +func TestExtractLongToken(t *testing.T) { + result := ExtractLongToken(exampleKey.Token) + expected := exampleKey.LongToken + + if result != expected { + t.Errorf("ExtractLongToken() = %v, want %v", result, expected) + } + + // Additional test cases + tests := []struct { + name string + token string + expected string + }{ + { + name: "standard token format", + token: "test_12345678_abcdefghijklmnopqrstuvwx", + expected: "abcdefghijklmnopqrstuvwx", + }, + { + name: "token with multiple underscores", + token: "prefix_with_underscores_short_long", + expected: "long", + 
}, + { + name: "single underscore", + token: "prefix_longtoken", + expected: "longtoken", + }, + { + name: "no underscores", + token: "notokenstructure", + expected: "notokenstructure", + }, + { + name: "empty token", + token: "", + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ExtractLongToken(tt.token) + if result != tt.expected { + t.Errorf("ExtractLongToken(%v) = %v, want %v", tt.token, result, tt.expected) + } + }) + } +} + +func TestExtractShortToken(t *testing.T) { + result := ExtractShortToken(exampleKey.Token) + expected := exampleKey.ShortToken + + if result != expected { + t.Errorf("ExtractShortToken() = %v, want %v", result, expected) + } + + // Additional test cases + tests := []struct { + name string + token string + expected string + }{ + { + name: "standard token format", + token: "test_12345678_abcdefghijklmnopqrstuvwx", + expected: "12345678", + }, + { + name: "token with multiple underscores", + token: "prefix_with_underscores_short_long", + expected: "with", + }, + { + name: "single underscore", + token: "prefix_shorttoken", + expected: "shorttoken", + }, + { + name: "no underscores", + token: "notokenstructure", + expected: "", + }, + { + name: "empty token", + token: "", + expected: "", + }, + { + name: "only prefix", + token: "prefix_", + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ExtractShortToken(tt.token) + if result != tt.expected { + t.Errorf("ExtractShortToken(%v) = %v, want %v", tt.token, result, tt.expected) + } + }) + } +} + +func TestGetTokenComponents(t *testing.T) { + result := GetTokenComponents(exampleKey.Token) + + expected := &TokenComponents{ + LongToken: exampleKey.LongToken, + ShortToken: exampleKey.ShortToken, + LongTokenHash: exampleKey.LongTokenHash, + Token: exampleKey.Token, + } + + if result.LongToken != expected.LongToken { + t.Errorf("GetTokenComponents().LongToken = %v, want %v", result.LongToken, 
expected.LongToken) + } + + if result.ShortToken != expected.ShortToken { + t.Errorf("GetTokenComponents().ShortToken = %v, want %v", result.ShortToken, expected.ShortToken) + } + + if result.LongTokenHash != expected.LongTokenHash { + t.Errorf("GetTokenComponents().LongTokenHash = %v, want %v", result.LongTokenHash, expected.LongTokenHash) + } + + if result.Token != expected.Token { + t.Errorf("GetTokenComponents().Token = %v, want %v", result.Token, expected.Token) + } +} + +func TestCheckAPIKey(t *testing.T) { + result := CheckAPIKey(exampleKey.Token, exampleKey.LongTokenHash) + expected := true + + if result != expected { + t.Errorf("CheckAPIKey() = %v, want %v", result, expected) + } + + // Additional test cases + invalidHash := "invalid_hash" + tests := []struct { + name string + token string + expectedHash string + expected bool + }{ + { + name: "valid token and hash", + token: exampleKey.Token, + expectedHash: exampleKey.LongTokenHash, + expected: true, + }, + { + name: "invalid hash", + token: exampleKey.Token, + expectedHash: invalidHash, + expected: false, + }, + { + name: "empty token", + token: "", + expectedHash: exampleKey.LongTokenHash, + expected: false, + }, + { + name: "empty hash", + token: exampleKey.Token, + expectedHash: "", + expected: false, + }, + { + name: "malformed token", + token: "malformed_token", + expectedHash: exampleKey.LongTokenHash, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := CheckAPIKey(tt.token, tt.expectedHash) + if result != tt.expected { + t.Errorf("CheckAPIKey(%v, %v) = %v, want %v", tt.token, tt.expectedHash, result, tt.expected) + } + }) + } +} + +func TestGenerateAPIKey(t *testing.T) { + tests := []struct { + name string + opts *GenerateAPIKeyOptions + hasError bool + }{ + { + name: "standard generation", + opts: &GenerateAPIKeyOptions{ + KeyPrefix: "test", + ShortTokenPrefix: "", + ShortTokenLength: 8, + LongTokenLength: 24, + }, + hasError: false, + }, 
+ { + name: "with short token prefix", + opts: &GenerateAPIKeyOptions{ + KeyPrefix: "api", + ShortTokenPrefix: "dev", + ShortTokenLength: 10, + LongTokenLength: 32, + }, + hasError: false, + }, + { + name: "minimal options", + opts: &GenerateAPIKeyOptions{ + KeyPrefix: "min", + }, + hasError: false, + }, + { + name: "nil options with empty result", + opts: nil, + hasError: false, + }, + { + name: "empty key prefix returns empty", + opts: &GenerateAPIKeyOptions{ + KeyPrefix: "", + }, + hasError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := GenerateAPIKey(tt.opts) + + if tt.hasError { + if err == nil { + t.Errorf("GenerateAPIKey() expected error, got nil") + } + return + } + + if err != nil { + t.Errorf("GenerateAPIKey() unexpected error: %v", err) + return + } + + // If we expect an empty result (no key prefix), check that + if tt.opts == nil || tt.opts.KeyPrefix == "" { + if result.Token != "" || result.ShortToken != "" || result.LongToken != "" { + t.Errorf("GenerateAPIKey() expected empty result, got non-empty values") + } + return + } + + // Validate the generated key structure + if result.Token == "" { + t.Errorf("GenerateAPIKey() generated empty token") + } + + if result.ShortToken == "" { + t.Errorf("GenerateAPIKey() generated empty short token") + } + + if result.LongToken == "" { + t.Errorf("GenerateAPIKey() generated empty long token") + } + + if result.LongTokenHash == "" { + t.Errorf("GenerateAPIKey() generated empty long token hash") + } + + // Verify the hash matches + expectedHash := HashLongToken(result.LongToken) + if result.LongTokenHash != expectedHash { + t.Errorf("GenerateAPIKey() hash mismatch: got %v, want %v", result.LongTokenHash, expectedHash) + } + + // Verify token structure + components := GetTokenComponents(result.Token) + if components.LongToken != result.LongToken { + t.Errorf("GenerateAPIKey() token structure invalid: long token mismatch") + } + + if components.ShortToken != 
result.ShortToken { + t.Errorf("GenerateAPIKey() token structure invalid: short token mismatch") + } + + // Verify token can be validated + if !CheckAPIKey(result.Token, result.LongTokenHash) { + t.Errorf("GenerateAPIKey() generated token fails validation") + } + }) + } +} + +func TestGenerateAPIKeyConsistency(t *testing.T) { + opts := &GenerateAPIKeyOptions{ + KeyPrefix: "test", + ShortTokenLength: 8, + LongTokenLength: 24, + } + + // Generate multiple keys to ensure they're different + keys := make([]*APIKey, 10) + for i := 0; i < 10; i++ { + key, err := GenerateAPIKey(opts) + if err != nil { + t.Fatalf("GenerateAPIKey() unexpected error: %v", err) + } + keys[i] = key + } + + // Ensure all generated keys are unique + for i := 0; i < len(keys); i++ { + for j := i + 1; j < len(keys); j++ { + if keys[i].Token == keys[j].Token { + t.Errorf("GenerateAPIKey() generated duplicate tokens: %v", keys[i].Token) + } + if keys[i].LongToken == keys[j].LongToken { + t.Errorf("GenerateAPIKey() generated duplicate long tokens") + } + if keys[i].ShortToken == keys[j].ShortToken { + t.Errorf("GenerateAPIKey() generated duplicate short tokens") + } + } + } +} + +func TestExtractLongTokenHash(t *testing.T) { + token := "test_12345678_abcdefghijklmnopqrstuvwx" + + result := ExtractLongTokenHash(token) + expected := HashLongToken("abcdefghijklmnopqrstuvwx") + + if result != expected { + t.Errorf("ExtractLongTokenHash(%v) = %v, want %v", token, result, expected) + } +} diff --git a/go/pkg/zen/middleware_errors.go b/go/pkg/zen/middleware_errors.go index 13b9ede930..7888a88302 100644 --- a/go/pkg/zen/middleware_errors.go +++ b/go/pkg/zen/middleware_errors.go @@ -37,6 +37,7 @@ func WithErrorHandling(logger logging.Logger) Middleware { case codes.UnkeyDataErrorsKeyNotFound, codes.UnkeyDataErrorsWorkspaceNotFound, codes.UnkeyDataErrorsApiNotFound, + codes.UnkeyDataErrorsMigrationNotFound, codes.UnkeyDataErrorsPermissionNotFound, codes.UnkeyDataErrorsRoleNotFound, 
codes.UnkeyDataErrorsKeyAuthNotFound, diff --git a/go/pkg/zen/middleware_openapi_validation.go b/go/pkg/zen/middleware_openapi_validation.go index 429dc2e951..6dc832f288 100644 --- a/go/pkg/zen/middleware_openapi_validation.go +++ b/go/pkg/zen/middleware_openapi_validation.go @@ -24,14 +24,13 @@ import ( func WithValidation(validator *validation.Validator) Middleware { return func(next HandleFunc) HandleFunc { return func(ctx context.Context, s *Session) error { - err, valid := validator.Validate(ctx, s.r) if !valid { err.Meta.RequestId = s.requestID - return s.JSON(err.Error.Status, err) } + return next(ctx, s) } } diff --git a/internal/db/src/schema/keys.ts b/internal/db/src/schema/keys.ts index 220d677f3a..999d4f89be 100644 --- a/internal/db/src/schema/keys.ts +++ b/internal/db/src/schema/keys.ts @@ -5,7 +5,9 @@ import { datetime, index, int, + mysqlEnum, mysqlTable, + primaryKey, text, tinyint, uniqueIndex, @@ -81,6 +83,8 @@ export const keys = mysqlTable( * common settings can be configured by the user. 
*/ environment: varchar("environment", { length: 256 }), + + pendingMigrationId: varchar("pending_migration_id", { length: 256 }), }, (table) => ({ hashIndex: uniqueIndex("hash_idx").on(table.hash), @@ -88,6 +92,7 @@ export const keys = mysqlTable( table.keyAuthId, table.deletedAtM, ), + pendingMigrationIdIndex: index("pending_migration_id_idx").on(table.pendingMigrationId), forWorkspaceIdIndex: index("idx_keys_on_for_workspace_id").on(table.forWorkspaceId), workspaceIdIndex: index("idx_keys_on_workspace_id").on(table.workspaceId), ownerIdIndex: index("owner_id_idx").on(table.ownerId), @@ -152,3 +157,15 @@ export const encryptedKeysRelations = relations(encryptedKeys, ({ one }) => ({ references: [workspaces.id], }), })); + +export const keyMigrations = mysqlTable( + "key_migrations", + { + id: varchar("id", { length: 255 }), + workspaceId: varchar("workspace_id", { length: 256 }).notNull(), + algorithm: mysqlEnum("algorithm", ["github.com/seamapi/prefixed-api-key"]).notNull(), + }, + (table) => ({ + idWorkspacePk: primaryKey({ columns: [table.id, table.workspaceId] }), + }), +); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 20e9011b57..f1f8c811bc 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -599,7 +599,7 @@ importers: version: link:../../packages/error ai: specifier: ^3.0.23 - version: 3.0.23(react@18.3.1)(solid-js@1.9.9)(svelte@4.2.20)(vue@3.5.20)(zod@3.23.8) + version: 3.0.23(react@18.3.1)(solid-js@1.9.9)(svelte@4.2.20)(vue@3.5.21)(zod@3.23.8) zod: specifier: 3.23.8 version: 3.23.8 @@ -621,7 +621,7 @@ importers: devDependencies: checkly: specifier: latest - version: 6.5.0(@types/node@20.14.9)(typescript@5.5.3) + version: 6.0.1(@types/node@20.14.9)(typescript@5.5.3) ts-node: specifier: 10.9.1 version: 10.9.1(@types/node@20.14.9)(typescript@5.5.3) @@ -867,7 +867,7 @@ importers: version: 18.3.1 react-email: specifier: 2.1.1 - version: 2.1.1(eslint@9.34.0) + version: 2.1.1(eslint@9.35.0) resend: specifier: ^4.4.0 version: 
4.4.0(react-dom@18.3.1)(react@18.3.1) @@ -916,7 +916,7 @@ importers: version: 1.0.5(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) '@radix-ui/react-select': specifier: ^2.0.0 - version: 2.0.0(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + version: 2.0.0(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) '@radix-ui/react-separator': specifier: ^1.0.3 version: 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) @@ -925,7 +925,7 @@ importers: version: 1.1.0(@types/react@18.3.11)(react@18.3.1) '@radix-ui/react-tooltip': specifier: ^1.0.7 - version: 1.0.7(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + version: 1.0.7(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) '@unkey/icons': specifier: workspace:^ version: link:../icons @@ -1573,18 +1573,22 @@ packages: '@babel/types': 7.28.2 dev: false - /@babel/parser@7.28.3: - resolution: {integrity: sha512-7+Ey1mAgYqFAx2h0RuoxcQT5+MlG3GTV0TQrgr7/ZliKsm/MNDxVVutlWaziMq7wJNAz8MTqz55XLpWvva6StA==} + /@babel/parser@7.28.4: + resolution: {integrity: sha512-yZbBqeM6TkpP9du/I2pUZnJsRMGGvOuIrhjzC1AwHwW+6he4mni6Bp/m8ijn0iOuZuPI2BfkCoSRunpyjnrQKg==} engines: {node: '>=6.0.0'} hasBin: true dependencies: - '@babel/types': 7.28.2 + '@babel/types': 7.28.4 dev: false /@babel/runtime@7.28.3: resolution: {integrity: sha512-9uIQ10o0WGdpP6GDhXcdOJPJuDgFtIDtN/9+ArJQ2NAfAmiuhTQdzkaTGR33v43GYS2UrSA0eX2pPPHoFVvpxA==} engines: {node: '>=6.9.0'} + /@babel/runtime@7.28.4: + resolution: {integrity: sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==} + engines: {node: '>=6.9.0'} + /@babel/types@7.28.2: resolution: {integrity: sha512-ruv7Ae4J5dUYULmeXw1gmb7rYRz57OWCPM57pHojnLq/3Z1CK2lNSLTCVjxVk1F/TZHwOZZrOWi0ur95BbLxNQ==} engines: {node: '>=6.9.0'} @@ -1593,6 +1597,14 @@ packages: '@babel/helper-validator-identifier': 7.27.1 dev: false + /@babel/types@7.28.4: + resolution: 
{integrity: sha512-bkFqkLhh3pMBUQQkpVgWDWq/lqzc2678eUyDlTBhRqhCHFguYYGM0Efga7tYk4TogG/3x0EEl66/OQ+WGbWB/Q==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-string-parser': 7.27.1 + '@babel/helper-validator-identifier': 7.27.1 + dev: false + /@balena/dockerignore@1.0.2: resolution: {integrity: sha512-wMue2Sy4GAVTk6Ic4tJVcnfdau+gx2EnG7S+uAEe+TWJFqE4YoWN4/H8MSLj4eYJKxGg26lZwboEniNiNwZQ6Q==} dev: true @@ -2188,8 +2200,8 @@ packages: resolution: {integrity: sha512-z33Il7l5dKjUgGULTqBsQBQwckHh5AbIuxhdsIxDDiZAzBOrZO6q9ogcWC65kU382AfynTfgNumVcNIjuIua6w==} dev: true - /@emnapi/runtime@1.4.5: - resolution: {integrity: sha512-++LApOtY0pEEz1zrd9vy1/zXVaVJJ/EbAF3u0fXIzPJEDtnITsBGbbK0EkM72amhl/R5b+5xx0Y/QhcVOpuulg==} + /@emnapi/runtime@1.5.0: + resolution: {integrity: sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==} requiresBuild: true dependencies: tslib: 2.8.1 @@ -3671,13 +3683,13 @@ packages: dev: true optional: true - /@eslint-community/eslint-utils@4.7.0(eslint@9.34.0): - resolution: {integrity: sha512-dyybb3AcajC7uha6CvhdVRJqaKyn7w2YKqKyAN37NKYgZT36w+iRb0Dymmc5qEJ549c/S31cMMSFd75bteCpCw==} + /@eslint-community/eslint-utils@4.9.0(eslint@9.35.0): + resolution: {integrity: sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 dependencies: - eslint: 9.34.0 + eslint: 9.35.0 eslint-visitor-keys: 3.4.3 dev: false @@ -3726,8 +3738,8 @@ packages: - supports-color dev: false - /@eslint/js@9.34.0: - resolution: {integrity: sha512-EoyvqQnBNsV1CWaEJ559rxXL4c8V92gxirbawSmVUOWXlsRxxQXl6LmCpdUblgxgSkDIqKnhzba2SjRTI/A5Rw==} + /@eslint/js@9.35.0: + resolution: {integrity: sha512-30iXE9whjlILfWobBkNerJo+TXYsgVM5ERQwMcMKCHckHflCmf7wXDAHlARoWnh0s1U72WqlbeyE7iAcCzuCPw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} dev: false @@ -3853,12 +3865,12 @@ packages: engines: {node: 
'>=18.18.0'} dev: false - /@humanfs/node@0.16.6: - resolution: {integrity: sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==} + /@humanfs/node@0.16.7: + resolution: {integrity: sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==} engines: {node: '>=18.18.0'} dependencies: '@humanfs/core': 0.19.1 - '@humanwhocodes/retry': 0.3.1 + '@humanwhocodes/retry': 0.4.3 dev: false /@humanwhocodes/module-importer@1.0.1: @@ -3866,11 +3878,6 @@ packages: engines: {node: '>=12.22'} dev: false - /@humanwhocodes/retry@0.3.1: - resolution: {integrity: sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==} - engines: {node: '>=18.18'} - dev: false - /@humanwhocodes/retry@0.4.3: resolution: {integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==} engines: {node: '>=18.18'} @@ -4205,7 +4212,7 @@ packages: cpu: [wasm32] requiresBuild: true dependencies: - '@emnapi/runtime': 1.4.5 + '@emnapi/runtime': 1.5.0 dev: true optional: true @@ -4215,7 +4222,7 @@ packages: cpu: [wasm32] requiresBuild: true dependencies: - '@emnapi/runtime': 1.4.5 + '@emnapi/runtime': 1.5.0 dev: false optional: true @@ -5684,7 +5691,7 @@ packages: semver: 7.7.2 string-width: 4.2.3 supports-color: 8.1.1 - tinyglobby: 0.2.14 + tinyglobby: 0.2.15 widest-line: 3.1.0 wordwrap: 1.0.0 wrap-ansi: 7.0.0 @@ -7097,7 +7104,7 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 '@radix-ui/primitive': 1.0.1 '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.11)(react@18.3.1) '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) @@ -7627,7 +7634,7 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 '@floating-ui/react-dom': 2.1.6(react-dom@18.3.1)(react@18.3.1) 
'@radix-ui/react-arrow': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.11)(react@18.3.1) @@ -7674,6 +7681,35 @@ packages: react-dom: 18.3.1(react@18.3.1) dev: false + /@radix-ui/react-popper@1.1.3(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1): + resolution: {integrity: sha512-cKpopj/5RHZWjrbF2846jBNacjQVwkP068DfmgrNJXpvVWrOvlAmE9xSiy5OqeE+Gi8D9fP+oDhUnPqNMY8/5w==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 + react-dom: ^16.8 || ^17.0 || ^18.0 + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + dependencies: + '@babel/runtime': 7.28.4 + '@floating-ui/react-dom': 2.1.6(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-arrow': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-context': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-use-rect': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-use-size': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/rect': 1.0.1 + '@types/react': 18.3.11 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + dev: false + /@radix-ui/react-popper@1.2.0(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1): resolution: {integrity: sha512-ZnRMshKF43aBxVWPWvbj21+7TQCvhuULWJ4gNIKYpRlQt5xGRhLx66tMp8pya2UkGHTSlhpXwmjqltDYHhw7Vg==} peerDependencies: @@ -7774,7 +7810,7 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 
'@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) '@types/react': 18.3.11 '@types/react-dom': 18.3.0 @@ -8087,7 +8123,7 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 '@radix-ui/primitive': 1.0.1 '@radix-ui/react-collection': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.11)(react@18.3.1) @@ -8257,6 +8293,46 @@ packages: react-remove-scroll: 2.5.5(@types/react@18.3.11)(react@18.3.1) dev: false + /@radix-ui/react-select@2.0.0(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1): + resolution: {integrity: sha512-RH5b7af4oHtkcHS7pG6Sgv5rk5Wxa7XI8W5gvB1N/yiuDGZxko1ynvOiVhFM7Cis2A8zxF9bTOUVbRDzPepe6w==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 + react-dom: ^16.8 || ^17.0 || ^18.0 + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + dependencies: + '@babel/runtime': 7.28.3 + '@radix-ui/number': 1.0.1 + '@radix-ui/primitive': 1.0.1 + '@radix-ui/react-collection': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-context': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-direction': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.0.5(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-focus-guards': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-focus-scope': 1.0.4(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-id': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-popper': 1.1.3(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + 
'@radix-ui/react-portal': 1.0.4(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-slot': 1.0.2(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-use-previous': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-visually-hidden': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@types/react': 18.3.11 + aria-hidden: 1.2.6 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + react-remove-scroll: 2.5.5(@types/react@18.3.11)(react@18.3.1) + dev: false + /@radix-ui/react-select@2.2.5(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1): resolution: {integrity: sha512-HnMTdXEVuuyzx63ME0ut4+sEMYW6oouHWNGUZc7ddvUWIcfCva/AMoqEW/3wnEllriMWBa0RHspCYnfCWJQYmA==} peerDependencies: @@ -8483,7 +8559,7 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 '@radix-ui/primitive': 1.0.1 '@radix-ui/react-context': 1.0.1(@types/react@18.3.11)(react@18.3.1) '@radix-ui/react-direction': 1.0.1(@types/react@18.3.11)(react@18.3.1) @@ -8510,7 +8586,7 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 '@radix-ui/primitive': 1.0.1 '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.3.11)(react@18.3.1) @@ -8533,7 +8609,7 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 '@radix-ui/primitive': 1.0.1 
'@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.11)(react@18.3.1) '@radix-ui/react-context': 1.0.1(@types/react@18.3.11)(react@18.3.1) @@ -8584,6 +8660,37 @@ packages: react-dom: 18.3.1(react@18.3.1) dev: false + /@radix-ui/react-tooltip@1.0.7(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1): + resolution: {integrity: sha512-lPh5iKNFVQ/jav/j6ZrWq3blfDJ0OH9R6FlNUHPMqdLuQ9vwDgFsRxvl8b7Asuy5c8xmoojHUxKHQSOAvMHxyw==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 + react-dom: ^16.8 || ^17.0 || ^18.0 + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + dependencies: + '@babel/runtime': 7.28.3 + '@radix-ui/primitive': 1.0.1 + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-context': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.0.5(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-id': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-popper': 1.1.3(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-portal': 1.0.4(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-presence': 1.0.1(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@radix-ui/react-slot': 1.0.2(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.3.11)(react@18.3.1) + '@radix-ui/react-visually-hidden': 1.0.3(@types/react-dom@18.3.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1) + '@types/react': 18.3.11 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + dev: false + /@radix-ui/react-use-callback-ref@1.0.1(@types/react@18.3.11)(react@18.3.1): resolution: {integrity: 
sha512-D94LjX4Sp0xJFVaoQOd3OO9k7tpBYNOXdVhkltUbGv2Qb9OXdrg/CpsjlZv7ia14Sylv398LswWBVVu5nqKzAQ==} peerDependencies: @@ -9967,7 +10074,7 @@ packages: engines: {node: '>=18'} dependencies: '@babel/code-frame': 7.27.1 - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 '@types/aria-query': 5.0.4 aria-query: 5.3.0 dom-accessibility-api: 0.5.16 @@ -10352,6 +10459,13 @@ packages: resolution: {integrity: sha512-K7DIaHnh0mzVxreCR9qwgNxp3MH9dltPNIEddW9MYUlcKAzm+3grKNSTe2vCJHI1FaLpvpL5JGJrz1UZDKYvDg==} dependencies: undici-types: 5.26.5 + dev: false + + /@types/node@18.19.124: + resolution: {integrity: sha512-hY4YWZFLs3ku6D2Gqo3RchTd9VRCcrjqp/I0mmohYeUVA5Y8eCXKJEasHxLAJVZRJuQogfd1GiJ9lgogBgKeuQ==} + dependencies: + undici-types: 5.26.5 + dev: true /@types/node@20.14.9: resolution: {integrity: sha512-06OCtnTXtWOZBJlRApleWndH4JsRVs1pDCc8dLSQp+7PpUpX3ePdHyeNSFTeSe7FtKyQkrlPvHwJOW3SLd8Oyg==} @@ -10442,7 +10556,7 @@ packages: /@types/ssh2@1.15.5: resolution: {integrity: sha512-N1ASjp/nXH3ovBHddRJpli4ozpk6UdDYIX4RJWFa9L1YKnzdhTlVmiGHm4DZnj/jLbqZpes4aeR30EFGQtvhQQ==} dependencies: - '@types/node': 18.19.123 + '@types/node': 18.19.124 dev: true /@types/unist@2.0.11: @@ -10669,7 +10783,7 @@ packages: resolution: {integrity: sha512-AiLUiuZ0FuA+/8i19mTYd+re5jqjEc2jZbgJ2up0VY0Ddyyxg/uUtBDpIFAy4uzKaQxOW8gMgBdAJJ2ydhu39A==} dependencies: '@vitest/pretty-format': 3.0.9 - magic-string: 0.30.18 + magic-string: 0.30.19 pathe: 2.0.3 dev: true @@ -10757,78 +10871,78 @@ packages: tinyrainbow: 2.0.0 dev: true - /@vue/compiler-core@3.5.20: - resolution: {integrity: sha512-8TWXUyiqFd3GmP4JTX9hbiTFRwYHgVL/vr3cqhr4YQ258+9FADwvj7golk2sWNGHR67QgmCZ8gz80nQcMokhwg==} + /@vue/compiler-core@3.5.21: + resolution: {integrity: sha512-8i+LZ0vf6ZgII5Z9XmUvrCyEzocvWT+TeR2VBUVlzIH6Tyv57E20mPZ1bCS+tbejgUgmjrEh7q/0F0bibskAmw==} dependencies: - '@babel/parser': 7.28.3 - '@vue/shared': 3.5.20 + '@babel/parser': 7.28.4 + '@vue/shared': 3.5.21 entities: 4.5.0 estree-walker: 2.0.2 source-map-js: 1.2.1 dev: false - 
/@vue/compiler-dom@3.5.20: - resolution: {integrity: sha512-whB44M59XKjqUEYOMPYU0ijUV0G+4fdrHVKDe32abNdX/kJe1NUEMqsi4cwzXa9kyM9w5S8WqFsrfo1ogtBZGQ==} + /@vue/compiler-dom@3.5.21: + resolution: {integrity: sha512-jNtbu/u97wiyEBJlJ9kmdw7tAr5Vy0Aj5CgQmo+6pxWNQhXZDPsRr1UWPN4v3Zf82s2H3kF51IbzZ4jMWAgPlQ==} dependencies: - '@vue/compiler-core': 3.5.20 - '@vue/shared': 3.5.20 + '@vue/compiler-core': 3.5.21 + '@vue/shared': 3.5.21 dev: false - /@vue/compiler-sfc@3.5.20: - resolution: {integrity: sha512-SFcxapQc0/feWiSBfkGsa1v4DOrnMAQSYuvDMpEaxbpH5dKbnEM5KobSNSgU+1MbHCl+9ftm7oQWxvwDB6iBfw==} + /@vue/compiler-sfc@3.5.21: + resolution: {integrity: sha512-SXlyk6I5eUGBd2v8Ie7tF6ADHE9kCR6mBEuPyH1nUZ0h6Xx6nZI29i12sJKQmzbDyr2tUHMhhTt51Z6blbkTTQ==} dependencies: - '@babel/parser': 7.28.3 - '@vue/compiler-core': 3.5.20 - '@vue/compiler-dom': 3.5.20 - '@vue/compiler-ssr': 3.5.20 - '@vue/shared': 3.5.20 + '@babel/parser': 7.28.4 + '@vue/compiler-core': 3.5.21 + '@vue/compiler-dom': 3.5.21 + '@vue/compiler-ssr': 3.5.21 + '@vue/shared': 3.5.21 estree-walker: 2.0.2 - magic-string: 0.30.18 + magic-string: 0.30.19 postcss: 8.5.6 source-map-js: 1.2.1 dev: false - /@vue/compiler-ssr@3.5.20: - resolution: {integrity: sha512-RSl5XAMc5YFUXpDQi+UQDdVjH9FnEpLDHIALg5J0ITHxkEzJ8uQLlo7CIbjPYqmZtt6w0TsIPbo1izYXwDG7JA==} + /@vue/compiler-ssr@3.5.21: + resolution: {integrity: sha512-vKQ5olH5edFZdf5ZrlEgSO1j1DMA4u23TVK5XR1uMhvwnYvVdDF0nHXJUblL/GvzlShQbjhZZ2uvYmDlAbgo9w==} dependencies: - '@vue/compiler-dom': 3.5.20 - '@vue/shared': 3.5.20 + '@vue/compiler-dom': 3.5.21 + '@vue/shared': 3.5.21 dev: false - /@vue/reactivity@3.5.20: - resolution: {integrity: sha512-hS8l8x4cl1fmZpSQX/NXlqWKARqEsNmfkwOIYqtR2F616NGfsLUm0G6FQBK6uDKUCVyi1YOL8Xmt/RkZcd/jYQ==} + /@vue/reactivity@3.5.21: + resolution: {integrity: sha512-3ah7sa+Cwr9iiYEERt9JfZKPw4A2UlbY8RbbnH2mGCE8NwHkhmlZt2VsH0oDA3P08X3jJd29ohBDtX+TbD9AsA==} dependencies: - '@vue/shared': 3.5.20 + '@vue/shared': 3.5.21 dev: false - /@vue/runtime-core@3.5.20: - 
resolution: {integrity: sha512-vyQRiH5uSZlOa+4I/t4Qw/SsD/gbth0SW2J7oMeVlMFMAmsG1rwDD6ok0VMmjXY3eI0iHNSSOBilEDW98PLRKw==} + /@vue/runtime-core@3.5.21: + resolution: {integrity: sha512-+DplQlRS4MXfIf9gfD1BOJpk5RSyGgGXD/R+cumhe8jdjUcq/qlxDawQlSI8hCKupBlvM+3eS1se5xW+SuNAwA==} dependencies: - '@vue/reactivity': 3.5.20 - '@vue/shared': 3.5.20 + '@vue/reactivity': 3.5.21 + '@vue/shared': 3.5.21 dev: false - /@vue/runtime-dom@3.5.20: - resolution: {integrity: sha512-KBHzPld/Djw3im0CQ7tGCpgRedryIn4CcAl047EhFTCCPT2xFf4e8j6WeKLgEEoqPSl9TYqShc3Q6tpWpz/Xgw==} + /@vue/runtime-dom@3.5.21: + resolution: {integrity: sha512-3M2DZsOFwM5qI15wrMmNF5RJe1+ARijt2HM3TbzBbPSuBHOQpoidE+Pa+XEaVN+czbHf81ETRoG1ltztP2em8w==} dependencies: - '@vue/reactivity': 3.5.20 - '@vue/runtime-core': 3.5.20 - '@vue/shared': 3.5.20 + '@vue/reactivity': 3.5.21 + '@vue/runtime-core': 3.5.21 + '@vue/shared': 3.5.21 csstype: 3.1.3 dev: false - /@vue/server-renderer@3.5.20(vue@3.5.20): - resolution: {integrity: sha512-HthAS0lZJDH21HFJBVNTtx+ULcIbJQRpjSVomVjfyPkFSpCwvsPTA+jIzOaUm3Hrqx36ozBHePztQFg6pj5aKg==} + /@vue/server-renderer@3.5.21(vue@3.5.21): + resolution: {integrity: sha512-qr8AqgD3DJPJcGvLcJKQo2tAc8OnXRcfxhOJCPF+fcfn5bBGz7VCcO7t+qETOPxpWK1mgysXvVT/j+xWaHeMWA==} peerDependencies: - vue: 3.5.20 + vue: 3.5.21 dependencies: - '@vue/compiler-ssr': 3.5.20 - '@vue/shared': 3.5.20 - vue: 3.5.20(typescript@5.7.3) + '@vue/compiler-ssr': 3.5.21 + '@vue/shared': 3.5.21 + vue: 3.5.21(typescript@5.7.3) dev: false - /@vue/shared@3.5.20: - resolution: {integrity: sha512-SoRGP596KU/ig6TfgkCMbXkr4YJ91n/QSdMuqeP5r3hVIYA3CPHUBCc7Skak0EAKV+5lL4KyIh61VA/pK1CIAA==} + /@vue/shared@3.5.21: + resolution: {integrity: sha512-+2k1EQpnYuVuu3N7atWyG3/xoFWIVJZq4Mz8XNOdScFI0etES75fbny/oU4lKWk/577P1zmg0ioYvpGEDZ3DLw==} dev: false /@webassemblyjs/ast@1.14.1: @@ -11062,7 +11176,7 @@ packages: indent-string: 5.0.0 dev: true - /ai@3.0.23(react@18.3.1)(solid-js@1.9.9)(svelte@4.2.20)(vue@3.5.20)(zod@3.23.8): + 
/ai@3.0.23(react@18.3.1)(solid-js@1.9.9)(svelte@4.2.20)(vue@3.5.21)(zod@3.23.8): resolution: {integrity: sha512-VL8Fx9euEtffzIu0BpLDZkACB+oU6zj4vHXSsSoT5VfwAzE009FJedOMPK1M4u60RpYw/DgwlD7OLN7XQfvSHw==} engines: {node: '>=18'} peerDependencies: @@ -11097,8 +11211,8 @@ packages: svelte: 4.2.20 swr: 2.2.0(react@18.3.1) swr-store: 0.10.6 - swrv: 1.0.4(vue@3.5.20) - vue: 3.5.20(typescript@5.7.3) + swrv: 1.0.4(vue@3.5.21) + vue: 3.5.21(typescript@5.7.3) zod: 3.23.8 zod-to-json-schema: 3.22.5(zod@3.23.8) dev: false @@ -11529,8 +11643,8 @@ packages: dev: true optional: true - /bare-fs@4.2.1: - resolution: {integrity: sha512-mELROzV0IhqilFgsl1gyp48pnZsaV9xhQapHLDsvn4d4ZTfbFhcghQezl7FTEDNBcGqLUnNI3lUlm6ecrLWdFA==} + /bare-fs@4.2.3: + resolution: {integrity: sha512-1aGs5pRVLToMQ79elP+7cc0u0s/wXAzfBv/7hDloT7WFggLqECCas5qqPky7WHCFdsBH5WDq6sD4fAoz5sJbtA==} engines: {bare: '>=1.16.0'} requiresBuild: true peerDependencies: @@ -11687,7 +11801,7 @@ packages: /broker-factory@3.1.9: resolution: {integrity: sha512-MzvndyD6EcbkBtX4NXm/HfdO1+cOR5ONNdMCXEKfHpxGdMtuDz7+o+nJf7HMtyPH1sUVf/lEIP+DMluC5PgaBQ==} dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 fast-unique-numbers: 9.0.23 tslib: 2.8.1 worker-factory: 7.0.45 @@ -11841,6 +11955,10 @@ packages: /caniuse-lite@1.0.30001737: resolution: {integrity: sha512-BiloLiXtQNrY5UyF0+1nSJLXUENuhka2pzy2Fx5pGxqavdrxSCW4U6Pn/PoG3Efspi2frRbHpBV2XsrPE6EDlw==} + /caniuse-lite@1.0.30001741: + resolution: {integrity: sha512-QGUGitqsc8ARjLdgAfxETDhRbJ0REsP6O3I96TAth/mVjh2cYzN2u+3AzPP3aVSm2FehEItaJw1xd+IGBXWeSw==} + dev: false + /ccount@2.0.1: resolution: {integrity: sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==} @@ -11938,8 +12056,8 @@ packages: engines: {node: '>= 16'} dev: true - /checkly@6.5.0(@types/node@20.14.9)(typescript@5.5.3): - resolution: {integrity: sha512-i90/P+sWL6CmTBRGOHXFAogisrDdomE928XKSaw2g4t9e2dVGcaDfQ72ZdXFCm27gVQd7siXPGrO6LNFwyEsWg==} + 
/checkly@6.0.1(@types/node@20.14.9)(typescript@5.5.3): + resolution: {integrity: sha512-tPFERSn3bQ0WyqIXfpx/ith4f1l6Rhb4EI0iz5RcdgltJ6jbnjyJvbsTaWgIBQlAenKdZrKtJFsd4dodWVIgAQ==} engines: {node: ^18.19.0 || >=20.5.0} hasBin: true peerDependencies: @@ -13054,7 +13172,7 @@ packages: /dom-helpers@5.2.1: resolution: {integrity: sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==} dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 csstype: 3.1.3 dev: false @@ -13283,6 +13401,11 @@ packages: /emoji-regex@10.4.0: resolution: {integrity: sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==} + dev: false + + /emoji-regex@10.5.0: + resolution: {integrity: sha512-lb49vf1Xzfx080OKA0o6l8DQQpV+6Vg95zyCJX9VB/BqKYlhG7N4wgROUUHRA+ZPUefLnteQOad7z1kT2bV7bg==} + dev: true /emoji-regex@8.0.0: resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} @@ -13807,31 +13930,31 @@ packages: source-map: 0.6.1 dev: true - /eslint-config-prettier@9.0.0(eslint@9.34.0): + /eslint-config-prettier@9.0.0(eslint@9.35.0): resolution: {integrity: sha512-IcJsTkJae2S35pRsRAwoCE+925rJJStOdkKnLVgtE+tEpqU0EVVM7OqrwxqgptKdX29NUwC82I5pXsGFIgSevw==} hasBin: true peerDependencies: eslint: '>=7.0.0' dependencies: - eslint: 9.34.0 + eslint: 9.35.0 dev: false - /eslint-config-turbo@1.10.12(eslint@9.34.0): + /eslint-config-turbo@1.10.12(eslint@9.35.0): resolution: {integrity: sha512-z3jfh+D7UGYlzMWGh+Kqz++hf8LOE96q3o5R8X4HTjmxaBWlLAWG+0Ounr38h+JLR2TJno0hU9zfzoPNkR9BdA==} peerDependencies: eslint: '>6.6.0' dependencies: - eslint: 9.34.0 - eslint-plugin-turbo: 1.10.12(eslint@9.34.0) + eslint: 9.35.0 + eslint-plugin-turbo: 1.10.12(eslint@9.35.0) dev: false - /eslint-plugin-turbo@1.10.12(eslint@9.34.0): + /eslint-plugin-turbo@1.10.12(eslint@9.35.0): resolution: {integrity: 
sha512-uNbdj+ohZaYo4tFJ6dStRXu2FZigwulR1b3URPXe0Q8YaE7thuekKNP+54CHtZPH9Zey9dmDx5btAQl9mfzGOw==} peerDependencies: eslint: '>6.6.0' dependencies: dotenv: 16.0.3 - eslint: 9.34.0 + eslint: 9.35.0 dev: false /eslint-scope@5.1.1: @@ -13859,8 +13982,8 @@ packages: resolution: {integrity: sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - /eslint@9.34.0: - resolution: {integrity: sha512-RNCHRX5EwdrESy3Jc9o8ie8Bog+PeYvvSR8sDGoZxNFTvZ4dlxUB3WzQ3bQMztFrSRODGrLLj8g6OFuGY/aiQg==} + /eslint@9.35.0: + resolution: {integrity: sha512-QePbBFMJFjgmlE+cXAlbHZbHpdFVS2E/6vzCy7aKlebddvl1vadiC4JFV5u/wqTkNUwEV8WrQi257jf5f06hrg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} hasBin: true peerDependencies: @@ -13869,15 +13992,15 @@ packages: jiti: optional: true dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.34.0) + '@eslint-community/eslint-utils': 4.9.0(eslint@9.35.0) '@eslint-community/regexpp': 4.12.1 '@eslint/config-array': 0.21.0 '@eslint/config-helpers': 0.3.1 '@eslint/core': 0.15.2 '@eslint/eslintrc': 3.3.1 - '@eslint/js': 9.34.0 + '@eslint/js': 9.35.0 '@eslint/plugin-kit': 0.3.5 - '@humanfs/node': 0.16.6 + '@humanfs/node': 0.16.7 '@humanwhocodes/module-importer': 1.0.1 '@humanwhocodes/retry': 0.4.3 '@types/estree': 1.0.8 @@ -14233,7 +14356,7 @@ packages: resolution: {integrity: sha512-jcRIaHo46nfvyvKRMaFSKXmez4jALQ3Qw49gxM5F4siz8HqkyKPPEexpCOYwBSJI1HovrDr4fEedM8QAJ7oX3w==} engines: {node: '>=18.2.0'} dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 tslib: 2.8.1 dev: true @@ -14930,6 +15053,11 @@ packages: engines: {node: '>=18'} dev: true + /get-east-asian-width@1.3.1: + resolution: {integrity: sha512-R1QfovbPsKmosqTnPoRFiJ7CF9MLRgb53ChvMZm+r4p76/+8yKDy17qLL2PKInORy2RkZZekuK0efYgmzTkXyQ==} + engines: {node: '>=18'} + dev: true + /get-func-name@2.0.2: resolution: {integrity: 
sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==} dev: true @@ -15994,11 +16122,11 @@ packages: resolution: {integrity: sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==} engines: {node: '>=12'} - /is-fullwidth-code-point@5.0.0: - resolution: {integrity: sha512-OVa3u9kkBbw7b8Xw5F9P+D/T9X+Z4+JruYVNapTjPYZYUznQ5YfWeFkOj606XYYW8yugTfC8Pj0hYqvi4ryAhA==} + /is-fullwidth-code-point@5.1.0: + resolution: {integrity: sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==} engines: {node: '>=18'} dependencies: - get-east-asian-width: 1.3.0 + get-east-asian-width: 1.3.1 dev: true /is-generator-function@1.1.0: @@ -16803,6 +16931,12 @@ packages: resolution: {integrity: sha512-yi8swmWbO17qHhwIBNeeZxTceJMeBvWJaId6dyvTSOwTipqeHhMhOrz6513r1sOKnpvQ7zkhlG8tPrpilwTxHQ==} dependencies: '@jridgewell/sourcemap-codec': 1.5.5 + dev: true + + /magic-string@0.30.19: + resolution: {integrity: sha512-2N21sPY9Ws53PZvsEpVtNuSW+ScYbQdp4b9qUaL+9QkHUrGFKo56Lg9Emg5s9V/qrtNBmiR01sYhUOwu3H+VOw==} + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 /make-error@1.3.6: resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} @@ -17947,7 +18081,7 @@ packages: '@next/env': 14.2.32 '@swc/helpers': 0.5.5 busboy: 1.6.0 - caniuse-lite: 1.0.30001737 + caniuse-lite: 1.0.30001741 graceful-fs: 4.2.11 postcss: 8.4.31 react: 18.3.1 @@ -19236,7 +19370,7 @@ packages: react-is: 18.1.0 dev: false - /react-email@2.1.1(eslint@9.34.0): + /react-email@2.1.1(eslint@9.35.0): resolution: {integrity: sha512-09oMVl/jN0/Re0bT0sEqYjyyFSCN/Tg0YmzjC9wfYpnMx02Apk40XXitySDfUBMR9EgTdr6T4lYknACqiLK3mg==} engines: {node: '>=18.0.0'} hasBin: true @@ -19262,8 +19396,8 @@ packages: commander: 11.1.0 debounce: 2.0.0 esbuild: 0.19.11 - eslint-config-prettier: 9.0.0(eslint@9.34.0) - eslint-config-turbo: 1.10.12(eslint@9.34.0) + 
eslint-config-prettier: 9.0.0(eslint@9.35.0) + eslint-config-turbo: 1.10.12(eslint@9.35.0) framer-motion: 10.17.4(react-dom@18.3.1)(react@18.3.1) glob: 10.3.4 log-symbols: 4.1.0 @@ -19478,7 +19612,7 @@ packages: react: '>=16.6.0' react-dom: '>=16.6.0' dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 dom-helpers: 5.2.1 loose-envify: 1.4.0 prop-types: 15.8.1 @@ -20204,8 +20338,8 @@ packages: randombytes: 2.1.0 dev: false - /seroval-plugins@1.3.2(seroval@1.3.2): - resolution: {integrity: sha512-0QvCV2lM3aj/U3YozDiVwx9zpH0q8A60CTWIv4Jszj/givcudPb48B+rkU5D51NJ0pTpweGMttHjboPa9/zoIQ==} + /seroval-plugins@1.3.3(seroval@1.3.2): + resolution: {integrity: sha512-16OL3NnUBw8JG1jBLUoZJsLnQq0n5Ua6aHalhJK4fMQkz1lqR7Osz1sA30trBtd9VUDc2NgkuRCn8+/pBwqZ+w==} engines: {node: '>=10'} peerDependencies: seroval: ^1.0 @@ -20497,7 +20631,7 @@ packages: engines: {node: '>=18'} dependencies: ansi-styles: 6.2.1 - is-fullwidth-code-point: 5.0.0 + is-fullwidth-code-point: 5.1.0 dev: true /slugify@1.6.6: @@ -20606,7 +20740,7 @@ packages: dependencies: csstype: 3.1.3 seroval: 1.3.2 - seroval-plugins: 1.3.2(seroval@1.3.2) + seroval-plugins: 1.3.3(seroval@1.3.2) dev: false /solid-swr-store@0.10.7(solid-js@1.9.9)(swr-store@0.10.6): @@ -20823,7 +20957,7 @@ packages: resolution: {integrity: sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==} engines: {node: '>=18'} dependencies: - emoji-regex: 10.4.0 + emoji-regex: 10.5.0 get-east-asian-width: 1.3.0 strip-ansi: 7.1.0 dev: true @@ -21053,7 +21187,7 @@ packages: estree-walker: 3.0.3 is-reference: 3.0.3 locate-character: 3.0.0 - magic-string: 0.30.18 + magic-string: 0.30.19 periscopic: 3.1.0 dev: false @@ -21098,12 +21232,12 @@ packages: resolution: {integrity: sha512-LqVcOHSB4cPGgitD1riJ1Hh4vdmITOp+BkmfmXRh4hSF/t7EnS4iD+SOTmq7w5pPm/SiPeto4ADbKS6dHUDWFA==} dev: false - /swrv@1.0.4(vue@3.5.20): + /swrv@1.0.4(vue@3.5.21): resolution: {integrity: 
sha512-zjEkcP8Ywmj+xOJW3lIT65ciY/4AL4e/Or7Gj0MzU3zBJNMdJiT8geVZhINavnlHRMMCcJLHhraLTAiDOTmQ9g==} peerDependencies: vue: '>=3.2.26 < 4' dependencies: - vue: 3.5.20(typescript@5.7.3) + vue: 3.5.21(typescript@5.7.3) dev: false /symbol-tree@3.2.4: @@ -21113,7 +21247,7 @@ packages: /tailwind-merge@2.2.0: resolution: {integrity: sha512-SqqhhaL0T06SW59+JVNfAqKdqLs0497esifRrZ7jOaefP3o64fdFNDMrAQWZFMxTLJPiHVjRLUywT8uFz1xNWQ==} dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 dev: false /tailwind-merge@2.5.4: @@ -21229,7 +21363,7 @@ packages: pump: 3.0.3 tar-stream: 3.1.7 optionalDependencies: - bare-fs: 4.2.1 + bare-fs: 4.2.3 bare-path: 3.0.0 transitivePeerDependencies: - bare-buffer @@ -21386,6 +21520,14 @@ packages: fdir: 6.5.0(picomatch@4.0.3) picomatch: 4.0.3 + /tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} + engines: {node: '>=12.0.0'} + dependencies: + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + dev: true + /tinypool@0.8.4: resolution: {integrity: sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ==} engines: {node: '>=14.0.0'} @@ -22839,19 +22981,19 @@ packages: resolution: {integrity: sha512-AFbieoL7a5LMqcnOF04ji+rpXadgOXnZsxQr//r83kLPr7biP7am3g9zbaZIaBGwBRWeSvoMD4mgPdX3e4NWBg==} dev: false - /vue@3.5.20(typescript@5.7.3): - resolution: {integrity: sha512-2sBz0x/wis5TkF1XZ2vH25zWq3G1bFEPOfkBcx2ikowmphoQsPH6X0V3mmPCXA2K1N/XGTnifVyDQP4GfDDeQw==} + /vue@3.5.21(typescript@5.7.3): + resolution: {integrity: sha512-xxf9rum9KtOdwdRkiApWL+9hZEMWE90FHh8yS1+KJAiWYh+iGWV1FquPjoO9VUHQ+VIhsCXNNyZ5Sf4++RVZBA==} peerDependencies: typescript: '*' peerDependenciesMeta: typescript: optional: true dependencies: - '@vue/compiler-dom': 3.5.20 - '@vue/compiler-sfc': 3.5.20 - '@vue/runtime-dom': 3.5.20 - '@vue/server-renderer': 3.5.20(vue@3.5.20) - '@vue/shared': 3.5.20 + '@vue/compiler-dom': 3.5.21 + '@vue/compiler-sfc': 3.5.21 
+ '@vue/runtime-dom': 3.5.21 + '@vue/server-renderer': 3.5.21(vue@3.5.21) + '@vue/shared': 3.5.21 typescript: 5.7.3 dev: false @@ -23114,7 +23256,7 @@ packages: /worker-factory@7.0.45: resolution: {integrity: sha512-FFPCiSv7MD6ZDEfiik/ErM8IrIAWajaXhezLyCo3v0FjhUWud6GXnG2BiTE91jLywXGAVCT8IF48Hhr+D/omMw==} dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 fast-unique-numbers: 9.0.23 tslib: 2.8.1 dev: true @@ -23122,7 +23264,7 @@ packages: /worker-timers-broker@8.0.10: resolution: {integrity: sha512-xvo/9GiuduENbJNdWnvZtkriIkjBKKVbMyw7GXvrBu3n1JHemzZgxqaCcCBNlpfXnRXXF4ekqvXWLh1gb65b8w==} dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 broker-factory: 3.1.9 fast-unique-numbers: 9.0.23 tslib: 2.8.1 @@ -23132,7 +23274,7 @@ packages: /worker-timers-worker@9.0.10: resolution: {integrity: sha512-cfCmAkuoN+nGGJShta/g7CQVP3h7rvQA642EQg72fOHCWP5S2P83rLxDiaGv811Hd+19Cgdqt/tpRBIZ5kj/dw==} dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 tslib: 2.8.1 worker-factory: 7.0.45 dev: true @@ -23140,7 +23282,7 @@ packages: /worker-timers@8.0.24: resolution: {integrity: sha512-Ydu/7TRHlxIRjYSGDge1F92L7y9kzInpwR4CkocRVObPE0eRqC6d+0GFh52Hm+m520RHVKiytOERtCUu5sQDVQ==} dependencies: - '@babel/runtime': 7.28.3 + '@babel/runtime': 7.28.4 tslib: 2.8.1 worker-timers-broker: 8.0.10 worker-timers-worker: 9.0.10