diff --git a/apps/agent/pkg/api/routes/openapi/handler.go b/apps/agent/pkg/api/routes/openapi/handler.go index 3f42adcd6b..fdc6fa8bb7 100644 --- a/apps/agent/pkg/api/routes/openapi/handler.go +++ b/apps/agent/pkg/api/routes/openapi/handler.go @@ -8,7 +8,6 @@ import ( ) func New(svc routes.Services) *routes.Route { - return routes.NewRoute("GET", "/openapi.json", func(w http.ResponseWriter, r *http.Request) { diff --git a/apps/agent/services/vault/service.go b/apps/agent/services/vault/service.go index a26d9ea072..627595229a 100644 --- a/apps/agent/services/vault/service.go +++ b/apps/agent/services/vault/service.go @@ -37,7 +37,6 @@ type Config struct { } func New(cfg Config) (*Service, error) { - encryptionKey, decryptionKeys, err := loadMasterKeys(cfg.MasterKeys) if err != nil { return nil, fmt.Errorf("unable to load master keys: %w", err) diff --git a/go/apps/api/openapi/gen.go b/go/apps/api/openapi/gen.go index d9bbb8735b..d56e8775e1 100644 --- a/go/apps/api/openapi/gen.go +++ b/go/apps/api/openapi/gen.go @@ -22,12 +22,6 @@ const ( KeyCreditsRefillIntervalMonthly KeyCreditsRefillInterval = "monthly" ) -// Defines values for KeysGetKeyResponseDataCreditsRefillInterval. -const ( - KeysGetKeyResponseDataCreditsRefillIntervalDaily KeysGetKeyResponseDataCreditsRefillInterval = "daily" - KeysGetKeyResponseDataCreditsRefillIntervalMonthly KeysGetKeyResponseDataCreditsRefillInterval = "monthly" -) - // Defines values for KeysUpdateRemainingResponseDataRefillSettingsInterval. const ( KeysUpdateRemainingResponseDataRefillSettingsIntervalDaily KeysUpdateRemainingResponseDataRefillSettingsInterval = "daily" @@ -49,18 +43,6 @@ const ( VALID KeysVerifyKeyResponseDataCode = "VALID" ) -// Defines values for V2KeysCreateKeyRequestBodyCreditsRefillInterval. 
-const ( - V2KeysCreateKeyRequestBodyCreditsRefillIntervalDaily V2KeysCreateKeyRequestBodyCreditsRefillInterval = "daily" - V2KeysCreateKeyRequestBodyCreditsRefillIntervalMonthly V2KeysCreateKeyRequestBodyCreditsRefillInterval = "monthly" -) - -// Defines values for V2KeysUpdateKeyRequestBodyCreditsRefillInterval. -const ( - Daily V2KeysUpdateKeyRequestBodyCreditsRefillInterval = "daily" - Monthly V2KeysUpdateKeyRequestBodyCreditsRefillInterval = "monthly" -) - // Defines values for V2KeysVerifyKeyRequestBodyPermissions1Type. const ( And V2KeysVerifyKeyRequestBodyPermissions1Type = "and" @@ -99,7 +81,7 @@ type ApisGetApiResponseData struct { } // ApisListKeysResponseData Array of API keys with complete configuration and metadata. -type ApisListKeysResponseData = []KeyResponse +type ApisListKeysResponseData = []KeyResponseData // BadRequestErrorDetails defines model for BadRequestErrorDetails. type BadRequestErrorDetails struct { @@ -211,7 +193,7 @@ type IdentitiesGetIdentityResponseData struct { Meta *map[string]interface{} `json:"meta,omitempty"` // Ratelimits Rate limits associated with this identity. These limits are shared across all API keys linked to this identity, providing consistent rate limiting regardless of which key is used. - Ratelimits *[]Ratelimit `json:"ratelimits,omitempty"` + Ratelimits *[]RatelimitResponse `json:"ratelimits,omitempty"` } // IdentitiesListIdentitiesResponseData List of identities matching the specified criteria. @@ -229,7 +211,7 @@ type IdentitiesUpdateIdentityResponseData struct { Meta *map[string]interface{} `json:"meta,omitempty"` // Ratelimits Rate limits associated with this identity after the update. - Ratelimits *[]Ratelimit `json:"ratelimits,omitempty"` + Ratelimits *[]RatelimitResponse `json:"ratelimits,omitempty"` } // Identity defines model for Identity. 
@@ -244,7 +226,7 @@ type Identity struct { Meta *map[string]interface{} `json:"meta,omitempty"` // Ratelimits Identity ratelimits - Ratelimits []Ratelimit `json:"ratelimits"` + Ratelimits []RatelimitResponse `json:"ratelimits"` } // InternalServerErrorResponse Error response for unexpected server-side issues that prevented the request from being processed correctly. This is typically caused by problems with the service infrastructure, database connectivity issues, unexpected exceptions, or service failures. When receiving this error, clients should implement appropriate retry strategies with backoff and report the issue if it persists. The `requestId` in the `meta` object is essential for troubleshooting and should be included in any support inquiries. @@ -256,12 +238,12 @@ type InternalServerErrorResponse struct { Meta Meta `json:"meta"` } -// KeyCredits Credit configuration and remaining balance for this key. -type KeyCredits struct { +// KeyCreditsData Credit configuration and remaining balance for this key. +type KeyCreditsData struct { Refill *KeyCreditsRefill `json:"refill,omitempty"` - // Remaining Number of credits remaining (-1 for unlimited). - Remaining int64 `json:"remaining"` + // Remaining Number of credits remaining (null for unlimited). + Remaining nullable.Nullable[int64] `json:"remaining"` } // KeyCreditsRefill defines model for KeyCreditsRefill. @@ -272,9 +254,6 @@ type KeyCreditsRefill struct { // Interval How often credits are automatically refilled. Interval KeyCreditsRefillInterval `json:"interval"` - // LastRefillAt Unix timestamp in milliseconds of last refill. - LastRefillAt *int64 `json:"lastRefillAt,omitempty"` - // RefillDay Day of month for monthly refills (1-31). RefillDay *int `json:"refillDay,omitempty"` } @@ -282,16 +261,16 @@ type KeyCreditsRefill struct { // KeyCreditsRefillInterval How often credits are automatically refilled. type KeyCreditsRefillInterval string -// KeyResponse defines model for KeyResponse. 
-type KeyResponse struct { +// KeyResponseData defines model for KeyResponseData. +type KeyResponseData struct { // CreatedAt Unix timestamp in milliseconds when key was created. CreatedAt int64 `json:"createdAt"` // Credits Credit configuration and remaining balance for this key. - Credits *KeyCredits `json:"credits,omitempty"` + Credits *KeyCreditsData `json:"credits,omitempty"` - // Environment Environment tag for this key. - Environment *string `json:"environment,omitempty"` + // Enabled Whether the key is enabled or disabled. + Enabled bool `json:"enabled"` // Expires Unix timestamp in milliseconds when key expires. Expires *int64 `json:"expires,omitempty"` @@ -325,39 +304,6 @@ type KeyResponse struct { UpdatedAt *int64 `json:"updatedAt,omitempty"` } -// KeyWhoamiData defines model for KeyWhoamiData. -type KeyWhoamiData struct { - // CreatedAt The timestamp in milliseconds when the key was created - CreatedAt int64 `json:"createdAt"` - - // Enabled Whether the key is enabled and can be used - Enabled bool `json:"enabled"` - - // Environment The environment the key is associated with (e.g., production, staging, development) - Environment nullable.Nullable[string] `json:"environment,omitempty"` - - // Id The unique identifier of the key - Id string `json:"id"` - - // Identity The identity object associated with the key (null if no identity is associated) - Identity nullable.Nullable[struct { - // ExternalId The external identity ID associated with the key (e.g., user ID in your system) - ExternalId string `json:"externalId"` - - // Id The unique identity ID associated with the key - Id string `json:"id"` - }] `json:"identity,omitempty"` - - // Meta Custom metadata associated with the key (null if no metadata is present) - Meta nullable.Nullable[map[string]interface{}] `json:"meta,omitempty"` - - // Name The human-readable name of the key (optional) - Name nullable.Nullable[string] `json:"name,omitempty"` - - // Remaining The remaining number of requests for the 
key (null means unlimited) - Remaining nullable.Nullable[int64] `json:"remaining,omitempty"` -} - // KeysCreateKeyResponseData defines model for KeysCreateKeyResponseData. type KeysCreateKeyResponseData struct { // Key The full generated API key that should be securely provided to your user. SECURITY WARNING: This is the only time you'll receive the complete key - Unkey only stores a securely hashed version. Never log or store this value in your own systems; provide it directly to your end user via secure channels. After this API call completes, this value cannot be retrieved again (unless created with `recoverable=true`). @@ -373,99 +319,6 @@ type KeysCreateKeyResponseData struct { // Monitor your application logs during the propagation period to ensure no unexpected authentication successes occur. type KeysDeleteKeyResponseData = map[string]interface{} -// KeysGetKeyResponseData defines model for KeysGetKeyResponseData. -type KeysGetKeyResponseData struct { - // ApiId The ID of the API this key belongs to. - ApiId *string `json:"apiId,omitempty"` - - // CreatedAt Unix timestamp (in milliseconds) when the key was created. - CreatedAt int64 `json:"createdAt"` - - // Credits Usage limits configuration for this key. Credits provide a way to limit the number of times a key can be used before becoming invalid. Unlike ratelimits, credits are globally consistent (using database transactions) providing 100% accuracy at the cost of slightly higher latency. Ideal for monetization, usage quotas, or strict limits that must not be exceeded. See the 'refill' field for automatic replenishment options. - Credits *struct { - // Refill Configuration for automatic credit refills. - Refill *struct { - // Amount Number of credits added during each refill. - Amount int `json:"amount"` - - // Interval How often the credits are automatically refilled. 
- Interval KeysGetKeyResponseDataCreditsRefillInterval `json:"interval"` - - // LastRefillAt Unix timestamp (in milliseconds) when credits were last refilled. - LastRefillAt *int64 `json:"lastRefillAt,omitempty"` - - // RefillDay For monthly refills, the day of month when refills occur. - RefillDay *int `json:"refillDay,omitempty"` - } `json:"refill,omitempty"` - - // Remaining The number of times this key can still be used before becoming invalid. - Remaining *int32 `json:"remaining,omitempty"` - } `json:"credits,omitempty"` - - // Enabled Whether the key is currently active. Disabled keys will fail verification with `code=DISABLED`. Toggling this allows you to temporarily suspend access without deleting the key, which is useful for maintenance, account freezing, or debugging. Can be updated using the `keys.updateKey` endpoint. - Enabled bool `json:"enabled"` - - // Expires Unix timestamp (in milliseconds) when this key will automatically expire. If null, the key has no expiration. - Expires *int64 `json:"expires,omitempty"` - - // ExternalId Your user's unique identifier, creating a link between Unkey and your system. This ID is returned during verification so you can identify which customer/entity is making the request without performing additional database lookups. Use consistent identifiers that match your primary user/tenant identifiers for seamless integration. - ExternalId *string `json:"externalId,omitempty"` - - // Id The unique identifier of the key in Unkey's system. - Id string `json:"id"` - - // Identity The identity associated with this key, if any. Identities allow resource sharing (like ratelimits) across multiple keys belonging to the same user/entity. This enables scenarios like issuing separate keys for different devices/services while maintaining global usage limits for the user. An identity's externalId typically matches your user ID or tenant ID. - Identity *struct { - // ExternalId Your identifier for this identity in your system. 
- ExternalId string `json:"externalId"` - - // Id The unique ID of the identity in Unkey's system. - Id string `json:"id"` - - // Meta Additional metadata associated with this identity. - Meta *map[string]interface{} `json:"meta,omitempty"` - } `json:"identity,omitempty"` - - // Meta Arbitrary JSON metadata associated with this key. This can include additional context like subscription plans, feature flags, or any custom data. Metadata is stored as-is and returned during verification, allowing you to access important information without additional database queries. Consider including data relevant to authorization decisions, usage tracking, and user context. - Meta *map[string]interface{} `json:"meta,omitempty"` - - // Name A descriptive name for the key for internal reference. Shown in dashboards and logs but never exposed to end users. - Name *string `json:"name,omitempty"` - - // Permissions List of permission names directly assigned to this key. - Permissions *[]string `json:"permissions,omitempty"` - - // Plaintext The full `API key` in plaintext. Only included when `decrypt` is `true` and the key was created with `recoverable: true`. SECURITY RISK: This field contains the actual secret key which should never be logged, stored in databases, or exposed in any frontend code. It should only be displayed directly to users through secure channels. Most applications should avoid setting decrypt=true unless absolutely necessary. - Plaintext *string `json:"plaintext,omitempty"` - - // Ratelimits Array of ratelimits applied to this key. Multiple named ratelimits can control different aspects of key usage. For example, a 'requests' ratelimit might control overall API calls while a separate 'computations' limit manages access to resource-intensive operations. Ratelimits are optimized for performance and typically add minimal latency to verifications. They can be shared across keys through identities. 
- Ratelimits *[]struct { - // Async Whether this ratelimit uses fast (async=true) or consistent (async=false) mode. Fast mode has lower latency but less accuracy. - Async *bool `json:"async,omitempty"` - - // Duration Duration of the ratelimit window in milliseconds. - Duration int32 `json:"duration"` - - // Limit Maximum number of operations allowed within the time window. - Limit int32 `json:"limit"` - - // Name Identifier for this ratelimit. - Name string `json:"name"` - } `json:"ratelimits,omitempty"` - - // Roles List of role names assigned to this key. Roles are collections of permissions. - Roles *[]string `json:"roles,omitempty"` - - // Start The first few characters of the key to visually identify it without exposing the full key. Used in dashboards and logs to help users recognize which key is being used without revealing sensitive information. Typically includes the prefix if one was specified. - Start string `json:"start"` - - // UpdatedAt Unix timestamp (in milliseconds) when the key was last updated. - UpdatedAt *int64 `json:"updatedAt,omitempty"` -} - -// KeysGetKeyResponseDataCreditsRefillInterval How often the credits are automatically refilled. -type KeysGetKeyResponseDataCreditsRefillInterval string - // KeysUpdateKeyResponseData Empty response object by design. A successful response indicates the key was updated successfully. The endpoint doesn't return the updated key to reduce response size and avoid exposing sensitive information. Changes may take up to 30 seconds to propagate to all regions due to cache invalidation delays. If you need the updated key state, use a subsequent call to `keys.getKey`. type KeysUpdateKeyResponseData = map[string]interface{} @@ -486,7 +339,7 @@ type KeysUpdateRemainingResponseData struct { RefillDay *int `json:"refillDay,omitempty"` }] `json:"refillSettings,omitempty"` - // Remaining The updated remaining credits value for the key after the operation completes. 
This reflects the exact value that was set in the request. A value of -1 indicates unlimited usage, meaning the key can be used an unlimited number of times without being rejected for credit exhaustion. This field is guaranteed to be present in every response. + // Remaining The updated remaining credits value for the key after the operation completes. This reflects the exact value that was set in the request. A value of null indicates unlimited usage, meaning the key can be used an unlimited number of times without being rejected for credit exhaustion. This field is guaranteed to be present in every response. Remaining int64 `json:"remaining"` } @@ -504,9 +357,6 @@ type KeysVerifyKeyResponseData struct { // Enabled Indicates if the key is currently enabled. Disabled keys will always fail verification with `code=DISABLED`. This is useful for implementing temporary suspensions without deleting the key. Enabled *bool `json:"enabled,omitempty"` - // Environment The environment tag associated with the key (e.g., 'production', 'staging', 'development'). Use this to further segment keys within an API beyond just the apiId separation. - Environment *string `json:"environment,omitempty"` - // Expires Unix timestamp (in milliseconds) when the key will expire. If null or not present, the key has no expiration. You can use this to warn users about upcoming expirations or to understand the validity period. Expires *int64 `json:"expires,omitempty"` @@ -524,22 +374,8 @@ type KeysVerifyKeyResponseData struct { Name *string `json:"name,omitempty"` // Permissions A list of all permission names assigned to this key, either directly or through roles. These permissions determine what actions the key can perform. Only returned when permissions were checked during verification or when the key fails with `code=INSUFFICIENT_PERMISSIONS`. - Permissions *[]string `json:"permissions,omitempty"` - - // Ratelimits Information about the rate limits applied during verification. 
Only included when rate limits were checked. If verification failed with `code=RATE_LIMITED`, this will show which specific rate limit was exceeded. - Ratelimits *[]struct { - // Limit The maximum number of operations allowed within the current time window for this rate limit. - Limit int32 `json:"limit"` - - // Name The name of the rate limit that was checked. This matches the name provided in the request. - Name string `json:"name"` - - // Remaining The number of operations still allowed within the current time window after this verification. Your application can use this to inform users about remaining capacity or to implement your own backoff strategies. - Remaining int32 `json:"remaining"` - - // Reset Unix timestamp in milliseconds when the rate limit window will reset and 'remaining' will return to 'limit'. Use this to implement retry-after logic or to display wait times to users. - Reset int64 `json:"reset"` - } `json:"ratelimits,omitempty"` + Permissions *[]string `json:"permissions,omitempty"` + Ratelimits *[]RatelimitResponse `json:"ratelimits,omitempty"` // Roles A list of all role names assigned to this key. Roles are collections of permissions that grant access to specific functionality. Only returned when permissions were checked during verification. Roles *[]string `json:"roles,omitempty"` @@ -653,42 +489,6 @@ type PreconditionFailedErrorResponse struct { Meta Meta `json:"meta"` } -// Ratelimit defines model for Ratelimit. -type Ratelimit struct { - // Duration The duration for each ratelimit window in milliseconds. - // - // This controls how long the rate limit counter accumulates before resetting. 
Common values include: - // - 1000 (1 second): For strict per-second limits on high-frequency operations - // - 60000 (1 minute): For moderate API usage control - // - 3600000 (1 hour): For less frequent but costly operations - // - 86400000 (24 hours): For daily quotas - // - // Shorter windows provide more frequent resets but may allow large burst usage. Longer windows provide more consistent usage patterns but take longer to reset after limit exhaustion. - Duration int64 `json:"duration"` - - // Limit The maximum number of operations allowed within the specified time window. - // - // When this limit is reached, verification requests will fail with `code=RATE_LIMITED` until the window resets. The limit should reflect: - // - Your infrastructure capacity and scaling limitations - // - Fair usage expectations for your service - // - Different tier levels for various user types - // - The relative cost of the operations being limited - // - // Higher values allow more frequent access but may impact service performance. - Limit int64 `json:"limit"` - - // Name The name of this rate limit. This name is used to identify which limit to check during key verification. - // - // Best practices for limit names: - // - Use descriptive, semantic names like 'api_requests', 'heavy_operations', or 'downloads' - // - Be consistent with naming conventions across your application - // - Create separate limits for different resource types or operation costs - // - Consider using namespaced names for better organization (e.g., 'files.downloads', 'compute.training') - // - // You will reference this exact name when verifying keys to check against this specific limit. - Name string `json:"name"` -} - // RatelimitDeleteOverrideResponseData Empty response object. A successful response indicates the override was successfully deleted. 
The operation is immediate - as soon as this response is received, the override no longer exists and affected identifiers have reverted to using the default rate limit for the namespace. No other data is returned as part of the deletion operation. type RatelimitDeleteOverrideResponseData = map[string]interface{} @@ -773,8 +573,51 @@ type RatelimitOverride struct { OverrideId string `json:"overrideId"` } +// RatelimitRequest defines model for RatelimitRequest. +type RatelimitRequest struct { + // AutoApply Whether this ratelimit should be automatically applied when verifying a key. + AutoApply bool `json:"autoApply"` + + // Duration The duration for each ratelimit window in milliseconds. + // + // This controls how long the rate limit counter accumulates before resetting. Common values include: + // - 1000 (1 second): For strict per-second limits on high-frequency operations + // - 60000 (1 minute): For moderate API usage control + // - 3600000 (1 hour): For less frequent but costly operations + // - 86400000 (24 hours): For daily quotas + // + // Shorter windows provide more frequent resets but may allow large burst usage. Longer windows provide more consistent usage patterns but take longer to reset after limit exhaustion. + Duration int64 `json:"duration"` + + // Limit The maximum number of operations allowed within the specified time window. + // + // When this limit is reached, verification requests will fail with `code=RATE_LIMITED` until the window resets. The limit should reflect: + // - Your infrastructure capacity and scaling limitations + // - Fair usage expectations for your service + // - Different tier levels for various user types + // - The relative cost of the operations being limited + // + // Higher values allow more frequent access but may impact service performance. + Limit int64 `json:"limit"` + + // Name The name of this rate limit. This name is used to identify which limit to check during key verification. 
+ // + // Best practices for limit names: + // - Use descriptive, semantic names like 'api_requests', 'heavy_operations', or 'downloads' + // - Be consistent with naming conventions across your application + // - Create separate limits for different resource types or operation costs + // - Consider using namespaced names for better organization (e.g., 'files.downloads', 'compute.training') + // + // You will reference this exact name when verifying keys to check against this specific limit. + Name string `json:"name"` +} + // RatelimitResponse defines model for RatelimitResponse. type RatelimitResponse struct { + // AutoApply Whether this rate limit should be automatically applied when verifying keys. + // When true, we will automatically apply this limit during verification without it being explicitly listed. + AutoApply bool `json:"autoApply"` + // Duration Rate limit window duration in milliseconds. Duration int64 `json:"duration"` @@ -1005,7 +848,7 @@ type V2IdentitiesCreateIdentityRequestBody struct { // - Each named limit can have different thresholds and windows // // When verifying keys, you can specify which limits you want to use and all keys attached to this identity will share the limits, regardless of which specific key is used. - Ratelimits *[]Ratelimit `json:"ratelimits,omitempty"` + Ratelimits *[]RatelimitRequest `json:"ratelimits,omitempty"` } // V2IdentitiesCreateIdentityResponseBody defines model for V2IdentitiesCreateIdentityResponseBody. @@ -1114,7 +957,7 @@ type V2IdentitiesUpdateIdentityRequestBody struct { // Omitting this field preserves existing rate limits, while providing an empty array removes all rate limits. // These limits are shared across all keys belonging to this identity, preventing abuse through multiple keys. // Rate limit changes take effect immediately but may take up to 30 seconds to propagate across all regions. 
- Ratelimits *[]Ratelimit `json:"ratelimits,omitempty"` + Ratelimits *[]RatelimitRequest `json:"ratelimits,omitempty"` union json.RawMessage } @@ -1298,37 +1141,8 @@ type V2KeysCreateKeyRequestBody struct { // Consider 32 bytes for highly sensitive APIs, but avoid values above 64 bytes unless specifically required. ByteLength *int `json:"byteLength,omitempty"` - // Credits Controls usage-based limits through credit consumption with optional automatic refills. - // Unlike rate limits which control frequency, credits control total usage with global consistency. - // Essential for implementing usage-based pricing, subscription tiers, and hard usage quotas. - // Omitting this field creates unlimited usage, while setting null is not allowed during creation. - Credits *struct { - // Refill Configures automatic credit refills on a schedule for subscription-like recurring quotas. - // Refills add to existing credits rather than replacing them, allowing unused quotas to accumulate. - // Essential for implementing predictable billing cycles and user-friendly quota management. - Refill *struct { - // Amount Specifies how many credits to add during each refill cycle. - // This amount gets added to remaining credits, not replaced, so unused credits carry over. - // Typically matches your subscription plan's quota for predictable billing cycles. - Amount int `json:"amount"` - - // Interval Sets how often credits automatically refill. Daily refills occur at midnight UTC, - // while monthly refills support specific days via refillDay. - // Choose daily for high-frequency APIs and monthly for subscription-based quotas. - Interval V2KeysCreateKeyRequestBodyCreditsRefillInterval `json:"interval"` - - // RefillDay Sets the day of month for monthly refills (1-31). Only valid with monthly interval. - // Days beyond month length (like 31 in February) default to the last valid day. - // Useful for aligning refills with billing cycles and subscription renewals. 
- RefillDay *int `json:"refillDay,omitempty"` - } `json:"refill,omitempty"` - - // Remaining Sets the initial number of times this key can be used before becoming invalid. - // Each verification reduces this count by the verification cost (default 1). - // When reaching 0, further verifications fail with code=USAGE_EXCEEDED. - // Provides globally consistent usage limits, ideal for implementing usage-based pricing and strict quotas. - Remaining int64 `json:"remaining"` - } `json:"credits,omitempty"` + // Credits Credit configuration and remaining balance for this key. + Credits *KeyCreditsData `json:"credits,omitempty"` // Enabled Controls whether the key is active immediately upon creation. // When set to `false`, the key exists but all verification attempts fail with `code=DISABLED`. @@ -1378,28 +1192,7 @@ type V2KeysCreateKeyRequestBody struct { // Unlike credits which track total usage, rate limits reset automatically after each window expires. // Multiple rate limits can control different operation types with separate thresholds and windows. // Essential for preventing API abuse while maintaining good performance for legitimate usage. - Ratelimits *[]struct { - // Async Controls whether this rate limit uses fast (async=true) or consistent (async=false) mode. - // Fast mode has lower latency but may allow brief bursts above the limit during high concurrency. - // Consistent mode provides strict guarantees but adds latency to every verification. - // Use consistent mode only when precise rate limiting is essential for billing or security. - Async *bool `json:"async,omitempty"` - - // Duration Duration of the rate limit window in milliseconds. Common values include 60000 (1 minute), - // 3600000 (1 hour), and 86400000 (24 hours). The rate limit automatically resets after this period elapses. - // Windows shorter than 1 second are not supported for performance reasons. 
- Duration int32 `json:"duration"` - - // Limit Sets the maximum operations allowed within the duration window. - // When this limit is reached, verification fails with code=RATE_LIMITED until the window resets. - // Adjust this based on your API's capacity and expected usage patterns. - Limit int64 `json:"limit"` - - // Name Identifies this rate limit uniquely within the key. Names must start with a letter and use semantic - // identifiers like 'requests', 'computations', or 'write_operations' rather than generic terms. - // Duplicate names within the same key are not allowed. - Name string `json:"name"` - } `json:"ratelimits,omitempty"` + Ratelimits *[]RatelimitRequest `json:"ratelimits,omitempty"` // Recoverable Controls whether the plaintext key is stored in an encrypted vault for later retrieval. // When true, allows recovering the actual key value using keys.getKey with decrypt=true. @@ -1414,11 +1207,6 @@ type V2KeysCreateKeyRequestBody struct { Roles *[]string `json:"roles,omitempty"` } -// V2KeysCreateKeyRequestBodyCreditsRefillInterval Sets how often credits automatically refill. Daily refills occur at midnight UTC, -// while monthly refills support specific days via refillDay. -// Choose daily for high-frequency APIs and monthly for subscription-based quotas. -type V2KeysCreateKeyRequestBodyCreditsRefillInterval string - // V2KeysCreateKeyResponseBody defines model for V2KeysCreateKeyResponseBody. type V2KeysCreateKeyResponseBody struct { Data KeysCreateKeyResponseData `json:"data"` @@ -1468,16 +1256,28 @@ type V2KeysGetKeyRequestBody struct { // Decryption requests are audited and may trigger security alerts in enterprise environments. Decrypt *bool `json:"decrypt,omitempty"` + // Key The complete API key string provided by you, including any prefix. + // Never log, cache, or store API keys in your system as they provide full access to user resources. + // Include the full key exactly as provided - even minor modifications will cause a not found error. 
+ Key *string `json:"key,omitempty"` + // KeyId Specifies which key to retrieve using the database identifier returned from `keys.createKey`. // Do not confuse this with the actual API key string that users include in requests. // Key data includes metadata, permissions, usage statistics, and configuration but never the plaintext key value unless `decrypt=true`. // Find this ID in creation responses, key listings, dashboard, or verification responses. - KeyId string `json:"keyId"` + KeyId *string `json:"keyId,omitempty"` + union json.RawMessage } +// V2KeysGetKeyRequestBody0 defines model for . +type V2KeysGetKeyRequestBody0 = interface{} + +// V2KeysGetKeyRequestBody1 defines model for . +type V2KeysGetKeyRequestBody1 = interface{} + // V2KeysGetKeyResponseBody defines model for V2KeysGetKeyResponseBody. type V2KeysGetKeyResponseBody struct { - Data KeysGetKeyResponseData `json:"data"` + Data KeyResponseData `json:"data"` // Meta Metadata object included in every API response. This provides context about the request and is essential for debugging, audit trails, and support inquiries. The `requestId` is particularly important when troubleshooting issues with the Unkey support team. Meta Meta `json:"meta"` @@ -1791,37 +1591,8 @@ type V2KeysSetRolesResponseData = []struct { // V2KeysUpdateKeyRequestBody defines model for V2KeysUpdateKeyRequestBody. type V2KeysUpdateKeyRequestBody struct { - // Credits Controls usage-based limits for this key through credit consumption. - // Omitting this field preserves current credit settings, while setting null enables unlimited usage. - // Cannot configure refill settings when credits is null, and refillDay requires monthly interval. - // Essential for implementing usage-based pricing and subscription quotas. - Credits nullable.Nullable[struct { - // Refill Configures automatic credit refills on a schedule. - // Omitting this field preserves existing refill settings, while setting null disables refills entirely. 
- // Refills add to existing credits rather than replacing them, allowing unused quotas to accumulate. - Refill nullable.Nullable[struct { - // Amount Specifies how many credits to add during each refill cycle. - // This amount gets added to remaining credits, not replaced, so unused credits carry over. - // Typically matches your subscription plan's quota. - Amount int `json:"amount"` - - // Interval Sets how often credits automatically refill. Monthly refills support specific days via refillDay, - // while daily refills occur at midnight UTC. Choose daily for high-frequency APIs and monthly for - // subscription-based quotas. - Interval V2KeysUpdateKeyRequestBodyCreditsRefillInterval `json:"interval"` - - // RefillDay Sets the day of month for monthly refills (1-31). Only valid with monthly interval. - // Days beyond month length (like 31 in February) default to the last valid day. - // Useful for aligning refills with billing cycles. - RefillDay *int `json:"refillDay,omitempty"` - }] `json:"refill,omitempty"` - - // Remaining Counts how many times this key can be used before becoming invalid. - // Each verification reduces this count by the verification cost (default 1). - // When reaching 0, further verifications fail with `USAGE_EXCEEDED`. - // Required when specifying credits for usage-based pricing or quotas. - Remaining int64 `json:"remaining"` - }] `json:"credits,omitempty"` + // Credits Credit configuration and remaining balance for this key. + Credits *KeyCreditsData `json:"credits,omitempty"` // Enabled Controls whether the key is currently active for verification requests. // When set to `false`, all verification attempts fail with `code=DISABLED` regardless of other settings. @@ -1863,29 +1634,8 @@ type V2KeysUpdateKeyRequestBody struct { // Omitting this field preserves existing rate limits, while setting null removes all rate limits. // Unlike credits which track total usage, rate limits reset automatically after each window expires. 
// Multiple rate limits can control different operation types with separate thresholds and windows. - Ratelimits nullable.Nullable[[]struct { - // Duration Duration of the rate limit window in milliseconds. Common values include 60000 (1 minute), - // 3600000 (1 hour), and 86400000 (24 hours). The rate limit automatically resets after this period elapses. - // Windows shorter than 1 second are not supported for performance reasons. - Duration int32 `json:"duration"` - - // Limit Sets the maximum operations allowed within the duration window. - // When this limit is reached, verification fails with code=RATE_LIMITED until the window resets. - // Adjust this based on your API's capacity and expected usage patterns. - Limit int64 `json:"limit"` - - // Name Identifies this rate limit uniquely within the key. Names must start with a letter and use semantic - // identifiers like 'requests', 'computations', or 'write_operations' rather than generic terms. - // Use only letters, numbers, underscores, and hyphens after the initial letter. - // Duplicate names within the same key are not allowed. - Name string `json:"name"` - }] `json:"ratelimits,omitempty"` -} - -// V2KeysUpdateKeyRequestBodyCreditsRefillInterval Sets how often credits automatically refill. Monthly refills support specific days via refillDay, -// while daily refills occur at midnight UTC. Choose daily for high-frequency APIs and monthly for -// subscription-based quotas. -type V2KeysUpdateKeyRequestBodyCreditsRefillInterval string + Ratelimits nullable.Nullable[[]RatelimitRequest] `json:"ratelimits,omitempty"` +} // V2KeysUpdateKeyResponseBody defines model for V2KeysUpdateKeyResponseBody. 
type V2KeysUpdateKeyResponseBody struct { @@ -1917,13 +1667,13 @@ type V2KeysUpdateRemainingRequestBody struct { // Key behaviors: // - This completely replaces the current remaining credits value // - To add credits, first get the current value and then set remaining = current + additional - // - To make a key unlimited, set remaining = -1 + // - To make a key unlimited, set remaining = null // - To make a key with unlimited usage have a specific limit, set remaining to a positive number // - Credits are decremented each time the key is successfully verified (by the cost value, default 1) // - When credits reach zero, verification fails with code=USAGE_EXCEEDED // // This field is useful for implementing usage-based pricing, subscription tiers, trial periods, or consumption quotas. - Remaining int64 `json:"remaining"` + Remaining nullable.Nullable[int64] `json:"remaining"` } // V2KeysUpdateRemainingResponse defines model for V2KeysUpdateRemainingResponse. @@ -1970,27 +1720,7 @@ type V2KeysVerifyKeyRequestBody struct { // Omitting this field skips rate limit checks entirely, relying only on configured key rate limits. // Multiple rate limits can be checked simultaneously, each with different costs and temporary overrides. // Rate limit checks are optimized for performance but may allow brief bursts during high concurrency. - Ratelimits *[]struct { - // Cost Sets how much of the rate limit quota this operation consumes. - // Use higher values for expensive operations and 0 for operations that should not count against the limit. - // Cost is applied immediately, even if other rate limits or permissions cause verification to fail. - Cost *int64 `json:"cost,omitempty"` - - // Duration Temporarily overrides the rate limit window duration in milliseconds for this request only. - // Does not modify the stored configuration and applies only to this single verification. - // Common values include 60000 (1 minute), 3600000 (1 hour), and 86400000 (24 hours). 
- Duration *int64 `json:"duration,omitempty"` - - // Limit Temporarily overrides the configured rate limit for this request only. - // Does not modify the stored configuration and applies only to this single verification. - // Useful for implementing per-request dynamic limits based on user tier or operation type. - Limit *int64 `json:"limit,omitempty"` - - // Name Identifies which rate limit configuration to check. Must match a rate limit defined on the key or identity. - // Use semantic names that clearly describe what's being limited, avoiding generic terms like 'limit1'. - // Rate limit names are case-sensitive and must exist in the key's configuration. - Name string `json:"name"` - } `json:"ratelimits,omitempty"` + Ratelimits *[]RatelimitRequest `json:"ratelimits,omitempty"` // Tags Attaches metadata tags for analytics and monitoring without affecting verification outcomes. // Enables segmentation of API usage in dashboards by endpoint, client version, region, or custom dimensions. @@ -2038,20 +1768,6 @@ type V2KeysVerifyKeyResponseBody struct { Meta Meta `json:"meta"` } -// V2KeysWhoamiRequestBody defines model for V2KeysWhoamiRequestBody. -type V2KeysWhoamiRequestBody struct { - // Key The API key to identify and retrieve information about - Key string `json:"key"` -} - -// V2KeysWhoamiResponse defines model for V2KeysWhoamiResponse. -type V2KeysWhoamiResponse struct { - Data KeyWhoamiData `json:"data"` - - // Meta Metadata object included in every API response. This provides context about the request and is essential for debugging, audit trails, and support inquiries. The `requestId` is particularly important when troubleshooting issues with the Unkey support team. - Meta Meta `json:"meta"` -} - // V2LivenessResponseBody defines model for V2LivenessResponseBody. type V2LivenessResponseBody struct { // Data Response data for the liveness check endpoint. 
This provides a simple indication of whether the Unkey API service is running and able to process requests. Monitoring systems can use this endpoint to track service availability and trigger alerts if the service becomes unhealthy. @@ -2556,9 +2272,6 @@ type UpdateRemainingJSONRequestBody = V2KeysUpdateRemainingRequestBody // VerifyKeyJSONRequestBody defines body for VerifyKey for application/json ContentType. type VerifyKeyJSONRequestBody = V2KeysVerifyKeyRequestBody -// WhoamiJSONRequestBody defines body for Whoami for application/json ContentType. -type WhoamiJSONRequestBody = V2KeysWhoamiRequestBody - // CreatePermissionJSONRequestBody defines body for CreatePermission for application/json ContentType. type CreatePermissionJSONRequestBody = V2PermissionsCreatePermissionRequestBody @@ -2956,6 +2669,130 @@ func (t *V2IdentitiesUpdateIdentityRequestBody) UnmarshalJSON(b []byte) error { return err } +// AsV2KeysGetKeyRequestBody0 returns the union data inside the V2KeysGetKeyRequestBody as a V2KeysGetKeyRequestBody0 +func (t V2KeysGetKeyRequestBody) AsV2KeysGetKeyRequestBody0() (V2KeysGetKeyRequestBody0, error) { + var body V2KeysGetKeyRequestBody0 + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromV2KeysGetKeyRequestBody0 overwrites any union data inside the V2KeysGetKeyRequestBody as the provided V2KeysGetKeyRequestBody0 +func (t *V2KeysGetKeyRequestBody) FromV2KeysGetKeyRequestBody0(v V2KeysGetKeyRequestBody0) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeV2KeysGetKeyRequestBody0 performs a merge with any union data inside the V2KeysGetKeyRequestBody, using the provided V2KeysGetKeyRequestBody0 +func (t *V2KeysGetKeyRequestBody) MergeV2KeysGetKeyRequestBody0(v V2KeysGetKeyRequestBody0) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +// AsV2KeysGetKeyRequestBody1 returns the union data inside the 
V2KeysGetKeyRequestBody as a V2KeysGetKeyRequestBody1 +func (t V2KeysGetKeyRequestBody) AsV2KeysGetKeyRequestBody1() (V2KeysGetKeyRequestBody1, error) { + var body V2KeysGetKeyRequestBody1 + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromV2KeysGetKeyRequestBody1 overwrites any union data inside the V2KeysGetKeyRequestBody as the provided V2KeysGetKeyRequestBody1 +func (t *V2KeysGetKeyRequestBody) FromV2KeysGetKeyRequestBody1(v V2KeysGetKeyRequestBody1) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeV2KeysGetKeyRequestBody1 performs a merge with any union data inside the V2KeysGetKeyRequestBody, using the provided V2KeysGetKeyRequestBody1 +func (t *V2KeysGetKeyRequestBody) MergeV2KeysGetKeyRequestBody1(v V2KeysGetKeyRequestBody1) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +func (t V2KeysGetKeyRequestBody) MarshalJSON() ([]byte, error) { + b, err := t.union.MarshalJSON() + if err != nil { + return nil, err + } + object := make(map[string]json.RawMessage) + if t.union != nil { + err = json.Unmarshal(b, &object) + if err != nil { + return nil, err + } + } + + if t.Decrypt != nil { + object["decrypt"], err = json.Marshal(t.Decrypt) + if err != nil { + return nil, fmt.Errorf("error marshaling 'decrypt': %w", err) + } + } + + if t.Key != nil { + object["key"], err = json.Marshal(t.Key) + if err != nil { + return nil, fmt.Errorf("error marshaling 'key': %w", err) + } + } + + if t.KeyId != nil { + object["keyId"], err = json.Marshal(t.KeyId) + if err != nil { + return nil, fmt.Errorf("error marshaling 'keyId': %w", err) + } + } + b, err = json.Marshal(object) + return b, err +} + +func (t *V2KeysGetKeyRequestBody) UnmarshalJSON(b []byte) error { + err := t.union.UnmarshalJSON(b) + if err != nil { + return err + } + object := make(map[string]json.RawMessage) + err = json.Unmarshal(b, &object) + if err != nil { 
+ return err + } + + if raw, found := object["decrypt"]; found { + err = json.Unmarshal(raw, &t.Decrypt) + if err != nil { + return fmt.Errorf("error reading 'decrypt': %w", err) + } + } + + if raw, found := object["key"]; found { + err = json.Unmarshal(raw, &t.Key) + if err != nil { + return fmt.Errorf("error reading 'key': %w", err) + } + } + + if raw, found := object["keyId"]; found { + err = json.Unmarshal(raw, &t.KeyId) + if err != nil { + return fmt.Errorf("error reading 'keyId': %w", err) + } + } + + return err +} + // AsV2KeysVerifyKeyRequestBodyPermissions0 returns the union data inside the V2KeysVerifyKeyRequestBody_Permissions as a V2KeysVerifyKeyRequestBodyPermissions0 func (t V2KeysVerifyKeyRequestBody_Permissions) AsV2KeysVerifyKeyRequestBodyPermissions0() (V2KeysVerifyKeyRequestBodyPermissions0, error) { var body V2KeysVerifyKeyRequestBodyPermissions0 diff --git a/go/apps/api/openapi/openapi.yaml b/go/apps/api/openapi/openapi.yaml index f68285da40..1bfcc5f6b8 100644 --- a/go/apps/api/openapi/openapi.yaml +++ b/go/apps/api/openapi/openapi.yaml @@ -194,60 +194,7 @@ components: Active sessions continue until their next verification attempt after expiry. example: 1704067200000 credits: - type: object - nullable: true - properties: - remaining: - type: integer - format: int64 - minimum: 0 - maximum: 9223372036854775807 # Max int64 value - description: | - Counts how many times this key can be used before becoming invalid. - Each verification reduces this count by the verification cost (default 1). - When reaching 0, further verifications fail with `USAGE_EXCEEDED`. - Required when specifying credits for usage-based pricing or quotas. - example: 1000 - refill: - type: object - nullable: true - properties: - interval: - type: string - enum: - - daily - - monthly - description: | - Sets how often credits automatically refill. Monthly refills support specific days via refillDay, - while daily refills occur at midnight UTC. 
Choose daily for high-frequency APIs and monthly for - subscription-based quotas. - amount: - type: integer - minimum: 1 - maximum: 1000000 # Reasonable upper bound for subscription quotas - description: | - Specifies how many credits to add during each refill cycle. - This amount gets added to remaining credits, not replaced, so unused credits carry over. - Typically matches your subscription plan's quota. - example: 1000 - refillDay: - type: integer - minimum: 1 - maximum: 31 # Valid calendar days - description: | - Sets the day of month for monthly refills (1-31). Only valid with monthly interval. - Days beyond month length (like 31 in February) default to the last valid day. - Useful for aligning refills with billing cycles. - example: 15 - required: - - interval - - amount - description: | - Configures automatic credit refills on a schedule. - Omitting this field preserves existing refill settings, while setting null disables refills entirely. - Refills add to existing credits rather than replacing them, allowing unused quotas to accumulate. - required: - - remaining + "$ref": "#/components/schemas/KeyCreditsData" description: | Controls usage-based limits for this key through credit consumption. Omitting this field preserves current credit settings, while setting null enables unlimited usage. @@ -258,44 +205,7 @@ components: nullable: true maxItems: 50 # Reasonable limit for rate limit configurations per key items: - type: object - required: - - name - - limit - - duration - properties: - name: - type: string - minLength: 1 - maxLength: 50 # Keep rate limit names concise and readable - pattern: "^[a-zA-Z][a-zA-Z0-9_-]*$" - description: | - Identifies this rate limit uniquely within the key. Names must start with a letter and use semantic - identifiers like 'requests', 'computations', or 'write_operations' rather than generic terms. - Use only letters, numbers, underscores, and hyphens after the initial letter. 
- Duplicate names within the same key are not allowed. - example: requests - limit: - type: integer - format: int64 - minimum: 1 - maximum: 1000000 # Reasonable upper bound for rate limits - description: | - Sets the maximum operations allowed within the duration window. - When this limit is reached, verification fails with code=RATE_LIMITED until the window resets. - Adjust this based on your API's capacity and expected usage patterns. - example: 100 - duration: - type: integer - format: int32 - minimum: 1000 # 1 second minimum window - maximum: 2592000000 # 30 days maximum window - description: | - Duration of the rate limit window in milliseconds. Common values include 60000 (1 minute), - 3600000 (1 hour), and 86400000 (24 hours). The rate limit automatically resets after this period elapses. - Windows shorter than 1 second are not supported for performance reasons. - example: 60000 - + "$ref": "#/components/schemas/RatelimitRequest" description: | Defines time-based rate limits that protect against abuse by controlling request frequency. Omitting this field preserves existing rate limits, while setting null removes all rate limits. @@ -376,8 +286,6 @@ components: "$ref": "#/components/schemas/KeysDeleteKeyResponseData" V2KeysGetKeyRequestBody: type: object - required: - - keyId properties: keyId: type: string @@ -401,231 +309,22 @@ components: Use only for legitimate recovery scenarios like user password resets or emergency access. Most applications should keep this false to maintain security best practices and avoid accidental key exposure. Decryption requests are audited and may trigger security alerts in enterprise environments. - additionalProperties: false - KeysGetKeyResponseData: - type: object - required: - - id - - start - - createdAt - - enabled - properties: - id: - type: string - description: The unique identifier of the key in Unkey's system. 
- example: key_2cGKbMxRyIzhCxo1Idjz8q - start: - type: string - description: - The first few characters of the key to visually identify it - without exposing the full key. Used in dashboards and logs to help users - recognize which key is being used without revealing sensitive information. - Typically includes the prefix if one was specified. - example: prod_5j1 - apiId: - type: string - description: The ID of the API this key belongs to. - example: api_2cGKbMxRjIzhCxo1IdjH3a - name: - type: string - description: - A descriptive name for the key for internal reference. Shown - in dashboards and logs but never exposed to end users. - example: Payment Service Production Key - externalId: - type: string - description: - Your user's unique identifier, creating a link between Unkey - and your system. This ID is returned during verification so you can identify - which customer/entity is making the request without performing additional - database lookups. Use consistent identifiers that match your primary user/tenant - identifiers for seamless integration. - example: user_912a841d - meta: - type: object - additionalProperties: true - description: - Arbitrary JSON metadata associated with this key. This can - include additional context like subscription plans, feature flags, or - any custom data. Metadata is stored as-is and returned during verification, - allowing you to access important information without additional database - queries. Consider including data relevant to authorization decisions, - usage tracking, and user context. - example: - plan: enterprise - limits: - storage: 500GB - compute: 1000 minutes/month - contactInfo: - primaryEmail: admin@example.com - technicalContact: tech@example.com - integrations: - slack: true - github: - enabled: true - repositories: 10 - lastActivityTimestamp: 1671048264000 - createdAt: - type: integer - format: int64 - description: Unix timestamp (in milliseconds) when the key was created. 
- example: 1671048264000 - updatedAt: - type: integer - format: int64 - description: Unix timestamp (in milliseconds) when the key was last updated. - example: 1671135600000 - expires: - type: integer - format: int64 - description: - Unix timestamp (in milliseconds) when this key will automatically - expire. If null, the key has no expiration. - example: 1704067200000 - credits: - type: object - properties: - remaining: - type: integer - format: int32 - description: - The number of times this key can still be used before becoming - invalid. - example: 753 - refill: - type: object - required: - - interval - - amount - properties: - interval: - type: string - enum: - - daily - - monthly - description: How often the credits are automatically refilled. - amount: - type: integer - minimum: 1 - description: Number of credits added during each refill. - refillDay: - type: integer - minimum: 1 - maximum: 31 - description: - For monthly refills, the day of month when refills - occur. - lastRefillAt: - type: integer - format: int64 - description: - Unix timestamp (in milliseconds) when credits were - last refilled. - description: Configuration for automatic credit refills. - description: - Usage limits configuration for this key. Credits provide a - way to limit the number of times a key can be used before becoming invalid. - Unlike ratelimits, credits are globally consistent (using database transactions) - providing 100% accuracy at the cost of slightly higher latency. Ideal - for monetization, usage quotas, or strict limits that must not be exceeded. - See the 'refill' field for automatic replenishment options. - ratelimits: - type: array - items: - type: object - required: - - name - - limit - - duration - properties: - name: - type: string - description: Identifier for this ratelimit. - example: requests - limit: - type: integer - format: int32 - description: - Maximum number of operations allowed within the time - window. 
- example: 100 - duration: - type: integer - format: int32 - description: Duration of the ratelimit window in milliseconds. - example: 60000 - async: - type: boolean - description: - Whether this ratelimit uses fast (async=true) or consistent - (async=false) mode. Fast mode has lower latency but less accuracy. - default: true - description: - Array of ratelimits applied to this key. Multiple named ratelimits - can control different aspects of key usage. For example, a 'requests' - ratelimit might control overall API calls while a separate 'computations' - limit manages access to resource-intensive operations. Ratelimits are - optimized for performance and typically add minimal latency to verifications. - They can be shared across keys through identities. - roles: - type: array - items: - type: string - description: - List of role names assigned to this key. Roles are collections - of permissions. - example: - - api_admin - - billing_reader - permissions: - type: array - items: - type: string - description: List of permission names directly assigned to this key. - example: - - documents.read - - documents.write - - settings.view - enabled: - type: boolean - description: - Whether the key is currently active. Disabled keys will fail - verification with `code=DISABLED`. Toggling this allows you to temporarily - suspend access without deleting the key, which is useful for maintenance, - account freezing, or debugging. Can be updated using the `keys.updateKey` endpoint. - example: true - plaintext: + key: type: string - description: - "The full `API key` in plaintext. Only included when `decrypt` - is `true` and the key was created with `recoverable: true`. SECURITY RISK: - This field contains the actual secret key which should never be logged, - stored in databases, or exposed in any frontend code. It should only be - displayed directly to users through secure channels. Most applications - should avoid setting decrypt=true unless absolutely necessary." 
- identity: - type: object - properties: - id: - type: string - description: The unique ID of the identity in Unkey's system. - externalId: - type: string - description: Your identifier for this identity in your system. - meta: - type: object - additionalProperties: true - description: Additional metadata associated with this identity. - required: - - id - - externalId - description: - The identity associated with this key, if any. Identities allow - resource sharing (like ratelimits) across multiple keys belonging to the - same user/entity. This enables scenarios like issuing separate keys for - different devices/services while maintaining global usage limits for the - user. An identity's externalId typically matches your user ID or tenant - ID. + minLength: 1 + maxLength: 512 # Reasonable upper bound for API key strings + description: | + The complete API key string provided by you, including any prefix. + Never log, cache, or store API keys in your system as they provide full access to user resources. + Include the full key exactly as provided - even minor modifications will cause a not found error. + example: prefix_f4cc2d765275c206b7d76ff0e92e583067c4e33603fb4055d7ba3031cd7ce36a + + additionalProperties: false + oneOf: + - required: + - keyId + - required: + - key V2KeysGetKeyResponseBody: type: object required: @@ -635,7 +334,7 @@ components: meta: "$ref": "#/components/schemas/Meta" data: - "$ref": "#/components/schemas/KeysGetKeyResponseData" + "$ref": "#/components/schemas/KeyResponseData" V2KeysVerifyKeyResponseBody: type: object required: @@ -1314,13 +1013,15 @@ components: remaining: type: integer format: int64 + nullable: true + minimum: 0 description: |- The new value for the remaining credits. This is an absolute value replacement, not an increment or decrement operation. 
Key behaviors: - This completely replaces the current remaining credits value - To add credits, first get the current value and then set remaining = current + additional - - To make a key unlimited, set remaining = -1 + - To make a key unlimited, set remaining = null - To make a key with unlimited usage have a specific limit, set remaining to a positive number - Credits are decremented each time the key is successfully verified (by the cost value, default 1) - When credits reach zero, verification fails with code=USAGE_EXCEEDED @@ -1352,7 +1053,7 @@ components: description: The updated remaining credits value for the key after the operation completes. This reflects the exact value that was set in the request. - A value of -1 indicates unlimited usage, meaning the key can be used an + A value of null indicates unlimited usage, meaning the key can be used an unlimited number of times without being rejected for credit exhaustion. This field is guaranteed to be present in every response. example: 1000 @@ -1538,58 +1239,7 @@ components: Essential for trial periods, temporary access, and security compliance requiring key rotation. example: 1704067200000 credits: - type: object - required: - - remaining - properties: - remaining: - type: integer - format: int64 - minimum: 0 - maximum: 9223372036854775807 # Max int64 value - description: | - Sets the initial number of times this key can be used before becoming invalid. - Each verification reduces this count by the verification cost (default 1). - When reaching 0, further verifications fail with code=USAGE_EXCEEDED. - Provides globally consistent usage limits, ideal for implementing usage-based pricing and strict quotas. - example: 1000 - refill: - type: object - properties: - interval: - type: string - enum: - - daily - - monthly - description: | - Sets how often credits automatically refill. Daily refills occur at midnight UTC, - while monthly refills support specific days via refillDay. 
- Choose daily for high-frequency APIs and monthly for subscription-based quotas. - amount: - type: integer - minimum: 1 - maximum: 1000000 # Reasonable upper bound for subscription quotas - description: | - Specifies how many credits to add during each refill cycle. - This amount gets added to remaining credits, not replaced, so unused credits carry over. - Typically matches your subscription plan's quota for predictable billing cycles. - example: 1000 - refillDay: - type: integer - minimum: 1 - maximum: 31 # Valid calendar days - description: | - Sets the day of month for monthly refills (1-31). Only valid with monthly interval. - Days beyond month length (like 31 in February) default to the last valid day. - Useful for aligning refills with billing cycles and subscription renewals. - example: 15 - required: - - interval - - amount - description: | - Configures automatic credit refills on a schedule for subscription-like recurring quotas. - Refills add to existing credits rather than replacing them, allowing unused quotas to accumulate. - Essential for implementing predictable billing cycles and user-friendly quota management. + "$ref": "#/components/schemas/KeyCreditsData" description: | Controls usage-based limits through credit consumption with optional automatic refills. Unlike rate limits which control frequency, credits control total usage with global consistency. @@ -1597,52 +1247,9 @@ components: Omitting this field creates unlimited usage, while setting null is not allowed during creation. ratelimits: type: array - maxItems: 50 # Reasonable limit for rate limit configurations per key + maxItems: 50 # Reasonable limit for rate limit configurations per identity items: - type: object - required: - - name - - limit - - duration - properties: - name: - type: string - minLength: 1 - maxLength: 50 # Keep rate limit names concise and readable - pattern: "^[a-zA-Z][a-zA-Z0-9_-]*$" - description: | - Identifies this rate limit uniquely within the key. 
Names must start with a letter and use semantic - identifiers like 'requests', 'computations', or 'write_operations' rather than generic terms. - Duplicate names within the same key are not allowed. - example: requests - limit: - type: integer - format: int64 - minimum: 1 - maximum: 1000000 # Reasonable upper bound for rate limits - description: | - Sets the maximum operations allowed within the duration window. - When this limit is reached, verification fails with code=RATE_LIMITED until the window resets. - Adjust this based on your API's capacity and expected usage patterns. - example: 100 - duration: - type: integer - format: int32 - minimum: 1000 # 1 second minimum window - maximum: 2592000000 # 30 days maximum window - description: | - Duration of the rate limit window in milliseconds. Common values include 60000 (1 minute), - 3600000 (1 hour), and 86400000 (24 hours). The rate limit automatically resets after this period elapses. - Windows shorter than 1 second are not supported for performance reasons. - example: 60000 - async: - type: boolean - default: true - description: | - Controls whether this rate limit uses fast (async=true) or consistent (async=false) mode. - Fast mode has lower latency but may allow brief bursts above the limit during high concurrency. - Consistent mode provides strict guarantees but adds latency to every verification. - Use consistent mode only when precise rate limiting is essential for billing or security. + "$ref": "#/components/schemas/RatelimitRequest" description: | Defines time-based rate limits that protect against abuse by controlling request frequency. Unlike credits which track total usage, rate limits reset automatically after each window expires. 
@@ -2027,22 +1634,23 @@ components: ratelimits: type: array items: - "$ref": "#/components/schemas/Ratelimit" + "$ref": "#/components/schemas/RatelimitResponse" description: Identity ratelimits required: - id - externalId - ratelimits - KeyCredits: + KeyCreditsData: type: object description: Credit configuration and remaining balance for this key. properties: remaining: type: integer format: int64 - minimum: -1 # -1 represents unlimited + nullable: true + minimum: 0 maximum: 9223372036854775807 - description: Number of credits remaining (-1 for unlimited). + description: Number of credits remaining (null for unlimited). example: 1000 refill: "$ref": "#/components/schemas/KeyCreditsRefill" @@ -2072,18 +1680,11 @@ components: maximum: 31 description: Day of month for monthly refills (1-31). example: 1 - lastRefillAt: - type: integer - format: int64 - minimum: 0 - maximum: 9223372036854775807 - description: Unix timestamp in milliseconds of last refill. - example: 1701425400000 required: - interval - amount additionalProperties: false - KeyResponse: + KeyResponseData: type: object properties: keyId: @@ -2099,6 +1700,10 @@ components: maxLength: 50 description: First few characters of the key for identification. example: sk_test_abc123 + enabled: + type: boolean + description: Whether the key is enabled or disabled. + example: true name: type: string maxLength: 255 @@ -2134,13 +1739,7 @@ components: description: Unix timestamp in milliseconds when key expires. example: 1733000000000 credits: - "$ref": "#/components/schemas/KeyCredits" - environment: - type: string - maxLength: 100 - pattern: "^[a-zA-Z][a-zA-Z0-9_-]*$" - description: Environment tag for this key. - example: production + "$ref": "#/components/schemas/KeyCreditsData" plaintext: type: string description: Decrypted key value (only when decrypt=true). 
@@ -2174,7 +1773,9 @@ components: required: - keyId - start + - apiId - createdAt + - enabled additionalProperties: false LivenessResponseData: type: object @@ -2225,11 +1826,18 @@ components: maximum: 2592000000 description: Rate limit window duration in milliseconds. example: 3600000 + autoApply: + type: boolean + description: | + Whether this rate limit should be automatically applied when verifying keys. + When true, we will automatically apply this limit during verification without it being explicitly listed. + example: true required: - id - name - limit - duration + - autoApply additionalProperties: false V2LivenessResponseBody: type: object @@ -2689,7 +2297,7 @@ components: type: array maxItems: 50 # Reasonable limit for rate limit configurations per identity items: - "$ref": "#/components/schemas/Ratelimit" + "$ref": "#/components/schemas/RatelimitRequest" description: | Defines shared rate limits that apply to all keys belonging to this identity. Prevents abuse by users with multiple keys by enforcing consistent limits across their entire key portfolio. @@ -2701,12 +2309,13 @@ components: - Each named limit can have different thresholds and windows When verifying keys, you can specify which limits you want to use and all keys attached to this identity will share the limits, regardless of which specific key is used. - Ratelimit: + RatelimitRequest: type: object required: - name - limit - duration + - autoApply properties: name: description: |- @@ -2736,6 +2345,7 @@ components: Higher values allow more frequent access but may impact service performance. type: integer format: int64 + minimum: 1 duration: description: |- The duration for each ratelimit window in milliseconds. @@ -2749,6 +2359,12 @@ components: Shorter windows provide more frequent resets but may allow large burst usage. Longer windows provide more consistent usage patterns but take longer to reset after limit exhaustion. 
type: integer format: int64 + minimum: 1000 + autoApply: + description: |- + Whether this ratelimit should be automatically applied when verifying a key. + type: boolean + default: false IdentitiesCreateIdentityResponseData: type: object properties: @@ -2833,7 +2449,7 @@ components: ratelimits: type: array items: - "$ref": "#/components/schemas/Ratelimit" + "$ref": "#/components/schemas/RatelimitResponse" description: Rate limits associated with this identity. These limits are shared across all API keys linked to this identity, providing consistent @@ -3679,7 +3295,7 @@ components: type: array maxItems: 100 # DoS protection matching request limit items: - "$ref": "#/components/schemas/KeyResponse" + "$ref": "#/components/schemas/KeyResponseData" description: Array of API keys with complete configuration and metadata. V2ApisListKeysResponseBody: type: object @@ -3758,96 +3374,6 @@ components: - duration - identifier - limit - V2KeysWhoamiRequestBody: - type: object - required: - - key - properties: - key: - type: string - description: The API key to identify and retrieve information about - example: sk_1234567890abcdef - minLength: 1 - additionalProperties: false - KeyWhoamiData: - type: object - properties: - id: - type: string - description: The unique identifier of the key - example: key_1234567890abcdef - name: - type: string - nullable: true - description: The human-readable name of the key (optional) - example: Production API Key - remaining: - type: integer - nullable: true - format: int64 - description: The remaining number of requests for the key (null means unlimited) - example: 1000 - identity: - type: object - nullable: true - properties: - id: - type: string - description: The unique identity ID associated with the key - example: id_1234567890abcdef - externalId: - type: string - description: - The external identity ID associated with the key (e.g., - user ID in your system) - example: user_12345 - required: - - id - - externalId - description: - The 
identity object associated with the key (null if no identity - is associated) - meta: - type: object - nullable: true - additionalProperties: true - description: - Custom metadata associated with the key (null if no metadata - is present) - example: - role: admin - plan: premium - teamId: team_12345 - createdAt: - type: integer - format: int64 - description: The timestamp in milliseconds when the key was created - example: 1620000000000 - enabled: - type: boolean - description: Whether the key is enabled and can be used - example: true - environment: - type: string - nullable: true - description: - The environment the key is associated with (e.g., production, - staging, development) - example: production - required: - - id - - createdAt - - enabled - V2KeysWhoamiResponse: - type: object - required: - - meta - - data - properties: - meta: - "$ref": "#/components/schemas/Meta" - data: - "$ref": "#/components/schemas/KeyWhoamiData" V2KeysVerifyKeyRequestBody: type: object required: @@ -3962,54 +3488,8 @@ components: Verification can succeed while credit deduction fails if the key has insufficient credits. ratelimits: type: array - maxItems: 10 # Reasonable limit for rate limit checks per verification items: - type: object - required: - - name - properties: - name: - type: string - minLength: 1 - maxLength: 50 # Keep rate limit names concise and readable - pattern: "^[a-zA-Z][a-zA-Z0-9_-]*$" - description: | - Identifies which rate limit configuration to check. Must match a rate limit defined on the key or identity. - Use semantic names that clearly describe what's being limited, avoiding generic terms like 'limit1'. - Rate limit names are case-sensitive and must exist in the key's configuration. - example: api_requests - cost: - type: integer - format: int64 - minimum: 0 - maximum: 1000 # Reasonable upper bound for operation costs - default: 1 - description: | - Sets how much of the rate limit quota this operation consumes. 
- Use higher values for expensive operations and 0 for operations that should not count against the limit. - Cost is applied immediately, even if other rate limits or permissions cause verification to fail. - example: 3 - limit: - type: integer - format: int64 - minimum: 1 - maximum: 1000000 # Reasonable upper bound for rate limits - description: | - Temporarily overrides the configured rate limit for this request only. - Does not modify the stored configuration and applies only to this single verification. - Useful for implementing per-request dynamic limits based on user tier or operation type. - example: 1000 - duration: - type: integer - format: int64 - minimum: 1000 # 1 second minimum window - maximum: 2592000000 # 30 days maximum window - description: | - Temporarily overrides the rate limit window duration in milliseconds for this request only. - Does not modify the stored configuration and applies only to this single verification. - Common values include 60000 (1 minute), 3600000 (1 hour), and 86400000 (24 hours). - example: 60000 - additionalProperties: false + "$ref": "#/components/schemas/RatelimitRequest" description: | Enforces time-based rate limiting during verification to prevent abuse and ensure fair usage. Omitting this field skips rate limit checks entirely, relying only on configured key rate limits. @@ -4111,12 +3591,6 @@ components: A list of all role names assigned to this key. Roles are collections of permissions that grant access to specific functionality. Only returned when permissions were checked during verification. - environment: - type: string - description: - The environment tag associated with the key (e.g., 'production', - 'staging', 'development'). Use this to further segment keys within an - API beyond just the apiId separation. 
identity: "$ref": "#/components/schemas/Identity" description: @@ -4126,43 +3600,11 @@ components: ratelimits: type: array items: - type: object - required: - - name - - limit - - remaining - - reset - properties: - name: - type: string - description: - The name of the rate limit that was checked. This matches - the name provided in the request. - limit: - type: integer - format: int32 - description: - The maximum number of operations allowed within the current - time window for this rate limit. - remaining: - type: integer - format: int32 - description: - The number of operations still allowed within the current - time window after this verification. Your application can use this - to inform users about remaining capacity or to implement your own - backoff strategies. - reset: - type: integer - format: int64 - description: - Unix timestamp in milliseconds when the rate limit window - will reset and 'remaining' will return to 'limit'. Use this to implement - retry-after logic or to display wait times to users. - description: - Information about the rate limits applied during verification. - Only included when rate limits were checked. If verification failed with - `code=RATE_LIMITED`, this will show which specific rate limit was exceeded. + "$ref": "#/components/schemas/RatelimitResponse" + description: + Information about the rate limits applied during verification. + Only included when rate limits were checked. If verification failed with + `code=RATE_LIMITED`, this will show which specific rate limit was exceeded. required: - valid - code @@ -4209,7 +3651,7 @@ components: type: array maxItems: 50 # Reasonable limit for rate limit configurations per identity items: - "$ref": "#/components/schemas/Ratelimit" + "$ref": "#/components/schemas/RatelimitRequest" description: | Replaces all existing identity rate limits with this complete list of rate limits. Omitting this field preserves existing rate limits, while providing an empty array removes all rate limits. 
@@ -4252,7 +3694,7 @@ components: ratelimits: type: array items: - "$ref": "#/components/schemas/Ratelimit" + "$ref": "#/components/schemas/RatelimitResponse" description: Rate limits associated with this identity after the update. example: - name: requests @@ -4269,124 +3711,6 @@ components: meta: "$ref": "#/components/schemas/Meta" paths: - "/v2/keys.whoami": - post: - tags: - - keys - summary: Get information about an API key - description: |- - Identifies the owner and details of an API key without triggering billing or usage tracking. This endpoint is designed for scenarios where users provide API keys to your application and you need to determine key ownership and basic information without performing a billed verification. - - Common use cases: - - User onboarding flows where customers enter their API keys and you need to identify the account owner - - Admin dashboards displaying which user or organization owns specific keys - - Key validation during setup without consuming credits or affecting usage metrics - - Support tools for troubleshooting key ownership and configuration issues - - The response includes key metadata, identity information (if associated), permissions, and current status. When a key has an associated identity, you can use the `externalId` to map it back to your user system without storing the raw API key. - - This endpoint provides an alternative to verification when you only need identification rather than access control. Unlike verification, it does not consume credits, increment usage counters, or affect rate limits associated with the key. - - Since the response may contain sensitive business data in metadata fields, restrict usage to secure contexts and consider implementing rate limiting to prevent abuse. 
- operationId: whoami - x-speakeasy-name-override: whoami - security: - - rootKey: [] - requestBody: - required: true - content: - application/json: - schema: - "$ref": "#/components/schemas/V2KeysWhoamiRequestBody" - examples: - basic: - summary: Basic key lookup - value: - key: sk_1234567890abcdef - responses: - "200": - description: Key information successfully retrieved - content: - application/json: - schema: - "$ref": "#/components/schemas/V2KeysWhoamiResponse" - examples: - full: - summary: Complete key information - value: - meta: - requestId: req_1234567890abcdef - data: - id: key_1234567890abcdef - name: Production API Key - remaining: 1000 - identity: - id: id_1234567890abcdef - externalId: user_12345 - meta: - role: admin - plan: premium - teamId: team_12345 - createdAt: 1620000000000 - enabled: true - environment: production - minimal: - summary: Key with minimal information - value: - meta: - requestId: req_1234567890abcdef - data: - id: key_1234567890abcdef - createdAt: 1620000000000 - enabled: true - "400": - description: Bad request - Missing required parameters or invalid format - content: - application/json: - schema: - "$ref": "#/components/schemas/BadRequestErrorResponse" - examples: - missingApiId: - summary: Missing apiId - value: - meta: - requestId: req_01H9TQPP77V5E48E9SH0BG0ZQX - error: - title: Bad Request - detail: apiId is required - status: 400 - type: https://unkey.dev/errors/bad-request - errors: - - message: apiId is required - location: body.apiId - "401": - description: Unauthorized - content: - application/json: - schema: - "$ref": "#/components/schemas/UnauthorizedErrorResponse" - "403": - description: Forbidden - content: - application/json: - schema: - "$ref": "#/components/schemas/ForbiddenErrorResponse" - "404": - description: Not found - Specified API ID doesn't exist - content: - application/json: - schema: - "$ref": "#/components/schemas/NotFoundErrorResponse" - "500": - content: - application/json: - schema: - "$ref": 
"#/components/schemas/InternalServerErrorResponse" - description: - Internal server error. This may occur if there are connection - issues with the database or if the verification service is experiencing - problems. Implement proper retry logic with backoff in your clients to - handle these situations. "/v2/keys.setPermissions": post: tags: @@ -5466,7 +4790,7 @@ paths: tags: - keys summary: Update remaining credits for an API key - description: |- + description: | Sets the remaining number of credits for a key, providing precise control over usage limits for implementing usage-based business models. Use this endpoint when customers make purchases to add credits, during subscription renewals to reset credits, or for adjustments due to refunds and promotions. The credits feature provides globally consistent usage limiting where each successful verification decrements the counter by the specified cost value (default 1). When credits reach zero, verification fails with `code=USAGE_EXCEEDED`, making this ideal for monetization models where accuracy is critical compared to rate limits which control frequency. @@ -5496,12 +4820,12 @@ paths: setUnlimited: summary: Set unlimited usage description: - Setting remaining to -1 makes the key usable an unlimited + Setting remaining to null makes the key usable an unlimited number of times. This is useful for premium tiers, trusted partners, or internal applications where usage counting isn't needed. 
value: keyId: key_2cGKbMxRyIzhCxo1Idjz8q - remaining: -1 + remaining: null removeRefills: summary: Remove refill settings description: @@ -5556,7 +4880,7 @@ paths: meta: requestId: req_1234567890abcdef data: - remaining: -1 + remaining: null refillSettings: "400": description: @@ -6722,9 +6046,11 @@ paths: - name: requests limit: 10000 duration: 3600000 + autoApply: true - name: heavy_compute limit: 100 duration: 86400000 + autoApply: false required: true responses: "200": @@ -6904,12 +6230,10 @@ paths: basic: summary: Basic identity listing value: - environment: default limit: 50 withPagination: summary: Fetch next page with cursor value: - environment: default limit: 50 cursor: cursor_eyJrZXkiOiJrZXlfMTIzNCJ9 required: true diff --git a/go/apps/api/routes/openapi/handler.go b/go/apps/api/routes/openapi/handler.go new file mode 100644 index 0000000000..d471597a29 --- /dev/null +++ b/go/apps/api/routes/openapi/handler.go @@ -0,0 +1,31 @@ +package handler + +import ( + "context" + + "github.com/unkeyed/unkey/go/apps/api/openapi" + "github.com/unkeyed/unkey/go/pkg/otel/logging" + "github.com/unkeyed/unkey/go/pkg/zen" +) + +// Handler implements zen.Route interface for the API reference endpoint +type Handler struct { + // Services as public fields (even though not used in this handler, showing the pattern) + Logger logging.Logger +} + +// Method returns the HTTP method this route responds to +func (h *Handler) Method() string { + return "GET" +} + +// Path returns the URL path pattern this route matches +func (h *Handler) Path() string { + return "/openapi.yaml" +} + +// Handle processes the HTTP request +func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { + s.AddHeader("Content-Type", "text/html") + return s.Send(200, openapi.Spec) +} diff --git a/go/apps/api/routes/register.go b/go/apps/api/routes/register.go index 8125f5d788..9cef358c2d 100644 --- a/go/apps/api/routes/register.go +++ b/go/apps/api/routes/register.go @@ -1,6 +1,7 @@ package routes 
import ( + openapi "github.com/unkeyed/unkey/go/apps/api/routes/openapi" "github.com/unkeyed/unkey/go/apps/api/routes/reference" v2Liveness "github.com/unkeyed/unkey/go/apps/api/routes/v2_liveness" @@ -33,6 +34,7 @@ import ( v2KeysAddPermissions "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_add_permissions" v2KeysAddRoles "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_add_roles" v2KeysCreateKey "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_create_key" + v2KeysGetKey "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_get_key" v2KeysRemovePermissions "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_remove_permissions" v2KeysRemoveRoles "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_remove_roles" v2KeysSetPermissions "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_set_permissions" @@ -344,6 +346,20 @@ func Register(srv *zen.Server, svc *Services) { Keys: svc.Keys, Permissions: svc.Permissions, Auditlogs: svc.Auditlogs, + Vault: svc.Vault, + }, + ) + + // v2/keys.getKey + srv.RegisterRoute( + defaultMiddlewares, + &v2KeysGetKey.Handler{ + Logger: svc.Logger, + DB: svc.Database, + Keys: svc.Keys, + Permissions: svc.Permissions, + Auditlogs: svc.Auditlogs, + Vault: svc.Vault, }, ) @@ -429,5 +445,13 @@ func Register(srv *zen.Server, svc *Services) { }, &reference.Handler{ Logger: svc.Logger, }) + srv.RegisterRoute([]zen.Middleware{ + withTracing, + withMetrics, + withLogging, + withErrorHandling, + }, &openapi.Handler{ + Logger: svc.Logger, + }) } diff --git a/go/apps/api/routes/v2_apis_create_api/handler.go b/go/apps/api/routes/v2_apis_create_api/handler.go index 56d6f3f2ef..951bca8cb4 100644 --- a/go/apps/api/routes/v2_apis_create_api/handler.go +++ b/go/apps/api/routes/v2_apis_create_api/handler.go @@ -126,10 +126,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { }, }) if err != nil { - return "", fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("database failed to 
insert audit logs"), fault.Public("Failed to insert audit logs"), - ) + return "", err } return apiId, nil diff --git a/go/apps/api/routes/v2_apis_delete_api/handler.go b/go/apps/api/routes/v2_apis_delete_api/handler.go index e3f6663206..73d95ef543 100644 --- a/go/apps/api/routes/v2_apis_delete_api/handler.go +++ b/go/apps/api/routes/v2_apis_delete_api/handler.go @@ -155,10 +155,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { UserAgent: s.UserAgent(), }}) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log error"), fault.Public("Failed to create audit log for API deletion."), - ) + return err } return nil diff --git a/go/apps/api/routes/v2_apis_get_api/handler.go b/go/apps/api/routes/v2_apis_get_api/handler.go index 4e441af2a8..a799433289 100644 --- a/go/apps/api/routes/v2_apis_get_api/handler.go +++ b/go/apps/api/routes/v2_apis_get_api/handler.go @@ -87,6 +87,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { fault.Internal("database error"), fault.Public("Failed to retrieve API information."), ) } + // Check if API belongs to the authorized workspace if api.WorkspaceID != auth.AuthorizedWorkspaceID { return fault.New("wrong workspace", diff --git a/go/apps/api/routes/v2_apis_list_keys/200_test.go b/go/apps/api/routes/v2_apis_list_keys/200_test.go index 440792b9e7..d31a46f62c 100644 --- a/go/apps/api/routes/v2_apis_list_keys/200_test.go +++ b/go/apps/api/routes/v2_apis_list_keys/200_test.go @@ -13,8 +13,11 @@ import ( handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_apis_list_keys" "github.com/unkeyed/unkey/go/pkg/db" "github.com/unkeyed/unkey/go/pkg/hash" + "github.com/unkeyed/unkey/go/pkg/ptr" "github.com/unkeyed/unkey/go/pkg/testutil" "github.com/unkeyed/unkey/go/pkg/uid" + + vaultv1 "github.com/unkeyed/unkey/go/gen/proto/vault/v1" ) func TestSuccess(t *testing.T) { @@ -35,10 +38,10 @@ func TestSuccess(t *testing.T) { workspace 
:= h.Resources().UserWorkspace // Create a root key with appropriate permissions - rootKey := h.CreateRootKey(workspace.ID, "api.*.read_key", "api.*.read_api") + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_key", "api.*.read_api", "api.*.decrypt_key") // Create a keyAuth (keyring) for the API - keyAuthID := uid.New("keyauth") + keyAuthID := uid.New(uid.KeyAuthPrefix) err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ ID: keyAuthID, WorkspaceID: workspace.ID, @@ -48,6 +51,12 @@ func TestSuccess(t *testing.T) { }) require.NoError(t, err) + err = db.Query.UpdateKeyringKeyEncryption(ctx, h.DB.RW(), db.UpdateKeyringKeyEncryptionParams{ + ID: keyAuthID, + StoreEncryptedKeys: true, + }) + require.NoError(t, err) + // Create a test API apiID := uid.New("api") err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ @@ -85,6 +94,7 @@ func TestSuccess(t *testing.T) { }) require.NoError(t, err) + encryptedKeysMap := make(map[string]struct{}) // Create test keys with various configurations testKeys := []struct { id string @@ -139,10 +149,12 @@ func TestSuccess(t *testing.T) { metaBytes, _ = json.Marshal(keyData.meta) } + key := keyData.start + uid.New("") + insertParams := db.InsertKeyParams{ ID: keyData.id, KeyringID: keyAuthID, - Hash: hash.Sha256(keyData.start + uid.New("")), + Hash: hash.Sha256(key), Start: keyData.start, WorkspaceID: workspace.ID, ForWorkspaceID: sql.NullString{Valid: false}, @@ -166,6 +178,22 @@ func TestSuccess(t *testing.T) { err := db.Query.InsertKey(ctx, h.DB.RW(), insertParams) require.NoError(t, err) + + encryption, err := h.Vault.Encrypt(ctx, &vaultv1.EncryptRequest{ + Keyring: h.Resources().UserWorkspace.ID, + Data: key, + }) + require.NoError(t, err) + + err = db.Query.InsertKeyEncryption(ctx, h.DB.RW(), db.InsertKeyEncryptionParams{ + WorkspaceID: h.Resources().UserWorkspace.ID, + KeyID: keyData.id, + CreatedAt: time.Now().UnixMilli(), + Encrypted: encryption.GetEncrypted(), + EncryptionKeyID: 
encryption.GetKeyId(), + }) + require.NoError(t, err) + encryptedKeysMap[keyData.id] = struct{}{} } // Set up request headers @@ -407,7 +435,7 @@ func TestSuccess(t *testing.T) { t.Run("empty API returns empty result", func(t *testing.T) { // Create a new API with no keys - emptyKeyAuthID := uid.New("keyauth") + emptyKeyAuthID := uid.New(uid.KeyAuthPrefix) err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ ID: emptyKeyAuthID, WorkspaceID: workspace.ID, @@ -570,4 +598,30 @@ func TestSuccess(t *testing.T) { require.True(t, foundKeyWithRatelimits, "Should find the key with ratelimits in response") require.True(t, foundKeyWithoutRatelimits, "Should find the key without ratelimits in response") }) + + t.Run("verify encrypted key is returned correctly", func(t *testing.T) { + req := handler.Request{ + ApiId: apiID, + Decrypt: ptr.P(true), + } + + res := testutil.CallRoute[handler.Request, handler.Response]( + h, + route, + headers, + req, + ) + + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body.Data) + + for _, key := range res.Body.Data { + _, exists := encryptedKeysMap[key.KeyId] + if !exists { + continue + } + + require.NotEmpty(t, ptr.SafeDeref(key.Plaintext), "Key should be decrypted and have plaintext") + } + }) } diff --git a/go/apps/api/routes/v2_apis_list_keys/403_test.go b/go/apps/api/routes/v2_apis_list_keys/403_test.go index 729ec077a1..72ed3c0397 100644 --- a/go/apps/api/routes/v2_apis_list_keys/403_test.go +++ b/go/apps/api/routes/v2_apis_list_keys/403_test.go @@ -34,7 +34,7 @@ func TestAuthorizationErrors(t *testing.T) { workspace := h.Resources().UserWorkspace // Create a keyAuth (keyring) for the API - keyAuthID := uid.New("keyauth") + keyAuthID := uid.New(uid.KeyAuthPrefix) err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ ID: keyAuthID, WorkspaceID: workspace.ID, @@ -44,6 +44,12 @@ func TestAuthorizationErrors(t *testing.T) { }) require.NoError(t, err) + err = db.Query.UpdateKeyringKeyEncryption(ctx, 
h.DB.RW(), db.UpdateKeyringKeyEncryptionParams{ + ID: keyAuthID, + StoreEncryptedKeys: true, + }) + require.NoError(t, err) + // Create a test API apiID := uid.New("api") err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ diff --git a/go/apps/api/routes/v2_apis_list_keys/404_test.go b/go/apps/api/routes/v2_apis_list_keys/404_test.go index 6d9094d26a..b8056bda24 100644 --- a/go/apps/api/routes/v2_apis_list_keys/404_test.go +++ b/go/apps/api/routes/v2_apis_list_keys/404_test.go @@ -64,7 +64,7 @@ func TestNotFoundErrors(t *testing.T) { // Test case for API in different workspace t.Run("API in different workspace", func(t *testing.T) { // Create a keyAuth for the API in the different workspace - otherKeyAuthID := uid.New("keyauth") + otherKeyAuthID := uid.New(uid.KeyAuthPrefix) err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ ID: otherKeyAuthID, WorkspaceID: workspace2.ID, @@ -105,7 +105,7 @@ func TestNotFoundErrors(t *testing.T) { // Test case for deleted API t.Run("deleted API", func(t *testing.T) { // Create a keyAuth for the API - deletedKeyAuthID := uid.New("keyauth") + deletedKeyAuthID := uid.New(uid.KeyAuthPrefix) err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ ID: deletedKeyAuthID, WorkspaceID: workspace1.ID, @@ -244,7 +244,7 @@ func TestNotFoundErrors(t *testing.T) { // Test case for API that exists but has no keys (should return 200 with empty array) t.Run("API exists but has no keys", func(t *testing.T) { // Create a keyAuth for the API - emptyKeyAuthID := uid.New("keyauth") + emptyKeyAuthID := uid.New(uid.KeyAuthPrefix) err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ ID: emptyKeyAuthID, WorkspaceID: workspace1.ID, diff --git a/go/apps/api/routes/v2_apis_list_keys/412_test.go b/go/apps/api/routes/v2_apis_list_keys/412_test.go new file mode 100644 index 0000000000..b4ee049fec --- /dev/null +++ b/go/apps/api/routes/v2_apis_list_keys/412_test.go @@ -0,0 +1,83 @@ +package handler_test + 
+import ( + "context" + "database/sql" + "fmt" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_apis_list_keys" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func TestPreconditionError(t *testing.T) { + ctx := context.Background() + h := testutil.NewHarness(t) + + route := &handler.Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + Permissions: h.Permissions, + Vault: h.Vault, + } + + h.Register(route) + + // Create API manually + keyAuthID := uid.New(uid.KeyAuthPrefix) + err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ + ID: keyAuthID, + WorkspaceID: h.Resources().UserWorkspace.ID, + CreatedAtM: time.Now().UnixMilli(), + DefaultPrefix: sql.NullString{Valid: false, String: ""}, + DefaultBytes: sql.NullInt32{Valid: false, Int32: 0}, + }) + require.NoError(t, err) + + apiID := uid.New(uid.APIPrefix) + err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ + ID: apiID, + Name: "test-api", + WorkspaceID: h.Resources().UserWorkspace.ID, + AuthType: db.NullApisAuthType{Valid: true, ApisAuthType: db.ApisAuthTypeKey}, + KeyAuthID: sql.NullString{Valid: true, String: keyAuthID}, + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + // Create a root key with appropriate permissions + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.read_key", "api.*.read_api", "api.*.decrypt_key") + + // Set up request headers + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + // Test case for API ID with special characters + t.Run("Try decrypting key without opt-in", func(t *testing.T) { + req := handler.Request{ + ApiId: apiID, + Decrypt: ptr.P(true), + } + + res := 
testutil.CallRoute[handler.Request, openapi.PreconditionFailedErrorResponse]( + h, + route, + headers, + req, + ) + + require.Equal(t, 412, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + }) +} diff --git a/go/apps/api/routes/v2_apis_list_keys/handler.go b/go/apps/api/routes/v2_apis_list_keys/handler.go index 925faa28e4..630f571aea 100644 --- a/go/apps/api/routes/v2_apis_list_keys/handler.go +++ b/go/apps/api/routes/v2_apis_list_keys/handler.go @@ -6,6 +6,7 @@ import ( "encoding/json" "net/http" + "github.com/oapi-codegen/nullable" "github.com/unkeyed/unkey/go/apps/api/openapi" vaultv1 "github.com/unkeyed/unkey/go/gen/proto/vault/v1" "github.com/unkeyed/unkey/go/internal/services/keys" @@ -25,7 +26,6 @@ type Response = openapi.V2ApisListKeysResponseBody // Handler implements zen.Route interface for the v2 APIs list keys endpoint type Handler struct { - // Services as public fields Logger logging.Logger DB db.Database Keys keys.KeyService @@ -110,6 +110,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { fault.Internal("database error"), fault.Public("Failed to retrieve API information."), ) } + // Check if API belongs to the authorized workspace if api.WorkspaceID != auth.AuthorizedWorkspaceID { return fault.New("wrong workspace", @@ -117,6 +118,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { fault.Internal("wrong workspace, masking as 404"), fault.Public("The requested API does not exist or has been deleted."), ) } + // Check if API is deleted if api.DeletedAtM.Valid { return fault.New("api not found", @@ -133,6 +135,49 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { ) } + keyAuth, err := db.Query.FindKeyringByID(ctx, h.DB.RO(), api.KeyAuthID.String) + if err != nil { + if db.IsNotFound(err) { + return fault.New("api not set up for keys", + fault.Code(codes.App.Precondition.PreconditionFailed.URN()), + fault.Internal("api not set up for keys, keyauth not 
found"), fault.Public("The requested API is not set up to handle keys."), + ) + } + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), fault.Public("Failed to retrieve API information."), + ) + } + + if ptr.SafeDeref(req.Decrypt, false) { + err = h.Permissions.Check( + ctx, + auth.KeyID, + rbac.Or( + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: "*", + Action: rbac.DecryptKey, + }), + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: api.ID, + Action: rbac.DecryptKey, + }), + ), + ) + if err != nil { + return err + } + + if !keyAuth.StoreEncryptedKeys { + return fault.New("api not set up for key encryption", + fault.Code(codes.App.Precondition.PreconditionFailed.URN()), + fault.Internal("api not set up for key encryption"), fault.Public("The requested API does not support key encryption."), + ) + } + } + // 5. Query the keys var identityId string if req.ExternalId != nil && *req.ExternalId != "" { @@ -153,7 +198,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { Meta: openapi.Meta{ RequestId: s.RequestID(), }, - Data: []openapi.KeyResponse{}, + Data: []openapi.KeyResponseData{}, Pagination: &openapi.Pagination{ Cursor: nil, HasMore: false, @@ -185,6 +230,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { // Query ratelimits for all returned keys ratelimitsMap := make(map[string][]db.ListRatelimitsByKeyIDsRow) + identityRatelimitsMap := make(map[string][]db.Ratelimit) if len(keys) > 0 { // Extract key IDs and convert to sql.NullString slice keyIDs := make([]sql.NullString, len(keys)) @@ -207,31 +253,41 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { ratelimitsMap[rl.KeyID.String] = append(ratelimitsMap[rl.KeyID.String], rl) } } + + uniqIdentityIds := make(map[string]struct{}) + for _, key := range keys { + if !key.IdentityID.Valid { + continue + } + + uniqIdentityIds[key.IdentityID.String] = struct{}{} + } 
+ + identityIDs := make([]sql.NullString, 0) + for identityID := range uniqIdentityIds { + identityIDs = append(identityIDs, sql.NullString{String: identityID, Valid: true}) + } + + // Query ratelimits for these identities + identityRatelimits, listErr := db.Query.ListIdentityRatelimitsByIDs(ctx, h.DB.RO(), identityIDs) + if listErr != nil { + return fault.Wrap(listErr, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), fault.Public("Failed to retrieve ratelimits."), + ) + } + + // Group ratelimits by identity_id + for _, rl := range identityRatelimits { + if rl.IdentityID.Valid { + identityRatelimitsMap[rl.IdentityID.String] = append(identityRatelimitsMap[rl.IdentityID.String], rl) + } + } } // If user requested decryption, check permissions and decrypt plaintextMap := map[string]string{} // nolint:staticcheck if req.Decrypt != nil && *req.Decrypt { - err = h.Permissions.Check( - ctx, - auth.KeyID, - rbac.Or( - rbac.T(rbac.Tuple{ - ResourceType: rbac.Api, - ResourceID: "*", - Action: rbac.DecryptKey, - }), - rbac.T(rbac.Tuple{ - ResourceType: rbac.Api, - ResourceID: api.ID, - Action: rbac.DecryptKey, - }), - ), - ) - if err != nil { - return err - } - // If we have permission, proceed with decryption for _, key := range keys { if key.EncryptedKey.Valid && key.EncryptionKeyID.Valid { @@ -246,6 +302,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { ) continue } + plaintextMap[key.Key.ID] = decrypted.GetPlaintext() } } @@ -272,15 +329,15 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { } // Transform keys into the response format - responseData := make([]openapi.KeyResponse, numKeysToReturn) + responseData := make([]openapi.KeyResponseData, numKeysToReturn) for i := 0; i < numKeysToReturn; i++ { key := filteredKeys[i] - k := openapi.KeyResponse{ + k := openapi.KeyResponseData{ KeyId: key.Key.ID, Start: key.Key.Start, CreatedAt: key.Key.CreatedAtM, + Enabled: key.Key.Enabled, 
Credits: nil, - Environment: nil, Expires: nil, Identity: nil, Meta: nil, @@ -296,6 +353,14 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { k.Name = ptr.P(key.Key.Name.String) } + if key.Key.UpdatedAtM.Valid { + k.UpdatedAt = ptr.P(key.Key.UpdatedAtM.Int64) + } + + if key.Key.Expires.Valid { + k.Expires = ptr.P(key.Key.Expires.Time.UnixMilli()) + } + if key.Key.Meta.Valid { err = json.Unmarshal([]byte(key.Key.Meta.String), &k.Meta) if err != nil { @@ -306,28 +371,24 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { } } - if key.Key.UpdatedAtM.Valid { - k.UpdatedAt = ptr.P(key.Key.UpdatedAtM.Int64) - } - - if key.Key.Expires.Valid { - k.Expires = ptr.P(key.Key.Expires.Time.UnixMilli()) - } - if key.Key.RemainingRequests.Valid { - k.Credits = &openapi.KeyCredits{ - Remaining: int64(key.Key.RemainingRequests.Int32), + k.Credits = &openapi.KeyCreditsData{ + Remaining: nullable.NewNullableWithValue(int64(key.Key.RemainingRequests.Int32)), Refill: nil, } + if key.Key.RefillAmount.Valid { - k.Credits.Refill = &openapi.KeyCreditsRefill{ - Amount: int64(key.Key.RefillAmount.Int32), - Interval: "", - RefillDay: ptr.P(int(key.Key.RefillDay.Int16)), - LastRefillAt: nil, + var refillDay *int + interval := openapi.KeyCreditsRefillIntervalDaily + if key.Key.RefillDay.Valid { + interval = openapi.KeyCreditsRefillIntervalMonthly + refillDay = ptr.P(int(key.Key.RefillDay.Int16)) // nolint:gosec } - if key.Key.LastRefillAt.Valid { - k.Credits.Refill.LastRefillAt = ptr.P(key.Key.LastRefillAt.Time.UnixMilli()) + + k.Credits.Refill = &openapi.KeyCreditsRefill{ + Amount: int64(key.Key.RefillAmount.Int32), + Interval: interval, + RefillDay: refillDay, } } } @@ -339,13 +400,13 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { // Add identity information if available if key.IdentityID.Valid { - k.Identity = &openapi.Identity{ ExternalId: key.ExternalID.String, Id: key.IdentityID.String, Meta: nil, Ratelimits: nil, } + if 
len(key.IdentityMeta) > 0 { err = json.Unmarshal(key.IdentityMeta, &k.Identity.Meta) if err != nil { @@ -353,6 +414,21 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { fault.Internal("unable to unmarshal identity meta"), fault.Public("We encountered an error while trying to unmarshal the identity meta data.")) } } + + if ratelimits, ok := identityRatelimitsMap[key.IdentityID.String]; ok { + ratelimitsResponse := make([]openapi.RatelimitResponse, len(ratelimits)) + for idx, rl := range ratelimits { + ratelimitsResponse[idx] = openapi.RatelimitResponse{ + Id: rl.ID, + Name: rl.Name, + Duration: rl.Duration, + AutoApply: rl.AutoApply, + Limit: int64(rl.Limit), + } + } + + k.Identity.Ratelimits = ratelimitsResponse + } } // Get permissions for the key @@ -384,10 +460,11 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { ratelimitsResponse := make([]openapi.RatelimitResponse, len(keyRatelimits)) for j, rl := range keyRatelimits { ratelimitsResponse[j] = openapi.RatelimitResponse{ - Id: rl.ID, - Name: rl.Name, - Limit: int64(rl.Limit), - Duration: rl.Duration, + Id: rl.ID, + Name: rl.Name, + Limit: int64(rl.Limit), + Duration: rl.Duration, + AutoApply: rl.AutoApply, } } k.Ratelimits = ptr.P(ratelimitsResponse) diff --git a/go/apps/api/routes/v2_identities_create_identity/200_test.go b/go/apps/api/routes/v2_identities_create_identity/200_test.go index a12b7415c8..81ad8c6a22 100644 --- a/go/apps/api/routes/v2_identities_create_identity/200_test.go +++ b/go/apps/api/routes/v2_identities_create_identity/200_test.go @@ -20,7 +20,6 @@ import ( func TestCreateIdentitySuccessfully(t *testing.T) { ctx := context.Background() h := testutil.NewHarness(t) - route := &handler.Handler{ Logger: h.Logger, DB: h.DB, @@ -149,16 +148,18 @@ func TestCreateIdentitySuccessfully(t *testing.T) { t.Run("create identity with ratelimits", func(t *testing.T) { externalTestID := uid.New("test_external_id") - identityRateLimits := []openapi.Ratelimit{ + 
identityRateLimits := []openapi.RatelimitRequest{ { - Duration: time.Minute.Milliseconds(), - Limit: 100, - Name: "test", + Duration: time.Minute.Milliseconds(), + Limit: 100, + Name: "test", + AutoApply: true, }, { - Duration: time.Minute.Milliseconds(), - Limit: 200, - Name: "test2", + Duration: time.Minute.Milliseconds(), + Limit: 200, + Name: "test2", + AutoApply: true, }, } @@ -205,16 +206,18 @@ func TestCreateIdentitySuccessfully(t *testing.T) { meta := &map[string]any{"userId": "user_123", "role": "admin"} - identityRateLimits := []openapi.Ratelimit{ + identityRateLimits := []openapi.RatelimitRequest{ { - Duration: time.Minute.Milliseconds(), - Limit: 100, - Name: "requests_per_minute", + Duration: time.Minute.Milliseconds(), + Limit: 100, + Name: "requests_per_minute", + AutoApply: true, }, { - Duration: (time.Hour * 24).Milliseconds(), - Limit: 1000, - Name: "requests_per_day", + Duration: (time.Hour * 24).Milliseconds(), + Limit: 1000, + Name: "requests_per_day", + AutoApply: true, }, } diff --git a/go/apps/api/routes/v2_identities_create_identity/400_test.go b/go/apps/api/routes/v2_identities_create_identity/400_test.go index 5319f917ac..ad7c2561fd 100644 --- a/go/apps/api/routes/v2_identities_create_identity/400_test.go +++ b/go/apps/api/routes/v2_identities_create_identity/400_test.go @@ -98,10 +98,11 @@ func TestBadRequests(t *testing.T) { t.Run("missing rate limit name", func(t *testing.T) { req := handler.Request{ ExternalId: uid.New("test"), - Ratelimits: &[]openapi.Ratelimit{ + Ratelimits: &[]openapi.RatelimitRequest{ { - Duration: 1000, - Limit: 1, + Duration: 1000, + Limit: 1, + AutoApply: true, }, }, } @@ -121,11 +122,12 @@ func TestBadRequests(t *testing.T) { t.Run("negative rate limit value", func(t *testing.T) { req := handler.Request{ ExternalId: uid.New("test"), - Ratelimits: &[]openapi.Ratelimit{ + Ratelimits: &[]openapi.RatelimitRequest{ { - Name: "test_limit", - Duration: 1000, // valid duration - Limit: -10, // negative limit + Name: 
"test_limit", + Duration: 1000, // valid duration + Limit: -10, // negative limit + AutoApply: true, }, }, } @@ -134,7 +136,7 @@ func TestBadRequests(t *testing.T) { require.NotNil(t, res.Body) require.Equal(t, "https://unkey.com/docs/api-reference/errors-v2/unkey/application/invalid_input", res.Body.Error.Type) - require.Equal(t, "Rate limit value must be greater than zero.", res.Body.Error.Detail) + require.Equal(t, "POST request body for '/v2/identities.createIdentity' failed to validate schema", res.Body.Error.Detail) require.Equal(t, http.StatusBadRequest, res.Body.Error.Status) require.Equal(t, "Bad Request", res.Body.Error.Title) require.NotEmpty(t, res.Body.Meta.RequestId) @@ -144,11 +146,12 @@ func TestBadRequests(t *testing.T) { t.Run("zero rate limit value", func(t *testing.T) { req := handler.Request{ ExternalId: uid.New("test"), - Ratelimits: &[]openapi.Ratelimit{ + Ratelimits: &[]openapi.RatelimitRequest{ { - Name: "test_limit", - Duration: 1000, // valid duration - Limit: 0, // zero limit + Name: "test_limit", + Duration: 1000, // valid duration + Limit: 0, // zero limit + AutoApply: true, }, }, } @@ -157,7 +160,7 @@ func TestBadRequests(t *testing.T) { require.NotNil(t, res.Body) require.Equal(t, "https://unkey.com/docs/api-reference/errors-v2/unkey/application/invalid_input", res.Body.Error.Type) - require.Equal(t, "Rate limit value must be greater than zero.", res.Body.Error.Detail) + require.Equal(t, "POST request body for '/v2/identities.createIdentity' failed to validate schema", res.Body.Error.Detail) require.Equal(t, http.StatusBadRequest, res.Body.Error.Status) require.Equal(t, "Bad Request", res.Body.Error.Title) require.NotEmpty(t, res.Body.Meta.RequestId) @@ -167,11 +170,12 @@ func TestBadRequests(t *testing.T) { t.Run("duration less than 1000ms", func(t *testing.T) { req := handler.Request{ ExternalId: uid.New("test"), - Ratelimits: &[]openapi.Ratelimit{ + Ratelimits: &[]openapi.RatelimitRequest{ { - Name: "test_limit", - Duration: 999, 
// less than 1000ms - Limit: 100, // valid limit + Name: "test_limit", + Duration: 999, // less than 1000ms + Limit: 100, // valid limit + AutoApply: true, }, }, } @@ -180,7 +184,7 @@ func TestBadRequests(t *testing.T) { require.NotNil(t, res.Body) require.Equal(t, "https://unkey.com/docs/api-reference/errors-v2/unkey/application/invalid_input", res.Body.Error.Type) - require.Equal(t, "Rate limit duration must be at least 1000ms (1 second).", res.Body.Error.Detail) + require.Equal(t, "POST request body for '/v2/identities.createIdentity' failed to validate schema", res.Body.Error.Detail) require.Equal(t, http.StatusBadRequest, res.Body.Error.Status) require.Equal(t, "Bad Request", res.Body.Error.Title) require.NotEmpty(t, res.Body.Meta.RequestId) diff --git a/go/apps/api/routes/v2_identities_create_identity/handler.go b/go/apps/api/routes/v2_identities_create_identity/handler.go index 88b69a29f5..d2b9a14e4d 100644 --- a/go/apps/api/routes/v2_identities_create_identity/handler.go +++ b/go/apps/api/routes/v2_identities_create_identity/handler.go @@ -103,35 +103,6 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { meta = rawMeta } - // Validate rate limits - if req.Ratelimits != nil { - for _, ratelimit := range *req.Ratelimits { - // Validate rate limit name is provided - if ratelimit.Name == "" { - return fault.New("invalid rate limit", - fault.Code(codes.App.Validation.InvalidInput.URN()), - fault.Internal("missing rate limit name"), fault.Public("Rate limit name is required."), - ) - } - - // Validate rate limit value is positive - if ratelimit.Limit <= 0 { - return fault.New("invalid rate limit", - fault.Code(codes.App.Validation.InvalidInput.URN()), - fault.Internal("invalid rate limit value"), fault.Public("Rate limit value must be greater than zero."), - ) - } - - // Validate duration is at least 1000ms (1 second) - if ratelimit.Duration < 1000 { - return fault.New("invalid rate limit", - 
fault.Code(codes.App.Validation.InvalidInput.URN()), - fault.Internal("invalid rate limit duration"), fault.Public("Rate limit duration must be at least 1000ms (1 second)."), - ) - } - } - } - identityID, err := db.TxWithResult(ctx, h.DB.RW(), func(ctx context.Context, tx db.DBTX) (string, error) { identityID := uid.New(uid.IdentityPrefix) args := db.InsertIdentityParams{ @@ -142,9 +113,6 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { CreatedAt: time.Now().UnixMilli(), Meta: meta, } - h.Logger.Warn("inserting identity", - "args", args, - ) err = db.Query.InsertIdentity(ctx, tx, args) if err != nil { @@ -184,9 +152,10 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { } if req.Ratelimits != nil { - for _, ratelimit := range *req.Ratelimits { + rateLimitsToInsert := make([]db.InsertIdentityRatelimitParams, len(*req.Ratelimits)) + for i, ratelimit := range *req.Ratelimits { ratelimitID := uid.New(uid.RatelimitPrefix) - err = db.Query.InsertIdentityRatelimit(ctx, tx, db.InsertIdentityRatelimitParams{ + rateLimitsToInsert[i] = db.InsertIdentityRatelimitParams{ ID: ratelimitID, WorkspaceID: auth.AuthorizedWorkspaceID, IdentityID: sql.NullString{String: identityID, Valid: true}, @@ -194,11 +163,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { Limit: int32(ratelimit.Limit), // nolint:gosec Duration: ratelimit.Duration, CreatedAt: time.Now().UnixMilli(), - }) - if err != nil { - return "", fault.Wrap(err, - fault.Internal("unable to create ratelimit"), fault.Public("We're unable to create a ratelimit for the identity."), - ) + AutoApply: ratelimit.AutoApply, } auditLogs = append(auditLogs, auditlog.AuditLog{ @@ -229,14 +194,23 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { }, }) } + + err = db.BulkInsert( + ctx, + tx, + "INSERT INTO ratelimits (id, workspace_id, identity_id, name, `limit`, duration, created_at, auto_apply) VALUES (?, ?, ?, ?, ?, ?, ?, ?)", + 
rateLimitsToInsert, + ) + if err != nil { + return "", fault.Wrap(err, + fault.Internal("unable to create ratelimit"), fault.Public("We're unable to create a ratelimit for the identity."), + ) + } } err = h.Auditlogs.Insert(ctx, tx, auditLogs) if err != nil { - return "", fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("database failed to insert audit logs"), fault.Public("Failed to insert audit logs"), - ) + return "", err } return identityID, nil diff --git a/go/apps/api/routes/v2_identities_delete_identity/handler.go b/go/apps/api/routes/v2_identities_delete_identity/handler.go index 862fc162c0..83b81c6b4b 100644 --- a/go/apps/api/routes/v2_identities_delete_identity/handler.go +++ b/go/apps/api/routes/v2_identities_delete_identity/handler.go @@ -256,10 +256,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { err = h.Auditlogs.Insert(ctx, tx, auditLogs) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("database failed to insert audit logs"), fault.Public("Failed to insert audit logs"), - ) + return err } return nil diff --git a/go/apps/api/routes/v2_identities_get_identity/200_test.go b/go/apps/api/routes/v2_identities_get_identity/200_test.go index 62a56e36d6..033c955ead 100644 --- a/go/apps/api/routes/v2_identities_get_identity/200_test.go +++ b/go/apps/api/routes/v2_identities_get_identity/200_test.go @@ -113,7 +113,7 @@ func TestSuccess(t *testing.T) { require.Len(t, *res.Body.Data.Ratelimits, 2) // Ratelimits can be in any order, so we need to find the specific ones - var apiCallsLimit, specialFeatureLimit *openapi.Ratelimit + var apiCallsLimit, specialFeatureLimit *openapi.RatelimitResponse for i := range *res.Body.Data.Ratelimits { switch (*res.Body.Data.Ratelimits)[i].Name { case "api_calls": @@ -424,7 +424,7 @@ func TestSuccess(t *testing.T) { require.Len(t, *res.Body.Data.Ratelimits, len(rateLimits)) // Create a map of 
rate limits by name to verify each one - returnedRateLimits := make(map[string]openapi.Ratelimit) + returnedRateLimits := make(map[string]openapi.RatelimitResponse) for _, rl := range *res.Body.Data.Ratelimits { returnedRateLimits[rl.Name] = rl } diff --git a/go/apps/api/routes/v2_identities_get_identity/400_test.go b/go/apps/api/routes/v2_identities_get_identity/400_test.go index 0c6e007007..faca2afeb7 100644 --- a/go/apps/api/routes/v2_identities_get_identity/400_test.go +++ b/go/apps/api/routes/v2_identities_get_identity/400_test.go @@ -37,8 +37,8 @@ func TestBadRequests(t *testing.T) { require.Equal(t, "https://unkey.com/docs/api-reference/errors-v2/unkey/application/invalid_input", res.Body.Error.Type) require.Equal(t, "POST request body for '/v2/identities.getIdentity' failed to validate schema", res.Body.Error.Detail) require.GreaterOrEqual(t, len(res.Body.Error.Errors), 1) - require.Equal(t, "/oneOf/0/required", res.Body.Error.Errors[0].Location) - require.Equal(t, "missing property 'identityId'", res.Body.Error.Errors[0].Message) + require.Equal(t, "/oneOf", res.Body.Error.Errors[0].Location) + require.Equal(t, "'oneOf' failed, none matched", res.Body.Error.Errors[0].Message) require.Equal(t, 400, res.Body.Error.Status) require.Equal(t, "Bad Request", res.Body.Error.Title) require.NotEmpty(t, res.Body.Meta.RequestId) diff --git a/go/apps/api/routes/v2_identities_get_identity/handler.go b/go/apps/api/routes/v2_identities_get_identity/handler.go index ca2f260cd3..7b44e0a811 100644 --- a/go/apps/api/routes/v2_identities_get_identity/handler.go +++ b/go/apps/api/routes/v2_identities_get_identity/handler.go @@ -84,23 +84,26 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { } if err != nil { - if err == sql.ErrNoRows { + if db.IsNotFound(err) { return IdentityResult{}, fault.New("identity not found", fault.Code(codes.Data.Identity.NotFound.URN()), - fault.Internal("identity not found"), fault.Public("This identity does not exist."), + 
fault.Internal("identity not found"), + fault.Public("This identity does not exist."), ) } + return IdentityResult{}, fault.Wrap(err, - fault.Internal("unable to find identity"), fault.Public("We're unable to retrieve the identity."), + fault.Internal("unable to find identity"), + fault.Public("We're unable to retrieve the identity."), ) } // Get the ratelimits for this identity ratelimits, listErr := db.Query.ListIdentityRatelimitsByID(ctx, tx, sql.NullString{Valid: true, String: identity.ID}) - if listErr != nil { - + if listErr != nil && !db.IsNotFound(listErr) { return IdentityResult{}, fault.Wrap(listErr, - fault.Internal("unable to fetch ratelimits"), fault.Public("We're unable to retrieve the identity's ratelimits."), + fault.Internal("unable to fetch ratelimits"), + fault.Public("We're unable to retrieve the identity's ratelimits."), ) } @@ -146,12 +149,14 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { } // Format ratelimits for the response - responseRatelimits := make([]openapi.Ratelimit, 0, len(ratelimits)) + responseRatelimits := make([]openapi.RatelimitResponse, 0, len(ratelimits)) for _, r := range ratelimits { - responseRatelimits = append(responseRatelimits, openapi.Ratelimit{ - Name: r.Name, - Limit: int64(r.Limit), - Duration: r.Duration, + responseRatelimits = append(responseRatelimits, openapi.RatelimitResponse{ + Name: r.Name, + Limit: int64(r.Limit), + Duration: r.Duration, + Id: r.ID, + AutoApply: r.AutoApply, }) } diff --git a/go/apps/api/routes/v2_identities_list_identities/handler.go b/go/apps/api/routes/v2_identities_list_identities/handler.go index 8f11dfb23a..c5a3fa9dc3 100644 --- a/go/apps/api/routes/v2_identities_list_identities/handler.go +++ b/go/apps/api/routes/v2_identities_list_identities/handler.go @@ -113,12 +113,14 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { } // Format ratelimits - formattedRatelimits := make([]openapi.Ratelimit, 0, len(ratelimits)) + formattedRatelimits 
:= make([]openapi.RatelimitResponse, 0, len(ratelimits)) for _, r := range ratelimits { - formattedRatelimits = append(formattedRatelimits, openapi.Ratelimit{ - Name: r.Name, - Limit: int64(r.Limit), - Duration: r.Duration, + formattedRatelimits = append(formattedRatelimits, openapi.RatelimitResponse{ + Id: r.ID, + Name: r.Name, + Limit: int64(r.Limit), + Duration: r.Duration, + AutoApply: r.AutoApply, }) } diff --git a/go/apps/api/routes/v2_identities_update_identity/200_test.go b/go/apps/api/routes/v2_identities_update_identity/200_test.go index 0db742bd40..bfdaebdad7 100644 --- a/go/apps/api/routes/v2_identities_update_identity/200_test.go +++ b/go/apps/api/routes/v2_identities_update_identity/200_test.go @@ -175,16 +175,18 @@ func TestSuccess(t *testing.T) { // 2. Add a new 'new_feature' limit // 3. Delete 'special_feature' limit (by not including it) - ratelimits := []openapi.Ratelimit{ + ratelimits := []openapi.RatelimitRequest{ { - Name: "api_calls", - Limit: 200, - Duration: 60000, + Name: "api_calls", + Limit: 200, + Duration: 60000, + AutoApply: true, }, { - Name: "new_feature", - Limit: 5, - Duration: 86400000, // 1 day + Name: "new_feature", + Limit: 5, + Duration: 86400000, // 1 day + AutoApply: false, }, } @@ -204,7 +206,7 @@ func TestSuccess(t *testing.T) { require.Len(t, *res.Body.Data.Ratelimits, 2) // Check ratelimit values - var apiCallsLimit, newFeatureLimit *openapi.Ratelimit + var apiCallsLimit, newFeatureLimit *openapi.RatelimitResponse for i := range *res.Body.Data.Ratelimits { switch (*res.Body.Data.Ratelimits)[i].Name { case "api_calls": @@ -233,7 +235,7 @@ func TestSuccess(t *testing.T) { t.Run("remove all ratelimits", func(t *testing.T) { // Empty array should remove all ratelimits - emptyRatelimits := []openapi.Ratelimit{} + emptyRatelimits := []openapi.RatelimitRequest{} req := handler.Request{ IdentityId: &identityID, @@ -277,11 +279,12 @@ func TestSuccess(t *testing.T) { "credits": 1000, } - ratelimits := []openapi.Ratelimit{ + 
ratelimits := []openapi.RatelimitRequest{ { - Name: "enterprise_feature", - Limit: 50, - Duration: 3600000, + Name: "enterprise_feature", + Limit: 50, + Duration: 3600000, + AutoApply: true, }, } diff --git a/go/apps/api/routes/v2_identities_update_identity/400_test.go b/go/apps/api/routes/v2_identities_update_identity/400_test.go index e9f109e0ee..539a107385 100644 --- a/go/apps/api/routes/v2_identities_update_identity/400_test.go +++ b/go/apps/api/routes/v2_identities_update_identity/400_test.go @@ -90,16 +90,18 @@ func TestBadRequests(t *testing.T) { t.Run("duplicate ratelimit names", func(t *testing.T) { identityID := "identity_123" - ratelimits := []openapi.Ratelimit{ + ratelimits := []openapi.RatelimitRequest{ { - Name: "api_calls", - Limit: 100, - Duration: 60000, + Name: "api_calls", + Limit: 100, + Duration: 60000, + AutoApply: true, }, { - Name: "api_calls", // Duplicate name - Limit: 200, - Duration: 120000, + Name: "api_calls", // Duplicate name + Limit: 200, + Duration: 120000, + AutoApply: true, }, } diff --git a/go/apps/api/routes/v2_identities_update_identity/handler.go b/go/apps/api/routes/v2_identities_update_identity/handler.go index 04449275e3..f5d76f8d52 100644 --- a/go/apps/api/routes/v2_identities_update_identity/handler.go +++ b/go/apps/api/routes/v2_identities_update_identity/handler.go @@ -112,7 +112,8 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { if _, exists := nameSet[ratelimit.Name]; exists { return fault.New("duplicate ratelimit name", fault.Code(codes.App.Validation.InvalidInput.URN()), - fault.Internal("duplicate ratelimit name"), fault.Public(fmt.Sprintf("Ratelimit with name \"%s\" is already defined in the request", ratelimit.Name)), + fault.Internal("duplicate ratelimit name"), + fault.Public(fmt.Sprintf("Ratelimit with name \"%s\" is already defined in the request", ratelimit.Name)), ) } nameSet[ratelimit.Name] = true @@ -238,122 +239,83 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) 
error { existingRatelimitMap[rl.Name] = rl } - newRatelimitMap := make(map[string]openapi.Ratelimit) + newRatelimitMap := make(map[string]openapi.RatelimitRequest) if req.Ratelimits != nil { for _, rl := range *req.Ratelimits { newRatelimitMap[rl.Name] = rl } } + rateLimitsToDelete := make([]string, 0) // Delete ratelimits that are not in the new list for _, existingRL := range existingRatelimits { - if _, exists := newRatelimitMap[existingRL.Name]; !exists { - // Delete this ratelimit - err = db.Query.DeleteRatelimit(ctx, tx, existingRL.ID) - if err != nil { - return fault.Wrap(err, - fault.Internal("unable to delete ratelimit"), fault.Public("We're unable to delete a ratelimit."), - ) - } - - // Add audit log for deletion - auditLogs = append(auditLogs, auditlog.AuditLog{ - WorkspaceID: auth.AuthorizedWorkspaceID, - Event: auditlog.RatelimitDeleteEvent, - Display: fmt.Sprintf("Deleted ratelimit %s", existingRL.ID), - ActorID: auth.KeyID, - ActorName: "root key", - ActorType: auditlog.RootKeyActor, - ActorMeta: map[string]any{}, - RemoteIP: s.Location(), - UserAgent: s.UserAgent(), - Resources: []auditlog.AuditLogResource{ - { - ID: identity.ID, - Type: auditlog.IdentityResourceType, - DisplayName: identity.ExternalID, - Name: identity.ExternalID, - Meta: nil, - }, - { - ID: existingRL.ID, - Type: auditlog.RatelimitResourceType, - DisplayName: existingRL.Name, - Name: existingRL.Name, - Meta: nil, - }, + _, exists := newRatelimitMap[existingRL.Name] + if exists { + continue + } + + rateLimitsToDelete = append(rateLimitsToDelete, existingRL.ID) + + // Add audit log for deletion + auditLogs = append(auditLogs, auditlog.AuditLog{ + WorkspaceID: auth.AuthorizedWorkspaceID, + Event: auditlog.RatelimitDeleteEvent, + Display: fmt.Sprintf("Deleted ratelimit %s", existingRL.ID), + ActorID: auth.KeyID, + ActorName: "root key", + ActorType: auditlog.RootKeyActor, + ActorMeta: map[string]any{}, + RemoteIP: s.Location(), + UserAgent: s.UserAgent(), + Resources: 
[]auditlog.AuditLogResource{ + { + ID: identity.ID, + Type: auditlog.IdentityResourceType, + DisplayName: identity.ExternalID, + Name: identity.ExternalID, + Meta: nil, }, - }) + { + ID: existingRL.ID, + Type: auditlog.RatelimitResourceType, + DisplayName: existingRL.Name, + Name: existingRL.Name, + Meta: nil, + }, + }, + }) + } + + if len(rateLimitsToDelete) > 0 { + err = db.Query.DeleteManyRatelimitsByIDs(ctx, tx, rateLimitsToDelete) + if err != nil { + return fault.Wrap(err, + fault.Internal("unable to delete ratelimits"), fault.Public("We're unable to delete ratelimits."), + ) } } + rateLimitsToInsert := make([]db.InsertIdentityRatelimitParams, 0) // Update existing ratelimits or create new ones for name, newRL := range newRatelimitMap { - if existingRL, exists := existingRatelimitMap[name]; exists { - // Update this ratelimit - err = db.Query.UpdateRatelimit(ctx, tx, db.UpdateRatelimitParams{ - ID: existingRL.ID, - Name: newRL.Name, - Limit: int32(newRL.Limit), // nolint:gosec - Duration: newRL.Duration, - }) - if err != nil { - return fault.Wrap(err, - fault.Internal("unable to update ratelimit"), fault.Public("We're unable to update a ratelimit."), - ) - } + existingRL, exists := existingRatelimitMap[name] - // Add audit log for update - auditLogs = append(auditLogs, auditlog.AuditLog{ - WorkspaceID: auth.AuthorizedWorkspaceID, - Event: auditlog.RatelimitUpdateEvent, - Display: fmt.Sprintf("Updated ratelimit %s", existingRL.ID), - ActorID: auth.KeyID, - ActorName: "root key", - ActorType: auditlog.RootKeyActor, - ActorMeta: map[string]any{}, - RemoteIP: s.Location(), - UserAgent: s.UserAgent(), - Resources: []auditlog.AuditLogResource{ - { - ID: identity.ID, - Type: auditlog.IdentityResourceType, - Name: identity.ExternalID, - DisplayName: identity.ExternalID, - Meta: nil, - }, - { - ID: existingRL.ID, - Type: auditlog.RatelimitResourceType, - Name: newRL.Name, - DisplayName: newRL.Name, - Meta: nil, - }, - }, - }) - } else { - // Create new ratelimit - 
ratelimitID := uid.New(uid.RatelimitPrefix) - err = db.Query.InsertIdentityRatelimit(ctx, tx, db.InsertIdentityRatelimitParams{ - ID: ratelimitID, + if exists { + rateLimitsToInsert = append(rateLimitsToInsert, db.InsertIdentityRatelimitParams{ + ID: existingRL.ID, WorkspaceID: auth.AuthorizedWorkspaceID, IdentityID: sql.NullString{String: identity.ID, Valid: true}, Name: newRL.Name, Limit: int32(newRL.Limit), // nolint:gosec Duration: newRL.Duration, + AutoApply: newRL.AutoApply, CreatedAt: time.Now().UnixMilli(), }) - if err != nil { - return fault.Wrap(err, - fault.Internal("unable to create ratelimit"), fault.Public("We're unable to create a new ratelimit."), - ) - } - // Add audit log for creation auditLogs = append(auditLogs, auditlog.AuditLog{ WorkspaceID: auth.AuthorizedWorkspaceID, - Event: auditlog.RatelimitCreateEvent, - Display: fmt.Sprintf("Created ratelimit %s", ratelimitID), + Event: auditlog.RatelimitUpdateEvent, + Display: fmt.Sprintf("Updated ratelimit %s", existingRL.ID), ActorID: auth.KeyID, ActorName: "root key", ActorType: auditlog.RootKeyActor, @@ -364,30 +326,89 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { { ID: identity.ID, Type: auditlog.IdentityResourceType, - DisplayName: identity.ExternalID, Name: identity.ExternalID, + DisplayName: identity.ExternalID, Meta: nil, }, { - ID: ratelimitID, + ID: existingRL.ID, Type: auditlog.RatelimitResourceType, - DisplayName: newRL.Name, Name: newRL.Name, + DisplayName: newRL.Name, Meta: nil, }, }, }) + + continue + } + + // Create new ratelimit + ratelimitID := uid.New(uid.RatelimitPrefix) + rateLimitsToInsert = append(rateLimitsToInsert, db.InsertIdentityRatelimitParams{ + ID: ratelimitID, + WorkspaceID: auth.AuthorizedWorkspaceID, + IdentityID: sql.NullString{String: identity.ID, Valid: true}, + Name: newRL.Name, + Limit: int32(newRL.Limit), // nolint:gosec + Duration: newRL.Duration, + CreatedAt: time.Now().UnixMilli(), + AutoApply: newRL.AutoApply, + }) + + // Add audit 
log for creation + auditLogs = append(auditLogs, auditlog.AuditLog{ + WorkspaceID: auth.AuthorizedWorkspaceID, + Event: auditlog.RatelimitCreateEvent, + Display: fmt.Sprintf("Created ratelimit %s", ratelimitID), + ActorID: auth.KeyID, + ActorName: "root key", + ActorType: auditlog.RootKeyActor, + ActorMeta: map[string]any{}, + RemoteIP: s.Location(), + UserAgent: s.UserAgent(), + Resources: []auditlog.AuditLogResource{ + { + ID: identity.ID, + Type: auditlog.IdentityResourceType, + DisplayName: identity.ExternalID, + Name: identity.ExternalID, + Meta: nil, + }, + { + ID: ratelimitID, + Type: auditlog.RatelimitResourceType, + DisplayName: newRL.Name, + Name: newRL.Name, + Meta: nil, + }, + }, + }) + } + + if len(rateLimitsToInsert) > 0 { + err = db.BulkInsert(ctx, tx, + "INSERT INTO ratelimits (id, workspace_id, identity_id, name,`limit`"+`, duration, created_at, auto_apply) VALUES (?, ?, ?, ?, ?, ?, ?, ?) + ON DUPLICATE KEY UPDATE`+"`limit` = VALUES(`limit`),"+` + name = VALUES(name), + duration = VALUES(duration), + auto_apply = VALUES(auto_apply), + updated_at = NOW()`, + rateLimitsToInsert, + ) + + if err != nil { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database failed to insert ratelimits"), fault.Public("Failed to insert ratelimits"), + ) } } } - // Insert audit logs err = h.Auditlogs.Insert(ctx, tx, auditLogs) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("database failed to insert audit logs"), fault.Public("Failed to insert audit logs"), - ) + return err } return nil @@ -424,12 +445,14 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { } // Format ratelimits for response - responseRatelimits := make([]openapi.Ratelimit, 0, len(updatedRatelimits)) + responseRatelimits := make([]openapi.RatelimitResponse, 0, len(updatedRatelimits)) for _, r := range updatedRatelimits { - responseRatelimits = 
append(responseRatelimits, openapi.Ratelimit{ - Name: r.Name, - Limit: int64(r.Limit), - Duration: r.Duration, + responseRatelimits = append(responseRatelimits, openapi.RatelimitResponse{ + Id: r.ID, + Name: r.Name, + Limit: int64(r.Limit), + Duration: r.Duration, + AutoApply: r.AutoApply, }) } diff --git a/go/apps/api/routes/v2_keys_add_permissions/handler.go b/go/apps/api/routes/v2_keys_add_permissions/handler.go index 8edfce8ffa..f7967dea16 100644 --- a/go/apps/api/routes/v2_keys_add_permissions/handler.go +++ b/go/apps/api/routes/v2_keys_add_permissions/handler.go @@ -227,10 +227,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { if len(auditLogs) > 0 { err = h.Auditlogs.Insert(ctx, tx, auditLogs) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log error"), fault.Public("Failed to create audit log for permission additions."), - ) + return err } } diff --git a/go/apps/api/routes/v2_keys_add_roles/handler.go b/go/apps/api/routes/v2_keys_add_roles/handler.go index 537112f289..2debeab857 100644 --- a/go/apps/api/routes/v2_keys_add_roles/handler.go +++ b/go/apps/api/routes/v2_keys_add_roles/handler.go @@ -227,10 +227,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { if len(auditLogs) > 0 { err = h.Auditlogs.Insert(ctx, tx, auditLogs) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log error"), fault.Public("Failed to create audit log for role additions."), - ) + return err } } diff --git a/go/apps/api/routes/v2_keys_create_key/200_test.go b/go/apps/api/routes/v2_keys_create_key/200_test.go index bc5d70ad53..a818c683b3 100644 --- a/go/apps/api/routes/v2_keys_create_key/200_test.go +++ b/go/apps/api/routes/v2_keys_create_key/200_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" handler 
"github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_create_key" "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/ptr" "github.com/unkeyed/unkey/go/pkg/testutil" "github.com/unkeyed/unkey/go/pkg/uid" ) @@ -27,6 +28,7 @@ func Test_CreateKey_Success(t *testing.T) { Keys: h.Keys, Permissions: h.Permissions, Auditlogs: h.Auditlogs, + Vault: h.Vault, } h.Register(route) @@ -95,6 +97,7 @@ func Test_CreateKey_WithOptionalFields(t *testing.T) { Logger: h.Logger, Permissions: h.Permissions, Auditlogs: h.Auditlogs, + Vault: h.Vault, } h.Register(route) @@ -162,3 +165,88 @@ func Test_CreateKey_WithOptionalFields(t *testing.T) { require.Equal(t, name, key.Name.String) require.True(t, key.Enabled) } + +func TestCreateKeyWithEncryption(t *testing.T) { + t.Parallel() + + h := testutil.NewHarness(t) + ctx := context.Background() + + route := &handler.Handler{ + DB: h.DB, + Keys: h.Keys, + Logger: h.Logger, + Permissions: h.Permissions, + Auditlogs: h.Auditlogs, + Vault: h.Vault, + } + + h.Register(route) + + // Create API manually + keyAuthID := uid.New(uid.KeyAuthPrefix) + err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ + ID: keyAuthID, + WorkspaceID: h.Resources().UserWorkspace.ID, + CreatedAtM: time.Now().UnixMilli(), + DefaultPrefix: sql.NullString{Valid: false, String: ""}, + DefaultBytes: sql.NullInt32{Valid: false, Int32: 0}, + }) + require.NoError(t, err) + + err = db.Query.UpdateKeyringKeyEncryption(ctx, h.DB.RW(), db.UpdateKeyringKeyEncryptionParams{ + ID: keyAuthID, + StoreEncryptedKeys: true, + }) + require.NoError(t, err) + + apiID := uid.New(uid.APIPrefix) + err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ + ID: apiID, + Name: "test-api", + WorkspaceID: h.Resources().UserWorkspace.ID, + AuthType: db.NullApisAuthType{Valid: true, ApisAuthType: db.ApisAuthTypeKey}, + KeyAuthID: sql.NullString{Valid: true, String: keyAuthID}, + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + rootKey := 
h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.create_key", "api.*.encrypt_key") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + // Test key creation with optional fields + name := "Test Key" + + req := handler.Request{ + ApiId: apiID, + Name: &name, + ExternalId: ptr.P("user_123"), + Enabled: ptr.P(true), + Recoverable: ptr.P(true), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + + require.NotEmpty(t, res.Body.Data.KeyId) + require.NotEmpty(t, res.Body.Data.Key) + + // Verify key fields in database + key, err := db.Query.FindKeyByID(ctx, h.DB.RO(), res.Body.Data.KeyId) + require.NoError(t, err) + + require.True(t, key.Name.Valid) + require.Equal(t, name, key.Name.String) + require.True(t, key.Enabled) + + // Verify key fields in database + keyEncryption, err := db.Query.FindKeyEncryptionByKeyID(ctx, h.DB.RO(), res.Body.Data.KeyId) + require.NoError(t, err) + require.Equal(t, keyEncryption.KeyID, res.Body.Data.KeyId) + require.Equal(t, keyEncryption.WorkspaceID, h.Resources().UserWorkspace.ID) +} diff --git a/go/apps/api/routes/v2_keys_create_key/403_test.go b/go/apps/api/routes/v2_keys_create_key/403_test.go index 40d4a8fd4d..e92f3b3b6e 100644 --- a/go/apps/api/routes/v2_keys_create_key/403_test.go +++ b/go/apps/api/routes/v2_keys_create_key/403_test.go @@ -12,6 +12,7 @@ import ( "github.com/unkeyed/unkey/go/apps/api/openapi" handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_create_key" "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/ptr" "github.com/unkeyed/unkey/go/pkg/testutil" "github.com/unkeyed/unkey/go/pkg/uid" ) @@ -42,6 +43,12 @@ func Test_CreateKey_Forbidden(t *testing.T) { }) require.NoError(t, err) + err = db.Query.UpdateKeyringKeyEncryption(ctx, h.DB.RW(), db.UpdateKeyringKeyEncryptionParams{ + ID: keyAuthID, + 
StoreEncryptedKeys: true, + }) + require.NoError(t, err) + apiID := uid.New(uid.APIPrefix) err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ ID: apiID, @@ -163,4 +170,23 @@ func Test_CreateKey_Forbidden(t *testing.T) { require.Equal(t, 403, res.Status) require.NotNil(t, res.Body) }) + + t.Run("create recoverable key without perms", func(t *testing.T) { + // Create root key with permission that partially matches but isn't sufficient + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.create_key") + + req := handler.Request{ + ApiId: apiID, + Recoverable: ptr.P(true), + } + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + require.Equal(t, 403, res.Status) + require.NotNil(t, res.Body) + }) } diff --git a/go/apps/api/routes/v2_keys_create_key/412_test.go b/go/apps/api/routes/v2_keys_create_key/412_test.go new file mode 100644 index 0000000000..bcc7827c6a --- /dev/null +++ b/go/apps/api/routes/v2_keys_create_key/412_test.go @@ -0,0 +1,83 @@ +package handler_test + +import ( + "context" + "database/sql" + "fmt" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_create_key" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func TestPreconditionError(t *testing.T) { + ctx := context.Background() + h := testutil.NewHarness(t) + + route := &handler.Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + Permissions: h.Permissions, + Vault: h.Vault, + } + + h.Register(route) + + // Create API manually + keyAuthID := uid.New(uid.KeyAuthPrefix) + err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ + 
ID: keyAuthID, + WorkspaceID: h.Resources().UserWorkspace.ID, + CreatedAtM: time.Now().UnixMilli(), + DefaultPrefix: sql.NullString{Valid: false, String: ""}, + DefaultBytes: sql.NullInt32{Valid: false, Int32: 0}, + }) + require.NoError(t, err) + + apiID := uid.New(uid.APIPrefix) + err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ + ID: apiID, + Name: "test-api", + WorkspaceID: h.Resources().UserWorkspace.ID, + AuthType: db.NullApisAuthType{Valid: true, ApisAuthType: db.ApisAuthTypeKey}, + KeyAuthID: sql.NullString{Valid: true, String: keyAuthID}, + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + // Create a root key with appropriate permissions + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.create_key", "api.*.encrypt_key") + + // Set up request headers + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + // Test case for API ID with special characters + t.Run("Try creating a recoverable key without being opt-in", func(t *testing.T) { + req := handler.Request{ + ApiId: apiID, + Recoverable: ptr.P(true), + } + + res := testutil.CallRoute[handler.Request, openapi.PreconditionFailedErrorResponse]( + h, + route, + headers, + req, + ) + + require.Equal(t, 412, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + }) +} diff --git a/go/apps/api/routes/v2_keys_create_key/handler.go b/go/apps/api/routes/v2_keys_create_key/handler.go index 85e6bb2b5d..ca19200ab5 100644 --- a/go/apps/api/routes/v2_keys_create_key/handler.go +++ b/go/apps/api/routes/v2_keys_create_key/handler.go @@ -8,6 +8,8 @@ import ( "net/http" "time" + vaultv1 "github.com/unkeyed/unkey/go/gen/proto/vault/v1" + "github.com/unkeyed/unkey/go/apps/api/openapi" "github.com/unkeyed/unkey/go/internal/services/auditlogs" "github.com/unkeyed/unkey/go/internal/services/keys" @@ -21,6 +23,7 @@ import ( "github.com/unkeyed/unkey/go/pkg/ptr" 
"github.com/unkeyed/unkey/go/pkg/rbac" "github.com/unkeyed/unkey/go/pkg/uid" + "github.com/unkeyed/unkey/go/pkg/vault" "github.com/unkeyed/unkey/go/pkg/zen" ) @@ -33,6 +36,7 @@ type Handler struct { Keys keys.KeyService Permissions permissions.PermissionService Auditlogs auditlogs.AuditLogService + Vault *vault.Service } // Method returns the HTTP method this route responds to @@ -91,6 +95,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { fault.Internal("api not found"), fault.Public("The specified API was not found."), ) } + return fault.Wrap(err, fault.Code(codes.App.Internal.ServiceUnavailable.URN()), fault.Internal("database error"), fault.Public("Failed to retrieve API."), @@ -105,6 +110,21 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { ) } + keyAuth, err := db.Query.FindKeyringByID(ctx, h.DB.RO(), api.KeyAuthID.String) + if err != nil { + if db.IsNotFound(err) { + return fault.New("api not set up for keys", + fault.Code(codes.App.Precondition.PreconditionFailed.URN()), + fault.Internal("api not set up for keys, keyauth not found"), fault.Public("The requested API is not set up to handle keys."), + ) + } + + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), fault.Public("Failed to retrieve API information."), + ) + } + // 5. 
Generate key using key service keyID := uid.New(uid.KeyPrefix) keyResult, err := h.Keys.CreateKey(ctx, keys.CreateKeyRequest{ @@ -115,6 +135,49 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { return err } + encrypt := ptr.SafeDeref(req.Recoverable, false) + var encryption *vaultv1.EncryptResponse + if encrypt { + err = h.Permissions.Check( + ctx, + auth.KeyID, + rbac.Or( + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: "*", + Action: rbac.EncryptKey, + }), + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: api.ID, + Action: rbac.EncryptKey, + }), + ), + ) + if err != nil { + return err + } + + if !keyAuth.StoreEncryptedKeys { + return fault.New("api not set up for key encryption", + fault.Code(codes.App.Precondition.PreconditionFailed.URN()), + fault.Internal("api not set up for key encryption"), fault.Public("This API does not support key encryption."), + ) + } + + encryption, err = h.Vault.Encrypt(ctx, &vaultv1.EncryptRequest{ + Keyring: s.AuthorizedWorkspaceID(), + Data: keyResult.Key, + }) + + if err != nil { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("vault error"), fault.Public("Failed to encrypt key in vault."), + ) + } + } + now := time.Now().UnixMilli() // 6. 
Resolve permissions if provided @@ -178,6 +241,8 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { CreatedAtM: now, Enabled: true, RemainingRequests: sql.NullInt32{Int32: 0, Valid: false}, + RefillDay: sql.NullInt16{Int16: 0, Valid: false}, + RefillAmount: sql.NullInt32{Int32: 0, Valid: false}, Name: sql.NullString{String: "", Valid: false}, IdentityID: sql.NullString{String: "", Valid: false}, Meta: sql.NullString{String: "", Valid: false}, @@ -194,7 +259,6 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { } // Note: owner_id is set to null in the SQL query, so we skip setting it here - if req.Meta != nil { metaBytes, marshalErr := json.Marshal(*req.Meta) if marshalErr != nil { @@ -203,6 +267,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { fault.Internal("failed to marshal meta"), fault.Public("Invalid metadata format."), ) } + insertKeyParams.Meta = sql.NullString{String: string(metaBytes), Valid: true} } @@ -211,9 +276,33 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { } if req.Credits != nil { - insertKeyParams.RemainingRequests = sql.NullInt32{ - Int32: int32(req.Credits.Remaining), // nolint:gosec - Valid: true, + if req.Credits.Remaining.IsSpecified() { + insertKeyParams.RemainingRequests = sql.NullInt32{ + Int32: int32(req.Credits.Remaining.MustGet()), // nolint:gosec + Valid: true, + } + } + + if req.Credits.Refill != nil { + insertKeyParams.RefillAmount = sql.NullInt32{ + Int32: int32(req.Credits.Refill.Amount), // nolint:gosec + Valid: true, + } + + if req.Credits.Refill.Interval == openapi.KeyCreditsRefillIntervalMonthly { + if req.Credits.Refill.RefillDay == nil { + return fault.New("missing refillDay", + fault.Code(codes.App.Validation.InvalidInput.URN()), + fault.Internal("refillDay required for monthly interval"), + fault.Public("`refillDay` must be provided when the refill interval is `monthly`."), + ) + } + + insertKeyParams.RefillDay = sql.NullInt16{ 
+ Int16: int16(*req.Credits.Refill.RefillDay), // nolint:gosec + Valid: true, + } + } } } @@ -230,42 +319,61 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { ) } + if encryption != nil { + err = db.Query.InsertKeyEncryption(ctx, tx, db.InsertKeyEncryptionParams{ + WorkspaceID: auth.AuthorizedWorkspaceID, + KeyID: keyID, + CreatedAt: now, + Encrypted: encryption.GetEncrypted(), + EncryptionKeyID: encryption.GetKeyId(), + }) + + if err != nil { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), fault.Public("Failed to create key encryption."), + ) + } + } + // 10. Handle rate limits if provided - if req.Ratelimits != nil { - for _, ratelimit := range *req.Ratelimits { + if req.Ratelimits != nil && len(*req.Ratelimits) > 0 { + ratelimitsToInsert := make([]db.InsertKeyRatelimitParams, len(*req.Ratelimits)) + for i, ratelimit := range *req.Ratelimits { ratelimitID := uid.New(uid.RatelimitPrefix) - err = db.Query.InsertKeyRatelimit(ctx, tx, db.InsertKeyRatelimitParams{ + ratelimitsToInsert[i] = db.InsertKeyRatelimitParams{ ID: ratelimitID, WorkspaceID: auth.AuthorizedWorkspaceID, KeyID: sql.NullString{String: keyID, Valid: true}, Name: ratelimit.Name, Limit: int32(ratelimit.Limit), // nolint:gosec - Duration: int64(ratelimit.Duration), + Duration: ratelimit.Duration, CreatedAt: now, - }) - if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("database error"), fault.Public("Failed to create rate limit."), - ) + AutoApply: ratelimit.AutoApply, } } + + err = db.BulkInsert(ctx, tx, + "INSERT INTO ratelimits (id, workspace_id, key_id, name, `limit`, duration, created_at, auto_apply) VALUES (?, ?, ?, ?, ?, ?, ?, ?)", + ratelimitsToInsert, + ) + if err != nil { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), fault.Public("Failed to create rate 
limit."), + ) + } } // 11. Handle permissions if provided var auditLogs []auditlog.AuditLog - for _, permission := range resolvedPermissions { - err = db.Query.InsertKeyPermission(ctx, tx, db.InsertKeyPermissionParams{ + permissionsToInsert := make([]db.InsertKeyPermissionParams, len(resolvedPermissions)) + for idx, permission := range resolvedPermissions { + permissionsToInsert[idx] = db.InsertKeyPermissionParams{ KeyID: keyID, PermissionID: permission.ID, WorkspaceID: auth.AuthorizedWorkspaceID, CreatedAt: now, - }) - if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("database error"), fault.Public("Failed to assign permission."), - ) } auditLogs = append(auditLogs, auditlog.AuditLog{ @@ -297,19 +405,29 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { }) } + if len(permissionsToInsert) > 0 { + err = db.BulkInsert( + ctx, + tx, + "INSERT INTO key_permissions (key_id, permission_id, workspace_id, created_at_m) VALUES (?, ?, ?, ?)", + permissionsToInsert, + ) + if err != nil { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), fault.Public("Failed to assign permissions."), + ) + } + } + // 12. 
Handle roles if provided - for _, role := range resolvedRoles { - err = db.Query.InsertKeyRole(ctx, tx, db.InsertKeyRoleParams{ + rolesToInsert := make([]db.InsertKeyRoleParams, len(resolvedRoles)) + for idx, role := range resolvedRoles { + rolesToInsert[idx] = db.InsertKeyRoleParams{ KeyID: keyID, RoleID: role.ID, WorkspaceID: auth.AuthorizedWorkspaceID, CreatedAtM: now, - }) - if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("database error"), fault.Public("Failed to assign role."), - ) } auditLogs = append(auditLogs, auditlog.AuditLog{ @@ -341,6 +459,21 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { }) } + if len(rolesToInsert) > 0 { + err = db.BulkInsert( + ctx, + tx, + "INSERT INTO keys_roles (key_id, role_id, workspace_id, created_at_m) VALUES (?, ?, ?, ?)", + rolesToInsert, + ) + if err != nil { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), fault.Public("Failed to assign roles."), + ) + } + } + // 13. Create main audit log for key creation auditLogs = append(auditLogs, auditlog.AuditLog{ WorkspaceID: auth.AuthorizedWorkspaceID, @@ -373,10 +506,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { // 14. 
Insert audit logs err = h.Auditlogs.Insert(ctx, tx, auditLogs) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log error"), fault.Public("Failed to create audit log."), - ) + return err } return nil diff --git a/go/apps/api/routes/v2_keys_get_key/200_test.go b/go/apps/api/routes/v2_keys_get_key/200_test.go new file mode 100644 index 0000000000..b750de0f86 --- /dev/null +++ b/go/apps/api/routes/v2_keys_get_key/200_test.go @@ -0,0 +1,595 @@ +package handler_test + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "net/http" + "testing" + "time" + + "github.com/oapi-codegen/nullable" + vaultv1 "github.com/unkeyed/unkey/go/gen/proto/vault/v1" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_get_key" + "github.com/unkeyed/unkey/go/internal/services/keys" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func TestGetKeyByKeyID(t *testing.T) { + h := testutil.NewHarness(t) + ctx := context.Background() + + route := &handler.Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + Permissions: h.Permissions, + Auditlogs: h.Auditlogs, + Vault: h.Vault, + } + + h.Register(route) + + // Create a workspace and user + workspace := h.Resources().UserWorkspace + + // Create a keyAuth (keyring) for the API + keyAuthID := uid.New(uid.KeyAuthPrefix) + err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ + ID: keyAuthID, + WorkspaceID: workspace.ID, + CreatedAtM: time.Now().UnixMilli(), + DefaultPrefix: sql.NullString{Valid: false}, + DefaultBytes: sql.NullInt32{Valid: false}, + }) + require.NoError(t, err) + + err = db.Query.UpdateKeyringKeyEncryption(ctx, h.DB.RW(), db.UpdateKeyringKeyEncryptionParams{ + ID: keyAuthID, + StoreEncryptedKeys: true, + }) 
+ require.NoError(t, err) + + // Create a test API + apiID := uid.New("api") + err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ + ID: apiID, + Name: "Test API", + WorkspaceID: workspace.ID, + AuthType: db.NullApisAuthType{Valid: true, ApisAuthType: db.ApisAuthTypeKey}, + KeyAuthID: sql.NullString{Valid: true, String: keyAuthID}, + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + // Create test identities + identityID := uid.New("identity") + identityExternalID := "test_user" + err = db.Query.InsertIdentity(ctx, h.DB.RW(), db.InsertIdentityParams{ + ID: identityID, + ExternalID: identityExternalID, + WorkspaceID: workspace.ID, + Environment: "", + CreatedAt: time.Now().UnixMilli(), + Meta: []byte(`{"role": "admin"}`), + }) + require.NoError(t, err) + + ratelimitID := uid.New(uid.RatelimitPrefix) + err = db.Query.InsertIdentityRatelimit(ctx, h.DB.RW(), db.InsertIdentityRatelimitParams{ + ID: ratelimitID, + WorkspaceID: h.Resources().UserWorkspace.ID, + IdentityID: sql.NullString{String: identityID, Valid: true}, + Name: "api_calls", + Limit: 100, + Duration: 60000, // 1 minute + CreatedAt: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + keyID := uid.New(uid.KeyPrefix) + key, _ := h.Keys.CreateKey(ctx, keys.CreateKeyRequest{ + Prefix: "test", + ByteLength: 16, + }) + + insertParams := db.InsertKeyParams{ + ID: keyID, + KeyringID: keyAuthID, + Hash: key.Hash, + Start: key.Start, + WorkspaceID: workspace.ID, + ForWorkspaceID: sql.NullString{Valid: false}, + Name: sql.NullString{Valid: true, String: "test-key"}, + Expires: sql.NullTime{Valid: false}, + CreatedAtM: time.Now().UnixMilli(), + Enabled: true, + IdentityID: sql.NullString{Valid: true, String: identityID}, + } + + err = db.Query.InsertKey(ctx, h.DB.RW(), insertParams) + require.NoError(t, err) + + encryption, err := h.Vault.Encrypt(ctx, &vaultv1.EncryptRequest{ + Keyring: workspace.ID, + Data: key.Key, + }) + require.NoError(t, err) + + err = 
db.Query.InsertKeyEncryption(ctx, h.DB.RW(), db.InsertKeyEncryptionParams{ + WorkspaceID: workspace.ID, + KeyID: keyID, + CreatedAt: time.Now().UnixMilli(), + Encrypted: encryption.GetEncrypted(), + EncryptionKeyID: encryption.GetKeyId(), + }) + require.NoError(t, err) + + // Create a root key with appropriate permissions + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_key", "api.*.decrypt_key") + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + // This also tests that we have the correct data for the key. + t.Run("get key by keyId without decrypting", func(t *testing.T) { + req := handler.Request{ + KeyId: ptr.P(keyID), + Decrypt: ptr.P(false), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.Equal(t, res.Body.Data.KeyId, keyID) + }) + + t.Run("get key by keyId with decrypting", func(t *testing.T) { + req := handler.Request{ + KeyId: ptr.P(keyID), + Decrypt: ptr.P(true), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.Equal(t, ptr.SafeDeref(res.Body.Data.Plaintext), key.Key) + }) + + t.Run("get key by plaintext key", func(t *testing.T) { + req := handler.Request{ + Key: ptr.P(key.Key), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.Equal(t, res.Body.Data.KeyId, keyID) + }) + + t.Run("get key by plaintext key with decrypting", func(t *testing.T) { + req := handler.Request{ + Key: ptr.P(key.Key), + Decrypt: ptr.P(true), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.Equal(t, 
ptr.SafeDeref(res.Body.Data.Plaintext), key.Key) + }) +} + +func TestGetKey_AdditionalScenarios(t *testing.T) { + h := testutil.NewHarness(t) + ctx := context.Background() + + route := &handler.Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + Permissions: h.Permissions, + Auditlogs: h.Auditlogs, + Vault: h.Vault, + } + + h.Register(route) + + workspace := h.Resources().UserWorkspace + + // Create keyAuth (keyring) for the API + keyAuthID := uid.New(uid.KeyAuthPrefix) + err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ + ID: keyAuthID, + WorkspaceID: workspace.ID, + CreatedAtM: time.Now().UnixMilli(), + DefaultPrefix: sql.NullString{Valid: false}, + DefaultBytes: sql.NullInt32{Valid: false}, + }) + require.NoError(t, err) + + // Create test API + apiID := uid.New("api") + err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ + ID: apiID, + Name: "Test API", + WorkspaceID: workspace.ID, + AuthType: db.NullApisAuthType{Valid: true, ApisAuthType: db.ApisAuthTypeKey}, + KeyAuthID: sql.NullString{Valid: true, String: keyAuthID}, + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + // Create root key with appropriate permissions + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_key") + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + t.Run("key with complex meta data", func(t *testing.T) { + keyID := uid.New(uid.KeyPrefix) + key, _ := h.Keys.CreateKey(ctx, keys.CreateKeyRequest{ + Prefix: "test", + ByteLength: 16, + }) + + complexMeta := map[string]interface{}{ + "user_id": 12345, + "plan": "premium", + "features": []string{"analytics", "webhooks"}, + "created_by": "admin@example.com", + "nested": map[string]string{ + "department": "engineering", + "team": "backend", + }, + } + metaBytes, _ := json.Marshal(complexMeta) + + err := db.Query.InsertKey(ctx, h.DB.RW(), db.InsertKeyParams{ + ID: keyID, + KeyringID: keyAuthID, + Hash: 
key.Hash, + Start: key.Start, + WorkspaceID: workspace.ID, + Name: sql.NullString{Valid: true, String: "complex-meta-key"}, + Meta: sql.NullString{Valid: true, String: string(metaBytes)}, + CreatedAtM: time.Now().UnixMilli(), + Enabled: true, + }) + require.NoError(t, err) + + req := handler.Request{ + KeyId: ptr.P(keyID), + Decrypt: ptr.P(false), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Data.Meta) + + // Verify meta data was properly unmarshaled + metaMap := *res.Body.Data.Meta + require.Equal(t, float64(12345), metaMap["user_id"]) // JSON numbers become float64 + require.Equal(t, "premium", metaMap["plan"]) + }) + + t.Run("key with expiration date", func(t *testing.T) { + keyID := uid.New(uid.KeyPrefix) + key, _ := h.Keys.CreateKey(ctx, keys.CreateKeyRequest{ + Prefix: "test", + ByteLength: 16, + }) + + futureDate := time.Now().Add(24 * time.Hour).Truncate(time.Hour) + err := db.Query.InsertKey(ctx, h.DB.RW(), db.InsertKeyParams{ + ID: keyID, + KeyringID: keyAuthID, + Hash: key.Hash, + Start: key.Start, + WorkspaceID: workspace.ID, + Name: sql.NullString{Valid: true, String: "expiring-key"}, + Expires: sql.NullTime{Valid: true, Time: futureDate}, + CreatedAtM: time.Now().UnixMilli(), + Enabled: true, + }) + require.NoError(t, err) + + req := handler.Request{ + KeyId: ptr.P(keyID), + Decrypt: ptr.P(false), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Data.Expires) + require.Equal(t, futureDate.UnixMilli(), *res.Body.Data.Expires) + }) + + t.Run("key with credits and daily refill", func(t *testing.T) { + keyID := uid.New(uid.KeyPrefix) + key, _ := h.Keys.CreateKey(ctx, keys.CreateKeyRequest{ + Prefix: "test", + ByteLength: 16, + }) + + err := db.Query.InsertKey(ctx, h.DB.RW(), 
db.InsertKeyParams{ + ID: keyID, + KeyringID: keyAuthID, + Hash: key.Hash, + Start: key.Start, + WorkspaceID: workspace.ID, + Name: sql.NullString{Valid: true, String: "credits-key"}, + RemainingRequests: sql.NullInt32{Valid: true, Int32: 50}, + RefillAmount: sql.NullInt32{Valid: true, Int32: 100}, + RefillDay: sql.NullInt16{Valid: false, Int16: 0}, + CreatedAtM: time.Now().UnixMilli(), + Enabled: true, + }) + require.NoError(t, err) + + req := handler.Request{ + KeyId: ptr.P(keyID), + Decrypt: ptr.P(false), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Data.Credits) + require.Equal(t, nullable.NewNullableWithValue(int64(50)), res.Body.Data.Credits.Remaining) + require.NotNil(t, res.Body.Data.Credits.Refill) + require.Equal(t, int64(100), res.Body.Data.Credits.Refill.Amount) + require.Equal(t, "daily", string(res.Body.Data.Credits.Refill.Interval)) + }) + + t.Run("key with monthly refill", func(t *testing.T) { + keyID := uid.New(uid.KeyPrefix) + key, _ := h.Keys.CreateKey(ctx, keys.CreateKeyRequest{ + Prefix: "test", + ByteLength: 16, + }) + + err := db.Query.InsertKey(ctx, h.DB.RW(), db.InsertKeyParams{ + ID: keyID, + KeyringID: keyAuthID, + Hash: key.Hash, + Start: key.Start, + WorkspaceID: workspace.ID, + Name: sql.NullString{Valid: true, String: "monthly-refill-key"}, + RemainingRequests: sql.NullInt32{Valid: true, Int32: 1000}, + RefillAmount: sql.NullInt32{Valid: true, Int32: 2000}, + RefillDay: sql.NullInt16{Valid: true, Int16: 1}, // 1st of month + CreatedAtM: time.Now().UnixMilli(), + Enabled: true, + }) + require.NoError(t, err) + + req := handler.Request{ + KeyId: ptr.P(keyID), + Decrypt: ptr.P(false), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Data.Credits) + 
require.NotNil(t, res.Body.Data.Credits.Refill) + require.Equal(t, "monthly", string(res.Body.Data.Credits.Refill.Interval)) + require.Equal(t, 1, *res.Body.Data.Credits.Refill.RefillDay) + }) + + t.Run("key with roles and permissions", func(t *testing.T) { + keyID := uid.New(uid.KeyPrefix) + key, _ := h.Keys.CreateKey(ctx, keys.CreateKeyRequest{ + Prefix: "test", + ByteLength: 16, + }) + + err := db.Query.InsertKey(ctx, h.DB.RW(), db.InsertKeyParams{ + ID: keyID, + KeyringID: keyAuthID, + Hash: key.Hash, + Start: key.Start, + WorkspaceID: workspace.ID, + Name: sql.NullString{Valid: true, String: "rbac-key"}, + CreatedAtM: time.Now().UnixMilli(), + Enabled: true, + }) + require.NoError(t, err) + + // Create permissions + perm1ID := uid.New(uid.PermissionPrefix) + err = db.Query.InsertPermission(ctx, h.DB.RW(), db.InsertPermissionParams{ + PermissionID: perm1ID, + WorkspaceID: workspace.ID, + Name: "read_data", + Slug: "read_data", + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + perm2ID := uid.New(uid.PermissionPrefix) + err = db.Query.InsertPermission(ctx, h.DB.RW(), db.InsertPermissionParams{ + PermissionID: perm2ID, + WorkspaceID: workspace.ID, + Name: "write_data", + Slug: "write_data", + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + // Create role + roleID := uid.New(uid.RolePrefix) + err = db.Query.InsertRole(ctx, h.DB.RW(), db.InsertRoleParams{ + RoleID: roleID, + WorkspaceID: workspace.ID, + Name: "data_admin", + }) + require.NoError(t, err) + + // Assign permissions to key + err = db.Query.InsertKeyPermission(ctx, h.DB.RW(), db.InsertKeyPermissionParams{ + KeyID: keyID, + PermissionID: perm1ID, + WorkspaceID: workspace.ID, + }) + require.NoError(t, err) + + err = db.Query.InsertKeyPermission(ctx, h.DB.RW(), db.InsertKeyPermissionParams{ + KeyID: keyID, + PermissionID: perm2ID, + WorkspaceID: workspace.ID, + }) + require.NoError(t, err) + + // Assign role to key + err = db.Query.InsertKeyRole(ctx, h.DB.RW(), 
db.InsertKeyRoleParams{ + KeyID: keyID, + RoleID: roleID, + WorkspaceID: workspace.ID, + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + req := handler.Request{ + KeyId: ptr.P(keyID), + Decrypt: ptr.P(false), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Data.Permissions) + require.NotNil(t, res.Body.Data.Roles) + + permissions := *res.Body.Data.Permissions + require.Len(t, permissions, 2) + require.Contains(t, permissions, "read_data") + require.Contains(t, permissions, "write_data") + + roles := *res.Body.Data.Roles + require.Len(t, roles, 1) + require.Contains(t, roles, "data_admin") + }) + + t.Run("key with ratelimits", func(t *testing.T) { + keyID := uid.New(uid.KeyPrefix) + key, _ := h.Keys.CreateKey(ctx, keys.CreateKeyRequest{ + Prefix: "test", + ByteLength: 16, + }) + + err := db.Query.InsertKey(ctx, h.DB.RW(), db.InsertKeyParams{ + ID: keyID, + KeyringID: keyAuthID, + Hash: key.Hash, + Start: key.Start, + WorkspaceID: workspace.ID, + Name: sql.NullString{Valid: true, String: "ratelimited-key"}, + CreatedAtM: time.Now().UnixMilli(), + Enabled: true, + }) + require.NoError(t, err) + + // Create ratelimits for the key + rl1ID := uid.New(uid.RatelimitPrefix) + err = db.Query.InsertKeyRatelimit(ctx, h.DB.RW(), db.InsertKeyRatelimitParams{ + ID: rl1ID, + WorkspaceID: workspace.ID, + KeyID: sql.NullString{Valid: true, String: keyID}, + Name: "api_calls", + Limit: 100, + Duration: 60000, // 1 minute + CreatedAt: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + rl2ID := uid.New(uid.RatelimitPrefix) + err = db.Query.InsertKeyRatelimit(ctx, h.DB.RW(), db.InsertKeyRatelimitParams{ + ID: rl2ID, + WorkspaceID: workspace.ID, + KeyID: sql.NullString{Valid: true, String: keyID}, + Name: "data_transfer", + Limit: 1000, + Duration: 3600000, // 1 hour + AutoApply: true, + CreatedAt: 
time.Now().UnixMilli(), + }) + require.NoError(t, err) + + req := handler.Request{ + KeyId: ptr.P(keyID), + Decrypt: ptr.P(false), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Data.Ratelimits) + + ratelimits := *res.Body.Data.Ratelimits + require.Len(t, ratelimits, 2) + + // Find each ratelimit and verify + var apiCallsRL, dataTransferRL *openapi.RatelimitResponse + for _, rl := range ratelimits { + switch rl.Name { + case "api_calls": + apiCallsRL = &rl + case "data_transfer": + dataTransferRL = &rl + } + } + + require.NotNil(t, apiCallsRL) + require.Equal(t, int64(100), apiCallsRL.Limit) + require.Equal(t, int64(60000), apiCallsRL.Duration) + require.False(t, apiCallsRL.AutoApply) + + require.NotNil(t, dataTransferRL) + require.Equal(t, int64(1000), dataTransferRL.Limit) + require.Equal(t, int64(3600000), dataTransferRL.Duration) + require.True(t, dataTransferRL.AutoApply) + }) + + t.Run("disabled key", func(t *testing.T) { + keyID := uid.New(uid.KeyPrefix) + key, _ := h.Keys.CreateKey(ctx, keys.CreateKeyRequest{ + Prefix: "test", + ByteLength: 16, + }) + + err := db.Query.InsertKey(ctx, h.DB.RW(), db.InsertKeyParams{ + ID: keyID, + KeyringID: keyAuthID, + Hash: key.Hash, + Start: key.Start, + WorkspaceID: workspace.ID, + Name: sql.NullString{Valid: true, String: "disabled-key"}, + CreatedAtM: time.Now().UnixMilli(), + Enabled: false, // Key is disabled + }) + require.NoError(t, err) + + req := handler.Request{ + KeyId: ptr.P(keyID), + Decrypt: ptr.P(false), + } + + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.False(t, res.Body.Data.Enabled) + }) +} diff --git a/go/apps/api/routes/v2_keys_get_key/400_test.go b/go/apps/api/routes/v2_keys_get_key/400_test.go new file mode 100644 index 0000000000..fd71d13872 --- 
/dev/null +++ b/go/apps/api/routes/v2_keys_get_key/400_test.go @@ -0,0 +1,87 @@ +package handler_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_get_key" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/testutil" +) + +func Test_GetKey_BadRequest(t *testing.T) { + h := testutil.NewHarness(t) + + route := &handler.Handler{ + DB: h.DB, + Keys: h.Keys, + Logger: h.Logger, + Permissions: h.Permissions, + Auditlogs: h.Auditlogs, + Vault: h.Vault, + } + + h.Register(route) + + // Create root key with read permissions + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.read_key") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + t.Run("missing both keyId and key", func(t *testing.T) { + req := handler.Request{ + KeyId: nil, + Key: nil, + Decrypt: ptr.P(false), + } + + res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + require.Contains(t, res.Body.Error.Detail, "POST request body for '/v2/keys.getKey' failed to validate schema") + }) + + t.Run("both keyId and key provided", func(t *testing.T) { + req := handler.Request{ + KeyId: ptr.P("key_123"), + Key: ptr.P("test_key"), + Decrypt: ptr.P(false), + } + + res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + require.Contains(t, res.Body.Error.Detail, "POST request body for '/v2/keys.getKey' failed to validate schema") + }) + + t.Run("empty keyId string", func(t *testing.T) { + req := handler.Request{ + KeyId: ptr.P(""), + Decrypt: ptr.P(false), + } + + 
res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + }) + + t.Run("empty key string", func(t *testing.T) { + req := handler.Request{ + Key: ptr.P(""), + } + + res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + }) +} diff --git a/go/apps/api/routes/v2_keys_get_key/401_test.go b/go/apps/api/routes/v2_keys_get_key/401_test.go new file mode 100644 index 0000000000..fcd649ecd2 --- /dev/null +++ b/go/apps/api/routes/v2_keys_get_key/401_test.go @@ -0,0 +1,92 @@ +package handler_test + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_get_key" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func Test_GetKey_Unauthorized(t *testing.T) { + h := testutil.NewHarness(t) + + route := &handler.Handler{ + DB: h.DB, + Keys: h.Keys, + Logger: h.Logger, + Permissions: h.Permissions, + Auditlogs: h.Auditlogs, + Vault: h.Vault, + } + + h.Register(route) + + req := handler.Request{ + KeyId: ptr.P(uid.New(uid.KeyPrefix)), + Decrypt: ptr.P(false), + } + + t.Run("missing authorization header", func(t *testing.T) { + headers := http.Header{ + "Content-Type": {"application/json"}, + } + + res := testutil.CallRoute[handler.Request, openapi.UnauthorizedErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + }) + + t.Run("empty authorization header", func(t *testing.T) { + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {""}, + } + + res := 
testutil.CallRoute[handler.Request, openapi.UnauthorizedErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + }) + + t.Run("malformed authorization header - no Bearer prefix", func(t *testing.T) { + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {"invalid_token_without_bearer"}, + } + + res := testutil.CallRoute[handler.Request, openapi.UnauthorizedErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + }) + + t.Run("malformed authorization header - Bearer only", func(t *testing.T) { + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {"Bearer"}, + } + + res := testutil.CallRoute[handler.Request, openapi.UnauthorizedErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + }) + + t.Run("nonexistent root key", func(t *testing.T) { + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {"Bearer " + uid.New(uid.KeyPrefix)}, + } + + res := testutil.CallRoute[handler.Request, openapi.UnauthorizedErrorResponse](h, route, headers, req) + require.Equal(t, 401, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + }) +} diff --git a/go/apps/api/routes/v2_keys_get_key/403_test.go b/go/apps/api/routes/v2_keys_get_key/403_test.go new file mode 100644 index 0000000000..8c9f21545e --- /dev/null +++ b/go/apps/api/routes/v2_keys_get_key/403_test.go @@ -0,0 +1,206 @@ +package handler_test + +import ( + "context" + "database/sql" + "fmt" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_get_key" + "github.com/unkeyed/unkey/go/pkg/db" + 
"github.com/unkeyed/unkey/go/pkg/hash" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func Test_GetKey_Forbidden(t *testing.T) { + + h := testutil.NewHarness(t) + ctx := context.Background() + + route := &handler.Handler{ + DB: h.DB, + Keys: h.Keys, + Logger: h.Logger, + Permissions: h.Permissions, + Auditlogs: h.Auditlogs, + } + + h.Register(route) + + // Create API for testing + keyAuthID := uid.New(uid.KeyAuthPrefix) + err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ + ID: keyAuthID, + WorkspaceID: h.Resources().UserWorkspace.ID, + CreatedAtM: time.Now().UnixMilli(), + DefaultPrefix: sql.NullString{Valid: false, String: ""}, + DefaultBytes: sql.NullInt32{Valid: false, Int32: 0}, + }) + require.NoError(t, err) + + apiID := uid.New(uid.APIPrefix) + err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ + ID: apiID, + Name: "test-api", + WorkspaceID: h.Resources().UserWorkspace.ID, + AuthType: db.NullApisAuthType{Valid: true, ApisAuthType: db.ApisAuthTypeKey}, + KeyAuthID: sql.NullString{Valid: true, String: keyAuthID}, + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + // Create another API for cross-API testing + otherKeyAuthID := uid.New(uid.KeyAuthPrefix) + err = db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ + ID: otherKeyAuthID, + WorkspaceID: h.Resources().UserWorkspace.ID, + CreatedAtM: time.Now().UnixMilli(), + DefaultPrefix: sql.NullString{Valid: false, String: ""}, + DefaultBytes: sql.NullInt32{Valid: false, Int32: 0}, + }) + require.NoError(t, err) + + otherApiID := uid.New(uid.APIPrefix) + err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ + ID: otherApiID, + Name: "other-api", + WorkspaceID: h.Resources().UserWorkspace.ID, + AuthType: db.NullApisAuthType{Valid: true, ApisAuthType: db.ApisAuthTypeKey}, + KeyAuthID: sql.NullString{Valid: true, String: otherKeyAuthID}, + CreatedAtM: 
time.Now().UnixMilli(), + }) + require.NoError(t, err) + + // Create another Workspace for cross-API testing + otherWorkspace := h.CreateWorkspace() + + otherWsKeyAuthID := uid.New(uid.KeyAuthPrefix) + err = db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ + ID: otherWsKeyAuthID, + WorkspaceID: otherWorkspace.ID, + CreatedAtM: time.Now().UnixMilli(), + DefaultPrefix: sql.NullString{Valid: false, String: ""}, + DefaultBytes: sql.NullInt32{Valid: false, Int32: 0}, + }) + require.NoError(t, err) + + otherWsApiID := uid.New(uid.APIPrefix) + err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ + ID: otherWsApiID, + Name: "test-api", + WorkspaceID: otherWorkspace.ID, + AuthType: db.NullApisAuthType{Valid: true, ApisAuthType: db.ApisAuthTypeKey}, + KeyAuthID: sql.NullString{Valid: true, String: otherWsKeyAuthID}, + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + // Create a test key + keyID := uid.New(uid.KeyPrefix) + keyString := "test_" + uid.New("") + err = db.Query.InsertKey(ctx, h.DB.RW(), db.InsertKeyParams{ + ID: keyID, + KeyringID: keyAuthID, + Hash: hash.Sha256(keyString), + Start: keyString[:4], + WorkspaceID: h.Resources().UserWorkspace.ID, + ForWorkspaceID: sql.NullString{Valid: false}, + Name: sql.NullString{Valid: true, String: "Test Key"}, + CreatedAtM: time.Now().UnixMilli(), + Enabled: true, + IdentityID: sql.NullString{Valid: false}, + Meta: sql.NullString{Valid: false}, + Expires: sql.NullTime{Valid: false}, + RemainingRequests: sql.NullInt32{Valid: false}, + RatelimitAsync: sql.NullBool{Valid: false}, + RatelimitLimit: sql.NullInt32{Valid: false}, + RatelimitDuration: sql.NullInt64{Valid: false}, + Environment: sql.NullString{Valid: false}, + }) + require.NoError(t, err) + + req := handler.Request{ + KeyId: ptr.P(keyID), + Decrypt: ptr.P(true), + } + + t.Run("no permissions", func(t *testing.T) { + // Create root key with no permissions + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID) + + headers 
:= http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + require.Equal(t, 403, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("wrong permission - has create but not read", func(t *testing.T) { + // Create root key with create permission instead of read + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.create_key") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + require.Equal(t, 403, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("wrong permission - trying to decrypt key but no decrypt permissions", func(t *testing.T) { + // Create root key with read permission but without decrypt permission + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.read_key") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + require.Equal(t, 403, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("cross workspace access", func(t *testing.T) { + // Create a different workspace + differentWorkspace := h.CreateWorkspace() + + // Create a root key for the different workspace with full permissions + rootKey := h.CreateRootKey(differentWorkspace.ID, "api.*.read_key", "api.*.read_api") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + require.Equal(t, 404, res.Status) + require.NotNil(t, res.Body) + }) + + t.Run("cross api access", func(t 
*testing.T) { + // Create root key with read permission for a single api + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, fmt.Sprintf("api.%s.read_key", otherApiID)) + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + + require.Equal(t, 403, res.Status) + require.NotNil(t, res.Body) + }) +} diff --git a/go/apps/api/routes/v2_keys_get_key/404_test.go b/go/apps/api/routes/v2_keys_get_key/404_test.go new file mode 100644 index 0000000000..04e420658b --- /dev/null +++ b/go/apps/api/routes/v2_keys_get_key/404_test.go @@ -0,0 +1,61 @@ +package handler_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_get_key" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func Test_GetKey_NotFound(t *testing.T) { + h := testutil.NewHarness(t) + + route := &handler.Handler{ + DB: h.DB, + Keys: h.Keys, + Logger: h.Logger, + Permissions: h.Permissions, + Auditlogs: h.Auditlogs, + Vault: h.Vault, + } + + h.Register(route) + + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.read_key") + + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + t.Run("nonexistent keyId", func(t *testing.T) { + nonexistentKeyID := uid.New(uid.KeyPrefix) + req := handler.Request{ + KeyId: ptr.P(nonexistentKeyID), + Decrypt: ptr.P(false), + } + + res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) + require.Equal(t, 404, res.Status) + require.NotNil(t, res.Body) + require.Contains(t, res.Body.Error.Detail, "We could not find the requested key") + }) + + 
t.Run("nonexistent raw key", func(t *testing.T) { + nonexistentKey := uid.New("api") + req := handler.Request{ + Key: ptr.P(nonexistentKey), + } + + res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) + require.Equal(t, 404, res.Status) + require.NotNil(t, res.Body) + require.Contains(t, res.Body.Error.Detail, "We could not find the requested key") + }) +} diff --git a/go/apps/api/routes/v2_keys_get_key/412_test.go b/go/apps/api/routes/v2_keys_get_key/412_test.go new file mode 100644 index 0000000000..da5c63572c --- /dev/null +++ b/go/apps/api/routes/v2_keys_get_key/412_test.go @@ -0,0 +1,106 @@ +package handler_test + +import ( + "context" + "database/sql" + "fmt" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + handler "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_get_key" + "github.com/unkeyed/unkey/go/internal/services/keys" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func TestPreconditionError(t *testing.T) { + ctx := context.Background() + h := testutil.NewHarness(t) + + route := &handler.Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + Permissions: h.Permissions, + Vault: h.Vault, + } + + h.Register(route) + + // Create API manually + keyAuthID := uid.New(uid.KeyAuthPrefix) + err := db.Query.InsertKeyring(ctx, h.DB.RW(), db.InsertKeyringParams{ + ID: keyAuthID, + WorkspaceID: h.Resources().UserWorkspace.ID, + CreatedAtM: time.Now().UnixMilli(), + DefaultPrefix: sql.NullString{Valid: false, String: ""}, + DefaultBytes: sql.NullInt32{Valid: false, Int32: 0}, + }) + require.NoError(t, err) + + apiID := uid.New(uid.APIPrefix) + err = db.Query.InsertApi(ctx, h.DB.RW(), db.InsertApiParams{ + ID: apiID, + Name: "test-api", + WorkspaceID: h.Resources().UserWorkspace.ID, + AuthType: 
db.NullApisAuthType{Valid: true, ApisAuthType: db.ApisAuthTypeKey}, + KeyAuthID: sql.NullString{Valid: true, String: keyAuthID}, + CreatedAtM: time.Now().UnixMilli(), + }) + require.NoError(t, err) + + // Create a root key with appropriate permissions + rootKey := h.CreateRootKey(h.Resources().UserWorkspace.ID, "api.*.read_key", "api.*.decrypt_key") + + // Set up request headers + headers := http.Header{ + "Content-Type": {"application/json"}, + "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, + } + + keyID := uid.New(uid.KeyPrefix) + key, _ := h.Keys.CreateKey(ctx, keys.CreateKeyRequest{ + Prefix: "test", + ByteLength: 16, + }) + + insertParams := db.InsertKeyParams{ + ID: keyID, + KeyringID: keyAuthID, + Hash: key.Hash, + Start: key.Start, + WorkspaceID: h.Resources().UserWorkspace.ID, + ForWorkspaceID: sql.NullString{Valid: false}, + Name: sql.NullString{Valid: true, String: "test-key"}, + Expires: sql.NullTime{Valid: false}, + CreatedAtM: time.Now().UnixMilli(), + Enabled: true, + } + + err = db.Query.InsertKey(ctx, h.DB.RW(), insertParams) + require.NoError(t, err) + + // Decrypting must fail with 412 because this keyring does not opt in to storing encrypted keys + t.Run("Try getting a recoverable key without being opt-in", func(t *testing.T) { + req := handler.Request{ + Decrypt: ptr.P(true), + KeyId: ptr.P(keyID), + } + + res := testutil.CallRoute[handler.Request, openapi.PreconditionFailedErrorResponse]( + h, + route, + headers, + req, + ) + + require.Equal(t, 412, res.Status) + require.NotNil(t, res.Body) + require.NotNil(t, res.Body.Error) + }) +} diff --git a/go/apps/api/routes/v2_keys_get_key/handler.go b/go/apps/api/routes/v2_keys_get_key/handler.go new file mode 100644 index 0000000000..7f456c84a4 --- /dev/null +++ b/go/apps/api/routes/v2_keys_get_key/handler.go @@ -0,0 +1,369 @@ +package handler + +import ( + "context" + "database/sql" + "encoding/json" + "net/http" + + "github.com/oapi-codegen/nullable" + "github.com/unkeyed/unkey/go/apps/api/openapi" + vaultv1 
"github.com/unkeyed/unkey/go/gen/proto/vault/v1" + "github.com/unkeyed/unkey/go/internal/services/auditlogs" + "github.com/unkeyed/unkey/go/internal/services/keys" + "github.com/unkeyed/unkey/go/internal/services/permissions" + "github.com/unkeyed/unkey/go/pkg/codes" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/fault" + "github.com/unkeyed/unkey/go/pkg/hash" + "github.com/unkeyed/unkey/go/pkg/otel/logging" + "github.com/unkeyed/unkey/go/pkg/ptr" + "github.com/unkeyed/unkey/go/pkg/rbac" + "github.com/unkeyed/unkey/go/pkg/vault" + "github.com/unkeyed/unkey/go/pkg/zen" +) + +type Request = openapi.V2KeysGetKeyRequestBody +type Response = openapi.V2KeysGetKeyResponseBody + +// Handler implements zen.Route interface for the v2 keys.getKey endpoint +type Handler struct { + // Services as public fields + Logger logging.Logger + DB db.Database + Keys keys.KeyService + Permissions permissions.PermissionService + Auditlogs auditlogs.AuditLogService + Vault *vault.Service +} + +// Method returns the HTTP method this route responds to +func (h *Handler) Method() string { + return "POST" +} + +// Path returns the URL path pattern this route matches +func (h *Handler) Path() string { + return "/v2/keys.getKey" +} + +func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { + h.Logger.Debug("handling request", "requestId", s.RequestID(), "path", "/v2/keys.getKey") + + // Authentication + auth, err := h.Keys.VerifyRootKey(ctx, s) + if err != nil { + return err + } + + // Request validation + req, err := zen.BindBody[Request](s) + if err != nil { + return err + } + + // nolint:exhaustruct + args := db.FindKeyByIdOrHashParams{} + if req.KeyId != nil { + args.ID = sql.NullString{String: *req.KeyId, Valid: true} + } else if req.Key != nil { + args.Hash = sql.NullString{String: hash.Sha256(*req.Key), Valid: true} + } else { + return fault.New("invalid request", + fault.Code(codes.App.Validation.InvalidInput.URN()), + fault.Internal("missing 
keyId or key identifier"), + fault.Public("Either keyId or key must be provided."), + ) + } + + key, err := db.Query.FindKeyByIdOrHash(ctx, h.DB.RO(), args) + if err != nil { + if db.IsNotFound(err) { + return fault.Wrap( + err, + fault.Code(codes.Data.Key.NotFound.URN()), + fault.Internal("key does not exist"), + fault.Public("We could not find the requested key."), + ) + } + + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), + fault.Public("Failed to retrieve Key information."), + ) + } + + // Validate key belongs to authorized workspace + if key.WorkspaceID != auth.AuthorizedWorkspaceID { + return fault.New("key not found", + fault.Code(codes.Data.Key.NotFound.URN()), + fault.Internal("key belongs to different workspace"), + fault.Public("The specified key was not found."), + ) + } + + // Check if API is deleted + if key.Api.DeletedAtM.Valid { + return fault.New("key not found", + fault.Code(codes.Data.Key.NotFound.URN()), + fault.Internal("key belongs to deleted api"), + fault.Public("The specified key was not found."), + ) + } + + // Permission check + err = h.Permissions.Check( + ctx, + auth.KeyID, + rbac.Or( + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: "*", + Action: rbac.ReadKey, + }), + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: key.Api.ID, + Action: rbac.ReadKey, + }), + ), + ) + if err != nil { + return err + } + + keyAuth, err := db.Query.FindKeyringByID(ctx, h.DB.RO(), key.KeyAuthID) + if err != nil { + if db.IsNotFound(err) { + return fault.New("api not set up for keys", + fault.Code(codes.App.Precondition.PreconditionFailed.URN()), + fault.Internal("api not set up for keys, keyauth not found"), fault.Public("The requested API is not set up to handle keys."), + ) + } + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), fault.Public("Failed to retrieve API information."), + ) + } + + 
decrypt := ptr.SafeDeref(req.Decrypt, false) + var plaintext *string + if decrypt { + err = h.Permissions.Check( + ctx, + auth.KeyID, + rbac.Or( + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: "*", + Action: rbac.DecryptKey, + }), + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: key.Api.ID, + Action: rbac.DecryptKey, + }), + ), + ) + if err != nil { + return err + } + + if !keyAuth.StoreEncryptedKeys { + return fault.New("api not set up for key encryption", + fault.Code(codes.App.Precondition.PreconditionFailed.URN()), + fault.Internal("api not set up for key encryption"), fault.Public("The API for this key does not support key encryption."), + ) + } + + // If the key is encrypted and the encryption key ID is valid, decrypt the key. + // Otherwise the key was never encrypted to begin with. + if key.EncryptedKey.Valid && key.EncryptionKeyID.Valid && req.Key == nil { + decrypted, decryptErr := h.Vault.Decrypt(ctx, &vaultv1.DecryptRequest{ + Keyring: key.WorkspaceID, + Encrypted: key.EncryptedKey.String, + }) + + if decryptErr != nil { + h.Logger.Error("failed to decrypt key", + "keyId", key.ID, + "error", decryptErr, + ) + } else { + plaintext = ptr.P(decrypted.GetPlaintext()) + } + } + + if req.Key != nil { + // Only respond with the plaintext key if EXPLICITLY requested. 
+ plaintext = req.Key + } + } + + k := openapi.KeyResponseData{ + CreatedAt: key.CreatedAtM, + Enabled: key.Enabled, + KeyId: key.ID, + Start: key.Start, + Plaintext: plaintext, + Name: nil, + Meta: nil, + Identity: nil, + Credits: nil, + Expires: nil, + Permissions: nil, + Ratelimits: nil, + Roles: nil, + UpdatedAt: nil, + } + + if key.Name.Valid { + k.Name = ptr.P(key.Name.String) + } + + if key.UpdatedAtM.Valid { + k.UpdatedAt = ptr.P(key.UpdatedAtM.Int64) + } + + if key.Expires.Valid { + k.Expires = ptr.P(key.Expires.Time.UnixMilli()) + } + + if key.RemainingRequests.Valid { + k.Credits = &openapi.KeyCreditsData{ + Remaining: nullable.NewNullableWithValue(int64(key.RemainingRequests.Int32)), + Refill: nil, + } + + if key.RefillAmount.Valid { + var refillDay *int + interval := openapi.KeyCreditsRefillIntervalDaily + if key.RefillDay.Valid { + interval = openapi.KeyCreditsRefillIntervalMonthly + refillDay = ptr.P(int(key.RefillDay.Int16)) + } + + k.Credits.Refill = &openapi.KeyCreditsRefill{ + Amount: int64(key.RefillAmount.Int32), + Interval: interval, + RefillDay: refillDay, + } + } + } + + if key.IdentityID.Valid { + identity, idErr := db.Query.FindIdentityByID(ctx, h.DB.RO(), db.FindIdentityByIDParams{ID: key.IdentityID.String, Deleted: false}) + if idErr != nil { + if db.IsNotFound(idErr) { + return fault.New("identity not found for key", + fault.Code(codes.Data.Identity.NotFound.URN()), + fault.Internal("identity not found"), + fault.Public("The requested identity does not exist or has been deleted."), + ) + } + + return fault.Wrap(idErr, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database error"), + fault.Public("Failed to retrieve Identity information."), + ) + } + + k.Identity = &openapi.Identity{ + ExternalId: identity.ExternalID, + Id: identity.ID, + Meta: nil, + Ratelimits: nil, + } + + if len(identity.Meta) > 0 { + err = json.Unmarshal(identity.Meta, &k.Identity.Meta) + if err != nil { + return fault.Wrap(err, 
fault.Code(codes.App.Internal.UnexpectedError.URN()), + fault.Internal("unable to unmarshal identity meta"), + fault.Public("We encountered an error while trying to unmarshal the identity meta data."), + ) + } + } + + ratelimits, rlErr := db.Query.ListIdentityRatelimitsByID(ctx, h.DB.RO(), sql.NullString{Valid: true, String: identity.ID}) + if rlErr != nil && !db.IsNotFound(rlErr) { + return fault.Wrap(rlErr, fault.Code(codes.App.Internal.UnexpectedError.URN()), + fault.Internal("unable to retrieve identity ratelimits"), + fault.Public("We encountered an error while trying to retrieve the identity ratelimits."), + ) + } + + for _, ratelimit := range ratelimits { + k.Identity.Ratelimits = append(k.Identity.Ratelimits, openapi.RatelimitResponse{ + Id: ratelimit.ID, + Duration: ratelimit.Duration, + Limit: int64(ratelimit.Limit), + Name: ratelimit.Name, + AutoApply: ratelimit.AutoApply, + }) + } + } + + ratelimits, err := db.Query.ListRatelimitsByKeyID(ctx, h.DB.RO(), sql.NullString{String: key.ID, Valid: true}) + if err != nil && !db.IsNotFound(err) { + return fault.Wrap(err, fault.Code(codes.App.Internal.UnexpectedError.URN()), + fault.Internal("unable to retrieve key ratelimits"), + fault.Public("We encountered an error while trying to retrieve the key ratelimits."), + ) + } + + ratelimitsResponse := make([]openapi.RatelimitResponse, len(ratelimits)) + for idx, ratelimit := range ratelimits { + ratelimitsResponse[idx] = openapi.RatelimitResponse{ + Id: ratelimit.ID, + Duration: ratelimit.Duration, + Limit: int64(ratelimit.Limit), + Name: ratelimit.Name, + AutoApply: ratelimit.AutoApply, + } + } + + k.Ratelimits = ptr.P(ratelimitsResponse) + + if key.Meta.Valid { + err = json.Unmarshal([]byte(key.Meta.String), &k.Meta) + if err != nil { + return fault.Wrap(err, fault.Code(codes.App.Internal.UnexpectedError.URN()), + fault.Internal("unable to unmarshal key meta"), + fault.Public("We encountered an error while trying to unmarshal the key meta data."), + ) + } + } + + 
permissionSlugs, err := db.Query.ListPermissionsByKeyID(ctx, h.DB.RO(), db.ListPermissionsByKeyIDParams{ + KeyID: k.KeyId, + }) + if err != nil { + return fault.Wrap(err, fault.Code(codes.App.Internal.UnexpectedError.URN()), + fault.Internal("unable to find permissions for key"), fault.Public("Could not load permissions for key.")) + } + k.Permissions = ptr.P(permissionSlugs) + + // Get roles for the key + roles, err := db.Query.ListRolesByKeyID(ctx, h.DB.RO(), k.KeyId) + if err != nil { + return fault.Wrap(err, fault.Code(codes.App.Internal.UnexpectedError.URN()), + fault.Internal("unable to find roles for key"), fault.Public("Could not load roles for key.")) + } + + roleNames := make([]string, len(roles)) + for i, role := range roles { + roleNames[i] = role.Name + } + + k.Roles = ptr.P(roleNames) + + return s.JSON(http.StatusOK, Response{ + Meta: openapi.Meta{ + RequestId: s.RequestID(), + }, + Data: k, + }) +} diff --git a/go/apps/api/routes/v2_keys_remove_permissions/handler.go b/go/apps/api/routes/v2_keys_remove_permissions/handler.go index e6be1383d9..28e6f98a60 100644 --- a/go/apps/api/routes/v2_keys_remove_permissions/handler.go +++ b/go/apps/api/routes/v2_keys_remove_permissions/handler.go @@ -227,10 +227,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { if len(auditLogs) > 0 { err = h.Auditlogs.Insert(ctx, tx, auditLogs) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log error"), fault.Public("Failed to create audit log for permission removals."), - ) + return err } } diff --git a/go/apps/api/routes/v2_keys_remove_roles/handler.go b/go/apps/api/routes/v2_keys_remove_roles/handler.go index 15a4148e32..0c7c84369f 100644 --- a/go/apps/api/routes/v2_keys_remove_roles/handler.go +++ b/go/apps/api/routes/v2_keys_remove_roles/handler.go @@ -227,10 +227,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { if len(auditLogs) > 0 { err = 
h.Auditlogs.Insert(ctx, tx, auditLogs) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log error"), fault.Public("Failed to create audit log for role removals."), - ) + return err } } diff --git a/go/apps/api/routes/v2_keys_set_permissions/handler.go b/go/apps/api/routes/v2_keys_set_permissions/handler.go index eaef0effb3..d4ebe2ac43 100644 --- a/go/apps/api/routes/v2_keys_set_permissions/handler.go +++ b/go/apps/api/routes/v2_keys_set_permissions/handler.go @@ -328,10 +328,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { if len(auditLogs) > 0 { err = h.Auditlogs.Insert(ctx, tx, auditLogs) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log error"), fault.Public("Failed to create audit log for permission changes."), - ) + return err } } diff --git a/go/apps/api/routes/v2_keys_set_roles/handler.go b/go/apps/api/routes/v2_keys_set_roles/handler.go index b7c4f5192e..c453b7f708 100644 --- a/go/apps/api/routes/v2_keys_set_roles/handler.go +++ b/go/apps/api/routes/v2_keys_set_roles/handler.go @@ -295,10 +295,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { if len(auditLogs) > 0 { err = h.Auditlogs.Insert(ctx, tx, auditLogs) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log error"), fault.Public("Failed to create audit log for role changes."), - ) + return err } } diff --git a/go/apps/api/routes/v2_permissions_create_permission/handler.go b/go/apps/api/routes/v2_permissions_create_permission/handler.go index 6b14453550..90fb58b2ed 100644 --- a/go/apps/api/routes/v2_permissions_create_permission/handler.go +++ b/go/apps/api/routes/v2_permissions_create_permission/handler.go @@ -127,10 +127,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { }, }) if err != nil { - return 
fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log error"), fault.Public("Failed to create audit log for permission creation."), - ) + return err } return nil diff --git a/go/apps/api/routes/v2_permissions_create_role/handler.go b/go/apps/api/routes/v2_permissions_create_role/handler.go index 7eba48f856..a8e3ccf2ef 100644 --- a/go/apps/api/routes/v2_permissions_create_role/handler.go +++ b/go/apps/api/routes/v2_permissions_create_role/handler.go @@ -132,10 +132,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { }, }) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log error"), fault.Public("Failed to create audit log for role creation."), - ) + return err } return nil diff --git a/go/apps/api/routes/v2_permissions_delete_permission/handler.go b/go/apps/api/routes/v2_permissions_delete_permission/handler.go index 4c23061daa..d94efbec80 100644 --- a/go/apps/api/routes/v2_permissions_delete_permission/handler.go +++ b/go/apps/api/routes/v2_permissions_delete_permission/handler.go @@ -149,10 +149,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { }, }) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log error"), fault.Public("Failed to create audit log for permission deletion."), - ) + return err } return nil diff --git a/go/apps/api/routes/v2_permissions_delete_role/handler.go b/go/apps/api/routes/v2_permissions_delete_role/handler.go index 6290770158..293993f61b 100644 --- a/go/apps/api/routes/v2_permissions_delete_role/handler.go +++ b/go/apps/api/routes/v2_permissions_delete_role/handler.go @@ -151,10 +151,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { }, }) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("audit log 
error"), fault.Public("Failed to create audit log for role deletion."), - ) + return err } return nil diff --git a/go/apps/api/routes/v2_ratelimit_delete_override/handler.go b/go/apps/api/routes/v2_ratelimit_delete_override/handler.go index 37ec8b654f..c7566d4123 100644 --- a/go/apps/api/routes/v2_ratelimit_delete_override/handler.go +++ b/go/apps/api/routes/v2_ratelimit_delete_override/handler.go @@ -163,11 +163,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { }, }) if err != nil { - return fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("database failed to insert audit logs"), - fault.Public("Failed to insert audit logs"), - ) + return err } return nil diff --git a/go/apps/api/routes/v2_ratelimit_set_override/handler.go b/go/apps/api/routes/v2_ratelimit_set_override/handler.go index 9a21c89c5e..99a9f79f50 100644 --- a/go/apps/api/routes/v2_ratelimit_set_override/handler.go +++ b/go/apps/api/routes/v2_ratelimit_set_override/handler.go @@ -143,10 +143,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { }, }) if err != nil { - return "", fault.Wrap(err, - fault.Code(codes.App.Internal.ServiceUnavailable.URN()), - fault.Internal("database failed to insert audit logs"), fault.Public("Failed to insert audit logs"), - ) + return "", err } return overrideID, nil diff --git a/go/internal/services/auditlogs/insert.go b/go/internal/services/auditlogs/insert.go index db91d47ecc..2a299a9cc2 100644 --- a/go/internal/services/auditlogs/insert.go +++ b/go/internal/services/auditlogs/insert.go @@ -4,14 +4,73 @@ import ( "context" "database/sql" "encoding/json" - "errors" "time" "github.com/unkeyed/unkey/go/pkg/auditlog" + "github.com/unkeyed/unkey/go/pkg/codes" "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/fault" "github.com/unkeyed/unkey/go/pkg/uid" ) +// Generated Insert query which we are re-using for bulk inserts +const insertAuditLog = `INSERT INTO ` + "`" + 
`audit_log` + "`" + ` ( + id, + workspace_id, + bucket_id, + bucket, + event, + time, + display, + remote_ip, + user_agent, + actor_type, + actor_id, + actor_name, + actor_meta, + created_at +) VALUES ( + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ? +)` + +// Generated Insert query which we are re-using for bulk inserts +const insertAuditLogTarget = `INSERT INTO ` + "`" + `audit_log_target` + "`" + ` ( + workspace_id, + bucket_id, + bucket, + audit_log_id, + display_name, + type, + id, + name, + meta, + created_at +) VALUES ( + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ? +)` + // DEFAULT_BUCKET is the default bucket name used for audit logs when no bucket // is specified. All audit logs are categorized into buckets for organization // and querying purposes, with "unkey_mutations" serving as the standard bucket @@ -48,35 +107,23 @@ func (s *service) Insert(ctx context.Context, tx db.DBTX, logs []auditlog.AuditL return nil } - auditLogs := make([]db.InsertAuditLogParams, 0) - auditLogTargets := make([]db.InsertAuditLogTargetParams, 0) - - var dbTx = tx - var rwTx *sql.Tx if tx == nil { - // If we didn't get a transaction, start a new one so we can commit all - // audit logs together to not miss anything - newTx, err := s.db.RW().Begin(ctx) - if err != nil { - return err - } + return db.Tx(ctx, s.db.RW(), func(ctx context.Context, tx db.DBTX) error { + return s.insertLogs(ctx, tx, logs) + }) + } - dbTx = newTx - rwTx = newTx + return s.insertLogs(ctx, tx, logs) +} - defer func() { - rollbackErr := rwTx.Rollback() - if rollbackErr != nil && !errors.Is(rollbackErr, sql.ErrTxDone) { - s.logger.Error("rollback failed", "error", rollbackErr) - } - }() - } +func (s *service) insertLogs(ctx context.Context, tx db.DBTX, logs []auditlog.AuditLog) error { + auditLogs := make([]db.InsertAuditLogParams, 0) + auditLogTargets := make([]db.InsertAuditLogTargetParams, 0) for _, l := range logs { auditLogID := uid.New(uid.AuditLogPrefix) now := 
time.Now().UnixMilli() - actorMeta, err := json.Marshal(l.ActorMeta) if err != nil { return err @@ -111,7 +158,6 @@ func (s *service) Insert(ctx context.Context, tx db.DBTX, logs []auditlog.AuditL WorkspaceID: l.WorkspaceID, BucketID: "dummy", Bucket: DEFAULT_BUCKET, - Type: string(resource.Type), DisplayName: resource.DisplayName, Name: sql.NullString{String: resource.DisplayName, Valid: resource.DisplayName != ""}, @@ -121,23 +167,20 @@ func (s *service) Insert(ctx context.Context, tx db.DBTX, logs []auditlog.AuditL } } - for _, log := range auditLogs { - if err := db.Query.InsertAuditLog(ctx, dbTx, log); err != nil { - return err - } - } - - for _, logTarget := range auditLogTargets { - if err := db.Query.InsertAuditLogTarget(ctx, dbTx, logTarget); err != nil { - return err - } + err := db.BulkInsert(ctx, tx, insertAuditLog, auditLogs) + if err != nil { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database failed to insert audit logs"), fault.Public("Failed to insert audit logs"), + ) } - // If we are not using a transaction that has been passed in we will just commit all logs - if rwTx != nil { - if err := rwTx.Commit(); err != nil { - return err - } + err = db.BulkInsert(ctx, tx, insertAuditLogTarget, auditLogTargets) + if err != nil { + return fault.Wrap(err, + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + fault.Internal("database failed to insert audit log targets"), fault.Public("Failed to insert audit log targets"), + ) } return nil diff --git a/go/internal/services/keys/verify.go b/go/internal/services/keys/verify.go index d5fa382034..831fb33118 100644 --- a/go/internal/services/keys/verify.go +++ b/go/internal/services/keys/verify.go @@ -36,7 +36,6 @@ func (s *service) Verify(ctx context.Context, rawKey string) (VerifyResponse, er } if err != nil { - return VerifyResponse{}, fault.Wrap( err, fault.Internal("unable to load key"), @@ -62,8 +61,8 @@ func (s *service) Verify(ctx 
context.Context, rawKey string) (VerifyResponse, er fault.Public("The key has been deleted."), ) } - if !key.Enabled { + if !key.Enabled { return VerifyResponse{}, fault.New( "key is disabled", fault.Code(codes.Auth.Authorization.KeyDisabled.URN()), @@ -113,6 +112,7 @@ func (s *service) Verify(ctx context.Context, rawKey string) (VerifyResponse, er AuthorizedWorkspaceID: authorizedWorkspaceID, KeyID: key.ID, } + // Root keys store the user's workspace id in `ForWorkspaceID` and we're // interested in the user, not our rootkey workspace. if key.ForWorkspaceID.Valid { diff --git a/go/pkg/db/bulk_insert.go b/go/pkg/db/bulk_insert.go new file mode 100644 index 0000000000..3ea972f971 --- /dev/null +++ b/go/pkg/db/bulk_insert.go @@ -0,0 +1,197 @@ +// Package db provides bulk database operations for the Unkey platform. +// This file contains utilities for performing bulk inserts that are not +// supported by sqlc code generation, allowing efficient batch operations +// on large datasets while maintaining type safety and error handling. +// +// Bulk operations are essential for performance when dealing with many +// records, reducing round-trips to the database and improving throughput +// for batch processing scenarios common in API key management systems. +package db + +import ( + "context" + "fmt" + "reflect" + "strings" +) + +// BulkInsert executes a bulk insert operation using a provided SQL query template +// and a slice of argument sets. Since sqlc does not support bulk insert generation, +// this function provides a way to perform batch inserts with proper placeholder +// expansion and error handling. +// +// The function takes a SQL query template with placeholder values and expands +// it to accommodate multiple rows of data. The query parameter should contain +// a single VALUES clause that will be replicated for each argument set. 
+// +// The args parameter must be a slice of []interface{} where each element contains +// the values for one row in the same order as the placeholders in the query. +// Each row must have the exact same number of values matching the query placeholders. +// +// Query template format: +// - Use MySQL-style ? placeholders for parameters +// - Include a single VALUES clause that matches one argument set +// - Support for INSERT with ON DUPLICATE KEY UPDATE clauses +// - Compatible with any INSERT variant (INSERT, INSERT IGNORE, REPLACE, etc.) +// +// The function handles: +// - Automatic placeholder expansion for multiple rows +// - Type-safe argument passing through generics +// - Context cancellation and timeout propagation +// - Consistent error handling with database operation patterns +// +// Performance considerations: +// - Bulk inserts are significantly faster than individual INSERT statements +// - MySQL has limits on query size and parameter count (max_allowed_packet) +// - Consider batching very large datasets to avoid hitting database limits +// - Use transactions when bulk operations are part of larger atomic operations +// +// The ctx parameter provides cancellation and timeout control for the operation. +// The db parameter must implement the DBTX interface, supporting both direct +// database connections and transaction contexts for atomic operations. +// +// BulkInsert returns nil on successful execution or an error if the operation +// fails. Database errors are returned directly without additional wrapping, +// allowing callers to handle specific error conditions as needed. 
+// +// Common usage patterns: +// - Batch API key creation during workspace initialization +// - Bulk permission assignments for role-based access control +// - Mass import operations from external systems +// - Periodic data synchronization between services +// +// Example basic bulk insert: +// +// keys := []db.InsertKeyParams{ +// {ID: "key1", KeyAuthID: "auth1", Hash: "hash1"}, +// {ID: "key2", KeyAuthID: "auth2", Hash: "hash2"}, +// {ID: "key3", KeyAuthID: "auth3", Hash: "hash3"}, +// } +// +// query := "INSERT INTO keys (id, key_auth_id, hash) VALUES (?, ?, ?)" +// err := db.BulkInsert(ctx, database.RW(), query, keys) +// if err != nil { +// return fmt.Errorf("failed to bulk insert keys: %w", err) +// } +// +// Example with ON DUPLICATE KEY UPDATE: +// +// permissions := []PermissionParams{ +// {KeyID: "key1", Permission: "read"}, +// {KeyID: "key2", Permission: "write"}, +// } +// +// query := `INSERT INTO key_permissions (key_id, permission) VALUES (?, ?) +// ON DUPLICATE KEY UPDATE permission = VALUES(permission)` +// err := db.BulkInsert(ctx, tx, query, permissions) +// if err != nil { +// return fmt.Errorf("failed to upsert permissions: %w", err) +// } +// +// Example within transaction: +// +// err := db.Tx(ctx, database.RW(), func(ctx context.Context, tx db.DBTX) error { +// // Create workspace +// workspace, err := db.Query.InsertWorkspace(ctx, tx, workspaceParams) +// if err != nil { +// return err +// } +// +// // Bulk insert initial API keys +// query := "INSERT INTO keys (id, workspace_id, name) VALUES (?, ?, ?)" +// err = db.BulkInsert(ctx, tx, query, initialKeys) +// if err != nil { +// return fmt.Errorf("failed to create initial keys: %w", err) +// } +// +// return nil +// }) +// +// Limitations and considerations: +// - The query template must match the structure of each argument set exactly +// - No validation is performed on the SQL query syntax or parameter count +// - Very large batches may hit database limits on query size or parameters 
+// - The function assumes all argument sets have the same structure and type +// - Error messages may not clearly indicate which specific row caused failures +// +// Anti-patterns to avoid: +// - Using BulkInsert for single-row operations (use generated sqlc functions) +// - Mixing different parameter structures in the same args slice +// - Including multiple VALUES clauses in the query template +// - Ignoring context cancellation in long-running bulk operations +// +// For very large datasets, consider: +// - Splitting into smaller batches to avoid memory pressure +// - Using transactions to ensure consistency across batches +// - Implementing retry logic for transient database errors +// - Monitoring query execution time and database resource usage +// +// See [DBTX] for available database interfaces and [Tx] for transaction +// utilities when bulk operations need atomic guarantees. +func BulkInsert[T any](ctx context.Context, db DBTX, query string, args []T) error { + if len(args) == 0 { + return nil + } + + valuesIndex := strings.Index(strings.ToUpper(query), "VALUES") + if valuesIndex == -1 { + return fmt.Errorf("bulk insert query must contain VALUES clause") + } + + beforeValues := query[:valuesIndex+6] + afterValues := "" + + valueStart := strings.Index(query[valuesIndex:], "(") + if valueStart == -1 { + return fmt.Errorf("bulk insert query must contain parenthesized VALUES clause") + } + + valueStart += valuesIndex + parenCount := 0 + valueEnd := valueStart + + // Find the matching closing parenthesis + for i := valueStart; i < len(query); i++ { + if query[i] == '(' { + parenCount++ + } else if query[i] == ')' { + parenCount-- + if parenCount == 0 { + valueEnd = i + 1 + break + } + } + } + + if parenCount != 0 { + return fmt.Errorf("bulk insert query has unmatched parentheses in VALUES clause") + } + + valuesClause := query[valueStart:valueEnd] + if valueEnd < len(query) { + afterValues = query[valueEnd:] + } + + // Build the expanded query with multiple 
VALUES clauses + valuesClauses := make([]string, len(args)) + for i := range args { + valuesClauses[i] = valuesClause + } + + expandedQuery := beforeValues + " " + strings.Join(valuesClauses, ", ") + afterValues + + flatArgs := make([]any, 0) + for _, arg := range args { + v := reflect.ValueOf(arg) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + for i := 0; i < v.NumField(); i++ { + flatArgs = append(flatArgs, v.Field(i).Interface()) + } + } + + _, err := db.ExecContext(ctx, expandedQuery, flatArgs...) + return err +} diff --git a/go/pkg/db/identity_insert_ratelimit.sql_generated.go b/go/pkg/db/identity_insert_ratelimit.sql_generated.go index dda245eb67..cfa3d4e03f 100644 --- a/go/pkg/db/identity_insert_ratelimit.sql_generated.go +++ b/go/pkg/db/identity_insert_ratelimit.sql_generated.go @@ -18,7 +18,8 @@ INSERT INTO ` + "`" + `ratelimits` + "`" + ` ( name, ` + "`" + `limit` + "`" + `, duration, - created_at + created_at, + auto_apply ) VALUES ( ?, ?, @@ -26,6 +27,7 @@ INSERT INTO ` + "`" + `ratelimits` + "`" + ` ( ?, ?, ?, + ?, ? ) ` @@ -38,6 +40,7 @@ type InsertIdentityRatelimitParams struct { Limit int32 `db:"limit"` Duration int64 `db:"duration"` CreatedAt int64 `db:"created_at"` + AutoApply bool `db:"auto_apply"` } // InsertIdentityRatelimit @@ -49,7 +52,8 @@ type InsertIdentityRatelimitParams struct { // name, // `limit`, // duration, -// created_at +// created_at, +// auto_apply // ) VALUES ( // ?, // ?, @@ -57,6 +61,7 @@ type InsertIdentityRatelimitParams struct { // ?, // ?, // ?, +// ?, // ? 
// ) func (q *Queries) InsertIdentityRatelimit(ctx context.Context, db DBTX, arg InsertIdentityRatelimitParams) error { @@ -68,6 +73,7 @@ func (q *Queries) InsertIdentityRatelimit(ctx context.Context, db DBTX, arg Inse arg.Limit, arg.Duration, arg.CreatedAt, + arg.AutoApply, ) return err } diff --git a/go/pkg/db/identity_list_ratelimits.sql_generated.go b/go/pkg/db/identity_list_ratelimits.sql_generated.go index 76c71d6c97..4b214de79f 100644 --- a/go/pkg/db/identity_list_ratelimits.sql_generated.go +++ b/go/pkg/db/identity_list_ratelimits.sql_generated.go @@ -11,7 +11,7 @@ import ( ) const listIdentityRatelimits = `-- name: ListIdentityRatelimits :many -SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, ` + "`" + `limit` + "`" + `, duration +SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, ` + "`" + `limit` + "`" + `, duration, auto_apply FROM ratelimits WHERE identity_id = ? ORDER BY id ASC @@ -19,7 +19,7 @@ ORDER BY id ASC // ListIdentityRatelimits // -// SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, `limit`, duration +// SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, `limit`, duration, auto_apply // FROM ratelimits // WHERE identity_id = ? 
// ORDER BY id ASC @@ -42,6 +42,7 @@ func (q *Queries) ListIdentityRatelimits(ctx context.Context, db DBTX, identityI &i.IdentityID, &i.Limit, &i.Duration, + &i.AutoApply, ); err != nil { return nil, err } diff --git a/go/pkg/db/identity_list_ratelimits_by_id.sql_generated.go b/go/pkg/db/identity_list_ratelimits_by_id.sql_generated.go index ead5241623..76d9e6cb0b 100644 --- a/go/pkg/db/identity_list_ratelimits_by_id.sql_generated.go +++ b/go/pkg/db/identity_list_ratelimits_by_id.sql_generated.go @@ -11,12 +11,12 @@ import ( ) const listIdentityRatelimitsByID = `-- name: ListIdentityRatelimitsByID :many -SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, ` + "`" + `limit` + "`" + `, duration FROM ratelimits WHERE identity_id = ? +SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, ` + "`" + `limit` + "`" + `, duration, auto_apply FROM ratelimits WHERE identity_id = ? ` // ListIdentityRatelimitsByID // -// SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, `limit`, duration FROM ratelimits WHERE identity_id = ? +// SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, `limit`, duration, auto_apply FROM ratelimits WHERE identity_id = ? func (q *Queries) ListIdentityRatelimitsByID(ctx context.Context, db DBTX, identityID sql.NullString) ([]Ratelimit, error) { rows, err := db.QueryContext(ctx, listIdentityRatelimitsByID, identityID) if err != nil { @@ -36,6 +36,7 @@ func (q *Queries) ListIdentityRatelimitsByID(ctx context.Context, db DBTX, ident &i.IdentityID, &i.Limit, &i.Duration, + &i.AutoApply, ); err != nil { return nil, err } diff --git a/go/pkg/db/identity_list_ratelimits_by_ids.sql_generated.go b/go/pkg/db/identity_list_ratelimits_by_ids.sql_generated.go new file mode 100644 index 0000000000..7791ef4334 --- /dev/null +++ b/go/pkg/db/identity_list_ratelimits_by_ids.sql_generated.go @@ -0,0 +1,63 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.27.0 +// source: identity_list_ratelimits_by_ids.sql + +package db + +import ( + "context" + "database/sql" + "strings" +) + +const listIdentityRatelimitsByIDs = `-- name: ListIdentityRatelimitsByIDs :many +SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, ` + "`" + `limit` + "`" + `, duration, auto_apply FROM ratelimits WHERE identity_id IN (/*SLICE:ids*/?) +` + +// ListIdentityRatelimitsByIDs +// +// SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, `limit`, duration, auto_apply FROM ratelimits WHERE identity_id IN (/*SLICE:ids*/?) +func (q *Queries) ListIdentityRatelimitsByIDs(ctx context.Context, db DBTX, ids []sql.NullString) ([]Ratelimit, error) { + query := listIdentityRatelimitsByIDs + var queryParams []interface{} + if len(ids) > 0 { + for _, v := range ids { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:ids*/?", strings.Repeat(",?", len(ids))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:ids*/?", "NULL", 1) + } + rows, err := db.QueryContext(ctx, query, queryParams...) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Ratelimit + for rows.Next() { + var i Ratelimit + if err := rows.Scan( + &i.ID, + &i.Name, + &i.WorkspaceID, + &i.CreatedAt, + &i.UpdatedAt, + &i.KeyID, + &i.IdentityID, + &i.Limit, + &i.Duration, + &i.AutoApply, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/go/pkg/db/key_encryption_find_by_key_id.sql_generated.go b/go/pkg/db/key_encryption_find_by_key_id.sql_generated.go new file mode 100644 index 0000000000..3eb2198def --- /dev/null +++ b/go/pkg/db/key_encryption_find_by_key_id.sql_generated.go @@ -0,0 +1,31 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.27.0 +// source: key_encryption_find_by_key_id.sql + +package db + +import ( + "context" +) + +const findKeyEncryptionByKeyID = `-- name: FindKeyEncryptionByKeyID :one +SELECT workspace_id, key_id, created_at, updated_at, encrypted, encryption_key_id FROM encrypted_keys WHERE key_id = ? +` + +// FindKeyEncryptionByKeyID +// +// SELECT workspace_id, key_id, created_at, updated_at, encrypted, encryption_key_id FROM encrypted_keys WHERE key_id = ? +func (q *Queries) FindKeyEncryptionByKeyID(ctx context.Context, db DBTX, keyID string) (EncryptedKey, error) { + row := db.QueryRowContext(ctx, findKeyEncryptionByKeyID, keyID) + var i EncryptedKey + err := row.Scan( + &i.WorkspaceID, + &i.KeyID, + &i.CreatedAt, + &i.UpdatedAt, + &i.Encrypted, + &i.EncryptionKeyID, + ) + return i, err +} diff --git a/go/pkg/db/key_encryption_insert.sql_generated.go b/go/pkg/db/key_encryption_insert.sql_generated.go new file mode 100644 index 0000000000..8be70e3e8b --- /dev/null +++ b/go/pkg/db/key_encryption_insert.sql_generated.go @@ -0,0 +1,40 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.27.0 +// source: key_encryption_insert.sql + +package db + +import ( + "context" +) + +const insertKeyEncryption = `-- name: InsertKeyEncryption :exec +INSERT INTO encrypted_keys +(workspace_id, key_id, encrypted, encryption_key_id, created_at) +VALUES (?, ?, ?, ?, ?) +` + +type InsertKeyEncryptionParams struct { + WorkspaceID string `db:"workspace_id"` + KeyID string `db:"key_id"` + Encrypted string `db:"encrypted"` + EncryptionKeyID string `db:"encryption_key_id"` + CreatedAt int64 `db:"created_at"` +} + +// InsertKeyEncryption +// +// INSERT INTO encrypted_keys +// (workspace_id, key_id, encrypted, encryption_key_id, created_at) +// VALUES (?, ?, ?, ?, ?) 
+func (q *Queries) InsertKeyEncryption(ctx context.Context, db DBTX, arg InsertKeyEncryptionParams) error { + _, err := db.ExecContext(ctx, insertKeyEncryption, + arg.WorkspaceID, + arg.KeyID, + arg.Encrypted, + arg.EncryptionKeyID, + arg.CreatedAt, + ) + return err +} diff --git a/go/pkg/db/key_find_by_id_or_hash.sql_generated.go b/go/pkg/db/key_find_by_id_or_hash.sql_generated.go new file mode 100644 index 0000000000..d12c51e996 --- /dev/null +++ b/go/pkg/db/key_find_by_id_or_hash.sql_generated.go @@ -0,0 +1,122 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.27.0 +// source: key_find_by_id_or_hash.sql + +package db + +import ( + "context" + "database/sql" +) + +const findKeyByIdOrHash = `-- name: FindKeyByIdOrHash :one +SELECT + k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, a.id, a.name, a.workspace_id, a.ip_whitelist, a.auth_type, a.key_auth_id, a.created_at_m, a.updated_at_m, a.deleted_at_m, a.delete_protection, + ek.encrypted as encrypted_key, + ek.encryption_key_id as encryption_key_id +FROM ` + "`" + `keys` + "`" + ` k +JOIN apis a USING(key_auth_id) +LEFT JOIN encrypted_keys ek ON k.id = ek.key_id +WHERE (CASE + WHEN ? IS NOT NULL THEN k.id = ? + WHEN ? IS NOT NULL THEN k.hash = ? 
+ ELSE FALSE +END) AND k.deleted_at_m IS NULL AND a.deleted_at_m IS NULL +` + +type FindKeyByIdOrHashParams struct { + ID sql.NullString `db:"id"` + Hash sql.NullString `db:"hash"` +} + +type FindKeyByIdOrHashRow struct { + ID string `db:"id"` + KeyAuthID string `db:"key_auth_id"` + Hash string `db:"hash"` + Start string `db:"start"` + WorkspaceID string `db:"workspace_id"` + ForWorkspaceID sql.NullString `db:"for_workspace_id"` + Name sql.NullString `db:"name"` + OwnerID sql.NullString `db:"owner_id"` + IdentityID sql.NullString `db:"identity_id"` + Meta sql.NullString `db:"meta"` + Expires sql.NullTime `db:"expires"` + CreatedAtM int64 `db:"created_at_m"` + UpdatedAtM sql.NullInt64 `db:"updated_at_m"` + DeletedAtM sql.NullInt64 `db:"deleted_at_m"` + RefillDay sql.NullInt16 `db:"refill_day"` + RefillAmount sql.NullInt32 `db:"refill_amount"` + LastRefillAt sql.NullTime `db:"last_refill_at"` + Enabled bool `db:"enabled"` + RemainingRequests sql.NullInt32 `db:"remaining_requests"` + RatelimitAsync sql.NullBool `db:"ratelimit_async"` + RatelimitLimit sql.NullInt32 `db:"ratelimit_limit"` + RatelimitDuration sql.NullInt64 `db:"ratelimit_duration"` + Environment sql.NullString `db:"environment"` + Api Api `db:"api"` + EncryptedKey sql.NullString `db:"encrypted_key"` + EncryptionKeyID sql.NullString `db:"encryption_key_id"` +} + +// FindKeyByIdOrHash +// +// SELECT +// k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, a.id, a.name, a.workspace_id, a.ip_whitelist, a.auth_type, a.key_auth_id, a.created_at_m, a.updated_at_m, a.deleted_at_m, a.delete_protection, +// ek.encrypted as encrypted_key, +// ek.encryption_key_id as encryption_key_id +// FROM `keys` k +// JOIN apis a USING(key_auth_id) +// 
LEFT JOIN encrypted_keys ek ON k.id = ek.key_id +// WHERE (CASE +// WHEN ? IS NOT NULL THEN k.id = ? +// WHEN ? IS NOT NULL THEN k.hash = ? +// ELSE FALSE +// END) AND k.deleted_at_m IS NULL AND a.deleted_at_m IS NULL +func (q *Queries) FindKeyByIdOrHash(ctx context.Context, db DBTX, arg FindKeyByIdOrHashParams) (FindKeyByIdOrHashRow, error) { + row := db.QueryRowContext(ctx, findKeyByIdOrHash, + arg.ID, + arg.ID, + arg.Hash, + arg.Hash, + ) + var i FindKeyByIdOrHashRow + err := row.Scan( + &i.ID, + &i.KeyAuthID, + &i.Hash, + &i.Start, + &i.WorkspaceID, + &i.ForWorkspaceID, + &i.Name, + &i.OwnerID, + &i.IdentityID, + &i.Meta, + &i.Expires, + &i.CreatedAtM, + &i.UpdatedAtM, + &i.DeletedAtM, + &i.RefillDay, + &i.RefillAmount, + &i.LastRefillAt, + &i.Enabled, + &i.RemainingRequests, + &i.RatelimitAsync, + &i.RatelimitLimit, + &i.RatelimitDuration, + &i.Environment, + &i.Api.ID, + &i.Api.Name, + &i.Api.WorkspaceID, + &i.Api.IpWhitelist, + &i.Api.AuthType, + &i.Api.KeyAuthID, + &i.Api.CreatedAtM, + &i.Api.UpdatedAtM, + &i.Api.DeletedAtM, + &i.Api.DeleteProtection, + &i.EncryptedKey, + &i.EncryptionKeyID, + ) + return i, err +} diff --git a/go/pkg/db/key_insert.sql_generated.go b/go/pkg/db/key_insert.sql_generated.go index fdfdf49fd9..5b9ccdd520 100644 --- a/go/pkg/db/key_insert.sql_generated.go +++ b/go/pkg/db/key_insert.sql_generated.go @@ -26,6 +26,8 @@ INSERT INTO ` + "`" + `keys` + "`" + ` ( created_at_m, enabled, remaining_requests, + refill_day, + refill_amount, ratelimit_async, ratelimit_limit, ratelimit_duration, @@ -48,6 +50,8 @@ INSERT INTO ` + "`" + `keys` + "`" + ` ( ?, ?, ?, + ?, + ?, ? 
) ` @@ -66,6 +70,8 @@ type InsertKeyParams struct { CreatedAtM int64 `db:"created_at_m"` Enabled bool `db:"enabled"` RemainingRequests sql.NullInt32 `db:"remaining_requests"` + RefillDay sql.NullInt16 `db:"refill_day"` + RefillAmount sql.NullInt32 `db:"refill_amount"` RatelimitAsync sql.NullBool `db:"ratelimit_async"` RatelimitLimit sql.NullInt32 `db:"ratelimit_limit"` RatelimitDuration sql.NullInt64 `db:"ratelimit_duration"` @@ -89,6 +95,8 @@ type InsertKeyParams struct { // created_at_m, // enabled, // remaining_requests, +// refill_day, +// refill_amount, // ratelimit_async, // ratelimit_limit, // ratelimit_duration, @@ -111,6 +119,8 @@ type InsertKeyParams struct { // ?, // ?, // ?, +// ?, +// ?, // ? // ) func (q *Queries) InsertKey(ctx context.Context, db DBTX, arg InsertKeyParams) error { @@ -128,6 +138,8 @@ func (q *Queries) InsertKey(ctx context.Context, db DBTX, arg InsertKeyParams) e arg.CreatedAtM, arg.Enabled, arg.RemainingRequests, + arg.RefillDay, + arg.RefillAmount, arg.RatelimitAsync, arg.RatelimitLimit, arg.RatelimitDuration, diff --git a/go/pkg/db/key_insert_ratelimit.sql_generated.go b/go/pkg/db/key_insert_ratelimit.sql_generated.go index 3d424648f2..86128db93d 100644 --- a/go/pkg/db/key_insert_ratelimit.sql_generated.go +++ b/go/pkg/db/key_insert_ratelimit.sql_generated.go @@ -18,6 +18,7 @@ INSERT INTO ` + "`" + `ratelimits` + "`" + ` ( name, ` + "`" + `limit` + "`" + `, duration, + auto_apply, created_at ) VALUES ( ?, @@ -26,6 +27,7 @@ INSERT INTO ` + "`" + `ratelimits` + "`" + ` ( ?, ?, ?, + ?, ? ) ` @@ -37,6 +39,7 @@ type InsertKeyRatelimitParams struct { Name string `db:"name"` Limit int32 `db:"limit"` Duration int64 `db:"duration"` + AutoApply bool `db:"auto_apply"` CreatedAt int64 `db:"created_at"` } @@ -49,6 +52,7 @@ type InsertKeyRatelimitParams struct { // name, // `limit`, // duration, +// auto_apply, // created_at // ) VALUES ( // ?, @@ -57,6 +61,7 @@ type InsertKeyRatelimitParams struct { // ?, // ?, // ?, +// ?, // ? 
// ) func (q *Queries) InsertKeyRatelimit(ctx context.Context, db DBTX, arg InsertKeyRatelimitParams) error { @@ -67,6 +72,7 @@ func (q *Queries) InsertKeyRatelimit(ctx context.Context, db DBTX, arg InsertKey arg.Name, arg.Limit, arg.Duration, + arg.AutoApply, arg.CreatedAt, ) return err diff --git a/go/pkg/db/keyring_find_by_id.sql_generated.go b/go/pkg/db/keyring_find_by_id.sql_generated.go index e916a8818d..4a9e2d1415 100644 --- a/go/pkg/db/keyring_find_by_id.sql_generated.go +++ b/go/pkg/db/keyring_find_by_id.sql_generated.go @@ -10,14 +10,12 @@ import ( ) const findKeyringByID = `-- name: FindKeyringByID :one -SELECT id, workspace_id, created_at_m, updated_at_m, deleted_at_m, store_encrypted_keys, default_prefix, default_bytes, size_approx, size_last_updated_at FROM ` + "`" + `key_auth` + "`" + ` -WHERE id = ? +SELECT id, workspace_id, created_at_m, updated_at_m, deleted_at_m, store_encrypted_keys, default_prefix, default_bytes, size_approx, size_last_updated_at FROM ` + "`" + `key_auth` + "`" + ` WHERE id = ? ` // FindKeyringByID // -// SELECT id, workspace_id, created_at_m, updated_at_m, deleted_at_m, store_encrypted_keys, default_prefix, default_bytes, size_approx, size_last_updated_at FROM `key_auth` -// WHERE id = ? +// SELECT id, workspace_id, created_at_m, updated_at_m, deleted_at_m, store_encrypted_keys, default_prefix, default_bytes, size_approx, size_last_updated_at FROM `key_auth` WHERE id = ? func (q *Queries) FindKeyringByID(ctx context.Context, db DBTX, id string) (KeyAuth, error) { row := db.QueryRowContext(ctx, findKeyringByID, id) var i KeyAuth diff --git a/go/pkg/db/keyring_update_key_encryption.sql_generated.go b/go/pkg/db/keyring_update_key_encryption.sql_generated.go new file mode 100644 index 0000000000..b839326fcc --- /dev/null +++ b/go/pkg/db/keyring_update_key_encryption.sql_generated.go @@ -0,0 +1,27 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.27.0 +// source: keyring_update_key_encryption.sql + +package db + +import ( + "context" +) + +const updateKeyringKeyEncryption = `-- name: UpdateKeyringKeyEncryption :exec +UPDATE ` + "`" + `key_auth` + "`" + ` SET store_encrypted_keys = ? WHERE id = ? +` + +type UpdateKeyringKeyEncryptionParams struct { + StoreEncryptedKeys bool `db:"store_encrypted_keys"` + ID string `db:"id"` +} + +// UpdateKeyringKeyEncryption +// +// UPDATE `key_auth` SET store_encrypted_keys = ? WHERE id = ? +func (q *Queries) UpdateKeyringKeyEncryption(ctx context.Context, db DBTX, arg UpdateKeyringKeyEncryptionParams) error { + _, err := db.ExecContext(ctx, updateKeyringKeyEncryption, arg.StoreEncryptedKeys, arg.ID) + return err +} diff --git a/go/pkg/db/models_generated.go b/go/pkg/db/models_generated.go index ed09354917..c431335b3f 100644 --- a/go/pkg/db/models_generated.go +++ b/go/pkg/db/models_generated.go @@ -389,6 +389,7 @@ type Ratelimit struct { IdentityID sql.NullString `db:"identity_id"` Limit int32 `db:"limit"` Duration int64 `db:"duration"` + AutoApply bool `db:"auto_apply"` } type RatelimitNamespace struct { diff --git a/go/pkg/db/querier_generated.go b/go/pkg/db/querier_generated.go index d21646acc0..85fa556ebf 100644 --- a/go/pkg/db/querier_generated.go +++ b/go/pkg/db/querier_generated.go @@ -102,6 +102,25 @@ type Querier interface { // FROM `keys` // WHERE id = ? 
FindKeyByID(ctx context.Context, db DBTX, id string) (Key, error) + //FindKeyByIdOrHash + // + // SELECT + // k.id, k.key_auth_id, k.hash, k.start, k.workspace_id, k.for_workspace_id, k.name, k.owner_id, k.identity_id, k.meta, k.expires, k.created_at_m, k.updated_at_m, k.deleted_at_m, k.refill_day, k.refill_amount, k.last_refill_at, k.enabled, k.remaining_requests, k.ratelimit_async, k.ratelimit_limit, k.ratelimit_duration, k.environment, a.id, a.name, a.workspace_id, a.ip_whitelist, a.auth_type, a.key_auth_id, a.created_at_m, a.updated_at_m, a.deleted_at_m, a.delete_protection, + // ek.encrypted as encrypted_key, + // ek.encryption_key_id as encryption_key_id + // FROM `keys` k + // JOIN apis a USING(key_auth_id) + // LEFT JOIN encrypted_keys ek ON k.id = ek.key_id + // WHERE (CASE + // WHEN ? IS NOT NULL THEN k.id = ? + // WHEN ? IS NOT NULL THEN k.hash = ? + // ELSE FALSE + // END) AND k.deleted_at_m IS NULL AND a.deleted_at_m IS NULL + FindKeyByIdOrHash(ctx context.Context, db DBTX, arg FindKeyByIdOrHashParams) (FindKeyByIdOrHashRow, error) + //FindKeyEncryptionByKeyID + // + // SELECT workspace_id, key_id, created_at, updated_at, encrypted, encryption_key_id FROM encrypted_keys WHERE key_id = ? + FindKeyEncryptionByKeyID(ctx context.Context, db DBTX, keyID string) (EncryptedKey, error) //FindKeyForVerification // // WITH direct_permissions AS ( @@ -170,8 +189,7 @@ type Querier interface { FindKeyRoleByKeyAndRoleID(ctx context.Context, db DBTX, arg FindKeyRoleByKeyAndRoleIDParams) ([]KeysRole, error) //FindKeyringByID // - // SELECT id, workspace_id, created_at_m, updated_at_m, deleted_at_m, store_encrypted_keys, default_prefix, default_bytes, size_approx, size_last_updated_at FROM `key_auth` - // WHERE id = ? + // SELECT id, workspace_id, created_at_m, updated_at_m, deleted_at_m, store_encrypted_keys, default_prefix, default_bytes, size_approx, size_last_updated_at FROM `key_auth` WHERE id = ? 
FindKeyringByID(ctx context.Context, db DBTX, id string) (KeyAuth, error) // Finds a permission record by its ID // Returns: The permission record if found @@ -365,7 +383,8 @@ type Querier interface { // name, // `limit`, // duration, - // created_at + // created_at, + // auto_apply // ) VALUES ( // ?, // ?, @@ -373,6 +392,7 @@ type Querier interface { // ?, // ?, // ?, + // ?, // ? // ) InsertIdentityRatelimit(ctx context.Context, db DBTX, arg InsertIdentityRatelimitParams) error @@ -393,6 +413,8 @@ type Querier interface { // created_at_m, // enabled, // remaining_requests, + // refill_day, + // refill_amount, // ratelimit_async, // ratelimit_limit, // ratelimit_duration, @@ -415,9 +437,17 @@ type Querier interface { // ?, // ?, // ?, + // ?, + // ?, // ? // ) InsertKey(ctx context.Context, db DBTX, arg InsertKeyParams) error + //InsertKeyEncryption + // + // INSERT INTO encrypted_keys + // (workspace_id, key_id, encrypted, encryption_key_id, created_at) + // VALUES (?, ?, ?, ?, ?) + InsertKeyEncryption(ctx context.Context, db DBTX, arg InsertKeyEncryptionParams) error //InsertKeyPermission // // INSERT INTO `keys_permissions` ( @@ -441,6 +471,7 @@ type Querier interface { // name, // `limit`, // duration, + // auto_apply, // created_at // ) VALUES ( // ?, @@ -449,6 +480,7 @@ type Querier interface { // ?, // ?, // ?, + // ?, // ? // ) InsertKeyRatelimit(ctx context.Context, db DBTX, arg InsertKeyRatelimitParams) error @@ -631,15 +663,19 @@ type Querier interface { ListIdentities(ctx context.Context, db DBTX, arg ListIdentitiesParams) ([]Identity, error) //ListIdentityRatelimits // - // SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, `limit`, duration + // SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, `limit`, duration, auto_apply // FROM ratelimits // WHERE identity_id = ? 
// ORDER BY id ASC ListIdentityRatelimits(ctx context.Context, db DBTX, identityID sql.NullString) ([]Ratelimit, error) //ListIdentityRatelimitsByID // - // SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, `limit`, duration FROM ratelimits WHERE identity_id = ? + // SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, `limit`, duration, auto_apply FROM ratelimits WHERE identity_id = ? ListIdentityRatelimitsByID(ctx context.Context, db DBTX, identityID sql.NullString) ([]Ratelimit, error) + //ListIdentityRatelimitsByIDs + // + // SELECT id, name, workspace_id, created_at, updated_at, key_id, identity_id, `limit`, duration, auto_apply FROM ratelimits WHERE identity_id IN (/*SLICE:ids*/?) + ListIdentityRatelimitsByIDs(ctx context.Context, db DBTX, ids []sql.NullString) ([]Ratelimit, error) //ListKeysByKeyAuthID // // SELECT @@ -718,6 +754,17 @@ type Querier interface { // workspace_id = ? // AND namespace_id = ? ListRatelimitOverridesByNamespaceID(ctx context.Context, db DBTX, arg ListRatelimitOverridesByNamespaceIDParams) ([]RatelimitOverride, error) + //ListRatelimitsByKeyID + // + // SELECT + // id, + // name, + // `limit`, + // duration, + // auto_apply + // FROM ratelimits + // WHERE key_id = ? + ListRatelimitsByKeyID(ctx context.Context, db DBTX, keyID sql.NullString) ([]ListRatelimitsByKeyIDRow, error) //ListRatelimitsByKeyIDs // // SELECT @@ -725,7 +772,8 @@ type Querier interface { // key_id, // name, // `limit`, - // duration + // duration, + // auto_apply // FROM ratelimits // WHERE key_id IN (/*SLICE:key_ids*/?) // ORDER BY key_id, id @@ -811,6 +859,10 @@ type Querier interface { // WHERE // id = ? UpdateIdentity(ctx context.Context, db DBTX, arg UpdateIdentityParams) error + //UpdateKeyringKeyEncryption + // + // UPDATE `key_auth` SET store_encrypted_keys = ? WHERE id = ? 
+ UpdateKeyringKeyEncryption(ctx context.Context, db DBTX, arg UpdateKeyringKeyEncryptionParams) error //UpdateRatelimit // // UPDATE `ratelimits` @@ -818,6 +870,7 @@ type Querier interface { // name = ?, // `limit` = ?, // duration = ?, + // auto_apply = ?, // updated_at = NOW() // WHERE // id = ? diff --git a/go/pkg/db/queries/api_soft_delete.sql b/go/pkg/db/queries/api_soft_delete.sql index 0c9aef2722..192242a8e4 100644 --- a/go/pkg/db/queries/api_soft_delete.sql +++ b/go/pkg/db/queries/api_soft_delete.sql @@ -1,5 +1,4 @@ -- name: SoftDeleteApi :exec UPDATE apis SET deleted_at_m = sqlc.Arg(now) -WHERE id = sqlc.Arg(api_id) -; +WHERE id = sqlc.Arg(api_id); diff --git a/go/pkg/db/queries/api_update_delete_protection.sql b/go/pkg/db/queries/api_update_delete_protection.sql index 13c2ff1ee7..1fa5f606a7 100644 --- a/go/pkg/db/queries/api_update_delete_protection.sql +++ b/go/pkg/db/queries/api_update_delete_protection.sql @@ -1,5 +1,4 @@ -- name: UpdateApiDeleteProtection :exec UPDATE apis -SET delete_protection = sqlc.Arg(delete_protection) -WHERE id = sqlc.Arg(api_id) -; +SET delete_protection = sqlc.arg(delete_protection) +WHERE id = sqlc.arg(api_id); diff --git a/go/pkg/db/queries/identity_insert_ratelimit.sql b/go/pkg/db/queries/identity_insert_ratelimit.sql index 094551fd5a..9a7e75241d 100644 --- a/go/pkg/db/queries/identity_insert_ratelimit.sql +++ b/go/pkg/db/queries/identity_insert_ratelimit.sql @@ -6,7 +6,8 @@ INSERT INTO `ratelimits` ( name, `limit`, duration, - created_at + created_at, + auto_apply ) VALUES ( sqlc.arg('id'), sqlc.arg('workspace_id'), @@ -14,5 +15,6 @@ INSERT INTO `ratelimits` ( sqlc.arg('name'), sqlc.arg('limit'), sqlc.arg('duration'), - sqlc.arg('created_at') + sqlc.arg('created_at'), + sqlc.arg('auto_apply') ); diff --git a/go/pkg/db/queries/identity_list_ratelimits_by_ids.sql b/go/pkg/db/queries/identity_list_ratelimits_by_ids.sql new file mode 100644 index 0000000000..7b96c304be --- /dev/null +++ 
b/go/pkg/db/queries/identity_list_ratelimits_by_ids.sql @@ -0,0 +1,2 @@ +-- name: ListIdentityRatelimitsByIDs :many +SELECT * FROM ratelimits WHERE identity_id IN (sqlc.slice(ids)); diff --git a/go/pkg/db/queries/key_encryption_find_by_key_id.sql b/go/pkg/db/queries/key_encryption_find_by_key_id.sql new file mode 100644 index 0000000000..430f76c796 --- /dev/null +++ b/go/pkg/db/queries/key_encryption_find_by_key_id.sql @@ -0,0 +1,2 @@ +-- name: FindKeyEncryptionByKeyID :one +SELECT * FROM encrypted_keys WHERE key_id = ?; diff --git a/go/pkg/db/queries/key_encryption_insert.sql b/go/pkg/db/queries/key_encryption_insert.sql new file mode 100644 index 0000000000..8e628a5ec5 --- /dev/null +++ b/go/pkg/db/queries/key_encryption_insert.sql @@ -0,0 +1,4 @@ +-- name: InsertKeyEncryption :exec +INSERT INTO encrypted_keys +(workspace_id, key_id, encrypted, encryption_key_id, created_at) +VALUES (?, ?, ?, ?, ?); diff --git a/go/pkg/db/queries/key_find_by_id_or_hash.sql b/go/pkg/db/queries/key_find_by_id_or_hash.sql new file mode 100644 index 0000000000..441827fe0a --- /dev/null +++ b/go/pkg/db/queries/key_find_by_id_or_hash.sql @@ -0,0 +1,13 @@ +-- name: FindKeyByIdOrHash :one +SELECT + k.*, sqlc.embed(a), + ek.encrypted as encrypted_key, + ek.encryption_key_id as encryption_key_id +FROM `keys` k +JOIN apis a USING(key_auth_id) +LEFT JOIN encrypted_keys ek ON k.id = ek.key_id +WHERE (CASE + WHEN sqlc.narg(id) IS NOT NULL THEN k.id = sqlc.narg(id) + WHEN sqlc.narg(hash) IS NOT NULL THEN k.hash = sqlc.narg(hash) + ELSE FALSE +END) AND k.deleted_at_m IS NULL AND a.deleted_at_m IS NULL; diff --git a/go/pkg/db/queries/key_insert.sql b/go/pkg/db/queries/key_insert.sql index b37c42a2a5..8461b8c85c 100644 --- a/go/pkg/db/queries/key_insert.sql +++ b/go/pkg/db/queries/key_insert.sql @@ -14,6 +14,8 @@ INSERT INTO `keys` ( created_at_m, enabled, remaining_requests, + refill_day, + refill_amount, ratelimit_async, ratelimit_limit, ratelimit_duration, @@ -33,6 +35,8 @@ INSERT INTO `keys` ( 
sqlc.arg(created_at_m), sqlc.arg(enabled), sqlc.arg(remaining_requests), + sqlc.arg(refill_day), + sqlc.arg(refill_amount), sqlc.arg(ratelimit_async), sqlc.arg(ratelimit_limit), sqlc.arg(ratelimit_duration), diff --git a/go/pkg/db/queries/key_insert_ratelimit.sql b/go/pkg/db/queries/key_insert_ratelimit.sql index 138c55bf6a..f3d69ba0db 100644 --- a/go/pkg/db/queries/key_insert_ratelimit.sql +++ b/go/pkg/db/queries/key_insert_ratelimit.sql @@ -6,6 +6,7 @@ INSERT INTO `ratelimits` ( name, `limit`, duration, + auto_apply, created_at ) VALUES ( sqlc.arg('id'), @@ -14,5 +15,6 @@ INSERT INTO `ratelimits` ( sqlc.arg('name'), sqlc.arg('limit'), sqlc.arg('duration'), + sqlc.arg('auto_apply'), sqlc.arg('created_at') -); \ No newline at end of file +); diff --git a/go/pkg/db/queries/keyring_find_by_id.sql b/go/pkg/db/queries/keyring_find_by_id.sql index 339639c625..ba611726ee 100644 --- a/go/pkg/db/queries/keyring_find_by_id.sql +++ b/go/pkg/db/queries/keyring_find_by_id.sql @@ -1,3 +1,2 @@ -- name: FindKeyringByID :one -SELECT * FROM `key_auth` -WHERE id = sqlc.arg(id); +SELECT * FROM `key_auth` WHERE id = sqlc.arg(id); diff --git a/go/pkg/db/queries/keyring_update_key_encryption.sql b/go/pkg/db/queries/keyring_update_key_encryption.sql new file mode 100644 index 0000000000..97bcff1866 --- /dev/null +++ b/go/pkg/db/queries/keyring_update_key_encryption.sql @@ -0,0 +1,2 @@ +-- name: UpdateKeyringKeyEncryption :exec +UPDATE `key_auth` SET store_encrypted_keys = sqlc.arg(store_encrypted_keys) WHERE id = sqlc.arg(id); diff --git a/go/pkg/db/queries/ratelimit_list_by_key_id.sql b/go/pkg/db/queries/ratelimit_list_by_key_id.sql new file mode 100644 index 0000000000..def96964d5 --- /dev/null +++ b/go/pkg/db/queries/ratelimit_list_by_key_id.sql @@ -0,0 +1,9 @@ +-- name: ListRatelimitsByKeyID :many +SELECT + id, + name, + `limit`, + duration, + auto_apply +FROM ratelimits +WHERE key_id = ?; diff --git a/go/pkg/db/queries/ratelimit_list_by_key_ids.sql 
b/go/pkg/db/queries/ratelimit_list_by_key_ids.sql index 4e68de5a8a..a40ef18186 100644 --- a/go/pkg/db/queries/ratelimit_list_by_key_ids.sql +++ b/go/pkg/db/queries/ratelimit_list_by_key_ids.sql @@ -1,10 +1,11 @@ -- name: ListRatelimitsByKeyIDs :many -SELECT +SELECT id, key_id, name, `limit`, - duration -FROM ratelimits + duration, + auto_apply +FROM ratelimits WHERE key_id IN (sqlc.slice(key_ids)) -ORDER BY key_id, id; \ No newline at end of file +ORDER BY key_id, id; diff --git a/go/pkg/db/queries/ratelimit_update.sql b/go/pkg/db/queries/ratelimit_update.sql index 559970b8ce..b9b87729cb 100644 --- a/go/pkg/db/queries/ratelimit_update.sql +++ b/go/pkg/db/queries/ratelimit_update.sql @@ -1,9 +1,10 @@ -- name: UpdateRatelimit :exec -UPDATE `ratelimits` -SET +UPDATE `ratelimits` +SET name = sqlc.arg('name'), `limit` = sqlc.arg('limit'), duration = sqlc.arg('duration'), + auto_apply = sqlc.arg('auto_apply'), updated_at = NOW() -WHERE - id = sqlc.arg('id'); \ No newline at end of file +WHERE + id = sqlc.arg('id'); diff --git a/go/pkg/db/ratelimit_list_by_key_id.sql_generated.go b/go/pkg/db/ratelimit_list_by_key_id.sql_generated.go new file mode 100644 index 0000000000..6e5bc59c1f --- /dev/null +++ b/go/pkg/db/ratelimit_list_by_key_id.sql_generated.go @@ -0,0 +1,69 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.27.0 +// source: ratelimit_list_by_key_id.sql + +package db + +import ( + "context" + "database/sql" +) + +const listRatelimitsByKeyID = `-- name: ListRatelimitsByKeyID :many +SELECT + id, + name, + ` + "`" + `limit` + "`" + `, + duration, + auto_apply +FROM ratelimits +WHERE key_id = ? +` + +type ListRatelimitsByKeyIDRow struct { + ID string `db:"id"` + Name string `db:"name"` + Limit int32 `db:"limit"` + Duration int64 `db:"duration"` + AutoApply bool `db:"auto_apply"` +} + +// ListRatelimitsByKeyID +// +// SELECT +// id, +// name, +// `limit`, +// duration, +// auto_apply +// FROM ratelimits +// WHERE key_id = ? 
+func (q *Queries) ListRatelimitsByKeyID(ctx context.Context, db DBTX, keyID sql.NullString) ([]ListRatelimitsByKeyIDRow, error) { + rows, err := db.QueryContext(ctx, listRatelimitsByKeyID, keyID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []ListRatelimitsByKeyIDRow + for rows.Next() { + var i ListRatelimitsByKeyIDRow + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Limit, + &i.Duration, + &i.AutoApply, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/go/pkg/db/ratelimit_list_by_key_ids.sql_generated.go b/go/pkg/db/ratelimit_list_by_key_ids.sql_generated.go index 436a522d46..2ff6783298 100644 --- a/go/pkg/db/ratelimit_list_by_key_ids.sql_generated.go +++ b/go/pkg/db/ratelimit_list_by_key_ids.sql_generated.go @@ -12,23 +12,25 @@ import ( ) const listRatelimitsByKeyIDs = `-- name: ListRatelimitsByKeyIDs :many -SELECT +SELECT id, key_id, name, ` + "`" + `limit` + "`" + `, - duration -FROM ratelimits + duration, + auto_apply +FROM ratelimits WHERE key_id IN (/*SLICE:key_ids*/?) ORDER BY key_id, id ` type ListRatelimitsByKeyIDsRow struct { - ID string `db:"id"` - KeyID sql.NullString `db:"key_id"` - Name string `db:"name"` - Limit int32 `db:"limit"` - Duration int64 `db:"duration"` + ID string `db:"id"` + KeyID sql.NullString `db:"key_id"` + Name string `db:"name"` + Limit int32 `db:"limit"` + Duration int64 `db:"duration"` + AutoApply bool `db:"auto_apply"` } // ListRatelimitsByKeyIDs @@ -38,7 +40,8 @@ type ListRatelimitsByKeyIDsRow struct { // key_id, // name, // `limit`, -// duration +// duration, +// auto_apply // FROM ratelimits // WHERE key_id IN (/*SLICE:key_ids*/?) 
// ORDER BY key_id, id @@ -67,6 +70,7 @@ func (q *Queries) ListRatelimitsByKeyIDs(ctx context.Context, db DBTX, keyIds [] &i.Name, &i.Limit, &i.Duration, + &i.AutoApply, ); err != nil { return nil, err } diff --git a/go/pkg/db/ratelimit_update.sql_generated.go b/go/pkg/db/ratelimit_update.sql_generated.go index 0fbdd11eb5..9b5ed9b75b 100644 --- a/go/pkg/db/ratelimit_update.sql_generated.go +++ b/go/pkg/db/ratelimit_update.sql_generated.go @@ -10,21 +10,23 @@ import ( ) const updateRatelimit = `-- name: UpdateRatelimit :exec -UPDATE ` + "`" + `ratelimits` + "`" + ` -SET +UPDATE ` + "`" + `ratelimits` + "`" + ` +SET name = ?, ` + "`" + `limit` + "`" + ` = ?, duration = ?, + auto_apply = ?, updated_at = NOW() -WHERE +WHERE id = ? ` type UpdateRatelimitParams struct { - Name string `db:"name"` - Limit int32 `db:"limit"` - Duration int64 `db:"duration"` - ID string `db:"id"` + Name string `db:"name"` + Limit int32 `db:"limit"` + Duration int64 `db:"duration"` + AutoApply bool `db:"auto_apply"` + ID string `db:"id"` } // UpdateRatelimit @@ -34,6 +36,7 @@ type UpdateRatelimitParams struct { // name = ?, // `limit` = ?, // duration = ?, +// auto_apply = ?, // updated_at = NOW() // WHERE // id = ? 
@@ -42,6 +45,7 @@ func (q *Queries) UpdateRatelimit(ctx context.Context, db DBTX, arg UpdateRateli arg.Name, arg.Limit, arg.Duration, + arg.AutoApply, arg.ID, ) return err diff --git a/go/pkg/db/schema.sql b/go/pkg/db/schema.sql index e560138fb8..40fbada6a3 100644 --- a/go/pkg/db/schema.sql +++ b/go/pkg/db/schema.sql @@ -1,309 +1,310 @@ CREATE TABLE `apis` ( - `id` varchar(256) NOT NULL, - `name` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `ip_whitelist` varchar(512), - `auth_type` enum('key','jwt'), - `key_auth_id` varchar(256), - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - `deleted_at_m` bigint, - `delete_protection` boolean DEFAULT false, - CONSTRAINT `apis_id` PRIMARY KEY(`id`), - CONSTRAINT `apis_key_auth_id_unique` UNIQUE(`key_auth_id`) -); + `id` varchar(256) NOT NULL, + `name` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `ip_whitelist` varchar(512) DEFAULT NULL, + `auth_type` enum('key','jwt') DEFAULT NULL, + `key_auth_id` varchar(256) DEFAULT NULL, + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + `deleted_at_m` bigint DEFAULT NULL, + `delete_protection` tinyint(1) DEFAULT '0', + PRIMARY KEY (`id`), + UNIQUE KEY `apis_key_auth_id_unique` (`key_auth_id`), + KEY `workspace_id_idx` (`workspace_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `keys_permissions` ( - `temp_id` bigint AUTO_INCREMENT NOT NULL, - `key_id` varchar(256) NOT NULL, - `permission_id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - CONSTRAINT `keys_permissions_key_id_permission_id_workspace_id` PRIMARY KEY(`key_id`,`permission_id`,`workspace_id`), - CONSTRAINT `keys_permissions_temp_id_unique` UNIQUE(`temp_id`), - CONSTRAINT `key_id_permission_id_idx` UNIQUE(`key_id`,`permission_id`) -); +CREATE TABLE `audit_log` ( + `id` varchar(256) NOT NULL, + `workspace_id` 
varchar(256) NOT NULL, + `bucket` varchar(256) NOT NULL DEFAULT 'unkey_mutations', + `bucket_id` varchar(256) NOT NULL, + `event` varchar(256) NOT NULL, + `time` bigint NOT NULL, + `display` varchar(256) NOT NULL, + `remote_ip` varchar(256) DEFAULT NULL, + `user_agent` varchar(256) DEFAULT NULL, + `actor_type` varchar(256) NOT NULL, + `actor_id` varchar(256) NOT NULL, + `actor_name` varchar(256) DEFAULT NULL, + `actor_meta` json DEFAULT NULL, + `created_at` bigint NOT NULL, + `updated_at` bigint DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `workspace_id_idx` (`workspace_id`), + KEY `bucket_id_idx` (`bucket_id`), + KEY `bucket_idx` (`bucket`), + KEY `event_idx` (`event`), + KEY `actor_id_idx` (`actor_id`), + KEY `time_idx` (`time`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `keys_roles` ( - `key_id` varchar(256) NOT NULL, - `role_id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - CONSTRAINT `keys_roles_role_id_key_id_workspace_id` PRIMARY KEY(`role_id`,`key_id`,`workspace_id`), - CONSTRAINT `unique_key_id_role_id` UNIQUE(`key_id`,`role_id`) -); +CREATE TABLE `audit_log_bucket` ( + `id` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `name` varchar(256) NOT NULL, + `retention_days` int DEFAULT NULL, + `created_at` bigint NOT NULL, + `updated_at` bigint DEFAULT NULL, + `delete_protection` tinyint(1) DEFAULT '0', + PRIMARY KEY (`id`), + UNIQUE KEY `unique_name_per_workspace_idx` (`workspace_id`,`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `permissions` ( - `id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `name` varchar(512) NOT NULL, - `slug` varchar(128) NOT NULL, - `description` varchar(512), - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - CONSTRAINT `permissions_id` PRIMARY KEY(`id`), - CONSTRAINT `unique_name_per_workspace_idx` 
UNIQUE(`name`,`workspace_id`), - CONSTRAINT `unique_slug_per_workspace_idx` UNIQUE(`slug`,`workspace_id`) -); +CREATE TABLE `audit_log_target` ( + `workspace_id` varchar(256) NOT NULL, + `bucket_id` varchar(256) NOT NULL, + `bucket` varchar(256) NOT NULL DEFAULT 'unkey_mutations', + `audit_log_id` varchar(256) NOT NULL, + `display_name` varchar(256) NOT NULL, + `type` varchar(256) NOT NULL, + `id` varchar(256) NOT NULL, + `name` varchar(256) DEFAULT NULL, + `meta` json DEFAULT NULL, + `created_at` bigint NOT NULL, + `updated_at` bigint DEFAULT NULL, + PRIMARY KEY (`audit_log_id`,`id`), + KEY `bucket` (`bucket`), + KEY `audit_log_id` (`audit_log_id`), + KEY `id_idx` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `roles` ( - `id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `name` varchar(512) NOT NULL, - `description` varchar(512), - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - CONSTRAINT `roles_id` PRIMARY KEY(`id`), - CONSTRAINT `unique_name_per_workspace_idx` UNIQUE(`name`,`workspace_id`) -); +CREATE TABLE `encrypted_keys` ( + `workspace_id` varchar(256) NOT NULL, + `key_id` varchar(256) NOT NULL, + `created_at` bigint NOT NULL DEFAULT '0', + `updated_at` bigint DEFAULT NULL, + `encrypted` varchar(1024) NOT NULL, + `encryption_key_id` varchar(256) NOT NULL, + UNIQUE KEY `key_id_idx` (`key_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `roles_permissions` ( - `role_id` varchar(256) NOT NULL, - `permission_id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - CONSTRAINT `roles_permissions_role_id_permission_id_workspace_id` PRIMARY KEY(`role_id`,`permission_id`,`workspace_id`), - CONSTRAINT `unique_tuple_permission_id_role_id` UNIQUE(`permission_id`,`role_id`) -); +CREATE TABLE `identities` ( + `id` varchar(256) NOT NULL, + `external_id` varchar(256) NOT 
NULL, + `workspace_id` varchar(256) NOT NULL, + `environment` varchar(256) NOT NULL DEFAULT 'default', + `meta` json DEFAULT NULL, + `deleted` tinyint(1) NOT NULL DEFAULT '0', + `created_at` bigint NOT NULL, + `updated_at` bigint DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `workspace_id_external_id_deleted_idx` (`workspace_id`,`external_id`,`deleted`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; CREATE TABLE `key_auth` ( - `id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - `deleted_at_m` bigint, - `store_encrypted_keys` boolean NOT NULL DEFAULT false, - `default_prefix` varchar(8), - `default_bytes` int DEFAULT 16, - `size_approx` int NOT NULL DEFAULT 0, - `size_last_updated_at` bigint NOT NULL DEFAULT 0, - CONSTRAINT `key_auth_id` PRIMARY KEY(`id`) -); + `id` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + `deleted_at_m` bigint DEFAULT NULL, + `store_encrypted_keys` tinyint(1) NOT NULL DEFAULT '0', + `default_prefix` varchar(8) DEFAULT NULL, + `default_bytes` int DEFAULT '16', + `size_approx` int NOT NULL DEFAULT '0', + `size_last_updated_at` bigint NOT NULL DEFAULT '0', + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `encrypted_keys` ( - `workspace_id` varchar(256) NOT NULL, - `key_id` varchar(256) NOT NULL, - `created_at` bigint NOT NULL DEFAULT 0, - `updated_at` bigint, - `encrypted` varchar(1024) NOT NULL, - `encryption_key_id` varchar(256) NOT NULL, - CONSTRAINT `key_id_idx` UNIQUE(`key_id`) -); +CREATE TABLE `key_migration_errors` ( + `id` varchar(256) NOT NULL, + `migration_id` varchar(256) NOT NULL, + `created_at` bigint NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `message` json NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; 
CREATE TABLE `keys` ( - `id` varchar(256) NOT NULL, - `key_auth_id` varchar(256) NOT NULL, - `hash` varchar(256) NOT NULL, - `start` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `for_workspace_id` varchar(256), - `name` varchar(256), - `owner_id` varchar(256), - `identity_id` varchar(256), - `meta` text, - `expires` datetime(3), - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - `deleted_at_m` bigint, - `refill_day` tinyint, - `refill_amount` int, - `last_refill_at` datetime(3), - `enabled` boolean NOT NULL DEFAULT true, - `remaining_requests` int, - `ratelimit_async` boolean, - `ratelimit_limit` int, - `ratelimit_duration` bigint, - `environment` varchar(256), - CONSTRAINT `keys_id` PRIMARY KEY(`id`), - CONSTRAINT `hash_idx` UNIQUE(`hash`) -); - -CREATE TABLE `vercel_bindings` ( - `id` varchar(256) NOT NULL, - `integration_id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `project_id` varchar(256) NOT NULL, - `environment` enum('development','preview','production') NOT NULL, - `resource_id` varchar(256) NOT NULL, - `resource_type` enum('rootKey','apiId') NOT NULL, - `vercel_env_id` varchar(256) NOT NULL, - `last_edited_by` varchar(256) NOT NULL, - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - `deleted_at_m` bigint, - CONSTRAINT `vercel_bindings_id` PRIMARY KEY(`id`), - CONSTRAINT `project_environment_resource_type_idx` UNIQUE(`project_id`,`environment`,`resource_type`) -); + `id` varchar(256) NOT NULL, + `key_auth_id` varchar(256) NOT NULL, + `hash` varchar(256) NOT NULL, + `start` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `for_workspace_id` varchar(256) DEFAULT NULL, + `name` varchar(256) DEFAULT NULL, + `owner_id` varchar(256) DEFAULT NULL, + `identity_id` varchar(256) DEFAULT NULL, + `meta` text, + `expires` datetime(3) DEFAULT NULL, + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + `deleted_at_m` bigint DEFAULT NULL, + 
`refill_day` tinyint DEFAULT NULL, + `refill_amount` int DEFAULT NULL, + `last_refill_at` datetime(3) DEFAULT NULL, + `enabled` tinyint(1) NOT NULL DEFAULT '1', + `remaining_requests` int DEFAULT NULL, + `ratelimit_async` tinyint(1) DEFAULT NULL, + `ratelimit_limit` int DEFAULT NULL, + `ratelimit_duration` bigint DEFAULT NULL, + `environment` varchar(256) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `hash_idx` (`hash`), + KEY `key_auth_id_deleted_at_idx` (`key_auth_id`,`deleted_at_m`), + KEY `idx_keys_on_for_workspace_id` (`for_workspace_id`), + KEY `owner_id_idx` (`owner_id`), + KEY `identity_id_idx` (`identity_id`), + KEY `deleted_at_idx` (`deleted_at_m`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `vercel_integrations` ( - `id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `team_id` varchar(256), - `access_token` varchar(256) NOT NULL, - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - `deleted_at_m` bigint, - CONSTRAINT `vercel_integrations_id` PRIMARY KEY(`id`) -); +CREATE TABLE `keys_permissions` ( + `temp_id` bigint NOT NULL AUTO_INCREMENT, + `key_id` varchar(256) NOT NULL, + `permission_id` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + PRIMARY KEY (`key_id`,`permission_id`,`workspace_id`), + UNIQUE KEY `keys_permissions_temp_id_unique` (`temp_id`), + UNIQUE KEY `key_id_permission_id_idx` (`key_id`,`permission_id`) +) ENGINE=InnoDB AUTO_INCREMENT=34 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `ratelimit_namespaces` ( - `id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `name` varchar(512) NOT NULL, - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - `deleted_at_m` bigint, - CONSTRAINT `ratelimit_namespaces_id` PRIMARY KEY(`id`), - CONSTRAINT `unique_name_per_workspace_idx` UNIQUE(`name`,`workspace_id`) -); +CREATE 
TABLE `keys_roles` ( + `key_id` varchar(256) NOT NULL, + `role_id` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + PRIMARY KEY (`role_id`,`key_id`,`workspace_id`), + UNIQUE KEY `unique_key_id_role_id` (`key_id`,`role_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `ratelimit_overrides` ( - `id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `namespace_id` varchar(256) NOT NULL, - `identifier` varchar(512) NOT NULL, - `limit` int NOT NULL, - `duration` int NOT NULL, - `async` boolean, - `sharding` enum('edge'), - `created_at_m` bigint NOT NULL DEFAULT 0, - `updated_at_m` bigint, - `deleted_at_m` bigint, - CONSTRAINT `ratelimit_overrides_id` PRIMARY KEY(`id`), - CONSTRAINT `unique_identifier_per_namespace_idx` UNIQUE(`identifier`,`namespace_id`) -); +CREATE TABLE `permissions` ( + `id` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `name` varchar(512) NOT NULL, + `slug` varchar(128) NOT NULL, + `description` varchar(512) DEFAULT NULL, + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `unique_name_per_workspace_idx` (`name`,`workspace_id`), + UNIQUE KEY `unique_slug_per_workspace_idx` (`slug`,`workspace_id`), + KEY `workspace_id_idx` (`workspace_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `workspaces` ( - `id` varchar(256) NOT NULL, - `org_id` varchar(256) NOT NULL, - `name` varchar(256) NOT NULL, - `plan` enum('free','pro','enterprise') DEFAULT 'free', - `tier` varchar(256) DEFAULT 'Free', - `stripe_customer_id` varchar(256), - `stripe_subscription_id` varchar(256), - `beta_features` json NOT NULL, - `features` json NOT NULL, - `subscriptions` json, - `enabled` boolean NOT NULL DEFAULT true, - `delete_protection` boolean DEFAULT false, - `created_at_m` bigint NOT NULL DEFAULT 0, - 
`updated_at_m` bigint, - `deleted_at_m` bigint, - CONSTRAINT `workspaces_id` PRIMARY KEY(`id`), - CONSTRAINT `workspaces_org_id_unique` UNIQUE(`org_id`) -); +CREATE TABLE `quota` ( + `workspace_id` varchar(256) NOT NULL, + `requests_per_month` bigint NOT NULL DEFAULT '0', + `logs_retention_days` int NOT NULL DEFAULT '0', + `audit_logs_retention_days` int NOT NULL DEFAULT '0', + `team` tinyint(1) NOT NULL DEFAULT '0', + PRIMARY KEY (`workspace_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `key_migration_errors` ( - `id` varchar(256) NOT NULL, - `migration_id` varchar(256) NOT NULL, - `created_at` bigint NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `message` json NOT NULL, - CONSTRAINT `key_migration_errors_id` PRIMARY KEY(`id`) -); +CREATE TABLE `ratelimit_namespaces` ( + `id` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `name` varchar(512) NOT NULL, + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + `deleted_at_m` bigint DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `unique_name_per_workspace_idx` (`name`,`workspace_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `identities` ( - `id` varchar(256) NOT NULL, - `external_id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `environment` varchar(256) NOT NULL DEFAULT 'default', - `meta` json, - `deleted` boolean NOT NULL DEFAULT false, - `created_at` bigint NOT NULL, - `updated_at` bigint, - CONSTRAINT `identities_id` PRIMARY KEY(`id`), - CONSTRAINT `workspace_id_external_id_deleted_idx` UNIQUE(`workspace_id`,`external_id`,`deleted`) -); +CREATE TABLE `ratelimit_overrides` ( + `id` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `namespace_id` varchar(256) NOT NULL, + `identifier` varchar(512) NOT NULL, + `limit` int NOT NULL, + `duration` int NOT NULL, + `async` tinyint(1) DEFAULT NULL, + `sharding` enum('edge') DEFAULT NULL, + 
`created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + `deleted_at_m` bigint DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `unique_identifier_per_namespace_idx` (`identifier`,`namespace_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; CREATE TABLE `ratelimits` ( - `id` varchar(256) NOT NULL, - `name` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `created_at` bigint NOT NULL, - `updated_at` bigint, - `key_id` varchar(256), - `identity_id` varchar(256), - `limit` int NOT NULL, - `duration` bigint NOT NULL, - CONSTRAINT `ratelimits_id` PRIMARY KEY(`id`), - CONSTRAINT `unique_name_idx` UNIQUE(`name`,`key_id`,`identity_id`) -); + `id` varchar(256) NOT NULL, + `name` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `created_at` bigint NOT NULL, + `updated_at` bigint DEFAULT NULL, + `key_id` varchar(256) DEFAULT NULL, + `identity_id` varchar(256) DEFAULT NULL, + `limit` int NOT NULL, + `duration` bigint NOT NULL, + `auto_apply` tinyint(1) NOT NULL DEFAULT '0', + PRIMARY KEY (`id`), + UNIQUE KEY `unique_name_per_key_idx` (`name`,`key_id`), + UNIQUE KEY `unique_name_per_identity_idx` (`name`,`identity_id`), + KEY `name_idx` (`name`), + KEY `identity_id_idx` (`identity_id`), + KEY `key_id_idx` (`key_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `quota` ( - `workspace_id` varchar(256) NOT NULL, - `requests_per_month` bigint NOT NULL DEFAULT 0, - `logs_retention_days` int NOT NULL DEFAULT 0, - `audit_logs_retention_days` int NOT NULL DEFAULT 0, - `team` boolean NOT NULL DEFAULT false, - CONSTRAINT `quota_workspace_id` PRIMARY KEY(`workspace_id`) -); +CREATE TABLE `roles` ( + `id` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `name` varchar(512) NOT NULL, + `description` varchar(512) DEFAULT NULL, + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY 
`unique_name_per_workspace_idx` (`name`,`workspace_id`), + KEY `workspace_id_idx` (`workspace_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `audit_log` ( - `id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `bucket` varchar(256) NOT NULL DEFAULT 'unkey_mutations', - `bucket_id` varchar(256) NOT NULL, - `event` varchar(256) NOT NULL, - `time` bigint NOT NULL, - `display` varchar(256) NOT NULL, - `remote_ip` varchar(256), - `user_agent` varchar(256), - `actor_type` varchar(256) NOT NULL, - `actor_id` varchar(256) NOT NULL, - `actor_name` varchar(256), - `actor_meta` json, - `created_at` bigint NOT NULL, - `updated_at` bigint, - CONSTRAINT `audit_log_id` PRIMARY KEY(`id`) -); +CREATE TABLE `roles_permissions` ( + `role_id` varchar(256) NOT NULL, + `permission_id` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + PRIMARY KEY (`role_id`,`permission_id`,`workspace_id`), + UNIQUE KEY `unique_tuple_permission_id_role_id` (`permission_id`,`role_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `audit_log_bucket` ( - `id` varchar(256) NOT NULL, - `workspace_id` varchar(256) NOT NULL, - `name` varchar(256) NOT NULL, - `retention_days` int, - `created_at` bigint NOT NULL, - `updated_at` bigint, - `delete_protection` boolean DEFAULT false, - CONSTRAINT `audit_log_bucket_id` PRIMARY KEY(`id`), - CONSTRAINT `unique_name_per_workspace_idx` UNIQUE(`workspace_id`,`name`) -); +CREATE TABLE `vercel_bindings` ( + `id` varchar(256) NOT NULL, + `integration_id` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `project_id` varchar(256) NOT NULL, + `environment` enum('development','preview','production') NOT NULL, + `resource_id` varchar(256) NOT NULL, + `resource_type` enum('rootKey','apiId') NOT NULL, + `vercel_env_id` varchar(256) NOT NULL, + `last_edited_by` varchar(256) NOT 
NULL, + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + `deleted_at_m` bigint DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `project_environment_resource_type_idx` (`project_id`,`environment`,`resource_type`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE TABLE `audit_log_target` ( - `workspace_id` varchar(256) NOT NULL, - `bucket_id` varchar(256) NOT NULL, - `bucket` varchar(256) NOT NULL DEFAULT 'unkey_mutations', - `audit_log_id` varchar(256) NOT NULL, - `display_name` varchar(256) NOT NULL, - `type` varchar(256) NOT NULL, - `id` varchar(256) NOT NULL, - `name` varchar(256), - `meta` json, - `created_at` bigint NOT NULL, - `updated_at` bigint, - CONSTRAINT `audit_log_target_audit_log_id_id_pk` PRIMARY KEY(`audit_log_id`,`id`) -); +CREATE TABLE `vercel_integrations` ( + `id` varchar(256) NOT NULL, + `workspace_id` varchar(256) NOT NULL, + `team_id` varchar(256) DEFAULT NULL, + `access_token` varchar(256) NOT NULL, + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + `deleted_at_m` bigint DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -CREATE INDEX `workspace_id_idx` ON `apis` (`workspace_id`); -CREATE INDEX `workspace_id_idx` ON `permissions` (`workspace_id`); -CREATE INDEX `workspace_id_idx` ON `roles` (`workspace_id`); -CREATE INDEX `key_auth_id_deleted_at_idx` ON `keys` (`key_auth_id`,`deleted_at_m`); -CREATE INDEX `idx_keys_on_for_workspace_id` ON `keys` (`for_workspace_id`); -CREATE INDEX `owner_id_idx` ON `keys` (`owner_id`); -CREATE INDEX `identity_id_idx` ON `keys` (`identity_id`); -CREATE INDEX `deleted_at_idx` ON `keys` (`deleted_at_m`); -CREATE INDEX `name_idx` ON `ratelimits` (`name`); -CREATE INDEX `identity_id_idx` ON `ratelimits` (`identity_id`); -CREATE INDEX `key_id_idx` ON `ratelimits` (`key_id`); -CREATE INDEX `workspace_id_idx` ON `audit_log` (`workspace_id`); -CREATE INDEX 
`bucket_id_idx` ON `audit_log` (`bucket_id`); -CREATE INDEX `bucket_idx` ON `audit_log` (`bucket`); -CREATE INDEX `event_idx` ON `audit_log` (`event`); -CREATE INDEX `actor_id_idx` ON `audit_log` (`actor_id`); -CREATE INDEX `time_idx` ON `audit_log` (`time`); -CREATE INDEX `bucket` ON `audit_log_target` (`bucket`); -CREATE INDEX `audit_log_id` ON `audit_log_target` (`audit_log_id`); -CREATE INDEX `id_idx` ON `audit_log_target` (`id`); +CREATE TABLE `workspaces` ( + `id` varchar(256) NOT NULL, + `org_id` varchar(256) NOT NULL, + `name` varchar(256) NOT NULL, + `plan` enum('free','pro','enterprise') DEFAULT 'free', + `tier` varchar(256) DEFAULT 'Free', + `stripe_customer_id` varchar(256) DEFAULT NULL, + `stripe_subscription_id` varchar(256) DEFAULT NULL, + `beta_features` json NOT NULL, + `features` json NOT NULL, + `subscriptions` json DEFAULT NULL, + `enabled` tinyint(1) NOT NULL DEFAULT '1', + `delete_protection` tinyint(1) DEFAULT '0', + `created_at_m` bigint NOT NULL DEFAULT '0', + `updated_at_m` bigint DEFAULT NULL, + `deleted_at_m` bigint DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `workspaces_org_id_unique` (`org_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/go/pkg/testutil/http.go b/go/pkg/testutil/http.go index 7f74c5a5d2..7a14e3f116 100644 --- a/go/pkg/testutil/http.go +++ b/go/pkg/testutil/http.go @@ -199,6 +199,7 @@ func (h *Harness) Register(route zen.Route, middleware ...zen.Middleware) { func (h *Harness) CreateRootKey(workspaceID string, permissions ...string) string { return h.seeder.CreateRootKey(context.Background(), workspaceID, permissions...) 
} + func (h *Harness) CreateWorkspace() db.Workspace { return h.seeder.CreateWorkspace(context.Background()) } diff --git a/go/pkg/testutil/seed/seed.go b/go/pkg/testutil/seed/seed.go index dba1db3808..dd7df7df72 100644 --- a/go/pkg/testutil/seed/seed.go +++ b/go/pkg/testutil/seed/seed.go @@ -99,6 +99,8 @@ func (s *Seeder) CreateRootKey(ctx context.Context, workspaceID string, permissi Meta: sql.NullString{String: "", Valid: false}, Expires: sql.NullTime{Time: time.Time{}, Valid: false}, RemainingRequests: sql.NullInt32{Int32: 0, Valid: false}, + RefillDay: sql.NullInt16{Int16: 0, Valid: false}, + RefillAmount: sql.NullInt32{Int32: 0, Valid: false}, RatelimitAsync: sql.NullBool{Bool: false, Valid: false}, RatelimitLimit: sql.NullInt32{Int32: 0, Valid: false}, RatelimitDuration: sql.NullInt64{Int64: 0, Valid: false}, @@ -122,8 +124,7 @@ func (s *Seeder) CreateRootKey(ctx context.Context, workspaceID string, permissi mysqlErr := &mysql.MySQLError{} // nolint:exhaustruct if errors.As(err, &mysqlErr) { - // Error 1062 (23000): Duplicate entry - require.Equal(s.t, uint16(1062), mysqlErr.Number, "Unexpected MySQL error number, got %d, expected %d", mysqlErr.Number, uint16(1062)) + require.True(s.t, db.IsDuplicateKeyError(err), "Expected duplicate key error, got MySQL error number %d", mysqlErr.Number) existing, findErr := db.Query.FindPermissionByNameAndWorkspaceID(ctx, s.DB.RO(), db.FindPermissionByNameAndWorkspaceIDParams{ WorkspaceID: s.Resources.RootWorkspace.ID, Name: permission,