diff --git a/apps/dashboard/app/(app)/[workspaceSlug]/apis/[apiId]/settings/components/copy-key-space-id.tsx b/apps/dashboard/app/(app)/[workspaceSlug]/apis/[apiId]/settings/components/copy-key-space-id.tsx new file mode 100644 index 0000000000..83ef26c3fc --- /dev/null +++ b/apps/dashboard/app/(app)/[workspaceSlug]/apis/[apiId]/settings/components/copy-key-space-id.tsx @@ -0,0 +1,25 @@ +import { SettingCard } from "@unkey/ui"; +import { CopyButton } from "@unkey/ui"; + +export const CopyKeySpaceId = ({ keySpaceId }: { keySpaceId: string }) => { + return ( + Identifier for the underlying keyspace.} + border="bottom" + contentWidth="w-full lg:w-[420px] justify-end" + > + {/* TODO: make this a Code component in UI for CopyKeys with optional hidden button like in Code.*/} +
+
+
{keySpaceId}
+ +
+
+
+ ); +}; diff --git a/apps/dashboard/app/(app)/[workspaceSlug]/apis/[apiId]/settings/components/settings-client.tsx b/apps/dashboard/app/(app)/[workspaceSlug]/apis/[apiId]/settings/components/settings-client.tsx index 38cf74d965..5c03e39f3d 100644 --- a/apps/dashboard/app/(app)/[workspaceSlug]/apis/[apiId]/settings/components/settings-client.tsx +++ b/apps/dashboard/app/(app)/[workspaceSlug]/apis/[apiId]/settings/components/settings-client.tsx @@ -2,6 +2,7 @@ import { trpc } from "@/lib/trpc/client"; import { CopyApiId } from "./copy-api-id"; +import { CopyKeySpaceId } from "./copy-key-space-id"; import { DefaultBytes } from "./default-bytes"; import { DefaultPrefix } from "./default-prefix"; import { DeleteApi } from "./delete-api"; @@ -62,6 +63,7 @@ export const SettingsClient = ({ apiId }: { apiId: string }) => {
+
diff --git a/apps/dashboard/app/(app)/[workspaceSlug]/settings/root-keys/[keyId]/permissions/permissions.ts b/apps/dashboard/app/(app)/[workspaceSlug]/settings/root-keys/[keyId]/permissions/permissions.ts index 25e997738b..df05eee9dc 100644 --- a/apps/dashboard/app/(app)/[workspaceSlug]/settings/root-keys/[keyId]/permissions/permissions.ts +++ b/apps/dashboard/app/(app)/[workspaceSlug]/settings/root-keys/[keyId]/permissions/permissions.ts @@ -25,6 +25,10 @@ export const workspacePermissions = { description: "Delete apis in this workspace.", permission: "api.*.delete_api", }, + read_analytics: { + description: "Query analytics data for any API in this workspace using SQL.", + permission: "api.*.read_analytics", + }, }, Keys: { verify_key: { @@ -170,6 +174,10 @@ export function apiPermissions(apiId: string): { description: "Update this API.", permission: `api.${apiId}.update_api`, }, + read_analytics: { + description: "Query analytics data for this API using SQL.", + permission: `api.${apiId}.read_analytics`, + }, }, Keys: { verify_key: { diff --git a/apps/dashboard/app/(app)/[workspaceSlug]/settings/root-keys/components/root-key/permissions.ts b/apps/dashboard/app/(app)/[workspaceSlug]/settings/root-keys/components/root-key/permissions.ts index 25e997738b..9af32c1652 100644 --- a/apps/dashboard/app/(app)/[workspaceSlug]/settings/root-keys/components/root-key/permissions.ts +++ b/apps/dashboard/app/(app)/[workspaceSlug]/settings/root-keys/components/root-key/permissions.ts @@ -25,6 +25,10 @@ export const workspacePermissions = { description: "Delete apis in this workspace.", permission: "api.*.delete_api", }, + read_analytics: { + description: "Query analytics data for any API in this workspace using SQL.", + permission: "api.*.read_analytics", + }, }, Keys: { verify_key: { @@ -170,6 +174,10 @@ export function apiPermissions(apiId: string): { description: "Update this API.", permission: `api.${apiId}.update_api`, }, + read_analytics: { + description: "Query 
analytics data for this API using SQL.", + permission: `api.${apiId}.read_analytics`, + }, }, Keys: { verify_key: { diff --git a/apps/docs/analytics/getting-started.mdx b/apps/docs/analytics/getting-started.mdx new file mode 100644 index 0000000000..0dd6d95baf --- /dev/null +++ b/apps/docs/analytics/getting-started.mdx @@ -0,0 +1,171 @@ +--- +title: Getting Started +description: "Request access and run your first analytics query" +--- + +## Request Access + + + **Analytics is currently in private beta and available by request only.** + + +To get started: + +1. **Find your workspace ID** in the Unkey dashboard settings +2. **Email us** at [support@unkey.dev](mailto:support@unkey.dev) with: + - Your workspace ID + - Your use case (billing, dashboards, reporting, etc.) + - Expected query volume + +We'll enable analytics for your workspace and send you confirmation. + +## Authentication + +Analytics queries require a root key with analytics permissions. Create one in your dashboard: + +1. Go to **Settings** → **Root Keys** +2. Click **Create New Root Key** +3. Select permissions: `api.*.read_analytics` OR `api.<apiId>.read_analytics` +4. Copy and securely store your root key + + + Root keys have powerful permissions. Store them securely and never commit them + to version control. + + +## Your First Query + +Once you have access, execute your first analytics query using the `/v2/analytics.getVerifications` endpoint. 
+ +### Count Total Verifications + +```sql +SELECT COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +``` + +Execute this query with curl: + +```bash +curl -X POST https://api.unkey.com/v2/analytics.getVerifications \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "query": "SELECT COUNT(*) as total FROM key_verifications_v1 WHERE time >= now() - INTERVAL 7 DAY" + }' +``` + +### Break Down by Outcome + +```sql +SELECT + outcome, + COUNT(*) as count +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 24 HOUR +GROUP BY outcome +ORDER BY count DESC +``` + +Execute this query with curl: + +```bash +curl -X POST https://api.unkey.com/v2/analytics.getVerifications \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "query": "SELECT outcome, COUNT(*) as count FROM key_verifications_v1 WHERE time >= now() - INTERVAL 24 HOUR GROUP BY outcome ORDER BY count DESC" + }' +``` + +### Top Users by Usage + +```sql +SELECT + external_id, + SUM(count) as verifications +FROM key_verifications_per_day_v1 +WHERE time >= now() - INTERVAL 30 DAY +GROUP BY external_id +ORDER BY verifications DESC +LIMIT 10 +``` + +Execute this query with curl: + +```bash +curl -X POST https://api.unkey.com/v2/analytics.getVerifications \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "query": "SELECT external_id, SUM(count) as verifications FROM key_verifications_per_day_v1 WHERE time >= now() - INTERVAL 30 DAY GROUP BY external_id ORDER BY verifications DESC LIMIT 10" + }' +``` + + + **Performance tip:** For longer time ranges, use pre-aggregated tables instead of the raw table: + - `key_verifications_per_minute_v1` - For queries spanning hours + - `key_verifications_per_hour_v1` - For queries spanning days + - `key_verifications_per_day_v1` - For queries spanning weeks/months + - `key_verifications_per_month_v1` - For queries spanning years + +Use `SUM(count)` 
instead of `COUNT(*)` with aggregated tables. They scan far fewer rows and are much faster. + + + + + Check out the [Query Examples](/analytics/query-examples) page for 30+ + ready-to-use queries covering billing, monitoring, and analytics use cases. + + +## Understanding the Response + +Analytics queries return data as an array of objects: + +```json +{ + "meta": { + "requestId": "req_xxx" + }, + "data": [ + { "outcome": "VALID", "count": 1234 }, + { "outcome": "RATE_LIMITED", "count": 56 }, + { "outcome": "USAGE_EXCEEDED", "count": 12 } + ] +} +``` + +Each object in the `data` array contains fields from your SELECT clause. The field names match the column names or aliases you specified in your query. + +## Filtering by API or User + +You can filter queries to specific APIs or users. Use `key_space_id` to filter by API (find this identifier in your API settings) and `external_id` to filter by user. These fields support standard SQL operators: `=`, `!=`, `IN`, `NOT IN`, `<`, `>`, etc. + + + Queries are subject to resource limits (execution time, memory, result size, + and quota). See [Query Restrictions](/analytics/query-restrictions) for + complete details on limits and error codes. + + +## Next Steps + + + + Explore common SQL patterns for analytics and billing + + + Browse available tables, columns, and data types + + + View limits, quotas, and permissions + + diff --git a/apps/docs/analytics/overview.mdx b/apps/docs/analytics/overview.mdx index 77d2038ba9..2d5ba1ed10 100644 --- a/apps/docs/analytics/overview.mdx +++ b/apps/docs/analytics/overview.mdx @@ -1,188 +1,76 @@ --- title: Overview -description: "Unkey tracks everything for you" +description: "Query your verification data with SQL" --- - -Analytics endpoints are currently only available in our v1 API. We're working on bringing these to v2 - stay tuned for updates! 
- + + **Analytics is currently in private beta and available by request only.** + See [Getting Started](/analytics/getting-started) for access instructions. + -Consumption based billing for APIs is getting more and more popular, but it's tedious to build in house. For low frequency events, it's quite possible to emit usage events directly to Stripe or similar, but this becomes very noisy quickly. Furthermore if you want to build end-user facing or internal analytics, you need to be able to query the events from Stripe, which often does not provide the granularity required. +## What is Unkey Analytics? -Most teams end up without end-user facing analytics, or build their own system to store and query usage metrics. +Unkey Analytics provides a powerful SQL interface to query your API key verification data. Instead of building your own analytics pipeline, you can leverage Unkey's built-in data warehouse to: -Since Unkey already stores and aggregates verification events by time, outcome and identity, we can offer this data via an API. +- **Build custom dashboards** for internal teams or end-users +- **Power usage-based billing** by querying verification counts per user/organization +- **Generate reports** on API usage patterns, top users, and performance metrics +- **Monitor and alert** on verification outcomes, rate limits, and errors +## How it Works -## Available data +Every key verification request is automatically stored and aggregated across multiple time-series tables: -Unkey stores an event for every single verification, the relevent fields are described below: - -| Data | Type | Explanation | -|----------------|---------------|----------------------------------------------------------------------------------------| -| `request_id` | String | Each request has a unique id, making it possible to retrieve later. | -| `time` | Int64 | A unix milli timestamp. | -| `key_space_id` | String | Each workspace may have multiple key spaces. 
Each API you create has its own keyspace. | -| `key_id` | String | The individual key being verified. | -| `outcome` | String | The outcome of the verification. `VALID`, `RATE_LIMITED` etc. | -| `identity_id` | String | The identity connected to this key. | -| `tags` | Array(String) | Arbitrary tags you may add during the verification to filter later. | - -We can return this data aggregated by `hour`, `day`, `month`, `tag`, `tags`, `identity`, `key` and `outcome`. -As well as filter by `identity_id`, `key_space_id`, `key_id`, `tags`, `outcome`, `start` and `end` time. - -## Example - -For an internal dashboard you want to find the top 5 users of a specific endpoint. In order to let Unkey know about the endpoint, you specify it as a tag when verifying keys: - -```bash Tagging a verification {6} -curl -XPOST 'https://api.unkey.dev/v1/keys.verifyKey' \ - -H 'Content-Type: application/json' \ - -d '{ - "key": "", - "apiId": "api_", - "tags": [ "path=/my/endpoint" ], - }' +```mermaid +graph LR + A[Verify Key Request] --> B[Raw Events Table] + B --> C[Minute Aggregates] + C --> D[Hour Aggregates] + D --> E[Day Aggregates] + E --> F[Month Aggregates] ``` - -You can now query `api.unkey.dev/v1/analytics.getVerifications` via query parameters. -While we can't provide raw SQL access, we wanted to stay as close to SQL semantics as possible, so you didn't need to learn a new concept and to keep the translation layer simple. - -| Name | Value | Explanation | -|----------------|----------------------------------|--------------------------------------------------------------------------------| -| `start` | 1733749385000 | A unix milli timestamp to limit the query to a specific time frame. | -| `end` | 1736431397000 | A unix milli timestamp to limit the query to a specific time frame. | -| `apiId` | api_262b3iR7gkmP7aUyZ24uihcijsCe | The API ID to filter keys. | -| `groupBy` | identity | We're not interested in individual keys, but the user/org. 
| -| `orderBy` | total | We want to see the most active users, by how many verifications they're doing. | -| `order` | desc | We're ordering from most active to least active user. | -| `limit` | 5 | Only return the top 5. | - -Below is a curl command putting everythign together: - -```bash -curl 'https://api.unkey.dev/v1/analytics.getVerifications?start=1733749385000&end=1736431397000&apiId=api_262b3iR7gkmP7aUyZ24uihcijsCe&groupBy=identity&orderBy=total&order=desc&limit=5' \ - -H 'Content-Type: application/json' \ - -H 'Authorization: Bearer ' -``` - -You'll receive a json response with a breakdown of each outcome, per identity ordered by `total`. - - - -```json First Row -[ - { - "valid": 186, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 184, - "unauthorized": 0, - "disabled": 182, - "insufficientPermissions": 0, - "expired": 0, - "total": 552, - "apiId": "api_262b3iR7gkmP7aUyZ24uihcijsCe", - "identity": { - "id": "test_2ipPuAgat7xuVNGpK6AuPQ2Lbk11", - "externalId": "user_2rNBR4YXxKwzM8bzVrCR5q6dFlc" - } - }, - ... 
-] -``` - -```json Full Response -[ - { - "valid": 186, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 184, - "unauthorized": 0, - "disabled": 182, - "insufficientPermissions": 0, - "expired": 0, - "total": 552, - "apiId": "api_262b3iR7gkmP7aUyZ24uihcijsCe", - "identity": { - "id": "test_2ipPuAgat7xuVNGpK6AuPQ2Lbk11", - "externalId": "user_2rNBR4YXxKwzM8bzVrCR5q6dFlc" - } - }, - { - "valid": 190, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 161, - "unauthorized": 0, - "disabled": 200, - "insufficientPermissions": 0, - "expired": 0, - "total": 551, - "apiId": "api_262b3iR7gkmP7aUyZ24uihcijsCe", - "identity": { - "id": "test_2ipPuAiGJ3L3TUNKA6gp5eLeuyj7", - "externalId": "user_2rLz6cM63ZQ2v3IU0mryKbHetjK" - } - }, - { - "valid": 197, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 154, - "unauthorized": 0, - "disabled": 200, - "insufficientPermissions": 0, - "expired": 0, - "total": 551, - "apiId": "api_262b3iR7gkmP7aUyZ24uihcijsCe", - "identity": { - "id": "test_2ipPuAwJVE4Hdet3dyEpYreP8ob7", - "externalId": "user_2rLwFchrbyIDb4LUfFp4CpTG0L3" - } - }, - { - "valid": 191, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 184, - "unauthorized": 0, - "disabled": 171, - "insufficientPermissions": 0, - "expired": 0, - "total": 546, - "apiId": "api_262b3iR7gkmP7aUyZ24uihcijsCe", - "identity": { - "id": "test_2ipPuB23PVchmbkt9mMjjcpvLM8N", - "externalId": "user_2rLwCGvQKtnfnemH8HTL4cxWBFo" - } - }, - { - "valid": 207, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 171, - "unauthorized": 0, - "disabled": 162, - "insufficientPermissions": 0, - "expired": 0, - "total": 540, - "apiId": "api_262b3iR7gkmP7aUyZ24uihcijsCe", - "identity": { - "id": "test_2ipPuApEvEAXJo9UParPL6inHLLJ", - "externalId": "user_2rLDPPVfeNB2hn1ARMh2808CdwG" - } - } -] - -``` - +You can query these tables using standard SQL to: +- Aggregate verification counts by time 
period +- Group by API, user, or outcome +- Filter by region, tags, or custom criteria +- Calculate metrics for billing or monitoring + +## Available Data + +Every verification event contains: + +| Field | Type | Description | +| --------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `request_id` | String | Unique identifier for each request | +| `time` | Int64 | Unix millisecond timestamp | +| `workspace_id` | String | Your workspace identifier (automatically filtered) | +| `key_space_id` | String | Your KeySpace identifier (e.g., `ks_1234`). Find this in your API settings. | +| `external_id` | String | Your user's identifier (e.g., `user_abc`) | +| `key_id` | String | Individual key identifier | +| `outcome` | String | Verification result: `VALID`, `RATE_LIMITED`, `INVALID`, `EXPIRED`, `DISABLED`, `INSUFFICIENT_PERMISSIONS`, `FORBIDDEN`, `USAGE_EXCEEDED` | +| `region` | String | Unkey region that handled the verification | +| `tags` | Array(String) | Custom tags added during verification | +| `spent_credits` | Int64 | Number of credits spent on this verification (0 if no credits were spent) | + +## Next Steps + + + + Learn how to request access and execute your first query + + + Explore common SQL patterns for analytics and billing + + + Browse available tables, columns, and data types + + + View limits, quotas, and permissions + + diff --git a/apps/docs/analytics/query-examples.mdx b/apps/docs/analytics/query-examples.mdx new file mode 100644 index 0000000000..934ab438d4 --- /dev/null +++ b/apps/docs/analytics/query-examples.mdx @@ -0,0 +1,762 @@ +--- +title: Query Examples +description: "Common SQL patterns for analytics and billing" +--- + +This guide provides SQL query examples for common analytics scenarios covering all the use cases from the legacy API and more. 
All examples use ClickHouse SQL syntax and work with the `/v2/analytics.getVerifications` endpoint. + +## Using Queries in API Requests + +When making API requests, you need to format the SQL query as a JSON string on a single line. Here's how: + +**Readable format (for documentation):** + +```sql +SELECT COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +``` + +**JSON format (for API requests):** + +```json +{ + "query": "SELECT COUNT(*) as total FROM key_verifications_v1 WHERE time >= now() - INTERVAL 7 DAY" +} +``` + + + Each example below shows both the readable multi-line SQL and the single-line + JSON format you can copy directly into your API requests. + + +## Usage Analytics + + + + + +```sql +SELECT COUNT(*) as total_verifications +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +``` + +**JSON format:** + +```json +{ + "query": "SELECT COUNT(*) as total_verifications FROM key_verifications_v1 WHERE time >= now() - INTERVAL 7 DAY" +} +``` + + + + + +```sql +SELECT + outcome, + COUNT(*) as count +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 30 DAY +GROUP BY outcome +ORDER BY count DESC +``` + +**JSON format:** + +```json +{ + "query": "SELECT outcome, COUNT(*) as count FROM key_verifications_v1 WHERE time >= now() - INTERVAL 30 DAY GROUP BY outcome ORDER BY count DESC" +} +``` + + + + + +```sql +SELECT + time as date, + SUM(count) as verifications +FROM key_verifications_per_day_v1 +WHERE time >= now() - INTERVAL 30 DAY +GROUP BY date +ORDER BY date +``` + +**JSON format:** + +```json +{ + "query": "SELECT time as date, SUM(count) as verifications FROM key_verifications_per_day_v1 WHERE time >= now() - INTERVAL 30 DAY GROUP BY date ORDER BY date" +} +``` + + + + + +```sql +SELECT + time as hour, + outcome, + SUM(count) as verifications +FROM key_verifications_per_hour_v1 +WHERE time >= toStartOfDay(now()) +GROUP BY time, outcome +ORDER BY time, outcome +``` + +**JSON format:** + +```json +{ + 
"query": "SELECT time as hour, outcome, SUM(count) as verifications FROM key_verifications_per_hour_v1 WHERE time >= toStartOfDay(now()) GROUP BY time, outcome ORDER BY time, outcome" +} +``` + + + + + +## Usage by User + + + + + +```sql +SELECT + external_id, + COUNT(*) as total_verifications, + countIf(outcome = 'VALID') as successful, + countIf(outcome = 'RATE_LIMITED') as rate_limited +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 30 DAY + AND external_id != '' + GROUP BY external_id +ORDER BY total_verifications DESC +LIMIT 100 +``` + +**JSON format:** + +```json +{ + "query": "SELECT external_id, COUNT(*) as total_verifications, countIf(outcome = 'VALID') as successful, countIf(outcome = 'RATE_LIMITED') as rate_limited FROM key_verifications_v1 WHERE time >= now() - INTERVAL 30 DAY AND external_id != '' GROUP BY external_id ORDER BY total_verifications DESC LIMIT 100" +} +``` + + + + + +```sql +SELECT + COUNT(*) as total_verifications, + countIf(outcome = 'VALID') as successful, + countIf(outcome = 'RATE_LIMITED') as rate_limited +FROM key_verifications_v1 +WHERE external_id = 'user_123' + AND time >= now() - INTERVAL 30 DAY +``` + +**JSON format:** + +```json +{ + "query": "SELECT COUNT(*) as total_verifications, countIf(outcome = 'VALID') as successful, countIf(outcome = 'RATE_LIMITED') as rate_limited FROM key_verifications_v1 WHERE external_id = 'user_123' AND time >= now() - INTERVAL 30 DAY" +} +``` + + + + + +```sql +SELECT + external_id, + COUNT(*) as total_verifications +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 30 DAY + AND external_id != '' + GROUP BY external_id +ORDER BY total_verifications DESC +LIMIT 10 +``` + +**JSON format:** + +```json +{ + "query": "SELECT external_id, COUNT(*) as total_verifications FROM key_verifications_v1 WHERE time >= now() - INTERVAL 30 DAY AND external_id != '' GROUP BY external_id ORDER BY total_verifications DESC LIMIT 10" +} +``` + + + + + +```sql +SELECT + external_id, + toDate(time) as 
date, + COUNT(*) as verifications +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 30 DAY +GROUP BY external_id, date +ORDER BY external_id, date +``` + +**JSON format:** + +```json +{ + "query": "SELECT external_id, toDate(time) as date, COUNT(*) as verifications FROM key_verifications_v1 WHERE time >= now() - INTERVAL 30 DAY GROUP BY external_id, date ORDER BY external_id, date" +} +``` + + + + + +## API Analytics + + + + + +```sql +SELECT + key_space_id, + COUNT(*) as total_verifications, + countIf(outcome = 'VALID') as successful +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 30 DAY +GROUP BY key_space_id +ORDER BY total_verifications DESC +``` + +**JSON format:** + +```json +{ + "query": "SELECT key_space_id, COUNT(*) as total_verifications, countIf(outcome = 'VALID') as successful FROM key_verifications_v1 WHERE time >= now() - INTERVAL 30 DAY GROUP BY key_space_id ORDER BY total_verifications DESC" +} +``` + + + + + +```sql +SELECT + COUNT(*) as total_verifications, + countIf(outcome = 'VALID') as successful, + countIf(outcome = 'RATE_LIMITED') as rate_limited, + countIf(outcome = 'INVALID') as invalid +FROM key_verifications_v1 +WHERE key_space_id = 'ks_1234' + AND time >= now() - INTERVAL 30 DAY +``` + +**JSON format:** + +```json +{ + "query": "SELECT COUNT(*) as total_verifications, countIf(outcome = 'VALID') as successful, countIf(outcome = 'RATE_LIMITED') as rate_limited, countIf(outcome = 'INVALID') as invalid FROM key_verifications_v1 WHERE key_space_id = 'ks_1234' AND time >= now() - INTERVAL 30 DAY" +} +``` + + + + + +```sql +SELECT + key_space_id, + COUNT(*) as verifications, + round(countIf(outcome = 'VALID') / COUNT(*) * 100, 2) as success_rate +FROM key_verifications_v1 +WHERE key_space_id IN ('ks_1234', 'ks_5678') + AND time >= now() - INTERVAL 7 DAY +GROUP BY key_space_id +``` + +**JSON format:** + +```json +{ + "query": "SELECT key_space_id, COUNT(*) as verifications, round(countIf(outcome = 'VALID') / COUNT(*) * 100, 
2) as success_rate FROM key_verifications_v1 WHERE key_space_id IN ('ks_1234', 'ks_5678') AND time >= now() - INTERVAL 7 DAY GROUP BY key_space_id" +} +``` + + + + + +## Key Analytics + + + + + +```sql +SELECT + key_id, + COUNT(*) as total_verifications, + countIf(outcome = 'VALID') as successful +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 30 DAY +GROUP BY key_id +ORDER BY total_verifications DESC +LIMIT 100 +``` + +**JSON format:** + +```json +{ + "query": "SELECT key_id, COUNT(*) as total_verifications, countIf(outcome = 'VALID') as successful FROM key_verifications_v1 WHERE time >= now() - INTERVAL 30 DAY GROUP BY key_id ORDER BY total_verifications DESC LIMIT 100" +} +``` + + + + + +```sql +SELECT + COUNT(*) as total_verifications, + countIf(outcome = 'VALID') as successful, + countIf(outcome = 'RATE_LIMITED') as rate_limited +FROM key_verifications_v1 +WHERE key_id = 'key_1234' + AND time >= now() - INTERVAL 30 DAY +``` + +**JSON format:** + +```json +{ + "query": "SELECT COUNT(*) as total_verifications, countIf(outcome = 'VALID') as successful, countIf(outcome = 'RATE_LIMITED') as rate_limited FROM key_verifications_v1 WHERE key_id = 'key_1234' AND time >= now() - INTERVAL 30 DAY" +} +``` + + + + + +```sql +SELECT + key_id, + COUNT(*) as total_errors, + groupArray(DISTINCT outcome) as error_types +FROM key_verifications_v1 +WHERE outcome != 'VALID' + AND time >= now() - INTERVAL 7 DAY +GROUP BY key_id +ORDER BY total_errors DESC +LIMIT 20 +``` + +**JSON format:** + +```json +{ + "query": "SELECT key_id, COUNT(*) as total_errors, groupArray(DISTINCT outcome) as error_types FROM key_verifications_v1 WHERE outcome != 'VALID' AND time >= now() - INTERVAL 7 DAY GROUP BY key_id ORDER BY total_errors DESC LIMIT 20" +} +``` + + + + + +## Tag-Based Analytics + +Tags allow you to add custom metadata to verification requests for filtering and aggregation. 
+ + + + + +```sql +SELECT COUNT(*) as total +FROM key_verifications_v1 +WHERE has(tags, 'path=/api/v1/users') + AND time >= now() - INTERVAL 7 DAY +``` + +**JSON format:** + +```json +{ + "query": "SELECT COUNT(*) as total FROM key_verifications_v1 WHERE has(tags, 'path=/api/v1/users') AND time >= now() - INTERVAL 7 DAY" +} +``` + + + + + +```sql +SELECT COUNT(*) as total +FROM key_verifications_v1 +WHERE hasAny(tags, ['path=/api/v1/users', 'path=/api/v1/posts']) + AND time >= now() - INTERVAL 7 DAY +``` + +**JSON format:** + +```json +{ + "query": "SELECT COUNT(*) as total FROM key_verifications_v1 WHERE hasAny(tags, ['path=/api/v1/users', 'path=/api/v1/posts']) AND time >= now() - INTERVAL 7 DAY" +} +``` + + + + + +```sql +SELECT COUNT(*) as total +FROM key_verifications_v1 +WHERE hasAll(tags, ['environment=production', 'team=backend']) + AND time >= now() - INTERVAL 7 DAY +``` + +**JSON format:** + +```json +{ + "query": "SELECT COUNT(*) as total FROM key_verifications_v1 WHERE hasAll(tags, ['environment=production', 'team=backend']) AND time >= now() - INTERVAL 7 DAY" +} +``` + + + + + +```sql +SELECT + arrayJoin(tags) as tag, + COUNT(*) as verifications +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +GROUP BY tag +ORDER BY verifications DESC +LIMIT 20 +``` + +**JSON format:** + +```json +{ + "query": "SELECT arrayJoin(tags) as tag, COUNT(*) as verifications FROM key_verifications_v1 WHERE time >= now() - INTERVAL 7 DAY GROUP BY tag ORDER BY verifications DESC LIMIT 20" +} +``` + + + + + +```sql +SELECT + arrayJoin(arrayFilter(x -> startsWith(x, 'path='), tags)) as endpoint, + COUNT(*) as requests +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 24 HOUR +GROUP BY endpoint +ORDER BY requests DESC +``` + +**JSON format:** + +```json +{ + "query": "SELECT arrayJoin(arrayFilter(x -> startsWith(x, 'path='), tags)) as endpoint, COUNT(*) as requests FROM key_verifications_v1 WHERE time >= now() - INTERVAL 24 HOUR GROUP BY endpoint ORDER BY 
requests DESC" +} +``` + + + + + +## Billing & Usage-Based Pricing + + + + + +```sql +SELECT + external_id, + toStartOfMonth(time) as month, + SUM(spent_credits) as total_credits +FROM key_verifications_v1 +WHERE external_id != '' + AND time >= toStartOfMonth(now()) +GROUP BY external_id, month +ORDER BY total_credits DESC +``` + +**JSON format:** + +```json +{ + "query": "SELECT external_id, toStartOfMonth(time) as month, SUM(spent_credits) as total_credits FROM key_verifications_v1 WHERE external_id != '' AND time >= toStartOfMonth(now()) GROUP BY external_id, month ORDER BY total_credits DESC" +} +``` + + + + + +```sql +SELECT + external_id, + SUM(spent_credits) as credits_this_period +FROM key_verifications_v1 +WHERE external_id = 'user_123' + AND time >= 1704067200000 -- Start of billing period (Unix millis) + AND time < 1706745600000 -- End of billing period (Unix millis) +GROUP BY external_id +``` + +**JSON format:** + +```json +{ + "query": "SELECT external_id, SUM(spent_credits) as credits_this_period FROM key_verifications_v1 WHERE external_id = 'user_123' AND time >= 1704067200000 AND time < 1706745600000 GROUP BY external_id" +} +``` + + + + + +```sql +SELECT + external_id, + SUM(spent_credits) as total_credits, + CASE + WHEN total_credits <= 1000 THEN 'free' + WHEN total_credits <= 10000 THEN 'starter' + WHEN total_credits <= 100000 THEN 'pro' + ELSE 'enterprise' + END as tier +FROM key_verifications_v1 +WHERE time >= toStartOfMonth(now()) + AND external_id = 'user_123' +GROUP BY external_id +``` + +**JSON format:** + +```json +{ + "query": "SELECT external_id, SUM(spent_credits) as total_credits, CASE WHEN total_credits <= 1000 THEN 'free' WHEN total_credits <= 10000 THEN 'starter' WHEN total_credits <= 100000 THEN 'pro' ELSE 'enterprise' END as tier FROM key_verifications_v1 WHERE time >= toStartOfMonth(now()) AND external_id = 'user_123' GROUP BY external_id" +} +``` + + + + + +```sql +SELECT + toDate(time) as date, + SUM(spent_credits) as 
credits_used, + credits_used * 0.001 as estimated_cost -- $0.001 per credit +FROM key_verifications_v1 +WHERE external_id = 'user_123' + AND time >= now() - INTERVAL 30 DAY +GROUP BY date +ORDER BY date +``` + +**JSON format:** + +```json +{ + "query": "SELECT toDate(time) as date, SUM(spent_credits) as credits_used, credits_used * 0.001 as estimated_cost FROM key_verifications_v1 WHERE external_id = 'user_123' AND time >= now() - INTERVAL 30 DAY GROUP BY date ORDER BY date" +} +``` + + + + + +## Advanced Queries + + + + + +```sql +WITH first_seen AS ( + SELECT + external_id, + min(time) as first_verification + FROM key_verifications_v1 + WHERE external_id != '' + GROUP BY external_id +) +SELECT + toDate(kv.time) as date, + countIf(kv.time = fs.first_verification) as new_users, + countIf(kv.time > fs.first_verification) as returning_users +FROM key_verifications_v1 kv +JOIN first_seen fs ON kv.external_id = fs.external_id +WHERE kv.time >= now() - INTERVAL 30 DAY +GROUP BY date +ORDER BY date +``` + +**JSON format:** + +```json +{ + "query": "WITH first_seen AS ( SELECT external_id, min(time) as first_verification FROM key_verifications_v1 WHERE external_id != '' GROUP BY external_id ) SELECT toDate(kv.time) as date, countIf(kv.time = fs.first_verification) as new_users, countIf(kv.time > fs.first_verification) as returning_users FROM key_verifications_v1 kv JOIN first_seen fs ON kv.external_id = fs.external_id WHERE kv.time >= now() - INTERVAL 30 DAY GROUP BY date ORDER BY date" +} +``` + + + + + +```sql +SELECT + date, + verifications, + avg(verifications) OVER ( + ORDER BY date + ROWS BETWEEN 6 PRECEDING AND CURRENT ROW + ) as moving_avg_7d +FROM ( + SELECT + time as date, + SUM(count) as verifications + FROM key_verifications_per_day_v1 + WHERE time >= now() - INTERVAL 60 DAY + GROUP BY date +) +ORDER BY date +``` + +**JSON format:** + +```json +{ + "query": "SELECT date, verifications, avg(verifications) OVER ( ORDER BY date ROWS BETWEEN 6 PRECEDING AND 
CURRENT ROW ) as moving_avg_7d FROM ( SELECT time as date, SUM(count) as verifications FROM key_verifications_per_day_v1 WHERE time >= now() - INTERVAL 60 DAY GROUP BY date ) ORDER BY date" +} +``` + + + + + +## Using Aggregated Tables + +For better performance on large time ranges, use pre-aggregated tables: + + + + + +```sql +SELECT + time, + SUM(count) as total +FROM key_verifications_per_hour_v1 +WHERE time >= toStartOfHour(now() - INTERVAL 7 DAY) +GROUP BY time +ORDER BY time +``` + +**JSON format:** + +```json +{ + "query": "SELECT time, SUM(count) as total FROM key_verifications_per_hour_v1 WHERE time >= toStartOfHour(now() - INTERVAL 7 DAY) GROUP BY time ORDER BY time" +} +``` + + + + + +```sql +SELECT + time, + SUM(count) as total +FROM key_verifications_per_day_v1 +WHERE time >= toStartOfDay(now() - INTERVAL 30 DAY) +GROUP BY time +ORDER BY time +``` + +**JSON format:** + +```json +{ + "query": "SELECT time, SUM(count) as total FROM key_verifications_per_day_v1 WHERE time >= toStartOfDay(now() - INTERVAL 30 DAY) GROUP BY time ORDER BY time" +} +``` + + + + + +```sql +SELECT + time, + SUM(count) as total +FROM key_verifications_per_month_v1 +WHERE time >= toStartOfMonth(now() - INTERVAL 12 MONTH) +GROUP BY time +ORDER BY time +``` + +**JSON format:** + +```json +{ + "query": "SELECT time, SUM(count) as total FROM key_verifications_per_month_v1 WHERE time >= toStartOfMonth(now() - INTERVAL 12 MONTH) GROUP BY time ORDER BY time" +} +``` + + + + + +## Tips for Efficient Queries + +1. **Always filter by time** - Use indexes by including time filters +2. **Use aggregated tables** - Hourly/daily/monthly tables for longer ranges +3. **Add LIMIT clauses** - Prevent returning too much data +4. 
**Filter before grouping** - Use WHERE instead of HAVING when possible + +## Next Steps + + + + Browse available tables, columns, and data types + + + View limits, quotas, and permissions + + + Explore ClickHouse SQL functions (external link) + + diff --git a/apps/docs/analytics/query-restrictions.mdx b/apps/docs/analytics/query-restrictions.mdx new file mode 100644 index 0000000000..23bd43e8c8 --- /dev/null +++ b/apps/docs/analytics/query-restrictions.mdx @@ -0,0 +1,167 @@ +--- +title: Query Restrictions +description: "Limits, quotas, and permissions for analytics queries" +--- + +This page explains the restrictions, resource limits, and permissions for analytics queries. + +## Workspace Isolation + +Every query is automatically scoped to your workspace. You can only access your own verification data - it's impossible to view data from other workspaces. + +## Query Restrictions + +### Only SELECT Allowed + +Only `SELECT` queries are permitted. All other SQL statement types return a `query_not_supported` error. + +**Allowed query patterns:** + +- `SELECT` statements +- `WITH` (Common Table Expressions) +- `UNION` +- Subqueries +- Joins +- Aggregations +- Window functions + +**Not allowed:** `INSERT`, `UPDATE`, `DELETE`, `DROP`, `ALTER`, `CREATE`, `TRUNCATE`, `GRANT`, `REVOKE` + +### Table Access Control + +Only explicitly allowed analytics tables are accessible. Any attempt to access tables not on the allow list (including `system.*` or `information_schema.*`) will return an `invalid_table` error. + +### Function Allow List + +Only explicitly approved functions are allowed. Any function not on this list will be rejected with an `invalid_function` error. 
+ +#### Allowed Functions + + + + +`count`, `sum`, `avg`, `min`, `max`, `any`, `groupArray`, `groupUniqArray`, `uniq`, `uniqExact`, `quantile`, `countIf` + + + +`now`, `now64`, `today`, `toDate`, `toDateTime`, `toDateTime64`, `toStartOfDay`, `toStartOfWeek`, `toStartOfMonth`, `toStartOfYear`, `toStartOfHour`, `toStartOfMinute`, `date_trunc`, `formatDateTime`, `fromUnixTimestamp64Milli`, `toUnixTimestamp64Milli`, `toIntervalDay`, `toIntervalWeek`, `toIntervalMonth`, `toIntervalYear`, `toIntervalHour`, `toIntervalMinute`, `toIntervalSecond` + + + +`lower`, `upper`, `substring`, `concat`, `length`, `trim`, `startsWith`, `endsWith` + + + +`round`, `floor`, `ceil`, `abs` + + + +`if`, `case`, `coalesce` + + + +`toString`, `toInt32`, `toInt64`, `toFloat64` + + + +`has`, `hasAny`, `hasAll`, `arrayJoin`, `arrayFilter`, `length` + + + + + + If you need a function that's not listed, please contact us at + [support@unkey.dev](mailto:support@unkey.dev) and we'll review it for + inclusion. + + +## Resource Limits + +To ensure fair usage and prevent abuse, queries are subject to resource limits: + +### Execution Limits + +| Resource | Limit | Purpose | +| ------------------------------- | --------------------- | ----------------------------- | +| Max execution time | 30 seconds | Prevent long-running queries | +| Max execution time (per window) | 1800 seconds (30 min) | Total execution time per hour | +| Max memory usage | 1 GB | Prevent memory exhaustion | +| Max rows to read | 10 million | Limit data scanned | +| Max result rows | 10 million | Limit result set size | + +### Query Quotas + +| Quota | Limit | Window | +| --------------------- | ----- | -------- | +| Queries per workspace | 1000 | Per hour | + + + If you need higher limits for your use case, please contact us at + [support@unkey.dev](mailto:support@unkey.dev). 
+ + +### Error Codes + +When limits are exceeded, you'll receive specific error codes: + +| Error Code | Description | Solution | +| ----------------------------- | --------------------------------- | ---------------------------------------------------------- | +| `query_execution_timeout` | Query took longer than 30 seconds | Add more filters, reduce time range, use aggregated tables | +| `query_memory_limit_exceeded` | Query used more than 1GB memory | Reduce result set size, add LIMIT clause, use aggregation | +| `query_rows_limit_exceeded` | Query scanned more than 10M rows | Add time filters, use aggregated tables (hour/day/month) | +| `query_quota_exceeded` | Exceeded 1000 queries per hour | Wait for quota to reset, optimize query frequency | + +## Authentication + +Analytics queries require a root key with specific permissions: + +### Required Permissions + +You need to grant analytics access for per API or for all APIs: + +**Workspace-level access** (all APIs): + +``` +api.*.read_analytics +``` + +**Per-API access** (specific API): + +``` +api..read_analytics +``` + +Choose workspace-level for broad access or per-API for fine-grained control. + +### Root Key Best Practices + +1. **Use environment variables** - Never hardcode root keys +2. **Rotate keys regularly** - Create new keys and revoke old ones +3. **Limit permissions** - Only grant `read_analytics` permission +4. **Use separate keys** - Different keys for different services +5. **Monitor usage** - Track which keys are making queries + + + See [Getting Started](/analytics/getting-started) for step-by-step instructions on creating an analytics root key. 
+ + +## Next Steps + + + + Create a root key and run your first query + + + Explore common query patterns + + + Browse available tables and columns + + + View all error codes and responses + + diff --git a/apps/docs/analytics/quickstarts.mdx b/apps/docs/analytics/quickstarts.mdx deleted file mode 100644 index 8564411dc4..0000000000 --- a/apps/docs/analytics/quickstarts.mdx +++ /dev/null @@ -1,459 +0,0 @@ ---- -title: Quickstarts -description: Power your own dashboard, reports or usage-based billing ---- - - -Analytics endpoints are currently only available in our v1 API. We're working on bringing these to v2 - stay tuned for updates! - - - - -These scenarios should give you a good starting point to understand what is possible and what you need to do. -They are in no particular order and don't build upon each other. - - -We are using cURL here for demo purposes, but you can use any of our [SDKs](/libraries) for this as well. - - - - Almost all query parameters can be combined to build powerful queries. - - If you run into issues or something doesn't seem possible, please get in touch, so we can figure it out together: support@unkey.dev - - - -Detailed explanations about each individual parameter can be found in the [api-reference](/api-reference/analytics/get_verifications). - - -## User's usage over the past 24h - - -Assuming you have an identity with `externalId=user_123` and an API with `apiId=api_123`. - - -```bash -curl 'https://api.unkey.dev/v1/analytics.getVerifications?start=1736673687000&end=1736760087000&externalId=user_123&groupBy=hour&apiId=api_123' \ - -H 'Authorization: Bearer unkey_XXX' -``` - -This will return 24 elements, one per hour over the last 24h. Each element tells you about the outcomes of verifications in that interval. 
- -```json -[ - { - "time": 1736672400000, - "valid": 15125, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 1225, - "rateLimited": 0, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 16350 - }, - { - "time": 1736676000000, - "valid": 765, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 0, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 765 - }, - ... 21 elements omited - { - "time": 1736755200000, - "valid": 20016, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 0, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 51, - "total": 20067 - } -] -``` - -## Daily usage of a user per key in the last month - -Assuming you have an identity with `externalId=user_123` and an API with `apiId=api_123`. - - -```bash -curl 'https://api.unkey.dev/v1/analytics.getVerifications?start=1734168087000&end=1736760087000&externalId=user_123&groupBy=key&groupBy=day&apiId=api_123' \ - -H 'Authorization: Bearer unkey_XXX' -``` - -This returns 1 element per active key per day and includes the keyId. - -```json -[ - // ... - { - "time": 1736726400000, - "valid": 13, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 10, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 23, - "keyId": "key_2zeYsLbpULnEUsvYeFGMeJzACp4j" - }, - { - "time": 1736726400000, - "valid": 5, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 6, - "rateLimited": 0, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 11, - "keyId": "key_2zeViCGkJpu5zQ8G12jcBoXWy4KH" - } -] -``` - -## Total usage per month for an identity - - -Assuming you have an identity with `externalId=user_123` and an API with `apiId=api_123`. -You should set your `start` to the beginning of the month and `end` to now or end of the month. 
- -```bash -curl 'https://api.unkey.dev/v1/analytics.getVerifications?start=1734168087000&end=1736760087000&externalId=user_123&groupBy=month&apiId=api_123' \ - -H 'Authorization: Bearer unkey_XXX' -``` - -This returns one element per month. - -```json -[ - { - "time": 1733011200000, - "valid": 1356136098, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 925255, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 1357061353 - } -] -``` - - -## Showing usage in the current billing period - -If you want to show a guage or similar to your user about their consumption in the current billing period. - - -```bash -curl 'https://api.unkey.dev/v1/analytics.getVerifications?start=1734168087000&end=1736760087000&externalId=user_123&groupBy=day&apiId=api_123' \ - -H 'Authorization: Bearer unkey_XXX' -``` - -This will return one element per day, which you can either display in a chart, or sum up to have a total value. - - -```json -[ - // ... - { - "time": 1736553600000, - "valid": 98267, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 6816, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 105083 - }, - { - "time": 1736640000000, - "valid": 20125, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 2525, - "unauthorized": 0, - "disabled": 6261, - "insufficientPermissions": 0, - "expired": 0, - "total": 28911 - } -] -``` - -## Internal dashboard showing top 10 users by API usage over the past 30 days - - - -```bash -curl 'https://api.unkey.dev/v1/analytics.getVerifications?start=1734168087000&end=1736760087000&orderBy=total&order=desc&limit=10&groupBy=identity&apiId=api_123' \ - -H 'Authorization: Bearer unkey_XXX' -``` - -Returns 10 elements, ordered from most total verifications to least. Each element includes the `identityId` as well as the `externalId` for your reference. 
- -```json -[ - { - "identity": { "id": "id_123", "externalId": "user_123"}, - "valid": 54, - "notFound": 0, - "forbidden": 3, - "usageExceeded": 6, - "rateLimited": 10, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 73 - }, - { - "identity": { "id": "id_456", "externalId": "user_6dg"}, - "valid": 24, - "notFound": 0, - "forbidden": 1, - "usageExceeded": 32, - "rateLimited": 10, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 67 - }, - ... -] - -``` - - -## Filter by tags - - -Find out how many verifications were done, where the tag `myTag` was specified. -You can combine this with other parameters to group by days for example. - - - You can provide multiple tags by providing them as separate query paramters: `?tag=myTag&tag=myOthertag`. - Filtering multiple tags is a logical `OR`. The result includes all verifications where at least one of the filtered tags was specified. - - - -```bash -curl 'https://api.unkey.dev/v1/analytics.getVerifications?start=1734168087000&end=1736760087000&tag=myTag&apiId=api_123' \ - -H 'Authorization: Bearer unkey_XXX' -``` - -This returns 1 element, a sum of all verifications in the selected time, where the tag `myTag` was specified. - -```json - -[ - { - "valid": 5, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 0, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 5 - } -] -``` - -## Filter by key - - -This only includes verifications of a specific key. You can provide multiple keyIds to filter verifications of any one of those keys. 
- -```bash -curl 'https://api.unkey.dev/v1/analytics.getVerifications?start=1734168087000&end=1736760087000&keyId=key_123&apiId=api_123' \ - -H 'Authorization: Bearer unkey_XXX' -``` - -```json -[ - { - "valid": 14, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 10, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 24 - } -] -``` - - -## Grouping by tags - -To understand usage across your tags, you can group by tags, breaking down all verifications and summing them up per tag combination. - - - Note this is plural: `&groupBy=tags`. - - -```bash -curl 'https://api.unkey.dev/v1/analytics.getVerifications?start=1734168087000&end=1736760087000&groupBy=tags&apiId=api_123' \ - -H 'Authorization: Bearer unkey_XXX' -``` - -You'll receive an array of elements. Each element corresponds to one tag combination. - -```json -[ - { - "valid": 50, - "notFound": 0, - "forbidden": 3, - "usageExceeded": 6, - "rateLimited": 10, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 69, - "tags": [] // these did not have tags specified - }, - { - "valid": 1, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 0, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 1, - "tags": [ - "a", - "b" - ] - }, - { - "valid": 2, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 0, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 2, - "tags": [ - "a", - "c" - ] - }, - { - "valid": 2, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 0, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 2, - "tags": [ - "a" - ] - } -] -``` - -## Breakdown by individual tag - - -If you want to see usage for an individual tag, regardless of combination with other tags, you can group by tag. 
- - - Note this is singular `&groupBy=tag`. - - -```bash -curl 'https://api.unkey.dev/v1/analytics.getVerifications?start=1734168087000&end=1736760087000&groupBy=tag&apiId=api_123' \ - -H 'Authorization: Bearer unkey_XXX' -``` - -You'll receive one element per unique tag. - - -```json -[ - { - "valid": 1, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 0, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 1, - "tag": "b" - }, - { - "valid": 2, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 0, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 2, - "tag": "c" - }, - { - "valid": 5, - "notFound": 0, - "forbidden": 0, - "usageExceeded": 0, - "rateLimited": 0, - "unauthorized": 0, - "disabled": 0, - "insufficientPermissions": 0, - "expired": 0, - "total": 5, - "tag": "a" - } -] -``` diff --git a/apps/docs/analytics/schema-reference.mdx b/apps/docs/analytics/schema-reference.mdx new file mode 100644 index 0000000000..35137faefc --- /dev/null +++ b/apps/docs/analytics/schema-reference.mdx @@ -0,0 +1,322 @@ +--- +title: Schema Reference +description: "Tables, columns, and data types in Unkey Analytics" +--- + +Unkey Analytics stores verification events across multiple time-series tables for efficient querying. This reference documents all available tables and their columns. + + + Use aggregated tables (`per_hour`, `per_day`, `per_month`) for queries spanning long time periods + to improve performance. + + +## Raw Events Table + +The `key_verifications_v1` table contains individual verification events as they occur. 
+ +### Columns + +| Column | Type | Description | +| --------------- | ------------- | ------------------------------------------------------------------------------------------------------- | +| `request_id` | String | Unique identifier for each verification request | +| `time` | Int64 | Unix timestamp in milliseconds when verification occurred | +| `workspace_id` | String | Workspace identifier (automatically filtered to your workspace) | +| `key_space_id` | String | Your KeySpace identifier (e.g., `ks_1234`) - use this to filter by API. Find this in your API settings. | +| `external_id` | String | Your user's identifier (e.g., `user_abc`) - use this to filter by user | +| `key_id` | String | Individual API key identifier | +| `outcome` | String | Verification result (see [Outcome Values](#outcome-values)) | +| `region` | String | Unkey region that handled the verification | +| `tags` | Array(String) | Custom tags added during verification | +| `spent_credits` | Int64 | Number of credits spent on this verification (0 if no credits were spent) | + +### Outcome Values + +The `outcome` column contains one of these values: + +| Outcome | Description | +| -------------------------- | --------------------------------------- | +| `VALID` | Key is valid and verification succeeded | +| `RATE_LIMITED` | Verification exceeded rate limit | +| `INVALID` | Key not found or malformed | +| `EXPIRED` | Key has expired | +| `DISABLED` | Key is disabled | +| `INSUFFICIENT_PERMISSIONS` | Key lacks required permissions | +| `FORBIDDEN` | Operation not allowed for this key | +| `USAGE_EXCEEDED` | Key has exceeded usage limit | + +## Aggregated Tables + +Pre-aggregated tables provide better query performance for long time ranges. Each aggregated table includes outcome counts. 
+ +### Per Minute Table + +`key_verifications_per_minute_v1` - Aggregated by minute + +| Column | Type | Description | +| -------------------------------- | -------- | ----------------------------------------------- | +| `time` | DateTime | Timestamp (DateTime for minute/hour, Date for day/month) | +| `workspace_id` | String | Workspace identifier | +| `key_space_id` | String | API identifier | +| `external_id` | String | Your user identifier | +| `key_id` | String | API key identifier | +| `outcome` | String | Verification outcome (VALID, RATE_LIMITED, INVALID, etc.) | +| `tags` | Array | Tags associated with verifications | +| `count` | UInt64 | Total verification count for this aggregation | +| `spent_credits` | UInt64 | Total credits spent | + +### Per Hour Table + +`key_verifications_per_hour_v1` - Aggregated by hour. Same columns as per-minute table. + +### Per Day Table + +`key_verifications_per_day_v1` - Aggregated by day. Same columns as per-minute table. + +### Per Month Table + +`key_verifications_per_month_v1` - Aggregated by month. Same columns as per-minute table. + +## Filtering by API and User + +You can use your familiar identifiers directly in queries: + +- **`key_space_id`** - Your API identifier (e.g., `ks_1234`). Find this in your API settings. +- **`external_id`** - Your user identifiers (e.g., `user_abc123`) from your application + +All standard comparison operators are supported: `=`, `!=`, `<`, `>`, `<=`, `>=`, `IN`, `NOT IN` + +### Filter by API + +```sql +SELECT COUNT(*) FROM key_verifications_v1 +WHERE key_space_id = 'ks_1234' +``` + +### Filter by User + +```sql +SELECT COUNT(*) FROM key_verifications_v1 +WHERE external_id = 'user_abc123' +``` + +### Multiple Values + +```sql +SELECT COUNT(*) FROM key_verifications_v1 +WHERE key_space_id IN ('ks_1234', 'ks_5678') + AND external_id IN ('user_abc', 'user_xyz') +``` + +## Working with Tags + +Tags are stored as `Array(String)` and require array functions to query. 
+ +### Check if tag exists + +```sql +SELECT COUNT(*) FROM key_verifications_v1 +WHERE has(tags, 'path=/api/users') +``` + +### Check if any tag exists + +```sql +SELECT COUNT(*) FROM key_verifications_v1 +WHERE hasAny(tags, ['environment=prod', 'environment=staging']) +``` + +### Check if all tags exist + +```sql +SELECT COUNT(*) FROM key_verifications_v1 +WHERE hasAll(tags, ['environment=production', 'team=backend']) +``` + +### Extract and group by tags + +```sql +SELECT + arrayJoin(tags) as tag, + COUNT(*) as count +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 24 HOUR +GROUP BY tag +ORDER BY count DESC +``` + +### Filter tags with pattern + +```sql +-- Get all tags starting with "path=" +SELECT + arrayJoin(arrayFilter(x -> startsWith(x, 'path='), tags)) as path, + COUNT(*) as requests +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 24 HOUR +GROUP BY path +``` + +## Time Functions + +Timestamps are stored differently depending on the table: + +- **Raw table (`key_verifications_v1`)**: `time` is `Int64` (Unix milliseconds) +- **Aggregated tables**: `time` is `DateTime` + +### Current Time + +```sql +SELECT now() as current_datetime +SELECT toUnixTimestamp(now()) * 1000 as current_millis +``` + +### Time Ranges (Raw Table) + +For the raw `key_verifications_v1` table, compare `time` with millisecond timestamps: + +```sql +-- Last hour +WHERE time >= toUnixTimestamp(now() - INTERVAL 1 HOUR) * 1000 + +-- Last 24 hours +WHERE time >= toUnixTimestamp(now() - INTERVAL 24 HOUR) * 1000 + +-- Last 7 days +WHERE time >= toUnixTimestamp(now() - INTERVAL 7 DAY) * 1000 + +-- Last 30 days +WHERE time >= toUnixTimestamp(now() - INTERVAL 30 DAY) * 1000 + +-- This month +WHERE time >= toUnixTimestamp(toStartOfMonth(now())) * 1000 + +-- Today +WHERE time >= toUnixTimestamp(toStartOfDay(now())) * 1000 +``` + +### Time Ranges (Aggregated Tables) + +For aggregated tables, use DateTime comparisons directly: + +```sql +-- Last 7 days +WHERE time >= now() - 
INTERVAL 7 DAY
+
+-- This month
+WHERE time >= toStartOfMonth(now())
+
+-- Today
+WHERE time >= toStartOfDay(now())
+```
+
+### Time Rounding (Raw Table)
+
+```sql
+-- Round to start of hour
+SELECT toStartOfHour(toDateTime(time / 1000)) as hour
+
+-- Round to start of day
+SELECT toStartOfDay(toDateTime(time / 1000)) as day
+
+-- Round to start of month
+SELECT toStartOfMonth(toDateTime(time / 1000)) as month
+
+-- Convert to date
+SELECT toDate(toDateTime(time / 1000)) as date
+```
+
+### Specific Date Ranges
+
+```sql
+-- Between specific dates (Unix milliseconds)
+WHERE time >= 1704067200000 -- Jan 1, 2024 00:00:00 UTC
+  AND time < 1735689600000 -- Jan 1, 2025 00:00:00 UTC
+```
+
+## Common ClickHouse Functions
+
+### Aggregate Functions
+
+| Function    | Description       | Example                                                     |
+| ----------- | ----------------- | ----------------------------------------------------------- |
+| `COUNT()`   | Count rows        | `SELECT COUNT(*) FROM key_verifications_v1`                 |
+| `SUM()`     | Sum values        | `SELECT SUM(count) FROM key_verifications_per_day_v1`       |
+| `AVG()`     | Average           | `SELECT AVG(spent_credits) FROM key_verifications_v1`       |
+| `MIN()`     | Minimum value     | `SELECT MIN(time) FROM key_verifications_v1`                |
+| `MAX()`     | Maximum value     | `SELECT MAX(time) FROM key_verifications_v1`                |
+| `countIf()` | Conditional count | `SELECT countIf(outcome = 'VALID')`                         |
+| `uniq()`    | Count distinct    | `SELECT uniq(key_id) FROM key_verifications_v1`             |
+
+### String Functions
+
+| Function       | Description          | Example                               |
+| -------------- | -------------------- | ------------------------------------- |
+| `lower()`      | Convert to lowercase | `WHERE lower(outcome) = 'valid'`      |
+| `upper()`      | Convert to uppercase | `WHERE upper(region) = 'US-EAST-1'`   |
+| `concat()`     | Concatenate strings  | `SELECT concat(region, '-', outcome)` |
+| `substring()`  | Extract substring    | `SELECT substring(key_id, 1, 8)`      |
+| `startsWith()` | Check prefix         | `WHERE startsWith(key_id, 'key_')`    |
+
+### Array Functions
+
+| Function        | 
Description        | Example                                              |
+| --------------- | ------------------ | ---------------------------------------------------- |
+| `has()`         | Check element      | `WHERE has(tags, 'environment=production')`          |
+| `hasAny()`      | Check any element  | `WHERE hasAny(tags, ['team=backend', 'team=api'])`   |
+| `hasAll()`      | Check all elements | `WHERE hasAll(tags, ['environment=prod', 'tier=1'])` |
+| `arrayJoin()`   | Expand array       | `SELECT arrayJoin(tags) as tag`                      |
+| `arrayFilter()` | Filter array       | `arrayFilter(x -> startsWith(x, 'path='), tags)`     |
+| `length()`      | Array length       | `WHERE length(tags) > 0`                             |
+
+### Math Functions
+
+| Function | Description    | Example                                                    |
+| --------- | -------------- | ---------------------------------------------------------- |
+| `round()` | Round number   | `SELECT round(AVG(spent_credits), 2)`                      |
+| `floor()` | Round down     | `SELECT floor(spent_credits / 100) * 100 as credit_bucket` |
+| `ceil()`  | Round up       | `SELECT ceil(spent_credits)`                               |
+| `abs()`   | Absolute value | `SELECT abs(difference)`                                   |
+
+### Conditional Functions
+
+| Function | Description     | Example                                                         |
+| -------- | --------------- | --------------------------------------------------------------- |
+| `if()`   | If-then-else    | `SELECT if(outcome = 'VALID', 1, 0)`                            |
+| `CASE`   | Multi-condition | `CASE WHEN outcome = 'VALID' THEN 'success' ELSE 'failure' END` |
+
+## Performance Tips
+
+1. **Always filter by time** - Use time-based WHERE clauses to leverage indexes
+2. **Use aggregated tables** - Query hourly/daily/monthly tables for long ranges
+3. **Limit result sets** - Add LIMIT clauses to prevent large results
+4. **Filter before grouping** - Use WHERE instead of HAVING when possible
+5. 
**Avoid SELECT \*** - Only select columns you need + +## Query Limits + +| Resource | Limit | Error Code | +| ---------------- | ---------- | ----------------------------- | +| Execution time | 30 seconds | `query_execution_timeout` | +| Memory usage | 1 GB | `query_memory_limit_exceeded` | +| Rows to read | 10 million | `query_rows_limit_exceeded` | +| Queries per hour | 1000 | `query_quota_exceeded` | + +See [Query Restrictions](/analytics/query-restrictions) for more details on query limits and restrictions. + +## Next Steps + + + + Explore common SQL patterns for analytics + + + View limits, quotas, and permissions + + + Browse ClickHouse SQL reference (external) + + diff --git a/apps/docs/apis/features/analytics.mdx b/apps/docs/apis/features/analytics.mdx index ef3e92a396..8abb9e6103 100644 --- a/apps/docs/apis/features/analytics.mdx +++ b/apps/docs/apis/features/analytics.mdx @@ -5,6 +5,10 @@ description: 'Per key and per API analytics' Unkey offers both per key and per API analytics that allow you to drive business decisions. + + We're currently working on a v2 API for analytics with advanced SQL querying capabilities. This feature is opt-in only and not yet publicly available. + + ## Per API Analytics Our per API analytics offer a broad overview of the usage for a specific API with total keys, active keys and verifications in the last 30 days. @@ -13,7 +17,6 @@ Our per API analytics offer a broad overview of the usage for a specific API wit Per API Analytics - ## Per Key Analytics Our per key analytics give you a deep dive into each individual key, giving usage data, key data and where the requests originated from. This data can be useful for finding your top users, and where verifications are coming from. @@ -22,7 +25,6 @@ Our per key analytics give you a deep dive into each individual key, giving usag Per key analytics - ## Tags You can add tags to verification requests to aggregate or filter data when querying. 
@@ -75,6 +77,4 @@ curl --request POST \ ### Querying tags -We have only rolled out tag ingestion so far to allow you to start recording data as early as possible. - -We're working on new query capabilities including filtering and aggregating by tags. +Tags can be used to filter and aggregate data in your analytics. diff --git a/apps/docs/docs.json b/apps/docs/docs.json index c0d0249640..0f1c806bd8 100644 --- a/apps/docs/docs.json +++ b/apps/docs/docs.json @@ -1,18 +1,48 @@ { "$schema": "https://mintlify.com/docs.json", - "theme": "maple", - "name": "Unkey Docs", + "background": { + "color": { + "dark": "#0c0a09", + "light": "#fafaf9" + } + }, "colors": { - "primary": "#09090b", + "dark": "#18181b", "light": "#EAE6E0", - "dark": "#18181b" + "primary": "#09090b" }, "favicon": "/unkey.png", + "footer": { + "socials": { + "github": "https://github.com/unkeyed/unkey", + "x": "https://x.com/unkeydev" + } + }, + "name": "Unkey Docs", + "navbar": { + "links": [ + { + "href": "https://unkey.com/blog", + "label": "Blog \u0026 Tutorials" + }, + { + "href": "https://unkey.com/discord", + "label": "Discord" + }, + { + "href": "https://app.unkey.com", + "label": "Dashboard" + } + ], + "primary": { + "href": "https://github.com/unkeyed/unkey", + "type": "github" + } + }, "navigation": { "dropdowns": [ { "dropdown": "Documentation", - "icon": "book-open", "groups": [ { "group": "Unkey", @@ -139,8 +169,15 @@ }, { "group": "Analytics", + "hidden": true, "icon": "chart-bar", - "pages": ["analytics/overview", "analytics/quickstarts"] + "pages": [ + "analytics/overview", + "analytics/getting-started", + "analytics/query-examples", + "analytics/schema-reference", + "analytics/query-restrictions" + ] } ] }, @@ -148,8 +185,8 @@ "group": "Migrations", "pages": [ { - "icon": "plane", "group": "Migrating API Keys", + "icon": "plane", "pages": ["migrations/introduction", "migrations/keys"] } ] @@ -167,6 +204,7 @@ "pages": [ "errors/unkey/application/assertion_failed", 
"errors/unkey/application/invalid_input", + "errors/unkey/application/precondition_failed", "errors/unkey/application/protected_resource", "errors/unkey/application/service_unavailable", "errors/unkey/application/unexpected_error" @@ -192,15 +230,21 @@ { "group": "Data", "pages": [ + "errors/unkey/data/analytics_connection_failed", + "errors/unkey/data/analytics_not_configured", "errors/unkey/data/api_not_found", "errors/unkey/data/audit_log_not_found", "errors/unkey/data/identity_already_exists", "errors/unkey/data/identity_not_found", "errors/unkey/data/key_auth_not_found", "errors/unkey/data/key_not_found", + "errors/unkey/data/key_space_not_found", + "errors/unkey/data/permission_already_exists", "errors/unkey/data/permission_not_found", + "errors/unkey/data/ratelimit_namespace_gone", "errors/unkey/data/ratelimit_namespace_not_found", "errors/unkey/data/ratelimit_override_not_found", + "errors/unkey/data/role_already_exists", "errors/unkey/data/role_not_found", "errors/unkey/data/workspace_not_found" ] @@ -210,19 +254,40 @@ { "group": "User Errors", "pages": [ - "errors/user/bad_request/permissions_query_syntax_error", - "errors/user/bad_request/request_body_too_large", - "errors/user/bad_request/client_closed_request", - "errors/user/bad_request/request_timeout" + { + "group": "Bad Request", + "pages": [ + "errors/user/bad_request/client_closed_request", + "errors/user/bad_request/invalid_analytics_function", + "errors/user/bad_request/invalid_analytics_query", + "errors/user/bad_request/invalid_analytics_query_type", + "errors/user/bad_request/invalid_analytics_table", + "errors/user/bad_request/permissions_query_syntax_error", + "errors/user/bad_request/request_body_too_large", + "errors/user/bad_request/request_timeout" + ] + }, + { + "group": "Too Many Requests", + "pages": ["errors/user/too_many_requests/query_quota_exceeded"] + }, + { + "group": "Unprocessable Entity", + "pages": [ + "errors/user/unprocessable_entity/query_execution_timeout", + 
"errors/user/unprocessable_entity/query_memory_limit_exceeded", + "errors/user/unprocessable_entity/query_rows_limit_exceeded" + ] + } ] } ] } - ] + ], + "icon": "book-open" }, { "dropdown": "api.unkey.dev/v1 (deprecated)", - "icon": "terminal", "groups": [ { "group": "Introduction", @@ -247,15 +312,15 @@ { "group": "Endpoints", "openapi": { - "source": "https://api.unkey.dev/openapi.json", - "directory": "api-reference/v1" + "directory": "api-reference/v1", + "source": "https://api.unkey.dev/openapi.json" } } - ] + ], + "icon": "terminal" }, { "dropdown": "api.unkey.com/v2", - "icon": "terminal", "groups": [ { "group": "Introduction", @@ -269,15 +334,15 @@ { "group": "Endpoints", "openapi": { - "source": "https://spec.speakeasy.com/unkey/unkey/openapi-json-with-code-samples", - "directory": "api-reference/v2" + "directory": "api-reference/v2", + "source": "https://spec.speakeasy.com/unkey/unkey/openapi-json-with-code-samples" } } - ] + ], + "icon": "terminal" }, { "dropdown": "SDKs", - "icon": "code", "groups": [ { "group": "Official Libraries", @@ -359,61 +424,31 @@ } ] } - ] + ], + "icon": "code" } ] }, - "styling": { - "codeblocks": "system" - }, - "background": { - "color": { - "light": "#fafaf9", - "dark": "#0c0a09" - } - }, - "navbar": { - "links": [ - { - "label": "Blog & Tutorials", - "href": "https://unkey.com/blog" - }, - { - "label": "Discord", - "href": "https://unkey.com/discord" - }, - { - "label": "Dashboard", - "href": "https://app.unkey.com" - } - ], - "primary": { - "type": "github", - "href": "https://github.com/unkeyed/unkey" - } - }, - "footer": { - "socials": { - "x": "https://x.com/unkeydev", - "github": "https://github.com/unkeyed/unkey" - } - }, "redirects": [ { - "source": "/onboarding", - "destination": "/onboarding/onboarding-api" + "destination": "/onboarding/onboarding-api", + "source": "/onboarding" }, { - "source": "/onboarding/onboarding-api", - "destination": "/quickstart/onboarding/onboarding-api" + "destination": 
"/quickstart/onboarding/onboarding-api", + "source": "/onboarding/onboarding-api" }, { - "source": "/onboarding/onboarding-ratelimiting", - "destination": "/quickstart/onboarding/onboarding-ratelimiting" + "destination": "/quickstart/onboarding/onboarding-ratelimiting", + "source": "/onboarding/onboarding-ratelimiting" }, { - "source": "/api-reference", - "destination": "api-reference/v2" + "destination": "api-reference/v2", + "source": "/api-reference" } - ] + ], + "styling": { + "codeblocks": "system" + }, + "theme": "maple" } diff --git a/apps/docs/errors/unkey/application/precondition_failed.mdx b/apps/docs/errors/unkey/application/precondition_failed.mdx new file mode 100644 index 0000000000..425d60e1b9 --- /dev/null +++ b/apps/docs/errors/unkey/application/precondition_failed.mdx @@ -0,0 +1,104 @@ +--- +title: "precondition_failed" +description: "PreconditionFailed indicates a precondition check failed." +--- + +`err:unkey:application:precondition_failed` + + +```json Example +{ + "meta": { + "requestId": "req_2c9a0jf23l4k567" + }, + "error": { + "detail": "Vault hasn't been set up.", + "status": 412, + "title": "Precondition Failed", + "type": "https://unkey.com/docs/api-reference/errors-v2/unkey/application/precondition_failed" + } +} +``` + +## What Happened? + +This error occurs when your request is valid, but a precondition required to fulfill it is not met. Unlike validation errors where your input is invalid, precondition failures indicate that the system or resource is not configured correctly to handle your request. 
+ +Common scenarios that trigger this error: + +- **API not configured for keys**: The API you're trying to create keys for doesn't have key authentication set up +- **Vault not configured**: You're trying to create recoverable keys or decrypt keys, but the vault service isn't set up for your workspace +- **Encryption not enabled**: You're requesting key encryption/decryption on an API that doesn't have encryption enabled +- **Rate limit configuration missing**: You're checking a rate limit that doesn't exist for the key or its associated identity + +Here's an example of a request that would trigger this error: + +```bash +# Attempting to create a recoverable key when vault isn't configured +curl -X POST https://api.unkey.com/v2/keys.createKey \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer unkey_YOUR_ROOT_KEY" \ + -d '{ + "apiId": "api_1234567890", + "prefix": "test", + "recoverable": true + }' +``` + +## How To Fix + +The fix depends on which precondition failed. Check the error's `detail` field for specific information: + +### Vault Not Set Up + +If you see "Vault hasn't been set up", you have two options: + +1. **Configure the vault service** for your workspace (contact Unkey support if you need assistance) +2. **Remove the encryption requirement** from your request: + +```bash +# Create a non-recoverable key instead +curl -X POST https://api.unkey.com/v2/keys.createKey \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer unkey_YOUR_ROOT_KEY" \ + -d '{ + "apiId": "api_1234567890", + "prefix": "test", + "recoverable": false + }' +``` + +### API Not Set Up for Keys + +If you see "The requested API is not set up to handle keys", you need to: + +1. Verify the API ID is correct +2. Ensure the API has key authentication configured in your Unkey dashboard +3. Create or configure the key authentication settings for your API + +### API Not Set Up for Encryption + +If you see "This API does not support key encryption", either: + +1. 
Enable encrypted key storage for the API in your Unkey dashboard settings +2. Remove the `recoverable: true` option or `decrypt: true` parameter from your request + +### Rate Limit Not Found + +If you see a message about a requested rate limit not existing: + +1. Verify the rate limit name is correct +2. Create the rate limit configuration for the key or its identity +3. Ensure the rate limit is associated with the correct resource + +## Common Mistakes + +- **Assuming features are enabled by default**: Features like vault encryption and rate limits require explicit configuration +- **Wrong API configuration**: Trying to use encryption features on an API that wasn't set up for it +- **Missing rate limit setup**: Referencing rate limits that haven't been created yet +- **Workspace-level configuration issues**: Some features need to be enabled at the workspace level before they can be used + +## Related Errors +- [err:unkey:application:invalid_input](./invalid_input) - When your request input fails validation +- [err:unkey:data:api_not_found](../data/api_not_found) - When the API itself doesn't exist +- [err:unkey:application:service_unavailable](./service_unavailable) - When a required service is temporarily unavailable diff --git a/apps/docs/errors/unkey/data/analytics_connection_failed.mdx b/apps/docs/errors/unkey/data/analytics_connection_failed.mdx new file mode 100644 index 0000000000..0ec9394533 --- /dev/null +++ b/apps/docs/errors/unkey/data/analytics_connection_failed.mdx @@ -0,0 +1,7 @@ +--- +title: "analytics_connection_failed" +description: "ConnectionFailed indicates the connection to the analytics database failed." 
+--- + +`err:unkey:data:analytics_connection_failed` + diff --git a/apps/docs/errors/unkey/data/analytics_not_configured.mdx b/apps/docs/errors/unkey/data/analytics_not_configured.mdx new file mode 100644 index 0000000000..a0288aaaa7 --- /dev/null +++ b/apps/docs/errors/unkey/data/analytics_not_configured.mdx @@ -0,0 +1,7 @@ +--- +title: "analytics_not_configured" +description: "NotConfigured indicates analytics is not configured for the workspace." +--- + +`err:unkey:data:analytics_not_configured` + diff --git a/apps/docs/errors/unkey/data/key_space_not_found.mdx b/apps/docs/errors/unkey/data/key_space_not_found.mdx new file mode 100644 index 0000000000..f7b2bf45b7 --- /dev/null +++ b/apps/docs/errors/unkey/data/key_space_not_found.mdx @@ -0,0 +1,7 @@ +--- +title: "key_space_not_found" +description: "NotFound indicates the requested key space was not found." +--- + +`err:unkey:data:key_space_not_found` + diff --git a/apps/docs/errors/user/bad_request/invalid_analytics_function.mdx b/apps/docs/errors/user/bad_request/invalid_analytics_function.mdx new file mode 100644 index 0000000000..fc8b072136 --- /dev/null +++ b/apps/docs/errors/user/bad_request/invalid_analytics_function.mdx @@ -0,0 +1,124 @@ +--- +title: "invalid_analytics_function" +description: "Your query uses a function that isn't allowed for security reasons." +--- + +`err:user:bad_request:invalid_analytics_function` + +```json Example +{ + "meta": { + "requestId": "req_4dgzrNP3Je5mU1tD" + }, + "error": { + "detail": "Function 'file' is not allowed", + "status": 400, + "title": "Bad Request", + "type": "https://unkey.com/docs/errors/user/bad_request/invalid_analytics_function" + } +} +``` + +## What Happened? + +Your query tried to use a function that's blocked for security reasons! + +For security, only safe functions are allowed in analytics queries. 
Functions that could: +- Read files from the server (`file`, `executable`) +- Make network requests (`url`, `remote`) +- Access external systems (`mysql`, `postgresql`, `s3`, `hdfs`) +- Modify data or system state + +...are all blocked. + +## How to Fix It + +### 1. Use Allowed Functions + +Stick to standard analytics functions: + + + +```sql Wrong - Blocked function +SELECT file('/etc/passwd') FROM key_verifications_v1 +``` + +```sql Correct - Safe analytics functions +SELECT + toStartOfHour(time) as hour, + COUNT(*) as total, + AVG(response_time) as avg_response +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +GROUP BY hour +``` + + + +### 2. Common Safe Functions + +These are examples of allowed functions: + +**Aggregate functions:** +- `COUNT()`, `SUM()`, `AVG()`, `MIN()`, `MAX()` +- `uniq()`, `groupArray()` + +**Date/time functions:** +- `now()`, `today()`, `yesterday()` +- `toStartOfHour()`, `toStartOfDay()`, `toStartOfWeek()` +- `toDate()`, `toDateTime()` + +**String functions:** +- `concat()`, `substring()`, `lower()`, `upper()` +- `length()`, `position()` + +**Mathematical functions:** +- `round()`, `floor()`, `ceil()` +- `abs()`, `sqrt()`, `pow()` + +**Conditional functions:** +- `if()`, `multiIf()` +- `CASE WHEN ... THEN ... END` + +### 3. 
Remove Dangerous Functions + + + +```sql Blocked - File access +SELECT file('/path/to/file') FROM key_verifications_v1 +``` + +```sql Blocked - Network access +SELECT * FROM url('http://example.com/data') +``` + +```sql Blocked - External DB +SELECT * FROM mysql('host:port', 'db', 'table', 'user', 'pass') +``` + +```sql Safe Alternative - Use only your analytics data +SELECT + api_id, + COUNT(*) as verifications +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 1 DAY +GROUP BY api_id +``` + + + +## Commonly Blocked Functions + +These functions are blocked for security: + +| Function | Why Blocked | +|----------|-------------| +| `file()`, `executable()` | File system access | +| `url()`, `remote()` | Network requests | +| `mysql()`, `postgresql()`, `mongodb()` | External database access | +| `s3()`, `hdfs()`, `azureBlobStorage()` | External storage access | +| `dictGet()`, `dictGetOrDefault()` | Dictionary access | + + +Need a specific function that's blocked? [Contact support](mailto:support@unkey.dev) to discuss your use case - we may be able to safely enable it! + diff --git a/apps/docs/errors/user/bad_request/invalid_analytics_query.mdx b/apps/docs/errors/user/bad_request/invalid_analytics_query.mdx new file mode 100644 index 0000000000..cc2255c6e9 --- /dev/null +++ b/apps/docs/errors/user/bad_request/invalid_analytics_query.mdx @@ -0,0 +1,100 @@ +--- +title: "invalid_analytics_query" +description: "Your SQL query has a syntax error." +--- + +`err:user:bad_request:invalid_analytics_query` + +```json Example +{ + "meta": { + "requestId": "req_4dgzrNP3Je5mU1tD" + }, + "error": { + "detail": "Syntax error: Expected identifier, got 'FROM' at position 15", + "status": 400, + "title": "Bad Request", + "type": "https://unkey.com/docs/errors/user/bad_request/invalid_analytics_query" + } +} +``` + +## What Happened? + +Your SQL query has a syntax error! The query parser found invalid SQL syntax that prevents it from being executed. 
+ +Common causes include: + +- Missing or extra commas +- Unclosed quotes or parentheses +- Typos in SQL keywords +- Invalid column or table names + +## How to Fix It + +### 1. Check for Missing Commas + + + +```sql Wrong - Missing comma +SELECT + api_id + COUNT(*) as total +FROM key_verifications_v1 +``` + +```sql Correct +SELECT + api_id, + COUNT(*) as total +FROM key_verifications_v1 +``` + + + +### 2. Match Quotes and Parentheses + + + +```sql Wrong - Unclosed quote +SELECT * FROM key_verifications_v1 WHERE api_id = 'api_123 +``` + +```sql Correct +SELECT * FROM key_verifications_v1 WHERE api_id = 'api_123' +``` + + + +### 3. Use Correct SQL Keywords + + + +```sql Wrong - Typo in SELECT +SELCT * FROM key_verifications_v1 +``` + +```sql Correct +SELECT * FROM key_verifications_v1 +``` + + + +### 4. Verify Column Names + +Make sure you're using valid column names from your analytics tables: + +```sql +-- ✓ Valid columns +SELECT time, api_id, outcome, key_id +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +``` + +## Need Help? + +If you're stuck with a syntax error: + +1. **Check the error message** - It usually tells you exactly where the problem is +2. **Test incrementally** - Start with a simple `SELECT *` and add complexity step by step +3. **Use a SQL validator** - Many online tools can help spot syntax errors diff --git a/apps/docs/errors/user/bad_request/invalid_analytics_query_type.mdx b/apps/docs/errors/user/bad_request/invalid_analytics_query_type.mdx new file mode 100644 index 0000000000..c8ad43b0e4 --- /dev/null +++ b/apps/docs/errors/user/bad_request/invalid_analytics_query_type.mdx @@ -0,0 +1,120 @@ +--- +title: "invalid_analytics_query_type" +description: "Only SELECT queries are allowed for analytics - you tried to use INSERT, UPDATE, DELETE, or another unsupported operation." 
+--- + +`err:user:bad_request:invalid_analytics_query_type` + +```json Example +{ + "meta": { + "requestId": "req_4dgzrNP3Je5mU1tD" + }, + "error": { + "detail": "Only SELECT queries are allowed", + "status": 400, + "title": "Bad Request", + "type": "https://unkey.com/docs/errors/user/bad_request/invalid_analytics_query_type" + } +} +``` + +## What Happened? + +You tried to run a query that modifies data or isn't a SELECT! Analytics queries are read-only - you can only SELECT data, not modify it. + +Blocked query types: +- `INSERT` - Adding new data +- `UPDATE` - Modifying existing data +- `DELETE` - Removing data +- `DROP` - Deleting tables +- `CREATE` - Creating tables +- `ALTER` - Modifying table structure +- `TRUNCATE` - Clearing tables + +## How to Fix It + +### 1. Use SELECT Instead + +Analytics is for querying data, not modifying it: + + + +```sql Wrong - INSERT not allowed +INSERT INTO key_verifications_v1 VALUES (...) +``` + +```sql Wrong - UPDATE not allowed +UPDATE key_verifications_v1 SET outcome = 'VALID' +``` + +```sql Wrong - DELETE not allowed +DELETE FROM key_verifications_v1 WHERE time < now() - INTERVAL 30 DAY +``` + +```sql Correct - SELECT queries only +SELECT + api_id, + outcome, + COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +GROUP BY api_id, outcome +``` + + + +### 2. 
Query Your Data + +The analytics endpoint is for analyzing your verification data: + +```sql +-- ✓ Count verifications by outcome +SELECT outcome, COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 1 DAY +GROUP BY outcome + +-- ✓ Get hourly verification rates +SELECT + toStartOfHour(time) as hour, + COUNT(*) as verifications +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 24 HOUR +GROUP BY hour +ORDER BY hour + +-- ✓ Find most active APIs +SELECT + api_id, + COUNT(*) as requests +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +GROUP BY api_id +ORDER BY requests DESC +LIMIT 10 +``` + +### 3. Use the Correct API for Data Modification + +If you need to modify data, use the appropriate Unkey API endpoints: + +| What You Want | Use This API | +|---------------|--------------| +| Create a key | `POST /v2/keys.createKey` | +| Update a key | `PATCH /v2/keys.updateKey` | +| Delete a key | `POST /v2/keys.deleteKey` | +| Modify permissions | `POST /v2/keys.addPermissions` | + +## Why Read-Only? + +Analytics queries are read-only for several reasons: + +1. **Data integrity** - Verification history should never be modified +2. **Performance** - Read-only queries can be heavily optimized +3. **Security** - Prevents accidental or malicious data corruption +4. **Audit trail** - Preserves accurate historical records + + +Analytics is for understanding your data, not changing it. Use the main Unkey API for creating, updating, or deleting resources. + diff --git a/apps/docs/errors/user/bad_request/invalid_analytics_table.mdx b/apps/docs/errors/user/bad_request/invalid_analytics_table.mdx new file mode 100644 index 0000000000..49302c81ae --- /dev/null +++ b/apps/docs/errors/user/bad_request/invalid_analytics_table.mdx @@ -0,0 +1,76 @@ +--- +title: "invalid_analytics_table" +description: "Your query references a table that doesn't exist or isn't allowed." 
+--- + +`err:user:bad_request:invalid_analytics_table` + +```json Example +{ + "meta": { + "requestId": "req_4dgzrNP3Je5mU1tD" + }, + "error": { + "detail": "Access to table 'system.tables' is not allowed", + "status": 400, + "title": "Bad Request", + "type": "https://unkey.com/docs/errors/user/bad_request/invalid_analytics_table" + } +} +``` + +## What Happened? + +Your query tried to access a table that either doesn't exist or isn't allowed for security reasons. + +For security, only specific analytics tables are accessible: +- `key_verifications_v1` - Raw key verification events +- `key_verifications_per_minute_v1` - Minute-level aggregates +- `key_verifications_per_hour_v1` - Hour-level aggregates +- `key_verifications_per_day_v1` - Day-level aggregates +- `key_verifications_per_month_v1` - Month-level aggregates + +System tables (like `system.*`) and other database tables are blocked. + +## How to Fix It + +### 1. Use the Correct Table Name + + + +```sql Wrong - System table +SELECT * FROM system.tables +``` + +```sql Correct - Analytics table +SELECT * FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +``` + + + +### 2. 
Fix Typos in Table Names + + + +```sql Wrong - Typo +SELECT * FROM key_verification +WHERE time >= now() - INTERVAL 1 DAY +``` + +```sql Correct +SELECT * FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 1 DAY +``` + + + +## Available Tables + +| Table Name | Description | +|------------|-------------| +| `key_verifications_v1` | Raw verification events | +| `key_verifications_per_minute_v1` | Minute-level aggregates | +| `key_verifications_per_hour_v1` | Hour-level aggregates | +| `key_verifications_per_day_v1` | Day-level aggregates | +| `key_verifications_per_month_v1` | Month-level aggregates | diff --git a/apps/docs/errors/user/too_many_requests/query_quota_exceeded.mdx b/apps/docs/errors/user/too_many_requests/query_quota_exceeded.mdx new file mode 100644 index 0000000000..0db0273f0c --- /dev/null +++ b/apps/docs/errors/user/too_many_requests/query_quota_exceeded.mdx @@ -0,0 +1,106 @@ +--- +title: "query_quota_exceeded" +description: "You've exceeded your workspace's analytics query quota for the current time window." +--- + +`err:user:too_many_requests:query_quota_exceeded` + +```json Example +{ + "meta": { + "requestId": "req_4dgzrNP3Je5mU1tD" + }, + "error": { + "detail": "Workspace has exceeded the analytics query quota of 1000 queries per hour", + "status": 429, + "title": "Too Many Requests", + "type": "https://unkey.com/docs/errors/user/too_many_requests/query_quota_exceeded" + } +} +``` + +## What Happened? + +Your workspace has made too many analytics queries in a short period! We limit the number of queries you can run per hour to keep the analytics service fast and reliable for everyone. + +This is a rate limit on the **number of queries**, not about individual query complexity. + +## How to Fix It + +### 1. Wait and Retry + +The quota resets every hour. Wait a bit and try your query again. + +### 2. 
Cache Your Results + +Instead of running the same query repeatedly, cache the results in your application: + + + +```typescript Bad - Queries on Every Request +app.get('/dashboard', async (req, res) => { + // This runs a query EVERY time someone loads the dashboard + const stats = await fetch('https://api.unkey.com/v1/analytics', { + method: 'POST', + headers: { 'Authorization': 'Bearer unkey_XXX' }, + body: JSON.stringify({ query: 'SELECT COUNT(*) FROM key_verifications' }) + }) + res.json(stats) +}) +``` + +```typescript Better - Cache for 5 Minutes +import { Cache } from 'your-cache-library' + +const cache = new Cache() + +app.get('/dashboard', async (req, res) => { + // Check cache first + let stats = cache.get('dashboard-stats') + + if (!stats) { + // Only query if cache is empty + stats = await fetch('https://api.unkey.com/v1/analytics', { + method: 'POST', + headers: { 'Authorization': 'Bearer unkey_XXX' }, + body: JSON.stringify({ query: 'SELECT COUNT(*) FROM key_verifications' }) + }) + + // Cache for 5 minutes + cache.set('dashboard-stats', stats, { ttl: 300 }) + } + + res.json(stats) +}) +``` + + + +### 3. Batch Your Queries + +If you're making multiple queries, try to combine them into a single query with JOINs or subqueries. + +### 4. Use Webhooks Instead + +For real-time updates, consider using webhooks instead of polling the analytics API repeatedly. + +## Default Quota + +| Plan | Queries per Hour | +|------|------------------| +| Free | 1,000 | +| Pro | 10,000 | +| Enterprise | Custom | + +## Need a Higher Quota? + + +**Running into limits often?** We can increase your quota! + +[Contact our support team](mailto:support@unkey.dev) and tell us: +- What you're building +- Why you need more queries per hour +- Your current usage patterns + +We'll work with you to find the right quota for your needs, or help optimize your query patterns. 
+ diff --git a/apps/docs/errors/user/unprocessable_entity/query_execution_timeout.mdx b/apps/docs/errors/user/unprocessable_entity/query_execution_timeout.mdx new file mode 100644 index 0000000000..24f7cadff1 --- /dev/null +++ b/apps/docs/errors/user/unprocessable_entity/query_execution_timeout.mdx @@ -0,0 +1,124 @@ +--- +title: "query_execution_timeout" +description: "Your query took longer than the maximum execution time allowed." +--- + +`err:user:unprocessable_entity:query_execution_timeout` + +```json Example +{ + "meta": { + "requestId": "req_4dgzrNP3Je5mU1tD" + }, + "error": { + "detail": "Query exceeded the maximum execution time of 30 seconds", + "status": 422, + "title": "Unprocessable Entity", + "type": "https://unkey.com/docs/errors/user/unprocessable_entity/query_execution_timeout" + } +} +``` + +## What Happened? + +Your analytics query took too long to execute! We limit queries to 30 seconds to keep the analytics service responsive for everyone. + +This usually happens when you're querying a large time range or complex data without enough filters. + +## How to Fix It + +### 1. Query Smaller Time Ranges + +The most common fix is to reduce the time range: + + + +```sql Too Long +SELECT COUNT(*) +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 1 YEAR +GROUP BY toStartOfDay(time) +``` + +```sql Better +SELECT COUNT(*) +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +GROUP BY toStartOfDay(time) +``` + + + +### 2. Add More Filters + +Filter your data to reduce the amount of work the query needs to do: + + + +```sql Slow +SELECT api_id, COUNT(*) as total +FROM key_verifications_v1 +GROUP BY api_id +``` + +```sql Faster +SELECT api_id, COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 1 DAY + AND outcome = 'VALID' +GROUP BY api_id +``` + + + +### 3. 
Use Aggregated Tables + +For historical data, use pre-aggregated tables instead of raw events: + + + +```sql Slow - Scans millions of raw events +SELECT + toStartOfHour(time) as hour, + COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 30 DAY +GROUP BY hour +``` + +```sql Fast - Uses pre-aggregated data +SELECT + time as hour, + SUM(count) as total +FROM key_verifications_per_hour_v1 +WHERE time >= now() - INTERVAL 30 DAY +GROUP BY hour +``` + + + +### 4. Limit Result Size + +Add a LIMIT clause to stop processing once you have enough data: + +```sql +SELECT api_id, COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +GROUP BY api_id +ORDER BY total DESC +LIMIT 100 +``` + +## Need Longer Execution Time? + + +**Have a legitimate need for longer-running queries?** Contact our support team! + +[Reach out to support](mailto:support@unkey.dev) and tell us: +- What you're trying to analyze +- Why the query needs more than 30 seconds +- An example of the query you're running + +We'll review your use case and see if we can accommodate your needs. + diff --git a/apps/docs/errors/user/unprocessable_entity/query_memory_limit_exceeded.mdx b/apps/docs/errors/user/unprocessable_entity/query_memory_limit_exceeded.mdx new file mode 100644 index 0000000000..9e53141d2b --- /dev/null +++ b/apps/docs/errors/user/unprocessable_entity/query_memory_limit_exceeded.mdx @@ -0,0 +1,122 @@ +--- +title: "query_memory_limit_exceeded" +description: "Your query used more memory than allowed." +--- + +`err:user:unprocessable_entity:query_memory_limit_exceeded` + +```json Example +{ + "meta": { + "requestId": "req_4dgzrNP3Je5mU1tD" + }, + "error": { + "detail": "Query exceeded the maximum memory limit of 2GB", + "status": 422, + "title": "Unprocessable Entity", + "type": "https://unkey.com/docs/errors/user/unprocessable_entity/query_memory_limit_exceeded" + } +} +``` + +## What Happened? + +Your query tried to use more than 2GB of memory! 
We limit memory usage to keep the analytics service stable and fast for everyone. + +This typically happens when you're selecting too many rows, using large GROUP BY operations, or performing complex JOINs without enough filtering. + +## How to Fix It + +### 1. Use Aggregations Instead of Raw Data + +Instead of fetching all rows, aggregate the data: + + + +```sql Memory Intensive - Fetches all rows +SELECT * +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +``` + +```sql Memory Efficient - Aggregates data +SELECT + toStartOfHour(time) as hour, + api_id, + COUNT(*) as total, + AVG(response_time) as avg_response +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +GROUP BY hour, api_id +``` + + + +### 2. Add More Filters + +Reduce the amount of data the query needs to process: + + + +```sql Too Much Data +SELECT api_id, key_id, outcome, time +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 30 DAY +``` + +```sql Filtered Query +SELECT api_id, key_id, outcome, time +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 1 DAY + AND api_id = 'api_123' + AND outcome = 'VALID' +``` + + + +### 3. Limit Result Size + +Add a LIMIT to cap the number of rows: + +```sql +SELECT api_id, key_id, outcome, time +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY +ORDER BY time DESC +LIMIT 10000 +``` + +### 4. Avoid Large GROUP BY Cardinality + +GROUP BY on high-cardinality columns (like `key_id`) uses a lot of memory. Instead, group by lower-cardinality columns: + + + +```sql High Memory - Millions of unique keys +SELECT key_id, COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 30 DAY +GROUP BY key_id +``` + +```sql Lower Memory - Hundreds of APIs +SELECT api_id, COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 30 DAY +GROUP BY api_id +``` + + + +## Need More Memory? + + +**Have a legitimate need for higher memory limits?** Contact our support team! 
+ +[Reach out to support](mailto:support@unkey.dev) and tell us: +- What you're trying to analyze +- Why the query needs more than 2GB of memory +- An example of the query you're running + +We'll review your use case and see if we can accommodate your needs. + diff --git a/apps/docs/errors/user/unprocessable_entity/query_rows_limit_exceeded.mdx b/apps/docs/errors/user/unprocessable_entity/query_rows_limit_exceeded.mdx new file mode 100644 index 0000000000..5e20d420b6 --- /dev/null +++ b/apps/docs/errors/user/unprocessable_entity/query_rows_limit_exceeded.mdx @@ -0,0 +1,134 @@ +--- +title: "query_rows_limit_exceeded" +description: "Your query tried to scan more rows than allowed." +--- + +`err:user:unprocessable_entity:query_rows_limit_exceeded` + +```json Example +{ + "meta": { + "requestId": "req_4dgzrNP3Je5mU1tD" + }, + "error": { + "detail": "Query exceeded the maximum rows to scan limit of 100 million", + "status": 422, + "title": "Unprocessable Entity", + "type": "https://unkey.com/docs/errors/user/unprocessable_entity/query_rows_limit_exceeded" + } +} +``` + +## What Happened? + +Your query tried to scan more than 100 million rows! We limit the number of rows that can be scanned to keep queries fast and prevent resource exhaustion. + +This happens when you query large time ranges or don't filter your data enough, causing ClickHouse to scan millions of rows even if the final result is small. + +## How to Fix It + +### 1. Add Time Range Filters + +Always filter by time to limit the number of rows scanned: + + + +```sql Scans Too Many Rows +SELECT COUNT(*) +FROM key_verifications_v1 +WHERE outcome = 'VALID' +``` + +```sql Limited Scan +SELECT COUNT(*) +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY + AND outcome = 'VALID' +``` + + + +### 2. 
Use More Selective Filters + +Add filters that reduce the data before aggregation: + + + +```sql Scans Everything +SELECT api_id, COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 90 DAY +GROUP BY api_id +``` + +```sql Scans Less +SELECT api_id, COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 7 DAY + AND api_id IN ('api_123', 'api_456') +GROUP BY api_id +``` + + + +### 3. Use Pre-Aggregated Tables + +For historical queries, use aggregated tables that have fewer rows: + + + +```sql Raw Table - 100M+ rows +SELECT + toStartOfDay(time) as day, + COUNT(*) as total +FROM key_verifications_v1 +WHERE time >= now() - INTERVAL 90 DAY +GROUP BY day +``` + +```sql Aggregated Table - 2K rows +SELECT + time as day, + SUM(count) as total +FROM key_verifications_v1_per_day_v1 +WHERE time >= now() - INTERVAL 90 DAY +GROUP BY day +``` + + + +### 4. Query in Smaller Batches + +Instead of one large query, break it into smaller time windows: + +```javascript +// Instead of querying 90 days at once +const results = []; +for (let i = 0; i < 90; i += 7) { + const start = `now() - INTERVAL ${i + 7} DAY`; + const end = `now() - INTERVAL ${i} DAY`; + + const result = await query(` + SELECT COUNT(*) as total + FROM key_verifications_v1 + WHERE time >= ${start} AND time < ${end} + `); + + results.push(result); +} +``` + +## Need Higher Row Limits? + + +**Have a legitimate need to scan more rows?** Contact our support team! + +[Reach out to support](mailto:support@unkey.dev) and tell us: + +- What you're trying to analyze +- Why you need to scan more than 100 million rows +- An example of the query you're running + +We'll review your use case and help optimize your query or adjust limits if needed. 
+ + diff --git a/apps/docs/package.json b/apps/docs/package.json index 23155dd8ae..28a734c5ea 100644 --- a/apps/docs/package.json +++ b/apps/docs/package.json @@ -3,12 +3,12 @@ "version": "1.1.0", "private": true, "scripts": { - "dev": "npx mintlify@latest dev" + "dev": "mintlify dev" }, "keywords": [], "author": "Andreas Thomas & James Perkins", "devDependencies": { - "mintlify": "^4.2.31" + "mintlify": "^4.2.144" }, "dependencies": { "sharp": "^0.34.3" diff --git a/apps/docs/todo-golang-section.mint.json b/apps/docs/todo-golang-section.mint.json deleted file mode 100644 index 3d85c79584..0000000000 --- a/apps/docs/todo-golang-section.mint.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "group": "Golang SDK", - "pages": [ - "libraries/go/overview", - { - "group": "Keys", - "pages": [ - "libraries/go/keys/create", - "libraries/go/keys/verify", - "libraries/go/keys/delete", - "libraries/go/keys/get", - "libraries/go/keys/get-verifications", - "libraries/go/keys/update", - "libraries/go/keys/update-remaining" - ] - }, - { - "group": "APIs", - "pages": ["libraries/go/apis/get", "libraries/go/apis/list-keys"] - }, - { - "group": "Ratelimits", - "pages": ["libraries/go/ratelimits/limit"] - }, - { - "group": "API", - "pages": ["libraries/go/api/list", "libraries/go/api/get"] - } - ] -} diff --git a/apps/engineering/content/docs/architecture/services/analytics.mdx b/apps/engineering/content/docs/architecture/services/analytics.mdx new file mode 100644 index 0000000000..c991edf7e9 --- /dev/null +++ b/apps/engineering/content/docs/architecture/services/analytics.mdx @@ -0,0 +1,371 @@ +--- +title: Analytics API Security +--- + +The Analytics API (`/v2/analytics.getVerifications`) allows workspace users to query their verification data using SQL. This is a powerful feature that requires multiple layers of security to prevent abuse and ensure data isolation. + +## Security Model + +### Multi-Layer Defense + +We implement security at three levels: + +1. 
**API Level**: Query parsing, validation, and rewriting +2. **RBAC Level**: Permission-based access control +3. **ClickHouse Level**: Per-workspace users with quotas and resource limits + +This defense-in-depth approach ensures that even if one layer is bypassed, the others still protect the system. + +## API Level Security + +### Query Parser (`pkg/clickhouse/query-parser`) + +The query parser is responsible for validating, rewriting, and securing user-submitted SQL queries before they reach ClickHouse. + +**What it does:** + +1. **Parse SQL**: Uses `github.com/AfterShip/clickhouse-sql-parser` to parse the query into an AST +2. **Validate query type**: Only SELECT queries are allowed (no INSERT, UPDATE, DELETE, DROP, etc.) +3. **Enforce workspace isolation**: Automatically injects `WHERE workspace_id = 'ws_xxx'` to every query +4. **Validate table access**: Only allows queries against pre-approved tables +5. **Enforce limits**: Adds `LIMIT` clause if not present, caps at configured maximum +6. **Validate functions**: Blocks dangerous or expensive functions + +**Example transformation:** + +```sql +-- User submits: +SELECT key_space_id, COUNT(*) FROM key_verifications WHERE time >= now() - INTERVAL 7 DAY + +-- Parser rewrites to: +SELECT key_space_id, COUNT(*) +FROM default.key_verifications_raw_v2 +WHERE workspace_id = 'ws_4qD3194xe2x56qmv' + AND time >= now() - INTERVAL 7 DAY +LIMIT 10000 +``` + +### Direct Column Access + +Users query ClickHouse tables directly using the actual column names. The schema exposes: + +- **`key_space_id`**: The API's KeyAuth ID (e.g., `ks_1234`) +- **`identity_id`**: Internal identity identifier +- **`external_id`**: User-provided external identifier +- **`key_id`**: Individual key identifier + +Users can find their `key_space_id` in the API settings in the dashboard. + +**No ID transformation is performed** - users query with the actual IDs stored in ClickHouse, and results contain those same IDs. 
+ +### Table Aliases + +Users query against friendly table names that map to actual ClickHouse tables: + +```go +TableAliases: map[string]string{ + "key_verifications": "default.key_verifications_raw_v2", + "key_verifications_per_minute": "default.key_verifications_per_minute_v2", + "key_verifications_per_hour": "default.key_verifications_per_hour_v2", + "key_verifications_per_day": "default.key_verifications_per_day_v2", + "key_verifications_per_month": "default.key_verifications_per_month_v2", +} +``` + +### Limits Enforcement + +Multiple limits protect against resource exhaustion: + +- **Query result rows**: Max 10,000 rows returned +- **Memory usage**: Max memory per query +- **Execution time**: Max seconds per query + +These are enforced both at the parser level and at the ClickHouse user level. + +## RBAC Level Security + +### Permission Model + +Access to analytics requires one of these permissions: + +1. **`analytics.read`**: Workspace-level access to all analytics +2. **`api.*.read_analytics`**: Wildcard access to analytics for all APIs +3. **`api..read_analytics`**: Per-API analytics access. The system translates `api_id` to `key_space_id` internally. 
+ +**Permission checking logic** (`handler.go:170-212`): + +```go +permissionChecks := []rbac.PermissionQuery{ + // Workspace-level analytics access + rbac.T(rbac.Tuple{ + ResourceType: rbac.Analytics, + Action: rbac.Read, + }), + // Wildcard API analytics access + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: "*", + Action: rbac.ReadAnalytics, + }), +} + +// If query filters by key_space_id, translate to api_id and check permissions +if len(extractedKeySpaceIds) > 0 { + // Translate key_space_id to api_id for permission check + apiIDs := translateKeySpaceToApiID(extractedKeySpaceIds) + + apiPermissions := make([]rbac.PermissionQuery, len(apiIDs)) + for i, apiID := range apiIDs { + apiPermissions[i] = rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: apiID, // Uses api_id, not key_space_id + Action: rbac.ReadAnalytics, + }) + } + // Must have ALL specific API permissions + permissionChecks = append(permissionChecks, rbac.And(apiPermissions...)) +} + +// User needs at least one of these permission sets +err = auth.VerifyRootKey(ctx, keys.WithPermissions(rbac.Or(permissionChecks...))) +``` + +This ensures users with per-API permissions cannot access data they shouldn't see. + +## ClickHouse Level Security + +### Per-Workspace Database Users + +Each workspace gets its own ClickHouse user created by the `create-clickhouse-user` CLI command. 
+ +**User configuration:** + +- Username: `workspace__user` +- Password: Random 32-character string, encrypted with Vault +- Database access: Only the `default` database +- Table grants: `SELECT` only on approved tables + +**Creation command:** + +```bash +go run ./cmd/create-clickhouse-user \ + --workspace-id ws_xxx \ + --max-queries-per-window 1000 \ + --quota-duration-seconds 3600 \ + --max-query-execution-time 30 \ + --max-query-memory-bytes 1073741824 \ + --max-query-result-rows 10000 +``` + +### ClickHouse QUOTA + +Quotas limit query volume over time windows: + +```sql +CREATE QUOTA OR REPLACE workspace_ws_xxx_quota +FOR INTERVAL 3600 second + MAX queries = 1000, + MAX errors = 100 +TO workspace_ws_xxx_user +``` + +This prevents runaway query volume even if API-level rate limits are bypassed. + +### ClickHouse SETTINGS PROFILE + +Settings profiles enforce resource limits per query: + +```sql +CREATE SETTINGS PROFILE OR REPLACE workspace_ws_xxx_profile +SETTINGS + max_execution_time = 30, -- Max 30 seconds per query + max_memory_usage = 1073741824, -- Max 1GB memory per query + max_result_rows = 10000, -- Max 10k rows returned + readonly = 2 -- Read-only, can set query-level settings +TO workspace_ws_xxx_user +``` + +**Why `readonly = 2`?** + +- `readonly = 0`: Full access (not suitable for users) +- `readonly = 1`: Read-only, **cannot set any settings** (breaks ClickHouse driver) +- `readonly = 2`: Read-only for data, **can set query-level settings** within profile limits + +The ClickHouse HTTP driver needs to set query execution parameters, so we use `readonly = 2` which allows the driver to set settings while the SETTINGS PROFILE enforces maximum values. 
+ +### Connection Management + +The `ConnectionManager` (`internal/services/analytics/connection_manager.go`) handles per-workspace connections: + +**Features:** + +- Two-layer caching: + - Workspace settings cache (24hr) with SWR for database lookups + - Connection cache (24hr) with health checks +- Vault integration for password decryption +- DSN template-based connection building +- Automatic connection health verification (10% sampling) +- Graceful connection cleanup on shutdown + +**DSN Template:** + +``` +http://{username}:{password}@clickhouse:8123/default +``` + +The API uses HTTP protocol instead of native TCP because: +- Simpler connection model (stateless requests) +- No persistent connection pool overhead per workspace +- Easier to debug and monitor +- Works well with ClickHouse Cloud + +**Connection lifecycle:** + +1. Request comes in with workspace ID +2. Check connection cache for existing connection +3. If cache miss or failed health check: + - Fetch workspace settings from cache (SWR) + - Decrypt password using Vault + - Build DSN from template + - Create new ClickHouse connection + - Store in cache +4. Execute query using workspace-specific connection + +## Error Handling + +### Error Codes + +Analytics-specific error codes: + +- **`analytics_not_configured`** (404): Workspace doesn't have analytics enabled +- **`analytics_connection_failed`** (503): Cannot connect to workspace's ClickHouse user +- **`invalid_analytics_query`** (400): SQL syntax error +- **`invalid_table`** (400): Table not in allowed list +- **`invalid_function`** (400): Function not allowed +- **`query_not_supported`** (400): Non-SELECT query attempted +- **`query_execution_timeout`** (400): Query exceeded time limit +- **`query_memory_limit_exceeded`** (400): Query exceeded memory limit +- **`query_rows_limit_exceeded`** (400): Query exceeded rows-to-read limit + +These are mapped in `pkg/zen/middleware_errors.go` to appropriate HTTP status codes. 
+ +## Monitoring and Debugging + +### Query Logging + +All analytics queries are logged with: + +- Request ID +- Workspace ID +- Original user query +- Rewritten safe query +- Execution time +- Error details (if any) + +### ClickHouse System Tables + +Monitor analytics usage: + +```sql +-- Recent queries from workspace users +SELECT + event_time, + user, + query_duration_ms, + read_rows, + read_bytes, + query, + exception +FROM system.query_log +WHERE user LIKE 'workspace_%' +ORDER BY event_time DESC +LIMIT 50; + +-- Current quota usage +SELECT + quota_name, + quota_key, + max_queries, + queries +FROM system.quotas_usage +WHERE quota_name LIKE 'workspace_%'; + +-- Failed queries +SELECT + event_time, + user, + query, + exception +FROM system.query_log +WHERE user LIKE 'workspace_%' + AND exception != '' +ORDER BY event_time DESC; +``` + +### Connection Health + +The connection manager performs periodic health checks: + +- 10% of requests trigger a `PING` before query execution +- Failed pings remove the connection from cache +- Next request will create a fresh connection +- Prevents using stale or dead connections + +## Common Issues and Solutions + +### "Cannot modify setting in readonly mode" + +**Cause**: User has `readonly = 1` instead of `readonly = 2` + +**Solution**: Re-run `create-clickhouse-user` with the correct settings profile (already fixed to use `readonly = 2`) + +### "No KEK found for key ID" + +**Cause**: API's Vault service doesn't have access to the KEK used to encrypt the password + +**Solution**: Ensure API and `create-clickhouse-user` use the same Vault configuration (S3 bucket, master keys) + +### Query timeout errors + +**Cause**: Query is too complex or scanning too many rows + +**Solutions**: +- Query aggregated tables (`per_hour`, `per_day`) instead of raw tables +- Add more specific WHERE filters to reduce data scanned +- Increase workspace's `max_execution_time` setting +- Use indexed columns in WHERE clauses + +### Permission denied 
errors + +**Cause**: User's root key doesn't have required permissions + +**Solutions**: +- Grant `analytics.read` for workspace-level access +- Grant `api.*.read_analytics` for all APIs +- Grant specific `api..read_analytics` permissions. The system translates `api_id` to `key_space_id` for the permission check. + +## Best Practices + +### For Query Performance + +1. **Use aggregated tables** when possible (per_hour, per_day, per_month) +2. **Filter by workspace_id first** (automatic, but good to know) +3. **Use indexed columns** in WHERE clauses (time, workspace_id, key_space_id) +4. **Limit result size** to what you actually need +5. **Avoid expensive functions** like complex string operations on large datasets + +### For Security + +1. **Never bypass the query parser** - always use the safe, rewritten query +2. **Verify permissions before query execution** - check after virtual column resolution +3. **Use workspace-specific connections** - never share connections between workspaces +4. **Encrypt passwords at rest** - use Vault for all credential storage +5. **Monitor quota usage** - alert when workspaces approach limits + +### For Development + +1. **Test queries locally** using Docker Compose ClickHouse instance +2. **Validate parser changes** with comprehensive test cases +3. **Check query plans** with `EXPLAIN` for performance +4. **Monitor error rates** in production query logs +5. 
**Keep parser and ClickHouse settings in sync** - both should enforce same limits diff --git a/apps/engineering/content/docs/architecture/services/meta.json b/apps/engineering/content/docs/architecture/services/meta.json index 6ff3476ff8..d4a2970fc3 100644 --- a/apps/engineering/content/docs/architecture/services/meta.json +++ b/apps/engineering/content/docs/architecture/services/meta.json @@ -4,6 +4,7 @@ "root": false, "pages": [ "api", + "analytics", "clickhouse", "clickhouse-proxy", "ctrl", diff --git a/deployment/docker-compose.yaml b/deployment/docker-compose.yaml index d85c8246c3..7df4d7db61 100644 --- a/deployment/docker-compose.yaml +++ b/deployment/docker-compose.yaml @@ -71,6 +71,8 @@ services: depends_on: mysql: condition: service_healthy + s3: + condition: service_healthy redis: condition: service_healthy clickhouse: @@ -84,12 +86,13 @@ services: UNKEY_CLICKHOUSE_URL: "clickhouse://default:password@clickhouse:9000?secure=false&skip_verify=true" UNKEY_CHPROXY_AUTH_TOKEN: "chproxy-test-token-123" UNKEY_OTEL: false - VAULT_S3_URL: "http://s3:3902" - VAULT_S3_BUCKET: "vault" - VAULT_S3_ACCESS_KEY_ID: "minio_root_user" - VAULT_S3_ACCESS_KEY_SECRET: "minio_root_password" - VAULT_MASTER_KEYS: "Ch9rZWtfMmdqMFBJdVhac1NSa0ZhNE5mOWlLSnBHenFPENTt7an5MRogENt9Si6wms4pQ2XIvqNSIgNpaBenJmXgcInhu6Nfv2U=" + UNKEY_VAULT_S3_URL: "http://s3:3902" + UNKEY_VAULT_S3_BUCKET: "vault" + UNKEY_VAULT_S3_ACCESS_KEY_ID: "minio_root_user" + UNKEY_VAULT_S3_ACCESS_KEY_SECRET: "minio_root_password" + UNKEY_VAULT_MASTER_KEYS: "Ch9rZWtfMmdqMFBJdVhac1NSa0ZhNE5mOWlLSnBHenFPENTt7an5MRogENt9Si6wms4pQ2XIvqNSIgNpaBenJmXgcInhu6Nfv2U=" UNKEY_KAFKA_BROKERS: "kafka:9092" + UNKEY_CLICKHOUSE_ANALYTICS_URL: "http://clickhouse:8123/default" redis: networks: @@ -108,18 +111,10 @@ services: # The Kafka broker, available at localhost:9092 kafka: container_name: kafka - image: bitnamilegacy/kafka:4.0.0-debian-12-r10 - ports: - - 9092:9092 - environment: - KAFKA_CFG_NODE_ID: 0 - KAFKA_CFG_PROCESS_ROLES: 
controller,broker - KAFKA_CFG_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:9093 - KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9092 - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 0@localhost:9093 - KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: true + image: bufbuild/bufstream:0.4.4 + network_mode: host + command: ["serve", "--inmemory"] + agent: networks: - default @@ -461,6 +456,7 @@ volumes: clickhouse: clickhouse-keeper: s3: + kafka_data: networks: default: diff --git a/go/Makefile b/go/Makefile index ec47cd2b33..aebd13f4d3 100644 --- a/go/Makefile +++ b/go/Makefile @@ -24,7 +24,7 @@ pull: @docker compose -f ../deployment/docker-compose.yaml pull up: pull - @docker compose -f ../deployment/docker-compose.yaml up -d planetscale mysql redis clickhouse s3 otel kafka restate + @docker compose -f ../deployment/docker-compose.yaml up -d planetscale mysql redis clickhouse s3 otel kafka restate --wait clean: @docker compose -f ../deployment/docker-compose.yaml down --volumes @@ -34,8 +34,8 @@ build: generate: go install github.com/restatedev/sdk-go/protoc-gen-go-restate@latest - buf generate --template ./buf.gen.connect.yaml --clean --path "./proto/ctrl" --path "./proto/krane" --path "./proto/partition" --path "./proto/vault" - buf generate --template ./buf.gen.restate.yaml --path "./proto/hydra" + go run github.com/bufbuild/buf/cmd/buf generate --template ./buf.gen.connect.yaml --clean --path "./proto/cache" --path "./proto/ctrl" --path "./proto/krane" --path "./proto/partition" --path "./proto/vault" + go run github.com/bufbuild/buf/cmd/buf generate --template ./buf.gen.restate.yaml --path "./proto/hydra" go generate ./... go fmt ./... 
diff --git a/go/apps/api/config.go b/go/apps/api/config.go index 5cfa66c514..01ccd7ca1f 100644 --- a/go/apps/api/config.go +++ b/go/apps/api/config.go @@ -53,6 +53,11 @@ type Config struct { // ClickhouseURL is the ClickHouse database connection string ClickhouseURL string + // ClickhouseAnalyticsURL is the base URL for workspace-specific analytics connections + // Workspace credentials are injected programmatically at connection time + // Examples: "http://clickhouse:8123/default", "clickhouse://clickhouse:9000/default" + ClickhouseAnalyticsURL string + // --- Database configuration --- // DatabasePrimary is the primary database connection string for read and write operations diff --git a/go/apps/api/openapi/gen.go b/go/apps/api/openapi/gen.go index 076cefe6af..cdc207dbca 100644 --- a/go/apps/api/openapi/gen.go +++ b/go/apps/api/openapi/gen.go @@ -461,6 +461,36 @@ type Role struct { Permissions *[]Permission `json:"permissions,omitempty"` } +// ServiceUnavailableErrorResponse Error response when a required service is temporarily unavailable. This indicates that the service exists but cannot be reached or is not responding. +// +// When you encounter this error: +// - The service is likely experiencing temporary issues +// - Retrying the request after a short delay may succeed +// - If the error persists, the service may be undergoing maintenance +// - Contact Unkey support if the issue continues +type ServiceUnavailableErrorResponse struct { + // Error Base error structure following Problem Details for HTTP APIs (RFC 7807). This provides a standardized way to carry machine-readable details of errors in HTTP response content. + Error BaseError `json:"error"` + + // Meta Metadata object included in every API response. This provides context about the request and is essential for debugging, audit trails, and support inquiries. The `requestId` is particularly important when troubleshooting issues with the Unkey support team. 
+ Meta Meta `json:"meta"` +} + +// TooManyRequestsErrorResponse Error response when the client has sent too many requests in a given time period. This occurs when you've exceeded a rate limit or quota for the resource you're accessing. +// +// The rate limit resets automatically after the time window expires. To avoid this error: +// - Implement exponential backoff when retrying requests +// - Cache results where appropriate to reduce request frequency +// - Check the error detail message for specific quota information +// - Contact support if you need a higher quota for your use case +type TooManyRequestsErrorResponse struct { + // Error Base error structure following Problem Details for HTTP APIs (RFC 7807). This provides a standardized way to carry machine-readable details of errors in HTTP response content. + Error BaseError `json:"error"` + + // Meta Metadata object included in every API response. This provides context about the request and is essential for debugging, audit trails, and support inquiries. The `requestId` is particularly important when troubleshooting issues with the Unkey support team. + Meta Meta `json:"meta"` +} + // UnauthorizedErrorResponse Error response when authentication has failed or credentials are missing. This occurs when: // - No authentication token is provided in the request // - The provided token is invalid, expired, or malformed @@ -475,6 +505,21 @@ type UnauthorizedErrorResponse struct { Meta Meta `json:"meta"` } +// UnprocessableEntityErrorResponse Error response when the request is syntactically valid but cannot be processed due to semantic constraints or resource limitations. This occurs when: +// - A query exceeds execution time limits +// - A query uses more memory than allowed +// - A query scans too many rows +// - A query result exceeds size limits +// +// The request syntax is correct, but the operation cannot be completed due to business rules or resource constraints. 
Review the error details for specific limitations and adjust your request accordingly. +type UnprocessableEntityErrorResponse struct { + // Error Base error structure following Problem Details for HTTP APIs (RFC 7807). This provides a standardized way to carry machine-readable details of errors in HTTP response content. + Error BaseError `json:"error"` + + // Meta Metadata object included in every API response. This provides context about the request and is essential for debugging, audit trails, and support inquiries. The `requestId` is particularly important when troubleshooting issues with the Unkey support team. + Meta Meta `json:"meta"` +} + // UpdateKeyCreditsData Credit configuration and remaining balance for this key. type UpdateKeyCreditsData struct { // Refill Configuration for automatic credit refill behavior. @@ -501,6 +546,25 @@ type UpdateKeyCreditsRefill struct { // UpdateKeyCreditsRefillInterval How often credits are automatically refilled. type UpdateKeyCreditsRefillInterval string +// V2AnalyticsGetVerificationsRequestBody defines model for V2AnalyticsGetVerificationsRequestBody. +type V2AnalyticsGetVerificationsRequestBody struct { + // Query SQL query to execute against your analytics data. + // Only SELECT queries are allowed. + Query string `json:"query"` +} + +// V2AnalyticsGetVerificationsResponseBody defines model for V2AnalyticsGetVerificationsResponseBody. +type V2AnalyticsGetVerificationsResponseBody struct { + // Data Array of verification rows returned by the query. Fields vary based on the SQL SELECT clause. + Data V2AnalyticsGetVerificationsResponseData `json:"data"` + + // Meta Metadata object included in every API response. This provides context about the request and is essential for debugging, audit trails, and support inquiries. The `requestId` is particularly important when troubleshooting issues with the Unkey support team. 
+ Meta Meta `json:"meta"` +} + +// V2AnalyticsGetVerificationsResponseData Array of verification rows returned by the query. Fields vary based on the SQL SELECT clause. +type V2AnalyticsGetVerificationsResponseData = []map[string]interface{} + // V2ApisCreateApiRequestBody defines model for V2ApisCreateApiRequestBody. type V2ApisCreateApiRequestBody struct { // Name Unique identifier for this API namespace within your workspace. @@ -2016,6 +2080,9 @@ type ChproxyRatelimitsJSONRequestBody = ChproxyRatelimitsRequestBody // ChproxyVerificationsJSONRequestBody defines body for ChproxyVerifications for application/json ContentType. type ChproxyVerificationsJSONRequestBody = ChproxyVerificationsRequestBody +// AnalyticsGetVerificationsJSONRequestBody defines body for AnalyticsGetVerifications for application/json ContentType. +type AnalyticsGetVerificationsJSONRequestBody = V2AnalyticsGetVerificationsRequestBody + // CreateApiJSONRequestBody defines body for CreateApi for application/json ContentType. 
type CreateApiJSONRequestBody = V2ApisCreateApiRequestBody diff --git a/go/apps/api/openapi/generate.go b/go/apps/api/openapi/generate.go index 5174097830..9244541b90 100644 --- a/go/apps/api/openapi/generate.go +++ b/go/apps/api/openapi/generate.go @@ -1,4 +1,4 @@ package openapi //go:generate go run generate_bundle.go -input openapi-split.yaml -output openapi-generated.yaml -//go:generate go tool oapi-codegen -config=config.yaml ./openapi-generated.yaml +//go:generate go run github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen -config=config.yaml ./openapi-generated.yaml diff --git a/go/apps/api/openapi/generate_bundle.go b/go/apps/api/openapi/generate_bundle.go index 01a31e2a8f..b22b560889 100644 --- a/go/apps/api/openapi/generate_bundle.go +++ b/go/apps/api/openapi/generate_bundle.go @@ -57,12 +57,9 @@ func main() { } // Build the OpenAPI v3 model - v3Model, errs := document.BuildV3Model() - if len(errs) > 0 { - for _, e := range errs { - log.Printf("Error building model: %v", e) - } - log.Fatal("Failed to build v3 model") + v3Model, err := document.BuildV3Model() + if err != nil { + log.Fatalf("Failed to build v3 model: %v", err) } log.Printf("Model built successfully using version %s", v3Model.Model.Version) diff --git a/go/apps/api/openapi/openapi-generated.yaml b/go/apps/api/openapi/openapi-generated.yaml index 6ee40b30e9..22bf93a6f1 100644 --- a/go/apps/api/openapi/openapi-generated.yaml +++ b/go/apps/api/openapi/openapi-generated.yaml @@ -94,22 +94,18 @@ components: type: string description: Processing status example: "OK" - V2ApisCreateApiRequestBody: + V2AnalyticsGetVerificationsRequestBody: type: object required: - - name + - query properties: - name: + query: type: string - minLength: 3 - maxLength: 255 - pattern: "^[a-zA-Z][a-zA-Z0-9._-]*$" description: | - Unique identifier for this API namespace within your workspace. - Use descriptive names like 'payment-service-prod' or 'user-api-dev' to clearly identify purpose and environment. 
- example: payment-service-production - additionalProperties: false - V2ApisCreateApiResponseBody: + SQL query to execute against your analytics data. + Only SELECT queries are allowed. + example: "SELECT COUNT(*) as total FROM key_verifications_v1 WHERE outcome = 'VALID' AND time >= now() - INTERVAL 7 DAY" + V2AnalyticsGetVerificationsResponseBody: type: object required: - meta @@ -118,8 +114,7 @@ components: meta: $ref: "#/components/schemas/Meta" data: - $ref: "#/components/schemas/V2ApisCreateApiResponseData" - additionalProperties: false + $ref: "#/components/schemas/V2AnalyticsGetVerificationsResponseData" UnauthorizedErrorResponse: type: object required: @@ -154,6 +149,103 @@ components: - Access to the requested resource is restricted based on workspace settings To resolve this error, ensure your root key has the necessary permissions or contact your workspace administrator. + NotFoundErrorResponse: + type: object + required: + - meta + - error + properties: + meta: + $ref: "#/components/schemas/Meta" + error: + $ref: "#/components/schemas/BaseError" + description: |- + Error response when the requested resource cannot be found. This occurs when: + - The specified resource ID doesn't exist in your workspace + - The resource has been deleted or moved + - The resource exists but is not accessible with current permissions + + To resolve this error, verify the resource ID is correct and that you have access to it. + UnprocessableEntityErrorResponse: + type: object + required: + - meta + - error + properties: + meta: + $ref: "#/components/schemas/Meta" + error: + $ref: "#/components/schemas/BaseError" + description: |- + Error response when the request is syntactically valid but cannot be processed due to semantic constraints or resource limitations. 
This occurs when: + - A query exceeds execution time limits + - A query uses more memory than allowed + - A query scans too many rows + - A query result exceeds size limits + + The request syntax is correct, but the operation cannot be completed due to business rules or resource constraints. Review the error details for specific limitations and adjust your request accordingly. + TooManyRequestsErrorResponse: + type: object + required: + - meta + - error + properties: + meta: + $ref: "#/components/schemas/Meta" + error: + $ref: "#/components/schemas/BaseError" + description: |- + Error response when the client has sent too many requests in a given time period. This occurs when you've exceeded a rate limit or quota for the resource you're accessing. + + The rate limit resets automatically after the time window expires. To avoid this error: + - Implement exponential backoff when retrying requests + - Cache results where appropriate to reduce request frequency + - Check the error detail message for specific quota information + - Contact support if you need a higher quota for your use case + ServiceUnavailableErrorResponse: + type: object + required: + - meta + - error + properties: + meta: + $ref: "#/components/schemas/Meta" + error: + $ref: "#/components/schemas/BaseError" + description: |- + Error response when a required service is temporarily unavailable. This indicates that the service exists but cannot be reached or is not responding. + + When you encounter this error: + - The service is likely experiencing temporary issues + - Retrying the request after a short delay may succeed + - If the error persists, the service may be undergoing maintenance + - Contact Unkey support if the issue continues + V2ApisCreateApiRequestBody: + type: object + required: + - name + properties: + name: + type: string + minLength: 3 + maxLength: 255 + pattern: "^[a-zA-Z][a-zA-Z0-9._-]*$" + description: | + Unique identifier for this API namespace within your workspace. 
+ Use descriptive names like 'payment-service-prod' or 'user-api-dev' to clearly identify purpose and environment. + example: payment-service-production + additionalProperties: false + V2ApisCreateApiResponseBody: + type: object + required: + - meta + - data + properties: + meta: + $ref: "#/components/schemas/Meta" + data: + $ref: "#/components/schemas/V2ApisCreateApiResponseData" + additionalProperties: false V2ApisDeleteApiRequestBody: type: object required: @@ -182,23 +274,6 @@ components: data: $ref: "#/components/schemas/EmptyResponse" additionalProperties: false - NotFoundErrorResponse: - type: object - required: - - meta - - error - properties: - meta: - $ref: "#/components/schemas/Meta" - error: - $ref: "#/components/schemas/BaseError" - description: |- - Error response when the requested resource cannot be found. This occurs when: - - The specified resource ID doesn't exist in your workspace - - The resource has been deleted or moved - - The resource exists but is not accessible with current permissions - - To resolve this error, verify the resource ID is correct and that you have access to it. PreconditionFailedErrorResponse: type: object required: @@ -2034,6 +2109,20 @@ components: - message type: object description: Individual validation error details. Each validation error provides precise information about what failed, where it failed, and how to fix it, enabling efficient error resolution. + V2AnalyticsGetVerificationsResponseData: + type: array + description: Array of verification rows returned by the query. Fields vary based on the SQL SELECT clause. + items: + type: object + additionalProperties: true + description: Dynamic row with fields determined by the query. Can include any combination of fields like time, outcome, count, key_id, etc. 
+ example: + - outcome: "VALID" + count: 1234 + time: 1696118400000 + - outcome: "RATE_LIMITED" + count: 56 + time: 1696118400000 V2ApisCreateApiResponseData: type: object properties: @@ -3256,6 +3345,81 @@ paths: - chproxy x-excluded: true x-speakeasy-ignore: true + /v2/analytics.getVerifications: + post: + description: | + Execute custom SQL queries against your key verification analytics. + For complete documentation including available tables, columns, data types, query examples, see the schema reference in the API documentation. + operationId: analytics.getVerifications + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/V2AnalyticsGetVerificationsRequestBody' + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/V2AnalyticsGetVerificationsResponseBody' + description: Query executed successfully + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/BadRequestErrorResponse' + description: Invalid SQL query or blocked operation + "401": + content: + application/json: + schema: + $ref: '#/components/schemas/UnauthorizedErrorResponse' + description: Missing or invalid authentication + "403": + content: + application/json: + schema: + $ref: '#/components/schemas/ForbiddenErrorResponse' + description: Insufficient permissions + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/NotFoundErrorResponse' + description: Referenced API or identity not found + "422": + content: + application/json: + schema: + $ref: '#/components/schemas/UnprocessableEntityErrorResponse' + description: Query exceeds resource limits (timeout, memory, rows scanned, or result size) + "429": + content: + application/json: + schema: + $ref: '#/components/schemas/TooManyRequestsErrorResponse' + description: Query quota exceeded + "500": + content: + application/json: + schema: + $ref: '#/components/schemas/InternalServerErrorResponse' + description: Query 
execution failed + "503": + content: + application/json: + schema: + $ref: '#/components/schemas/ServiceUnavailableErrorResponse' + description: Connection to the database failed + security: + - rootKey: [] + summary: Query key verification data + tags: + - analytics + x-hidden: true + x-speakeasy-ignore: true + x-speakeasy-name-override: getVerifications /v2/apis.createApi: post: description: | @@ -6046,6 +6210,8 @@ security: servers: - url: https://api.unkey.com tags: + - description: Analytics query operations + name: analytics - description: API management operations name: apis - description: Identity management operations diff --git a/go/apps/api/openapi/openapi-split.yaml b/go/apps/api/openapi/openapi-split.yaml index 1ded55ff29..8b2c348a3d 100644 --- a/go/apps/api/openapi/openapi-split.yaml +++ b/go/apps/api/openapi/openapi-split.yaml @@ -98,6 +98,8 @@ security: - rootKey: [] tags: + - name: analytics + description: Analytics query operations - name: apis description: API management operations - name: identities @@ -116,6 +118,10 @@ paths: /v2/liveness: $ref: "./spec/paths/v2/liveness/index.yaml" + # Analytics Endpoints + /v2/analytics.getVerifications: + $ref: "./spec/paths/v2/analytics/getVerifications/index.yaml" + # API Endpoints /v2/apis.createApi: $ref: "./spec/paths/v2/apis/createApi/index.yaml" diff --git a/go/apps/api/openapi/spec/error/ServiceUnavailableErrorResponse.yaml b/go/apps/api/openapi/spec/error/ServiceUnavailableErrorResponse.yaml new file mode 100644 index 0000000000..1fe4ed1d44 --- /dev/null +++ b/go/apps/api/openapi/spec/error/ServiceUnavailableErrorResponse.yaml @@ -0,0 +1,17 @@ +type: object +required: + - meta + - error +properties: + meta: + $ref: "../common/Meta.yaml" + error: + $ref: "./BaseError.yaml" +description: |- + Error response when a required service is temporarily unavailable. This indicates that the service exists but cannot be reached or is not responding. 
+ + When you encounter this error: + - The service is likely experiencing temporary issues + - Retrying the request after a short delay may succeed + - If the error persists, the service may be undergoing maintenance + - Contact Unkey support if the issue continues diff --git a/go/apps/api/openapi/spec/error/TooManyRequestsErrorResponse.yaml b/go/apps/api/openapi/spec/error/TooManyRequestsErrorResponse.yaml new file mode 100644 index 0000000000..0e09d6b084 --- /dev/null +++ b/go/apps/api/openapi/spec/error/TooManyRequestsErrorResponse.yaml @@ -0,0 +1,17 @@ +type: object +required: + - meta + - error +properties: + meta: + $ref: "../common/Meta.yaml" + error: + $ref: "./BaseError.yaml" +description: |- + Error response when the client has sent too many requests in a given time period. This occurs when you've exceeded a rate limit or quota for the resource you're accessing. + + The rate limit resets automatically after the time window expires. To avoid this error: + - Implement exponential backoff when retrying requests + - Cache results where appropriate to reduce request frequency + - Check the error detail message for specific quota information + - Contact support if you need a higher quota for your use case diff --git a/go/apps/api/openapi/spec/error/UnprocessableEntityErrorResponse.yaml b/go/apps/api/openapi/spec/error/UnprocessableEntityErrorResponse.yaml new file mode 100644 index 0000000000..ce77284678 --- /dev/null +++ b/go/apps/api/openapi/spec/error/UnprocessableEntityErrorResponse.yaml @@ -0,0 +1,17 @@ +type: object +required: + - meta + - error +properties: + meta: + $ref: "../common/Meta.yaml" + error: + $ref: "./BaseError.yaml" +description: |- + Error response when the request is syntactically valid but cannot be processed due to semantic constraints or resource limitations. 
This occurs when: + - A query exceeds execution time limits + - A query uses more memory than allowed + - A query scans too many rows + - A query result exceeds size limits + + The request syntax is correct, but the operation cannot be completed due to business rules or resource constraints. Review the error details for specific limitations and adjust your request accordingly. diff --git a/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/V2AnalyticsGetVerificationsRequestBody.yaml b/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/V2AnalyticsGetVerificationsRequestBody.yaml new file mode 100644 index 0000000000..4f4396f9b8 --- /dev/null +++ b/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/V2AnalyticsGetVerificationsRequestBody.yaml @@ -0,0 +1,10 @@ +type: object +required: + - query +properties: + query: + type: string + description: | + SQL query to execute against your analytics data. + Only SELECT queries are allowed. + example: "SELECT COUNT(*) as total FROM key_verifications_v1 WHERE outcome = 'VALID' AND time >= now() - INTERVAL 7 DAY" diff --git a/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/V2AnalyticsGetVerificationsResponseBody.yaml b/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/V2AnalyticsGetVerificationsResponseBody.yaml new file mode 100644 index 0000000000..7156d3d507 --- /dev/null +++ b/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/V2AnalyticsGetVerificationsResponseBody.yaml @@ -0,0 +1,9 @@ +type: object +required: + - meta + - data +properties: + meta: + $ref: "../../../../common/Meta.yaml" + data: + $ref: "./V2AnalyticsGetVerificationsResponseData.yaml" diff --git a/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/V2AnalyticsGetVerificationsResponseData.yaml b/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/V2AnalyticsGetVerificationsResponseData.yaml new file mode 100644 index 0000000000..83655b2c5a --- /dev/null +++ 
b/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/V2AnalyticsGetVerificationsResponseData.yaml @@ -0,0 +1,13 @@ +type: array +description: Array of verification rows returned by the query. Fields vary based on the SQL SELECT clause. +items: + type: object + additionalProperties: true + description: Dynamic row with fields determined by the query. Can include any combination of fields like time, outcome, count, key_id, etc. +example: + - outcome: "VALID" + count: 1234 + time: 1696118400000 + - outcome: "RATE_LIMITED" + count: 56 + time: 1696118400000 diff --git a/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/index.yaml b/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/index.yaml new file mode 100644 index 0000000000..3e6ce834ba --- /dev/null +++ b/go/apps/api/openapi/spec/paths/v2/analytics/getVerifications/index.yaml @@ -0,0 +1,76 @@ +post: + tags: + - analytics + security: + - rootKey: [] + # Hides from mintlify + x-hidden: true + x-speakeasy-name-override: getVerifications + # Hides from sdk generation + x-speakeasy-ignore: true + operationId: analytics.getVerifications + summary: Query key verification data + description: | + Execute custom SQL queries against your key verification analytics. + For complete documentation including available tables, columns, data types, query examples, see the schema reference in the API documentation. 
+ requestBody: + required: true + content: + application/json: + schema: + "$ref": "./V2AnalyticsGetVerificationsRequestBody.yaml" + responses: + "200": + content: + application/json: + schema: + "$ref": "./V2AnalyticsGetVerificationsResponseBody.yaml" + description: Query executed successfully + "400": + content: + application/json: + schema: + $ref: "../../../../error/BadRequestErrorResponse.yaml" + description: Invalid SQL query or blocked operation + "401": + content: + application/json: + schema: + $ref: "../../../../error/UnauthorizedErrorResponse.yaml" + description: Missing or invalid authentication + "403": + content: + application/json: + schema: + $ref: "../../../../error/ForbiddenErrorResponse.yaml" + description: Insufficient permissions + "404": + content: + application/json: + schema: + $ref: "../../../../error/NotFoundErrorResponse.yaml" + description: Referenced API or identity not found + "422": + content: + application/json: + schema: + $ref: "../../../../error/UnprocessableEntityErrorResponse.yaml" + description: Query exceeds resource limits (timeout, memory, rows scanned, or result size) + "429": + content: + application/json: + schema: + $ref: "../../../../error/TooManyRequestsErrorResponse.yaml" + description: Query quota exceeded + "500": + content: + application/json: + schema: + $ref: "../../../../error/InternalServerErrorResponse.yaml" + description: Query execution failed + "503": + content: + application/json: + schema: + $ref: "../../../../error/ServiceUnavailableErrorResponse.yaml" + description: Connection to the database failed diff --git a/go/apps/api/routes/register.go b/go/apps/api/routes/register.go index 7bf670540a..6f3f211dcb 100644 --- a/go/apps/api/routes/register.go +++ b/go/apps/api/routes/register.go @@ -52,6 +52,8 @@ import ( v2KeysVerifyKey "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_verify_key" v2KeysWhoami "github.com/unkeyed/unkey/go/apps/api/routes/v2_keys_whoami" + v2AnalyticsGetVerifications 
"github.com/unkeyed/unkey/go/apps/api/routes/v2_analytics_get_verifications" + zen "github.com/unkeyed/unkey/go/pkg/zen" ) @@ -539,6 +541,22 @@ func Register(srv *zen.Server, svc *Services) { }, ) + // --------------------------------------------------------------------------- + // v2/analytics + + // v2/analytics.getVerifications + srv.RegisterRoute( + defaultMiddlewares, + &v2AnalyticsGetVerifications.Handler{ + Logger: svc.Logger, + DB: svc.Database, + Keys: svc.Keys, + ClickHouse: svc.ClickHouse, + AnalyticsConnectionManager: svc.AnalyticsConnectionManager, + Caches: svc.Caches, + }, + ) + // --------------------------------------------------------------------------- // misc diff --git a/go/apps/api/routes/services.go b/go/apps/api/routes/services.go index 2de61bb143..7c9ea58914 100644 --- a/go/apps/api/routes/services.go +++ b/go/apps/api/routes/services.go @@ -1,6 +1,7 @@ package routes import ( + "github.com/unkeyed/unkey/go/internal/services/analytics" "github.com/unkeyed/unkey/go/internal/services/auditlogs" "github.com/unkeyed/unkey/go/internal/services/caches" "github.com/unkeyed/unkey/go/internal/services/keys" @@ -14,15 +15,16 @@ import ( ) type Services struct { - Logger logging.Logger - Database db.Database - Keys keys.KeyService - ClickHouse clickhouse.ClickHouse - Validator *validation.Validator - Ratelimit ratelimit.Service - Auditlogs auditlogs.AuditLogService - Caches caches.Caches - Vault *vault.Service - ChproxyToken string - UsageLimiter usagelimiter.Service + Logger logging.Logger + Database db.Database + Keys keys.KeyService + ClickHouse clickhouse.ClickHouse + Validator *validation.Validator + Ratelimit ratelimit.Service + Auditlogs auditlogs.AuditLogService + Caches caches.Caches + Vault *vault.Service + ChproxyToken string + UsageLimiter usagelimiter.Service + AnalyticsConnectionManager analytics.ConnectionManager } diff --git a/go/apps/api/routes/v2_analytics_get_verifications/200_test.go 
b/go/apps/api/routes/v2_analytics_get_verifications/200_test.go new file mode 100644 index 0000000000..3a5c9b3a08 --- /dev/null +++ b/go/apps/api/routes/v2_analytics_get_verifications/200_test.go @@ -0,0 +1,241 @@ +package handler + +import ( + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/pkg/clickhouse/schema" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func Test200_Success(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + api := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + h.SetupAnalytics(workspace.ID) + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_analytics") + + now := h.Clock.Now().UnixMilli() + + // Buffer some key verifications + for i := range 5 { + h.ClickHouse.BufferKeyVerification(schema.KeyVerificationRequestV1{ + RequestID: uid.New(uid.RequestPrefix), + Time: now - int64(i*1000), + WorkspaceID: workspace.ID, + KeySpaceID: api.KeyAuthID.String, + KeyID: uid.New(uid.KeyPrefix), + Region: "us-west-1", + Outcome: "VALID", + IdentityID: "", + Tags: []string{}, + }) + } + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + req := Request{ + Query: "SELECT COUNT(*) as count FROM key_verifications_v1", + } + + // Wait for buffered data to be available + time.Sleep(2 * time.Second) + + res := testutil.CallRoute[Request, Response](h, route, headers, req) + t.Logf("Status: %d, RawBody: %s", res.Status, res.RawBody) + require.Equal(t, 200, res.Status) + require.NotNil(t, res.Body) + require.Len(t, res.Body.Data, 1) +} + 
+func Test200_PermissionFiltersByApiId(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + api1 := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + api2 := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + h.SetupAnalytics(workspace.ID) + + // Create root key with permission ONLY for api1 + rootKey := h.CreateRootKey(workspace.ID, "api."+api1.ID+".read_analytics") + + now := h.Clock.Now().UnixMilli() + + // Buffer verifications for api1 + for i := range 3 { + h.ClickHouse.BufferKeyVerification(schema.KeyVerificationRequestV1{ + RequestID: uid.New(uid.RequestPrefix), + Time: now - int64(i*1000), + WorkspaceID: workspace.ID, + KeySpaceID: api1.KeyAuthID.String, + KeyID: uid.New(uid.KeyPrefix), + Region: "us-west-1", + Outcome: "VALID", + IdentityID: "", + Tags: []string{}, + }) + } + + // Buffer verifications for api2 (should NOT be returned) + for i := range 5 { + h.ClickHouse.BufferKeyVerification(schema.KeyVerificationRequestV1{ + RequestID: uid.New(uid.RequestPrefix), + Time: now - int64(i*1000), + WorkspaceID: workspace.ID, + KeySpaceID: api2.KeyAuthID.String, + KeyID: uid.New(uid.KeyPrefix), + Region: "us-east-1", + Outcome: "VALID", + IdentityID: "", + Tags: []string{}, + }) + } + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + // Query all verifications - should only return api1's due to permission filter + req := Request{ + Query: "SELECT COUNT(*) as count FROM key_verifications_v1", + } + + // Wait for buffered data to be available + require.EventuallyWithT(t, func(c *assert.CollectT) { + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(c, 200, res.Status) + 
require.NotNil(c, res.Body) + require.Len(c, res.Body.Data, 1) + + // Verify the count is 3 (only api1's verifications), not 8 (api1 + api2) + count, ok := res.Body.Data[0]["count"] + require.True(c, ok, "count field should exist") + require.Equal(c, float64(3), count, "should only return verifications for api1") + }, 30*time.Second, time.Second) +} + +func Test200_PermissionFiltersByKeySpaceId(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + api1 := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + api2 := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + h.SetupAnalytics(workspace.ID) + + // Create root key with permission ONLY for api1 + rootKey := h.CreateRootKey(workspace.ID, "api."+api1.ID+".read_analytics") + + now := h.Clock.Now().UnixMilli() + + // Buffer verifications for api1 + for i := range 3 { + h.ClickHouse.BufferKeyVerification(schema.KeyVerificationRequestV1{ + RequestID: uid.New(uid.RequestPrefix), + Time: now - int64(i*1000), + WorkspaceID: workspace.ID, + KeySpaceID: api1.KeyAuthID.String, + KeyID: uid.New(uid.KeyPrefix), + Region: "us-west-1", + Outcome: "VALID", + IdentityID: "", + Tags: []string{}, + }) + } + + // Buffer verifications for api2 (should NOT be returned) + for i := range 5 { + h.ClickHouse.BufferKeyVerification(schema.KeyVerificationRequestV1{ + RequestID: uid.New(uid.RequestPrefix), + Time: now - int64(i*1000), + WorkspaceID: workspace.ID, + KeySpaceID: api2.KeyAuthID.String, + KeyID: uid.New(uid.KeyPrefix), + Region: "us-east-1", + Outcome: "VALID", + IdentityID: "", + Tags: []string{}, + }) + } + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + // Query with both 
key_space_ids in WHERE clause + // Should only return data for api1 due to permission filter + req := Request{ + Query: "SELECT key_space_id, COUNT(*) as count FROM key_verifications_v1 GROUP BY key_space_id", + } + + // Wait for buffered data to be available + require.EventuallyWithT(t, func(c *assert.CollectT) { + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(c, 200, res.Status) + require.NotNil(c, res.Body) + + // Should only return 1 group (api1's key_space_id), not 2 + require.Len(c, res.Body.Data, 1) + + // Verify it's api1's key_space_id + keySpaceID, ok := res.Body.Data[0]["key_space_id"] + require.True(c, ok, "key_space_id field should exist") + require.Equal(c, api1.KeyAuthID.String, keySpaceID) + + // Verify the count is 3 + count, ok := res.Body.Data[0]["count"] + require.True(c, ok, "count field should exist") + require.Equal(c, float64(3), count) + }, 30*time.Second, time.Second) +} diff --git a/go/apps/api/routes/v2_analytics_get_verifications/400_test.go b/go/apps/api/routes/v2_analytics_get_verifications/400_test.go new file mode 100644 index 0000000000..7c8b7addfe --- /dev/null +++ b/go/apps/api/routes/v2_analytics_get_verifications/400_test.go @@ -0,0 +1,187 @@ +package handler + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/apps/api/openapi" + "github.com/unkeyed/unkey/go/pkg/testutil" +) + +func Test400_EmptyQuery(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + h.SetupAnalytics(workspace.ID) + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_analytics") + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + req := Request{ + 
Query: "", + } + + res := testutil.CallRoute[Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status, "Empty query should return 400") + require.NotNil(t, res.Body) + require.Contains(t, res.Body.Error.Type, "invalid_input") + require.NotEmpty(t, res.Body.Error.Detail, "Error should have a descriptive message") +} + +func Test400_InvalidSQLSyntax(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + h.SetupAnalytics(workspace.ID) + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_analytics") + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + req := Request{ + Query: "SELECT * FROM key_verifications_v1 WHERE invalid syntax here", + } + + res := testutil.CallRoute[Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status, "Invalid SQL syntax should return 400") + require.NotNil(t, res.Body) + // Parser may catch this as invalid_input or invalid_analytics_query depending on when it's detected + require.True(t, + res.Body.Error.Type == "https://unkey.com/docs/errors/unkey/application/invalid_input" || + res.Body.Error.Type == "https://unkey.com/docs/errors/unkey/user/bad_request/invalid_analytics_query", + "Error type should be invalid_input or invalid_analytics_query") + require.NotEmpty(t, res.Body.Error.Detail, "Error should show syntax error message") +} + +func Test400_UnknownColumn(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + h.SetupAnalytics(workspace.ID) + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_analytics") + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + 
AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + req := Request{ + Query: "SELECT nonexistent_column FROM key_verifications_v1 WHERE time >= now() - INTERVAL 7 DAY", + } + + res := testutil.CallRoute[Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status, "Unknown column should return 400") + require.NotNil(t, res.Body) + require.Contains(t, res.Body.Error.Type, "invalid_analytics_query") + require.Contains(t, res.Body.Error.Detail, "Unknown", "Error should mention unknown column") +} + +func Test400_InvalidTable(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + h.SetupAnalytics(workspace.ID) + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_analytics") + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + req := Request{ + Query: "SELECT * FROM system.tables", + } + + res := testutil.CallRoute[Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status, "Invalid table should return 400") + require.NotNil(t, res.Body) + // Parser may catch this as invalid_input or invalid_analytics_table depending on when it's detected + require.True(t, + res.Body.Error.Type == "https://unkey.com/docs/errors/unkey/application/invalid_input" || + res.Body.Error.Type == "https://unkey.com/docs/errors/unkey/user/bad_request/invalid_analytics_table", + "Error type should be invalid_input or invalid_analytics_table") + require.NotEmpty(t, res.Body.Error.Detail, "Error should have a descriptive 
message") +} + +func Test400_NonSelectQuery(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + h.SetupAnalytics(workspace.ID) + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_analytics") + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + req := Request{ + Query: "DELETE FROM key_verifications_v1 WHERE time < now()", + } + + res := testutil.CallRoute[Request, openapi.BadRequestErrorResponse](h, route, headers, req) + require.Equal(t, 400, res.Status, "Non-SELECT query should return 400") + require.NotNil(t, res.Body) + // Parser may catch this as invalid_input or invalid_analytics_query_type depending on when it's detected + require.True(t, + res.Body.Error.Type == "https://unkey.com/docs/errors/unkey/application/invalid_input" || + res.Body.Error.Type == "https://unkey.com/docs/errors/unkey/user/bad_request/invalid_analytics_query_type", + "Error type should be invalid_input or invalid_analytics_query_type") + require.NotEmpty(t, res.Body.Error.Detail, "Error should have a descriptive message") +} diff --git a/go/apps/api/routes/v2_analytics_get_verifications/401_test.go b/go/apps/api/routes/v2_analytics_get_verifications/401_test.go new file mode 100644 index 0000000000..d9d877bea7 --- /dev/null +++ b/go/apps/api/routes/v2_analytics_get_verifications/401_test.go @@ -0,0 +1,60 @@ +package handler + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/pkg/testutil" +) + +func Test401_NoAuthHeader(t *testing.T) { + h := testutil.NewHarness(t) + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: 
h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Content-Type": []string{"application/json"}, + } + + req := Request{ + Query: "SELECT COUNT(*) FROM key_verifications_v1", + } + + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(t, 400, res.Status) // Auth failures return 400, not 401 +} + +func Test401_InvalidRootKey(t *testing.T) { + h := testutil.NewHarness(t) + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer invalid_key_123"}, + "Content-Type": []string{"application/json"}, + } + + req := Request{ + Query: "SELECT COUNT(*) FROM key_verifications_v1", + } + + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(t, 401, res.Status) +} diff --git a/go/apps/api/routes/v2_analytics_get_verifications/403_test.go b/go/apps/api/routes/v2_analytics_get_verifications/403_test.go new file mode 100644 index 0000000000..48133eccec --- /dev/null +++ b/go/apps/api/routes/v2_analytics_get_verifications/403_test.go @@ -0,0 +1,85 @@ +package handler + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" +) + +func Test403_NoAnalyticsPermission(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + _ = h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + h.SetupAnalytics(workspace.ID) + + // Create root key WITHOUT read_analytics permission + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_api") + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: 
h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + req := Request{ + Query: "SELECT COUNT(*) FROM key_verifications_v1", + } + + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(t, 403, res.Status) +} + +func Test403_WrongApiPermission(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + api1 := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + api2 := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + h.SetupAnalytics(workspace.ID) + + // Create root key with permission only for api1 + rootKey := h.CreateRootKey(workspace.ID, "api."+api1.ID+".read_analytics") + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + // Query filtering by api2's key_space_id but user only has permission for api1 + req := Request{ + Query: fmt.Sprintf("SELECT COUNT(*) FROM key_verifications_v1 WHERE key_space_id = '%s'", api2.KeyAuthID.String), + } + + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(t, 403, res.Status) +} diff --git a/go/apps/api/routes/v2_analytics_get_verifications/404_test.go b/go/apps/api/routes/v2_analytics_get_verifications/404_test.go new file mode 100644 index 0000000000..5448074ab2 --- /dev/null +++ b/go/apps/api/routes/v2_analytics_get_verifications/404_test.go @@ -0,0 +1,45 @@ +package handler + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" +) + +func Test404_KeySpaceNotFound(t *testing.T) { 
+ h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + _ = h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + h.SetupAnalytics(workspace.ID) + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_analytics") + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + // Query with non-existent key_space_id + req := Request{ + Query: fmt.Sprintf("SELECT COUNT(*) FROM key_verifications_v1 WHERE key_space_id = '%s'", "ks_nonexistent123"), + } + + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(t, 404, res.Status) // Key space not found +} diff --git a/go/apps/api/routes/v2_analytics_get_verifications/412_test.go b/go/apps/api/routes/v2_analytics_get_verifications/412_test.go new file mode 100644 index 0000000000..49d70ec76a --- /dev/null +++ b/go/apps/api/routes/v2_analytics_get_verifications/412_test.go @@ -0,0 +1,45 @@ +package handler + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" +) + +func Test412_AnalyticsNotConfigured(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + _ = h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_analytics") + + // Do NOT set up ClickHouse workspace settings + // This will cause GetConnection to fail + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " 
+ rootKey}, + "Content-Type": []string{"application/json"}, + } + + req := Request{ + Query: "SELECT COUNT(*) FROM key_verifications_v1", + } + + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(t, 412, res.Status) // Analytics not configured returns 412 Precondition Failed +} diff --git a/go/apps/api/routes/v2_analytics_get_verifications/422_test.go b/go/apps/api/routes/v2_analytics_get_verifications/422_test.go new file mode 100644 index 0000000000..0ad8e3b6a2 --- /dev/null +++ b/go/apps/api/routes/v2_analytics_get_verifications/422_test.go @@ -0,0 +1,70 @@ +package handler + +import ( + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/pkg/clickhouse/schema" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func Test422_ExceedsMaxMemory(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + api := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + + // Set up analytics with very low MaxQueryMemoryBytes (10KB - very restrictive) + h.SetupAnalytics(workspace.ID, testutil.WithMaxQueryMemoryBytes(10_000)) + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_analytics") + + now := h.Clock.Now().UnixMilli() + + // Buffer many verifications to ensure memory usage exceeds limit + for i := range 50_000 { + h.ClickHouse.BufferKeyVerification(schema.KeyVerificationRequestV1{ + RequestID: uid.New(uid.RequestPrefix), + Time: now - int64(i*1000), + WorkspaceID: workspace.ID, + KeySpaceID: api.KeyAuthID.String, + KeyID: uid.New(uid.KeyPrefix), + Region: "us-west-1", + Outcome: "VALID", + IdentityID: "", + Tags: []string{}, + }) + } + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := 
http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + // Use a memory-intensive query with aggregation and grouping + req := Request{ + Query: "SELECT key_id, region, outcome, COUNT(*) as count FROM key_verifications_v1 GROUP BY key_id, region, outcome", + } + + // Wait for data to be buffered and flushed to ClickHouse + time.Sleep(10 * time.Second) + + // Query should fail with 422 due to max_memory_usage limit being exceeded + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(t, 422, res.Status) +} diff --git a/go/apps/api/routes/v2_analytics_get_verifications/429_test.go b/go/apps/api/routes/v2_analytics_get_verifications/429_test.go new file mode 100644 index 0000000000..d6e7fffb33 --- /dev/null +++ b/go/apps/api/routes/v2_analytics_get_verifications/429_test.go @@ -0,0 +1,72 @@ +package handler + +import ( + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/pkg/clickhouse/schema" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +func Test429_QueryQuotaExceeded(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + api := h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + // Set quota to allow only 1 query per window + h.SetupAnalytics(workspace.ID, testutil.WithMaxQueriesPerWindow(1)) + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_analytics") + + now := h.Clock.Now().UnixMilli() + + // Buffer some key verifications + for i := 0; i < 5; i++ { + h.ClickHouse.BufferKeyVerification(schema.KeyVerificationRequestV1{ + RequestID: uid.New(uid.RequestPrefix), + Time: now - int64(i*1000), + WorkspaceID: workspace.ID, + KeySpaceID: api.KeyAuthID.String, + KeyID: uid.New(uid.KeyPrefix), + Region: "us-west-1", + Outcome: "VALID", + 
IdentityID: "", + Tags: []string{}, + }) + } + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + req := Request{ + Query: "SELECT COUNT(*) as count FROM key_verifications_v1", + } + + // Wait for data, first query should succeed + require.EventuallyWithT(t, func(c *assert.CollectT) { + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(c, 200, res.Status) + }, 30*time.Second, time.Second) + + // Second query should fail with 429 (quota exceeded) + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(t, 429, res.Status) +} diff --git a/go/apps/api/routes/v2_analytics_get_verifications/503_test.go b/go/apps/api/routes/v2_analytics_get_verifications/503_test.go new file mode 100644 index 0000000000..7aee3bec69 --- /dev/null +++ b/go/apps/api/routes/v2_analytics_get_verifications/503_test.go @@ -0,0 +1,62 @@ +package handler + +import ( + "context" + "database/sql" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/testutil" + "github.com/unkeyed/unkey/go/pkg/testutil/seed" +) + +func Test503_ClickHouseConnectionFailure(t *testing.T) { + h := testutil.NewHarness(t) + + workspace := h.CreateWorkspace() + _ = h.CreateApi(seed.CreateApiRequest{ + WorkspaceID: workspace.ID, + }) + rootKey := h.CreateRootKey(workspace.ID, "api.*.read_analytics") + + // Set up ClickHouse workspace settings with invalid connection info + now := h.Clock.Now().UnixMilli() + err := db.Query.InsertClickhouseWorkspaceSettings(context.Background(), h.DB.RW(), db.InsertClickhouseWorkspaceSettingsParams{ + WorkspaceID: workspace.ID, + Username: workspace.ID, + 
PasswordEncrypted: "invalid_password", // Invalid password will cause connection failure + QuotaDurationSeconds: 3600, + MaxQueriesPerWindow: 1000, + MaxExecutionTimePerWindow: 1800, + MaxQueryExecutionTime: 30, + MaxQueryMemoryBytes: 1_000_000_000, + MaxQueryResultRows: 10_000_000, + CreatedAt: now, + UpdatedAt: sql.NullInt64{Valid: true, Int64: now}, + }) + require.NoError(t, err) + + route := &Handler{ + Logger: h.Logger, + DB: h.DB, + Keys: h.Keys, + ClickHouse: h.ClickHouse, + AnalyticsConnectionManager: h.AnalyticsConnectionManager, + Caches: h.Caches, + } + h.Register(route) + + headers := http.Header{ + "Authorization": []string{"Bearer " + rootKey}, + "Content-Type": []string{"application/json"}, + } + + req := Request{ + Query: "SELECT COUNT(*) FROM key_verifications_v1", + } + + res := testutil.CallRoute[Request, Response](h, route, headers, req) + require.Equal(t, 503, res.Status) // Invalid password causes connection failure (503) +} diff --git a/go/apps/api/routes/v2_analytics_get_verifications/handler.go b/go/apps/api/routes/v2_analytics_get_verifications/handler.go new file mode 100644 index 0000000000..a4e1bb2cc4 --- /dev/null +++ b/go/apps/api/routes/v2_analytics_get_verifications/handler.go @@ -0,0 +1,305 @@ +package handler + +import ( + "context" + "fmt" + "net/http" + "slices" + "strings" + + "github.com/unkeyed/unkey/go/apps/api/openapi" + "github.com/unkeyed/unkey/go/internal/services/analytics" + "github.com/unkeyed/unkey/go/internal/services/caches" + "github.com/unkeyed/unkey/go/internal/services/keys" + "github.com/unkeyed/unkey/go/pkg/array" + "github.com/unkeyed/unkey/go/pkg/cache" + "github.com/unkeyed/unkey/go/pkg/clickhouse" + chquery "github.com/unkeyed/unkey/go/pkg/clickhouse/query-parser" + "github.com/unkeyed/unkey/go/pkg/codes" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/fault" + "github.com/unkeyed/unkey/go/pkg/otel/logging" + "github.com/unkeyed/unkey/go/pkg/rbac" + 
"github.com/unkeyed/unkey/go/pkg/zen" +) + +type Request = openapi.V2AnalyticsGetVerificationsRequestBody +type Response = openapi.V2AnalyticsGetVerificationsResponseBody +type ResponseData = openapi.V2AnalyticsGetVerificationsResponseData + +var ( + tableAliases = map[string]string{ + "key_verifications_v1": "default.key_verifications_raw_v2", + "key_verifications_per_minute_v1": "default.key_verifications_per_minute_v2", + "key_verifications_per_hour_v1": "default.key_verifications_per_hour_v2", + "key_verifications_per_day_v1": "default.key_verifications_per_day_v2", + "key_verifications_per_month_v1": "default.key_verifications_per_month_v2", + } + + allowedTables = []string{ + "default.key_verifications_raw_v2", + "default.key_verifications_per_minute_v2", + "default.key_verifications_per_hour_v2", + "default.key_verifications_per_day_v2", + "default.key_verifications_per_month_v2", + } +) + +// Handler implements zen.Route interface for the v2 Analytics get verifications endpoint +type Handler struct { + Logger logging.Logger + DB db.Database + Keys keys.KeyService + ClickHouse clickhouse.ClickHouse + AnalyticsConnectionManager analytics.ConnectionManager + Caches caches.Caches +} + +// Method returns the HTTP method this route responds to +func (h *Handler) Method() string { + return "POST" +} + +// Path returns the URL path pattern this route matches +func (h *Handler) Path() string { + return "/v2/analytics.getVerifications" +} + +// Handle processes the HTTP request +func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { + h.Logger.Debug("handling request", "requestId", s.RequestID(), "path", "/v2/analytics.getVerifications") + + auth, emit, err := h.Keys.GetRootKey(ctx, s) + defer emit() + if err != nil { + return err + } + + req, err := zen.BindBody[Request](s) + if err != nil { + return err + } + + // Get workspace-specific ClickHouse connection and settings first + conn, settings, err := h.AnalyticsConnectionManager.GetConnection(ctx, 
auth.AuthorizedWorkspaceID) + if err != nil { + return err + } + + // Build a list of keySpaceIds that the root key has permissions for. + securityFilters, err := h.buildSecurityFilters(ctx, auth) + if err != nil { + return err + } + + parser := chquery.NewParser(chquery.Config{ + WorkspaceID: auth.AuthorizedWorkspaceID, + Limit: int(settings.MaxQueryResultRows), + SecurityFilters: securityFilters, + TableAliases: tableAliases, + AllowedTables: allowedTables, + }) + + parsedQuery, err := parser.Parse(ctx, req.Query) + if err != nil { + return fault.Wrap(err, + fault.Code(codes.App.Validation.InvalidInput.URN()), + fault.Public("Invalid SQL query"), + ) + } + + // Now we build permission checks based on the key_space_id(s) one specified in the query itself + // If none are specified, we will just check if there is wildcard read_analytics permission set + permissionChecks := []rbac.PermissionQuery{ + // Wildcard API analytics access + rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: "*", + Action: rbac.ReadAnalytics, + }), + } + + keySpaceIds := parser.ExtractColumn("key_space_id") + if len(keySpaceIds) > 0 { + apiPermissions, err := h.buildAPIPermissionsFromKeySpaces(ctx, auth, keySpaceIds) + if err != nil { + return err + } + permissionChecks = append(permissionChecks, rbac.And(apiPermissions...)) + } + + // Verify user has at least one of: api.*.read_analytics OR (api..read_analytics AND api..read_analytics) + err = auth.VerifyRootKey(ctx, keys.WithPermissions(rbac.Or(permissionChecks...))) + if err != nil { + return err + } + + h.Logger.Debug("executing query", "original", req.Query, "parsed", parsedQuery) + + // Execute query using workspace connection + verifications, err := conn.QueryToMaps(ctx, parsedQuery) + if err != nil { + return clickhouse.WrapClickHouseError(err) + } + + return s.JSON(http.StatusOK, Response{ + Meta: openapi.Meta{ + RequestId: s.RequestID(), + }, + Data: verifications, + }) +} + +// buildSecurityFilters creates ClickHouse 
security filters based on user permissions. +// Returns filters that restrict queries to only the key_space_ids the user has access to. +func (h *Handler) buildSecurityFilters(ctx context.Context, auth *keys.KeyVerifier) ([]chquery.SecurityFilter, error) { + allowedAPIIds := extractAllowedAPIIds(auth.Permissions) + if len(allowedAPIIds) == 0 { + return []chquery.SecurityFilter{}, nil + } + + // Fetch key auths for the allowed API IDs + apis, err := h.fetchKeyAuthsByAPIIds(ctx, auth.AuthorizedWorkspaceID, allowedAPIIds) + if err != nil { + return nil, err + } + + // Extract key space IDs from the fetched APIs + keySpaceIds := make([]string, 0, len(apis)) + for _, api := range apis { + keySpaceIds = append(keySpaceIds, api.KeyAuthID) + } + + return []chquery.SecurityFilter{ + { + Column: "key_space_id", + AllowedValues: keySpaceIds, + }, + }, nil +} + +// fetchKeyAuthsByAPIIds fetches key auth rows for the given API IDs using the cache. +func (h *Handler) fetchKeyAuthsByAPIIds(ctx context.Context, workspaceID string, apiIDs []string) (map[cache.ScopedKey]db.FindKeyAuthsByIdsRow, error) { + cacheKeys := array.Map(apiIDs, func(apiID string) cache.ScopedKey { + return cache.ScopedKey{ + WorkspaceID: workspaceID, + Key: apiID, + } + }) + + apis, _, err := h.Caches.ApiToKeyAuthRow.SWRMany( + ctx, + cacheKeys, + func(ctx context.Context, keys []cache.ScopedKey) (map[cache.ScopedKey]db.FindKeyAuthsByIdsRow, error) { + apis, err := db.Query.FindKeyAuthsByIds(ctx, h.DB.RO(), db.FindKeyAuthsByIdsParams{ + WorkspaceID: workspaceID, + ApiIds: apiIDs, + }) + if err != nil { + return nil, err + } + + return array.Reduce( + apis, + func(acc map[cache.ScopedKey]db.FindKeyAuthsByIdsRow, api db.FindKeyAuthsByIdsRow) map[cache.ScopedKey]db.FindKeyAuthsByIdsRow { + acc[cache.ScopedKey{WorkspaceID: workspaceID, Key: api.ApiID}] = api + return acc + }, + map[cache.ScopedKey]db.FindKeyAuthsByIdsRow{}, + ), nil + }, + caches.DefaultFindFirstOp, + ) + + return apis, err +} + +// 
buildAPIPermissionsFromKeySpaces fetches key spaces and builds RBAC permissions for them. +// Returns an error if any key space is not found. +func (h *Handler) buildAPIPermissionsFromKeySpaces(ctx context.Context, auth *keys.KeyVerifier, keySpaceIds []string) ([]rbac.PermissionQuery, error) { + keySpaces, keySpaceHits, err := h.fetchKeyAuthsByKeyAuthIds(ctx, auth.AuthorizedWorkspaceID, keySpaceIds) + if err != nil { + return nil, err + } + + // Check for missing key_space_ids and build permissions + apiPermissions := make([]rbac.PermissionQuery, 0, len(keySpaceHits)) + for key, hit := range keySpaceHits { + if hit == cache.Null { + return nil, fault.New("key_space_id not found", + fault.Code(codes.Data.KeySpace.NotFound.URN()), + fault.Public(fmt.Sprintf("KeySpace '%s' was not found.", key.Key)), + ) + } + + apiPermissions = append(apiPermissions, rbac.T(rbac.Tuple{ + ResourceType: rbac.Api, + ResourceID: keySpaces[key].ApiID, + Action: rbac.ReadAnalytics, + })) + } + + return apiPermissions, nil +} + +// fetchKeyAuthsByKeyAuthIds fetches key auth rows for the given key auth IDs using the cache. 
+func (h *Handler) fetchKeyAuthsByKeyAuthIds(ctx context.Context, workspaceID string, keyAuthIDs []string) (map[cache.ScopedKey]db.FindKeyAuthsByKeyAuthIdsRow, map[cache.ScopedKey]cache.CacheHit, error) { + cacheKeys := array.Map(keyAuthIDs, func(keyAuthID string) cache.ScopedKey { + return cache.ScopedKey{ + WorkspaceID: workspaceID, + Key: keyAuthID, + } + }) + + return h.Caches.KeyAuthToApiRow.SWRMany( + ctx, + cacheKeys, + func(ctx context.Context, keys []cache.ScopedKey) (map[cache.ScopedKey]db.FindKeyAuthsByKeyAuthIdsRow, error) { + keySpaces, err := db.Query.FindKeyAuthsByKeyAuthIds(ctx, h.DB.RO(), db.FindKeyAuthsByKeyAuthIdsParams{ + WorkspaceID: workspaceID, + KeyAuthIds: array.Map(keys, func(keySpace cache.ScopedKey) string { + return keySpace.Key + }), + }) + if err != nil { + return nil, err + } + + return array.Reduce( + keySpaces, + func(acc map[cache.ScopedKey]db.FindKeyAuthsByKeyAuthIdsRow, api db.FindKeyAuthsByKeyAuthIdsRow) map[cache.ScopedKey]db.FindKeyAuthsByKeyAuthIdsRow { + acc[cache.ScopedKey{WorkspaceID: workspaceID, Key: api.KeyAuthID}] = api + return acc + }, + map[cache.ScopedKey]db.FindKeyAuthsByKeyAuthIdsRow{}, + ), nil + }, + caches.DefaultFindFirstOp, + ) +} + +// extractAllowedAPIIds extracts API IDs from permissions +// Returns empty slice if user has wildcard access (api.*.read_analytics) +// Returns specific API IDs if user has limited access (api.api_123.read_analytics, etc.) 
+func extractAllowedAPIIds(permissions []string) []string { + if slices.Contains(permissions, "api.*.read_analytics") { + return nil + } + + // Extract specific API IDs from permissions like "api.api_123.read_analytics" + apiIDs := make([]string, 0) + for _, perm := range permissions { + pattern := strings.Split(perm, ".") + if len(pattern) != 3 { + continue + } + + if pattern[0] != "api" || pattern[2] != "read_analytics" { + continue + } + + apiIDs = append(apiIDs, pattern[1]) + } + + return apiIDs +} diff --git a/go/apps/api/run.go b/go/apps/api/run.go index d7ba2e63d8..0da2f4d259 100644 --- a/go/apps/api/run.go +++ b/go/apps/api/run.go @@ -10,6 +10,7 @@ import ( "github.com/unkeyed/unkey/go/apps/api/routes" cachev1 "github.com/unkeyed/unkey/go/gen/proto/cache/v1" + "github.com/unkeyed/unkey/go/internal/services/analytics" "github.com/unkeyed/unkey/go/internal/services/auditlogs" "github.com/unkeyed/unkey/go/internal/services/caches" "github.com/unkeyed/unkey/go/internal/services/keys" @@ -145,7 +146,6 @@ func Run(ctx context.Context, cfg Config) error { } // Caches will be created after invalidation consumer is set up - srv, err := zen.New(zen.Config{ Logger: logger, Flags: &zen.Flags{ @@ -275,19 +275,37 @@ func Run(ctx context.Context, cfg Config) error { shutdowns.Register(keySvc.Close) shutdowns.Register(ctr.Close) + // Initialize analytics connection manager + analyticsConnMgr := analytics.NewNoopConnectionManager() + if cfg.ClickhouseAnalyticsURL != "" && vaultSvc != nil { + analyticsConnMgr, err = analytics.NewConnectionManager(analytics.ConnectionManagerConfig{ + SettingsCache: caches.ClickhouseSetting, + Database: db, + Logger: logger, + Clock: clk, + BaseURL: cfg.ClickhouseAnalyticsURL, + Vault: vaultSvc, + }) + if err != nil { + return fmt.Errorf("unable to create analytics connection manager: %w", err) + } + } + routes.Register(srv, &routes.Services{ - Logger: logger, - Database: db, - ClickHouse: ch, - Keys: keySvc, - Validator: validator, - 
Ratelimit: rlSvc, - Auditlogs: auditlogSvc, - Caches: caches, - Vault: vaultSvc, - ChproxyToken: cfg.ChproxyToken, - UsageLimiter: ulSvc, + Logger: logger, + Database: db, + ClickHouse: ch, + Keys: keySvc, + Validator: validator, + Ratelimit: rlSvc, + Auditlogs: auditlogSvc, + Caches: caches, + Vault: vaultSvc, + ChproxyToken: cfg.ChproxyToken, + UsageLimiter: ulSvc, + AnalyticsConnectionManager: analyticsConnMgr, }) + if cfg.Listener == nil { // Create listener from HttpPort (production) cfg.Listener, err = net.Listen("tcp", fmt.Sprintf(":%d", cfg.HttpPort)) diff --git a/go/cmd/api/main.go b/go/cmd/api/main.go index 5d42e3df6b..1319e49f7d 100644 --- a/go/cmd/api/main.go +++ b/go/cmd/api/main.go @@ -48,6 +48,8 @@ var Cmd = &cli.Command{ cli.EnvVar("UNKEY_REDIS_URL")), cli.String("clickhouse-url", "ClickHouse connection string for analytics. Recommended for production. Example: clickhouse://user:pass@host:9000/unkey", cli.EnvVar("UNKEY_CLICKHOUSE_URL")), + cli.String("clickhouse-analytics-url", "ClickHouse base URL for workspace-specific analytics connections. Workspace credentials are injected programmatically. 
Example: http://clickhouse:8123/default", + cli.EnvVar("UNKEY_CLICKHOUSE_ANALYTICS_URL")), // Observability cli.Bool("otel", "Enable OpenTelemetry tracing and metrics", @@ -138,7 +140,8 @@ func action(ctx context.Context, cmd *cli.Command) error { DatabaseReadonlyReplica: cmd.String("database-replica"), // ClickHouse - ClickhouseURL: cmd.String("clickhouse-url"), + ClickhouseURL: cmd.String("clickhouse-url"), + ClickhouseAnalyticsURL: cmd.String("clickhouse-analytics-url"), // OpenTelemetry configuration OtelEnabled: cmd.Bool("otel"), diff --git a/go/cmd/create-clickhouse-user/main.go b/go/cmd/create-clickhouse-user/main.go new file mode 100644 index 0000000000..12d3dfa7a9 --- /dev/null +++ b/go/cmd/create-clickhouse-user/main.go @@ -0,0 +1,247 @@ +package createclickhouseuser + +import ( + "context" + "crypto/rand" + "database/sql" + "encoding/base64" + "fmt" + + vaultv1 "github.com/unkeyed/unkey/go/gen/proto/vault/v1" + "github.com/unkeyed/unkey/go/pkg/cli" + "github.com/unkeyed/unkey/go/pkg/clickhouse" + "github.com/unkeyed/unkey/go/pkg/clock" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/otel/logging" + "github.com/unkeyed/unkey/go/pkg/vault" + "github.com/unkeyed/unkey/go/pkg/vault/storage" +) + +var Cmd = &cli.Command{ + Name: "create-clickhouse-user", + Usage: "Create or update ClickHouse user with quotas and permissions", + Description: `Create or update a ClickHouse user for a workspace with resource quotas and table permissions. + +This command: +1. Generates a secure password (or reuses existing) +2. Encrypts and stores credentials in MySQL +3. Creates/alters ClickHouse user +4. Revokes all permissions +5. Grants SELECT on specified tables +6. Creates row-level security policies +7. Creates/updates quota +8. Creates/updates settings profile + +The script is idempotent - it can be run multiple times to update quotas without regenerating passwords. 
+ +By default, grants SELECT on all v2 analytics tables: + - Key verifications (raw + per minute/hour/day/month) + - Ratelimits (raw + per minute/hour/day/month + last used) + - API requests (raw + per minute/hour/day/month) + +EXAMPLES: +unkey create-clickhouse-user --workspace-id ws_123 +unkey create-clickhouse-user --workspace-id ws_123 --username custom_user --max-queries-per-window 5000`, + Flags: []cli.Flag{ + cli.String("workspace-id", "Workspace ID", cli.Required()), + cli.String("username", "ClickHouse username (default: workspace_id)"), + cli.String("database-primary", "MySQL database DSN", cli.EnvVar("UNKEY_DATABASE_PRIMARY"), cli.Required()), + cli.String("clickhouse-url", "ClickHouse URL", cli.EnvVar("CLICKHOUSE_URL"), cli.Required()), + cli.StringSlice("vault-master-keys", "Vault master key for encryption", cli.EnvVar("UNKEY_VAULT_MASTER_KEY"), cli.Required()), + cli.String("vault-s3-url", "Vault S3 URL", cli.EnvVar("UNKEY_VAULT_S3_URL"), cli.Required()), + cli.String("vault-s3-bucket", "Vault S3 bucket", cli.EnvVar("UNKEY_VAULT_S3_BUCKET"), cli.Required()), + cli.String("vault-s3-access-key-id", "Vault S3 access key ID", cli.EnvVar("UNKEY_VAULT_S3_ACCESS_KEY_ID"), cli.Required()), + cli.String("vault-s3-access-key-secret", "Vault S3 access key secret", cli.EnvVar("UNKEY_VAULT_S3_ACCESS_KEY_SECRET"), cli.Required()), + + // Quota overrides (optional - uses schema defaults if not set) + cli.Int("quota-duration-seconds", "Quota window duration in seconds", cli.Default(3_600)), + cli.Int("max-queries-per-window", "Max queries per window", cli.Default(1_000)), + cli.Int("max-execution-time-per-window", "Max execution time per window in seconds", cli.Default(1_800)), + cli.Int("max-query-execution-time", "Max single query execution time in seconds", cli.Default(30)), + cli.Int64("max-query-memory-bytes", "Max memory per query in bytes", cli.Default(int64(1_000_000_000))), + cli.Int("max-query-result-rows", "Max result rows per query", 
cli.Default(10_000_000)), + }, + Action: run, +} + +var allowedTables = []string{ + // Key verifications + "default.key_verifications_raw_v2", + "default.key_verifications_per_minute_v2", + "default.key_verifications_per_hour_v2", + "default.key_verifications_per_day_v2", + "default.key_verifications_per_month_v2", + // Not used ATM + // // Ratelimits + // "default.ratelimits_raw_v2", + // "default.ratelimits_per_minute_v2", + // "default.ratelimits_per_hour_v2", + // "default.ratelimits_per_day_v2", + // "default.ratelimits_per_month_v2", + // "default.ratelimits_last_used_v2", + // // API requests + // "default.api_requests_raw_v2", + // "default.api_requests_per_minute_v2", + // "default.api_requests_per_hour_v2", + // "default.api_requests_per_day_v2", + // "default.api_requests_per_month_v2", +} + +func run(ctx context.Context, cmd *cli.Command) error { + logger := logging.New() + + workspaceID := cmd.RequireString("workspace-id") + username := cmd.String("username") + if username == "" { + username = workspaceID + } + + // Connect to MySQL + database, err := db.New(db.Config{ + PrimaryDSN: cmd.RequireString("database-primary"), + Logger: logger, + }) + if err != nil { + return fmt.Errorf("failed to connect to database: %w", err) + } + + // Connect to ClickHouse + ch, err := clickhouse.New(clickhouse.Config{ + URL: cmd.RequireString("clickhouse-url"), + Logger: logger, + }) + if err != nil { + return fmt.Errorf("failed to connect to clickhouse: %w", err) + } + + // Initialize Vault storage + vaultStorage, err := storage.NewS3(storage.S3Config{ + Logger: logger, + S3URL: cmd.RequireString("vault-s3-url"), + S3Bucket: cmd.RequireString("vault-s3-bucket"), + S3AccessKeyID: cmd.RequireString("vault-s3-access-key-id"), + S3AccessKeySecret: cmd.RequireString("vault-s3-access-key-secret"), + }) + if err != nil { + return fmt.Errorf("failed to initialize vault storage: %w", err) + } + + // Initialize Vault for encryption + v, err := vault.New(vault.Config{ + Logger: 
logger, + Storage: vaultStorage, + MasterKeys: cmd.RequireStringSlice("vault-master-keys"), + }) + if err != nil { + return fmt.Errorf("failed to initialize vault: %w", err) + } + + clk := clock.New() + now := clk.Now().UnixMilli() + + // Check if user already exists + existing, err := db.Query.FindClickhouseWorkspaceSettingsByWorkspaceID(ctx, database.RO(), workspaceID) + var password string + var passwordEncrypted string + + if err != nil { + if !db.IsNotFound(err) { + return fmt.Errorf("failed to check existing user: %w", err) + } + + // User doesn't exist - generate new password + logger.Info("creating new user", "workspace_id", workspaceID, "username", username) + password, err = generateSecurePassword(64) + if err != nil { + return fmt.Errorf("failed to generate password: %w", err) + } + + // Encrypt password + encRes, err := v.Encrypt(ctx, &vaultv1.EncryptRequest{ + Keyring: workspaceID, + Data: password, + }) + if err != nil { + return fmt.Errorf("failed to encrypt password: %w", err) + } + passwordEncrypted = encRes.Encrypted + + // Insert into MySQL + err = db.Query.InsertClickhouseWorkspaceSettings(ctx, database.RW(), db.InsertClickhouseWorkspaceSettingsParams{ + WorkspaceID: workspaceID, + Username: username, + PasswordEncrypted: passwordEncrypted, + QuotaDurationSeconds: int32(cmd.Int("quota-duration-seconds")), + MaxQueriesPerWindow: int32(cmd.Int("max-queries-per-window")), + MaxExecutionTimePerWindow: int32(cmd.Int("max-execution-time-per-window")), + MaxQueryExecutionTime: int32(cmd.Int("max-query-execution-time")), + MaxQueryMemoryBytes: cmd.Int64("max-query-memory-bytes"), + MaxQueryResultRows: int32(cmd.Int("max-query-result-rows")), + CreatedAt: now, + UpdatedAt: sql.NullInt64{Valid: true, Int64: now}, + }) + if err != nil { + return fmt.Errorf("failed to insert settings: %w", err) + } + + logger.Info("stored credentials in database") + } else { + // User exists - update quotas only (preserve password) + logger.Info("updating existing user 
quotas", "workspace_id", workspaceID, "username", existing.Username) + username = existing.Username + passwordEncrypted = existing.PasswordEncrypted + decrypted, err := v.Decrypt(ctx, &vaultv1.DecryptRequest{ + Keyring: workspaceID, + Encrypted: existing.PasswordEncrypted, + }) + if err != nil { + return fmt.Errorf("failed to decrypt password: %w", err) + } + password = decrypted.GetPlaintext() + + // Update limits + err = db.Query.UpdateClickhouseWorkspaceSettingsLimits(ctx, database.RW(), db.UpdateClickhouseWorkspaceSettingsLimitsParams{ + WorkspaceID: workspaceID, + QuotaDurationSeconds: int32(cmd.Int("quota-duration-seconds")), + MaxQueriesPerWindow: int32(cmd.Int("max-queries-per-window")), + MaxExecutionTimePerWindow: int32(cmd.Int("max-execution-time-per-window")), + MaxQueryExecutionTime: int32(cmd.Int("max-query-execution-time")), + MaxQueryMemoryBytes: cmd.Int64("max-query-memory-bytes"), + MaxQueryResultRows: int32(cmd.Int("max-query-result-rows")), + UpdatedAt: sql.NullInt64{Valid: true, Int64: now}, + }) + if err != nil { + return fmt.Errorf("failed to update settings: %w", err) + } + + logger.Info("updated quotas in database") + } + + // Configure ClickHouse user with permissions, quotas, and settings + err = ch.ConfigureUser(ctx, clickhouse.UserConfig{ + WorkspaceID: workspaceID, + Username: username, + Password: password, + AllowedTables: allowedTables, + QuotaDurationSeconds: int32(cmd.Int("quota-duration-seconds")), + MaxQueriesPerWindow: int32(cmd.Int("max-queries-per-window")), + MaxExecutionTimePerWindow: int32(cmd.Int("max-execution-time-per-window")), + MaxQueryExecutionTime: int32(cmd.Int("max-query-execution-time")), + MaxQueryMemoryBytes: cmd.Int64("max-query-memory-bytes"), + MaxQueryResultRows: int32(cmd.Int("max-query-result-rows")), + }) + if err != nil { + return fmt.Errorf("failed to configure clickhouse user: %w", err) + } + + return nil +} + +func generateSecurePassword(length int) (string, error) { + bytes := make([]byte, length) 
+ if _, err := rand.Read(bytes); err != nil { + return "", err + } + + return base64.RawURLEncoding.EncodeToString(bytes)[:length], nil +} diff --git a/go/cmd/dev/main.go b/go/cmd/dev/main.go new file mode 100644 index 0000000000..dc19538cb1 --- /dev/null +++ b/go/cmd/dev/main.go @@ -0,0 +1,15 @@ +package dev + +import ( + "github.com/unkeyed/unkey/go/cmd/dev/seed" + "github.com/unkeyed/unkey/go/pkg/cli" +) + +var Cmd = &cli.Command{ + Name: "dev", + Usage: "All of our development tools", + Commands: []*cli.Command{ + seed.Cmd, + // Future: apiRequestsCmd, ratelimitsCmd, etc. + }, +} diff --git a/go/cmd/dev/seed/run.go b/go/cmd/dev/seed/run.go new file mode 100644 index 0000000000..f51cbdacc0 --- /dev/null +++ b/go/cmd/dev/seed/run.go @@ -0,0 +1,13 @@ +package seed + +import ( + "github.com/unkeyed/unkey/go/pkg/cli" +) + +var Cmd = &cli.Command{ + Name: "seed", + Usage: "Seed data for testing", + Commands: []*cli.Command{ + verificationsCmd, + }, +} diff --git a/go/cmd/dev/seed/verifications.go b/go/cmd/dev/seed/verifications.go new file mode 100644 index 0000000000..561c930a7d --- /dev/null +++ b/go/cmd/dev/seed/verifications.go @@ -0,0 +1,516 @@ +package seed + +import ( + "context" + "database/sql" + "fmt" + "log" + "math" + "math/rand/v2" + "slices" + "time" + + "github.com/unkeyed/unkey/go/internal/services/keys" + "github.com/unkeyed/unkey/go/pkg/array" + "github.com/unkeyed/unkey/go/pkg/cli" + "github.com/unkeyed/unkey/go/pkg/clickhouse" + "github.com/unkeyed/unkey/go/pkg/clickhouse/schema" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/otel/logging" + "github.com/unkeyed/unkey/go/pkg/uid" +) + +var verificationsCmd = &cli.Command{ + Name: "verifications", + Usage: "Seed key verification events", + Flags: []cli.Flag{ + cli.String("api-id", "API ID to use for seeding", cli.Required()), + cli.Int("num-verifications", "Number of verifications to generate", cli.Required()), + cli.Float("unique-keys-percent", "Percentage of 
verifications that are unique keys (0.0-100.0)", cli.Default(1.0)), + cli.Float("keys-with-identity-percent", "Percentage of keys that have an identity attached (0.0-100.0)", cli.Default(30.0)), + cli.Float("identity-usage-percent", "Percentage chance to use identity in verification if key has one (0.0-100.0)", cli.Default(90.0)), + cli.Int("days-back", "Number of days back to generate data", cli.Default(30)), + cli.Int("days-forward", "Number of days forward to generate data", cli.Default(30)), + cli.String("clickhouse-url", "ClickHouse URL", cli.Default("clickhouse://default:password@127.0.0.1:9000")), + cli.String("database-primary", "MySQL database DSN", cli.Default("unkey:password@tcp(127.0.0.1:3306)/unkey?parseTime=true&interpolateParams=true"), cli.EnvVar("UNKEY_DATABASE_PRIMARY"), cli.Required()), + }, + Action: seedVerifications, +} + +const chunkSize = 50_000 + +func seedVerifications(ctx context.Context, cmd *cli.Command) error { + logger := logging.New() + + // Connect to MySQL + database, err := db.New(db.Config{ + PrimaryDSN: cmd.RequireString("database-primary"), + Logger: logger, + }) + if err != nil { + return fmt.Errorf("failed to connect to MySQL: %w", err) + } + + // Connect to ClickHouse + ch, err := clickhouse.New(clickhouse.Config{ + URL: cmd.String("clickhouse-url"), + Logger: logger, + }) + if err != nil { + return fmt.Errorf("failed to connect to ClickHouse: %w", err) + } + + // Create key service for proper key generation + keyService, err := keys.New(keys.Config{ + DB: database, + Logger: logger, + }) + if err != nil { + return fmt.Errorf("failed to create key service: %w", err) + } + + // Calculate derived values + numVerifications := cmd.RequireInt("num-verifications") + uniqueKeysPercent := cmd.Float("unique-keys-percent") + keysWithIdentityPercent := cmd.Float("keys-with-identity-percent") + identityUsagePercent := cmd.Float("identity-usage-percent") + + // Calculate number of unique keys based on percentage + numKeys := max(1, 
int(float64(numVerifications)*(uniqueKeysPercent/100.0))) + + // Calculate number of identities based on percentage of keys + numIdentities := int(float64(numKeys) * (keysWithIdentityPercent / 100.0)) + + seeder := &Seeder{ + apiID: cmd.RequireString("api-id"), + numKeys: numKeys, + numIdentities: numIdentities, + numVerifications: numVerifications, + keysWithIdentityPercent: keysWithIdentityPercent, + identityUsagePercent: identityUsagePercent, + daysBack: cmd.Int("days-back"), + daysForward: cmd.Int("days-forward"), + db: database, + clickhouse: ch, + keyService: keyService, + } + + return seeder.Seed(ctx) +} + +type Seeder struct { + apiID string + numKeys int + numIdentities int + numVerifications int + keysWithIdentityPercent float64 + identityUsagePercent float64 + daysBack int + daysForward int + db db.Database + clickhouse clickhouse.ClickHouse + keyService keys.KeyService +} + +type Key struct { + ID string + KeyAuthID string + Hash string + Start string + Name string + Enabled bool + IdentityID string // Empty string if no identity attached + ExternalID string // Empty string if no external ID attached +} + +type Identity struct { + ID string + ExternalID string +} + +func (s *Seeder) Seed(ctx context.Context) error { + log.Printf("Starting seed for API: %s", s.apiID) + + // 1. Get API details including workspace_id + log.Printf("Fetching API details...") + workspaceID, keyAuthID, prefix, err := s.getAPIDetails(ctx) + if err != nil { + return fmt.Errorf("failed to get API details: %w", err) + } + log.Printf(" Using workspace %s, API %s with keyAuth %s (prefix: %s)", workspaceID, s.apiID, keyAuthID, prefix) + + // 2. 
Create Identities and Keys with batched transactions
+	var identities []Identity
+	var allKeys []Key
+
+	// Create identities first (if needed)
+	if s.numIdentities > 0 {
+		log.Printf("Creating %d identities...", s.numIdentities)
+		identities, err = s.createIdentitiesBatched(ctx, workspaceID)
+		if err != nil {
+			return fmt.Errorf("failed to create identities: %w", err)
+		}
+	} else {
+		log.Printf("No identities will be created (0 keys will have identities)")
+	}
+
+	// Create keys and attach identities to some of them
+	log.Printf("Creating %d keys (%.1f%% will have identities)...", s.numKeys, s.keysWithIdentityPercent)
+	allKeys, err = s.createKeysBatched(ctx, workspaceID, keyAuthID, prefix, identities)
+	if err != nil {
+		return fmt.Errorf("failed to create keys: %w", err)
+	}
+
+	// 3. Generate and insert verifications
+	log.Printf("Generating %d verifications...", s.numVerifications)
+	if err := s.generateVerifications(ctx, workspaceID, allKeys, keyAuthID); err != nil {
+		return fmt.Errorf("failed to generate verifications: %w", err)
+	}
+
+	log.Println("Seeding completed successfully!")
+	return nil
+}
+
+func (s *Seeder) getAPIDetails(ctx context.Context) (workspaceID, keyAuthID, prefix string, err error) {
+	// Fetch API from database
+	api, err := db.Query.FindApiByID(ctx, s.db.RO(), s.apiID)
+	if err != nil {
+		return "", "", "", fmt.Errorf("failed to find API: %w", err)
+	}
+
+	workspaceID = api.WorkspaceID
+
+	if !api.KeyAuthID.Valid {
+		return "", "", "", fmt.Errorf("API %s does not have key authentication enabled", s.apiID)
+	}
+
+	keyAuthID = api.KeyAuthID.String
+
+	// Fetch keyAuth to get the prefix
+	keyAuth, err := db.Query.GetKeyAuthByID(ctx, s.db.RO(), keyAuthID)
+	if err != nil {
+		return "", "", "", fmt.Errorf("failed to get keyAuth: %w", err)
+	}
+
+	if keyAuth.DefaultPrefix.Valid {
+		prefix = keyAuth.DefaultPrefix.String
+	} else {
+		prefix = "key" // fallback prefix
+	}
+
+	return workspaceID, keyAuthID, prefix, nil
+}
+
+func (s *Seeder) 
createKeysBatched(ctx context.Context, workspaceID, keyAuthID, prefix string, identities []Identity) ([]Key, error) { + allKeys := make([]Key, s.numKeys) + keyParams := make([]db.InsertKeyParams, s.numKeys) + + environments := []string{"development", "staging", "production", "test"} + keyNames := []string{"Backend Service", "Frontend App", "Mobile Client", "Admin Dashboard", "API Integration", "Test Key"} + + // Calculate how many keys should have identities + keysWithIdentityCount := int(float64(s.numKeys) * (s.keysWithIdentityPercent / 100.0)) + + now := time.Now().UnixMilli() + + for i := range s.numKeys { + // Use the key service to create a proper key with real hash + keyResult, err := s.keyService.CreateKey(ctx, keys.CreateKeyRequest{ + Prefix: prefix, + ByteLength: 16, + }) + if err != nil { + return nil, fmt.Errorf("failed to create key: %w", err) + } + + keyID := uid.New(uid.Prefix(prefix)) + + // Some keys are disabled (5%) + enabled := rand.Float64() > 0.05 // 95% enabled + + name := fmt.Sprintf("%s - %s", + keyNames[rand.IntN(len(keyNames))], + environments[rand.IntN(len(environments))], + ) + + // Determine if this key should have an identity attached + var identityID string + var externalID string + if i < keysWithIdentityCount && len(identities) > 0 { + // Attach an identity to this key + identity := identities[rand.IntN(len(identities))] + identityID = identity.ID + externalID = identity.ExternalID + } + + key := Key{ + ID: keyID, + KeyAuthID: keyAuthID, + Hash: keyResult.Hash, + Start: keyResult.Start, + Name: name, + Enabled: enabled, + IdentityID: identityID, + ExternalID: externalID, + } + + // Prepare key params for bulk insert + var identityIDParam sql.NullString + if identityID != "" { + identityIDParam = sql.NullString{String: identityID, Valid: true} + } + + keyParams[i] = db.InsertKeyParams{ + ID: keyID, + KeySpaceID: keyAuthID, + Hash: keyResult.Hash, + Start: keyResult.Start, + WorkspaceID: workspaceID, + Name: sql.NullString{String: 
name, Valid: true},
+			IdentityID:        identityIDParam,
+			CreatedAtM:        now,
+			Enabled:           enabled,
+			ForWorkspaceID:    sql.NullString{},
+			Meta:              sql.NullString{},
+			Expires:           sql.NullTime{},
+			RemainingRequests: sql.NullInt32{},
+			RefillDay:         sql.NullInt16{},
+			RefillAmount:      sql.NullInt32{},
+		}
+
+		allKeys[i] = key
+
+		// Log progress periodically
+		if (i+1)%1000 == 0 || i == s.numKeys-1 {
+			log.Printf(" Prepared %d/%d keys", i+1, s.numKeys)
+		}
+	}
+
+	chunks := array.Chunk(keyParams, chunkSize)
+
+	// Commit every 5 chunks (250k rows per transaction)
+	batchSize := 5
+	for i := 0; i < len(chunks); i += batchSize {
+		err := db.Tx(ctx, s.db.RW(), func(ctx context.Context, tx db.DBTX) error {
+			end := min(i+batchSize, len(chunks))
+			for j := i; j < end; j++ {
+				log.Printf(" Inserting %d keys... chunk %d/%d", len(chunks[j]), j+1, len(chunks))
+				if err := db.BulkQuery.InsertKeys(ctx, tx, chunks[j]); err != nil {
+					return fmt.Errorf("failed to bulk insert keys: %w", err)
+				}
+			}
+			return nil
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keysWithIdentity := 0
+	for _, k := range allKeys {
+		if k.IdentityID != "" {
+			keysWithIdentity++
+		}
+	}
+
+	log.Printf(" Created %d total keys (%d with identities attached)", s.numKeys, keysWithIdentity)
+
+	return allKeys, nil
+}
+
+func (s *Seeder) createIdentitiesBatched(ctx context.Context, workspaceID string) ([]Identity, error) {
+	identities := make([]Identity, s.numIdentities)
+	identityParams := make([]db.InsertIdentityParams, s.numIdentities)
+
+	domains := []string{"example.com", "test.com", "demo.org", "app.io"}
+	now := time.Now().UnixMilli()
+	for i := range s.numIdentities {
+		identityID := uid.New("id")
+		// Generate a unique external ID (a fresh uid guarantees uniqueness)
+		externalID := fmt.Sprintf("user-%s@%s",
+			uid.New("ex"),
+			domains[rand.IntN(len(domains))],
+		)
+
+		identity := Identity{
+			ID:         identityID,
+			ExternalID: externalID,
+		}
+
+		// Prepare identity params for bulk insert
+		identityParams[i] = 
db.InsertIdentityParams{ + ID: identityID, + ExternalID: externalID, + WorkspaceID: workspaceID, + Environment: "default", + CreatedAt: now, + Meta: []byte(`{}`), + } + + identities[i] = identity + } + + chunks := array.Chunk(identityParams, chunkSize) + + // Commit every 5 chunks (250k rows per transaction) + batchSize := 5 + for i := 0; i < len(chunks); i += batchSize { + err := db.Tx(ctx, s.db.RW(), func(ctx context.Context, tx db.DBTX) error { + end := min(i+batchSize, len(chunks)) + for j := i; j < end; j++ { + log.Printf(" Inserting %d identities... chunk %d/%d", len(chunks[j]), j+1, len(chunks)) + if err := db.BulkQuery.InsertIdentities(ctx, tx, chunks[j]); err != nil { + return fmt.Errorf("failed to bulk insert identities: %w", err) + } + } + return nil + }) + if err != nil { + return nil, err + } + } + + log.Printf(" Created %d identities", s.numIdentities) + + return identities, nil +} + +func (s *Seeder) generateVerifications(ctx context.Context, workspaceID string, keys []Key, keyAuthID string) error { + startTime := time.Now().AddDate(0, 0, -s.daysBack) + endTime := time.Now().AddDate(0, 0, s.daysForward) + + outcomes := []struct { + name string + weight float64 + }{ + {"VALID", 0.85}, + {"RATE_LIMITED", 0.05}, + {"EXPIRED", 0.03}, + {"DISABLED", 0.02}, + {"FORBIDDEN", 0.02}, + {"USAGE_EXCEEDED", 0.02}, + {"INSUFFICIENT_PERMISSIONS", 0.01}, + } + + regions := []string{"us-east-1", "us-west-2", "eu-west-1", "ap-southeast-1", "sa-east-1"} + tagOptions := []string{"api", "web", "mobile", "server", "client", "frontend", "backend", "test", "prod"} + + // Use normal distribution to make some keys "hot" + mean := float64(len(keys)) * 0.2 + stdDev := float64(len(keys)) / 5.0 + + for i := range s.numVerifications { + // Generate timestamp with bias towards recent data + timeFraction := rand.Float64() + timeFraction = math.Pow(timeFraction, 0.5) + timestamp := startTime.Add(time.Duration(timeFraction * float64(endTime.Sub(startTime)))) + + // Select key with 
normal distribution (creates hot keys) + keyIndex := int(normalDistribution(mean, stdDev, 0, float64(len(keys)-1))) + key := keys[keyIndex] + + // Select outcome + outcomeRand := rand.Float64() + var outcome string + cumulative := 0.0 + for _, o := range outcomes { + cumulative += o.weight + if outcomeRand < cumulative { + outcome = o.name + break + } + } + + // Bias outcome based on key properties + if !key.Enabled && rand.Float64() < 0.6 { + outcome = "DISABLED" + } + + // Determine identity for this verification + var identityID string + var externalID string + if key.IdentityID != "" { + // Key has an identity - use it X% of the time (default 90%) + if rand.Float64()*100 < s.identityUsagePercent { + identityID = key.IdentityID + externalID = key.ExternalID + } + // Otherwise leave it blank (simulating key used before identity was attached) + } + // If key doesn't have identity, identityID stays empty + + // Generate tags (0-2 tags) + tagCount := rand.IntN(3) + tags := make([]string, 0, tagCount) + for range tagCount { + tag := tagOptions[rand.IntN(len(tagOptions))] + if !slices.Contains(tags, tag) { + tags = append(tags, tag) + } + } + + // random latency + latency := rand.ExpFloat64()*50 + 1 // 1-100ms base range + if rand.Float64() < 0.1 { // 10% chance of high latency + latency += rand.Float64() * 400 // Up to 500ms + } + + // random credit spent between 0 and 50 but 70% should be 0 + credit := rand.Int64N(51) + if rand.Float64() < 0.7 { + credit = 0 + } + + // Use BufferKeyVerification to let the clickhouse client batch automatically + s.clickhouse.BufferKeyVerificationV2(schema.KeyVerificationV2{ + RequestID: uid.New("req"), + Time: timestamp.UnixMilli(), + WorkspaceID: workspaceID, + KeySpaceID: keyAuthID, + KeyID: key.ID, + Region: regions[rand.IntN(len(regions))], + Tags: tags, + Outcome: outcome, + IdentityID: identityID, + ExternalID: externalID, + Latency: latency, + SpentCredits: credit, + }) + + // Log progress periodically + if (i+1)%10000 == 0 { 
+ log.Printf(" Buffered %d/%d verifications", i+1, s.numVerifications) + } + } + + log.Printf(" Buffered all %d verifications, waiting for flush...", s.numVerifications) + + err := s.clickhouse.Close() + if err != nil { + return fmt.Errorf("failed to close clickhouse: %w", err) + } + + log.Printf(" All verifications sent to ClickHouse") + return nil +} + +// normalDistribution returns a random value from a normal distribution +func normalDistribution(mean, stdDev, min, max float64) float64 { + // If min == max, just return that value to avoid infinite loop + if min >= max { + return min + } + + for { + // Box-Muller transform + u1 := rand.Float64() + u2 := rand.Float64() + z := math.Sqrt(-2*math.Log(u1)) * math.Cos(2*math.Pi*u2) + + value := mean + z*stdDev + + if value >= min && value <= max { + return value + } + } +} diff --git a/go/gen/proto/cache/v1/invalidation.pb.go b/go/gen/proto/cache/v1/invalidation.pb.go index 6e09b7e462..dff139c441 100644 --- a/go/gen/proto/cache/v1/invalidation.pb.go +++ b/go/gen/proto/cache/v1/invalidation.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.8 // protoc (unknown) -// source: proto/cache/v1/invalidation.proto +// source: cache/v1/invalidation.proto package cachev1 @@ -38,7 +38,7 @@ type CacheInvalidationEvent struct { func (x *CacheInvalidationEvent) Reset() { *x = CacheInvalidationEvent{} - mi := &file_proto_cache_v1_invalidation_proto_msgTypes[0] + mi := &file_cache_v1_invalidation_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -50,7 +50,7 @@ func (x *CacheInvalidationEvent) String() string { func (*CacheInvalidationEvent) ProtoMessage() {} func (x *CacheInvalidationEvent) ProtoReflect() protoreflect.Message { - mi := &file_proto_cache_v1_invalidation_proto_msgTypes[0] + mi := &file_cache_v1_invalidation_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -63,7 +63,7 @@ func (x *CacheInvalidationEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use CacheInvalidationEvent.ProtoReflect.Descriptor instead. 
func (*CacheInvalidationEvent) Descriptor() ([]byte, []int) { - return file_proto_cache_v1_invalidation_proto_rawDescGZIP(), []int{0} + return file_cache_v1_invalidation_proto_rawDescGZIP(), []int{0} } func (x *CacheInvalidationEvent) GetCacheName() string { @@ -94,35 +94,36 @@ func (x *CacheInvalidationEvent) GetSourceInstance() string { return "" } -var File_proto_cache_v1_invalidation_proto protoreflect.FileDescriptor +var File_cache_v1_invalidation_proto protoreflect.FileDescriptor -const file_proto_cache_v1_invalidation_proto_rawDesc = "" + +const file_cache_v1_invalidation_proto_rawDesc = "" + "\n" + - "!proto/cache/v1/invalidation.proto\x12\bcache.v1\"\x9b\x01\n" + + "\x1bcache/v1/invalidation.proto\x12\bcache.v1\"\x9b\x01\n" + "\x16CacheInvalidationEvent\x12\x1d\n" + "\n" + "cache_name\x18\x01 \x01(\tR\tcacheName\x12\x1b\n" + "\tcache_key\x18\x02 \x01(\tR\bcacheKey\x12\x1c\n" + "\ttimestamp\x18\x03 \x01(\x03R\ttimestamp\x12'\n" + - "\x0fsource_instance\x18\x04 \x01(\tR\x0esourceInstanceB8Z6github.com/unkeyed/unkey/go/gen/proto/cache/v1;cachev1b\x06proto3" + "\x0fsource_instance\x18\x04 \x01(\tR\x0esourceInstanceB\x9a\x01\n" + + "\fcom.cache.v1B\x11InvalidationProtoP\x01Z6github.com/unkeyed/unkey/go/gen/proto/cache/v1;cachev1\xa2\x02\x03CXX\xaa\x02\bCache.V1\xca\x02\bCache\\V1\xe2\x02\x14Cache\\V1\\GPBMetadata\xea\x02\tCache::V1b\x06proto3" var ( - file_proto_cache_v1_invalidation_proto_rawDescOnce sync.Once - file_proto_cache_v1_invalidation_proto_rawDescData []byte + file_cache_v1_invalidation_proto_rawDescOnce sync.Once + file_cache_v1_invalidation_proto_rawDescData []byte ) -func file_proto_cache_v1_invalidation_proto_rawDescGZIP() []byte { - file_proto_cache_v1_invalidation_proto_rawDescOnce.Do(func() { - file_proto_cache_v1_invalidation_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_cache_v1_invalidation_proto_rawDesc), len(file_proto_cache_v1_invalidation_proto_rawDesc))) +func 
file_cache_v1_invalidation_proto_rawDescGZIP() []byte { + file_cache_v1_invalidation_proto_rawDescOnce.Do(func() { + file_cache_v1_invalidation_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cache_v1_invalidation_proto_rawDesc), len(file_cache_v1_invalidation_proto_rawDesc))) }) - return file_proto_cache_v1_invalidation_proto_rawDescData + return file_cache_v1_invalidation_proto_rawDescData } -var file_proto_cache_v1_invalidation_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_proto_cache_v1_invalidation_proto_goTypes = []any{ +var file_cache_v1_invalidation_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_cache_v1_invalidation_proto_goTypes = []any{ (*CacheInvalidationEvent)(nil), // 0: cache.v1.CacheInvalidationEvent } -var file_proto_cache_v1_invalidation_proto_depIdxs = []int32{ +var file_cache_v1_invalidation_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type 0, // [0:0] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name @@ -130,26 +131,26 @@ var file_proto_cache_v1_invalidation_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for field type_name } -func init() { file_proto_cache_v1_invalidation_proto_init() } -func file_proto_cache_v1_invalidation_proto_init() { - if File_proto_cache_v1_invalidation_proto != nil { +func init() { file_cache_v1_invalidation_proto_init() } +func file_cache_v1_invalidation_proto_init() { + if File_cache_v1_invalidation_proto != nil { return } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_cache_v1_invalidation_proto_rawDesc), len(file_proto_cache_v1_invalidation_proto_rawDesc)), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_cache_v1_invalidation_proto_rawDesc), len(file_cache_v1_invalidation_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, NumServices: 0, 
}, - GoTypes: file_proto_cache_v1_invalidation_proto_goTypes, - DependencyIndexes: file_proto_cache_v1_invalidation_proto_depIdxs, - MessageInfos: file_proto_cache_v1_invalidation_proto_msgTypes, + GoTypes: file_cache_v1_invalidation_proto_goTypes, + DependencyIndexes: file_cache_v1_invalidation_proto_depIdxs, + MessageInfos: file_cache_v1_invalidation_proto_msgTypes, }.Build() - File_proto_cache_v1_invalidation_proto = out.File - file_proto_cache_v1_invalidation_proto_goTypes = nil - file_proto_cache_v1_invalidation_proto_depIdxs = nil + File_cache_v1_invalidation_proto = out.File + file_cache_v1_invalidation_proto_goTypes = nil + file_cache_v1_invalidation_proto_depIdxs = nil } diff --git a/go/go.mod b/go/go.mod index 97ebdb7e0a..d937738780 100644 --- a/go/go.mod +++ b/go/go.mod @@ -8,6 +8,7 @@ require ( buf.build/gen/go/depot/api/connectrpc/go v1.19.0-20250915125527-3af9e416de91.1 buf.build/gen/go/depot/api/protocolbuffers/go v1.36.10-20250915125527-3af9e416de91.1 connectrpc.com/connect v1.19.0 + github.com/AfterShip/clickhouse-sql-parser v0.4.15 github.com/ClickHouse/clickhouse-go/v2 v2.40.1 github.com/aws/aws-sdk-go-v2 v1.36.6 github.com/aws/aws-sdk-go-v2/config v1.29.18 @@ -22,7 +23,6 @@ require ( github.com/getkin/kin-openapi v0.133.0 github.com/go-acme/lego/v4 v4.25.2 github.com/go-sql-driver/mysql v1.9.3 - github.com/golangci/golangci-lint/v2 v2.5.0 github.com/lmittmann/tint v1.1.2 github.com/maypok86/otter v1.2.4 github.com/moby/buildkit v0.25.0 @@ -39,7 +39,7 @@ require ( github.com/shirou/gopsutil/v4 v4.25.8 github.com/spiffe/go-spiffe/v2 v2.6.0 github.com/sqlc-dev/plugin-sdk-go v1.23.0 - github.com/sqlc-dev/sqlc v1.30.0 + github.com/sqlc-dev/sqlc v1.29.0 github.com/stretchr/testify v1.11.1 github.com/unkeyed/unkey/go/deploy/pkg/spiffe v0.0.0-20250929110415-ca2de7336e18 go.opentelemetry.io/contrib/bridges/otelslog v0.13.0 @@ -63,8 +63,6 @@ require ( ) require ( - 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect - 4d63.com/gochecknoglobals v0.2.2 // 
indirect buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.9-20250718181942-e35f9b667443.1 // indirect buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.9-20250912141014-52f32327d4b0.1 // indirect buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.0-20250924144421-cb55f06efbd2.1 // indirect @@ -79,38 +77,14 @@ require ( buf.build/go/standard v0.1.0 // indirect cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.123.0 // indirect - codeberg.org/chavacava/garif v0.2.0 // indirect connectrpc.com/otelconnect v0.8.0 // indirect - dev.gaijin.team/go/exhaustruct/v4 v4.0.0 // indirect - dev.gaijin.team/go/golib v0.7.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect - github.com/4meepo/tagalign v1.4.3 // indirect - github.com/Abirdcfly/dupword v0.1.6 // indirect - github.com/AdminBenni/iota-mixing v1.0.0 // indirect - github.com/AlwxSin/noinlineerr v1.0.5 // indirect - github.com/Antonboom/errname v1.1.1 // indirect - github.com/Antonboom/nilnil v1.1.1 // indirect - github.com/Antonboom/testifylint v1.6.4 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/BurntSushi/toml v1.5.0 // indirect github.com/ClickHouse/ch-go v0.68.0 // indirect - github.com/Djarvur/go-err113 v0.1.1 // indirect - github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/MirrexOne/unqueryvet v1.2.1 // indirect - github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect github.com/TwiN/go-color v1.4.1 // indirect - github.com/alecthomas/chroma/v2 v2.20.0 // indirect - github.com/alecthomas/go-check-sumtype v0.3.1 // indirect - github.com/alexkohler/nakedret/v2 v2.0.6 // indirect - github.com/alexkohler/prealloc v1.0.0 // indirect - github.com/alfatraining/structtag v1.0.0 // indirect - github.com/alingse/asasalint v0.0.11 // indirect - github.com/alingse/nilnesserr v0.2.0 // indirect github.com/andybalholm/brotli v1.2.0 // indirect github.com/antlr4-go/antlr/v4 
v4.13.1 // indirect - github.com/ashanbrown/forbidigo/v2 v2.1.0 // indirect - github.com/ashanbrown/makezero/v2 v2.0.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 // indirect @@ -125,33 +99,14 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 // indirect github.com/aws/smithy-go v1.23.0 // indirect - github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bkielbasa/cyclop v1.2.3 // indirect - github.com/blizzy78/varnamelen v0.8.0 // indirect - github.com/bombsimon/wsl/v4 v4.7.0 // indirect - github.com/bombsimon/wsl/v5 v5.2.0 // indirect - github.com/breml/bidichk v0.3.3 // indirect - github.com/breml/errchkjson v0.4.1 // indirect github.com/bufbuild/protocompile v0.14.1 // indirect github.com/bufbuild/protoplugin v0.0.0-20250218205857-750e09ce93e1 // indirect github.com/buger/jsonparser v1.1.1 // indirect - github.com/butuzov/ireturn v0.4.0 // indirect - github.com/butuzov/mirror v1.3.0 // indirect - github.com/catenacyber/perfsprint v0.9.1 // indirect - github.com/ccojocar/zxcvbn-go v1.0.4 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/charithe/durationcheck v0.0.10 // indirect - github.com/charmbracelet/colorprofile v0.3.2 // indirect - github.com/charmbracelet/lipgloss v1.1.0 // indirect - github.com/charmbracelet/x/ansi v0.10.1 // indirect - github.com/charmbracelet/x/cellbuf v0.0.13 // indirect - github.com/charmbracelet/x/term v0.2.1 // indirect - github.com/ckaznocha/intrange v0.3.1 // indirect - github.com/clipperhouse/uax29/v2 v2.2.0 // indirect github.com/coder/websocket v1.8.14 // 
indirect github.com/containerd/console v1.0.5 // indirect github.com/containerd/containerd/api v1.9.0 // indirect @@ -166,14 +121,9 @@ require ( github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/cubicdaiya/gonp v1.0.4 // indirect - github.com/curioswitch/go-reassign v0.3.0 // indirect - github.com/daixiang0/gci v0.13.7 // indirect - github.com/dave/dst v0.27.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/denis-tingaikin/go-header v1.0.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/dlclark/regexp2 v1.11.5 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -184,18 +134,11 @@ require ( github.com/elastic/go-sysinfo v1.15.4 // indirect github.com/elastic/go-windows v1.0.2 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect - github.com/ettle/strcase v0.2.0 // indirect - github.com/fatih/color v1.18.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/firefart/nonamedreturns v1.0.6 // indirect - github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/fzipp/gocyclo v0.6.0 // indirect github.com/gammazero/deque v1.1.0 // indirect - github.com/ghostiam/protogetter v0.3.16 // indirect github.com/go-chi/chi/v5 v5.2.3 // indirect - github.com/go-critic/go-critic v0.13.0 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.7.1 // indirect github.com/go-jose/go-jose/v4 v4.1.2 // indirect @@ -216,17 +159,6 @@ require ( github.com/go-openapi/swag/stringutils v0.25.1 // indirect github.com/go-openapi/swag/typeutils v0.25.1 // indirect 
github.com/go-openapi/swag/yamlutils v0.25.1 // indirect - github.com/go-toolsmith/astcast v1.1.0 // indirect - github.com/go-toolsmith/astcopy v1.1.0 // indirect - github.com/go-toolsmith/astequal v1.2.0 // indirect - github.com/go-toolsmith/astfmt v1.1.0 // indirect - github.com/go-toolsmith/astp v1.1.0 // indirect - github.com/go-toolsmith/strparse v1.1.0 // indirect - github.com/go-toolsmith/typep v1.1.0 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect - github.com/gobwas/glob v0.2.3 // indirect - github.com/godoc-lint/godoc-lint v0.10.0 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect @@ -234,33 +166,15 @@ require ( github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/golangci/asciicheck v0.5.0 // indirect - github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect - github.com/golangci/go-printf-func-name v0.1.1 // indirect - github.com/golangci/gofmt v0.0.0-20250704145412-3e58ba0443c6 // indirect - github.com/golangci/golines v0.0.0-20250821215611-d4663ad2c370 // indirect - github.com/golangci/misspell v0.7.0 // indirect - github.com/golangci/nilerr v0.0.0-20250918000102-015671e622fe // indirect - github.com/golangci/plugin-module-register v0.1.2 // indirect - github.com/golangci/revgrep v0.8.0 // indirect - github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e // indirect - github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e // indirect github.com/google/cel-go v0.26.1 // indirect github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-containerregistry v0.20.6 // indirect + github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect github.com/google/shlex 
v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gordonklaus/ineffassign v0.2.0 // indirect - github.com/gostaticanalysis/analysisutil v0.7.1 // indirect - github.com/gostaticanalysis/comment v1.5.0 // indirect - github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect - github.com/hashicorp/go-version v1.7.0 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect - github.com/hexops/gotextdiff v1.0.3 // indirect github.com/in-toto/in-toto-golang v0.9.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/invopop/jsonschema v0.13.0 // indirect @@ -269,44 +183,19 @@ require ( github.com/jackc/pgx/v5 v5.7.6 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jdx/go-netrc v1.0.0 // indirect - github.com/jgautheron/goconst v1.8.2 // indirect - github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jinzhu/inflection v1.0.0 // indirect - github.com/jjti/go-spancheck v0.6.5 // indirect github.com/joho/godotenv v1.5.1 // indirect github.com/jonboulle/clockwork v0.5.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12 // indirect - github.com/julz/importas v0.2.0 // indirect - github.com/karamaru-alpha/copyloopvar v1.2.1 // indirect - github.com/kisielk/errcheck v1.9.0 // indirect - github.com/kkHAIKE/contextcheck v1.1.6 // indirect github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/pgzip v1.2.6 // indirect - github.com/kulti/thelper v0.7.1 // indirect - github.com/kunwardeep/paralleltest v1.0.14 // indirect - github.com/lasiar/canonicalheader v1.1.2 // indirect - github.com/ldez/exptostd v0.4.4 // indirect - github.com/ldez/gomoddirectives v0.7.1 // indirect - 
github.com/ldez/grignotin v0.10.1 // indirect - github.com/ldez/tagliatelle v0.7.2 // indirect - github.com/ldez/usetesting v0.5.0 // indirect - github.com/leonklingele/grouper v1.1.2 // indirect - github.com/lucasb-eyer/go-colorful v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 // indirect - github.com/macabu/inamedparam v0.2.0 // indirect github.com/mailru/easyjson v0.9.1 // indirect - github.com/manuelarte/embeddedstructfieldcheck v0.4.0 // indirect - github.com/manuelarte/funcorder v0.5.0 // indirect - github.com/maratori/testableexamples v1.0.0 // indirect - github.com/maratori/testpackage v1.1.1 // indirect - github.com/matoous/godox v1.1.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.19 // indirect github.com/mfridman/interpolate v0.0.2 // indirect github.com/mfridman/xflag v0.1.0 // indirect - github.com/mgechev/revive v1.12.0 // indirect github.com/microsoft/go-mssqldb v1.9.3 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -318,24 +207,19 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect - github.com/moricho/tparallel v0.3.2 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect - github.com/muesli/termenv v0.16.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nakabonne/nestif v0.3.1 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect - github.com/nishanths/exhaustive v0.12.0 // indirect - github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.21.0 // indirect github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect 
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect + github.com/onsi/ginkgo/v2 v2.25.3 // indirect + github.com/onsi/gomega v1.38.2 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/paulmach/orb v0.12.0 // indirect github.com/pb33f/jsonpath v0.1.2 // indirect github.com/pb33f/ordered-map/v2 v2.3.0 // indirect - github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/pganalyze/pg_query_go/v6 v6.1.0 // indirect github.com/pierrec/lz4/v4 v4.1.22 // indirect @@ -347,76 +231,42 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/polyfloyd/go-errorlint v1.8.0 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.17.0 // indirect - github.com/quasilyte/go-ruleguard v0.4.5 // indirect - github.com/quasilyte/go-ruleguard/dsl v0.3.23 // indirect - github.com/quasilyte/gogrep v0.5.0 // indirect - github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect - github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/quic-go v0.54.1 // indirect - github.com/raeperd/recvcheck v0.2.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/rivo/uniseg v0.4.7 // indirect github.com/riza-io/grpc-go v0.2.0 // indirect - github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/rs/cors v1.11.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/ryancurrah/gomodguard v1.4.1 // indirect - 
github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect - github.com/sagikazarmark/locafero v0.12.0 // indirect - github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect - github.com/sashamelentyev/interfacebloat v1.1.0 // indirect - github.com/sashamelentyev/usestdlibvars v1.29.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.6.0 // indirect - github.com/securego/gosec/v2 v2.22.9 // indirect github.com/segmentio/asm v1.2.1 // indirect github.com/segmentio/encoding v0.5.3 // indirect + github.com/sergi/go-diff v1.2.0 // indirect github.com/sethvargo/go-retry v0.3.0 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/sivchari/containedctx v1.0.3 // indirect - github.com/sonatard/noctx v0.4.0 // indirect - github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/speakeasy-api/jsonpath v0.6.2 // indirect github.com/speakeasy-api/openapi-overlay v0.10.3 // indirect - github.com/spf13/afero v1.15.0 // indirect - github.com/spf13/cast v1.10.0 // indirect github.com/spf13/cobra v1.10.1 // indirect github.com/spf13/pflag v1.0.10 // indirect - github.com/spf13/viper v1.21.0 // indirect - github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect - github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect github.com/stoewer/go-strcase v1.3.1 // indirect - github.com/stretchr/objx v0.5.2 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/tetafro/godot v1.5.4 // indirect github.com/tetratelabs/wazero v1.9.0 // indirect github.com/tidwall/gjson v1.18.0 // indirect github.com/tidwall/match v1.2.0 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/tidwall/sjson v1.2.5 // indirect - github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 // indirect - github.com/timonwong/loggercheck v0.11.0 // indirect github.com/tklauser/go-sysconf v0.3.15 // 
indirect github.com/tklauser/numcpus v0.10.0 // indirect - github.com/tomarrell/wrapcheck/v2 v2.11.0 // indirect - github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f // indirect github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab // indirect github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d // indirect - github.com/ultraware/funlen v0.2.0 // indirect - github.com/ultraware/whitespace v0.2.0 // indirect - github.com/uudashr/gocognit v1.2.0 // indirect - github.com/uudashr/iface v1.4.1 // indirect github.com/vbatts/tar-split v0.12.1 // indirect github.com/vertica/vertica-sql-go v1.3.4 // indirect github.com/vmware-labs/yaml-jsonpath v0.3.2 // indirect @@ -426,21 +276,11 @@ require ( github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/woodsbury/decimal128 v1.4.0 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/xen0n/gosmopolitan v1.3.0 // indirect - github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect - github.com/yagipy/maintidx v1.0.0 // indirect github.com/yargevad/filepathx v1.0.0 // indirect github.com/ydb-platform/ydb-go-genproto v0.0.0-20250911135631-b3beddd517d9 // indirect github.com/ydb-platform/ydb-go-sdk/v3 v3.116.3 // indirect - github.com/yeya24/promlinter v0.3.0 // indirect - github.com/ykadowak/zerologlint v0.1.5 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/ziutek/mymysql v1.5.4 // indirect - gitlab.com/bosi/decorder v0.4.2 // indirect - go-simpler.org/musttag v0.14.0 // indirect - go-simpler.org/sloglint v0.11.1 // indirect - go.augendre.info/arangolint v0.2.0 // indirect - go.augendre.info/fatcontext v0.9.0 // indirect go.lsp.dev/jsonrpc2 v0.10.0 // indirect go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 // indirect 
go.lsp.dev/protocol v0.12.0 // indirect @@ -453,7 +293,6 @@ require ( go.opentelemetry.io/otel/log v0.14.0 // indirect go.opentelemetry.io/proto/otlp v1.8.0 // indirect go.uber.org/atomic v1.11.0 // indirect - go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/mock v0.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect @@ -462,7 +301,6 @@ require ( go.yaml.in/yaml/v4 v4.0.0-rc.2 // indirect golang.org/x/crypto v0.42.0 // indirect golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect - golang.org/x/exp/typeparams v0.0.0-20250911091902-df9299821621 // indirect golang.org/x/mod v0.28.0 // indirect golang.org/x/oauth2 v0.31.0 // indirect golang.org/x/sync v0.17.0 // indirect @@ -478,7 +316,6 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.6.1 // indirect howett.net/plist v1.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect @@ -487,8 +324,6 @@ require ( modernc.org/mathutil v1.7.1 // indirect modernc.org/memory v1.11.0 // indirect modernc.org/sqlite v1.39.0 // indirect - mvdan.cc/gofumpt v0.9.1 // indirect - mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4 // indirect pluginrpc.com/pluginrpc v0.5.0 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect @@ -496,4 +331,10 @@ require ( sigs.k8s.io/yaml v1.6.0 // indirect ) +// Yaml parsing errors +replace github.com/dprotaso/go-yit => github.com/dprotaso/go-yit v0.0.0-20191028211022-135eb7262960 + +// Sqlc engine errors +replace github.com/pingcap/tidb/pkg/parser => github.com/pingcap/tidb/pkg/parser v0.0.0-20250806091815-327a22d5ebf8 + tool github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen diff --git a/go/go.sum b/go/go.sum index b37c647c23..32ffee36ef 100644 --- a/go/go.sum +++ b/go/go.sum @@ -1,7 +1,3 @@ 
-4d63.com/gocheckcompilerdirectives v1.3.0 h1:Ew5y5CtcAAQeTVKUVFrE7EwHMrTO6BggtEj8BZSjZ3A= -4d63.com/gocheckcompilerdirectives v1.3.0/go.mod h1:ofsJ4zx2QAuIP/NO/NAh1ig6R1Fb18/GI7RVMwz7kAY= -4d63.com/gochecknoglobals v0.2.2 h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU= -4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.9-20250718181942-e35f9b667443.1 h1:HiLfreYRsqycF5QDlsnvSQOnl4tvhBoROl8+DkbaphI= buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.9-20250718181942-e35f9b667443.1/go.mod h1:WSxC6zKCpqVRcGZCpOgVwkATp9XBIleoAdSAnkq7dhw= buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.9-20250912141014-52f32327d4b0.1 h1:DQLS/rRxLHuugVzjJU5AvOwD57pdFl9he/0O7e5P294= @@ -36,34 +32,16 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -codeberg.org/chavacava/garif v0.2.0 h1:F0tVjhYbuOCnvNcU3YSpO6b3Waw6Bimy4K0mM8y6MfY= -codeberg.org/chavacava/garif v0.2.0/go.mod h1:P2BPbVbT4QcvLZrORc2T29szK3xEOlnl0GiPTJmEqBQ= connectrpc.com/connect v1.19.0 h1:LuqUbq01PqbtL0o7vn0WMRXzR2nNsiINe5zfcJ24pJM= connectrpc.com/connect v1.19.0/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w= connectrpc.com/otelconnect v0.8.0 h1:a4qrN4H8aEE2jAoCxheZYYfEjXMgVPyL9OzPQLBEFXU= connectrpc.com/otelconnect v0.8.0/go.mod h1:AEkVLjCPXra+ObGFCOClcJkNjS7zPaQSqvO0lCyjfZc= -dev.gaijin.team/go/exhaustruct/v4 v4.0.0 h1:873r7aNneqoBB3IaFIzhvt2RFYTuHgmMjoKfwODoI1Y= -dev.gaijin.team/go/exhaustruct/v4 v4.0.0/go.mod h1:aZ/k2o4Y05aMJtiux15x8iXaumE88YdiB0Ai4fXOzPI= -dev.gaijin.team/go/golib v0.7.0 h1:Ho2217eFXSnP78iCX21Bq7KkcY8b2cJJULtc5SXOeF0= -dev.gaijin.team/go/golib v0.7.0/go.mod 
h1:c5fu7t1RSGMxSQgcUYO1sODbzsYnOCXJLmHeNG1Eb+0= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/4meepo/tagalign v1.4.3 h1:Bnu7jGWwbfpAie2vyl63Zup5KuRv21olsPIha53BJr8= -github.com/4meepo/tagalign v1.4.3/go.mod h1:00WwRjiuSbrRJnSVeGWPLp2epS5Q/l4UEy0apLLS37c= -github.com/Abirdcfly/dupword v0.1.6 h1:qeL6u0442RPRe3mcaLcbaCi2/Y/hOcdtw6DE9odjz9c= -github.com/Abirdcfly/dupword v0.1.6/go.mod h1:s+BFMuL/I4YSiFv29snqyjwzDp4b65W2Kvy+PKzZ6cw= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= -github.com/AdminBenni/iota-mixing v1.0.0 h1:Os6lpjG2dp/AE5fYBPAA1zfa2qMdCAWwPMCgpwKq7wo= -github.com/AdminBenni/iota-mixing v1.0.0/go.mod h1:i4+tpAaB+qMVIV9OK3m4/DAynOd5bQFaOu+2AhtBCNY= -github.com/AlwxSin/noinlineerr v1.0.5 h1:RUjt63wk1AYWTXtVXbSqemlbVTb23JOSRiNsshj7TbY= -github.com/AlwxSin/noinlineerr v1.0.5/go.mod h1:+QgkkoYrMH7RHvcdxdlI7vYYEdgeoFOVjU9sUhw/rQc= -github.com/Antonboom/errname v1.1.1 h1:bllB7mlIbTVzO9jmSWVWLjxTEbGBVQ1Ff/ClQgtPw9Q= -github.com/Antonboom/errname v1.1.1/go.mod h1:gjhe24xoxXp0ScLtHzjiXp0Exi1RFLKJb0bVBtWKCWQ= -github.com/Antonboom/nilnil v1.1.1 h1:9Mdr6BYd8WHCDngQnNVV0b554xyisFioEKi30sksufQ= -github.com/Antonboom/nilnil v1.1.1/go.mod h1:yCyAmSw3doopbOWhJlVci+HuyNRuHJKIv6V2oYQa8II= -github.com/Antonboom/testifylint v1.6.4 h1:gs9fUEy+egzxkEbq9P4cpcMB6/G0DYdMeiFS87UiqmQ= -github.com/Antonboom/testifylint v1.6.4/go.mod h1:YO33FROXX2OoUfwjz8g+gUxQXio5i9qpVy7nXGbxDD4= +github.com/AfterShip/clickhouse-sql-parser v0.4.15 h1:OJCabxkbtnWHtjkEUH0BHZQGzDGyRrRIoMdR1ayRvJA= +github.com/AfterShip/clickhouse-sql-parser v0.4.15/go.mod h1:W0Z82wJWkJxz2RVun/RMwxue3g7ut47Xxl+SFqdJGus= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible 
h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= @@ -80,45 +58,19 @@ github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg6 github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= -github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/ClickHouse/ch-go v0.68.0 h1:zd2VD8l2aVYnXFRyhTyKCrxvhSz1AaY4wBUXu/f0GiU= github.com/ClickHouse/ch-go v0.68.0/go.mod h1:C89Fsm7oyck9hr6rRo5gqqiVtaIY6AjdD0WFMyNRQ5s= github.com/ClickHouse/clickhouse-go/v2 v2.40.1 h1:PbwsHBgqXRydU7jKULD1C8CHmifczffvQqmFvltM2W4= github.com/ClickHouse/clickhouse-go/v2 v2.40.1/go.mod h1:GDzSBLVhladVm8V01aEB36IoBOVLLICfyeuiIp/8Ezc= -github.com/Djarvur/go-err113 v0.1.1 h1:eHfopDqXRwAi+YmCUas75ZE0+hoBHJ2GQNLYRSxao4g= -github.com/Djarvur/go-err113 v0.1.1/go.mod h1:IaWJdYFLg76t2ihfflPZnM1LIQszWOsFDh2hhhAVF6k= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA= github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok= -github.com/MirrexOne/unqueryvet v1.2.1 
h1:M+zdXMq84g+E1YOLa7g7ExN3dWfZQrdDSTCM7gC+m/A= -github.com/MirrexOne/unqueryvet v1.2.1/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg= -github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4= -github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo= github.com/TwiN/go-color v1.4.1 h1:mqG0P/KBgHKVqmtL5ye7K0/Gr4l6hTksPgTgMk3mUzc= github.com/TwiN/go-color v1.4.1/go.mod h1:WcPf/jtiW95WBIsEeY1Lc/b8aaWoiqQpu5cf8WFxu+s= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= -github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= -github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw= -github.com/alecthomas/chroma/v2 v2.20.0/go.mod h1:e7tViK0xh/Nf4BYHl00ycY6rV7b8iXBksI9E359yNmA= -github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= -github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= -github.com/alecthomas/repr v0.5.1 h1:E3G4t2QbHTSNpPKBgMTln5KLkZHLOcU7r37J4pXBuIg= -github.com/alecthomas/repr v0.5.1/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= -github.com/alexkohler/nakedret/v2 v2.0.6 h1:ME3Qef1/KIKr3kWX3nti3hhgNxw6aqN5pZmQiFSsuzQ= -github.com/alexkohler/nakedret/v2 v2.0.6/go.mod h1:l3RKju/IzOMQHmsEvXwkqMDzHHvurNQfAgE1eVmT40Q= -github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= -github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/alfatraining/structtag v1.0.0 h1:2qmcUqNcCoyVJ0up879K614L9PazjBSFruTB0GOFjCc= -github.com/alfatraining/structtag v1.0.0/go.mod h1:p3Xi5SwzTi+Ryj64DqjLWz7XurHxbGsq6y3ubePJPus= -github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= -github.com/alingse/asasalint 
v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= -github.com/alingse/nilnesserr v0.2.0 h1:raLem5KG7EFVb4UIDAXgrv3N2JIaffeKNtcEXkEWd/w= -github.com/alingse/nilnesserr v0.2.0/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg= github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc= github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA= github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= @@ -126,10 +78,6 @@ github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUS github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= -github.com/ashanbrown/forbidigo/v2 v2.1.0 h1:NAxZrWqNUQiDz19FKScQ/xvwzmij6BiOw3S0+QUQ+Hs= -github.com/ashanbrown/forbidigo/v2 v2.1.0/go.mod h1:0zZfdNAuZIL7rSComLGthgc/9/n2FqspBOH90xlCHdA= -github.com/ashanbrown/makezero/v2 v2.0.1 h1:r8GtKetWOgoJ4sLyUx97UTwyt2dO7WkGFHizn/Lo8TY= -github.com/ashanbrown/makezero/v2 v2.0.1/go.mod h1:kKU4IMxmYW1M4fiEHMb2vc5SFoPzXvgbMR9gIp5pjSw= github.com/aws/aws-sdk-go-v2 v1.36.6 h1:zJqGjVbRdTPojeCGWn5IR5pbJwSQSBh5RWFTQcEQGdU= github.com/aws/aws-sdk-go-v2 v1.36.6/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E= @@ -166,25 +114,11 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 h1:aUrLQwJfZtwv3/ZNG2xRtEen+NqI github.com/aws/aws-sdk-go-v2/service/sts v1.34.1/go.mod h1:3wFBZKoWnX3r+Sm7in79i54fBmNfwhdNdQuscCw7QIk= github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= 
-github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= -github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w= -github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo= -github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= -github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= -github.com/bombsimon/wsl/v4 v4.7.0 h1:1Ilm9JBPRczjyUs6hvOPKvd7VL1Q++PL8M0SXBDf+jQ= -github.com/bombsimon/wsl/v4 v4.7.0/go.mod h1:uV/+6BkffuzSAVYD+yGyld1AChO7/EuLrCF/8xTiapg= -github.com/bombsimon/wsl/v5 v5.2.0 h1:PyCCwd3Q7abGs3e34IW4jLYlBS+FbsU6iK+Tb3NnDp4= -github.com/bombsimon/wsl/v5 v5.2.0/go.mod h1:Gp8lD04z27wm3FANIUPZycXp+8huVsn0oxc+n4qfV9I= -github.com/breml/bidichk v0.3.3 h1:WSM67ztRusf1sMoqH6/c4OBCUlRVTKq+CbSeo0R17sE= -github.com/breml/bidichk v0.3.3/go.mod h1:ISbsut8OnjB367j5NseXEGGgO/th206dVa427kR8YTE= -github.com/breml/errchkjson v0.4.1 h1:keFSS8D7A2T0haP9kzZTi7o26r7kE3vymjZNeNDRDwg= -github.com/breml/errchkjson v0.4.1/go.mod h1:a23OvR6Qvcl7DG/Z4o0el6BRAjKnaReoPQFciAl9U3s= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -207,14 +141,6 @@ github.com/bufbuild/protoplugin 
v0.0.0-20250218205857-750e09ce93e1 h1:V1xulAoqLq github.com/bufbuild/protoplugin v0.0.0-20250218205857-750e09ce93e1/go.mod h1:c5D8gWRIZ2HLWO3gXYTtUfw/hbJyD8xikv2ooPxnklQ= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/butuzov/ireturn v0.4.0 h1:+s76bF/PfeKEdbG8b54aCocxXmi0wvYdOVsWxVO7n8E= -github.com/butuzov/ireturn v0.4.0/go.mod h1:ghI0FrCmap8pDWZwfPisFD1vEc56VKH4NpQUxDHta70= -github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= -github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= -github.com/catenacyber/perfsprint v0.9.1 h1:5LlTp4RwTooQjJCvGEFV6XksZvWE7wCOUvjD2z0vls0= -github.com/catenacyber/perfsprint v0.9.1/go.mod h1:q//VWC2fWbcdSLEY1R3l8n0zQCDPdE4IjZwyY1HMunM= -github.com/ccojocar/zxcvbn-go v1.0.4 h1:FWnCIRMXPj43ukfX000kvBZvV6raSxakYr1nzyNrUcc= -github.com/ccojocar/zxcvbn-go v1.0.4/go.mod h1:3GxGX+rHmueTUMvm5ium7irpyjmm7ikxYFOSJB21Das= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= @@ -223,23 +149,7 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= -github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= -github.com/charmbracelet/colorprofile v0.3.2 h1:9J27WdztfJQVAQKX2WOlSSRB+5gaKqqITmrvb1uTIiI= 
-github.com/charmbracelet/colorprofile v0.3.2/go.mod h1:mTD5XzNeWHj8oqHb+S1bssQb7vIHbepiebQ2kPKVKbI= -github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= -github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= -github.com/charmbracelet/x/ansi v0.10.1 h1:rL3Koar5XvX0pHGfovN03f5cxLbCF2YvLeyz7D2jVDQ= -github.com/charmbracelet/x/ansi v0.10.1/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE= -github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k= -github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= -github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= -github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= -github.com/ckaznocha/intrange v0.3.1 h1:j1onQyXvHUsPWujDH6WIjhyH26gkRt/txNlV7LspvJs= -github.com/ckaznocha/intrange v0.3.1/go.mod h1:QVepyz1AkUoFQkpEqksSYpNpUo3c5W7nWh/s6SHIJJk= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY= -github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= @@ -287,21 +197,11 @@ github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cubicdaiya/gonp v1.0.4 h1:ky2uIAJh81WiLcGKBVD5R7KsM/36W6IqqTy6Bo6rGws= github.com/cubicdaiya/gonp v1.0.4/go.mod h1:iWGuP/7+JVTn02OWhRemVbMmG1DOUnmrGTYYACpOI0I= 
-github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs= -github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= -github.com/daixiang0/gci v0.13.7 h1:+0bG5eK9vlI08J+J/NWGbWPTNiXPG4WhNLJOkSxWITQ= -github.com/daixiang0/gci v0.13.7/go.mod h1:812WVN6JLFY9S6Tv76twqmNqevN0pa3SX3nih0brVzQ= -github.com/dave/dst v0.27.3 h1:P1HPoMza3cMEquVf9kKy8yXsFirry4zEnWOdYPOoIzY= -github.com/dave/dst v0.27.3/go.mod h1:jHh6EOibnHgcUW3WjKHisiooEkYwqpHLBSX1iOBhEyc= -github.com/dave/jennifer v1.7.1 h1:B4jJJDHelWcDhlRQxWeo0Npa/pYKBLrirAQoTN45txo= -github.com/dave/jennifer v1.7.1/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denis-tingaikin/go-header v1.0.0 h1:QIwZWb3jLC6pOp9NEFldiD8raqRmCE/n0VUdZKW32x8= -github.com/denis-tingaikin/go-header v1.0.0/go.mod h1:NT3qKwqsXQYp8WHVgkwxL49qB5jsRmdr9dGQCDfpmZ0= github.com/depot/depot-go v0.5.1 h1:Kdrsk8q7W2fQvoudWNjxsXG4ZbdlUAa6EV18udDnTFQ= github.com/depot/depot-go v0.5.1/go.mod h1:QQtSqwRn0flx4KxrUVSJGlh0hTFeZ19MLYvOcJbxtP0= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= @@ -324,9 +224,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ= github.com/dolthub/maphash 
v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4= +github.com/dprotaso/go-yit v0.0.0-20191028211022-135eb7262960 h1:aRd8M7HJVZOqn/vhOzrGcQH0lNAMkqMn+pXUYkatmcA= github.com/dprotaso/go-yit v0.0.0-20191028211022-135eb7262960/go.mod h1:9HQzr9D/0PGwMEbC3d5AB7oi67+h4TsQqItC1GVYG58= -github.com/dprotaso/go-yit v0.0.0-20250909171706-0a81c39169bc h1:YxqE1wh+qGVXQFinuRq5lT77h6baDtBnAVh61LlXp1o= -github.com/dprotaso/go-yit v0.0.0-20250909171706-0a81c39169bc/go.mod h1:5NQLChvz4dnEIQ8WcHIFbZ1bp0GEUZHiBH+EpTZ4lBc= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ebitengine/purego v0.9.0 h1:mh0zpKBIXDceC63hpvPuGLiJ8ZAa3DfrFTudmfi8A4k= @@ -345,38 +244,22 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= -github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/firefart/nonamedreturns v1.0.6 
h1:vmiBcKV/3EqKY3ZiPxCINmpS431OcE1S47AQUwhrg8E= -github.com/firefart/nonamedreturns v1.0.6/go.mod h1:R8NisJnSIpvPWheCq0mNRXJok6D8h7fagJTF8EMEwCo= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= -github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= -github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo= github.com/gammazero/deque v1.1.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg= github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ= github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghostiam/protogetter v0.3.16 h1:UkrisuJBYLnZW6FcYUNBDJOqY3X22RtoYMlCsiNlFFA= -github.com/ghostiam/protogetter v0.3.16/go.mod h1:4SRRIv6PcjkIMpUkRUsP4TsUTqO/N3Fmvwivuc/sCHA= github.com/go-acme/lego/v4 v4.25.2 h1:+D1Q+VnZrD+WJdlkgUEGHFFTcDrwGlE7q24IFtMmHDI= github.com/go-acme/lego/v4 v4.25.2/go.mod h1:OORYyVNZPaNdIdVYCGSBNRNZDIjhQbPuFxwGDgWj/yM= github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= -github.com/go-critic/go-critic v0.13.0 h1:kJzM7wzltQasSUXtYyTl6UaPVySO6GkaR1thFnJ6afY= 
-github.com/go-critic/go-critic v0.13.0/go.mod h1:M/YeuJ3vOCQDnP2SU+ZhjgRzwzcBW87JqLpMJLrZDLI= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= @@ -421,41 +304,12 @@ github.com/go-openapi/swag/typeutils v0.25.1 h1:rD/9HsEQieewNt6/k+JBwkxuAHktFtH3 github.com/go-openapi/swag/typeutils v0.25.1/go.mod h1:9McMC/oCdS4BKwk2shEB7x17P6HmMmA6dQRtAkSnNb8= github.com/go-openapi/swag/yamlutils v0.25.1 h1:mry5ez8joJwzvMbaTGLhw8pXUnhDK91oSJLDPF1bmGk= github.com/go-openapi/swag/yamlutils v0.25.1/go.mod h1:cm9ywbzncy3y6uPm/97ysW8+wZ09qsks+9RS8fLWKqg= -github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= -github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= -github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= -github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= -github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= -github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= -github.com/go-toolsmith/astequal v1.1.0/go.mod 
h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= -github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw= -github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY= -github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= -github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= -github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= -github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= -github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= -github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= -github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= -github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= -github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= -github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godoc-lint/godoc-lint v0.10.0 h1:OcyrziBi18sQSEpib6NesVHEJ/Xcng97NunePBA48g4= -github.com/godoc-lint/godoc-lint v0.10.0/go.mod h1:KleLcHu/CGSvkjUH2RvZyoK1MBC7pDQg4NxMYLcBBsw= github.com/gofrs/flock v0.12.1 
h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -488,30 +342,6 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/asciicheck v0.5.0 h1:jczN/BorERZwK8oiFBOGvlGPknhvq0bjnysTj4nUfo0= -github.com/golangci/asciicheck v0.5.0/go.mod h1:5RMNAInbNFw2krqN6ibBxN/zfRFa9S6tA1nPdM0l8qQ= -github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6OmeSBYQJNSif4O11+bmWEz+C7FYw= -github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32/go.mod h1:NUw9Zr2Sy7+HxzdjIULge71wI6yEg1lWQr7Evcu8K0E= -github.com/golangci/go-printf-func-name v0.1.1 h1:hIYTFJqAGp1iwoIfsNTpoq1xZAarogrvjO9AfiW3B4U= -github.com/golangci/go-printf-func-name v0.1.1/go.mod h1:Es64MpWEZbh0UBtTAICOZiB+miW53w/K9Or/4QogJss= -github.com/golangci/gofmt v0.0.0-20250704145412-3e58ba0443c6 h1:jlKy3uQkETB3zMBK8utduvojT+If2nDAM1pWpEzXjaY= -github.com/golangci/gofmt v0.0.0-20250704145412-3e58ba0443c6/go.mod h1:OyaRySOXorMn8zJqFku8YsKptIhPkANyKKTMC+rqMCs= -github.com/golangci/golangci-lint/v2 v2.5.0 h1:BDRg4ASm4J1y/DSRY6zwJ5tr5Yy8ZqbZ79XrCeFxaQo= -github.com/golangci/golangci-lint/v2 v2.5.0/go.mod h1:IJtWJBZkLbx7AVrIUzLd8Oi3ADtwaNpWbR3wthVWHcc= -github.com/golangci/golines v0.0.0-20250821215611-d4663ad2c370 h1:O2u8NCU/gGczNpU7/yjZIAvXMHLwKCAKsNc8axyQPWU= -github.com/golangci/golines v0.0.0-20250821215611-d4663ad2c370/go.mod h1:k9mmcyWKSTMcPPvQUCfRWWQ9VHJ1U9Dc0R7kaXAgtnQ= -github.com/golangci/misspell v0.7.0 h1:4GOHr/T1lTW0hhR4tgaaV1WS/lJ+ncvYCoFKmqJsj0c= -github.com/golangci/misspell v0.7.0/go.mod 
h1:WZyyI2P3hxPY2UVHs3cS8YcllAeyfquQcKfdeE9AFVg= -github.com/golangci/nilerr v0.0.0-20250918000102-015671e622fe h1:F1pK9tBy41i7eesBFkSNMldwtiAaWiU+3fT/24sTnNI= -github.com/golangci/nilerr v0.0.0-20250918000102-015671e622fe/go.mod h1:CtTxAluxD2ng9aIT9bPrVoMuISFWCD+SaxtvYtdWA2k= -github.com/golangci/plugin-module-register v0.1.2 h1:e5WM6PO6NIAEcij3B053CohVp3HIYbzSuP53UAYgOpg= -github.com/golangci/plugin-module-register v0.1.2/go.mod h1:1+QGTsKBvAIvPvoY/os+G5eoqxWn70HYDm2uvUyGuVw= -github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2s= -github.com/golangci/revgrep v0.8.0/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= -github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e h1:ai0EfmVYE2bRA5htgAG9r7s3tHsfjIhN98WshBTJ9jM= -github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e/go.mod h1:Vrn4B5oR9qRwM+f54koyeH3yzphlecwERs0el27Fr/s= -github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e h1:gD6P7NEo7Eqtt0ssnqSJNNndxe69DOQ24A5h7+i3KpM= -github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e/go.mod h1:h+wZwLjUTJnm/P2rwlbJdRPZXOzaT36/FwnPnY2inzc= github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ= github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= @@ -522,10 +352,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= @@ -538,34 +366,12 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gordonklaus/ineffassign v0.2.0 h1:Uths4KnmwxNJNzq87fwQQDDnbNb7De00VOk9Nu0TySs= -github.com/gordonklaus/ineffassign v0.2.0/go.mod h1:TIpymnagPSexySzs7F9FnO1XFTy8IT3a59vmZp5Y9Lw= -github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= -github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= -github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= -github.com/gostaticanalysis/comment v1.5.0 h1:X82FLl+TswsUMpMh17srGRuKaaXprTaytmEpgnKIDu8= -github.com/gostaticanalysis/comment v1.5.0/go.mod h1:V6eb3gpCv9GNVqb6amXzEUX3jXLVK/AdA+IrAMSqvEc= -github.com/gostaticanalysis/forcetypeassert v0.2.0 h1:uSnWrrUEYDr86OCxWa4/Tp2jeYDlogZiZHzGkWFefTk= -github.com/gostaticanalysis/forcetypeassert v0.2.0/go.mod h1:M5iPavzE9pPqWyeiVXSFghQjljW1+l/Uke3PXHS6ILY= -github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= -github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8= -github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs= github.com/grpc-ecosystem/grpc-gateway 
v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= -github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= -github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= @@ -585,16 +391,10 @@ github.com/jdx/go-netrc v1.0.0 h1:QbLMLyCZGj0NA8glAhxUpf1zDg6cxnWgMBbjq40W0gQ= github.com/jdx/go-netrc v1.0.0/go.mod 
h1:Gh9eFQJnoTNIRHXl2j5bJXA1u84hQWJWgGh569zF3v8= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jgautheron/goconst v1.8.2 h1:y0XF7X8CikZ93fSNT6WBTb/NElBu9IjaY7CCYQrCMX4= -github.com/jgautheron/goconst v1.8.2/go.mod h1:A0oxgBCHy55NQn6sYpO7UdnA9p+h7cPtoOZUmvNIako= github.com/jhump/protoreflect/v2 v2.0.0-beta.2 h1:qZU+rEZUOYTz1Bnhi3xbwn+VxdXkLVeEpAeZzVXLY88= github.com/jhump/protoreflect/v2 v2.0.0-beta.2/go.mod h1:4tnOYkB/mq7QTyS3YKtVtNrJv4Psqout8HA1U+hZtgM= -github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= -github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jjti/go-spancheck v0.6.5 h1:lmi7pKxa37oKYIMScialXUK6hP3iY5F1gu+mLBPgYB8= -github.com/jjti/go-spancheck v0.6.5/go.mod h1:aEogkeatBrbYsyW6y5TgDfihCulDYciL1B7rG2vSsrU= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= @@ -605,16 +405,8 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12 h1:9Nu54bhS/H/Kgo2/7xNSUuC5G28VR8ljfrLKU2G4IjU= github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12/go.mod h1:TBzl5BIHNXfS9+C35ZyJaklL7mLDbgUkcgXzSLa8Tk0= -github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= -github.com/julz/importas 
v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= -github.com/karamaru-alpha/copyloopvar v1.2.1 h1:wmZaZYIjnJ0b5UoKDjUHrikcV0zuPyyxI4SVplLd2CI= -github.com/karamaru-alpha/copyloopvar v1.2.1/go.mod h1:nFmMlFNlClC2BPvNaHMdkirmTJxVCY0lhxBtlfOypMM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.9.0 h1:9xt1zI9EBfcYBvdU1nVrzMzzUPUtPKs9bVSIM3TAb3M= -github.com/kisielk/errcheck v1.9.0/go.mod h1:kQxWMMVZgIkDq7U8xtG/n2juOjbLgZtedi0D+/VL/i8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkHAIKE/contextcheck v1.1.6 h1:7HIyRcnyzxL9Lz06NGhiKvenXq7Zw6Q0UQu/ttjfJCE= -github.com/kkHAIKE/contextcheck v1.1.6/go.mod h1:3dDbMRNBFaq8HFXWC1JyvDSPm43CmE6IuHam8Wr0rkg= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= @@ -628,62 +420,24 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.7.1 h1:fI8QITAoFVLx+y+vSyuLBP+rcVIB8jKooNSCT2EiI98= -github.com/kulti/thelper v0.7.1/go.mod h1:NsMjfQEy6sd+9Kfw8kCP61W1I0nerGSYSFnGaxQkcbs= -github.com/kunwardeep/paralleltest v1.0.14 h1:wAkMoMeGX/kGfhQBPODT/BL8XhK23ol/nuQ3SwFaUw8= -github.com/kunwardeep/paralleltest v1.0.14/go.mod h1:di4moFqtfz3ToSKxhNjhOZL+696QtJGCFe132CbBLGk= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lasiar/canonicalheader 
v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4= -github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI= -github.com/ldez/exptostd v0.4.4 h1:58AtQjnLcT/tI5W/1KU7xE/O7zW9RAWB6c/ScQAnfus= -github.com/ldez/exptostd v0.4.4/go.mod h1:QfdzPw6oHjFVdNV7ILoPu5sw3OZ3OG1JS0I5JN3J4Js= -github.com/ldez/gomoddirectives v0.7.1 h1:FaULkvUIG36hj6chpwa+FdCNGZBsD7/fO+p7CCsM6pE= -github.com/ldez/gomoddirectives v0.7.1/go.mod h1:auDNtakWJR1rC+YX7ar+HmveqXATBAyEK1KYpsIRW/8= -github.com/ldez/grignotin v0.10.1 h1:keYi9rYsgbvqAZGI1liek5c+jv9UUjbvdj3Tbn5fn4o= -github.com/ldez/grignotin v0.10.1/go.mod h1:UlDbXFCARrXbWGNGP3S5vsysNXAPhnSuBufpTEbwOas= -github.com/ldez/tagliatelle v0.7.2 h1:KuOlL70/fu9paxuxbeqlicJnCspCRjH0x8FW+NfgYUk= -github.com/ldez/tagliatelle v0.7.2/go.mod h1:PtGgm163ZplJfZMZ2sf5nhUT170rSuPgBimoyYtdaSI= -github.com/ldez/usetesting v0.5.0 h1:3/QtzZObBKLy1F4F8jLuKJiKBjjVFi1IavpoWbmqLwc= -github.com/ldez/usetesting v0.5.0/go.mod h1:Spnb4Qppf8JTuRgblLrEWb7IE6rDmUpGvxY3iRrzvDQ= -github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= -github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= github.com/lmittmann/tint v1.1.2 h1:2CQzrL6rslrsyjqLDwD11bZ5OpLBPU+g3G/r5LSfS8w= github.com/lmittmann/tint v1.1.2/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= -github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag= -github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 h1:mFWunSatvkQQDhpdyuFAYwyAan3hzCuma+Pz8sqvOfg= github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= -github.com/macabu/inamedparam v0.2.0 h1:VyPYpOc10nkhI2qeNUdh3Zket4fcZjEWe35poddBCpE= -github.com/macabu/inamedparam v0.2.0/go.mod h1:+Pee9/YfGe5LJ62pYXqB89lJ+0k5bsR8Wgz/C0Zlq3U= 
github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8= github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/manuelarte/embeddedstructfieldcheck v0.4.0 h1:3mAIyaGRtjK6EO9E73JlXLtiy7ha80b2ZVGyacxgfww= -github.com/manuelarte/embeddedstructfieldcheck v0.4.0/go.mod h1:z8dFSyXqp+fC6NLDSljRJeNQJJDWnY7RoWFzV3PC6UM= -github.com/manuelarte/funcorder v0.5.0 h1:llMuHXXbg7tD0i/LNw8vGnkDTHFpTnWqKPI85Rknc+8= -github.com/manuelarte/funcorder v0.5.0/go.mod h1:Yt3CiUQthSBMBxjShjdXMexmzpP8YGvGLjrxJNkO2hA= -github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= -github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= -github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= -github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= -github.com/matoous/godox v1.1.0 h1:W5mqwbyWrwZv6OQ5Z1a/DHGMOvXYCBP3+Ht7KMoJhq4= -github.com/matoous/godox v1.1.0/go.mod h1:jgE/3fUXiTurkdHOLT5WEkThTSuE7yxHv5iWPa80afs= -github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= -github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= -github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/maypok86/otter v1.2.4 h1:HhW1Pq6VdJkmWwcZZq19BlEQkHtI8xgsQzBVXJU0nfc= github.com/maypok86/otter v1.2.4/go.mod h1:mKLfoI7v1HOmQMwFgX4QkRk23mX6ge3RDvjdHOWG4R4= 
github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= github.com/mfridman/xflag v0.1.0 h1:TWZrZwG1QklFX5S4j1vxfF1sZbZeZSGofMwPMLAF29M= github.com/mfridman/xflag v0.1.0/go.mod h1:/483ywM5ZO5SuMVjrIGquYNE5CzLrj5Ux/LxWWnjRaE= -github.com/mgechev/revive v1.12.0 h1:Q+/kkbbwerrVYPv9d9efaPGmAO/NsxwW/nE6ahpQaCU= -github.com/mgechev/revive v1.12.0/go.mod h1:VXsY2LsTigk8XU9BpZauVLjVrhICMOV3k1lpB3CXrp8= github.com/microsoft/go-mssqldb v1.9.3 h1:hy4p+LDC8LIGvI3JATnLVmBOLMJbmn5X400mr5j0lPs= github.com/microsoft/go-mssqldb v1.9.3/go.mod h1:GBbW9ASTiDC+mpgWDGKdm3FnFLTUsLYN3iFL90lQ+PA= github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= @@ -721,30 +475,16 @@ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWu github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= -github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= -github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 
h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= -github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= -github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= -github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= -github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= -github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 h1:dOYG7LS/WK00RWZc8XGgcUTlTxpp3mKhdR2Q9z9HbXM= github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= -github.com/nunnatsa/ginkgolinter v0.21.0 h1:IYwuX+ajy3G1MezlMLB1BENRtFj16+Evyi4uki1NOOQ= -github.com/nunnatsa/ginkgolinter v0.21.0/go.mod h1:QlzY9UP9zaqu58FjYxhp9bnjuwXwG1bfW5rid9ChNMw= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oapi-codegen/nullable v1.1.0 h1:eAh8JVc5430VtYVnq00Hrbpag9PFRGWLjxR1/3KntMs= github.com/oapi-codegen/nullable v1.1.0/go.mod h1:KUZ3vUzkmEKY90ksAmit2+5juDIhIZhfDl+0PwOQlFY= github.com/oapi-codegen/oapi-codegen/v2 v2.5.0 h1:iJvF8SdB/3/+eGOXEpsWkD8FQAHj6mqkb6Fnsoc8MFU= @@ -757,9 +497,8 @@ github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletI github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= github.com/onsi/ginkgo v1.6.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94= github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw= github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -774,13 +513,6 @@ github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8= github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U= -github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= -github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= -github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= -github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= -github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/paulmach/orb v0.12.0 h1:z+zOwjmG3MyEEqzv92UN49Lg1JFYx0L9GpGKNVDKk1s= github.com/paulmach/orb v0.12.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod 
h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= @@ -794,8 +526,6 @@ github.com/pb33f/ordered-map/v2 v2.3.0 h1:k2OhVEQkhTCQMhAicQ3Z6iInzoZNQ7L9MVomwK github.com/pb33f/ordered-map/v2 v2.3.0/go.mod h1:oe5ue+6ZNhy7QN9cPZvPA23Hx0vMHnNVeMg4fGdCANw= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/pganalyze/pg_query_go/v6 v6.1.0 h1:jG5ZLhcVgL1FAw4C/0VNQaVmX1SUJx71wBGdtTtBvls= @@ -809,8 +539,8 @@ github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 h1:tdMsjOqUR7YXH github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86/go.mod h1:exzhVYca3WRtd6gclGNErRWb1qEgff3LYta0LvRmON4= github.com/pingcap/log v1.1.0 h1:ELiPxACz7vdo1qAvvaWJg1NrYFoY6gqAh/+Uo6aXdD8= github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= -github.com/pingcap/tidb/pkg/parser v0.0.0-20250930040325-007861065ee1 h1:Mlxp81Fre/xHTmszxCHbuIeKd+KZQp8xoN7CgV6ypJc= -github.com/pingcap/tidb/pkg/parser v0.0.0-20250930040325-007861065ee1/go.mod h1:F7q7Qh9lNJyVS5ZliB1hA4uioJN1tOfnNf9KleLZVqo= +github.com/pingcap/tidb/pkg/parser v0.0.0-20250806091815-327a22d5ebf8 h1:q/BiM/E7N9M7zWhTwyRbVVmU2XQ/1PrYuefr5Djni0g= +github.com/pingcap/tidb/pkg/parser v0.0.0-20250806091815-327a22d5ebf8/go.mod h1:mpCcwRdMnmvNkBxcT4AqiE0yuvfJTdmCJs7cfznJw1w= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors 
v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -821,12 +551,8 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v1.8.0 h1:DL4RestQqRLr8U4LygLw8g2DX6RN1eBJOpa2mzsrl1Q= -github.com/polyfloyd/go-errorlint v1.8.0/go.mod h1:G2W0Q5roxbLCt0ZQbdoxQxXktTjwNyDbEaj3n7jvl4s= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/pressly/goose/v3 v3.25.0 h1:6WeYhMWGRCzpyd89SpODFnCBCKz41KrVbRT58nVjGng= github.com/pressly/goose/v3 v3.25.0/go.mod h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= @@ -839,22 +565,10 @@ github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+ github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/quasilyte/go-ruleguard v0.4.5 h1:AGY0tiOT5hJX9BTdx/xBdoCubQUAE2grkqY2lSwvZcA= -github.com/quasilyte/go-ruleguard v0.4.5/go.mod h1:Vl05zJ538vcEEwu16V/Hdu7IYZWyKSwIy4c88Ro1kRE= -github.com/quasilyte/go-ruleguard/dsl v0.3.23 
h1:lxjt5B6ZCiBeeNO8/oQsegE6fLeCzuMRoVWSkXC4uvY= -github.com/quasilyte/go-ruleguard/dsl v0.3.23/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= -github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= -github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= -github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= -github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= -github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg= github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= -github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI= -github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= github.com/redis/go-redis/v9 v9.14.0 h1:u4tNCjXOyzfgeLN+vAZaW1xUooqWDqVEsZN0U01jfAE= github.com/redis/go-redis/v9 v9.14.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/rekby/fixenv v0.6.1 h1:jUFiSPpajT4WY2cYuc++7Y1zWrnCxnovGCIX72PZniM= @@ -863,8 +577,6 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/restatedev/sdk-go v0.21.0 h1:A0Ss0o8ZvUReGmiGJYe9dB8lIXWu/tytsKDt/UIGXAA= github.com/restatedev/sdk-go v0.21.0/go.mod h1:T3G/P3VBSRTvdverfEiCVVcsNSymzO5ebIyUU6uRqk8= -github.com/rivo/uniseg 
v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/riza-io/grpc-go v0.2.0 h1:2HxQKFVE7VuYstcJ8zqpN84VnAoJ4dCL6YFhJewNcHQ= github.com/riza-io/grpc-go v0.2.0/go.mod h1:2bDvR9KkKC3KhtlSHfR3dAXjUMT86kg4UfWFyVGWqi8= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -874,24 +586,12 @@ github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.4.1 h1:eWC8eUMNZ/wM/PWuZBv7JxxqT5fiIKSIyTvjb7Elr+g= -github.com/ryancurrah/gomodguard v1.4.1/go.mod h1:qnMJwV1hX9m+YJseXEBhd2s90+1Xn6x9dLz11ualI1I= -github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= -github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= -github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4= -github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI= -github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= -github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= -github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= -github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= -github.com/sashamelentyev/usestdlibvars v1.29.0 
h1:8J0MoRrw4/NAXtjQqTHrbW9NN+3iMf7Knkq057v4XOQ= -github.com/sashamelentyev/usestdlibvars v1.29.0/go.mod h1:8PpnjHMk5VdeWlVb4wCdrB8PNbLqZ3wBZTZWkrpZZL8= +github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/secure-systems-lab/go-securesystemslib v0.6.0 h1:T65atpAVCJQK14UA57LMdZGpHi4QYSH/9FZyNGqMYIA= github.com/secure-systems-lab/go-securesystemslib v0.6.0/go.mod h1:8Mtpo9JKks/qhPG4HGZ2LGMvrPbzuxwfz/f/zLfEWkk= -github.com/securego/gosec/v2 v2.22.9 h1:njwnorLl1pJMkwaymi1iyWDy8xeaVUByW4oteJzYNHc= -github.com/securego/gosec/v2 v2.22.9/go.mod h1:x3qEF4J5bkDFIm8siAwsYZ40Uu5tD4JWpfVDPx3P3+0= github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0= github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/encoding v0.5.3 h1:OjMgICtcSFuNvQCdwqMCv9Tg7lEOXGwm1J5RPQccx6w= @@ -909,44 +609,27 @@ github.com/shirou/gopsutil/v4 v4.25.8 h1:NnAsw9lN7587WHxjJA9ryDnqhJpFH6A+wagYWTO github.com/shirou/gopsutil/v4 v4.25.8/go.mod h1:q9QdMmfAOVIw7a+eF86P7ISEU6ka+NLgkUxlopV4RwI= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= -github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= -github.com/sonatard/noctx v0.4.0 
h1:7MC/5Gg4SQ4lhLYR6mvOP6mQVSxCrdyiExo7atBs27o= -github.com/sonatard/noctx v0.4.0/go.mod h1:64XdbzFb18XL4LporKXp8poqZtPKbCrqQ402CV+kJas= -github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= -github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spdx/tools-golang v0.5.5 h1:61c0KLfAcNqAjlg6UNMdkwpMernhw3zVRwDZ2x9XOmk= github.com/spdx/tools-golang v0.5.5/go.mod h1:MVIsXx8ZZzaRWNQpUDhC4Dud34edUYJYecciXgrw5vE= github.com/speakeasy-api/jsonpath v0.6.2 h1:Mys71yd6u8kuowNCR0gCVPlVAHCmKtoGXYoAtcEbqXQ= github.com/speakeasy-api/jsonpath v0.6.2/go.mod h1:ymb2iSkyOycmzKwbEAYPJV/yi2rSmvBCLZJcyD+VVWw= github.com/speakeasy-api/openapi-overlay v0.10.3 h1:70een4vwHyslIp796vM+ox6VISClhtXsCjrQNhxwvWs= github.com/speakeasy-api/openapi-overlay v0.10.3/go.mod h1:RJjV0jbUHqXLS0/Mxv5XE7LAnJHqHw+01RDdpoGqiyY= -github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= -github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= -github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= -github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= -github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= github.com/spiffe/go-spiffe/v2 
v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/sqlc-dev/plugin-sdk-go v1.23.0 h1:iSeJhnXPlbDXlbzUEebw/DxsGzE9rdDJArl8Hvt0RMM= github.com/sqlc-dev/plugin-sdk-go v1.23.0/go.mod h1:I1r4THOfyETD+LI2gogN2LX8wCjwUZrgy/NU4In3llA= +github.com/sqlc-dev/sqlc v1.29.0 h1:HQctoD7y/i29Bao53qXO7CZ/BV9NcvpGpsJWvz9nKWs= +github.com/sqlc-dev/sqlc v1.29.0/go.mod h1:BavmYw11px5AdPOjAVHmb9fctP5A8GTziC38wBF9tp0= github.com/sqlc-dev/sqlc v1.30.0 h1:H4HrNwPc0hntxGWzAbhlfplPRN4bQpXFx+CaEMcKz6c= github.com/sqlc-dev/sqlc v1.30.0/go.mod h1:QnEN+npugyhUg1A+1kkYM3jc2OMOFsNlZ1eh8mdhad0= -github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= -github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4= -github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk= github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs= github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -964,14 +647,6 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= -github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= -github.com/tenntenn/text/transform 
v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= -github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.5.4 h1:u1ww+gqpRLiIA16yF2PV1CV1n/X3zhyezbNXC3E14Sg= -github.com/tetafro/godot v1.5.4/go.mod h1:eOkMrVQurDui411nBY2FA05EYH01r14LuWY/NrVDVcU= github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I= github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= @@ -986,18 +661,10 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= -github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 h1:9LPGD+jzxMlnk5r6+hJnar67cgpDIz/iyD+rfl5r2Vk= -github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= -github.com/timonwong/loggercheck v0.11.0 h1:jdaMpYBl+Uq9mWPXv1r8jc5fC3gyXx4/WGwTnnNKn4M= -github.com/timonwong/loggercheck v0.11.0/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= -github.com/tomarrell/wrapcheck/v2 v2.11.0 h1:BJSt36snX9+4WTIXeJ7nvHBQBcm1h2SjQMSlmQ6aFSU= -github.com/tomarrell/wrapcheck/v2 v2.11.0/go.mod h1:wFL9pDWDAbXhhPZZt+nG8Fu+h29TtnZ2MW6Lx4BRXIU= 
-github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= -github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f h1:MoxeMfHAe5Qj/ySSBfL8A7l1V+hxuluj8owsIEEZipI= github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f/go.mod h1:BKdcez7BiVtBvIcef90ZPc6ebqIWr4JWD7+EvLm6J98= github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 h1:2f304B10LaZdB8kkVEaoXvAMVan2tl9AiK4G0odjQtE= @@ -1010,16 +677,8 @@ github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d h1: github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d/go.mod h1:l8xTsYB90uaVdMHXMCxKKLSgw5wLYBwBKKefNIUnm9s= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= -github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= -github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= -github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= github.com/unkeyed/unkey/go/deploy/pkg/spiffe v0.0.0-20250929110415-ca2de7336e18 h1:8TyQZ28XT82Rji0UdPoZpwk3p7zsMKpdChCIBxlRMwo= github.com/unkeyed/unkey/go/deploy/pkg/spiffe v0.0.0-20250929110415-ca2de7336e18/go.mod h1:fvqbcz5BljbvrZ1p8LWkNAty8vRsTo1aPLw1sOmLXEI= -github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= -github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= -github.com/uudashr/iface v1.4.1 h1:J16Xl1wyNX9ofhpHmQ9h9gk5rnv2A6lX/2+APLTo0zU= -github.com/uudashr/iface v1.4.1/go.mod h1:pbeBPlbuU2qkNDn0mmfrxP2X+wjPMIQAy+r1MBXSXtg= github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= 
github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vertica/vertica-sql-go v1.3.4 h1:Fe9Jjg2uK755Xrn2eyI/cvulMaRmVjaGWBqvrf+EnPY= @@ -1046,48 +705,21 @@ github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3k github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/xen0n/gosmopolitan v1.3.0 h1:zAZI1zefvo7gcpbCOrPSHJZJYA9ZgLfJqtKzZ5pHqQM= -github.com/xen0n/gosmopolitan v1.3.0/go.mod h1:rckfr5T6o4lBtM1ga7mLGKZmLxswUoH1zxHgNXOsEt4= -github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= -github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= -github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= -github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= github.com/ydb-platform/ydb-go-genproto v0.0.0-20250911135631-b3beddd517d9 h1:SKqSRP6/ocY2Z4twOqKEKxpmawVTHTvQiom7hrU6jt0= github.com/ydb-platform/ydb-go-genproto v0.0.0-20250911135631-b3beddd517d9/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= github.com/ydb-platform/ydb-go-sdk/v3 v3.116.3 h1:8ZDfEmp1CRi57ExlDttLwwl0DKleZuED9fzLVm0vk9E= github.com/ydb-platform/ydb-go-sdk/v3 v3.116.3/go.mod h1:IgDKkfYE4FyJilTRe2BTtaurb2EWdMIsQbO02UW3wKM= -github.com/yeya24/promlinter v0.3.0 
h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs= -github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4= -github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw= -github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= -gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= -go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= -go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= -go-simpler.org/musttag v0.14.0 h1:XGySZATqQYSEV3/YTy+iX+aofbZZllJaqwFWs+RTtSo= -go-simpler.org/musttag v0.14.0/go.mod h1:uP8EymctQjJ4Z1kUnjX0u2l60WfUdQxCwSNKzE1JEOE= -go-simpler.org/sloglint v0.11.1 h1:xRbPepLT/MHPTCA6TS/wNfZrDzkGvCCqUv4Bdwc3H7s= 
-go-simpler.org/sloglint v0.11.1/go.mod h1:2PowwiCOK8mjiF+0KGifVOT8ZsCNiFzvfyJeJOIt8MQ= -go.augendre.info/arangolint v0.2.0 h1:2NP/XudpPmfBhQKX4rMk+zDYIj//qbt4hfZmSSTcpj8= -go.augendre.info/arangolint v0.2.0/go.mod h1:Vx4KSJwu48tkE+8uxuf0cbBnAPgnt8O1KWiT7bljq7w= -go.augendre.info/fatcontext v0.9.0 h1:Gt5jGD4Zcj8CDMVzjOJITlSb9cEch54hjRRlN3qDojE= -go.augendre.info/fatcontext v0.9.0/go.mod h1:L94brOAT1OOUNue6ph/2HnwxoNlds9aXDF2FcUntbNw= go.lsp.dev/jsonrpc2 v0.10.0 h1:Pr/YcXJoEOTMc/b6OTmcR1DPJ3mSWl/SWiU1Cct6VmI= go.lsp.dev/jsonrpc2 v0.10.0/go.mod h1:fmEzIdXPi/rf6d4uFcayi8HpFP1nBF99ERP1htC72Ac= go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 h1:hCzQgh6UcwbKgNSRurYWSqh8MufqRRPODRBblutn4TE= @@ -1170,32 +802,18 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= 
-golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20250911091902-df9299821621 h1:Yl4H5w2RV7L/dvSHp2GerziT5K2CORgFINPaMFxWGWw= -golang.org/x/exp/typeparams v0.0.0-20250911091902-df9299821621/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1207,18 +825,9 @@ golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1230,13 +839,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1249,41 +853,22 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.13.0/go.mod 
h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= @@ -1296,24 +881,10 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools 
v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= -golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= -golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= -golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= -golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= -golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1362,6 +933,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod 
h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -1387,8 +959,6 @@ gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= -honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM= howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= @@ -1430,10 +1000,6 @@ modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -mvdan.cc/gofumpt v0.9.1 h1:p5YT2NfFWsYyTieYgwcQ8aKV3xRvFH4uuN/zB2gBbMQ= -mvdan.cc/gofumpt v0.9.1/go.mod h1:3xYtNemnKiXaTh6R4VtlqDATFwBbdXI8lJvH/4qk7mw= -mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4 h1:WjUu4yQoT5BHT1w8Zu56SP8367OuBV5jvo+4Ulppyf8= -mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4/go.mod h1:rthT7OuvRbaGcd5ginj6dA2oLE7YNlta9qhBNNdCaLE= pluginrpc.com/pluginrpc v0.5.0 h1:tOQj2D35hOmvHyPu8e7ohW2/QvAnEtKscy2IJYWQ2yo= pluginrpc.com/pluginrpc v0.5.0/go.mod 
h1:UNWZ941hcVAoOZUn8YZsMmOZBzbUjQa3XMns8RQLp9o= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= diff --git a/go/internal/services/analytics/connection_manager.go b/go/internal/services/analytics/connection_manager.go new file mode 100644 index 0000000000..079d75dfc3 --- /dev/null +++ b/go/internal/services/analytics/connection_manager.go @@ -0,0 +1,203 @@ +package analytics + +import ( + "context" + "net/url" + "time" + + vaultv1 "github.com/unkeyed/unkey/go/gen/proto/vault/v1" + "github.com/unkeyed/unkey/go/internal/services/caches" + "github.com/unkeyed/unkey/go/pkg/assert" + "github.com/unkeyed/unkey/go/pkg/cache" + "github.com/unkeyed/unkey/go/pkg/clickhouse" + "github.com/unkeyed/unkey/go/pkg/clock" + "github.com/unkeyed/unkey/go/pkg/codes" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/fault" + "github.com/unkeyed/unkey/go/pkg/otel/logging" + "github.com/unkeyed/unkey/go/pkg/vault" +) + +// ConnectionManager is the interface for managing per-workspace ClickHouse connections for analytics +type ConnectionManager interface { + GetConnection(ctx context.Context, workspaceID string) (clickhouse.ClickHouse, db.ClickhouseWorkspaceSetting, error) +} + +// connectionManager is the default implementation that manages per-workspace ClickHouse connections +type connectionManager struct { + settingsCache cache.Cache[string, db.ClickhouseWorkspaceSetting] + connectionCache cache.Cache[string, clickhouse.ClickHouse] + database db.Database + logger logging.Logger + baseURL string + vault *vault.Service +} + +// ConnectionManagerConfig contains configuration for the connection manager +type ConnectionManagerConfig struct { + SettingsCache cache.Cache[string, db.ClickhouseWorkspaceSetting] + Database db.Database + Logger logging.Logger + Clock clock.Clock + BaseURL string // e.g., "http://clickhouse:8123/default" or "clickhouse://clickhouse:9000/default" + Vault *vault.Service +} + +// 
NewConnectionManager creates a new connection manager +func NewConnectionManager(config ConnectionManagerConfig) (ConnectionManager, error) { + err := assert.All( + assert.NotNilAndNotZero(config.Vault, "vault is required"), + assert.NotNilAndNotZero(config.SettingsCache, "settings cache is required"), + assert.NotNilAndNotZero(config.Database, "database is required"), + assert.NotNilAndNotZero(config.Logger, "logger is required"), + assert.NotNilAndNotZero(config.Clock, "clock is required"), + assert.NotNilAndNotZero(config.BaseURL, "base URL is required"), + ) + if err != nil { + return nil, fault.Wrap(err, + fault.Code(codes.App.Validation.AssertionFailed.URN()), + fault.Public("Analytics are not configured for this instance"), + ) + } + + // Create cache for ClickHouse connections + connectionCache, err := cache.New(cache.Config[string, clickhouse.ClickHouse]{ + // It's fine to keep a long cache time for this. + Fresh: 24 * time.Hour, + Stale: 24 * time.Hour, + Logger: config.Logger, + MaxSize: 1_000, + Resource: "clickhouse_analytics_connection", + Clock: config.Clock, + }) + if err != nil { + return nil, fault.Wrap(err, fault.Public("Failed to create connection cache")) + } + + return &connectionManager{ + settingsCache: config.SettingsCache, + connectionCache: connectionCache, + database: config.Database, + logger: config.Logger, + baseURL: config.BaseURL, + vault: config.Vault, + }, nil +} + +// GetConnection returns a cached connection and settings for the workspace or creates a new one +func (m *connectionManager) GetConnection(ctx context.Context, workspaceID string) (clickhouse.ClickHouse, db.ClickhouseWorkspaceSetting, error) { + // Try to get cached connection + conn, hit := m.connectionCache.Get(ctx, workspaceID) + if hit == cache.Hit { + // Still need to get settings + settings, err := m.getSettings(ctx, workspaceID) + if err != nil { + return nil, db.ClickhouseWorkspaceSetting{}, err + } + + return conn, settings, nil + } + + // Create new 
connection + conn, settings, err := m.createConnection(ctx, workspaceID) + if err != nil { + return nil, db.ClickhouseWorkspaceSetting{}, err + } + + // Store in cache + m.connectionCache.Set(ctx, workspaceID, conn) + + return conn, settings, nil +} + +// getSettings retrieves the workspace settings from cache +func (m *connectionManager) getSettings(ctx context.Context, workspaceID string) (db.ClickhouseWorkspaceSetting, error) { + settings, hit, err := m.settingsCache.SWR(ctx, workspaceID, func(ctx context.Context) (db.ClickhouseWorkspaceSetting, error) { + return db.Query.FindClickhouseWorkspaceSettingsByWorkspaceID(ctx, m.database.RO(), workspaceID) + }, caches.DefaultFindFirstOp) + if err != nil { + if db.IsNotFound(err) { + return db.ClickhouseWorkspaceSetting{}, fault.New( + "workspace settings not found", + fault.Public("ClickHouse analytics is not configured for this workspace"), + fault.Code(codes.Data.Analytics.NotConfigured.URN()), + ) + } + + return db.ClickhouseWorkspaceSetting{}, fault.Wrap(err, + fault.Public("Failed to fetch workspace analytics configuration"), + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + ) + } + + if hit == cache.Null { + return db.ClickhouseWorkspaceSetting{}, fault.New( + "workspace settings null", + fault.Public("ClickHouse analytics is not configured for this workspace"), + fault.Code(codes.Data.Analytics.NotConfigured.URN()), + ) + } + + return settings, nil +} + +// createConnection creates a new ClickHouse connection for a workspace +func (m *connectionManager) createConnection(ctx context.Context, workspaceID string) (clickhouse.ClickHouse, db.ClickhouseWorkspaceSetting, error) { + settings, err := m.getSettings(ctx, workspaceID) + if err != nil { + return nil, db.ClickhouseWorkspaceSetting{}, err + } + + // Decrypt password using vault + decrypted, err := m.vault.Decrypt(ctx, &vaultv1.DecryptRequest{ + Encrypted: settings.PasswordEncrypted, + Keyring: settings.WorkspaceID, + }) + if err != nil { + return 
nil, db.ClickhouseWorkspaceSetting{}, fault.Wrap(err, + fault.Public("Failed to connect to ClickHouse analytics database"), + fault.Code(codes.Data.Analytics.ConnectionFailed.URN()), + ) + } + + // Parse base URL and inject workspace-specific credentials + parsedURL, err := url.Parse(m.baseURL) + if err != nil { + return nil, db.ClickhouseWorkspaceSetting{}, fault.Wrap(err, + fault.Public("Invalid ClickHouse URL configuration"), + fault.Code(codes.App.Internal.ServiceUnavailable.URN()), + ) + } + + // Inject workspace credentials + parsedURL.User = url.UserPassword(settings.Username, decrypted.GetPlaintext()) + conn, err := clickhouse.New(clickhouse.Config{ + URL: parsedURL.String(), + Logger: m.logger, + }) + if err != nil { + return nil, db.ClickhouseWorkspaceSetting{}, fault.Wrap(err, + fault.Public("Failed to connect to ClickHouse analytics database"), + fault.Code(codes.Data.Analytics.ConnectionFailed.URN()), + ) + } + + return conn, settings, nil +} + +// noopConnectionManager is a no-op implementation that returns errors indicating analytics is not configured +type noopConnectionManager struct{} + +// NewNoopConnectionManager creates a new no-op connection manager for when analytics is not configured +func NewNoopConnectionManager() ConnectionManager { + return &noopConnectionManager{} +} + +// GetConnection always returns an error indicating analytics is not configured +func (m *noopConnectionManager) GetConnection(ctx context.Context, workspaceID string) (clickhouse.ClickHouse, db.ClickhouseWorkspaceSetting, error) { + return nil, db.ClickhouseWorkspaceSetting{}, fault.New( + "analytics not configured", + fault.Code(codes.Data.Analytics.NotConfigured.URN()), + fault.Public("Analytics are not configured for this instance"), + ) +} diff --git a/go/internal/services/caches/caches.go b/go/internal/services/caches/caches.go index 6f974ac774..85c7d4ee5e 100644 --- a/go/internal/services/caches/caches.go +++ b/go/internal/services/caches/caches.go @@ -31,6 +31,18 
@@ type Caches struct { // Keys are string (ID) and values are db.FindLiveApiByIDRow. LiveApiByID cache.Cache[cache.ScopedKey, db.FindLiveApiByIDRow] + // Clickhouse Configuration caches clickhouse configuration lookups by workspace ID. + // Keys are string (workspace ID) and values are db.ClickhouseWorkspaceSetting. + ClickhouseSetting cache.Cache[string, db.ClickhouseWorkspaceSetting] + + // KeyAuthToApiRow caches key_auth_id to api row mappings. + // Keys are string (key_auth_id) and values are db.FindKeyAuthsByKeyAuthIdsRow (has both KeyAuthID and ApiID). + KeyAuthToApiRow cache.Cache[cache.ScopedKey, db.FindKeyAuthsByKeyAuthIdsRow] + + // ApiToKeyAuthRow caches api_id to key_auth row mappings. + // Keys are string (api_id) and values are db.FindKeyAuthsByIdsRow (has both KeyAuthID and ApiID). + ApiToKeyAuthRow cache.Cache[cache.ScopedKey, db.FindKeyAuthsByIdsRow] + // dispatcher handles routing of invalidation events to all caches in this process. // This is not exported as it's an internal implementation detail. 
dispatcher *clustering.InvalidationDispatcher @@ -233,10 +245,69 @@ func New(config Config) (Caches, error) { return Caches{}, err } + clickhouseSetting, err := createCache( + config, + dispatcher, + cache.Config[string, db.ClickhouseWorkspaceSetting]{ + Fresh: time.Minute, + Stale: 24 * time.Hour, + Logger: config.Logger, + MaxSize: 1_000_000, + Resource: "clickhouse_setting", + Clock: config.Clock, + }, + nil, + nil, + ) + if err != nil { + return Caches{}, err + } + + // Create key_auth_id -> api row cache + keyAuthToApiRow, err := createCache( + config, + dispatcher, + cache.Config[cache.ScopedKey, db.FindKeyAuthsByKeyAuthIdsRow]{ + Fresh: 10 * time.Minute, + Stale: 24 * time.Hour, + Logger: config.Logger, + MaxSize: 1_000_000, + Resource: "key_auth_to_api_row", + Clock: config.Clock, + }, + cache.ScopedKeyToString, + cache.ScopedKeyFromString, + ) + if err != nil { + return Caches{}, err + } + + // Create api_id -> key_auth row cache + apiToKeyAuthRow, err := createCache( + config, + dispatcher, + cache.Config[cache.ScopedKey, db.FindKeyAuthsByIdsRow]{ + Fresh: 10 * time.Minute, + Stale: 24 * time.Hour, + Logger: config.Logger, + MaxSize: 1_000_000, + Resource: "api_to_key_auth_row", + Clock: config.Clock, + }, + cache.ScopedKeyToString, + cache.ScopedKeyFromString, + ) + if err != nil { + return Caches{}, err + } + return Caches{ RatelimitNamespace: middleware.WithTracing(ratelimitNamespace), LiveApiByID: middleware.WithTracing(liveApiByID), VerificationKeyByHash: middleware.WithTracing(verificationKeyByHash), + ClickhouseSetting: middleware.WithTracing(clickhouseSetting), + KeyAuthToApiRow: middleware.WithTracing(keyAuthToApiRow), + ApiToKeyAuthRow: middleware.WithTracing(apiToKeyAuthRow), dispatcher: dispatcher, }, nil } diff --git a/go/internal/services/caches/op.go b/go/internal/services/caches/op.go index cfe5524fee..d63a74f1d7 100644 --- a/go/internal/services/caches/op.go +++ b/go/internal/services/caches/op.go @@ -7,16 +7,16 @@ import ( // 
DefaultFindFirstOp returns the appropriate cache operation based on the sql error func DefaultFindFirstOp(err error) cache.Op { - if err == nil { - // everything went well and we have a row response - return cache.WriteValue - } - if db.IsNotFound(err) { // the response is empty, we need to store that the row does not exist return cache.WriteNull } + if err == nil { + // everything went well and we have a row response + return cache.WriteValue + } + // this is a noop in the cache return cache.Noop } diff --git a/go/k8s/manifests/api.yaml b/go/k8s/manifests/api.yaml index 6d7f945516..ad082dcdfa 100644 --- a/go/k8s/manifests/api.yaml +++ b/go/k8s/manifests/api.yaml @@ -47,6 +47,8 @@ spec: value: "redis://redis:6379" - name: UNKEY_CLICKHOUSE_URL value: "clickhouse://default:password@clickhouse:9000?secure=false&skip_verify=true" + - name: UNKEY_CLICKHOUSE_ANALYTICS_URL + value: "http://clickhouse:8123/default" # Observability - DISABLED for development - name: UNKEY_OTEL value: "false" diff --git a/go/main.go b/go/main.go index 3d55af01e9..0f2ec0c5f8 100644 --- a/go/main.go +++ b/go/main.go @@ -5,7 +5,9 @@ import ( "fmt" "os" + clickhouseUser "github.com/unkeyed/unkey/go/cmd/create-clickhouse-user" "github.com/unkeyed/unkey/go/cmd/deploy" + dev "github.com/unkeyed/unkey/go/cmd/dev" gateway "github.com/unkeyed/unkey/go/cmd/gw" "github.com/unkeyed/unkey/go/cmd/healthcheck" "github.com/unkeyed/unkey/go/cmd/quotacheck" @@ -31,6 +33,8 @@ func main() { healthcheck.Cmd, quotacheck.Cmd, gateway.Cmd, + clickhouseUser.Cmd, + dev.Cmd, }, } diff --git a/go/pkg/array/chunk.go b/go/pkg/array/chunk.go new file mode 100644 index 0000000000..ee57e8934b --- /dev/null +++ b/go/pkg/array/chunk.go @@ -0,0 +1,16 @@ +package array + +// Chunk splits a slice into chunks of size n. 
+func Chunk[T any](slice []T, n int) [][]T { + if n <= 0 { + panic("n must be greater than 0") + } + + var chunks [][]T + for i := 0; i < len(slice); i += n { + end := min(i+n, len(slice)) + chunks = append(chunks, slice[i:end]) + } + + return chunks +} diff --git a/go/pkg/cache/cache.go b/go/pkg/cache/cache.go index cec6b4aae0..d46d6ee910 100644 --- a/go/pkg/cache/cache.go +++ b/go/pkg/cache/cache.go @@ -129,6 +129,31 @@ func (c *cache[K, V]) Get(ctx context.Context, key K) (value V, hit CacheHit) { return value, Miss } +func (c *cache[K, V]) GetMany(ctx context.Context, keys []K) (values map[K]V, hits map[K]CacheHit) { + values = make(map[K]V, len(keys)) + hits = make(map[K]CacheHit, len(keys)) + now := c.clock.Now() + + for _, key := range keys { + e, ok := c.get(ctx, key) + if !ok { + hits[key] = Miss + continue + } + + if now.Before(e.Stale) { + values[key] = e.Value + hits[key] = e.Hit + continue + } + + c.otter.Delete(key) + hits[key] = Miss + } + + return values, hits +} + func (c *cache[K, V]) SetNull(_ context.Context, key K) { now := c.clock.Now() @@ -141,6 +166,20 @@ func (c *cache[K, V]) SetNull(_ context.Context, key K) { }) } +func (c *cache[K, V]) SetNullMany(ctx context.Context, keys []K) { + now := c.clock.Now() + var v V + + for _, key := range keys { + c.otter.Set(key, swrEntry[V]{ + Value: v, + Fresh: now.Add(c.fresh), + Stale: now.Add(c.stale), + Hit: Null, + }) + } +} + func (c *cache[K, V]) Set(_ context.Context, key K, value V) { now := c.clock.Now() @@ -152,6 +191,19 @@ func (c *cache[K, V]) Set(_ context.Context, key K, value V) { }) } +func (c *cache[K, V]) SetMany(ctx context.Context, values map[K]V) { + now := c.clock.Now() + + for key, value := range values { + c.otter.Set(key, swrEntry[V]{ + Value: value, + Fresh: now.Add(c.fresh), + Stale: now.Add(c.stale), + Hit: Hit, + }) + } +} + func (c *cache[K, V]) get(_ context.Context, key K) (swrEntry[V], bool) { v, ok := c.otter.Get(key) @@ -310,3 +362,157 @@ func (c *cache[K, V]) SWR( 
return v, hit, err } + +func (c *cache[K, V]) SWRMany( + ctx context.Context, + keys []K, + refreshFromOrigin func(context.Context, []K) (map[K]V, error), + op func(error) Op, +) (map[K]V, map[K]CacheHit, error) { + // Use GetMany to handle deduplication and basic cache lookups + values, hits := c.GetMany(ctx, keys) + + now := c.clock.Now() + var staleKeys []K + var missingKeys []K + + // Check each unique key for freshness/staleness + seen := make(map[K]bool) + for _, key := range keys { + if seen[key] { + continue + } + + seen[key] = true + + hit := hits[key] + if hit == Miss { + missingKeys = append(missingKeys, key) + continue + } + + if hit == Null { + // Null values are cached, no need to refresh + continue + } + + // For hits, check if they're fresh or stale + e, ok := c.get(ctx, key) + if ok && now.After(e.Fresh) && now.Before(e.Stale) { + // Stale but valid - queue for background refresh + staleKeys = append(staleKeys, key) + } + } + + // Queue stale keys for background refresh + if len(staleKeys) > 0 { + c.revalidateC <- func() { + c.revalidateMany(context.WithoutCancel(ctx), staleKeys, refreshFromOrigin, op) + } + } + + // Fetch missing keys synchronously + if len(missingKeys) > 0 { + fetchedValues, err := refreshFromOrigin(ctx, missingKeys) + + switch op(err) { + case WriteValue: + if fetchedValues != nil { + // Write the values we got + c.SetMany(ctx, fetchedValues) + for key, value := range fetchedValues { + values[key] = value + hits[key] = Hit + } + + // Automatically write NULL for keys that weren't returned + var notFoundKeys []K + for _, key := range missingKeys { + if _, found := fetchedValues[key]; !found { + notFoundKeys = append(notFoundKeys, key) + } + } + + if len(notFoundKeys) > 0 { + c.SetNullMany(ctx, notFoundKeys) + for _, key := range notFoundKeys { + hits[key] = Null + } + } + } + case WriteNull: + c.SetNullMany(ctx, missingKeys) + for _, key := range missingKeys { + hits[key] = Null + } + case Noop: + // Don't cache anything + } + + 
if err != nil { + return values, hits, err + } + } + + return values, hits, nil +} + +func (c *cache[K, V]) revalidateMany( + ctx context.Context, + keys []K, + refreshFromOrigin func(context.Context, []K) (map[K]V, error), + op func(error) Op, +) { + // Lock to prevent duplicate revalidations + c.inflightMu.Lock() + var keysToRefresh []K + for _, key := range keys { + if !c.inflightRefreshes[key] { + c.inflightRefreshes[key] = true + keysToRefresh = append(keysToRefresh, key) + } + } + c.inflightMu.Unlock() + + if len(keysToRefresh) == 0 { + return + } + + defer func() { + c.inflightMu.Lock() + for _, key := range keysToRefresh { + delete(c.inflightRefreshes, key) + } + c.inflightMu.Unlock() + }() + + metrics.CacheRevalidations.WithLabelValues(c.resource).Add(float64(len(keysToRefresh))) + values, err := refreshFromOrigin(ctx, keysToRefresh) + + if err != nil && !db.IsNotFound(err) { + c.logger.Warn("failed to revalidate many", "error", err.Error(), "keys", keysToRefresh) + } + + switch op(err) { + case WriteValue: + if values != nil { + // Write the values we got + c.SetMany(ctx, values) + + // Automatically write NULL for keys that weren't returned + var notFoundKeys []K + for _, key := range keysToRefresh { + if _, found := values[key]; !found { + notFoundKeys = append(notFoundKeys, key) + } + } + if len(notFoundKeys) > 0 { + c.SetNullMany(ctx, notFoundKeys) + } + } + case WriteNull: + c.SetNullMany(ctx, keysToRefresh) + case Noop: + // Don't cache anything + } +} diff --git a/go/pkg/cache/clustering/cluster_cache.go b/go/pkg/cache/clustering/cluster_cache.go index 69b6e403be..b16566b297 100644 --- a/go/pkg/cache/clustering/cluster_cache.go +++ b/go/pkg/cache/clustering/cluster_cache.go @@ -143,6 +143,13 @@ func (c *ClusterCache[K, V]) Get(ctx context.Context, key K) (value V, hit cache return c.localCache.Get(ctx, key) } +// GetMany retrieves multiple values from the local cache +func (c *ClusterCache[K, V]) GetMany(ctx context.Context, keys []K) (values 
map[K]V, hits map[K]cache.CacheHit) { + return c.localCache.GetMany(ctx, keys) +} + +// Set stores a value in the local cache and broadcasts an invalidation event +// to other nodes in the cluster // Set stores a value in the local cache without broadcasting. // This is used when populating the cache after a database read. // The stale/fresh timers handle cache expiration, so there's no need to @@ -151,12 +158,31 @@ func (c *ClusterCache[K, V]) Set(ctx context.Context, key K, value V) { c.localCache.Set(ctx, key, value) } +// SetMany stores multiple values in the local cache and broadcasts invalidation events +func (c *ClusterCache[K, V]) SetMany(ctx context.Context, values map[K]V) { + // Update local cache first + c.localCache.SetMany(ctx, values) + + // Broadcast invalidation for all keys + keys := make([]K, 0, len(values)) + for key := range values { + keys = append(keys, key) + } + c.broadcastInvalidation(ctx, keys...) +} + // SetNull stores a null value in the local cache and broadcasts invalidation func (c *ClusterCache[K, V]) SetNull(ctx context.Context, key K) { c.localCache.SetNull(ctx, key) c.broadcastInvalidation(ctx, key) } +// SetNullMany stores multiple null values in the local cache and broadcasts invalidation +func (c *ClusterCache[K, V]) SetNullMany(ctx context.Context, keys []K) { + c.localCache.SetNullMany(ctx, keys) + c.broadcastInvalidation(ctx, keys...) 
+} + // Remove removes one or more values from the local cache and broadcasts invalidation func (c *ClusterCache[K, V]) Remove(ctx context.Context, keys ...K) { // Remove from local cache @@ -175,6 +201,16 @@ func (c *ClusterCache[K, V]) SWR( return c.localCache.SWR(ctx, key, refreshFromOrigin, op) } +// SWRMany performs stale-while-revalidate lookup for multiple keys +func (c *ClusterCache[K, V]) SWRMany( + ctx context.Context, + keys []K, + refreshFromOrigin func(context.Context, []K) (map[K]V, error), + op func(error) cache.Op, +) (map[K]V, map[K]cache.CacheHit, error) { + return c.localCache.SWRMany(ctx, keys, refreshFromOrigin, op) +} + // Dump returns a serialized representation of the cache func (c *ClusterCache[K, V]) Dump(ctx context.Context) ([]byte, error) { return c.localCache.Dump(ctx) diff --git a/go/pkg/cache/interface.go b/go/pkg/cache/interface.go index a3e00feba6..e78e13e8e6 100644 --- a/go/pkg/cache/interface.go +++ b/go/pkg/cache/interface.go @@ -9,18 +9,32 @@ type Cache[K comparable, V any] interface { // If the key is not found, found will be false. Get(ctx context.Context, key K) (value V, hit CacheHit) + // GetMany returns values for multiple keys. + // Returns maps of values and cache hits indexed by key. + GetMany(ctx context.Context, keys []K) (values map[K]V, hits map[K]CacheHit) + // Sets the value for the given key. Set(ctx context.Context, key K, value V) + // SetMany sets multiple key-value pairs. + SetMany(ctx context.Context, values map[K]V) + // Sets the given key to null, indicating that the value does not exist in the origin. SetNull(ctx context.Context, key K) + // SetNullMany sets multiple keys to null. + SetNullMany(ctx context.Context, keys []K) + // Remove removes one or more keys from the cache. // Multiple keys can be provided for efficient bulk removal. 
Remove(ctx context.Context, keys ...K) SWR(ctx context.Context, key K, refreshFromOrigin func(ctx context.Context) (V, error), op func(error) Op) (value V, hit CacheHit, err error) + // SWRMany performs stale-while-revalidate for multiple keys. + // refreshFromOrigin receives keys that need to be fetched and returns a map of values. + SWRMany(ctx context.Context, keys []K, refreshFromOrigin func(ctx context.Context, keys []K) (map[K]V, error), op func(error) Op) (values map[K]V, hits map[K]CacheHit, err error) + // Dump returns a serialized representation of the cache. Dump(ctx context.Context) ([]byte, error) diff --git a/go/pkg/cache/many_test.go b/go/pkg/cache/many_test.go new file mode 100644 index 0000000000..4d7b1f42fc --- /dev/null +++ b/go/pkg/cache/many_test.go @@ -0,0 +1,529 @@ +package cache_test + +import ( + "context" + "database/sql" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/pkg/cache" + "github.com/unkeyed/unkey/go/pkg/clock" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/otel/logging" +) + +func TestGetMany(t *testing.T) { + ctx := context.Background() + c, err := cache.New(cache.Config[string, string]{ + MaxSize: 10_000, + Fresh: time.Minute, + Stale: time.Minute * 5, + Logger: logging.NewNoop(), + Resource: "test", + Clock: clock.New(), + }) + require.NoError(t, err) + + t.Run("all keys miss", func(t *testing.T) { + keys := []string{"key1", "key2", "key3"} + values, hits := c.GetMany(ctx, keys) + + require.Len(t, values, 0) + require.Len(t, hits, 3) + for _, key := range keys { + require.Equal(t, cache.Miss, hits[key]) + } + }) + + t.Run("some keys hit", func(t *testing.T) { + // Set some values + c.Set(ctx, "key1", "value1") + c.Set(ctx, "key2", "value2") + + keys := []string{"key1", "key2", "key3", "key4"} + values, hits := c.GetMany(ctx, keys) + + require.Len(t, values, 2) + require.Len(t, hits, 4) + + require.Equal(t, "value1", values["key1"]) + 
require.Equal(t, cache.Hit, hits["key1"]) + + require.Equal(t, "value2", values["key2"]) + require.Equal(t, cache.Hit, hits["key2"]) + + require.Equal(t, cache.Miss, hits["key3"]) + require.Equal(t, cache.Miss, hits["key4"]) + }) + + t.Run("all keys hit", func(t *testing.T) { + // Set all values + c.Set(ctx, "a", "va") + c.Set(ctx, "b", "vb") + c.Set(ctx, "c", "vc") + + keys := []string{"a", "b", "c"} + values, hits := c.GetMany(ctx, keys) + + require.Len(t, values, 3) + require.Len(t, hits, 3) + + for _, key := range keys { + require.Equal(t, cache.Hit, hits[key]) + require.Equal(t, "v"+key, values[key]) + } + }) + + t.Run("null values", func(t *testing.T) { + c.SetNull(ctx, "null1") + c.SetNull(ctx, "null2") + + keys := []string{"null1", "null2"} + values, hits := c.GetMany(ctx, keys) + + require.Len(t, values, 2) + require.Len(t, hits, 2) + + require.Equal(t, cache.Null, hits["null1"]) + require.Equal(t, cache.Null, hits["null2"]) + require.Equal(t, "", values["null1"]) + require.Equal(t, "", values["null2"]) + }) + + t.Run("empty keys slice", func(t *testing.T) { + values, hits := c.GetMany(ctx, []string{}) + + require.Len(t, values, 0) + require.Len(t, hits, 0) + }) +} + +func TestGetMany_Eviction(t *testing.T) { + ctx := context.Background() + clk := clock.NewTestClock() + + c, err := cache.New(cache.Config[string, string]{ + MaxSize: 10_000, + Fresh: time.Second, + Stale: time.Second, + Logger: logging.NewNoop(), + Resource: "test", + Clock: clk, + }) + require.NoError(t, err) + + // Set values + c.Set(ctx, "key1", "value1") + c.Set(ctx, "key2", "value2") + + // Move past stale time + clk.Tick(2 * time.Second) + + keys := []string{"key1", "key2"} + values, hits := c.GetMany(ctx, keys) + + require.Len(t, values, 0) + require.Len(t, hits, 2) + require.Equal(t, cache.Miss, hits["key1"]) + require.Equal(t, cache.Miss, hits["key2"]) +} + +func TestSetMany(t *testing.T) { + ctx := context.Background() + c, err := cache.New(cache.Config[string, string]{ + MaxSize: 
10_000, + Fresh: time.Minute, + Stale: time.Minute * 5, + Logger: logging.NewNoop(), + Resource: "test", + Clock: clock.New(), + }) + require.NoError(t, err) + + t.Run("set multiple values", func(t *testing.T) { + values := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + + c.SetMany(ctx, values) + + // Verify all values are set + for key, expectedValue := range values { + value, hit := c.Get(ctx, key) + require.Equal(t, cache.Hit, hit) + require.Equal(t, expectedValue, value) + } + }) + + t.Run("overwrite existing values", func(t *testing.T) { + c.Set(ctx, "old", "old_value") + + c.SetMany(ctx, map[string]string{ + "old": "new_value", + "new": "new_value2", + }) + + value, hit := c.Get(ctx, "old") + require.Equal(t, cache.Hit, hit) + require.Equal(t, "new_value", value) + + value, hit = c.Get(ctx, "new") + require.Equal(t, cache.Hit, hit) + require.Equal(t, "new_value2", value) + }) + + t.Run("empty map", func(t *testing.T) { + c.SetMany(ctx, map[string]string{}) + // Should not panic + }) +} + +func TestSetNullMany(t *testing.T) { + ctx := context.Background() + c, err := cache.New(cache.Config[string, string]{ + MaxSize: 10_000, + Fresh: time.Minute, + Stale: time.Minute * 5, + Logger: logging.NewNoop(), + Resource: "test", + Clock: clock.New(), + }) + require.NoError(t, err) + + t.Run("set multiple null values", func(t *testing.T) { + keys := []string{"null1", "null2", "null3"} + c.SetNullMany(ctx, keys) + + // Verify all are null + for _, key := range keys { + value, hit := c.Get(ctx, key) + require.Equal(t, cache.Null, hit) + require.Equal(t, "", value) + } + }) + + t.Run("overwrite existing values with null", func(t *testing.T) { + c.Set(ctx, "existing", "value") + + c.SetNullMany(ctx, []string{"existing"}) + + value, hit := c.Get(ctx, "existing") + require.Equal(t, cache.Null, hit) + require.Equal(t, "", value) + }) + + t.Run("empty slice", func(t *testing.T) { + c.SetNullMany(ctx, []string{}) + // Should not panic + }) 
+} + +func TestSWRMany(t *testing.T) { + ctx := context.Background() + mockClock := clock.NewTestClock() + + c, err := cache.New(cache.Config[string, string]{ + Fresh: 1 * time.Minute, + Stale: 5 * time.Minute, + Logger: logging.NewNoop(), + MaxSize: 100, + Resource: "test", + Clock: mockClock, + }) + require.NoError(t, err) + + t.Run("all keys miss - fetch from origin", func(t *testing.T) { + keys := []string{"key1", "key2", "key3"} + values, hits, err := c.SWRMany(ctx, keys, func(ctx context.Context, keysToFetch []string) (map[string]string, error) { + result := make(map[string]string) + for _, k := range keysToFetch { + result[k] = "value_" + k + } + return result, nil + }, func(err error) cache.Op { + if err != nil { + return cache.Noop + } + return cache.WriteValue + }) + + require.NoError(t, err) + require.Len(t, values, 3) + require.Len(t, hits, 3) + + for _, key := range keys { + require.Equal(t, cache.Hit, hits[key]) + require.Equal(t, "value_"+key, values[key]) + } + }) + + t.Run("all keys hit - no origin call", func(t *testing.T) { + // Populate cache + c.Set(ctx, "a", "va") + c.Set(ctx, "b", "vb") + + keys := []string{"a", "b"} + values, hits, err := c.SWRMany(ctx, keys, func(ctx context.Context, keysToFetch []string) (map[string]string, error) { + t.Fatal("should not call refresh function") + return nil, nil + }, func(err error) cache.Op { + return cache.WriteValue + }) + + require.NoError(t, err) + require.Len(t, values, 2) + require.Len(t, hits, 2) + + require.Equal(t, cache.Hit, hits["a"]) + require.Equal(t, "va", values["a"]) + + require.Equal(t, cache.Hit, hits["b"]) + require.Equal(t, "vb", values["b"]) + }) + + t.Run("mixed hits and misses", func(t *testing.T) { + // Populate some keys + c.Set(ctx, "cached1", "value1") + c.Set(ctx, "cached2", "value2") + + keys := []string{"cached1", "cached2", "new1", "new2"} + fetchedKeys := []string{} + + values, hits, err := c.SWRMany(ctx, keys, func(ctx context.Context, keysToFetch []string) 
(map[string]string, error) { + fetchedKeys = keysToFetch + result := make(map[string]string) + for _, k := range keysToFetch { + result[k] = "fetched_" + k + } + return result, nil + }, func(err error) cache.Op { + if err != nil { + return cache.Noop + } + return cache.WriteValue + }) + + require.NoError(t, err) + require.Len(t, values, 4) + require.Len(t, hits, 4) + + // Cached keys should return cached values + require.Equal(t, cache.Hit, hits["cached1"]) + require.Equal(t, "value1", values["cached1"]) + + require.Equal(t, cache.Hit, hits["cached2"]) + require.Equal(t, "value2", values["cached2"]) + + // New keys should be fetched + require.Equal(t, cache.Hit, hits["new1"]) + require.Equal(t, "fetched_new1", values["new1"]) + + require.Equal(t, cache.Hit, hits["new2"]) + require.Equal(t, "fetched_new2", values["new2"]) + + // Verify only missing keys were fetched + require.Len(t, fetchedKeys, 2) + require.Contains(t, fetchedKeys, "new1") + require.Contains(t, fetchedKeys, "new2") + }) + + t.Run("stale keys return cached value and refresh in background", func(t *testing.T) { + // Populate cache + c.Set(ctx, "stale1", "old_value1") + c.Set(ctx, "stale2", "old_value2") + + // Move past fresh but within stale + mockClock.Tick(2 * time.Minute) + + keys := []string{"stale1", "stale2"} + values, hits, err := c.SWRMany(ctx, keys, func(ctx context.Context, keysToFetch []string) (map[string]string, error) { + // This will be called in background + result := make(map[string]string) + for _, k := range keysToFetch { + result[k] = "new_" + k + } + return result, nil + }, func(err error) cache.Op { + return cache.WriteValue + }) + + require.NoError(t, err) + require.Len(t, values, 2) + + // Should return old cached values + require.Equal(t, "old_value1", values["stale1"]) + require.Equal(t, "old_value2", values["stale2"]) + + // All should be hits + require.Equal(t, cache.Hit, hits["stale1"]) + require.Equal(t, cache.Hit, hits["stale2"]) + }) + + t.Run("null values", func(t 
*testing.T) { + keys := []string{"notfound1", "notfound2"} + _, hits, err := c.SWRMany(ctx, keys, func(ctx context.Context, keysToFetch []string) (map[string]string, error) { + return nil, sql.ErrNoRows + }, func(err error) cache.Op { + if db.IsNotFound(err) { + return cache.WriteNull + } + return cache.Noop + }) + + require.Error(t, err) + require.True(t, db.IsNotFound(err)) + + // Keys should be marked as null + for _, key := range keys { + require.Equal(t, cache.Null, hits[key]) + } + + // Second call should return null hits without calling origin + values2, hits2, err2 := c.SWRMany(ctx, keys, func(ctx context.Context, keysToFetch []string) (map[string]string, error) { + t.Fatal("should not call refresh function for null values") + return nil, nil + }, func(err error) cache.Op { + return cache.WriteValue + }) + + require.NoError(t, err2) + for _, key := range keys { + require.Equal(t, cache.Null, hits2[key]) + require.Equal(t, "", values2[key]) + } + }) + + t.Run("partial null values", func(t *testing.T) { + keys := []string{"found", "notfound"} + _, hits, err := c.SWRMany(ctx, keys, func(ctx context.Context, keysToFetch []string) (map[string]string, error) { + // Return value for "found" but indicate not found overall + return map[string]string{"found": "value"}, sql.ErrNoRows + }, func(err error) cache.Op { + if db.IsNotFound(err) { + return cache.WriteNull + } + return cache.WriteValue + }) + + require.Error(t, err) + require.True(t, db.IsNotFound(err)) + + // All keys should be marked as null when WriteNull is used + for _, key := range keys { + require.Equal(t, cache.Null, hits[key]) + } + }) + + t.Run("error handling", func(t *testing.T) { + keys := []string{"err1", "err2"} + expectedErr := fmt.Errorf("fetch error") + + _, hits, err := c.SWRMany(ctx, keys, func(ctx context.Context, keysToFetch []string) (map[string]string, error) { + return nil, expectedErr + }, func(err error) cache.Op { + return cache.Noop + }) + + require.Error(t, err) + 
require.Equal(t, expectedErr, err) + + // Keys should be miss on error + for _, key := range keys { + require.Equal(t, cache.Miss, hits[key]) + } + }) + + t.Run("empty keys slice", func(t *testing.T) { + values, hits, err := c.SWRMany(ctx, []string{}, func(ctx context.Context, keysToFetch []string) (map[string]string, error) { + t.Fatal("should not call refresh for empty keys") + return nil, nil + }, func(err error) cache.Op { + return cache.WriteValue + }) + + require.NoError(t, err) + require.Len(t, values, 0) + require.Len(t, hits, 0) + }) + + t.Run("deduplication - only fetch unique keys", func(t *testing.T) { + // Pre-populate one key + c.Set(ctx, "cached", "cached_value") + + keys := []string{"cached", "new", "new"} // duplicate "new" + var fetchedKeys []string + + values, hits, err := c.SWRMany(ctx, keys, func(ctx context.Context, keysToFetch []string) (map[string]string, error) { + fetchedKeys = keysToFetch + return map[string]string{"new": "new_value"}, nil + }, func(err error) cache.Op { + return cache.WriteValue + }) + + require.NoError(t, err) + + // Should only fetch "new" once + require.Len(t, fetchedKeys, 1) + require.Equal(t, "new", fetchedKeys[0]) + + // Should have results for unique keys only (maps deduplicate) + require.Len(t, hits, 2) + require.Len(t, values, 2) + require.Equal(t, "cached_value", values["cached"]) + require.Equal(t, "new_value", values["new"]) + }) + + t.Run("partial results - cache NULL for missing keys", func(t *testing.T) { + keys := []string{"exists1", "exists2", "missing1", "missing2"} + + // First call - DB only returns 2 of 4 keys + values, hits, err := c.SWRMany(ctx, keys, func(ctx context.Context, keysToFetch []string) (map[string]string, error) { + // Simulate DB returning partial results + return map[string]string{ + "exists1": "value1", + "exists2": "value2", + // "missing1" and "missing2" not returned + }, nil + }, func(err error) cache.Op { + return cache.WriteValue + }) + + require.NoError(t, err) + + // The 
found keys should be Hit with values + require.Equal(t, cache.Hit, hits["exists1"]) + require.Equal(t, "value1", values["exists1"]) + require.Equal(t, cache.Hit, hits["exists2"]) + require.Equal(t, "value2", values["exists2"]) + + // The missing keys should be cached as Null (values map will have zero values) + require.Equal(t, cache.Null, hits["missing1"]) + require.Equal(t, "", values["missing1"]) // zero value for string + require.Equal(t, cache.Null, hits["missing2"]) + require.Equal(t, "", values["missing2"]) // zero value for string + + // Second call - should return cached values without calling origin + values2, hits2, err2 := c.SWRMany(ctx, keys, func(ctx context.Context, keysToFetch []string) (map[string]string, error) { + t.Fatal("should not call refresh - all keys should be cached") + return nil, nil + }, func(err error) cache.Op { + return cache.WriteValue + }) + + require.NoError(t, err2) + + // All keys should be in cache now + require.Equal(t, cache.Hit, hits2["exists1"]) + require.Equal(t, "value1", values2["exists1"]) + require.Equal(t, cache.Hit, hits2["exists2"]) + require.Equal(t, "value2", values2["exists2"]) + require.Equal(t, cache.Null, hits2["missing1"]) // Cached as null + require.Equal(t, "", values2["missing1"]) + require.Equal(t, cache.Null, hits2["missing2"]) // Cached as null + require.Equal(t, "", values2["missing2"]) + }) +} diff --git a/go/pkg/cache/middleware/tracing.go b/go/pkg/cache/middleware/tracing.go index 550e269a4f..9c62dd0aaa 100644 --- a/go/pkg/cache/middleware/tracing.go +++ b/go/pkg/cache/middleware/tracing.go @@ -31,6 +31,29 @@ func (mw *tracingMiddleware[K, V]) Get(ctx context.Context, key K) (V, cache.Cac return value, hit } +func (mw *tracingMiddleware[K, V]) GetMany(ctx context.Context, keys []K) (map[K]V, map[K]cache.CacheHit) { + ctx, span := tracing.Start(ctx, "cache.GetMany") + defer span.End() + span.SetAttributes( + attribute.Int("count", len(keys)), + ) + + values, hits := mw.next.GetMany(ctx, keys) + + 
hitCount := 0 + for _, hit := range hits { + if hit != cache.Miss { + hitCount++ + } + } + span.SetAttributes( + attribute.Int("hits", hitCount), + attribute.Int("misses", len(keys)-hitCount), + ) + + return values, hits +} + func (mw *tracingMiddleware[K, V]) Set(ctx context.Context, key K, value V) { ctx, span := tracing.Start(ctx, "cache.Set") defer span.End() @@ -39,6 +62,14 @@ func (mw *tracingMiddleware[K, V]) Set(ctx context.Context, key K, value V) { mw.next.Set(ctx, key, value) } +func (mw *tracingMiddleware[K, V]) SetMany(ctx context.Context, values map[K]V) { + ctx, span := tracing.Start(ctx, "cache.SetMany") + defer span.End() + span.SetAttributes(attribute.Int("count", len(values))) + + mw.next.SetMany(ctx, values) +} + func (mw *tracingMiddleware[K, V]) SetNull(ctx context.Context, key K) { ctx, span := tracing.Start(ctx, "cache.SetNull") defer span.End() @@ -47,6 +78,14 @@ func (mw *tracingMiddleware[K, V]) SetNull(ctx context.Context, key K) { mw.next.SetNull(ctx, key) } +func (mw *tracingMiddleware[K, V]) SetNullMany(ctx context.Context, keys []K) { + ctx, span := tracing.Start(ctx, "cache.SetNullMany") + defer span.End() + span.SetAttributes(attribute.Int("count", len(keys))) + + mw.next.SetNullMany(ctx, keys) +} + func (mw *tracingMiddleware[K, V]) Remove(ctx context.Context, keys ...K) { ctx, span := tracing.Start(ctx, "cache.Remove") defer span.End() @@ -111,3 +150,33 @@ func (mw *tracingMiddleware[K, V]) SWR(ctx context.Context, key K, refreshFromOr return value, hit, err } + +func (mw *tracingMiddleware[K, V]) SWRMany(ctx context.Context, keys []K, refreshFromOrigin func(ctx context.Context, keys []K) (map[K]V, error), op func(err error) cache.Op) (map[K]V, map[K]cache.CacheHit, error) { + ctx, span := tracing.Start(ctx, "cache.SWRMany") + defer span.End() + span.SetAttributes(attribute.Int("count", len(keys))) + + values, hits, err := mw.next.SWRMany(ctx, keys, func(innerCtx context.Context, innerKeys []K) (map[K]V, error) { + innerCtx, 
innerSpan := tracing.Start(innerCtx, "refreshFromOrigin") + defer innerSpan.End() + innerSpan.SetAttributes(attribute.Int("keys", len(innerKeys))) + return refreshFromOrigin(innerCtx, innerKeys) + }, op) + + if err != nil { + tracing.RecordError(span, err) + } + + hitCount := 0 + for _, hit := range hits { + if hit != cache.Miss { + hitCount++ + } + } + span.SetAttributes( + attribute.Int("hits", hitCount), + attribute.Int("misses", len(keys)-hitCount), + ) + + return values, hits, err +} diff --git a/go/pkg/cache/noop.go b/go/pkg/cache/noop.go index 07528823ed..bd16f47f3d 100644 --- a/go/pkg/cache/noop.go +++ b/go/pkg/cache/noop.go @@ -10,8 +10,23 @@ func (c *noopCache[K, V]) Get(ctx context.Context, key K) (value V, hit CacheHit var v V return v, Miss } + +func (c *noopCache[K, V]) GetMany(ctx context.Context, keys []K) (values map[K]V, hits map[K]CacheHit) { + values = make(map[K]V) + hits = make(map[K]CacheHit) + for _, key := range keys { + hits[key] = Miss + } + return values, hits +} + func (c *noopCache[K, V]) Set(ctx context.Context, key K, value V) {} -func (c *noopCache[K, V]) SetNull(ctx context.Context, key K) {} + +func (c *noopCache[K, V]) SetMany(ctx context.Context, values map[K]V) {} + +func (c *noopCache[K, V]) SetNull(ctx context.Context, key K) {} + +func (c *noopCache[K, V]) SetNullMany(ctx context.Context, keys []K) {} func (c *noopCache[K, V]) Remove(ctx context.Context, keys ...K) {} @@ -32,6 +47,15 @@ func (c *noopCache[K, V]) SWR(ctx context.Context, key K, refreshFromOrigin func return v, Miss, nil } +func (c *noopCache[K, V]) SWRMany(ctx context.Context, keys []K, refreshFromOrigin func(context.Context, []K) (map[K]V, error), op func(err error) Op) (map[K]V, map[K]CacheHit, error) { + values := make(map[K]V) + hits := make(map[K]CacheHit) + for _, key := range keys { + hits[key] = Miss + } + return values, hits, nil +} + func NewNoopCache[K comparable, V any]() Cache[K, V] { return &noopCache[K, V]{} } diff --git 
a/go/pkg/clickhouse/client.go b/go/pkg/clickhouse/client.go index c021519af6..39085405d5 100644 --- a/go/pkg/clickhouse/client.go +++ b/go/pkg/clickhouse/client.go @@ -3,6 +3,7 @@ package clickhouse import ( "context" "fmt" + "strings" "time" ch "github.com/ClickHouse/clickhouse-go/v2" @@ -21,10 +22,11 @@ type clickhouse struct { logger logging.Logger // Batched processors for different event types - requests *batch.BatchProcessor[schema.ApiRequestV1] - apiRequests *batch.BatchProcessor[schema.ApiRequestV2] - keyVerifications *batch.BatchProcessor[schema.KeyVerificationRequestV1] - ratelimits *batch.BatchProcessor[schema.RatelimitRequestV1] + requests *batch.BatchProcessor[schema.ApiRequestV1] + apiRequests *batch.BatchProcessor[schema.ApiRequestV2] + keyVerifications *batch.BatchProcessor[schema.KeyVerificationRequestV1] + keyVerificationsV2 *batch.BatchProcessor[schema.KeyVerificationV2] + ratelimits *batch.BatchProcessor[schema.RatelimitRequestV1] } var _ Bufferer = (*clickhouse)(nil) @@ -71,6 +73,7 @@ func New(config Config) (*clickhouse, error) { opts.MaxOpenConns = 50 opts.ConnMaxLifetime = time.Hour opts.ConnOpenStrategy = ch.ConnOpenRoundRobin + opts.DialTimeout = 5 * time.Second // Fail fast on connection issues config.Logger.Info("connecting to clickhouse") conn, err := ch.Open(opts) @@ -84,6 +87,10 @@ func New(config Config) (*clickhouse, error) { retry.Backoff(func(n int) time.Duration { return time.Duration(n) * time.Second }), + retry.ShouldRetry(func(err error) bool { + // Don't retry authentication errors - they won't succeed without credential changes + return !isAuthenticationError(err) + }), ). 
Do(func() error { return conn.Ping(context.Background()) @@ -114,6 +121,7 @@ func New(config Config) (*clickhouse, error) { } }, }), + apiRequests: batch.New(batch.Config[schema.ApiRequestV2]{ Name: "api_requests", Drop: true, @@ -132,6 +140,7 @@ func New(config Config) (*clickhouse, error) { } }, }), + keyVerifications: batch.New[schema.KeyVerificationRequestV1]( batch.Config[schema.KeyVerificationRequestV1]{ Name: "key_verifications", @@ -150,7 +159,30 @@ func New(config Config) (*clickhouse, error) { ) } }, - }), + }, + ), + + keyVerificationsV2: batch.New[schema.KeyVerificationV2]( + batch.Config[schema.KeyVerificationV2]{ + Name: "key_verifications_v2", + Drop: true, + BatchSize: 50_000, + BufferSize: 200_000, + FlushInterval: 5 * time.Second, + Consumers: 2, + Flush: func(ctx context.Context, rows []schema.KeyVerificationV2) { + table := "default.key_verifications_raw_v2" + err := flush(ctx, conn, table, rows) + if err != nil { + config.Logger.Error("failed to flush batch", + "table", table, + "error", err.Error(), + ) + } + }, + }, + ), + ratelimits: batch.New[schema.RatelimitRequestV1]( batch.Config[schema.RatelimitRequestV1]{ Name: "ratelimits", @@ -175,26 +207,21 @@ func New(config Config) (*clickhouse, error) { return c, nil } -// Shutdown gracefully closes the ClickHouse client, ensuring that any -// pending batches are flushed before shutting down. -// -// This method should be called during application shutdown to prevent -// data loss. It will wait for all batch processors to complete their -// current work and close their channels. 
-// -// Example: -// -// err := clickhouse.Shutdown(ctx) -// if err != nil { -// logger.Error("failed to shutdown clickhouse client", err) -// } -func (c *clickhouse) Shutdown(ctx context.Context) error { - c.requests.Close() - err := c.conn.Close() - if err != nil { - return fault.Wrap(err, fault.Internal("clickhouse couldn't shut down")) +// isAuthenticationError checks if an error is related to authentication/authorization +// These errors should not be retried as they won't succeed without credential changes +func isAuthenticationError(err error) bool { + if err == nil { + return false } - return nil + + errStr := strings.ToLower(err.Error()) + // ClickHouse authentication/authorization error patterns + return strings.Contains(errStr, "authentication") || + strings.Contains(errStr, "password") || + strings.Contains(errStr, "unauthorized") || + strings.Contains(errStr, "access denied") || + strings.Contains(errStr, "code: 516") || // Authentication failed + strings.Contains(errStr, "code: 517") // Wrong password } // BufferRequest adds an API request event to the buffer for batch processing. @@ -264,6 +291,27 @@ func (c *clickhouse) BufferKeyVerification(req schema.KeyVerificationRequestV1) c.keyVerifications.Buffer(req) } +// BufferKeyVerificationV2 adds a key verification event to the buffer for batch processing. +// The event will be flushed to ClickHouse automatically based on the configured +// batch size and flush interval. +// +// This method is non-blocking if the buffer has available capacity. If the buffer +// is full and the Drop option is enabled (which is the default), the event will +// be silently dropped. 
+// +// Example: +// +// ch.BufferKeyVerificationV2(schema.KeyVerificationV2{ +// RequestID: requestID, +// Time: time.Now().UnixMilli(), +// WorkspaceID: workspaceID, +// KeyID: keyID, +// Outcome: "success", +// }) +func (c *clickhouse) BufferKeyVerificationV2(req schema.KeyVerificationV2) { + c.keyVerificationsV2.Buffer(req) +} + // BufferRatelimit adds a ratelimit event to the buffer for batch processing. // The event will be flushed to ClickHouse automatically based on the configured // batch size and flush interval. @@ -289,3 +337,72 @@ func (c *clickhouse) BufferRatelimit(req schema.RatelimitRequestV1) { func (c *clickhouse) Conn() ch.Conn { return c.conn } + +// QueryToMaps executes a query and scans all rows into a slice of maps. +// Each map represents a row with column names as keys and values as ch.Dynamic. +// Returns fault-wrapped errors with appropriate codes for resource limits, +// user query errors, and system errors. +func (c *clickhouse) QueryToMaps(ctx context.Context, query string, args ...any) ([]map[string]any, error) { + rows, err := c.conn.Query(ctx, query, args...) + if err != nil { + return nil, WrapClickHouseError(err) + } + defer rows.Close() + + columns := rows.Columns() + results := make([]map[string]any, 0) + + for rows.Next() { + // Create slice of ch.Dynamic to scan into + values := make([]ch.Dynamic, len(columns)) + valuePtrs := make([]any, len(columns)) + for i := range values { + valuePtrs[i] = &values[i] + } + + if err := rows.Scan(valuePtrs...); err != nil { + return nil, fault.Wrap(err, fault.Public("Failed to read query results")) + } + + row := make(map[string]any) + for i, col := range columns { + row[col] = values[i] + } + + results = append(results, row) + } + + if err := rows.Err(); err != nil { + return nil, WrapClickHouseError(err) + } + + return results, nil +} + +// Exec executes a DDL or DML statement that doesn't return rows. +// Used for CREATE, ALTER, DROP, GRANT, REVOKE, etc. 
+func (c *clickhouse) Exec(ctx context.Context, sql string, args ...any) error { + return c.conn.Exec(ctx, sql, args...) +} + +func (c *clickhouse) Ping(ctx context.Context) error { + return c.conn.Ping(ctx) +} + +// Close gracefully shuts down the ClickHouse client. +// It closes all batch processors (waiting for them to flush remaining data), +// then closes the underlying ClickHouse connection. +func (c *clickhouse) Close() error { + c.requests.Close() + c.apiRequests.Close() + c.keyVerifications.Close() + c.keyVerificationsV2.Close() + c.ratelimits.Close() + + err := c.conn.Close() + if err != nil { + return fault.Wrap(err, fault.Internal("clickhouse couldn't shut down")) + } + + return nil +} diff --git a/go/pkg/clickhouse/errors.go b/go/pkg/clickhouse/errors.go new file mode 100644 index 0000000000..f60a3974b0 --- /dev/null +++ b/go/pkg/clickhouse/errors.go @@ -0,0 +1,253 @@ +package clickhouse + +import ( + "errors" + "strings" + + ch "github.com/ClickHouse/clickhouse-go/v2" + "github.com/unkeyed/unkey/go/pkg/codes" + "github.com/unkeyed/unkey/go/pkg/fault" +) + +// Common user error patterns in ClickHouse error messages +var userErrorPatterns = map[string]bool{ + "unknown identifier": true, + "unknown expression": true, + "unknown function": true, + "unknown column": true, + "unknown table": true, + "missing columns": true, + "there is no column": true, + "type mismatch": true, + "cannot convert": true, + "syntax error": true, + "expected": true, + "illegal type": true, + "ambiguous column": true, + "not an aggregate function": true, + "division by zero": true, + "aggregate function": true, + "window function": true, + "unknown_identifier": true, // ClickHouse error code name + "db::exception": true, // Treat all DB exceptions as user errors + "maybe you meant": true, // ClickHouse suggestions + "no such column": true, + "doesn't exist": true, + "does not exist": true, + "failed at position": true, + "unexpected token": true, + "invalid expression": true, + 
"invalid number of arguments": true, + "wrong number of arguments": true, + "cannot parse": true, + "unrecognized token": true, + "no matching signature": true, + "incompatible types": true, + "illegal aggregation": true, + "cannot find column": true, + "not allowed in this context": true, + "not supported": true, + "invalid combination": true, + "invalid or illegal": true, +} + +// ClickHouse exception codes that indicate user query errors +var userErrorCodes = map[int32]bool{ + 47: true, // UNKNOWN_IDENTIFIER + 60: true, // UNKNOWN_TABLE + 62: true, // SYNTAX_ERROR + 386: true, // ILLEGAL_TYPE_OF_ARGUMENT + 43: true, // ILLEGAL_COLUMN + 352: true, // AMBIGUOUS_COLUMN_NAME +} + +// IsUserQueryError checks if the ClickHouse error is due to a bad query (user error) +// vs a system/infrastructure error. +// +// Returns true for errors like: +// - Unknown column/identifier +// - Type mismatches +// - Syntax errors +// - Division by zero +// +// Returns false for errors like: +// - Connection failures +// - Timeouts +// - Infrastructure issues +func IsUserQueryError(err error) bool { + if err == nil { + return false + } + + errMsg := strings.ToLower(err.Error()) + + // Check error message patterns + for pattern := range userErrorPatterns { + if strings.Contains(errMsg, pattern) { + return true + } + } + + // Check ClickHouse exception codes + var chErr *ch.Exception + if errors.As(err, &chErr) { + return userErrorCodes[chErr.Code] + } + + return false +} + +// ExtractUserFriendlyError extracts a user-friendly error message from ClickHouse error. +// It preserves the key information like unknown identifiers, suggestions, and error context. +func ExtractUserFriendlyError(err error) string { + if err == nil { + return "Query failed" + } + + errMsg := err.Error() + + // ClickHouse errors from HTTP interface often contain the actual DB::Exception message + // Format: "Code: 47. DB::Exception: . 
(ERROR_NAME)" + if idx := strings.Index(errMsg, "DB::Exception: "); idx != -1 { + errMsg = errMsg[idx+15:] // Skip "DB::Exception: " + + // Find the end marker (usually the error code in parentheses at the end) + if endIdx := strings.LastIndex(errMsg, " (version "); endIdx != -1 { + errMsg = errMsg[:endIdx] + } + + // Remove the final error code if present like ". (UNKNOWN_IDENTIFIER)" + if endIdx := strings.LastIndex(errMsg, ". ("); endIdx != -1 { + errMsg = errMsg[:endIdx] + } + + return strings.TrimSpace(errMsg) + } + + // Try to extract from exception object + var chErr *ch.Exception + if errors.As(err, &chErr) { + return chErr.Message + } + + // Clean up common prefixes for other formats + errMsg = strings.TrimPrefix(errMsg, "clickhouse: ") + errMsg = strings.TrimPrefix(errMsg, "sendQuery: ") + errMsg = strings.TrimPrefix(errMsg, "[HTTP 404] response body: ") + errMsg = strings.Trim(errMsg, "\"") + + // If the message is too long, try to extract the first sentence + if len(errMsg) > 500 { + if idx := strings.Index(errMsg, ". "); idx != -1 && idx < 500 { + errMsg = errMsg[:idx+1] + } else { + errMsg = errMsg[:500] + "..." + } + } + + return strings.TrimSpace(errMsg) +} + +// errorResponse defines a structured error response with code and message +type errorResponse struct { + code codes.URN + message string +} + +// resourceLimitPatterns maps error message patterns to error responses +var resourceLimitPatterns = map[string]errorResponse{ + "timeout": { + code: codes.User.UnprocessableEntity.QueryExecutionTimeout.URN(), + message: "Query execution time limit exceeded. Try simplifying your query or reducing the time range.", + }, + "execution time": { + code: codes.User.UnprocessableEntity.QueryExecutionTimeout.URN(), + message: "Query execution time limit exceeded. Try simplifying your query or reducing the time range.", + }, + "memory": { + code: codes.User.UnprocessableEntity.QueryMemoryLimitExceeded.URN(), + message: "Query memory limit exceeded. 
Try simplifying your query or reducing the result set size.", + }, + "too many rows": { + code: codes.User.UnprocessableEntity.QueryRowsLimitExceeded.URN(), + message: "Query attempted to read too many rows. Try adding more filters or reducing the time range.", + }, + "limit for rows_to_read": { + code: codes.User.UnprocessableEntity.QueryRowsLimitExceeded.URN(), + message: "Query attempted to read too many rows. Try adding more filters or reducing the time range.", + }, + "limit for rows": { + code: codes.User.UnprocessableEntity.QueryRowsLimitExceeded.URN(), + message: "Query attempted to read too many rows. Try adding more filters or reducing the time range.", + }, + "quota": { + code: codes.User.TooManyRequests.QueryQuotaExceeded.URN(), + message: "Query quota exceeded for the current time window. Please try again later.", + }, +} + +// resourceLimitCodes maps ClickHouse exception codes to error responses +var resourceLimitCodes = map[int32]errorResponse{ + 159: { // TIMEOUT_EXCEEDED + code: codes.User.UnprocessableEntity.QueryExecutionTimeout.URN(), + message: "Query execution time limit exceeded. Try simplifying your query or reducing the time range.", + }, + 241: { // MEMORY_LIMIT_EXCEEDED + code: codes.User.UnprocessableEntity.QueryMemoryLimitExceeded.URN(), + message: "Query memory limit exceeded. Try simplifying your query or reducing the result set size.", + }, + 396: { // QUERY_WAS_CANCELLED + code: codes.User.UnprocessableEntity.QueryExecutionTimeout.URN(), + message: "Query was cancelled due to resource limits.", + }, + 158: { // TOO_MANY_ROWS_OR_BYTES + code: codes.User.UnprocessableEntity.QueryRowsLimitExceeded.URN(), + message: "Query attempted to read too many rows. Try adding more filters or reducing the time range.", + }, + 198: { // TOO_MANY_ROWS + code: codes.User.UnprocessableEntity.QueryRowsLimitExceeded.URN(), + message: "Query attempted to read too many rows. 
Try adding more filters or reducing the time range.", + }, + 202: { // TOO_MANY_SIMULTANEOUS_QUERIES / QUOTA_EXCEEDED + code: codes.User.TooManyRequests.QueryQuotaExceeded.URN(), + message: "Query quota exceeded for the current time window. Please try again later.", + }, +} + +// WrapClickHouseError wraps a ClickHouse error with appropriate error codes and user-friendly messages. +// It detects resource limit violations and other user errors and tags them with specific error codes. +func WrapClickHouseError(err error) error { + if err == nil { + return nil + } + + errMsg := strings.ToLower(err.Error()) + + // Check for resource limit violations via message patterns + for pattern, response := range resourceLimitPatterns { + if strings.Contains(errMsg, pattern) { + return fault.Wrap(err, + fault.Code(response.code), + fault.Public(response.message), + ) + } + } + + // Check ClickHouse exception codes for resource errors + var chErr *ch.Exception + if errors.As(err, &chErr) { + if response, ok := resourceLimitCodes[chErr.Code]; ok { + return fault.Wrap(err, + fault.Code(response.code), + fault.Public(response.message), + ) + } + } + + // All other ClickHouse errors are treated as user query errors (400) + // This ensures we never return 500 for query execution issues + return fault.Wrap(err, + fault.Code(codes.User.BadRequest.InvalidAnalyticsQuery.URN()), + fault.Public(ExtractUserFriendlyError(err)), + ) +} diff --git a/go/pkg/clickhouse/errors_test.go b/go/pkg/clickhouse/errors_test.go new file mode 100644 index 0000000000..4c159bfeba --- /dev/null +++ b/go/pkg/clickhouse/errors_test.go @@ -0,0 +1,40 @@ +package clickhouse + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestExtractUserFriendlyError(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "unknown identifier with suggestion", + input: `sendQuery: [HTTP 404] response body: "Code: 47. 
DB::Exception: Unknown expression identifier 'external_idd' in scope SELECT external_id, COUNT(*) AS total FROM default.key_verifications_raw_v2 WHERE (workspace_id = 'ws_4qD3194xe2x56qmv') AND (outcome = 'VALID') AND (time >= (now() - toIntervalDay(7))) GROUP BY external_idd LIMIT 10000000. Maybe you meant: ['external_id']. (UNKNOWN_IDENTIFIER) (version 25.6.4.12 (official build))\n"`, + expected: "Unknown expression identifier 'external_idd' in scope SELECT external_id, COUNT(*) AS total FROM default.key_verifications_raw_v2 WHERE (workspace_id = 'ws_4qD3194xe2x56qmv') AND (outcome = 'VALID') AND (time >= (now() - toIntervalDay(7))) GROUP BY external_idd LIMIT 10000000. Maybe you meant: ['external_id']", + }, + { + name: "syntax error", + input: `sendQuery: [HTTP 400] response body: "Code: 62. DB::Exception: Syntax error: failed at position 10. (SYNTAX_ERROR) (version 25.6.4.12)\n"`, + expected: "Syntax error: failed at position 10", + }, + { + name: "unknown table", + input: `Code: 60. DB::Exception: Table default.nonexistent doesn't exist. (UNKNOWN_TABLE) (version 25.6.4.12)`, + expected: "Table default.nonexistent doesn't exist", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := errors.New(tt.input) + result := ExtractUserFriendlyError(err) + require.Equal(t, tt.expected, result) + }) + } +} diff --git a/go/pkg/clickhouse/interface.go b/go/pkg/clickhouse/interface.go index 0133a4185b..b1a5d5c719 100644 --- a/go/pkg/clickhouse/interface.go +++ b/go/pkg/clickhouse/interface.go @@ -26,6 +26,10 @@ type Bufferer interface { // These represent API key validation operations with their outcomes. BufferKeyVerification(schema.KeyVerificationRequestV1) + // BufferKeyVerificationV2 adds a key verification event to the buffer. + // These represent API key validation operations with their outcomes. + BufferKeyVerificationV2(schema.KeyVerificationV2) + // BufferRatelimit adds a ratelimit event to the buffer.
// These represent API ratelimit operations with their outcome. BufferRatelimit(schema.RatelimitRequestV1) @@ -35,11 +39,30 @@ type Querier interface { // Conn returns a connection to the ClickHouse database. Conn() ch.Conn + // QueryToMaps executes a query and scans all rows into a slice of maps. + // Each map represents a row with column names as keys. + // This is useful for dynamic queries where the schema is not known at compile time. + QueryToMaps(ctx context.Context, query string, args ...any) ([]map[string]any, error) + + // Exec executes a DDL or DML statement (CREATE, ALTER, DROP, etc.) + Exec(ctx context.Context, sql string, args ...any) error + + // ConfigureUser creates or updates a ClickHouse user with permissions, quotas, and settings. + // This is idempotent and can be called multiple times to update configuration. + ConfigureUser(ctx context.Context, config UserConfig) error + GetBillableVerifications(ctx context.Context, workspaceID string, year, month int) (int64, error) + GetBillableRatelimits(ctx context.Context, workspaceID string, year, month int) (int64, error) } type ClickHouse interface { Bufferer Querier + + // Close gracefully shuts down the client, flushing and closing all batch processors before closing the underlying ClickHouse connection. + Close() error + + // Ping verifies the connection to the ClickHouse database.
+ Ping(ctx context.Context) error } diff --git a/go/pkg/clickhouse/key_verifications_test.go b/go/pkg/clickhouse/key_verifications_test.go index de89fe9e73..7e96c0bdbf 100644 --- a/go/pkg/clickhouse/key_verifications_test.go +++ b/go/pkg/clickhouse/key_verifications_test.go @@ -2,6 +2,8 @@ package clickhouse_test import ( "context" + "database/sql" + "errors" "math/rand" "slices" "testing" @@ -47,6 +49,12 @@ func TestKeyVerifications(t *testing.T) { keySpaces := array.Fill(numKeySpaces, func() string { return uid.New(uid.KeySpacePrefix) }) identities := array.Fill(numIdentities, func() string { return uid.New(uid.IdentityPrefix) }) + // Map each identity to an external_id (1:1 mapping to ensure external_id uniqueness per identity) + identityToExternalID := make(map[string]string) + for _, identityID := range identities { + identityToExternalID[identityID] = "ext_" + uid.New("") + } + outcomes := []string{"VALID", "INVALID", "EXPIRED", "RATE_LIMITED", "DISABLED"} regions := []string{"us-east-1", "us-west-2", "eu-west-1", "ap-southeast-1"} tags := [][]string{ @@ -68,11 +76,13 @@ func TestKeyVerifications(t *testing.T) { if rand.Float64() < 0.1 { // 10% chance of high latency latency += rand.Float64() * 400 // Up to 500ms } + identityID := array.Random(identities) return schema.KeyVerificationV2{ RequestID: uid.New(uid.RequestPrefix), Time: timestamp.UnixMilli(), WorkspaceID: workspaceID, - IdentityID: array.Random(identities), + IdentityID: identityID, + ExternalID: identityToExternalID[identityID], KeySpaceID: array.Random(keySpaces), Outcome: array.Random(outcomes), Region: array.Random(regions), @@ -314,6 +324,151 @@ func TestKeyVerifications(t *testing.T) { } }) + t.Run("external_id is stored correctly per identity", func(t *testing.T) { + t.Parallel() + // Test that external_id is correctly stored for each identity across all aggregation tables + for _, identityID := range identities[:10] { + id := identityID + expectedExternalID := identityToExternalID[id] + + 
for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + tbl := table + t.Run(tbl, func(t *testing.T) { + t.Parallel() + require.EventuallyWithT(t, func(c *assert.CollectT) { + var queriedExternalID string + err := conn.QueryRow(ctx, "SELECT external_id FROM ? WHERE workspace_id = ? AND identity_id = ? LIMIT 1;", tbl, workspaceID, id).Scan(&queriedExternalID) + require.NoError(c, err) + require.Equal(c, expectedExternalID, queriedExternalID, "external_id should match for identity %s in table %s", id, tbl) + }, time.Minute, time.Second) + }) + } + } + }) + + t.Run("external_id filtering works in all tables", func(t *testing.T) { + t.Parallel() + // Pick a random identity and verify filtering by external_id returns correct count + for _, identityID := range identities[:10] { + id := identityID + extID := identityToExternalID[id] + expectedCount := array.Reduce(verifications, func(acc int, v schema.KeyVerificationV2) int { + if v.ExternalID == extID { + acc++ + } + return acc + }, 0) + + for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + tbl := table + t.Run(tbl, func(t *testing.T) { + t.Parallel() + require.EventuallyWithT(t, func(c *assert.CollectT) { + var queriedCount int64 + err := conn.QueryRow(ctx, "SELECT SUM(count) FROM ? WHERE workspace_id = ? 
AND external_id = ?;", tbl, workspaceID, extID).Scan(&queriedCount) + require.NoError(c, err) + require.Equal(c, expectedCount, int(queriedCount), "count should match for external_id %s in table %s", extID, tbl) + }, time.Minute, time.Second) + }) + } + } + }) + + t.Run("external_id + outcome combinations are correct", func(t *testing.T) { + t.Parallel() + // Test that we can group by external_id and outcome correctly + for _, identityID := range identities[:10] { + id := identityID + extID := identityToExternalID[id] + + countByOutcome := array.Reduce(verifications, func(acc map[string]int, v schema.KeyVerificationV2) map[string]int { + if v.ExternalID == extID { + acc[v.Outcome]++ + } + return acc + }, map[string]int{}) + + for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + tbl := table + t.Run(tbl, func(t *testing.T) { + t.Parallel() + for outcome, expectedCount := range countByOutcome { + out := outcome + expCount := expectedCount + require.EventuallyWithT(t, func(c *assert.CollectT) { + var queriedCount int64 + err := conn.QueryRow(ctx, "SELECT SUM(count) FROM ? WHERE workspace_id = ? AND external_id = ? 
AND outcome = ?;", tbl, workspaceID, extID, out).Scan(&queriedCount) + require.NoError(c, err) + require.Equal(c, expCount, int(queriedCount), "count for external_id %s and outcome %s should match in table %s", extID, out, tbl) + }, time.Minute, time.Second) + } + }) + } + } + }) + + t.Run("external_id and identity_id are consistently mapped", func(t *testing.T) { + t.Parallel() + // Verify that each external_id is always paired with the same identity_id + for _, identityID := range identities[:10] { + id := identityID + extID := identityToExternalID[id] + + for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + tbl := table + t.Run(tbl, func(t *testing.T) { + t.Parallel() + require.EventuallyWithT(t, func(c *assert.CollectT) { + // Count should be zero when querying for this external_id with a different identity_id + var countWithWrongIdentity int64 + wrongIdentityID := identities[(slices.Index(identities, id)+1)%len(identities)] + if wrongIdentityID == id { + wrongIdentityID = identities[(slices.Index(identities, id)+2)%len(identities)] + } + + err := conn.QueryRow(ctx, "SELECT SUM(count) FROM ? WHERE workspace_id = ? AND external_id = ? 
AND identity_id = ?;", tbl, workspaceID, extID, wrongIdentityID).Scan(&countWithWrongIdentity) + if err != nil { + // It's OK if there are no rows, that means the mapping is correct + if errors.Is(err, sql.ErrNoRows) { + countWithWrongIdentity = 0 + } else { + require.NoError(c, err, "unexpected error querying for wrong identity mapping in table %s", tbl) + } + } + require.Equal(c, int64(0), countWithWrongIdentity, "external_id %s should never be paired with identity_id %s in table %s", extID, wrongIdentityID, tbl) + }, time.Minute, time.Second) + }) + } + } + }) + + t.Run("credits spent per external_id are correct", func(t *testing.T) { + t.Parallel() + for _, identityID := range identities[:10] { + id := identityID + extID := identityToExternalID[id] + expectedCredits := array.Reduce(verifications, func(acc int64, v schema.KeyVerificationV2) int64 { + if v.ExternalID == extID { + acc += v.SpentCredits + } + return acc + }, int64(0)) + + for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + tbl := table + t.Run(tbl, func(t *testing.T) { + t.Parallel() + require.EventuallyWithT(t, func(c *assert.CollectT) { + var queriedCredits int64 + err := conn.QueryRow(ctx, "SELECT SUM(spent_credits) FROM ? WHERE workspace_id = ? 
AND external_id = ?;", tbl, workspaceID, extID).Scan(&queriedCredits) + require.NoError(c, err) + require.Equal(c, expectedCredits, queriedCredits, "spent_credits for external_id %s should match in table %s", extID, tbl) + }, time.Minute, time.Second) + }) + } + } + }) + t.Run("billing per workspace is correct", func(t *testing.T) { t.Parallel() billableVerifications := array.Reduce(verifications, func(acc int64, v schema.KeyVerificationV2) int64 { diff --git a/go/pkg/clickhouse/migrations/20251010160229.sql b/go/pkg/clickhouse/migrations/20251010160229.sql new file mode 100644 index 0000000000..a11712c85f --- /dev/null +++ b/go/pkg/clickhouse/migrations/20251010160229.sql @@ -0,0 +1,26 @@ +-- Drop "key_verifications_per_month_mv_v2" view +DROP VIEW `default`.`key_verifications_per_month_mv_v2`; +ALTER TABLE `default`.`key_verifications_per_day_v2` ADD COLUMN `external_id` String; +-- Drop "key_verifications_per_day_mv_v2" view +DROP VIEW `default`.`key_verifications_per_day_mv_v2`; +ALTER TABLE `default`.`key_verifications_per_hour_v2` ADD COLUMN `external_id` String; +-- Drop "key_verifications_per_hour_mv_v2" view +DROP VIEW `default`.`key_verifications_per_hour_mv_v2`; +ALTER TABLE `default`.`key_verifications_per_minute_v2` ADD COLUMN `external_id` String; +ALTER TABLE `default`.`key_verifications_per_month_v2` ADD COLUMN `external_id` String; +-- Drop "key_verifications_per_minute_mv_v2" view +DROP VIEW `default`.`key_verifications_per_minute_mv_v2`; +ALTER TABLE `default`.`key_verifications_raw_v2` ADD COLUMN `external_id` String; +ALTER TABLE `default`.`key_verifications_raw_v2` ADD INDEX `idx_external_id` ((external_id)) TYPE bloom_filter GRANULARITY 1; +-- Create "key_verifications_per_day_mv_v2" view +CREATE MATERIALIZED VIEW `default`.`key_verifications_per_day_mv_v2` TO `default`.`key_verifications_per_day_v2` AS SELECT workspace_id, key_space_id, identity_id, external_id, key_id, outcome, tags, sum(count) AS count, sum(spent_credits) AS spent_credits, 
avgMergeState(latency_avg) AS latency_avg, quantilesTDigestMergeState(0.75)(latency_p75) AS latency_p75, quantilesTDigestMergeState(0.99)(latency_p99) AS latency_p99, toDate(toStartOfDay(time)) AS time FROM default.key_verifications_per_hour_v2 GROUP BY workspace_id, time, key_space_id, identity_id, external_id, key_id, outcome, tags; +-- Create "key_verifications_per_hour_mv_v2" view +CREATE MATERIALIZED VIEW `default`.`key_verifications_per_hour_mv_v2` TO `default`.`key_verifications_per_hour_v2` AS SELECT workspace_id, key_space_id, identity_id, external_id, key_id, outcome, tags, sum(count) AS count, sum(spent_credits) AS spent_credits, avgMergeState(latency_avg) AS latency_avg, quantilesTDigestMergeState(0.75)(latency_p75) AS latency_p75, quantilesTDigestMergeState(0.99)(latency_p99) AS latency_p99, toStartOfHour(time) AS time FROM default.key_verifications_per_minute_v2 GROUP BY workspace_id, time, key_space_id, identity_id, external_id, key_id, outcome, tags; +-- Create "key_verifications_per_minute_mv_v2" view +CREATE MATERIALIZED VIEW `default`.`key_verifications_per_minute_mv_v2` TO `default`.`key_verifications_per_minute_v2` AS SELECT workspace_id, key_space_id, identity_id, external_id, key_id, outcome, tags, count(*) AS count, sum(spent_credits) AS spent_credits, avgState(latency) AS latency_avg, quantilesTDigestState(0.75)(latency) AS latency_p75, quantilesTDigestState(0.99)(latency) AS latency_p99, toStartOfMinute(fromUnixTimestamp64Milli(time)) AS time FROM default.key_verifications_raw_v2 GROUP BY workspace_id, time, key_space_id, identity_id, external_id, key_id, outcome, tags; +-- Create "key_verifications_per_month_mv_v2" view +CREATE MATERIALIZED VIEW `default`.`key_verifications_per_month_mv_v2` TO `default`.`key_verifications_per_month_v2` AS SELECT workspace_id, key_space_id, identity_id, external_id, key_id, outcome, tags, sum(count) AS count, sum(spent_credits) AS spent_credits, avgMergeState(latency_avg) AS latency_avg, 
quantilesTDigestMergeState(0.75)(latency_p75) AS latency_p75, quantilesTDigestMergeState(0.99)(latency_p99) AS latency_p99, toDate(toStartOfMonth(time)) AS time FROM default.key_verifications_per_day_v2 GROUP BY workspace_id, time, key_space_id, identity_id, external_id, key_id, outcome, tags; +-- Drop "temp_sync_key_verifications_v1_to_v2" view +DROP VIEW `default`.`temp_sync_key_verifications_v1_to_v2`; +-- Create "temp_sync_key_verifications_v1_to_v2" view +CREATE MATERIALIZED VIEW `default`.`temp_sync_key_verifications_v1_to_v2` TO `default`.`key_verifications_raw_v2` AS SELECT request_id, time, workspace_id, key_space_id, identity_id, '' AS external_id, key_id, region, outcome, tags, 0 AS spent_credits, 0. AS latency FROM verifications.raw_key_verifications_v1; diff --git a/go/pkg/clickhouse/migrations/atlas.sum b/go/pkg/clickhouse/migrations/atlas.sum index f1e5e2c3ed..904f223e02 100644 --- a/go/pkg/clickhouse/migrations/atlas.sum +++ b/go/pkg/clickhouse/migrations/atlas.sum @@ -1,4 +1,5 @@ -h1:h1Zqu+nE4zMrsTYqHenf8IWm120LNPQkeJ4cZh73MXc= +h1:B08Sp0NBD8HIfwRM3OXvYylIOhzHOWu5gPMgosYMwgg= 20250903085516_init.sql h1:Id61mpzn/VxahUVr4XYj7LFcOF/VrefEbefdL57k3Sw= 20250911070454.sql h1:Fwr5vMWtnDvRISOG1Ul9si+VFbzMrUlHb3lxW4yKGB4= 20250925091254.sql h1:zHWbGFg//sSLbARpVmSCnfaSwQH8eU9Tsf7hdF/+TRY= +20251010160229.sql h1:XYT+uL8ZZsqXasXs26XyqH6RPK6RCD4g63A0JIqsFGo= diff --git a/go/pkg/clickhouse/noop.go b/go/pkg/clickhouse/noop.go index 55f9b022ba..b39978a32b 100644 --- a/go/pkg/clickhouse/noop.go +++ b/go/pkg/clickhouse/noop.go @@ -30,6 +30,11 @@ func (n *noop) BufferKeyVerification(schema.KeyVerificationRequestV1) { // Intentionally empty - discards the event } +// BufferKeyVerificationV2 implements the Bufferer interface but discards the event. +func (n *noop) BufferKeyVerificationV2(schema.KeyVerificationV2) { + // Intentionally empty - discards the event +} + // BufferRatelimit implements the Bufferer interface but discards the event. 
func (n *noop) BufferRatelimit(req schema.RatelimitRequestV1) { // Intentionally empty - discards the event @@ -49,6 +54,30 @@ func (n *noop) Conn() ch.Conn { return nil } +// QueryToMaps implements the Querier interface but always returns an empty slice. +func (n *noop) QueryToMaps(ctx context.Context, query string, args ...any) ([]map[string]any, error) { + return []map[string]any{}, nil +} + +// Exec implements the Querier interface but does nothing. +func (n *noop) Exec(ctx context.Context, sql string, args ...any) error { + return nil +} + +// ConfigureUser implements the Querier interface but does nothing. +func (n *noop) ConfigureUser(ctx context.Context, config UserConfig) error { + return nil +} + +func (n *noop) Ping(ctx context.Context) error { + return nil +} + +// Close implements the interface but does nothing; the no-op has no underlying connection to close. +func (n *noop) Close() error { + return nil +} + // NewNoop creates a new no-op implementation of the Bufferer interface. // This implementation simply discards all events without processing them. // diff --git a/go/pkg/clickhouse/query-parser/cte.go b/go/pkg/clickhouse/query-parser/cte.go new file mode 100644 index 0000000000..70a9502088 --- /dev/null +++ b/go/pkg/clickhouse/query-parser/cte.go @@ -0,0 +1,48 @@ +package queryparser + +import ( + "strings" + + clickhouse "github.com/AfterShip/clickhouse-sql-parser/parser" +) + +// buildCTERegistry scans the WITH clause and registers all CTE (Common Table Expression) names. +// +// CTEs are temporary named result sets defined with WITH clauses, like: +// +// WITH cte_name AS (SELECT ...) SELECT * FROM cte_name +// +// We need to track CTE names separately because: +// 1. CTEs act as virtual tables that exist only for the duration of the query +// 2. When validating table names, we must distinguish between: +// - Real tables (must be in allowedTables list) +// - CTEs (allowed because they're user-defined subqueries) +// +// 3. 
Without this registry, we'd incorrectly reject valid queries that reference CTEs +// +// Example query that requires CTE tracking: +// +// WITH filtered_data AS (SELECT * FROM key_verifications_v1 WHERE ...) +// SELECT * FROM filtered_data +// +// Here, "filtered_data" is not a real table, but it's valid because it's a CTE. +func (p *Parser) buildCTERegistry() { + if p.stmt.With == nil || len(p.stmt.With.CTEs) == 0 { + return + } + + for _, cte := range p.stmt.With.CTEs { + // CTE Expr is the name we reference it by + ident, ok := cte.Expr.(*clickhouse.Ident) + if !ok { + continue + } + + p.cteNames[strings.ToLower(ident.Name)] = true + } +} + +// isCTE checks if a table name is a CTE +func (p *Parser) isCTE(tableName string) bool { + return p.cteNames[strings.ToLower(tableName)] +} diff --git a/go/pkg/clickhouse/query-parser/errors_test.go b/go/pkg/clickhouse/query-parser/errors_test.go new file mode 100644 index 0000000000..24f0da6893 --- /dev/null +++ b/go/pkg/clickhouse/query-parser/errors_test.go @@ -0,0 +1,87 @@ +package queryparser + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/go/pkg/codes" + "github.com/unkeyed/unkey/go/pkg/fault" +) + +func TestParser_ErrorCodes(t *testing.T) { + tests := []struct { + name string + config Config + query string + expectedCode codes.URN + expectedError string + }{ + { + name: "invalid SQL syntax", + config: Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.keys_v2", + }, + }, + query: "SELECT * FROM @@@", + expectedCode: codes.User.BadRequest.InvalidAnalyticsQuery.URN(), + expectedError: "Invalid SQL syntax", + }, + { + name: "invalid table", + config: Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.keys_v2", + }, + }, + query: "SELECT * FROM system.tables", + expectedCode: codes.User.BadRequest.InvalidAnalyticsTable.URN(), + expectedError: "Access to table 'system.tables' is not allowed", + }, + { + name: "invalid 
function", + config: Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.keys_v2", + }, + }, + query: "SELECT file('test') FROM default.keys_v2", + expectedCode: codes.User.BadRequest.InvalidAnalyticsFunction.URN(), + expectedError: "Function 'file' is not allowed", + }, + { + name: "query not supported", + config: Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.keys_v2", + }, + }, + query: "INSERT INTO default.keys_v2 VALUES (1)", + expectedCode: codes.User.BadRequest.InvalidAnalyticsQueryType.URN(), + expectedError: "Only SELECT queries are allowed", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := NewParser(tt.config) + _, err := parser.Parse(context.Background(), tt.query) + + require.Error(t, err) + + // Check error code + code, ok := fault.GetCode(err) + require.True(t, ok, "Expected error to have a code") + require.Equal(t, tt.expectedCode, code) + + // Check public message + publicMsg := fault.UserFacingMessage(err) + require.Contains(t, publicMsg, tt.expectedError) + }) + } +} diff --git a/go/pkg/clickhouse/query-parser/extract.go b/go/pkg/clickhouse/query-parser/extract.go new file mode 100644 index 0000000000..8bc48a6ac5 --- /dev/null +++ b/go/pkg/clickhouse/query-parser/extract.go @@ -0,0 +1,90 @@ +package queryparser + +import ( + "strings" + + clickhouse "github.com/AfterShip/clickhouse-sql-parser/parser" +) + +// ExtractColumn extracts all string literal values for a given column name from WHERE and HAVING clauses. +// Only extracts from positive assertions (= and IN operators), ignores negative conditions (!=, NOT IN, <, >, etc). +// Returns a deduplicated slice of values found for the column. Returns empty slice if no values found. +// Must be called after Parse(). 
+func (p *Parser) ExtractColumn(columnName string) []string { + uniqueValues := make(map[string]bool) + + extractFunc := func(node clickhouse.Expr) bool { + binOp, ok := node.(*clickhouse.BinaryOperation) + if !ok { + return true + } + + // Check if left side is our column + leftIdent, ok := binOp.LeftExpr.(*clickhouse.Ident) + if !ok || !strings.EqualFold(leftIdent.Name, columnName) { + return true + } + + // Only extract from positive assertions (= or IN) + // Ignore negative operators: !=, NOT IN, <, >, <=, >= + if binOp.Operation == clickhouse.TokenKindSingleEQ || strings.EqualFold(string(binOp.Operation), "IN") { + extractValues(binOp.RightExpr, uniqueValues) + } + + return true + } + + if p.stmt.Where != nil { + clickhouse.Walk(p.stmt.Where.Expr, extractFunc) + } + + if p.stmt.Having != nil { + clickhouse.Walk(p.stmt.Having.Expr, extractFunc) + } + + if len(uniqueValues) == 0 { + return []string{} + } + + // Convert map to slice + result := make([]string, 0, len(uniqueValues)) + for value := range uniqueValues { + result = append(result, value) + } + + return result +} + +func extractValues(expr clickhouse.Expr, values map[string]bool) { + // Handle single string literal (for = operator) + strLit, ok := expr.(*clickhouse.StringLiteral) + if ok { + values[strLit.Literal] = true + return + } + + // Handle IN operator: IN ('val1', 'val2', 'val3') + paramList, ok := expr.(*clickhouse.ParamExprList) + if !ok { + return + } + + if paramList.Items == nil { + return + } + + for _, item := range paramList.Items.Items { + // Each item is wrapped in a ColumnExpr + colExpr, ok := item.(*clickhouse.ColumnExpr) + if !ok { + continue + } + + strLit, ok := colExpr.Expr.(*clickhouse.StringLiteral) + if !ok { + continue + } + + values[strLit.Literal] = true + } +} diff --git a/go/pkg/clickhouse/query-parser/extract_test.go b/go/pkg/clickhouse/query-parser/extract_test.go new file mode 100644 index 0000000000..70145692bd --- /dev/null +++ 
b/go/pkg/clickhouse/query-parser/extract_test.go @@ -0,0 +1,124 @@ +package queryparser + +import ( + "context" + "slices" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestExtractColumnValues(t *testing.T) { + tests := []struct { + name string + query string + columnName string + expected []string + }{ + { + name: "single equality", + query: "SELECT COUNT(*) FROM key_verifications WHERE key_space_id = 'ks_1234'", + columnName: "key_space_id", + expected: []string{"ks_1234"}, + }, + { + name: "IN clause with multiple values", + query: "SELECT COUNT(*) FROM key_verifications WHERE key_space_id IN ('ks_1234', 'ks_5678')", + columnName: "key_space_id", + expected: []string{"ks_1234", "ks_5678"}, + }, + { + name: "multiple conditions with AND", + query: "SELECT COUNT(*) FROM key_verifications WHERE key_space_id = 'ks_1234' AND outcome = 'VALID'", + columnName: "key_space_id", + expected: []string{"ks_1234"}, + }, + { + name: "no matching column", + query: "SELECT COUNT(*) FROM key_verifications WHERE outcome = 'VALID'", + columnName: "key_space_id", + expected: []string{}, + }, + { + name: "case insensitive column matching", + query: "SELECT COUNT(*) FROM key_verifications WHERE KEY_SPACE_ID = 'ks_1234'", + columnName: "key_space_id", + expected: []string{"ks_1234"}, + }, + { + name: "OR expression", + query: "SELECT COUNT(*) FROM key_verifications WHERE key_space_id = 'ks_1234' OR key_space_id = 'ks_5678'", + columnName: "key_space_id", + expected: []string{"ks_1234", "ks_5678"}, + }, + { + name: "complex query with multiple operators", + query: "SELECT COUNT(*) FROM key_verifications WHERE key_space_id = 'ks_1234' AND (outcome = 'VALID' OR outcome = 'INVALID')", + columnName: "key_space_id", + expected: []string{"ks_1234"}, + }, + { + name: "extract different column", + query: "SELECT COUNT(*) FROM key_verifications WHERE key_space_id = 'ks_1234' AND outcome = 'VALID'", + columnName: "outcome", + expected: []string{"VALID"}, + }, + { + name: "no 
WHERE clause", + query: "SELECT COUNT(*) FROM key_verifications", + columnName: "key_space_id", + expected: []string{}, + }, + { + name: "duplicate values deduplicated", + query: "SELECT COUNT(*) FROM key_verifications WHERE key_space_id = 'ks_1234' OR key_space_id = 'ks_1234'", + columnName: "key_space_id", + expected: []string{"ks_1234"}, + }, + { + name: "HAVING clause", + query: "SELECT key_space_id, COUNT(*) FROM key_verifications GROUP BY key_space_id HAVING key_space_id = 'ks_1234'", + columnName: "key_space_id", + expected: []string{"ks_1234"}, + }, + { + name: "negative operator != ignored", + query: "SELECT COUNT(*) FROM key_verifications WHERE key_space_id != 'ks_bad'", + columnName: "key_space_id", + expected: []string{}, + }, + { + name: "mix of positive and negative operators", + query: "SELECT COUNT(*) FROM key_verifications WHERE key_space_id = 'ks_good' AND outcome != 'INVALID'", + columnName: "key_space_id", + expected: []string{"ks_good"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := NewParser(Config{ + WorkspaceID: "ws_test", + TableAliases: map[string]string{"key_verifications": "default.key_verifications_raw_v2"}, + AllowedTables: []string{"default.key_verifications_raw_v2"}, + }) + + // Parse the query first + _, err := parser.Parse(context.Background(), tt.query) + require.NoError(t, err) + + // Extract values + values := parser.ExtractColumn(tt.columnName) + + // Sort for consistent comparison + if values != nil { + slices.Sort(values) + } + if tt.expected != nil { + slices.Sort(tt.expected) + } + + require.Equal(t, tt.expected, values) + }) + } +} diff --git a/go/pkg/clickhouse/query-parser/filter.go b/go/pkg/clickhouse/query-parser/filter.go new file mode 100644 index 0000000000..2f2e792f93 --- /dev/null +++ b/go/pkg/clickhouse/query-parser/filter.go @@ -0,0 +1,137 @@ +package queryparser + +import ( + clickhouse "github.com/AfterShip/clickhouse-sql-parser/parser" +) + +func (p *Parser) 
injectWorkspaceFilter() error { + // Walk the AST to inject workspace filter only on SELECT statements that directly access tables + clickhouse.Walk(p.stmt, func(node clickhouse.Expr) bool { + // Check if this is a SELECT query + if selectQuery, ok := node.(*clickhouse.SelectQuery); ok { + // Only inject if this SELECT directly references a table (not a subquery) + if p.selectReferencesTable(selectQuery) { + p.injectWorkspaceFilterOnSelect(selectQuery) + } + } + return true + }) + + return nil +} + +// injectWorkspaceFilterOnSelect injects the workspace filter on a single SELECT statement +func (p *Parser) injectWorkspaceFilterOnSelect(stmt *clickhouse.SelectQuery) { + // Create: workspace_id = 'ws_xxx' + filter := &clickhouse.BinaryOperation{ + LeftExpr: &clickhouse.NestedIdentifier{ + Ident: &clickhouse.Ident{Name: "workspace_id"}, + }, + Operation: "=", + RightExpr: &clickhouse.StringLiteral{ + Literal: p.config.WorkspaceID, + }, + } + + if stmt.Where == nil { + stmt.Where = &clickhouse.WhereClause{Expr: filter} + return + } + + stmt.Where.Expr = &clickhouse.BinaryOperation{ + LeftExpr: filter, + Operation: "AND", + RightExpr: stmt.Where.Expr, + } +} + +func (p *Parser) injectSecurityFilters() error { + for _, securityFilter := range p.config.SecurityFilters { + if len(securityFilter.AllowedValues) == 0 { + continue + } + + // Walk the AST to inject security filter only on SELECT statements that directly access tables + clickhouse.Walk(p.stmt, func(node clickhouse.Expr) bool { + // Check if this is a SELECT query + if selectQuery, ok := node.(*clickhouse.SelectQuery); ok { + // Only inject if this SELECT directly references a table (not a subquery) + if p.selectReferencesTable(selectQuery) { + p.injectSecurityFilterOnSelect(selectQuery, securityFilter) + } + } + return true + }) + } + + return nil +} + +// selectReferencesTable checks if a SELECT statement directly references a table in its FROM clause +// Returns true if the FROM clause contains a table, false 
if it contains only a subquery +func (p *Parser) selectReferencesTable(stmt *clickhouse.SelectQuery) bool { + if stmt.From == nil { + return false + } + + // Check if the FROM clause directly contains a subquery + // If it does, we should NOT inject filters here + hasSubquery := false + clickhouse.Walk(stmt.From, func(node clickhouse.Expr) bool { + if _, ok := node.(*clickhouse.SelectQuery); ok { + hasSubquery = true + return false // Stop walking + } + return true + }) + + // If there's a subquery in the FROM, don't inject filters + if hasSubquery { + return false + } + + // Otherwise, check if there's a table reference + hasTable := false + clickhouse.Walk(stmt.From, func(node clickhouse.Expr) bool { + if _, ok := node.(*clickhouse.TableExpr); ok { + hasTable = true + return false // Stop walking + } + return true + }) + + return hasTable +} + +// injectSecurityFilterOnSelect injects a security filter on a single SELECT statement +func (p *Parser) injectSecurityFilterOnSelect(stmt *clickhouse.SelectQuery, securityFilter SecurityFilter) { + // Build IN list: {column} IN ('val1', 'val2', ...) 
+ items := make([]clickhouse.Expr, len(securityFilter.AllowedValues)) + for i, value := range securityFilter.AllowedValues { + items[i] = &clickhouse.ColumnExpr{ + Expr: &clickhouse.StringLiteral{ + Literal: value, + }, + } + } + + // Create filter using column name + filter := &clickhouse.BinaryOperation{ + LeftExpr: &clickhouse.Ident{Name: securityFilter.Column}, + Operation: "IN", + RightExpr: &clickhouse.ParamExprList{ + Items: &clickhouse.ColumnExprList{Items: items}, + }, + } + + // Add to WHERE clause + if stmt.Where == nil { + stmt.Where = &clickhouse.WhereClause{Expr: filter} + } else { + stmt.Where.Expr = &clickhouse.BinaryOperation{ + LeftExpr: filter, + Operation: "AND", + RightExpr: stmt.Where.Expr, + } + } +} diff --git a/go/pkg/clickhouse/query-parser/filter_test.go b/go/pkg/clickhouse/query-parser/filter_test.go new file mode 100644 index 0000000000..533b0528ef --- /dev/null +++ b/go/pkg/clickhouse/query-parser/filter_test.go @@ -0,0 +1,328 @@ +package queryparser_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + chquery "github.com/unkeyed/unkey/go/pkg/clickhouse/query-parser" +) + +func TestParser_WorkspaceFilter(t *testing.T) { + p := chquery.NewParser(chquery.Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.keys_v2", + }, + }) + + output, err := p.Parse(context.Background(), "SELECT * FROM default.keys_v2") + require.NoError(t, err) + + require.Equal(t, "SELECT * FROM default.keys_v2 WHERE workspace_id = 'ws_123'", output) +} + +func TestParser_WorkspaceFilterWithExistingWhere(t *testing.T) { + p := chquery.NewParser(chquery.Config{ + WorkspaceID: "ws_456", + AllowedTables: []string{ + "default.keys_v2", + }, + }) + + output, err := p.Parse(context.Background(), "SELECT * FROM default.keys_v2 WHERE active = 1") + require.NoError(t, err) + + require.Equal(t, "SELECT * FROM default.keys_v2 WHERE workspace_id = 'ws_456' AND active = 1", output) +} + +func TestSecurityFilterInjection(t 
*testing.T) { + t.Run("no filter when SecurityFilters is empty", func(t *testing.T) { + parser := chquery.NewParser(chquery.Config{ + WorkspaceID: "ws_test", + SecurityFilters: nil, // No restriction + Limit: 100, + TableAliases: map[string]string{ + "key_verifications": "default.key_verifications_raw_v2", + }, + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + query := "SELECT COUNT(*) FROM key_verifications" + result, err := parser.Parse(context.Background(), query) + require.NoError(t, err) + + require.Equal(t, "SELECT COUNT(*) FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_test' LIMIT 100", result) + }) + + t.Run("injects single key_space_id filter", func(t *testing.T) { + parser := chquery.NewParser(chquery.Config{ + WorkspaceID: "ws_test", + SecurityFilters: []chquery.SecurityFilter{ + { + Column: "key_space_id", + AllowedValues: []string{"ks_123"}, + }, + }, + Limit: 100, + TableAliases: map[string]string{ + "key_verifications": "default.key_verifications_raw_v2", + }, + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + query := "SELECT COUNT(*) FROM key_verifications" + result, err := parser.Parse(context.Background(), query) + require.NoError(t, err) + + require.Equal(t, "SELECT COUNT(*) FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_test' AND key_space_id IN ('ks_123') LIMIT 100", result) + }) + + t.Run("injects multiple key_space_id filter", func(t *testing.T) { + parser := chquery.NewParser(chquery.Config{ + WorkspaceID: "ws_test", + SecurityFilters: []chquery.SecurityFilter{ + { + Column: "key_space_id", + AllowedValues: []string{"ks_123", "ks_456", "ks_789"}, + }, + }, + Limit: 100, + TableAliases: map[string]string{ + "key_verifications": "default.key_verifications_raw_v2", + }, + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + query := "SELECT COUNT(*) FROM key_verifications" + result, err := parser.Parse(context.Background(), query) + 
require.NoError(t, err) + + require.Equal(t, "SELECT COUNT(*) FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_test' AND key_space_id IN ('ks_123', 'ks_456', 'ks_789') LIMIT 100", result) + }) + + t.Run("combines with existing WHERE clause", func(t *testing.T) { + parser := chquery.NewParser(chquery.Config{ + WorkspaceID: "ws_test", + SecurityFilters: []chquery.SecurityFilter{ + { + Column: "key_space_id", + AllowedValues: []string{"ks_123"}, + }, + }, + Limit: 100, + TableAliases: map[string]string{ + "key_verifications": "default.key_verifications_raw_v2", + }, + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + query := "SELECT COUNT(*) FROM key_verifications WHERE outcome = 'VALID'" + result, err := parser.Parse(context.Background(), query) + require.NoError(t, err) + + require.Equal(t, "SELECT COUNT(*) FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_test' AND key_space_id IN ('ks_123') AND outcome = 'VALID' LIMIT 100", result) + }) + + t.Run("restricts access even when user queries different key_space_id", func(t *testing.T) { + parser := chquery.NewParser(chquery.Config{ + WorkspaceID: "ws_test", + SecurityFilters: []chquery.SecurityFilter{ + { + Column: "key_space_id", + AllowedValues: []string{"ks_123"}, // User only has access to ks_123 + }, + }, + Limit: 100, + TableAliases: map[string]string{ + "key_verifications": "default.key_verifications_raw_v2", + }, + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + // User tries to query ks_999 which they don't have access to + query := "SELECT COUNT(*) FROM key_verifications WHERE key_space_id = 'ks_999'" + result, err := parser.Parse(context.Background(), query) + require.NoError(t, err) + + // Both filters are present, creating impossible AND condition + // Injected: key_space_id IN ('ks_123') - only ks_123 + // User's: key_space_id = 'ks_999' + // Result: no rows (ks_123 AND ks_999 = impossible) + require.Equal(t, "SELECT 
COUNT(*) FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_test' AND key_space_id IN ('ks_123') AND key_space_id = 'ks_999' LIMIT 100", result) + }) + + t.Run("supports multiple security filters simultaneously", func(t *testing.T) { + parser := chquery.NewParser(chquery.Config{ + WorkspaceID: "ws_test", + SecurityFilters: []chquery.SecurityFilter{ + { + Column: "key_space_id", + AllowedValues: []string{"ks_123", "ks_456"}, + }, + { + Column: "namespace_id", + AllowedValues: []string{"nsid_111", "nsid_222"}, + }, + }, + Limit: 100, + TableAliases: map[string]string{ + "ratelimits": "default.ratelimits_v2", + }, + AllowedTables: []string{ + "default.ratelimits_v2", + }, + }) + + query := "SELECT COUNT(*) FROM ratelimits" + result, err := parser.Parse(context.Background(), query) + require.NoError(t, err) + + require.Equal(t, "SELECT COUNT(*) FROM default.ratelimits_v2 WHERE workspace_id = 'ws_test' AND namespace_id IN ('nsid_111', 'nsid_222') AND key_space_id IN ('ks_123', 'ks_456') LIMIT 100", result) + }) +} + +func TestParser_WorkspaceFilterInjection(t *testing.T) { + p := chquery.NewParser(chquery.Config{ + WorkspaceID: "ws_victim", + Limit: 1000, + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + expected string + }{ + { + name: "OR to bypass workspace filter", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_attacker' OR 1=1", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_victim' AND workspace_id = 'ws_attacker' OR 1 = 1 LIMIT 1000", + }, + { + name: "NOT to invert workspace filter", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE NOT workspace_id = 'ws_victim'", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_victim' AND NOT workspace_id = 'ws_victim' LIMIT 1000", + }, + { + name: "workspace_id in SELECT to confuse parser", + query: "SELECT 
workspace_id FROM default.key_verifications_raw_v2 WHERE key_id = 'test'", + expected: "SELECT workspace_id FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_victim' AND key_id = 'test' LIMIT 1000", + }, + { + name: "workspace_id with different case", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE WORKSPACE_ID = 'ws_attacker'", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_victim' AND WORKSPACE_ID = 'ws_attacker' LIMIT 1000", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := p.Parse(context.Background(), tt.query) + require.NoError(t, err) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestParser_SQLInjectionWithFilters(t *testing.T) { + p := chquery.NewParser(chquery.Config{ + WorkspaceID: "ws_123", + Limit: 1000, + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + expected string + }{ + { + name: "injection in WHERE clause with quotes", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE key_id = '' OR '1'='1'", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' AND key_id = '' OR '1' = '1' LIMIT 1000", + }, + { + name: "injection with comment", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE key_id = '' -- comment", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' AND key_id = '' LIMIT 1000", + }, + { + name: "injection with multiline comment", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE key_id = '/* comment */'", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' AND key_id = '/* comment */' LIMIT 1000", + }, + { + name: "injection with semicolon", + query: "SELECT * FROM default.key_verifications_raw_v2; DROP TABLE users", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE 
workspace_id = 'ws_123' LIMIT 1000", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := p.Parse(context.Background(), tt.query) + require.NoError(t, err) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestParser_SpecialCharactersInFilters(t *testing.T) { + p := chquery.NewParser(chquery.Config{ + WorkspaceID: "ws_123", + Limit: 1000, + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + expected string + }{ + { + name: "null bytes", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE key_id = '\x00'", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' AND key_id = '\x00' LIMIT 1000", + }, + { + name: "unicode characters", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE key_id = '你好'", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' AND key_id = '你好' LIMIT 1000", + }, + { + name: "emoji", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE key_id = '🔥'", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' AND key_id = '🔥' LIMIT 1000", + }, + { + name: "backslashes", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE key_id = '\\\\'", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' AND key_id = '\\\\' LIMIT 1000", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := p.Parse(context.Background(), tt.query) + require.NoError(t, err) + require.Equal(t, tt.expected, result) + }) + } +} diff --git a/go/pkg/clickhouse/query-parser/limits.go b/go/pkg/clickhouse/query-parser/limits.go new file mode 100644 index 0000000000..788eaf838e --- /dev/null +++ b/go/pkg/clickhouse/query-parser/limits.go @@ -0,0 +1,65 @@ +package queryparser + +import ( + "fmt" + + clickhouse 
"github.com/AfterShip/clickhouse-sql-parser/parser" +) + +// EnforceLimit enforces the limit configuration on the query statement. +// It walks the entire AST to enforce limits on all SELECT statements including subqueries. +func (p *Parser) enforceLimit() error { + if p.config.Limit == 0 { + return nil + } + + // Walk the AST to enforce limits on all SELECT statements including subqueries + clickhouse.Walk(p.stmt, func(node clickhouse.Expr) bool { + // Check if this is a SELECT query + if selectQuery, ok := node.(*clickhouse.SelectQuery); ok { + p.enforceLimitOnSelect(selectQuery) + } + return true + }) + + return nil +} + +// enforceLimitOnSelect enforces the limit on a single SELECT statement +func (p *Parser) enforceLimitOnSelect(stmt *clickhouse.SelectQuery) { + // Check if there's an existing limit + if stmt.Limit == nil || stmt.Limit.Limit == nil { + stmt.Limit = &clickhouse.LimitClause{ + Limit: &clickhouse.NumberLiteral{ + Literal: fmt.Sprintf("%d", p.config.Limit), + }, + } + return + } + + numLit, ok := stmt.Limit.Limit.(*clickhouse.NumberLiteral) + if !ok { + // Not a number literal (e.g., LIMIT ALL), enforce max limit + stmt.Limit.Limit = &clickhouse.NumberLiteral{ + Literal: fmt.Sprintf("%d", p.config.Limit), + } + return + } + + var existingLimit int + _, err := fmt.Sscanf(numLit.Literal, "%d", &existingLimit) + if err != nil { + // Can't parse the number, enforce max limit for safety + stmt.Limit.Limit = &clickhouse.NumberLiteral{ + Literal: fmt.Sprintf("%d", p.config.Limit), + } + return + } + + // Enforce max limit if existing is greater OR if it's negative/invalid + if existingLimit > p.config.Limit || existingLimit < 0 { + stmt.Limit.Limit = &clickhouse.NumberLiteral{ + Literal: fmt.Sprintf("%d", p.config.Limit), + } + } +} diff --git a/go/pkg/clickhouse/query-parser/limits_test.go b/go/pkg/clickhouse/query-parser/limits_test.go new file mode 100644 index 0000000000..a71c4ec5de --- /dev/null +++ b/go/pkg/clickhouse/query-parser/limits_test.go @@ 
-0,0 +1,98 @@ +package queryparser + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParser_EnforceLimit(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + Limit: 100, + AllowedTables: []string{ + "default.keys_v2", + }, + }) + + output, err := p.Parse(context.Background(), "SELECT * FROM default.keys_v2 LIMIT 1000") + require.NoError(t, err) + + require.Equal(t, "SELECT * FROM default.keys_v2 WHERE workspace_id = 'ws_123' LIMIT 100", output) +} + +func TestParser_AddLimit(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + Limit: 50, + AllowedTables: []string{ + "default.keys_v2", + }, + }) + + output, err := p.Parse(context.Background(), "SELECT * FROM default.keys_v2") + require.NoError(t, err) + + require.Equal(t, "SELECT * FROM default.keys_v2 WHERE workspace_id = 'ws_123' LIMIT 50", output) +} + +func TestParser_PreserveSmallerLimit(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + Limit: 100, + AllowedTables: []string{ + "default.keys_v2", + }, + }) + + output, err := p.Parse(context.Background(), "SELECT * FROM default.keys_v2 LIMIT 10") + require.NoError(t, err) + + require.Equal(t, "SELECT * FROM default.keys_v2 WHERE workspace_id = 'ws_123' LIMIT 10", output) +} + +func TestParser_LimitBypassAttempts(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + Limit: 10, + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + expected string + }{ + { + name: "LIMIT with OFFSET to read more", + query: "SELECT * FROM default.key_verifications_raw_v2 LIMIT 100000 OFFSET 0", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' LIMIT 10 OFFSET 0", + }, + { + name: "extremely high LIMIT", + query: "SELECT * FROM default.key_verifications_raw_v2 LIMIT 999999999", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' 
LIMIT 10", + }, + { + name: "negative LIMIT", + query: "SELECT * FROM default.key_verifications_raw_v2 LIMIT -1", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' LIMIT 10", + }, + { + name: "LIMIT ALL", + query: "SELECT * FROM default.key_verifications_raw_v2 LIMIT ALL", + expected: "SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' LIMIT 10", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := p.Parse(context.Background(), tt.query) + require.NoError(t, err) + require.Equal(t, tt.expected, result) + }) + } +} diff --git a/go/pkg/clickhouse/query-parser/parser.go b/go/pkg/clickhouse/query-parser/parser.go new file mode 100644 index 0000000000..cd5282fb30 --- /dev/null +++ b/go/pkg/clickhouse/query-parser/parser.go @@ -0,0 +1,75 @@ +package queryparser + +import ( + "context" + "fmt" + + clickhouse "github.com/AfterShip/clickhouse-sql-parser/parser" + "github.com/unkeyed/unkey/go/pkg/codes" + "github.com/unkeyed/unkey/go/pkg/fault" +) + +// NewParser creates a new parser +func NewParser(config Config) *Parser { + return &Parser{ + config: config, + cteNames: make(map[string]bool), + } +} + +// Parse parses and rewrites a query +func (p *Parser) Parse(ctx context.Context, query string) (string, error) { + // Parse SQL + parser := clickhouse.NewParser(query) + stmts, err := parser.ParseStmts() + if err != nil { + return "", fault.Wrap(err, + fault.Code(codes.User.BadRequest.InvalidAnalyticsQuery.URN()), + fault.Public(fmt.Sprintf("Invalid SQL syntax: %v", err)), + ) + } + + if len(stmts) == 0 { + return "", fault.New("no statements found", + fault.Code(codes.User.BadRequest.InvalidAnalyticsQuery.URN()), + fault.Public("No SQL statements found"), + ) + } + + // Only allow SELECT + stmt, ok := stmts[0].(*clickhouse.SelectQuery) + if !ok { + return "", fault.New("only SELECT queries allowed", + fault.Code(codes.User.BadRequest.InvalidAnalyticsQueryType.URN()), 
+ fault.Public("Only SELECT queries are allowed"), + ) + } + + p.stmt = stmt + + // Build CTE registry FIRST so we know which table references are CTEs + p.buildCTERegistry() + + // Inject security filters + if err := p.injectSecurityFilters(); err != nil { + return "", err + } + + if err := p.rewriteTables(); err != nil { + return "", err + } + + if err := p.injectWorkspaceFilter(); err != nil { + return "", err + } + + if err := p.enforceLimit(); err != nil { + return "", err + } + + if err := p.validateFunctions(); err != nil { + return "", err + } + + return p.stmt.String(), nil +} diff --git a/go/pkg/clickhouse/query-parser/tables.go b/go/pkg/clickhouse/query-parser/tables.go new file mode 100644 index 0000000000..6c3b057928 --- /dev/null +++ b/go/pkg/clickhouse/query-parser/tables.go @@ -0,0 +1,84 @@ +package queryparser + +import ( + "fmt" + "slices" + "strings" + + clickhouse "github.com/AfterShip/clickhouse-sql-parser/parser" + "github.com/unkeyed/unkey/go/pkg/codes" + "github.com/unkeyed/unkey/go/pkg/fault" +) + +func (p *Parser) rewriteTables() error { + if p.stmt.From == nil || p.stmt.From.Expr == nil { + return fault.New("query must have FROM clause", + fault.Code(codes.User.BadRequest.InvalidAnalyticsQuery.URN()), + fault.Public("Query must have a FROM clause"), + ) + } + + var rewriteErr error + + // Walk the ENTIRE statement to find all tables, including those in UNION queries + clickhouse.WalkWithBreak(p.stmt, func(node clickhouse.Expr) bool { + tableIdent, ok := node.(*clickhouse.TableIdentifier) + if !ok { + return true + } + + // Get table name + tableName := tableIdent.Table.Name + if tableIdent.Database != nil { + tableName = tableIdent.Database.Name + "." 
+ tableIdent.Table.Name + } + + // Resolve alias + if actualTable, ok := p.config.TableAliases[tableName]; ok { + tableName = actualTable + } + + // Validate access + if !p.isTableAllowed(tableName) { + rewriteErr = fault.New(fmt.Sprintf("table '%s' not allowed", tableName), + fault.Code(codes.User.BadRequest.InvalidAnalyticsTable.URN()), + fault.Public(fmt.Sprintf("Access to table '%s' is not allowed", tableName)), + ) + + return false + } + + // Update AST + parts := strings.Split(tableName, ".") + if len(parts) == 2 { + tableIdent.Database = &clickhouse.Ident{Name: parts[0]} + tableIdent.Table = &clickhouse.Ident{Name: parts[1]} + } else { + tableIdent.Database = nil + tableIdent.Table = &clickhouse.Ident{Name: tableName} + } + + return true + }) + + return rewriteErr +} + +func (p *Parser) isTableAllowed(tableName string) bool { + // Check if it's a CTE first - CTEs are always allowed, as we check if the cbe is using allowed tables as well + if p.isCTE(tableName) { + return true + } + + if len(p.config.AllowedTables) == 0 { + return false + } + + // Always block system and information_schema tables + lowerTableName := strings.ToLower(tableName) + if strings.HasPrefix(lowerTableName, "system.") || strings.HasPrefix(lowerTableName, "information_schema.") { + return false + } + + return slices.Contains(p.config.AllowedTables, tableName) +} diff --git a/go/pkg/clickhouse/query-parser/tables_test.go b/go/pkg/clickhouse/query-parser/tables_test.go new file mode 100644 index 0000000000..29308aaccb --- /dev/null +++ b/go/pkg/clickhouse/query-parser/tables_test.go @@ -0,0 +1,450 @@ +package queryparser + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParser_TableAliases(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + TableAliases: map[string]string{ + "keys": "default.keys_v2", + }, + AllowedTables: []string{ + "default.keys_v2", + }, + }) + + output, err := p.Parse(context.Background(), "SELECT * FROM 
keys") + require.NoError(t, err) + + require.Equal(t, "SELECT * FROM default.keys_v2 WHERE workspace_id = 'ws_123'", output) +} + +func TestParser_BlockSystemTables(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + }) + + tests := []struct { + name string + query string + }{ + { + name: "system.tables", + query: "SELECT * FROM system.tables", + }, + { + name: "system.columns", + query: "SELECT * FROM system.columns", + }, + { + name: "system.databases", + query: "SELECT * FROM system.databases", + }, + { + name: "system.users", + query: "SELECT * FROM system.users", + }, + { + name: "system.query_log", + query: "SELECT * FROM system.query_log", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := p.Parse(context.Background(), tt.query) + require.Error(t, err, "Access to system tables should be blocked") + require.Contains(t, err.Error(), "not allowed") + }) + } +} + +func TestParser_AllowedTables(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.keys_v2", + }, + }) + + // Allowed table should work + _, err := p.Parse(context.Background(), "SELECT * FROM default.keys_v2") + require.NoError(t, err) + + // Non-allowed table should fail + _, err = p.Parse(context.Background(), "SELECT * FROM default.other_table") + require.Error(t, err) + require.Contains(t, err.Error(), "not allowed") +} + +func TestParser_BlockInformationSchema(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + }) + + tests := []struct { + name string + query string + }{ + { + name: "information_schema.tables", + query: "SELECT * FROM information_schema.tables", + }, + { + name: "INFORMATION_SCHEMA.COLUMNS uppercase", + query: "SELECT * FROM INFORMATION_SCHEMA.COLUMNS", + }, + { + name: "information_schema.schemata", + query: "SELECT * FROM information_schema.schemata", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := 
p.Parse(context.Background(), tt.query) + require.Error(t, err, "Access to information_schema should be blocked") + require.Contains(t, err.Error(), "not allowed") + }) + } +} + +func TestParser_UNIONWithTables(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + Limit: 1000, + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + shouldBlock bool + }{ + { + name: "UNION DISTINCT to system tables", + query: "SELECT key_id FROM default.key_verifications_raw_v2 UNION DISTINCT SELECT name FROM system.tables", + shouldBlock: true, + }, + { + name: "UNION ALL to bypass deduplication", + query: "SELECT key_id FROM default.key_verifications_raw_v2 UNION ALL SELECT name FROM system.databases", + shouldBlock: true, + }, + { + name: "UNION DISTINCT with unauthorized table", + query: "SELECT key_id FROM default.key_verifications_raw_v2 UNION DISTINCT SELECT id FROM default.secrets", + shouldBlock: true, + }, + { + name: "UNION ALL with another allowed table", + query: "SELECT key_id FROM default.key_verifications_raw_v2 UNION ALL SELECT key_id FROM default.key_verifications_raw_v2", + shouldBlock: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := p.Parse(context.Background(), tt.query) + + if tt.shouldBlock { + require.Error(t, err, "UNION with unauthorized/system tables should be blocked") + require.Contains(t, err.Error(), "not allowed", "Error should indicate table is not allowed") + } else { + require.NoError(t, err, "UNION with allowed tables should work") + } + }) + } +} + +func TestParser_JOINWithTables(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + shouldBlock bool + }{ + { + name: "INNER JOIN to system table", + query: "SELECT * FROM default.key_verifications_raw_v2 INNER JOIN 
system.tables ON 1=1", + shouldBlock: true, + }, + { + name: "LEFT JOIN to unauthorized table", + query: "SELECT * FROM default.key_verifications_raw_v2 LEFT JOIN default.secrets ON key_id = id", + shouldBlock: true, + }, + { + name: "RIGHT JOIN to system table", + query: "SELECT * FROM default.key_verifications_raw_v2 RIGHT JOIN system.databases ON 1=1", + shouldBlock: true, + }, + { + name: "CROSS JOIN to system table", + query: "SELECT * FROM default.key_verifications_raw_v2 CROSS JOIN system.users", + shouldBlock: true, + }, + { + name: "Multiple JOINs with one unauthorized", + query: "SELECT * FROM default.key_verifications_raw_v2 t1 JOIN default.key_verifications_raw_v2 t2 ON t1.key_id = t2.key_id JOIN system.tables t3 ON 1=1", + shouldBlock: true, + }, + { + name: "JOIN with allowed tables", + query: "SELECT * FROM default.key_verifications_raw_v2 t1 INNER JOIN default.key_verifications_raw_v2 t2 ON t1.key_id = t2.key_id", + shouldBlock: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := p.Parse(context.Background(), tt.query) + + if tt.shouldBlock { + require.Error(t, err, "JOIN with unauthorized/system tables should be blocked") + require.Contains(t, err.Error(), "not allowed", "Error should indicate table is not allowed") + } else { + require.NoError(t, err, "JOIN with allowed tables should work") + } + }) + } +} + +func TestParser_SubqueryWithTables(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + Limit: 10, + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + expected string + shouldBlock bool + }{ + { + name: "subquery in FROM with allowed table", + query: "SELECT * FROM (SELECT * FROM default.key_verifications_raw_v2 LIMIT 10000) LIMIT 5", + expected: "SELECT * FROM (SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' LIMIT 10) LIMIT 5", + shouldBlock: false, + }, + { + name: "subquery 
with aggregation not selecting workspace_id", + query: "SELECT date, verifications FROM (SELECT time as date, SUM(count) as verifications FROM default.key_verifications_raw_v2 WHERE time >= now() - INTERVAL 60 DAY GROUP BY date) ORDER BY date", + expected: "SELECT date, verifications FROM (SELECT time AS date, SUM(count) AS verifications FROM default.key_verifications_raw_v2 WHERE workspace_id = 'ws_123' AND time >= now() - INTERVAL 60 DAY GROUP BY date LIMIT 10) ORDER BY date LIMIT 10", + shouldBlock: false, + }, + { + name: "subquery in WHERE with system table", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE key_id IN (SELECT key_id FROM system.tables)", + shouldBlock: true, + }, + { + name: "subquery with unauthorized table", + query: "SELECT * FROM default.key_verifications_raw_v2 WHERE key_id IN (SELECT id FROM default.secrets)", + shouldBlock: true, + }, + { + name: "nested subquery with system table", + query: "SELECT * FROM (SELECT * FROM (SELECT * FROM system.tables) t1) t2", + shouldBlock: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := p.Parse(context.Background(), tt.query) + + if tt.shouldBlock { + require.Error(t, err, "Subquery with unauthorized/system tables should be blocked") + require.Contains(t, err.Error(), "not allowed", "Error should indicate table is not allowed") + } else { + require.NoError(t, err, "Subquery with allowed tables should work") + require.Equal(t, tt.expected, result) + } + }) + } +} + +func TestParser_CTEWithTables(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + shouldBlock bool + }{ + { + name: "CTE with system table", + query: "WITH t AS (SELECT * FROM system.tables) SELECT * FROM t", + shouldBlock: true, + }, + { + name: "CTE with unauthorized table", + query: "WITH secrets AS (SELECT * FROM default.secrets) 
SELECT * FROM secrets", + shouldBlock: true, + }, + { + name: "Multiple CTEs with one unauthorized", + query: "WITH t1 AS (SELECT * FROM default.key_verifications_raw_v2), t2 AS (SELECT * FROM system.tables) SELECT * FROM t1 JOIN t2 ON 1=1", + shouldBlock: true, + }, + { + name: "Nested CTEs with system table", + query: "WITH t1 AS (SELECT * FROM default.key_verifications_raw_v2), t2 AS (SELECT * FROM t1), t3 AS (SELECT * FROM system.tables) SELECT * FROM t2", + shouldBlock: true, + }, + { + name: "CTE with allowed table should work", + query: "WITH t AS (SELECT * FROM default.key_verifications_raw_v2) SELECT * FROM t", + shouldBlock: false, + }, + { + name: "Multiple CTEs with allowed tables", + query: "WITH t1 AS (SELECT * FROM default.key_verifications_raw_v2), t2 AS (SELECT * FROM t1) SELECT * FROM t2", + shouldBlock: false, + }, + { + name: "CTE JOIN with allowed table", + query: "WITH t AS (SELECT * FROM default.key_verifications_raw_v2) SELECT * FROM t JOIN default.key_verifications_raw_v2 v ON t.key_id = v.key_id", + shouldBlock: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := p.Parse(context.Background(), tt.query) + + if tt.shouldBlock { + require.Error(t, err, "CTE with unauthorized/system tables should be blocked") + require.Contains(t, err.Error(), "not allowed", "Error should indicate table is not allowed") + } else { + require.NoError(t, err, "CTE with allowed tables should work") + } + }) + } +} + +func TestParser_ScalarSubqueryWithTables(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + shouldBlock bool + }{ + { + name: "scalar subquery with system table", + query: "SELECT key_id, (SELECT COUNT(*) FROM system.tables) AS cnt FROM default.key_verifications_raw_v2", + shouldBlock: true, + }, + { + name: "scalar subquery with unauthorized table", + query: 
"SELECT key_id, (SELECT secret FROM default.secrets LIMIT 1) AS s FROM default.key_verifications_raw_v2", + shouldBlock: true, + }, + { + name: "scalar subquery with allowed table", + query: "SELECT key_id, (SELECT COUNT(*) FROM default.key_verifications_raw_v2) AS cnt FROM default.key_verifications_raw_v2", + shouldBlock: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := p.Parse(context.Background(), tt.query) + + if tt.shouldBlock { + require.Error(t, err, "Scalar subquery with unauthorized/system tables should be blocked") + require.Contains(t, err.Error(), "not allowed", "Error should indicate table is not allowed") + } else { + require.NoError(t, err, "Scalar subquery with allowed tables should work") + } + }) + } +} + +func TestParser_TableFunctions(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + }{ + { + name: "file() table function", + query: "SELECT * FROM file('/etc/passwd', 'CSV')", + }, + { + name: "url() table function", + query: "SELECT * FROM url('http://evil.com/data', 'JSONEachRow')", + }, + { + name: "remote() table function to another server", + query: "SELECT * FROM remote('other-server:9000', 'db', 'table')", + }, + { + name: "s3() table function", + query: "SELECT * FROM s3('https://bucket.s3.amazonaws.com/data.csv')", + }, + { + name: "hdfs() table function", + query: "SELECT * FROM hdfs('hdfs://namenode:port/path')", + }, + { + name: "mysql() table function", + query: "SELECT * FROM mysql('mysql://user:pass@host/db', 'table', 'select * from sensitive')", + }, + { + name: "postgresql() table function", + query: "SELECT * FROM postgresql('postgres://host/db', 'table')", + }, + { + name: "executable() table function", + query: "SELECT * FROM executable('/bin/bash', 'CSV', 'arg1')", + }, + { + name: "azureblobstorage() table function", + query: "SELECT * 
FROM azureblobstorage('https://account.blob.core.windows.net/container/file')", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := p.Parse(context.Background(), tt.query) + require.Error(t, err, "Table function should be blocked") + require.Contains(t, err.Error(), "not allowed", "Error should indicate function is not allowed") + }) + } +} diff --git a/go/pkg/clickhouse/query-parser/types.go b/go/pkg/clickhouse/query-parser/types.go new file mode 100644 index 0000000000..0eb58db949 --- /dev/null +++ b/go/pkg/clickhouse/query-parser/types.go @@ -0,0 +1,27 @@ +package queryparser + +import ( + clickhouse "github.com/AfterShip/clickhouse-sql-parser/parser" +) + +// SecurityFilter represents a row-level security constraint +type SecurityFilter struct { + Column string // Column name + AllowedValues []string // Values user is allowed to access +} + +// Config for the parser +type Config struct { + WorkspaceID string + TableAliases map[string]string + AllowedTables []string + SecurityFilters []SecurityFilter // Row-level security filters (auto-injected) + Limit int +} + +// Parser rewrites ClickHouse queries +type Parser struct { + config Config + stmt *clickhouse.SelectQuery + cteNames map[string]bool // Tracks CTE names defined in WITH clause +} diff --git a/go/pkg/clickhouse/query-parser/validation.go b/go/pkg/clickhouse/query-parser/validation.go new file mode 100644 index 0000000000..f8648c6911 --- /dev/null +++ b/go/pkg/clickhouse/query-parser/validation.go @@ -0,0 +1,151 @@ +package queryparser + +import ( + "fmt" + "strings" + + clickhouse "github.com/AfterShip/clickhouse-sql-parser/parser" + "github.com/unkeyed/unkey/go/pkg/codes" + "github.com/unkeyed/unkey/go/pkg/fault" +) + +// Whitelist of allowed ClickHouse functions for analytics queries +var allowedFunctions = map[string]bool{ + // Aggregate functions + "count": true, + "sum": true, + "avg": true, + "min": true, + "max": true, + "any": true, + "grouparray": true, + 
"groupuniqarray": true, + "uniq": true, + "uniqexact": true, + "quantile": true, + + // Date/Time functions + "now": true, + "now64": true, + "today": true, + "todate": true, + "todatetime": true, + "todatetime64": true, + "tostartofday": true, + "tostartofweek": true, + "tostartofmonth": true, + "tostartofyear": true, + "tostartofhour": true, + "tostartofminute": true, + "date_trunc": true, + "formatdatetime": true, + "fromunixtimestamp64milli": true, + "tounixtimestamp64milli": true, + "tointervalday": true, + "tointervalweek": true, + "tointervalmonth": true, + "tointervalyear": true, + "tointervalhour": true, + "tointervalminute": true, + "tointervalsecond": true, + "tointervalmillisecond": true, + "tointervalmicrosecond": true, + "tointervalnanosecond": true, + "tointervalquarter": true, + + // String functions + "lower": true, + "upper": true, + "substring": true, + "concat": true, + "length": true, + "trim": true, + "startswith": true, + "endswith": true, + + // Math functions + "round": true, + "floor": true, + "ceil": true, + "abs": true, + + // Conditional functions + "if": true, + "case": true, + "coalesce": true, + "countif": true, + + // Type conversion + "tostring": true, + "toint32": true, + "toint64": true, + "tofloat64": true, + + // Array functions + "has": true, + "hasany": true, + "hasall": true, + "arrayjoin": true, + "arrayfilter": true, +} + +// Whitelist of allowed table functions +// Table functions are used in FROM clause and can access external data sources +// Most are blocked by default for security +var allowedTableFunctions = map[string]bool{ + // Currently no table functions are whitelisted + // If needed, safe ones could be added here, e.g.: + // "numbers": true, // generates sequence of numbers +} + +func (p *Parser) validateFunctions() error { + var validateErr error + + clickhouse.WalkWithBreak(p.stmt, func(node clickhouse.Expr) bool { + // Check regular functions + funcExpr, isFuncExpr := node.(*clickhouse.FunctionExpr) + if 
isFuncExpr { + if funcExpr.Name == nil || funcExpr.Name.Name == "" { + return true + } + + funcName := strings.ToLower(funcExpr.Name.Name) + if allowedFunctions[funcName] { + return true + } + + validateErr = fault.New(fmt.Sprintf("function '%s' not allowed", funcName), + fault.Code(codes.User.BadRequest.InvalidAnalyticsFunction.URN()), + fault.Public(fmt.Sprintf("Function '%s' is not allowed", funcName)), + ) + return false + } + + // Check table functions + tableFuncExpr, isTableFuncExpr := node.(*clickhouse.TableFunctionExpr) + if !isTableFuncExpr { + return true + } + + if tableFuncExpr.Name == nil { + return true + } + + funcName := strings.ToLower(tableFuncExpr.Name.String()) + if funcName == "" { + return true + } + + if allowedTableFunctions[funcName] { + return true + } + + validateErr = fault.New(fmt.Sprintf("table function '%s' not allowed", funcName), + fault.Code(codes.User.BadRequest.InvalidAnalyticsFunction.URN()), + fault.Public(fmt.Sprintf("Table function '%s' is not allowed for security reasons", funcName)), + ) + return false + }) + + return validateErr +} diff --git a/go/pkg/clickhouse/query-parser/validation_test.go b/go/pkg/clickhouse/query-parser/validation_test.go new file mode 100644 index 0000000000..9c374416c3 --- /dev/null +++ b/go/pkg/clickhouse/query-parser/validation_test.go @@ -0,0 +1,138 @@ +package queryparser + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParser_BlockNonWhitelistedFunctions(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + shouldFail bool + }{ + { + name: "file function", + query: "SELECT file('/etc/passwd') FROM default.key_verifications_raw_v2", + shouldFail: true, + }, + { + name: "url function", + query: "SELECT url('http://evil.com/data') FROM default.key_verifications_raw_v2", + shouldFail: true, + }, + { + name: 
"system function", + query: "SELECT system('rm -rf /') FROM default.key_verifications_raw_v2", + shouldFail: true, + }, + { + name: "executable function", + query: "SELECT executable('/bin/bash') FROM default.key_verifications_raw_v2", + shouldFail: true, + }, + { + name: "dict_get to access dictionaries", + query: "SELECT dictGet('dict', 'attr', key_id) FROM default.key_verifications_raw_v2", + shouldFail: true, + }, + { + name: "nested safe functions should work", + query: "SELECT count(DISTINCT key_id) FROM default.key_verifications_raw_v2", + shouldFail: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := p.Parse(context.Background(), tt.query) + + if tt.shouldFail { + require.Error(t, err, "Dangerous function should be blocked") + require.Contains(t, err.Error(), "not allowed", "Error should indicate function is not allowed") + } else { + require.NoError(t, err, "Safe function combination should work") + } + }) + } +} + +func TestParser_AllowSafeFunctions(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.keys_v2", + }, + }) + + safeFuncs := []string{"count", "sum", "avg", "max", "min", "now", "toDate"} + + for _, fn := range safeFuncs { + query := "SELECT " + fn + "(*) FROM default.keys_v2" + _, err := p.Parse(context.Background(), query) + require.NoError(t, err, "Function %s should be allowed", fn) + } +} + +func TestParser_OnlySelectAllowed(t *testing.T) { + p := NewParser(Config{ + WorkspaceID: "ws_123", + AllowedTables: []string{ + "default.key_verifications_raw_v2", + }, + }) + + tests := []struct { + name string + query string + }{ + { + name: "INSERT statement", + query: "INSERT INTO default.key_verifications_raw_v2 (key_id) VALUES ('malicious')", + }, + { + name: "UPDATE statement", + query: "UPDATE default.key_verifications_raw_v2 SET key_id = 'hacked'", + }, + { + name: "DELETE statement", + query: "DELETE FROM default.key_verifications_raw_v2", + }, + 
{ + name: "DROP statement", + query: "DROP TABLE default.key_verifications_raw_v2", + }, + { + name: "CREATE statement", + query: "CREATE TABLE malicious (id INT)", + }, + { + name: "ALTER statement", + query: "ALTER TABLE default.key_verifications_raw_v2 ADD COLUMN backdoor STRING", + }, + { + name: "TRUNCATE statement", + query: "TRUNCATE TABLE default.key_verifications_raw_v2", + }, + { + name: "GRANT statement", + query: "GRANT ALL ON *.* TO 'attacker'", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := p.Parse(context.Background(), tt.query) + require.Error(t, err, "Only SELECT queries should be allowed") + }) + } +} diff --git a/go/pkg/clickhouse/schema/001_key_verifications_raw_v2.sql b/go/pkg/clickhouse/schema/001_key_verifications_raw_v2.sql index 64a251ef4f..444a1c8703 100644 --- a/go/pkg/clickhouse/schema/001_key_verifications_raw_v2.sql +++ b/go/pkg/clickhouse/schema/001_key_verifications_raw_v2.sql @@ -10,6 +10,7 @@ CREATE TABLE key_verifications_raw_v2 key_space_id String, -- Empty string if the key has no identity identity_id String, + external_id String, key_id String, -- Right now this is a 3 character airport code, but when we move to aws, @@ -34,6 +35,7 @@ CREATE TABLE key_verifications_raw_v2 INDEX idx_request_id (request_id) TYPE bloom_filter GRANULARITY 1, INDEX idx_identity_id (identity_id) TYPE bloom_filter GRANULARITY 1, + INDEX idx_external_id (external_id) TYPE bloom_filter GRANULARITY 1, INDEX idx_key_id (key_id) TYPE bloom_filter GRANULARITY 1, INDEX idx_tags (tags) TYPE bloom_filter GRANULARITY 1 ) @@ -56,6 +58,7 @@ SELECT workspace_id, key_space_id, identity_id, + '' as external_id, key_id, region, outcome, diff --git a/go/pkg/clickhouse/schema/002_key_verifications_per_minute_v2.sql b/go/pkg/clickhouse/schema/002_key_verifications_per_minute_v2.sql index a045edca09..f036adf9e3 100644 --- a/go/pkg/clickhouse/schema/002_key_verifications_per_minute_v2.sql +++ 
b/go/pkg/clickhouse/schema/002_key_verifications_per_minute_v2.sql @@ -3,6 +3,7 @@ CREATE TABLE key_verifications_per_minute_v2 ( workspace_id String, key_space_id String, identity_id String, + external_id String, key_id String, outcome LowCardinality (String), tags Array(String), @@ -33,6 +34,7 @@ SELECT workspace_id, key_space_id, identity_id, + external_id, key_id, outcome, tags, @@ -49,6 +51,7 @@ GROUP BY time, key_space_id, identity_id, + external_id, key_id, outcome, tags diff --git a/go/pkg/clickhouse/schema/003_key_verifications_per_hour_v2.sql b/go/pkg/clickhouse/schema/003_key_verifications_per_hour_v2.sql index 6397256ffa..5119aea761 100644 --- a/go/pkg/clickhouse/schema/003_key_verifications_per_hour_v2.sql +++ b/go/pkg/clickhouse/schema/003_key_verifications_per_hour_v2.sql @@ -3,6 +3,7 @@ CREATE TABLE key_verifications_per_hour_v2 ( workspace_id String, key_space_id String, identity_id String, + external_id String, key_id String, outcome LowCardinality (String), tags Array(String), @@ -32,6 +33,7 @@ SELECT workspace_id, key_space_id, identity_id, + external_id, key_id, outcome, tags, @@ -48,6 +50,7 @@ GROUP BY time, key_space_id, identity_id, + external_id, key_id, outcome, tags diff --git a/go/pkg/clickhouse/schema/004_key_verifications_per_day_v2.sql b/go/pkg/clickhouse/schema/004_key_verifications_per_day_v2.sql index 6b1ec6c6d4..bd1ce5e7eb 100644 --- a/go/pkg/clickhouse/schema/004_key_verifications_per_day_v2.sql +++ b/go/pkg/clickhouse/schema/004_key_verifications_per_day_v2.sql @@ -3,6 +3,7 @@ CREATE TABLE key_verifications_per_day_v2 ( workspace_id String, key_space_id String, identity_id String, + external_id String, key_id String, outcome LowCardinality (String), tags Array(String), @@ -33,6 +34,7 @@ SELECT workspace_id, key_space_id, identity_id, + external_id, key_id, outcome, tags, @@ -49,6 +51,7 @@ GROUP BY time, key_space_id, identity_id, + external_id, key_id, outcome, tags diff --git 
a/go/pkg/clickhouse/schema/005_key_verifications_per_month_v2.sql b/go/pkg/clickhouse/schema/005_key_verifications_per_month_v2.sql index 2ad9349b72..8749be7592 100644 --- a/go/pkg/clickhouse/schema/005_key_verifications_per_month_v2.sql +++ b/go/pkg/clickhouse/schema/005_key_verifications_per_month_v2.sql @@ -3,6 +3,7 @@ CREATE TABLE key_verifications_per_month_v2 ( workspace_id String, key_space_id String, identity_id String, + external_id String, key_id String, outcome LowCardinality (String), tags Array(String), @@ -33,6 +34,7 @@ SELECT workspace_id, key_space_id, identity_id, + external_id, key_id, outcome, tags, @@ -49,6 +51,7 @@ GROUP BY time, key_space_id, identity_id, + external_id, key_id, outcome, tags diff --git a/go/pkg/clickhouse/schema/types.go b/go/pkg/clickhouse/schema/types.go index 8b0297d77f..01a1f5a4c6 100644 --- a/go/pkg/clickhouse/schema/types.go +++ b/go/pkg/clickhouse/schema/types.go @@ -9,6 +9,7 @@ type KeyVerificationV2 struct { WorkspaceID string `ch:"workspace_id" json:"workspace_id"` KeySpaceID string `ch:"key_space_id" json:"key_space_id"` IdentityID string `ch:"identity_id" json:"identity_id"` + ExternalID string `ch:"external_id" json:"external_id"` KeyID string `ch:"key_id" json:"key_id"` Region string `ch:"region" json:"region"` Outcome string `ch:"outcome" json:"outcome"` diff --git a/go/pkg/clickhouse/user.go b/go/pkg/clickhouse/user.go new file mode 100644 index 0000000000..afdd682401 --- /dev/null +++ b/go/pkg/clickhouse/user.go @@ -0,0 +1,183 @@ +package clickhouse + +import ( + "context" + "fmt" + "regexp" + + driver "github.com/ClickHouse/clickhouse-go/v2" +) + +var ( + // validIdentifier matches safe ClickHouse identifiers (usernames, policy names, quota names, profile names) + // Allows alphanumeric characters and underscores only + validIdentifier = regexp.MustCompile(`^[a-zA-Z0-9_]+$`) + + // validTableName matches safe ClickHouse table names in database.table format + // Allows alphanumeric characters and underscores 
in both database and table parts + validTableName = regexp.MustCompile(`^[a-zA-Z0-9_]+\.[a-zA-Z0-9_]+$`) +) + +// UserConfig contains configuration for creating/updating a ClickHouse user +type UserConfig struct { + WorkspaceID string + Username string + Password string + + // Tables to grant SELECT permission on + AllowedTables []string + + // Quota settings (per window) + QuotaDurationSeconds int32 + MaxQueriesPerWindow int32 + MaxExecutionTimePerWindow int32 + + // Per-query limits (settings profile) + MaxQueryExecutionTime int32 + MaxQueryMemoryBytes int64 + MaxQueryResultRows int32 +} + +// validateIdentifiers checks that all identifiers in the config are safe to use in SQL statements. +// This prevents SQL injection since ClickHouse identifiers cannot be parameterized. +func validateIdentifiers(config UserConfig) error { + // Validate username + if !validIdentifier.MatchString(config.Username) { + return fmt.Errorf("invalid username: must contain only alphanumeric characters and underscores, got %q", config.Username) + } + + // Validate workspace ID (used in policy/quota/profile names and WHERE clauses) + if !validIdentifier.MatchString(config.WorkspaceID) { + return fmt.Errorf("invalid workspace_id: must contain only alphanumeric characters and underscores, got %q", config.WorkspaceID) + } + + // Validate all table names + for _, table := range config.AllowedTables { + if !validTableName.MatchString(table) { + return fmt.Errorf("invalid table name: must be in format 'database.table' with alphanumeric characters and underscores only, got %q", table) + } + } + + return nil +} + +// ConfigureUser creates or updates a ClickHouse user with all necessary permissions, quotas, and settings. +// This is idempotent - it can be run multiple times to update settings. 
+func (c *clickhouse) ConfigureUser(ctx context.Context, config UserConfig) error { + logger := c.logger.With("workspace_id", config.WorkspaceID, "username", config.Username) + + // Validate all identifiers to prevent SQL injection + if err := validateIdentifiers(config); err != nil { + return fmt.Errorf("identifier validation failed: %w", err) + } + + // Create or alter ClickHouse user + logger.Info("creating/updating clickhouse user") + createUserSQL := fmt.Sprintf("CREATE USER IF NOT EXISTS %s IDENTIFIED WITH sha256_password BY {password:String}", config.Username) + err := c.Exec(ctx, createUserSQL, driver.Named("password", config.Password)) + if err != nil { + return fmt.Errorf("failed to create user: %w", err) + } + + // Revoke all permissions + logger.Info("revoking all permissions") + revokeSQL := fmt.Sprintf("REVOKE ALL ON *.* FROM %s", config.Username) + err = c.Exec(ctx, revokeSQL) + if err != nil { + logger.Warn("failed to revoke permissions (user may be new)", "error", err) + } + + // Grant SELECT on specified tables + for _, table := range config.AllowedTables { + logger.Debug("granting SELECT permission", "table", table) + grantSQL := fmt.Sprintf("GRANT SELECT ON %s TO %s", table, config.Username) + err = c.Exec(ctx, grantSQL) + if err != nil { + return fmt.Errorf("failed to grant SELECT on %s: %w", table, err) + } + } + + // Create row-level security (RLS) policies + policyName := fmt.Sprintf("workspace_%s_rls", config.WorkspaceID) + for _, table := range config.AllowedTables { + logger.Debug("creating row policy", "table", table, "policy", policyName) + + createPolicySQL := fmt.Sprintf( + "CREATE ROW POLICY OR REPLACE %s ON %s FOR SELECT USING workspace_id = '%s' TO %s", + policyName, table, config.WorkspaceID, config.Username, + ) + err = c.Exec(ctx, createPolicySQL) + if err != nil { + return fmt.Errorf("failed to create row policy on %s: %w", table, err) + } + } + + // Create or replace quota + quotaName := fmt.Sprintf("workspace_%s_quota", 
config.WorkspaceID) + logger.Info("creating/updating quota", "name", quotaName) + + createOrReplaceQuotaSQL := fmt.Sprintf(` + CREATE QUOTA OR REPLACE %s + FOR INTERVAL %d SECOND + MAX queries = %d, + MAX execution_time = %d + -- MAX result_rows is intentionally NOT set here + -- Per-window result row limits are too restrictive for analytics queries + -- which legitimately need to return large result sets. + -- Per-query limits are still enforced via the settings profile (max_result_rows). + TO %s + `, + quotaName, + config.QuotaDurationSeconds, + config.MaxQueriesPerWindow, + config.MaxExecutionTimePerWindow, + config.Username, + ) + err = c.Exec(ctx, createOrReplaceQuotaSQL) + if err != nil { + return fmt.Errorf("failed to create/replace quota: %w", err) + } + + // Create or replace settings profile + profileName := fmt.Sprintf("workspace_%s_profile", config.WorkspaceID) + logger.Info("creating/updating settings profile", "name", profileName) + + createOrReplaceProfileSQL := fmt.Sprintf(` + CREATE SETTINGS PROFILE OR REPLACE %s SETTINGS + max_execution_time = %d, + max_memory_usage = %d, + max_result_rows = %d, + readonly = 2 + TO %s + `, + profileName, + config.MaxQueryExecutionTime, + config.MaxQueryMemoryBytes, + config.MaxQueryResultRows, + config.Username, + ) + err = c.Exec(ctx, createOrReplaceProfileSQL) + if err != nil { + return fmt.Errorf("failed to create/replace settings profile: %w", err) + } + + logger.Info("successfully configured clickhouse user", + "tables", len(config.AllowedTables), + "max_queries_per_window", config.MaxQueriesPerWindow, + "quota_duration_seconds", config.QuotaDurationSeconds, + ) + + return nil +} + +// DefaultAllowedTables returns the default list of tables for analytics access +func DefaultAllowedTables() []string { + return []string{ + // Key verifications + "default.key_verifications_raw_v2", + "default.key_verifications_per_minute_v2", + "default.key_verifications_per_hour_v2", + "default.key_verifications_per_day_v2", + 
"default.key_verifications_per_month_v2", + } +} diff --git a/go/pkg/codes/codes.go b/go/pkg/codes/codes.go index 1dc935d0df..303d94ce9c 100644 --- a/go/pkg/codes/codes.go +++ b/go/pkg/codes/codes.go @@ -48,6 +48,12 @@ const ( // CategoryUserBadRequest represents invalid user input errors. CategoryUserBadRequest Category = "bad_request" + // CategoryUserUnprocessableEntity represents requests that are syntactically correct but cannot be processed. + CategoryUserUnprocessableEntity Category = "unprocessable_entity" + + // CategoryUserTooManyRequests represents rate limit exceeded errors. + CategoryUserTooManyRequests Category = "too_many_requests" + // CategoryNotFound represents resource not found errors. CategoryNotFound Category = "not_found" diff --git a/go/pkg/codes/constants_gen.go b/go/pkg/codes/constants_gen.go index 204cbbd4ad..564fa3bb47 100644 --- a/go/pkg/codes/constants_gen.go +++ b/go/pkg/codes/constants_gen.go @@ -20,6 +20,28 @@ const ( UserErrorsBadRequestRequestTimeout URN = "err:user:bad_request:request_timeout" // ClientClosedRequest indicates the client closed the connection before the request completed. UserErrorsBadRequestClientClosedRequest URN = "err:user:bad_request:client_closed_request" + // InvalidAnalyticsQuery indicates the analytics SQL query is invalid or has syntax errors. + UserErrorsBadRequestInvalidAnalyticsQuery URN = "err:user:bad_request:invalid_analytics_query" + // InvalidAnalyticsTable indicates the table referenced in the analytics query is not allowed or does not exist. + UserErrorsBadRequestInvalidAnalyticsTable URN = "err:user:bad_request:invalid_analytics_table" + // InvalidAnalyticsFunction indicates a disallowed function was used in the analytics query. + UserErrorsBadRequestInvalidAnalyticsFunction URN = "err:user:bad_request:invalid_analytics_function" + // InvalidAnalyticsQueryType indicates the query type or operation is not supported (e.g., INSERT, UPDATE, DELETE). 
+ UserErrorsBadRequestInvalidAnalyticsQueryType URN = "err:user:bad_request:invalid_analytics_query_type" + + // UnprocessableEntity + + // QueryExecutionTimeout indicates the query exceeded the maximum execution time limit. + UserErrorsUnprocessableEntityQueryExecutionTimeout URN = "err:user:unprocessable_entity:query_execution_timeout" + // QueryMemoryLimitExceeded indicates the query exceeded the maximum memory usage limit. + UserErrorsUnprocessableEntityQueryMemoryLimitExceeded URN = "err:user:unprocessable_entity:query_memory_limit_exceeded" + // QueryRowsLimitExceeded indicates the query exceeded the maximum rows to read limit. + UserErrorsUnprocessableEntityQueryRowsLimitExceeded URN = "err:user:unprocessable_entity:query_rows_limit_exceeded" + + // TooManyRequests + + // QueryQuotaExceeded indicates the workspace has exceeded their query quota for the current window. + UserErrorsTooManyRequestsQueryQuotaExceeded URN = "err:user:too_many_requests:query_quota_exceeded" // ---------------- // UnkeyAuthErrors @@ -65,6 +87,11 @@ const ( // NotFound indicates the requested API was not found. UnkeyDataErrorsApiNotFound URN = "err:unkey:data:api_not_found" + // KeySpace + + // NotFound indicates the requested key space was not found. + UnkeyDataErrorsKeySpaceNotFound URN = "err:unkey:data:key_space_not_found" + // Permission // Duplicate indicates the requested permission already exists. @@ -108,6 +135,13 @@ const ( // NotFound indicates the requested audit log was not found. UnkeyDataErrorsAuditLogNotFound URN = "err:unkey:data:audit_log_not_found" + // Analytics + + // NotConfigured indicates analytics is not configured for the workspace. + UnkeyDataErrorsAnalyticsNotConfigured URN = "err:unkey:data:analytics_not_configured" + // ConnectionFailed indicates the connection to the analytics database failed. 
+ UnkeyDataErrorsAnalyticsConnectionFailed URN = "err:unkey:data:analytics_connection_failed" + // ---------------- // UnkeyAppErrors // ---------------- diff --git a/go/pkg/codes/generate.go b/go/pkg/codes/generate.go index 3b994c914a..1f7143b78b 100644 --- a/go/pkg/codes/generate.go +++ b/go/pkg/codes/generate.go @@ -5,6 +5,7 @@ package main import ( + "encoding/json" "fmt" "go/ast" "go/parser" @@ -12,9 +13,12 @@ import ( "os" "path/filepath" "reflect" + "sort" "strings" "github.com/unkeyed/unkey/go/pkg/codes" + "golang.org/x/text/cases" + "golang.org/x/text/language" ) // commentMap stores comments for types and fields @@ -46,16 +50,45 @@ func main() { f.WriteString("// Error code constants for use in switch statements for exhaustive checking\n") f.WriteString("const (\n") + // Track all error codes for MDX generation + allErrorCodes := []ErrorCodeInfo{} + // Process each top-level error domain using reflection - processErrorDomain(f, "User", reflect.ValueOf(codes.User)) - processErrorDomain(f, "Unkey", reflect.ValueOf(codes.Auth)) - processErrorDomain(f, "Unkey", reflect.ValueOf(codes.Data)) - processErrorDomain(f, "Unkey", reflect.ValueOf(codes.App)) - processErrorDomain(f, "Unkey", reflect.ValueOf(codes.Gateway)) + allErrorCodes = append(allErrorCodes, processErrorDomain(f, "User", "User", reflect.ValueOf(codes.User))...) + allErrorCodes = append(allErrorCodes, processErrorDomain(f, "Unkey", "Auth", reflect.ValueOf(codes.Auth))...) + allErrorCodes = append(allErrorCodes, processErrorDomain(f, "Unkey", "Data", reflect.ValueOf(codes.Data))...) + allErrorCodes = append(allErrorCodes, processErrorDomain(f, "Unkey", "App", reflect.ValueOf(codes.App))...) + allErrorCodes = append(allErrorCodes, processErrorDomain(f, "Unkey", "Gateway", reflect.ValueOf(codes.Gateway))...) 
f.WriteString(")\n") fmt.Println("Generated error constants with documentation") + + // Generate missing MDX documentation files + if err := generateMissingMDXFiles(allErrorCodes); err != nil { + fmt.Fprintf(os.Stderr, "Error generating MDX files: %v\n", err) + os.Exit(1) + } + + // Remove obsolete MDX files that don't have corresponding error codes + if err := removeObsoleteMDXFiles(allErrorCodes); err != nil { + fmt.Fprintf(os.Stderr, "Error removing obsolete MDX files: %v\n", err) + os.Exit(1) + } + + // Update docs.json with all error files + if err := updateDocsJSON(allErrorCodes); err != nil { + fmt.Fprintf(os.Stderr, "Error updating docs.json: %v\n", err) + os.Exit(1) + } +} + +// ErrorCodeInfo stores information about an error code for MDX generation +type ErrorCodeInfo struct { + URN string + Name string + Description string + Domain string // "User", "Auth", "Data", "App", "Gateway" } // extractComments parses source files to get documentation comments @@ -121,7 +154,7 @@ func extractComments() error { } // processErrorDomain extracts error codes from a domain using reflection -func processErrorDomain(f *os.File, systemName string, domainValue reflect.Value) { +func processErrorDomain(f *os.File, systemName string, domain string, domainValue reflect.Value) []ErrorCodeInfo { // Section header domainType := domainValue.Type() domainName := domainType.Name() @@ -130,6 +163,8 @@ func processErrorDomain(f *os.File, systemName string, domainValue reflect.Value f.WriteString("// ----------------\n") f.WriteString("\n") + errorCodes := []ErrorCodeInfo{} + // Iterate through categories (fields of the domain struct) for i := 0; i < domainValue.NumField(); i++ { categoryField := domainValue.Field(i) @@ -138,16 +173,20 @@ func processErrorDomain(f *os.File, systemName string, domainValue reflect.Value fmt.Fprintf(f, "// %s\n\n", categoryName) // Iterate through error codes in this category - processCategory(f, systemName, domainName, categoryName, categoryField) + 
codes := processCategory(f, systemName, domainName, categoryName, domain, categoryField) + errorCodes = append(errorCodes, codes...) f.WriteString("\n") } + + return errorCodes } // processCategory extracts error codes from a category using reflection -func processCategory(f *os.File, systemName, domainName, categoryName string, categoryValue reflect.Value) { +func processCategory(f *os.File, systemName, domainName, categoryName, domain string, categoryValue reflect.Value) []ErrorCodeInfo { // Iterate through error codes in this category categoryType := categoryValue.Type() + errorCodes := []ErrorCodeInfo{} for j := 0; j < categoryValue.NumField(); j++ { codeField := categoryValue.Field(j) @@ -162,7 +201,8 @@ func processCategory(f *os.File, systemName, domainName, categoryName string, ca // Get the string representation codeStr := codeObj.URN() - // Look up and write comments if available + // Extract description from comments + description := "" if comments, ok := commentMap[categoryType.Name()]; ok { if comment, ok := comments[codeName]; ok { // Clean up the comment and add it to the output @@ -171,6 +211,10 @@ func processCategory(f *os.File, systemName, domainName, categoryName string, ca line = strings.TrimSpace(line) if line != "" { f.WriteString(fmt.Sprintf("\t// %s\n", line)) + // Use first line as description + if description == "" { + description = line + } } } } @@ -178,5 +222,355 @@ func processCategory(f *os.File, systemName, domainName, categoryName string, ca // Write the constant f.WriteString(fmt.Sprintf("\t%s URN = \"%s\"\n", constName, codeStr)) + + // Store error code info for MDX generation + errorCodes = append(errorCodes, ErrorCodeInfo{ + URN: string(codeObj.URN()), + Name: codeObj.Specific, + Description: description, + Domain: domain, + }) } + + return errorCodes +} + +// generateMissingMDXFiles creates MDX documentation files for error codes that don't have them +func generateMissingMDXFiles(errorCodes []ErrorCodeInfo) error { + // Get 
the base docs directory path (relative to this file) + baseDocsPath := filepath.Join("..", "..", "..", "apps", "docs", "errors") + + created := 0 + skipped := 0 + + for _, errCode := range errorCodes { + // Skip gateway errors (these are internal to the gateway, not API errors) + if errCode.Domain == "Gateway" { + skipped++ + continue + } + + // Parse URN to get file path + // Example: err:user:bad_request:client_closed_request -> user/bad_request/client_closed_request.mdx + parts := strings.Split(errCode.URN, ":") + if len(parts) < 4 || parts[0] != "err" { + continue + } + + // Build file path from URN parts (skip "err:" prefix) + pathParts := parts[1 : len(parts)-1] + fileName := parts[len(parts)-1] + ".mdx" + filePath := filepath.Join(append([]string{baseDocsPath}, append(pathParts, fileName)...)...) + + // Check if file already exists + if _, err := os.Stat(filePath); err == nil { + skipped++ + continue + } + + // Create directory if it doesn't exist + dir := filepath.Dir(filePath) + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("failed to create directory %s: %w", dir, err) + } + + // Generate description + description := errCode.Description + if description == "" { + description = "Error occurred" + } + + // Create MDX file with basic template + content := fmt.Sprintf(`--- +title: "%s" +description: "%s" +--- + +`+"`%s`"+` + +`, errCode.Name, description, errCode.URN) + + if err := os.WriteFile(filePath, []byte(content), 0644); err != nil { + return fmt.Errorf("failed to write file %s: %w", filePath, err) + } + + created++ + fmt.Printf("Created: %s\n", filePath) + } + + fmt.Printf("\nMDX files: %d created, %d already existed\n", created, skipped) + return nil +} + +// removeObsoleteMDXFiles deletes MDX files that don't have corresponding error codes +func removeObsoleteMDXFiles(errorCodes []ErrorCodeInfo) error { + baseDocsPath := filepath.Join("..", "..", "..", "apps", "docs", "errors") + + // Build a set of valid file paths from error 
codes + validPaths := make(map[string]bool) + for _, errCode := range errorCodes { + // Skip gateway errors + if errCode.Domain == "Gateway" { + continue + } + + parts := strings.Split(errCode.URN, ":") + if len(parts) < 4 || parts[0] != "err" { + continue + } + + // Build file path from URN + pathParts := parts[1 : len(parts)-1] + fileName := parts[len(parts)-1] + ".mdx" + filePath := filepath.Join(append([]string{baseDocsPath}, append(pathParts, fileName)...)...) + validPaths[filePath] = true + } + + deleted := 0 + + // Walk through the errors directory + err := filepath.Walk(baseDocsPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip directories and non-MDX files + if info.IsDir() || !strings.HasSuffix(path, ".mdx") { + return nil + } + + // Skip overview.mdx (it's not an error code file) + if strings.HasSuffix(path, "overview.mdx") { + return nil + } + + // Check if this file has a corresponding error code + if !validPaths[path] { + // Delete obsolete file + if err := os.Remove(path); err != nil { + return fmt.Errorf("failed to delete %s: %w", path, err) + } + deleted++ + fmt.Printf("Deleted obsolete: %s\n", path) + } + + return nil + }) + + if err != nil { + return err + } + + if deleted > 0 { + fmt.Printf("\nRemoved %d obsolete MDX file(s)\n", deleted) + } + + return nil +} + +// updateDocsJSON updates the docs.json navigation to include all error pages +func updateDocsJSON(errorCodes []ErrorCodeInfo) error { + docsJSONPath := filepath.Join("..", "..", "..", "apps", "docs", "docs.json") + + // Read existing docs.json + data, err := os.ReadFile(docsJSONPath) + if err != nil { + return fmt.Errorf("failed to read docs.json: %w", err) + } + + // Parse JSON into a map to preserve structure + var docsConfig map[string]interface{} + if err := json.Unmarshal(data, &docsConfig); err != nil { + return fmt.Errorf("failed to parse docs.json: %w", err) + } + + // Navigate to the errors section in navigation + 
navigation, ok := docsConfig["navigation"].(map[string]interface{}) + if !ok { + return fmt.Errorf("navigation not found in docs.json") + } + + dropdowns, ok := navigation["dropdowns"].([]interface{}) + if !ok { + return fmt.Errorf("dropdowns not found in navigation") + } + + // Find the Documentation dropdown + var docsDropdown map[string]interface{} + for _, dropdown := range dropdowns { + dd := dropdown.(map[string]interface{}) + if dd["dropdown"] == "Documentation" { + docsDropdown = dd + break + } + } + + if docsDropdown == nil { + return fmt.Errorf("Documentation dropdown not found") + } + + groups := docsDropdown["groups"].([]interface{}) + + // Find the Errors group (should be one of the top-level groups) + var errorsGroup map[string]interface{} + var errorsGroupIndex int + for i, group := range groups { + g := group.(map[string]interface{}) + if g["group"] == "Errors" { + errorsGroup = g + errorsGroupIndex = i + break + } + } + + if errorsGroup == nil { + return fmt.Errorf("Errors group not found in groups") + } + + // Organize error codes by category + type ErrorCategory struct { + Name string + Path string + Files []string + } + + unkeyCategories := make(map[string]*ErrorCategory) + userCategories := make(map[string]*ErrorCategory) + + for _, errCode := range errorCodes { + // Skip gateway errors (these are internal to the gateway, not API errors) + if errCode.Domain == "Gateway" { + continue + } + + parts := strings.Split(errCode.URN, ":") + if len(parts) < 4 { + continue + } + + system := parts[1] // "user" or "unkey" + category := parts[2] // "bad_request", "application", etc. 
+ errorName := parts[len(parts)-1] + + // Build the path for this error + pathParts := parts[1 : len(parts)-1] + errorPath := "errors/" + strings.Join(append(pathParts, errorName), "/") + + if system == "unkey" { + if _, exists := unkeyCategories[category]; !exists { + // Convert category name to title case + titleName := strings.ReplaceAll(category, "_", " ") + caser := cases.Title(language.English) + titleName = caser.String(titleName) + unkeyCategories[category] = &ErrorCategory{ + Name: titleName, + Path: category, + Files: []string{}, + } + } + unkeyCategories[category].Files = append(unkeyCategories[category].Files, errorPath) + } else if system == "user" { + if _, exists := userCategories[category]; !exists { + titleName := strings.ReplaceAll(category, "_", " ") + caser := cases.Title(language.English) + titleName = caser.String(titleName) + userCategories[category] = &ErrorCategory{ + Name: titleName, + Path: category, + Files: []string{}, + } + } + userCategories[category].Files = append(userCategories[category].Files, errorPath) + } + } + + // Sort files within each category + for _, cat := range unkeyCategories { + sort.Strings(cat.Files) + } + for _, cat := range userCategories { + sort.Strings(cat.Files) + } + + // Build the new errors pages structure + errorPages := []interface{}{ + "errors/overview", + } + + // Add Unkey Errors section + unkeyErrorsPages := []interface{}{} + + // Sort category keys for consistent output + unkeyCategoryKeys := make([]string, 0, len(unkeyCategories)) + for k := range unkeyCategories { + unkeyCategoryKeys = append(unkeyCategoryKeys, k) + } + sort.Strings(unkeyCategoryKeys) + + for _, catKey := range unkeyCategoryKeys { + cat := unkeyCategories[catKey] + catPages := make([]interface{}, len(cat.Files)) + for i, file := range cat.Files { + catPages[i] = file + } + unkeyErrorsPages = append(unkeyErrorsPages, map[string]interface{}{ + "group": cat.Name, + "pages": catPages, + }) + } + + errorPages = append(errorPages, 
map[string]interface{}{ + "group": "Unkey Errors", + "pages": unkeyErrorsPages, + }) + + // Add User Errors section + userErrorsPages := []interface{}{} + + userCategoryKeys := make([]string, 0, len(userCategories)) + for k := range userCategories { + userCategoryKeys = append(userCategoryKeys, k) + } + sort.Strings(userCategoryKeys) + + for _, catKey := range userCategoryKeys { + cat := userCategories[catKey] + catPages := make([]interface{}, len(cat.Files)) + for i, file := range cat.Files { + catPages[i] = file + } + + // If only one category, don't create a subgroup + if len(userCategories) == 1 { + userErrorsPages = catPages + } else { + userErrorsPages = append(userErrorsPages, map[string]interface{}{ + "group": cat.Name, + "pages": catPages, + }) + } + } + + errorPages = append(errorPages, map[string]interface{}{ + "group": "User Errors", + "pages": userErrorsPages, + }) + + // Update the errors group + errorsGroup["pages"] = errorPages + groups[errorsGroupIndex] = errorsGroup + docsDropdown["groups"] = groups + + // Write back to docs.json with nice formatting + updatedJSON, err := json.MarshalIndent(docsConfig, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal docs.json: %w", err) + } + + if err := os.WriteFile(docsJSONPath, updatedJSON, 0644); err != nil { + return fmt.Errorf("failed to write docs.json: %w", err) + } + + fmt.Println("Updated docs.json with all error pages") + return nil } diff --git a/go/pkg/codes/unkey_data.go b/go/pkg/codes/unkey_data.go index 5d21739314..557f164a30 100644 --- a/go/pkg/codes/unkey_data.go +++ b/go/pkg/codes/unkey_data.go @@ -20,6 +20,12 @@ type dataApi struct { NotFound Code } +// dataKeySpace defines errors related to key space operations. +type dataKeySpace struct { + // NotFound indicates the requested key space was not found. + NotFound Code +} + // dataPermission defines errors related to permission operations. 
type dataPermission struct { // Duplicate indicates the requested permission already exists. @@ -73,6 +79,15 @@ type dataAuditLog struct { NotFound Code } +// dataAnalytics defines errors related to analytics operations. +type dataAnalytics struct { + // NotConfigured indicates analytics is not configured for the workspace. + NotConfigured Code + + // ConnectionFailed indicates the connection to the analytics database failed. + ConnectionFailed Code +} + // UnkeyDataErrors defines all data-related errors in the Unkey system. // These errors generally relate to CRUD operations on domain entities. type UnkeyDataErrors struct { @@ -80,6 +95,7 @@ type UnkeyDataErrors struct { Key dataKey Workspace dataWorkspace Api dataApi + KeySpace dataKeySpace Permission dataPermission Role dataRole KeyAuth dataKeyAuth @@ -87,6 +103,7 @@ type UnkeyDataErrors struct { RatelimitOverride dataRatelimitOverride Identity dataIdentity AuditLog dataAuditLog + Analytics dataAnalytics } // Data contains all predefined data-related error codes. 
@@ -105,6 +122,10 @@ var Data = UnkeyDataErrors{ NotFound: Code{SystemUnkey, CategoryUnkeyData, "api_not_found"}, }, + KeySpace: dataKeySpace{ + NotFound: Code{SystemUnkey, CategoryUnkeyData, "key_space_not_found"}, + }, + Permission: dataPermission{ NotFound: Code{SystemUnkey, CategoryUnkeyData, "permission_not_found"}, Duplicate: Code{SystemUnkey, CategoryUnkeyData, "permission_already_exists"}, @@ -136,4 +157,9 @@ var Data = UnkeyDataErrors{ AuditLog: dataAuditLog{ NotFound: Code{SystemUnkey, CategoryUnkeyData, "audit_log_not_found"}, }, + + Analytics: dataAnalytics{ + NotConfigured: Code{SystemUnkey, CategoryUnkeyData, "analytics_not_configured"}, + ConnectionFailed: Code{SystemUnkey, CategoryUnkeyData, "analytics_connection_failed"}, + }, } diff --git a/go/pkg/codes/user_request.go b/go/pkg/codes/user_request.go index 4a230b3c58..50aac0287b 100644 --- a/go/pkg/codes/user_request.go +++ b/go/pkg/codes/user_request.go @@ -10,6 +10,30 @@ type userBadRequest struct { RequestTimeout Code // ClientClosedRequest indicates the client closed the connection before the request completed. ClientClosedRequest Code + // InvalidAnalyticsQuery indicates the analytics SQL query is invalid or has syntax errors. + InvalidAnalyticsQuery Code + // InvalidAnalyticsTable indicates the table referenced in the analytics query is not allowed or does not exist. + InvalidAnalyticsTable Code + // InvalidAnalyticsFunction indicates a disallowed function was used in the analytics query. + InvalidAnalyticsFunction Code + // InvalidAnalyticsQueryType indicates the query type or operation is not supported (e.g., INSERT, UPDATE, DELETE). + InvalidAnalyticsQueryType Code +} + +// userUnprocessableEntity defines errors for requests that are syntactically correct but cannot be processed. +type userUnprocessableEntity struct { + // QueryExecutionTimeout indicates the query exceeded the maximum execution time limit. 
+ QueryExecutionTimeout Code + // QueryMemoryLimitExceeded indicates the query exceeded the maximum memory usage limit. + QueryMemoryLimitExceeded Code + // QueryRowsLimitExceeded indicates the query exceeded the maximum rows to read limit. + QueryRowsLimitExceeded Code +} + +// userTooManyRequests defines errors related to rate limiting and quota exceeded. +type userTooManyRequests struct { + // QueryQuotaExceeded indicates the workspace has exceeded their query quota for the current window. + QueryQuotaExceeded Code } // UserErrors defines all user-related errors in the Unkey system. @@ -17,6 +41,10 @@ type userBadRequest struct { type UserErrors struct { // BadRequest contains errors related to invalid user input. BadRequest userBadRequest + // UnprocessableEntity contains errors for syntactically valid requests that cannot be processed. + UnprocessableEntity userUnprocessableEntity + // TooManyRequests contains errors related to rate limiting. + TooManyRequests userTooManyRequests } // User contains all predefined user error codes. 
@@ -28,5 +56,17 @@ var User = UserErrors{ RequestBodyTooLarge: Code{SystemUser, CategoryUserBadRequest, "request_body_too_large"}, RequestTimeout: Code{SystemUser, CategoryUserBadRequest, "request_timeout"}, ClientClosedRequest: Code{SystemUser, CategoryUserBadRequest, "client_closed_request"}, + InvalidAnalyticsQuery: Code{SystemUser, CategoryUserBadRequest, "invalid_analytics_query"}, + InvalidAnalyticsTable: Code{SystemUser, CategoryUserBadRequest, "invalid_analytics_table"}, + InvalidAnalyticsFunction: Code{SystemUser, CategoryUserBadRequest, "invalid_analytics_function"}, + InvalidAnalyticsQueryType: Code{SystemUser, CategoryUserBadRequest, "invalid_analytics_query_type"}, + }, + UnprocessableEntity: userUnprocessableEntity{ + QueryExecutionTimeout: Code{SystemUser, CategoryUserUnprocessableEntity, "query_execution_timeout"}, + QueryMemoryLimitExceeded: Code{SystemUser, CategoryUserUnprocessableEntity, "query_memory_limit_exceeded"}, + QueryRowsLimitExceeded: Code{SystemUser, CategoryUserUnprocessableEntity, "query_rows_limit_exceeded"}, + }, + TooManyRequests: userTooManyRequests{ + QueryQuotaExceeded: Code{SystemUser, CategoryUserTooManyRequests, "query_quota_exceeded"}, }, } diff --git a/go/pkg/db/api_find_key_auth_by_ids.sql_generated.go b/go/pkg/db/api_find_key_auth_by_ids.sql_generated.go new file mode 100644 index 0000000000..ed04a54569 --- /dev/null +++ b/go/pkg/db/api_find_key_auth_by_ids.sql_generated.go @@ -0,0 +1,74 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: api_find_key_auth_by_ids.sql + +package db + +import ( + "context" + "strings" +) + +const findKeyAuthsByIds = `-- name: FindKeyAuthsByIds :many +SELECT ka.id as key_auth_id, a.id as api_id +FROM apis a +JOIN key_auth as ka ON ka.id = a.key_auth_id +WHERE a.workspace_id = ? + AND a.id IN (/*SLICE:api_ids*/?) 
+ AND ka.deleted_at_m IS NULL + AND a.deleted_at_m IS NULL +` + +type FindKeyAuthsByIdsParams struct { + WorkspaceID string `db:"workspace_id"` + ApiIds []string `db:"api_ids"` +} + +type FindKeyAuthsByIdsRow struct { + KeyAuthID string `db:"key_auth_id"` + ApiID string `db:"api_id"` +} + +// FindKeyAuthsByIds +// +// SELECT ka.id as key_auth_id, a.id as api_id +// FROM apis a +// JOIN key_auth as ka ON ka.id = a.key_auth_id +// WHERE a.workspace_id = ? +// AND a.id IN (/*SLICE:api_ids*/?) +// AND ka.deleted_at_m IS NULL +// AND a.deleted_at_m IS NULL +func (q *Queries) FindKeyAuthsByIds(ctx context.Context, db DBTX, arg FindKeyAuthsByIdsParams) ([]FindKeyAuthsByIdsRow, error) { + query := findKeyAuthsByIds + var queryParams []interface{} + queryParams = append(queryParams, arg.WorkspaceID) + if len(arg.ApiIds) > 0 { + for _, v := range arg.ApiIds { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:api_ids*/?", strings.Repeat(",?", len(arg.ApiIds))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:api_ids*/?", "NULL", 1) + } + rows, err := db.QueryContext(ctx, query, queryParams...) + if err != nil { + return nil, err + } + defer rows.Close() + var items []FindKeyAuthsByIdsRow + for rows.Next() { + var i FindKeyAuthsByIdsRow + if err := rows.Scan(&i.KeyAuthID, &i.ApiID); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/go/pkg/db/api_find_key_auth_by_key_auth_ids.sql_generated.go b/go/pkg/db/api_find_key_auth_by_key_auth_ids.sql_generated.go new file mode 100644 index 0000000000..2e0f3f2ea2 --- /dev/null +++ b/go/pkg/db/api_find_key_auth_by_key_auth_ids.sql_generated.go @@ -0,0 +1,74 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: api_find_key_auth_by_key_auth_ids.sql + +package db + +import ( + "context" + "strings" +) + +const findKeyAuthsByKeyAuthIds = `-- name: FindKeyAuthsByKeyAuthIds :many +SELECT ka.id as key_auth_id, a.id as api_id +FROM key_auth as ka +JOIN apis a ON a.key_auth_id = ka.id +WHERE a.workspace_id = ? + AND ka.id IN (/*SLICE:key_auth_ids*/?) + AND ka.deleted_at_m IS NULL + AND a.deleted_at_m IS NULL +` + +type FindKeyAuthsByKeyAuthIdsParams struct { + WorkspaceID string `db:"workspace_id"` + KeyAuthIds []string `db:"key_auth_ids"` +} + +type FindKeyAuthsByKeyAuthIdsRow struct { + KeyAuthID string `db:"key_auth_id"` + ApiID string `db:"api_id"` +} + +// FindKeyAuthsByKeyAuthIds +// +// SELECT ka.id as key_auth_id, a.id as api_id +// FROM key_auth as ka +// JOIN apis a ON a.key_auth_id = ka.id +// WHERE a.workspace_id = ? +// AND ka.id IN (/*SLICE:key_auth_ids*/?) +// AND ka.deleted_at_m IS NULL +// AND a.deleted_at_m IS NULL +func (q *Queries) FindKeyAuthsByKeyAuthIds(ctx context.Context, db DBTX, arg FindKeyAuthsByKeyAuthIdsParams) ([]FindKeyAuthsByKeyAuthIdsRow, error) { + query := findKeyAuthsByKeyAuthIds + var queryParams []interface{} + queryParams = append(queryParams, arg.WorkspaceID) + if len(arg.KeyAuthIds) > 0 { + for _, v := range arg.KeyAuthIds { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:key_auth_ids*/?", strings.Repeat(",?", len(arg.KeyAuthIds))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:key_auth_ids*/?", "NULL", 1) + } + rows, err := db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []FindKeyAuthsByKeyAuthIdsRow + for rows.Next() { + var i FindKeyAuthsByKeyAuthIdsRow + if err := rows.Scan(&i.KeyAuthID, &i.ApiID); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/go/pkg/db/bulk_clickhouse_workspace_settings_insert.sql_generated.go b/go/pkg/db/bulk_clickhouse_workspace_settings_insert.sql_generated.go new file mode 100644 index 0000000000..a188e8474c --- /dev/null +++ b/go/pkg/db/bulk_clickhouse_workspace_settings_insert.sql_generated.go @@ -0,0 +1,48 @@ +// Code generated by sqlc bulk insert plugin. DO NOT EDIT. + +package db + +import ( + "context" + "fmt" + "strings" +) + +// bulkInsertClickhouseWorkspaceSettings is the base query for bulk insert +const bulkInsertClickhouseWorkspaceSettings = `INSERT INTO ` + "`" + `clickhouse_workspace_settings` + "`" + ` ( workspace_id, username, password_encrypted, quota_duration_seconds, max_queries_per_window, max_execution_time_per_window, max_query_execution_time, max_query_memory_bytes, max_query_result_rows, created_at, updated_at ) VALUES %s` + +// InsertClickhouseWorkspaceSettingses performs bulk insert in a single query +func (q *BulkQueries) InsertClickhouseWorkspaceSettingses(ctx context.Context, db DBTX, args []InsertClickhouseWorkspaceSettingsParams) error { + + if len(args) == 0 { + return nil + } + + // Build the bulk insert query + valueClauses := make([]string, len(args)) + for i := range args { + valueClauses[i] = "( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
)" + } + + bulkQuery := fmt.Sprintf(bulkInsertClickhouseWorkspaceSettings, strings.Join(valueClauses, ", ")) + + // Collect all arguments + var allArgs []any + for _, arg := range args { + allArgs = append(allArgs, arg.WorkspaceID) + allArgs = append(allArgs, arg.Username) + allArgs = append(allArgs, arg.PasswordEncrypted) + allArgs = append(allArgs, arg.QuotaDurationSeconds) + allArgs = append(allArgs, arg.MaxQueriesPerWindow) + allArgs = append(allArgs, arg.MaxExecutionTimePerWindow) + allArgs = append(allArgs, arg.MaxQueryExecutionTime) + allArgs = append(allArgs, arg.MaxQueryMemoryBytes) + allArgs = append(allArgs, arg.MaxQueryResultRows) + allArgs = append(allArgs, arg.CreatedAt) + allArgs = append(allArgs, arg.UpdatedAt) + } + + // Execute the bulk insert + _, err := db.ExecContext(ctx, bulkQuery, allArgs...) + return err +} diff --git a/go/pkg/db/bulk_key_auth_insert.sql_generated.go b/go/pkg/db/bulk_key_auth_insert.sql_generated.go new file mode 100644 index 0000000000..fc4a861c17 --- /dev/null +++ b/go/pkg/db/bulk_key_auth_insert.sql_generated.go @@ -0,0 +1,42 @@ +// Code generated by sqlc bulk insert plugin. DO NOT EDIT. 
+ +package db + +import ( + "context" + "fmt" + "strings" +) + +// bulkInsertKeyAuth is the base query for bulk insert +const bulkInsertKeyAuth = `INSERT INTO key_auth ( id, workspace_id, created_at_m, default_prefix, default_bytes, store_encrypted_keys ) VALUES %s` + +// InsertKeyAuths performs bulk insert in a single query +func (q *BulkQueries) InsertKeyAuths(ctx context.Context, db DBTX, args []InsertKeyAuthParams) error { + + if len(args) == 0 { + return nil + } + + // Build the bulk insert query + valueClauses := make([]string, len(args)) + for i := range args { + valueClauses[i] = "( ?, ?, ?, ?, ?, false )" + } + + bulkQuery := fmt.Sprintf(bulkInsertKeyAuth, strings.Join(valueClauses, ", ")) + + // Collect all arguments + var allArgs []any + for _, arg := range args { + allArgs = append(allArgs, arg.ID) + allArgs = append(allArgs, arg.WorkspaceID) + allArgs = append(allArgs, arg.CreatedAtM) + allArgs = append(allArgs, arg.DefaultPrefix) + allArgs = append(allArgs, arg.DefaultBytes) + } + + // Execute the bulk insert + _, err := db.ExecContext(ctx, bulkQuery, allArgs...) + return err +} diff --git a/go/pkg/db/clickhouse_workspace_settings_find_by_workspace_id.sql_generated.go b/go/pkg/db/clickhouse_workspace_settings_find_by_workspace_id.sql_generated.go new file mode 100644 index 0000000000..54fddd1ba5 --- /dev/null +++ b/go/pkg/db/clickhouse_workspace_settings_find_by_workspace_id.sql_generated.go @@ -0,0 +1,38 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: clickhouse_workspace_settings_find_by_workspace_id.sql + +package db + +import ( + "context" +) + +const findClickhouseWorkspaceSettingsByWorkspaceID = `-- name: FindClickhouseWorkspaceSettingsByWorkspaceID :one +SELECT workspace_id, username, password_encrypted, quota_duration_seconds, max_queries_per_window, max_execution_time_per_window, max_query_execution_time, max_query_memory_bytes, max_query_result_rows, created_at, updated_at FROM ` + "`" + `clickhouse_workspace_settings` + "`" + ` +WHERE workspace_id = ? +` + +// FindClickhouseWorkspaceSettingsByWorkspaceID +// +// SELECT workspace_id, username, password_encrypted, quota_duration_seconds, max_queries_per_window, max_execution_time_per_window, max_query_execution_time, max_query_memory_bytes, max_query_result_rows, created_at, updated_at FROM `clickhouse_workspace_settings` +// WHERE workspace_id = ? +func (q *Queries) FindClickhouseWorkspaceSettingsByWorkspaceID(ctx context.Context, db DBTX, workspaceID string) (ClickhouseWorkspaceSetting, error) { + row := db.QueryRowContext(ctx, findClickhouseWorkspaceSettingsByWorkspaceID, workspaceID) + var i ClickhouseWorkspaceSetting + err := row.Scan( + &i.WorkspaceID, + &i.Username, + &i.PasswordEncrypted, + &i.QuotaDurationSeconds, + &i.MaxQueriesPerWindow, + &i.MaxExecutionTimePerWindow, + &i.MaxQueryExecutionTime, + &i.MaxQueryMemoryBytes, + &i.MaxQueryResultRows, + &i.CreatedAt, + &i.UpdatedAt, + ) + return i, err +} diff --git a/go/pkg/db/clickhouse_workspace_settings_insert.sql_generated.go b/go/pkg/db/clickhouse_workspace_settings_insert.sql_generated.go new file mode 100644 index 0000000000..16232b4fdb --- /dev/null +++ b/go/pkg/db/clickhouse_workspace_settings_insert.sql_generated.go @@ -0,0 +1,99 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: clickhouse_workspace_settings_insert.sql + +package db + +import ( + "context" + "database/sql" +) + +const insertClickhouseWorkspaceSettings = `-- name: InsertClickhouseWorkspaceSettings :exec +INSERT INTO ` + "`" + `clickhouse_workspace_settings` + "`" + ` ( + workspace_id, + username, + password_encrypted, + quota_duration_seconds, + max_queries_per_window, + max_execution_time_per_window, + max_query_execution_time, + max_query_memory_bytes, + max_query_result_rows, + created_at, + updated_at +) +VALUES ( + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ? +) +` + +type InsertClickhouseWorkspaceSettingsParams struct { + WorkspaceID string `db:"workspace_id"` + Username string `db:"username"` + PasswordEncrypted string `db:"password_encrypted"` + QuotaDurationSeconds int32 `db:"quota_duration_seconds"` + MaxQueriesPerWindow int32 `db:"max_queries_per_window"` + MaxExecutionTimePerWindow int32 `db:"max_execution_time_per_window"` + MaxQueryExecutionTime int32 `db:"max_query_execution_time"` + MaxQueryMemoryBytes int64 `db:"max_query_memory_bytes"` + MaxQueryResultRows int32 `db:"max_query_result_rows"` + CreatedAt int64 `db:"created_at"` + UpdatedAt sql.NullInt64 `db:"updated_at"` +} + +// InsertClickhouseWorkspaceSettings +// +// INSERT INTO `clickhouse_workspace_settings` ( +// workspace_id, +// username, +// password_encrypted, +// quota_duration_seconds, +// max_queries_per_window, +// max_execution_time_per_window, +// max_query_execution_time, +// max_query_memory_bytes, +// max_query_result_rows, +// created_at, +// updated_at +// ) +// VALUES ( +// ?, +// ?, +// ?, +// ?, +// ?, +// ?, +// ?, +// ?, +// ?, +// ?, +// ? 
+// ) +func (q *Queries) InsertClickhouseWorkspaceSettings(ctx context.Context, db DBTX, arg InsertClickhouseWorkspaceSettingsParams) error { + _, err := db.ExecContext(ctx, insertClickhouseWorkspaceSettings, + arg.WorkspaceID, + arg.Username, + arg.PasswordEncrypted, + arg.QuotaDurationSeconds, + arg.MaxQueriesPerWindow, + arg.MaxExecutionTimePerWindow, + arg.MaxQueryExecutionTime, + arg.MaxQueryMemoryBytes, + arg.MaxQueryResultRows, + arg.CreatedAt, + arg.UpdatedAt, + ) + return err +} diff --git a/go/pkg/db/clickhouse_workspace_settings_update_limits.sql_generated.go b/go/pkg/db/clickhouse_workspace_settings_update_limits.sql_generated.go new file mode 100644 index 0000000000..b91619f34c --- /dev/null +++ b/go/pkg/db/clickhouse_workspace_settings_update_limits.sql_generated.go @@ -0,0 +1,61 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: clickhouse_workspace_settings_update_limits.sql + +package db + +import ( + "context" + "database/sql" +) + +const updateClickhouseWorkspaceSettingsLimits = `-- name: UpdateClickhouseWorkspaceSettingsLimits :exec +UPDATE ` + "`" + `clickhouse_workspace_settings` + "`" + ` +SET + quota_duration_seconds = ?, + max_queries_per_window = ?, + max_execution_time_per_window = ?, + max_query_execution_time = ?, + max_query_memory_bytes = ?, + max_query_result_rows = ?, + updated_at = ? +WHERE workspace_id = ? 
+` + +type UpdateClickhouseWorkspaceSettingsLimitsParams struct { + QuotaDurationSeconds int32 `db:"quota_duration_seconds"` + MaxQueriesPerWindow int32 `db:"max_queries_per_window"` + MaxExecutionTimePerWindow int32 `db:"max_execution_time_per_window"` + MaxQueryExecutionTime int32 `db:"max_query_execution_time"` + MaxQueryMemoryBytes int64 `db:"max_query_memory_bytes"` + MaxQueryResultRows int32 `db:"max_query_result_rows"` + UpdatedAt sql.NullInt64 `db:"updated_at"` + WorkspaceID string `db:"workspace_id"` +} + +// UpdateClickhouseWorkspaceSettingsLimits +// +// UPDATE `clickhouse_workspace_settings` +// SET +// quota_duration_seconds = ?, +// max_queries_per_window = ?, +// max_execution_time_per_window = ?, +// max_query_execution_time = ?, +// max_query_memory_bytes = ?, +// max_query_result_rows = ?, +// updated_at = ? +// WHERE workspace_id = ? +func (q *Queries) UpdateClickhouseWorkspaceSettingsLimits(ctx context.Context, db DBTX, arg UpdateClickhouseWorkspaceSettingsLimitsParams) error { + _, err := db.ExecContext(ctx, updateClickhouseWorkspaceSettingsLimits, + arg.QuotaDurationSeconds, + arg.MaxQueriesPerWindow, + arg.MaxExecutionTimePerWindow, + arg.MaxQueryExecutionTime, + arg.MaxQueryMemoryBytes, + arg.MaxQueryResultRows, + arg.UpdatedAt, + arg.WorkspaceID, + ) + return err +} diff --git a/go/pkg/db/generate.go b/go/pkg/db/generate.go index f48752e4b4..9ad39f3be5 100644 --- a/go/pkg/db/generate.go +++ b/go/pkg/db/generate.go @@ -1,7 +1,7 @@ package db //go:generate go build -o ./plugins/dist/bulk-insert ./plugins/bulk-insert -//go:generate sqlc generate +//go:generate go run github.com/sqlc-dev/sqlc/cmd/sqlc generate // we copy all of the relevant bits into query.go and don't want the default // exports that get generated //go:generate rm delete_me.go diff --git a/go/pkg/db/identity_find_identities_by_ids.sql_generated.go b/go/pkg/db/identity_find_identities_by_ids.sql_generated.go new file mode 100644 index 0000000000..6c29b50771 --- /dev/null +++ 
b/go/pkg/db/identity_find_identities_by_ids.sql_generated.go @@ -0,0 +1,72 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: identity_find_identities_by_ids.sql + +package db + +import ( + "context" + "strings" +) + +const findIdentitiesByIds = `-- name: FindIdentitiesByIds :many +SELECT id, external_id +FROM identities +WHERE workspace_id = ? + AND deleted = ? + AND id IN (/*SLICE:identities*/?) +` + +type FindIdentitiesByIdsParams struct { + WorkspaceID string `db:"workspace_id"` + Deleted bool `db:"deleted"` + Identities []string `db:"identities"` +} + +type FindIdentitiesByIdsRow struct { + ID string `db:"id"` + ExternalID string `db:"external_id"` +} + +// FindIdentitiesByIds +// +// SELECT id, external_id +// FROM identities +// WHERE workspace_id = ? +// AND deleted = ? +// AND id IN (/*SLICE:identities*/?) +func (q *Queries) FindIdentitiesByIds(ctx context.Context, db DBTX, arg FindIdentitiesByIdsParams) ([]FindIdentitiesByIdsRow, error) { + query := findIdentitiesByIds + var queryParams []interface{} + queryParams = append(queryParams, arg.WorkspaceID) + queryParams = append(queryParams, arg.Deleted) + if len(arg.Identities) > 0 { + for _, v := range arg.Identities { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:identities*/?", strings.Repeat(",?", len(arg.Identities))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:identities*/?", "NULL", 1) + } + rows, err := db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []FindIdentitiesByIdsRow + for rows.Next() { + var i FindIdentitiesByIdsRow + if err := rows.Scan(&i.ID, &i.ExternalID); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/go/pkg/db/identity_find_many.sql_generated.go b/go/pkg/db/identity_find_many.sql_generated.go new file mode 100644 index 0000000000..7ca77bd52d --- /dev/null +++ b/go/pkg/db/identity_find_many.sql_generated.go @@ -0,0 +1,84 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: identity_find_many.sql + +package db + +import ( + "context" + "strings" +) + +const findIdentities = `-- name: FindIdentities :many +SELECT id, external_id, workspace_id, environment, meta, deleted, created_at, updated_at +FROM identities +WHERE workspace_id = ? + AND deleted = ? + AND (external_id IN(/*SLICE:identities*/?) OR id IN (/*SLICE:identities*/?)) +` + +type FindIdentitiesParams struct { + WorkspaceID string `db:"workspace_id"` + Deleted bool `db:"deleted"` + Identities []string `db:"identities"` +} + +// FindIdentities +// +// SELECT id, external_id, workspace_id, environment, meta, deleted, created_at, updated_at +// FROM identities +// WHERE workspace_id = ? +// AND deleted = ? +// AND (external_id IN(/*SLICE:identities*/?) 
OR id IN (/*SLICE:identities*/?)) +func (q *Queries) FindIdentities(ctx context.Context, db DBTX, arg FindIdentitiesParams) ([]Identity, error) { + query := findIdentities + var queryParams []interface{} + queryParams = append(queryParams, arg.WorkspaceID) + queryParams = append(queryParams, arg.Deleted) + if len(arg.Identities) > 0 { + for _, v := range arg.Identities { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:identities*/?", strings.Repeat(",?", len(arg.Identities))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:identities*/?", "NULL", 1) + } + if len(arg.Identities) > 0 { + for _, v := range arg.Identities { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:identities*/?", strings.Repeat(",?", len(arg.Identities))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:identities*/?", "NULL", 1) + } + rows, err := db.QueryContext(ctx, query, queryParams...) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Identity + for rows.Next() { + var i Identity + if err := rows.Scan( + &i.ID, + &i.ExternalID, + &i.WorkspaceID, + &i.Environment, + &i.Meta, + &i.Deleted, + &i.CreatedAt, + &i.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/go/pkg/db/key_auth_get_by_id.sql_generated.go b/go/pkg/db/key_auth_get_by_id.sql_generated.go new file mode 100644 index 0000000000..da0820387d --- /dev/null +++ b/go/pkg/db/key_auth_get_by_id.sql_generated.go @@ -0,0 +1,59 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: key_auth_get_by_id.sql + +package db + +import ( + "context" + "database/sql" +) + +const getKeyAuthByID = `-- name: GetKeyAuthByID :one +SELECT + id, + workspace_id, + created_at_m, + default_prefix, + default_bytes, + store_encrypted_keys +FROM key_auth +WHERE id = ? + AND deleted_at_m IS NULL +` + +type GetKeyAuthByIDRow struct { + ID string `db:"id"` + WorkspaceID string `db:"workspace_id"` + CreatedAtM int64 `db:"created_at_m"` + DefaultPrefix sql.NullString `db:"default_prefix"` + DefaultBytes sql.NullInt32 `db:"default_bytes"` + StoreEncryptedKeys bool `db:"store_encrypted_keys"` +} + +// GetKeyAuthByID +// +// SELECT +// id, +// workspace_id, +// created_at_m, +// default_prefix, +// default_bytes, +// store_encrypted_keys +// FROM key_auth +// WHERE id = ? +// AND deleted_at_m IS NULL +func (q *Queries) GetKeyAuthByID(ctx context.Context, db DBTX, id string) (GetKeyAuthByIDRow, error) { + row := db.QueryRowContext(ctx, getKeyAuthByID, id) + var i GetKeyAuthByIDRow + err := row.Scan( + &i.ID, + &i.WorkspaceID, + &i.CreatedAtM, + &i.DefaultPrefix, + &i.DefaultBytes, + &i.StoreEncryptedKeys, + ) + return i, err +} diff --git a/go/pkg/db/key_auth_insert.sql_generated.go b/go/pkg/db/key_auth_insert.sql_generated.go new file mode 100644 index 0000000000..65e88d1052 --- /dev/null +++ b/go/pkg/db/key_auth_insert.sql_generated.go @@ -0,0 +1,65 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: key_auth_insert.sql + +package db + +import ( + "context" + "database/sql" +) + +const insertKeyAuth = `-- name: InsertKeyAuth :exec +INSERT INTO key_auth ( + id, + workspace_id, + created_at_m, + default_prefix, + default_bytes, + store_encrypted_keys +) VALUES ( + ?, + ?, + ?, + ?, + ?, + false +) +` + +type InsertKeyAuthParams struct { + ID string `db:"id"` + WorkspaceID string `db:"workspace_id"` + CreatedAtM int64 `db:"created_at_m"` + DefaultPrefix sql.NullString `db:"default_prefix"` + DefaultBytes sql.NullInt32 `db:"default_bytes"` +} + +// InsertKeyAuth +// +// INSERT INTO key_auth ( +// id, +// workspace_id, +// created_at_m, +// default_prefix, +// default_bytes, +// store_encrypted_keys +// ) VALUES ( +// ?, +// ?, +// ?, +// ?, +// ?, +// false +// ) +func (q *Queries) InsertKeyAuth(ctx context.Context, db DBTX, arg InsertKeyAuthParams) error { + _, err := db.ExecContext(ctx, insertKeyAuth, + arg.ID, + arg.WorkspaceID, + arg.CreatedAtM, + arg.DefaultPrefix, + arg.DefaultBytes, + ) + return err +} diff --git a/go/pkg/db/models_generated.go b/go/pkg/db/models_generated.go index 9d21ff614b..a37822ffdd 100644 --- a/go/pkg/db/models_generated.go +++ b/go/pkg/db/models_generated.go @@ -566,6 +566,20 @@ type AuditLogTarget struct { UpdatedAt sql.NullInt64 `db:"updated_at"` } +type ClickhouseWorkspaceSetting struct { + WorkspaceID string `db:"workspace_id"` + Username string `db:"username"` + PasswordEncrypted string `db:"password_encrypted"` + QuotaDurationSeconds int32 `db:"quota_duration_seconds"` + MaxQueriesPerWindow int32 `db:"max_queries_per_window"` + MaxExecutionTimePerWindow int32 `db:"max_execution_time_per_window"` + MaxQueryExecutionTime int32 `db:"max_query_execution_time"` + MaxQueryMemoryBytes int64 `db:"max_query_memory_bytes"` + MaxQueryResultRows int32 `db:"max_query_result_rows"` + CreatedAt int64 `db:"created_at"` + UpdatedAt sql.NullInt64 `db:"updated_at"` +} + type Deployment struct { ID string 
`db:"id"` WorkspaceID string `db:"workspace_id"` diff --git a/go/pkg/db/plugins/bulk-insert/bulk_insert.go.tmpl b/go/pkg/db/plugins/bulk-insert/bulk_insert.go.tmpl index 9013ced05a..8e6a966d86 100644 --- a/go/pkg/db/plugins/bulk-insert/bulk_insert.go.tmpl +++ b/go/pkg/db/plugins/bulk-insert/bulk_insert.go.tmpl @@ -50,7 +50,7 @@ func (q *BulkQueries) {{.BulkFunctionName}}(ctx context.Context, args []{{.Param {{if .EmitMethodsWithDBArgument -}} _, err := db.ExecContext(ctx, bulkQuery, allArgs...) {{else -}} - _, err := q.db.ExecContext(ctx, bulkQuery, allArgs...) + _, err := q.db.ExecContext(ctx, bulkQuery, allArgs...) {{end -}} return err diff --git a/go/pkg/db/querier_bulk_generated.go b/go/pkg/db/querier_bulk_generated.go index f8438d32ee..6e1be6a4f5 100644 --- a/go/pkg/db/querier_bulk_generated.go +++ b/go/pkg/db/querier_bulk_generated.go @@ -11,11 +11,13 @@ type BulkQuerier interface { InsertApis(ctx context.Context, db DBTX, args []InsertApiParams) error InsertAuditLogs(ctx context.Context, db DBTX, args []InsertAuditLogParams) error InsertAuditLogTargets(ctx context.Context, db DBTX, args []InsertAuditLogTargetParams) error + InsertClickhouseWorkspaceSettingses(ctx context.Context, db DBTX, args []InsertClickhouseWorkspaceSettingsParams) error InsertDeployments(ctx context.Context, db DBTX, args []InsertDeploymentParams) error InsertDeploymentSteps(ctx context.Context, db DBTX, args []InsertDeploymentStepParams) error InsertDomains(ctx context.Context, db DBTX, args []InsertDomainParams) error InsertIdentities(ctx context.Context, db DBTX, args []InsertIdentityParams) error InsertIdentityRatelimits(ctx context.Context, db DBTX, args []InsertIdentityRatelimitParams) error + InsertKeyAuths(ctx context.Context, db DBTX, args []InsertKeyAuthParams) error InsertKeyEncryptions(ctx context.Context, db DBTX, args []InsertKeyEncryptionParams) error InsertKeys(ctx context.Context, db DBTX, args []InsertKeyParams) error InsertKeyRatelimits(ctx context.Context, db DBTX, 
args []InsertKeyRatelimitParams) error diff --git a/go/pkg/db/querier_generated.go b/go/pkg/db/querier_generated.go index 0140198ee9..a19e3a22c8 100644 --- a/go/pkg/db/querier_generated.go +++ b/go/pkg/db/querier_generated.go @@ -148,6 +148,11 @@ type Querier interface { // JOIN audit_log ON audit_log.id = audit_log_target.audit_log_id // WHERE audit_log_target.id = ? FindAuditLogTargetByID(ctx context.Context, db DBTX, id string) ([]FindAuditLogTargetByIDRow, error) + //FindClickhouseWorkspaceSettingsByWorkspaceID + // + // SELECT workspace_id, username, password_encrypted, quota_duration_seconds, max_queries_per_window, max_execution_time_per_window, max_query_execution_time, max_query_memory_bytes, max_query_result_rows, created_at, updated_at FROM `clickhouse_workspace_settings` + // WHERE workspace_id = ? + FindClickhouseWorkspaceSettingsByWorkspaceID(ctx context.Context, db DBTX, workspaceID string) (ClickhouseWorkspaceSetting, error) //FindDeploymentById // // SELECT @@ -265,6 +270,14 @@ type Querier interface { // AND project_id = ? // AND slug = ? FindEnvironmentByProjectIdAndSlug(ctx context.Context, db DBTX, arg FindEnvironmentByProjectIdAndSlugParams) (FindEnvironmentByProjectIdAndSlugRow, error) + //FindIdentities + // + // SELECT id, external_id, workspace_id, environment, meta, deleted, created_at, updated_at + // FROM identities + // WHERE workspace_id = ? + // AND deleted = ? + // AND (external_id IN(/*SLICE:identities*/?) OR id IN (/*SLICE:identities*/?)) + FindIdentities(ctx context.Context, db DBTX, arg FindIdentitiesParams) ([]Identity, error) //FindIdentityByExternalID // // SELECT id, external_id, workspace_id, environment, meta, deleted, created_at, updated_at @@ -328,6 +341,26 @@ type Querier interface { // AND i.deleted = ? 
// LIMIT 1 FindIdentityWithRatelimits(ctx context.Context, db DBTX, arg FindIdentityWithRatelimitsParams) ([]FindIdentityWithRatelimitsRow, error) + //FindKeyAuthsByIds + // + // SELECT ka.id as key_auth_id, a.id as api_id + // FROM apis a + // JOIN key_auth as ka ON ka.id = a.key_auth_id + // WHERE a.workspace_id = ? + // AND a.id IN (/*SLICE:api_ids*/?) + // AND ka.deleted_at_m IS NULL + // AND a.deleted_at_m IS NULL + FindKeyAuthsByIds(ctx context.Context, db DBTX, arg FindKeyAuthsByIdsParams) ([]FindKeyAuthsByIdsRow, error) + //FindKeyAuthsByKeyAuthIds + // + // SELECT ka.id as key_auth_id, a.id as api_id + // FROM key_auth as ka + // JOIN apis a ON a.key_auth_id = ka.id + // WHERE a.workspace_id = ? + // AND ka.id IN (/*SLICE:key_auth_ids*/?) + // AND ka.deleted_at_m IS NULL + // AND a.deleted_at_m IS NULL + FindKeyAuthsByKeyAuthIds(ctx context.Context, db DBTX, arg FindKeyAuthsByKeyAuthIdsParams) ([]FindKeyAuthsByKeyAuthIdsRow, error) //FindKeyByID // // SELECT id, key_auth_id, hash, start, workspace_id, for_workspace_id, name, owner_id, identity_id, meta, expires, created_at_m, updated_at_m, deleted_at_m, refill_day, refill_amount, last_refill_at, enabled, remaining_requests, ratelimit_async, ratelimit_limit, ratelimit_duration, environment FROM `keys` k @@ -836,6 +869,19 @@ type Querier interface { // SELECT id, org_id, name, slug, partition_id, plan, tier, stripe_customer_id, stripe_subscription_id, beta_features, features, subscriptions, enabled, delete_protection, created_at_m, updated_at_m, deleted_at_m FROM `workspaces` // WHERE id = ? FindWorkspaceByID(ctx context.Context, db DBTX, id string) (Workspace, error) + //GetKeyAuthByID + // + // SELECT + // id, + // workspace_id, + // created_at_m, + // default_prefix, + // default_bytes, + // store_encrypted_keys + // FROM key_auth + // WHERE id = ? 
+ // AND deleted_at_m IS NULL + GetKeyAuthByID(ctx context.Context, db DBTX, id string) (GetKeyAuthByIDRow, error) //HardDeleteWorkspace // // DELETE FROM `workspaces` @@ -954,6 +1000,35 @@ type Querier interface { // ? // ) InsertAuditLogTarget(ctx context.Context, db DBTX, arg InsertAuditLogTargetParams) error + //InsertClickhouseWorkspaceSettings + // + // INSERT INTO `clickhouse_workspace_settings` ( + // workspace_id, + // username, + // password_encrypted, + // quota_duration_seconds, + // max_queries_per_window, + // max_execution_time_per_window, + // max_query_execution_time, + // max_query_memory_bytes, + // max_query_result_rows, + // created_at, + // updated_at + // ) + // VALUES ( + // ?, + // ?, + // ?, + // ?, + // ?, + // ?, + // ?, + // ?, + // ?, + // ?, + // ? + // ) + InsertClickhouseWorkspaceSettings(ctx context.Context, db DBTX, arg InsertClickhouseWorkspaceSettingsParams) error //InsertDeployment // // INSERT INTO `deployments` ( @@ -1121,6 +1196,24 @@ type Querier interface { // ? // ) InsertKey(ctx context.Context, db DBTX, arg InsertKeyParams) error + //InsertKeyAuth + // + // INSERT INTO key_auth ( + // id, + // workspace_id, + // created_at_m, + // default_prefix, + // default_bytes, + // store_encrypted_keys + // ) VALUES ( + // ?, + // ?, + // ?, + // ?, + // ?, + // false + // ) + InsertKeyAuth(ctx context.Context, db DBTX, arg InsertKeyAuthParams) error //InsertKeyEncryption // // INSERT INTO encrypted_keys @@ -1722,6 +1815,19 @@ type Querier interface { // SET delete_protection = ? // WHERE id = ? UpdateApiDeleteProtection(ctx context.Context, db DBTX, arg UpdateApiDeleteProtectionParams) error + //UpdateClickhouseWorkspaceSettingsLimits + // + // UPDATE `clickhouse_workspace_settings` + // SET + // quota_duration_seconds = ?, + // max_queries_per_window = ?, + // max_execution_time_per_window = ?, + // max_query_execution_time = ?, + // max_query_memory_bytes = ?, + // max_query_result_rows = ?, + // updated_at = ? 
+ // WHERE workspace_id = ? + UpdateClickhouseWorkspaceSettingsLimits(ctx context.Context, db DBTX, arg UpdateClickhouseWorkspaceSettingsLimitsParams) error //UpdateDeploymentOpenapiSpec // // UPDATE deployments diff --git a/go/pkg/db/queries/api_find_key_auth_by_ids.sql b/go/pkg/db/queries/api_find_key_auth_by_ids.sql new file mode 100644 index 0000000000..a41f859a38 --- /dev/null +++ b/go/pkg/db/queries/api_find_key_auth_by_ids.sql @@ -0,0 +1,8 @@ +-- name: FindKeyAuthsByIds :many +SELECT ka.id as key_auth_id, a.id as api_id +FROM apis a +JOIN key_auth as ka ON ka.id = a.key_auth_id +WHERE a.workspace_id = sqlc.arg(workspace_id) + AND a.id IN (sqlc.slice(api_ids)) + AND ka.deleted_at_m IS NULL + AND a.deleted_at_m IS NULL; diff --git a/go/pkg/db/queries/api_find_key_auth_by_key_auth_ids.sql b/go/pkg/db/queries/api_find_key_auth_by_key_auth_ids.sql new file mode 100644 index 0000000000..4dcb639c78 --- /dev/null +++ b/go/pkg/db/queries/api_find_key_auth_by_key_auth_ids.sql @@ -0,0 +1,8 @@ +-- name: FindKeyAuthsByKeyAuthIds :many +SELECT ka.id as key_auth_id, a.id as api_id +FROM key_auth as ka +JOIN apis a ON a.key_auth_id = ka.id +WHERE a.workspace_id = sqlc.arg(workspace_id) + AND ka.id IN (sqlc.slice(key_auth_ids)) + AND ka.deleted_at_m IS NULL + AND a.deleted_at_m IS NULL; diff --git a/go/pkg/db/queries/clickhouse_workspace_settings_find_by_workspace_id.sql b/go/pkg/db/queries/clickhouse_workspace_settings_find_by_workspace_id.sql new file mode 100644 index 0000000000..0b2262070f --- /dev/null +++ b/go/pkg/db/queries/clickhouse_workspace_settings_find_by_workspace_id.sql @@ -0,0 +1,3 @@ +-- name: FindClickhouseWorkspaceSettingsByWorkspaceID :one +SELECT * FROM `clickhouse_workspace_settings` +WHERE workspace_id = sqlc.arg(workspace_id); diff --git a/go/pkg/db/queries/clickhouse_workspace_settings_insert.sql b/go/pkg/db/queries/clickhouse_workspace_settings_insert.sql new file mode 100644 index 0000000000..504c077aba --- /dev/null +++ 
b/go/pkg/db/queries/clickhouse_workspace_settings_insert.sql @@ -0,0 +1,27 @@ +-- name: InsertClickhouseWorkspaceSettings :exec +INSERT INTO `clickhouse_workspace_settings` ( + workspace_id, + username, + password_encrypted, + quota_duration_seconds, + max_queries_per_window, + max_execution_time_per_window, + max_query_execution_time, + max_query_memory_bytes, + max_query_result_rows, + created_at, + updated_at +) +VALUES ( + sqlc.arg(workspace_id), + sqlc.arg(username), + sqlc.arg(password_encrypted), + sqlc.arg(quota_duration_seconds), + sqlc.arg(max_queries_per_window), + sqlc.arg(max_execution_time_per_window), + sqlc.arg(max_query_execution_time), + sqlc.arg(max_query_memory_bytes), + sqlc.arg(max_query_result_rows), + sqlc.arg(created_at), + sqlc.arg(updated_at) +); diff --git a/go/pkg/db/queries/clickhouse_workspace_settings_update_limits.sql b/go/pkg/db/queries/clickhouse_workspace_settings_update_limits.sql new file mode 100644 index 0000000000..d51101b4be --- /dev/null +++ b/go/pkg/db/queries/clickhouse_workspace_settings_update_limits.sql @@ -0,0 +1,11 @@ +-- name: UpdateClickhouseWorkspaceSettingsLimits :exec +UPDATE `clickhouse_workspace_settings` +SET + quota_duration_seconds = sqlc.arg(quota_duration_seconds), + max_queries_per_window = sqlc.arg(max_queries_per_window), + max_execution_time_per_window = sqlc.arg(max_execution_time_per_window), + max_query_execution_time = sqlc.arg(max_query_execution_time), + max_query_memory_bytes = sqlc.arg(max_query_memory_bytes), + max_query_result_rows = sqlc.arg(max_query_result_rows), + updated_at = sqlc.arg(updated_at) +WHERE workspace_id = sqlc.arg(workspace_id); diff --git a/go/pkg/db/queries/identity_find_many.sql b/go/pkg/db/queries/identity_find_many.sql new file mode 100644 index 0000000000..496b3f262d --- /dev/null +++ b/go/pkg/db/queries/identity_find_many.sql @@ -0,0 +1,6 @@ +-- name: FindIdentities :many +SELECT * +FROM identities +WHERE workspace_id = sqlc.arg(workspace_id) + AND deleted = 
sqlc.arg(deleted) + AND (external_id IN(sqlc.slice(identities)) OR id IN (sqlc.slice(identities))); diff --git a/go/pkg/db/queries/key_auth_get_by_id.sql b/go/pkg/db/queries/key_auth_get_by_id.sql new file mode 100644 index 0000000000..ca2222df33 --- /dev/null +++ b/go/pkg/db/queries/key_auth_get_by_id.sql @@ -0,0 +1,11 @@ +-- name: GetKeyAuthByID :one +SELECT + id, + workspace_id, + created_at_m, + default_prefix, + default_bytes, + store_encrypted_keys +FROM key_auth +WHERE id = ? + AND deleted_at_m IS NULL; diff --git a/go/pkg/db/queries/key_auth_insert.sql b/go/pkg/db/queries/key_auth_insert.sql new file mode 100644 index 0000000000..e3c8649e1a --- /dev/null +++ b/go/pkg/db/queries/key_auth_insert.sql @@ -0,0 +1,16 @@ +-- name: InsertKeyAuth :exec +INSERT INTO key_auth ( + id, + workspace_id, + created_at_m, + default_prefix, + default_bytes, + store_encrypted_keys +) VALUES ( + ?, + ?, + ?, + ?, + ?, + false +); diff --git a/go/pkg/db/schema.sql b/go/pkg/db/schema.sql index f433e5bffa..0b6162b57c 100644 --- a/go/pkg/db/schema.sql +++ b/go/pkg/db/schema.sql @@ -307,6 +307,22 @@ CREATE TABLE `environments` ( CONSTRAINT `environments_project_id_slug_idx` UNIQUE(`project_id`,`slug`) ); +CREATE TABLE `clickhouse_workspace_settings` ( + `workspace_id` varchar(256) NOT NULL, + `username` varchar(256) NOT NULL, + `password_encrypted` text NOT NULL, + `quota_duration_seconds` int NOT NULL DEFAULT 3600, + `max_queries_per_window` int NOT NULL DEFAULT 1000, + `max_execution_time_per_window` int NOT NULL DEFAULT 1800, + `max_query_execution_time` int NOT NULL DEFAULT 30, + `max_query_memory_bytes` bigint NOT NULL DEFAULT 1000000000, + `max_query_result_rows` int NOT NULL DEFAULT 10000, + `created_at` bigint NOT NULL DEFAULT 0, + `updated_at` bigint, + CONSTRAINT `clickhouse_workspace_settings_workspace_id` PRIMARY KEY(`workspace_id`), + CONSTRAINT `clickhouse_workspace_settings_username_unique` UNIQUE(`username`) +); + CREATE TABLE `projects` ( `id` varchar(256) NOT NULL, 
`workspace_id` varchar(256) NOT NULL, diff --git a/go/pkg/rbac/permissions.go b/go/pkg/rbac/permissions.go index fd70d33ec5..a30427f4a1 100644 --- a/go/pkg/rbac/permissions.go +++ b/go/pkg/rbac/permissions.go @@ -65,6 +65,9 @@ const ( // VerifyKey permits verifying API keys VerifyKey ActionType = "verify_key" + + // ReadAnalytics permits viewing API analytics + ReadAnalytics ActionType = "read_analytics" ) // Predefined rate limiting actions. These constants define operations diff --git a/go/pkg/testutil/http.go b/go/pkg/testutil/http.go index e1652ce434..6f3c80f887 100644 --- a/go/pkg/testutil/http.go +++ b/go/pkg/testutil/http.go @@ -3,6 +3,7 @@ package testutil import ( "bytes" "context" + "database/sql" "encoding/json" "net/http" "net/http/httptest" @@ -10,6 +11,8 @@ import ( "time" "github.com/stretchr/testify/require" + vaultv1 "github.com/unkeyed/unkey/go/gen/proto/vault/v1" + "github.com/unkeyed/unkey/go/internal/services/analytics" "github.com/unkeyed/unkey/go/internal/services/auditlogs" "github.com/unkeyed/unkey/go/internal/services/caches" "github.com/unkeyed/unkey/go/internal/services/keys" @@ -40,16 +43,17 @@ type Harness struct { middleware []zen.Middleware - DB db.Database - Caches caches.Caches - Logger logging.Logger - Keys keys.KeyService - UsageLimiter usagelimiter.Service - Auditlogs auditlogs.AuditLogService - ClickHouse clickhouse.ClickHouse - Ratelimit ratelimit.Service - Vault *vault.Service - seeder *seed.Seeder + DB db.Database + Caches caches.Caches + Logger logging.Logger + Keys keys.KeyService + UsageLimiter usagelimiter.Service + Auditlogs auditlogs.AuditLogService + ClickHouse clickhouse.ClickHouse + Ratelimit ratelimit.Service + Vault *vault.Service + AnalyticsConnectionManager analytics.ConnectionManager + seeder *seed.Seeder } func NewHarness(t *testing.T) *Harness { @@ -157,24 +161,36 @@ func NewHarness(t *testing.T) *Harness { }) require.NoError(t, err) + // Create analytics connection manager + analyticsConnManager, err := 
analytics.NewConnectionManager(analytics.ConnectionManagerConfig{ + SettingsCache: caches.ClickhouseSetting, + Database: db, + Logger: logger, + Clock: clk, + BaseURL: chDSN, + Vault: v, + }) + require.NoError(t, err) + // Create seeder seeder := seed.New(t, db, v) seeder.Seed(context.Background()) h := Harness{ - t: t, - Logger: logger, - srv: srv, - validator: validator, - Keys: keyService, - UsageLimiter: ulSvc, - Ratelimit: ratelimitService, - Vault: v, - ClickHouse: ch, - DB: db, - seeder: seeder, - Clock: clk, + t: t, + Logger: logger, + srv: srv, + validator: validator, + Keys: keyService, + UsageLimiter: ulSvc, + Ratelimit: ratelimitService, + Vault: v, + ClickHouse: ch, + DB: db, + seeder: seeder, + Clock: clk, + AnalyticsConnectionManager: analyticsConnManager, Auditlogs: auditlogs.New(auditlogs.Config{ DB: db, Logger: logger, @@ -237,6 +253,102 @@ func (h *Harness) CreatePermission(req seed.CreatePermissionRequest) db.Permissi return h.seeder.CreatePermission(context.Background(), req) } +type SetupAnalyticsOption func(*setupAnalyticsConfig) + +type setupAnalyticsConfig struct { + MaxQueryResultRows int32 + MaxQueryMemoryBytes int64 + MaxQueriesPerWindow int32 + MaxExecutionTimePerWindow int32 + QuotaDurationSeconds int32 + MaxQueryExecutionTime int32 +} + +func WithMaxQueryResultRows(rows int32) SetupAnalyticsOption { + return func(c *setupAnalyticsConfig) { + c.MaxQueryResultRows = rows + } +} + +func WithMaxQueryMemoryBytes(bytes int64) SetupAnalyticsOption { + return func(c *setupAnalyticsConfig) { + c.MaxQueryMemoryBytes = bytes + } +} + +func WithMaxQueriesPerWindow(queries int32) SetupAnalyticsOption { + return func(c *setupAnalyticsConfig) { + c.MaxQueriesPerWindow = queries + } +} + +func WithMaxExecutionTimePerWindow(seconds int32) SetupAnalyticsOption { + return func(c *setupAnalyticsConfig) { + c.MaxExecutionTimePerWindow = seconds + } +} + +func (h *Harness) SetupAnalytics(workspaceID string, opts ...SetupAnalyticsOption) { + ctx := 
context.Background() + + // Defaults + config := setupAnalyticsConfig{ + MaxQueryResultRows: 10_000_000, + MaxQueryMemoryBytes: 1_000_000_000, + MaxQueriesPerWindow: 1_000, + MaxExecutionTimePerWindow: 1_800, + QuotaDurationSeconds: 3_600, + MaxQueryExecutionTime: 30, + } + + // Apply options + for _, opt := range opts { + opt(&config) + } + + password := "test_password" + username := workspaceID + + // Encrypt the password using the vault service + encryptRes, err := h.Vault.Encrypt(ctx, &vaultv1.EncryptRequest{ + Keyring: workspaceID, + Data: password, + }) + require.NoError(h.t, err) + + // Configure ClickHouse user with permissions, quotas, and settings + err = h.ClickHouse.ConfigureUser(ctx, clickhouse.UserConfig{ + WorkspaceID: workspaceID, + Username: username, + Password: password, + AllowedTables: clickhouse.DefaultAllowedTables(), + QuotaDurationSeconds: config.QuotaDurationSeconds, + MaxQueriesPerWindow: config.MaxQueriesPerWindow, + MaxExecutionTimePerWindow: config.MaxExecutionTimePerWindow, + MaxQueryExecutionTime: config.MaxQueryExecutionTime, + MaxQueryMemoryBytes: config.MaxQueryMemoryBytes, + MaxQueryResultRows: config.MaxQueryResultRows, + }) + require.NoError(h.t, err) + + // Store the encrypted credentials in the database + now := h.Clock.Now().UnixMilli() + err = db.Query.InsertClickhouseWorkspaceSettings(ctx, h.DB.RW(), db.InsertClickhouseWorkspaceSettingsParams{ + WorkspaceID: workspaceID, + Username: username, + PasswordEncrypted: encryptRes.Encrypted, + QuotaDurationSeconds: config.QuotaDurationSeconds, + MaxQueriesPerWindow: config.MaxQueriesPerWindow, + MaxExecutionTimePerWindow: config.MaxExecutionTimePerWindow, + MaxQueryExecutionTime: config.MaxQueryExecutionTime, + MaxQueryMemoryBytes: config.MaxQueryMemoryBytes, + MaxQueryResultRows: config.MaxQueryResultRows, + CreatedAt: now, + UpdatedAt: sql.NullInt64{Valid: true, Int64: now}, + }) + require.NoError(h.t, err) +} + func (h *Harness) Resources() seed.Resources { return 
h.seeder.Resources } diff --git a/go/pkg/zen/middleware_errors.go b/go/pkg/zen/middleware_errors.go index dcadd59696..db848361e5 100644 --- a/go/pkg/zen/middleware_errors.go +++ b/go/pkg/zen/middleware_errors.go @@ -37,6 +37,7 @@ func WithErrorHandling(logger logging.Logger) Middleware { case codes.UnkeyDataErrorsKeyNotFound, codes.UnkeyDataErrorsWorkspaceNotFound, codes.UnkeyDataErrorsApiNotFound, + codes.UnkeyDataErrorsKeySpaceNotFound, codes.UnkeyDataErrorsPermissionNotFound, codes.UnkeyDataErrorsRoleNotFound, codes.UnkeyDataErrorsKeyAuthNotFound, @@ -57,7 +58,7 @@ func WithErrorHandling(logger logging.Logger) Middleware { }, }) - // Bad Request errors + // Bad Request errors - General validation case codes.UnkeyAppErrorsValidationInvalidInput, codes.UnkeyAuthErrorsAuthenticationMissing, codes.UnkeyAuthErrorsAuthenticationMalformed, @@ -77,6 +78,54 @@ func WithErrorHandling(logger logging.Logger) Middleware { }, }) + // Bad Request errors - Query validation (malformed queries) + case codes.UserErrorsBadRequestInvalidAnalyticsQuery, + codes.UserErrorsBadRequestInvalidAnalyticsTable, + codes.UserErrorsBadRequestInvalidAnalyticsFunction, + codes.UserErrorsBadRequestInvalidAnalyticsQueryType: + return s.JSON(http.StatusBadRequest, openapi.BadRequestErrorResponse{ + Meta: openapi.Meta{ + RequestId: s.RequestID(), + }, + Error: openapi.BadRequestErrorDetails{ + Title: "Bad Request", + Type: code.DocsURL(), + Detail: fault.UserFacingMessage(err), + Status: http.StatusBadRequest, + Errors: []openapi.ValidationError{}, + }, + }) + + // Unprocessable Entity - Query resource limits + case codes.UserErrorsUnprocessableEntityQueryExecutionTimeout, + codes.UserErrorsUnprocessableEntityQueryMemoryLimitExceeded, + codes.UserErrorsUnprocessableEntityQueryRowsLimitExceeded: + return s.JSON(http.StatusUnprocessableEntity, openapi.UnprocessableEntityErrorResponse{ + Meta: openapi.Meta{ + RequestId: s.RequestID(), + }, + Error: openapi.BaseError{ + Title: 
http.StatusText(http.StatusUnprocessableEntity), + Type: code.DocsURL(), + Detail: fault.UserFacingMessage(err), + Status: http.StatusUnprocessableEntity, + }, + }) + + // Too Many Requests - Query rate limiting + case codes.UserErrorsTooManyRequestsQueryQuotaExceeded: + return s.JSON(http.StatusTooManyRequests, openapi.TooManyRequestsErrorResponse{ + Meta: openapi.Meta{ + RequestId: s.RequestID(), + }, + Error: openapi.BaseError{ + Title: http.StatusText(http.StatusTooManyRequests), + Type: code.DocsURL(), + Detail: fault.UserFacingMessage(err), + Status: http.StatusTooManyRequests, + }, + }) + // Request Timeout errors case codes.UserErrorsBadRequestRequestTimeout: return s.JSON(http.StatusRequestTimeout, openapi.BadRequestErrorResponse{ @@ -210,7 +259,8 @@ func WithErrorHandling(logger logging.Logger) Middleware { }) // Precondition Failed - case codes.UnkeyAppErrorsPreconditionPreconditionFailed: + case codes.UnkeyDataErrorsAnalyticsNotConfigured, + codes.UnkeyAppErrorsPreconditionPreconditionFailed: return s.JSON(http.StatusPreconditionFailed, openapi.PreconditionFailedErrorResponse{ Meta: openapi.Meta{ RequestId: s.RequestID(), @@ -251,6 +301,27 @@ func WithErrorHandling(logger logging.Logger) Middleware { }, }) + // Service Unavailable errors + case codes.UnkeyDataErrorsAnalyticsConnectionFailed: + logger.Error( + "analytics connection error", + "error", err.Error(), + "requestId", s.RequestID(), + "publicMessage", fault.UserFacingMessage(err), + ) + + return s.JSON(http.StatusServiceUnavailable, openapi.ServiceUnavailableErrorResponse{ + Meta: openapi.Meta{ + RequestId: s.RequestID(), + }, + Error: openapi.BaseError{ + Title: "Service Unavailable", + Type: code.DocsURL(), + Detail: fault.UserFacingMessage(err), + Status: http.StatusServiceUnavailable, + }, + }) + // Rate Limited errors case codes.UnkeyGatewayErrorsAuthRateLimited: return s.JSON(http.StatusTooManyRequests, openapi.BadRequestErrorResponse{ diff --git a/go/tools.go b/go/tools.go index 
1460ca186b..3adb83a094 100644 --- a/go/tools.go +++ b/go/tools.go @@ -4,7 +4,6 @@ package tools import ( _ "github.com/bufbuild/buf/cmd/buf" - _ "github.com/golangci/golangci-lint/v2/cmd/golangci-lint" _ "github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen" _ "github.com/pressly/goose/v3/cmd/goose" _ "github.com/sqlc-dev/sqlc/cmd/sqlc" diff --git a/internal/db/package.json b/internal/db/package.json index a45746f7fe..28dedb0b21 100644 --- a/internal/db/package.json +++ b/internal/db/package.json @@ -9,6 +9,7 @@ "license": "AGPL-3.0", "scripts": { "migrate": "drizzle-kit push", + "generate": "drizzle-kit generate", "export": "drizzle-kit export", "studio": "dotenv -e .env drizzle-kit studio " }, diff --git a/internal/db/src/schema/clickhouse_workspace_settings.ts b/internal/db/src/schema/clickhouse_workspace_settings.ts new file mode 100644 index 0000000000..4a2124ceed --- /dev/null +++ b/internal/db/src/schema/clickhouse_workspace_settings.ts @@ -0,0 +1,40 @@ +import { relations } from "drizzle-orm"; +import { bigint, int, mysqlTable, text, varchar } from "drizzle-orm/mysql-core"; +import { lifecycleDatesV2 } from "./util/lifecycle_dates"; +import { workspaces } from "./workspaces"; + +/** + * ClickHouse configuration for workspaces with analytics enabled. + * Each workspace gets a dedicated user with resource quotas to prevent abuse. 
+ */ +export const clickhouseWorkspaceSettings = mysqlTable("clickhouse_workspace_settings", { + workspaceId: varchar("workspace_id", { length: 256 }).primaryKey(), + + // Authentication + username: varchar("username", { length: 256 }).notNull().unique(), + passwordEncrypted: text("password_encrypted").notNull(), + + // Quota window configuration + quotaDurationSeconds: int("quota_duration_seconds").notNull().default(3_600), // 1 hour + maxQueriesPerWindow: int("max_queries_per_window").notNull().default(1_000), + maxExecutionTimePerWindow: int("max_execution_time_per_window").notNull().default(1_800), // 30 min total + + // Per-query limits (prevent cluster crashes) + maxQueryExecutionTime: int("max_query_execution_time").notNull().default(30), // seconds + maxQueryMemoryBytes: bigint("max_query_memory_bytes", { mode: "number" }) + .notNull() + .default(1_000_000_000), // 1GB + maxQueryResultRows: int("max_query_result_rows").notNull().default(10_000), + + ...lifecycleDatesV2, +}); + +export const clickhouseWorkspaceSettingsRelations = relations( + clickhouseWorkspaceSettings, + ({ one }) => ({ + workspace: one(workspaces, { + fields: [clickhouseWorkspaceSettings.workspaceId], + references: [workspaces.id], + }), + }), +); diff --git a/internal/db/src/schema/index.ts b/internal/db/src/schema/index.ts index f2f2c451ea..94f2eab6d1 100644 --- a/internal/db/src/schema/index.ts +++ b/internal/db/src/schema/index.ts @@ -10,6 +10,7 @@ export * from "./identity"; export * from "./quota"; export * from "./audit_logs"; export * from "./environments"; +export * from "./clickhouse_workspace_settings"; // Deployment platform tables export * from "./projects"; diff --git a/internal/db/src/schema/workspaces.ts b/internal/db/src/schema/workspaces.ts index 5f0e033018..77a1a6c957 100644 --- a/internal/db/src/schema/workspaces.ts +++ b/internal/db/src/schema/workspaces.ts @@ -2,6 +2,7 @@ import type { Subscriptions } from "@unkey/billing"; import { relations } from "drizzle-orm"; 
import { boolean, json, mysqlEnum, mysqlTable, varchar } from "drizzle-orm/mysql-core"; import { apis } from "./apis"; +import { clickhouseWorkspaceSettings } from "./clickhouse_workspace_settings"; import { identities } from "./identity"; import { keyAuth } from "./keyAuth"; import { keys } from "./keys"; @@ -120,6 +121,7 @@ export const workspacesRelations = relations(workspaces, ({ many, one }) => ({ keySpaces: many(keyAuth), identities: many(identities), quotas: one(quotas), + clickhouseSettings: one(clickhouseWorkspaceSettings), projects: many(projects), })); diff --git a/internal/proto/generated/cache/v1/invalidation_pb.ts b/internal/proto/generated/cache/v1/invalidation_pb.ts new file mode 100644 index 0000000000..ea7f19c7e3 --- /dev/null +++ b/internal/proto/generated/cache/v1/invalidation_pb.ts @@ -0,0 +1,56 @@ +// @generated by protoc-gen-es v2.8.0 with parameter "target=ts" +// @generated from file cache/v1/invalidation.proto (package cache.v1, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file cache/v1/invalidation.proto. 
+ */ +export const file_cache_v1_invalidation: GenFile = /*@__PURE__*/ + fileDesc("ChtjYWNoZS92MS9pbnZhbGlkYXRpb24ucHJvdG8SCGNhY2hlLnYxImsKFkNhY2hlSW52YWxpZGF0aW9uRXZlbnQSEgoKY2FjaGVfbmFtZRgBIAEoCRIRCgljYWNoZV9rZXkYAiABKAkSEQoJdGltZXN0YW1wGAMgASgDEhcKD3NvdXJjZV9pbnN0YW5jZRgEIAEoCUKaAQoMY29tLmNhY2hlLnYxQhFJbnZhbGlkYXRpb25Qcm90b1ABWjZnaXRodWIuY29tL3Vua2V5ZWQvdW5rZXkvZ28vZ2VuL3Byb3RvL2NhY2hlL3YxO2NhY2hldjGiAgNDWFiqAghDYWNoZS5WMcoCCENhY2hlXFYx4gIUQ2FjaGVcVjFcR1BCTWV0YWRhdGHqAglDYWNoZTo6VjFiBnByb3RvMw"); + +/** + * CacheInvalidationEvent represents a cache invalidation event + * + * @generated from message cache.v1.CacheInvalidationEvent + */ +export type CacheInvalidationEvent = Message<"cache.v1.CacheInvalidationEvent"> & { + /** + * The name/identifier of the cache to invalidate + * + * @generated from field: string cache_name = 1; + */ + cacheName: string; + + /** + * The cache key to invalidate + * + * @generated from field: string cache_key = 2; + */ + cacheKey: string; + + /** + * Unix millisecond timestamp when the invalidation was triggered + * + * @generated from field: int64 timestamp = 3; + */ + timestamp: bigint; + + /** + * Optional: The node that triggered the invalidation (to avoid self-invalidation) + * + * @generated from field: string source_instance = 4; + */ + sourceInstance: string; +}; + +/** + * Describes the message cache.v1.CacheInvalidationEvent. + * Use `create(CacheInvalidationEventSchema)` to create a new message. 
+ */ +export const CacheInvalidationEventSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_cache_v1_invalidation, 0); + diff --git a/packages/rbac/src/permissions.ts b/packages/rbac/src/permissions.ts index 54c4fa3fbd..92b097eb7f 100644 --- a/packages/rbac/src/permissions.ts +++ b/packages/rbac/src/permissions.ts @@ -36,6 +36,7 @@ export const apiActions = z.enum([ "decrypt_key", "read_key", "verify_key", + "read_analytics", ]); export const ratelimitActions = z.enum([ "limit", @@ -69,6 +70,15 @@ export const identityActions = z.enum([ "update_identity", "delete_identity", ]); + +// Resources that require an ID (resource.id.action format) +const scopedResources = { + api: { idSchema: apiId, actionsSchema: apiActions }, + ratelimit: { idSchema: ratelimitNamespaceId, actionsSchema: ratelimitActions }, + rbac: { idSchema: rbacId, actionsSchema: rbacActions }, + identity: { idSchema: identityEnvId, actionsSchema: identityActions }, +} as const; + export type Resources = { [resourceId in `api.${z.infer}`]: z.infer; } & { @@ -80,6 +90,7 @@ export type Resources = { } & { [resourceId in `identity.${z.infer}`]: z.infer; }; + export type UnkeyPermission = Flatten | "*"; /** * Validation for roles used for our root keys @@ -93,27 +104,18 @@ export const unkeyPermissionValidation = z.custom().refine((s) return true; } const split = s.split("."); + + // Handle scoped resource.id.action format (3 parts) if (split.length !== 3) { return false; } const [resource, id, action] = split; - switch (resource) { - case "api": { - return apiId.safeParse(id).success && apiActions.safeParse(action).success; - } - case "ratelimit": { - return ( - ratelimitNamespaceId.safeParse(id).success && ratelimitActions.safeParse(action).success - ); - } - case "rbac": { - return rbacId.safeParse(id).success && rbacActions.safeParse(action).success; - } - case "identity": { - return identityEnvId.safeParse(id).success && identityActions.safeParse(action).success; - } - default: { - return false; - 
} + const resourceConfig = scopedResources[resource as keyof typeof scopedResources]; + if (resourceConfig) { + return ( + resourceConfig.idSchema.safeParse(id).success && + resourceConfig.actionsSchema.safeParse(action).success + ); } + return false; }); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ac57e82868..d98722d087 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -445,8 +445,8 @@ importers: version: 0.34.3 devDependencies: mintlify: - specifier: ^4.2.31 - version: 4.2.31(@radix-ui/react-popover@1.1.15)(@types/node@22.14.0)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3) + specifier: ^4.2.144 + version: 4.2.144(@radix-ui/react-popover@1.1.15)(@types/node@22.14.0)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3) apps/engineering: dependencies: @@ -633,7 +633,7 @@ importers: devDependencies: checkly: specifier: latest - version: 6.8.2(@types/node@20.14.9)(typescript@5.5.3) + version: 6.0.1(@types/node@20.14.9)(typescript@5.5.3) ts-node: specifier: 10.9.1 version: 10.9.1(@types/node@20.14.9)(typescript@5.5.3) @@ -1297,12 +1297,12 @@ packages: json-schema: 0.4.0 dev: false - /@alcalzone/ansi-tokenize@0.1.3: - resolution: {integrity: sha512-3yWxPTq3UQ/FY9p1ErPxIyfT64elWaMvM9lIHnaqpyft63tkxodF5aUElYHrdisWve5cETkh1+KBw1yJuW0aRw==} - engines: {node: '>=14.13.1'} + /@alcalzone/ansi-tokenize@0.2.2: + resolution: {integrity: sha512-mkOh+Wwawzuf5wa30bvc4nA+Qb6DIrGWgBhRR/Pw4T9nsgYait8izvXkNyU78D6Wcu3Z+KUdwCmLCxlWjEotYA==} + engines: {node: '>=18'} dependencies: ansi-styles: 6.2.3 - is-fullwidth-code-point: 4.0.0 + is-fullwidth-code-point: 5.1.0 dev: true /@alloc/quick-lru@5.2.0: @@ -1536,6 +1536,16 @@ packages: js-yaml: 4.1.0 dev: false + /@ark/schema@0.53.0: + resolution: {integrity: sha512-1PB7RThUiTlmIu8jbSurPrhHpVixPd4C+xNBUF/HrjIENCeDcAMg36n5mpMzED7OQGDVIzpfXXiMnaTiutjHJw==} + dependencies: + '@ark/util': 0.53.0 + dev: true + + /@ark/util@0.53.0: + resolution: {integrity: 
sha512-TGn4gLlA6dJcQiqrtCtd88JhGb2XBHo6qIejsDre+nxpGuUVW4G3YZGVrwjNBTO0EyR+ykzIo4joHJzOj+/cpA==} + dev: true + /@asamuzakjp/css-color@3.2.0: resolution: {integrity: sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==} dependencies: @@ -5261,7 +5271,7 @@ packages: transitivePeerDependencies: - supports-color - /@mdx-js/react@3.1.1(@types/react@18.3.11)(react@18.3.1): + /@mdx-js/react@3.1.1(@types/react@18.3.11)(react@19.2.0): resolution: {integrity: sha512-f++rKLQgUVYDAtECQ6fn/is15GkEH9+nZPM3MS0RcxVqoTfawHvDlSCH7JbMhAM6uJ32v3eXLvLmLvjGu7PTQw==} peerDependencies: '@types/react': '>=16' @@ -5269,7 +5279,7 @@ packages: dependencies: '@types/mdx': 2.0.13 '@types/react': 18.3.11 - react: 18.3.1 + react: 19.2.0 dev: true /@mendable/firecrawl-js@1.5.2(ws@8.18.3): @@ -5291,25 +5301,30 @@ packages: langium: 3.3.1 dev: false - /@mintlify/cli@4.0.635(@radix-ui/react-popover@1.1.15)(@types/node@22.14.0)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3): - resolution: {integrity: sha512-jvTcEz3Zt31AQG9K+VBzJujssYKL6ED5NHeajsoysH2aJzLM8Xp7h+JPseVZmeDUU7jp+yCF/fNeGmmlUQXyqQ==} + /@mintlify/cli@4.0.748(@radix-ui/react-popover@1.1.15)(@types/node@22.14.0)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3): + resolution: {integrity: sha512-Yj/QSzG5EGhQ6v+68kAXo4CnfeQdGLklr6tVeCE6lZtP3WcvOyyyrHZRBIHCmWyUgspzedEBZ2i41w7HmvhNcQ==} engines: {node: '>=18.0.0'} hasBin: true dependencies: - '@mintlify/common': 1.0.458(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3) - '@mintlify/link-rot': 3.0.583(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3) - '@mintlify/models': 0.0.210 - '@mintlify/prebuild': 1.0.576(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3) - '@mintlify/previewing': 
4.0.620(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3) - '@mintlify/validation': 0.1.421 + '@mintlify/common': 1.0.555(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/link-rot': 3.0.693(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/models': 0.0.230 + '@mintlify/prebuild': 1.0.680(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/previewing': 4.0.729(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3) + '@mintlify/validation': 0.1.483(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) chalk: 5.6.2 + color: 4.2.3 detect-port: 1.6.1 fs-extra: 11.3.2 - ink: 5.2.1(@types/react@18.3.11)(react@18.3.1) + gray-matter: 4.0.3 + ink: 6.4.0(@types/react@18.3.11)(react@19.2.0) inquirer: 12.10.0(@types/node@22.14.0) js-yaml: 4.1.0 - react: 18.3.1 + mdast: 3.0.0 + mdast-util-mdx-jsx: 3.2.0 + react: 19.2.0 semver: 7.7.3 + unist-util-visit: 5.0.0 yargs: 17.7.2 transitivePeerDependencies: - '@radix-ui/react-popover' @@ -5324,41 +5339,49 @@ packages: - react-dom - react-native-b4a - supports-color + - ts-node - typescript - utf-8-validate dev: true - /@mintlify/common@1.0.458(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3): - resolution: {integrity: sha512-yYwEfKJOjH+X8B13b5cjbpy3X1bSjEhxSFAVFhGFNbM5eT8f0S/Yu3i+jcJXbkl+5zifa4b4Mh4Vk7oUhXRxuQ==} + /@mintlify/common@1.0.555(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3): + resolution: {integrity: sha512-9/j3pNOjGZDSy1xgIY9CWrk5sfSnag7LDHrvPNAwE17GGq7V3Mwx/SnDj+0P72bSHURvfl1bm31c92WgG31xuw==} dependencies: '@asyncapi/parser': 3.4.0 - '@mintlify/mdx': 
2.0.13(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3) - '@mintlify/models': 0.0.210 - '@mintlify/openapi-parser': 0.0.7 - '@mintlify/validation': 0.1.421 + '@mintlify/mdx': 3.0.1(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/models': 0.0.230 + '@mintlify/openapi-parser': 0.0.8 + '@mintlify/validation': 0.1.483(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) '@sindresorhus/slugify': 2.2.1 acorn: 8.15.0 acorn-jsx: 5.3.2(acorn@8.15.0) + color-blend: 4.0.0 estree-util-to-js: 2.0.0 estree-walker: 3.0.3 gray-matter: 4.0.3 hast-util-from-html: 2.0.3 hast-util-to-html: 9.0.5 hast-util-to-text: 4.0.2 + hex-rgb: 5.0.0 js-yaml: 4.1.0 lodash: 4.17.21 mdast: 3.0.0 mdast-util-from-markdown: 2.0.2 + mdast-util-gfm: 3.1.0 mdast-util-mdx: 3.0.0 mdast-util-mdx-jsx: 3.2.0 + micromark-extension-gfm: 3.0.0 micromark-extension-mdx-jsx: 3.0.2 + micromark-extension-mdxjs: 3.0.0 openapi-types: 12.1.3 + postcss: 8.5.6 remark: 15.0.1 remark-frontmatter: 5.0.0 remark-gfm: 4.0.1 remark-math: 6.0.0 remark-mdx: 3.1.1 remark-stringify: 11.0.0 + tailwindcss: 3.4.15 unified: 11.0.5 unist-builder: 4.0.0 unist-util-map: 4.0.0 @@ -5375,17 +5398,18 @@ packages: - react - react-dom - supports-color + - ts-node - typescript dev: true - /@mintlify/link-rot@3.0.583(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3): - resolution: {integrity: sha512-rZ47hB/lUdQhYnbH4QrBl5Sph2JTJr78LhXlKTcY226fQUD3yaRNCmk23lIkoGm7sc2gQEIuPW+guPL8KJx8XA==} + /@mintlify/link-rot@3.0.693(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3): + resolution: {integrity: sha512-Eft8OIikui67HB78rCIpY20boZ0rXBQxi2m2JfUctaaWq7E4xUiEVFHzkgTBWkupxFOp7b73nhTwsUPf/TH1Lg==} engines: {node: '>=18.0.0'} dependencies: - '@mintlify/common': 
1.0.458(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3) - '@mintlify/prebuild': 1.0.576(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3) - '@mintlify/previewing': 4.0.620(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3) - '@mintlify/validation': 0.1.421 + '@mintlify/common': 1.0.555(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/prebuild': 1.0.680(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/previewing': 4.0.729(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3) + '@mintlify/validation': 0.1.483(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) fs-extra: 11.3.2 unist-util-visit: 4.1.2 transitivePeerDependencies: @@ -5401,12 +5425,13 @@ packages: - react-dom - react-native-b4a - supports-color + - ts-node - typescript - utf-8-validate dev: true - /@mintlify/mdx@2.0.13(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3): - resolution: {integrity: sha512-sAGk0j5COB4P1j2jG10ZYdLOQNhxFAhWX1w0lOXc3RhehVHMtMgImz+OzydR6yWcvH/S8eN0n8VEUy9BYXhe3w==} + /@mintlify/mdx@3.0.1(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3): + resolution: {integrity: sha512-zNeYjXIhBt4Ea/PAeLjtK08Z+iZHmlkizs4htaZWXtL4kkZYTuqeayssezIYkA82n6gMFp5FphTKxOm+nv//cQ==} peerDependencies: '@radix-ui/react-popover': ^1.1.15 react: ^18.3.1 @@ -5420,8 +5445,8 @@ packages: mdast-util-gfm: 3.1.0 mdast-util-mdx-jsx: 3.2.0 mdast-util-to-hast: 13.2.0 - next-mdx-remote-client: 1.1.4(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(unified@11.0.5) - react: 18.3.1 + next-mdx-remote-client: 
1.1.4(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(unified@11.0.5) + react: 19.2.0 react-dom: 18.3.1(react@18.3.1) rehype-katex: 7.0.1 remark-gfm: 4.0.1 @@ -5436,8 +5461,8 @@ packages: - typescript dev: true - /@mintlify/models@0.0.210: - resolution: {integrity: sha512-AtqmFILN1bGcxFPUDj4KYUto7lumCMyMowhC9wh2AkTJBSFxnPSTAhFj3QHeCn73isPHwK2ZcwECWyoBz/5nBA==} + /@mintlify/models@0.0.230: + resolution: {integrity: sha512-G/mg8TytxLn29zete+KXsFKFmi/N/C6a0HX15pN+qq1CWHP3hzi5mELUf7xID/w77bkzN1tVSFKgkwZVBr01OA==} engines: {node: '>=18.0.0'} dependencies: axios: 1.13.0 @@ -5446,8 +5471,8 @@ packages: - debug dev: true - /@mintlify/openapi-parser@0.0.7: - resolution: {integrity: sha512-3ecbkzPbsnkKVZJypVL0H5pCTR7a4iLv4cP7zbffzAwy+vpH70JmPxNVpPPP62yLrdZlfNcMxu5xKeT7fllgMg==} + /@mintlify/openapi-parser@0.0.8: + resolution: {integrity: sha512-9MBRq9lS4l4HITYCrqCL7T61MOb20q9IdU7HWhqYMNMM1jGO1nHjXasFy61yZ8V6gMZyyKQARGVoZ0ZrYN48Og==} engines: {node: '>=18'} dependencies: ajv: 8.17.1 @@ -5458,13 +5483,13 @@ packages: yaml: 2.8.1 dev: true - /@mintlify/prebuild@1.0.576(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3): - resolution: {integrity: sha512-qEK9BXkAUWvUn0EXa2mstrJDDQO5qxeJYPMD+gfbcHaIJ6suJ6eZSC0VMDpouUra+YxZGHM+HKI0BfeHOKCftA==} + /@mintlify/prebuild@1.0.680(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3): + resolution: {integrity: sha512-UPJC0sIs7+52ViTazzZo8nZiUAHdV3O0Bw4entunMWNTy7SClKAIH3HbYXu2M2+dfCmtnzmnG8BNuQk80HMq4A==} dependencies: - '@mintlify/common': 1.0.458(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3) - '@mintlify/openapi-parser': 0.0.7 - '@mintlify/scraping': 4.0.314(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3) - '@mintlify/validation': 0.1.421 + '@mintlify/common': 
1.0.555(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/openapi-parser': 0.0.8 + '@mintlify/scraping': 4.0.414(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/validation': 0.1.483(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) chalk: 5.6.2 favicons: 7.2.0 fs-extra: 11.3.2 @@ -5485,17 +5510,18 @@ packages: - react-dom - react-native-b4a - supports-color + - ts-node - typescript - utf-8-validate dev: true - /@mintlify/previewing@4.0.620(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3): - resolution: {integrity: sha512-s51OZTAsRe4GDSoHf19CKBPZgSS5heqC9n+ENsAej22sd1tN8oGHl2Fw2rgSt7VgxdcINu/yXsEt9sgS2LPz7g==} + /@mintlify/previewing@4.0.729(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3): + resolution: {integrity: sha512-UhJojVE7KGBs1HL9e3BDs9TCzabD39kxNTht2u7VMr3oEoBleuzB7hNFUp4/c49AAwebh3Yx42NdHBedYAx99g==} engines: {node: '>=18.0.0'} dependencies: - '@mintlify/common': 1.0.458(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3) - '@mintlify/prebuild': 1.0.576(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3) - '@mintlify/validation': 0.1.421 + '@mintlify/common': 1.0.555(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/prebuild': 1.0.680(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/validation': 0.1.483(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) better-opn: 3.0.2 chalk: 5.6.2 chokidar: 3.6.0 @@ -5503,13 +5529,13 @@ packages: fs-extra: 11.3.2 got: 13.0.0 gray-matter: 4.0.3 - ink: 
5.2.1(@types/react@18.3.11)(react@18.3.1) - ink-spinner: 5.0.0(ink@5.2.1)(react@18.3.1) + ink: 6.4.0(@types/react@18.3.11)(react@19.2.0) + ink-spinner: 5.0.0(ink@6.4.0)(react@19.2.0) is-online: 10.0.0 js-yaml: 4.1.0 mdast: 3.0.0 openapi-types: 12.1.3 - react: 18.3.1 + react: 19.2.0 socket.io: 4.8.1 tar: 6.2.1 unist-util-visit: 4.1.2 @@ -5526,17 +5552,18 @@ packages: - react-dom - react-native-b4a - supports-color + - ts-node - typescript - utf-8-validate dev: true - /@mintlify/scraping@4.0.314(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3): - resolution: {integrity: sha512-vPNUJpYSXllf5A+hiSBze3urMkNzKn8CeQ58cu7CzIco05h8hH0hnZYG8JaBCReXk3kfosSiDB4veY9EBEsB0w==} + /@mintlify/scraping@4.0.414(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3): + resolution: {integrity: sha512-2ENtNtwaY2n/eEx4/lNn4ityENMTeZFv8PmoUDb4QSMdWBljXzp3CnpNwcPH6z2HDvkYdp3sEtb/p4Czr+gKxA==} engines: {node: '>=18.0.0'} hasBin: true dependencies: - '@mintlify/common': 1.0.458(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(typescript@5.9.3) - '@mintlify/openapi-parser': 0.0.7 + '@mintlify/common': 1.0.555(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/openapi-parser': 0.0.8 fs-extra: 11.3.2 hast-util-to-mdast: 10.1.2 js-yaml: 4.1.0 @@ -5564,21 +5591,30 @@ packages: - react-dom - react-native-b4a - supports-color + - ts-node - typescript - utf-8-validate dev: true - /@mintlify/validation@0.1.421: - resolution: {integrity: sha512-+NH9q7Lf50j3BL6R2p/kXinrrxlI9g2nGwq4gHVIaE5bzf/0E3FMoy4s4GvT6OfjuR6D/6oN1irQwpA6abliqA==} + /@mintlify/validation@0.1.483(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3): + resolution: {integrity: sha512-nV6yGwhqhsSbCyc41EMFOX1zWdngkop6PYThufXnfvgRe50Q7VetFgCqwrnxgQHP5gvfkaBEeAUiZLi9G9ZQVw==} 
dependencies: - '@mintlify/models': 0.0.210 + '@mintlify/mdx': 3.0.1(@radix-ui/react-popover@1.1.15)(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(typescript@5.9.3) + '@mintlify/models': 0.0.230 + arktype: 2.1.25 lcm: 0.0.3 lodash: 4.17.21 openapi-types: 12.1.3 zod: 3.23.8 zod-to-json-schema: 3.24.6(zod@3.23.8) transitivePeerDependencies: + - '@radix-ui/react-popover' + - '@types/react' - debug + - react + - react-dom + - supports-color + - typescript dev: true /@neon-rs/load@0.0.4: @@ -9778,7 +9814,7 @@ packages: resolution: {integrity: sha512-nty0tHUq2f1IKuFYsLM4CXLZGHdMn+X/IwEUIpeSOXt0QjMUbL0Em57iJUDzz+2MkWG83smIigNZ3fauGjqgdQ==} engines: {node: '>=8.3.0'} dependencies: - node-fetch: 2.6.7 + node-fetch: 2.7.0 tslib: 1.14.1 transitivePeerDependencies: - encoding @@ -11785,6 +11821,20 @@ packages: engines: {node: '>= 0.4'} dev: false + /arkregex@0.0.2: + resolution: {integrity: sha512-ttjDUICBVoXD/m8bf7eOjx8XMR6yIT2FmmW9vsN0FCcFOygEZvvIX8zK98tTdXkzi0LkRi5CmadB44jFEIyDNA==} + dependencies: + '@ark/util': 0.53.0 + dev: true + + /arktype@2.1.25: + resolution: {integrity: sha512-fdj10sNlUPeDRg1QUqMbzJ4Q7gutTOWOpLUNdcC4vxeVrN0G+cbDOvLbuxQOFj/NDAode1G7kwFv4yKwQvupJg==} + dependencies: + '@ark/schema': 0.53.0 + '@ark/util': 0.53.0 + arkregex: 0.0.2 + dev: true + /array-buffer-byte-length@1.0.2: resolution: {integrity: sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==} engines: {node: '>= 0.4'} @@ -12432,8 +12482,8 @@ packages: engines: {node: '>= 16'} dev: true - /checkly@6.8.2(@types/node@20.14.9)(typescript@5.5.3): - resolution: {integrity: sha512-7HgKD7AbK4sW5HIJNoKJKmOCQNDWcGtG9pwnlVlySTMf25pqARNlKuvWB7kFlB2PRsSwTFuOR64atdM/lPOGNA==} + /checkly@6.0.1(@types/node@20.14.9)(typescript@5.5.3): + resolution: {integrity: sha512-tPFERSn3bQ0WyqIXfpx/ith4f1l6Rhb4EI0iz5RcdgltJ6jbnjyJvbsTaWgIBQlAenKdZrKtJFsd4dodWVIgAQ==} engines: {node: ^18.19.0 || >=20.5.0} hasBin: true peerDependencies: @@ -12758,6 +12808,11 @@ packages: 
/collapse-white-space@2.1.0: resolution: {integrity: sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==} + /color-blend@4.0.0: + resolution: {integrity: sha512-fYODTHhI/NG+B5GnzvuL3kiFrK/UnkUezWFTgEPBTY5V+kpyfAn95Vn9sJeeCX6omrCOdxnqCL3CvH+6sXtIbw==} + engines: {node: '>=10.0.0'} + dev: true + /color-convert@1.9.3: resolution: {integrity: sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==} dependencies: @@ -16287,6 +16342,11 @@ packages: engines: {node: '>=6'} dev: false + /hex-rgb@5.0.0: + resolution: {integrity: sha512-NQO+lgVUCtHxZ792FodgW0zflK+ozS9X9dwGp9XvvmPlH7pyxd588cn24TD3rmPm/N0AIRXF10Otah8yKqGw4w==} + engines: {node: '>=12'} + dev: true + /homedir-polyfill@1.0.3: resolution: {integrity: sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA==} engines: {node: '>=0.10.0'} @@ -16517,7 +16577,7 @@ packages: dev: true optional: true - /ink-spinner@5.0.0(ink@5.2.1)(react@18.3.1): + /ink-spinner@5.0.0(ink@6.4.0)(react@19.2.0): resolution: {integrity: sha512-EYEasbEjkqLGyPOUc8hBJZNuC5GvXGMLu0w5gdTNskPc7Izc5vO3tdQEYnzvshucyGCBXc86ig0ujXPMWaQCdA==} engines: {node: '>=14.16'} peerDependencies: @@ -16525,24 +16585,24 @@ packages: react: '>=18.0.0' dependencies: cli-spinners: 2.9.2 - ink: 5.2.1(@types/react@18.3.11)(react@18.3.1) - react: 18.3.1 + ink: 6.4.0(@types/react@18.3.11)(react@19.2.0) + react: 19.2.0 dev: true - /ink@5.2.1(@types/react@18.3.11)(react@18.3.1): - resolution: {integrity: sha512-BqcUyWrG9zq5HIwW6JcfFHsIYebJkWWb4fczNah1goUO0vv5vneIlfwuS85twyJ5hYR/y18FlAYUxrO9ChIWVg==} - engines: {node: '>=18'} + /ink@6.4.0(@types/react@18.3.11)(react@19.2.0): + resolution: {integrity: sha512-v43isNGrHeFfipbQbwz7/Eg0+aWz3ASEdT/s1Ty2JtyBzR3maE0P77FwkMET+Nzh5KbRL3efLgkT/ZzPFzW3BA==} + engines: {node: '>=20'} peerDependencies: - '@types/react': '>=18.0.0' - react: '>=18.0.0' - react-devtools-core: ^4.19.1 + '@types/react': '>=19.0.0' + 
react: '>=19.0.0' + react-devtools-core: ^6.1.2 peerDependenciesMeta: '@types/react': optional: true react-devtools-core: optional: true dependencies: - '@alcalzone/ansi-tokenize': 0.1.3 + '@alcalzone/ansi-tokenize': 0.2.2 '@types/react': 18.3.11 ansi-escapes: 7.1.1 ansi-styles: 6.2.3 @@ -16554,11 +16614,10 @@ packages: code-excerpt: 4.0.0 es-toolkit: 1.41.0 indent-string: 5.0.0 - is-in-ci: 1.0.0 + is-in-ci: 2.0.0 patch-console: 2.0.0 - react: 18.3.1 - react-reconciler: 0.29.2(react@18.3.1) - scheduler: 0.23.2 + react: 19.2.0 + react-reconciler: 0.32.0(react@19.2.0) signal-exit: 3.0.7 slice-ansi: 7.1.2 stack-utils: 2.0.6 @@ -16841,9 +16900,9 @@ packages: /is-hexadecimal@2.0.1: resolution: {integrity: sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==} - /is-in-ci@1.0.0: - resolution: {integrity: sha512-eUuAjybVTHMYWm/U+vBO1sY/JOCgoPCXRxzdju0K+K0BiGW0SChEL1MLC0PoCIR1OlPo5YAp8HuQoUlsWEICwg==} - engines: {node: '>=18'} + /is-in-ci@2.0.0: + resolution: {integrity: sha512-cFeerHriAnhrQSbpAxL37W1wcJKUUX07HyLWZCW1URJT/ra3GyUTzBgUnh24TMVfNTV2Hij2HLxkPHFZfOZy5w==} + engines: {node: '>=20'} hasBin: true dev: true @@ -18440,12 +18499,12 @@ packages: minipass: 7.1.2 dev: true - /mintlify@4.2.31(@radix-ui/react-popover@1.1.15)(@types/node@22.14.0)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3): - resolution: {integrity: sha512-DFDlm0KRUGcnaO2kAw3QpAIVXQ4qUy63OSOzwleKLN+sqJJJIc8cd5rfd98v5ORmPtf0YJJQGNX6SvsvIxOHCQ==} + /mintlify@4.2.144(@radix-ui/react-popover@1.1.15)(@types/node@22.14.0)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3): + resolution: {integrity: sha512-7H6289CP3pkzMOqWMi3q1Nj710ct2Cgx8lkjUJkNRFM9NTS7FkVGGLPa4nZET2qIjjZm+1bV5+FxHfzMB3f17A==} engines: {node: '>=18.0.0'} hasBin: true dependencies: - '@mintlify/cli': 4.0.635(@radix-ui/react-popover@1.1.15)(@types/node@22.14.0)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3) + '@mintlify/cli': 
4.0.748(@radix-ui/react-popover@1.1.15)(@types/node@22.14.0)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.9.3) transitivePeerDependencies: - '@radix-ui/react-popover' - '@types/node' @@ -18459,6 +18518,7 @@ packages: - react-dom - react-native-b4a - supports-color + - ts-node - typescript - utf-8-validate dev: true @@ -18642,7 +18702,7 @@ packages: engines: {node: '>= 0.4.0'} dev: true - /next-mdx-remote-client@1.1.4(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)(unified@11.0.5): + /next-mdx-remote-client@1.1.4(@types/react@18.3.11)(react-dom@18.3.1)(react@19.2.0)(unified@11.0.5): resolution: {integrity: sha512-psCMdO50tfoT1kAH7OGXZvhyRfiHVK6IqwjmWFV5gtLo4dnqjAgcjcLNeJ92iI26UNlKShxYrBs1GQ6UXxk97A==} engines: {node: '>=18.18.0'} peerDependencies: @@ -18651,8 +18711,8 @@ packages: dependencies: '@babel/code-frame': 7.27.1 '@mdx-js/mdx': 3.1.1 - '@mdx-js/react': 3.1.1(@types/react@18.3.11)(react@18.3.1) - react: 18.3.1 + '@mdx-js/react': 3.1.1(@types/react@18.3.11)(react@19.2.0) + react: 19.2.0 react-dom: 18.3.1(react@18.3.1) remark-mdx-remove-esm: 1.2.1(unified@11.0.5) serialize-error: 12.0.0 @@ -20245,15 +20305,14 @@ packages: fast-deep-equal: 2.0.1 dev: false - /react-reconciler@0.29.2(react@18.3.1): - resolution: {integrity: sha512-zZQqIiYgDCTP/f1N/mAR10nJGrPD2ZR+jDSEsKWJHYC7Cm2wodlwbR3upZRdC3cjIjSlTLNVyO7Iu0Yy7t2AYg==} + /react-reconciler@0.32.0(react@19.2.0): + resolution: {integrity: sha512-2NPMOzgTlG0ZWdIf3qG+dcbLSoAc/uLfOwckc3ofy5sSK0pLJqnQLpUFxvGcN2rlXSjnVtGeeFLNimCQEj5gOQ==} engines: {node: '>=0.10.0'} peerDependencies: - react: ^18.3.1 + react: ^19.1.0 dependencies: - loose-envify: 1.4.0 - react: 18.3.1 - scheduler: 0.23.2 + react: 19.2.0 + scheduler: 0.26.0 dev: true /react-remove-scroll-bar@2.3.8(@types/react@18.3.11)(react@18.3.1): @@ -20382,6 +20441,11 @@ packages: dependencies: loose-envify: 1.4.0 + /react@19.2.0: + resolution: {integrity: sha512-tmbWg6W31tQLeB5cdIBOicJDJRR2KzXsV7uSK9iNfLWQ5bIZfxuPEHp7M8wiHyHnn0DD1i7w3Zmin0FtkrwoCQ==} + 
engines: {node: '>=0.10.0'} + dev: true + /read-cache@1.0.0: resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==} dependencies: @@ -21023,6 +21087,10 @@ packages: dependencies: loose-envify: 1.4.0 + /scheduler@0.26.0: + resolution: {integrity: sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==} + dev: true + /schema-utils@4.3.3: resolution: {integrity: sha512-eflK8wEtyOE6+hsaRVPxvUKYCpRgzLqDTb8krvAsRIwOGlHoSgYLgBXoubGgLd2fT41/OUYdb48v4k4WWHQurA==} engines: {node: '>= 10.13.0'}