diff --git a/apps/api/src/routes/v1_analytics_getVerifications.happy.test.ts b/apps/api/src/routes/v1_analytics_getVerifications.happy.test.ts index e1f5a6d16a..0a076aa188 100644 --- a/apps/api/src/routes/v1_analytics_getVerifications.happy.test.ts +++ b/apps/api/src/routes/v1_analytics_getVerifications.happy.test.ts @@ -40,28 +40,35 @@ describe("with no data", () => { describe.each([ // generate and query times are different to ensure the query covers the entire generate interval // and the used toStartOf function in clickhouse + // NOTE: Using dynamic dates to avoid TTL deletion (table has 1 month TTL) { granularity: "hour", - generate: { start: "2024-12-05", end: "2024-12-07" }, - query: { start: "2024-12-04", end: "2024-12-10" }, + generateDaysAgo: { start: 5, end: 3 }, // 5 days ago to 3 days ago + queryDaysAgo: { start: 6, end: 0 }, // 6 days ago to now }, { granularity: "day", - generate: { start: "2024-12-05", end: "2024-12-07" }, - query: { start: "2024-12-01", end: "2024-12-10" }, + generateDaysAgo: { start: 5, end: 3 }, + queryDaysAgo: { start: 9, end: 0 }, }, { granularity: "month", - generate: { start: "2024-10-1", end: "2025-10-12" }, - query: { start: "2023-12-01", end: "2026-12-10" }, + generateDaysAgo: { start: 25, end: 5 }, + queryDaysAgo: { start: 28, end: 0 }, }, ])("per $granularity", (tc) => { test("all verifications are accounted for", { timeout: 120_000 }, async (t) => { const h = await IntegrationHarness.init(t); + const now = Date.now(); + const generateStart = now - tc.generateDaysAgo.start * 24 * 60 * 60 * 1000; + const generateEnd = now - tc.generateDaysAgo.end * 24 * 60 * 60 * 1000; + const queryStart = now - tc.queryDaysAgo.start * 24 * 60 * 60 * 1000; + const queryEnd = now - tc.queryDaysAgo.end * 24 * 60 * 60 * 1000; + const verifications = generate({ - start: new Date(tc.generate.start).getTime(), - end: new Date(tc.generate.end).getTime(), + start: generateStart, + end: generateEnd, length: 10_000, workspaceId: 
h.resources.userWorkspace.id, keySpaceId: h.resources.userKeyAuth.id, @@ -73,12 +80,13 @@ describe.each([ const inserted = await h.ch.querier.query({ query: - "SELECT COUNT(*) AS count from verifications.raw_key_verifications_v1 WHERE workspace_id={workspaceId:String}", + "SELECT COUNT(*) AS count from default.key_verifications_raw_v2 WHERE workspace_id={workspaceId:String}", params: z.object({ workspaceId: z.string() }), schema: z.object({ count: z.number() }), })({ workspaceId: h.resources.userWorkspace.id, }); + expect(inserted.err).toEqual(undefined); expect(inserted.val!.at(0)?.count).toEqual(verifications.length); @@ -87,8 +95,8 @@ describe.each([ const res = await h.get({ url: "/v1/analytics.getVerifications", searchparams: { - start: new Date(tc.query.start).getTime().toString(), - end: new Date(tc.query.end).getTime().toString(), + start: queryStart.toString(), + end: queryEnd.toString(), groupBy: tc.granularity, apiId: h.resources.userApi.id, }, @@ -99,18 +107,78 @@ describe.each([ expect(res.status, `expected 200, received: ${JSON.stringify(res, null, 2)}`).toBe(200); + // For month/day granularity with Date columns, only count data from buckets + // that fall within the query date range (ignoring time component) + const queryStartDate = new Date(queryStart); + const queryEndDate = new Date(queryEnd); + const shouldCountVerification = (v: any) => { + if (tc.granularity === "month") { + // Get the bucket (month start) for this verification + const vDate = new Date(v.time); + const bucketDate = new Date(Date.UTC(vDate.getUTCFullYear(), vDate.getUTCMonth(), 1)); + + // Compare bucket date against query range (as Date, no time) + const startDateOnly = new Date( + Date.UTC( + queryStartDate.getUTCFullYear(), + queryStartDate.getUTCMonth(), + queryStartDate.getUTCDate(), + ), + ); + const endDateOnly = new Date( + Date.UTC( + queryEndDate.getUTCFullYear(), + queryEndDate.getUTCMonth(), + queryEndDate.getUTCDate(), + ), + ); + + return bucketDate >= 
startDateOnly && bucketDate <= endDateOnly; + } + if (tc.granularity === "day") { + // Get the bucket (day start) for this verification + const vDate = new Date(v.time); + const bucketDate = new Date( + Date.UTC(vDate.getUTCFullYear(), vDate.getUTCMonth(), vDate.getUTCDate()), + ); + + const startDateOnly = new Date( + Date.UTC( + queryStartDate.getUTCFullYear(), + queryStartDate.getUTCMonth(), + queryStartDate.getUTCDate(), + ), + ); + const endDateOnly = new Date( + Date.UTC( + queryEndDate.getUTCFullYear(), + queryEndDate.getUTCMonth(), + queryEndDate.getUTCDate(), + ), + ); + + return bucketDate >= startDateOnly && bucketDate <= endDateOnly; + } + return true; // For hour granularity, count all + }; + + let expectedTotal = 0; const outcomes = verifications.reduce( (acc, v) => { + if (!shouldCountVerification(v)) { + return acc; + } if (!acc[v.outcome]) { acc[v.outcome] = 0; } acc[v.outcome]++; + expectedTotal++; return acc; }, {} as { [K in (typeof POSSIBLE_OUTCOMES)[number]]: number }, ); - expect(res.body.reduce((sum, d) => sum + d.total, 0)).toEqual(verifications.length); + expect(res.body.reduce((sum, d) => sum + d.total, 0)).toEqual(expectedTotal); expect(res.body.reduce((sum, d) => sum + (d.valid ?? 0), 0)).toEqual(outcomes.VALID); expect(res.body.reduce((sum, d) => sum + (d.notFound ?? 0), 0)).toEqual(0); expect(res.body.reduce((sum, d) => sum + (d.forbidden ?? 
0), 0)).toEqual(0); @@ -420,13 +488,32 @@ describe("RFC scenarios", () => { expect(res.body.length).lte(2); expect(res.body.length).gte(1); + + // With Date columns, the API compares Date('start') to Date(bucket) + // This includes all months where Date(month_start) falls between Date(start) and Date(end) + const startDate = new Date(start); + const endDate = new Date(end); + let total = 0; const outcomes = verifications.reduce( (acc, v) => { - if ( - v.identity_id !== identity.id || - new Date(v.time).getUTCMonth() !== new Date(now).getUTCMonth() - ) { + if (v.identity_id !== identity.id) { + return acc; + } + + // Check if verification date (as Date, no time) falls within range + const vDate = new Date(v.time); + const vDateOnly = new Date( + Date.UTC(vDate.getUTCFullYear(), vDate.getUTCMonth(), vDate.getUTCDate()), + ); + const startDateOnly = new Date( + Date.UTC(startDate.getUTCFullYear(), startDate.getUTCMonth(), startDate.getUTCDate()), + ); + const endDateOnly = new Date( + Date.UTC(endDate.getUTCFullYear(), endDate.getUTCMonth(), endDate.getUTCDate()), + ); + + if (vDateOnly < startDateOnly || vDateOnly > endDateOnly) { return acc; } diff --git a/apps/api/src/routes/v1_analytics_getVerifications.ts b/apps/api/src/routes/v1_analytics_getVerifications.ts index f166d9f6e9..2496d45607 100644 --- a/apps/api/src/routes/v1_analytics_getVerifications.ts +++ b/apps/api/src/routes/v1_analytics_getVerifications.ts @@ -222,24 +222,24 @@ export const registerV1AnalyticsGetVerifications = (app: App) => const tables = { hour: { - name: "verifications.key_verifications_per_hour_v3", + name: "default.key_verifications_per_hour_v2", fill: `WITH FILL FROM toStartOfHour(fromUnixTimestamp64Milli({ start: Int64 })) TO toStartOfHour(fromUnixTimestamp64Milli({ end: Int64 })) STEP INTERVAL 1 HOUR`, }, day: { - name: "verifications.key_verifications_per_day_v3", + name: "default.key_verifications_per_day_v2", fill: `WITH FILL -FROM toStartOfDay(fromUnixTimestamp64Milli({ start: 
Int64 })) -TO toStartOfDay(fromUnixTimestamp64Milli({ end: Int64 })) +FROM toDate(toStartOfDay(fromUnixTimestamp64Milli({ start: Int64 }))) +TO toDate(toStartOfDay(fromUnixTimestamp64Milli({ end: Int64 }))) STEP INTERVAL 1 DAY`, }, month: { - name: "verifications.key_verifications_per_month_v3", + name: "default.key_verifications_per_month_v2", fill: `WITH FILL -FROM toDateTime(toStartOfMonth(fromUnixTimestamp64Milli({ start: Int64 }))) -TO toDateTime(toStartOfMonth(fromUnixTimestamp64Milli({ end: Int64 }))) +FROM toDate(toStartOfMonth(fromUnixTimestamp64Milli({ start: Int64 }))) +TO toDate(toStartOfMonth(fromUnixTimestamp64Milli({ end: Int64 }))) STEP INTERVAL 1 MONTH`, }, } as const; @@ -389,8 +389,16 @@ STEP INTERVAL 1 MONTH`, if (filteredKeyIds && filteredKeyIds.length > 0) { where.push("AND key_id IN {keyIds:Array(String)}"); } - where.push("AND time >= fromUnixTimestamp64Milli({start:Int64})"); - where.push("AND time <= fromUnixTimestamp64Milli({end:Int64})"); + + // For month and day tables, the time column is Date not DateTime + // Convert the timestamp to Date for proper comparison + if (table === tables.month || table === tables.day) { + where.push("AND time >= toDate(fromUnixTimestamp64Milli({start:Int64}))"); + where.push("AND time <= toDate(fromUnixTimestamp64Milli({end:Int64}))"); + } else { + where.push("AND time >= fromUnixTimestamp64Milli({start:Int64})"); + where.push("AND time <= fromUnixTimestamp64Milli({end:Int64})"); + } const query: string[] = []; query.push(`SELECT ${[...new Set(select)].join(", ")}`); diff --git a/apps/engineering/content/docs/architecture/services/analytics.mdx b/apps/engineering/content/docs/architecture/services/analytics.mdx index c991edf7e9..19625ac427 100644 --- a/apps/engineering/content/docs/architecture/services/analytics.mdx +++ b/apps/engineering/content/docs/architecture/services/analytics.mdx @@ -64,11 +64,11 @@ Users query against friendly table names that map to actual ClickHouse tables: ```go TableAliases: 
map[string]string{ - "key_verifications": "default.key_verifications_raw_v2", - "key_verifications_per_minute": "default.key_verifications_per_minute_v2", - "key_verifications_per_hour": "default.key_verifications_per_hour_v2", - "key_verifications_per_day": "default.key_verifications_per_day_v2", - "key_verifications_per_month": "default.key_verifications_per_month_v2", + "key_verifications_v1": "default.key_verifications_raw_v2", + "key_verifications_per_minute_v1": "default.key_verifications_per_minute_v2", + "key_verifications_per_hour_v1": "default.key_verifications_per_hour_v2", + "key_verifications_per_day_v1": "default.key_verifications_per_day_v2", + "key_verifications_per_month_v1": "default.key_verifications_per_month_v2", } ``` diff --git a/apps/engineering/content/docs/architecture/services/clickhouse.mdx b/apps/engineering/content/docs/architecture/services/clickhouse.mdx index 17a48888fa..a05268bc13 100644 --- a/apps/engineering/content/docs/architecture/services/clickhouse.mdx +++ b/apps/engineering/content/docs/architecture/services/clickhouse.mdx @@ -170,7 +170,7 @@ If you need to understand or optimize a query: 1. Use the client's query method directly for custom queries: ```typescript const result = await ch.querier.query({ - query: `SELECT count() FROM verifications.raw_key_verifications_v1 WHERE workspace_id = {workspaceId: String}`, + query: `SELECT count() FROM default.key_verifications_raw_v2 WHERE workspace_id = {workspaceId: String}`, params: z.object({ workspaceId: z.string() }), schema: z.object({ count: z.number() }) })({ workspaceId: "ws_123" }); @@ -179,7 +179,7 @@ If you need to understand or optimize a query: 2.
Check performance with `EXPLAIN`: ```typescript const explain = await ch.querier.query({ - query: `EXPLAIN SELECT * FROM verifications.raw_key_verifications_v1 WHERE workspace_id = {workspaceId: String}`, + query: `EXPLAIN SELECT * FROM default.key_verifications_raw_v2 WHERE workspace_id = {workspaceId: String}`, params: z.object({ workspaceId: z.string() }), schema: z.object({ explain: z.string() }) })({ workspaceId: "ws_123" }); diff --git a/apps/engineering/content/docs/contributing/pull-request-checks.mdx b/apps/engineering/content/docs/contributing/pull-request-checks.mdx index 868f21280c..286dd8ee01 100644 --- a/apps/engineering/content/docs/contributing/pull-request-checks.mdx +++ b/apps/engineering/content/docs/contributing/pull-request-checks.mdx @@ -144,7 +144,7 @@ This ensures all values are properly escaped by ClickHouse's query engine. ### SQL Performance - ClickHouse -When querying ClickHouse, we have to be careful about what we are joining or querying since that particular data might be huge. For example, not handling queries to metrics.raw_api_requests_v1 properly can be quite problematic. +When querying ClickHouse, we have to be careful about what we are joining or querying since that particular data might be huge. For example, not handling queries to api_requests_raw_v2 properly can be quite problematic. To improve query performance: - Filter data as early as possible in the query @@ -163,8 +163,8 @@ To improve query performance: r.identifier, r.passed, m.host, - FROM ratelimits.raw_ratelimits_v1 r - LEFT JOIN metrics.raw_api_requests_v1 m ON + FROM ratelimits_raw_v2 r + LEFT JOIN api_requests_raw_v2 m ON r.request_id = m.request_id WHERE r.workspace_id = {workspaceId: String} AND r.namespace_id = {namespaceId: String} @@ -177,8 +177,8 @@ To improve query performance: LIMIT {limit: Int} ```` -Imagine this query: we are trying to get ratelimit details by joining with raw_api_requests_v1. 
-Since raw_api_requests_v1 might be huge, the query can take a long time to execute or consume too much memory. +Imagine this query: we are trying to get ratelimit details by joining with api_requests_raw_v2. +Since api_requests_raw_v2 might be huge, the query can take a long time to execute or consume too much memory. To optimize this, we should first filter as much as possible, with time and workspace_id being the most important factors. We can make the query much faster by eliminating unrelated workspaceId's and irrelevant time ranges. @@ -192,7 +192,7 @@ WITH filtered_ratelimits AS ( namespace_id, identifier, toUInt8(passed) as status - FROM ratelimits.raw_ratelimits_v1 r + FROM ratelimits_raw_v2 r WHERE workspace_id = {workspaceId: String} AND namespace_id = {namespaceId: String} AND time BETWEEN {startTime: UInt64} AND {endTime: UInt64} @@ -212,7 +212,7 @@ SELECT m.host, FROM filtered_ratelimits fr LEFT JOIN ( - SELECT * FROM metrics.raw_api_requests_v1 + SELECT * FROM api_requests_raw_v2 -- Those two filters are doing the heavy lifting now WHERE workspace_id = {workspaceId: String} AND time BETWEEN {startTime: UInt64} AND {endTime: UInt64} @@ -228,19 +228,19 @@ Finally, don't forget about adding indexes for our filters. 
While this step is o -- Composite index for workspace + time filtering -- Most effective when filtering workspace_id first, then time -- MINMAX type creates a sparse index with min/max values -ALTER TABLE ratelimits.raw_ratelimits_v1 +ALTER TABLE ratelimits_raw_v2 ADD INDEX idx_workspace_time (workspace_id, time) TYPE minmax GRANULARITY 1; -- Single-column index for JOIN operations -- Speeds up request_id matching between tables -- GRANULARITY 1 means finest possible index precision -ALTER TABLE ratelimits.raw_ratelimits_v1 +ALTER TABLE ratelimits_raw_v2 ADD INDEX idx_request_id (request_id) TYPE minmax GRANULARITY 1; -- Same indexes on metrics table to optimize both sides of JOIN -ALTER TABLE metrics.raw_api_requests_v1 +ALTER TABLE api_requests_raw_v2 ADD INDEX idx_workspace_time (workspace_id, time) TYPE minmax GRANULARITY 1; -ALTER TABLE metrics.raw_api_requests_v1 +ALTER TABLE api_requests_raw_v2 ADD INDEX idx_request_id (request_id) TYPE minmax GRANULARITY 1; ``` diff --git a/go/apps/api/integration/multi_node_ratelimiting/run.go b/go/apps/api/integration/multi_node_ratelimiting/run.go index 6bf0609240..832b053319 100644 --- a/go/apps/api/integration/multi_node_ratelimiting/run.go +++ b/go/apps/api/integration/multi_node_ratelimiting/run.go @@ -179,7 +179,7 @@ func RunRateLimitTest( data, selectErr := clickhouse.Select[aggregatedCounts]( ctx, h.CH.Conn(), - `SELECT count(*) as total_requests, countIf(passed > 0) as success_count, countIf(passed = 0) as failure_count FROM ratelimits.raw_ratelimits_v1 WHERE workspace_id = {workspace_id:String} AND namespace_id = {namespace_id:String}`, + `SELECT count(*) as total_requests, countIf(passed > 0) as success_count, countIf(passed = 0) as failure_count FROM default.ratelimits_raw_v2 WHERE workspace_id = {workspace_id:String} AND namespace_id = {namespace_id:String}`, map[string]string{ "workspace_id": h.Resources().UserWorkspace.ID, "namespace_id": namespaceID, @@ -205,7 +205,7 @@ func RunRateLimitTest( metricsCount 
:= uint64(0) uniqueCount := uint64(0) - row := h.CH.Conn().QueryRow(ctx, fmt.Sprintf(`SELECT count(*) as total_requests, count(DISTINCT request_id) as unique_requests FROM metrics.raw_api_requests_v1 WHERE workspace_id = '%s';`, h.Resources().UserWorkspace.ID)) + row := h.CH.Conn().QueryRow(ctx, fmt.Sprintf(`SELECT count(*) as total_requests, count(DISTINCT request_id) as unique_requests FROM default.api_requests_raw_v2 WHERE workspace_id = '%s';`, h.Resources().UserWorkspace.ID)) err = row.Scan(&metricsCount, &uniqueCount) diff --git a/go/apps/api/integration/multi_node_usagelimiting/run.go b/go/apps/api/integration/multi_node_usagelimiting/run.go index fe079ac88b..bd79f56aa4 100644 --- a/go/apps/api/integration/multi_node_usagelimiting/run.go +++ b/go/apps/api/integration/multi_node_usagelimiting/run.go @@ -176,7 +176,7 @@ func RunUsageLimitTest( data, selectErr := clickhouse.Select[aggregatedCounts]( ctx, h.CH.Conn(), - `SELECT count(*) as total_requests, countIf(outcome = 'VALID') as success_count, countIf(outcome = 'USAGE_EXCEEDED') as failure_count FROM verifications.raw_key_verifications_v1 WHERE workspace_id = {workspace_id:String} AND key_id = {key_id:String}`, + `SELECT count(*) as total_requests, countIf(outcome = 'VALID') as success_count, countIf(outcome = 'USAGE_EXCEEDED') as failure_count FROM default.key_verifications_raw_v2 WHERE workspace_id = {workspace_id:String} AND key_id = {key_id:String}`, map[string]string{ "workspace_id": h.Resources().UserWorkspace.ID, "key_id": keyResponse.KeyID, @@ -203,7 +203,7 @@ func RunUsageLimitTest( require.Eventually(t, func() bool { metricsCount := uint64(0) uniqueCount := uint64(0) - row := h.CH.Conn().QueryRow(ctx, fmt.Sprintf(`SELECT count(*) as total_requests, count(DISTINCT request_id) as unique_requests FROM metrics.raw_api_requests_v1 WHERE workspace_id = '%s';`, h.Resources().UserWorkspace.ID)) + row := h.CH.Conn().QueryRow(ctx, fmt.Sprintf(`SELECT count(*) as total_requests, count(DISTINCT request_id) 
as unique_requests FROM default.api_requests_raw_v2 WHERE workspace_id = '%s';`, h.Resources().UserWorkspace.ID)) err := row.Scan(&metricsCount, &uniqueCount) require.NoError(t, err) diff --git a/go/apps/api/routes/v2_ratelimit_limit/200_test.go b/go/apps/api/routes/v2_ratelimit_limit/200_test.go index 12d102960c..d1fb9e7653 100644 --- a/go/apps/api/routes/v2_ratelimit_limit/200_test.go +++ b/go/apps/api/routes/v2_ratelimit_limit/200_test.go @@ -127,13 +127,13 @@ func TestLimitSuccessfully(t *testing.T) { res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) require.Equal(t, 200, res.Status, "expected 200, received: %v", res.Body) - row := schema.RatelimitRequestV1{} + row := schema.RatelimitV2{} require.Eventually(t, func() bool { - data, err := clickhouse.Select[schema.RatelimitRequestV1]( + data, err := clickhouse.Select[schema.RatelimitV2]( ctx, h.ClickHouse.Conn(), - "SELECT * FROM ratelimits.raw_ratelimits_v1 WHERE workspace_id = {workspace_id:String} AND namespace_id = {namespace_id:String}", + "SELECT * FROM default.ratelimits_raw_v2 WHERE workspace_id = {workspace_id:String} AND namespace_id = {namespace_id:String}", map[string]string{ "workspace_id": h.Resources().UserWorkspace.ID, "namespace_id": namespaceID, diff --git a/go/pkg/clickhouse/billable_ratelimits.go b/go/pkg/clickhouse/billable_ratelimits.go index f27c6e5d82..82a672446d 100644 --- a/go/pkg/clickhouse/billable_ratelimits.go +++ b/go/pkg/clickhouse/billable_ratelimits.go @@ -22,7 +22,7 @@ func (c *clickhouse) GetBillableRatelimits(ctx context.Context, workspaceID stri query := ` SELECT sum(count) as count - FROM billing.billable_ratelimits_per_month_v1 + FROM default.billable_ratelimits_per_month_v2 WHERE workspace_id = ? AND year = ? AND month = ? 
diff --git a/go/pkg/clickhouse/billable_verifications.go b/go/pkg/clickhouse/billable_verifications.go index 6c6b85986f..05dc24fb5e 100644 --- a/go/pkg/clickhouse/billable_verifications.go +++ b/go/pkg/clickhouse/billable_verifications.go @@ -22,7 +22,7 @@ func (c *clickhouse) GetBillableVerifications(ctx context.Context, workspaceID s query := ` SELECT sum(count) as count - FROM billing.billable_verifications_per_month_v2 + FROM default.billable_verifications_per_month_v2 WHERE workspace_id = ? AND year = ? AND month = ? diff --git a/go/pkg/clickhouse/key_verifications_test.go b/go/pkg/clickhouse/key_verifications_test.go index 7e96c0bdbf..177c40be4b 100644 --- a/go/pkg/clickhouse/key_verifications_test.go +++ b/go/pkg/clickhouse/key_verifications_test.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "errors" + "fmt" "math/rand" "slices" "testing" @@ -98,7 +99,7 @@ func TestKeyVerifications(t *testing.T) { for i := 0; i < len(verifications); i += batchSize { t0 = time.Now() - batch, err := conn.PrepareBatch(ctx, "INSERT INTO key_verifications_raw_v2") + batch, err := conn.PrepareBatch(ctx, "INSERT INTO default.key_verifications_raw_v2") require.NoError(t, err) for _, row := range verifications[i:min(i+batchSize, len(verifications))] { @@ -112,17 +113,17 @@ func TestKeyVerifications(t *testing.T) { require.EventuallyWithT(t, func(c *assert.CollectT) { rawCount := uint64(0) - err := conn.QueryRow(ctx, "SELECT COUNT(*) FROM key_verifications_raw_v2 WHERE workspace_id = ?", workspaceID).Scan(&rawCount) + err := conn.QueryRow(ctx, "SELECT COUNT(*) FROM default.key_verifications_raw_v2 WHERE workspace_id = ?", workspaceID).Scan(&rawCount) require.NoError(c, err) require.Equal(c, len(verifications), int(rawCount)) }, time.Minute, time.Second) t.Run("totals are correct", func(t *testing.T) { - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, 
table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { t.Run(table, func(t *testing.T) { require.EventuallyWithT(t, func(c *assert.CollectT) { queried := int64(0) - err := conn.QueryRow(ctx, "SELECT SUM(count) FROM ? WHERE workspace_id = ?;", table, workspaceID).Scan(&queried) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT SUM(count) FROM %s WHERE workspace_id = ?;", table), workspaceID).Scan(&queried) require.NoError(c, err) t.Logf("expected %d, got %d", len(verifications), queried) require.Equal(c, len(verifications), int(queried)) @@ -137,12 +138,12 @@ func TestKeyVerifications(t *testing.T) { return acc }, map[string]int{}) - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { t.Run(table, func(t *testing.T) { for outcome, count := range countByOutcome { require.EventuallyWithT(t, func(c *assert.CollectT) { queried := int64(0) - err := conn.QueryRow(ctx, "SELECT SUM(count) FROM ? WHERE workspace_id = ? AND outcome = ?;", table, workspaceID, outcome).Scan(&queried) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT SUM(count) FROM %s WHERE workspace_id = ? 
AND outcome = ?;", table), workspaceID, outcome).Scan(&queried) require.NoError(c, err) t.Logf("%s expected %d, got %d", outcome, count, queried) require.Equal(c, count, int(queried)) @@ -163,14 +164,14 @@ func TestKeyVerifications(t *testing.T) { return acc }, map[string]int{}) - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { t.Run(table, func(t *testing.T) { t.Parallel() for outcome, count := range countByOutcome { require.EventuallyWithT(t, func(c *assert.CollectT) { queried := int64(0) - err := conn.QueryRow(ctx, "SELECT SUM(count) FROM ? WHERE workspace_id = ? AND key_id = ? AND outcome = ?;", table, workspaceID, keyID, outcome).Scan(&queried) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT SUM(count) FROM %s WHERE workspace_id = ? AND key_id = ? AND outcome = ?;", table), workspaceID, keyID, outcome).Scan(&queried) require.NoError(c, err) require.Equal(c, count, int(queried)) }, time.Minute, time.Second) @@ -190,12 +191,12 @@ func TestKeyVerifications(t *testing.T) { return acc }, map[string]int{}) - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { t.Run(table, func(t *testing.T) { for outcome, count := range countByOutcome { require.EventuallyWithT(t, func(c *assert.CollectT) { queried := int64(0) - err := conn.QueryRow(ctx, "SELECT SUM(count) FROM ? WHERE workspace_id = ? AND identity_id = ? 
AND outcome = ?;", table, workspaceID, identityID, outcome).Scan(&queried) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT SUM(count) FROM %s WHERE workspace_id = ? AND identity_id = ? AND outcome = ?;", table), workspaceID, identityID, outcome).Scan(&queried) require.NoError(c, err) require.Equal(c, count, int(queried)) }, time.Minute, time.Second) @@ -218,12 +219,12 @@ func TestKeyVerifications(t *testing.T) { return acc }, map[string]int{}) - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { t.Run(table, func(t *testing.T) { for outcome, count := range countByOutcome { require.EventuallyWithT(t, func(c *assert.CollectT) { queried := int64(0) - err := conn.QueryRow(ctx, "SELECT SUM(count) FROM ? WHERE workspace_id = ? AND indexOf(tags, ?) > 0 AND outcome = ?;", table, workspaceID, tag, outcome).Scan(&queried) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT SUM(count) FROM %s WHERE workspace_id = ? AND indexOf(tags, ?) 
> 0 AND outcome = ?;", table), workspaceID, tag, outcome).Scan(&queried) require.NoError(c, err) require.Equal(c, count, int(queried)) }, time.Minute, time.Second) @@ -241,7 +242,7 @@ func TestKeyVerifications(t *testing.T) { p75 := percentile(latencies, 0.75) p99 := percentile(latencies, 0.99) - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { t.Run(table, func(t *testing.T) { t.Parallel() var ( @@ -249,7 +250,7 @@ func TestKeyVerifications(t *testing.T) { queriedP75 float32 queriedP99 float32 ) - err := conn.QueryRow(ctx, "SELECT avgMerge(latency_avg), quantilesTDigestMerge(0.75)(latency_p75)[1], quantilesTDigestMerge(0.99)(latency_p99)[1] FROM ? WHERE workspace_id = ?;", table, workspaceID).Scan(&queriedAvg, &queriedP75, &queriedP99) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT avgMerge(latency_avg), quantilesTDigestMerge(0.75)(latency_p75)[1], quantilesTDigestMerge(0.99)(latency_p99)[1] FROM %s WHERE workspace_id = ?;", table), workspaceID).Scan(&queriedAvg, &queriedP75, &queriedP99) require.NoError(t, err) require.InDelta(t, avg, queriedAvg, 0.01) @@ -265,11 +266,11 @@ func TestKeyVerifications(t *testing.T) { return acc + v.SpentCredits }, int64(0)) - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { t.Run(table, func(t *testing.T) { t.Parallel() var queried int64 - err := conn.QueryRow(ctx, "SELECT sum(spent_credits) FROM ? 
WHERE workspace_id = ?;", table, workspaceID).Scan(&queried) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT sum(spent_credits) FROM %s WHERE workspace_id = ?;", table), workspaceID).Scan(&queried) require.NoError(t, err) require.Equal(t, credits, queried) @@ -286,11 +287,11 @@ func TestKeyVerifications(t *testing.T) { return acc }, int64(0)) - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { t.Run(table, func(t *testing.T) { t.Parallel() var queried int64 - err := conn.QueryRow(ctx, "SELECT sum(spent_credits) FROM ? WHERE workspace_id = ? AND identity_id = ?;", table, workspaceID, identityID).Scan(&queried) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT sum(spent_credits) FROM %s WHERE workspace_id = ? AND identity_id = ?;", table), workspaceID, identityID).Scan(&queried) require.NoError(t, err) require.Equal(t, credits, queried) @@ -310,11 +311,11 @@ func TestKeyVerifications(t *testing.T) { return acc }, int64(0)) - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { t.Run(table, func(t *testing.T) { t.Parallel() var queried int64 - err := conn.QueryRow(ctx, "SELECT sum(spent_credits) FROM ? WHERE workspace_id = ? AND key_id = ?;", table, workspaceID, keyID).Scan(&queried) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT sum(spent_credits) FROM %s WHERE workspace_id = ? 
AND key_id = ?;", table), workspaceID, keyID).Scan(&queried) require.NoError(t, err) require.Equal(t, credits, queried) @@ -331,13 +332,13 @@ func TestKeyVerifications(t *testing.T) { id := identityID expectedExternalID := identityToExternalID[id] - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { tbl := table t.Run(tbl, func(t *testing.T) { t.Parallel() require.EventuallyWithT(t, func(c *assert.CollectT) { var queriedExternalID string - err := conn.QueryRow(ctx, "SELECT external_id FROM ? WHERE workspace_id = ? AND identity_id = ? LIMIT 1;", tbl, workspaceID, id).Scan(&queriedExternalID) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT external_id FROM %s WHERE workspace_id = ? AND identity_id = ? LIMIT 1;", tbl), workspaceID, id).Scan(&queriedExternalID) require.NoError(c, err) require.Equal(c, expectedExternalID, queriedExternalID, "external_id should match for identity %s in table %s", id, tbl) }, time.Minute, time.Second) @@ -359,13 +360,13 @@ func TestKeyVerifications(t *testing.T) { return acc }, 0) - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { tbl := table t.Run(tbl, func(t *testing.T) { t.Parallel() require.EventuallyWithT(t, func(c *assert.CollectT) { var queriedCount int64 - err := conn.QueryRow(ctx, "SELECT SUM(count) FROM ? WHERE workspace_id = ? 
AND external_id = ?;", tbl, workspaceID, extID).Scan(&queriedCount) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT SUM(count) FROM %s WHERE workspace_id = ? AND external_id = ?;", tbl), workspaceID, extID).Scan(&queriedCount) require.NoError(c, err) require.Equal(c, expectedCount, int(queriedCount), "count should match for external_id %s in table %s", extID, tbl) }, time.Minute, time.Second) @@ -388,7 +389,7 @@ func TestKeyVerifications(t *testing.T) { return acc }, map[string]int{}) - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { tbl := table t.Run(tbl, func(t *testing.T) { t.Parallel() @@ -397,7 +398,7 @@ func TestKeyVerifications(t *testing.T) { expCount := expectedCount require.EventuallyWithT(t, func(c *assert.CollectT) { var queriedCount int64 - err := conn.QueryRow(ctx, "SELECT SUM(count) FROM ? WHERE workspace_id = ? AND external_id = ? AND outcome = ?;", tbl, workspaceID, extID, out).Scan(&queriedCount) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT SUM(count) FROM %s WHERE workspace_id = ? AND external_id = ? 
AND outcome = ?;", tbl), workspaceID, extID, out).Scan(&queriedCount) require.NoError(c, err) require.Equal(c, expCount, int(queriedCount), "count for external_id %s and outcome %s should match in table %s", extID, out, tbl) }, time.Minute, time.Second) @@ -414,7 +415,7 @@ func TestKeyVerifications(t *testing.T) { id := identityID extID := identityToExternalID[id] - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { tbl := table t.Run(tbl, func(t *testing.T) { t.Parallel() @@ -426,7 +427,7 @@ func TestKeyVerifications(t *testing.T) { wrongIdentityID = identities[(slices.Index(identities, id)+2)%len(identities)] } - err := conn.QueryRow(ctx, "SELECT SUM(count) FROM ? WHERE workspace_id = ? AND external_id = ? AND identity_id = ?;", tbl, workspaceID, extID, wrongIdentityID).Scan(&countWithWrongIdentity) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT SUM(count) FROM %s WHERE workspace_id = ? AND external_id = ? 
AND identity_id = ?;", tbl), workspaceID, extID, wrongIdentityID).Scan(&countWithWrongIdentity) if err != nil { // It's OK if there are no rows, that means the mapping is correct if errors.Is(err, sql.ErrNoRows) { @@ -454,13 +455,13 @@ func TestKeyVerifications(t *testing.T) { return acc }, int64(0)) - for _, table := range []string{"key_verifications_per_minute_v2", "key_verifications_per_hour_v2", "key_verifications_per_day_v2", "key_verifications_per_month_v2"} { + for _, table := range []string{"default.key_verifications_per_minute_v2", "default.key_verifications_per_hour_v2", "default.key_verifications_per_day_v2", "default.key_verifications_per_month_v2"} { tbl := table t.Run(tbl, func(t *testing.T) { t.Parallel() require.EventuallyWithT(t, func(c *assert.CollectT) { var queriedCredits int64 - err := conn.QueryRow(ctx, "SELECT SUM(spent_credits) FROM ? WHERE workspace_id = ? AND external_id = ?;", tbl, workspaceID, extID).Scan(&queriedCredits) + err := conn.QueryRow(ctx, fmt.Sprintf("SELECT SUM(spent_credits) FROM %s WHERE workspace_id = ? 
AND external_id = ?;", tbl), workspaceID, extID).Scan(&queriedCredits) require.NoError(c, err) require.Equal(c, expectedCredits, queriedCredits, "spent_credits for external_id %s should match in table %s", extID, tbl) }, time.Minute, time.Second) @@ -479,7 +480,7 @@ func TestKeyVerifications(t *testing.T) { }, int64(0)) var queried int64 - err := conn.QueryRow(ctx, "SELECT sum(count) FROM billable_verifications_per_month_v2 WHERE workspace_id = ?;", workspaceID).Scan(&queried) + err := conn.QueryRow(ctx, "SELECT sum(count) FROM default.billable_verifications_per_month_v2 WHERE workspace_id = ?;", workspaceID).Scan(&queried) require.NoError(t, err) diff --git a/go/pkg/clickhouse/ratelimits_test.go b/go/pkg/clickhouse/ratelimits_test.go index 8a7aee922a..4515d0e5a3 100644 --- a/go/pkg/clickhouse/ratelimits_test.go +++ b/go/pkg/clickhouse/ratelimits_test.go @@ -107,7 +107,7 @@ func TestRatelimits_ComprehensiveLoadTest(t *testing.T) { t0 = time.Now() - batch, err := conn.PrepareBatch(ctx, "INSERT INTO ratelimits_raw_v2") + batch, err := conn.PrepareBatch(ctx, "INSERT INTO default.ratelimits_raw_v2") require.NoError(t, err) for _, row := range ratelimits { @@ -121,7 +121,7 @@ func TestRatelimits_ComprehensiveLoadTest(t *testing.T) { // Wait for raw data to be available require.EventuallyWithT(t, func(c *assert.CollectT) { rawCount := uint64(0) - err = conn.QueryRow(ctx, "SELECT COUNT(*) FROM ratelimits_raw_v2 WHERE workspace_id = ?", workspaceID).Scan(&rawCount) + err = conn.QueryRow(ctx, "SELECT COUNT(*) FROM default.ratelimits_raw_v2 WHERE workspace_id = ?", workspaceID).Scan(&rawCount) require.NoError(c, err) require.Equal(c, len(ratelimits), int(rawCount)) }, time.Minute, time.Second) @@ -137,11 +137,11 @@ func TestRatelimits_ComprehensiveLoadTest(t *testing.T) { totalRequests := len(ratelimits) - for _, table := range []string{"ratelimits_per_minute_v2", "ratelimits_per_hour_v2", "ratelimits_per_day_v2", "ratelimits_per_month_v2"} { + for _, table := range 
[]string{"default.ratelimits_per_minute_v2", "default.ratelimits_per_hour_v2", "default.ratelimits_per_day_v2", "default.ratelimits_per_month_v2"} { t.Run(table, func(t *testing.T) { require.EventuallyWithT(t, func(c *assert.CollectT) { var queriedPassed, queriedTotal int64 - err = conn.QueryRow(ctx, "SELECT sum(passed), sum(total) FROM ? WHERE workspace_id = ?", table, workspaceID).Scan(&queriedPassed, &queriedTotal) + err = conn.QueryRow(ctx, fmt.Sprintf("SELECT sum(passed), sum(total) FROM %s WHERE workspace_id = ?", table), workspaceID).Scan(&queriedPassed, &queriedTotal) require.NoError(c, err) require.Equal(c, totalPassed, int(queriedPassed), "passed count should match") require.Equal(c, totalRequests, int(queriedTotal), "total count should match") @@ -158,7 +158,7 @@ func TestRatelimits_ComprehensiveLoadTest(t *testing.T) { p75 := percentile(latencies, 0.75) p99 := percentile(latencies, 0.99) - for _, table := range []string{"ratelimits_per_minute_v2", "ratelimits_per_hour_v2", "ratelimits_per_day_v2", "ratelimits_per_month_v2"} { + for _, table := range []string{"default.ratelimits_per_minute_v2", "default.ratelimits_per_hour_v2", "default.ratelimits_per_day_v2", "default.ratelimits_per_month_v2"} { t.Run(table, func(t *testing.T) { require.EventuallyWithT(t, func(c *assert.CollectT) { var ( @@ -166,7 +166,7 @@ func TestRatelimits_ComprehensiveLoadTest(t *testing.T) { queriedP75 float32 queriedP99 float32 ) - err = conn.QueryRow(ctx, "SELECT avgMerge(latency_avg), quantilesTDigestMerge(0.75)(latency_p75)[1], quantilesTDigestMerge(0.99)(latency_p99)[1] FROM ? 
WHERE workspace_id = ?", table, workspaceID).Scan(&queriedAvg, &queriedP75, &queriedP99) + err = conn.QueryRow(ctx, fmt.Sprintf("SELECT avgMerge(latency_avg), quantilesTDigestMerge(0.75)(latency_p75)[1], quantilesTDigestMerge(0.99)(latency_p99)[1] FROM %s WHERE workspace_id = ?", table), workspaceID).Scan(&queriedAvg, &queriedP75, &queriedP99) require.NoError(c, err) require.InDelta(c, avg, queriedAvg, 0.01, "average latency should match") @@ -189,12 +189,12 @@ func TestRatelimits_ComprehensiveLoadTest(t *testing.T) { return acc }, make(map[string]struct{ passed, total int })) - for _, table := range []string{"ratelimits_per_minute_v2", "ratelimits_per_hour_v2", "ratelimits_per_day_v2", "ratelimits_per_month_v2"} { + for _, table := range []string{"default.ratelimits_per_minute_v2", "default.ratelimits_per_hour_v2", "default.ratelimits_per_day_v2", "default.ratelimits_per_month_v2"} { t.Run(table, func(t *testing.T) { for namespaceID, expectedStats := range namespaceStats { require.EventuallyWithT(t, func(c *assert.CollectT) { var queriedPassed, queriedTotal int64 - err = conn.QueryRow(ctx, "SELECT sum(passed), sum(total) FROM ? WHERE workspace_id = ? AND namespace_id = ?", table, workspaceID, namespaceID).Scan(&queriedPassed, &queriedTotal) + err = conn.QueryRow(ctx, fmt.Sprintf("SELECT sum(passed), sum(total) FROM %s WHERE workspace_id = ? 
AND namespace_id = ?", table), workspaceID, namespaceID).Scan(&queriedPassed, &queriedTotal) require.NoError(c, err) require.Equal(c, expectedStats.passed, int(queriedPassed), "passed count for namespace %s should match", namespaceID) require.Equal(c, expectedStats.total, int(queriedTotal), "total count for namespace %s should match", namespaceID) @@ -221,7 +221,7 @@ func TestRatelimits_ComprehensiveLoadTest(t *testing.T) { return array.Random(identifiers) }) - for _, table := range []string{"ratelimits_per_minute_v2", "ratelimits_per_hour_v2", "ratelimits_per_day_v2", "ratelimits_per_month_v2"} { + for _, table := range []string{"default.ratelimits_per_minute_v2", "default.ratelimits_per_hour_v2", "default.ratelimits_per_day_v2", "default.ratelimits_per_month_v2"} { t.Run(table, func(t *testing.T) { for _, identifier := range sampleIdentifiers { expectedStats, exists := identifierStats[identifier] @@ -231,7 +231,7 @@ func TestRatelimits_ComprehensiveLoadTest(t *testing.T) { require.EventuallyWithT(t, func(c *assert.CollectT) { var queriedPassed, queriedTotal int64 - err = conn.QueryRow(ctx, "SELECT sum(passed), sum(total) FROM ? WHERE workspace_id = ? AND identifier = ?", table, workspaceID, identifier).Scan(&queriedPassed, &queriedTotal) + err = conn.QueryRow(ctx, fmt.Sprintf("SELECT sum(passed), sum(total) FROM %s WHERE workspace_id = ? 
AND identifier = ?", table), workspaceID, identifier).Scan(&queriedPassed, &queriedTotal) require.NoError(c, err) require.Equal(c, expectedStats.passed, int(queriedPassed), "passed count for identifier %s should match", identifier) require.Equal(c, expectedStats.total, int(queriedTotal), "total count for identifier %s should match", identifier) @@ -251,11 +251,11 @@ func TestRatelimits_ComprehensiveLoadTest(t *testing.T) { passed += 1 } } - for _, table := range []string{"ratelimits_per_minute_v2", "ratelimits_per_hour_v2", "ratelimits_per_day_v2", "ratelimits_per_month_v2"} { + for _, table := range []string{"default.ratelimits_per_minute_v2", "default.ratelimits_per_hour_v2", "default.ratelimits_per_day_v2", "default.ratelimits_per_month_v2"} { t.Run(table, func(t *testing.T) { require.EventuallyWithT(t, func(c *assert.CollectT) { var queriedPassed, queriedTotal int64 - err = conn.QueryRow(ctx, "SELECT sum(passed), sum(total) FROM ? WHERE workspace_id = ?", table, workspaceID).Scan(&queriedPassed, &queriedTotal) + err = conn.QueryRow(ctx, fmt.Sprintf("SELECT sum(passed), sum(total) FROM %s WHERE workspace_id = ?", table), workspaceID).Scan(&queriedPassed, &queriedTotal) require.NoError(c, err) require.Equal(c, total, int(queriedTotal), "total queries should match") @@ -279,11 +279,11 @@ func TestRatelimits_ComprehensiveLoadTest(t *testing.T) { } } } - for _, table := range []string{"ratelimits_per_minute_v2", "ratelimits_per_hour_v2", "ratelimits_per_day_v2", "ratelimits_per_month_v2"} { + for _, table := range []string{"default.ratelimits_per_minute_v2", "default.ratelimits_per_hour_v2", "default.ratelimits_per_day_v2", "default.ratelimits_per_month_v2"} { t.Run(table, func(t *testing.T) { require.EventuallyWithT(t, func(c *assert.CollectT) { var queriedPassed, queriedTotal int64 - err = conn.QueryRow(ctx, "SELECT sum(passed), sum(total) FROM ? WHERE workspace_id = ? 
AND identifier = ?", table, workspaceID, identifier).Scan(&queriedPassed, &queriedTotal) + err = conn.QueryRow(ctx, fmt.Sprintf("SELECT sum(passed), sum(total) FROM %s WHERE workspace_id = ? AND identifier = ?", table), workspaceID, identifier).Scan(&queriedPassed, &queriedTotal) require.NoError(c, err) require.Equal(c, total, int(queriedTotal), "total queries should match") @@ -309,11 +309,11 @@ func TestRatelimits_ComprehensiveLoadTest(t *testing.T) { } } } - for _, table := range []string{"ratelimits_per_minute_v2", "ratelimits_per_hour_v2", "ratelimits_per_day_v2", "ratelimits_per_month_v2"} { + for _, table := range []string{"default.ratelimits_per_minute_v2", "default.ratelimits_per_hour_v2", "default.ratelimits_per_day_v2", "default.ratelimits_per_month_v2"} { t.Run(table, func(t *testing.T) { require.EventuallyWithT(t, func(c *assert.CollectT) { var queriedPassed, queriedTotal int64 - err = conn.QueryRow(ctx, "SELECT sum(passed), sum(total) FROM ? WHERE workspace_id = ? AND namespace_id = ?", table, workspaceID, namespace).Scan(&queriedPassed, &queriedTotal) + err = conn.QueryRow(ctx, fmt.Sprintf("SELECT sum(passed), sum(total) FROM %s WHERE workspace_id = ? AND namespace_id = ?", table), workspaceID, namespace).Scan(&queriedPassed, &queriedTotal) require.NoError(c, err) require.Equal(c, total, int(queriedTotal), "total queries should match") diff --git a/internal/clickhouse/schema/000_README.md b/internal/clickhouse/schema/000_README.md deleted file mode 100644 index b1d380abf9..0000000000 --- a/internal/clickhouse/schema/000_README.md +++ /dev/null @@ -1,72 +0,0 @@ - -# ClickHouse Table Naming Conventions - -This document outlines the naming conventions for tables and materialized views in our ClickHouse setup. Adhering to these conventions ensures consistency, clarity, and ease of management across our data infrastructure. - -## General Rules - -1. Use lowercase letters and separate words with underscores. -2. 
Avoid ClickHouse reserved words and special characters in names. -3. Be descriptive but concise. - -## Table Naming Convention - -Format: `[prefix]_[domain]_[description]_[version]` - -### Prefixes - -- `raw_`: Input data tables -- `tmp_{yourname}_`: Temporary tables for experiments, add your name, so it's easy to identify ownership. - -### Domain/Category - -Include the domain or category of the data when applicable. - -Examples: -- `keys` -- `audit` -- `user` -- `gateway` - -### Versioning - -- Version numbers: `_v1`, `_v2`, etc. - -### Aggregation Suffixes - -For aggregated or summary tables, use suffixes like: -- `_per_day` -- `_per_month` -- `_summary` - -## Materialized View Naming Convention - -Format: `[description]_[aggregation]_mv_[version]` - -- Always suffix with `mv_[version]` -- Include a description of the view's purpose -- Add aggregation level if applicable - -## Examples - -1. Raw Data Table: - `raw_sales_transactions_v1` - -2. Materialized View: - `active_users_per_day_mv_v2` - -3. Temporary Table: - `tmp_andreas_user_analysis_v1` - -4. Aggregated Table: - `sales_summary_per_hour_mv_v1` - -## Consistency Across Related Objects - -Maintain consistent naming across related tables, views, and other objects: - -- `raw_user_activity_v1` -- `user_activity_per_day_v1` -- `user_activity_per_day_mv_v1` - -By following these conventions, we ensure a clear, consistent, and scalable naming structure for our ClickHouse setup. 
diff --git a/internal/clickhouse/schema/001_create_databases.sql b/internal/clickhouse/schema/001_create_databases.sql deleted file mode 100644 index c8b10934e7..0000000000 --- a/internal/clickhouse/schema/001_create_databases.sql +++ /dev/null @@ -1,17 +0,0 @@ --- +goose up - -CREATE DATABASE verifications; -CREATE DATABASE telemetry; -CREATE DATABASE metrics; -CREATE DATABASE ratelimits; -CREATE DATABASE business; -CREATE DATABASE billing; - - --- +goose down -DROP DATABASE verifications; -DROP DATABASE telemetry; -DROP DATABASE metrics; -DROP DATABASE ratelimits; -DROP DATABASE business; -DROP DATABASE billing; diff --git a/internal/clickhouse/schema/002_create_metrics_raw_api_requests_v1.sql b/internal/clickhouse/schema/002_create_metrics_raw_api_requests_v1.sql deleted file mode 100644 index 829f5f26e2..0000000000 --- a/internal/clickhouse/schema/002_create_metrics_raw_api_requests_v1.sql +++ /dev/null @@ -1,43 +0,0 @@ --- +goose up -CREATE TABLE metrics.raw_api_requests_v1( - request_id String, - -- unix milli - time Int64, - - workspace_id String, - - host String, - - -- Upper case HTTP method - -- Examples: "GET", "POST", "PUT", "DELETE" - method LowCardinality(String), - path String, - -- "Key: Value" pairs - request_headers Array(String), - request_body String, - - response_status Int, - -- "Key: Value" pairs - response_headers Array(String), - response_body String, - -- internal err.Error() string, empty if no error - error String, - - -- milliseconds - service_latency Int64, - - user_agent String, - ip_address String, - country String, - city String, - colo String, - continent String, - - -) -ENGINE = MergeTree() -ORDER BY (workspace_id, time, request_id) -; - --- +goose down -DROP TABLE metrics.raw_api_requests_v1; diff --git a/internal/clickhouse/schema/003_create_verifications_raw_key_verifications_v1.sql b/internal/clickhouse/schema/003_create_verifications_raw_key_verifications_v1.sql deleted file mode 100644 index 9edb56bd49..0000000000 --- 
a/internal/clickhouse/schema/003_create_verifications_raw_key_verifications_v1.sql +++ /dev/null @@ -1,35 +0,0 @@ --- +goose up -CREATE TABLE verifications.raw_key_verifications_v1( - -- the api request id, so we can correlate the verification with traces and logs - request_id String, - - -- unix milli - time Int64, - - workspace_id String, - key_space_id String, - key_id String, - - -- Right now this is a 3 character airport code, but when we move to aws, - -- this will be the region code such as `us-east-1` - region LowCardinality(String), - - -- Examples: - -- - "VALID" - -- - "RATE_LIMITED" - -- - "EXPIRED" - -- - "DISABLED - outcome LowCardinality(String), - - -- Empty string if the key has no identity - identity_id String, - - - -) -ENGINE = MergeTree() -ORDER BY (workspace_id, key_space_id, key_id, time) -; - --- +goose down -DROP TABLE verifications.raw_key_verifications_v1; diff --git a/internal/clickhouse/schema/004_create_verifications_key_verifications_per_hour_v1.sql b/internal/clickhouse/schema/004_create_verifications_key_verifications_per_hour_v1.sql deleted file mode 100644 index bfdf27092a..0000000000 --- a/internal/clickhouse/schema/004_create_verifications_key_verifications_per_hour_v1.sql +++ /dev/null @@ -1,18 +0,0 @@ --- +goose up -CREATE TABLE verifications.key_verifications_per_hour_v1 -( - time DateTime, - workspace_id String, - key_space_id String, - identity_id String, - key_id String, - outcome LowCardinality(String), - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, key_space_id, time, identity_id, key_id) -; - - --- +goose down -DROP TABLE verifications.key_verifications_per_hour_v1; diff --git a/internal/clickhouse/schema/005_create_verifications_key_verifications_per_day_v1.sql b/internal/clickhouse/schema/005_create_verifications_key_verifications_per_day_v1.sql deleted file mode 100644 index d2d8ceb2e6..0000000000 --- a/internal/clickhouse/schema/005_create_verifications_key_verifications_per_day_v1.sql +++ 
/dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE verifications.key_verifications_per_day_v1 -( - time DateTime, - workspace_id String, - key_space_id String, - identity_id String, - key_id String, - outcome LowCardinality(String), - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, key_space_id, time, identity_id, key_id) -; - - --- +goose down -DROP TABLE verifications.key_verifications_per_day_v1; - diff --git a/internal/clickhouse/schema/006_create_verifications_key_verifications_per_month_v1.sql b/internal/clickhouse/schema/006_create_verifications_key_verifications_per_month_v1.sql deleted file mode 100644 index f849f136ed..0000000000 --- a/internal/clickhouse/schema/006_create_verifications_key_verifications_per_month_v1.sql +++ /dev/null @@ -1,17 +0,0 @@ --- +goose up -CREATE TABLE verifications.key_verifications_per_month_v1 -( - time DateTime, - workspace_id String, - key_space_id String, - identity_id String, - key_id String, - outcome LowCardinality(String), - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, key_space_id, time, identity_id, key_id) -; - --- +goose down -DROP TABLE verifications.key_verifications_per_month_v1; diff --git a/internal/clickhouse/schema/007_create_verifications_key_verifications_per_hour_mv_v1.sql b/internal/clickhouse/schema/007_create_verifications_key_verifications_per_hour_mv_v1.sql deleted file mode 100644 index 4219ab2404..0000000000 --- a/internal/clickhouse/schema/007_create_verifications_key_verifications_per_hour_mv_v1.sql +++ /dev/null @@ -1,25 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW verifications.key_verifications_per_hour_mv_v1 -TO verifications.key_verifications_per_hour_v1 -AS -SELECT - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - count(*) as count, - toStartOfHour(fromUnixTimestamp64Milli(time)) AS time -FROM verifications.raw_key_verifications_v1 -GROUP BY - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - time -; - 
- --- +goose down -DROP VIEW verifications.key_verifications_per_hour_mv_v1; diff --git a/internal/clickhouse/schema/008_create_verifications_key_verifications_per_day_mv_v1.sql b/internal/clickhouse/schema/008_create_verifications_key_verifications_per_day_mv_v1.sql deleted file mode 100644 index 76a050afd2..0000000000 --- a/internal/clickhouse/schema/008_create_verifications_key_verifications_per_day_mv_v1.sql +++ /dev/null @@ -1,25 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW verifications.key_verifications_per_day_mv_v1 -TO verifications.key_verifications_per_day_v1 -AS -SELECT - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - count(*) as count, - toStartOfDay(fromUnixTimestamp64Milli(time)) AS time -FROM verifications.raw_key_verifications_v1 -GROUP BY - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - time -; - - --- +goose down -DROP VIEW verifications.key_verifications_per_day_mv_v1; diff --git a/internal/clickhouse/schema/009_create_verifications_key_verifications_per_month_mv_v1.sql b/internal/clickhouse/schema/009_create_verifications_key_verifications_per_month_mv_v1.sql deleted file mode 100644 index e37e73bf4f..0000000000 --- a/internal/clickhouse/schema/009_create_verifications_key_verifications_per_month_mv_v1.sql +++ /dev/null @@ -1,25 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW verifications.key_verifications_per_month_mv_v1 -TO verifications.key_verifications_per_month_v1 -AS -SELECT - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - count(*) as count, - toStartOfMonth(fromUnixTimestamp64Milli(time)) AS time -FROM verifications.raw_key_verifications_v1 -GROUP BY - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - time -; - - --- +goose down -DROP VIEW verifications.key_verifications_per_month_mv_v1; diff --git a/internal/clickhouse/schema/010_create_ratelimits_raw_ratelimits_table.sql b/internal/clickhouse/schema/010_create_ratelimits_raw_ratelimits_table.sql 
deleted file mode 100644 index 755dd1d95d..0000000000 --- a/internal/clickhouse/schema/010_create_ratelimits_raw_ratelimits_table.sql +++ /dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE ratelimits.raw_ratelimits_v1( - request_id String, - -- unix milli - time Int64, - workspace_id String, - namespace_id String, - identifier String, - passed Bool - -) -ENGINE = MergeTree() -ORDER BY (workspace_id, namespace_id, time, identifier) -; - - - --- +goose down -DROP TABLE ratelimits.raw_ratelimits_v1; diff --git a/internal/clickhouse/schema/011_create_telemetry_raw_sdks_v1.sql b/internal/clickhouse/schema/011_create_telemetry_raw_sdks_v1.sql deleted file mode 100644 index 3b7a6ad67c..0000000000 --- a/internal/clickhouse/schema/011_create_telemetry_raw_sdks_v1.sql +++ /dev/null @@ -1,24 +0,0 @@ --- +goose up -CREATE TABLE telemetry.raw_sdks_v1( - -- the api request id, so we can correlate the telemetry with traces and logs - request_id String, - - -- unix milli - time Int64, - - -- ie: node@20 - runtime String, - -- ie: vercel - platform String, - - -- ie: [ "@unkey/api@1.2.3", "@unkey/ratelimit@4.5.6" ] - versions Array(String) -) -ENGINE = MergeTree() -ORDER BY (request_id, time) -; - - - --- +goose down -DROP TABLE telemetry.raw_sdks_v1; diff --git a/internal/clickhouse/schema/012_create_billing_billable_verifications_per_month_v1.sql b/internal/clickhouse/schema/012_create_billing_billable_verifications_per_month_v1.sql deleted file mode 100644 index 31dfaf6fe4..0000000000 --- a/internal/clickhouse/schema/012_create_billing_billable_verifications_per_month_v1.sql +++ /dev/null @@ -1,15 +0,0 @@ --- +goose up -CREATE TABLE billing.billable_verifications_per_month_v1 -( - year Int, - month Int, - workspace_id String, - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, year, month) -; - - --- +goose down -DROP TABLE billing.billable_verifications_per_month_v1; diff --git 
a/internal/clickhouse/schema/013_create_billing_billable_verifications_per_month_mv_v1.sql b/internal/clickhouse/schema/013_create_billing_billable_verifications_per_month_mv_v1.sql deleted file mode 100644 index 00e8005f1d..0000000000 --- a/internal/clickhouse/schema/013_create_billing_billable_verifications_per_month_mv_v1.sql +++ /dev/null @@ -1,21 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW billing.billable_verifications_per_month_mv_v1 -TO billing.billable_verifications_per_month_v1 -AS -SELECT - workspace_id, - count(*) AS count, - toYear(time) AS year, - toMonth(time) AS month -FROM verifications.key_verifications_per_month_v1 -WHERE outcome = 'VALID' -GROUP BY - workspace_id, - year, - month -; - - - --- +goose down -DROP VIEW billing.billable_verifications_per_month_mv_v1; diff --git a/internal/clickhouse/schema/014_create_ratelimits_ratelimits_per_minute_v1.sql b/internal/clickhouse/schema/014_create_ratelimits_ratelimits_per_minute_v1.sql deleted file mode 100644 index cfc1b4b6a9..0000000000 --- a/internal/clickhouse/schema/014_create_ratelimits_ratelimits_per_minute_v1.sql +++ /dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE ratelimits.ratelimits_per_minute_v1 -( - time DateTime, - workspace_id String, - namespace_id String, - identifier String, - - passed Int64, - total Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, namespace_id, time, identifier) -; - - - --- +goose down -DROP TABLE ratelimits.ratelimits_per_minute_v1; diff --git a/internal/clickhouse/schema/015_create_ratelimits_ratelimits_per_hour_v1.sql b/internal/clickhouse/schema/015_create_ratelimits_ratelimits_per_hour_v1.sql deleted file mode 100644 index 1cc5f6aca2..0000000000 --- a/internal/clickhouse/schema/015_create_ratelimits_ratelimits_per_hour_v1.sql +++ /dev/null @@ -1,18 +0,0 @@ --- +goose up -CREATE TABLE ratelimits.ratelimits_per_hour_v1 -( - time DateTime, - workspace_id String, - namespace_id String, - identifier String, - - passed Int64, - total Int64 
-) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, namespace_id, time, identifier) -; - - --- +goose down -DROP TABLE ratelimits.ratelimits_per_hour_v1; diff --git a/internal/clickhouse/schema/016_create_ratelimits_ratelimits_per_day_v1.sql b/internal/clickhouse/schema/016_create_ratelimits_ratelimits_per_day_v1.sql deleted file mode 100644 index 57b7b9febf..0000000000 --- a/internal/clickhouse/schema/016_create_ratelimits_ratelimits_per_day_v1.sql +++ /dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE ratelimits.ratelimits_per_day_v1 -( - time DateTime, - workspace_id String, - namespace_id String, - identifier String, - - passed Int64, - total Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, namespace_id, time, identifier) -; - - - --- +goose down -DROP TABLE ratelimits.ratelimits_per_day_v1; diff --git a/internal/clickhouse/schema/017_create_ratelimits_ratelimits_per_month_v1.sql b/internal/clickhouse/schema/017_create_ratelimits_ratelimits_per_month_v1.sql deleted file mode 100644 index ffd91a5f63..0000000000 --- a/internal/clickhouse/schema/017_create_ratelimits_ratelimits_per_month_v1.sql +++ /dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE ratelimits.ratelimits_per_month_v1 -( - time DateTime, - workspace_id String, - namespace_id String, - identifier String, - - passed Int64, - total Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, namespace_id, time, identifier) -; - - - --- +goose down -DROP TABLE ratelimits.ratelimits_per_month_v1; diff --git a/internal/clickhouse/schema/018_create_ratelimits_ratelimits_per_minute_mv_v1.sql b/internal/clickhouse/schema/018_create_ratelimits_ratelimits_per_minute_mv_v1.sql deleted file mode 100644 index 93487cd52e..0000000000 --- a/internal/clickhouse/schema/018_create_ratelimits_ratelimits_per_minute_mv_v1.sql +++ /dev/null @@ -1,23 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW ratelimits.ratelimits_per_minute_mv_v1 -TO ratelimits.ratelimits_per_minute_v1 -AS -SELECT - 
workspace_id, - namespace_id, - identifier, - countIf(passed > 0) as passed, - count(*) as total, - toStartOfMinute(fromUnixTimestamp64Milli(time)) AS time -FROM ratelimits.raw_ratelimits_v1 -GROUP BY - workspace_id, - namespace_id, - identifier, - time -; - - - --- +goose down -DROP VIEW ratelimits.ratelimits_per_minute_mv_v1; diff --git a/internal/clickhouse/schema/019_create_ratelimits_ratelimits_per_hour_mv_v1.sql b/internal/clickhouse/schema/019_create_ratelimits_ratelimits_per_hour_mv_v1.sql deleted file mode 100644 index fb423d933e..0000000000 --- a/internal/clickhouse/schema/019_create_ratelimits_ratelimits_per_hour_mv_v1.sql +++ /dev/null @@ -1,22 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW ratelimits.ratelimits_per_hour_mv_v1 -TO ratelimits.ratelimits_per_hour_v1 -AS -SELECT - workspace_id, - namespace_id, - identifier, - countIf(passed > 0) as passed, - count(*) as total, - toStartOfHour(fromUnixTimestamp64Milli(time)) AS time -FROM ratelimits.raw_ratelimits_v1 -GROUP BY - workspace_id, - namespace_id, - identifier, - time -; - - --- +goose down -DROP VIEW ratelimits.ratelimits_per_hour_mv_v1; diff --git a/internal/clickhouse/schema/020_create_ratelimits_ratelimits_per_day_mv_v1.sql b/internal/clickhouse/schema/020_create_ratelimits_ratelimits_per_day_mv_v1.sql deleted file mode 100644 index 8e76285e3b..0000000000 --- a/internal/clickhouse/schema/020_create_ratelimits_ratelimits_per_day_mv_v1.sql +++ /dev/null @@ -1,23 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW ratelimits.ratelimits_per_day_mv_v1 -TO ratelimits.ratelimits_per_day_v1 -AS -SELECT - workspace_id, - namespace_id, - identifier, - count(*) as total, - countIf(passed > 0) as passed, - toStartOfDay(fromUnixTimestamp64Milli(time)) AS time -FROM ratelimits.raw_ratelimits_v1 -GROUP BY - workspace_id, - namespace_id, - identifier, - time -; - - - --- +goose down -DROP VIEW ratelimits.ratelimits_per_day_mv_v1; diff --git 
a/internal/clickhouse/schema/021_create_ratelimits_ratelimits_per_month_mv_v1.sql b/internal/clickhouse/schema/021_create_ratelimits_ratelimits_per_month_mv_v1.sql deleted file mode 100644 index a97c911186..0000000000 --- a/internal/clickhouse/schema/021_create_ratelimits_ratelimits_per_month_mv_v1.sql +++ /dev/null @@ -1,22 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW ratelimits.ratelimits_per_month_mv_v1 -TO ratelimits.ratelimits_per_month_v1 -AS -SELECT - workspace_id, - namespace_id, - identifier, - countIf(passed > 0) as passed, - count(*) as total, - toStartOfMonth(fromUnixTimestamp64Milli(time)) AS time -FROM ratelimits.raw_ratelimits_v1 -GROUP BY - workspace_id, - namespace_id, - identifier, - time -; - - --- +goose down -DROP VIEW ratelimits.ratelimits_per_month_mv_v1; diff --git a/internal/clickhouse/schema/022_create_business_active_workspaces_per_month_v1.sql b/internal/clickhouse/schema/022_create_business_active_workspaces_per_month_v1.sql deleted file mode 100644 index 29be93dd73..0000000000 --- a/internal/clickhouse/schema/022_create_business_active_workspaces_per_month_v1.sql +++ /dev/null @@ -1,13 +0,0 @@ --- +goose up -CREATE TABLE business.active_workspaces_per_month_v1 -( - time Date, - workspace_id String -) -ENGINE = MergeTree() -ORDER BY (time) -; - - --- +goose down -DROP TABLE business.active_workspaces_per_month_v1; diff --git a/internal/clickhouse/schema/023_create_business_active_workspaces_per_month_mv_v1.sql b/internal/clickhouse/schema/023_create_business_active_workspaces_per_month_mv_v1.sql deleted file mode 100644 index a22401696b..0000000000 --- a/internal/clickhouse/schema/023_create_business_active_workspaces_per_month_mv_v1.sql +++ /dev/null @@ -1,21 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW IF NOT EXISTS business.active_workspaces_keys_per_month_mv_v1 -TO business.active_workspaces_per_month_v1 -AS -SELECT - workspace_id, toDate(time) as time -FROM verifications.key_verifications_per_month_v1 -; - -CREATE 
MATERIALIZED VIEW IF NOT EXISTS business.active_workspaces_ratelimits_per_month_mv_v1 -TO business.active_workspaces_per_month_v1 -AS -SELECT - workspace_id, toDate(time) as time -FROM ratelimits.ratelimits_per_month_v1 -; - - --- +goose down -DROP VIEW business.active_workspaces_keys_per_month_mv_v1; -DROP VIEW business.active_workspaces_ratelimits_per_month_mv_v1; diff --git a/internal/clickhouse/schema/024_create_ratelimits_last_used_mv_v1.sql b/internal/clickhouse/schema/024_create_ratelimits_last_used_mv_v1.sql deleted file mode 100644 index 56c2f26e38..0000000000 --- a/internal/clickhouse/schema/024_create_ratelimits_last_used_mv_v1.sql +++ /dev/null @@ -1,34 +0,0 @@ --- +goose up -CREATE TABLE ratelimits.ratelimits_last_used_v1 -( - time Int64, - workspace_id String, - namespace_id String, - identifier String, - -) -ENGINE = AggregatingMergeTree() -ORDER BY (workspace_id, namespace_id, time, identifier) -; - - - -CREATE MATERIALIZED VIEW ratelimits.ratelimits_last_used_mv_v1 -TO ratelimits.ratelimits_last_used_v1 -AS -SELECT - workspace_id, - namespace_id, - identifier, - maxSimpleState(time) as time -FROM ratelimits.raw_ratelimits_v1 -GROUP BY - workspace_id, - namespace_id, - identifier -; - - --- +goose down -DROP VIEW IF EXISTS ratelimits.ratelimits_last_used_mv_v1; -DROP TABLE IF EXISTS ratelimits.ratelimits_last_used_v1; diff --git a/internal/clickhouse/schema/025_create_billable_verifications_v2.sql b/internal/clickhouse/schema/025_create_billable_verifications_v2.sql deleted file mode 100644 index d5e89dda4a..0000000000 --- a/internal/clickhouse/schema/025_create_billable_verifications_v2.sql +++ /dev/null @@ -1,30 +0,0 @@ --- +goose up -CREATE TABLE billing.billable_verifications_per_month_v2 -( - year Int, - month Int, - workspace_id String, - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, year, month) -; - -CREATE MATERIALIZED VIEW billing.billable_verifications_per_month_mv_v2 -TO billing.billable_verifications_per_month_v2 
-AS SELECT - workspace_id, - sum(count) AS count, - toYear(time) AS year, - toMonth(time) AS month -FROM verifications.key_verifications_per_month_v1 -WHERE outcome = 'VALID' -GROUP BY - workspace_id, - year, - month -; --- +goose down - -DROP VIEW billing.billable_verifications_per_month_mv_v2; -DROP TABLE billing.billable_verifications_per_month_v2; diff --git a/internal/clickhouse/schema/026_create_billable_ratelimits_v1.sql b/internal/clickhouse/schema/026_create_billable_ratelimits_v1.sql deleted file mode 100644 index 0b0b46d954..0000000000 --- a/internal/clickhouse/schema/026_create_billable_ratelimits_v1.sql +++ /dev/null @@ -1,30 +0,0 @@ --- +goose up - -CREATE TABLE billing.billable_ratelimits_per_month_v1 -( - year Int, - month Int, - workspace_id String, - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, year, month) -; - -CREATE MATERIALIZED VIEW billing.billable_ratelimits_per_month_mv_v1 -TO billing.billable_ratelimits_per_month_v1 -AS SELECT - workspace_id, - sum(passed) AS count, - toYear(time) AS year, - toMonth(time) AS month -FROM ratelimits.ratelimits_per_month_v1 -WHERE passed > 0 -GROUP BY - workspace_id, - year, - month -; --- +goose down -DROP VIEW billing.billable_ratelimits_per_month_mv_v1; -DROP TABLE billing.billable_ratelimits_per_month_v1; diff --git a/internal/clickhouse/schema/027_add_tags_to_verifications.raw_key_verifications_v1.sql b/internal/clickhouse/schema/027_add_tags_to_verifications.raw_key_verifications_v1.sql deleted file mode 100644 index 06b384707b..0000000000 --- a/internal/clickhouse/schema/027_add_tags_to_verifications.raw_key_verifications_v1.sql +++ /dev/null @@ -1,11 +0,0 @@ --- +goose up -ALTER TABLE verifications.raw_key_verifications_v1 -ADD COLUMN IF NOT EXISTS tags Array(String) DEFAULT []; - - --- +goose down - - - -ALTER TABLE verifications.raw_key_verifications_v1 -DROP COLUMN IF EXISTS tags; diff --git 
a/internal/clickhouse/schema/028_create_verifications.key_verifications_per_hour_v2.sql b/internal/clickhouse/schema/028_create_verifications.key_verifications_per_hour_v2.sql deleted file mode 100644 index fa83cd0cef..0000000000 --- a/internal/clickhouse/schema/028_create_verifications.key_verifications_per_hour_v2.sql +++ /dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE verifications.key_verifications_per_hour_v2 -( - time DateTime, - workspace_id String, - key_space_id String, - identity_id String, - key_id String, - outcome LowCardinality(String), - tags Array(String), - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, key_space_id, identity_id, key_id, time, tags) -; - - --- +goose down -DROP TABLE verifications.key_verifications_per_hour_v2; diff --git a/internal/clickhouse/schema/029_create_verifications.key_verifications_per_hour_mv_v2.sql b/internal/clickhouse/schema/029_create_verifications.key_verifications_per_hour_mv_v2.sql deleted file mode 100644 index 23bd8f56d2..0000000000 --- a/internal/clickhouse/schema/029_create_verifications.key_verifications_per_hour_mv_v2.sql +++ /dev/null @@ -1,27 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW verifications.key_verifications_per_hour_mv_v2 -TO verifications.key_verifications_per_hour_v2 -AS -SELECT - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - count(*) as count, - toStartOfHour(fromUnixTimestamp64Milli(time)) AS time, - tags -FROM verifications.raw_key_verifications_v1 -GROUP BY - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - time, - tags -; - - --- +goose down -DROP VIEW verifications.key_verifications_per_hour_mv_v2; diff --git a/internal/clickhouse/schema/030_create_verifications.key_verifications_per_day_v2.sql b/internal/clickhouse/schema/030_create_verifications.key_verifications_per_day_v2.sql deleted file mode 100644 index 48018969b7..0000000000 --- 
a/internal/clickhouse/schema/030_create_verifications.key_verifications_per_day_v2.sql +++ /dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE verifications.key_verifications_per_day_v2 -( - time DateTime, - workspace_id String, - key_space_id String, - identity_id String, - key_id String, - outcome LowCardinality(String), - tags Array(String), - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, key_space_id, identity_id, key_id, time, tags) -; - - --- +goose down -DROP TABLE verifications.key_verifications_per_day_v2; diff --git a/internal/clickhouse/schema/031_create_verifications.key_verifications_per_day_mv_v2.sql b/internal/clickhouse/schema/031_create_verifications.key_verifications_per_day_mv_v2.sql deleted file mode 100644 index dbb8568e08..0000000000 --- a/internal/clickhouse/schema/031_create_verifications.key_verifications_per_day_mv_v2.sql +++ /dev/null @@ -1,27 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW verifications.key_verifications_per_day_mv_v2 -TO verifications.key_verifications_per_day_v2 -AS -SELECT - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - count(*) as count, - toStartOfDay(fromUnixTimestamp64Milli(time)) AS time, - tags -FROM verifications.raw_key_verifications_v1 -GROUP BY - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - time, - tags -; - - --- +goose down -DROP VIEW verifications.key_verifications_per_day_mv_v2; diff --git a/internal/clickhouse/schema/032_create_verifications.key_verifications_per_month_v2.sql b/internal/clickhouse/schema/032_create_verifications.key_verifications_per_month_v2.sql deleted file mode 100644 index a037c26018..0000000000 --- a/internal/clickhouse/schema/032_create_verifications.key_verifications_per_month_v2.sql +++ /dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE verifications.key_verifications_per_month_v2 -( - time DateTime, - workspace_id String, - key_space_id String, - identity_id String, - key_id String, - outcome 
LowCardinality(String), - tags Array(String), - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, key_space_id, identity_id, key_id, time, tags) -; - - --- +goose down -DROP TABLE verifications.key_verifications_per_month_v2; diff --git a/internal/clickhouse/schema/033_create_verifications.key_verifications_per_month_mv_v2.sql b/internal/clickhouse/schema/033_create_verifications.key_verifications_per_month_mv_v2.sql deleted file mode 100644 index a045769a07..0000000000 --- a/internal/clickhouse/schema/033_create_verifications.key_verifications_per_month_mv_v2.sql +++ /dev/null @@ -1,27 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW verifications.key_verifications_per_month_mv_v2 -TO verifications.key_verifications_per_month_v2 -AS -SELECT - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - count(*) as count, - toStartOfMonth(fromUnixTimestamp64Milli(time)) AS time, - tags -FROM verifications.raw_key_verifications_v1 -GROUP BY - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - time, - tags -; - - --- +goose down -DROP VIEW verifications.key_verifications_per_month_mv_v2; diff --git a/internal/clickhouse/schema/034_billing_read_from_verifications.key_verifications_per_month_v2.sql b/internal/clickhouse/schema/034_billing_read_from_verifications.key_verifications_per_month_v2.sql deleted file mode 100644 index cd5d6fbd3a..0000000000 --- a/internal/clickhouse/schema/034_billing_read_from_verifications.key_verifications_per_month_v2.sql +++ /dev/null @@ -1,33 +0,0 @@ --- +goose up -ALTER TABLE billing.billable_verifications_per_month_mv_v1 -MODIFY QUERY -SELECT - workspace_id, - count(*) AS count, - toYear(time) AS year, - toMonth(time) AS month -FROM verifications.key_verifications_per_month_v2 -WHERE outcome = 'VALID' -GROUP BY - workspace_id, - year, - month -; - - --- +goose down - -ALTER TABLE billing.billable_verifications_per_month_mv_v1 -MODIFY QUERY -SELECT - workspace_id, - count(*) AS count, - 
toYear(time) AS year, - toMonth(time) AS month -FROM verifications.key_verifications_per_month_v1 -WHERE outcome = 'VALID' -GROUP BY - workspace_id, - year, - month -; diff --git a/internal/clickhouse/schema/035_business_update_active_workspaces_keys_per_month_mv_v1_read_from_verifications.key_verifications_per_month_v2.sql b/internal/clickhouse/schema/035_business_update_active_workspaces_keys_per_month_mv_v1_read_from_verifications.key_verifications_per_month_v2.sql deleted file mode 100644 index 1a22f97904..0000000000 --- a/internal/clickhouse/schema/035_business_update_active_workspaces_keys_per_month_mv_v1_read_from_verifications.key_verifications_per_month_v2.sql +++ /dev/null @@ -1,16 +0,0 @@ --- +goose up -ALTER TABLE business.active_workspaces_keys_per_month_mv_v1 -MODIFY QUERY -SELECT - workspace_id, toDate(time) as time -FROM verifications.key_verifications_per_month_v2 -; - --- +goose down - -ALTER TABLE business.active_workspaces_keys_per_month_mv_v1 -MODIFY QUERY -SELECT - workspace_id, toDate(time) as time -FROM verifications.key_verifications_per_month_v1 -; diff --git a/internal/clickhouse/schema/036_create_verifications.key_verifications_per_hour_v3.sql b/internal/clickhouse/schema/036_create_verifications.key_verifications_per_hour_v3.sql deleted file mode 100644 index 6f99ddcefa..0000000000 --- a/internal/clickhouse/schema/036_create_verifications.key_verifications_per_hour_v3.sql +++ /dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE verifications.key_verifications_per_hour_v3 -( - time DateTime, - workspace_id String, - key_space_id String, - identity_id String, - key_id String, - outcome LowCardinality(String), - tags Array(String), - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, key_space_id, identity_id, key_id, time, tags, outcome) -; - - --- +goose down -DROP TABLE verifications.key_verifications_per_hour_v3; diff --git a/internal/clickhouse/schema/037_create_verifications.key_verifications_per_hour_mv_v3.sql 
b/internal/clickhouse/schema/037_create_verifications.key_verifications_per_hour_mv_v3.sql deleted file mode 100644 index 4cf157874c..0000000000 --- a/internal/clickhouse/schema/037_create_verifications.key_verifications_per_hour_mv_v3.sql +++ /dev/null @@ -1,49 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW IF NOT EXISTS verifications.key_verifications_per_hour_mv_v3 -TO verifications.key_verifications_per_hour_v3 -AS -SELECT - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - count(*) as count, - toStartOfHour(fromUnixTimestamp64Milli(time)) AS time, - tags -FROM verifications.raw_key_verifications_v1 -GROUP BY - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - time, - tags -; - - --- populate from existing data --- INSERT INTO verifications.key_verifications_per_hour_v3 --- SELECT --- toStartOfHour(fromUnixTimestamp64Milli(time)) AS time, --- workspace_id, --- key_space_id, --- identity_id, --- key_id, --- outcome, --- tags, --- count(*) as count --- FROM verifications.raw_key_verifications_v1 --- GROUP BY --- workspace_id, --- key_space_id, --- identity_id, --- key_id, --- outcome, --- time, --- tags --- ; - --- +goose down -DROP VIEW verifications.key_verifications_per_hour_mv_v3; diff --git a/internal/clickhouse/schema/038_create_verifications.key_verifications_per_day_v3.sql b/internal/clickhouse/schema/038_create_verifications.key_verifications_per_day_v3.sql deleted file mode 100644 index c4cd43aba5..0000000000 --- a/internal/clickhouse/schema/038_create_verifications.key_verifications_per_day_v3.sql +++ /dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE verifications.key_verifications_per_day_v3 -( - time DateTime, - workspace_id String, - key_space_id String, - identity_id String, - key_id String, - outcome LowCardinality(String), - tags Array(String), - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, key_space_id, identity_id, key_id, time, tags, outcome) -; - - --- +goose down -DROP TABLE 
verifications.key_verifications_per_day_v3; diff --git a/internal/clickhouse/schema/039_create_verifications.key_verifications_per_day_mv_v3.sql b/internal/clickhouse/schema/039_create_verifications.key_verifications_per_day_mv_v3.sql deleted file mode 100644 index 05e54a6857..0000000000 --- a/internal/clickhouse/schema/039_create_verifications.key_verifications_per_day_mv_v3.sql +++ /dev/null @@ -1,49 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW IF NOT EXISTS verifications.key_verifications_per_day_mv_v3 -TO verifications.key_verifications_per_day_v3 -AS -SELECT - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - count(*) as count, - toStartOfDay(fromUnixTimestamp64Milli(time)) AS time, - tags -FROM verifications.raw_key_verifications_v1 -GROUP BY - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - time, - tags -; - --- populate from existing data --- INSERT INTO verifications.key_verifications_per_day_v3 --- SELECT --- toStartOfDay(fromUnixTimestamp64Milli(time)) AS time, --- workspace_id, --- key_space_id, --- identity_id, --- key_id, --- outcome, --- tags, --- count(*) as count --- FROM verifications.raw_key_verifications_v1 --- GROUP BY --- workspace_id, --- key_space_id, --- identity_id, --- key_id, --- outcome, --- time, --- tags --- ; - - --- +goose down -DROP VIEW verifications.key_verifications_per_day_mv_v3; diff --git a/internal/clickhouse/schema/040_create_verifications.key_verifications_per_month_v3.sql b/internal/clickhouse/schema/040_create_verifications.key_verifications_per_month_v3.sql deleted file mode 100644 index 9b29bd585a..0000000000 --- a/internal/clickhouse/schema/040_create_verifications.key_verifications_per_month_v3.sql +++ /dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE verifications.key_verifications_per_month_v3 -( - time DateTime, - workspace_id String, - key_space_id String, - identity_id String, - key_id String, - outcome LowCardinality(String), - tags Array(String), - count Int64 -) 
-ENGINE = SummingMergeTree() -ORDER BY (workspace_id, key_space_id, identity_id, key_id, time, tags, outcome) -; - - --- +goose down -DROP TABLE verifications.key_verifications_per_month_v3; diff --git a/internal/clickhouse/schema/041_create_verifications.key_verifications_per_month_mv_v3.sql b/internal/clickhouse/schema/041_create_verifications.key_verifications_per_month_mv_v3.sql deleted file mode 100644 index b53b3ae6ea..0000000000 --- a/internal/clickhouse/schema/041_create_verifications.key_verifications_per_month_mv_v3.sql +++ /dev/null @@ -1,49 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW IF NOT EXISTS verifications.key_verifications_per_month_mv_v3 -TO verifications.key_verifications_per_month_v3 -AS -SELECT - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - count(*) as count, - toStartOfMonth(fromUnixTimestamp64Milli(time)) AS time, - tags -FROM verifications.raw_key_verifications_v1 -GROUP BY - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - time, - tags -; - --- populate from existing data --- INSERT INTO verifications.key_verifications_per_month_v3 --- SELECT --- toStartOfMonth(fromUnixTimestamp64Milli(time)) AS time, --- workspace_id, --- key_space_id, --- identity_id, --- key_id, --- outcome, --- tags, --- count(*) as counts --- FROM verifications.raw_key_verifications_v1 --- GROUP BY --- workspace_id, --- key_space_id, --- identity_id, --- key_id, --- outcome, --- time, --- tags --- ; - - --- +goose down -DROP VIEW verifications.key_verifications_per_month_mv_v3; diff --git a/internal/clickhouse/schema/042_create_api_requests_per_hour_v1.sql b/internal/clickhouse/schema/042_create_api_requests_per_hour_v1.sql deleted file mode 100644 index e2c49a801b..0000000000 --- a/internal/clickhouse/schema/042_create_api_requests_per_hour_v1.sql +++ /dev/null @@ -1,24 +0,0 @@ --- +goose up -CREATE TABLE metrics.api_requests_per_hour_v1 ( - time DateTime, - workspace_id String, - path String, - response_status Int, - 
host String, - -- Upper case HTTP method - -- Examples: "GET", "POST", "PUT", "DELETE" - method LowCardinality(String), - count Int64 -) ENGINE = SummingMergeTree() -ORDER BY - ( - workspace_id, - time, - host, - path, - response_status, - method - ); - --- +goose down -DROP TABLE metrics.api_requests_per_hour_v1; diff --git a/internal/clickhouse/schema/043_create_api_requests_per_hour_mv_v1.sql b/internal/clickhouse/schema/043_create_api_requests_per_hour_mv_v1.sql deleted file mode 100644 index 1d2f96c113..0000000000 --- a/internal/clickhouse/schema/043_create_api_requests_per_hour_mv_v1.sql +++ /dev/null @@ -1,22 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW metrics.api_requests_per_hour_mv_v1 TO metrics.api_requests_per_hour_v1 AS -SELECT - workspace_id, - path, - response_status, - host, - method, - count(*) as count, - toStartOfHour(fromUnixTimestamp64Milli(time)) AS time -FROM - metrics.raw_api_requests_v1 -GROUP BY - workspace_id, - path, - response_status, - host, - method, - time; - --- +goose down -DROP VIEW metrics.api_requests_per_hour_mv_v1; \ No newline at end of file diff --git a/internal/clickhouse/schema/044_create_api_requests_per_minute_v1.sql b/internal/clickhouse/schema/044_create_api_requests_per_minute_v1.sql deleted file mode 100644 index 38651b146b..0000000000 --- a/internal/clickhouse/schema/044_create_api_requests_per_minute_v1.sql +++ /dev/null @@ -1,24 +0,0 @@ --- +goose up -CREATE TABLE metrics.api_requests_per_minute_v1 ( - time DateTime, - workspace_id String, - path String, - response_status Int, - host String, - -- Upper case HTTP method - -- Examples: "GET", "POST", "PUT", "DELETE" - method LowCardinality(String), - count Int64 -) ENGINE = SummingMergeTree() -ORDER BY - ( - workspace_id, - time, - host, - path, - response_status, - method - ); - --- +goose down -DROP TABLE metrics.api_requests_per_minute_v1; diff --git a/internal/clickhouse/schema/045_create_api_requests_per_minute_mv_v1.sql 
b/internal/clickhouse/schema/045_create_api_requests_per_minute_mv_v1.sql deleted file mode 100644 index 0f58a183bf..0000000000 --- a/internal/clickhouse/schema/045_create_api_requests_per_minute_mv_v1.sql +++ /dev/null @@ -1,22 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW metrics.api_requests_per_minute_mv_v1 TO metrics.api_requests_per_minute_v1 AS -SELECT - workspace_id, - path, - response_status, - host, - method, - count(*) as count, - toStartOfMinute(fromUnixTimestamp64Milli(time)) AS time -FROM - metrics.raw_api_requests_v1 -GROUP BY - workspace_id, - path, - response_status, - host, - method, - time; - --- +goose down -DROP VIEW metrics.api_requests_per_minute_mv_v1; diff --git a/internal/clickhouse/schema/046_create_api_requests_per_day_v1.sql b/internal/clickhouse/schema/046_create_api_requests_per_day_v1.sql deleted file mode 100644 index 6b6e6fb800..0000000000 --- a/internal/clickhouse/schema/046_create_api_requests_per_day_v1.sql +++ /dev/null @@ -1,24 +0,0 @@ --- +goose up -CREATE TABLE metrics.api_requests_per_day_v1 ( - time DateTime, - workspace_id String, - path String, - response_status Int, - host String, - -- Upper case HTTP method - -- Examples: "GET", "POST", "PUT", "DELETE" - method LowCardinality(String), - count Int64 -) ENGINE = SummingMergeTree() -ORDER BY - ( - workspace_id, - time, - host, - path, - response_status, - method - ); - --- +goose down -DROP TABLE metrics.api_requests_per_day_v1; diff --git a/internal/clickhouse/schema/047_create_api_requests_per_day_mv_v1.sql b/internal/clickhouse/schema/047_create_api_requests_per_day_mv_v1.sql deleted file mode 100644 index a5452eaf48..0000000000 --- a/internal/clickhouse/schema/047_create_api_requests_per_day_mv_v1.sql +++ /dev/null @@ -1,22 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW metrics.api_requests_per_day_mv_v1 TO metrics.api_requests_per_day_v1 AS -SELECT - workspace_id, - path, - response_status, - host, - method, - count(*) as count, - 
toStartOfDay(fromUnixTimestamp64Milli(time)) AS time -FROM - metrics.raw_api_requests_v1 -GROUP BY - workspace_id, - path, - response_status, - host, - method, - time; - --- +goose down -DROP VIEW metrics.api_requests_per_day_mv_v1; diff --git a/internal/clickhouse/schema/048_raw_ratelimits_metrics_indexes_v1.sql b/internal/clickhouse/schema/048_raw_ratelimits_metrics_indexes_v1.sql deleted file mode 100644 index 41d6116ac6..0000000000 --- a/internal/clickhouse/schema/048_raw_ratelimits_metrics_indexes_v1.sql +++ /dev/null @@ -1,13 +0,0 @@ --- +goose up -ALTER TABLE ratelimits.raw_ratelimits_v1 - ADD INDEX idx_workspace_time (workspace_id, time) TYPE minmax GRANULARITY 1; - -ALTER TABLE ratelimits.raw_ratelimits_v1 - ADD INDEX idx_request_id (request_id) TYPE minmax GRANULARITY 1; - --- +goose down -ALTER TABLE ratelimits.raw_ratelimits_v1 - DROP INDEX idx_workspace_time; - -ALTER TABLE ratelimits.raw_ratelimits_v1 - DROP INDEX idx_request_id; diff --git a/internal/clickhouse/schema/049_raw_api_metrics_ratelimit_indexes_v1.sql b/internal/clickhouse/schema/049_raw_api_metrics_ratelimit_indexes_v1.sql deleted file mode 100644 index 882d430d74..0000000000 --- a/internal/clickhouse/schema/049_raw_api_metrics_ratelimit_indexes_v1.sql +++ /dev/null @@ -1,13 +0,0 @@ --- +goose up -ALTER TABLE metrics.raw_api_requests_v1 - ADD INDEX idx_workspace_time (workspace_id, time) TYPE minmax GRANULARITY 1; - -ALTER TABLE metrics.raw_api_requests_v1 - ADD INDEX idx_request_id (request_id) TYPE minmax GRANULARITY 1; - --- +goose down -ALTER TABLE metrics.raw_api_requests_v1 - DROP INDEX idx_workspace_time; - -ALTER TABLE metrics.raw_api_requests_v1 - DROP INDEX idx_request_id; diff --git a/internal/clickhouse/schema/050_create_verifications.key_verifications_per_minute_v1.sql b/internal/clickhouse/schema/050_create_verifications.key_verifications_per_minute_v1.sql deleted file mode 100644 index 2b68530cdc..0000000000 --- 
a/internal/clickhouse/schema/050_create_verifications.key_verifications_per_minute_v1.sql +++ /dev/null @@ -1,19 +0,0 @@ --- +goose up -CREATE TABLE verifications.key_verifications_per_minute_v1 -( - time DateTime, - workspace_id String, - key_space_id String, - identity_id String, - key_id String, - outcome LowCardinality(String), - tags Array(String), - count Int64 -) -ENGINE = SummingMergeTree() -ORDER BY (workspace_id, key_space_id, identity_id, key_id, time, tags, outcome) -; - - --- +goose down -DROP TABLE verifications.key_verifications_per_minute_v1; diff --git a/internal/clickhouse/schema/051_create_verifications.key_verifications_per_minute_mv_v1.sql b/internal/clickhouse/schema/051_create_verifications.key_verifications_per_minute_mv_v1.sql deleted file mode 100644 index 2a358d855c..0000000000 --- a/internal/clickhouse/schema/051_create_verifications.key_verifications_per_minute_mv_v1.sql +++ /dev/null @@ -1,26 +0,0 @@ --- +goose up -CREATE MATERIALIZED VIEW IF NOT EXISTS verifications.key_verifications_per_minute_mv_v1 -TO verifications.key_verifications_per_minute_v1 -AS -SELECT - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - count(*) as count, - toStartOfMinute(fromUnixTimestamp64Milli(time)) AS time, - tags -FROM verifications.raw_key_verifications_v1 -GROUP BY - workspace_id, - key_space_id, - identity_id, - key_id, - outcome, - time, - tags -; - --- +goose down -DROP VIEW verifications.key_verifications_per_minute_mv_v1; diff --git a/internal/clickhouse/src/logs-timeseries.test.ts b/internal/clickhouse/src/logs-timeseries.test.ts index 5056d6e2fb..1b0302bad0 100644 --- a/internal/clickhouse/src/logs-timeseries.test.ts +++ b/internal/clickhouse/src/logs-timeseries.test.ts @@ -57,7 +57,7 @@ describe.each([10, 100, 1_000, 10_000, 100_000])("with %i requests", (n) => { } const count = await ch.querier.query({ - query: "SELECT count(*) as count FROM metrics.raw_api_requests_v1", + query: "SELECT count(*) as count FROM 
default.api_requests_raw_v2", schema: z.object({ count: z.number().int() }), })({}); diff --git a/internal/clickhouse/src/verification_outcomes_propagate_correctly.test.ts b/internal/clickhouse/src/verification_outcomes_propagate_correctly.test.ts index 4f93d30f9a..28ebd15420 100644 --- a/internal/clickhouse/src/verification_outcomes_propagate_correctly.test.ts +++ b/internal/clickhouse/src/verification_outcomes_propagate_correctly.test.ts @@ -90,7 +90,7 @@ describe.each([10, 100, 1_000, 10_000])("with %i verifications", (n) => { SELECT outcome, COUNT(*) as count - FROM verifications.raw_key_verifications_v1 + FROM default.key_verifications_raw_v2 WHERE workspace_id = '${workspaceId}' AND key_space_id = '${keySpaceId}' AND @@ -109,7 +109,7 @@ describe.each([10, 100, 1_000, 10_000])("with %i verifications", (n) => { } await ch.querier.query({ - query: "OPTIMIZE TABLE verifications.key_verifications_per_day_v3 FINAL", + query: "OPTIMIZE TABLE default.key_verifications_per_day_v2 FINAL", schema: z.any(), })({}); @@ -119,7 +119,7 @@ describe.each([10, 100, 1_000, 10_000])("with %i verifications", (n) => { SELECT outcome, SUM(count) as total - FROM verifications.key_verifications_per_day_v3 + FROM default.key_verifications_per_day_v2 WHERE workspace_id = '${workspaceId}' AND key_space_id = '${keySpaceId}' AND diff --git a/tools/artillery/create-keys.ts b/tools/artillery/create-keys.ts index a17c90904b..99145a5f76 100644 --- a/tools/artillery/create-keys.ts +++ b/tools/artillery/create-keys.ts @@ -40,13 +40,6 @@ interface CreateKeyResult { keyId: string; } -interface CreateKeyError { - index: number; - error: string; - statusCode: number; - response?: any; -} - async function createKey( rootKey: string, apiId: string, @@ -88,7 +81,7 @@ async function createKey( console.error( `Failed to create key at index ${index}: ${response.status} ${response.statusText}`, ); - console.error(`Error response body:`, errorBody); + console.error("Error response body", errorBody); return 
null; } diff --git a/tools/migrate/ch_logs.ts b/tools/migrate/ch_logs.ts index d954328e89..df62862b3a 100644 --- a/tools/migrate/ch_logs.ts +++ b/tools/migrate/ch_logs.ts @@ -22,7 +22,7 @@ async function main() { const query = ch.querier.query({ query: ` - SELECT * FROM metrics.raw_api_requests_v1 + SELECT * FROM default.api_requests_raw_v2 WHERE workspace_id = '${workspaceId}' AND time > ${start} AND time < ${end} diff --git a/tools/migrate/ch_migrate_v2_simple.ts b/tools/migrate/ch_migrate_v2_simple.ts deleted file mode 100644 index b3532d6f97..0000000000 --- a/tools/migrate/ch_migrate_v2_simple.ts +++ /dev/null @@ -1,101 +0,0 @@ -import { createClient } from "@clickhouse/client-web"; - -// Configuration -const CHUNK_SIZE_HOURS = 6; // Process 6 hours at a time -const CHUNK_SIZE_MS = CHUNK_SIZE_HOURS * 60 * 60 * 1000; -const MIGRATION_START = new Date("2023-05-01T00:00:00Z"); -const MIGRATION_END = new Date("2025-10-01T00:00:00Z"); - -async function main() { - const ch = createClient({ - url: process.env.CLICKHOUSE_URL, - - clickhouse_settings: { - output_format_json_quote_64bit_integers: 0, - output_format_json_quote_64bit_floats: 0, - }, - }); - - if (!process.env.CLICKHOUSE_URL) { - throw new Error("CLICKHOUSE_URL environment variable is required"); - } - - let end = MIGRATION_END.getTime(); - - while (end > MIGRATION_START.getTime()) { - const start = end - CHUNK_SIZE_MS; - - console.log(`⏳ Processing ${new Date(start).toLocaleString()} (${start})`); - - await Promise.all([ - ch.query({ - query: ` - INSERT INTO key_verifications_raw_v2 - SELECT - request_id, - time, - workspace_id, - key_space_id, - identity_id, - key_id, - region, - outcome, - tags, - 0 as spent_credits, -- v1 doesn't have this column, default to 0 - 0.0 as latency -- v1 doesn't have this column, default to 0.0 - FROM verifications.raw_key_verifications_v1 - WHERE time >= ${start} - AND time < ${end}; - `, - }), - ch.query({ - query: ` - INSERT INTO ratelimits_raw_v2 - SELECT - request_id, - 
time, - workspace_id, - namespace_id, - identifier, - passed, - 0.0 as latency -- v1 doesn't have this column, default to 0.0 - FROM - ratelimits.raw_ratelimits_v1 - WHERE time >= ${start} - AND time < ${end}; - - `, - }), - ch.query({ - query: ` - INSERT INTO api_requests_raw_v2 - SELECT - request_id, - time, - workspace_id, - host, - method, - path, - request_headers, - request_body, - response_status, - response_headers, - response_body, - error, - service_latency, - user_agent, - ip_address, - '' as region - FROM - metrics.raw_api_requests_v1 - WHERE time >= ${start} - AND time < ${end}; - - `, - }), - ]); - - end = start; - } -} -main(); diff --git a/tools/migrate/v1_deprecation.ts b/tools/migrate/v1_deprecation.ts index 523c6fb81a..737561756d 100644 --- a/tools/migrate/v1_deprecation.ts +++ b/tools/migrate/v1_deprecation.ts @@ -28,7 +28,7 @@ async function main() { SELECT workspace_id, splitByChar('?', path, 1)[1] as path - FROM metrics.api_requests_per_day_v1 + FROM default.api_requests_per_day_v2 WHERE startsWith(path, '/v1/') AND workspace_id != '' AND workspace_id != 'ws_2vUFz88G6TuzMQHZaUhXADNyZWMy' // filter out special workspaces @@ -47,7 +47,9 @@ async function main() { let emailsSent = 0; console.log( - `Found ${new Set(rows.val.map((r) => r.workspace_id)).size} workspaces across ${rows.val.length} paths`, + `Found ${ + new Set(rows.val.map((r) => r.workspace_id)).size + } workspaces across ${rows.val.length} paths`, ); const workspaceToPaths = new Map(); for (const row of rows.val) {