diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0516f7550..25331b686 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,7 +28,7 @@ jobs: go tool gotestsum -- -race -v -count=1 ./... \ -coverpkg="./cmd/...,./internal/...,${pkgs}" -coverprofile=coverage.out - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v5 with: name: code-coverage-report path: coverage.out @@ -39,7 +39,7 @@ jobs: - test runs-on: ubuntu-latest steps: - - uses: actions/download-artifact@v5 + - uses: actions/download-artifact@v6 with: name: code-coverage-report - uses: coverallsapp/github-action@v2 diff --git a/.github/workflows/install.yml b/.github/workflows/install.yml index b3c6b8414..05d2b99d6 100644 --- a/.github/workflows/install.yml +++ b/.github/workflows/install.yml @@ -28,7 +28,7 @@ jobs: mv tmp.$$.json package.json npm pack - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v5 with: name: installer path: supabase-1.28.0.tgz @@ -41,7 +41,7 @@ jobs: os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/download-artifact@v5 + - uses: actions/download-artifact@v6 with: name: installer @@ -57,7 +57,7 @@ jobs: os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/download-artifact@v5 + - uses: actions/download-artifact@v6 with: name: installer @@ -73,7 +73,7 @@ jobs: os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/download-artifact@v5 + - uses: actions/download-artifact@v6 with: name: installer @@ -96,7 +96,7 @@ jobs: os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/download-artifact@v5 + - uses: actions/download-artifact@v6 with: name: installer @@ -115,7 +115,7 @@ jobs: os: [ubuntu-latest, macos-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/download-artifact@v5 + - uses: actions/download-artifact@v6 with: name: installer diff --git a/cmd/db.go b/cmd/db.go index 9558dc274..5367436df 100644 --- a/cmd/db.go +++ b/cmd/db.go @@ -101,8 +101,6 @@ var ( if usePgSchema { differ = diff.DiffPgSchema fmt.Fprintln(os.Stderr, utils.Yellow("WARNING:"), "--use-pg-schema flag is experimental and may not include all entities, such as views and grants.") - } else if !viper.GetBool("EXPERIMENTAL") { - differ = diff.DiffSchemaMigraBash } return diff.Run(cmd.Context(), schema, file, flags.DbConfig, differ, afero.NewOsFs()) }, diff --git a/cmd/inspect.go b/cmd/inspect.go index 24c3ff465..2405e8d78 100644 --- a/cmd/inspect.go +++ b/cmd/inspect.go @@ -18,6 +18,7 @@ import ( "github.com/supabase/cli/internal/inspect/replication_slots" "github.com/supabase/cli/internal/inspect/role_stats" "github.com/supabase/cli/internal/inspect/table_stats" + "github.com/supabase/cli/internal/inspect/traffic_profile" "github.com/supabase/cli/internal/inspect/vacuum_stats" "github.com/supabase/cli/internal/utils/flags" ) @@ -135,6 +136,14 @@ var ( }, } + inspectTrafficProfileCmd = &cobra.Command{ + Use: "traffic-profile", + Short: "Show read/write activity ratio for tables based on block I/O operations", + RunE: func(cmd *cobra.Command, args []string) error { + return traffic_profile.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + inspectCacheHitCmd = &cobra.Command{ Deprecated: `use "db-stats" instead.`, Use: "cache-hit", @@ -270,6 +279,7 @@ func init() { inspectDBCmd.AddCommand(inspectBloatCmd) inspectDBCmd.AddCommand(inspectVacuumStatsCmd) 
 	inspectDBCmd.AddCommand(inspectTableStatsCmd)
+	inspectDBCmd.AddCommand(inspectTrafficProfileCmd)
 	inspectDBCmd.AddCommand(inspectRoleStatsCmd)
 	inspectDBCmd.AddCommand(inspectDBStatsCmd)
 	// DEPRECATED
diff --git a/docs/supabase/inspect/db-traffic-profile.md b/docs/supabase/inspect/db-traffic-profile.md
new file mode 100644
index 000000000..161706f75
--- /dev/null
+++ b/docs/supabase/inspect/db-traffic-profile.md
@@ -0,0 +1,24 @@
+# db-traffic-profile
+
+This command analyzes table I/O patterns to show read/write activity ratios based on block-level operations. It combines data from PostgreSQL's `pg_stat_user_tables` (for tuple operations) and `pg_statio_user_tables` (for block I/O) to categorize each table's workload profile.
+
+
+The command classifies tables into categories:
+- **Read-Heavy** - Estimated block reads are more than 5x block writes (shown as `1:N`, e.g., 1:10, 1:50)
+- **Write-Heavy** - Estimated block writes exceed 20% of block reads (shown as `N:1`, e.g., 2:1, 10:1)
+- **Balanced** - Block reads and writes fall exactly at the 5:1 threshold (shown as 1:1)
+- **Read-Only** - Block reads recorded but no block writes estimated
+- **Write-Only** - Block writes estimated but no block reads recorded
+
+```
+SCHEMA │ TABLE       │ BLOCKS READ │ WRITE TUPLES │ BLOCKS WRITE │ ACTIVITY RATIO
+───────┼─────────────┼─────────────┼──────────────┼──────────────┼─────────────────────
+public │ user_events │      23,450 │    9,004,680 │      450,234 │ 19.2:1 (Write-Heavy)
+public │ users       │      89,203 │       12,451 │        1,203 │ 1:74.2 (Read-Heavy)
+public │ sessions    │      11,705 │       14,823 │        2,341 │ 1:1 (Balanced)
+public │ cache_data  │     123,456 │        8,012 │            0 │ Read-Only
+auth   │ audit_logs  │           0 │       98,234 │       12,341 │ Write-Only
+```
+
+**Note:** Only tables with recorded write and read activity in the statistics views are listed; completely idle tables are omitted. The Read-Only and Write-Only labels describe the estimated block I/O ratio rather than the raw tuple counters. The 5:1 classification threshold is hardcoded in the query's `ratio_target` CTE and determines when a table is considered "heavy" in one direction rather than balanced.
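The classification rule reads more clearly as executable logic. The block below is a minimal Go sketch of the decision tree that `traffic_profile.sql` applies, assuming its hardcoded 5:1 threshold; the `classify` helper, its `%.1f` rounding, and the sample inputs are illustrative assumptions, not part of the CLI, since the real classification happens in SQL.

```
package main

import "fmt"

// classify mirrors the CASE expression in traffic_profile.sql: the 5:1 ratio
// threshold decides which side dominates, and a zero count on either side
// collapses to the Read-Only/Write-Only labels.
func classify(blocksRead, blocksWrite float64) string {
	const ratio = 5
	switch {
	case blocksRead == 0 && blocksWrite == 0:
		return "No Activity"
	case blocksWrite*ratio > blocksRead:
		if blocksRead == 0 {
			return "Write-Only"
		}
		return fmt.Sprintf("%.1f:1 (Write-Heavy)", blocksWrite/blocksRead)
	case blocksRead > blocksWrite*ratio:
		if blocksWrite == 0 {
			return "Read-Only"
		}
		return fmt.Sprintf("1:%.1f (Read-Heavy)", blocksRead/blocksWrite)
	default:
		return "1:1 (Balanced)"
	}
}

func main() {
	fmt.Println(classify(10000, 100)) // 1:100.0 (Read-Heavy)
	fmt.Println(classify(100, 1000))  // 10.0:1 (Write-Heavy)
	fmt.Println(classify(500, 100))   // 1:1 (Balanced): exactly at the 5:1 boundary
}
```

Note the boundary case: because Write-Heavy is checked first and Balanced is the fall-through, a table only lands in Balanced when reads equal exactly five times writes.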
+ diff --git a/go.mod b/go.mod index 6d5bfe49b..0d7d1fb63 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/docker/docker v28.5.1+incompatible github.com/docker/go-connections v0.6.0 github.com/fsnotify/fsnotify v1.9.0 - github.com/getsentry/sentry-go v0.36.0 + github.com/getsentry/sentry-go v0.36.1 github.com/go-errors/errors v1.5.1 github.com/go-git/go-git/v5 v5.16.3 github.com/go-playground/validator/v10 v10.28.0 diff --git a/go.sum b/go.sum index ad6b798e7..2809edcbf 100644 --- a/go.sum +++ b/go.sum @@ -300,8 +300,8 @@ github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIp github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= github.com/getkin/kin-openapi v0.131.0 h1:NO2UeHnFKRYhZ8wg6Nyh5Cq7dHk4suQQr72a4pMrDxE= github.com/getkin/kin-openapi v0.131.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58= -github.com/getsentry/sentry-go v0.36.0 h1:UkCk0zV28PiGf+2YIONSSYiYhxwlERE5Li3JPpZqEns= -github.com/getsentry/sentry-go v0.36.0/go.mod h1:p5Im24mJBeruET8Q4bbcMfCQ+F+Iadc4L48tB1apo2c= +github.com/getsentry/sentry-go v0.36.1 h1:kMJt0WWsxWATUxkvFgVBZdIeHSk/Oiv5P0jZ9e5m/Lw= +github.com/getsentry/sentry-go v0.36.1/go.mod h1:p5Im24mJBeruET8Q4bbcMfCQ+F+Iadc4L48tB1apo2c= github.com/ghostiam/protogetter v0.3.15 h1:1KF5sXel0HE48zh1/vn0Loiw25A9ApyseLzQuif1mLY= github.com/ghostiam/protogetter v0.3.15/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= diff --git a/internal/branches/get/get.go b/internal/branches/get/get.go index ab2f174b1..a207901cc 100644 --- a/internal/branches/get/get.go +++ b/internal/branches/get/get.go @@ -90,7 +90,7 @@ func getPoolerConfig(ctx context.Context, ref string) (api.SupavisorConfigRespon return result, errors.Errorf("unexpected get pooler status %d: %s", resp.StatusCode(), string(resp.Body)) } for _, config := range *resp.JSON200 { - if config.DatabaseType == api.PRIMARY { + if config.DatabaseType == api.SupavisorConfigResponseDatabaseTypePRIMARY { return config, nil } } diff --git a/internal/db/diff/templates/migra.ts b/internal/db/diff/templates/migra.ts index 788a77d9d..a8ae28d80 100644 --- a/internal/db/diff/templates/migra.ts +++ b/internal/db/diff/templates/migra.ts @@ -1,4 +1,4 @@ -import { createClient } from "npm:@pgkit/client"; +import { createClient, sql } from "npm:@pgkit/client"; import { Migration } from "npm:@pgkit/migra"; // Avoids error on self-signed certificate @@ -20,7 +20,12 @@ const extensionSchemas = [ ]; try { - let sql = ""; + // Step down from login role to postgres + await clientHead.query(sql`set role postgres`); + // Force schema qualified references for pg_get_expr + await clientHead.query(sql`set search_path = ''`); + await clientBase.query(sql`set search_path = ''`); + let result = ""; for (const schema of includedSchemas) { const m = await Migration.create(clientBase, clientHead, { schema, @@ -35,7 +40,7 @@ try { } else { m.add_all_changes(true); } - sql += m.sql; + result += m.sql; } if (includedSchemas.length === 0) { // Migra does not ignore custom types and triggers created by extensions, so we diff @@ -48,7 +53,7 @@ try { e.set_safety(false); e.add(e.changes.schemas({ creations_only: true })); e.add_extension_changes(); - sql += e.sql; + result += e.sql; } // Diff user defined entities in non-managed schemas, including extensions. 
const m = await Migration.create(clientBase, clientHead, { @@ -61,7 +66,7 @@ try { }); m.set_safety(false); m.add_all_changes(true); - sql += m.sql; + result += m.sql; // For managed schemas, we want to include triggers and RLS policies only. for (const schema of managedSchemas) { const s = await Migration.create(clientBase, clientHead, { @@ -73,10 +78,10 @@ try { s.add(s.changes.rlspolicies({ drops_only: true })); s.add(s.changes.rlspolicies({ creations_only: true })); s.add(s.changes.triggers({ creations_only: true })); - sql += s.sql; + result += s.sql; } } - console.log(sql); + console.log(result); } catch (e) { console.error(e); } finally { diff --git a/internal/db/pull/pull.go b/internal/db/pull/pull.go index 9c822971b..b55b6935c 100644 --- a/internal/db/pull/pull.go +++ b/internal/db/pull/pull.go @@ -7,7 +7,6 @@ import ( "math" "os" "path/filepath" - "slices" "strconv" "strings" @@ -17,7 +16,6 @@ import ( "github.com/spf13/afero" "github.com/supabase/cli/internal/db/diff" "github.com/supabase/cli/internal/db/dump" - "github.com/supabase/cli/internal/db/start" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/migration/new" "github.com/supabase/cli/internal/migration/repair" @@ -26,10 +24,9 @@ import ( ) var ( - errMissing = errors.New("No migrations found") - errInSync = errors.New("No schema changes found") - errConflict = errors.Errorf("The remote database's migration history does not match local files in %s directory.", utils.MigrationsDir) - managedSchemas = []string{"auth", "storage", "realtime"} + errMissing = errors.New("No migrations found") + errInSync = errors.New("No schema changes found") + errConflict = errors.Errorf("The remote database's migration history does not match local files in %s directory.", utils.MigrationsDir) ) func Run(ctx context.Context, schema []string, config pgconn.Config, name string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { @@ -63,24 +60,16 @@ func run(ctx context.Context, schema []string, path string, conn *pgx.Conn, fsys if err = dumpRemoteSchema(ctx, path, config, fsys); err != nil { return err } - // Pull changes in managed schemas automatically - if err = diffRemoteSchema(ctx, managedSchemas, path, config, fsys); errors.Is(err, errInSync) { + // Run a second pass to pull in changes from default privileges and managed schemas + if err = diffRemoteSchema(ctx, nil, path, config, fsys); errors.Is(err, errInSync) { err = nil } return err } else if err != nil { return err } - // 2. Fetch user defined schemas - if len(schema) == 0 { - var err error - if schema, err = migration.ListUserSchemas(ctx, conn); err != nil { - return err - } - schema = append(schema, managedSchemas...) - } - // 3. Fetch remote schema changes - return diffUserSchemas(ctx, schema, path, config, fsys) + // 2. 
Fetch remote schema changes + return diffRemoteSchema(ctx, schema, path, config, fsys) } func dumpRemoteSchema(ctx context.Context, path string, config pgconn.Config, fsys afero.Fs) error { @@ -103,7 +92,7 @@ func diffRemoteSchema(ctx context.Context, schema []string, path string, config if err != nil { return err } - if len(output) == 0 { + if trimmed := strings.TrimSpace(output); len(trimmed) == 0 { return errors.New(errInSync) } // Append to existing migration file since we run this after dump @@ -118,59 +107,6 @@ func diffRemoteSchema(ctx context.Context, schema []string, path string, config return nil } -func diffUserSchemas(ctx context.Context, schema []string, path string, config pgconn.Config, fsys afero.Fs) error { - var managed, user []string - for _, s := range schema { - if slices.Contains(managedSchemas, s) { - managed = append(managed, s) - } else { - user = append(user, s) - } - } - fmt.Fprintln(os.Stderr, "Creating shadow database...") - shadow, err := diff.CreateShadowDatabase(ctx, utils.Config.Db.ShadowPort) - if err != nil { - return err - } - defer utils.DockerRemove(shadow) - if err := start.WaitForHealthyService(ctx, start.HealthTimeout, shadow); err != nil { - return err - } - if err := diff.MigrateShadowDatabase(ctx, shadow, fsys); err != nil { - return err - } - shadowConfig := pgconn.Config{ - Host: utils.Config.Hostname, - Port: utils.Config.Db.ShadowPort, - User: "postgres", - Password: utils.Config.Db.Password, - Database: "postgres", - } - // Diff managed and user defined schemas separately - var output string - if len(user) > 0 { - fmt.Fprintln(os.Stderr, "Diffing schemas:", strings.Join(user, ",")) - if output, err = diff.DiffSchemaMigraBash(ctx, shadowConfig, config, user); err != nil { - return err - } - } - if len(managed) > 0 { - fmt.Fprintln(os.Stderr, "Diffing schemas:", strings.Join(managed, ",")) - if result, err := diff.DiffSchemaMigra(ctx, shadowConfig, config, managed); err != nil { - return err - } else { - output += result - } - } - if len(output) == 0 { - return errors.New(errInSync) - } - if err := utils.WriteFile(path, []byte(output), fsys); err != nil { - return errors.Errorf("failed to write dump file: %w", err) - } - return nil -} - func assertRemoteInSync(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { remoteMigrations, err := migration.ListRemoteMigrations(ctx, conn) if err != nil { diff --git a/internal/db/pull/pull_test.go b/internal/db/pull/pull_test.go index 293448156..49fecb19f 100644 --- a/internal/db/pull/pull_test.go +++ b/internal/db/pull/pull_test.go @@ -83,24 +83,6 @@ func TestPullSchema(t *testing.T) { assert.Equal(t, []byte("test"), contents) }) - t.Run("throws error on load user schema failure", func(t *testing.T) { - // Setup in-memory fs - fsys := afero.NewMemMapFs() - path := filepath.Join(utils.MigrationsDir, "0_test.sql") - require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) - // Setup mock postgres - conn := pgtest.NewConn() - defer conn.Close(t) - conn.Query(migration.LIST_MIGRATION_VERSION). - Reply("SELECT 1", []any{"0"}). - Query(migration.ListSchemas, migration.ManagedSchemas). 
- ReplyError(pgerrcode.DuplicateTable, `relation "test" already exists`) - // Run test - err := run(context.Background(), nil, "", conn.MockClient(t), fsys) - // Check error - assert.ErrorContains(t, err, `ERROR: relation "test" already exists (SQLSTATE 42P07)`) - }) - t.Run("throws error on diff failure", func(t *testing.T) { // Setup in-memory fs fsys := afero.NewMemMapFs() diff --git a/internal/gen/bearerjwt/bearerjwt_test.go b/internal/gen/bearerjwt/bearerjwt_test.go index 4bce6c1a9..fe329b649 100644 --- a/internal/gen/bearerjwt/bearerjwt_test.go +++ b/internal/gen/bearerjwt/bearerjwt_test.go @@ -180,6 +180,7 @@ func TestGenerateToken(t *testing.T) { claims := jwt.MapClaims{} // Setup in-memory fs fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteFile("supabase/keys.json", []byte("[]"), fsys)) require.NoError(t, utils.WriteFile("supabase/config.toml", []byte(` [auth] signing_keys_path = "./keys.json" diff --git a/internal/inspect/traffic_profile/traffic_profile.go b/internal/inspect/traffic_profile/traffic_profile.go new file mode 100644 index 000000000..fb58ec3e1 --- /dev/null +++ b/internal/inspect/traffic_profile/traffic_profile.go @@ -0,0 +1,49 @@ +package traffic_profile + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed traffic_profile.sql +var TrafficProfileQuery string + +type Result struct { + Schemaname string + Table_name string + Blocks_read int64 + Write_tuples int64 + Blocks_write float64 + Activity_ratio string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, TrafficProfileQuery) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Schema|Table|Blocks Read|Write Tuples|Blocks Write|Activity Ratio|\n|-|-|-|-|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%s`|`%d`|`%d`|`%.1f`|`%s`|\n", + r.Schemaname, r.Table_name, r.Blocks_read, r.Write_tuples, r.Blocks_write, r.Activity_ratio) + } + return utils.RenderTable(table) +} diff --git a/internal/inspect/traffic_profile/traffic_profile.sql b/internal/inspect/traffic_profile/traffic_profile.sql new file mode 100644 index 000000000..b56298ac0 --- /dev/null +++ b/internal/inspect/traffic_profile/traffic_profile.sql @@ -0,0 +1,45 @@ + -- Query adapted from Crunchy Data blog: "Is Postgres Read Heavy or Write Heavy? 
(And Why You Should Care)" by David Christensen +WITH +ratio_target AS (SELECT 5 AS ratio), +table_list AS (SELECT + s.schemaname, + s.relname AS table_name, + si.heap_blks_read + si.idx_blks_read AS blocks_read, +s.n_tup_ins + s.n_tup_upd + s.n_tup_del AS write_tuples, +relpages * (s.n_tup_ins + s.n_tup_upd + s.n_tup_del ) / (case when reltuples = 0 then 1 else reltuples end) as blocks_write +FROM + pg_stat_user_tables AS s +JOIN pg_statio_user_tables AS si ON s.relid = si.relid +JOIN pg_class c ON c.oid = s.relid +WHERE +(s.n_tup_ins + s.n_tup_upd + s.n_tup_del) > 0 +AND + (si.heap_blks_read + si.idx_blks_read) > 0 + ) +SELECT + schemaname, + table_name, + blocks_read, + write_tuples, + blocks_write, + CASE + WHEN blocks_read = 0 and blocks_write = 0 THEN + 'No Activity' + WHEN blocks_write * ratio > blocks_read THEN + CASE + WHEN blocks_read = 0 THEN 'Write-Only' + ELSE + ROUND(blocks_write :: numeric / blocks_read :: numeric, 1)::text || ':1 (Write-Heavy)' + END + WHEN blocks_read > blocks_write * ratio THEN + CASE + WHEN blocks_write = 0 THEN 'Read-Only' + ELSE + '1:' || ROUND(blocks_read::numeric / blocks_write :: numeric, 1)::text || ' (Read-Heavy)' + END + ELSE + '1:1 (Balanced)' + END AS activity_ratio +FROM table_list, ratio_target +ORDER BY + (blocks_read + blocks_write) DESC diff --git a/internal/inspect/traffic_profile/traffic_profile_test.go b/internal/inspect/traffic_profile/traffic_profile_test.go new file mode 100644 index 000000000..870076c68 --- /dev/null +++ b/internal/inspect/traffic_profile/traffic_profile_test.go @@ -0,0 +1,42 @@ +package traffic_profile + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestTrafficProfile(t *testing.T) { + t.Run("inspects traffic profile", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(TrafficProfileQuery). + Reply("SELECT 1", Result{ + Schemaname: "public", + Table_name: "users", + Blocks_read: 1000, + Write_tuples: 500, + Blocks_write: 250.5, + Activity_ratio: "0.3:1 (Write-Heavy)", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/link/link.go b/internal/link/link.go index d3e48fb22..5c66f93c6 100644 --- a/internal/link/link.go +++ b/internal/link/link.go @@ -214,7 +214,7 @@ func linkPooler(ctx context.Context, projectRef string, fsys afero.Fs) error { return errors.Errorf("%w: %s", tenant.ErrAuthToken, string(resp.Body)) } for _, config := range *resp.JSON200 { - if config.DatabaseType == api.PRIMARY { + if config.DatabaseType == api.SupavisorConfigResponseDatabaseTypePRIMARY { updatePoolerConfig(config) } } diff --git a/internal/start/start.go b/internal/start/start.go index 47f8b5c37..22b8fa694 100644 --- a/internal/start/start.go +++ b/internal/start/start.go @@ -358,14 +358,14 @@ EOF // default JWT for downstream services. // Finally, the apikey header may be set to a legacy JWT. In that case, we want to copy // it to Authorization header for backwards compatibility.
- `$((function() return (headers.authorization ~= nil and headers.authorization:sub(1, 10) ~= 'Bearer sb_' and headers.authorization) or (headers.apikey == '%s' and 'Bearer %s') or (headers.apikey == '%s' and 'Bearer %s') or headers.apikey end)())`, + `$((headers.authorization ~= nil and headers.authorization:sub(1, 10) ~= 'Bearer sb_' and headers.authorization) or (headers.apikey == '%s' and 'Bearer %s') or (headers.apikey == '%s' and 'Bearer %s') or headers.apikey)`, utils.Config.Auth.SecretKey.Value, utils.Config.Auth.ServiceRoleKey.Value, utils.Config.Auth.PublishableKey.Value, utils.Config.Auth.AnonKey.Value, ), QueryToken: fmt.Sprintf( - `$((function() return (query_params.apikey == '%s' and '%s') or (query_params.apikey == '%s' and '%s') or query_params.apikey end)())`, + `$((query_params.apikey == '%s' and '%s') or (query_params.apikey == '%s' and '%s') or query_params.apikey)`, utils.Config.Auth.SecretKey.Value, utils.Config.Auth.ServiceRoleKey.Value, utils.Config.Auth.PublishableKey.Value, diff --git a/internal/start/templates/kong.yml b/internal/start/templates/kong.yml index 533aa7c37..0c185eb6e 100644 --- a/internal/start/templates/kong.yml +++ b/internal/start/templates/kong.yml @@ -113,25 +113,6 @@ services: replace: querystring: - "apikey:{{ .QueryToken }}" - - name: realtime-v1-longpoll-not-working - _comment: "Realtime: /realtime/v1/* -> ws://realtime:4000/socket/longpoll" - url: http://{{ .RealtimeId }}:4000/socket - protocol: http - routes: - - name: realtime-v1-longpoll - strip_path: true - paths: - - /realtime/v1/ - plugins: - - name: cors - - name: request-transformer - config: - add: - headers: - - "Authorization: {{ .BearerToken }}" - replace: - headers: - - "Authorization: {{ .BearerToken }}" - name: realtime-v1-rest _comment: "Realtime: /realtime/v1/* -> http://realtime:4000/api/*" url: http://{{ .RealtimeId }}:4000/api diff --git a/internal/utils/connect.go b/internal/utils/connect.go index 3223bb032..bd8d199f7 100644 --- a/internal/utils/connect.go +++ b/internal/utils/connect.go @@ -98,7 +98,7 @@ func assertDomainInProfile(host string) error { if err != nil { return errors.Errorf("failed to parse pooler TLD: %w", err) } - if !strings.HasSuffix(CurrentProfile.APIURL, "."+domain) { + if len(CurrentProfile.PoolerHost) > 0 && !strings.EqualFold(CurrentProfile.PoolerHost, domain) { return errors.Errorf("Pooler domain does not belong to current profile: %s", domain) } return nil diff --git a/internal/utils/profile.go b/internal/utils/profile.go index 06b607a0d..eef9e5a0d 100644 --- a/internal/utils/profile.go +++ b/internal/utils/profile.go @@ -16,6 +16,7 @@ type Profile struct { APIURL string `mapstructure:"api_url" validate:"required,http_url"` DashboardURL string `mapstructure:"dashboard_url" validate:"required,http_url"` ProjectHost string `mapstructure:"project_host" validate:"required,hostname_rfc1123"` + PoolerHost string `mapstructure:"pooler_host" validate:"omitempty,hostname_rfc1123"` DocsURL string `mapstructure:"docs_url" validate:"omitempty,http_url"` StudioImage string `mapstructure:"studio_image"` } @@ -26,12 +27,14 @@ var allProfiles = []Profile{{ DashboardURL: "https://supabase.com/dashboard", DocsURL: "https://supabase.com/docs", ProjectHost: "supabase.co", + PoolerHost: "supabase.com", }, { Name: "supabase-staging", APIURL: "https://api.supabase.green", DashboardURL: "https://supabase.green/dashboard", DocsURL: "https://supabase.com/docs", ProjectHost: "supabase.red", + PoolerHost: "supabase.green", }, { Name: "supabase-local", APIURL: 
"http://localhost:8080", @@ -44,6 +47,7 @@ var allProfiles = []Profile{{ DashboardURL: "https://cloud.snap.com/dashboard", DocsURL: "https://cloud.snap.com/docs", ProjectHost: "snapcloud.dev", + PoolerHost: "snapcloud.co", }} var CurrentProfile Profile diff --git a/package.json b/package.json index 283515106..1bae6b518 100644 --- a/package.json +++ b/package.json @@ -21,7 +21,7 @@ "supabase": "bin/supabase" }, "dependencies": { - "bin-links": "^5.0.0", + "bin-links": "^6.0.0", "https-proxy-agent": "^7.0.2", "node-fetch": "^3.3.2", "tar": "7.5.1" diff --git a/pkg/api/client.gen.go b/pkg/api/client.gen.go index e8d6bcf49..b3c1a153e 100644 --- a/pkg/api/client.gen.go +++ b/pkg/api/client.gen.go @@ -155,6 +155,9 @@ type ClientInterface interface { // V1ClaimProjectForOrganization request V1ClaimProjectForOrganization(ctx context.Context, slug string, token string, reqEditors ...RequestEditorFn) (*http.Response, error) + // V1GetAllProjectsForOrganization request + V1GetAllProjectsForOrganization(ctx context.Context, slug string, params *V1GetAllProjectsForOrganizationParams, reqEditors ...RequestEditorFn) (*http.Response, error) + // V1ListAllProjects request V1ListAllProjects(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) @@ -900,6 +903,18 @@ func (c *Client) V1ClaimProjectForOrganization(ctx context.Context, slug string, return c.Client.Do(req) } +func (c *Client) V1GetAllProjectsForOrganization(ctx context.Context, slug string, params *V1GetAllProjectsForOrganizationParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1GetAllProjectsForOrganizationRequest(c.Server, slug, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) V1ListAllProjects(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewV1ListAllProjectsRequest(c.Server) if err != nil { @@ -3817,6 +3832,126 @@ func NewV1ClaimProjectForOrganizationRequest(server string, slug string, token s return req, nil } +// NewV1GetAllProjectsForOrganizationRequest generates requests for V1GetAllProjectsForOrganization +func NewV1GetAllProjectsForOrganizationRequest(server string, slug string, params *V1GetAllProjectsForOrganizationParams) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "slug", runtime.ParamLocationPath, slug) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/v1/organizations/%s/projects", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + if params != nil { + queryValues := queryURL.Query() + + if params.Offset != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "offset", runtime.ParamLocationQuery, *params.Offset); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + if params.Limit != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "limit", runtime.ParamLocationQuery, *params.Limit); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + if params.Search != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "search", runtime.ParamLocationQuery, *params.Search); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + if params.Sort != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "sort", runtime.ParamLocationQuery, *params.Sort); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + if params.Statuses != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "statuses", runtime.ParamLocationQuery, *params.Statuses); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + queryURL.RawQuery = queryValues.Encode() + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + // NewV1ListAllProjectsRequest generates requests for V1ListAllProjects func NewV1ListAllProjectsRequest(server string) (*http.Request, error) { var err error @@ -9663,6 +9798,9 @@ type ClientWithResponsesInterface interface { // V1ClaimProjectForOrganizationWithResponse request V1ClaimProjectForOrganizationWithResponse(ctx context.Context, slug string, token string, reqEditors ...RequestEditorFn) (*V1ClaimProjectForOrganizationResponse, error) + // V1GetAllProjectsForOrganizationWithResponse request + V1GetAllProjectsForOrganizationWithResponse(ctx context.Context, slug string, params *V1GetAllProjectsForOrganizationParams, reqEditors ...RequestEditorFn) (*V1GetAllProjectsForOrganizationResponse, error) + // V1ListAllProjectsWithResponse request V1ListAllProjectsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*V1ListAllProjectsResponse, error) @@ -10489,6 +10627,28 @@ func (r V1ClaimProjectForOrganizationResponse) StatusCode() int { return 0 } +type V1GetAllProjectsForOrganizationResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *OrganizationProjectsResponse +} + +// Status returns HTTPResponse.Status +func (r V1GetAllProjectsForOrganizationResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return 
http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r V1GetAllProjectsForOrganizationResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + type V1ListAllProjectsResponse struct { Body []byte HTTPResponse *http.Response @@ -13400,6 +13560,15 @@ func (c *ClientWithResponses) V1ClaimProjectForOrganizationWithResponse(ctx cont return ParseV1ClaimProjectForOrganizationResponse(rsp) } +// V1GetAllProjectsForOrganizationWithResponse request returning *V1GetAllProjectsForOrganizationResponse +func (c *ClientWithResponses) V1GetAllProjectsForOrganizationWithResponse(ctx context.Context, slug string, params *V1GetAllProjectsForOrganizationParams, reqEditors ...RequestEditorFn) (*V1GetAllProjectsForOrganizationResponse, error) { + rsp, err := c.V1GetAllProjectsForOrganization(ctx, slug, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseV1GetAllProjectsForOrganizationResponse(rsp) +} + // V1ListAllProjectsWithResponse request returning *V1ListAllProjectsResponse func (c *ClientWithResponses) V1ListAllProjectsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*V1ListAllProjectsResponse, error) { rsp, err := c.V1ListAllProjects(ctx, reqEditors...) @@ -15238,6 +15407,32 @@ func ParseV1ClaimProjectForOrganizationResponse(rsp *http.Response) (*V1ClaimPro return response, nil } +// ParseV1GetAllProjectsForOrganizationResponse parses an HTTP response from a V1GetAllProjectsForOrganizationWithResponse call +func ParseV1GetAllProjectsForOrganizationResponse(rsp *http.Response) (*V1GetAllProjectsForOrganizationResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &V1GetAllProjectsForOrganizationResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest OrganizationProjectsResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + } + + return response, nil +} + // ParseV1ListAllProjectsResponse parses an HTTP response from a V1ListAllProjectsWithResponse call func ParseV1ListAllProjectsResponse(rsp *http.Response) (*V1ListAllProjectsResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) diff --git a/pkg/api/types.gen.go b/pkg/api/types.gen.go index 192bf38d9..d55f973ee 100644 --- a/pkg/api/types.gen.go +++ b/pkg/api/types.gen.go @@ -678,6 +678,77 @@ const ( OrganizationProjectClaimResponsePreviewTargetSubscriptionPlanTeam OrganizationProjectClaimResponsePreviewTargetSubscriptionPlan = "team" ) +// Defines values for OrganizationProjectsResponseProjectsDatabasesDiskType. +const ( + Gp3 OrganizationProjectsResponseProjectsDatabasesDiskType = "gp3" + Io2 OrganizationProjectsResponseProjectsDatabasesDiskType = "io2" +) + +// Defines values for OrganizationProjectsResponseProjectsDatabasesInfraComputeSize. 
+const ( + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeLarge OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "large" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeMedium OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "medium" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeMicro OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "micro" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN12xlarge OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "12xlarge" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN16xlarge OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "16xlarge" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN24xlarge OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "24xlarge" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN24xlargeHighMemory OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "24xlarge_high_memory" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN24xlargeOptimizedCpu OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "24xlarge_optimized_cpu" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN24xlargeOptimizedMemory OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "24xlarge_optimized_memory" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN2xlarge OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "2xlarge" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN48xlarge OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "48xlarge" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN48xlargeHighMemory OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "48xlarge_high_memory" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN48xlargeOptimizedCpu OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "48xlarge_optimized_cpu" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN48xlargeOptimizedMemory OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "48xlarge_optimized_memory" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN4xlarge OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "4xlarge" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeN8xlarge OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "8xlarge" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeNano OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "nano" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizePico OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "pico" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeSmall OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "small" + OrganizationProjectsResponseProjectsDatabasesInfraComputeSizeXlarge OrganizationProjectsResponseProjectsDatabasesInfraComputeSize = "xlarge" +) + +// Defines values for OrganizationProjectsResponseProjectsDatabasesStatus. 
+const ( + OrganizationProjectsResponseProjectsDatabasesStatusACTIVEHEALTHY OrganizationProjectsResponseProjectsDatabasesStatus = "ACTIVE_HEALTHY" + OrganizationProjectsResponseProjectsDatabasesStatusACTIVEUNHEALTHY OrganizationProjectsResponseProjectsDatabasesStatus = "ACTIVE_UNHEALTHY" + OrganizationProjectsResponseProjectsDatabasesStatusCOMINGUP OrganizationProjectsResponseProjectsDatabasesStatus = "COMING_UP" + OrganizationProjectsResponseProjectsDatabasesStatusGOINGDOWN OrganizationProjectsResponseProjectsDatabasesStatus = "GOING_DOWN" + OrganizationProjectsResponseProjectsDatabasesStatusINITFAILED OrganizationProjectsResponseProjectsDatabasesStatus = "INIT_FAILED" + OrganizationProjectsResponseProjectsDatabasesStatusINITREADREPLICA OrganizationProjectsResponseProjectsDatabasesStatus = "INIT_READ_REPLICA" + OrganizationProjectsResponseProjectsDatabasesStatusINITREADREPLICAFAILED OrganizationProjectsResponseProjectsDatabasesStatus = "INIT_READ_REPLICA_FAILED" + OrganizationProjectsResponseProjectsDatabasesStatusREMOVED OrganizationProjectsResponseProjectsDatabasesStatus = "REMOVED" + OrganizationProjectsResponseProjectsDatabasesStatusRESIZING OrganizationProjectsResponseProjectsDatabasesStatus = "RESIZING" + OrganizationProjectsResponseProjectsDatabasesStatusRESTARTING OrganizationProjectsResponseProjectsDatabasesStatus = "RESTARTING" + OrganizationProjectsResponseProjectsDatabasesStatusRESTORING OrganizationProjectsResponseProjectsDatabasesStatus = "RESTORING" + OrganizationProjectsResponseProjectsDatabasesStatusUNKNOWN OrganizationProjectsResponseProjectsDatabasesStatus = "UNKNOWN" +) + +// Defines values for OrganizationProjectsResponseProjectsDatabasesType. +const ( + OrganizationProjectsResponseProjectsDatabasesTypePRIMARY OrganizationProjectsResponseProjectsDatabasesType = "PRIMARY" + OrganizationProjectsResponseProjectsDatabasesTypeREADREPLICA OrganizationProjectsResponseProjectsDatabasesType = "READ_REPLICA" +) + +// Defines values for OrganizationProjectsResponseProjectsStatus. 
+const ( + OrganizationProjectsResponseProjectsStatusACTIVEHEALTHY OrganizationProjectsResponseProjectsStatus = "ACTIVE_HEALTHY" + OrganizationProjectsResponseProjectsStatusACTIVEUNHEALTHY OrganizationProjectsResponseProjectsStatus = "ACTIVE_UNHEALTHY" + OrganizationProjectsResponseProjectsStatusCOMINGUP OrganizationProjectsResponseProjectsStatus = "COMING_UP" + OrganizationProjectsResponseProjectsStatusGOINGDOWN OrganizationProjectsResponseProjectsStatus = "GOING_DOWN" + OrganizationProjectsResponseProjectsStatusINACTIVE OrganizationProjectsResponseProjectsStatus = "INACTIVE" + OrganizationProjectsResponseProjectsStatusINITFAILED OrganizationProjectsResponseProjectsStatus = "INIT_FAILED" + OrganizationProjectsResponseProjectsStatusPAUSEFAILED OrganizationProjectsResponseProjectsStatus = "PAUSE_FAILED" + OrganizationProjectsResponseProjectsStatusPAUSING OrganizationProjectsResponseProjectsStatus = "PAUSING" + OrganizationProjectsResponseProjectsStatusREMOVED OrganizationProjectsResponseProjectsStatus = "REMOVED" + OrganizationProjectsResponseProjectsStatusRESIZING OrganizationProjectsResponseProjectsStatus = "RESIZING" + OrganizationProjectsResponseProjectsStatusRESTARTING OrganizationProjectsResponseProjectsStatus = "RESTARTING" + OrganizationProjectsResponseProjectsStatusRESTOREFAILED OrganizationProjectsResponseProjectsStatus = "RESTORE_FAILED" + OrganizationProjectsResponseProjectsStatusRESTORING OrganizationProjectsResponseProjectsStatus = "RESTORING" + OrganizationProjectsResponseProjectsStatusUNKNOWN OrganizationProjectsResponseProjectsStatus = "UNKNOWN" + OrganizationProjectsResponseProjectsStatusUPGRADING OrganizationProjectsResponseProjectsStatus = "UPGRADING" +) + // Defines values for PostgresConfigResponseSessionReplicationRole. const ( PostgresConfigResponseSessionReplicationRoleLocal PostgresConfigResponseSessionReplicationRole = "local" @@ -864,8 +935,8 @@ const ( // Defines values for SupavisorConfigResponseDatabaseType. const ( - PRIMARY SupavisorConfigResponseDatabaseType = "PRIMARY" - READREPLICA SupavisorConfigResponseDatabaseType = "READ_REPLICA" + SupavisorConfigResponseDatabaseTypePRIMARY SupavisorConfigResponseDatabaseType = "PRIMARY" + SupavisorConfigResponseDatabaseTypeREADREPLICA SupavisorConfigResponseDatabaseType = "READ_REPLICA" ) // Defines values for SupavisorConfigResponsePoolMode. @@ -1358,6 +1429,14 @@ const ( V1OauthAuthorizeProjectClaimParamsCodeChallengeMethodSha256 V1OauthAuthorizeProjectClaimParamsCodeChallengeMethod = "sha256" ) +// Defines values for V1GetAllProjectsForOrganizationParamsSort. +const ( + CreatedAsc V1GetAllProjectsForOrganizationParamsSort = "created_asc" + CreatedDesc V1GetAllProjectsForOrganizationParamsSort = "created_desc" + NameAsc V1GetAllProjectsForOrganizationParamsSort = "name_asc" + NameDesc V1GetAllProjectsForOrganizationParamsSort = "name_desc" +) + // Defines values for V1GetAvailableRegionsParamsContinent. const ( AF V1GetAvailableRegionsParamsContinent = "AF" @@ -1371,26 +1450,26 @@ const ( // Defines values for V1GetAvailableRegionsParamsDesiredInstanceSize. 
const ( - Large V1GetAvailableRegionsParamsDesiredInstanceSize = "large" - Medium V1GetAvailableRegionsParamsDesiredInstanceSize = "medium" - Micro V1GetAvailableRegionsParamsDesiredInstanceSize = "micro" - N12xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "12xlarge" - N16xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "16xlarge" - N24xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "24xlarge" - N24xlargeHighMemory V1GetAvailableRegionsParamsDesiredInstanceSize = "24xlarge_high_memory" - N24xlargeOptimizedCpu V1GetAvailableRegionsParamsDesiredInstanceSize = "24xlarge_optimized_cpu" - N24xlargeOptimizedMemory V1GetAvailableRegionsParamsDesiredInstanceSize = "24xlarge_optimized_memory" - N2xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "2xlarge" - N48xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "48xlarge" - N48xlargeHighMemory V1GetAvailableRegionsParamsDesiredInstanceSize = "48xlarge_high_memory" - N48xlargeOptimizedCpu V1GetAvailableRegionsParamsDesiredInstanceSize = "48xlarge_optimized_cpu" - N48xlargeOptimizedMemory V1GetAvailableRegionsParamsDesiredInstanceSize = "48xlarge_optimized_memory" - N4xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "4xlarge" - N8xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "8xlarge" - Nano V1GetAvailableRegionsParamsDesiredInstanceSize = "nano" - Pico V1GetAvailableRegionsParamsDesiredInstanceSize = "pico" - Small V1GetAvailableRegionsParamsDesiredInstanceSize = "small" - Xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "xlarge" + V1GetAvailableRegionsParamsDesiredInstanceSizeLarge V1GetAvailableRegionsParamsDesiredInstanceSize = "large" + V1GetAvailableRegionsParamsDesiredInstanceSizeMedium V1GetAvailableRegionsParamsDesiredInstanceSize = "medium" + V1GetAvailableRegionsParamsDesiredInstanceSizeMicro V1GetAvailableRegionsParamsDesiredInstanceSize = "micro" + V1GetAvailableRegionsParamsDesiredInstanceSizeN12xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "12xlarge" + V1GetAvailableRegionsParamsDesiredInstanceSizeN16xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "16xlarge" + V1GetAvailableRegionsParamsDesiredInstanceSizeN24xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "24xlarge" + V1GetAvailableRegionsParamsDesiredInstanceSizeN24xlargeHighMemory V1GetAvailableRegionsParamsDesiredInstanceSize = "24xlarge_high_memory" + V1GetAvailableRegionsParamsDesiredInstanceSizeN24xlargeOptimizedCpu V1GetAvailableRegionsParamsDesiredInstanceSize = "24xlarge_optimized_cpu" + V1GetAvailableRegionsParamsDesiredInstanceSizeN24xlargeOptimizedMemory V1GetAvailableRegionsParamsDesiredInstanceSize = "24xlarge_optimized_memory" + V1GetAvailableRegionsParamsDesiredInstanceSizeN2xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "2xlarge" + V1GetAvailableRegionsParamsDesiredInstanceSizeN48xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "48xlarge" + V1GetAvailableRegionsParamsDesiredInstanceSizeN48xlargeHighMemory V1GetAvailableRegionsParamsDesiredInstanceSize = "48xlarge_high_memory" + V1GetAvailableRegionsParamsDesiredInstanceSizeN48xlargeOptimizedCpu V1GetAvailableRegionsParamsDesiredInstanceSize = "48xlarge_optimized_cpu" + V1GetAvailableRegionsParamsDesiredInstanceSizeN48xlargeOptimizedMemory V1GetAvailableRegionsParamsDesiredInstanceSize = "48xlarge_optimized_memory" + V1GetAvailableRegionsParamsDesiredInstanceSizeN4xlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "4xlarge" + V1GetAvailableRegionsParamsDesiredInstanceSizeN8xlarge 
V1GetAvailableRegionsParamsDesiredInstanceSize = "8xlarge" + V1GetAvailableRegionsParamsDesiredInstanceSizeNano V1GetAvailableRegionsParamsDesiredInstanceSize = "nano" + V1GetAvailableRegionsParamsDesiredInstanceSizePico V1GetAvailableRegionsParamsDesiredInstanceSize = "pico" + V1GetAvailableRegionsParamsDesiredInstanceSizeSmall V1GetAvailableRegionsParamsDesiredInstanceSize = "small" + V1GetAvailableRegionsParamsDesiredInstanceSizeXlarge V1GetAvailableRegionsParamsDesiredInstanceSize = "xlarge" ) // Defines values for V1GetSecurityAdvisorsParamsLintType. @@ -2770,6 +2849,56 @@ type OrganizationProjectClaimResponsePreviewSourceSubscriptionPlan string // OrganizationProjectClaimResponsePreviewTargetSubscriptionPlan defines model for OrganizationProjectClaimResponse.Preview.TargetSubscriptionPlan. type OrganizationProjectClaimResponsePreviewTargetSubscriptionPlan string +// OrganizationProjectsResponse defines model for OrganizationProjectsResponse. +type OrganizationProjectsResponse struct { + Pagination struct { + // Count Total number of projects. Use this to calculate the total number of pages. + Count float32 `json:"count"` + + // Limit Maximum number of projects per page + Limit float32 `json:"limit"` + + // Offset Number of projects skipped in this response + Offset float32 `json:"offset"` + } `json:"pagination"` + Projects []struct { + CloudProvider string `json:"cloud_provider"` + Databases []struct { + CloudProvider string `json:"cloud_provider"` + DiskLastModifiedAt *string `json:"disk_last_modified_at,omitempty"` + DiskThroughputMbps *float32 `json:"disk_throughput_mbps,omitempty"` + DiskType *OrganizationProjectsResponseProjectsDatabasesDiskType `json:"disk_type,omitempty"` + DiskVolumeSizeGb *float32 `json:"disk_volume_size_gb,omitempty"` + Identifier string `json:"identifier"` + InfraComputeSize *OrganizationProjectsResponseProjectsDatabasesInfraComputeSize `json:"infra_compute_size,omitempty"` + Region string `json:"region"` + Status OrganizationProjectsResponseProjectsDatabasesStatus `json:"status"` + Type OrganizationProjectsResponseProjectsDatabasesType `json:"type"` + } `json:"databases"` + InsertedAt string `json:"inserted_at"` + IsBranch bool `json:"is_branch"` + Name string `json:"name"` + Ref string `json:"ref"` + Region string `json:"region"` + Status OrganizationProjectsResponseProjectsStatus `json:"status"` + } `json:"projects"` +} + +// OrganizationProjectsResponseProjectsDatabasesDiskType defines model for OrganizationProjectsResponse.Projects.Databases.DiskType. +type OrganizationProjectsResponseProjectsDatabasesDiskType string + +// OrganizationProjectsResponseProjectsDatabasesInfraComputeSize defines model for OrganizationProjectsResponse.Projects.Databases.InfraComputeSize. +type OrganizationProjectsResponseProjectsDatabasesInfraComputeSize string + +// OrganizationProjectsResponseProjectsDatabasesStatus defines model for OrganizationProjectsResponse.Projects.Databases.Status. +type OrganizationProjectsResponseProjectsDatabasesStatus string + +// OrganizationProjectsResponseProjectsDatabasesType defines model for OrganizationProjectsResponse.Projects.Databases.Type. +type OrganizationProjectsResponseProjectsDatabasesType string + +// OrganizationProjectsResponseProjectsStatus defines model for OrganizationProjectsResponse.Projects.Status. +type OrganizationProjectsResponseProjectsStatus string + // OrganizationResponseV1 defines model for OrganizationResponseV1. 
type OrganizationResponseV1 struct { Id string `json:"id"` @@ -2783,6 +2912,7 @@ type PgsodiumConfigResponse struct { // PostgresConfigResponse defines model for PostgresConfigResponse. type PostgresConfigResponse struct { + CheckpointTimeout *int `json:"checkpoint_timeout,omitempty"` EffectiveCacheSize *string `json:"effective_cache_size,omitempty"` HotStandbyFeedback *bool `json:"hot_standby_feedback,omitempty"` LogicalDecodingWorkMem *string `json:"logical_decoding_work_mem,omitempty"` @@ -3480,6 +3610,7 @@ type UpdatePgsodiumConfigBody struct { // UpdatePostgresConfigBody defines model for UpdatePostgresConfigBody. type UpdatePostgresConfigBody struct { + CheckpointTimeout *int `json:"checkpoint_timeout,omitempty"` EffectiveCacheSize *string `json:"effective_cache_size,omitempty"` HotStandbyFeedback *bool `json:"hot_standby_feedback,omitempty"` LogicalDecodingWorkMem *string `json:"logical_decoding_work_mem,omitempty"` @@ -4017,8 +4148,9 @@ type V1RestorePointResponseStatus string // V1RunQueryBody defines model for V1RunQueryBody. type V1RunQueryBody struct { - Query string `json:"query"` - ReadOnly *bool `json:"read_only,omitempty"` + Parameters *[]interface{} `json:"parameters,omitempty"` + Query string `json:"query"` + ReadOnly *bool `json:"read_only,omitempty"` } // V1ServiceHealthResponse defines model for V1ServiceHealthResponse. @@ -4161,6 +4293,29 @@ type V1OauthAuthorizeProjectClaimParamsResponseType string // V1OauthAuthorizeProjectClaimParamsCodeChallengeMethod defines parameters for V1OauthAuthorizeProjectClaim. type V1OauthAuthorizeProjectClaimParamsCodeChallengeMethod string +// V1GetAllProjectsForOrganizationParams defines parameters for V1GetAllProjectsForOrganization. +type V1GetAllProjectsForOrganizationParams struct { + // Offset Number of projects to skip + Offset *int `form:"offset,omitempty" json:"offset,omitempty"` + + // Limit Number of projects to return per page + Limit *int `form:"limit,omitempty" json:"limit,omitempty"` + + // Search Search projects by name + Search *string `form:"search,omitempty" json:"search,omitempty"` + + // Sort Sort order for projects + Sort *V1GetAllProjectsForOrganizationParamsSort `form:"sort,omitempty" json:"sort,omitempty"` + + // Statuses A comma-separated list of project statuses to filter by. + // + // The following values are supported: `ACTIVE_HEALTHY`, `INACTIVE`. + Statuses *string `form:"statuses,omitempty" json:"statuses,omitempty"` +} + +// V1GetAllProjectsForOrganizationParamsSort defines parameters for V1GetAllProjectsForOrganization. +type V1GetAllProjectsForOrganizationParamsSort string + // V1GetAvailableRegionsParams defines parameters for V1GetAvailableRegions. type V1GetAvailableRegionsParams struct { // OrganizationSlug Slug of your organization diff --git a/pkg/config/config.go b/pkg/config/config.go index 8d76db168..531bc3dea 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -558,7 +558,7 @@ func (c *config) newDecodeHook(fs ...mapstructure.DecodeHookFunc) mapstructure.D return mapstructure.ComposeDecodeHookFunc(fs...) 
} -func (c *config) Load(path string, fsys fs.FS) error { +func (c *config) Load(path string, fsys fs.FS, overrides ...ConfigEditor) error { builder := NewPathBuilder(path) // Load secrets from .env file if err := loadNestedEnv(builder.SupabaseDirPath); err != nil { @@ -637,6 +637,9 @@ func (c *config) Load(path string, fsys fs.FS) error { if err := c.resolve(builder, fsys); err != nil { return err } + for _, apply := range overrides { + apply(c) + } return c.Validate(fsys) } @@ -852,9 +855,7 @@ func (c *config) Validate(fsys fs.FS) error { } } if len(c.Auth.SigningKeysPath) > 0 { - if f, err := fsys.Open(c.Auth.SigningKeysPath); errors.Is(err, os.ErrNotExist) { - // Ignore missing signing key path on CI - } else if err != nil { + if f, err := fsys.Open(c.Auth.SigningKeysPath); err != nil { return errors.Errorf("failed to read signing keys: %w", err) } else if c.Auth.SigningKeys, err = fetcher.ParseJSON[[]JWK](f); err != nil { return errors.Errorf("failed to decode signing keys: %w", err) @@ -1215,7 +1216,7 @@ func (h *hookConfig) validate(hookType string) (err error) { } else if err := assertEnvLoaded(h.Secrets.Value); err != nil { return err } - for _, secret := range strings.Split(h.Secrets.Value, "|") { + for secret := range strings.SplitSeq(h.Secrets.Value, "|") { if !hookSecretPattern.MatchString(secret) { return errors.Errorf(`Invalid hook config: auth.hook.%s.secrets must be formatted as "v1,whsec_" with a minimum length of 32 characters.`, hookType) } diff --git a/pkg/config/decode_hooks.go b/pkg/config/decode_hooks.go index b97d9ba8d..265ec872e 100644 --- a/pkg/config/decode_hooks.go +++ b/pkg/config/decode_hooks.go @@ -12,7 +12,7 @@ var envPattern = regexp.MustCompile(`^env\((.*)\)$`) // LoadEnvHook is a mapstructure decode hook that loads environment variables // from strings formatted as env(VAR_NAME). -func LoadEnvHook(f reflect.Kind, t reflect.Kind, data interface{}) (interface{}, error) { +func LoadEnvHook(f reflect.Kind, t reflect.Kind, data any) (any, error) { if f != reflect.String { return data, nil } @@ -35,7 +35,7 @@ Example: verify_jwt = true` // ValidateFunctionsHook is a mapstructure decode hook that validates the functions config format. -func ValidateFunctionsHook(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { +func ValidateFunctionsHook(f reflect.Type, t reflect.Type, data any) (any, error) { // Only handle FunctionConfig type if t != reflect.TypeOf(FunctionConfig{}) { return data, nil diff --git a/pkg/config/secret.go b/pkg/config/secret.go index 72c075ba7..8fa7f811b 100644 --- a/pkg/config/secret.go +++ b/pkg/config/secret.go @@ -83,7 +83,7 @@ func DecryptSecretHookFunc(hashKey string) mapstructure.DecodeHookFunc { privateKeys = append(privateKeys, strToArr(kv[1])...) 
} } - return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { if f.Kind() != reflect.String { return data, nil } diff --git a/pkg/config/templates/Dockerfile b/pkg/config/templates/Dockerfile index ff75d8e7e..332d52b8f 100644 --- a/pkg/config/templates/Dockerfile +++ b/pkg/config/templates/Dockerfile @@ -1,19 +1,19 @@ # Exposed for updates by .github/dependabot.yml -FROM supabase/postgres:17.6.1.024 AS pg +FROM supabase/postgres:17.6.1.029 AS pg # Append to ServiceImages when adding new dependencies below FROM library/kong:2.8.1 AS kong FROM axllent/mailpit:v1.22.3 AS mailpit FROM postgrest/postgrest:v13.0.7 AS postgrest -FROM supabase/postgres-meta:v0.93.0 AS pgmeta -FROM supabase/studio:2025.10.20-sha-5005fc6 AS studio +FROM supabase/postgres-meta:v0.93.1 AS pgmeta +FROM supabase/studio:2025.10.27-sha-85b84e0 AS studio FROM darthsim/imgproxy:v3.8.0 AS imgproxy -FROM supabase/edge-runtime:v1.69.14 AS edgeruntime +FROM supabase/edge-runtime:v1.69.15 AS edgeruntime FROM timberio/vector:0.28.1-alpine AS vector FROM supabase/supavisor:2.7.3 AS supavisor FROM supabase/gotrue:v2.180.0 AS gotrue -FROM supabase/realtime:v2.56.0 AS realtime -FROM supabase/storage-api:v1.28.1 AS storage -FROM supabase/logflare:1.22.6 AS logflare +FROM supabase/realtime:v2.57.2 AS realtime +FROM supabase/storage-api:v1.28.2 AS storage +FROM supabase/logflare:1.23.2 AS logflare # Append to JobImages when adding new dependencies below FROM supabase/pgadmin-schema-diff:cli-0.0.5 AS differ FROM supabase/migra:3.0.1663481299 AS migra diff --git a/pkg/config/updater_test.go b/pkg/config/updater_test.go index ad12812e5..98d56068a 100644 --- a/pkg/config/updater_test.go +++ b/pkg/config/updater_test.go @@ -130,7 +130,7 @@ func TestUpdateExperimentalConfig(t *testing.T) { gock.New(server). Post("/v1/projects/test-project/database/webhooks/enable"). Reply(http.StatusOK). - JSON(map[string]interface{}{}) + JSON(map[string]any{}) // Run test err := updater.UpdateExperimentalConfig(context.Background(), "test-project", experimental{ Webhooks: &webhooks{ @@ -325,7 +325,7 @@ func TestUpdateRemoteConfig(t *testing.T) { gock.New(server). Post("/v1/projects/test-project/database/webhooks/enable"). Reply(http.StatusOK). - JSON(map[string]interface{}{}) + JSON(map[string]any{}) // Run test err := updater.UpdateRemoteConfig(context.Background(), baseConfig{ ProjectId: "test-project", diff --git a/pkg/config/utils.go b/pkg/config/utils.go index 0ab42264a..2bb6db6f4 100644 --- a/pkg/config/utils.go +++ b/pkg/config/utils.go @@ -63,6 +63,7 @@ func NewPathBuilder(configPath string) pathBuilder { PgmetaVersionPath: filepath.Join(base, ".temp", "pgmeta-version"), PoolerVersionPath: filepath.Join(base, ".temp", "pooler-version"), RealtimeVersionPath: filepath.Join(base, ".temp", "realtime-version"), + LogflareVersionPath: filepath.Join(base, ".temp", "logflare-version"), CliVersionPath: filepath.Join(base, ".temp", "cli-latest"), CurrBranchPath: filepath.Join(base, ".branches", "_current_branch"), SchemasDir: filepath.Join(base, "schemas"), diff --git a/pkg/diff/diff.go b/pkg/diff/diff.go index 6a40b23fc..e3a8e2d98 100644 --- a/pkg/diff/diff.go +++ b/pkg/diff/diff.go @@ -116,10 +116,7 @@ func Diff(oldName string, old []byte, newName string, new []byte) []byte { // End chunk with common lines for context. 
if len(ctext) > 0 { - n := end.x - start.x - if n > C { - n = C - } + n := min(end.x-start.x, C) for _, s := range x[start.x : start.x+n] { ctext = append(ctext, " "+s) count.x++ @@ -234,7 +231,7 @@ func tgs(x, y []string) []pair { for i := range T { T[i] = n + 1 } - for i := 0; i < n; i++ { + for i := range n { k := sort.Search(n, func(k int) bool { return T[k] >= J[i] }) diff --git a/pkg/fetcher/http.go b/pkg/fetcher/http.go index 49ac67839..ac3ba91b9 100644 --- a/pkg/fetcher/http.go +++ b/pkg/fetcher/http.go @@ -6,6 +6,7 @@ import ( "encoding/json" "io" "net/http" + "slices" "github.com/go-errors/errors" ) @@ -93,10 +94,8 @@ func (s *Fetcher) Send(ctx context.Context, method, path string, reqBody any, re if err != nil { return nil, errors.Errorf("failed to execute http request: %w", err) } - for _, expected := range s.status { - if resp.StatusCode == expected { - return resp, nil - } + if slices.Contains(s.status, resp.StatusCode) { + return resp, nil } // Reject unexpected status codes as error if len(s.status) > 0 || resp.StatusCode >= http.StatusBadRequest { diff --git a/pkg/migration/list_test.go b/pkg/migration/list_test.go index 80d09fd40..4654fa876 100644 --- a/pkg/migration/list_test.go +++ b/pkg/migration/list_test.go @@ -16,7 +16,7 @@ func TestRemoteMigrations(t *testing.T) { conn := pgtest.NewConn() defer conn.Close(t) conn.Query(LIST_MIGRATION_VERSION). - Reply("SELECT 1", []interface{}{"20220727064247"}) + Reply("SELECT 1", []any{"20220727064247"}) // Run test versions, err := ListRemoteMigrations(context.Background(), conn.MockClient(t)) // Check error @@ -42,7 +42,7 @@ func TestRemoteMigrations(t *testing.T) { conn := pgtest.NewConn() defer conn.Close(t) conn.Query(LIST_MIGRATION_VERSION). - Reply("SELECT 1", []interface{}{}) + Reply("SELECT 1", []any{}) // Run test _, err := ListRemoteMigrations(context.Background(), conn.MockClient(t)) // Check error diff --git a/pkg/migration/scripts/dump_schema.sh b/pkg/migration/scripts/dump_schema.sh index 7f61c5866..d7fa91f7f 100755 --- a/pkg/migration/scripts/dump_schema.sh +++ b/pkg/migration/scripts/dump_schema.sh @@ -53,6 +53,3 @@ pg_dump \ | sed -E 's/^ALTER TABLE "cron"/-- &/' \ | sed -E 's/^SET transaction_timeout = 0;/-- &/' \ | sed -E "${EXTRA_SED:-}" - -# Reset session config generated by pg_dump -echo "RESET ALL;" diff --git a/pkg/pgtest/mock.go b/pkg/pgtest/mock.go index 0f5e9bb84..4ceb2fece 100644 --- a/pkg/pgtest/mock.go +++ b/pkg/pgtest/mock.go @@ -79,7 +79,7 @@ func (r *MockConn) Intercept(config *pgx.ConnConfig) { } // Adds a simple query or prepared statement to the mock connection. -func (r *MockConn) Query(sql string, args ...interface{}) *MockConn { +func (r *MockConn) Query(sql string, args ...any) *MockConn { var oids []uint32 var params [][]byte for _, v := range args { @@ -92,7 +92,7 @@ func (r *MockConn) Query(sql string, args ...interface{}) *MockConn { return r } -func (r *MockConn) encodeValueArg(v interface{}) (value []byte, oid uint32) { +func (r *MockConn) encodeValueArg(v any) (value []byte, oid uint32) { if v == nil { return nil, pgtype.TextArrayOID } @@ -119,7 +119,7 @@ func (r *MockConn) encodeValueArg(v interface{}) (value []byte, oid uint32) { return value, dt.OID } -func getDataTypeSize(v interface{}) int16 { +func getDataTypeSize(v any) int16 { t := reflect.TypeOf(v) k := t.Kind() if k < reflect.Int || k > reflect.Complex128 { @@ -135,12 +135,12 @@ func (r *MockConn) lastQuery() *extendedQueryStep { // Adds a server reply using binary or text protocol format. 
// // TODO: support prepared statements when using binary protocol -func (r *MockConn) Reply(tag string, rows ...interface{}) *MockConn { +func (r *MockConn) Reply(tag string, rows ...any) *MockConn { q := r.lastQuery() // Add field description if len(rows) > 0 { var desc pgproto3.RowDescription - if arr, ok := rows[0].([]interface{}); ok { + if arr, ok := rows[0].([]any); ok { for i, v := range arr { name := fmt.Sprintf("c_%02d", i) if fd := toFieldDescription(v); fd != nil { @@ -176,7 +176,7 @@ func (r *MockConn) Reply(tag string, rows ...interface{}) *MockConn { // Add row data for _, data := range rows { var dr pgproto3.DataRow - if arr, ok := data.([]interface{}); ok { + if arr, ok := data.([]any); ok { for _, v := range arr { if value, oid := r.encodeValueArg(v); oid > 0 { dr.Values = append(dr.Values, value) @@ -209,7 +209,7 @@ func (r *MockConn) Reply(tag string, rows ...interface{}) *MockConn { return r } -func toFieldDescription(v interface{}) *pgproto3.FieldDescription { +func toFieldDescription(v any) *pgproto3.FieldDescription { if dt, ok := ci.DataTypeForValue(v); ok { size := getDataTypeSize(v) format := ci.ParamFormatCodeForOID(dt.OID)
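To close, a minimal usage sketch of the `MockConn` API shown above, in the style of the tests elsewhere in this diff: `Query` registers the expected SQL, `Reply` enqueues a command tag plus one row (the `[]any` form is what `toFieldDescription` turns into a row description), and `MockClient` hands back a pgx connection that replays the script. The query text and the returned value are invented for illustration.

```
package pgtest_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/supabase/cli/pkg/pgtest"
)

func TestMockConnSketch(t *testing.T) {
	conn := pgtest.NewConn()
	defer conn.Close(t)
	// Script one round trip: the exact SQL we expect in, one text-format row out.
	conn.Query("SELECT version()").
		Reply("SELECT 1", []any{"PostgreSQL 17.6"})
	// MockClient returns a connection backed by the scripted exchange.
	client := conn.MockClient(t)
	rows, err := client.Query(context.Background(), "SELECT version()")
	assert.NoError(t, err)
	defer rows.Close()
	for rows.Next() {
		var version string
		assert.NoError(t, rows.Scan(&version))
		assert.Equal(t, "PostgreSQL 17.6", version)
	}
	assert.NoError(t, rows.Err())
}
```

Because the mock matches on the literal SQL text, tests like the new traffic_profile one pass the embedded query constant itself rather than retyping the statement.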