Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 24 additions & 1 deletion scripts/check-migration-numbering.sh
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,25 @@ if [ ! -d "$migrations_path" ]; then
exit 1
fi

# is_sql_migration checks if a .up.sql file contains executable SQL
# statements and not just comments/whitespace/semicolons. The logic mirrors the
# Go helper the `migrate` package dependency uses in the `migrate.go` file.
is_sql_migration() {
local file="$1"
local cleaned

cleaned=$(
perl -0777 -pe '
s/^\x{FEFF}//;
s{/\*.*?\*/}{}gs;
s{--[^\r\n]*}{}g;
' "$file" 2>/dev/null | \
sed -E 's/[[:space:];]+//g'
)

[ -n "$cleaned" ]
}

# Get all unique prefixes (e.g., 000001, always 6 digits) from .up.sql files.
prefixes=($(ls "$migrations_path"/*.up.sql 2>/dev/null | \
sed -E 's/.*\/([0-9]{6})_.*\.up\.sql/\1/' | sort))
Expand All @@ -34,7 +53,11 @@ for i in "${!prefixes[@]}"; do
base_filename=$(ls "$migrations_path/${prefixes[$i]}"_*.up.sql | \
sed -E 's/\.up\.sql//')

if [ ! -f "$base_filename.down.sql" ]; then
# Error if the .up.sql is an SQL migration, but we're missing the
# corresponding .down.sql. This doesn't apply if the .up.sql file is a code
# migration.
if is_sql_migration "$base_filename.up.sql" && \
[ ! -f "$base_filename.down.sql" ]; then
Comment on lines +56 to +60
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Updated:

Do we need to include this commit? Is it required for your end goal?

We don’t often have no-op down migrations, but IMO it’s fine to include them. It keeps the migration sequence contiguous, and IMO it’s cleaner to always provide a down file for every up, even if the down is empty.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we need to include this commit? Is it required for your end goal?

It’s not required, but I want to explain why I think this change is beneficial.

This change specifically targets code migrations, allowing them to omit a down.sql file when appropriate.

To illustrate why SQL and code migrations should behave differently, consider the following example of migrations:

  • 0001_example.up.sql
    CREATE TABLE example (id INTEGER PRIMARY KEY);

  • 0001_example.down.sql
    DROP TABLE example;

  • Followed by a code migration:
    0002_example_code_migration.up.sql
    (This migration inserts data into the example table.)

If we also add a down.sql for the code migration:

  • 0002_example_code_migration.down.sql
    (This would simply re-run the code migration and insert the data again.)

This highlights the mismatch:

  • For SQL migrations, the down.sql undoes the up.sql.
  • For code migrations, a down.sql would unintentionally re-execute the migration instead of undoing the changes.
    In this example, the data inserted by the code migration would actually be removed only when the table itself is dropped by 0001_example.down.sql.

Currently, the code migrations in tapd are structured as no-ops when re-executed, but that assumption won't necessarily hold for all code migrations. For example, it'd be much more complex for migrations like the kvdb → SQL migration in litd.


For that reason, I think the code migration itself should indicate whether a down.sql should be present or not, rather than having CI enforce for all types of migrations. The migrate library explicitly supports migrations without a down.sql file, and I think this is a case where that functionality makes sense to use.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We don’t often have no-op down migrations, but IMO it’s fine to include them. It keeps the migration sequence contiguous, and IMO it’s cleaner to always provide a down file for every up, even if the down is empty.

And just to clarify, this if clause here will only trigger when the up.sql case contains any executable content, i.e. when the up.sql is an SQL migration. It's not checking if the down.sql file is empty.

echo "Error: Missing .down.sql file for migration $expected_prefix."
exit 1
fi
Expand Down
2 changes: 1 addition & 1 deletion tapdb/migrations.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ const (
// daemon.
//
// NOTE: This MUST be updated when a new migration is added.
LatestMigrationVersion = 47
LatestMigrationVersion = 49
)

// DatabaseBackend is an interface that contains all methods our different
Expand Down
184 changes: 184 additions & 0 deletions tapdb/migrations_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"testing"

Expand Down Expand Up @@ -573,6 +574,10 @@ func TestMigration33(t *testing.T) {

// And now that we have test data inserted, we can migrate to the latest
// version.
// NOTE: the post-migration check was originally run at migration
// version 33, but was later moved to version 48. Targeting the latest
// migration will execute the post-migration check, but at version 48
// instead of 33.
err := db.ExecuteMigrations(TargetLatest, WithPostStepCallbacks(
makePostStepCallbacks(db, postMigrationChecks),
))
Expand Down Expand Up @@ -672,6 +677,116 @@ func TestMigration33(t *testing.T) {
)
}

// TestMigration48ScriptKeyTypeReplay makes sure that if the script key type
// backfill already ran for a user (originally at migration 33), the replay at
// migration 48 is a no-op and doesn't rewrite any data.
func TestMigration48ScriptKeyTypeReplay(t *testing.T) {
ctx := context.Background()

db := NewTestDBWithVersion(t, 32)

InsertTestdata(t, db.BaseDB, "migrations_test_00033_dummy_data.sql")

// We simulate that a user previously ran migration 33, which at the
// time also contained the code migration which has now been separated
// to migration 48.
err := db.ExecuteMigrations(TargetVersion(33), WithPostStepCallbacks(
makePostStepCallbacks(db, map[uint]postMigrationCheck{
33: determineAndAssignScriptKeyType,
}),
))
require.NoError(t, err)

const (
key1 = "039c571fffcac1a1a7cd3372bd202ad8562f28e48b90f8a4eb714" +
"eca062f576ee6"
key2 = "029c571fffcac1a1a7cd3372bd202ad8562f28e48b90f8a4eb714" +
"eca062f576ee6"
key3 = "03f9cdf1ff7c9fbb0ea3c8533cd7048994f41ea20a79764469c22" +
"aa18aa6696169"
key4 = "027c79b9b26e463895eef5679d8558942c86c4ad2233adef01bc3" +
"e6d540b3653fe"
key5 = "0350aaeb166f4234650d84a2d8a130987aeaf6950206e0905401e" +
"e74ff3f8d18e6"
key6 = "02248bca7dbb12dcf0b490263a1d521691691aa2541842b7472c8" +
"3acac0e88443b"
)

expectedKeyTypes := map[string]asset.ScriptKeyType{
key1: asset.ScriptKeyUnknown,
key2: asset.ScriptKeyBip86,
key3: asset.ScriptKeyScriptPathExternal,
key4: asset.ScriptKeyTombstone,
key5: asset.ScriptKeyScriptPathChannel,
key6: asset.ScriptKeyBurn,
}

// fetchTypes returns the script key types currently persisted in the
// database keyed by their tweaked hex representation.
fetchTypes := func() map[string]asset.ScriptKeyType {
currentTypes := make(map[string]asset.ScriptKeyType)

for keyHex := range expectedKeyTypes {
keyBytes, err := hex.DecodeString(keyHex)
require.NoError(t, err)

dbKey, err := db.BaseDB.FetchScriptKeyByTweakedKey(
ctx, keyBytes,
)
require.NoError(t, err)

currentTypes[keyHex] =
extractSqlInt16[asset.ScriptKeyType](
dbKey.ScriptKey.KeyType,
)
}

return currentTypes
}

// Verify that the database contains the expected script keys after the
// first migration has been run.
require.Equal(t, expectedKeyTypes, fetchTypes())

// Now let's change the ScriptKey type for one of the entries, to an
// incorrect value. When the code migration is rerun, this value should
// not be changed despite being incorrect, as the replay of the code
// migration won't act on values which have already been assigned.
keyBytes, err := hex.DecodeString(key5)
require.NoError(t, err)

dbKey, err := db.BaseDB.FetchScriptKeyByTweakedKey(
ctx, keyBytes,
)
require.NoError(t, err)

_, err = db.BaseDB.UpsertScriptKey(ctx, NewScriptKey{
InternalKeyID: dbKey.InternalKey.KeyID,
TweakedScriptKey: dbKey.ScriptKey.TweakedScriptKey,
Tweak: dbKey.ScriptKey.Tweak,
KeyType: sqlInt16(asset.ScriptKeyBip86),
})
require.NoError(t, err)

// Executing the code migration again (now at migration 48) should not
// change or add any new values than the values assigned when the code
// migration was run for migration version 33.
err = db.ExecuteMigrations(TargetLatest, WithPostStepCallbacks(
makePostStepCallbacks(db, postMigrationChecks),
))
require.NoError(t, err)
Comment on lines +771 to +777
Copy link
Contributor

@ffranr ffranr Nov 25, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Target 48 explicitly here? TargetVersion(48) Or maybe that's not necessary and I'm missing something.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same below for TargetVersion(49).

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

might be nice to have const values for migration numbers (33, 49 and 48, etc) in these unit tests. Start/replacement. Maybe?


// As we changed the value for key5 to an incorrect value, we expect
// that the db still contains that, hence not being equal to the
// expectedKeyTypes.
require.NotEqual(t, expectedKeyTypes, fetchTypes())

// If we change the expectedKeyTypes to contain the incorrect value,
// they should however be equal.
expectedKeyTypes[key5] = asset.ScriptKeyBip86
require.Equal(t, expectedKeyTypes, fetchTypes())
}

// TestMigration37 tests that the Golang based post-migration check for the
// asset burn insertion works as expected.
func TestMigration37(t *testing.T) {
Expand All @@ -685,6 +800,10 @@ func TestMigration37(t *testing.T) {

// And now that we have test data inserted, we can migrate to the latest
// version.
// NOTE: the post-migration check was originally run at migration
// version 37, but was later moved to version 49. Targeting the latest
// migration will execute the post-migration check, but at version 49
// instead of 37.
err := db.ExecuteMigrations(TargetLatest, WithPostStepCallbacks(
makePostStepCallbacks(db, postMigrationChecks),
))
Expand All @@ -696,6 +815,71 @@ func TestMigration37(t *testing.T) {
require.Len(t, burns, 5)
}

// TestMigration49BurnReplay makes sure that if the asset burn code migration
// already ran for a user (originally at migration 37), the replay at migration
// 49 is a no-op and doesn't insert duplicate burns.
func TestMigration49BurnReplay(t *testing.T) {
	ctx := context.Background()

	db := NewTestDBWithVersion(t, 36)

	InsertTestdata(t, db.BaseDB, "migrations_test_00037_dummy_data.sql")

	// burnFingerprints reduces a set of burn rows to a sorted list of
	// stable string fingerprints, so two result sets can be compared
	// independently of row order.
	burnFingerprints := func(burns []sqlc.QueryBurnsRow) []string {
		fingerprints := make([]string, 0, len(burns))

		for _, b := range burns {
			fingerprints = append(fingerprints, fmt.Sprintf(
				"%x:%x:%x:%d", b.AnchorTxid, b.AssetID,
				b.GroupKey, b.Amount,
			))
		}

		sort.Strings(fingerprints)

		return fingerprints
	}

	// The test data seeds 3 burns into the database before any migration
	// runs; the code migration is expected to add 2 more afterwards.
	preBurns, err := db.QueryBurns(ctx, QueryBurnsFilters{})
	require.NoError(t, err)
	require.Len(t, preBurns, 3)

	// Simulate a user that previously ran migration 37 back when it still
	// contained the burn-insertion code migration, which has since been
	// separated out into migration 49.
	err = db.ExecuteMigrations(TargetVersion(37), WithPostStepCallbacks(
		makePostStepCallbacks(db, map[uint]postMigrationCheck{
			37: insertAssetBurns,
		}),
	))
	require.NoError(t, err)

	// Because the insertAssetBurns code migration ran at version 37, the
	// database should now contain 5 entries.
	firstPassBurns, err := db.QueryBurns(ctx, QueryBurnsFilters{})
	require.NoError(t, err)
	require.Len(t, firstPassBurns, 5)

	// Execute the rest of the migrations, which triggers the code
	// migration again at version 49.
	err = db.ExecuteMigrations(TargetLatest, WithPostStepCallbacks(
		makePostStepCallbacks(db, postMigrationChecks),
	))
	require.NoError(t, err)

	replayBurns, err := db.QueryBurns(ctx, QueryBurnsFilters{})
	require.NoError(t, err)

	// Even though the version 49 code migration executed once more, the
	// asset burns persisted in the database must be unchanged.
	require.Equal(
		t, burnFingerprints(firstPassBurns),
		burnFingerprints(replayBurns),
	)
}

// TestDirtySqliteVersion tests that if a migration fails and leaves an Sqlite
// database backend in a dirty state, any attempts of re-executing migrations on
// the db (i.e. restart tapd), will fail with an error indicating that the
Expand Down
14 changes: 7 additions & 7 deletions tapdb/post_migration_checks.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,14 @@ import (
)

const (
// Migration33ScriptKeyType is the version of the migration that
// introduces the script key type.
Migration33ScriptKeyType = 33
// Migration48ScriptKeyType is the version of the code migration that
// runs the script key type detection/backfill.
Migration48ScriptKeyType = 48

// Migration37InsertAssetBurns is the version of the migration that
// Migration49InsertAssetBurns is the version of the code migration that
// inserts the asset burns into the specific asset burns table by
// querying all assets and detecting burns from their witnesses.
Migration37InsertAssetBurns = 37
Migration49InsertAssetBurns = 49
)

// postMigrationCheck is a function type for a function that performs a
Expand All @@ -38,8 +38,8 @@ var (
// applied. These functions are used to perform additional checks on the
// database state that are not fully expressible in SQL.
postMigrationChecks = map[uint]postMigrationCheck{
Migration33ScriptKeyType: determineAndAssignScriptKeyType,
Migration37InsertAssetBurns: insertAssetBurns,
Migration48ScriptKeyType: determineAndAssignScriptKeyType,
Migration49InsertAssetBurns: insertAssetBurns,
}
)

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
-- The file intentionally only contains this comment to ensure the file created and picked up in the migration stream.
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
-- The file intentionally only contains this comment to ensure the file created and picked up in the migration stream.
Loading