diff --git a/comment/comment_repository.go b/comment/comment_repository.go index 9cb34cedd1..6e74830846 100644 --- a/comment/comment_repository.go +++ b/comment/comment_repository.go @@ -122,12 +122,12 @@ func (m *GormCommentRepository) Delete(ctx context.Context, commentID uuid.UUID, if err := tx.Error; err != nil { return errors.NewInternalError(ctx, err) } - m.db.Delete(c) // save a revision of the deleted comment if err := m.revisionRepository.Create(ctx, suppressorID, RevisionTypeDelete, c); err != nil { return errs.Wrapf(err, "error while deleting work item") } - return nil + tx = m.db.Delete(c) + return tx.Error } // List all comments related to a single item diff --git a/comment/comment_repository_blackbox_test.go b/comment/comment_repository_blackbox_test.go index bfe7f75b22..bc343236a0 100644 --- a/comment/comment_repository_blackbox_test.go +++ b/comment/comment_repository_blackbox_test.go @@ -54,17 +54,20 @@ func (s *TestCommentRepository) createComments(comments []*comment.Comment, crea func (s *TestCommentRepository) TestCreateCommentWithParentComment() { // parent comment - fxt := tf.NewTestFixture(s.T(), s.DB, tf.Identities(1)) - parentComment := newComment(uuid.NewV4(), "Test A", rendering.SystemMarkupMarkdown) - s.repo.Create(s.Ctx, parentComment, fxt.Identities[0].ID) + fxt := tf.NewTestFixture(s.T(), s.DB, tf.WorkItems(1)) + _ = fxt + parentComment := newComment(fxt.WorkItems[0].ID, "Test A", rendering.SystemMarkupMarkdown) + err := s.repo.Create(s.Ctx, parentComment, fxt.Identities[0].ID) + require.NoError(s.T(), err) // child comments - childComment := newComment(uuid.NewV4(), "Test Child A", rendering.SystemMarkupMarkdown) + childComment := newComment(fxt.WorkItems[0].ID, "Test Child A", rendering.SystemMarkupMarkdown) childComment.ParentCommentID = id.NullUUID{ UUID: parentComment.ID, Valid: true, } // when - s.repo.Create(s.Ctx, childComment, fxt.Identities[0].ID) + err = s.repo.Create(s.Ctx, childComment, fxt.Identities[0].ID) + 
require.NoError(s.T(), err) // then require.NotNil(s.T(), childComment.ID, "Comment was not created, ID nil") require.NotNil(s.T(), childComment.CreatedAt, "Comment was not created") @@ -74,7 +77,7 @@ func (s *TestCommentRepository) TestCreateCommentWithParentComment() { // now retrieving the stored child comment again and see if the parent reference was stored var resultComment *comment.Comment // when - resultComment, err := s.repo.Load(s.Ctx, childComment.ID) + resultComment, err = s.repo.Load(s.Ctx, childComment.ID) // then require.NoError(s.T(), err) require.NotNil(s.T(), resultComment.ParentCommentID, "Parent comment id was not set, ID nil") diff --git a/docs/cascading-soft-delete.md b/docs/cascading-soft-delete.md new file mode 100644 index 0000000000..5174bb4165 --- /dev/null +++ b/docs/cascading-soft-delete.md @@ -0,0 +1,296 @@ +# Cascading soft-deletes + +## Abstract + +In this document I describe our current approach to deal with deletion of objects in our Postgres database and I present the flaws of the current implementation. For example so far we don't have the ability to have cascading soft-deletes. Then I show a method that combines the strengths of Postgres' **cascading soft-delete** and an archiving approach that is easy to implement, maintain and that brings a performance boost in all search queries. + +## About soft-deletes in GORM + +In the [fabric8-services/fabric8-wit](https://github.com/fabric8-services/fabric8-wit) project which is written in Go we are using an object oriented mapper for our database called [GORM](http://gorm.io/). + +GORM offers a way to [soft-delete](http://gorm.io/docs/delete.html#Soft-Delete) database entries: + +> If model has `DeletedAt` field, it will get soft delete ability automatically! then it won’t be deleted from database permanently when call `Delete`, but only set field `DeletedAt`‘s value to current time. 
+ +Suppose you have a model definition, in other words a Go struct that looks like this: + +```go +// User is the Go model for a user entry in the database +type User struct { + ID int + Name string + DeletedAt *time.Time +} +``` + +And let's say you've loaded an existing user entry by its `ID` from the DB into an object `u`. + +```go +id := 123 +u := User{} +db.Where("id=?", id).First(&u) +``` + +If you then go ahead and delete the object using GORM: + +```go +db.Delete(&u) +``` + +the DB entry will not be deleted using `DELETE` in SQL but the row will be updated and the `deleted_at` is set to the current time: + +```sql +UPDATE users SET deleted_at="2018-10-12 11:24" WHERE id = 123; +``` + +## Problems with soft-deletes in GORM - Inversion of dependency and no cascade + +The above mentioned soft-delete is nice for archiving individual records but it can lead to very odd results for all records that depend on it. That is because soft-deletes by GORM don't cascade as a potential `DELETE` in SQL would do if a foreign key was modelled with `ON DELETE CASCADE`. + +When you model a database you typically design a table and then maybe another one that has a foreign key to the first one: + +```sql +CREATE TABLE countries ( + name text PRIMARY KEY, + deleted_at timestamp +); + +CREATE TABLE cities ( + name text, + country text REFERENCES countries(name) ON DELETE CASCADE, + deleted_at timestamp +); +``` + +Here we've modeled a list of countries and cities that reference a particular country. When you `DELETE` a country record, all cities will be deleted as well. But since the table has a `deleted_at` column that is carried on in the Go struct for a country or city, the GORM mapper will only soft-delete the country and leave the belonging cities untouched. + +### Shifting responsibility from DB to user/developer + +GORM thereby puts it in the hands of the developer to (soft-)delete all dependent cities. 
In other words, what previously was modeled as a relationship from **cities to countries** is now reversed as a relationship from **countries to cities**. That is because the user/developer is now responsible to (soft-)delete all cities belonging to a country when the country is deleted. + +## Proposal + +Wouldn't it be great if we can have soft-deletes and all the benefits of an `ON DELETE CASCADE`? + +It turns out that we can have it without much effort. Let's focus on a single table for now, namely the `countries` table. + +### An archive table + +Suppose for a second, that we can have another table called `countries_archive` that has the exact **same structure** as the `countries` table. Also suppose that all future **schema migrations** that are done to `countries` are applied to the `countries_archive` table. The only exception is that **unique constraints** and **foreign keys** will not be applied to `countries_archive`. + +I guess, this already sounds too good to be true, right? Well, we can create such a table using what's called [Inheritance](https://www.postgresql.org/docs/9.6/static/ddl-inherit.html) in Postgres: + +```sql +CREATE TABLE countries_archive () INHERITS (countries); +``` + +The resulting `countries_archive` table is meant to store all records where `deleted_at IS NOT NULL`. + +Note, that in our Go code we would never directly use any `_archive` table. Instead we would query for the original table from which `*_archive` table inherits and Postgres then magically looks into the `*_archive` table automatically. A bit further below I explain why that is; it has to do with partitioning. + +### Moving entries to the archive table on (soft)-DELETE + +Since the two tables, `countries` and `countries_archive` look exactly alike schemawise we can `INSERT` into the archive very easily using a trigger function when + +1. a `DELETE` happens on the `countries` table +2. 
or when a soft-delete is happening by setting `deleted_at` to a not `NULL` value. + +The trigger function looks like this: + +```sql +CREATE OR REPLACE FUNCTION archive_record() +RETURNS TRIGGER AS $$ +BEGIN + -- When a soft-delete happens... + IF (TG_OP = 'UPDATE' AND NEW.deleted_at IS NOT NULL) THEN + EXECUTE format('DELETE FROM %I.%I WHERE id = $1', TG_TABLE_SCHEMA, TG_TABLE_NAME) USING OLD.id; + RETURN OLD; + END IF; + -- When a hard-DELETE or a cascaded delete happens + IF (TG_OP = 'DELETE') THEN + -- Set the time when the deletion happens + IF (OLD.deleted_at IS NULL) THEN + OLD.deleted_at := timenow(); + END IF; + EXECUTE format('INSERT INTO %I.%I SELECT $1.*' + , TG_TABLE_SCHEMA, TG_TABLE_NAME || '_archive') + USING OLD; + END IF; + RETURN NULL; +END; + $$ LANGUAGE plpgsql; +``` + +To wire-up the function with a trigger we can write: + +```sql +CREATE TRIGGER soft_delete_countries + AFTER + -- this is what is triggered by GORM + UPDATE OF deleted_at + -- this is what is triggered by a cascaded DELETE or a direct hard-DELETE + OR DELETE + ON countries + FOR EACH ROW + EXECUTE PROCEDURE archive_record(); +``` + +## Conclusions + +Originally the inheritance functionality in postgres was developed to [partition data](https://www.postgresql.org/docs/9.1/static/ddl-partitioning.html). When you search your partitioned data using a specific column or condition, Postgres can find out which partition to search through and can thereby [improve the performance of your query](https://stackoverflow.com/a/3075248/835098). + +We can benefit from this performance improvement by only searching entities in existence, unless told otherwise. Entries in existence are those where `deleted_at IS NULL` holds true. (Notice, that GORM will automatically add an `AND deleted_at IS NULL` to every query if there's a `DeletedAt` in GORM's model struct.) 
+ +Let's see if Postgres already knows how to take advantage of our separation by running an `EXPLAIN`: + +```sql +EXPLAIN SELECT * FROM countries WHERE deleted_at IS NULL; ++-------------------------------------------------------------------------+ +| QUERY PLAN | +|-------------------------------------------------------------------------| +| Append (cost=0.00..21.30 rows=7 width=44) | +| -> Seq Scan on countries (cost=0.00..0.00 rows=1 width=44) | +| Filter: (deleted_at IS NULL) | +| -> Seq Scan on countries_archive (cost=0.00..21.30 rows=6 width=44) | +| Filter: (deleted_at IS NULL) | ++-------------------------------------------------------------------------+ +``` + +As we can see, Postgres still searches both tables, `countries` and `countries_archive`. Let's see what happens when we add a check constraint to the `countries_archive` table upon table creation: + +```sql +CREATE TABLE countries_archive ( + CHECK (deleted_at IS NOT NULL) +) INHERITS (countries); +``` + +Now, Postgres knows that it can skip `countries_archive` when `deleted_at` is expected to be `NULL`: + +```sql +EXPLAIN SELECT * FROM countries WHERE deleted_at IS NULL; ++----------------------------------------------------------------+ +| QUERY PLAN | +|----------------------------------------------------------------| +| Append (cost=0.00..0.00 rows=1 width=44) | +| -> Seq Scan on countries (cost=0.00..0.00 rows=1 width=44) | +| Filter: (deleted_at IS NULL) | ++----------------------------------------------------------------+ +``` + +Notice the absence of the sequential scan of the `countries_archive` table in the aforementioned `EXPLAIN`. + +## Benefits and Risks + +### Benefits + +1. We have regular **cascaded deletes** back and can let the DB figure out in which order to delete things. +2. At the same time, we're **archiving our data** as well. Every soft-delete +3. **No Go code changes** are required (except one, see the 2nd risks below). 
We only have to set up a table and a trigger for each table that shall be archived. +4. Whenever we figure that we don't want this behaviour with triggers and cascaded soft-delete anymore **we can easily go back**. +5. All future **schema migrations** that are being made to the original table will be applied to the `_archive` version of that table as well. Except for constraints, which is good. + +### Risks + +1. Suppose you add a new table that references another existing table with a foreign key that has `ON DELETE CASCADE`. If the existing table uses the `archive_record()` function from above, your new table will receive hard `DELETE`s when something in the existing table is soft-deleted. This isn't a problem, if you use `archive_record()` for your new dependent table as well. But you just have to remember it. + +2. We have tables like `work_item_revisions` and `comment_revisions`. We keep record of what is changing in the referenced tables, namely `work_items` and `comments`. And when a comment is deleted for example, we have to `INSERT` into the `comment_revisions` a record that states that a comment has been deleted. This is problematic because we do it **after** the comment was deleted, that means it no longer exists in the `comments` table. This will cause a foreign key violation. As it turns out we can avoid this by doing the `INSERT` to the `_revision` table before we do the actual delete. That is the only Go code change. + +## Final thoughts + +The approach presented here does not solve the problem of restoring individual rows. On the other hand, this approach doesn't make it harder or more complicated. It just remains unsolved. + +In our application certain fields of a work item don't have a foreign key specified. A good example is the area IDs. That means when an area is `DELETE`d, an associated work item is not automatically `DELETE`d. There are two scenarios when an area is removed itself: + +1. A delete is directly requested from a user. +2. 
A user requests to delete a space and then the area is removed due to its foreign key constraint on the space. + +Notice that, in the first scenario the user's request goes through the area controller code and then through the area repository code. We have a chance in any of those layers to modify all work items that would reference a non-existing area otherwise. In the second scenario everything related to the area happens and stays on the DB layer so we have no chance of modifying the work items. The good news is that we don't have to. Every work item references a space and will therefore be deleted anyway when the space goes away. + +What applies to areas also applies to iterations, labels and board columns as well. + +# How to apply to our database? + +Steps + +1. Create "*_archive" tables for all tables that inherit the original tables. +2. Install a soft-delete trigger using the above `archive_record()` function. +3. Move all entries where `deleted_at IS NOT NULL` to their respective `_archive` table by doing a hard `DELETE` which will trigger the `archive_record()` function. + +# Example + +Here is a [fully working example](https://rextester.com/BMCB94324) in which we demonstrate a cascaded soft-delete over two tables, `countries` and `capitals`. We show how records are being archived independently of the method that was chosen for the delete. 
+ +```sql +CREATE TABLE countries ( + id int primary key, + name text unique, + deleted_at timestamp +); +CREATE TABLE countries_archive ( + CHECK ( deleted_at IS NOT NULL ) +) INHERITS(countries); + +CREATE TABLE capitals ( + id int primary key, + name text, + country_id int references countries(id) on delete cascade, + deleted_at timestamp +); +CREATE TABLE capitals_archive ( + CHECK ( deleted_at IS NOT NULL ) +) INHERITS(capitals); + +CREATE OR REPLACE FUNCTION archive_record() +RETURNS TRIGGER AS $$ +BEGIN + IF (TG_OP = 'UPDATE' AND NEW.deleted_at IS NOT NULL) THEN + EXECUTE format('DELETE FROM %I.%I WHERE id = $1', TG_TABLE_SCHEMA, TG_TABLE_NAME) USING OLD.id; + RETURN OLD; + END IF; + IF (TG_OP = 'DELETE') THEN + IF (OLD.deleted_at IS NULL) THEN + OLD.deleted_at := timenow(); + END IF; + EXECUTE format('INSERT INTO %I.%I SELECT $1.*' + , TG_TABLE_SCHEMA, TG_TABLE_NAME || '_archive') + USING OLD; + END IF; + RETURN NULL; +END; + $$ LANGUAGE plpgsql; + +CREATE TRIGGER soft_delete_countries + AFTER + UPDATE OF deleted_at + OR DELETE + ON countries + FOR EACH ROW + EXECUTE PROCEDURE archive_record(); + +CREATE TRIGGER soft_delete_capitals + AFTER + UPDATE OF deleted_at + OR DELETE + ON capitals + FOR EACH ROW + EXECUTE PROCEDURE archive_record(); + +INSERT INTO countries (id, name) VALUES (1, 'France'); +INSERT INTO countries (id, name) VALUES (2, 'India'); +INSERT INTO capitals VALUES (1, 'Paris', 1); +INSERT INTO capitals VALUES (2, 'Bengaluru', 2); + +SELECT 'BEFORE countries' as "info", * FROM ONLY countries; +SELECT 'BEFORE countries_archive' as "info", * FROM countries_archive; +SELECT 'BEFORE capitals' as "info", * FROM ONLY capitals; +SELECT 'BEFORE capitals_archive' as "info", * FROM capitals_archive; + +-- Delete one country via hard-DELETE and one via soft-delete +DELETE FROM countries WHERE id = 1; +UPDATE countries SET deleted_at = '2018-12-01' WHERE id = 2; + +SELECT 'AFTER countries' as "info", * FROM ONLY countries; +SELECT 'AFTER 
countries_archive' as "info", * FROM countries_archive; +SELECT 'AFTER capitals' as "info", * FROM ONLY capitals; +SELECT 'AFTER capitals_archive' as "info", * FROM capitals_archive; +``` \ No newline at end of file diff --git a/migration/migration.go b/migration/migration.go index 95ba77c1a5..54e4897670 100644 --- a/migration/migration.go +++ b/migration/migration.go @@ -465,6 +465,9 @@ func GetMigrations() Migrations { // Version 109 m = append(m, steps{ExecuteSQLFile("109-number-column-for-iteration.sql")}) + // Version 110 + m = append(m, steps{ExecuteSQLFile("110-cascading-soft-delete.sql")}) + // Version N // // In order to add an upgrade, simply append an array of MigrationFunc to the @@ -565,7 +568,7 @@ func MigrateToNextVersion(tx *sql.Tx, nextVersion *int64, m Migrations, catalog // Apply all the updates of the next version for j := range m[*nextVersion] { if err := m[*nextVersion][j](tx); err != nil { - return errs.Errorf("failed to execute migration of step %d of version %d: %s\n", j, *nextVersion, err) + return errs.Errorf("failed to execute migration step %d of version %d: %s\n", j, *nextVersion, err) } } diff --git a/migration/migration_blackbox_test.go b/migration/migration_blackbox_test.go index 2cb1780e4f..068cb364fb 100644 --- a/migration/migration_blackbox_test.go +++ b/migration/migration_blackbox_test.go @@ -153,12 +153,13 @@ func TestMigrations(t *testing.T) { t.Run("TestMigration101", testMigration101TypeGroupHasDescriptionField) t.Run("TestMigration102", testMigration102LinkTypeDescriptionFields) t.Run("TestMigration103", testMigration103NotNullNotEmptyonEmail) - t.Run("TestMigration104", testMigration104IndexOnWIRevisionTable) - t.Run("TestMigration105", testMigration105UpdateRootIterationAreaPathField) - t.Run("TestMigration106", testMigration106RemoveLinkCategoryConcept) - t.Run("TestMigration107", testMigration107NumberSequencesTable) - t.Run("TestMigration108", testMigration108NumberColumnForArea) - t.Run("TestMigration109", 
testMigration109NumberColumnForIteration) + t.Run("TestMirgraion104", testMigration104IndexOnWIRevisionTable) + t.Run("TestMirgraion105", testMigration105UpdateRootIterationAreaPathField) + t.Run("TestMirgraion106", testMigration106RemoveLinkCategoryConcept) + t.Run("TestMirgraion107", testMigration107NumberSequencesTable) + t.Run("TestMirgraion108", testMigration108NumberColumnForArea) + t.Run("TestMirgraion109", testMigration109NumberColumnForIteration) + t.Run("TestMirgraion110", testMigration110CascadingSoftDelete) // Perform the migration err = migration.Migrate(sqlDB, databaseName) @@ -1383,6 +1384,185 @@ func testMigration109NumberColumnForIteration(t *testing.T) { require.True(t, dialect.HasColumn("iterations", "number")) } +func testMigration110CascadingSoftDelete(t *testing.T) { + t.Run("migrate to previous version", func(t *testing.T) { + migrateToVersion(t, sqlDB, migrations[:110], 110) + }) + + areaID := uuid.NewV4().String() + codebaseID := uuid.NewV4().String() + commentID := uuid.NewV4().String() + identityID := uuid.NewV4().String() + iterationID := uuid.NewV4().String() + labelID := uuid.NewV4().String() + spaceID := uuid.NewV4().String() + spaceTemplateID := uuid.NewV4().String() + trackerID := uuid.NewV4().String() + trackerItemID := "12345678" + trackerQueryID := "12345678" + userID := uuid.NewV4().String() + workItemBoardColumnID := uuid.NewV4().String() + workItemBoardID := uuid.NewV4().String() + workItemChildTypeID := uuid.NewV4().String() + workItemID := uuid.NewV4().String() + workItemLinkID := uuid.NewV4().String() + workItemLinkTypeID := uuid.NewV4().String() + workItemTypeGroupID := uuid.NewV4().String() + workItemTypeGroupMemberID := uuid.NewV4().String() + workItemTypeID := uuid.NewV4().String() + // deleted entries + deletedAreaID := uuid.NewV4().String() + deletedCodebaseID := uuid.NewV4().String() + deletedCommentID := uuid.NewV4().String() + deletedIdentityID := uuid.NewV4().String() + deletedIterationID := uuid.NewV4().String() 
+ deletedLabelID := uuid.NewV4().String() + deletedSpaceID := uuid.NewV4().String() + deletedSpaceTemplateID := uuid.NewV4().String() + deletedTrackerID := uuid.NewV4().String() + deletedTrackerItemID := "123456789" + deletedTrackerQueryID := "123456789" + deletedUserID := uuid.NewV4().String() + deletedWorkItemBoardColumnID := uuid.NewV4().String() + deletedWorkItemBoardID := uuid.NewV4().String() + deletedWorkItemChildTypeID := uuid.NewV4().String() + deletedWorkItemID := uuid.NewV4().String() + deletedWorkItemLinkID := uuid.NewV4().String() + deletedWorkItemLinkTypeID := uuid.NewV4().String() + deletedWorkItemTypeGroupID := uuid.NewV4().String() + deletedWorkItemTypeGroupMemberID := uuid.NewV4().String() + deletedWorkItemTypeID := uuid.NewV4().String() + + t.Run("setup test data to migrate", func(t *testing.T) { + require.Nil(t, runSQLscript(sqlDB, "110-cascading-soft-delete.test.sql", + areaID, + codebaseID, + commentID, + identityID, + iterationID, + labelID, + spaceID, + spaceTemplateID, + trackerID, + trackerItemID, + trackerQueryID, + userID, + workItemBoardColumnID, + workItemBoardID, + workItemChildTypeID, + workItemID, + workItemLinkID, + workItemLinkTypeID, + workItemTypeGroupID, + workItemTypeGroupMemberID, + workItemTypeID, + // deleted entries + deletedAreaID, + deletedCodebaseID, + deletedCommentID, + deletedIdentityID, + deletedIterationID, + deletedLabelID, + deletedSpaceID, + deletedSpaceTemplateID, + deletedTrackerID, + deletedTrackerItemID, + deletedTrackerQueryID, + deletedUserID, + deletedWorkItemBoardColumnID, + deletedWorkItemBoardID, + deletedWorkItemChildTypeID, + deletedWorkItemID, + deletedWorkItemLinkID, + deletedWorkItemLinkTypeID, + deletedWorkItemTypeGroupID, + deletedWorkItemTypeGroupMemberID, + deletedWorkItemTypeID, + )) + }) + + // Helper functions + verifyExists := func(t *testing.T, table string, id string) { + q := fmt.Sprintf("SELECT 1 FROM %s WHERE id = '%s' AND deleted_at IS NULL", table, id) + row := sqlDB.QueryRow(q) + 
require.NotNil(t, row, "exists(): table: %q, id: %q", table, id) + var p int32 + err := row.Scan(&p) + require.NoError(t, err, "exists(): table: %q, id: %q, err: %+v", table, id, err) + require.Equal(t, int32(1), p, "exists(): table %q is missing id %q", table, id) + } + verifySoftDeleted := func(t *testing.T, table string, id string) { + q := fmt.Sprintf("SELECT 1 FROM %s WHERE id = '%s' AND deleted_at IS NOT NULL", table, id) + row := sqlDB.QueryRow(q) + require.NotNil(t, row, "existsButIsDeleted(): table: %q, id: %q", table, id) + var p int32 + err := row.Scan(&p) + require.NoError(t, err, "existsButIsDeleted(): table: %q, id: %q, err: %+v", table, id, err) + require.Equal(t, int32(1), p, "existsButIsDeleted(): table %q is missing id %q", table, id) + } + checkEntitiesExist := func(t *testing.T, name string, existFunc func(t *testing.T, table string, id string)) { + t.Run(name, func(t *testing.T) { + existFunc(t, "areas", areaID) + existFunc(t, "codebases", codebaseID) + existFunc(t, "comments", commentID) + existFunc(t, "identities", identityID) + existFunc(t, "iterations", iterationID) + existFunc(t, "labels", labelID) + existFunc(t, "spaces", spaceID) + existFunc(t, "space_templates", spaceTemplateID) + existFunc(t, "trackers", trackerID) + existFunc(t, "tracker_items", trackerItemID) + existFunc(t, "tracker_queries", trackerQueryID) + existFunc(t, "users", userID) + existFunc(t, "work_item_board_columns", workItemBoardColumnID) + existFunc(t, "work_item_boards", workItemBoardID) + existFunc(t, "work_item_child_types", workItemChildTypeID) + existFunc(t, "work_items", workItemID) + existFunc(t, "work_item_links", workItemLinkID) + existFunc(t, "work_item_link_types", workItemLinkTypeID) + existFunc(t, "work_item_type_groups", workItemTypeGroupID) + existFunc(t, "work_item_type_group_members", workItemTypeGroupMemberID) + existFunc(t, "work_item_types", workItemTypeID) + // deleted entries + verifySoftDeleted(t, "areas", deletedAreaID) + verifySoftDeleted(t, 
"codebases", deletedCodebaseID) + verifySoftDeleted(t, "comments", deletedCommentID) + verifySoftDeleted(t, "identities", deletedIdentityID) + verifySoftDeleted(t, "iterations", deletedIterationID) + verifySoftDeleted(t, "labels", deletedLabelID) + verifySoftDeleted(t, "spaces", deletedSpaceID) + verifySoftDeleted(t, "space_templates", deletedSpaceTemplateID) + verifySoftDeleted(t, "trackers", deletedTrackerID) + verifySoftDeleted(t, "tracker_items", deletedTrackerItemID) + verifySoftDeleted(t, "tracker_queries", deletedTrackerQueryID) + verifySoftDeleted(t, "users", deletedUserID) + verifySoftDeleted(t, "work_item_board_columns", deletedWorkItemBoardColumnID) + verifySoftDeleted(t, "work_item_boards", deletedWorkItemBoardID) + verifySoftDeleted(t, "work_item_child_types", deletedWorkItemChildTypeID) + verifySoftDeleted(t, "work_items", deletedWorkItemID) + verifySoftDeleted(t, "work_item_links", deletedWorkItemLinkID) + verifySoftDeleted(t, "work_item_link_types", deletedWorkItemLinkTypeID) + verifySoftDeleted(t, "work_item_type_groups", deletedWorkItemTypeGroupID) + verifySoftDeleted(t, "work_item_type_group_members", deletedWorkItemTypeGroupMemberID) + verifySoftDeleted(t, "work_item_types", deletedWorkItemTypeID) + }) + } + + t.Run("before migration", func(t *testing.T) { + checkEntitiesExist(t, "all entries have been created", verifyExists) + }) + t.Run("migrate to current version", func(t *testing.T) { + migrateToVersion(t, sqlDB, migrations[:111], 111) + require.False(t, dialect.HasTable("work_item_link_categories")) + }) + t.Run("after migration", func(t *testing.T) { + checkEntitiesExist(t, "entities are not modified", verifyExists) + require.Nil(t, runSQLscript(sqlDB, "110-soft-delete-entities.test.sql", spaceTemplateID, trackerID, userID)) + checkEntitiesExist(t, "all entries are soft-deleted", verifySoftDeleted) + }) + +} + // runSQLscript loads the given filename from the packaged SQL test files and // executes it on the given database. 
Golang text/template module is used // to handle all the optional arguments passed to the sql test files diff --git a/migration/sql-files/110-cascading-soft-delete.sql b/migration/sql-files/110-cascading-soft-delete.sql new file mode 100644 index 0000000000..2f84c4dd3c --- /dev/null +++ b/migration/sql-files/110-cascading-soft-delete.sql @@ -0,0 +1,160 @@ +-- Delete no longer needed work item link categories that is no longer used +-- since migration 106. +DROP TABLE work_item_link_categories; + +-- Add missing foreign key constraint from comment to work item. +DELETE FROM comments c WHERE parent_id IS NOT NULL AND NOT EXISTS (SELECT * FROM work_items w WHERE c.parent_id = w.id); +ALTER TABLE comments ADD FOREIGN KEY (parent_id) REFERENCES work_items(id) ON DELETE CASCADE; + +-- Change foreign key from "tracker_items" and "tracker_queries" to "trackers" +-- from `ON UPDATE RESTRICT ON DELETE RESTRICT` to `ON DELETE CASCADE` by adding +-- the new and then dropping the old foreign key. +ALTER TABLE tracker_items + ADD FOREIGN KEY (tracker_id) REFERENCES trackers(id) ON DELETE CASCADE, + DROP CONSTRAINT tracker_items_tracker_id_trackers_id_foreign; +ALTER TABLE tracker_queries + ADD FOREIGN KEY (tracker_id) REFERENCES trackers(id) ON DELETE CASCADE, + DROP CONSTRAINT tracker_queries_tracker_id_trackers_id_foreign; + +-- Change foreign key from "identites" to "users" to cascade. +ALTER TABLE identities + ADD FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE, + DROP CONSTRAINT identities_user_id_users_id_fk; + +-- Change foreign key from "comment_revisions" to "comments" to ON DELETE CASCADE. 
+ALTER TABLE comment_revisions + ADD FOREIGN KEY (comment_id) REFERENCES comments(id) ON DELETE CASCADE, + DROP CONSTRAINT comment_revisions_comments_fk; + +ALTER TABLE work_item_revisions ADD COLUMN deleted_at timestamp with time zone; +ALTER TABLE comment_revisions ADD COLUMN deleted_at timestamp with time zone; + +CREATE OR REPLACE FUNCTION archive_record() +RETURNS TRIGGER AS $$ +BEGIN + -- archive_record() can be use used as the trigger function on all tables + -- that want to archive their data into a separate *_archive table after + -- it was (soft-)DELETEd on the main table. The function will have no effect + -- if it is being used on a non-DELETE or non-UPDATE trigger. + -- + -- You should set up a trigger like so: + -- + -- CREATE TRIGGER soft_delete_countries + -- AFTER + -- -- this is what is triggered by GORM + -- UPDATE OF deleted_at + -- -- this is what is triggered by a cascaded DELETE or a direct hard-DELETE + -- OR DELETE + -- ON countries + -- FOR EACH ROW + -- EXECUTE PROCEDURE archive_record(); + -- + -- The effect of such a trigger is that your entry will be archived under + -- these circumstances: + -- + -- 1. a soft-delete happens by setting a row's `deleted_at` field to a non-`NULL` value, + -- 2. a hard-DELETE happens, + -- 3. or a cascaded DELETE happens that was triggered by one of the before mentioned events. + -- + -- The only requirements are: + -- + -- 1. your table has a `deleted_at` field + -- 2. your table has an archive table with the extact same name and an `_archive` suffix + -- 3. 
your table has a primary key called `id` + -- + -- You should set up your archive table like so: + -- + -- CREATE TABLE your_table_archive (CHECK(deleted_at IS NOT NULL)) INHERITS(your_table); + + -- When a soft-delete happens + IF (TG_OP = 'UPDATE' AND NEW.deleted_at IS NOT NULL) THEN + EXECUTE format('DELETE FROM %I.%I WHERE id = $1', TG_TABLE_SCHEMA, TG_TABLE_NAME) USING OLD.id; + RETURN OLD; + END IF; + -- When a hard-DELETE or a cascaded delete happens + IF (TG_OP = 'DELETE') THEN + -- Set the time when the deletion happen (if not already done) + IF (OLD.deleted_at IS NULL) THEN + OLD.deleted_at := timenow(); + END IF; + EXECUTE format('INSERT INTO %I.%I SELECT $1.*', TG_TABLE_SCHEMA, TG_TABLE_NAME || '_archive') + USING OLD; + END IF; + RETURN NULL; +END; + $$ LANGUAGE plpgsql; + +-- Create archive tables +CREATE TABLE areas_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (areas); +CREATE TABLE codebases_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (codebases); +CREATE TABLE comment_revisions_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (comment_revisions); +CREATE TABLE comments_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (comments); +CREATE TABLE identities_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (identities); +CREATE TABLE iterations_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (iterations); +CREATE TABLE labels_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (labels); +CREATE TABLE space_templates_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (space_templates); +CREATE TABLE spaces_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (spaces); +CREATE TABLE tracker_items_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (tracker_items); +CREATE TABLE tracker_queries_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (tracker_queries); +CREATE TABLE trackers_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (trackers); +CREATE TABLE users_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (users); +CREATE TABLE 
work_item_board_columns_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (work_item_board_columns); +CREATE TABLE work_item_boards_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (work_item_boards); +CREATE TABLE work_item_child_types_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (work_item_child_types); +CREATE TABLE work_item_link_types_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (work_item_link_types); +CREATE TABLE work_item_links_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (work_item_links); +CREATE TABLE work_item_revisions_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (work_item_revisions); +CREATE TABLE work_item_type_group_members_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (work_item_type_group_members); +CREATE TABLE work_item_type_groups_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (work_item_type_groups); +CREATE TABLE work_item_types_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (work_item_types); +CREATE TABLE work_items_archive (CHECK (deleted_at IS NOT NULL)) INHERITS (work_items); + +-- Setup triggers +CREATE TRIGGER archive_areas AFTER UPDATE OF deleted_at OR DELETE ON areas FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_codebases AFTER UPDATE OF deleted_at OR DELETE ON codebases FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_comment_revisions_archive AFTER UPDATE OF deleted_at OR DELETE ON comment_revisions FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_comments AFTER UPDATE OF deleted_at OR DELETE ON comments FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_identities AFTER UPDATE OF deleted_at OR DELETE ON identities FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_iterations AFTER UPDATE OF deleted_at OR DELETE ON iterations FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_labels AFTER UPDATE OF deleted_at OR DELETE ON labels FOR EACH ROW EXECUTE PROCEDURE 
archive_record(); +CREATE TRIGGER archive_space_templates AFTER UPDATE OF deleted_at OR DELETE ON space_templates FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_spaces AFTER UPDATE OF deleted_at OR DELETE ON spaces FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_tracker_items AFTER UPDATE OF deleted_at OR DELETE ON tracker_items FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_tracker_queries AFTER UPDATE OF deleted_at OR DELETE ON tracker_queries FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_trackers AFTER UPDATE OF deleted_at OR DELETE ON trackers FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_users AFTER UPDATE OF deleted_at OR DELETE ON users FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_work_item_board_columns AFTER UPDATE OF deleted_at OR DELETE ON work_item_board_columns FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_work_item_boards AFTER UPDATE OF deleted_at OR DELETE ON work_item_boards FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_work_item_child_types AFTER UPDATE OF deleted_at OR DELETE ON work_item_child_types FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_work_item_link_types AFTER UPDATE OF deleted_at OR DELETE ON work_item_link_types FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_work_item_links AFTER UPDATE OF deleted_at OR DELETE ON work_item_links FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_work_item_revisions_archive AFTER UPDATE OF deleted_at OR DELETE ON work_item_revisions FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_work_item_type_group_members AFTER UPDATE OF deleted_at OR DELETE ON work_item_type_group_members FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_work_item_type_groups AFTER UPDATE OF 
deleted_at OR DELETE ON work_item_type_groups FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_work_item_types AFTER UPDATE OF deleted_at OR DELETE ON work_item_types FOR EACH ROW EXECUTE PROCEDURE archive_record(); +CREATE TRIGGER archive_work_items AFTER UPDATE OF deleted_at OR DELETE ON work_items FOR EACH ROW EXECUTE PROCEDURE archive_record(); + +-- Archive all deleted records +DELETE FROM areas WHERE deleted_at IS NOT NULL; +DELETE FROM codebases WHERE deleted_at IS NOT NULL; +DELETE FROM comment_revisions WHERE deleted_at IS NOT NULL; +DELETE FROM comments WHERE deleted_at IS NOT NULL; +DELETE FROM identities WHERE deleted_at IS NOT NULL; +DELETE FROM iterations WHERE deleted_at IS NOT NULL; +DELETE FROM labels WHERE deleted_at IS NOT NULL; +DELETE FROM space_templates WHERE deleted_at IS NOT NULL; +DELETE FROM spaces WHERE deleted_at IS NOT NULL; +DELETE FROM tracker_items WHERE deleted_at IS NOT NULL; +DELETE FROM tracker_queries WHERE deleted_at IS NOT NULL; +DELETE FROM trackers WHERE deleted_at IS NOT NULL; +DELETE FROM users WHERE deleted_at IS NOT NULL; +DELETE FROM work_item_board_columns WHERE deleted_at IS NOT NULL; +DELETE FROM work_item_boards WHERE deleted_at IS NOT NULL; +DELETE FROM work_item_child_types WHERE deleted_at IS NOT NULL; +DELETE FROM work_item_link_types WHERE deleted_at IS NOT NULL; +DELETE FROM work_item_links WHERE deleted_at IS NOT NULL; +DELETE FROM work_item_revisions WHERE deleted_at IS NOT NULL; +DELETE FROM work_item_type_group_members WHERE deleted_at IS NOT NULL; +DELETE FROM work_item_type_groups WHERE deleted_at IS NOT NULL; +DELETE FROM work_item_types WHERE deleted_at IS NOT NULL; +DELETE FROM work_items WHERE deleted_at IS NOT NULL; \ No newline at end of file diff --git a/migration/sql-test-files/110-cascading-soft-delete.test.sql b/migration/sql-test-files/110-cascading-soft-delete.test.sql new file mode 100644 index 0000000000..644c19bd3d --- /dev/null +++ 
b/migration/sql-test-files/110-cascading-soft-delete.test.sql @@ -0,0 +1,127 @@ +SET id.area = '{{index . 0}}'; +SET id.codebase = '{{index . 1}}'; +SET id.comment = '{{index . 2}}'; +SET id.identity = '{{index . 3}}'; +SET id.iteration = '{{index . 4}}'; +SET id.label = '{{index . 5}}'; +SET id.space = '{{index . 6}}'; +SET id.spaceTemplate = '{{index . 7}}'; +SET id.tracker = '{{index . 8}}'; +SET id.trackerItem = '{{index . 9}}'; +SET id.trackerQuery = '{{index . 10}}'; +SET id._user = '{{index . 11}}'; +SET id.workItemBoardColumn = '{{index . 12}}'; +SET id.workItemBoard = '{{index . 13}}'; +SET id.workItemChildType = '{{index . 14}}'; +SET id.workItem = '{{index . 15}}'; +SET id.workItemLink = '{{index . 16}}'; +SET id.workItemLinkType = '{{index . 17}}'; +SET id.workItemTypeGroup = '{{index . 18}}'; +SET id.workItemTypeGroupMember = '{{index . 19}}'; +SET id.workItemType = '{{index . 20}}'; +-- deleted items +SET id.deletedArea ='{{index . 21}}'; +SET id.deletedCodebase ='{{index . 22}}'; +SET id.deletedComment ='{{index . 23}}'; +SET id.deletedIdentity ='{{index . 24}}'; +SET id.deletedIteration ='{{index . 25}}'; +SET id.deletedLabel ='{{index . 26}}'; +SET id.deletedSpace ='{{index . 27}}'; +SET id.deletedSpaceTemplate ='{{index . 28}}'; +SET id.deletedTracker ='{{index . 29}}'; +SET id.deletedTrackerItem ='{{index . 30}}'; +SET id.deletedTrackerQuery ='{{index . 31}}'; +SET id.deletedUser ='{{index . 32}}'; +SET id.deletedWorkItemBoardColumn ='{{index . 33}}'; +SET id.deletedWorkItemBoard ='{{index . 34}}'; +SET id.deletedWorkItemChildType ='{{index . 35}}'; +SET id.deletedWorkItem ='{{index . 36}}'; +SET id.deletedWorkItemLink ='{{index . 37}}'; +SET id.deletedWorkItemLinkType ='{{index . 38}}'; +SET id.deletedWorkItemTypeGroup ='{{index . 39}}'; +SET id.deletedWorkItemTypeGroupMember ='{{index . 40}}'; +SET id.deletedWorkItemType ='{{index . 
41}}'; + +INSERT INTO space_templates (id,name,description, deleted_at) VALUES + (current_setting('id.spaceTemplate')::uuid, current_setting('id.spaceTemplate'), 'test template', NULL), + (current_setting('id.deletedSpaceTemplate')::uuid, current_setting('id.deletedSpaceTemplate'), 'test template', '2018-09-17 16:01'); + +INSERT INTO spaces (id,name,space_template_id, deleted_at) VALUES + (current_setting('id.space')::uuid, current_setting('id.space'), current_setting('id.spaceTemplate')::uuid, NULL), + (current_setting('id.deletedSpace')::uuid, current_setting('id.deletedSpace'), current_setting('id.spaceTemplate')::uuid, '2018-09-17 16:01'); + +INSERT INTO iterations (id, name, path, space_id, number, deleted_at) VALUES + (current_setting('id.iteration')::uuid, 'root iteration', replace(current_setting('id.iteration'), '-', '_')::ltree, current_setting('id.space')::uuid, 1, NULL), + (current_setting('id.deletedIteration')::uuid, 'deleted iteration', replace(current_setting('id.deletedIteration'), '-', '_')::ltree, current_setting('id.space')::uuid, 2, '2018-09-17 16:01'); + +INSERT INTO areas (id, name, path, space_id, number, deleted_at) VALUES + (current_setting('id.area')::uuid, 'area', replace(current_setting('id.area'), '-', '_')::ltree, current_setting('id.space')::uuid, 1, NULL), + (current_setting('id.deletedArea')::uuid, 'area deleted', replace(current_setting('id.deletedArea'), '-', '_')::ltree, current_setting('id.space')::uuid, 2, '2018-09-17 16:01'); + +INSERT INTO labels (id, name, text_color, background_color, space_id, deleted_at) VALUES + (current_setting('id.label')::uuid, 'some label', '#ffffff', '#000000', current_setting('id.space')::uuid, NULL), + (current_setting('id.deletedLabel')::uuid, 'deleted label', '#000000', '#ffffff', current_setting('id.space')::uuid, '2018-09-17 16:01'); + +INSERT INTO work_item_types (id, name, space_template_id, fields, description, icon, deleted_at) VALUES + (current_setting('id.workItemType')::uuid, 'WIT1', 
current_setting('id.spaceTemplate')::uuid, '{"system.title": {"Type": {"Kind": "string"}, "Label": "Title", "Required": true, "Description": "The title text of the work item"}}', 'Description for WIT1', 'fa fa-bookmark', NULL), + (current_setting('id.deletedWorkItemType')::uuid, 'WIT2 Deleted', current_setting('id.spaceTemplate')::uuid, '{"system.title": {"Type": {"Kind": "string"}, "Label": "Title", "Required": true, "Description": "The title text of the work item"}}', 'Description for WIT2 Deleted', 'fa fa-bookmark', '2018-09-17 16:01'); + +INSERT INTO work_items (id, type, space_id, fields, deleted_at) VALUES + (current_setting('id.workItem')::uuid, current_setting('id.workItemType')::uuid, current_setting('id.space')::uuid, '{"system.title":"Work item 1"}'::json, NULL), + (current_setting('id.deletedWorkItem')::uuid, current_setting('id.workItemType')::uuid, current_setting('id.space')::uuid, '{"system.title":"Work item 2 Deleted"}'::json, '2018-09-17 16:01'); + +INSERT INTO work_item_link_types (id, name, forward_name, reverse_name, topology, space_template_id, deleted_at) VALUES + (current_setting('id.workItemLinkType')::uuid, 'Bug blocker', 'blocks', 'blocked by', 'network', current_setting('id.spaceTemplate')::uuid, NULL), + (current_setting('id.deletedWorkItemLinkType')::uuid, 'Dependency', 'depends on', 'is dependent on', 'dependency', current_setting('id.spaceTemplate')::uuid, '2018-09-17 16:01'); + +INSERT INTO work_item_links (id, link_type_id, source_id, target_id, deleted_at) VALUES + (current_setting('id.workItemLink')::uuid, current_setting('id.workItemLinkType')::uuid, current_setting('id.workItem')::uuid, current_setting('id.workItem')::uuid, NULL), + (current_setting('id.deletedWorkItemLink')::uuid, current_setting('id.workItemLinkType')::uuid, current_setting('id.workItem')::uuid, current_setting('id.workItem')::uuid, '2018-09-17 16:01'); + +INSERT INTO comments (id, parent_id, body, deleted_at) VALUES + (current_setting('id.comment')::uuid, 
current_setting('id.workItem')::uuid, 'a comment', NULL), + (current_setting('id.deletedComment')::uuid, current_setting('id.workItem')::uuid, 'another comment', '2018-09-17 16:01'); + +INSERT INTO codebases (id, space_id, type, url, stack_id, deleted_at) VALUES + (current_setting('id.codebase')::uuid, current_setting('id.space')::uuid, 'git', 'git@github.com:fabric8-services/fabric8-wit.git', 'golang-default', NULL), + (current_setting('id.deletedCodebase')::uuid, current_setting('id.space')::uuid, 'git', 'git@github.com:fabric8-services/fabric8-common.git', 'golang-default', '2018-09-17 16:01'); + +INSERT INTO users (id, email, full_name, deleted_at) VALUES + (current_setting('id._user')::uuid, concat('john_doe@', current_setting('id._user'), '.com'), 'John Doe', NULL), + (current_setting('id.deletedUser')::uuid, concat('jane_doe@', current_setting('id.deletedUser'), '.com'), 'Jane Doe', '2018-09-17 16:01'); + +INSERT INTO identities (id, username, user_id, deleted_at) VALUES + (current_setting('id.identity')::uuid, current_setting('id.identity'), current_setting('id._user')::uuid, NULL), + (current_setting('id.deletedIdentity')::uuid, current_setting('id.deletedIdentity'), current_setting('id.deletedUser')::uuid, '2018-09-17 16:01'); + +INSERT INTO trackers (id, url, type, deleted_at) VALUES + (current_setting('id.tracker')::uuid, 'https://api.github.com/', 'github', NULL), + (current_setting('id.deletedTracker')::uuid, 'https://api.github.com/', 'github', '2018-09-17 16:01'); + +INSERT INTO tracker_items (id, remote_item_id, item, tracker_id, deleted_at) VALUES + (current_setting('id.trackerItem')::bigint, '1234', '5678', current_setting('id.tracker')::uuid, NULL), + (current_setting('id.deletedTrackerItem')::bigint, '1234', '5678', current_setting('id.tracker')::uuid, '2018-09-17 16:01'); + +INSERT INTO tracker_queries (id, query, schedule, space_id, tracker_id, deleted_at) VALUES + (current_setting('id.trackerQuery')::bigint, 'after', 'the', 
current_setting('id.space')::uuid, current_setting('id.tracker')::uuid, NULL), + (current_setting('id.deletedTrackerQuery')::bigint, 'before', 'the', current_setting('id.space')::uuid, current_setting('id.tracker')::uuid, '2018-09-17 16:01'); + +INSERT INTO work_item_boards (id, space_template_id, name, description, context_type, context, deleted_at) VALUES + (current_setting('id.workItemBoard')::uuid, current_setting('id.spaceTemplate')::uuid, 'foo board', 'foo', 'my context type', 'my context', NULL), + (current_setting('id.deletedWorkItemBoard')::uuid, current_setting('id.spaceTemplate')::uuid, 'bar board', 'bar', 'my context type', 'my context', '2018-09-17 16:01'); + +INSERT INTO work_item_board_columns (id, board_id, name, column_order, trans_rule_key, trans_rule_argument, deleted_at) VALUES + (current_setting('id.workItemBoardColumn')::uuid, current_setting('id.workItemBoard')::uuid, 'foo board column', 1, 'my trans rule key', 'my trans rule argument', NULL), + (current_setting('id.deletedWorkItemBoardColumn')::uuid, current_setting('id.workItemBoard')::uuid, 'bar board column', 2, 'my trans rule key', 'my trans rule argument', '2018-09-17 16:01'); + +INSERT INTO work_item_child_types (id, parent_work_item_type_id, child_work_item_type_id, deleted_at) VALUES + (current_setting('id.workItemChildType')::uuid, current_setting('id.workItemType')::uuid, current_setting('id.workItemType')::uuid, NULL), + (current_setting('id.deletedWorkItemChildType')::uuid, current_setting('id.workItemType')::uuid, current_setting('id.workItemType')::uuid, '2018-09-17 16:01'); + +INSERT INTO work_item_type_groups (id, name, bucket, space_template_id, deleted_at) VALUES + (current_setting('id.workItemTypeGroup')::uuid, 'foo group', 'portfolio', current_setting('id.spaceTemplate')::uuid, NULL), + (current_setting('id.deletedWorkItemTypeGroup')::uuid, 'foo group', 'portfolio', current_setting('id.spaceTemplate')::uuid, '2018-09-17 16:01'); + +INSERT INTO work_item_type_group_members 
(id, type_group_id, work_item_type_id, deleted_at) VALUES + (current_setting('id.workItemTypeGroupMember')::uuid, current_setting('id.workItemTypeGroup')::uuid, current_setting('id.workItemType')::uuid, NULL), + (current_setting('id.deletedWorkItemTypeGroupMember')::uuid, current_setting('id.workItemTypeGroup')::uuid, current_setting('id.workItemType')::uuid, '2018-09-17 16:01'); diff --git a/migration/sql-test-files/110-soft-delete-entities.test.sql b/migration/sql-test-files/110-soft-delete-entities.test.sql new file mode 100644 index 0000000000..e0e45c7eef --- /dev/null +++ b/migration/sql-test-files/110-soft-delete-entities.test.sql @@ -0,0 +1,15 @@ +SET id.spaceTemplate = '{{index . 0}}'; +SET id.tracker = '{{index . 1}}'; +SET id._user = '{{index . 2}}'; + +-- This should cause all entities that reference the space template directly or +-- indirectly to be soft-deleted as well +UPDATE space_templates SET deleted_at = '2018-09-17 16:01' WHERE id = current_setting('id.spaceTemplate')::uuid; + +-- Trackers are currently not used and therefore not connected to a space or a +-- space template. That's why we delete it manually here to have a cascaded +-- delete on tracker queries and tracker items.
+UPDATE trackers SET deleted_at = '2018-09-17 16:01' WHERE id = current_setting('id.tracker')::uuid; + + +UPDATE users SET deleted_at = '2018-09-17 16:01' WHERE id = current_setting('id._user')::uuid; \ No newline at end of file diff --git a/workitem/workitem_repository.go b/workitem/workitem_repository.go index 6c5b9acb4d..3b6a25441a 100644 --- a/workitem/workitem_repository.go +++ b/workitem/workitem_repository.go @@ -364,6 +364,11 @@ func (r *GormWorkItemRepository) Delete(ctx context.Context, workitemID uuid.UUI workItem.ID = workitemID // retrieve the current version of the work item to delete r.db.Select("id, version, type").Where("id = ?", workitemID).Find(&workItem) + // store a revision of the deleted work item + _, err := r.wirr.Create(context.Background(), suppressorID, RevisionTypeDelete, workItem) + if err != nil { + return errs.Wrapf(err, "error while deleting work item") + } // delete the work item tx := r.db.Delete(workItem) if err := tx.Error; err != nil { @@ -372,11 +377,6 @@ func (r *GormWorkItemRepository) Delete(ctx context.Context, workitemID uuid.UUI if tx.RowsAffected == 0 { return errors.NewNotFoundError("work item", workitemID.String()) } - // store a revision of the deleted work item - _, err := r.wirr.Create(context.Background(), suppressorID, RevisionTypeDelete, workItem) - if err != nil { - return errs.Wrapf(err, "error while deleting work item") - } log.Debug(ctx, map[string]interface{}{"wi_id": workitemID}, "Work item deleted successfully!") return nil }