diff --git a/docs/development/core/server/kibana-plugin-core-server.savedobjectsrawdoc._type.md b/docs/development/core/server/kibana-plugin-core-server.savedobjectsrawdoc._type.md
deleted file mode 100644
index 5ef4a56e161de..0000000000000
--- a/docs/development/core/server/kibana-plugin-core-server.savedobjectsrawdoc._type.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-[Home](./index.md) > [kibana-plugin-core-server](./kibana-plugin-core-server.md) > [SavedObjectsRawDoc](./kibana-plugin-core-server.savedobjectsrawdoc.md) > [\_type](./kibana-plugin-core-server.savedobjectsrawdoc._type.md)
-
-## SavedObjectsRawDoc.\_type property
-
-Signature:
-
-```typescript
-_type?: string;
-```
diff --git a/docs/development/core/server/kibana-plugin-core-server.savedobjectsrawdoc.md b/docs/development/core/server/kibana-plugin-core-server.savedobjectsrawdoc.md
index 4dd4632727634..54bca496b9930 100644
--- a/docs/development/core/server/kibana-plugin-core-server.savedobjectsrawdoc.md
+++ b/docs/development/core/server/kibana-plugin-core-server.savedobjectsrawdoc.md
@@ -20,5 +20,4 @@ export interface SavedObjectsRawDoc
| [\_primary\_term](./kibana-plugin-core-server.savedobjectsrawdoc._primary_term.md) | number | |
| [\_seq\_no](./kibana-plugin-core-server.savedobjectsrawdoc._seq_no.md) | number | |
| [\_source](./kibana-plugin-core-server.savedobjectsrawdoc._source.md) | SavedObjectsRawDocSource | |
-| [\_type](./kibana-plugin-core-server.savedobjectsrawdoc._type.md) | string | |
diff --git a/rfcs/text/0013_saved_object_migrations.md b/rfcs/text/0013_saved_object_migrations.md
index 6e125c28c04c0..6f5ab280a4612 100644
--- a/rfcs/text/0013_saved_object_migrations.md
+++ b/rfcs/text/0013_saved_object_migrations.md
@@ -214,31 +214,43 @@ Note:
2. If the source is a < v6.5 `.kibana` index or < 7.4 `.kibana_task_manager`
index prepare the legacy index for a migration:
1. Mark the legacy index as read-only and wait for all in-flight operations to drain (requires https://github.com/elastic/elasticsearch/pull/58094). This prevents any further writes from outdated nodes. Assuming this API is similar to the existing `//_close` API, we expect to receive `"acknowledged" : true` and `"shards_acknowledged" : true`. If all shards don’t acknowledge within the timeout, retry the operation until it succeeds.
- 2. Clone the legacy index into a new index which has writes enabled. Use a fixed index name i.e `.kibana_pre6.5.0_001` or `.kibana_task_manager_pre7.4.0_001`. `POST /.kibana/_clone/.kibana_pre6.5.0_001?wait_for_active_shards=all {"settings": {"index.blocks.write": false}}`. Ignore errors if the clone already exists. Ignore errors if the legacy source doesn't exist.
- 3. Wait for the cloning to complete `GET /_cluster/health/.kibana_pre6.5.0_001?wait_for_status=green&timeout=60s` If cloning doesn’t complete within the 60s timeout, log a warning for visibility and poll again.
- 4. Apply the `convertToAlias` script if defined `POST /.kibana_pre6.5.0_001/_update_by_query?conflicts=proceed {"script": {...}}`. The `convertToAlias` script will have to be idempotent, preferably setting `ctx.op="noop"` on subsequent runs to avoid unecessary writes.
+ 2. Create a new index which will become the source index after the legacy
+ pre-migration is complete. This index should have the same mappings as
+ the legacy index. Use a fixed index name i.e `.kibana_pre6.5.0_001` or
+ `.kibana_task_manager_pre7.4.0_001`. Ignore index already exists errors.
+ 3. Reindex the legacy index into the new source index with the
+ `convertToAlias` script if specified. Use `wait_for_completion: false`
+ to run this as a task. Ignore errors if the legacy source doesn't exist.
+ 4. Wait for the reindex task to complete. If the task doesn’t complete
+ within the 60s timeout, log a warning for visibility and poll again.
+ Ignore errors if the legacy source doesn't exist.
5. Delete the legacy index and replace it with an alias of the same name
```
POST /_aliases
{
"actions" : [
- { "add": { "index": ".kibana_pre6.5.0_001", "alias": ".kibana" } },
{ "remove_index": { "index": ".kibana" } }
+ { "add": { "index": ".kibana_pre6.5.0_001", "alias": ".kibana" } },
]
}
```.
Unlike the delete index API, the `remove_index` action will fail if
- provided with an _alias_. Ignore "The provided expression [.kibana]
- matches an alias, specify the corresponding concrete indices instead."
- or "index_not_found_exception" errors. These actions are applied
- atomically so that other Kibana instances will always see either a
- `.kibana` index or an alias, but never neither.
- 6. Use the cloned `.kibana_pre6.5.0_001` as the source for the rest of the migration algorithm.
+ provided with an _alias_. Therefore, if another instance completed this
+ step, the `.kibana` alias won't be added to `.kibana_pre6.5.0_001` a
+ second time. This avoids a situation where `.kibana` could point to both
+ `.kibana_pre6.5.0_001` and `.kibana_7.10.0_001`. These actions are
+ applied atomically so that other Kibana instances will always see either
+ a `.kibana` index or an alias, but never neither.
+
+ Ignore "The provided expression [.kibana] matches an alias, specify the
+ corresponding concrete indices instead." or "index_not_found_exception"
+ errors as this means another instance has already completed this step.
+ 6. Use the reindexed legacy `.kibana_pre6.5.0_001` as the source for the rest of the migration algorithm.
3. If `.kibana` and `.kibana_7.10.0` both exists and are pointing to the same index this version's migration has already been completed.
1. Because the same version can have plugins enabled at any point in time,
- perform the mappings update in step (6) and migrate outdated documents
- with step (7).
- 2. Skip to step (9) to start serving traffic.
+ perform the mappings update in step (7) and migrate outdated documents
+ with step (8).
+ 2. Skip to step (10) to start serving traffic.
4. Fail the migration if:
1. `.kibana` is pointing to an index that belongs to a later version of Kibana .e.g. `.kibana_7.12.0_001`
2. (Only in 8.x) The source index contains documents that belong to an unknown Saved Object type (from a disabled plugin). Log an error explaining that the plugin that created these documents needs to be enabled again or that these objects should be deleted. See section (4.2.1.4).
diff --git a/src/core/public/public.api.md b/src/core/public/public.api.md
index 50e8cca75737e..64833c21df6bd 100644
--- a/src/core/public/public.api.md
+++ b/src/core/public/public.api.md
@@ -7,6 +7,7 @@
import { Action } from 'history';
import { ApiResponse } from '@elastic/elasticsearch/lib/Transport';
import Boom from '@hapi/boom';
+import { ConfigDeprecationProvider } from '@kbn/config';
import { ConfigPath } from '@kbn/config';
import { EnvironmentMode } from '@kbn/config';
import { EuiBreadcrumb } from '@elastic/eui';
@@ -18,7 +19,6 @@ import { History } from 'history';
import { Href } from 'history';
import { IconType } from '@elastic/eui';
import { KibanaClient } from '@elastic/elasticsearch/api/kibana';
-import { KibanaConfigType } from 'src/core/server/kibana_config';
import { Location } from 'history';
import { LocationDescriptorObject } from 'history';
import { Logger } from '@kbn/logging';
diff --git a/src/core/server/elasticsearch/client/mocks.ts b/src/core/server/elasticsearch/client/mocks.ts
index bedd0e65c5a83..02b4b08502d96 100644
--- a/src/core/server/elasticsearch/client/mocks.ts
+++ b/src/core/server/elasticsearch/client/mocks.ts
@@ -22,7 +22,9 @@ import type { DeeplyMockedKeys } from '@kbn/utility-types/jest';
import { ElasticsearchClient } from './types';
import { ICustomClusterClient } from './cluster_client';
-const createInternalClientMock = (): DeeplyMockedKeys => {
+const createInternalClientMock = (
+ res?: MockedTransportRequestPromise
+): DeeplyMockedKeys => {
// we mimic 'reflection' on a concrete instance of the client to generate the mocked functions.
const client = new Client({
node: 'http://localhost',
@@ -59,7 +61,7 @@ const createInternalClientMock = (): DeeplyMockedKeys => {
.filter(([key]) => !omitted.includes(key))
.forEach(([key, descriptor]) => {
if (typeof descriptor.value === 'function') {
- obj[key] = jest.fn(() => createSuccessTransportRequestPromise({}));
+ obj[key] = jest.fn(() => res ?? createSuccessTransportRequestPromise({}));
} else if (typeof obj[key] === 'object' && obj[key] != null) {
mockify(obj[key], omitted);
}
@@ -95,8 +97,8 @@ const createInternalClientMock = (): DeeplyMockedKeys => {
export type ElasticsearchClientMock = DeeplyMockedKeys;
-const createClientMock = (): ElasticsearchClientMock =>
- (createInternalClientMock() as unknown) as ElasticsearchClientMock;
+const createClientMock = (res?: MockedTransportRequestPromise): ElasticsearchClientMock =>
+ (createInternalClientMock(res) as unknown) as ElasticsearchClientMock;
export interface ScopedClusterClientMock {
asInternalUser: ElasticsearchClientMock;
diff --git a/src/core/server/saved_objects/migrations/core/document_migrator.ts b/src/core/server/saved_objects/migrations/core/document_migrator.ts
index ccda72702b53c..f30ec4634fb7a 100644
--- a/src/core/server/saved_objects/migrations/core/document_migrator.ts
+++ b/src/core/server/saved_objects/migrations/core/document_migrator.ts
@@ -312,7 +312,7 @@ function wrapWithTry(
const failedTransform = `${type}:${version}`;
const failedDoc = JSON.stringify(doc);
log.warn(
- `Failed to transform document ${doc}. Transform: ${failedTransform}\nDoc: ${failedDoc}`
+ `Failed to transform document ${doc?.id}. Transform: ${failedTransform}\nDoc: ${failedDoc}`
);
throw error;
}
diff --git a/src/core/server/saved_objects/migrations/core/migration_context.ts b/src/core/server/saved_objects/migrations/core/migration_context.ts
index 0ea362d65623e..33a389ccaeec8 100644
--- a/src/core/server/saved_objects/migrations/core/migration_context.ts
+++ b/src/core/server/saved_objects/migrations/core/migration_context.ts
@@ -24,7 +24,7 @@
* serves as a central blueprint for what migrations will end up doing.
*/
-import { Logger } from 'src/core/server/logging';
+import { Logger } from '../../../logging';
import { MigrationEsClient } from './migration_es_client';
import { SavedObjectsSerializer } from '../../serialization';
import {
diff --git a/src/core/server/saved_objects/migrations/kibana/kibana_migrator.mock.ts b/src/core/server/saved_objects/migrations/kibana/kibana_migrator.mock.ts
index 23d8c4518d3ab..da4d39f435038 100644
--- a/src/core/server/saved_objects/migrations/kibana/kibana_migrator.mock.ts
+++ b/src/core/server/saved_objects/migrations/kibana/kibana_migrator.mock.ts
@@ -16,9 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-import type { PublicMethodsOf } from '@kbn/utility-types';
-
-import { KibanaMigrator, KibanaMigratorStatus } from './kibana_migrator';
+import { IKibanaMigrator, KibanaMigratorStatus } from './kibana_migrator';
import { buildActiveMappings } from '../core';
const { mergeTypes } = jest.requireActual('./kibana_migrator');
import { SavedObjectsType } from '../../types';
@@ -45,7 +43,16 @@ const createMigrator = (
types: SavedObjectsType[];
} = { types: defaultSavedObjectTypes }
) => {
- const mockMigrator: jest.Mocked> = {
+ const mockMigrator: jest.Mocked = {
+ kibanaVersion: '8.0.0-testing',
+ savedObjectsConfig: {
+ batchSize: 100,
+ scrollDuration: '15m',
+ pollInterval: 1500,
+ skip: false,
+ // TODO migrationsV2: remove/deprecate once we release migrations v2
+ enableV2: false,
+ },
runMigrations: jest.fn(),
getActiveMappings: jest.fn(),
migrateDocument: jest.fn(),
diff --git a/src/core/server/saved_objects/migrations/kibana/kibana_migrator.test.ts b/src/core/server/saved_objects/migrations/kibana/kibana_migrator.test.ts
index 7eb2cfefe4620..4248f6fdbeca4 100644
--- a/src/core/server/saved_objects/migrations/kibana/kibana_migrator.test.ts
+++ b/src/core/server/saved_objects/migrations/kibana/kibana_migrator.test.ts
@@ -23,6 +23,7 @@ import { KibanaMigratorOptions, KibanaMigrator } from './kibana_migrator';
import { loggingSystemMock } from '../../../logging/logging_system.mock';
import { SavedObjectTypeRegistry } from '../../saved_objects_type_registry';
import { SavedObjectsType } from '../../types';
+import { errors as esErrors } from '@elastic/elasticsearch';
const createRegistry = (types: Array>) => {
const registry = new SavedObjectTypeRegistry();
@@ -89,38 +90,188 @@ describe('KibanaMigrator', () => {
expect(options.client.cat.templates).toHaveBeenCalledTimes(1);
});
- it('emits results on getMigratorResult$()', async () => {
- const options = mockOptions();
+ describe('when enableV2 = false', () => {
+ it('when enableV2 = false creates an IndexMigrator which retries NoLivingConnectionsError errors from ES client', async () => {
+ const options = mockOptions();
- options.client.cat.templates.mockReturnValue(
- elasticsearchClientMock.createSuccessTransportRequestPromise(
- { templates: [] },
- { statusCode: 404 }
- )
- );
- options.client.indices.get.mockReturnValue(
- elasticsearchClientMock.createSuccessTransportRequestPromise({}, { statusCode: 404 })
- );
- options.client.indices.getAlias.mockReturnValue(
- elasticsearchClientMock.createSuccessTransportRequestPromise({}, { statusCode: 404 })
- );
+ options.client.cat.templates.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise(
+ { templates: [] },
+ { statusCode: 404 }
+ )
+ );
+ options.client.indices.get.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise({}, { statusCode: 404 })
+ );
+ options.client.indices.getAlias.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise({}, { statusCode: 404 })
+ );
- const migrator = new KibanaMigrator(options);
- const migratorStatus = migrator.getStatus$().pipe(take(3)).toPromise();
- await migrator.runMigrations();
- const { status, result } = await migratorStatus;
- expect(status).toEqual('completed');
- expect(result![0]).toMatchObject({
- destIndex: '.my-index_1',
- elapsedMs: expect.any(Number),
- sourceIndex: '.my-index',
- status: 'migrated',
+ options.client.indices.create = jest
+ .fn()
+ .mockReturnValueOnce(
+ elasticsearchClientMock.createErrorTransportRequestPromise(
+ new esErrors.NoLivingConnectionsError('reason', {} as any)
+ )
+ )
+ .mockImplementationOnce(() =>
+ elasticsearchClientMock.createSuccessTransportRequestPromise('success')
+ );
+
+ const migrator = new KibanaMigrator(options);
+ const migratorStatus = migrator.getStatus$().pipe(take(3)).toPromise();
+ await migrator.runMigrations();
+
+ expect(options.client.indices.create).toHaveBeenCalledTimes(3);
+ const { status } = await migratorStatus;
+ return expect(status).toEqual('completed');
+ });
+
+ it('emits results on getMigratorResult$()', async () => {
+ const options = mockOptions();
+
+ options.client.cat.templates.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise(
+ { templates: [] },
+ { statusCode: 404 }
+ )
+ );
+ options.client.indices.get.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise({}, { statusCode: 404 })
+ );
+ options.client.indices.getAlias.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise({}, { statusCode: 404 })
+ );
+
+ const migrator = new KibanaMigrator(options);
+ const migratorStatus = migrator.getStatus$().pipe(take(3)).toPromise();
+ await migrator.runMigrations();
+ const { status, result } = await migratorStatus;
+ expect(status).toEqual('completed');
+ expect(result![0]).toMatchObject({
+ destIndex: '.my-index_1',
+ elapsedMs: expect.any(Number),
+ sourceIndex: '.my-index',
+ status: 'migrated',
+ });
+ expect(result![1]).toMatchObject({
+ destIndex: 'other-index_1',
+ elapsedMs: expect.any(Number),
+ sourceIndex: 'other-index',
+ status: 'migrated',
+ });
+ });
+ });
+ describe('when enableV2 = true', () => {
+ beforeEach(() => {
+ jest.clearAllMocks();
});
- expect(result![1]).toMatchObject({
- destIndex: 'other-index_1',
- elapsedMs: expect.any(Number),
- sourceIndex: 'other-index',
- status: 'migrated',
+
+ it('creates a V2 migrator that initializes a new index and migrates an existing index', async () => {
+ const options = mockV2MigrationOptions();
+ const migrator = new KibanaMigrator(options);
+ const migratorStatus = migrator.getStatus$().pipe(take(3)).toPromise();
+ await migrator.runMigrations();
+
+ // Basic assertions that we're creating and reindexing the expected indices
+ expect(options.client.indices.create).toHaveBeenCalledTimes(3);
+ expect(options.client.indices.create.mock.calls).toEqual(
+ expect.arrayContaining([
+ // LEGACY_CREATE_REINDEX_TARGET
+ expect.arrayContaining([expect.objectContaining({ index: '.my-index_pre8.2.3_001' })]),
+ // CREATE_REINDEX_TEMP
+ expect.arrayContaining([
+ expect.objectContaining({ index: '.my-index_8.2.3_reindex_temp' }),
+ ]),
+ // CREATE_NEW_TARGET
+ expect.arrayContaining([expect.objectContaining({ index: 'other-index_8.2.3_001' })]),
+ ])
+ );
+ // LEGACY_REINDEX
+ expect(options.client.reindex.mock.calls[0][0]).toEqual(
+ expect.objectContaining({
+ body: expect.objectContaining({
+ source: expect.objectContaining({ index: '.my-index' }),
+ dest: expect.objectContaining({ index: '.my-index_pre8.2.3_001' }),
+ }),
+ })
+ );
+ // REINDEX_SOURCE_TO_TEMP
+ expect(options.client.reindex.mock.calls[1][0]).toEqual(
+ expect.objectContaining({
+ body: expect.objectContaining({
+ source: expect.objectContaining({ index: '.my-index_pre8.2.3_001' }),
+ dest: expect.objectContaining({ index: '.my-index_8.2.3_reindex_temp' }),
+ }),
+ })
+ );
+ const { status } = await migratorStatus;
+ return expect(status).toEqual('completed');
+ });
+ it('emits results on getMigratorResult$()', async () => {
+ const options = mockV2MigrationOptions();
+ const migrator = new KibanaMigrator(options);
+ const migratorStatus = migrator.getStatus$().pipe(take(3)).toPromise();
+ await migrator.runMigrations();
+
+ const { status, result } = await migratorStatus;
+ expect(status).toEqual('completed');
+ expect(result![0]).toMatchObject({
+ destIndex: '.my-index_8.2.3_001',
+ sourceIndex: '.my-index_pre8.2.3_001',
+ elapsedMs: expect.any(Number),
+ status: 'migrated',
+ });
+ expect(result![1]).toMatchObject({
+ destIndex: 'other-index_8.2.3_001',
+ elapsedMs: expect.any(Number),
+ status: 'patched',
+ });
+ });
+ it('rejects when the migration state machine terminates in a FATAL state', () => {
+ const options = mockV2MigrationOptions();
+ options.client.indices.get.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise(
+ {
+ '.my-index_8.2.4_001': {
+ aliases: {
+ '.my-index': {},
+ '.my-index_8.2.4': {},
+ },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ },
+ { statusCode: 200 }
+ )
+ );
+
+ const migrator = new KibanaMigrator(options);
+ return expect(migrator.runMigrations()).rejects.toMatchInlineSnapshot(
+ `[Error: Unable to complete saved object migrations for the [.my-index] index: The .my-index alias is pointing to a newer version of Kibana: v8.2.4]`
+ );
+ });
+ it('rejects when an unexpected exception occurs in an action', async () => {
+ const options = mockV2MigrationOptions();
+ options.client.tasks.get.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise({
+ completed: true,
+ error: { type: 'elatsicsearch_exception', reason: 'task failed with an error' },
+ failures: [],
+ task: { description: 'task description' },
+ })
+ );
+
+ const migrator = new KibanaMigrator(options);
+
+ await expect(migrator.runMigrations()).rejects.toMatchInlineSnapshot(`
+ [Error: Unable to complete saved object migrations for the [.my-index] index. Please check the health of your Elasticsearch cluster and try again. Error: Reindex failed with the following error:
+ {"_tag":"Some","value":{"type":"elatsicsearch_exception","reason":"task failed with an error"}}]
+ `);
+ expect(loggingSystemMock.collect(options.logger).error[0][0]).toMatchInlineSnapshot(`
+ [Error: Reindex failed with the following error:
+ {"_tag":"Some","value":{"type":"elatsicsearch_exception","reason":"task failed with an error"}}]
+ `);
});
});
});
@@ -130,7 +281,40 @@ type MockedOptions = KibanaMigratorOptions & {
client: ReturnType;
};
-const mockOptions = () => {
+const mockV2MigrationOptions = () => {
+ const options = mockOptions({ enableV2: true });
+
+ options.client.indices.get.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise(
+ {
+ '.my-index': {
+ aliases: { '.kibana': {} },
+ mappings: { properties: {} },
+ settings: {},
+ },
+ },
+ { statusCode: 200 }
+ )
+ );
+ options.client.indices.addBlock.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise({ acknowledged: true })
+ );
+ options.client.reindex.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise({ taskId: 'reindex_task_id' })
+ );
+ options.client.tasks.get.mockReturnValue(
+ elasticsearchClientMock.createSuccessTransportRequestPromise({
+ completed: true,
+ error: undefined,
+ failures: [],
+ task: { description: 'task description' },
+ })
+ );
+
+ return options;
+};
+
+const mockOptions = ({ enableV2 }: { enableV2: boolean } = { enableV2: false }) => {
const options: MockedOptions = {
logger: loggingSystemMock.create().get(),
kibanaVersion: '8.2.3',
@@ -144,7 +328,7 @@ const mockOptions = () => {
name: { type: 'keyword' },
},
},
- migrations: {},
+ migrations: { '8.2.3': jest.fn().mockImplementation((doc) => doc) },
},
{
name: 'testtype2',
@@ -168,6 +352,7 @@ const mockOptions = () => {
pollInterval: 20000,
scrollDuration: '10m',
skip: false,
+ enableV2,
},
client: elasticsearchClientMock.createElasticsearchClient(),
};
diff --git a/src/core/server/saved_objects/migrations/kibana/kibana_migrator.ts b/src/core/server/saved_objects/migrations/kibana/kibana_migrator.ts
index 18a385c6994b8..12db79a1067ed 100644
--- a/src/core/server/saved_objects/migrations/kibana/kibana_migrator.ts
+++ b/src/core/server/saved_objects/migrations/kibana/kibana_migrator.ts
@@ -22,27 +22,40 @@
* (the shape of the mappings and documents in the index).
*/
-import { KibanaConfigType } from 'src/core/server/kibana_config';
import { BehaviorSubject } from 'rxjs';
-
+import { KibanaConfigType } from '../../../kibana_config';
+import { ElasticsearchClient } from '../../../elasticsearch';
import { Logger } from '../../../logging';
import { IndexMapping, SavedObjectsTypeMappingDefinitions } from '../../mappings';
-import { SavedObjectUnsanitizedDoc, SavedObjectsSerializer } from '../../serialization';
-import { buildActiveMappings, IndexMigrator, MigrationResult, MigrationStatus } from '../core';
+import {
+ SavedObjectUnsanitizedDoc,
+ SavedObjectsSerializer,
+ SavedObjectsRawDoc,
+} from '../../serialization';
+import {
+ buildActiveMappings,
+ createMigrationEsClient,
+ IndexMigrator,
+ MigrationResult,
+ MigrationStatus,
+} from '../core';
import { DocumentMigrator, VersionedTransformer } from '../core/document_migrator';
-import { MigrationEsClient } from '../core/';
import { createIndexMap } from '../core/build_index_map';
import { SavedObjectsMigrationConfigType } from '../../saved_objects_config';
import { ISavedObjectTypeRegistry } from '../../saved_objects_type_registry';
import { SavedObjectsType } from '../../types';
+import { runResilientMigrator } from '../../migrationsv2';
+import { migrateRawDocs } from '../core/migrate_raw_docs';
+import { MigrationLogger } from '../core/migration_logger';
export interface KibanaMigratorOptions {
- client: MigrationEsClient;
+ client: ElasticsearchClient;
typeRegistry: ISavedObjectTypeRegistry;
savedObjectsConfig: SavedObjectsMigrationConfigType;
kibanaConfig: KibanaConfigType;
kibanaVersion: string;
logger: Logger;
+ migrationsRetryDelay?: number;
}
export type IKibanaMigrator = Pick;
@@ -56,8 +69,7 @@ export interface KibanaMigratorStatus {
* Manages the shape of mappings and documents in the Kibana index.
*/
export class KibanaMigrator {
- private readonly client: MigrationEsClient;
- private readonly savedObjectsConfig: SavedObjectsMigrationConfigType;
+ private readonly client: ElasticsearchClient;
private readonly documentMigrator: VersionedTransformer;
private readonly kibanaConfig: KibanaConfigType;
private readonly log: Logger;
@@ -69,6 +81,11 @@ export class KibanaMigrator {
status: 'waiting',
});
private readonly activeMappings: IndexMapping;
+ private migrationsRetryDelay?: number;
+ // TODO migrationsV2: make private once we release migrations v2
+ public kibanaVersion: string;
+ // TODO migrationsV2: make private once we release migrations v2
+ public readonly savedObjectsConfig: SavedObjectsMigrationConfigType;
/**
* Creates an instance of KibanaMigrator.
@@ -80,6 +97,7 @@ export class KibanaMigrator {
savedObjectsConfig,
kibanaVersion,
logger,
+ migrationsRetryDelay,
}: KibanaMigratorOptions) {
this.client = client;
this.kibanaConfig = kibanaConfig;
@@ -88,6 +106,7 @@ export class KibanaMigrator {
this.serializer = new SavedObjectsSerializer(this.typeRegistry);
this.mappingProperties = mergeTypes(this.typeRegistry.getAllTypes());
this.log = logger;
+ this.kibanaVersion = kibanaVersion;
this.documentMigrator = new DocumentMigrator({
kibanaVersion,
typeRegistry,
@@ -96,6 +115,7 @@ export class KibanaMigrator {
// Building the active mappings (and associated md5sums) is an expensive
// operation so we cache the result
this.activeMappings = buildActiveMappings(this.mappingProperties);
+ this.migrationsRetryDelay = migrationsRetryDelay;
}
/**
@@ -151,21 +171,45 @@ export class KibanaMigrator {
});
const migrators = Object.keys(indexMap).map((index) => {
- return new IndexMigrator({
- batchSize: this.savedObjectsConfig.batchSize,
- client: this.client,
- documentMigrator: this.documentMigrator,
- index,
- log: this.log,
- mappingProperties: indexMap[index].typeMappings,
- pollInterval: this.savedObjectsConfig.pollInterval,
- scrollDuration: this.savedObjectsConfig.scrollDuration,
- serializer: this.serializer,
- // Only necessary for the migrator of the kibana index.
- obsoleteIndexTemplatePattern:
- index === kibanaIndexName ? 'kibana_index_template*' : undefined,
- convertToAliasScript: indexMap[index].script,
- });
+ // TODO migrationsV2: remove old migrations algorithm
+ if (this.savedObjectsConfig.enableV2) {
+ return {
+ migrate: (): Promise => {
+ return runResilientMigrator({
+ client: this.client,
+ kibanaVersion: this.kibanaVersion,
+ targetMappings: buildActiveMappings(indexMap[index].typeMappings),
+ logger: this.log,
+ preMigrationScript: indexMap[index].script,
+ transformRawDocs: (rawDocs: SavedObjectsRawDoc[]) =>
+ migrateRawDocs(
+ this.serializer,
+ this.documentMigrator.migrate,
+ rawDocs,
+ new MigrationLogger(this.log)
+ ),
+ migrationVersionPerType: this.documentMigrator.migrationVersion,
+ indexPrefix: index,
+ });
+ },
+ };
+ } else {
+ return new IndexMigrator({
+ batchSize: this.savedObjectsConfig.batchSize,
+ client: createMigrationEsClient(this.client, this.log, this.migrationsRetryDelay),
+ documentMigrator: this.documentMigrator,
+ index,
+ log: this.log,
+ mappingProperties: indexMap[index].typeMappings,
+ pollInterval: this.savedObjectsConfig.pollInterval,
+ scrollDuration: this.savedObjectsConfig.scrollDuration,
+ serializer: this.serializer,
+ // Only necessary for the migrator of the kibana index.
+ obsoleteIndexTemplatePattern:
+ index === kibanaIndexName ? 'kibana_index_template*' : undefined,
+ convertToAliasScript: indexMap[index].script,
+ });
+ }
});
return Promise.all(migrators.map((migrator) => migrator.migrate()));
diff --git a/src/core/server/saved_objects/migrationsv2/README.md b/src/core/server/saved_objects/migrationsv2/README.md
new file mode 100644
index 0000000000000..fcfff14ec98be
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/README.md
@@ -0,0 +1,100 @@
+## TODO
+ - [ ] Should we adopt the naming convention of event log `.kibana-event-log-8.0.0-000001`?
+ - [ ] Can we detect and throw if there's an auto-created `.kibana` index
+ with inferred mappings? If we detect this we cannot assume that `.kibana`
+ contains all the latest documents. Our algorithm might also fail because we
+   clone the `.kibana` index with its faulty mappings which can prevent us
+ from updating the mappings to the correct ones. We can ask users to verify
+ their indices to identify where the most up to date documents are located
+ (e.g. in `.kibana`, `.kibana_N` or perhaps a combination of both). We can
+ prepare a `.kibana_7.11.0_001` index and ask users to manually reindex
+ documents into this index.
+
+## Manual QA Test Plan
+### 1. Legacy pre-migration
+When upgrading from a legacy index additional steps are required before the
+regular migration process can start.
+
+We have the following potential legacy indices:
+ - v5.x index that wasn't upgraded -> kibana should refuse to start the migration
+ - v5.x index that was upgraded to v6.x: `.kibana-6` _index_ with `.kibana` _alias_
+ - < v6.5 `.kibana` _index_ (Saved Object Migrations were
+ introduced in v6.5 https://github.com/elastic/kibana/pull/20243)
+ - TODO: Test versions which introduced the `kibana_index_template` template?
+ - < v7.4 `.kibana_task_manager` _index_ (Task Manager started
+ using Saved Objects in v7.4 https://github.com/elastic/kibana/pull/39829)
+
+Test plan:
+1. Ensure that the different versions of Kibana listed above can successfully
+ upgrade to 7.11.
+2. Ensure that multiple Kibana nodes can migrate a legacy index in parallel
+ (choose a representative legacy version to test with e.g. v6.4). Add a lot
+ of Saved Objects to Kibana to increase the time it takes for a migration to
+ complete which will make it easier to introduce failures.
+ 1. If all instances are started in parallel the upgrade should succeed
+ 2. If nodes are randomly restarted shortly after they start participating
+ in the migration the upgrade should either succeed or never complete.
+ However, if a fatal error occurs it should never result in permanent
+ failure.
+ 1. Start one instance, wait 500 ms
+ 2. Start a second instance
+ 3. If an instance starts a saved object migration, wait X ms before
+ killing the process and restarting the migration.
+ 4. Keep decreasing X until migrations are barely able to complete.
+ 5. If a migration fails with a fatal error, start a Kibana that doesn't
+ get restarted. Given enough time, it should always be able to
+ successfully complete the migration.
+
+For a successful migration the following behaviour should be observed:
+ 1. The `.kibana` index should be reindexed into a `.kibana_pre6.5.0` index
+ 2. The `.kibana` index should be deleted
+ 3. The `.kibana_index_template` should be deleted
+ 4. The `.kibana_pre6.5.0` index should have a write block applied
+ 5. Documents from `.kibana_pre6.5.0` should be migrated into `.kibana_7.11.0_001`
+ 6. Once migration has completed, the `.kibana_current` and `.kibana_7.11.0`
+ aliases should point to the `.kibana_7.11.0_001` index.
+
+### 2. Plugins enabled/disabled
+Kibana plugins can be disabled/enabled at any point in time. We need to ensure
+that Saved Object documents are migrated for all the possible sequences of
+enabling, disabling, before or after a version upgrade.
+
+#### Test scenario 1 (enable a plugin after migration):
+1. Start an old version of Kibana (< 7.11)
+2. Create a document that we know will be migrated in a later version (i.e.
+ create a `dashboard`)
+3. Disable the plugin to which the document belongs (i.e `dashboard` plugin)
+4. Upgrade Kibana to v7.11 making sure the plugin in step (3) is still disabled.
+5. Enable the plugin from step (3)
+6. Restart Kibana
+7. Ensure that the document from step (2) has been migrated
+ (`migrationVersion` contains 7.11.0)
+
+#### Test scenario 2 (disable a plugin after migration):
+1. Start an old version of Kibana (< 7.11)
+2. Create a document that we know will be migrated in a later version (i.e.
+ create a `dashboard`)
+3. Upgrade Kibana to v7.11 making sure the plugin in step (2) is enabled.
+4. Disable the plugin to which the document belongs (i.e `dashboard` plugin)
+5. Restart Kibana
+6. Ensure that Kibana logs a warning, but continues to start even though there
+   are saved object documents which don't belong to an enabled plugin
+
+#### Test scenario 3 (multiple instances, enable a plugin after migration):
+Follow the steps from 'Test scenario 1', but perform the migration with
+multiple instances of Kibana
+
+#### Test scenario 4 (multiple instances, mixed plugin enabled configs):
+We don't support this upgrade scenario, but it's worth making sure we don't
+have data loss when there's a user error.
+1. Start an old version of Kibana (< 7.11)
+2. Create a document that we know will be migrated in a later version (i.e.
+ create a `dashboard`)
+3. Disable the plugin to which the document belongs (i.e `dashboard` plugin)
+4. Upgrade Kibana to v7.11 using multiple instances of Kibana. The plugin from
+ step (3) should be enabled on half of the instances and disabled on the
+ other half.
+5. Ensure that the document from step (2) has been migrated
+ (`migrationVersion` contains 7.11.0)
+
+###
\ No newline at end of file
diff --git a/src/core/server/saved_objects/migrationsv2/actions/catch_retryable_es_client_errors.test.ts b/src/core/server/saved_objects/migrationsv2/actions/catch_retryable_es_client_errors.test.ts
new file mode 100644
index 0000000000000..3186d7456383a
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/actions/catch_retryable_es_client_errors.test.ts
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { errors as esErrors } from '@elastic/elasticsearch';
+import { elasticsearchClientMock } from '../../../elasticsearch/client/mocks';
+import { catchRetryableEsClientErrors } from './catch_retryable_es_client_errors';
+
+describe('catchRetryableEsClientErrors', () => {
+ it('rejects non-retryable response errors', () => {
+ const error = new esErrors.ResponseError(
+ elasticsearchClientMock.createApiResponse({
+ body: { error: { type: 'cluster_block_exception' } },
+ statusCode: 400,
+ })
+ );
+ return expect(Promise.reject(error).catch(catchRetryableEsClientErrors)).rejects.toBe(error);
+ });
+ describe('returns left retryable_es_client_error for', () => {
+ it('NoLivingConnectionsError', async () => {
+ const error = new esErrors.NoLivingConnectionsError(
+ 'reason',
+ elasticsearchClientMock.createApiResponse()
+ );
+ expect(
+ ((await Promise.reject(error).catch(catchRetryableEsClientErrors)) as any).left
+ ).toMatchObject({
+ message: 'reason',
+ type: 'retryable_es_client_error',
+ });
+ });
+
+ it('ConnectionError', async () => {
+ const error = new esErrors.ConnectionError(
+ 'reason',
+ elasticsearchClientMock.createApiResponse()
+ );
+ expect(
+ ((await Promise.reject(error).catch(catchRetryableEsClientErrors)) as any).left
+ ).toMatchObject({
+ message: 'reason',
+ type: 'retryable_es_client_error',
+ });
+ });
+ it('TimeoutError', async () => {
+ const error = new esErrors.TimeoutError(
+ 'reason',
+ elasticsearchClientMock.createApiResponse()
+ );
+ expect(
+ ((await Promise.reject(error).catch(catchRetryableEsClientErrors)) as any).left
+ ).toMatchObject({
+ message: 'reason',
+ type: 'retryable_es_client_error',
+ });
+ });
+ it('ResponseError of type snapshot_in_progress_exception', async () => {
+ const error = new esErrors.ResponseError(
+ elasticsearchClientMock.createApiResponse({
+ body: { error: { type: 'snapshot_in_progress_exception' } },
+ })
+ );
+ expect(
+ ((await Promise.reject(error).catch(catchRetryableEsClientErrors)) as any).left
+ ).toMatchObject({
+ message: 'snapshot_in_progress_exception',
+ type: 'retryable_es_client_error',
+ });
+ });
+ it('ResponseError with retryable status code', async () => {
+ const statusCodes = [503, 401, 403, 408, 410];
+ return Promise.all(
+ statusCodes.map(async (status) => {
+ const error = new esErrors.ResponseError(
+ elasticsearchClientMock.createApiResponse({
+ statusCode: status,
+ body: { error: { type: 'reason' } },
+ })
+ );
+ expect(
+ ((await Promise.reject(error).catch(catchRetryableEsClientErrors)) as any).left
+ ).toMatchObject({
+ message: 'reason',
+ type: 'retryable_es_client_error',
+ });
+ })
+ );
+ });
+ });
+});
diff --git a/src/core/server/saved_objects/migrationsv2/actions/catch_retryable_es_client_errors.ts b/src/core/server/saved_objects/migrationsv2/actions/catch_retryable_es_client_errors.ts
new file mode 100644
index 0000000000000..c520f50eb5644
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/actions/catch_retryable_es_client_errors.ts
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import * as Either from 'fp-ts/lib/Either';
+import { errors as EsErrors } from '@elastic/elasticsearch';
+
+const retryResponseStatuses = [
+ 503, // ServiceUnavailable
+  401, // Unauthorized — ES AuthenticationException
+  403, // Forbidden — ES AuthorizationException
+ 408, // RequestTimeout
+ 410, // Gone
+];
+
+export interface RetryableEsClientError {
+ type: 'retryable_es_client_error';
+ message: string;
+ error?: Error;
+}
+
+export const catchRetryableEsClientErrors = (
+ e: EsErrors.ElasticsearchClientError
+): Either.Either<RetryableEsClientError, never> => {
+ if (
+ e instanceof EsErrors.NoLivingConnectionsError ||
+ e instanceof EsErrors.ConnectionError ||
+ e instanceof EsErrors.TimeoutError ||
+ (e instanceof EsErrors.ResponseError &&
+ (retryResponseStatuses.includes(e.statusCode) ||
+ // ES returns a 400 Bad Request when trying to close or delete an
+ // index while snapshots are in progress. This should have been a 503
+ // so once https://github.com/elastic/elasticsearch/issues/65883 is
+ // fixed we can remove this.
+ e.body?.error?.type === 'snapshot_in_progress_exception'))
+ ) {
+ return Either.left({
+ type: 'retryable_es_client_error' as const,
+ message: e.message,
+ error: e,
+ });
+ } else {
+ throw e;
+ }
+};
diff --git a/src/core/server/saved_objects/migrationsv2/actions/index.test.ts b/src/core/server/saved_objects/migrationsv2/actions/index.test.ts
new file mode 100644
index 0000000000000..226813a33d8de
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/actions/index.test.ts
@@ -0,0 +1,200 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import * as Actions from './';
+import { catchRetryableEsClientErrors } from './catch_retryable_es_client_errors';
+import { errors as EsErrors } from '@elastic/elasticsearch';
+jest.mock('./catch_retryable_es_client_errors');
+import { elasticsearchClientMock } from '../../../elasticsearch/client/mocks';
+import * as Option from 'fp-ts/lib/Option';
+
+describe('actions', () => {
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
+ // Create a mock client that rejects all methods with a 503 statuscode
+ // response.
+ const retryableError = new EsErrors.ResponseError(
+ elasticsearchClientMock.createApiResponse({
+ statusCode: 503,
+ body: { error: { type: 'es_type', reason: 'es_reason' } },
+ })
+ );
+ const client = elasticsearchClientMock.createInternalClient(
+ elasticsearchClientMock.createErrorTransportRequestPromise(retryableError)
+ );
+
+ describe('fetchIndices', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.fetchIndices(client, ['my_index']);
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+
+ describe('setWriteBlock', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.setWriteBlock(client, 'my_index');
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+
+ describe('cloneIndex', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.cloneIndex(client, 'my_source_index', 'my_target_index');
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+
+ describe('pickupUpdatedMappings', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.pickupUpdatedMappings(client, 'my_index');
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+
+ describe('reindex', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.reindex(
+ client,
+ 'my_source_index',
+ 'my_target_index',
+ Option.none,
+ false
+ );
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+
+ describe('waitForReindexTask', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.waitForReindexTask(client, 'my task id', '60s');
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+
+ describe('waitForPickupUpdatedMappingsTask', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.waitForPickupUpdatedMappingsTask(client, 'my task id', '60s');
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+
+ describe('updateAliases', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.updateAliases(client, []);
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+
+ describe('createIndex', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.createIndex(client, 'new_index', { properties: {} });
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+
+ describe('updateAndPickupMappings', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.updateAndPickupMappings(client, 'new_index', { properties: {} });
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+
+ describe('searchForOutdatedDocuments', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.searchForOutdatedDocuments(client, 'new_index', { properties: {} });
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+
+ describe('bulkOverwriteTransformedDocuments', () => {
+ it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
+ const task = Actions.bulkOverwriteTransformedDocuments(client, 'new_index', []);
+ try {
+ await task();
+ } catch (e) {
+ /** ignore */
+ }
+
+ expect(catchRetryableEsClientErrors).toHaveBeenCalledWith(retryableError);
+ });
+ });
+});
diff --git a/src/core/server/saved_objects/migrationsv2/actions/index.ts b/src/core/server/saved_objects/migrationsv2/actions/index.ts
new file mode 100644
index 0000000000000..07d130690af77
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/actions/index.ts
@@ -0,0 +1,892 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import * as Either from 'fp-ts/lib/Either';
+import * as TaskEither from 'fp-ts/lib/TaskEither';
+import * as Option from 'fp-ts/lib/Option';
+import { ElasticsearchClientError } from '@elastic/elasticsearch/lib/errors';
+import { pipe } from 'fp-ts/lib/pipeable';
+import { errors as EsErrors } from '@elastic/elasticsearch';
+import { flow } from 'fp-ts/lib/function';
+import { ElasticsearchClient } from '../../../elasticsearch';
+import { IndexMapping } from '../../mappings';
+import { SavedObjectsRawDoc } from '../../serialization';
+import {
+ catchRetryableEsClientErrors,
+ RetryableEsClientError,
+} from './catch_retryable_es_client_errors';
+export { RetryableEsClientError };
+
+export const isRetryableEsClientResponse = (
+  res: Either.Either<any, unknown>
+): res is Either.Left<RetryableEsClientError> => {
+ return Either.isLeft(res) && res.left.type === 'retryable_es_client_error';
+};
+
+/**
+ * Batch size for updateByQuery, reindex & search operations. Smaller batches
+ * reduce the memory pressure on Elasticsearch and Kibana so are less likely
+ * to cause failures.
+ * TODO (profile/tune): How much smaller can we make this number before it
+ * starts impacting how long migrations take to perform?
+ */
+const BATCH_SIZE = 1000;
+const DEFAULT_TIMEOUT = '60s';
+/** Allocate 1 replica if there are enough data nodes, otherwise continue with 0 */
+const INDEX_AUTO_EXPAND_REPLICAS = '0-1';
+/** ES rule of thumb: shards should be several GB to 10's of GB, so Kibana is unlikely to cross that limit */
+const INDEX_NUMBER_OF_SHARDS = 1;
+/** Wait for all shards to be active before starting an operation */
+const WAIT_FOR_ALL_SHARDS_TO_BE_ACTIVE = 'all';
+
+export type FetchIndexResponse = Record<
+ string,
+  { aliases: Record<string, unknown>; mappings: IndexMapping; settings: unknown }
+>;
+
+/**
+ * Fetches information about the given indices including aliases, mappings and
+ * settings.
+ */
+export const fetchIndices = (
+ client: ElasticsearchClient,
+ indicesToFetch: string[]
+): TaskEither.TaskEither<RetryableEsClientError, FetchIndexResponse> => () => {
+ return client.indices
+ .get(
+ {
+ index: indicesToFetch,
+ ignore_unavailable: true, // Don't return an error for missing indices. Note this *will* include closed indices, the docs are misleading https://github.com/elastic/elasticsearch/issues/63607
+ },
+ { ignore: [404], maxRetries: 0 }
+ )
+ .then(({ body }) => {
+ return Either.right(body);
+ })
+ .catch(catchRetryableEsClientErrors);
+};
+
+/**
+ * Sets a write block in place for the given index. If the response includes
+ * `acknowledged: true` all in-progress writes have drained and no further
+ * writes to this index will be possible.
+ *
+ * The first time the write block is added to an index the response will
+ * include `shards_acknowledged: true` but once the block is in place,
+ * subsequent calls return `shards_acknowledged: false`
+ */
+export const setWriteBlock = (
+ client: ElasticsearchClient,
+ index: string
+): TaskEither.TaskEither<
+ { type: 'index_not_found_exception' } | RetryableEsClientError,
+ 'set_write_block_succeeded'
+> => () => {
+ return client.indices
+ .addBlock<{
+ acknowledged: boolean;
+ shards_acknowledged: boolean;
+ }>(
+ {
+ index,
+ block: 'write',
+ },
+ { maxRetries: 0 /** handle retry ourselves for now */ }
+ )
+ .then((res) => {
+ return res.body.acknowledged === true
+ ? Either.right('set_write_block_succeeded' as const)
+ : Either.left({
+ type: 'retryable_es_client_error' as const,
+ message: 'set_write_block_failed',
+ });
+ })
+ .catch((e: ElasticsearchClientError) => {
+ if (e instanceof EsErrors.ResponseError) {
+ if (e.message === 'index_not_found_exception') {
+ return Either.left({ type: 'index_not_found_exception' as const });
+ }
+ }
+ throw e;
+ })
+ .catch(catchRetryableEsClientErrors);
+};
+
+/**
+ * Removes a write block from an index
+ */
+export const removeWriteBlock = (
+ client: ElasticsearchClient,
+ index: string
+): TaskEither.TaskEither<RetryableEsClientError, 'remove_write_block_succeeded'> => () => {
+ return client.indices
+ .putSettings<{
+ acknowledged: boolean;
+ shards_acknowledged: boolean;
+ }>(
+ {
+ index,
+ // Don't change any existing settings
+ preserve_existing: true,
+ body: {
+ 'index.blocks.write': false,
+ },
+ },
+ { maxRetries: 0 /** handle retry ourselves for now */ }
+ )
+ .then((res) => {
+ return res.body.acknowledged === true
+ ? Either.right('remove_write_block_succeeded' as const)
+ : Either.left({
+ type: 'retryable_es_client_error' as const,
+ message: 'remove_write_block_failed',
+ });
+ })
+ .catch(catchRetryableEsClientErrors);
+};
+
+const waitForIndexStatusGreen = (
+ client: ElasticsearchClient,
+ index: string
+): TaskEither.TaskEither<RetryableEsClientError, {}> => () => {
+ return client.cluster
+ .health({ index, wait_for_status: 'green', timeout: '30s' })
+ .then(() => {
+ return Either.right({});
+ })
+ .catch(catchRetryableEsClientErrors);
+};
+
+export type CloneIndexResponse = AcknowledgeResponse;
+
+/**
+ * Makes a clone of the source index into the target.
+ *
+ * @remarks
+ * This method adds some additional logic to the ES clone index API:
+ * - it is idempotent, if it gets called multiple times subsequent calls will
+ * wait for the first clone operation to complete (up to 60s)
+ * - the first call will wait up to 90s for the cluster state and all shards
+ * to be updated.
+ */
+export const cloneIndex = (
+ client: ElasticsearchClient,
+ source: string,
+ target: string
+): TaskEither.TaskEither<
+ RetryableEsClientError | { type: 'index_not_found_exception'; index: string },
+ CloneIndexResponse
+> => {
+ const cloneTask: TaskEither.TaskEither<
+ RetryableEsClientError | { type: 'index_not_found_exception'; index: string },
+ AcknowledgeResponse
+ > = () => {
+ return client.indices
+ .clone(
+ {
+ index: source,
+ target,
+ wait_for_active_shards: WAIT_FOR_ALL_SHARDS_TO_BE_ACTIVE,
+ body: {
+ settings: {
+ index: {
+ // The source we're cloning from will have a write block set, so
+ // we need to remove it to allow writes to our newly cloned index
+ 'blocks.write': false,
+ number_of_shards: INDEX_NUMBER_OF_SHARDS,
+ auto_expand_replicas: INDEX_AUTO_EXPAND_REPLICAS,
+ // Set an explicit refresh interval so that we don't inherit the
+ // value from incorrectly configured index templates (not required
+ // after we adopt system indices)
+ refresh_interval: '1s',
+ // Bump priority so that recovery happens before newer indices
+ priority: 10,
+ },
+ },
+ },
+ timeout: DEFAULT_TIMEOUT,
+ },
+ { maxRetries: 0 /** handle retry ourselves for now */ }
+ )
+ .then((res) => {
+ /**
+ * - acknowledged=false, we timed out before the cluster state was
+ * updated with the newly created index, but it probably will be
+ * created sometime soon.
+ * - shards_acknowledged=false, we timed out before all shards were
+ * started
+ * - acknowledged=true, shards_acknowledged=true, cloning complete
+ */
+ return Either.right({
+ acknowledged: res.body.acknowledged,
+ shardsAcknowledged: res.body.shards_acknowledged,
+ });
+ })
+ .catch((error: EsErrors.ResponseError) => {
+ if (error.body.error.type === 'index_not_found_exception') {
+ return Either.left({
+ type: 'index_not_found_exception' as const,
+ index: error.body.error.index,
+ });
+ } else if (error.body.error.type === 'resource_already_exists_exception') {
+ /**
+ * If the target index already exists it means a previous clone
+ * operation had already been started. However, we can't be sure
+ * that all shards were started so return shardsAcknowledged: false
+ */
+ return Either.right({
+ acknowledged: true,
+ shardsAcknowledged: false,
+ });
+ } else {
+ throw error;
+ }
+ })
+ .catch(catchRetryableEsClientErrors);
+ };
+
+ return pipe(
+ cloneTask,
+ TaskEither.chain((res) => {
+ if (res.acknowledged && res.shardsAcknowledged) {
+ // If the cluster state was updated and all shards ackd we're done
+ return TaskEither.right(res);
+ } else {
+ // Otherwise, wait until the target index has a 'green' status.
+ return pipe(
+ waitForIndexStatusGreen(client, target),
+ TaskEither.map((value) => {
+ /** When the index status is 'green' we know that all shards were started */
+ return { acknowledged: true, shardsAcknowledged: true };
+ })
+ );
+ }
+ })
+ );
+};
+
+interface WaitForTaskResponse {
+ error: Option.Option<{ type: string; reason: string; index: string }>;
+ completed: boolean;
+  failures: Option.Option<any[]>;
+ description: string;
+}
+
+/**
+ * Blocks for up to 60s or until a task completes.
+ *
+ * TODO: delete completed tasks
+ */
+const waitForTask = (
+ client: ElasticsearchClient,
+ taskId: string,
+ timeout: string
+): TaskEither.TaskEither<RetryableEsClientError, WaitForTaskResponse> => () => {
+ return client.tasks
+ .get<{
+ completed: boolean;
+ response: { failures: any[] };
+ task: { description: string };
+ error: { type: string; reason: string; index: string };
+ }>({
+ task_id: taskId,
+ wait_for_completion: true,
+ timeout,
+ })
+ .then((res) => {
+ const body = res.body;
+ const failures = body.response?.failures ?? [];
+ return Either.right({
+ completed: body.completed,
+ error: Option.fromNullable(body.error),
+ failures: failures.length > 0 ? Option.some(failures) : Option.none,
+ description: body.task.description,
+ });
+ })
+ .catch(catchRetryableEsClientErrors);
+};
+
+export interface UpdateByQueryResponse {
+ taskId: string;
+}
+
+/**
+ * Pickup updated mappings by performing an update by query operation on all
+ * documents in the index. Returns a task ID which can be
+ * tracked for progress.
+ *
+ * @remarks When mappings are updated to add a field which previously wasn't
+ * mapped Elasticsearch won't automatically add existing documents to its
+ * internal search indices. So search results on this field won't return any
+ * existing documents. By running an update by query we essentially refresh
+ * the internal search indices for all existing documents.
+ * This action uses `conflicts: 'proceed'` allowing several Kibana instances
+ * to run this in parallel.
+ */
+export const pickupUpdatedMappings = (
+ client: ElasticsearchClient,
+ index: string
+): TaskEither.TaskEither<RetryableEsClientError, UpdateByQueryResponse> => () => {
+ return client
+ .updateByQuery({
+ // Ignore version conflicts that can occur from parallel update by query operations
+ conflicts: 'proceed',
+ // Return an error when targeting missing or closed indices
+ allow_no_indices: false,
+ index,
+ // How many documents to update per batch
+ scroll_size: BATCH_SIZE,
+ // force a refresh so that we can query the updated index immediately
+ // after the operation completes
+ refresh: true,
+ // Create a task and return task id instead of blocking until complete
+ wait_for_completion: false,
+ })
+ .then(({ body: { task: taskId } }) => {
+ return Either.right({ taskId });
+ })
+ .catch(catchRetryableEsClientErrors);
+};
+
+export interface ReindexResponse {
+ taskId: string;
+}
+
+/**
+ * Reindex documents from the `sourceIndex` into the `targetIndex`. Returns a
+ * task ID which can be tracked for progress.
+ *
+ * @remarks This action is idempotent allowing several Kibana instances to run
+ * this in parallel. By using `op_type: 'create', conflicts: 'proceed'` there
+ * will be only one write per reindexed document.
+ */
+export const reindex = (
+ client: ElasticsearchClient,
+ sourceIndex: string,
+ targetIndex: string,
+  reindexScript: Option.Option<string>,
+ requireAlias: boolean
+): TaskEither.TaskEither<RetryableEsClientError, ReindexResponse> => () => {
+ return client
+ .reindex({
+ // Require targetIndex to be an alias. Prevents a new index from being
+ // created if targetIndex doesn't exist.
+ // @ts-expect-error This API isn't documented
+ require_alias: requireAlias,
+ body: {
+ // Ignore version conflicts from existing documents
+ conflicts: 'proceed',
+ source: {
+ index: sourceIndex,
+ // Set reindex batch size
+ size: BATCH_SIZE,
+ },
+ dest: {
+ index: targetIndex,
+ // Don't override existing documents, only create if missing
+ op_type: 'create',
+ },
+ script: Option.fold(
+ () => undefined,
+ (script) => ({
+ source: script,
+ lang: 'painless',
+ })
+ )(reindexScript),
+ },
+ // force a refresh so that we can query the target index
+ refresh: true,
+ // Create a task and return task id instead of blocking until complete
+ wait_for_completion: false,
+ })
+ .then(({ body: { task: taskId } }) => {
+ return Either.right({ taskId });
+ })
+ .catch(catchRetryableEsClientErrors);
+};
+
+interface WaitForReindexTaskFailure {
+ cause: { type: string; reason: string };
+}
+
+export const waitForReindexTask = flow(
+ waitForTask,
+ TaskEither.chain(
+ (
+ res
+ ): TaskEither.TaskEither<
+ | { type: 'index_not_found_exception'; index: string }
+ | { type: 'target_index_had_write_block' }
+ | { type: 'incompatible_mapping_exception' }
+ | RetryableEsClientError,
+ 'reindex_succeeded'
+ > => {
+ const failureIsAWriteBlock = ({ cause: { type, reason } }: WaitForReindexTaskFailure) =>
+ type === 'cluster_block_exception' &&
+ reason.match(/index \[.+] blocked by: \[FORBIDDEN\/8\/index write \(api\)\]/);
+
+ const failureIsIncompatibleMappingException = ({
+ cause: { type, reason },
+ }: WaitForReindexTaskFailure) =>
+ type === 'strict_dynamic_mapping_exception' || type === 'mapper_parsing_exception';
+
+ if (Option.isSome(res.error)) {
+ if (res.error.value.type === 'index_not_found_exception') {
+ return TaskEither.left({
+ type: 'index_not_found_exception' as const,
+ index: res.error.value.index,
+ });
+ } else {
+ throw new Error('Reindex failed with the following error:\n' + JSON.stringify(res.error));
+ }
+ } else if (Option.isSome(res.failures)) {
+ if (res.failures.value.every(failureIsAWriteBlock)) {
+ return TaskEither.left({ type: 'target_index_had_write_block' as const });
+ } else if (res.failures.value.every(failureIsIncompatibleMappingException)) {
+ return TaskEither.left({ type: 'incompatible_mapping_exception' as const });
+ } else {
+ throw new Error(
+ 'Reindex failed with the following failures:\n' + JSON.stringify(res.failures.value)
+ );
+ }
+ } else {
+ return TaskEither.right('reindex_succeeded' as const);
+ }
+ }
+ )
+);
+
+export const verifyReindex = (
+ client: ElasticsearchClient,
+ sourceIndex: string,
+ targetIndex: string
+): TaskEither.TaskEither<
+ RetryableEsClientError | { type: 'verify_reindex_failed' },
+ 'verify_reindex_succeeded'
+> => () => {
+ const count = (index: string) =>
+ client
+ .count<{ count: number }>({
+ index,
+ // Return an error when targeting missing or closed indices
+ allow_no_indices: false,
+ })
+ .then((res) => {
+ return res.body.count;
+ });
+
+ return Promise.all([count(sourceIndex), count(targetIndex)])
+ .then(([sourceCount, targetCount]) => {
+ if (targetCount >= sourceCount) {
+ return Either.right('verify_reindex_succeeded' as const);
+ } else {
+ return Either.left({ type: 'verify_reindex_failed' as const });
+ }
+ })
+ .catch(catchRetryableEsClientErrors);
+};
+
+export const waitForPickupUpdatedMappingsTask = flow(
+ waitForTask,
+ TaskEither.chain(
+    (res): TaskEither.TaskEither<RetryableEsClientError, 'pickup_updated_mappings_succeeded'> => {
+ // We don't catch or type failures/errors because they should never
+ // occur in our migration algorithm and we don't have any business logic
+ // for dealing with it. If something happens we'll just crash and try
+ // again.
+ if (Option.isSome(res.failures)) {
+ throw new Error(
+ 'pickupUpdatedMappings task failed with the following failures:\n' +
+ JSON.stringify(res.failures.value)
+ );
+ } else if (Option.isSome(res.error)) {
+ throw new Error(
+ 'pickupUpdatedMappings task failed with the following error:\n' +
+ JSON.stringify(res.error.value)
+ );
+ } else {
+ return TaskEither.right('pickup_updated_mappings_succeeded' as const);
+ }
+ }
+ )
+);
+
+export type AliasAction =
+ | { remove_index: { index: string } }
+ | { remove: { index: string; alias: string; must_exist: boolean } }
+ | { add: { index: string; alias: string } };
+
+/**
+ * Calls the Update index alias API `_alias` with the provided alias actions.
+ */
+export const updateAliases = (
+ client: ElasticsearchClient,
+ aliasActions: AliasAction[]
+): TaskEither.TaskEither<
+ | { type: 'index_not_found_exception'; index: string }
+ | { type: 'alias_not_found_exception' }
+ | { type: 'remove_index_not_a_concrete_index' }
+ | RetryableEsClientError,
+ 'update_aliases_succeeded'
+> => () => {
+ return client.indices
+ .updateAliases(
+ {
+ body: {
+ actions: aliasActions,
+ },
+ },
+ { maxRetries: 0 }
+ )
+ .then(() => {
+ // Ignore `acknowledged: false`. When the coordinating node accepts
+ // the new cluster state update but not all nodes have applied the
+ // update within the timeout `acknowledged` will be false. However,
+ // retrying this update will always immediately result in `acknowledged:
+ // true` even if there are still nodes which are falling behind with
+ // cluster state updates.
+ // The only impact for using `updateAliases` to mark the version index
+ // as ready is that it could take longer for other Kibana instances to
+ // see that the version index is ready so they are more likely to
+      // perform unnecessary duplicate work.
+ return Either.right('update_aliases_succeeded' as const);
+ })
+ .catch((err: EsErrors.ElasticsearchClientError) => {
+ if (err instanceof EsErrors.ResponseError) {
+ if (err.body.error.type === 'index_not_found_exception') {
+ return Either.left({
+ type: 'index_not_found_exception' as const,
+ index: err.body.error.index,
+ });
+ } else if (
+ err.body.error.type === 'illegal_argument_exception' &&
+ err.body.error.reason.match(
+ /The provided expression \[.+\] matches an alias, specify the corresponding concrete indices instead./
+ )
+ ) {
+ return Either.left({ type: 'remove_index_not_a_concrete_index' as const });
+ } else if (
+ err.body.error.type === 'aliases_not_found_exception' ||
+ (err.body.error.type === 'resource_not_found_exception' &&
+ err.body.error.reason.match(/required alias \[.+\] does not exist/))
+ ) {
+ return Either.left({
+ type: 'alias_not_found_exception' as const,
+ });
+ }
+ }
+ throw err;
+ })
+ .catch(catchRetryableEsClientErrors);
+};
+
+export interface AcknowledgeResponse {
+ acknowledged: boolean;
+ shardsAcknowledged: boolean;
+}
+
+/**
+ * Creates an index with the given mappings
+ *
+ * @remarks
+ * This method adds some additional logic to the ES create index API:
+ * - it is idempotent, if it gets called multiple times subsequent calls will
+ * wait for the first create operation to complete (up to 60s)
+ * - the first call will wait up to 120s for the cluster state and all shards
+ * to be updated.
+ */
+export const createIndex = (
+ client: ElasticsearchClient,
+ indexName: string,
+ mappings: IndexMapping,
+ aliases?: string[]
+): TaskEither.TaskEither<RetryableEsClientError, 'create_index_succeeded'> => {
+ const createIndexTask: TaskEither.TaskEither<
+ RetryableEsClientError,
+ AcknowledgeResponse
+ > = () => {
+ const aliasesObject = (aliases ?? []).reduce((acc, alias) => {
+ acc[alias] = {};
+ return acc;
+    }, {} as Record<string, {}>);
+
+ return client.indices
+ .create(
+ {
+ index: indexName,
+ // wait until all shards are available before creating the index
+ // (since number_of_shards=1 this does not have any effect atm)
+ wait_for_active_shards: WAIT_FOR_ALL_SHARDS_TO_BE_ACTIVE,
+ // Wait up to 60s for the cluster state to update and all shards to be
+ // started
+ timeout: DEFAULT_TIMEOUT,
+ body: {
+ mappings,
+ aliases: aliasesObject,
+ settings: {
+ index: {
+ // ES rule of thumb: shards should be several GB to 10's of GB, so
+ // Kibana is unlikely to cross that limit.
+ number_of_shards: 1,
+ auto_expand_replicas: INDEX_AUTO_EXPAND_REPLICAS,
+ // Set an explicit refresh interval so that we don't inherit the
+ // value from incorrectly configured index templates (not required
+ // after we adopt system indices)
+ refresh_interval: '1s',
+ // Bump priority so that recovery happens before newer indices
+ priority: 10,
+ },
+ },
+ },
+ },
+ { maxRetries: 0 /** handle retry ourselves for now */ }
+ )
+ .then((res) => {
+ /**
+ * - acknowledged=false, we timed out before the cluster state was
+ * updated on all nodes with the newly created index, but it
+ * probably will be created sometime soon.
+ * - shards_acknowledged=false, we timed out before all shards were
+ * started
+ * - acknowledged=true, shards_acknowledged=true, index creation complete
+ */
+ return Either.right({
+ acknowledged: res.body.acknowledged,
+ shardsAcknowledged: res.body.shards_acknowledged,
+ });
+ })
+ .catch((error) => {
+ if (error.body.error.type === 'resource_already_exists_exception') {
+ /**
+ * If the target index already exists it means a previous create
+ * operation had already been started. However, we can't be sure
+ * that all shards were started so return shardsAcknowledged: false
+ */
+ return Either.right({
+ acknowledged: true,
+ shardsAcknowledged: false,
+ });
+ } else {
+ throw error;
+ }
+ })
+ .catch(catchRetryableEsClientErrors);
+ };
+
+ return pipe(
+ createIndexTask,
+ TaskEither.chain<RetryableEsClientError, AcknowledgeResponse, 'create_index_succeeded'>((res) => {
+ if (res.acknowledged && res.shardsAcknowledged) {
+ // If the cluster state was updated and all shards ackd we're done
+ return TaskEither.right('create_index_succeeded');
+ } else {
+ // Otherwise, wait until the target index has a 'green' status.
+ return pipe(
+ waitForIndexStatusGreen(client, indexName),
+ TaskEither.map(() => {
+ /** When the index status is 'green' we know that all shards were started */
+ return 'create_index_succeeded';
+ })
+ );
+ }
+ })
+ );
+};
+
+export interface UpdateAndPickupMappingsResponse {
+ taskId: string;
+}
+
+/**
+ * Updates an index's mappings and runs a pickupUpdatedMappings task so that the mapping
+ * changes are "picked up". Returns a taskId to track progress.
+ */
+export const updateAndPickupMappings = (
+ client: ElasticsearchClient,
+ index: string,
+ mappings: IndexMapping
+): TaskEither.TaskEither<RetryableEsClientError, UpdateAndPickupMappingsResponse> => {
+ const putMappingTask: TaskEither.TaskEither<
+ RetryableEsClientError,
+ 'update_mappings_succeeded'
+ > = () => {
+ return client.indices
+ .putMapping<Record<string, any>, IndexMapping>({
+ index,
+ timeout: DEFAULT_TIMEOUT,
+ body: mappings,
+ })
+ .then((res) => {
+ // Ignore `acknowledged: false`. When the coordinating node accepts
+ // the new cluster state update but not all nodes have applied the
+ // update within the timeout `acknowledged` will be false. However,
+ // retrying this update will always immediately result in `acknowledged:
+ // true` even if there are still nodes which are falling behind with
+ // cluster state updates.
+ // For updateAndPickupMappings this means that there is the potential
+ // that some existing document's fields won't be picked up if the node
+ // on which the Kibana shard is running has fallen behind with cluster
+ // state updates and the mapping update wasn't applied before we run
+ // `pickupUpdatedMappings`. ES tries to limit this risk by blocking
+ // index operations (including update_by_query used by
+ // updateAndPickupMappings) if there are pending mappings changes. But
+ // not all mapping changes will prevent this.
+ return Either.right('update_mappings_succeeded' as const);
+ })
+ .catch(catchRetryableEsClientErrors);
+ };
+
+ return pipe(
+ putMappingTask,
+ TaskEither.chain((res) => {
+ return pickupUpdatedMappings(client, index);
+ })
+ );
+};
+export interface SearchResponse {
+ outdatedDocuments: SavedObjectsRawDoc[];
+}
+
+/**
+ * Search for outdated saved object documents with the provided query. Will
+ * return one batch of documents. Searching should be repeated until no more
+ * outdated documents can be found.
+ */
+export const searchForOutdatedDocuments = (
+ client: ElasticsearchClient,
+ index: string,
+ query: Record<string, unknown>
+): TaskEither.TaskEither<RetryableEsClientError, SearchResponse> => () => {
+ return client
+ .search<{
+ // when `filter_path` is specified, ES doesn't return empty arrays, so if
+ // there are no search results res.body.hits will be undefined.
+ hits?: {
+ hits?: SavedObjectsRawDoc[];
+ };
+ }>({
+ index,
+ // Optimize search performance by sorting by the "natural" index order
+ sort: ['_doc'],
+ // Return the _seq_no and _primary_term so we can use optimistic
+ // concurrency control for updates
+ seq_no_primary_term: true,
+ size: BATCH_SIZE,
+ body: {
+ query,
+ },
+ // Return an error when targeting missing or closed indices
+ allow_no_indices: false,
+ // Don't return partial results if timeouts or shard failures are
+ // encountered. This is important because 0 search hits is interpreted as
+ // there being no more outdated documents left that require
+ // transformation. Although the default is `false`, we set this
+ // explicitly to avoid users overriding the
+ // search.default_allow_partial_results cluster setting to true.
+ allow_partial_search_results: false,
+ // Improve performance by not calculating the total number of hits
+ // matching the query.
+ track_total_hits: false,
+ // Reduce the response payload size by only returning the data we care about
+ filter_path: [
+ 'hits.hits._id',
+ 'hits.hits._source',
+ 'hits.hits._seq_no',
+ 'hits.hits._primary_term',
+ ],
+ })
+ .then((res) => Either.right({ outdatedDocuments: res.body.hits?.hits ?? [] }))
+ .catch(catchRetryableEsClientErrors);
+};
+
+/**
+ * Write the up-to-date transformed documents to the index, overwriting any
+ * documents that are still on their outdated version.
+ */
+export const bulkOverwriteTransformedDocuments = (
+ client: ElasticsearchClient,
+ index: string,
+ transformedDocs: SavedObjectsRawDoc[]
+): TaskEither.TaskEither<RetryableEsClientError, 'bulk_index_succeeded'> => () => {
+ return client
+ .bulk<{
+ took: number;
+ errors: boolean;
+ items: [
+ {
+ index: {
+ _id: string;
+ status: number;
+ // the filter_path ensures that only items with errors are returned
+ error: { type: string; reason: string };
+ };
+ }
+ ];
+ }>({
+ // Because we only add aliases in the MARK_VERSION_INDEX_READY step we
+ // can't bulkIndex to an alias with require_alias=true. This means if
+ // users tamper during this operation (delete indices or restore a
+ // snapshot), we could end up auto-creating an index without the correct
+ // mappings. Such tampering could lead to many other problems and is
+ // probably unlikely so for now we'll accept this risk and wait till
+ // system indices puts in place a hard control.
+ require_alias: false,
+ wait_for_active_shards: WAIT_FOR_ALL_SHARDS_TO_BE_ACTIVE,
+ // Wait for a refresh to happen before returning. This ensures that when
+ // this Kibana instance searches for outdated documents, it won't find
+ // documents that were already transformed by itself or another Kibana
+ // instance. However, this causes each OUTDATED_DOCUMENTS_SEARCH ->
+ // OUTDATED_DOCUMENTS_TRANSFORM cycle to take 1s so when batches are
+ // small performance will become a lot worse.
+ // The alternative is to use a search_after with either a tie_breaker
+ // field or using a Point In Time as a cursor to go through all documents.
+ refresh: 'wait_for',
+ filter_path: ['items.*.error'],
+ body: transformedDocs.flatMap((doc) => {
+ return [
+ {
+ index: {
+ _index: index,
+ _id: doc._id,
+ // overwrite existing documents
+ op_type: 'index',
+ // use optimistic concurrency control to ensure that outdated
+ // documents are only overwritten once with the latest version
+ if_seq_no: doc._seq_no,
+ if_primary_term: doc._primary_term,
+ },
+ },
+ doc._source,
+ ];
+ }),
+ })
+ .then((res) => {
+ // Filter out version_conflict_engine_exception since these just mean
+ // that another instance already updated these documents
+ const errors = (res.body.items ?? []).filter(
+ (item) => item.index.error.type !== 'version_conflict_engine_exception'
+ );
+ if (errors.length === 0) {
+ return Either.right('bulk_index_succeeded' as const);
+ } else {
+ throw new Error(JSON.stringify(errors));
+ }
+ })
+ .catch(catchRetryableEsClientErrors);
+};
diff --git a/src/core/server/saved_objects/migrationsv2/index.ts b/src/core/server/saved_objects/migrationsv2/index.ts
new file mode 100644
index 0000000000000..87a29e7047ac8
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/index.ts
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { ElasticsearchClient } from '../../elasticsearch';
+import { IndexMapping } from '../mappings';
+import { Logger } from '../../logging';
+import { SavedObjectsMigrationVersion } from '../types';
+import { MigrationResult } from '../migrations/core';
+import { next, TransformRawDocs } from './next';
+import { createInitialState, model } from './model';
+import { migrationStateActionMachine } from './migrations_state_action_machine';
+
+/**
+ * Migrates the provided indexPrefix index using a resilient algorithm that is
+ * completely lock-free so that any failure can always be retried by
+ * restarting Kibana.
+ */
+export async function runResilientMigrator({
+ client,
+ kibanaVersion,
+ targetMappings,
+ logger,
+ preMigrationScript,
+ transformRawDocs,
+ migrationVersionPerType,
+ indexPrefix,
+}: {
+ client: ElasticsearchClient;
+ kibanaVersion: string;
+ targetMappings: IndexMapping;
+ preMigrationScript?: string;
+ logger: Logger;
+ transformRawDocs: TransformRawDocs;
+ migrationVersionPerType: SavedObjectsMigrationVersion;
+ indexPrefix: string;
+}): Promise<MigrationResult> {
+ const initialState = createInitialState({
+ kibanaVersion,
+ targetMappings,
+ preMigrationScript,
+ migrationVersionPerType,
+ indexPrefix,
+ });
+ return migrationStateActionMachine({
+ initialState,
+ logger,
+ next: next(client, transformRawDocs),
+ model,
+ });
+}
diff --git a/src/core/server/saved_objects/migrationsv2/integration_tests/.gitignore b/src/core/server/saved_objects/migrationsv2/integration_tests/.gitignore
new file mode 100644
index 0000000000000..57208badcc680
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/integration_tests/.gitignore
@@ -0,0 +1 @@
+migration_test_kibana.log
diff --git a/src/core/server/saved_objects/migrationsv2/integration_tests/actions.test.ts b/src/core/server/saved_objects/migrationsv2/integration_tests/actions.test.ts
new file mode 100644
index 0000000000000..8947a5ec2171c
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/integration_tests/actions.test.ts
@@ -0,0 +1,970 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { ElasticsearchClient } from '../../../';
+import { InternalCoreStart } from '../../../internal_types';
+import * as kbnTestServer from '../../../../test_helpers/kbn_server';
+import { Root } from '../../../root';
+import { SavedObjectsRawDoc } from '../../serialization';
+import {
+ bulkOverwriteTransformedDocuments,
+ cloneIndex,
+ createIndex,
+ fetchIndices,
+ reindex,
+ searchForOutdatedDocuments,
+ SearchResponse,
+ setWriteBlock,
+ updateAliases,
+ waitForReindexTask,
+ ReindexResponse,
+ waitForPickupUpdatedMappingsTask,
+ pickupUpdatedMappings,
+ UpdateByQueryResponse,
+ updateAndPickupMappings,
+ UpdateAndPickupMappingsResponse,
+ verifyReindex,
+ removeWriteBlock,
+} from '../actions';
+import * as Either from 'fp-ts/lib/Either';
+import * as Option from 'fp-ts/lib/Option';
+
+const { startES } = kbnTestServer.createTestServers({
+ adjustTimeout: (t: number) => jest.setTimeout(t),
+});
+let esServer: kbnTestServer.TestElasticsearchUtils;
+
+describe('migration actions', () => {
+ let root: Root;
+ let start: InternalCoreStart;
+ let client: ElasticsearchClient;
+
+ beforeAll(async () => {
+ esServer = await startES();
+ root = kbnTestServer.createRootWithCorePlugins({
+ server: {
+ basePath: '/hello',
+ },
+ });
+
+ await root.setup();
+ start = await root.start();
+ client = start.elasticsearch.client.asInternalUser;
+
+ // Create test fixture data:
+ await createIndex(client, 'existing_index_with_docs', {
+ dynamic: true as any,
+ properties: {},
+ })();
+ const sourceDocs = ([
+ { _source: { title: 'doc 1' } },
+ { _source: { title: 'doc 2' } },
+ { _source: { title: 'doc 3' } },
+ { _source: { title: 'saved object 4' } },
+ ] as unknown) as SavedObjectsRawDoc[];
+ await bulkOverwriteTransformedDocuments(client, 'existing_index_with_docs', sourceDocs)();
+
+ await createIndex(client, 'existing_index_2', { properties: {} })();
+ await createIndex(client, 'existing_index_with_write_block', { properties: {} })();
+ await bulkOverwriteTransformedDocuments(
+ client,
+ 'existing_index_with_write_block',
+ sourceDocs
+ )();
+ await setWriteBlock(client, 'existing_index_with_write_block')();
+ await updateAliases(client, [
+ { add: { index: 'existing_index_2', alias: 'existing_index_2_alias' } },
+ ])();
+ });
+
+ afterAll(async () => {
+ await esServer.stop();
+ await root.shutdown();
+ });
+
+ describe('fetchIndices', () => {
+ it('resolves right empty record if no indices were found', async () => {
+ const task = fetchIndices(client, ['no_such_index']);
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": Object {},
+ }
+ `);
+ });
+ it('resolves right record with found indices', async () => {
+ const res = (await fetchIndices(client, [
+ 'no_such_index',
+ 'existing_index_with_docs',
+ ])()) as Either.Right;
+
+ return expect(res.right).toEqual(
+ expect.objectContaining({
+ existing_index_with_docs: {
+ aliases: {},
+ mappings: expect.anything(),
+ settings: expect.anything(),
+ },
+ })
+ );
+ });
+ });
+
+ describe('setWriteBlock', () => {
+ beforeAll(async () => {
+ await createIndex(client, 'new_index_without_write_block', { properties: {} })();
+ });
+ it('resolves right when setting the write block succeeds', async () => {
+ const task = setWriteBlock(client, 'new_index_without_write_block');
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "set_write_block_succeeded",
+ }
+ `);
+ });
+ it('resolves right when setting a write block on an index that already has one', () => {
+ const task = setWriteBlock(client, 'existing_index_with_write_block');
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "set_write_block_succeeded",
+ }
+ `);
+ });
+ it('once resolved, prevents further writes to the index', async () => {
+ const task = setWriteBlock(client, 'new_index_without_write_block');
+ await task();
+ const sourceDocs = ([
+ { _source: { title: 'doc 1' } },
+ { _source: { title: 'doc 2' } },
+ { _source: { title: 'doc 3' } },
+ { _source: { title: 'doc 4' } },
+ ] as unknown) as SavedObjectsRawDoc[];
+ return expect(
+ bulkOverwriteTransformedDocuments(client, 'new_index_without_write_block', sourceDocs)()
+ ).rejects.toMatchObject(expect.anything());
+ });
+ it('resolves left index_not_found_exception when the index does not exist', () => {
+ const task = setWriteBlock(client, 'no_such_index');
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "type": "index_not_found_exception",
+ },
+ }
+ `);
+ });
+ });
+
+ describe('removeWriteBlock', () => {
+ beforeAll(async () => {
+ await createIndex(client, 'existing_index_without_write_block_2', { properties: {} })();
+ await createIndex(client, 'existing_index_with_write_block_2', { properties: {} })();
+ await setWriteBlock(client, 'existing_index_with_write_block_2')();
+ });
+ it('resolves right if successful when an index already has a write block', () => {
+ const task = removeWriteBlock(client, 'existing_index_with_write_block_2');
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "remove_write_block_succeeded",
+ }
+ `);
+ });
+ it('resolves right if successful when an index does not have a write block', () => {
+ const task = removeWriteBlock(client, 'existing_index_without_write_block_2');
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "remove_write_block_succeeded",
+ }
+ `);
+ });
+ it('rejects if there is a non-retryable error', () => {
+ const task = removeWriteBlock(client, 'no_such_index');
+ return expect(task()).rejects.toMatchInlineSnapshot(
+ `[ResponseError: index_not_found_exception]`
+ );
+ });
+ });
+
+ describe('cloneIndex', () => {
+ afterEach(async () => {
+ try {
+ await client.indices.delete({ index: 'yellow_then_green_index' });
+ } catch (e) {
+ /** ignore */
+ }
+ });
+ it('resolves right if cloning into a new target index', () => {
+ const task = cloneIndex(client, 'existing_index_with_write_block', 'yellow_then_green_index');
+ expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": Object {
+ "acknowledged": true,
+ "shardsAcknowledged": true,
+ },
+ }
+ `);
+ });
+ it('resolves right after waiting for index status to be green if clone target already existed', async () => {
+ // Create a yellow index
+ await client.indices.create({
+ index: 'yellow_then_green_index',
+ body: {
+ mappings: { properties: {} },
+ settings: {
+ // Allocate 1 replica so that this index stays yellow
+ number_of_replicas: '1',
+ },
+ },
+ });
+
+ // Call clone even though the index already exists
+ const cloneIndexPromise = cloneIndex(
+ client,
+ 'existing_index_with_write_block',
+ 'yellow_then_green_index'
+ )();
+ let indexGreen = false;
+
+ setTimeout(() => {
+ client.indices.putSettings({
+ body: {
+ index: {
+ number_of_replicas: 0,
+ },
+ },
+ });
+ indexGreen = true;
+ }, 10);
+
+ return cloneIndexPromise.then((res) => {
+ // Assert that the promise didn't resolve before the index became green
+ expect(indexGreen).toBe(true);
+ expect(res).toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": Object {
+ "acknowledged": true,
+ "shardsAcknowledged": true,
+ },
+ }
+ `);
+ });
+ });
+ it('resolves left index_not_found_exception if the source index does not exist', () => {
+ const task = cloneIndex(client, 'no_such_index', 'yellow_then_green_index');
+ expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "index": "no_such_index",
+ "type": "index_not_found_exception",
+ },
+ }
+ `);
+ });
+ });
+
+ // Reindex doesn't return any errors on its own, so we have to test
+ // together with waitForReindexTask
+ describe('reindex & waitForReindexTask', () => {
+ it('resolves right when reindex succeeds without reindex script', async () => {
+ const res = (await reindex(
+ client,
+ 'existing_index_with_docs',
+ 'reindex_target',
+ Option.none,
+ false
+ )()) as Either.Right<ReindexResponse>;
+ const task = waitForReindexTask(client, res.right.taskId, '10s');
+ await expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "reindex_succeeded",
+ }
+ `);
+
+ const results = ((await searchForOutdatedDocuments(
+ client,
+ 'reindex_target',
+ undefined as any
+ )()) as Either.Right<SearchResponse>).right.outdatedDocuments;
+ expect(results.map((doc) => doc._source.title)).toMatchInlineSnapshot(`
+ Array [
+ "doc 1",
+ "doc 2",
+ "doc 3",
+ "saved object 4",
+ ]
+ `);
+ });
+ it('resolves right when reindex succeeds with reindex script', async () => {
+ const res = (await reindex(
+ client,
+ 'existing_index_with_docs',
+ 'reindex_target_2',
+ Option.some(`ctx._source.title = ctx._source.title + '_updated'`),
+ false
+ )()) as Either.Right<ReindexResponse>;
+ const task = waitForReindexTask(client, res.right.taskId, '10s');
+ await expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "reindex_succeeded",
+ }
+ `);
+ const results = ((await searchForOutdatedDocuments(
+ client,
+ 'reindex_target_2',
+ undefined as any
+ )()) as Either.Right<SearchResponse>).right.outdatedDocuments;
+ expect(results.map((doc) => doc._source.title)).toMatchInlineSnapshot(`
+ Array [
+ "doc 1_updated",
+ "doc 2_updated",
+ "doc 3_updated",
+ "saved object 4_updated",
+ ]
+ `);
+ });
+ it('resolves right, ignores version conflicts and does not update existing docs when reindex multiple times', async () => {
+ // Reindex with a script
+ let res = (await reindex(
+ client,
+ 'existing_index_with_docs',
+ 'reindex_target_3',
+ Option.some(`ctx._source.title = ctx._source.title + '_updated'`),
+ false
+ )()) as Either.Right<ReindexResponse>;
+ let task = waitForReindexTask(client, res.right.taskId, '10s');
+ await expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "reindex_succeeded",
+ }
+ `);
+
+ // reindex without a script
+ res = (await reindex(
+ client,
+ 'existing_index_with_docs',
+ 'reindex_target_3',
+ Option.none,
+ false
+ )()) as Either.Right<ReindexResponse>;
+ task = waitForReindexTask(client, res.right.taskId, '10s');
+ await expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "reindex_succeeded",
+ }
+ `);
+
+ // Assert that documents weren't overwritten by the second, unscripted reindex
+ const results = ((await searchForOutdatedDocuments(
+ client,
+ 'reindex_target_3',
+ undefined as any
+ )()) as Either.Right<SearchResponse>).right.outdatedDocuments;
+ expect(results.map((doc) => doc._source.title)).toMatchInlineSnapshot(`
+ Array [
+ "doc 1_updated",
+ "doc 2_updated",
+ "doc 3_updated",
+ "saved object 4_updated",
+ ]
+ `);
+ });
+ it('resolves right and proceeds to add missing documents if there are some existing docs conflicts', async () => {
+ // Simulate a reindex that only adds some of the documents from the
+ // source index into the target index
+ await createIndex(client, 'reindex_target_4', { properties: {} })();
+ const sourceDocs = ((await searchForOutdatedDocuments(
+ client,
+ 'existing_index_with_docs',
+ undefined as any
+ )()) as Either.Right<SearchResponse>).right.outdatedDocuments
+ .slice(0, 2)
+ .map(({ _id, _source }) => ({
+ _id,
+ _source,
+ }));
+ await bulkOverwriteTransformedDocuments(client, 'reindex_target_4', sourceDocs)();
+
+ // Now do a real reindex
+ const res = (await reindex(
+ client,
+ 'existing_index_with_docs',
+ 'reindex_target_4',
+ Option.some(`ctx._source.title = ctx._source.title + '_updated'`),
+ false
+ )()) as Either.Right<ReindexResponse>;
+ const task = waitForReindexTask(client, res.right.taskId, '10s');
+ await expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "reindex_succeeded",
+ }
+ `);
+ // Assert that existing documents weren't overwritten, but that missing
+ // documents were added by the reindex
+ const results = ((await searchForOutdatedDocuments(
+ client,
+ 'reindex_target_4',
+ undefined as any
+ )()) as Either.Right<SearchResponse>).right.outdatedDocuments;
+ expect(results.map((doc) => doc._source.title)).toMatchInlineSnapshot(`
+ Array [
+ "doc 1",
+ "doc 2",
+ "doc 3_updated",
+ "saved object 4_updated",
+ ]
+ `);
+ });
+ it('resolves left incompatible_mapping_exception if all reindex failures are due to a strict_dynamic_mapping_exception', async () => {
+ // Simulates one instance having completed the UPDATE_TARGET_MAPPINGS
+ // step which makes the mappings incompatible with outdated documents.
+ // If another instance then tries a reindex it will get a
+ // strict_dynamic_mapping_exception even if the documents already exist
+ // and should ignore this error.
+
+ // Create an index with incompatible mappings
+ await createIndex(client, 'reindex_target_5', {
+ dynamic: 'strict',
+ properties: {
+ /** no title field */
+ },
+ })();
+
+ const {
+ right: { taskId: reindexTaskId },
+ } = (await reindex(
+ client,
+ 'existing_index_with_docs',
+ 'reindex_target_5',
+ Option.none,
+ false
+ )()) as Either.Right<ReindexResponse>;
+ const task = waitForReindexTask(client, reindexTaskId, '10s');
+
+ await expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "type": "incompatible_mapping_exception",
+ },
+ }
+ `);
+ });
+ it('resolves left incompatible_mapping_exception if all reindex failures are due to a mapper_parsing_exception', async () => {
+ // Simulates one instance having completed the UPDATE_TARGET_MAPPINGS
+ // step which makes the mappings incompatible with outdated documents.
+ // If another instance then tries a reindex it will get a
+ // strict_dynamic_mapping_exception even if the documents already exist
+ // and should ignore this error.
+
+ // Create an index with incompatible mappings
+ await createIndex(client, 'reindex_target_6', {
+ dynamic: 'false',
+ properties: { title: { type: 'integer' } }, // integer is incompatible with string title
+ })();
+
+ const {
+ right: { taskId: reindexTaskId },
+ } = (await reindex(
+ client,
+ 'existing_index_with_docs',
+ 'reindex_target_6',
+ Option.none,
+ false
+ )()) as Either.Right<ReindexResponse>;
+ const task = waitForReindexTask(client, reindexTaskId, '10s');
+
+ await expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "type": "incompatible_mapping_exception",
+ },
+ }
+ `);
+ });
+ it('resolves left index_not_found_exception if source index does not exist', async () => {
+ const res = (await reindex(
+ client,
+ 'no_such_index',
+ 'reindex_target',
+ Option.none,
+ false
+ )()) as Either.Right<ReindexResponse>;
+ const task = waitForReindexTask(client, res.right.taskId, '10s');
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "index": "no_such_index",
+ "type": "index_not_found_exception",
+ },
+ }
+ `);
+ });
+ it('resolves left target_index_had_write_block if all failures are due to a write block', async () => {
+ const res = (await reindex(
+ client,
+ 'existing_index_with_docs',
+ 'existing_index_with_write_block',
+ Option.none,
+ false
+ )()) as Either.Right<ReindexResponse>;
+
+ const task = waitForReindexTask(client, res.right.taskId, '10s');
+
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "type": "target_index_had_write_block",
+ },
+ }
+ `);
+ });
+ it('resolves left if requireAlias=true and the target is not an alias', async () => {
+ const res = (await reindex(
+ client,
+ 'existing_index_with_docs',
+ 'existing_index_with_write_block',
+ Option.none,
+ true
+ )()) as Either.Right<ReindexResponse>;
+
+ const task = waitForReindexTask(client, res.right.taskId, '10s');
+
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "index": "existing_index_with_write_block",
+ "type": "index_not_found_exception",
+ },
+ }
+ `);
+ });
+ });
+
+ describe('verifyReindex', () => {
+ it('resolves right if source and target indices have the same amount of documents', async () => {
+ const res = (await reindex(
+ client,
+ 'existing_index_with_docs',
+ 'reindex_target_7',
+ Option.none,
+ false
+ )()) as Either.Right<ReindexResponse>;
+ await waitForReindexTask(client, res.right.taskId, '10s')();
+
+ const task = verifyReindex(client, 'existing_index_with_docs', 'reindex_target_7');
+ await expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "verify_reindex_succeeded",
+ }
+ `);
+ });
+ it('resolves left if source and target indices have different amount of documents', () => {
+ const task = verifyReindex(client, 'existing_index_with_docs', 'existing_index_2');
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "type": "verify_reindex_failed",
+ },
+ }
+ `);
+ });
+ it('rejects if source or target index does not exist', async () => {
+ let task = verifyReindex(client, 'no_such_index', 'existing_index_2');
+ await expect(task()).rejects.toMatchInlineSnapshot(
+ `[ResponseError: index_not_found_exception]`
+ );
+
+ task = verifyReindex(client, 'existing_index_2', 'no_such_index');
+ await expect(task()).rejects.toMatchInlineSnapshot(
+ `[ResponseError: index_not_found_exception]`
+ );
+ });
+ });
+
+ describe('searchForOutdatedDocuments', () => {
+ it('only returns documents that match the outdatedDocumentsQuery', async () => {
+ const resultsWithQuery = ((await searchForOutdatedDocuments(
+ client,
+ 'existing_index_with_docs',
+ {
+ match: { title: { query: 'doc' } },
+ }
+ )()) as Either.Right<SearchResponse>).right.outdatedDocuments;
+ expect(resultsWithQuery.length).toBe(3);
+
+ const resultsWithoutQuery = ((await searchForOutdatedDocuments(
+ client,
+ 'existing_index_with_docs',
+ undefined as any
+ )()) as Either.Right<SearchResponse>).right.outdatedDocuments;
+ expect(resultsWithoutQuery.length).toBe(4);
+ });
+ it('resolves with _id, _source, _seq_no and _primary_term', async () => {
+ const results = ((await searchForOutdatedDocuments(client, 'existing_index_with_docs', {
+ match: { title: { query: 'doc' } },
+ })()) as Either.Right<SearchResponse>).right.outdatedDocuments;
+ expect(results).toEqual(
+ expect.arrayContaining([
+ expect.objectContaining({
+ _id: expect.any(String),
+ _seq_no: expect.any(Number),
+ _primary_term: expect.any(Number),
+ _source: expect.any(Object),
+ }),
+ ])
+ );
+ });
+ // I haven't been able to find a way to reproduce a partial search result
+ // it.todo('rejects if only partial search results can be obtained');
+ });
+
+ describe('waitForPickupUpdatedMappingsTask', () => {
+ it('rejects if there are failures', async () => {
+ const res = (await pickupUpdatedMappings(
+ client,
+ 'existing_index_with_write_block'
+ )()) as Either.Right<UpdateByQueryResponse>;
+
+ const task = waitForPickupUpdatedMappingsTask(client, res.right.taskId, '10s');
+
+ // We can't do a snapshot match because the response includes an index
+ // id which ES assigns dynamically
+ return expect(task()).rejects.toMatchObject({
+ message: /pickupUpdatedMappings task failed with the following failures:\n\[\{\"index\":\"existing_index_with_write_block\"/,
+ });
+ });
+ it('rejects if there is an error', async () => {
+ const res = (await pickupUpdatedMappings(
+ client,
+ 'no_such_index'
+ )()) as Either.Right<UpdateByQueryResponse>;
+
+ const task = waitForPickupUpdatedMappingsTask(client, res.right.taskId, '10s');
+
+ return expect(task()).rejects.toMatchInlineSnapshot(`
+ [Error: pickupUpdatedMappings task failed with the following error:
+ {"type":"index_not_found_exception","reason":"no such index [no_such_index]","resource.type":"index_or_alias","resource.id":"no_such_index","index_uuid":"_na_","index":"no_such_index"}]
+ `);
+ });
+ it('resolves right when successful', async () => {
+ const res = (await pickupUpdatedMappings(
+ client,
+ 'existing_index_with_docs'
+ )()) as Either.Right;
+
+ const task = waitForPickupUpdatedMappingsTask(client, res.right.taskId, '10s');
+
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "pickup_updated_mappings_succeeded",
+ }
+ `);
+ });
+ });
+
+ describe('updateAndPickupMappings', () => {
+ it('resolves right when mappings were updated and picked up', async () => {
+ // Create an index without any mappings and insert documents into it
+ await createIndex(client, 'existing_index_without_mappings', {
+ dynamic: false as any,
+ properties: {},
+ })();
+ const sourceDocs = ([
+ { _source: { title: 'doc 1' } },
+ { _source: { title: 'doc 2' } },
+ { _source: { title: 'doc 3' } },
+ { _source: { title: 'doc 4' } },
+ ] as unknown) as SavedObjectsRawDoc[];
+ await bulkOverwriteTransformedDocuments(
+ client,
+ 'existing_index_without_mappings',
+ sourceDocs
+ )();
+
+ // Assert that we can't search over the unmapped fields of the document
+ const originalSearchResults = ((await searchForOutdatedDocuments(
+ client,
+ 'existing_index_without_mappings',
+ { match: { title: { query: 'doc' } } }
+ )()) as Either.Right).right.outdatedDocuments;
+ expect(originalSearchResults.length).toBe(0);
+
+ // Update and pickup mappings so that the title field is searchable
+ const res = await updateAndPickupMappings(client, 'existing_index_without_mappings', {
+ properties: {
+ title: { type: 'text' },
+ },
+ })();
+ expect(Either.isRight(res)).toBe(true);
+ const taskId = (res as Either.Right).right.taskId;
+ await waitForPickupUpdatedMappingsTask(client, taskId, '60s')();
+
+ // Repeat the search expecting to be able to find the existing documents
+ const pickedUpSearchResults = ((await searchForOutdatedDocuments(
+ client,
+ 'existing_index_without_mappings',
+ { match: { title: { query: 'doc' } } }
+ )()) as Either.Right).right.outdatedDocuments;
+ return expect(pickedUpSearchResults.length).toBe(4);
+ });
+ });
+
+ describe('updateAliases', () => {
+ describe('remove', () => {
+ it('resolves left index_not_found_exception when the index does not exist', () => {
+ const task = updateAliases(client, [
+ {
+ remove: {
+ alias: 'no_such_alias',
+ index: 'no_such_index',
+ must_exist: false,
+ },
+ },
+ ]);
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "index": "no_such_index",
+ "type": "index_not_found_exception",
+ },
+ }
+ `);
+ });
+ describe('with must_exist=false', () => {
+ it('resolves left alias_not_found_exception when alias does not exist', async () => {
+ const task = updateAliases(client, [
+ {
+ remove: {
+ alias: 'no_such_alias',
+ index: 'existing_index_with_docs',
+ must_exist: false,
+ },
+ },
+ ]);
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "type": "alias_not_found_exception",
+ },
+ }
+ `);
+ });
+ });
+ describe('with must_exist=true', () => {
+ it('resolves left alias_not_found_exception when alias does not exist on specified index', async () => {
+ const task = updateAliases(client, [
+ {
+ remove: {
+ alias: 'existing_index_2_alias',
+ index: 'existing_index_with_docs',
+ must_exist: true,
+ },
+ },
+ ]);
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "type": "alias_not_found_exception",
+ },
+ }
+ `);
+ });
+ it('resolves left alias_not_found_exception when alias does not exist', async () => {
+ const task = updateAliases(client, [
+ {
+ remove: {
+ alias: 'no_such_alias',
+ index: 'existing_index_with_docs',
+ must_exist: true,
+ },
+ },
+ ]);
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "type": "alias_not_found_exception",
+ },
+ }
+ `);
+ });
+ });
+ });
+ describe('remove_index', () => {
+ it('left index_not_found_exception if index does not exist', () => {
+ const task = updateAliases(client, [
+ {
+ remove_index: {
+ index: 'no_such_index',
+ },
+ },
+ ]);
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "index": "no_such_index",
+ "type": "index_not_found_exception",
+ },
+ }
+ `);
+ });
+ it('left remove_index_not_a_concrete_index when remove_index targets an alias', () => {
+ const task = updateAliases(client, [
+ {
+ remove_index: {
+ index: 'existing_index_2_alias',
+ },
+ },
+ ]);
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Left",
+ "left": Object {
+ "type": "remove_index_not_a_concrete_index",
+ },
+ }
+ `);
+ });
+ });
+ });
+
+ describe('createIndex', () => {
+ afterAll(async () => {
+ await client.indices.delete({ index: 'yellow_then_green_index' });
+ });
+ it('resolves right after waiting for an index status to be green if the index already existed', async () => {
+ // Create a yellow index
+ await client.indices.create(
+ {
+ index: 'yellow_then_green_index',
+ body: {
+ mappings: { properties: {} },
+ settings: {
+ // Allocate 1 replica so that this index stays yellow
+ number_of_replicas: '1',
+ },
+ },
+ },
+ { maxRetries: 0 /** handle retry ourselves for now */ }
+ );
+
+ // Call createIndex even though the index already exists
+ const createIndexPromise = createIndex(client, 'yellow_then_green_index', undefined as any)();
+ let indexGreen = false;
+
+ setTimeout(() => {
+ client.indices.putSettings({
+ body: {
+ index: {
+ number_of_replicas: 0,
+ },
+ },
+ });
+ indexGreen = true;
+ }, 10);
+
+ return createIndexPromise.then((res) => {
+ // Assert that the promise didn't resolve before the index became green
+ expect(indexGreen).toBe(true);
+ expect(res).toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "create_index_succeeded",
+ }
+ `);
+ });
+ });
+ it('rejects when there is an unexpected error creating the index', () => {
+ // Creating an index with the same name as an existing alias to induce
+ // failure
+ expect(
+ createIndex(client, 'existing_index_2_alias', undefined as any)()
+ ).rejects.toMatchInlineSnapshot(`[ResponseError: invalid_index_name_exception]`);
+ });
+ });
+
+ describe('bulkOverwriteTransformedDocuments', () => {
+ it('resolves right when documents do not yet exist in the index', () => {
+ const newDocs = ([
+ { _source: { title: 'doc 5' } },
+ { _source: { title: 'doc 6' } },
+ { _source: { title: 'doc 7' } },
+ ] as unknown) as SavedObjectsRawDoc[];
+ const task = bulkOverwriteTransformedDocuments(client, 'existing_index_with_docs', newDocs);
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "bulk_index_succeeded",
+ }
+ `);
+ });
+ it('resolves right even if there were some version_conflict_engine_exception', async () => {
+ const existingDocs = ((await searchForOutdatedDocuments(
+ client,
+ 'existing_index_with_docs',
+ undefined as any
+ )()) as Either.Right).right.outdatedDocuments;
+
+ const task = bulkOverwriteTransformedDocuments(client, 'existing_index_with_docs', [
+ ...existingDocs,
+ ({ _source: { title: 'doc 8' } } as unknown) as SavedObjectsRawDoc,
+ ]);
+ return expect(task()).resolves.toMatchInlineSnapshot(`
+ Object {
+ "_tag": "Right",
+ "right": "bulk_index_succeeded",
+ }
+ `);
+ });
+ it('rejects if there are errors', () => {
+ const newDocs = ([
+ { _source: { title: 'doc 5' } },
+ { _source: { title: 'doc 6' } },
+ { _source: { title: 'doc 7' } },
+ ] as unknown) as SavedObjectsRawDoc[];
+ return expect(
+ bulkOverwriteTransformedDocuments(client, 'existing_index_with_write_block', newDocs)()
+ ).rejects.toMatchObject(expect.anything());
+ });
+ });
+});
diff --git a/src/core/server/saved_objects/migrationsv2/integration_tests/archives/7.3.0_xpack_sample_saved_objects.zip b/src/core/server/saved_objects/migrationsv2/integration_tests/archives/7.3.0_xpack_sample_saved_objects.zip
new file mode 100644
index 0000000000000..5745a3e07d488
Binary files /dev/null and b/src/core/server/saved_objects/migrationsv2/integration_tests/archives/7.3.0_xpack_sample_saved_objects.zip differ
diff --git a/src/core/server/saved_objects/migrationsv2/integration_tests/archives/7.7.2_xpack_100k_obj.zip b/src/core/server/saved_objects/migrationsv2/integration_tests/archives/7.7.2_xpack_100k_obj.zip
new file mode 100644
index 0000000000000..13afaa04b06f9
Binary files /dev/null and b/src/core/server/saved_objects/migrationsv2/integration_tests/archives/7.7.2_xpack_100k_obj.zip differ
diff --git a/src/core/server/saved_objects/migrationsv2/integration_tests/archives/8.0.0_oss_sample_saved_objects.zip b/src/core/server/saved_objects/migrationsv2/integration_tests/archives/8.0.0_oss_sample_saved_objects.zip
new file mode 100644
index 0000000000000..abb8dd2b6d491
Binary files /dev/null and b/src/core/server/saved_objects/migrationsv2/integration_tests/archives/8.0.0_oss_sample_saved_objects.zip differ
diff --git a/src/core/server/saved_objects/migrationsv2/integration_tests/migration.test.ts b/src/core/server/saved_objects/migrationsv2/integration_tests/migration.test.ts
new file mode 100644
index 0000000000000..942021fd1918d
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/integration_tests/migration.test.ts
@@ -0,0 +1,239 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { join } from 'path';
+import Semver from 'semver';
+import { REPO_ROOT } from '@kbn/dev-utils';
+import { Env } from '@kbn/config';
+import { getEnvOptions } from '@kbn/config/target/mocks';
+import * as kbnTestServer from '../../../../test_helpers/kbn_server';
+import { ElasticsearchClient } from '../../../elasticsearch';
+import { SavedObjectsRawDoc } from '../../serialization';
+import { InternalCoreStart } from '../../../internal_types';
+import { Root } from '../../../root';
+
+const kibanaVersion = Env.createDefault(REPO_ROOT, getEnvOptions()).packageInfo.version;
+
+describe('migration v2', () => {
+ let esServer: kbnTestServer.TestElasticsearchUtils;
+ let root: Root;
+ let coreStart: InternalCoreStart;
+ let esClient: ElasticsearchClient;
+
+ const startServers = async ({ dataArchive, oss }: { dataArchive: string; oss: boolean }) => {
+ const { startES } = kbnTestServer.createTestServers({
+ adjustTimeout: (t: number) => jest.setTimeout(t),
+ settings: {
+ es: {
+ license: oss ? 'oss' : 'trial',
+ dataArchive,
+ },
+ },
+ });
+
+ root = kbnTestServer.createRootWithCorePlugins(
+ {
+ migrations: {
+ skip: false,
+ enableV2: true,
+ },
+ logging: {
+ appenders: {
+ file: {
+ kind: 'file',
+ path: join(__dirname, 'migration_test_kibana.log'),
+ layout: {
+ kind: 'json',
+ },
+ },
+ },
+ loggers: [
+ {
+ context: 'root',
+ appenders: ['file'],
+ },
+ ],
+ },
+ },
+ {
+ oss,
+ }
+ );
+
+ const startEsPromise = startES().then((es) => (esServer = es));
+ const startKibanaPromise = root
+ .setup()
+ .then(() => root.start())
+ .then((start) => {
+ coreStart = start;
+ esClient = coreStart.elasticsearch.client.asInternalUser;
+ });
+
+ await Promise.all([startEsPromise, startKibanaPromise]);
+ };
+
+ const getExpectedVersionPerType = () =>
+ coreStart.savedObjects
+ .getTypeRegistry()
+ .getAllTypes()
+ .reduce((versionMap, type) => {
+ if (type.migrations) {
+ const highestVersion = Object.keys(type.migrations).sort(Semver.compare).reverse()[0];
+ return {
+ ...versionMap,
+ [type.name]: highestVersion,
+ };
+ } else {
+ return {
+ ...versionMap,
+ [type.name]: undefined,
+ };
+ }
+ }, {} as Record);
+
+ const assertMigrationVersion = (
+ doc: SavedObjectsRawDoc,
+ expectedVersions: Record
+ ) => {
+ const migrationVersions = doc._source.migrationVersion;
+ const type = doc._source.type;
+ expect(migrationVersions ? migrationVersions[type] : undefined).toEqual(expectedVersions[type]);
+ };
+
+ const stopServers = async () => {
+ if (root) {
+ await root.shutdown();
+ }
+ if (esServer) {
+ await esServer.stop();
+ }
+
+ await new Promise((resolve) => setTimeout(resolve, 10000));
+ };
+
+ describe('migrating from 7.3.0-xpack version', () => {
+ const migratedIndex = `.kibana_${kibanaVersion}_001`;
+
+ beforeAll(async () => {
+ await startServers({
+ oss: false,
+ dataArchive: join(__dirname, 'archives', '7.3.0_xpack_sample_saved_objects.zip'),
+ });
+ });
+
+ afterAll(async () => {
+ await stopServers();
+ });
+
+ it('creates the new index and the correct aliases', async () => {
+ const { body } = await esClient.indices.get(
+ {
+ index: migratedIndex,
+ },
+ { ignore: [404] }
+ );
+
+ const response = body[migratedIndex];
+
+ expect(response).toBeDefined();
+ expect(Object.keys(response.aliases).sort()).toEqual(['.kibana', `.kibana_${kibanaVersion}`]);
+ });
+
+ it('copies all the document of the previous index to the new one', async () => {
+ const migratedIndexResponse = await esClient.count({
+ index: migratedIndex,
+ });
+ const oldIndexResponse = await esClient.count({
+ index: '.kibana_1',
+ });
+
+ // Use a >= comparison since once Kibana has started it might create new
+ // documents like telemetry tasks
+ expect(migratedIndexResponse.body.count).toBeGreaterThanOrEqual(oldIndexResponse.body.count);
+ });
+
+ it('migrates the documents to the highest version', async () => {
+ const expectedVersions = getExpectedVersionPerType();
+ const res = await esClient.search({
+ index: migratedIndex,
+ sort: ['_doc'],
+ size: 10000,
+ });
+ const allDocuments = res.body.hits.hits as SavedObjectsRawDoc[];
+ allDocuments.forEach((doc) => {
+ assertMigrationVersion(doc, expectedVersions);
+ });
+ });
+ });
+
+ describe('migrating from the same Kibana version', () => {
+ const migratedIndex = `.kibana_${kibanaVersion}_001`;
+
+ beforeAll(async () => {
+ await startServers({
+ oss: true,
+ dataArchive: join(__dirname, 'archives', '8.0.0_oss_sample_saved_objects.zip'),
+ });
+ });
+
+ afterAll(async () => {
+ await stopServers();
+ });
+
+ it('creates the new index and the correct aliases', async () => {
+ const { body } = await esClient.indices.get(
+ {
+ index: migratedIndex,
+ },
+ { ignore: [404] }
+ );
+
+ const response = body[migratedIndex];
+
+ expect(response).toBeDefined();
+ expect(Object.keys(response.aliases).sort()).toEqual(['.kibana', `.kibana_${kibanaVersion}`]);
+ });
+
+ it('copies all the document of the previous index to the new one', async () => {
+ const migratedIndexResponse = await esClient.count({
+ index: migratedIndex,
+ });
+ const oldIndexResponse = await esClient.count({
+ index: '.kibana_1',
+ });
+
+ // Use a >= comparison since once Kibana has started it might create new
+ // documents like telemetry tasks
+ expect(migratedIndexResponse.body.count).toBeGreaterThanOrEqual(oldIndexResponse.body.count);
+ });
+
+ it('migrates the documents to the highest version', async () => {
+ const expectedVersions = getExpectedVersionPerType();
+ const res = await esClient.search({
+ index: migratedIndex,
+ sort: ['_doc'],
+ size: 10000,
+ });
+ const allDocuments = res.body.hits.hits as SavedObjectsRawDoc[];
+ allDocuments.forEach((doc) => {
+ assertMigrationVersion(doc, expectedVersions);
+ });
+ });
+ });
+});
diff --git a/src/core/server/saved_objects/migrationsv2/integration_tests/migration_7.7.2_xpack_100k.test.ts b/src/core/server/saved_objects/migrationsv2/integration_tests/migration_7.7.2_xpack_100k.test.ts
new file mode 100644
index 0000000000000..0dc0825b16b8e
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/integration_tests/migration_7.7.2_xpack_100k.test.ts
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { join } from 'path';
+import { REPO_ROOT } from '@kbn/dev-utils';
+import { Env } from '@kbn/config';
+import { getEnvOptions } from '@kbn/config/target/mocks';
+import * as kbnTestServer from '../../../../test_helpers/kbn_server';
+import { ElasticsearchClient } from '../../../elasticsearch';
+import { InternalCoreStart } from '../../../internal_types';
+import { Root } from '../../../root';
+
+const kibanaVersion = Env.createDefault(REPO_ROOT, getEnvOptions()).packageInfo.version;
+
+describe.skip('migration from 7.7.2-xpack with 100k objects', () => {
+ let esServer: kbnTestServer.TestElasticsearchUtils;
+ let root: Root;
+ let coreStart: InternalCoreStart;
+ let esClient: ElasticsearchClient;
+
+ beforeEach(() => {
+ jest.setTimeout(600000);
+ });
+
+ const startServers = async ({ dataArchive, oss }: { dataArchive: string; oss: boolean }) => {
+ const { startES } = kbnTestServer.createTestServers({
+ adjustTimeout: (t: number) => jest.setTimeout(600000),
+ settings: {
+ es: {
+ license: oss ? 'oss' : 'trial',
+ dataArchive,
+ },
+ },
+ });
+
+ root = kbnTestServer.createRootWithCorePlugins(
+ {
+ migrations: {
+ skip: false,
+ enableV2: true,
+ },
+ logging: {
+ appenders: {
+ file: {
+ kind: 'file',
+ path: join(__dirname, 'migration_test_kibana.log'),
+ layout: {
+ kind: 'json',
+ },
+ },
+ },
+ loggers: [
+ {
+ context: 'root',
+ appenders: ['file'],
+ },
+ ],
+ },
+ },
+ {
+ oss,
+ }
+ );
+
+ const startEsPromise = startES().then((es) => (esServer = es));
+ const startKibanaPromise = root
+ .setup()
+ .then(() => root.start())
+ .then((start) => {
+ coreStart = start;
+ esClient = coreStart.elasticsearch.client.asInternalUser;
+ });
+
+ await Promise.all([startEsPromise, startKibanaPromise]);
+ };
+
+ const stopServers = async () => {
+ if (root) {
+ await root.shutdown();
+ }
+ if (esServer) {
+ await esServer.stop();
+ }
+
+ await new Promise((resolve) => setTimeout(resolve, 10000));
+ };
+
+ const migratedIndex = `.kibana_${kibanaVersion}_001`;
+
+ beforeAll(async () => {
+ await startServers({
+ oss: false,
+ dataArchive: join(__dirname, 'archives', '7.7.2_xpack_100k_obj.zip'),
+ });
+ });
+
+ afterAll(async () => {
+ await stopServers();
+ });
+
+ it('copies all the document of the previous index to the new one', async () => {
+ const migratedIndexResponse = await esClient.count({
+ index: migratedIndex,
+ });
+ const oldIndexResponse = await esClient.count({
+ index: '.kibana_1',
+ });
+
+ // Use a >= comparison since once Kibana has started it might create new
+ // documents like telemetry tasks
+ expect(migratedIndexResponse.body.count).toBeGreaterThanOrEqual(oldIndexResponse.body.count);
+ });
+});
diff --git a/src/core/server/saved_objects/migrationsv2/migrations_state_action_machine.test.ts b/src/core/server/saved_objects/migrationsv2/migrations_state_action_machine.test.ts
new file mode 100644
index 0000000000000..6dbb986e868ee
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/migrations_state_action_machine.test.ts
@@ -0,0 +1,512 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { migrationStateActionMachine } from './migrations_state_action_machine';
+import { loggingSystemMock } from '../../mocks';
+import * as Either from 'fp-ts/lib/Either';
+import * as Option from 'fp-ts/lib/Option';
+import { AllControlStates, State } from './types';
+import { createInitialState } from './model';
+import { ResponseError } from '@elastic/elasticsearch/lib/errors';
+import { elasticsearchClientMock } from '../../elasticsearch/client/mocks';
+
+describe('migrationsStateActionMachine', () => {
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
+ const mockLogger = loggingSystemMock.create();
+
+ const initialState = createInitialState({
+ kibanaVersion: '7.11.0',
+ targetMappings: { properties: {} },
+ migrationVersionPerType: {},
+ indexPrefix: '.my-so-index',
+ });
+
+ const next = jest.fn((s: State) => {
+ if (s.controlState === 'DONE' || s.controlState === 'FATAL') {
+ return null;
+ } else {
+ return () => Promise.resolve(Either.right('response'));
+ }
+ });
+
+ // A model that transitions through all the provided states
+ const transitionModel = (states: AllControlStates[]) => {
+ let i = 0;
+ return (s: State, res: Either.Either): State => {
+ if (i < states.length) {
+ const newState = {
+ ...s,
+ controlState: states[i],
+ logs: [...s.logs, { level: 'info', message: `Log from ${states[i]} control state` }],
+ } as State;
+ i++;
+ return newState;
+ } else {
+ throw new Error("states didn't contain a terminal state");
+ }
+ };
+ };
+
+ it('logs state transitions, messages in state.logs and action responses', async () => {
+ await migrationStateActionMachine({
+ initialState,
+ logger: mockLogger.get(),
+ model: transitionModel(['LEGACY_REINDEX', 'LEGACY_DELETE', 'LEGACY_DELETE', 'DONE']),
+ next,
+ });
+ const logs = loggingSystemMock.collect(mockLogger);
+ const doneLog = logs.info.splice(8, 1)[0][0];
+ expect(doneLog).toMatch(/\[.my-so-index\] Migration completed after \d+ms/);
+ expect(logs).toMatchInlineSnapshot(`
+ Object {
+ "debug": Array [
+ Array [
+ "[.my-so-index] INIT RESPONSE",
+ Object {
+ "_tag": "Right",
+ "right": "response",
+ },
+ ],
+ Array [
+ "[.my-so-index] LEGACY_REINDEX RESPONSE",
+ Object {
+ "_tag": "Right",
+ "right": "response",
+ },
+ ],
+ Array [
+ "[.my-so-index] LEGACY_DELETE RESPONSE",
+ Object {
+ "_tag": "Right",
+ "right": "response",
+ },
+ ],
+ Array [
+ "[.my-so-index] LEGACY_DELETE RESPONSE",
+ Object {
+ "_tag": "Right",
+ "right": "response",
+ },
+ ],
+ ],
+ "error": Array [],
+ "fatal": Array [],
+ "info": Array [
+ Array [
+ "[.my-so-index] Log from LEGACY_REINDEX control state",
+ ],
+ Array [
+ "[.my-so-index] INIT -> LEGACY_REINDEX",
+ ],
+ Array [
+ "[.my-so-index] Log from LEGACY_DELETE control state",
+ ],
+ Array [
+ "[.my-so-index] LEGACY_REINDEX -> LEGACY_DELETE",
+ ],
+ Array [
+ "[.my-so-index] Log from LEGACY_DELETE control state",
+ ],
+ Array [
+ "[.my-so-index] LEGACY_DELETE -> LEGACY_DELETE",
+ ],
+ Array [
+ "[.my-so-index] Log from DONE control state",
+ ],
+ Array [
+ "[.my-so-index] LEGACY_DELETE -> DONE",
+ ],
+ ],
+ "log": Array [],
+ "trace": Array [],
+ "warn": Array [],
+ }
+ `);
+ });
+ it('resolves when reaching the DONE state', () => {
+ return expect(
+ migrationStateActionMachine({
+ initialState,
+ logger: mockLogger.get(),
+ model: transitionModel(['LEGACY_REINDEX', 'LEGACY_DELETE', 'LEGACY_DELETE', 'DONE']),
+ next,
+ })
+ ).resolves.toEqual(expect.anything());
+ });
+ it('resolves with migrated status if some sourceIndex in the DONE state', () => {
+ return expect(
+ migrationStateActionMachine({
+ initialState: { ...initialState, ...{ sourceIndex: Option.some('source-index') } },
+ logger: mockLogger.get(),
+ model: transitionModel(['LEGACY_REINDEX', 'LEGACY_DELETE', 'LEGACY_DELETE', 'DONE']),
+ next,
+ })
+ ).resolves.toEqual(expect.objectContaining({ status: 'migrated' }));
+ });
+ it('resolves with patched status if none sourceIndex in the DONE state', () => {
+ return expect(
+ migrationStateActionMachine({
+ initialState: { ...initialState, ...{ sourceIndex: Option.none } },
+ logger: mockLogger.get(),
+ model: transitionModel(['LEGACY_REINDEX', 'LEGACY_DELETE', 'LEGACY_DELETE', 'DONE']),
+ next,
+ })
+ ).resolves.toEqual(expect.objectContaining({ status: 'patched' }));
+ });
+ it('rejects with error message when reaching the FATAL state', () => {
+ return expect(
+ migrationStateActionMachine({
+ initialState: { ...initialState, reason: 'the fatal reason' } as State,
+ logger: mockLogger.get(),
+ model: transitionModel(['LEGACY_REINDEX', 'LEGACY_DELETE', 'FATAL']),
+ next,
+ })
+ ).rejects.toMatchInlineSnapshot(
+ `[Error: Unable to complete saved object migrations for the [.my-so-index] index: the fatal reason]`
+ );
+ });
+ it('logs all state transitions and action responses when reaching the FATAL state', async () => {
+ await migrationStateActionMachine({
+ initialState: {
+ ...initialState,
+ reason: 'the fatal reason',
+ outdatedDocuments: [{ _id: '1234', password: 'sensitive password' }],
+ } as State,
+ logger: mockLogger.get(),
+ model: transitionModel(['LEGACY_DELETE', 'FATAL']),
+ next,
+ }).catch((err) => err);
+ // Ignore the first 4 log entries that come from our model
+ const executionLogLogs = loggingSystemMock.collect(mockLogger).info.slice(4);
+ expect(executionLogLogs).toMatchInlineSnapshot(`
+ Array [
+ Array [
+ "[.my-so-index] INIT RESPONSE",
+ Object {
+ "_tag": "Right",
+ "right": "response",
+ },
+ ],
+ Array [
+ "[.my-so-index] INIT -> LEGACY_DELETE",
+ Object {
+ "controlState": "LEGACY_DELETE",
+ "currentAlias": ".my-so-index",
+ "indexPrefix": ".my-so-index",
+ "kibanaVersion": "7.11.0",
+ "legacyIndex": ".my-so-index",
+ "logs": Array [
+ Object {
+ "level": "info",
+ "message": "Log from LEGACY_DELETE control state",
+ },
+ ],
+ "outdatedDocuments": Array [
+ "1234",
+ ],
+ "outdatedDocumentsQuery": Object {
+ "bool": Object {
+ "should": Array [],
+ },
+ },
+ "preMigrationScript": Object {
+ "_tag": "None",
+ },
+ "reason": "the fatal reason",
+ "retryCount": 0,
+ "retryDelay": 0,
+ "targetIndexMappings": Object {
+ "properties": Object {},
+ },
+ "tempIndex": ".my-so-index_7.11.0_reindex_temp",
+ "tempIndexMappings": Object {
+ "dynamic": false,
+ "properties": Object {
+ "migrationVersion": Object {
+ "dynamic": "true",
+ "type": "object",
+ },
+ "type": Object {
+ "type": "keyword",
+ },
+ },
+ },
+ "versionAlias": ".my-so-index_7.11.0",
+ "versionIndex": ".my-so-index_7.11.0_001",
+ },
+ ],
+ Array [
+ "[.my-so-index] LEGACY_DELETE RESPONSE",
+ Object {
+ "_tag": "Right",
+ "right": "response",
+ },
+ ],
+ Array [
+ "[.my-so-index] LEGACY_DELETE -> FATAL",
+ Object {
+ "controlState": "FATAL",
+ "currentAlias": ".my-so-index",
+ "indexPrefix": ".my-so-index",
+ "kibanaVersion": "7.11.0",
+ "legacyIndex": ".my-so-index",
+ "logs": Array [
+ Object {
+ "level": "info",
+ "message": "Log from LEGACY_DELETE control state",
+ },
+ Object {
+ "level": "info",
+ "message": "Log from FATAL control state",
+ },
+ ],
+ "outdatedDocuments": Array [
+ "1234",
+ ],
+ "outdatedDocumentsQuery": Object {
+ "bool": Object {
+ "should": Array [],
+ },
+ },
+ "preMigrationScript": Object {
+ "_tag": "None",
+ },
+ "reason": "the fatal reason",
+ "retryCount": 0,
+ "retryDelay": 0,
+ "targetIndexMappings": Object {
+ "properties": Object {},
+ },
+ "tempIndex": ".my-so-index_7.11.0_reindex_temp",
+ "tempIndexMappings": Object {
+ "dynamic": false,
+ "properties": Object {
+ "migrationVersion": Object {
+ "dynamic": "true",
+ "type": "object",
+ },
+ "type": Object {
+ "type": "keyword",
+ },
+ },
+ },
+ "versionAlias": ".my-so-index_7.11.0",
+ "versionIndex": ".my-so-index_7.11.0_001",
+ },
+ ],
+ ]
+ `);
+ });
+ it('rejects and logs the error when an action throws with an ResponseError', async () => {
+ await expect(
+ migrationStateActionMachine({
+ initialState: { ...initialState, reason: 'the fatal reason' } as State,
+ logger: mockLogger.get(),
+ model: transitionModel(['LEGACY_REINDEX', 'LEGACY_DELETE', 'FATAL']),
+ next: () => {
+ throw new ResponseError(
+ elasticsearchClientMock.createApiResponse({
+ body: { error: { type: 'snapshot_in_progress_exception', reason: 'error reason' } },
+ })
+ );
+ },
+ })
+ ).rejects.toMatchInlineSnapshot(
+ `[Error: Unable to complete saved object migrations for the [.my-so-index] index. Please check the health of your Elasticsearch cluster and try again. ResponseError: snapshot_in_progress_exception]`
+ );
+ expect(loggingSystemMock.collect(mockLogger)).toMatchInlineSnapshot(`
+ Object {
+ "debug": Array [],
+ "error": Array [
+ Array [
+ "[.my-so-index] [snapshot_in_progress_exception]: error reason",
+ ],
+ Array [
+ "[.my-so-index] migration failed, dumping execution log:",
+ ],
+ ],
+ "fatal": Array [],
+ "info": Array [],
+ "log": Array [],
+ "trace": Array [],
+ "warn": Array [],
+ }
+ `);
+ });
+ it('rejects and logs the error when an action throws', async () => {
+ await expect(
+ migrationStateActionMachine({
+ initialState: { ...initialState, reason: 'the fatal reason' } as State,
+ logger: mockLogger.get(),
+ model: transitionModel(['LEGACY_REINDEX', 'LEGACY_DELETE', 'FATAL']),
+ next: () => {
+ throw new Error('this action throws');
+ },
+ })
+ ).rejects.toMatchInlineSnapshot(
+ `[Error: Unable to complete saved object migrations for the [.my-so-index] index. Please check the health of your Elasticsearch cluster and try again. Error: this action throws]`
+ );
+ expect(loggingSystemMock.collect(mockLogger)).toMatchInlineSnapshot(`
+ Object {
+ "debug": Array [],
+ "error": Array [
+ Array [
+ [Error: this action throws],
+ ],
+ Array [
+ "[.my-so-index] migration failed, dumping execution log:",
+ ],
+ ],
+ "fatal": Array [],
+ "info": Array [],
+ "log": Array [],
+ "trace": Array [],
+ "warn": Array [],
+ }
+ `);
+ });
+ it('logs all state transitions and action responses when an action throws', async () => {
+ try {
+ await migrationStateActionMachine({
+ initialState: { ...initialState, reason: 'the fatal reason' } as State,
+ logger: mockLogger.get(),
+ model: transitionModel(['LEGACY_REINDEX', 'LEGACY_DELETE', 'FATAL']),
+ next: (state) => {
+ if (state.controlState === 'LEGACY_DELETE') throw new Error('this action throws');
+ return () => Promise.resolve('hello');
+ },
+ });
+ } catch (e) {
+ /** ignore */
+ }
+ // Ignore the first 4 log entries that come from our model
+ const executionLogLogs = loggingSystemMock.collect(mockLogger).info.slice(4);
+ expect(executionLogLogs).toMatchInlineSnapshot(`
+ Array [
+ Array [
+ "[.my-so-index] INIT RESPONSE",
+ "hello",
+ ],
+ Array [
+ "[.my-so-index] INIT -> LEGACY_REINDEX",
+ Object {
+ "controlState": "LEGACY_REINDEX",
+ "currentAlias": ".my-so-index",
+ "indexPrefix": ".my-so-index",
+ "kibanaVersion": "7.11.0",
+ "legacyIndex": ".my-so-index",
+ "logs": Array [
+ Object {
+ "level": "info",
+ "message": "Log from LEGACY_REINDEX control state",
+ },
+ ],
+ "outdatedDocuments": Array [],
+ "outdatedDocumentsQuery": Object {
+ "bool": Object {
+ "should": Array [],
+ },
+ },
+ "preMigrationScript": Object {
+ "_tag": "None",
+ },
+ "reason": "the fatal reason",
+ "retryCount": 0,
+ "retryDelay": 0,
+ "targetIndexMappings": Object {
+ "properties": Object {},
+ },
+ "tempIndex": ".my-so-index_7.11.0_reindex_temp",
+ "tempIndexMappings": Object {
+ "dynamic": false,
+ "properties": Object {
+ "migrationVersion": Object {
+ "dynamic": "true",
+ "type": "object",
+ },
+ "type": Object {
+ "type": "keyword",
+ },
+ },
+ },
+ "versionAlias": ".my-so-index_7.11.0",
+ "versionIndex": ".my-so-index_7.11.0_001",
+ },
+ ],
+ Array [
+ "[.my-so-index] LEGACY_REINDEX RESPONSE",
+ "hello",
+ ],
+ Array [
+ "[.my-so-index] LEGACY_REINDEX -> LEGACY_DELETE",
+ Object {
+ "controlState": "LEGACY_DELETE",
+ "currentAlias": ".my-so-index",
+ "indexPrefix": ".my-so-index",
+ "kibanaVersion": "7.11.0",
+ "legacyIndex": ".my-so-index",
+ "logs": Array [
+ Object {
+ "level": "info",
+ "message": "Log from LEGACY_REINDEX control state",
+ },
+ Object {
+ "level": "info",
+ "message": "Log from LEGACY_DELETE control state",
+ },
+ ],
+ "outdatedDocuments": Array [],
+ "outdatedDocumentsQuery": Object {
+ "bool": Object {
+ "should": Array [],
+ },
+ },
+ "preMigrationScript": Object {
+ "_tag": "None",
+ },
+ "reason": "the fatal reason",
+ "retryCount": 0,
+ "retryDelay": 0,
+ "targetIndexMappings": Object {
+ "properties": Object {},
+ },
+ "tempIndex": ".my-so-index_7.11.0_reindex_temp",
+ "tempIndexMappings": Object {
+ "dynamic": false,
+ "properties": Object {
+ "migrationVersion": Object {
+ "dynamic": "true",
+ "type": "object",
+ },
+ "type": Object {
+ "type": "keyword",
+ },
+ },
+ },
+ "versionAlias": ".my-so-index_7.11.0",
+ "versionIndex": ".my-so-index_7.11.0_001",
+ },
+ ],
+ ]
+ `);
+ });
+});
diff --git a/src/core/server/saved_objects/migrationsv2/migrations_state_action_machine.ts b/src/core/server/saved_objects/migrationsv2/migrations_state_action_machine.ts
new file mode 100644
index 0000000000000..d11b41e357e5a
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/migrations_state_action_machine.ts
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { errors as EsErrors } from '@elastic/elasticsearch';
+import * as Option from 'fp-ts/lib/Option';
+import { performance } from 'perf_hooks';
+import { Logger, LogMeta } from '../../logging';
+import { Model, Next, stateActionMachine } from './state_action_machine';
+import { State } from './types';
+
+/**
+ * In-memory trail of every state transition and action response observed
+ * while the migration state-action machine runs. Only dumped to the logs
+ * (via dumpExecutionLog) when the migration fails, to aid debugging.
+ */
+type ExecutionLog = Array<
+  | {
+      type: 'transition';
+      prevControlState: State['controlState'];
+      controlState: State['controlState'];
+      state: State;
+    }
+  | {
+      type: 'response';
+      controlState: State['controlState'];
+      res: unknown;
+    }
+>;
+
+const logStateTransition = (
+  logger: Logger,
+  logMessagePrefix: string,
+  oldState: State,
+  newState: State
+) => {
+  // Flush any entries the model appended to state.logs during this step.
+  // When no entries were appended the slice is empty and nothing is logged.
+  const appendedLogs = newState.logs.slice(oldState.logs.length);
+  for (const entry of appendedLogs) {
+    logger[entry.level](logMessagePrefix + entry.message);
+  }
+
+  logger.info(logMessagePrefix + `${oldState.controlState} -> ${newState.controlState}`);
+};
+
+const logActionResponse = (
+  logger: Logger,
+  logMessagePrefix: string,
+  state: State,
+  res: unknown
+) => {
+  // Debug-log the raw response an action produced for the current control state.
+  const message = logMessagePrefix + `${state.controlState} RESPONSE`;
+  logger.debug(message, res as LogMeta);
+};
+
+const dumpExecutionLog = (logger: Logger, logMessagePrefix: string, executionLog: ExecutionLog) => {
+  logger.error(logMessagePrefix + 'migration failed, dumping execution log:');
+  // Replay the recorded transitions and responses in the order they happened.
+  for (const entry of executionLog) {
+    if (entry.type === 'transition') {
+      logger.info(
+        logMessagePrefix + `${entry.prevControlState} -> ${entry.controlState}`,
+        entry.state
+      );
+    } else if (entry.type === 'response') {
+      logger.info(logMessagePrefix + `${entry.controlState} RESPONSE`, entry.res as LogMeta);
+    }
+  }
+};
+
+/**
+ * A specialized migrations-specific state-action machine that:
+ *  - logs messages in state.logs
+ *  - logs state transitions
+ *  - logs action responses
+ *  - resolves if the final state is DONE
+ *  - rejects if the final state is FATAL
+ *  - catches and logs exceptions and then rejects with a migrations specific error
+ */
+export async function migrationStateActionMachine({
+  initialState,
+  logger,
+  next,
+  model,
+}: {
+  initialState: State;
+  logger: Logger;
+  next: Next;
+  model: Model;
+}) {
+  const executionLog: ExecutionLog = [];
+  // Fix: was misspelled `starteTime`.
+  const startTime = performance.now();
+  // Since saved object index names usually start with a `.` and can be
+  // configured by users to include several `.`'s we can't use a logger tag to
+  // indicate which messages come from which index upgrade.
+  const logMessagePrefix = `[${initialState.indexPrefix}] `;
+  try {
+    const finalState = await stateActionMachine(
+      initialState,
+      (state) => next(state),
+      (state, res) => {
+        executionLog.push({
+          type: 'response',
+          res,
+          controlState: state.controlState,
+        });
+        logActionResponse(logger, logMessagePrefix, state, res);
+        const newState = model(state, res);
+        // Redact the state to reduce the memory consumption and so that we
+        // don't log sensitive information inside documents by only keeping
+        // the _id's of outdatedDocuments
+        const redactedNewState = {
+          ...newState,
+          // @ts-expect-error outdatedDocuments don't exist in all states
+          ...{ outdatedDocuments: (newState.outdatedDocuments ?? []).map((doc) => doc._id) },
+        };
+        executionLog.push({
+          type: 'transition',
+          state: redactedNewState,
+          controlState: newState.controlState,
+          prevControlState: state.controlState,
+        });
+        logStateTransition(logger, logMessagePrefix, state, redactedNewState as State);
+        return newState;
+      }
+    );
+
+    const elapsedMs = performance.now() - startTime;
+    if (finalState.controlState === 'DONE') {
+      logger.info(logMessagePrefix + `Migration completed after ${Math.round(elapsedMs)}ms`);
+      if (finalState.sourceIndex != null && Option.isSome(finalState.sourceIndex)) {
+        // A source index existed, so documents were migrated into a new index.
+        return {
+          status: 'migrated' as const,
+          destIndex: finalState.targetIndex,
+          sourceIndex: finalState.sourceIndex.value,
+          elapsedMs,
+        };
+      } else {
+        // No source index: the existing index was patched in place.
+        return {
+          status: 'patched' as const,
+          destIndex: finalState.targetIndex,
+          elapsedMs,
+        };
+      }
+    } else if (finalState.controlState === 'FATAL') {
+      dumpExecutionLog(logger, logMessagePrefix, executionLog);
+      return Promise.reject(
+        new Error(
+          `Unable to complete saved object migrations for the [${initialState.indexPrefix}] index: ` +
+            finalState.reason
+        )
+      );
+    } else {
+      throw new Error('Invalid terminating control state');
+    }
+  } catch (e) {
+    if (e instanceof EsErrors.ResponseError) {
+      logger.error(
+        logMessagePrefix + `[${e.body?.error?.type}]: ${e.body?.error?.reason ?? e.message}`
+      );
+    } else {
+      logger.error(e);
+    }
+    dumpExecutionLog(logger, logMessagePrefix, executionLog);
+    throw new Error(
+      `Unable to complete saved object migrations for the [${initialState.indexPrefix}] index. Please check the health of your Elasticsearch cluster and try again. ${e}`
+    );
+  }
+}
diff --git a/src/core/server/saved_objects/migrationsv2/model.test.ts b/src/core/server/saved_objects/migrationsv2/model.test.ts
new file mode 100644
index 0000000000000..ab7c47389f539
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/model.test.ts
@@ -0,0 +1,1137 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import * as Either from 'fp-ts/lib/Either';
+import * as Option from 'fp-ts/lib/Option';
+import {
+ FatalState,
+ State,
+ LegacySetWriteBlockState,
+ SetSourceWriteBlockState,
+ LegacyCreateReindexTargetState,
+ LegacyReindexState,
+ LegacyReindexWaitForTaskState,
+ LegacyDeleteState,
+ ReindexSourceToTempState,
+ UpdateTargetMappingsState,
+ UpdateTargetMappingsWaitForTaskState,
+ OutdatedDocumentsSearch,
+ OutdatedDocumentsTransform,
+ MarkVersionIndexReady,
+ BaseState,
+ CreateReindexTempState,
+ ReindexSourceToTempWaitForTaskState,
+ MarkVersionIndexReadyConflict,
+ CreateNewTargetState,
+ CloneTempToSource,
+ SetTempWriteBlock,
+} from './types';
+import { SavedObjectsRawDoc } from '..';
+import { AliasAction, RetryableEsClientError } from './actions';
+import { createInitialState, model } from './model';
+import { ResponseType } from './next';
+
+describe('migrations v2 model', () => {
+ const baseState: BaseState = {
+ controlState: '',
+ legacyIndex: '.kibana',
+ kibanaVersion: '7.11.0',
+ logs: [],
+ retryCount: 0,
+ retryDelay: 0,
+ indexPrefix: '.kibana',
+ outdatedDocumentsQuery: {},
+ targetIndexMappings: {
+ properties: {
+ new_saved_object_type: {
+ properties: {
+ value: { type: 'text' },
+ },
+ },
+ },
+ _meta: {
+ migrationMappingPropertyHashes: {
+ new_saved_object_type: '4a11183eee21e6fbad864f7a30b39ad0',
+ },
+ },
+ },
+ tempIndexMappings: { properties: {} },
+ preMigrationScript: Option.none,
+ currentAlias: '.kibana',
+ versionAlias: '.kibana_7.11.0',
+ versionIndex: '.kibana_7.11.0_001',
+ tempIndex: '.kibana_7.11.0_reindex_temp',
+ };
+
+ describe('exponential retry delays for retryable_es_client_error', () => {
+ let state: State = {
+ ...baseState,
+ controlState: 'INIT',
+ };
+ const retryableError: RetryableEsClientError = {
+ type: 'retryable_es_client_error',
+ message: 'snapshot_in_progress_exception',
+ };
+ test('sets retryCount, exponential retryDelay if an action fails with a retryable_es_client_error', () => {
+ const states = new Array(10).fill(1).map(() => {
+ state = model(state, Either.left(retryableError));
+ return state;
+ });
+ const retryState = states.map(({ retryCount, retryDelay }) => ({ retryCount, retryDelay }));
+ expect(retryState).toMatchInlineSnapshot(`
+ Array [
+ Object {
+ "retryCount": 1,
+ "retryDelay": 2000,
+ },
+ Object {
+ "retryCount": 2,
+ "retryDelay": 4000,
+ },
+ Object {
+ "retryCount": 3,
+ "retryDelay": 8000,
+ },
+ Object {
+ "retryCount": 4,
+ "retryDelay": 16000,
+ },
+ Object {
+ "retryCount": 5,
+ "retryDelay": 32000,
+ },
+ Object {
+ "retryCount": 6,
+ "retryDelay": 64000,
+ },
+ Object {
+ "retryCount": 7,
+ "retryDelay": 64000,
+ },
+ Object {
+ "retryCount": 8,
+ "retryDelay": 64000,
+ },
+ Object {
+ "retryCount": 9,
+ "retryDelay": 64000,
+ },
+ Object {
+ "retryCount": 10,
+ "retryDelay": 64000,
+ },
+ ]
+ `);
+ });
+
+ test('resets retryCount, retryDelay when an action succeeds', () => {
+ const res: ResponseType<'INIT'> = Either.right({
+ '.kibana_7.11.0_001': {
+ aliases: {
+ '.kibana': {},
+ '.kibana_7.11.0': {},
+ },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ });
+ const newState = model({ ...state, ...{ retryCount: 5, retryDelay: 32000 } }, res);
+
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+
+ test('resets retryCount, retryDelay when an action fails with a non-retryable error', () => {
+ const legacyReindexState = {
+ ...state,
+ ...{ controlState: 'LEGACY_REINDEX_WAIT_FOR_TASK', retryCount: 5, retryDelay: 32000 },
+ };
+ const res: ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'> = Either.left({
+ type: 'target_index_had_write_block',
+ });
+ const newState = model(legacyReindexState as State, res);
+
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+
+ test('terminates to FATAL after 10 retries', () => {
+ const newState = model(
+ { ...state, ...{ retryCount: 10, retryDelay: 64000 } },
+ Either.left(retryableError)
+ ) as FatalState;
+
+ expect(newState.controlState).toEqual('FATAL');
+ expect(newState.reason).toMatchInlineSnapshot(
+ `"Unable to complete the INIT step after 10 attempts, terminating."`
+ );
+ });
+ });
+
+ describe('model transitions from', () => {
+ describe('INIT', () => {
+ const initState: State = {
+ ...baseState,
+ controlState: 'INIT',
+ currentAlias: '.kibana',
+ versionAlias: '.kibana_7.11.0',
+ versionIndex: '.kibana_7.11.0_001',
+ };
+ test('INIT -> OUTDATED_DOCUMENTS_SEARCH if .kibana is already pointing to the target index', () => {
+ const res: ResponseType<'INIT'> = Either.right({
+ '.kibana_7.11.0_001': {
+ aliases: {
+ '.kibana': {},
+ '.kibana_7.11.0': {},
+ },
+ mappings: {
+ properties: {
+ disabled_saved_object_type: {
+ properties: {
+ value: { type: 'keyword' },
+ },
+ },
+ },
+ _meta: {
+ migrationMappingPropertyHashes: {
+ disabled_saved_object_type: '7997cf5a56cc02bdc9c93361bde732b0',
+ },
+ },
+ },
+ settings: {},
+ },
+ });
+ const newState = model(initState, res);
+
+ expect(newState.controlState).toEqual('OUTDATED_DOCUMENTS_SEARCH');
+ expect(newState.targetIndexMappings).toMatchInlineSnapshot(`
+ Object {
+ "_meta": Object {
+ "migrationMappingPropertyHashes": Object {
+ "new_saved_object_type": "4a11183eee21e6fbad864f7a30b39ad0",
+ },
+ },
+ "properties": Object {
+ "disabled_saved_object_type": Object {
+ "dynamic": false,
+ "properties": Object {},
+ },
+ "new_saved_object_type": Object {
+ "properties": Object {
+ "value": Object {
+ "type": "text",
+ },
+ },
+ },
+ },
+ }
+ `);
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test("INIT -> FATAL when .kibana points to newer version's index", () => {
+ const res: ResponseType<'INIT'> = Either.right({
+ '.kibana_7.12.0_001': {
+ aliases: {
+ '.kibana': {},
+ '.kibana_7.12.0': {},
+ },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ '.kibana_7.11.0_001': {
+ aliases: { '.kibana_7.11.0': {} },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ });
+ const newState = model(initState, res) as FatalState;
+
+ expect(newState.controlState).toEqual('FATAL');
+ expect(newState.reason).toMatchInlineSnapshot(
+ `"The .kibana alias is pointing to a newer version of Kibana: v7.12.0"`
+ );
+ });
+ test('INIT -> SET_SOURCE_WRITE_BLOCK when .kibana points to an index with an invalid version', () => {
+ // If users tamper with our index version naming scheme we can no
+ // longer accurately detect a newer version. Older Kibana versions
+ // will have indices like `.kibana_10` and users might choose an
+ // invalid name when restoring from a snapshot. So we try to be
+ // lenient and assume it's an older index and perform a migration.
+ // If the tampered index belonged to a newer version the migration
+ // will fail when we start transforming documents.
+ const res: ResponseType<'INIT'> = Either.right({
+ '.kibana_7.invalid.0_001': {
+ aliases: {
+ '.kibana': {},
+ '.kibana_7.12.0': {},
+ },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ '.kibana_7.11.0_001': {
+ aliases: { '.kibana_7.11.0': {} },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ });
+ const newState = model(initState, res) as FatalState;
+
+ expect(newState.controlState).toEqual('SET_SOURCE_WRITE_BLOCK');
+ expect(newState).toMatchObject({
+ controlState: 'SET_SOURCE_WRITE_BLOCK',
+ sourceIndex: Option.some('.kibana_7.invalid.0_001'),
+ targetIndex: '.kibana_7.11.0_001',
+ });
+ });
+ test('INIT -> SET_SOURCE_WRITE_BLOCK when migrating from a v2 migrations index (>= 7.11.0)', () => {
+ const res: ResponseType<'INIT'> = Either.right({
+ '.kibana_7.11.0_001': {
+ aliases: { '.kibana': {}, '.kibana_7.11.0': {} },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ '.kibana_3': {
+ aliases: {},
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ });
+ const newState = model(
+ {
+ ...initState,
+ ...{
+ kibanaVersion: '7.12.0',
+ versionAlias: '.kibana_7.12.0',
+ versionIndex: '.kibana_7.12.0_001',
+ },
+ },
+ res
+ );
+
+ expect(newState).toMatchObject({
+ controlState: 'SET_SOURCE_WRITE_BLOCK',
+ sourceIndex: Option.some('.kibana_7.11.0_001'),
+ targetIndex: '.kibana_7.12.0_001',
+ });
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('INIT -> SET_SOURCE_WRITE_BLOCK when migrating from a v1 migrations index (>= 6.5 < 7.11.0)', () => {
+ const res: ResponseType<'INIT'> = Either.right({
+ '.kibana_3': {
+ aliases: {
+ '.kibana': {},
+ },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ });
+ const newState = model(initState, res);
+
+ expect(newState).toMatchObject({
+ controlState: 'SET_SOURCE_WRITE_BLOCK',
+ sourceIndex: Option.some('.kibana_3'),
+ targetIndex: '.kibana_7.11.0_001',
+ });
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('INIT -> LEGACY_SET_WRITE_BLOCK when migrating from a legacy index (>= 6.0.0 < 6.5)', () => {
+ const res: ResponseType<'INIT'> = Either.right({
+ '.kibana': {
+ aliases: {},
+ mappings: { properties: {}, _meta: {} },
+ settings: {},
+ },
+ });
+ const newState = model(initState, res);
+
+ expect(newState).toMatchObject({
+ controlState: 'LEGACY_SET_WRITE_BLOCK',
+ sourceIndex: Option.some('.kibana_pre6.5.0_001'),
+ targetIndex: '.kibana_7.11.0_001',
+ });
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('INIT -> SET_SOURCE_WRITE_BLOCK when migrating from a custom kibana.index name (>= 6.5 < 7.11.0)', () => {
+ const res: ResponseType<'INIT'> = Either.right({
+ 'my-saved-objects_3': {
+ aliases: {
+ 'my-saved-objects': {},
+ },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ });
+ const newState = model(
+ {
+ ...baseState,
+ controlState: 'INIT',
+ currentAlias: 'my-saved-objects',
+ versionAlias: 'my-saved-objects_7.11.0',
+ versionIndex: 'my-saved-objects_7.11.0_001',
+ },
+ res
+ );
+
+ expect(newState).toMatchObject({
+ controlState: 'SET_SOURCE_WRITE_BLOCK',
+ sourceIndex: Option.some('my-saved-objects_3'),
+ targetIndex: 'my-saved-objects_7.11.0_001',
+ });
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('INIT -> SET_SOURCE_WRITE_BLOCK when migrating from a custom kibana.index v2 migrations index (>= 7.11.0)', () => {
+ const res: ResponseType<'INIT'> = Either.right({
+ 'my-saved-objects_7.11.0': {
+ aliases: {
+ 'my-saved-objects': {},
+ },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ });
+ const newState = model(
+ {
+ ...baseState,
+ controlState: 'INIT',
+ kibanaVersion: '7.12.0',
+ currentAlias: 'my-saved-objects',
+ versionAlias: 'my-saved-objects_7.12.0',
+ versionIndex: 'my-saved-objects_7.12.0_001',
+ },
+ res
+ );
+
+ expect(newState).toMatchObject({
+ controlState: 'SET_SOURCE_WRITE_BLOCK',
+ sourceIndex: Option.some('my-saved-objects_7.11.0'),
+ targetIndex: 'my-saved-objects_7.12.0_001',
+ });
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('INIT -> CREATE_NEW_TARGET when no indices/aliases exist', () => {
+ const res: ResponseType<'INIT'> = Either.right({});
+ const newState = model(initState, res);
+
+ expect(newState).toMatchObject({
+ controlState: 'CREATE_NEW_TARGET',
+ sourceIndex: Option.none,
+ targetIndex: '.kibana_7.11.0_001',
+ });
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('LEGACY_SET_WRITE_BLOCK', () => {
+ const legacySetWriteBlockState: LegacySetWriteBlockState = {
+ ...baseState,
+ controlState: 'LEGACY_SET_WRITE_BLOCK',
+ versionIndexReadyActions: Option.none,
+ sourceIndex: Option.some('.kibana') as Option.Some,
+ targetIndex: '.kibana_7.11.0_001',
+ legacyReindexTargetMappings: { properties: {} },
+ legacyPreMigrationDoneActions: [],
+ legacyIndex: '',
+ };
+ test('LEGACY_SET_WRITE_BLOCK -> LEGACY_SET_WRITE_BLOCK if action fails with set_write_block_failed', () => {
+ const res: ResponseType<'LEGACY_SET_WRITE_BLOCK'> = Either.left({
+ type: 'retryable_es_client_error',
+ message: 'set_write_block_failed',
+ });
+ const newState = model(legacySetWriteBlockState, res);
+ expect(newState.controlState).toEqual('LEGACY_SET_WRITE_BLOCK');
+ expect(newState.retryCount).toEqual(1);
+ expect(newState.retryDelay).toEqual(2000);
+ });
+ test('LEGACY_SET_WRITE_BLOCK -> LEGACY_CREATE_REINDEX_TARGET if action fails with index_not_found_exception', () => {
+ const res: ResponseType<'LEGACY_SET_WRITE_BLOCK'> = Either.left({
+ type: 'index_not_found_exception',
+ });
+ const newState = model(legacySetWriteBlockState, res);
+ expect(newState.controlState).toEqual('LEGACY_CREATE_REINDEX_TARGET');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('LEGACY_SET_WRITE_BLOCK -> LEGACY_CREATE_REINDEX_TARGET if action succeeds with set_write_block_succeeded', () => {
+ const res: ResponseType<'LEGACY_SET_WRITE_BLOCK'> = Either.right(
+ 'set_write_block_succeeded'
+ );
+ const newState = model(legacySetWriteBlockState, res);
+ expect(newState.controlState).toEqual('LEGACY_CREATE_REINDEX_TARGET');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+  describe('LEGACY_CREATE_REINDEX_TARGET', () => {
+    const legacyCreateReindexTargetState: LegacyCreateReindexTargetState = {
+      ...baseState,
+      controlState: 'LEGACY_CREATE_REINDEX_TARGET',
+      versionIndexReadyActions: Option.none,
+      sourceIndex: Option.some('.kibana') as Option.Some,
+      targetIndex: '.kibana_7.11.0_001',
+      legacyReindexTargetMappings: { properties: {} },
+      legacyPreMigrationDoneActions: [],
+      legacyIndex: '',
+    };
+    test('LEGACY_CREATE_REINDEX_TARGET -> LEGACY_REINDEX', () => {
+      const res: ResponseType<'LEGACY_CREATE_REINDEX_TARGET'> = Either.right(
+        'create_index_succeeded'
+      );
+      const newState = model(legacyCreateReindexTargetState, res);
+      expect(newState.controlState).toEqual('LEGACY_REINDEX');
+      expect(newState.retryCount).toEqual(0);
+      expect(newState.retryDelay).toEqual(0);
+    });
+    // The createIndex action called by LEGACY_CREATE_REINDEX_TARGET never
+    // returns a left, it will always succeed or timeout. Since timeout
+    // failures are always retried we don't explicitly test this logic
+  });
+ describe('LEGACY_REINDEX', () => {
+ const legacyReindexState: LegacyReindexState = {
+ ...baseState,
+ controlState: 'LEGACY_REINDEX',
+ versionIndexReadyActions: Option.none,
+ sourceIndex: Option.some('.kibana') as Option.Some,
+ targetIndex: '.kibana_7.11.0_001',
+ legacyReindexTargetMappings: { properties: {} },
+ legacyPreMigrationDoneActions: [],
+ legacyIndex: '',
+ };
+ test('LEGACY_REINDEX -> LEGACY_REINDEX_WAIT_FOR_TASK', () => {
+ const res: ResponseType<'LEGACY_REINDEX'> = Either.right({ taskId: 'task id' });
+ const newState = model(legacyReindexState, res);
+ expect(newState.controlState).toEqual('LEGACY_REINDEX_WAIT_FOR_TASK');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('LEGACY_REINDEX_WAIT_FOR_TASK', () => {
+ const legacyReindexWaitForTaskState: LegacyReindexWaitForTaskState = {
+ ...baseState,
+ controlState: 'LEGACY_REINDEX_WAIT_FOR_TASK',
+ versionIndexReadyActions: Option.none,
+ sourceIndex: Option.some('source_index_name') as Option.Some,
+ targetIndex: '.kibana_7.11.0_001',
+ legacyReindexTargetMappings: { properties: {} },
+ legacyPreMigrationDoneActions: [],
+ legacyIndex: 'legacy_index_name',
+ legacyReindexTaskId: 'test_task_id',
+ };
+ test('LEGACY_REINDEX_WAIT_FOR_TASK -> LEGACY_DELETE if action succeeds', () => {
+ const res: ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'> = Either.right('reindex_succeeded');
+ const newState = model(legacyReindexWaitForTaskState, res);
+ expect(newState.controlState).toEqual('LEGACY_DELETE');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('LEGACY_REINDEX_WAIT_FOR_TASK -> LEGACY_DELETE if action fails with index_not_found_exception for reindex source', () => {
+ const res: ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'> = Either.left({
+ type: 'index_not_found_exception',
+ index: 'legacy_index_name',
+ });
+ const newState = model(legacyReindexWaitForTaskState, res);
+ expect(newState.controlState).toEqual('LEGACY_DELETE');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('LEGACY_REINDEX_WAIT_FOR_TASK -> LEGACY_DELETE if action fails with target_index_had_write_block', () => {
+ const res: ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'> = Either.left({
+ type: 'target_index_had_write_block',
+ });
+ const newState = model(legacyReindexWaitForTaskState, res);
+ expect(newState.controlState).toEqual('LEGACY_DELETE');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('LEGACY_DELETE', () => {
+ const legacyDeleteState: LegacyDeleteState = {
+ ...baseState,
+ controlState: 'LEGACY_DELETE',
+ versionIndexReadyActions: Option.none,
+ sourceIndex: Option.some('source_index_name') as Option.Some,
+ targetIndex: '.kibana_7.11.0_001',
+ legacyReindexTargetMappings: { properties: {} },
+ legacyPreMigrationDoneActions: [],
+ legacyIndex: 'legacy_index_name',
+ };
+ test('LEGACY_DELETE -> SET_SOURCE_WRITE_BLOCK if action succeeds', () => {
+ const res: ResponseType<'LEGACY_DELETE'> = Either.right('update_aliases_succeeded');
+ const newState = model(legacyDeleteState, res);
+ expect(newState.controlState).toEqual('SET_SOURCE_WRITE_BLOCK');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('LEGACY_DELETE -> SET_SOURCE_WRITE_BLOCK if action fails with index_not_found_exception for legacy index', () => {
+ const res: ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'> = Either.left({
+ type: 'index_not_found_exception',
+ index: 'legacy_index_name',
+ });
+ const newState = model(legacyDeleteState, res);
+ expect(newState.controlState).toEqual('SET_SOURCE_WRITE_BLOCK');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('LEGACY_DELETE -> SET_SOURCE_WRITE_BLOCK if action fails with remove_index_not_a_concrete_index', () => {
+ const res: ResponseType<'LEGACY_DELETE'> = Either.left({
+ type: 'remove_index_not_a_concrete_index',
+ });
+ const newState = model(legacyDeleteState, res);
+ expect(newState.controlState).toEqual('SET_SOURCE_WRITE_BLOCK');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('SET_SOURCE_WRITE_BLOCK', () => {
+ const setWriteBlockState: SetSourceWriteBlockState = {
+ ...baseState,
+ controlState: 'SET_SOURCE_WRITE_BLOCK',
+ versionIndexReadyActions: Option.none,
+ sourceIndex: Option.some('.kibana') as Option.Some,
+ targetIndex: '.kibana_7.11.0_001',
+ };
+ test('SET_SOURCE_WRITE_BLOCK -> SET_SOURCE_WRITE_BLOCK if action fails with set_write_block_failed', () => {
+ const res: ResponseType<'SET_SOURCE_WRITE_BLOCK'> = Either.left({
+ type: 'retryable_es_client_error',
+ message: 'set_write_block_failed',
+ });
+ const newState = model(setWriteBlockState, res);
+ expect(newState.controlState).toEqual('SET_SOURCE_WRITE_BLOCK');
+ expect(newState.retryCount).toEqual(1);
+ expect(newState.retryDelay).toEqual(2000);
+ });
+ test('SET_SOURCE_WRITE_BLOCK -> CREATE_REINDEX_TEMP if action succeeds with set_write_block_succeeded', () => {
+ const res: ResponseType<'SET_SOURCE_WRITE_BLOCK'> = Either.right(
+ 'set_write_block_succeeded'
+ );
+ const newState = model(setWriteBlockState, res);
+ expect(newState.controlState).toEqual('CREATE_REINDEX_TEMP');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('CREATE_REINDEX_TEMP', () => {
+ const createReindexTargetState: CreateReindexTempState = {
+ ...baseState,
+ controlState: 'CREATE_REINDEX_TEMP',
+ versionIndexReadyActions: Option.none,
+ sourceIndex: Option.some('.kibana') as Option.Some,
+ targetIndex: '.kibana_7.11.0_001',
+ tempIndexMappings: { properties: {} },
+ };
+ it('CREATE_REINDEX_TEMP -> REINDEX_SOURCE_TO_TEMP if action succeeds', () => {
+ const res: ResponseType<'CREATE_REINDEX_TEMP'> = Either.right('create_index_succeeded');
+ const newState = model(createReindexTargetState, res);
+ expect(newState.controlState).toEqual('REINDEX_SOURCE_TO_TEMP');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('REINDEX_SOURCE_TO_TEMP', () => {
+ const reindexSourceToTargetState: ReindexSourceToTempState = {
+ ...baseState,
+ controlState: 'REINDEX_SOURCE_TO_TEMP',
+ versionIndexReadyActions: Option.none,
+ sourceIndex: Option.some('.kibana') as Option.Some,
+ targetIndex: '.kibana_7.11.0_001',
+ };
+ test('REINDEX_SOURCE_TO_TEMP -> REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK', () => {
+ const res: ResponseType<'REINDEX_SOURCE_TO_TEMP'> = Either.right({
+ taskId: 'reindex-task-id',
+ });
+ const newState = model(reindexSourceToTargetState, res);
+ expect(newState.controlState).toEqual('REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK', () => {
+ const state: ReindexSourceToTempWaitForTaskState = {
+ ...baseState,
+ controlState: 'REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK',
+ versionIndexReadyActions: Option.none,
+ sourceIndex: Option.some('.kibana') as Option.Some,
+ targetIndex: '.kibana_7.11.0_001',
+ reindexSourceToTargetTaskId: 'reindex-task-id',
+ };
+ test('REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK -> SET_TEMP_WRITE_BLOCK when response is right', () => {
+ const res: ResponseType<'REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK'> = Either.right(
+ 'reindex_succeeded'
+ );
+ const newState = model(state, res);
+ expect(newState.controlState).toEqual('SET_TEMP_WRITE_BLOCK');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK -> SET_TEMP_WRITE_BLOCK when response is left target_index_had_write_block', () => {
+ const res: ResponseType<'REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK'> = Either.left({
+ type: 'target_index_had_write_block',
+ });
+ const newState = model(state, res);
+ expect(newState.controlState).toEqual('SET_TEMP_WRITE_BLOCK');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK -> SET_TEMP_WRITE_BLOCK when response is left index_not_found_exception', () => {
+ const res: ResponseType<'REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK'> = Either.left({
+ type: 'index_not_found_exception',
+ index: '.kibana_7.11.0_reindex_temp',
+ });
+ const newState = model(state, res);
+ expect(newState.controlState).toEqual('SET_TEMP_WRITE_BLOCK');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('SET_TEMP_WRITE_BLOCK', () => {
+ const state: SetTempWriteBlock = {
+ ...baseState,
+ controlState: 'SET_TEMP_WRITE_BLOCK',
+ versionIndexReadyActions: Option.none,
+ sourceIndex: Option.some('.kibana') as Option.Some,
+ targetIndex: '.kibana_7.11.0_001',
+ };
+ test('SET_TEMP_WRITE_BLOCK -> CLONE_TEMP_TO_TARGET when response is right', () => {
+ const res: ResponseType<'SET_TEMP_WRITE_BLOCK'> = Either.right('set_write_block_succeeded');
+ const newState = model(state, res);
+ expect(newState.controlState).toEqual('CLONE_TEMP_TO_TARGET');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+  describe('CLONE_TEMP_TO_TARGET', () => {
+    const state: CloneTempToSource = {
+      ...baseState,
+      controlState: 'CLONE_TEMP_TO_TARGET',
+      versionIndexReadyActions: Option.none,
+      sourceIndex: Option.some('.kibana') as Option.Some,
+      targetIndex: '.kibana_7.11.0_001',
+    };
+    it('CLONE_TEMP_TO_TARGET -> OUTDATED_DOCUMENTS_SEARCH if response is right', () => {
+      const res: ResponseType<'CLONE_TEMP_TO_TARGET'> = Either.right({
+        acknowledged: true,
+        shardsAcknowledged: true,
+      });
+      const newState = model(state, res);
+      expect(newState.controlState).toEqual('OUTDATED_DOCUMENTS_SEARCH');
+      expect(newState.retryCount).toEqual(0);
+      expect(newState.retryDelay).toEqual(0);
+    });
+    // Fix: test title previously misspelled the error as "index_not_fonud_exception".
+    it('CLONE_TEMP_TO_TARGET -> OUTDATED_DOCUMENTS_SEARCH if response is left index_not_found_exception', () => {
+      const res: ResponseType<'CLONE_TEMP_TO_TARGET'> = Either.left({
+        type: 'index_not_found_exception',
+        index: 'temp_index',
+      });
+      const newState = model(state, res);
+      expect(newState.controlState).toEqual('OUTDATED_DOCUMENTS_SEARCH');
+      expect(newState.retryCount).toEqual(0);
+      expect(newState.retryDelay).toEqual(0);
+    });
+  });
+ describe('OUTDATED_DOCUMENTS_SEARCH', () => {
+ const outdatedDocumentsSourchState: OutdatedDocumentsSearch = {
+ ...baseState,
+ controlState: 'OUTDATED_DOCUMENTS_SEARCH',
+ versionIndexReadyActions: Option.none,
+      sourceIndex: Option.some('.kibana') as Option.Some<string>,
+ targetIndex: '.kibana_7.11.0_001',
+ };
+ test('OUTDATED_DOCUMENTS_SEARCH -> OUTDATED_DOCUMENTS_TRANSFORM if some outdated documents were found', () => {
+ const outdatedDocuments = ([
+ Symbol('raw saved object doc'),
+ ] as unknown) as SavedObjectsRawDoc[];
+ const res: ResponseType<'OUTDATED_DOCUMENTS_SEARCH'> = Either.right({
+ outdatedDocuments,
+ });
+ const newState = model(outdatedDocumentsSourchState, res) as OutdatedDocumentsTransform;
+ expect(newState.controlState).toEqual('OUTDATED_DOCUMENTS_TRANSFORM');
+ expect(newState.outdatedDocuments).toEqual(outdatedDocuments);
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+    test('OUTDATED_DOCUMENTS_SEARCH -> UPDATE_TARGET_MAPPINGS if no outdated documents were found and some versionIndexReadyActions', () => {
+ const aliasActions = ([Symbol('alias action')] as unknown) as AliasAction[];
+ const outdatedDocumentsSourchStateWithSomeVersionIndexReadyActions = {
+ ...outdatedDocumentsSourchState,
+ ...{
+ versionIndexReadyActions: Option.some(aliasActions),
+ },
+ };
+ const res: ResponseType<'OUTDATED_DOCUMENTS_SEARCH'> = Either.right({
+ outdatedDocuments: [],
+ });
+ const newState = model(
+ outdatedDocumentsSourchStateWithSomeVersionIndexReadyActions,
+ res
+ ) as MarkVersionIndexReady;
+ expect(newState.controlState).toEqual('UPDATE_TARGET_MAPPINGS');
+ expect(newState.versionIndexReadyActions.value).toEqual(aliasActions);
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+    test('OUTDATED_DOCUMENTS_SEARCH -> UPDATE_TARGET_MAPPINGS if no outdated documents were found and none versionIndexReadyActions', () => {
+ const res: ResponseType<'OUTDATED_DOCUMENTS_SEARCH'> = Either.right({
+ outdatedDocuments: [],
+ });
+ const newState = model(outdatedDocumentsSourchState, res);
+ expect(newState.controlState).toEqual('UPDATE_TARGET_MAPPINGS');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('OUTDATED_DOCUMENTS_TRANSFORM', () => {
+ const outdatedDocuments = ([
+ Symbol('raw saved object doc'),
+ ] as unknown) as SavedObjectsRawDoc[];
+ const outdatedDocumentsTransformState: OutdatedDocumentsTransform = {
+ ...baseState,
+ controlState: 'OUTDATED_DOCUMENTS_TRANSFORM',
+ versionIndexReadyActions: Option.none,
+      sourceIndex: Option.some('.kibana') as Option.Some<string>,
+ targetIndex: '.kibana_7.11.0_001',
+ outdatedDocuments,
+ };
+ test('OUTDATED_DOCUMENTS_TRANSFORM -> OUTDATED_DOCUMENTS_SEARCH if action succeeds', () => {
+ const res: ResponseType<'OUTDATED_DOCUMENTS_TRANSFORM'> = Either.right(
+ 'bulk_index_succeeded'
+ );
+ const newState = model(outdatedDocumentsTransformState, res);
+ expect(newState.controlState).toEqual('OUTDATED_DOCUMENTS_SEARCH');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('UPDATE_TARGET_MAPPINGS', () => {
+ const updateTargetMappingsState: UpdateTargetMappingsState = {
+ ...baseState,
+ controlState: 'UPDATE_TARGET_MAPPINGS',
+ versionIndexReadyActions: Option.none,
+      sourceIndex: Option.some('.kibana') as Option.Some<string>,
+ targetIndex: '.kibana_7.11.0_001',
+ };
+ test('UPDATE_TARGET_MAPPINGS -> UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK', () => {
+ const res: ResponseType<'UPDATE_TARGET_MAPPINGS'> = Either.right({
+ taskId: 'update target mappings task',
+ });
+ const newState = model(
+ updateTargetMappingsState,
+ res
+ ) as UpdateTargetMappingsWaitForTaskState;
+ expect(newState.controlState).toEqual('UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK');
+ expect(newState.updateTargetMappingsTaskId).toEqual('update target mappings task');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK', () => {
+ const updateTargetMappingsWaitForTaskState: UpdateTargetMappingsWaitForTaskState = {
+ ...baseState,
+ controlState: 'UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK',
+ versionIndexReadyActions: Option.none,
+      sourceIndex: Option.some('.kibana') as Option.Some<string>,
+ targetIndex: '.kibana_7.11.0_001',
+ updateTargetMappingsTaskId: 'update target mappings task',
+ };
+ test('UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK -> MARK_VERSION_INDEX_READY if some versionIndexReadyActions', () => {
+ const res: ResponseType<'UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK'> = Either.right(
+ 'pickup_updated_mappings_succeeded'
+ );
+ const newState = model(
+ {
+ ...updateTargetMappingsWaitForTaskState,
+ versionIndexReadyActions: Option.some([
+ { add: { index: 'kibana-index', alias: 'my-alias' } },
+ ]),
+ },
+ res
+ ) as UpdateTargetMappingsWaitForTaskState;
+ expect(newState.controlState).toEqual('MARK_VERSION_INDEX_READY');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+    test('UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK -> DONE if no versionIndexReadyActions', () => {
+ const res: ResponseType<'UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK'> = Either.right(
+ 'pickup_updated_mappings_succeeded'
+ );
+ const newState = model(
+ updateTargetMappingsWaitForTaskState,
+ res
+ ) as UpdateTargetMappingsWaitForTaskState;
+ expect(newState.controlState).toEqual('DONE');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('CREATE_NEW_TARGET', () => {
+ const aliasActions = Option.some([Symbol('alias action')] as unknown) as Option.Some<
+ AliasAction[]
+ >;
+ const createNewTargetState: CreateNewTargetState = {
+ ...baseState,
+ controlState: 'CREATE_NEW_TARGET',
+ versionIndexReadyActions: aliasActions,
+ sourceIndex: Option.none as Option.None,
+ targetIndex: '.kibana_7.11.0_001',
+ };
+ test('CREATE_NEW_TARGET -> MARK_VERSION_INDEX_READY', () => {
+ const res: ResponseType<'CREATE_NEW_TARGET'> = Either.right('create_index_succeeded');
+ const newState = model(createNewTargetState, res);
+ expect(newState.controlState).toEqual('MARK_VERSION_INDEX_READY');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('MARK_VERSION_INDEX_READY', () => {
+ const aliasActions = Option.some([Symbol('alias action')] as unknown) as Option.Some<
+ AliasAction[]
+ >;
+ const markVersionIndexReadyState: MarkVersionIndexReady = {
+ ...baseState,
+ controlState: 'MARK_VERSION_INDEX_READY',
+ versionIndexReadyActions: aliasActions,
+ sourceIndex: Option.none as Option.None,
+ targetIndex: '.kibana_7.11.0_001',
+ };
+ test('MARK_VERSION_INDEX_READY -> DONE if the action succeeded', () => {
+ const res: ResponseType<'MARK_VERSION_INDEX_READY'> = Either.right(
+ 'update_aliases_succeeded'
+ );
+ const newState = model(markVersionIndexReadyState, res);
+ expect(newState.controlState).toEqual('DONE');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('MARK_VERSION_INDEX_READY -> MARK_VERSION_INDEX_CONFLICT if someone else removed the current alias from the source index', () => {
+ const res: ResponseType<'MARK_VERSION_INDEX_READY'> = Either.left({
+ type: 'alias_not_found_exception',
+ });
+ const newState = model(markVersionIndexReadyState, res);
+ expect(newState.controlState).toEqual('MARK_VERSION_INDEX_READY_CONFLICT');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ describe('MARK_VERSION_INDEX_READY_CONFLICT', () => {
+ const aliasActions = Option.some([Symbol('alias action')] as unknown) as Option.Some<
+ AliasAction[]
+ >;
+ const markVersionIndexConflictState: MarkVersionIndexReadyConflict = {
+ ...baseState,
+ controlState: 'MARK_VERSION_INDEX_READY_CONFLICT',
+ versionIndexReadyActions: aliasActions,
+ sourceIndex: Option.none as Option.None,
+ targetIndex: '.kibana_7.11.0_001',
+ };
+    test('MARK_VERSION_INDEX_READY_CONFLICT -> DONE if the current alias is pointing to the version alias', () => {
+ const res: ResponseType<'MARK_VERSION_INDEX_READY_CONFLICT'> = Either.right({
+ '.kibana_7.11.0_001': {
+ aliases: {
+ '.kibana': {},
+ '.kibana_7.11.0': {},
+ },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ '.kibana_7.12.0_001': {
+ aliases: { '.kibana_7.12.0': {} },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ });
+ const newState = model(markVersionIndexConflictState, res);
+ expect(newState.controlState).toEqual('DONE');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ test('MARK_VERSION_INDEX_READY_CONFLICT -> FATAL if the current alias is pointing to a different version index', () => {
+ const res: ResponseType<'MARK_VERSION_INDEX_READY_CONFLICT'> = Either.right({
+ '.kibana_7.11.0_001': {
+ aliases: { '.kibana_7.11.0': {} },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ '.kibana_7.12.0_001': {
+ aliases: {
+ '.kibana': {},
+ '.kibana_7.12.0': {},
+ },
+ mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+ settings: {},
+ },
+ });
+ const newState = model(markVersionIndexConflictState, res) as FatalState;
+ expect(newState.controlState).toEqual('FATAL');
+ expect(newState.reason).toMatchInlineSnapshot(
+ `"Multiple versions of Kibana are attempting a migration in parallel. Another Kibana instance on version 7.12.0 completed this migration (this instance is running 7.11.0). Ensure that all Kibana instances are running on same version and try again."`
+ );
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
+ });
+ });
+ describe('createInitialState', () => {
+    it('creates the initial state for the model based on the passed in parameters', () => {
+ expect(
+ createInitialState({
+ kibanaVersion: '8.1.0',
+ targetMappings: {
+ dynamic: 'strict',
+ properties: { my_type: { properties: { title: { type: 'text' } } } },
+ },
+ migrationVersionPerType: {},
+ indexPrefix: '.kibana_task_manager',
+ })
+ ).toMatchInlineSnapshot(`
+ Object {
+ "controlState": "INIT",
+ "currentAlias": ".kibana_task_manager",
+ "indexPrefix": ".kibana_task_manager",
+ "kibanaVersion": "8.1.0",
+ "legacyIndex": ".kibana_task_manager",
+ "logs": Array [],
+ "outdatedDocumentsQuery": Object {
+ "bool": Object {
+ "should": Array [],
+ },
+ },
+ "preMigrationScript": Object {
+ "_tag": "None",
+ },
+ "retryCount": 0,
+ "retryDelay": 0,
+ "targetIndexMappings": Object {
+ "dynamic": "strict",
+ "properties": Object {
+ "my_type": Object {
+ "properties": Object {
+ "title": Object {
+ "type": "text",
+ },
+ },
+ },
+ },
+ },
+ "tempIndex": ".kibana_task_manager_8.1.0_reindex_temp",
+ "tempIndexMappings": Object {
+ "dynamic": false,
+ "properties": Object {
+ "migrationVersion": Object {
+ "dynamic": "true",
+ "type": "object",
+ },
+ "type": Object {
+ "type": "keyword",
+ },
+ },
+ },
+ "versionAlias": ".kibana_task_manager_8.1.0",
+ "versionIndex": ".kibana_task_manager_8.1.0_001",
+ }
+ `);
+ });
+ it('returns state with a preMigration script', () => {
+ const preMigrationScript = "ctx._id = ctx._source.type + ':' + ctx._id";
+ const initialState = createInitialState({
+ kibanaVersion: '8.1.0',
+ targetMappings: {
+ dynamic: 'strict',
+ properties: { my_type: { properties: { title: { type: 'text' } } } },
+ },
+ preMigrationScript,
+ migrationVersionPerType: {},
+ indexPrefix: '.kibana_task_manager',
+ });
+
+ expect(Option.isSome(initialState.preMigrationScript)).toEqual(true);
+    expect((initialState.preMigrationScript as Option.Some<string>).value).toEqual(
+ preMigrationScript
+ );
+ });
+ it('returns state without a preMigration script', () => {
+ expect(
+ Option.isNone(
+ createInitialState({
+ kibanaVersion: '8.1.0',
+ targetMappings: {
+ dynamic: 'strict',
+ properties: { my_type: { properties: { title: { type: 'text' } } } },
+ },
+ preMigrationScript: undefined,
+ migrationVersionPerType: {},
+ indexPrefix: '.kibana_task_manager',
+ }).preMigrationScript
+ )
+ ).toEqual(true);
+ });
+ it('returns state with an outdatedDocumentsQuery', () => {
+ expect(
+ createInitialState({
+ kibanaVersion: '8.1.0',
+ targetMappings: {
+ dynamic: 'strict',
+ properties: { my_type: { properties: { title: { type: 'text' } } } },
+ },
+ preMigrationScript: "ctx._id = ctx._source.type + ':' + ctx._id",
+ migrationVersionPerType: { my_dashboard: '7.10.1', my_viz: '8.0.0' },
+ indexPrefix: '.kibana_task_manager',
+ }).outdatedDocumentsQuery
+ ).toMatchInlineSnapshot(`
+ Object {
+ "bool": Object {
+ "should": Array [
+ Object {
+ "bool": Object {
+ "must": Object {
+ "term": Object {
+ "type": "my_dashboard",
+ },
+ },
+ "must_not": Object {
+ "term": Object {
+ "migrationVersion.my_dashboard": "7.10.1",
+ },
+ },
+ },
+ },
+ Object {
+ "bool": Object {
+ "must": Object {
+ "term": Object {
+ "type": "my_viz",
+ },
+ },
+ "must_not": Object {
+ "term": Object {
+ "migrationVersion.my_viz": "8.0.0",
+ },
+ },
+ },
+ },
+ ],
+ },
+ }
+ `);
+ });
+ });
+});
diff --git a/src/core/server/saved_objects/migrationsv2/model.ts b/src/core/server/saved_objects/migrationsv2/model.ts
new file mode 100644
index 0000000000000..5fb4c3e29538f
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/model.ts
@@ -0,0 +1,762 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import { gt, valid } from 'semver';
+import * as Either from 'fp-ts/lib/Either';
+import * as Option from 'fp-ts/lib/Option';
+import { cloneDeep } from 'lodash';
+import { AliasAction, FetchIndexResponse, RetryableEsClientError } from './actions';
+import { AllActionStates, InitState, State } from './types';
+import { IndexMapping } from '../mappings';
+import { ResponseType } from './next';
+import { SavedObjectsMigrationVersion } from '../types';
+import { disableUnknownTypeMappingFields } from '../migrations/core/migration_context';
+
+/**
+ * How many times to retry a failing step.
+ *
+ * Waiting for a task to complete will cause a failing step every time the
+ * wait_for_task action times out e.g. the following sequence has 3 retry
+ * attempts:
+ * LEGACY_REINDEX_WAIT_FOR_TASK (60s timeout) ->
+ * LEGACY_REINDEX_WAIT_FOR_TASK (2s delay, 60s timeout) ->
+ * LEGACY_REINDEX_WAIT_FOR_TASK (4s delay, 60s timeout) ->
+ * LEGACY_REINDEX_WAIT_FOR_TASK (success) -> ...
+ *
+ * This places an upper limit to how long we will wait for a task to complete.
+ * The duration of a step is the time it takes for the action to complete plus
+ * the exponential retry delay:
+ * max_task_runtime = 2+4+8+16+32+64*(MAX_RETRY_ATTEMPTS-5) + ACTION_DURATION*MAX_RETRY_ATTEMPTS
+ *
+ * For MAX_RETRY_ATTEMPTS=10, ACTION_DURATION=60
+ * max_task_runtime = 16.46 minutes
+ */
+const MAX_RETRY_ATTEMPTS = 10;
+
+/**
+ * A helper function/type for ensuring that all control state's are handled.
+ */
+function throwBadControlState(p: never): never;
+function throwBadControlState(controlState: any) {
+ throw new Error('Unexpected control state: ' + controlState);
+}
+
+/**
+ * A helper function/type for ensuring that all response types are handled.
+ */
+function throwBadResponse(state: State, p: never): never;
+function throwBadResponse(state: State, res: any): never {
+ throw new Error(
+ `${state.controlState} received unexpected action response: ` + JSON.stringify(res)
+ );
+}
+
+/**
+ * Merge the _meta.migrationMappingPropertyHashes mappings of an index with
+ * the given target mappings.
+ *
+ * @remarks Mapping updates are commutative (deeply merged) by Elasticsearch,
+ * except for the _meta key. The source index we're migrating from might
+ * contain documents created by a plugin that is disabled in the Kibana
+ * instance performing this migration. We merge the
+ * _meta.migrationMappingPropertyHashes mappings from the source index into
+ * the targetMappings to ensure that any `migrationPropertyHashes` for
+ * disabled plugins aren't lost.
+ *
+ * Right now we don't use these `migrationPropertyHashes` but it could be used
+ * in the future to detect if mappings were changed. If mappings weren't
+ * changed we don't need to reindex but can clone the index to save disk space.
+ *
+ * @param targetMappings
+ * @param indexMappings
+ */
+function mergeMigrationMappingPropertyHashes(
+ targetMappings: IndexMapping,
+ indexMappings: IndexMapping
+) {
+ return {
+ ...targetMappings,
+ _meta: {
+ migrationMappingPropertyHashes: {
+ ...indexMappings._meta?.migrationMappingPropertyHashes,
+ ...targetMappings._meta?.migrationMappingPropertyHashes,
+ },
+ },
+ };
+}
+
+function indexBelongsToLaterVersion(indexName: string, kibanaVersion: string): boolean {
+ const version = valid(indexVersion(indexName));
+ return version != null ? gt(version, kibanaVersion) : false;
+}
+/**
+ * Extracts the version number from a >= 7.11 index
+ * @param indexName A >= v7.11 index name
+ */
+function indexVersion(indexName?: string): string | undefined {
+ return (indexName?.match(/.+_(\d+\.\d+\.\d+)_\d+/) || [])[1];
+}
+
+/**
+ * Creates a record of alias -> index name pairs
+ */
+function getAliases(indices: FetchIndexResponse) {
+ return Object.keys(indices).reduce((acc, index) => {
+ Object.keys(indices[index].aliases || {}).forEach((alias) => {
+ // TODO throw if multiple .kibana aliases point to the same index?
+ acc[alias] = index;
+ });
+ return acc;
+  }, {} as Record<string, string>);
+}
+
+const delayRetryState = <S extends State>(state: S, left: RetryableEsClientError): S => {
+ if (state.retryCount === MAX_RETRY_ATTEMPTS) {
+ return {
+ ...state,
+ controlState: 'FATAL',
+ reason: `Unable to complete the ${state.controlState} step after ${MAX_RETRY_ATTEMPTS} attempts, terminating.`,
+ };
+ } else {
+ const retryCount = state.retryCount + 1;
+ const retryDelay = 1000 * Math.min(Math.pow(2, retryCount), 64); // 2s, 4s, 8s, 16s, 32s, 64s, 64s, 64s ...
+
+ return {
+ ...state,
+ retryCount,
+ retryDelay,
+ logs: [
+ ...state.logs,
+ {
+ level: 'error',
+ message: `Action failed with '${
+ left.message
+ }'. Retrying attempt ${retryCount} out of ${MAX_RETRY_ATTEMPTS} in ${
+ retryDelay / 1000
+ } seconds.`,
+ },
+ ],
+ };
+ }
+};
+const resetRetryState = <S extends State>(state: S): S => {
+ return { ...state, ...{ retryCount: 0, retryDelay: 0 } };
+};
+
+export type ExcludeRetryableEsError<Response> = Exclude<
+  | Exclude<
+      Response,
+      Either.Either<Response extends Either.Left<unknown> ? Response['left'] : never, never>
+    >
+  | Either.Either<
+      Exclude<
+        Response extends Either.Left<unknown> ? Response['left'] : never,
+        RetryableEsClientError
+      >,
+      Response extends Either.Right<unknown> ? Response['right'] : never
+    >,
+  Either.Left<RetryableEsClientError>
+>;
+
+export const model = (currentState: State, resW: ResponseType<AllActionStates>): State => {
+ // The action response `resW` is weakly typed, the type includes all action
+ // responses. Each control state only triggers one action so each control
+ // state only has one action response type. This allows us to narrow the
+ // response type to only the action response associated with a specific
+ // control state using:
+  // `const res = resW as ResponseType<typeof stateP.controlState>;`
+
+ let stateP: State = cloneDeep(currentState);
+
+ // Handle retryable_es_client_errors. Other left values need to be handled
+ // by the control state specific code below.
+ if (Either.isLeft(resW) && resW.left.type === 'retryable_es_client_error') {
+ // Retry the same step after an exponentially increasing delay.
+ return delayRetryState(stateP, resW.left);
+ } else {
+ // If the action didn't fail with a retryable_es_client_error, reset the
+ // retry counter and retryDelay state
+ stateP = resetRetryState(stateP);
+ }
+
+ if (stateP.controlState === 'INIT') {
+    const res = resW as ExcludeRetryableEsError<ResponseType<'INIT'>>;
+
+ if (Either.isRight(res)) {
+ const indices = res.right;
+ const aliases = getAliases(indices);
+
+ if (
+ // `.kibana` and the version specific aliases both exists and
+ // are pointing to the same index. This version's migration has already
+ // been completed.
+ aliases[stateP.currentAlias] != null &&
+ aliases[stateP.versionAlias] != null &&
+ aliases[stateP.currentAlias] === aliases[stateP.versionAlias]
+ ) {
+ return {
+ ...stateP,
+ // Skip to 'OUTDATED_DOCUMENTS_SEARCH' so that if a new plugin was
+ // installed / enabled we can transform any old documents and update
+ // the mappings for this plugin's types.
+ controlState: 'OUTDATED_DOCUMENTS_SEARCH',
+ // Source is a none because we didn't do any migration from a source
+ // index
+ sourceIndex: Option.none,
+ targetIndex: `${stateP.indexPrefix}_${stateP.kibanaVersion}_001`,
+ targetIndexMappings: disableUnknownTypeMappingFields(
+ stateP.targetIndexMappings,
+ indices[aliases[stateP.currentAlias]].mappings
+ ),
+ versionIndexReadyActions: Option.none,
+ };
+ } else if (
+ // `.kibana` is pointing to an index that belongs to a later
+ // version of Kibana .e.g. a 7.11.0 instance found the `.kibana` alias
+ // pointing to `.kibana_7.12.0_001`
+ indexBelongsToLaterVersion(aliases[stateP.currentAlias], stateP.kibanaVersion)
+ ) {
+ return {
+ ...stateP,
+ controlState: 'FATAL',
+ reason: `The ${
+ stateP.currentAlias
+ } alias is pointing to a newer version of Kibana: v${indexVersion(
+ aliases[stateP.currentAlias]
+ )}`,
+ };
+ } else if (
+ // If the `.kibana` alias exists
+ aliases[stateP.currentAlias] != null
+ ) {
+ // The source index is the index the `.kibana` alias points to
+ const source = aliases[stateP.currentAlias];
+ const target = stateP.versionIndex;
+ return {
+ ...stateP,
+ controlState: 'SET_SOURCE_WRITE_BLOCK',
+        sourceIndex: Option.some(source) as Option.Some<string>,
+ targetIndex: target,
+ targetIndexMappings: mergeMigrationMappingPropertyHashes(
+ stateP.targetIndexMappings,
+ indices[source].mappings
+ ),
+ versionIndexReadyActions: Option.some([
+ { remove: { index: source, alias: stateP.currentAlias, must_exist: true } },
+ { add: { index: target, alias: stateP.currentAlias } },
+ { add: { index: target, alias: stateP.versionAlias } },
+ { remove_index: { index: stateP.tempIndex } },
+ ]),
+ };
+ } else if (indices[stateP.legacyIndex] != null) {
+ // Migrate from a legacy index
+
+ // If the user used default index names we can narrow the version
+ // number we use when creating a backup index. This is purely to help
+      // users more easily identify how "old" an index is so that they can
+ // decide if it's safe to delete these rollback backups. Because
+ // backups are kept for rollback, a version number is more useful than
+ // a date.
+ let legacyVersion = '';
+ if (stateP.indexPrefix === '.kibana') {
+ legacyVersion = 'pre6.5.0';
+ } else if (stateP.indexPrefix === '.kibana_task_manager') {
+ legacyVersion = 'pre7.4.0';
+ } else {
+ legacyVersion = 'pre' + stateP.kibanaVersion;
+ }
+
+ const legacyReindexTarget = `${stateP.indexPrefix}_${legacyVersion}_001`;
+
+ const target = stateP.versionIndex;
+ return {
+ ...stateP,
+ controlState: 'LEGACY_SET_WRITE_BLOCK',
+        sourceIndex: Option.some(legacyReindexTarget) as Option.Some<string>,
+ targetIndex: target,
+ targetIndexMappings: mergeMigrationMappingPropertyHashes(
+ stateP.targetIndexMappings,
+ indices[stateP.legacyIndex].mappings
+ ),
+ legacyReindexTargetMappings: indices[stateP.legacyIndex].mappings,
+ legacyPreMigrationDoneActions: [
+ { remove_index: { index: stateP.legacyIndex } },
+ {
+ add: {
+ index: legacyReindexTarget,
+ alias: stateP.currentAlias,
+ },
+ },
+ ],
+ versionIndexReadyActions: Option.some([
+ {
+ remove: {
+ index: legacyReindexTarget,
+ alias: stateP.currentAlias,
+ must_exist: true,
+ },
+ },
+ { add: { index: target, alias: stateP.currentAlias } },
+ { add: { index: target, alias: stateP.versionAlias } },
+ { remove_index: { index: stateP.tempIndex } },
+ ]),
+ };
+ } else {
+ // This cluster doesn't have an existing Saved Object index, create a
+ // new version specific index.
+ const target = stateP.versionIndex;
+ return {
+ ...stateP,
+ controlState: 'CREATE_NEW_TARGET',
+ sourceIndex: Option.none as Option.None,
+ targetIndex: target,
+ versionIndexReadyActions: Option.some([
+ { add: { index: target, alias: stateP.currentAlias } },
+ { add: { index: target, alias: stateP.versionAlias } },
+        ]) as Option.Some<AliasAction[]>,
+ };
+ }
+ } else {
+ return throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'LEGACY_SET_WRITE_BLOCK') {
+    const res = resW as ExcludeRetryableEsError<ResponseType<'LEGACY_SET_WRITE_BLOCK'>>;
+    // If the write block is successfully in place
+ if (Either.isRight(res)) {
+ return { ...stateP, controlState: 'LEGACY_CREATE_REINDEX_TARGET' };
+ } else if (Either.isLeft(res)) {
+ // If the write block failed because the index doesn't exist, it means
+ // another instance already completed the legacy pre-migration. Proceed
+ // to the next step.
+ if (res.left.type === 'index_not_found_exception') {
+ return { ...stateP, controlState: 'LEGACY_CREATE_REINDEX_TARGET' };
+ } else {
+ // @ts-expect-error TS doesn't correctly narrow this type to never
+ return throwBadResponse(stateP, res);
+ }
+ } else {
+ return throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'LEGACY_CREATE_REINDEX_TARGET') {
+    const res = resW as ExcludeRetryableEsError<ResponseType<'LEGACY_CREATE_REINDEX_TARGET'>>;
+ if (Either.isRight(res)) {
+ return {
+ ...stateP,
+ controlState: 'LEGACY_REINDEX',
+ };
+ } else {
+ // If the createIndex action receives an 'resource_already_exists_exception'
+ // it will wait until the index status turns green so we don't have any
+ // left responses to handle here.
+ throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'LEGACY_REINDEX') {
+    const res = resW as ExcludeRetryableEsError<ResponseType<'LEGACY_REINDEX'>>;
+ if (Either.isRight(res)) {
+ return {
+ ...stateP,
+ controlState: 'LEGACY_REINDEX_WAIT_FOR_TASK',
+ legacyReindexTaskId: res.right.taskId,
+ };
+ } else {
+ throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'LEGACY_REINDEX_WAIT_FOR_TASK') {
+    const res = resW as ExcludeRetryableEsError<ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'>>;
+ if (Either.isRight(res)) {
+ return {
+ ...stateP,
+ controlState: 'LEGACY_DELETE',
+ };
+ } else {
+ const left = res.left;
+ if (
+ (left.type === 'index_not_found_exception' && left.index === stateP.legacyIndex) ||
+ left.type === 'target_index_had_write_block'
+ ) {
+ // index_not_found_exception for the LEGACY_REINDEX source index:
+        // another instance already completed the LEGACY_DELETE step.
+ //
+ // target_index_had_write_block: another instance already completed the
+ // SET_SOURCE_WRITE_BLOCK step.
+ //
+ // If we detect that another instance has already completed a step, we
+ // can technically skip ahead in the process until after the completed
+ // step. However, by not skipping ahead we limit branches in the
+ // control state progression and simplify the implementation.
+ return { ...stateP, controlState: 'LEGACY_DELETE' };
+ } else {
+ // We don't handle the following errors as the algorithm will never
+ // run into these during the LEGACY_REINDEX_WAIT_FOR_TASK step:
+ // - index_not_found_exception for the LEGACY_REINDEX target index
+ // - strict_dynamic_mapping_exception
+ throwBadResponse(stateP, left as never);
+ }
+ }
+ } else if (stateP.controlState === 'LEGACY_DELETE') {
+    const res = resW as ExcludeRetryableEsError<ResponseType<'LEGACY_DELETE'>>;
+ if (Either.isRight(res)) {
+ return { ...stateP, controlState: 'SET_SOURCE_WRITE_BLOCK' };
+ } else if (Either.isLeft(res)) {
+ const left = res.left;
+ if (
+ left.type === 'remove_index_not_a_concrete_index' ||
+ (left.type === 'index_not_found_exception' && left.index === stateP.legacyIndex)
+ ) {
+ // index_not_found_exception, another Kibana instance already
+ // deleted the legacy index
+ //
+ // remove_index_not_a_concrete_index, another Kibana instance already
+ // deleted the legacy index and created a .kibana alias
+ //
+ // If we detect that another instance has already completed a step, we
+ // can technically skip ahead in the process until after the completed
+ // step. However, by not skipping ahead we limit branches in the
+ // control state progression and simplify the implementation.
+ return { ...stateP, controlState: 'SET_SOURCE_WRITE_BLOCK' };
+ } else {
+ // We don't handle the following errors as the migration algorithm
+ // will never cause them to occur:
+ // - alias_not_found_exception we're not using must_exist
+ // - index_not_found_exception for source index into which we reindex
+ // the legacy index
+ throwBadResponse(stateP, left as never);
+ }
+ } else {
+ throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'SET_SOURCE_WRITE_BLOCK') {
+    const res = resW as ExcludeRetryableEsError<ResponseType<'SET_SOURCE_WRITE_BLOCK'>>;
+ if (Either.isRight(res)) {
+ // If the write block is successfully in place, proceed to the next step.
+ return {
+ ...stateP,
+ controlState: 'CREATE_REINDEX_TEMP',
+ };
+ } else {
+ // We don't handle the following errors as the migration algorithm
+ // will never cause them to occur:
+ // - index_not_found_exception
+ return throwBadResponse(stateP, res as never);
+ }
+ } else if (stateP.controlState === 'CREATE_REINDEX_TEMP') {
+    const res = resW as ExcludeRetryableEsError<ResponseType<'CREATE_REINDEX_TEMP'>>;
+ if (Either.isRight(res)) {
+ return { ...stateP, controlState: 'REINDEX_SOURCE_TO_TEMP' };
+ } else {
+ // If the createIndex action receives an 'resource_already_exists_exception'
+ // it will wait until the index status turns green so we don't have any
+ // left responses to handle here.
+ throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'REINDEX_SOURCE_TO_TEMP') {
+    const res = resW as ExcludeRetryableEsError<ResponseType<'REINDEX_SOURCE_TO_TEMP'>>;
+ if (Either.isRight(res)) {
+ return {
+ ...stateP,
+ controlState: 'REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK',
+ reindexSourceToTargetTaskId: res.right.taskId,
+ };
+ } else {
+ // Since this is a background task, the request should always succeed,
+ // errors only show up in the returned task.
+ throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK') {
+    const res = resW as ExcludeRetryableEsError<ResponseType<'REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK'>>;
+ if (Either.isRight(res)) {
+ return {
+ ...stateP,
+ controlState: 'SET_TEMP_WRITE_BLOCK',
+ };
+ } else {
+ const left = res.left;
+ if (
+ left.type === 'target_index_had_write_block' ||
+ (left.type === 'index_not_found_exception' && left.index === stateP.tempIndex)
+ ) {
+ // index_not_found_exception:
+ // another instance completed the MARK_VERSION_INDEX_READY and
+ // removed the temp index.
+ // target_index_had_write_block
+ // another instance completed the SET_TEMP_WRITE_BLOCK step adding a
+ // write block to the temp index.
+ //
+ // For simplicity we continue linearly through the next steps even if
+ // we know another instance already completed these.
+ return {
+ ...stateP,
+ controlState: 'SET_TEMP_WRITE_BLOCK',
+ };
+ } else {
+ // Don't handle incompatible_mapping_exception as we will never add a write
+ // block to the temp index or change the mappings.
+ throwBadResponse(stateP, left as never);
+ }
+ }
+ } else if (stateP.controlState === 'SET_TEMP_WRITE_BLOCK') {
+    const res = resW as ExcludeRetryableEsError<ResponseType<'SET_TEMP_WRITE_BLOCK'>>;
+ if (Either.isRight(res)) {
+ return {
+ ...stateP,
+ controlState: 'CLONE_TEMP_TO_TARGET',
+ };
+ } else {
+ const left = res.left;
+ if (left.type === 'index_not_found_exception') {
+ // index_not_found_exception:
+ // another instance completed the MARK_VERSION_INDEX_READY and
+ // removed the temp index.
+ //
+ // For simplicity we continue linearly through the next steps even if
+ // we know another instance already completed these.
+ return {
+ ...stateP,
+ controlState: 'CLONE_TEMP_TO_TARGET',
+ };
+ } else {
+ // @ts-expect-error TS doesn't correctly narrow this to never
+ throwBadResponse(stateP, left);
+ }
+ }
+ } else if (stateP.controlState === 'CLONE_TEMP_TO_TARGET') {
+ const res = resW as ExcludeRetryableEsError>;
+ if (Either.isRight(res)) {
+ return {
+ ...stateP,
+ controlState: 'OUTDATED_DOCUMENTS_SEARCH',
+ };
+ } else {
+ const left = res.left;
+ if (left.type === 'index_not_found_exception') {
+          // index_not_found_exception means another instance already completed
+ // the MARK_VERSION_INDEX_READY step and removed the temp index
+ // We still perform the OUTDATED_DOCUMENTS_* and
+ // UPDATE_TARGET_MAPPINGS steps since we might have plugins enabled
+ // which the other instances don't.
+ return {
+ ...stateP,
+ controlState: 'OUTDATED_DOCUMENTS_SEARCH',
+ };
+ }
+ throwBadResponse(stateP, res as never);
+ }
+ } else if (stateP.controlState === 'OUTDATED_DOCUMENTS_SEARCH') {
+ const res = resW as ExcludeRetryableEsError>;
+ if (Either.isRight(res)) {
+ // If outdated documents were found, transform them
+ if (res.right.outdatedDocuments.length > 0) {
+ return {
+ ...stateP,
+ controlState: 'OUTDATED_DOCUMENTS_TRANSFORM',
+ outdatedDocuments: res.right.outdatedDocuments,
+ };
+ } else {
+ // If there are no more results we have transformed all outdated
+ // documents and can proceed to the next step
+ return {
+ ...stateP,
+ controlState: 'UPDATE_TARGET_MAPPINGS',
+ };
+ }
+ } else {
+ throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'OUTDATED_DOCUMENTS_TRANSFORM') {
+ const res = resW as ExcludeRetryableEsError>;
+ if (Either.isRight(res)) {
+ return {
+ ...stateP,
+ controlState: 'OUTDATED_DOCUMENTS_SEARCH',
+ };
+ } else {
+ throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'UPDATE_TARGET_MAPPINGS') {
+ const res = resW as ExcludeRetryableEsError>;
+ if (Either.isRight(res)) {
+ return {
+ ...stateP,
+ controlState: 'UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK',
+ updateTargetMappingsTaskId: res.right.taskId,
+ };
+ } else {
+ throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK') {
+ const res = resW as ExcludeRetryableEsError>;
+ if (Either.isRight(res)) {
+ if (Option.isSome(stateP.versionIndexReadyActions)) {
+ // If there are some versionIndexReadyActions we performed a full
+ // migration and need to point the aliases to our newly migrated
+ // index.
+ return {
+ ...stateP,
+ controlState: 'MARK_VERSION_INDEX_READY',
+ versionIndexReadyActions: stateP.versionIndexReadyActions,
+ };
+ } else {
+        // If there are no versionIndexReadyActions another instance
+        // already completed this migration and we only transformed outdated
+        // documents and updated the mappings in case a new plugin was
+        // enabled.
+ return {
+ ...stateP,
+ controlState: 'DONE',
+ };
+ }
+ } else {
+ throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'CREATE_NEW_TARGET') {
+ const res = resW as ExcludeRetryableEsError>;
+ if (Either.isRight(res)) {
+ return {
+ ...stateP,
+ controlState: 'MARK_VERSION_INDEX_READY',
+ };
+ } else {
+ // If the createIndex action receives an 'resource_already_exists_exception'
+ // it will wait until the index status turns green so we don't have any
+ // left responses to handle here.
+ throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'MARK_VERSION_INDEX_READY') {
+ const res = resW as ExcludeRetryableEsError>;
+ if (Either.isRight(res)) {
+ return { ...stateP, controlState: 'DONE' };
+ } else {
+ const left = res.left;
+ if (left.type === 'alias_not_found_exception') {
+ // the versionIndexReadyActions checks that the currentAlias is still
+ // pointing to the source index. If this fails with an
+ // alias_not_found_exception another instance has completed a
+ // migration from the same source.
+ return { ...stateP, controlState: 'MARK_VERSION_INDEX_READY_CONFLICT' };
+ } else if (
+ left.type === 'remove_index_not_a_concrete_index' ||
+ left.type === 'index_not_found_exception'
+ ) {
+ // We don't handle these errors as the migration algorithm will never
+ // cause them to occur (these are only relevant to the LEGACY_DELETE
+ // step).
+ throwBadResponse(stateP, left as never);
+ } else {
+ throwBadResponse(stateP, left);
+ }
+ }
+ } else if (stateP.controlState === 'MARK_VERSION_INDEX_READY_CONFLICT') {
+ // If another instance completed a migration from the same source we need
+ // to check that the completed migration was performed by a Kibana that's
+ // on the same version as this instance.
+ const res = resW as ExcludeRetryableEsError>;
+ if (Either.isRight(res)) {
+ const indices = res.right;
+ const aliases = getAliases(indices);
+ if (
+ aliases[stateP.currentAlias] != null &&
+ aliases[stateP.versionAlias] != null &&
+ aliases[stateP.currentAlias] === aliases[stateP.versionAlias]
+ ) {
+ // If the current and version aliases are pointing to the same index
+ // the migration was completed by another instance on the same version
+ // and it's safe to start serving traffic.
+ return { ...stateP, controlState: 'DONE' };
+ } else {
+ // Fail the migration, the instance that completed the migration is
+ // running a different version of Kibana. This avoids a situation where
+        // we lose acknowledged writes because two versions are both
+        // accepting writes, but are writing into different indices.
+ const conflictingKibanaVersion =
+ indexVersion(aliases[stateP.currentAlias]) ?? aliases[stateP.currentAlias];
+ return {
+ ...stateP,
+ controlState: 'FATAL',
+ reason: `Multiple versions of Kibana are attempting a migration in parallel. Another Kibana instance on version ${conflictingKibanaVersion} completed this migration (this instance is running ${stateP.kibanaVersion}). Ensure that all Kibana instances are running on same version and try again.`,
+ };
+ }
+ } else {
+ throwBadResponse(stateP, res);
+ }
+ } else if (stateP.controlState === 'DONE' || stateP.controlState === 'FATAL') {
+ // The state-action machine will never call the model in the terminating states
+ throwBadControlState(stateP as never);
+ } else {
+ return throwBadControlState(stateP);
+ }
+};
+
+/**
+ * Construct the initial state for the model
+ */
+export const createInitialState = ({
+ kibanaVersion,
+ targetMappings,
+ preMigrationScript,
+ migrationVersionPerType,
+ indexPrefix,
+}: {
+ kibanaVersion: string;
+ targetMappings: IndexMapping;
+ preMigrationScript?: string;
+ migrationVersionPerType: SavedObjectsMigrationVersion;
+ indexPrefix: string;
+}): InitState => {
+ const outdatedDocumentsQuery = {
+ bool: {
+ should: Object.entries(migrationVersionPerType).map(([type, latestVersion]) => ({
+ bool: {
+ must: { term: { type } },
+ must_not: { term: { [`migrationVersion.${type}`]: latestVersion } },
+ },
+ })),
+ },
+ };
+
+ const reindexTargetMappings: IndexMapping = {
+ // @ts-expect-error we don't allow plugins to set `dynamic`
+ dynamic: false,
+ properties: {
+ type: { type: 'keyword' },
+ migrationVersion: {
+ // @ts-expect-error we don't allow plugins to set `dynamic`
+ dynamic: 'true',
+ type: 'object',
+ },
+ },
+ };
+
+ const initialState: InitState = {
+ controlState: 'INIT',
+ indexPrefix,
+ legacyIndex: indexPrefix,
+ currentAlias: indexPrefix,
+ versionAlias: `${indexPrefix}_${kibanaVersion}`,
+ versionIndex: `${indexPrefix}_${kibanaVersion}_001`,
+ tempIndex: `${indexPrefix}_${kibanaVersion}_reindex_temp`,
+ kibanaVersion,
+ preMigrationScript: Option.fromNullable(preMigrationScript),
+ targetIndexMappings: targetMappings,
+ tempIndexMappings: reindexTargetMappings,
+ outdatedDocumentsQuery,
+ retryCount: 0,
+ retryDelay: 0,
+ logs: [],
+ };
+ return initialState;
+};
diff --git a/src/core/server/saved_objects/migrationsv2/next.test.ts b/src/core/server/saved_objects/migrationsv2/next.test.ts
new file mode 100644
index 0000000000000..172679faf35c5
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/next.test.ts
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { ElasticsearchClient } from '../../elasticsearch';
+import { next } from './next';
+import { State } from './types';
+
+describe('migrations v2 next', () => {
+ it.todo('when state.retryDelay > 0 delays execution of the next action');
+ it('DONE returns null', () => {
+ const state = { controlState: 'DONE' } as State;
+ const action = next({} as ElasticsearchClient, (() => {}) as any)(state);
+ expect(action).toEqual(null);
+ });
+ it('FATAL returns null', () => {
+ const state = { controlState: 'FATAL', reason: '' } as State;
+ const action = next({} as ElasticsearchClient, (() => {}) as any)(state);
+ expect(action).toEqual(null);
+ });
+});
diff --git a/src/core/server/saved_objects/migrationsv2/next.ts b/src/core/server/saved_objects/migrationsv2/next.ts
new file mode 100644
index 0000000000000..c5cbf654c3464
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/next.ts
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import * as TaskEither from 'fp-ts/lib/TaskEither';
+import * as Option from 'fp-ts/lib/Option';
+import { UnwrapPromise } from '@kbn/utility-types';
+import { pipe } from 'fp-ts/lib/pipeable';
+import {
+ AllActionStates,
+ ReindexSourceToTempState,
+ MarkVersionIndexReady,
+ InitState,
+ LegacyCreateReindexTargetState,
+ LegacyDeleteState,
+ LegacyReindexState,
+ LegacyReindexWaitForTaskState,
+ LegacySetWriteBlockState,
+ OutdatedDocumentsSearch,
+ OutdatedDocumentsTransform,
+ SetSourceWriteBlockState,
+ State,
+ UpdateTargetMappingsState,
+ UpdateTargetMappingsWaitForTaskState,
+ CreateReindexTempState,
+ ReindexSourceToTempWaitForTaskState,
+ MarkVersionIndexReadyConflict,
+ CreateNewTargetState,
+ CloneTempToSource,
+ SetTempWriteBlock,
+} from './types';
+import * as Actions from './actions';
+import { ElasticsearchClient } from '../../elasticsearch';
+import { SavedObjectsRawDoc } from '..';
+
+export type TransformRawDocs = (rawDocs: SavedObjectsRawDoc[]) => Promise;
+type ActionMap = ReturnType;
+
+/**
+ * The response type of the provided control state's action.
+ *
+ * E.g. given 'INIT', provides the response type of the action triggered by
+ * `next` in the 'INIT' control state.
+ */
+export type ResponseType = UnwrapPromise<
+ ReturnType>
+>;
+
+export const nextActionMap = (client: ElasticsearchClient, transformRawDocs: TransformRawDocs) => {
+ return {
+ INIT: (state: InitState) =>
+ Actions.fetchIndices(client, [state.currentAlias, state.versionAlias]),
+ SET_SOURCE_WRITE_BLOCK: (state: SetSourceWriteBlockState) =>
+ Actions.setWriteBlock(client, state.sourceIndex.value),
+ CREATE_NEW_TARGET: (state: CreateNewTargetState) =>
+ Actions.createIndex(client, state.targetIndex, state.targetIndexMappings),
+ CREATE_REINDEX_TEMP: (state: CreateReindexTempState) =>
+ Actions.createIndex(client, state.tempIndex, state.tempIndexMappings),
+ REINDEX_SOURCE_TO_TEMP: (state: ReindexSourceToTempState) =>
+ Actions.reindex(client, state.sourceIndex.value, state.tempIndex, Option.none, false),
+ SET_TEMP_WRITE_BLOCK: (state: SetTempWriteBlock) =>
+ Actions.setWriteBlock(client, state.tempIndex),
+ REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK: (state: ReindexSourceToTempWaitForTaskState) =>
+ Actions.waitForReindexTask(client, state.reindexSourceToTargetTaskId, '60s'),
+ CLONE_TEMP_TO_TARGET: (state: CloneTempToSource) =>
+ Actions.cloneIndex(client, state.tempIndex, state.targetIndex),
+ UPDATE_TARGET_MAPPINGS: (state: UpdateTargetMappingsState) =>
+ Actions.updateAndPickupMappings(client, state.targetIndex, state.targetIndexMappings),
+ UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK: (state: UpdateTargetMappingsWaitForTaskState) =>
+ Actions.waitForPickupUpdatedMappingsTask(client, state.updateTargetMappingsTaskId, '60s'),
+ OUTDATED_DOCUMENTS_SEARCH: (state: OutdatedDocumentsSearch) =>
+ Actions.searchForOutdatedDocuments(client, state.targetIndex, state.outdatedDocumentsQuery),
+ OUTDATED_DOCUMENTS_TRANSFORM: (state: OutdatedDocumentsTransform) =>
+ pipe(
+ TaskEither.tryCatch(
+ () => transformRawDocs(state.outdatedDocuments),
+ (e) => {
+ throw e;
+ }
+ ),
+ TaskEither.chain((docs) =>
+ Actions.bulkOverwriteTransformedDocuments(client, state.targetIndex, docs)
+ )
+ ),
+ MARK_VERSION_INDEX_READY: (state: MarkVersionIndexReady) =>
+ Actions.updateAliases(client, state.versionIndexReadyActions.value),
+ MARK_VERSION_INDEX_READY_CONFLICT: (state: MarkVersionIndexReadyConflict) =>
+ Actions.fetchIndices(client, [state.currentAlias, state.versionAlias]),
+ LEGACY_SET_WRITE_BLOCK: (state: LegacySetWriteBlockState) =>
+ Actions.setWriteBlock(client, state.legacyIndex),
+ LEGACY_CREATE_REINDEX_TARGET: (state: LegacyCreateReindexTargetState) =>
+ Actions.createIndex(client, state.sourceIndex.value, state.legacyReindexTargetMappings),
+ LEGACY_REINDEX: (state: LegacyReindexState) =>
+ Actions.reindex(
+ client,
+ state.legacyIndex,
+ state.sourceIndex.value,
+ state.preMigrationScript,
+ false
+ ),
+ LEGACY_REINDEX_WAIT_FOR_TASK: (state: LegacyReindexWaitForTaskState) =>
+ Actions.waitForReindexTask(client, state.legacyReindexTaskId, '60s'),
+ LEGACY_DELETE: (state: LegacyDeleteState) =>
+ Actions.updateAliases(client, state.legacyPreMigrationDoneActions),
+ };
+};
+
+export const next = (client: ElasticsearchClient, transformRawDocs: TransformRawDocs) => {
+ const map = nextActionMap(client, transformRawDocs);
+ return (state: State) => {
+ const delay = any>(fn: F): (() => ReturnType) => {
+ return () => {
+ return state.retryDelay > 0
+ ? new Promise((resolve) => setTimeout(resolve, state.retryDelay)).then(fn)
+ : fn();
+ };
+ };
+
+ if (state.controlState === 'DONE' || state.controlState === 'FATAL') {
+ // Return null if we're in one of the terminating states
+ return null;
+ } else {
+ // Otherwise return the delayed action
+ // We use an explicit cast as otherwise TS infers `(state: never) => ...`
+ // here because state is inferred to be the intersection of all states
+ // instead of the union.
+ const nextAction = map[state.controlState] as (
+ state: State
+ ) => ReturnType;
+ return delay(nextAction(state));
+ }
+ };
+};
diff --git a/src/core/server/saved_objects/migrationsv2/state_action_machine.test.ts b/src/core/server/saved_objects/migrationsv2/state_action_machine.test.ts
new file mode 100644
index 0000000000000..15dde10eb21ec
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/state_action_machine.test.ts
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { stateActionMachine } from './state_action_machine';
+import * as E from 'fp-ts/lib/Either';
+
+describe('state action machine', () => {
+ const state = { controlState: 'INIT', count: 1 };
+
+ const next = jest.fn((s: typeof state) => {
+ if (s.controlState === 'INIT') return () => Promise.resolve(E.right('response'));
+ if (s.controlState === 'DONE') return null;
+ else throw new Error('Invalid control state');
+ });
+
+ const countUntilModel = (maxCount: number) =>
+ jest.fn((s: typeof state, res: E.Either) => {
+ if (s.count === maxCount) {
+ return { controlState: 'DONE', count: s.count };
+ } else {
+ return { controlState: s.controlState, count: s.count + 1 };
+ }
+ });
+
+ const countUntilThree = countUntilModel(3);
+ const finalStateP = stateActionMachine(state, next, countUntilThree);
+
+ test('await the next action and passes the response to the model with the updated state from the previous step', () => {
+ expect(countUntilThree.mock.calls).toMatchInlineSnapshot(`
+ Array [
+ Array [
+ Object {
+ "controlState": "INIT",
+ "count": 1,
+ },
+ Object {
+ "_tag": "Right",
+ "right": "response",
+ },
+ ],
+ Array [
+ Object {
+ "controlState": "INIT",
+ "count": 2,
+ },
+ Object {
+ "_tag": "Right",
+ "right": "response",
+ },
+ ],
+ Array [
+ Object {
+ "controlState": "INIT",
+ "count": 3,
+ },
+ Object {
+ "_tag": "Right",
+ "right": "response",
+ },
+ ],
+ ]
+ `);
+ });
+
+ test('calls next for each step until next returns null', () => {
+ expect(next).toHaveBeenCalledTimes(4);
+ expect(next.mock.results[3]).toMatchObject({
+ type: 'return',
+ value: null,
+ });
+ });
+
+ test('rejects if an exception is throw from inside an action', () => {
+ return expect(
+ stateActionMachine({ ...state, controlState: 'THROW' }, next, countUntilThree)
+ ).rejects.toThrowErrorMatchingInlineSnapshot(`"Invalid control state"`);
+ });
+
+ test('resolve with the final state once all steps are completed', () => {
+ return expect(finalStateP).resolves.toMatchInlineSnapshot(`
+ Object {
+ "controlState": "DONE",
+ "count": 3,
+ }
+ `);
+ });
+
+ test("rejects if control state doesn't change after 50 steps", () => {
+ return expect(
+ stateActionMachine(state, next, countUntilModel(51))
+ ).rejects.toThrowErrorMatchingInlineSnapshot(
+ `"Control state didn't change after 50 steps aborting."`
+ );
+ });
+});
diff --git a/src/core/server/saved_objects/migrationsv2/state_action_machine.ts b/src/core/server/saved_objects/migrationsv2/state_action_machine.ts
new file mode 100644
index 0000000000000..6c681401c522b
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/state_action_machine.ts
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+export interface ControlState {
+ controlState: string;
+}
+
+const MAX_STEPS_WITHOUT_CONTROL_STATE_CHANGE = 50;
+
+/**
+ * A state-action machine next function that returns the next action thunk
+ * based on the passed in state.
+ */
+export type Next = (state: S) => (() => Promise) | null;
+
+/**
+ * A state-action machine model that given the current state and an action
+ * response returns the state for the next step.
+ */
+export type Model = (state: S, res: any) => S;
+
+/**
+ * A state-action machine for performing Saved Object Migrations.
+ *
+ * Based on https://www.microsoft.com/en-us/research/uploads/prod/2016/12/Computation-and-State-Machines.pdf
+ *
+ * The state-action machine defines its behaviour in steps. Each step is a
+ * transition from a state s_i to the state s_i+1 caused by an action a_i.
+ *
+ * s_i -> a_i -> s_i+1
+ * s_i+1 -> a_i+1 -> s_i+2
+ *
+ * Given a state s1, `next(s1)` returns the next action to execute. Actions are
+ * asynchronous, once the action resolves, we can use the action response to
+ * determine the next state to transition to as defined by the function
+ * `model(state, response)`.
+ *
+ * We can then loosely define a step as:
+ * s_i+1 = model(s_i, await next(s_i)())
+ *
+ * When there are no more actions returned by `next` the state-action machine
+ * terminates.
+ *
+ * @param initialState The initial state with which to start the state action
+ * machine
+ * @param next A function which given the current state, returns a thunk which
+ * is the next action to perform. If next returns null the state action machine
+ * terminates.
+ * @param model A function which given the current state and the response of
+ * the action thunk, returns a new state
+ * @param onStepComplete A callback function which is called after every
+ * completed step
+ */
+export async function stateActionMachine(
+ initialState: S,
+ // It would be nice to use generics to enforce that model should accept all
+ // the types of responses that actions could return. But seems to be
+ // impossible because of https://github.com/microsoft/TypeScript/issues/13995#issuecomment-477978591
+ next: Next,
+ model: Model
+) {
+ let state = initialState;
+ let controlStateStepCounter = 0;
+ let nextAction = next(state);
+
+ while (nextAction != null) {
+ // Perform the action that triggers the next step
+ const actionResponse = await nextAction();
+ const newState = model(state, actionResponse);
+
+ controlStateStepCounter =
+ newState.controlState === state.controlState ? controlStateStepCounter + 1 : 0;
+ if (controlStateStepCounter >= MAX_STEPS_WITHOUT_CONTROL_STATE_CHANGE) {
+ // This is just a fail-safe to ensure we don't get stuck in an infinite loop
+ throw new Error(
+ `Control state didn't change after ${MAX_STEPS_WITHOUT_CONTROL_STATE_CHANGE} steps aborting.`
+ );
+ }
+
+ // Get ready for the next step
+ state = newState;
+ nextAction = next(state);
+ }
+
+ return state;
+}
diff --git a/src/core/server/saved_objects/migrationsv2/types.ts b/src/core/server/saved_objects/migrationsv2/types.ts
new file mode 100644
index 0000000000000..84146911fed9b
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/types.ts
@@ -0,0 +1,291 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import * as Option from 'fp-ts/lib/Option';
+import { ControlState } from './state_action_machine';
+import { AliasAction } from './actions';
+import { IndexMapping } from '../mappings';
+import { SavedObjectsRawDoc } from '..';
+
+export interface BaseState extends ControlState {
+ /** The first part of the index name such as `.kibana` or `.kibana_task_manager` */
+ readonly indexPrefix: string;
+ /**
+ * The name of the concrete legacy index (if it exists) e.g. `.kibana` for <
+ * 6.5 or `.kibana_task_manager` for < 7.4
+ */
+ readonly legacyIndex: string;
+ /** Kibana version number */
+ readonly kibanaVersion: string;
+ /** The mappings to apply to the target index */
+ readonly targetIndexMappings: IndexMapping;
+ /**
+ * Special mappings set when creating the temp index into which we reindex.
+ * These mappings have `dynamic: false` to allow for any kind of outdated
+ * document to be written to the index, but still define mappings for the
+ * `migrationVersion` and `type` fields so that we can search for and
+ * transform outdated documents.
+ */
+ readonly tempIndexMappings: IndexMapping;
+ /** Script to apply to a legacy index before it can be used as a migration source */
+ readonly preMigrationScript: Option.Option;
+ readonly outdatedDocumentsQuery: Record;
+ readonly retryCount: number;
+ readonly retryDelay: number;
+ readonly logs: Array<{ level: 'error' | 'info'; message: string }>;
+ /**
+ * The current alias e.g. `.kibana` which always points to the latest
+ * version index
+ */
+ readonly currentAlias: string;
+ /**
+ * The version alias e.g. `.kibana_7.11.0` which points to the index used
+ * by this version of Kibana e.g. `.kibana_7.11.0_001`
+ */
+ readonly versionAlias: string;
+ /**
+ * The index used by this version of Kibana e.g. `.kibana_7.11.0_001`
+ */
+ readonly versionIndex: string;
+ /**
+   * An alias on the target index used as part of a "reindex block" that
+   * prevents lost deletes e.g. `.kibana_7.11.0_reindex_temp`.
+ */
+ readonly tempIndex: string;
+}
+
+export type InitState = BaseState & {
+ readonly controlState: 'INIT';
+};
+
+export type PostInitState = BaseState & {
+ /**
+ * The source index is the index from which the migration reads. If the
+ * Option is a none, we didn't do any migration from a source index, either:
+ * - this is a blank ES cluster and we will perform the CREATE_NEW_TARGET
+ * step
+ * - another Kibana instance already did the source migration and finished
+ * the MARK_VERSION_INDEX_READY step
+ */
+ readonly sourceIndex: Option.Option;
+ /** The target index is the index to which the migration writes */
+ readonly targetIndex: string;
+ readonly versionIndexReadyActions: Option.Option;
+ readonly outdatedDocumentsQuery: Record;
+};
+
+export type DoneState = PostInitState & {
+ /** Migration completed successfully */
+ readonly controlState: 'DONE';
+};
+
+export type FatalState = BaseState & {
+ /** Migration terminated with a failure */
+ readonly controlState: 'FATAL';
+ /** The reason the migration was terminated */
+ readonly reason: string;
+};
+
+export type SetSourceWriteBlockState = PostInitState & {
+ /** Set a write block on the source index to prevent any further writes */
+ readonly controlState: 'SET_SOURCE_WRITE_BLOCK';
+ readonly sourceIndex: Option.Some;
+};
+
+export type CreateNewTargetState = PostInitState & {
+ /** Blank ES cluster, create a new version-specific target index */
+ readonly controlState: 'CREATE_NEW_TARGET';
+ readonly sourceIndex: Option.None;
+ readonly versionIndexReadyActions: Option.Some;
+};
+
+export type CreateReindexTempState = PostInitState & {
+ /**
+ * Create a target index with mappings from the source index and registered
+ * plugins
+ */
+ readonly controlState: 'CREATE_REINDEX_TEMP';
+ readonly sourceIndex: Option.Some;
+};
+
+export type ReindexSourceToTempState = PostInitState & {
+ /** Reindex documents from the source index into the target index */
+ readonly controlState: 'REINDEX_SOURCE_TO_TEMP';
+ readonly sourceIndex: Option.Some;
+};
+
+export type ReindexSourceToTempWaitForTaskState = PostInitState & {
+ /**
+ * Wait until reindexing documents from the source index into the target
+ * index has completed
+ */
+ readonly controlState: 'REINDEX_SOURCE_TO_TEMP_WAIT_FOR_TASK';
+ readonly sourceIndex: Option.Some;
+ readonly reindexSourceToTargetTaskId: string;
+};
+
+export type SetTempWriteBlock = PostInitState & {
+ /**
+   * Set a write block on the temp index before it is cloned to the target index.
+ */
+ readonly controlState: 'SET_TEMP_WRITE_BLOCK';
+ readonly sourceIndex: Option.Some;
+};
+
+export type CloneTempToSource = PostInitState & {
+ /**
+   * Clone the temporary reindex index into the target index.
+ */
+ readonly controlState: 'CLONE_TEMP_TO_TARGET';
+ readonly sourceIndex: Option.Some;
+};
+
+export type UpdateTargetMappingsState = PostInitState & {
+ /** Update the mappings of the target index */
+ readonly controlState: 'UPDATE_TARGET_MAPPINGS';
+};
+
+export type UpdateTargetMappingsWaitForTaskState = PostInitState & {
+ /** Update the mappings of the target index */
+ readonly controlState: 'UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK';
+ readonly updateTargetMappingsTaskId: string;
+};
+
+export type OutdatedDocumentsSearch = PostInitState & {
+ /** Search for outdated documents in the target index */
+ readonly controlState: 'OUTDATED_DOCUMENTS_SEARCH';
+};
+
+export type OutdatedDocumentsTransform = PostInitState & {
+ /** Transform a batch of outdated documents to their latest version and write them to the target index */
+ readonly controlState: 'OUTDATED_DOCUMENTS_TRANSFORM';
+ readonly outdatedDocuments: SavedObjectsRawDoc[];
+};
+
+export type MarkVersionIndexReady = PostInitState & {
+ /**
+ * Marks the version-specific index as ready. Once this step is complete,
+ * future Kibana instances will not have to prepare a target index by e.g.
+ * cloning a source index or creating a new index.
+ *
+ * To account for newly installed or enabled plugins, Kibana will still
+ * perform the `UPDATE_TARGET_MAPPINGS*` and `OUTDATED_DOCUMENTS_*` steps
+ * every time it is restarted.
+ */
+ readonly controlState: 'MARK_VERSION_INDEX_READY';
+ readonly versionIndexReadyActions: Option.Some;
+};
+
+export type MarkVersionIndexReadyConflict = PostInitState & {
+ /**
+ * If the MARK_VERSION_INDEX_READY step fails another instance was
+   * performing the migration in parallel and won the race to mark the
+ * migration as complete. This step ensures that the instance that won the
+ * race is running the same version of Kibana, if it does, the migration is
+ * complete and we can go to DONE.
+ *
+ * If it was a different version of Kibana that completed the migration fail
+ * the migration by going to FATAL. If this instance restarts it will either
+ * notice that a newer version already completed the migration and refuse to
+ * start up, or if it was an older version that completed the migration
+ * start a new migration to the latest version.
+ */
+ readonly controlState: 'MARK_VERSION_INDEX_READY_CONFLICT';
+};
+
+/**
+ * If we're migrating from a legacy index we need to perform some additional
+ * steps to prepare this index so that it can be used as a migration 'source'.
+ */
+export type LegacyBaseState = PostInitState & {
+ readonly sourceIndex: Option.Some;
+ readonly legacyPreMigrationDoneActions: AliasAction[];
+ /**
+ * The mappings read from the legacy index, used to create a new reindex
+ * target index.
+ */
+ readonly legacyReindexTargetMappings: IndexMapping;
+};
+
+export type LegacySetWriteBlockState = LegacyBaseState & {
+ /** Set a write block on the legacy index to prevent any further writes */
+ readonly controlState: 'LEGACY_SET_WRITE_BLOCK';
+};
+
+export type LegacyCreateReindexTargetState = LegacyBaseState & {
+ /**
+ * Create a new index into which we can reindex the legacy index. This
+ * index will have the same mappings as the legacy index. Once the legacy
+   * pre-migration is complete, this index will be used as a migration 'source'.
+ */
+ readonly controlState: 'LEGACY_CREATE_REINDEX_TARGET';
+};
+
+export type LegacyReindexState = LegacyBaseState & {
+ /**
+ * Reindex the legacy index into the new index created in the
+ * LEGACY_CREATE_REINDEX_TARGET step (and apply the preMigration script).
+ */
+ readonly controlState: 'LEGACY_REINDEX';
+};
+
+export type LegacyReindexWaitForTaskState = LegacyBaseState & {
+ /** Wait for the reindex operation to complete */
+ readonly controlState: 'LEGACY_REINDEX_WAIT_FOR_TASK';
+ readonly legacyReindexTaskId: string;
+};
+
+export type LegacyDeleteState = LegacyBaseState & {
+ /**
+   * After reindexing has completed, delete the legacy index so that it won't
+ * conflict with the `currentAlias` that we want to create in a later step
+ * e.g. `.kibana`.
+ */
+ readonly controlState: 'LEGACY_DELETE';
+};
+
+export type State =
+ | FatalState
+ | InitState
+ | DoneState
+ | SetSourceWriteBlockState
+ | CreateNewTargetState
+ | CreateReindexTempState
+ | ReindexSourceToTempState
+ | ReindexSourceToTempWaitForTaskState
+ | SetTempWriteBlock
+ | CloneTempToSource
+ | UpdateTargetMappingsState
+ | UpdateTargetMappingsWaitForTaskState
+ | OutdatedDocumentsSearch
+ | OutdatedDocumentsTransform
+ | MarkVersionIndexReady
+ | MarkVersionIndexReadyConflict
+ | LegacyCreateReindexTargetState
+ | LegacySetWriteBlockState
+ | LegacyReindexState
+ | LegacyReindexWaitForTaskState
+ | LegacyDeleteState;
+
+export type AllControlStates = State['controlState'];
+/**
+ * All control states that trigger an action (excludes the terminal states
+ * 'FATAL' and 'DONE').
+ */
+export type AllActionStates = Exclude;
diff --git a/src/core/server/saved_objects/saved_objects_config.ts b/src/core/server/saved_objects/saved_objects_config.ts
index d292d021b4709..94f150cbaba90 100644
--- a/src/core/server/saved_objects/saved_objects_config.ts
+++ b/src/core/server/saved_objects/saved_objects_config.ts
@@ -28,6 +28,8 @@ export const savedObjectsMigrationConfig = {
scrollDuration: schema.string({ defaultValue: '15m' }),
pollInterval: schema.number({ defaultValue: 1500 }),
skip: schema.boolean({ defaultValue: false }),
+ // TODO migrationsV2: remove/deprecate once we release migrations v2
+ enableV2: schema.boolean({ defaultValue: false }),
}),
};
diff --git a/src/core/server/saved_objects/saved_objects_service.test.ts b/src/core/server/saved_objects/saved_objects_service.test.ts
index c90f564ce33d7..1eba61c7876d1 100644
--- a/src/core/server/saved_objects/saved_objects_service.test.ts
+++ b/src/core/server/saved_objects/saved_objects_service.test.ts
@@ -18,21 +18,18 @@
*/
import {
- KibanaMigratorMock,
migratorInstanceMock,
clientProviderInstanceMock,
typeRegistryInstanceMock,
} from './saved_objects_service.test.mocks';
import { BehaviorSubject } from 'rxjs';
import { ByteSizeValue } from '@kbn/config-schema';
-import { errors as esErrors } from '@elastic/elasticsearch';
import { SavedObjectsService } from './saved_objects_service';
import { mockCoreContext } from '../core_context.mock';
import { Env } from '../config';
import { configServiceMock } from '../mocks';
import { elasticsearchServiceMock } from '../elasticsearch/elasticsearch_service.mock';
-import { elasticsearchClientMock } from '../elasticsearch/client/mocks';
import { coreUsageDataServiceMock } from '../core_usage_data/core_usage_data_service.mock';
import { httpServiceMock } from '../http/http_service.mock';
import { httpServerMock } from '../http/http_server.mocks';
@@ -163,29 +160,6 @@ describe('SavedObjectsService', () => {
});
describe('#start()', () => {
- it('creates a KibanaMigrator which retries NoLivingConnectionsError errors from ES client', async () => {
- const coreContext = createCoreContext();
-
- const soService = new SavedObjectsService(coreContext);
- const coreSetup = createSetupDeps();
- const coreStart = createStartDeps();
-
- coreStart.elasticsearch.client.asInternalUser.indices.create = jest
- .fn()
- .mockImplementationOnce(() =>
- Promise.reject(new esErrors.NoLivingConnectionsError('reason', {} as any))
- )
- .mockImplementationOnce(() =>
- elasticsearchClientMock.createSuccessTransportRequestPromise('success')
- );
-
- await soService.setup(coreSetup);
- await soService.start(coreStart, 1);
-
- const response = await KibanaMigratorMock.mock.calls[0][0].client.indices.create();
- return expect(response.body).toBe('success');
- });
-
it('skips KibanaMigrator migrations when pluginsInitialized=false', async () => {
const coreContext = createCoreContext({ skipMigration: false });
const soService = new SavedObjectsService(coreContext);
diff --git a/src/core/server/saved_objects/saved_objects_service.ts b/src/core/server/saved_objects/saved_objects_service.ts
index 400d3157bd00d..d2e4d8c5cbb2d 100644
--- a/src/core/server/saved_objects/saved_objects_service.ts
+++ b/src/core/server/saved_objects/saved_objects_service.ts
@@ -30,7 +30,6 @@ import { CoreContext } from '../core_context';
import { CoreUsageDataSetup } from '../core_usage_data';
import {
ElasticsearchClient,
- IClusterClient,
InternalElasticsearchServiceSetup,
InternalElasticsearchServiceStart,
} from '../elasticsearch';
@@ -53,7 +52,6 @@ import { SavedObjectsSerializer } from './serialization';
import { registerRoutes } from './routes';
import { ServiceStatus } from '../status';
import { calculateStatus$ } from './status';
-import { createMigrationEsClient } from './migrations/core/';
/**
* Saved Objects is Kibana's data persistence mechanism allowing plugins to
* use Elasticsearch for storing and querying state. The SavedObjectsServiceSetup API exposes methods
@@ -365,7 +363,7 @@ export class SavedObjectsService
const migrator = this.createMigrator(
kibanaConfig,
this.config.migration,
- elasticsearch.client,
+ elasticsearch.client.asInternalUser,
migrationsRetryDelay
);
@@ -462,7 +460,7 @@ export class SavedObjectsService
private createMigrator(
kibanaConfig: KibanaConfigType,
savedObjectsConfig: SavedObjectsMigrationConfigType,
- client: IClusterClient,
+ client: ElasticsearchClient,
migrationsRetryDelay?: number
): IKibanaMigrator {
return new KibanaMigrator({
@@ -471,7 +469,8 @@ export class SavedObjectsService
kibanaVersion: this.coreContext.env.packageInfo.version,
savedObjectsConfig,
kibanaConfig,
- client: createMigrationEsClient(client.asInternalUser, this.logger, migrationsRetryDelay),
+ client,
+ migrationsRetryDelay,
});
}
}
diff --git a/src/core/server/saved_objects/serialization/types.ts b/src/core/server/saved_objects/serialization/types.ts
index e59b1a68e1ad1..1008fd95900c0 100644
--- a/src/core/server/saved_objects/serialization/types.ts
+++ b/src/core/server/saved_objects/serialization/types.ts
@@ -27,7 +27,6 @@ import { SavedObjectsMigrationVersion, SavedObjectReference } from '../types';
export interface SavedObjectsRawDoc {
_id: string;
_source: SavedObjectsRawDocSource;
- _type?: string;
_seq_no?: number;
_primary_term?: number;
}
diff --git a/src/core/server/saved_objects/service/lib/integration_tests/repository.test.ts b/src/core/server/saved_objects/service/lib/integration_tests/repository.test.ts
index 2f64776501df0..0957c2ed6cc28 100644
--- a/src/core/server/saved_objects/service/lib/integration_tests/repository.test.ts
+++ b/src/core/server/saved_objects/service/lib/integration_tests/repository.test.ts
@@ -17,7 +17,7 @@
* under the License.
*/
-import { InternalCoreStart } from 'src/core/server/internal_types';
+import { InternalCoreStart } from '../../../../internal_types';
import * as kbnTestServer from '../../../../../test_helpers/kbn_server';
import { Root } from '../../../../root';
diff --git a/src/core/server/saved_objects/service/lib/repository.test.js b/src/core/server/saved_objects/service/lib/repository.test.js
index a19b4cc01db8e..309a817d4d04f 100644
--- a/src/core/server/saved_objects/service/lib/repository.test.js
+++ b/src/core/server/saved_objects/service/lib/repository.test.js
@@ -25,6 +25,7 @@ import { SavedObjectsSerializer } from '../../serialization';
import { encodeHitVersion } from '../../version';
import { SavedObjectTypeRegistry } from '../../saved_objects_type_registry';
import { DocumentMigrator } from '../../migrations/core/document_migrator';
+import { mockKibanaMigrator } from '../../migrations/kibana/kibana_migrator.mock';
import { elasticsearchClientMock } from '../../../elasticsearch/client/mocks';
import { esKuery } from '../../es_query';
const { nodeTypes } = esKuery;
@@ -215,10 +216,9 @@ describe('SavedObjectsRepository', () => {
beforeEach(() => {
client = elasticsearchClientMock.createElasticsearchClient();
- migrator = {
- migrateDocument: jest.fn().mockImplementation(documentMigrator.migrate),
- runMigrations: async () => ({ status: 'skipped' }),
- };
+ migrator = mockKibanaMigrator.create();
+ migrator.migrateDocument = jest.fn().mockImplementation(documentMigrator.migrate);
+ migrator.runMigrations = async () => ({ status: 'skipped' });
// create a mock serializer "shim" so we can track function calls, but use the real serializer's implementation
serializer = {
diff --git a/src/core/server/saved_objects/service/lib/repository.ts b/src/core/server/saved_objects/service/lib/repository.ts
index 587a0e51ef9b9..e74f2edcd5e33 100644
--- a/src/core/server/saved_objects/service/lib/repository.ts
+++ b/src/core/server/saved_objects/service/lib/repository.ts
@@ -1696,7 +1696,16 @@ export class SavedObjectsRepository {
* @param type - the type
*/
private getIndexForType(type: string) {
- return this._registry.getIndex(type) || this._index;
+ // TODO migrationsV2: Remove once we release migrations v2
+ // This is hacky, but it required the least amount of changes to
+ // existing code to support a migrations v2 index. Long term we would
+ // want to always use the type registry to resolve a type's index
+ // (including the default index).
+ if (this._migrator.savedObjectsConfig.enableV2) {
+ return `${this._registry.getIndex(type) || this._index}_${this._migrator.kibanaVersion}`;
+ } else {
+ return this._registry.getIndex(type) || this._index;
+ }
}
/**
diff --git a/src/core/server/server.api.md b/src/core/server/server.api.md
index 1ab06b7912d1f..5f07a4b523056 100644
--- a/src/core/server/server.api.md
+++ b/src/core/server/server.api.md
@@ -103,7 +103,6 @@ import { IngestGetPipelineParams } from 'elasticsearch';
import { IngestPutPipelineParams } from 'elasticsearch';
import { IngestSimulateParams } from 'elasticsearch';
import { KibanaClient } from '@elastic/elasticsearch/api/kibana';
-import { KibanaConfigType } from 'src/core/server/kibana_config';
import { Logger } from '@kbn/logging';
import { LoggerFactory } from '@kbn/logging';
import { LogLevel } from '@kbn/logging';
@@ -2612,8 +2611,6 @@ export interface SavedObjectsRawDoc {
//
// (undocumented)
_source: SavedObjectsRawDocSource;
- // (undocumented)
- _type?: string;
}
// @public (undocumented)
@@ -2902,7 +2899,7 @@ export interface ShardsResponse {
// @public (undocumented)
export type SharedGlobalConfig = RecursiveReadonly<{
- kibana: Pick;
+ kibana: Pick;
elasticsearch: Pick;
path: Pick;
savedObjects: Pick;
diff --git a/src/plugins/data/public/public.api.md b/src/plugins/data/public/public.api.md
index 656034546d02f..f02e0cf443310 100644
--- a/src/plugins/data/public/public.api.md
+++ b/src/plugins/data/public/public.api.md
@@ -14,6 +14,7 @@ import { Assign } from '@kbn/utility-types';
import { BehaviorSubject } from 'rxjs';
import { BfetchPublicSetup } from 'src/plugins/bfetch/public';
import Boom from '@hapi/boom';
+import { ConfigDeprecationProvider } from '@kbn/config';
import { CoreSetup } from 'src/core/public';
import { CoreSetup as CoreSetup_2 } from 'kibana/public';
import { CoreStart } from 'kibana/public';
@@ -48,7 +49,6 @@ import { ISearchSource as ISearchSource_2 } from 'src/plugins/data/public';
import { IStorageWrapper } from 'src/plugins/kibana_utils/public';
import { IUiSettingsClient } from 'src/core/public';
import { KibanaClient } from '@elastic/elasticsearch/api/kibana';
-import { KibanaConfigType } from 'src/core/server/kibana_config';
import { Location } from 'history';
import { LocationDescriptorObject } from 'history';
import { Logger } from '@kbn/logging';
diff --git a/src/plugins/embeddable/public/public.api.md b/src/plugins/embeddable/public/public.api.md
index 4b7d60b4dc9ec..71e37695c5344 100644
--- a/src/plugins/embeddable/public/public.api.md
+++ b/src/plugins/embeddable/public/public.api.md
@@ -14,6 +14,7 @@ import { Assign } from '@kbn/utility-types';
import { BehaviorSubject } from 'rxjs';
import { BfetchPublicSetup } from 'src/plugins/bfetch/public';
import Boom from '@hapi/boom';
+import { ConfigDeprecationProvider } from '@kbn/config';
import { CoreSetup as CoreSetup_2 } from 'src/core/public';
import { CoreSetup as CoreSetup_3 } from 'kibana/public';
import { CoreStart as CoreStart_2 } from 'kibana/public';
@@ -42,7 +43,6 @@ import { ISearchSource } from 'src/plugins/data/public';
import { IStorageWrapper } from 'src/plugins/kibana_utils/public';
import { IUiSettingsClient as IUiSettingsClient_2 } from 'src/core/public';
import { KibanaClient } from '@elastic/elasticsearch/api/kibana';
-import { KibanaConfigType } from 'src/core/server/kibana_config';
import { Location } from 'history';
import { LocationDescriptorObject } from 'history';
import { Logger } from '@kbn/logging';
diff --git a/x-pack/plugins/maps/server/saved_objects/map.ts b/x-pack/plugins/maps/server/saved_objects/map.ts
index ce9d579137864..d6756ec4c9bf5 100644
--- a/x-pack/plugins/maps/server/saved_objects/map.ts
+++ b/x-pack/plugins/maps/server/saved_objects/map.ts
@@ -20,6 +20,7 @@ export const mapSavedObjects: SavedObjectsType = {
mapStateJSON: { type: 'text' },
layerListJSON: { type: 'text' },
uiStateJSON: { type: 'text' },
+ bounds: { dynamic: false, properties: {} }, // Disable removed field
},
},
management: {
diff --git a/x-pack/plugins/task_manager/server/saved_objects/index.ts b/x-pack/plugins/task_manager/server/saved_objects/index.ts
index fbb54b566ece0..12bf264a80d32 100644
--- a/x-pack/plugins/task_manager/server/saved_objects/index.ts
+++ b/x-pack/plugins/task_manager/server/saved_objects/index.ts
@@ -17,7 +17,7 @@ export function setupSavedObjects(
name: 'task',
namespaceType: 'agnostic',
hidden: true,
- convertToAliasScript: `ctx._id = ctx._source.type + ':' + ctx._id`,
+ convertToAliasScript: `ctx._id = ctx._source.type + ':' + ctx._id; ctx._source.remove("kibana")`,
mappings: mappings.task,
migrations,
indexPattern: config.index,