-
Notifications
You must be signed in to change notification settings - Fork 25.8k
Mark indices ready for frozen conversion in DLM service #144248
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
9be0828
6b90ead
150b495
e26e84e
0a3a8ef
0d46aed
aa69c38
204ed52
626954d
ca24600
bb67fc6
1005c91
9b09d6a
2063140
158ebca
c34e1ca
3c69809
2df0fcb
ad7488a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -12,7 +12,12 @@ | |
| import org.apache.logging.log4j.Logger; | ||
| import org.elasticsearch.action.DocWriteRequest; | ||
| import org.elasticsearch.action.IndicesRequest; | ||
| import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; | ||
| import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; | ||
| import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction; | ||
| import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; | ||
| import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; | ||
| import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; | ||
| import org.elasticsearch.action.admin.indices.flush.FlushRequest; | ||
| import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; | ||
| import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; | ||
|
|
@@ -35,6 +40,7 @@ | |
| import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction; | ||
| import org.elasticsearch.action.index.IndexRequest; | ||
| import org.elasticsearch.action.support.broadcast.BroadcastResponse; | ||
| import org.elasticsearch.cluster.ClusterState; | ||
| import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; | ||
| import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; | ||
| import org.elasticsearch.cluster.metadata.DataStream; | ||
|
|
@@ -64,13 +70,15 @@ | |
| import org.elasticsearch.health.node.DslErrorInfo; | ||
| import org.elasticsearch.health.node.FetchHealthInfoCacheAction; | ||
| import org.elasticsearch.index.Index; | ||
| import org.elasticsearch.index.IndexNotFoundException; | ||
| import org.elasticsearch.index.MergePolicyConfig; | ||
| import org.elasticsearch.index.mapper.DateFieldMapper; | ||
| import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; | ||
| import org.elasticsearch.indices.ExecutorNames; | ||
| import org.elasticsearch.indices.SystemDataStreamDescriptor; | ||
| import org.elasticsearch.plugins.Plugin; | ||
| import org.elasticsearch.plugins.SystemIndexPlugin; | ||
| import org.elasticsearch.repositories.RepositoriesService; | ||
| import org.elasticsearch.rest.RestStatus; | ||
| import org.elasticsearch.test.ESIntegTestCase; | ||
| import org.elasticsearch.test.transport.MockTransportService; | ||
|
|
@@ -86,9 +94,12 @@ | |
| import java.util.List; | ||
| import java.util.Locale; | ||
| import java.util.Map; | ||
| import java.util.Optional; | ||
| import java.util.Set; | ||
| import java.util.concurrent.TimeUnit; | ||
| import java.util.concurrent.atomic.AtomicLong; | ||
| import java.util.function.Consumer; | ||
| import java.util.function.UnaryOperator; | ||
|
|
||
| import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; | ||
| import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.READ_ONLY; | ||
|
|
@@ -117,6 +128,7 @@ | |
|
|
||
| public class DataStreamLifecycleServiceIT extends ESIntegTestCase { | ||
| private static final Logger logger = LogManager.getLogger(DataStreamLifecycleServiceIT.class); | ||
| private static final String DEFAULT_REPO = "my-repo"; | ||
|
|
||
| @Override | ||
| protected Collection<Class<? extends Plugin>> nodePlugins() { | ||
|
|
@@ -895,7 +907,7 @@ public void testReenableDataStreamLifecycle() throws Exception { | |
|
|
||
| public void testLifecycleAppliedToFailureStore() throws Exception { | ||
| DataStreamLifecycle.Template lifecycle = DataStreamLifecycle.failuresLifecycleBuilder() | ||
| .dataRetention(TimeValue.timeValueSeconds(20)) | ||
| .dataRetention(TimeValue.timeValueMinutes(20)) | ||
| .buildTemplate(); | ||
|
|
||
| putComposableIndexTemplate("id1", """ | ||
|
|
@@ -937,17 +949,27 @@ public void testLifecycleAppliedToFailureStore() throws Exception { | |
| ByteSizeValue targetFloor = DATA_STREAM_MERGE_POLICY_TARGET_FLOOR_SEGMENT_SETTING.get(clusterSettings); | ||
|
|
||
| assertBusy(() -> { | ||
| GetSettingsRequest getSettingsRequest = new GetSettingsRequest(TEST_REQUEST_TIMEOUT).indices(firstGenerationIndex) | ||
| .includeDefaults(true); | ||
| GetSettingsResponse getSettingsResponse = client().execute(GetSettingsAction.INSTANCE, getSettingsRequest).actionGet(); | ||
| assertThat( | ||
| getSettingsResponse.getSetting(firstGenerationIndex, MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey()), | ||
| is(targetFactor.toString()) | ||
| ); | ||
| assertThat( | ||
| getSettingsResponse.getSetting(firstGenerationIndex, MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey()), | ||
| is(targetFloor.getStringRep()) | ||
| ); | ||
| try { | ||
| GetSettingsRequest getSettingsRequest = new GetSettingsRequest(TEST_REQUEST_TIMEOUT).indices(firstGenerationIndex) | ||
| .includeDefaults(true); | ||
| GetSettingsResponse getSettingsResponse = client().execute(GetSettingsAction.INSTANCE, getSettingsRequest).actionGet(); | ||
| assertThat( | ||
| getSettingsResponse.getSetting( | ||
| firstGenerationIndex, | ||
| MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey() | ||
| ), | ||
| is(targetFactor.toString()) | ||
| ); | ||
| assertThat( | ||
| getSettingsResponse.getSetting( | ||
| firstGenerationIndex, | ||
| MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey() | ||
| ), | ||
| is(targetFloor.getStringRep()) | ||
| ); | ||
| } catch (IndexNotFoundException e) { | ||
| fail("expected index " + firstGenerationIndex + " to exist but it did not."); | ||
| } | ||
| }); | ||
|
|
||
| updateFailureStoreConfiguration(dataStreamName, true, TimeValue.timeValueSeconds(1)); | ||
|
|
@@ -967,7 +989,84 @@ public void testLifecycleAppliedToFailureStore() throws Exception { | |
| List<Index> retrievedFailureIndices = getDataStreamResponse.getDataStreams().get(0).getDataStream().getFailureIndices(); | ||
| assertThat(retrievedFailureIndices.size(), equalTo(1)); | ||
| assertThat(retrievedFailureIndices.get(0).getName(), equalTo(secondGenerationIndex)); | ||
| }); | ||
| }, 30, TimeUnit.SECONDS); | ||
| } | ||
|
|
||
| public void testCollectAndMarkIndicesForFrozen() throws Exception { | ||
| assumeTrue("requires feature flag to be enabled", DataStreamLifecycle.DLM_SEARCHABLE_SNAPSHOTS_FEATURE_FLAG.isEnabled()); | ||
|
|
||
| client().execute( | ||
| TransportPutRepositoryAction.TYPE, | ||
| new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, DEFAULT_REPO).name(DEFAULT_REPO) | ||
| .type("fs") | ||
| .settings(Settings.builder().put("location", DEFAULT_REPO)) | ||
| ).get(); | ||
| updateClusterSettings(Settings.builder().put(RepositoriesService.DEFAULT_REPOSITORY_SETTING.getKey(), DEFAULT_REPO)); | ||
|
|
||
| DataStreamLifecycle.Template lifecycle = DataStreamLifecycle.dataLifecycleBuilder() | ||
| .frozenAfter(TimeValue.timeValueDays(1)) | ||
| .buildTemplate(); | ||
|
|
||
| Iterable<DataStreamLifecycleService> dataStreamLifecycleServices = internalCluster().getInstances(DataStreamLifecycleService.class); | ||
| Clock clock = Clock.systemUTC(); | ||
| AtomicLong now = new AtomicLong(clock.millis()); | ||
| dataStreamLifecycleServices.forEach(dataStreamLifecycleService -> dataStreamLifecycleService.setNowSupplier(now::get)); | ||
|
|
||
| putComposableIndexTemplate( | ||
| "mytemplate", | ||
| null, | ||
| List.of("foo*"), | ||
| Settings.builder().put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1").build(), | ||
| null, | ||
| lifecycle, | ||
| null, | ||
| false | ||
| ); | ||
|
|
||
| String dataStream = "foo-ds"; | ||
| CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( | ||
| TEST_REQUEST_TIMEOUT, | ||
| TEST_REQUEST_TIMEOUT, | ||
| dataStream | ||
| ); | ||
| client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); | ||
|
|
||
| indexDocs(dataStream, randomIntBetween(10, 50)); | ||
|
|
||
| // Let's verify the rollover | ||
| List<String> backingIndices = waitForDataStreamIndices(dataStream, 2, false); | ||
| String candidateIndex = backingIndices.get(0); | ||
|
|
||
| AtomicLong twoDaysLater = new AtomicLong(clock.millis() + TimeValue.timeValueDays(2).millis()); | ||
| dataStreamLifecycleServices.forEach(dataStreamLifecycleService -> dataStreamLifecycleService.setNowSupplier(twoDaysLater::get)); | ||
|
|
||
| assertBusy(() -> { | ||
| logger.info("--> checking to see if index has been marked for frozen"); | ||
| ClusterStateResponse resp = client().execute(ClusterStateAction.INSTANCE, new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).get(); | ||
| ClusterState state = resp.getState(); | ||
| String setRepo = Optional.ofNullable(state.metadata().getProject(Metadata.DEFAULT_PROJECT_ID)) | ||
| .map(pm -> pm.index(candidateIndex)) | ||
| .map(peek(im -> logger.info("--> found index {}", candidateIndex))) | ||
| .map(im -> im.getCustomData(DataStreamsPlugin.LIFECYCLE_CUSTOM_INDEX_METADATA_KEY)) | ||
| .map(peek(custom -> logger.info("--> index {} has custom metadata: {}", candidateIndex, custom))) | ||
| .map(meta -> meta.get(DataStreamLifecycleService.FROZEN_CANDIDATE_REPOSITORY_METADATA_KEY)) | ||
| .map(peek(repo -> logger.info("--> index {} has repo {} configured", candidateIndex, repo))) | ||
| .orElse("_unset_"); | ||
| logger.info("--> repository set to: {}", setRepo); | ||
| assertThat(setRepo, equalTo(DEFAULT_REPO)); | ||
| }, 30, TimeUnit.SECONDS); | ||
|
Comment on lines
+1010
to
+1057
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Contributor suggestion: reset the injected clock in a teardown (e.g. a `finally` block or an `@After` method) rather than only at the end of the test body. This test mutates every DataStreamLifecycleService's now-supplier, so a failure partway through the test would leak the modified clock into subsequent tests. 🤖 Prompt for AI Agents |
||
|
|
||
| dataStreamLifecycleServices.forEach(dataStreamLifecycleService -> dataStreamLifecycleService.setNowSupplier(clock::millis)); | ||
| } | ||
|
|
||
| /** | ||
| * Helper for peeking Optionals | ||
| */ | ||
| <T> UnaryOperator<T> peek(Consumer<T> c) { | ||
| return x -> { | ||
| c.accept(x); | ||
| return x; | ||
| }; | ||
| } | ||
|
|
||
| static void indexDocs(String dataStream, int numDocs) { | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
In case you are wondering why this seemingly unrelated change was made: I ran these tests many, many times while developing the test for this PR. This one in particular was flaky because, on a slower machine, the index could end up deleted before we performed the check. This change makes the test no longer flaky on my machine.
It does not actually change the test's behavior or what we are verifying in this particular test.