@@ -26,11 +26,12 @@ public class RolloverAction implements LifecycleAction, ToXContentObject {
private static final ParseField MAX_PRIMARY_SHARD_SIZE_FIELD = new ParseField("max_primary_shard_size");
private static final ParseField MAX_AGE_FIELD = new ParseField("max_age");
private static final ParseField MAX_DOCS_FIELD = new ParseField("max_docs");
private static final ParseField MAX_SHARD_DOCS_FIELD = new ParseField("max_shard_docs");
Member:
I don't think it is needed to add this new limit to the HLRC code base.
The HLRC is no longer developed, in favour of the new Java client.
It just exists in master/8.0 for tests that still rely on the HLRC,
but there should be no need to add new things to it.
The HLRC is no longer released / published and will be removed in the near future.
So I think we can undo all changes in the client directory.

Contributor Author:
Got it, I will revert them.


private static final ConstructingObjectParser<RolloverAction, Void> PARSER = new ConstructingObjectParser<>(
NAME,
true,
a -> new RolloverAction((ByteSizeValue) a[0], (ByteSizeValue) a[1], (TimeValue) a[2], (Long) a[3])
a -> new RolloverAction((ByteSizeValue) a[0], (ByteSizeValue) a[1], (TimeValue) a[2], (Long) a[3], (Long) a[4])
);

static {
@@ -53,25 +54,28 @@ public class RolloverAction implements LifecycleAction, ToXContentObject {
ValueType.VALUE
);
PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_DOCS_FIELD);
PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_SHARD_DOCS_FIELD);
}

private final ByteSizeValue maxSize;
private final ByteSizeValue maxPrimaryShardSize;
private final TimeValue maxAge;
private final Long maxDocs;
private final Long maxShardDocs;

public static RolloverAction parse(XContentParser parser) {
return PARSER.apply(parser, null);
}

public RolloverAction(ByteSizeValue maxSize, ByteSizeValue maxPrimaryShardSize, TimeValue maxAge, Long maxDocs) {
if (maxSize == null && maxPrimaryShardSize == null && maxAge == null && maxDocs == null) {
public RolloverAction(ByteSizeValue maxSize, ByteSizeValue maxPrimaryShardSize, TimeValue maxAge, Long maxDocs, Long maxShardDocs) {
if (maxSize == null && maxPrimaryShardSize == null && maxAge == null && maxDocs == null && maxShardDocs == null) {
throw new IllegalArgumentException("At least one rollover condition must be set.");
}
this.maxSize = maxSize;
this.maxPrimaryShardSize = maxPrimaryShardSize;
this.maxAge = maxAge;
this.maxDocs = maxDocs;
this.maxShardDocs = maxShardDocs;
}

public ByteSizeValue getMaxSize() {
@@ -90,6 +94,10 @@ public Long getMaxDocs() {
return maxDocs;
}

public Long getMaxShardDocs() {
return maxShardDocs;
}

@Override
public String getName() {
return NAME;
@@ -110,13 +118,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
if (maxDocs != null) {
builder.field(MAX_DOCS_FIELD.getPreferredName(), maxDocs);
}
if (maxShardDocs != null) {
builder.field(MAX_SHARD_DOCS_FIELD.getPreferredName(), maxShardDocs);
}
builder.endObject();
return builder;
}

@Override
public int hashCode() {
return Objects.hash(maxSize, maxPrimaryShardSize, maxAge, maxDocs);
return Objects.hash(maxSize, maxPrimaryShardSize, maxAge, maxDocs, maxShardDocs);
}

@Override
@@ -131,7 +142,8 @@ public boolean equals(Object obj) {
return Objects.equals(maxSize, other.maxSize)
&& Objects.equals(maxPrimaryShardSize, other.maxPrimaryShardSize)
&& Objects.equals(maxAge, other.maxAge)
&& Objects.equals(maxDocs, other.maxDocs);
&& Objects.equals(maxDocs, other.maxDocs)
&& Objects.equals(maxShardDocs, other.maxShardDocs);
}

@Override
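For orientation, a minimal sketch of constructing the updated action with the new shard-docs condition. The five-argument constructor and the getMaxShardDocs getter come from this diff, and the ByteSizeValue/TimeValue imports match those used in the tests further down; the RolloverAction package and the concrete values are assumptions for illustration (and note the reviewer above asks for these HLRC changes to be reverted):

[source,java]
----
import org.elasticsearch.client.indexlifecycle.RolloverAction; // assumed HLRC package
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.TimeValue;

public class RolloverActionExample {
    public static void main(String[] args) {
        // Unused conditions are passed as null; at least one must be non-null,
        // otherwise the constructor throws IllegalArgumentException.
        RolloverAction action = new RolloverAction(
            null,                                   // maxSize
            new ByteSizeValue(50, ByteSizeUnit.GB), // maxPrimaryShardSize
            TimeValue.timeValueDays(30),            // maxAge
            null,                                   // maxDocs
            5000L                                   // maxShardDocs (new in this change)
        );
        System.out.println(action.getMaxShardDocs()); // prints 5000
    }
}
----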
@@ -11,6 +11,7 @@
import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardSizeCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxShardDocsCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition;
import org.elasticsearch.client.TimedRequest;
import org.elasticsearch.client.indices.CreateIndexRequest;
@@ -120,6 +121,17 @@ public RolloverRequest addMaxPrimaryShardSizeCondition(ByteSizeValue size) {
return this;
}

/**
* Adds a doc-count condition to check if the largest shard in the index has at least <code>numDocs</code> documents
*/
public RolloverRequest addMaxShardDocsCondition(long numDocs) {
MaxShardDocsCondition maxShardDocsCondition = new MaxShardDocsCondition(numDocs);
if (this.conditions.containsKey(maxShardDocsCondition.name())) {
throw new IllegalArgumentException(maxShardDocsCondition + " condition is already set");
}
this.conditions.put(maxShardDocsCondition.name(), maxShardDocsCondition);
return this;
}

/**
* Returns all set conditions
*/
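A short usage sketch for the request-side change, assuming the HLRC RolloverRequest constructor that takes the alias to roll over plus an explicit new index name (as the integration test below exercises); the alias and index names here are hypothetical, and the condition names echo the ParseFields from this PR:

[source,java]
----
import org.elasticsearch.client.indices.RolloverRequest; // package per the imports in this file
import org.elasticsearch.core.TimeValue;

public class RolloverRequestExample {
    public static void main(String[] args) {
        // "logs-write" and "logs-000002" are hypothetical names.
        RolloverRequest request = new RolloverRequest("logs-write", "logs-000002");
        request.addMaxIndexAgeCondition(TimeValue.timeValueDays(7));
        request.addMaxIndexDocsCondition(1000L);
        request.addMaxShardDocsCondition(2000L); // new condition from this change
        // Prints the registered condition names, e.g. [max_age, max_docs, max_shard_docs]
        System.out.println(request.getConditions().keySet());
    }
}
----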
@@ -169,7 +169,7 @@ public void testStartStopILM() throws Exception {
public void testExplainLifecycle() throws Exception {
Map<String, Phase> lifecyclePhases = new HashMap<>();
Map<String, LifecycleAction> hotActions = new HashMap<>();
hotActions.put(RolloverAction.NAME, new RolloverAction(null, null, TimeValue.timeValueHours(50 * 24), null));
hotActions.put(RolloverAction.NAME, new RolloverAction(null, null, TimeValue.timeValueHours(50 * 24), null, null));
Phase hotPhase = new Phase("hot", randomFrom(TimeValue.ZERO, null), hotActions);
lifecyclePhases.put("hot", hotPhase);

@@ -1066,6 +1066,24 @@ public void testRollover() throws IOException {
assertEquals("test", rolloverResponse.getOldIndex());
assertEquals("test_new", rolloverResponse.getNewIndex());
}
{
rolloverRequest.addMaxShardDocsCondition(1L);
rolloverRequest.dryRun(true);
RolloverResponse rolloverResponse = execute(
rolloverRequest,
highLevelClient().indices()::rollover,
highLevelClient().indices()::rolloverAsync
);
assertFalse(rolloverResponse.isRolledOver());
assertTrue(rolloverResponse.isDryRun());
Map<String, Boolean> conditionStatus = rolloverResponse.getConditionStatus();
assertEquals(3, conditionStatus.size());
assertTrue(conditionStatus.get("[max_docs: 1]"));
assertTrue(conditionStatus.get("[max_age: 1ms]"));
assertTrue(conditionStatus.get("[max_shard_docs: 1]"));
assertEquals("test", rolloverResponse.getOldIndex());
assertEquals("test_new", rolloverResponse.getNewIndex());
}
{
String mappings = "{\"properties\":{\"field2\":{\"type\":\"keyword\"}}}";
rolloverRequest.getCreateIndexRequest().mapping(mappings, XContentType.JSON);
@@ -1080,9 +1098,10 @@
assertTrue(rolloverResponse.isRolledOver());
assertFalse(rolloverResponse.isDryRun());
Map<String, Boolean> conditionStatus = rolloverResponse.getConditionStatus();
assertEquals(4, conditionStatus.size());
assertEquals(5, conditionStatus.size());
assertTrue(conditionStatus.get("[max_docs: 1]"));
assertTrue(conditionStatus.get("[max_age: 1ms]"));
assertTrue(conditionStatus.get("[max_shard_docs: 1]"));
assertFalse(conditionStatus.get("[max_size: 1mb]"));
assertFalse(conditionStatus.get("[max_primary_shard_size: 1mb]"));
assertEquals("test", rolloverResponse.getOldIndex());
@@ -95,7 +95,7 @@ public void testPutLifecyclePolicy() throws Exception {
Map<String, Phase> phases = new HashMap<>();
Map<String, LifecycleAction> hotActions = new HashMap<>();
hotActions.put(RolloverAction.NAME, new RolloverAction(
new ByteSizeValue(50, ByteSizeUnit.GB), null, null, null));
new ByteSizeValue(50, ByteSizeUnit.GB), null, null, null, null));
phases.put("hot", new Phase("hot", TimeValue.ZERO, hotActions)); // <1>

Map<String, LifecycleAction> deleteActions =
@@ -163,7 +163,7 @@ public void testDeletePolicy() throws IOException, InterruptedException {
{
Map<String, Phase> phases = new HashMap<>();
Map<String, LifecycleAction> hotActions = new HashMap<>();
hotActions.put(RolloverAction.NAME, new RolloverAction(new ByteSizeValue(50, ByteSizeUnit.GB), null, null, null));
hotActions.put(RolloverAction.NAME, new RolloverAction(new ByteSizeValue(50, ByteSizeUnit.GB), null, null, null, null));
phases.put("hot", new Phase("hot", TimeValue.ZERO, hotActions));
Map<String, LifecycleAction> deleteActions = Collections.singletonMap(DeleteAction.NAME, new DeleteAction());
phases.put("delete", new Phase("delete", new TimeValue(90, TimeUnit.DAYS), deleteActions));
@@ -231,7 +231,7 @@ public void testGetLifecyclePolicy() throws IOException, InterruptedException {
{
Map<String, Phase> phases = new HashMap<>();
Map<String, LifecycleAction> hotActions = new HashMap<>();
hotActions.put(RolloverAction.NAME, new RolloverAction(new ByteSizeValue(50, ByteSizeUnit.GB), null, null, null));
hotActions.put(RolloverAction.NAME, new RolloverAction(new ByteSizeValue(50, ByteSizeUnit.GB), null, null, null, null));
phases.put("hot", new Phase("hot", TimeValue.ZERO, hotActions));

Map<String, LifecycleAction> deleteActions = Collections.singletonMap(DeleteAction.NAME, new DeleteAction());
@@ -324,7 +324,7 @@ public void testExplainLifecycle() throws Exception {
{
Map<String, Phase> phases = new HashMap<>();
Map<String, LifecycleAction> hotActions = new HashMap<>();
hotActions.put(RolloverAction.NAME, new RolloverAction(new ByteSizeValue(50, ByteSizeUnit.GB), null, null, null));
hotActions.put(RolloverAction.NAME, new RolloverAction(new ByteSizeValue(50, ByteSizeUnit.GB), null, null, null, null));
phases.put("hot", new Phase("hot", TimeValue.ZERO, hotActions));

LifecyclePolicy policy = new LifecyclePolicy("my_policy", phases);
@@ -41,11 +41,17 @@ static RolloverAction randomInstance() {
Long maxDocs = (maxSize == null && maxPrimaryShardSize == null && maxAge == null || randomBoolean())
? randomNonNegativeLong()
: null;
return new RolloverAction(maxSize, maxPrimaryShardSize, maxAge, maxDocs);
Long maxShardDocs = (maxSize == null && maxPrimaryShardSize == null && maxAge == null && maxDocs == null || randomBoolean())
? randomNonNegativeLong()
: null;
return new RolloverAction(maxSize, maxPrimaryShardSize, maxAge, maxDocs, maxShardDocs);
}

public void testNoConditions() {
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new RolloverAction(null, null, null, null));
IllegalArgumentException exception = expectThrows(
IllegalArgumentException.class,
() -> new RolloverAction(null, null, null, null, null)
);
assertEquals("At least one rollover condition must be set.", exception.getMessage());
}
}
@@ -12,6 +12,7 @@
import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardSizeCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxShardDocsCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.TimeValue;
@@ -39,15 +40,18 @@ public void testConstructorAndFieldAssignments() {
MaxDocsCondition maxDocsCondition = new MaxDocsCondition(10000L);
MaxSizeCondition maxSizeCondition = new MaxSizeCondition(new ByteSizeValue(2000));
MaxPrimaryShardSizeCondition maxPrimaryShardSizeCondition = new MaxPrimaryShardSizeCondition(new ByteSizeValue(3000));
MaxShardDocsCondition maxShardDocsCondition = new MaxShardDocsCondition(5000L);
Condition<?>[] expectedConditions = new Condition<?>[] {
maxAgeCondition,
maxDocsCondition,
maxSizeCondition,
maxPrimaryShardSizeCondition };
maxPrimaryShardSizeCondition,
maxShardDocsCondition };
rolloverRequest.addMaxIndexAgeCondition(maxAgeCondition.value());
rolloverRequest.addMaxIndexDocsCondition(maxDocsCondition.value());
rolloverRequest.addMaxIndexSizeCondition(maxSizeCondition.value());
rolloverRequest.addMaxPrimaryShardSizeCondition(maxPrimaryShardSizeCondition.value());
rolloverRequest.addMaxShardDocsCondition(maxShardDocsCondition.value());
List<Condition<?>> requestConditions = new ArrayList<>(rolloverRequest.getConditions().values());
assertThat(requestConditions, containsInAnyOrder(expectedConditions));
}
@@ -12,6 +12,7 @@
import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardSizeCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxShardDocsCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.TimeValue;
@@ -36,6 +37,7 @@ public class RolloverResponseTests extends ESTestCase {
conditionSuppliers.add(() -> new MaxDocsCondition(randomNonNegativeLong()));
conditionSuppliers.add(() -> new MaxSizeCondition(new ByteSizeValue(randomNonNegativeLong())));
conditionSuppliers.add(() -> new MaxPrimaryShardSizeCondition(new ByteSizeValue(randomNonNegativeLong())));
conditionSuppliers.add(() -> new MaxShardDocsCondition(randomNonNegativeLong()));
}

public void testFromXContent() throws IOException {
31 changes: 31 additions & 0 deletions docs/reference/ilm/actions/ilm-rollover.asciidoc
@@ -81,6 +81,14 @@ replicas are ignored.
TIP: To see the current shard size, use the <<cat-shards, _cat shards>> API.
The `store` value shows the size of each shard, and `prirep` indicates whether a
shard is a primary (`p`) or a replica (`r`).

`max_shard_docs`::
Member:
Maybe a better name would be max_single_shard_docs?
Since this condition will apply if any shard reaches this limit?

Contributor Author:
> Since this condition will apply if any shard reaches this limit?

Yeah, any shard that reaches the limit will cause a rollover.

I'm not sure which one of the following is better:

  • max_shard_docs
  • max_single_shard_docs
  • max_primary_shard_docs

We can discuss and make a decision.

Member:
Maybe max_primary_shard_docs is better here, given how the doc count is computed in TransportRolloverAction#buildStats(...). In that code non-primary shards are filtered out, just like this code already does when computing maxPrimaryShardSize.

Contributor Author:
Yeah, I also prefer max_primary_shard_docs; I will change the name to max_primary_shard_docs.

(Optional, integer)
Triggers rollover when the largest shard in the index reaches a certain number of documents.
This is the maximum number of documents in any single shard of the index. As with `max_docs`,
replicas are ignored.
+
TIP: To see the current number of documents per shard, use the <<cat-shards, _cat shards>> API.
The `docs` value shows the number of documents in each shard.
// end::rollover-conditions[]

[[ilm-rollover-ex]]
@@ -155,6 +163,29 @@ PUT _ilm/policy/my_policy
}
--------------------------------------------------

[[ilm-rollover-documents-ex]]
===== Roll over based on document count of the largest shard

This example rolls the index over when the largest shard in the index contains at least ten million documents.

[source,console]
--------------------------------------------------
PUT _ilm/policy/my_policy
{
"policy": {
"phases": {
"hot": {
"actions": {
"rollover" : {
"max_shard_docs": 10000000
}
}
}
}
}
}
--------------------------------------------------

[[ilm-rollover-age-ex]]
===== Roll over based on index age

20 changes: 13 additions & 7 deletions docs/reference/indices/rollover-index.asciidoc
@@ -79,7 +79,7 @@ If you use an index alias for time series data, you can use
date. For example, you can create an alias that points to an index named
`<my-index-{now/d}-000001>`. If you create the index on May 6, 2099, the index's
name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099,
the new index's name is `my-index-2099.05.07-000002`. For an example, see
<<roll-over-index-alias-with-write-index>>.
****

@@ -233,7 +233,8 @@ POST my-data-stream/_rollover
"conditions": {
"max_age": "7d",
"max_docs": 1000,
"max_primary_shard_size": "50gb"
"max_primary_shard_size": "50gb",
"max_shard_docs": "2000"
}
}
----
@@ -255,7 +256,8 @@
"conditions": {
"[max_age: 7d]": false,
"[max_docs: 1000]": true,
"[max_primary_shard_size: 50gb]": false
"[max_primary_shard_size: 50gb]": false,
"[max_shard_docs: 2000]": false
}
}
----
@@ -302,7 +304,8 @@ POST my-alias/_rollover
"conditions": {
"max_age": "7d",
"max_docs": 1000,
"max_primary_shard_size": "50gb"
"max_primary_shard_size": "50gb",
"max_shard_docs": "2000"
}
}
----
@@ -324,7 +327,8 @@
"conditions": {
"[max_age: 7d]": false,
"[max_docs: 1000]": true,
"[max_primary_shard_size: 50gb]": false
"[max_primary_shard_size: 50gb]": false,
"[max_shard_docs: 2000]": false
}
}
----
@@ -371,7 +375,8 @@ POST my-write-alias/_rollover
"conditions": {
"max_age": "7d",
"max_docs": 1000,
"max_primary_shard_size": "50gb"
"max_primary_shard_size": "50gb",
"max_shard_docs": "2000"
}
}
----
@@ -393,7 +398,8 @@
"conditions": {
"[max_age: 7d]": false,
"[max_docs: 1000]": true,
"[max_primary_shard_size: 50gb]": false
"[max_primary_shard_size: 50gb]": false,
"[max_shard_docs: 2000]": false
}
}
----