@@ -667,7 +667,12 @@ public void testRecovery() throws Exception {
* an index without a translog so we randomize whether
* or not we have one. */
shouldHaveTranslog = randomBoolean();

Settings.Builder settings = Settings.builder();
if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
}
final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null;
createIndex(index, settings.build(), mappings);
indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject());

// make sure all recoveries are done
@@ -1267,7 +1272,8 @@ public void testOperationBasedRecovery() throws Exception {
if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
}
createIndex(index, settings.build());
final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null;
createIndex(index, settings.build(), mappings);
ensureGreen(index);
int committedDocs = randomIntBetween(100, 200);
for (int i = 0; i < committedDocs; i++) {
@@ -1325,7 +1331,8 @@ public void testResize() throws Exception {
if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false);
}
createIndex(index, settings.build());
final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null;
createIndex(index, settings.build(), mappings);
numDocs = randomIntBetween(10, 1000);
for (int i = 0; i < numDocs; i++) {
indexDocument(Integer.toString(i));
@@ -645,7 +645,8 @@ public void testOperationBasedRecovery() throws Exception {
if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
}
createIndex(index, settings.build());
final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null;
createIndex(index, settings.build(), mappings);
ensureGreen(index);
indexDocs(index, 0, randomIntBetween(100, 200));
flush(index, randomBoolean());
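Both recovery test hunks above apply the same change: the index is now created with a randomized mappings fragment that sometimes disables _source. For orientation, when that branch is taken, the request the shared createIndex test helper is assumed to end up issuing looks roughly like this (a sketch only; the exact helper plumbing, index name, and settings come from the test infrastructure, and org.elasticsearch.client.Request plus the client() REST helper are assumed to be available):

// Sketch (assumed helper behaviour, not the actual createIndex implementation): create the test
// index with the randomized settings and with _source disabled via the mappings fragment.
Request createIndexRequest = new Request("PUT", "/" + index);
createIndexRequest.setJsonEntity(
    "{ \"settings\": { \"index.soft_deletes.enabled\": true }, "      // from settings.build()
    + "\"mappings\": { \"_source\": { \"enabled\": false } } }");     // the randomized fragment
client().performRequest(createIndexRequest);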
25 changes: 2 additions & 23 deletions server/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -730,7 +730,7 @@ public enum SearcherScope {
/**
* Acquires a lock on the translog files and Lucene soft-deleted documents to prevent them from being trimmed
*/
public abstract Closeable acquireHistoryRetentionLock(HistorySource historySource);
public abstract Closeable acquireHistoryRetentionLock();

/**
* Creates a new history snapshot from Lucene for reading operations whose seqno in the requesting seqno range (both inclusive).
@@ -739,24 +739,10 @@ public enum SearcherScope {
public abstract Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService,
long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException;

/**
* Creates a new history snapshot for reading operations since {@code startingSeqNo} (inclusive).
* The returned snapshot can be retrieved from either Lucene index or translog files.
*/
public abstract Translog.Snapshot readHistoryOperations(String reason, HistorySource historySource,
MapperService mapperService, long startingSeqNo) throws IOException;

/**
* Returns the estimated number of history operations whose seq# at least {@code startingSeqNo}(inclusive) in this engine.
*/
public abstract int estimateNumberOfHistoryOperations(String reason, HistorySource historySource,
MapperService mapperService, long startingSeqNo) throws IOException;

/**
* Checks if this engine has every operations since {@code startingSeqNo}(inclusive) in its history (either Lucene or translog)
*/
public abstract boolean hasCompleteOperationHistory(String reason, HistorySource historySource,
MapperService mapperService, long startingSeqNo) throws IOException;
public abstract boolean hasCompleteOperationHistory(String reason, long startingSeqNo);

/**
* Gets the minimum retained sequence number for this engine.
@@ -1932,11 +1918,4 @@ public interface TranslogRecoveryRunner {
* to advance this marker to at least the given sequence number.
*/
public abstract void advanceMaxSeqNoOfUpdatesOrDeletes(long maxSeqNoOfUpdatesOnPrimary);

/**
* Whether we should read history operations from translog or Lucene index
*/
public enum HistorySource {
TRANSLOG, INDEX
}
}
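With the HistorySource parameter removed, Engine exposes a single Lucene-backed history contract: one retention lock, one completeness check, one changes snapshot. A hedged usage sketch of the trimmed-down API (assuming an engine, a mapperService, and a startingSeqNo in scope inside a method that may throw IOException; the "peer-recovery" reason string is illustrative):

// Hedged usage sketch of the simplified history API; not code taken from this PR.
try (Closeable historyLock = engine.acquireHistoryRetentionLock()) {              // pin retained history
    if (engine.hasCompleteOperationHistory("peer-recovery", startingSeqNo)) {     // every op since startingSeqNo retained?
        try (Translog.Snapshot snapshot =
                 engine.newChangesSnapshot("peer-recovery", mapperService, startingSeqNo, Long.MAX_VALUE, false)) {
            Translog.Operation operation;
            while ((operation = snapshot.next()) != null) {
                // replay or ship the operation
            }
        }
    }
}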
@@ -526,36 +526,6 @@ public void syncTranslog() throws IOException {
revisitIndexDeletionPolicyOnTranslogSynced();
}

/**
* Creates a new history snapshot for reading operations since the provided seqno.
* The returned snapshot can be retrieved from either Lucene index or translog files.
*/
@Override
public Translog.Snapshot readHistoryOperations(String reason, HistorySource historySource,
MapperService mapperService, long startingSeqNo) throws IOException {
if (historySource == HistorySource.INDEX) {
return newChangesSnapshot(reason, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false);
} else {
return getTranslog().newSnapshotFromMinSeqNo(startingSeqNo);
}
}

/**
* Returns the estimated number of history operations whose seq# at least the provided seq# in this engine.
*/
@Override
public int estimateNumberOfHistoryOperations(String reason, HistorySource historySource,
MapperService mapperService, long startingSeqNo) throws IOException {
if (historySource == HistorySource.INDEX) {
try (Translog.Snapshot snapshot = newChangesSnapshot(reason, mapperService, Math.max(0, startingSeqNo),
Long.MAX_VALUE, false)) {
return snapshot.totalOperations();
}
} else {
return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo);
}
}

@Override
public TranslogStats getTranslogStats() {
return getTranslog().stats();
@@ -2597,27 +2567,8 @@ public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperS
}

@Override
public boolean hasCompleteOperationHistory(String reason, HistorySource historySource,
MapperService mapperService, long startingSeqNo) throws IOException {
if (historySource == HistorySource.INDEX) {
return getMinRetainedSeqNo() <= startingSeqNo;
} else {
final long currentLocalCheckpoint = localCheckpointTracker.getProcessedCheckpoint();
// avoid scanning translog if not necessary
if (startingSeqNo > currentLocalCheckpoint) {
return true;
}
final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1);
try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) {
Translog.Operation operation;
while ((operation = snapshot.next()) != null) {
if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
tracker.markSeqNoAsProcessed(operation.seqNo());
}
}
}
return tracker.getProcessedCheckpoint() >= currentLocalCheckpoint;
}
public boolean hasCompleteOperationHistory(String reason, long startingSeqNo) {
return getMinRetainedSeqNo() <= startingSeqNo;
}

/**
@@ -2629,12 +2580,8 @@ public final long getMinRetainedSeqNo() {
}

@Override
public Closeable acquireHistoryRetentionLock(HistorySource historySource) {
if (historySource == HistorySource.INDEX) {
return softDeletesPolicy.acquireRetentionLock();
} else {
return translog.acquireRetentionLock();
}
public Closeable acquireHistoryRetentionLock() {
return softDeletesPolicy.acquireRetentionLock();
}

/**
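In the engine implementation above, both the retention lock and the completeness check now go through the soft-deletes retention machinery: history back to startingSeqNo is complete exactly when getMinRetainedSeqNo() <= startingSeqNo, and the lock keeps that floor from advancing while a consumer reads history. A minimal conceptual model of that interplay (an illustration only, not the actual SoftDeletesPolicy):

import java.io.Closeable;

// Conceptual model only: while any retention lock is open, the minimum retained seqno cannot
// advance, so a completeness check made under the lock stays valid until the lock is released.
final class SoftDeletesRetentionModel {
    private long minRetainedSeqNo = 0;
    private int openLocks = 0;

    synchronized Closeable acquireRetentionLock() {
        openLocks++;
        return this::releaseRetentionLock;
    }

    private synchronized void releaseRetentionLock() {
        openLocks--;
    }

    synchronized void advanceMinRetainedSeqNo(long newMinimum) {
        if (openLocks == 0) {                        // trimming may only proceed with no locks held
            minRetainedSeqNo = Math.max(minRetainedSeqNo, newMinimum);
        }
    }

    synchronized boolean hasCompleteHistorySince(long startingSeqNo) {
        return minRetainedSeqNo <= startingSeqNo;    // mirrors hasCompleteOperationHistory above
    }
}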
@@ -304,7 +304,7 @@ public void syncTranslog() {
}

@Override
public Closeable acquireHistoryRetentionLock(HistorySource historySource) {
public Closeable acquireHistoryRetentionLock() {
return () -> {};
}

@@ -315,20 +315,7 @@ public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperS
}

@Override
public Translog.Snapshot readHistoryOperations(String reason, HistorySource historySource,
MapperService mapperService, long startingSeqNo) {
return newEmptySnapshot();
}

@Override
public int estimateNumberOfHistoryOperations(String reason, HistorySource historySource,
MapperService mapperService, long startingSeqNo) {
return 0;
}

@Override
public boolean hasCompleteOperationHistory(String reason, HistorySource historySource,
MapperService mapperService, long startingSeqNo) {
public boolean hasCompleteOperationHistory(String reason, long startingSeqNo) {
// we can do operation-based recovery if we don't have to replay any operation.
return startingSeqNo > seqNoStats.getMaxSeqNo();
}
29 changes: 7 additions & 22 deletions server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -1918,31 +1918,16 @@ protected void doRun() {
/**
* Acquires a lock on the translog files and Lucene soft-deleted documents to prevent them from being trimmed
*/
public Closeable acquireHistoryRetentionLock(Engine.HistorySource source) {
return getEngine().acquireHistoryRetentionLock(source);
}

/**
* Returns the estimated number of history operations whose seq# at least the provided seq# in this shard.
*/
public int estimateNumberOfHistoryOperations(String reason, Engine.HistorySource source, long startingSeqNo) throws IOException {
return getEngine().estimateNumberOfHistoryOperations(reason, source, mapperService, startingSeqNo);
}

/**
* Creates a new history snapshot for reading operations since the provided starting seqno (inclusive).
* The returned snapshot can be retrieved from either Lucene index or translog files.
*/
public Translog.Snapshot getHistoryOperations(String reason, Engine.HistorySource source, long startingSeqNo) throws IOException {
return getEngine().readHistoryOperations(reason, source, mapperService, startingSeqNo);
public Closeable acquireHistoryRetentionLock() {
return getEngine().acquireHistoryRetentionLock();
}

/**
* Checks if we have a completed history of operations since the given starting seqno (inclusive).
* This method should be called after acquiring the retention lock; See {@link #acquireHistoryRetentionLock(Engine.HistorySource)}
* This method should be called after acquiring the retention lock; See {@link #acquireHistoryRetentionLock()}
*/
public boolean hasCompleteHistoryOperations(String reason, Engine.HistorySource source, long startingSeqNo) throws IOException {
return getEngine().hasCompleteOperationHistory(reason, source, mapperService, startingSeqNo);
public boolean hasCompleteHistoryOperations(String reason, long startingSeqNo) {
return getEngine().hasCompleteOperationHistory(reason, startingSeqNo);
}

/**
@@ -2131,7 +2116,7 @@ public RetentionLease addRetentionLease(
assert assertPrimaryMode();
verifyNotClosed();
ensureSoftDeletesEnabled("retention leases");
try (Closeable ignore = acquireHistoryRetentionLock(Engine.HistorySource.INDEX)) {
try (Closeable ignore = acquireHistoryRetentionLock()) {
final long actualRetainingSequenceNumber =
retainingSequenceNumber == RETAIN_ALL ? getMinRetainedSeqNo() : retainingSequenceNumber;
return replicationTracker.addRetentionLease(id, actualRetainingSequenceNumber, source, listener);
@@ -2153,7 +2138,7 @@ public RetentionLease renewRetentionLease(final String id, final long retainingS
assert assertPrimaryMode();
verifyNotClosed();
ensureSoftDeletesEnabled("retention leases");
try (Closeable ignore = acquireHistoryRetentionLock(Engine.HistorySource.INDEX)) {
try (Closeable ignore = acquireHistoryRetentionLock()) {
final long actualRetainingSequenceNumber =
retainingSequenceNumber == RETAIN_ALL ? getMinRetainedSeqNo() : retainingSequenceNumber;
return replicationTracker.renewRetentionLease(id, actualRetainingSequenceNumber, source);
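At the shard level the pattern is the same. A hedged sketch of how a caller such as the peer-recovery source side is presumably expected to combine the two methods after this change (variable names are illustrative; the enclosing method has to handle the IOException from closing the lock):

// Hedged sketch, not code from this PR: make the recovery-mode decision under the retention lock.
try (Closeable retentionLock = shard.acquireHistoryRetentionLock()) {
    final boolean opsBasedRecovery =
        shard.hasCompleteHistoryOperations("peer-recovery", requestStartingSeqNo);
    // choose between operation-based and file-based recovery while trimming is blocked
}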
@@ -36,7 +36,6 @@
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.tasks.Task;
@@ -86,14 +85,13 @@ public void resync(final IndexShard indexShard, final ActionListener<ResyncTask>
Translog.Snapshot snapshot = null;
try {
final long startingSeqNo = indexShard.getLastKnownGlobalCheckpoint() + 1;
assert startingSeqNo >= 0 : "startingSeqNo must be non-negative; got [" + startingSeqNo + "]";
final long maxSeqNo = indexShard.seqNoStats().getMaxSeqNo();
final ShardId shardId = indexShard.shardId();
// Wrap translog snapshot to make it synchronized as it is accessed by different threads through SnapshotSender.
// Even though those calls are not concurrent, snapshot.next() uses non-synchronized state and is not multi-thread-compatible
// Also fail the resync early if the shard is shutting down
snapshot = indexShard.getHistoryOperations("resync",
indexShard.indexSettings.isSoftDeleteEnabled() ? Engine.HistorySource.INDEX : Engine.HistorySource.TRANSLOG,
startingSeqNo);
snapshot = indexShard.newChangesSnapshot("resync", startingSeqNo, Long.MAX_VALUE, false);
final Translog.Snapshot originalSnapshot = snapshot;
final Translog.Snapshot wrappedSnapshot = new Translog.Snapshot() {
@Override
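The hunk above ends just as the synchronized wrapper described in the comment is being built. For orientation, such a wrapper typically just serializes access to the delegate snapshot; the sketch below assumes that shape (the real anonymous class in the PR, not shown here, additionally fails the resync early when the shard is shutting down):

// Sketch of a synchronized Translog.Snapshot wrapper around originalSnapshot; illustrative only.
final Translog.Snapshot synchronizedSnapshot = new Translog.Snapshot() {
    @Override
    public synchronized int totalOperations() {
        return originalSnapshot.totalOperations();
    }

    @Override
    public synchronized Translog.Operation next() throws IOException {
        return originalSnapshot.next();
    }

    @Override
    public synchronized void close() throws IOException {
        originalSnapshot.close();
    }
};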