Commit b5adb3c

Remove AlreadyExpiredException (#24857)
This is a relic of the TTL functionality that was removed in #21670.
1 parent 6bc5b1d commit b5adb3c

File tree

5 files changed: +35 -194 lines changed


core/src/main/java/org/elasticsearch/ElasticsearchException.java

+1-2
@@ -877,8 +877,7 @@ private enum ElasticsearchExceptionHandle {
                 org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83, UNKNOWN_VERSION_ADDED),
         NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class,
                 org.elasticsearch.transport.NodeDisconnectedException::new, 84, UNKNOWN_VERSION_ADDED),
-        ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class,
-                org.elasticsearch.index.AlreadyExpiredException::new, 85, UNKNOWN_VERSION_ADDED),
+        // 85 used to be for AlreadyExpiredException
         AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class,
                 org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86, UNKNOWN_VERSION_ADDED),
         // 87 used to be for MergeMappingException
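
Each handle in this enum binds an exception class to a fixed numeric id that both sides of a node-to-node connection must agree on, which is why the retired id 85 is left as a comment rather than reassigned. The following is a minimal sketch of that registration pattern, not the Elasticsearch implementation; the class name, methods, and the JDK exceptions standing in for the Elasticsearch classes are all hypothetical:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    // Sketch only: ids are registered once and never reused for a different class,
    // so old and new nodes keep agreeing on what every id on the wire means.
    public class ExceptionIdRegistry {

        private final Map<Integer, Supplier<? extends RuntimeException>> handles = new HashMap<>();

        void register(int id, Supplier<? extends RuntimeException> constructor) {
            if (handles.putIfAbsent(id, constructor) != null) {
                throw new IllegalStateException("id [" + id + "] is already registered");
            }
        }

        RuntimeException newInstance(int id) {
            Supplier<? extends RuntimeException> constructor = handles.get(id);
            if (constructor == null) {
                // Retired ids (85 used to be AlreadyExpiredException) land here:
                // they stay reserved so every remaining id keeps its historical meaning.
                throw new IllegalArgumentException("no exception registered for id [" + id + "]");
            }
            return constructor.get();
        }

        public static void main(String[] args) {
            ExceptionIdRegistry registry = new ExceptionIdRegistry();
            // JDK exceptions stand in for the Elasticsearch classes listed in the enum above.
            registry.register(84, IllegalStateException::new);
            registry.register(86, UnsupportedOperationException::new);
            System.out.println(registry.newInstance(86).getClass().getSimpleName());
        }
    }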

core/src/main/java/org/elasticsearch/index/AlreadyExpiredException.java

-91
This file was deleted.

core/src/main/java/org/elasticsearch/index/engine/IgnoreOnRecoveryEngineException.java

-26
This file was deleted.

core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java

+33-55
@@ -24,12 +24,10 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.engine.IgnoreOnRecoveryEngineException;
 import org.elasticsearch.index.mapper.DocumentMapperForType;
 import org.elasticsearch.index.mapper.MapperException;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.Mapping;
-import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.rest.RestStatus;

@@ -149,59 +147,39 @@ private void maybeAddMappingUpdate(String type, Mapping update, String docId, bo
      * is encountered.
      */
     private void performRecoveryOperation(Engine engine, Translog.Operation operation, boolean allowMappingUpdates, Engine.Operation.Origin origin) throws IOException {
-
-        try {
-            switch (operation.opType()) {
-                case INDEX:
-                    Translog.Index index = (Translog.Index) operation;
-                    // we set canHaveDuplicates to true all the time such that we de-optimze the translog case and ensure that all
-                    // autoGeneratedID docs that are coming from the primary are updated correctly.
-                    Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()),
-                        source(shardId.getIndexName(), index.type(), index.id(), index.source(), XContentFactory.xContentType(index.source()))
-                            .routing(index.routing()).parent(index.parent()), index.seqNo(), index.primaryTerm(),
-                        index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin, index.getAutoGeneratedIdTimestamp(), true);
-                    maybeAddMappingUpdate(engineIndex.type(), engineIndex.parsedDoc().dynamicMappingsUpdate(), engineIndex.id(), allowMappingUpdates);
-                    logger.trace("[translog] recover [index] op [({}, {})] of [{}][{}]", index.seqNo(), index.primaryTerm(), index.type(), index.id());
-                    index(engine, engineIndex);
-                    break;
-                case DELETE:
-                    Translog.Delete delete = (Translog.Delete) operation;
-                    logger.trace("[translog] recover [delete] op [({}, {})] of [{}][{}]", delete.seqNo(), delete.primaryTerm(), delete.type(), delete.id());
-                    final Engine.Delete engineDelete = new Engine.Delete(delete.type(), delete.id(), delete.uid(), delete.seqNo(),
-                        delete.primaryTerm(), delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(),
-                        origin, System.nanoTime());
-                    delete(engine, engineDelete);
-                    break;
-                case NO_OP:
-                    final Translog.NoOp noOp = (Translog.NoOp) operation;
-                    final long seqNo = noOp.seqNo();
-                    final long primaryTerm = noOp.primaryTerm();
-                    final String reason = noOp.reason();
-                    logger.trace("[translog] recover [no_op] op [({}, {})] of [{}]", seqNo, primaryTerm, reason);
-                    final Engine.NoOp engineNoOp =
-                        new Engine.NoOp(seqNo, primaryTerm, origin, System.nanoTime(), reason);
-                    noOp(engine, engineNoOp);
-                    break;
-                default:
-                    throw new IllegalStateException("No operation defined for [" + operation + "]");
-            }
-        } catch (ElasticsearchException e) {
-            boolean hasIgnoreOnRecoveryException = false;
-            ElasticsearchException current = e;
-            while (true) {
-                if (current instanceof IgnoreOnRecoveryEngineException) {
-                    hasIgnoreOnRecoveryException = true;
-                    break;
-                }
-                if (current.getCause() instanceof ElasticsearchException) {
-                    current = (ElasticsearchException) current.getCause();
-                } else {
-                    break;
-                }
-            }
-            if (!hasIgnoreOnRecoveryException) {
-                throw e;
-            }
+        switch (operation.opType()) {
+            case INDEX:
+                Translog.Index index = (Translog.Index) operation;
+                // we set canHaveDuplicates to true all the time such that we de-optimze the translog case and ensure that all
+                // autoGeneratedID docs that are coming from the primary are updated correctly.
+                Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()),
+                    source(shardId.getIndexName(), index.type(), index.id(), index.source(), XContentFactory.xContentType(index.source()))
+                        .routing(index.routing()).parent(index.parent()), index.seqNo(), index.primaryTerm(),
+                    index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin, index.getAutoGeneratedIdTimestamp(), true);
+                maybeAddMappingUpdate(engineIndex.type(), engineIndex.parsedDoc().dynamicMappingsUpdate(), engineIndex.id(), allowMappingUpdates);
+                logger.trace("[translog] recover [index] op [({}, {})] of [{}][{}]", index.seqNo(), index.primaryTerm(), index.type(), index.id());
+                index(engine, engineIndex);
+                break;
+            case DELETE:
+                Translog.Delete delete = (Translog.Delete) operation;
+                logger.trace("[translog] recover [delete] op [({}, {})] of [{}][{}]", delete.seqNo(), delete.primaryTerm(), delete.type(), delete.id());
+                final Engine.Delete engineDelete = new Engine.Delete(delete.type(), delete.id(), delete.uid(), delete.seqNo(),
+                    delete.primaryTerm(), delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(),
+                    origin, System.nanoTime());
+                delete(engine, engineDelete);
+                break;
+            case NO_OP:
+                final Translog.NoOp noOp = (Translog.NoOp) operation;
+                final long seqNo = noOp.seqNo();
+                final long primaryTerm = noOp.primaryTerm();
+                final String reason = noOp.reason();
+                logger.trace("[translog] recover [no_op] op [({}, {})] of [{}]", seqNo, primaryTerm, reason);
+                final Engine.NoOp engineNoOp =
+                    new Engine.NoOp(seqNo, primaryTerm, origin, System.nanoTime(), reason);
+                noOp(engine, engineNoOp);
+                break;
+            default:
+                throw new IllegalStateException("No operation defined for [" + operation + "]");
         }
         operationProcessed();
     }
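
The removed catch block existed only to walk an exception's cause chain and swallow anything that wrapped an IgnoreOnRecoveryEngineException; with that marker class deleted, the whole block was dead code. Below is a self-contained sketch of the pattern it implemented, with hypothetical names standing in for the Elasticsearch types (and simplified to follow any Throwable cause, where the real code only followed ElasticsearchException causes):

    // Sketch: swallow an exception only if an "ignorable during recovery" marker type
    // appears somewhere in its cause chain; otherwise rethrow it.
    public class CauseChainWalk {

        /** Marker type standing in for the deleted IgnoreOnRecoveryEngineException. */
        static class IgnorableDuringRecovery extends RuntimeException {
            IgnorableDuringRecovery(String message) { super(message); }
        }

        static boolean hasIgnorableCause(Throwable e) {
            Throwable current = e;
            while (current != null) {
                if (current instanceof IgnorableDuringRecovery) {
                    return true;
                }
                current = current.getCause();
            }
            return false;
        }

        static void performOperation(Runnable operation) {
            try {
                operation.run();
            } catch (RuntimeException e) {
                if (hasIgnorableCause(e) == false) {
                    throw e; // not ignorable anywhere in the chain: propagate
                }
                // otherwise: swallow and keep recovering
            }
        }

        public static void main(String[] args) {
            // Swallowed: the ignorable marker sits one level down the cause chain.
            performOperation(() -> {
                throw new RuntimeException("wrapper", new IgnorableDuringRecovery("expired"));
            });
            // Propagated: no ignorable cause anywhere.
            try {
                performOperation(() -> { throw new IllegalStateException("boom"); });
            } catch (IllegalStateException expected) {
                System.out.println("rethrown: " + expected.getMessage());
            }
        }
    }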

core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java

+1-20
@@ -56,7 +56,6 @@
 import org.elasticsearch.common.xcontent.XContentLocation;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.env.ShardLockObtainFailedException;
-import org.elasticsearch.index.AlreadyExpiredException;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.engine.RecoveryEngineException;
 import org.elasticsearch.index.query.QueryShardException;

@@ -296,24 +295,6 @@ public void testSearchException() throws IOException {
         assertTrue(ex.getCause() instanceof NullPointerException);
     }

-    public void testAlreadyExpiredException() throws IOException {
-        AlreadyExpiredException alreadyExpiredException = serialize(new AlreadyExpiredException("index", "type", "id", 1, 2, 3));
-        assertEquals("index", alreadyExpiredException.getIndex().getName());
-        assertEquals("type", alreadyExpiredException.type());
-        assertEquals("id", alreadyExpiredException.id());
-        assertEquals(2, alreadyExpiredException.ttl());
-        assertEquals(1, alreadyExpiredException.timestamp());
-        assertEquals(3, alreadyExpiredException.now());
-
-        alreadyExpiredException = serialize(new AlreadyExpiredException(null, null, null, -1, -2, -3));
-        assertNull(alreadyExpiredException.getIndex());
-        assertNull(alreadyExpiredException.type());
-        assertNull(alreadyExpiredException.id());
-        assertEquals(-2, alreadyExpiredException.ttl());
-        assertEquals(-1, alreadyExpiredException.timestamp());
-        assertEquals(-3, alreadyExpiredException.now());
-    }
-
     public void testActionNotFoundTransportException() throws IOException {
         ActionNotFoundTransportException ex = serialize(new ActionNotFoundTransportException("AACCCTION"));
         assertEquals("AACCCTION", ex.action());

@@ -780,7 +761,7 @@ public void testIds() {
         ids.put(82, org.elasticsearch.repositories.RepositoryException.class);
         ids.put(83, org.elasticsearch.transport.ReceiveTimeoutTransportException.class);
         ids.put(84, org.elasticsearch.transport.NodeDisconnectedException.class);
-        ids.put(85, org.elasticsearch.index.AlreadyExpiredException.class);
+        ids.put(85, null);
         ids.put(86, org.elasticsearch.search.aggregations.AggregationExecutionException.class);
         ids.put(88, org.elasticsearch.indices.InvalidIndexTemplateException.class);
         ids.put(90, org.elasticsearch.index.engine.RefreshFailedEngineException.class);
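
In testIds(), mapping an id to null records that the slot is retired: the test keeps enumerating every id ever assigned and would fail if a retired one reappeared with a different class. The following is a hypothetical, stripped-down illustration of that convention; the class names and map contents are stand-ins, not the real registry or test:

    import java.util.HashMap;
    import java.util.Map;

    // Sketch: an expectation map pins each wire id to a class; a null value marks an id
    // that has been retired and must never be registered again.
    public class ReservedIdCheck {

        public static void main(String[] args) {
            Map<Integer, Class<?>> expected = new HashMap<>();
            expected.put(84, IllegalStateException.class);        // stands in for NodeDisconnectedException
            expected.put(85, null);                                // retired: formerly AlreadyExpiredException
            expected.put(86, UnsupportedOperationException.class); // stands in for AggregationExecutionException

            // Actual registrations as a (hypothetical) registry would report them.
            Map<Integer, Class<?>> actual = new HashMap<>();
            actual.put(84, IllegalStateException.class);
            actual.put(86, UnsupportedOperationException.class);

            for (Map.Entry<Integer, Class<?>> entry : expected.entrySet()) {
                Class<?> registered = actual.get(entry.getKey());
                if (entry.getValue() == null) {
                    // A retired id must not resurface with a new class.
                    if (registered != null) {
                        throw new AssertionError("id [" + entry.getKey() + "] was reused by " + registered);
                    }
                } else if (entry.getValue() != registered) {
                    throw new AssertionError("id [" + entry.getKey() + "] expected " + entry.getValue() + " but was " + registered);
                }
            }
            System.out.println("all ids match");
        }
    }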

0 commit comments
