
Commit fbb2a2f

Reduce log level for pipeline failure
Today we log `failed to execute pipeline for a bulk request` at `ERROR` level if an attempt to run an ingest pipeline fails. A failure here is commonly due to an `EsRejectedExecutionException`. We also feed such failures back to the client and record the rejection in the threadpool statistics. In line with elastic#51459, there is no need to log failures within actions so noisily and with such urgency. It is better to leave it up to the client to react accordingly. Typically, an `EsRejectedExecutionException` should result in the client backing off and retrying, so a failure here is not normally fatal enough to justify an `ERROR` log at all. This commit reduces the log level for this message to `DEBUG`.
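For callers, the practical takeaway is unchanged: the rejection is still returned to the client, which should back off and retry the bulk request rather than rely on a server-side log entry. As an illustration only (not part of this commit), here is a minimal sketch of client-side back-off, assuming the 7.x high-level REST client's `BulkProcessor`, whose backoff policy retries bulk requests rejected with `EsRejectedExecutionException`; the host, index name, document payload, and retry parameters are placeholder assumptions:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;

public class BulkRetryExample {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint: point this at your own cluster.
        RestHighLevelClient client = new RestHighLevelClient(
            RestClient.builder(new HttpHost("localhost", 9200, "http")));

        BulkProcessor processor = BulkProcessor.builder(
            // Delegate actual execution to the async bulk API of the client.
            (request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
            new BulkProcessor.Listener() {
                @Override
                public void beforeBulk(long executionId, BulkRequest request) {}

                @Override
                public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                    // Per-item failures (including rejections) come back in the response,
                    // so the client sees them regardless of the server-side log level.
                    if (response.hasFailures()) {
                        System.err.println(response.buildFailureMessage());
                    }
                }

                @Override
                public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                    // Whole-request failures surface here once the retries below are exhausted.
                    System.err.println("bulk request failed: " + failure);
                }
            })
            // Placeholder policy: retry rejected bulk requests up to 3 times,
            // starting at a 100ms delay and backing off exponentially.
            .setBackoffPolicy(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3))
            .build();

        // Placeholder index and document.
        processor.add(new IndexRequest("my-index").source("{\"field\":\"value\"}", XContentType.JSON));
        processor.flush();
        processor.close();
        client.close();
    }
}
```

Operators who still want these failures in the server log after this change can raise the logger for `TransportBulkAction` to `DEBUG` dynamically via the `logger.org.elasticsearch.action.bulk.TransportBulkAction` cluster setting.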
Parent commit: c576488

server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java

Lines changed: 7 additions & 5 deletions
@@ -96,6 +96,8 @@
      */
     public class TransportBulkAction extends HandledTransportAction<BulkRequest, BulkResponse> {
 
+    private static final Logger logger = LogManager.getLogger(TransportBulkAction.class);
+
     private final ThreadPool threadPool;
     private final AutoCreateIndex autoCreateIndex;
     private final ClusterService clusterService;
@@ -233,7 +235,7 @@ protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener<BulkResponse> listener) {
         } else {
             final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size());
             for (String index : autoCreateIndices) {
-                createIndex(index, bulkRequest.timeout(), new ActionListener<CreateIndexResponse>() {
+                createIndex(index, bulkRequest.timeout(), new ActionListener<>() {
                     @Override
                     public void onResponse(CreateIndexResponse result) {
                         if (counter.decrementAndGet() == 0) {
@@ -634,7 +636,7 @@ private long relativeTime() {
         return relativeTimeProvider.getAsLong();
     }
 
-    void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListener<BulkResponse> listener) {
+    private void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListener<BulkResponse> listener) {
         final long ingestStartTimeInNanos = System.nanoTime();
         final BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
         ingestService.executeBulkRequest(
@@ -643,7 +645,7 @@ void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListener<BulkResponse> listener) {
             bulkRequestModifier::markItemAsFailed,
             (originalThread, exception) -> {
                 if (exception != null) {
-                    logger.error("failed to execute pipeline for a bulk request", exception);
+                    logger.debug("failed to execute pipeline for a bulk request", exception);
                     listener.onFailure(exception);
                 } else {
                     long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos);
@@ -692,7 +694,7 @@ public boolean isForceExecution() {
 
     static final class BulkRequestModifier implements Iterator<DocWriteRequest<?>> {
 
-        private static final Logger LOGGER = LogManager.getLogger(BulkRequestModifier.class);
+        private static final Logger logger = LogManager.getLogger(BulkRequestModifier.class);
 
         final BulkRequest bulkRequest;
         final SparseFixedBitSet failedSlots;
@@ -774,7 +776,7 @@ synchronized void markItemAsDropped(int slot) {
 
         synchronized void markItemAsFailed(int slot, Exception e) {
             IndexRequest indexRequest = getIndexWriteRequest(bulkRequest.requests().get(slot));
-            LOGGER.debug(() -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}]",
+            logger.debug(() -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}]",
                 indexRequest.getPipeline(), indexRequest.index(), indexRequest.id()), e);
 
             // We hit a error during preprocessing a request, so we:
