Commit 2181d99

fixed changelog conflict
Signed-off-by: luyuncheng <[email protected]>
1 parent: de5e971

26 files changed: +92 -61 lines

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -30,6 +30,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Change InternalSignificantTerms to sum shard-level superset counts only in final reduce ([#8735](https://github.com/opensearch-project/OpenSearch/pull/8735))
 - Exclude 'benchmarks' from codecov report ([#8805](https://github.com/opensearch-project/OpenSearch/pull/8805))
 - Create separate SourceLookup instance per segment slice in SignificantTextAggregatorFactory ([#8807](https://github.com/opensearch-project/OpenSearch/pull/8807))
+- Replace the deprecated IndexReader APIs with new storedFields() & termVectors() ([#7792](https://github.com/opensearch-project/OpenSearch/pull/7792))
 
 ### Deprecated

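Note: every file in this commit follows the same migration. Lucene 9.x deprecates the one-shot document/term-vector helpers on IndexReader in favor of accessor objects that are fetched once and then queried per document. A minimal before/after sketch (the wrapper class and method names are ours for illustration; only the Lucene calls come from this commit):

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.TermVectors;

final class ReaderAccessSketch {

    // Before: deprecated one-shot helpers on IndexReader.
    static Document storedDocOld(IndexReader reader, int docId) throws IOException {
        return reader.document(docId); // deprecated in Lucene 9.x
    }

    static Fields vectorsOld(IndexReader reader, int docId) throws IOException {
        return reader.getTermVectors(docId); // deprecated in Lucene 9.x
    }

    // After: obtain the accessor once, then read any number of documents through it.
    static Document storedDocNew(IndexReader reader, int docId) throws IOException {
        StoredFields storedFields = reader.storedFields();
        return storedFields.document(docId);
    }

    static Fields vectorsNew(IndexReader reader, int docId) throws IOException {
        TermVectors termVectors = reader.termVectors();
        return termVectors.get(docId);
    }
}
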
server/src/main/java/org/opensearch/common/lucene/search/XMoreLikeThis.java

Lines changed: 6 additions & 2 deletions
@@ -56,7 +56,9 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermVectors;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.BooleanClause;
@@ -808,8 +810,10 @@ public String describeParams() {
      */
     private PriorityQueue<ScoreTerm> retrieveTerms(int docNum) throws IOException {
         Map<String, Int> termFreqMap = new HashMap<>();
+        final TermVectors termVectors = ir.termVectors();
+        final StoredFields storedFields = ir.storedFields();
         for (String fieldName : fieldNames) {
-            final Fields vectors = ir.getTermVectors(docNum);
+            final Fields vectors = termVectors.get(docNum);
             final Terms vector;
             if (vectors != null) {
                 vector = vectors.terms(fieldName);
@@ -819,7 +823,7 @@ private PriorityQueue<ScoreTerm> retrieveTerms(int docNum) throws IOException {
 
             // field does not store term vector info
             if (vector == null) {
-                Document d = ir.document(docNum);
+                Document d = storedFields.document(docNum);
                 IndexableField fields[] = d.getFields(fieldName);
                 for (IndexableField field : fields) {
                     final String stringValue = field.stringValue();

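Note: the two accessors are deliberately fetched before the field loop. Lucene documents that a StoredFields or TermVectors instance should be used from a single thread, and creating them once per retrieveTerms(...) call avoids re-creating them for every field. A compact sketch of that shape (method name and fallback comment are ours):

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.TermVectors;

final class HoistingSketch {

    // Fetch the accessors once, outside the loop; reuse them for every field.
    static void readFields(IndexReader ir, int docNum, String[] fieldNames) throws IOException {
        final TermVectors termVectors = ir.termVectors();
        final StoredFields storedFields = ir.storedFields();
        for (String fieldName : fieldNames) {
            final Fields vectors = termVectors.get(docNum); // was: ir.getTermVectors(docNum)
            final boolean hasVector = vectors != null && vectors.terms(fieldName) != null;
            if (!hasVector) {
                // field does not store term vector info: fall back to the stored value
                final Document d = storedFields.document(docNum); // was: ir.document(docNum)
                // ... analyze d.getFields(fieldName), as retrieveTerms(...) does above
            }
        }
    }
}
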
server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java

Lines changed: 3 additions & 3 deletions
@@ -45,6 +45,7 @@
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
@@ -507,12 +508,11 @@ private static void consumeFromType(IndexSearcher indexSearcher, String type, Ch
             final Bits liveDocs = leafReaderContext.reader().getLiveDocs();
             final IntPredicate isLiveDoc = liveDocs == null ? i -> true : liveDocs::get;
             final DocIdSetIterator docIdSetIterator = scorer.iterator();
+            final StoredFields storedFields = leafReaderContext.reader().storedFields();
             while (docIdSetIterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                 if (isLiveDoc.test(docIdSetIterator.docID())) {
                     logger.trace("processing doc {}", docIdSetIterator.docID());
-                    bytesRefConsumer.accept(
-                        leafReaderContext.reader().document(docIdSetIterator.docID()).getBinaryValue(DATA_FIELD_NAME)
-                    );
+                    bytesRefConsumer.accept(storedFields.document(docIdSetIterator.docID()).getBinaryValue(DATA_FIELD_NAME));
                 }
             }
         }

server/src/main/java/org/opensearch/index/engine/InternalEngine.java

Lines changed: 3 additions & 1 deletion
@@ -50,6 +50,7 @@
 import org.apache.lucene.index.ShuffleForcedMergePolicy;
 import org.apache.lucene.index.SoftDeletesRetentionMergePolicy;
 import org.apache.lucene.index.StandardDirectoryReader;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -3053,14 +3054,15 @@ private void restoreVersionMapAndCheckpointTracker(DirectoryReader directoryRead
             final CombinedDocValues dv = new CombinedDocValues(leaf.reader());
             final IdOnlyFieldVisitor idFieldVisitor = new IdOnlyFieldVisitor();
             final DocIdSetIterator iterator = scorer.iterator();
+            final StoredFields storedFields = leaf.reader().storedFields();
             int docId;
             while ((docId = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                 final long primaryTerm = dv.docPrimaryTerm(docId);
                 final long seqNo = dv.docSeqNo(docId);
                 localCheckpointTracker.markSeqNoAsProcessed(seqNo);
                 localCheckpointTracker.markSeqNoAsPersisted(seqNo);
                 idFieldVisitor.reset();
-                leaf.reader().document(docId, idFieldVisitor);
+                storedFields.document(docId, idFieldVisitor);
                 if (idFieldVisitor.getId() == null) {
                     assert dv.isTombstone(docId);
                     continue;

server/src/main/java/org/opensearch/index/engine/LuceneChangesSnapshot.java

Lines changed: 1 addition & 1 deletion
@@ -289,7 +289,7 @@ private Translog.Operation readDocAsOp(int docIndex) throws IOException {
             ? SourceFieldMapper.RECOVERY_SOURCE_NAME
             : SourceFieldMapper.NAME;
         final FieldsVisitor fields = new FieldsVisitor(true, sourceField);
-        leaf.reader().document(segmentDocID, fields);
+        leaf.reader().storedFields().document(segmentDocID, fields);
 
         final Translog.Operation op;
         final boolean isTombstone = parallelArray.isTombStone[docIndex];

server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java

Lines changed: 23 additions & 18 deletions
@@ -221,28 +221,33 @@ public int maxDoc() {
 
     @Override
     public void document(int docID, StoredFieldVisitor visitor) throws IOException {
-        if (docID != 0) {
-            throw new IllegalArgumentException("no such doc ID " + docID);
-        }
-        if (visitor.needsField(FAKE_SOURCE_FIELD) == StoredFieldVisitor.Status.YES) {
-            assert operation.source().toBytesRef().offset == 0;
-            assert operation.source().toBytesRef().length == operation.source().toBytesRef().bytes.length;
-            visitor.binaryField(FAKE_SOURCE_FIELD, operation.source().toBytesRef().bytes);
-        }
-        if (operation.routing() != null && visitor.needsField(FAKE_ROUTING_FIELD) == StoredFieldVisitor.Status.YES) {
-            visitor.stringField(FAKE_ROUTING_FIELD, operation.routing());
-        }
-        if (visitor.needsField(FAKE_ID_FIELD) == StoredFieldVisitor.Status.YES) {
-            BytesRef bytesRef = Uid.encodeId(operation.id());
-            final byte[] id = new byte[bytesRef.length];
-            System.arraycopy(bytesRef.bytes, bytesRef.offset, id, 0, bytesRef.length);
-            visitor.binaryField(FAKE_ID_FIELD, id);
-        }
+        storedFields().document(docID, visitor);
     }
 
     @Override
     public StoredFields storedFields() throws IOException {
-        throw new UnsupportedOperationException();
+        return new StoredFields() {
+            @Override
+            public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+                if (docID != 0) {
+                    throw new IllegalArgumentException("no such doc ID " + docID);
+                }
+                if (visitor.needsField(FAKE_SOURCE_FIELD) == StoredFieldVisitor.Status.YES) {
+                    assert operation.source().toBytesRef().offset == 0;
+                    assert operation.source().toBytesRef().length == operation.source().toBytesRef().bytes.length;
+                    visitor.binaryField(FAKE_SOURCE_FIELD, operation.source().toBytesRef().bytes);
+                }
+                if (operation.routing() != null && visitor.needsField(FAKE_ROUTING_FIELD) == StoredFieldVisitor.Status.YES) {
+                    visitor.stringField(FAKE_ROUTING_FIELD, operation.routing());
+                }
+                if (visitor.needsField(FAKE_ID_FIELD) == StoredFieldVisitor.Status.YES) {
+                    BytesRef bytesRef = Uid.encodeId(operation.id());
+                    final byte[] id = new byte[bytesRef.length];
+                    System.arraycopy(bytesRef.bytes, bytesRef.offset, id, 0, bytesRef.length);
+                    visitor.binaryField(FAKE_ID_FIELD, id);
+                }
+            }
+        };
     }
 
     @Override

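Note: the visitor logic that used to live in document(int, StoredFieldVisitor) now sits inside an anonymous StoredFields, and the deprecated entry point simply delegates, so both paths share one implementation. The pattern in isolation (a hypothetical single-document reader, not the OpenSearch class):

import java.io.IOException;

import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.StoredFields;

// Hypothetical single-document reader sketch.
class SingleDocStoredFieldsSketch {

    // Deprecated entry point: forward to storedFields() so there is one source of truth.
    public void document(int docID, StoredFieldVisitor visitor) throws IOException {
        storedFields().document(docID, visitor);
    }

    public StoredFields storedFields() throws IOException {
        return new StoredFields() {
            @Override
            public void document(int docID, StoredFieldVisitor visitor) throws IOException {
                if (docID != 0) { // exactly one synthetic document is exposed
                    throw new IllegalArgumentException("no such doc ID " + docID);
                }
                // ... feed the visitor the fake source/routing/id fields, as above
            }
        };
    }
}
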
server/src/main/java/org/opensearch/index/get/ShardGetService.java

Lines changed: 1 addition & 1 deletion
@@ -276,7 +276,7 @@ private GetResult innerGetLoadFromStoredFields(
         );
         if (fieldVisitor != null) {
             try {
-                docIdAndVersion.reader.document(docIdAndVersion.docId, fieldVisitor);
+                docIdAndVersion.reader.storedFields().document(docIdAndVersion.docId, fieldVisitor);
             } catch (IOException e) {
                 throw new OpenSearchException("Failed to get id [" + id + "]", e);
             }

server/src/main/java/org/opensearch/index/shard/ShardSplittingQuery.java

Lines changed: 1 addition & 1 deletion
@@ -286,7 +286,7 @@ public Status needsField(FieldInfo fieldInfo) throws IOException {
         boolean matches(int doc) throws IOException {
             routing = id = null;
             leftToVisit = 2;
-            leafReader.document(doc, this);
+            leafReader.storedFields().document(doc, this);
             assert id != null : "docID must not be null - we might have hit a nested document";
             int targetShardId = OperationRouting.generateShardId(indexMetadata, id, routing);
             return targetShardId != shardId;

server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java

Lines changed: 5 additions & 2 deletions
@@ -39,6 +39,7 @@
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermVectors;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.memory.MemoryIndex;
 import org.opensearch.OpenSearchException;
@@ -127,7 +128,8 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ
         /* or from an existing document */
         else if (docIdAndVersion != null) {
             // fields with stored term vectors
-            termVectorsByField = docIdAndVersion.reader.getTermVectors(docIdAndVersion.docId);
+            TermVectors termVectors = docIdAndVersion.reader.termVectors();
+            termVectorsByField = termVectors.get(docIdAndVersion.docId);
             Set<String> selectedFields = request.selectedFields();
             // generate tvs for fields where analyzer is overridden
             if (selectedFields == null && request.perFieldAnalyzer() != null) {
@@ -322,7 +324,8 @@ private static Fields generateTermVectors(
            }
        }
        /* and read vectors from it */
-       return index.createSearcher().getIndexReader().getTermVectors(0);
+       TermVectors termVectors = index.createSearcher().getIndexReader().termVectors();
+       return termVectors.get(0);
    }
 
    private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVectorsRequest request) throws IOException {

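Note: the second hunk reads vectors back from a throwaway MemoryIndex, whose single document always has id 0. A runnable sketch of that pattern (ours, not from the commit; assumes lucene-core and lucene-memory on the classpath):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.TermVectors;
import org.apache.lucene.index.memory.MemoryIndex;

public class MemoryIndexVectorsSketch {
    public static void main(String[] args) throws Exception {
        MemoryIndex index = new MemoryIndex(true); // true: also record offsets (optional here)
        index.addField("body", "the quick brown fox", new StandardAnalyzer());
        TermVectors termVectors = index.createSearcher().getIndexReader().termVectors();
        Fields vectors = termVectors.get(0); // a MemoryIndex holds one document, always id 0
        for (String field : vectors) {
            System.out.println(field + ": " + vectors.terms(field).size() + " terms");
        }
    }
}
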
server/src/main/java/org/opensearch/search/fetch/FetchPhase.java

Lines changed: 1 addition & 1 deletion
@@ -159,7 +159,7 @@ public void execute(SearchContext context) {
                 SequentialStoredFieldsLeafReader lf = (SequentialStoredFieldsLeafReader) currentReaderContext.reader();
                 fieldReader = lf.getSequentialStoredFieldsReader()::document;
             } else {
-                fieldReader = currentReaderContext.reader()::document;
+                fieldReader = currentReaderContext.reader().storedFields()::document;
             }
             for (FetchSubPhaseProcessor processor : processors) {
                 processor.setNextReader(currentReaderContext);

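Note: binding the method reference evaluates reader().storedFields() once per segment, so every document fetched from that leaf reuses the same StoredFields instance, mirroring the loop-hoisting elsewhere in this commit. A sketch with a hypothetical checked functional interface standing in for the consumer FetchPhase assigns to:

import java.io.IOException;

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.StoredFieldVisitor;

final class FieldReaderBindingSketch {

    // Hypothetical stand-in for the checked bi-consumer used by FetchPhase.
    interface DocFieldReader {
        void read(int docId, StoredFieldVisitor visitor) throws IOException;
    }

    // storedFields() runs once, at binding time; the resulting instance is then
    // reused for every document read through the returned reference.
    static DocFieldReader bind(LeafReader reader) throws IOException {
        return reader.storedFields()::document;
    }
}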