diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 8bc6891efd8e..686eb2a3b1d0 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -96,6 +96,12 @@ Other API Changes --------------------- +* GITHUB#11998: Add new stored fields and term vectors interfaces: IndexReader.storedFields() + and IndexReader.termVectors(). Deprecate IndexReader.document() and IndexReader.getTermVector(). + The new APIs do not rely upon ThreadLocal storage for each index segment, which can greatly + reduce RAM requirements when there are many threads and/or segments. + (Adrien Grand, Robert Muir) + * GITHUB#11742: MatchingFacetSetsCounts#getTopChildren now properly returns "top" children instead of all children. (Greg Miller) @@ -129,7 +135,6 @@ API Changes * GITHUB#11984: Improved TimeLimitBulkScorer to check the timeout at exponantial rate. (Costin Leau) - New Features --------------------- * GITHUB#11795: Add ByteWritesTrackingDirectoryWrapper to expose metrics for bytes merged, flushed, and overall diff --git a/lucene/MIGRATE.md b/lucene/MIGRATE.md index 549b1586fd04..ce7a3cc4bf58 100644 --- a/lucene/MIGRATE.md +++ b/lucene/MIGRATE.md @@ -19,6 +19,25 @@ ## Migration from Lucene 9.x to Lucene 10.0 +### Removed deprecated IndexSearcher.doc, IndexReader.document, IndexReader.getTermVectors (GITHUB#11998) + +The deprecated Stored Fields and Term Vectors APIs relied upon ThreadLocal storage and have been removed. + +Instead, call storedFields()/termVectors() to obtain an instance that can fetch data for multiple documents +and will be garbage-collected as usual. + +For example: +```java +TopDocs hits = searcher.search(query, 10); +StoredFields storedFields = reader.storedFields(); +for (ScoreDoc hit : hits.scoreDocs) { + Document doc = storedFields.document(hit.doc); +} +``` + +Note that these StoredFields and TermVectors instances should only be consumed in the thread where +they were acquired. In particular, it is illegal to share them across threads.
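+ +Term vectors are retrieved the same way; as a minimal sketch (the field name "body" below is only a placeholder): +```java +TermVectors termVectors = reader.termVectors(); +for (ScoreDoc hit : hits.scoreDocs) { + // "body" is a placeholder field name; the result may be null if term vectors were not indexed + Terms terms = termVectors.get(hit.doc, "body"); +} +``` + ### PersianStemFilter is added to PersianAnalyzer (LUCENE-10312) PersianAnalyzer now includes PersianStemFilter, that would change analysis results.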
If you need the exactly same analysis diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java index c049eb65a996..62e4d771257a 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java @@ -95,7 +95,7 @@ public void testEndOffsetPositionWithTeeSinkTokenFilter() throws Exception { w.close(); IndexReader r = DirectoryReader.open(dir); - Terms vector = r.getTermVectors(0).terms("field"); + Terms vector = r.termVectors().get(0).terms("field"); assertEquals(1, vector.size()); TermsEnum termsEnum = vector.iterator(); termsEnum.next(); diff --git a/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java b/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java index 3536224c50f2..69ac781f53f9 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java @@ -24,6 +24,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiDocValues; import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Sort; @@ -64,8 +65,9 @@ public void testBasic() throws Exception { SortField sortField = new SortField("collated", SortField.Type.STRING); TopDocs td = is.search(new MatchAllDocsQuery(), 5, new Sort(sortField)); - assertEquals("abc", ir.document(td.scoreDocs[0].doc).get("field")); - assertEquals("ABC", ir.document(td.scoreDocs[1].doc).get("field")); + StoredFields storedFields = ir.storedFields(); + assertEquals("abc", storedFields.document(td.scoreDocs[0].doc).get("field")); + assertEquals("ABC", storedFields.document(td.scoreDocs[1].doc).get("field")); ir.close(); dir.close(); } @@ -120,8 +122,9 @@ private void doTestRanges( Collator collator) throws Exception { SortedDocValues dvs = MultiDocValues.getSortedValues(is.getIndexReader(), "collated"); + StoredFields storedFields = is.storedFields(); for (int docID = 0; docID < is.getIndexReader().maxDoc(); docID++) { - Document doc = is.doc(docID); + Document doc = storedFields.document(docID); String s = doc.getField("field").stringValue(); boolean collatorAccepts = collate(collator, s, startPoint) >= 0 && collate(collator, s, endPoint) <= 0; diff --git a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUCollationDocValuesField.java b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUCollationDocValuesField.java index e441c9662e1d..e33443415d62 100644 --- a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUCollationDocValuesField.java +++ b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUCollationDocValuesField.java @@ -24,6 +24,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiDocValues; import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Sort; @@ -64,8 +65,9 @@ public void testBasic() throws 
Exception { SortField sortField = new SortField("collated", SortField.Type.STRING); TopDocs td = is.search(new MatchAllDocsQuery(), 5, new Sort(sortField)); - assertEquals("abc", ir.document(td.scoreDocs[0].doc).get("field")); - assertEquals("ABC", ir.document(td.scoreDocs[1].doc).get("field")); + StoredFields storedFields = ir.storedFields(); + assertEquals("abc", storedFields.document(td.scoreDocs[0].doc).get("field")); + assertEquals("ABC", storedFields.document(td.scoreDocs[1].doc).get("field")); ir.close(); dir.close(); } @@ -118,8 +120,9 @@ private void doTestRanges( Collator collator) throws Exception { SortedDocValues dvs = MultiDocValues.getSortedValues(is.getIndexReader(), "collated"); + StoredFields storedFields = is.storedFields(); for (int docID = 0; docID < is.getIndexReader().maxDoc(); docID++) { - Document doc = is.doc(docID); + Document doc = storedFields.document(docID); String s = doc.getField("field").stringValue(); boolean collatorAccepts = collator.compare(s, startPoint) >= 0 && collator.compare(s, endPoint) <= 0; diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/compressing/Lucene50CompressingStoredFieldsReader.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/compressing/Lucene50CompressingStoredFieldsReader.java index 315e4334ece7..a76b699cc72a 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/compressing/Lucene50CompressingStoredFieldsReader.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/compressing/Lucene50CompressingStoredFieldsReader.java @@ -692,7 +692,7 @@ public void skipBytes(long numBytes) throws IOException { } } - SerializedDocument document(int docID) throws IOException { + SerializedDocument serializedDocument(int docID) throws IOException { if (state.contains(docID) == false) { fieldsStream.seek(indexReader.getStartPointer(docID)); state.reset(docID); @@ -702,9 +702,9 @@ SerializedDocument document(int docID) throws IOException { } @Override - public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { + public void document(int docID, StoredFieldVisitor visitor) throws IOException { - final SerializedDocument doc = document(docID); + final SerializedDocument doc = serializedDocument(docID); for (int fieldIDX = 0; fieldIDX < doc.numStoredFields; fieldIDX++) { final long infoAndBits = doc.in.readVLong(); diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java index fc10c6de2ef8..4089a28e977c 100644 --- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java +++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.BaseStoredFieldsFormatTestCase; import org.apache.lucene.tests.util.LuceneTestCase.Nightly; @@ -58,8 +59,9 @@ public void testMixedCompressions() throws Exception { DirectoryReader ir = DirectoryReader.open(dir); assertEquals(10, 
ir.numDocs()); + StoredFields storedFields = ir.storedFields(); for (int i = 0; i < 10; i++) { - Document doc = ir.document(i); + Document doc = storedFields.document(i); assertEquals("value1", doc.get("field1")); assertEquals("value2", doc.get("field2")); } diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/BaseLucene80DocValuesFormatTestCase.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/BaseLucene80DocValuesFormatTestCase.java index e41e8e2b1333..5b9dd0e7d9b3 100644 --- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/BaseLucene80DocValuesFormatTestCase.java +++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/BaseLucene80DocValuesFormatTestCase.java @@ -52,6 +52,7 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -225,8 +226,9 @@ private void doTestSparseDocValuesVsStoredFields() throws Exception { final SortedSetDocValues sortedSet = DocValues.getSortedSet(reader, "sorted_set"); + StoredFields storedFields = reader.storedFields(); for (int i = 0; i < reader.maxDoc(); ++i) { - final Document doc = reader.document(i); + final Document doc = storedFields.document(i); final IndexableField valueField = doc.getField("value"); final Long value = valueField == null ? null : valueField.numericValue().longValue(); @@ -664,11 +666,12 @@ private void doTestSortedNumericBlocksOfVariousBitsPerValue(LongSupplier counts) for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); SortedNumericDocValues docValues = DocValues.getSortedNumeric(r, "dv"); + StoredFields storedFields = r.storedFields(); for (int i = 0; i < r.maxDoc(); i++) { if (i > docValues.docID()) { docValues.nextDoc(); } - String[] expectedStored = r.document(i).getValues("stored"); + String[] expectedStored = storedFields.document(i).getValues("stored"); if (i < docValues.docID()) { assertEquals(0, expectedStored.length); } else { @@ -736,6 +739,7 @@ private void assertDVAdvance(Directory dir, int jumpStep) throws IOException { TestUtil.checkReader(ir); for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); + StoredFields storedFields = r.storedFields(); for (int jump = jumpStep; jump < r.maxDoc(); jump += jumpStep) { // Create a new instance each time to ensure jumps from the beginning @@ -750,7 +754,7 @@ private void assertDVAdvance(Directory dir, int jumpStep) throws IOException { + jump + " from #" + (docID - jump); - String storedValue = r.document(docID).get("stored"); + String storedValue = storedFields.document(docID).get("stored"); if (storedValue == null) { assertFalse("There should be no DocValue for " + base, docValues.advanceExact(docID)); } else { diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene87/TestLucene87StoredFieldsFormatHighCompression.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene87/TestLucene87StoredFieldsFormatHighCompression.java index e844c4cd26d6..43881f5a395f 100644 --- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene87/TestLucene87StoredFieldsFormatHighCompression.java +++ 
b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene87/TestLucene87StoredFieldsFormatHighCompression.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.BaseStoredFieldsFormatTestCase; import org.apache.lucene.tests.util.LuceneTestCase.Nightly; @@ -57,8 +58,9 @@ public void testMixedCompressions() throws Exception { DirectoryReader ir = DirectoryReader.open(dir); assertEquals(10, ir.numDocs()); + StoredFields storedFields = ir.storedFields(); for (int i = 0; i < 10; i++) { - Document doc = ir.document(i); + Document doc = storedFields.document(i); assertEquals("value1", doc.get("field1")); assertEquals("value2", doc.get("field2")); } diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_index/TestBackwardsCompatibility.java index cb06379d41f6..9af11fb633c4 100644 --- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_index/TestBackwardsCompatibility.java +++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_index/TestBackwardsCompatibility.java @@ -91,7 +91,9 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.StandardDirectoryReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.VectorSimilarityFunction; @@ -1099,9 +1101,11 @@ private void doTestHits(ScoreDoc[] hits, int expectedCount, IndexReader reader) throws IOException { final int hitCount = hits.length; assertEquals("wrong number of hits", expectedCount, hitCount); + StoredFields storedFields = reader.storedFields(); + TermVectors termVectors = reader.termVectors(); for (int i = 0; i < hitCount; i++) { - reader.document(hits[i].doc); - reader.getTermVectors(hits[i].doc); + storedFields.document(hits[i].doc); + termVectors.get(hits[i].doc); } } @@ -1118,10 +1122,12 @@ public void searchIndex( final Bits liveDocs = MultiBits.getLiveDocs(reader); assertNotNull(liveDocs); + StoredFields storedFields = reader.storedFields(); + TermVectors termVectors = reader.termVectors(); for (int i = 0; i < DOCS_COUNT; i++) { if (liveDocs.get(i)) { - Document d = reader.document(i); + Document d = storedFields.document(i); List fields = d.getFields(); boolean isProxDoc = d.getField("content3") == null; if (isProxDoc) { @@ -1144,7 +1150,7 @@ public void searchIndex( assertEquals("field with non-ascii name", f.stringValue()); } - Fields tfvFields = reader.getTermVectors(i); + Fields tfvFields = termVectors.get(i); assertNotNull("i=" + i, tfvFields); Terms tfv = tfvFields.terms("utf8"); assertNotNull("docID=" + i + " index=" + oldName, tfv); @@ -1176,7 +1182,7 @@ public void searchIndex( MultiDocValues.getSortedNumericValues(reader, "dvSortedNumeric"); for (int i = 0; i < DOCS_COUNT; i++) { - int id = Integer.parseInt(reader.document(i).get("id")); + int id = Integer.parseInt(storedFields.document(i).get("id")); assertEquals(i, dvByte.nextDoc()); assertEquals(id, dvByte.longValue()); @@ -1231,7 +1237,7 @@ public void searchIndex( searcher.search(new TermQuery(new Term(new 
String("content"), "aaa")), 1000).scoreDocs; // First document should be #0 - Document d = searcher.getIndexReader().document(hits[0].doc); + Document d = storedFields.document(hits[0].doc); assertEquals("didn't get the right document first", "0", d.get("id")); doTestHits(hits, 34, searcher.getIndexReader()); @@ -1337,7 +1343,7 @@ public void searchIndex( // test KNN search ScoreDoc[] scoreDocs = assertKNNSearch(searcher, KNN_VECTOR, 10, 10, "0"); for (int i = 0; i < scoreDocs.length; i++) { - int id = Integer.parseInt(reader.document(scoreDocs[i].doc).get("id")); + int id = Integer.parseInt(storedFields.document(scoreDocs[i].doc).get("id")); int expectedId = i < DELETED_ID ? i : i + 1; assertEquals(expectedId, id); } @@ -1356,7 +1362,7 @@ private static ScoreDoc[] assertKNNSearch( ScoreDoc[] hits = searcher.search(new KnnVectorQuery(KNN_VECTOR_FIELD, queryVector, k), k).scoreDocs; assertEquals("wrong number of hits", expectedHitsCount, hits.length); - Document d = searcher.doc(hits[0].doc); + Document d = searcher.storedFields().document(hits[0].doc); assertEquals("wrong first document", expectedFirstDocId, d.get("id")); return hits; } @@ -1388,7 +1394,7 @@ public void changeIndexWithAdds(Random random, Directory dir, Version nameVersio IndexReader reader = DirectoryReader.open(dir); IndexSearcher searcher = newSearcher(reader); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), 1000).scoreDocs; - Document d = searcher.getIndexReader().document(hits[0].doc); + Document d = searcher.getIndexReader().storedFields().document(hits[0].doc); assertEquals("wrong first document", "0", d.get("id")); doTestHits(hits, 44, searcher.getIndexReader()); @@ -1420,7 +1426,7 @@ public void changeIndexWithAdds(Random random, Directory dir, Version nameVersio // make sure searching sees right # hits fot term search hits = searcher.search(new TermQuery(new Term("content", "aaa")), 1000).scoreDocs; assertEquals("wrong number of hits", 44, hits.length); - d = searcher.doc(hits[0].doc); + d = searcher.storedFields().document(hits[0].doc); doTestHits(hits, 44, searcher.getIndexReader()); assertEquals("wrong first document", "0", d.get("id")); @@ -1445,7 +1451,7 @@ public void changeIndexNoAdds(Random random, Directory dir, Version nameVersion) IndexSearcher searcher = newSearcher(reader); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), 1000).scoreDocs; assertEquals("wrong number of hits", 34, hits.length); - Document d = searcher.doc(hits[0].doc); + Document d = searcher.storedFields().document(hits[0].doc); assertEquals("wrong first document", "0", d.get("id")); if (nameVersion.major >= KNN_VECTOR_MIN_SUPPORTED_VERSION) { diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java index ee7f00029007..405305d50a1e 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiBits; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -82,13 +83,14 @@ public int doLogic() throws Exception { closeSearcher = false; } + StoredFields storedFields = 
reader.storedFields(); // optionally warm and add num docs traversed to count if (withWarm()) { Document doc = null; Bits liveDocs = MultiBits.getLiveDocs(reader); for (int m = 0; m < reader.maxDoc(); m++) { if (null == liveDocs || liveDocs.get(m)) { - doc = reader.document(m); + doc = storedFields.document(m); res += (doc == null ? 0 : 1); } } @@ -129,7 +131,7 @@ public int doLogic() throws Exception { System.out.println("numDocs() = " + reader.numDocs()); for (int i = 0; i < hits.scoreDocs.length; i++) { final int docID = hits.scoreDocs[i].doc; - final Document doc = reader.document(docID); + final Document doc = storedFields.document(docID); System.out.println( " " + i @@ -159,19 +161,19 @@ public int doLogic() throws Exception { } protected int withTopDocs(IndexSearcher searcher, Query q, TopDocs hits) throws Exception { - IndexReader reader = searcher.getIndexReader(); int res = 0; if (withTraverse()) { final ScoreDoc[] scoreDocs = hits.scoreDocs; int traversalSize = Math.min(scoreDocs.length, traversalSize()); + StoredFields storedFields = searcher.storedFields(); if (traversalSize > 0) { boolean retrieve = withRetrieve(); for (int m = 0; m < traversalSize; m++) { int id = scoreDocs[m].doc; res++; if (retrieve) { - Document document = retrieveDoc(reader, id); + Document document = retrieveDoc(storedFields, id); res += document != null ? 1 : 0; } } @@ -184,8 +186,8 @@ protected Collector createCollector() throws Exception { return TopScoreDocCollector.create(numHits(), withTotalHits() ? Integer.MAX_VALUE : 1); } - protected Document retrieveDoc(IndexReader ir, int id) throws IOException { - return ir.document(id); + protected Document retrieveDoc(StoredFields storedFields, int id) throws IOException { + return storedFields.document(id); } /** Return query maker used for this task. */ diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java index f26c3e1a40a7..dd6c647fae18 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java @@ -31,6 +31,8 @@ import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StoredFields; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; @@ -196,12 +198,14 @@ private class StandardHLImpl implements HLImpl { @Override public void withTopDocs(IndexSearcher searcher, Query q, TopDocs hits) throws Exception { IndexReader reader = searcher.getIndexReader(); + StoredFields storedFields = reader.storedFields(); + TermVectors termVectors = reader.termVectors(); highlighter.setFragmentScorer(new QueryScorer(q)); // highlighter.setTextFragmenter(); unfortunately no sentence mechanism, not even regex. // Default here is trivial for (ScoreDoc scoreDoc : docIdOrder(hits.scoreDocs)) { - Document document = reader.document(scoreDoc.doc, hlFields); - Fields tvFields = termVecs ? reader.getTermVectors(scoreDoc.doc) : null; + Document document = storedFields.document(scoreDoc.doc, hlFields); + Fields tvFields = termVecs ? 
termVectors.get(scoreDoc.doc) : null; for (IndexableField indexableField : document) { TokenStream tokenStream; if (termVecs) { @@ -316,8 +320,10 @@ private class NoHLImpl implements HLImpl { @Override public void withTopDocs(IndexSearcher searcher, Query q, TopDocs hits) throws Exception { // just retrieve the HL fields + StoredFields storedFields = searcher.storedFields(); for (ScoreDoc scoreDoc : docIdOrder(hits.scoreDocs)) { - preventOptimizeAway += searcher.doc(scoreDoc.doc, hlFields).iterator().hasNext() ? 2 : 1; + preventOptimizeAway += + storedFields.document(scoreDoc.doc, hlFields).iterator().hasNext() ? 2 : 1; } } } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java index c0cd23ba9d94..ac908156c2ac 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java @@ -23,7 +23,7 @@ import org.apache.lucene.benchmark.byTask.PerfRunData; import org.apache.lucene.document.Document; import org.apache.lucene.document.DocumentStoredFieldVisitor; -import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.StoredFields; /** * Search and Traverse and Retrieve docs task using a FieldVisitor loading only the requested @@ -51,12 +51,12 @@ public boolean withRetrieve() { } @Override - protected Document retrieveDoc(IndexReader ir, int id) throws IOException { + protected Document retrieveDoc(StoredFields storedFields, int id) throws IOException { if (fieldsToLoad == null) { - return ir.document(id); + return storedFields.document(id); } else { DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad); - ir.document(id, visitor); + storedFields.document(id, visitor); return visitor.getDocument(); } } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java index fa741858eeb4..857117f4570a 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java @@ -20,6 +20,7 @@ import java.io.PrintWriter; import org.apache.lucene.benchmark.quality.utils.DocNameExtractor; import org.apache.lucene.benchmark.quality.utils.SubmissionReport; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; @@ -128,8 +129,9 @@ private QualityStats analyzeQueryResults( System.currentTimeMillis(); // extraction of first doc name we measure also construction of // doc name extractor, just in case. 
DocNameExtractor xt = new DocNameExtractor(docNameField); + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < sd.length; i++) { - String docName = xt.docName(searcher, sd[i].doc); + String docName = xt.docName(storedFields, sd[i].doc); long docNameExtractTime = System.currentTimeMillis() - t1; t1 = System.currentTimeMillis(); boolean isRelevant = judge.isRelevant(docName, qq); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java index 72a7c5feaf1f..75eb5e7dcc35 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java @@ -22,7 +22,7 @@ import java.util.Objects; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.StoredFieldVisitor; -import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.index.StoredFields; /** Utility: extract doc names from an index */ public class DocNameExtractor { @@ -41,34 +41,32 @@ public DocNameExtractor(final String docNameField) { /** * Extract the name of the input doc from the index. * - * @param searcher access to the index. + * @param storedFields access to the index. * @param docid ID of doc whose name is needed. * @return the name of the input doc as extracted from the index. * @throws IOException if cannot extract the doc name from the index. */ - public String docName(IndexSearcher searcher, int docid) throws IOException { + public String docName(StoredFields storedFields, int docid) throws IOException { final List name = new ArrayList<>(); - searcher - .getIndexReader() - .document( - docid, - new StoredFieldVisitor() { - @Override - public void stringField(FieldInfo fieldInfo, String value) { - name.add(Objects.requireNonNull(value, "String value should not be null")); - } + storedFields.document( + docid, + new StoredFieldVisitor() { + @Override + public void stringField(FieldInfo fieldInfo, String value) { + name.add(Objects.requireNonNull(value, "String value should not be null")); + } - @Override - public Status needsField(FieldInfo fieldInfo) { - if (!name.isEmpty()) { - return Status.STOP; - } else if (fieldInfo.name.equals(docNameField)) { - return Status.YES; - } else { - return Status.NO; - } - } - }); + @Override + public Status needsField(FieldInfo fieldInfo) { + if (!name.isEmpty()) { + return Status.STOP; + } else if (fieldInfo.name.equals(docNameField)) { + return Status.YES; + } else { + return Status.NO; + } + } + }); if (name.size() != 0) { return name.get(0); } else { diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java index cbde9e9e2fe6..639837d06274 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java @@ -21,6 +21,7 @@ import java.text.NumberFormat; import java.util.Locale; import org.apache.lucene.benchmark.quality.QualityQuery; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; @@ -66,8 +67,9 @@ public void report(QualityQuery qq, TopDocs td, String docNameField, IndexSearch ScoreDoc[] sd 
= td.scoreDocs; String sep = " \t "; DocNameExtractor xt = new DocNameExtractor(docNameField); + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < sd.length; i++) { - String docName = xt.docName(searcher, sd[i].doc); + String docName = xt.docName(storedFields, sd[i].doc); logger.println( qq.getQueryID() + sep diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java index 38e4804da41b..0ded6008ab85 100644 --- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java +++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java @@ -48,6 +48,7 @@ import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SerialMergeScheduler; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; @@ -240,8 +241,9 @@ public void testDocMakerThreadSafety() throws Exception { final int maxDoc = r.maxDoc(); assertEquals(1000, maxDoc); + StoredFields storedFields = r.storedFields(); for (int i = 0; i < 1000; i++) { - assertNotNull("doc " + i + " has null country", r.document(i).getField("country")); + assertNotNull("doc " + i + " has null country", storedFields.document(i).getField("country")); } r.close(); } @@ -644,7 +646,7 @@ public void testIndexWriterSettings() throws Exception { writer.close(); Directory dir = benchmark.getRunData().getDirectory(); IndexReader reader = DirectoryReader.open(dir); - Fields tfv = reader.getTermVectors(0); + Fields tfv = reader.termVectors().get(0); assertNotNull(tfv); assertTrue(tfv.size() > 0); reader.close(); diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestLineDocSource.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestLineDocSource.java index ad058f9d6b7d..393d352abf2c 100644 --- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestLineDocSource.java +++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestLineDocSource.java @@ -166,7 +166,8 @@ private void doIndexAndSearchTestWithRepeats( if (storedField == null) { storedField = DocMaker.BODY_FIELD; // added to all docs and satisfies field-name == value } - assertEquals("Wrong field value", storedField, searcher.doc(0).get(storedField)); + assertEquals( + "Wrong field value", storedField, searcher.storedFields().document(0).get(storedField)); } finally { IOUtils.close(reader, runData); } diff --git a/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java index 7cd47ccae405..798dc7c787af 100644 --- a/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java +++ b/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java @@ -28,7 +28,9 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiTerms; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.BooleanClause; @@ 
-132,9 +134,11 @@ public BooleanPerceptronClassifier( if (query != null) { q.add(new BooleanClause(query, BooleanClause.Occur.MUST)); } + TermVectors termVectors = indexReader.termVectors(); + StoredFields storedFields = indexReader.storedFields(); // run the search and use stored field values for (ScoreDoc scoreDoc : indexSearcher.search(q.build(), Integer.MAX_VALUE).scoreDocs) { - Document doc = indexSearcher.doc(scoreDoc.doc); + Document doc = storedFields.document(scoreDoc.doc); IndexableField textField = doc.getField(textFieldName); @@ -150,7 +154,7 @@ public BooleanPerceptronClassifier( long modifier = correctClass.compareTo(assignedClass); if (modifier != 0) { updateWeights( - indexReader, + termVectors, scoreDoc.doc, assignedClass, weights, @@ -164,7 +168,7 @@ public BooleanPerceptronClassifier( } private void updateWeights( - IndexReader indexReader, + TermVectors termVectors, int docId, Boolean assignedClass, SortedMap weights, @@ -174,7 +178,7 @@ private void updateWeights( TermsEnum cte = textTerms.iterator(); // get the doc term vectors - Terms terms = indexReader.getTermVector(docId, textFieldName); + Terms terms = termVectors.get(docId, textFieldName); if (terms == null) { throw new IOException("term vectors must be stored for field " + textFieldName); diff --git a/lucene/classification/src/java/org/apache/lucene/classification/KNearestFuzzyClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/KNearestFuzzyClassifier.java index 6e955b3e08b5..235ed2c0d6b1 100644 --- a/lucene/classification/src/java/org/apache/lucene/classification/KNearestFuzzyClassifier.java +++ b/lucene/classification/src/java/org/apache/lucene/classification/KNearestFuzzyClassifier.java @@ -29,6 +29,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -159,8 +160,9 @@ private List> buildListFromTopDocs(TopDocs topDoc Map classBoosts = new HashMap<>(); // this is a boost based on class ranking positions in topDocs float maxScore = topDocs.totalHits.value == 0 ? 
Float.NaN : topDocs.scoreDocs[0].score; + StoredFields storedFields = indexSearcher.storedFields(); for (ScoreDoc scoreDoc : topDocs.scoreDocs) { - IndexableField storableField = indexSearcher.doc(scoreDoc.doc).getField(classFieldName); + IndexableField storableField = storedFields.document(scoreDoc.doc).getField(classFieldName); if (storableField != null) { BytesRef cl = new BytesRef(storableField.stringValue()); // update count diff --git a/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java index d39e5b6e8273..48424d4f0edd 100644 --- a/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java +++ b/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java @@ -29,6 +29,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.queries.mlt.MoreLikeThis; import org.apache.lucene.search.BooleanClause; @@ -96,7 +97,8 @@ public KNearestNeighborClassifier( int minDocsFreq, int minTermFreq, String classFieldName, - String... textFieldNames) { + String... textFieldNames) + throws IOException { this.textFieldNames = textFieldNames; this.classFieldName = classFieldName; this.mlt = new MoreLikeThis(indexReader); @@ -191,8 +193,10 @@ protected List> buildListFromTopDocs(TopDocs topD Map classBoosts = new HashMap<>(); // this is a boost based on class ranking positions in topDocs float maxScore = topDocs.totalHits.value == 0 ? Float.NaN : topDocs.scoreDocs[0].score; + StoredFields storedFields = indexSearcher.storedFields(); for (ScoreDoc scoreDoc : topDocs.scoreDocs) { - IndexableField[] storableFields = indexSearcher.doc(scoreDoc.doc).getFields(classFieldName); + IndexableField[] storableFields = + storedFields.document(scoreDoc.doc).getFields(classFieldName); for (IndexableField singleStorableField : storableFields) { if (singleStorableField != null) { BytesRef cl = new BytesRef(singleStorableField.stringValue()); diff --git a/lucene/classification/src/java/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifier.java index 5e6f6cb6b773..790f245e620b 100644 --- a/lucene/classification/src/java/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifier.java +++ b/lucene/classification/src/java/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifier.java @@ -75,7 +75,8 @@ public KNearestNeighborDocumentClassifier( int minTermFreq, String classFieldName, Map field2analyzer, - String... textFieldNames) { + String... 
textFieldNames) + throws IOException { super( indexReader, similarity, diff --git a/lucene/classification/src/java/org/apache/lucene/classification/utils/ConfusionMatrixGenerator.java b/lucene/classification/src/java/org/apache/lucene/classification/utils/ConfusionMatrixGenerator.java index e9fa47180f68..4994115b0a34 100644 --- a/lucene/classification/src/java/org/apache/lucene/classification/utils/ConfusionMatrixGenerator.java +++ b/lucene/classification/src/java/org/apache/lucene/classification/utils/ConfusionMatrixGenerator.java @@ -30,6 +30,7 @@ import org.apache.lucene.classification.Classifier; import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermRangeQuery; @@ -80,13 +81,14 @@ public static ConfusionMatrix getConfusionMatrix( double time = 0d; int counter = 0; + StoredFields storedFields = indexSearcher.storedFields(); for (ScoreDoc scoreDoc : topDocs.scoreDocs) { if (timeoutMilliseconds > 0 && time >= timeoutMilliseconds) { break; } - Document doc = reader.document(scoreDoc.doc); + Document doc = storedFields.document(scoreDoc.doc); String[] correctAnswers = doc.getValues(classFieldName); if (correctAnswers != null && correctAnswers.length > 0) { diff --git a/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java b/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java index 6bd7ff600e85..06a914dade9b 100644 --- a/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java +++ b/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java @@ -29,6 +29,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.ScoreDoc; @@ -134,6 +135,7 @@ public void split( int b = 0; // iterate over existing documents + StoredFields storedFields = originalIndex.storedFields(); for (GroupDocs group : topGroups.groups) { assert group.totalHits.relation == TotalHits.Relation.EQUAL_TO; long totalHits = group.totalHits.value; @@ -144,7 +146,7 @@ public void split( for (ScoreDoc scoreDoc : group.scoreDocs) { // create a new document for indexing - Document doc = createNewDoc(originalIndex, ft, scoreDoc, fieldNames); + Document doc = createNewDoc(storedFields, ft, scoreDoc, fieldNames); // add it to one of the IDXs if (b % 2 == 0 && tc < testSize) { @@ -180,10 +182,10 @@ public void split( } private Document createNewDoc( - IndexReader originalIndex, FieldType ft, ScoreDoc scoreDoc, String[] fieldNames) + StoredFields originalFields, FieldType ft, ScoreDoc scoreDoc, String[] fieldNames) throws IOException { Document doc = new Document(); - Document document = originalIndex.document(scoreDoc.doc); + Document document = originalFields.document(scoreDoc.doc); if (fieldNames != null && fieldNames.length > 0) { for (String fieldName : fieldNames) { IndexableField field = document.getField(fieldName); diff --git a/lucene/classification/src/test/org/apache/lucene/classification/utils/TestDocToDoubleVectorUtils.java 
b/lucene/classification/src/test/org/apache/lucene/classification/utils/TestDocToDoubleVectorUtils.java index c150d011a2e3..bd0308108a8c 100644 --- a/lucene/classification/src/test/org/apache/lucene/classification/utils/TestDocToDoubleVectorUtils.java +++ b/lucene/classification/src/test/org/apache/lucene/classification/utils/TestDocToDoubleVectorUtils.java @@ -22,6 +22,7 @@ import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiTerms; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -81,9 +82,10 @@ public void tearDown() throws Exception { @Test public void testDenseFreqDoubleArrayConversion() throws Exception { IndexSearcher indexSearcher = new IndexSearcher(index); + TermVectors termVectors = index.termVectors(); for (ScoreDoc scoreDoc : indexSearcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs) { - Terms docTerms = index.getTermVector(scoreDoc.doc, "text"); + Terms docTerms = termVectors.get(scoreDoc.doc, "text"); Double[] vector = DocToDoubleVectorUtils.toDenseLocalFreqDoubleArray(docTerms); assertNotNull(vector); assertTrue(vector.length > 0); @@ -95,9 +97,10 @@ public void testSparseFreqDoubleArrayConversion() throws Exception { Terms fieldTerms = MultiTerms.getTerms(index, "text"); if (fieldTerms != null && fieldTerms.size() != -1) { IndexSearcher indexSearcher = new IndexSearcher(index); + TermVectors termVectors = index.termVectors(); for (ScoreDoc scoreDoc : indexSearcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs) { - Terms docTerms = index.getTermVector(scoreDoc.doc, "text"); + Terms docTerms = termVectors.get(scoreDoc.doc, "text"); Double[] vector = DocToDoubleVectorUtils.toSparseLocalFreqDoubleArray(docTerms, fieldTerms); assertNotNull(vector); assertTrue(vector.length > 0); diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java index f63f88a96396..b0c19e8526ef 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java @@ -116,7 +116,7 @@ private void readIndex(int size) throws IOException { } @Override - public void visitDocument(int n, StoredFieldVisitor visitor) throws IOException { + public void document(int n, StoredFieldVisitor visitor) throws IOException { in.seek(offsets[n]); while (true) { diff --git a/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsReader.java index 1d248b1aa8dc..0e4d33c5abe2 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsReader.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsReader.java @@ -19,23 +19,21 @@ import java.io.Closeable; import java.io.IOException; import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.StoredFields; /** * Codec API for reading stored fields. * - *

You need to implement {@link #visitDocument(int, StoredFieldVisitor)} to read the stored - * fields for a document, implement {@link #clone()} (creating clones of any IndexInputs used, etc), - * and {@link #close()} + *

You need to implement {@link #document(int, StoredFieldVisitor)} to read the stored fields for + * a document, implement {@link #clone()} (creating clones of any IndexInputs used, etc), and {@link + * #close()} * * @lucene.experimental */ -public abstract class StoredFieldsReader implements Cloneable, Closeable { +public abstract class StoredFieldsReader extends StoredFields implements Cloneable, Closeable { /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */ protected StoredFieldsReader() {} - /** Visit the stored fields for document docID */ - public abstract void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException; - @Override public abstract StoredFieldsReader clone(); diff --git a/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java index 372378adc584..1b527e593420 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java @@ -132,7 +132,7 @@ public int merge(MergeState mergeState) throws IOException { } assert sub.mappedDocID == docCount; startDocument(); - sub.reader.visitDocument(sub.docID, sub.visitor); + sub.reader.document(sub.docID, sub.visitor); finishDocument(); docCount++; } @@ -149,7 +149,7 @@ public int merge(MergeState mergeState) throws IOException { * MergeVisitor visitor = new MergeVisitor(mergeState, readerIndex); * for (...) { * startDocument(); - * storedFieldsReader.visitDocument(docID, visitor); + * storedFieldsReader.document(docID, visitor); * finishDocument(); * } * diff --git a/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java index 257621a754f1..0ad39dd1abe7 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java @@ -18,26 +18,18 @@ import java.io.Closeable; import java.io.IOException; -import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // javadocs -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.TermVectors; /** * Codec API for reading term vectors: * * @lucene.experimental */ -public abstract class TermVectorsReader implements Cloneable, Closeable { +public abstract class TermVectorsReader extends TermVectors implements Cloneable, Closeable { /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */ protected TermVectorsReader() {} - /** - * Returns term vectors for this document, or null if term vectors were not indexed. If offsets - * are available they are in an {@link OffsetAttribute} available from the {@link - * org.apache.lucene.index.PostingsEnum}. - */ - public abstract Fields get(int doc) throws IOException; - /** * Checks consistency of this reader. 
* diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingStoredFieldsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingStoredFieldsReader.java index 159837b11b1e..a50523b2520c 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingStoredFieldsReader.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingStoredFieldsReader.java @@ -603,7 +603,7 @@ public void skipBytes(long numBytes) throws IOException { } } - SerializedDocument document(int docID) throws IOException { + SerializedDocument serializedDocument(int docID) throws IOException { if (state.contains(docID) == false) { fieldsStream.seek(indexReader.getStartPointer(docID)); state.reset(docID); @@ -625,9 +625,9 @@ boolean isLoaded(int docID) { } @Override - public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { + public void document(int docID, StoredFieldVisitor visitor) throws IOException { - final SerializedDocument doc = document(docID); + final SerializedDocument doc = serializedDocument(docID); for (int fieldIDX = 0; fieldIDX < doc.numStoredFields; fieldIDX++) { final long infoAndBits = doc.in.readVLong(); diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingStoredFieldsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingStoredFieldsWriter.java index ca9f93bb4018..63afcef96d9c 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingStoredFieldsWriter.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingStoredFieldsWriter.java @@ -513,7 +513,7 @@ public void finish(int numDocs) throws IOException { private void copyOneDoc(Lucene90CompressingStoredFieldsReader reader, int docID) throws IOException { assert reader.getVersion() == VERSION_CURRENT; - SerializedDocument doc = reader.document(docID); + SerializedDocument doc = reader.serializedDocument(docID); startDocument(); bufferedDocs.copyBytes(doc.in, doc.length); numStoredFieldsInDoc = doc.numStoredFields; @@ -641,7 +641,7 @@ public int merge(MergeState mergeState) throws IOException { } else if (sub.mergeStrategy == MergeStrategy.VISITOR) { assert visitors[sub.readerIndex] != null; startDocument(); - reader.visitDocument(sub.docID, visitors[sub.readerIndex]); + reader.document(sub.docID, visitors[sub.readerIndex]); finishDocument(); ++docCount; sub = docIDMerger.next(); diff --git a/lucene/core/src/java/org/apache/lucene/document/Document.java b/lucene/core/src/java/org/apache/lucene/document/Document.java index 49f7515018b5..36b86940179d 100644 --- a/lucene/core/src/java/org/apache/lucene/document/Document.java +++ b/lucene/core/src/java/org/apache/lucene/document/Document.java @@ -20,9 +20,8 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; -import org.apache.lucene.index.IndexReader; // for javadoc import org.apache.lucene.index.IndexableField; -import org.apache.lucene.search.IndexSearcher; // for javadoc +import org.apache.lucene.index.StoredFields; // for javadoc import org.apache.lucene.search.ScoreDoc; // for javadoc import org.apache.lucene.util.BytesRef; @@ -36,7 +35,7 @@ * *

Note that fields which are not {@link * org.apache.lucene.index.IndexableFieldType#stored() stored} are not available in documents - * retrieved from the index, e.g. with {@link ScoreDoc#doc} or {@link IndexReader#document(int)}. + * retrieved from the index, e.g. with {@link ScoreDoc#doc} or {@link StoredFields#document(int)}. */ public final class Document implements Iterable { @@ -180,7 +179,7 @@ public IndexableField[] getFields(String name) { * Returns a List of all the fields in a document. * *

Note that fields which are not stored are not available in documents retrieved - * from the index, e.g. {@link IndexSearcher#doc(int)} or {@link IndexReader#document(int)}. + * from the index, e.g. {@link StoredFields#document(int)}. * * @return an immutable List<Field> */ diff --git a/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java b/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java index 838a0494fbf4..8dbb21d8c199 100644 --- a/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java +++ b/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java @@ -22,8 +22,8 @@ import java.util.Objects; import java.util.Set; import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.StoredFields; /** * A {@link StoredFieldVisitor} that creates a {@link Document} from stored fields. @@ -31,7 +31,7 @@ *

This visitor supports loading all stored fields, or only specific requested fields provided * from a {@link Set}. * - *

This is used by {@link IndexReader#document(int)} to load a document. + *

This is used by {@link StoredFields#document(int)} to load a document. * * @lucene.experimental */ diff --git a/lucene/core/src/java/org/apache/lucene/document/StoredField.java b/lucene/core/src/java/org/apache/lucene/document/StoredField.java index 2b043d218889..c93b9b5d7170 100644 --- a/lucene/core/src/java/org/apache/lucene/document/StoredField.java +++ b/lucene/core/src/java/org/apache/lucene/document/StoredField.java @@ -21,8 +21,8 @@ import org.apache.lucene.util.BytesRef; /** - * A field whose value is stored so that {@link IndexSearcher#doc} and {@link IndexReader#document - * IndexReader.document()} will return the field and its value. + * A field whose value is stored so that {@link IndexSearcher#storedFields} and {@link + * IndexReader#storedFields} will return the field and its value. */ public class StoredField extends Field { diff --git a/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java b/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java index 2a7d6e70cbdd..6c6182a65b66 100644 --- a/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java @@ -118,6 +118,23 @@ public final Fields getTermVectors(int docID) throws IOException { return subReaders[i].getTermVectors(docID - starts[i]); // dispatch to subreader } + @Override + public final TermVectors termVectors() throws IOException { + ensureOpen(); + TermVectors[] subVectors = new TermVectors[subReaders.length]; + return new TermVectors() { + @Override + public Fields get(int docID) throws IOException { + final int i = readerIndex(docID); // find subreader num + // dispatch to subreader, reusing if possible + if (subVectors[i] == null) { + subVectors[i] = subReaders[i].termVectors(); + } + return subVectors[i].get(docID - starts[i]); + } + }; + } + @Override public final int numDocs() { // Don't call ensureOpen() here (it could affect performance) @@ -154,6 +171,23 @@ public final void document(int docID, StoredFieldVisitor visitor) throws IOExcep subReaders[i].document(docID - starts[i], visitor); // dispatch to subreader } + @Override + public final StoredFields storedFields() throws IOException { + ensureOpen(); + StoredFields[] subFields = new StoredFields[subReaders.length]; + return new StoredFields() { + @Override + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + final int i = readerIndex(docID); // find subreader num + // dispatch to subreader, reusing if possible + if (subFields[i] == null) { + subFields[i] = subReaders[i].storedFields(); + } + subFields[i].document(docID - starts[i], visitor); + } + }; + } + @Override public final int docFreq(Term term) throws IOException { ensureOpen(); diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java index 39abf4145b8a..86785b65460a 100644 --- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java +++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java @@ -3001,7 +3001,7 @@ public static Status.StoredFieldStatus testStoredFields( // Intentionally pull even deleted documents to // make sure they too are not corrupt: DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(); - storedFields.visitDocument(j, visitor); + storedFields.document(j, visitor); Document doc = visitor.getDocument(); if (liveDocs == null || liveDocs.get(j)) { status.docCount++; diff --git 
a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java index 5867357a164c..7220756d0cf0 100644 --- a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java @@ -35,14 +35,14 @@ public abstract class CodecReader extends LeafReader { protected CodecReader() {} /** - * Expert: retrieve thread-private StoredFieldsReader + * Expert: retrieve underlying StoredFieldsReader * * @lucene.internal */ public abstract StoredFieldsReader getFieldsReader(); /** - * Expert: retrieve thread-private TermVectorsReader + * Expert: retrieve underlying TermVectorsReader * * @lucene.internal */ @@ -83,24 +83,42 @@ protected CodecReader() {} */ public abstract KnnVectorsReader getVectorReader(); + // intentionally throw UOE for deprecated APIs: keep CodecReader clean! + // (IndexWriter should not be triggering threadlocals in any way) + @Override - public final void document(int docID, StoredFieldVisitor visitor) throws IOException { - checkBounds(docID); - getFieldsReader().visitDocument(docID, visitor); + @Deprecated + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + throw new UnsupportedOperationException("deprecated document access is not supported"); } @Override - public final Fields getTermVectors(int docID) throws IOException { - TermVectorsReader termVectorsReader = getTermVectorsReader(); - if (termVectorsReader == null) { - return null; - } - checkBounds(docID); - return termVectorsReader.get(docID); + @Deprecated + public Fields getTermVectors(int docID) throws IOException { + throw new UnsupportedOperationException("deprecated term vector access is not supported"); + } + + @Override + public final StoredFields storedFields() throws IOException { + final StoredFields reader = getFieldsReader(); + return new StoredFields() { + @Override + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + // Don't trust the codec to do proper checks + Objects.checkIndex(docID, maxDoc()); + reader.document(docID, visitor); + } + }; } - private void checkBounds(int docID) { - Objects.checkIndex(docID, maxDoc()); + @Override + public final TermVectors termVectors() throws IOException { + TermVectorsReader reader = getTermVectorsReader(); + if (reader == null) { + return TermVectors.EMPTY; + } else { + return reader; + } } @Override diff --git a/lucene/core/src/java/org/apache/lucene/index/DocValuesLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/DocValuesLeafReader.java index 99853d9abd87..31b50d4f381c 100644 --- a/lucene/core/src/java/org/apache/lucene/index/DocValuesLeafReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/DocValuesLeafReader.java @@ -73,6 +73,11 @@ public final Fields getTermVectors(int docID) throws IOException { throw new UnsupportedOperationException(); } + @Override + public final TermVectors termVectors() throws IOException { + throw new UnsupportedOperationException(); + } + @Override public final int numDocs() { throw new UnsupportedOperationException(); @@ -88,6 +93,11 @@ public final void document(int docID, StoredFieldVisitor visitor) throws IOExcep throw new UnsupportedOperationException(); } + @Override + public final StoredFields storedFields() throws IOException { + throw new UnsupportedOperationException(); + } + @Override protected final void doClose() throws IOException { throw new UnsupportedOperationException(); diff --git 
a/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java index e2a60291ab80..4859fd684dcb 100644 --- a/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java @@ -84,6 +84,18 @@ public FieldsProducer getPostingsReader() { return in.getPostingsReader(); } + @Override + @Deprecated + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + in.document(docID, visitor); + } + + @Override + @Deprecated + public Fields getTermVectors(int docID) throws IOException { + return in.getTermVectors(docID); + } + @Override public Bits getLiveDocs() { return in.getLiveDocs(); diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java index 8114bf3e3d57..11f8c2233520 100644 --- a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java @@ -363,6 +363,12 @@ public Fields getTermVectors(int docID) throws IOException { return in.getTermVectors(docID); } + @Override + public TermVectors termVectors() throws IOException { + ensureOpen(); + return in.termVectors(); + } + @Override public int numDocs() { // Don't call ensureOpen() here (it could affect performance) @@ -375,6 +381,12 @@ public int maxDoc() { return in.maxDoc(); } + @Override + public StoredFields storedFields() throws IOException { + ensureOpen(); + return in.storedFields(); + } + @Override public void document(int docID, StoredFieldVisitor visitor) throws IOException { ensureOpen(); diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java index 6a0fb033b766..400ea6c7b887 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java @@ -311,13 +311,19 @@ public final int hashCode() { /** * Retrieve term vectors for this document, or null if term vectors were not indexed. The returned * Fields instance acts like a single-document inverted index (the docID will be 0). + * + * @deprecated use {@link #termVectors()} to retrieve one or more documents */ + @Deprecated public abstract Fields getTermVectors(int docID) throws IOException; /** * Retrieve term vector for this document and field, or null if term vectors were not indexed. The * returned Fields instance acts like a single-document inverted index (the docID will be 0). + * + * @deprecated use {@link #termVectors()} to retrieve one or more documents */ + @Deprecated public final Terms getTermVector(int docID, String field) throws IOException { Fields vectors = getTermVectors(docID); if (vectors == null) { @@ -326,6 +332,26 @@ public final Terms getTermVector(int docID, String field) throws IOException { return vectors.terms(field); } + /** + * Returns a {@link TermVectors} reader for the term vectors of this index. + * + *

+   * <p>This call never returns {@code null}, even if no term vectors were indexed. The returned
+   * instance should only be used by a single thread.
+   *
+   * <p>Example:
+   *
+   * <pre class="prettyprint">
+   * TopDocs hits = searcher.search(query, 10);
+   * TermVectors termVectors = reader.termVectors();
+   * for (ScoreDoc hit : hits.scoreDocs) {
+   *   Fields vector = termVectors.get(hit.doc);
+   * }
+   * </pre>
+ * + * @throws IOException If there is a low-level IO error + */ + public abstract TermVectors termVectors() throws IOException; + /** * Returns the number of documents in this index. * @@ -354,7 +380,10 @@ public final int numDeletedDocs() { * Expert: visits the fields of a stored document, for custom processing/loading of each field. If * you simply want to load all fields, use {@link #document(int)}. If you want to load a subset, * use {@link DocumentStoredFieldVisitor}. + * + * @deprecated use {@link #storedFields()} to retrieve one or more documents */ + @Deprecated public abstract void document(int docID, StoredFieldVisitor visitor) throws IOException; /** @@ -371,10 +400,12 @@ public final int numDeletedDocs() { * * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error + * @deprecated use {@link #storedFields()} to retrieve one or more documents */ // TODO: we need a separate StoredField, so that the // Document returned here contains that class not // IndexableField + @Deprecated public final Document document(int docID) throws IOException { final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(); document(docID, visitor); @@ -384,13 +415,36 @@ public final Document document(int docID) throws IOException { /** * Like {@link #document(int)} but only loads the specified fields. Note that this is simply sugar * for {@link DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(Set)}. + * + * @deprecated use {@link #storedFields()} to retrieve one or more documents */ + @Deprecated public final Document document(int docID, Set fieldsToLoad) throws IOException { final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad); document(docID, visitor); return visitor.getDocument(); } + /** + * Returns a {@link StoredFields} reader for the stored fields of this index. + * + *

+   * <p>This call never returns {@code null}, even if no stored fields were indexed. The returned
+   * instance should only be used by a single thread.
+   *
+   * <p>Example:
+   *
+   * <pre class="prettyprint">
+   * TopDocs hits = searcher.search(query, 10);
+   * StoredFields storedFields = reader.storedFields();
+   * for (ScoreDoc hit : hits.scoreDocs) {
+   *   Document doc = storedFields.document(hit.doc);
+   * }
+   * </pre>
+ * + * @throws IOException If there is a low-level IO error + */ + public abstract StoredFields storedFields() throws IOException; + /** * Returns true if any documents have been deleted. Implementers should consider overriding this * method if {@link #maxDoc()} or {@link #numDocs()} are not constant-time operations. diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java b/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java index bff73381d177..5d32e5dbcf93 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java @@ -42,7 +42,7 @@ public interface IndexableFieldType { * True if this field's indexed form should be also stored into term vectors. * *

This builds a miniature inverted-index for this field which can be accessed in a - * document-oriented way from {@link IndexReader#getTermVector(int,String)}. + * document-oriented way from {@link TermVectors#get(int,String)}. * *

This option is illegal if {@link #indexOptions()} returns IndexOptions.NONE. */ diff --git a/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java index 788da1b9ace0..323177bbadbf 100644 --- a/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java @@ -273,6 +273,23 @@ public void document(int docID, StoredFieldVisitor visitor) throws IOException { } } + @Override + public StoredFields storedFields() throws IOException { + ensureOpen(); + StoredFields[] fields = new StoredFields[storedFieldsReaders.length]; + for (int i = 0; i < fields.length; i++) { + fields[i] = storedFieldsReaders[i].storedFields(); + } + return new StoredFields() { + @Override + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + for (StoredFields reader : fields) { + reader.document(docID, visitor); + } + } + }; + } + @Override public CacheHelper getCoreCacheHelper() { // ParallelReader instances can be short-lived, which would make caching trappy @@ -317,6 +334,30 @@ public Fields getTermVectors(int docID) throws IOException { return fields; } + @Override + public TermVectors termVectors() throws IOException { + ensureOpen(); + // TODO: optimize + return new TermVectors() { + @Override + public Fields get(int docID) throws IOException { + ParallelFields fields = null; + for (Map.Entry ent : tvFieldToReader.entrySet()) { + String fieldName = ent.getKey(); + Terms vector = ent.getValue().termVectors().get(docID, fieldName); + if (vector != null) { + if (fields == null) { + fields = new ParallelFields(); + } + fields.addField(fieldName, vector); + } + } + + return fields; + } + }; + } + @Override protected synchronized void doClose() throws IOException { IOException ioe = null; diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java index c13858c14ff4..8e9afb1fc28b 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.Objects; import java.util.Set; import java.util.concurrent.CopyOnWriteArraySet; import org.apache.lucene.codecs.Codec; @@ -244,16 +245,52 @@ public int maxDoc() { return si.info.maxDoc(); } + /* Support for deprecated threadlocal document/vectors APIs */ + @Override - public TermVectorsReader getTermVectorsReader() { + public final void document(int docID, StoredFieldVisitor visitor) throws IOException { + Objects.checkIndex(docID, maxDoc()); + getThreadLocalFieldsReader().document(docID, visitor); + } + + @Override + public final Fields getTermVectors(int docID) throws IOException { + Objects.checkIndex(docID, maxDoc()); + TermVectorsReader termVectorsReader = getThreadLocalTermVectorsReader(); + if (termVectorsReader == null) { + return null; + } + return termVectorsReader.get(docID); + } + + @Deprecated + private TermVectorsReader getThreadLocalTermVectorsReader() { ensureOpen(); return core.termVectorsLocal.get(); } + @Deprecated + private StoredFieldsReader getThreadLocalFieldsReader() { + ensureOpen(); + return core.fieldsReaderLocal.get(); + } + + /* end support for deprecated threadlocal document/vectors APIs */ + + @Override + public TermVectorsReader getTermVectorsReader() { + ensureOpen(); + if (core.termVectorsReaderOrig == 
null) { + return null; + } else { + return core.termVectorsReaderOrig.clone(); + } + } + @Override public StoredFieldsReader getFieldsReader() { ensureOpen(); - return core.fieldsReaderLocal.get(); + return core.fieldsReaderOrig.clone(); } @Override diff --git a/lucene/core/src/java/org/apache/lucene/index/SimpleMergedSegmentWarmer.java b/lucene/core/src/java/org/apache/lucene/index/SimpleMergedSegmentWarmer.java index 7f51cc835ffb..4c792cec63ed 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SimpleMergedSegmentWarmer.java +++ b/lucene/core/src/java/org/apache/lucene/index/SimpleMergedSegmentWarmer.java @@ -75,8 +75,8 @@ public void warm(LeafReader reader) throws IOException { } } - reader.document(0); - reader.getTermVectors(0); + reader.storedFields().document(0); + reader.termVectors().get(0); if (infoStream.isEnabled("SMSW")) { infoStream.message( diff --git a/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java b/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java index 034a91b960c7..8bdc14974330 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java +++ b/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java @@ -17,6 +17,7 @@ package org.apache.lucene.index; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -244,10 +245,16 @@ public void close() {} } private static StoredFieldsReader readerToStoredFieldsReader(final LeafReader reader) { + final StoredFields storedFields; + try { + storedFields = reader.storedFields(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } return new StoredFieldsReader() { @Override - public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { - reader.document(docID, visitor); + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + storedFields.document(docID, visitor); } @Override @@ -266,10 +273,16 @@ public void close() {} } private static TermVectorsReader readerToTermVectorsReader(final LeafReader reader) { + final TermVectors termVectors; + try { + termVectors = reader.termVectors(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } return new TermVectorsReader() { @Override public Fields get(int docID) throws IOException { - return reader.getTermVectors(docID); + return termVectors.get(docID); } @Override diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingCodecReader.java b/lucene/core/src/java/org/apache/lucene/index/SortingCodecReader.java index 320c89d96904..ded3360dee6f 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SortingCodecReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/SortingCodecReader.java @@ -402,8 +402,8 @@ public StoredFieldsReader getFieldsReader() { private StoredFieldsReader newStoredFieldsReader(StoredFieldsReader delegate) { return new StoredFieldsReader() { @Override - public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { - delegate.visitDocument(docMap.newToOld(docID), visitor); + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + delegate.document(docMap.newToOld(docID), visitor); } @Override diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java b/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java index 95165bdcd707..0e3d16bd0c98 100644 --- 
a/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java +++ b/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java @@ -113,7 +113,7 @@ void flush(SegmentWriteState state, Sorter.DocMap sortMap) throws IOException { CopyVisitor visitor = new CopyVisitor(sortWriter); for (int docID = 0; docID < state.segmentInfo.maxDoc(); docID++) { sortWriter.startDocument(); - reader.visitDocument(sortMap == null ? docID : sortMap.newToOld(docID), visitor); + reader.document(sortMap == null ? docID : sortMap.newToOld(docID), visitor); sortWriter.finishDocument(); } sortWriter.finish(state.segmentInfo.maxDoc()); diff --git a/lucene/core/src/java/org/apache/lucene/index/StoredFieldVisitor.java b/lucene/core/src/java/org/apache/lucene/index/StoredFieldVisitor.java index 5ff45f40c56b..27637f3af0fa 100644 --- a/lucene/core/src/java/org/apache/lucene/index/StoredFieldVisitor.java +++ b/lucene/core/src/java/org/apache/lucene/index/StoredFieldVisitor.java @@ -22,7 +22,7 @@ /** * Expert: provides a low-level means of accessing the stored field values in an index. See {@link - * IndexReader#document(int, StoredFieldVisitor)}. + * StoredFields#document(int, StoredFieldVisitor)}. * *

NOTE: a {@code StoredFieldVisitor} implementation should not try to load or visit other * stored documents in the same reader because the implementation of stored fields for most codecs @@ -30,7 +30,7 @@ * *

See {@link DocumentStoredFieldVisitor}, which is a StoredFieldVisitor that builds * the {@link Document} containing all stored fields. This is used by {@link - * IndexReader#document(int)}. + * StoredFields#document(int)}. * * @lucene.experimental */ diff --git a/lucene/core/src/java/org/apache/lucene/index/StoredFields.java b/lucene/core/src/java/org/apache/lucene/index/StoredFields.java new file mode 100644 index 000000000000..2657d96d1a71 --- /dev/null +++ b/lucene/core/src/java/org/apache/lucene/index/StoredFields.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.lucene.index; + +import java.io.IOException; +import java.util.Set; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.DocumentStoredFieldVisitor; +import org.apache.lucene.util.Bits; + +/** + * API for reading stored fields. + * + *

NOTE: This class is not thread-safe and should only be consumed in the thread where it + * was acquired. + */ +public abstract class StoredFields { + /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */ + protected StoredFields() {} + + /** + * Returns the stored fields of the nth Document in this + * index. This is just sugar for using {@link DocumentStoredFieldVisitor}. + * + *

NOTE: for performance reasons, this method does not check if the requested document + * is deleted, and therefore asking for a deleted document may yield unspecified results. Usually + * this is not required, however you can test if the doc is deleted by checking the {@link Bits} + * returned from {@link MultiBits#getLiveDocs}. + * + *

NOTE: only the content of a field is returned, if that field was stored during + * indexing. Metadata like boost, omitNorm, IndexOptions, tokenized, etc., are not preserved. + * + * @throws CorruptIndexException if the index is corrupt + * @throws IOException if there is a low-level IO error + */ + // TODO: we need a separate StoredField, so that the + // Document returned here contains that class not + // IndexableField + public final Document document(int docID) throws IOException { + final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(); + document(docID, visitor); + return visitor.getDocument(); + } + + /** + * Expert: visits the fields of a stored document, for custom processing/loading of each field. If + * you simply want to load all fields, use {@link #document(int)}. If you want to load a subset, + * use {@link DocumentStoredFieldVisitor}. + */ + public abstract void document(int docID, StoredFieldVisitor visitor) throws IOException; + + /** + * Like {@link #document(int)} but only loads the specified fields. Note that this is simply sugar + * for {@link DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(Set)}. + */ + public final Document document(int docID, Set fieldsToLoad) throws IOException { + final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad); + document(docID, visitor); + return visitor.getDocument(); + } +} diff --git a/lucene/core/src/java/org/apache/lucene/index/TermVectors.java b/lucene/core/src/java/org/apache/lucene/index/TermVectors.java new file mode 100644 index 000000000000..fe2e8b56681a --- /dev/null +++ b/lucene/core/src/java/org/apache/lucene/index/TermVectors.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.lucene.index; + +import java.io.IOException; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // javadocs + +/** + * API for reading term vectors. + * + *

NOTE: This class is not thread-safe and should only be consumed in the thread where it + * was acquired. + */ +public abstract class TermVectors { + + /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */ + protected TermVectors() {} + + /** + * Returns term vectors for this document, or null if term vectors were not indexed. + * + *

The returned Fields instance acts like a single-document inverted index (the docID will be + * 0). If offsets are available they are in an {@link OffsetAttribute} available from the {@link + * PostingsEnum}. + */ + public abstract Fields get(int doc) throws IOException; + + /** + * Retrieve term vector for this document and field, or null if term vectors were not indexed. + * + *

The returned Terms instance acts like a single-document inverted index (the docID will be + * 0). If offsets are available they are in an {@link OffsetAttribute} available from the {@link + * PostingsEnum}. + */ + public final Terms get(int doc, String field) throws IOException { + Fields vectors = get(doc); + if (vectors == null) { + return null; + } + return vectors.terms(field); + } + + /** Instance that never returns term vectors */ + public static final TermVectors EMPTY = + new TermVectors() { + @Override + public Fields get(int doc) { + return null; + } + }; +} diff --git a/lucene/core/src/java/org/apache/lucene/index/package-info.java b/lucene/core/src/java/org/apache/lucene/index/package-info.java index f2b385e9a21c..5dc4bd9ae6de 100644 --- a/lucene/core/src/java/org/apache/lucene/index/package-info.java +++ b/lucene/core/src/java/org/apache/lucene/index/package-info.java @@ -143,7 +143,8 @@ * // access indexed fields for an index segment * Fields fields = reader.fields(); * // access term vector fields for a specified document - * Fields fields = reader.getTermVectors(docid); + * TermVectors vectors = reader.termVectors(); + * Fields fields = vectors.get(docid); * * * Fields implements Java's Iterable interface, so it's easy to enumerate the list of fields: diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java index 1f114105d9ac..ce93bd9cef83 100644 --- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java +++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java @@ -40,6 +40,7 @@ import org.apache.lucene.index.QueryTimeout; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.search.similarities.BM25Similarity; @@ -382,7 +383,9 @@ public IndexReader getIndexReader() { * Sugar for .getIndexReader().document(docID) * * @see IndexReader#document(int) + * @deprecated Use {@link #storedFields()} to access fields for one or more documents */ + @Deprecated public Document doc(int docID) throws IOException { return reader.document(docID); } @@ -391,7 +394,9 @@ public Document doc(int docID) throws IOException { * Sugar for .getIndexReader().document(docID, fieldVisitor) * * @see IndexReader#document(int, StoredFieldVisitor) + * @deprecated Use {@link #storedFields()} to access fields for one or more documents */ + @Deprecated public void doc(int docID, StoredFieldVisitor fieldVisitor) throws IOException { reader.document(docID, fieldVisitor); } @@ -400,11 +405,38 @@ public void doc(int docID, StoredFieldVisitor fieldVisitor) throws IOException { * Sugar for .getIndexReader().document(docID, fieldsToLoad) * * @see IndexReader#document(int, Set) + * @deprecated Use {@link #storedFields()} to access fields for one or more documents */ + @Deprecated public Document doc(int docID, Set fieldsToLoad) throws IOException { return reader.document(docID, fieldsToLoad); } + /** + * Returns a {@link StoredFields} reader for the stored fields of this index. + * + *

+   * <p>Sugar for <code>.getIndexReader().storedFields()</code>
+   *
+   * <p>This call never returns {@code null}, even if no stored fields were indexed. The returned
+   * instance should only be used by a single thread.
+   *
+   * <p>Example:
+   *
+   * <pre class="prettyprint">
+   * TopDocs hits = searcher.search(query, 10);
+   * StoredFields storedFields = searcher.storedFields();
+   * for (ScoreDoc hit : hits.scoreDocs) {
+   *   Document doc = storedFields.document(hit.doc);
+   * }
+   * </pre>
+ * + * @throws IOException If there is a low-level IO error + * @see IndexReader#storedFields() + */ + public StoredFields storedFields() throws IOException { + return reader.storedFields(); + } + /** Expert: Set the Similarity implementation used by this IndexSearcher. */ public void setSimilarity(Similarity similarity) { this.similarity = similarity; diff --git a/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java b/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java index 983e5bb2e837..a42d531c2b29 100644 --- a/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java +++ b/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java @@ -17,6 +17,7 @@ package org.apache.lucene.search; import java.io.IOException; +import org.apache.lucene.index.StoredFields; /** * Collector decouples the score from the collected doc: the score computation is skipped entirely @@ -77,9 +78,8 @@ public interface LeafCollector { * swallow the exception and continue collection with the next leaf. * *

Note: This is called in an inner search loop. For good search performance, implementations - * of this method should not call {@link IndexSearcher#doc(int)} or {@link - * org.apache.lucene.index.IndexReader#document(int)} on every hit. Doing so can slow searches by - * an order of magnitude or more. + * of this method should not call {@link StoredFields#document} on every hit. Doing so can slow + * searches by an order of magnitude or more. */ void collect(int doc) throws IOException; diff --git a/lucene/core/src/java/org/apache/lucene/search/ScoreDoc.java b/lucene/core/src/java/org/apache/lucene/search/ScoreDoc.java index 57f7909c40d9..a4600c974db0 100644 --- a/lucene/core/src/java/org/apache/lucene/search/ScoreDoc.java +++ b/lucene/core/src/java/org/apache/lucene/search/ScoreDoc.java @@ -16,6 +16,8 @@ */ package org.apache.lucene.search; +import org.apache.lucene.index.StoredFields; + /** Holds one hit in {@link TopDocs}. */ public class ScoreDoc { @@ -25,7 +27,7 @@ public class ScoreDoc { /** * A hit document's number. * - * @see IndexSearcher#doc(int) + * @see StoredFields#document(int) */ public int doc; diff --git a/lucene/core/src/java/overview.html b/lucene/core/src/java/overview.html index 2b1c0a6bb5a9..e78eff90d442 100644 --- a/lucene/core/src/java/overview.html +++ b/lucene/core/src/java/overview.html @@ -49,8 +49,9 @@ ScoreDoc[] hits = isearcher.search(query, 10).scoreDocs; assertEquals(1, hits.length); // Iterate through the results: + StoredFields storedFields = isearcher.storedFields(); for (int i = 0; i < hits.length; i++) { - Document hitDoc = isearcher.doc(hits[i].doc); + Document hitDoc = storedFields.document(hits[i].doc); assertEquals("This is the text to be indexed.", hitDoc.get("fieldname")); } ireader.close(); diff --git a/lucene/core/src/test/org/apache/lucene/TestDemo.java b/lucene/core/src/test/org/apache/lucene/TestDemo.java index 349b47c074c2..6c608e1d0b12 100644 --- a/lucene/core/src/test/org/apache/lucene/TestDemo.java +++ b/lucene/core/src/test/org/apache/lucene/TestDemo.java @@ -27,6 +27,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.PhraseQuery; @@ -72,8 +73,9 @@ public void testDemo() throws IOException { assertEquals(1, hits.totalHits.value); // Iterate through the results. 
+ StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < hits.scoreDocs.length; i++) { - Document hitDoc = searcher.doc(hits.scoreDocs[i].doc); + Document hitDoc = storedFields.document(hits.scoreDocs[i].doc); assertEquals(text, hitDoc.get("fieldname")); } diff --git a/lucene/core/src/test/org/apache/lucene/TestSearch.java b/lucene/core/src/test/org/apache/lucene/TestSearch.java index 40b9203bb429..1ef629bd5db8 100644 --- a/lucene/core/src/test/org/apache/lucene/TestSearch.java +++ b/lucene/core/src/test/org/apache/lucene/TestSearch.java @@ -30,6 +30,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -116,8 +117,9 @@ private void doTestSearch(Random random, PrintWriter out, boolean useCompoundFil hits = searcher.search(query, 1000, sort).scoreDocs; out.println(hits.length + " total results"); + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < hits.length && i < 10; i++) { - Document d = searcher.doc(hits[i].doc); + Document d = storedFields.document(hits[i].doc); out.println(i + " " + hits[i].score + " " + d.get("contents")); } } diff --git a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java index 22ad98bf5e6f..e92401235310 100644 --- a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java +++ b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java @@ -30,6 +30,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -137,9 +138,10 @@ private void doTest(Random random, PrintWriter out, boolean useCompoundFiles, in private void printHits(PrintWriter out, ScoreDoc[] hits, IndexSearcher searcher) throws IOException { out.println(hits.length + " total results\n"); + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < hits.length; i++) { if (i < 10 || (i > 94 && i < 105)) { - Document d = searcher.doc(hits[i].doc); + Document d = storedFields.document(hits[i].doc); out.println(i + " " + d.get(ID_FIELD)); } } @@ -148,9 +150,10 @@ private void printHits(PrintWriter out, ScoreDoc[] hits, IndexSearcher searcher) private void checkHits(ScoreDoc[] hits, int expectedCount, IndexSearcher searcher) throws IOException { assertEquals("total results", expectedCount, hits.length); + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < hits.length; i++) { if (i < 10 || (i > 94 && i < 105)) { - Document d = searcher.doc(hits[i].doc); + Document d = storedFields.document(hits[i].doc); assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD)); } } diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90DocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90DocValuesFormat.java index ec5add255319..71597667ffbf 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90DocValuesFormat.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90DocValuesFormat.java @@ -55,6 +55,7 @@ import 
org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -234,8 +235,9 @@ private void doTestSparseDocValuesVsStoredFields() throws Exception { final SortedSetDocValues sortedSet = DocValues.getSortedSet(reader, "sorted_set"); + StoredFields storedFields = reader.storedFields(); for (int i = 0; i < reader.maxDoc(); ++i) { - final Document doc = reader.document(i); + final Document doc = storedFields.document(i); final IndexableField valueField = doc.getField("value"); final Long value = valueField == null ? null : valueField.numericValue().longValue(); @@ -675,11 +677,12 @@ private void doTestSortedNumericBlocksOfVariousBitsPerValue(LongSupplier counts) for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); SortedNumericDocValues docValues = DocValues.getSortedNumeric(r, "dv"); + StoredFields storedFields = r.storedFields(); for (int i = 0; i < r.maxDoc(); i++) { if (i > docValues.docID()) { docValues.nextDoc(); } - String[] expectedStored = r.document(i).getValues("stored"); + String[] expectedStored = storedFields.document(i).getValues("stored"); if (i < docValues.docID()) { assertEquals(0, expectedStored.length); } else { @@ -747,6 +750,7 @@ private void assertDVAdvance(Directory dir, int jumpStep) throws IOException { TestUtil.checkReader(ir); for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); + StoredFields storedFields = r.storedFields(); for (int jump = jumpStep; jump < r.maxDoc(); jump += jumpStep) { // Create a new instance each time to ensure jumps from the beginning @@ -761,7 +765,7 @@ private void assertDVAdvance(Directory dir, int jumpStep) throws IOException { + jump + " from #" + (docID - jump); - String storedValue = r.document(docID).get("stored"); + String storedValue = storedFields.document(docID).get("stored"); if (storedValue == null) { assertFalse("There should be no DocValue for " + base, docValues.advanceExact(docID)); } else { diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90StoredFieldsFormatHighCompression.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90StoredFieldsFormatHighCompression.java index ec72b6582a6c..8b42d3026ba3 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90StoredFieldsFormatHighCompression.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90StoredFieldsFormatHighCompression.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.BaseStoredFieldsFormatTestCase; @@ -57,8 +58,9 @@ public void testMixedCompressions() throws Exception { DirectoryReader ir = DirectoryReader.open(dir); assertEquals(10, ir.numDocs()); + StoredFields storedFields = ir.storedFields(); for (int i = 0; i < 10; i++) { - Document doc = ir.document(i); + Document doc = storedFields.document(i); assertEquals("value1", doc.get("field1")); assertEquals("value2", doc.get("field2")); } diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/compressing/TestCompressingTermVectorsFormat.java 
b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/compressing/TestCompressingTermVectorsFormat.java index 7cc6943185b5..f76b0bef7d33 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/compressing/TestCompressingTermVectorsFormat.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/compressing/TestCompressingTermVectorsFormat.java @@ -60,7 +60,7 @@ public void testNoOrds() throws Exception { doc.add(new Field("foo", "this is a test", ft)); iw.addDocument(doc); LeafReader ir = getOnlyLeafReader(iw.getReader()); - Terms terms = ir.getTermVector(0, "foo"); + Terms terms = ir.termVectors().get(0, "foo"); assertNotNull(terms); TermsEnum termsEnum = terms.iterator(); assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("this"))); diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java index d33eab12546c..0e93934855d7 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java @@ -43,6 +43,7 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -119,10 +120,11 @@ public DocValuesFormat getDocValuesFormatForField(String field) { Query query = new TermQuery(new Term("fieldname", "text")); TopDocs hits = isearcher.search(query, 1); assertEquals(1, hits.totalHits.value); + StoredFields storedFields = isearcher.storedFields(); // Iterate through the results: for (int i = 0; i < hits.scoreDocs.length; i++) { int hitDocID = hits.scoreDocs[i].doc; - Document hitDoc = isearcher.doc(hitDocID); + Document hitDoc = storedFields.document(hitDocID); assertEquals(text, hitDoc.get("fieldname")); assert ireader.leaves().size() == 1; NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv1"); diff --git a/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java b/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java index 352626a93b35..f161f3375d5e 100644 --- a/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java +++ b/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java @@ -53,7 +53,7 @@ public void testBinaryFieldInIndex() throws Exception { /** open a reader and fetch the document */ IndexReader reader = writer.getReader(); - Document docFromReader = reader.document(0); + Document docFromReader = reader.storedFields().document(0); assertTrue(docFromReader != null); /** fetch the binary stored field and compare its content with the original one */ diff --git a/lucene/core/src/test/org/apache/lucene/document/TestDocument.java b/lucene/core/src/test/org/apache/lucene/document/TestDocument.java index e30a48161e75..ef4eaa86438e 100644 --- a/lucene/core/src/test/org/apache/lucene/document/TestDocument.java +++ b/lucene/core/src/test/org/apache/lucene/document/TestDocument.java @@ -24,6 +24,7 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import 
org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.PhraseQuery; @@ -208,7 +209,7 @@ public void testGetValuesForIndexedDocument() throws Exception { ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); - doAssert(searcher.doc(hits[0].doc), true); + doAssert(searcher.storedFields().document(hits[0].doc), true); writer.close(); reader.close(); dir.close(); @@ -234,7 +235,7 @@ public void testPositionIncrementMultiFields() throws Exception { ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); - doAssert(searcher.doc(hits[0].doc), true); + doAssert(searcher.storedFields().document(hits[0].doc), true); writer.close(); reader.close(); dir.close(); @@ -313,8 +314,9 @@ public void testFieldSetValue() throws Exception { ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(3, hits.length); int result = 0; + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < 3; i++) { - Document doc2 = searcher.doc(hits[i].doc); + Document doc2 = storedFields.document(hits[i].doc); Field f = (Field) doc2.getField("id"); if (f.stringValue().equals("id1")) result |= 1; else if (f.stringValue().equals("id2")) result |= 2; @@ -350,7 +352,7 @@ public void testNumericFieldAsString() throws Exception { RandomIndexWriter iw = new RandomIndexWriter(random(), dir); iw.addDocument(doc); DirectoryReader ir = iw.getReader(); - Document sdoc = ir.document(0); + Document sdoc = ir.storedFields().document(0); assertEquals("5", sdoc.get("int")); assertNull(sdoc.get("somethingElse")); assertArrayEquals(new String[] {"5", "4"}, sdoc.getValues("int")); diff --git a/lucene/core/src/test/org/apache/lucene/document/TestFeatureSort.java b/lucene/core/src/test/org/apache/lucene/document/TestFeatureSort.java index ee42de7b7d53..a5b5806f3586 100644 --- a/lucene/core/src/test/org/apache/lucene/document/TestFeatureSort.java +++ b/lucene/core/src/test/org/apache/lucene/document/TestFeatureSort.java @@ -19,6 +19,7 @@ import java.io.IOException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Sort; @@ -68,14 +69,15 @@ public void testFeature() throws IOException { writer.close(); IndexSearcher searcher = newSearcher(ir); + StoredFields storedFields = searcher.storedFields(); Sort sort = new Sort(FeatureField.newFeatureSort("field", "name")); TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // numeric order - assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("1.3", searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("30.1", storedFields.document(td.scoreDocs[0].doc).get("value")); + assertEquals("4.2", storedFields.document(td.scoreDocs[1].doc).get("value")); + assertEquals("1.3", storedFields.document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -101,13 +103,14 @@ public void testFeatureMissing() throws IOException { IndexSearcher searcher = newSearcher(ir); Sort sort = new Sort(FeatureField.newFeatureSort("field", "name")); + StoredFields storedFields = searcher.storedFields(); TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // null is treated as 0 - 
assertEquals("4.2", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("1.3", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("4.2", storedFields.document(td.scoreDocs[0].doc).get("value")); + assertEquals("1.3", storedFields.document(td.scoreDocs[1].doc).get("value")); + assertNull(storedFields.document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -134,13 +137,14 @@ public void testFeatureMissingFieldInSegment() throws IOException { IndexSearcher searcher = newSearcher(ir); Sort sort = new Sort(FeatureField.newFeatureSort("field", "name")); + StoredFields storedFields = searcher.storedFields(); TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // null is treated as 0 - assertEquals("4.2", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("1.3", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("4.2", storedFields.document(td.scoreDocs[0].doc).get("value")); + assertEquals("1.3", storedFields.document(td.scoreDocs[1].doc).get("value")); + assertNull(storedFields.document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -168,13 +172,14 @@ public void testFeatureMissingFeatureNameInSegment() throws IOException { IndexSearcher searcher = newSearcher(ir); Sort sort = new Sort(FeatureField.newFeatureSort("field", "name")); + StoredFields storedFields = searcher.storedFields(); TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // null is treated as 0 - assertEquals("4.2", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("1.3", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("4.2", storedFields.document(td.scoreDocs[0].doc).get("value")); + assertEquals("1.3", storedFields.document(td.scoreDocs[1].doc).get("value")); + assertNull(storedFields.document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -208,17 +213,18 @@ public void testFeatureMultipleMissing() throws IOException { IndexSearcher searcher = newSearcher(ir); Sort sort = new Sort(FeatureField.newFeatureSort("field", "name")); + StoredFields storedFields = searcher.storedFields(); TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(7, td.totalHits.value); // null is treated as 0 - assertEquals("4.2", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("1.3", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[2].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[3].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[4].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[5].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[6].doc).get("value")); + assertEquals("4.2", storedFields.document(td.scoreDocs[0].doc).get("value")); + assertEquals("1.3", storedFields.document(td.scoreDocs[1].doc).get("value")); + assertNull(storedFields.document(td.scoreDocs[2].doc).get("value")); + assertNull(storedFields.document(td.scoreDocs[3].doc).get("value")); + assertNull(storedFields.document(td.scoreDocs[4].doc).get("value")); + assertNull(storedFields.document(td.scoreDocs[5].doc).get("value")); + assertNull(storedFields.document(td.scoreDocs[6].doc).get("value")); ir.close(); dir.close(); diff --git 
a/lucene/core/src/test/org/apache/lucene/document/TestField.java b/lucene/core/src/test/org/apache/lucene/document/TestField.java index 6aa5518f33bd..6bbcb3e45cc6 100644 --- a/lucene/core/src/test/org/apache/lucene/document/TestField.java +++ b/lucene/core/src/test/org/apache/lucene/document/TestField.java @@ -505,7 +505,7 @@ public void testIndexedBinaryField() throws Exception { IndexSearcher s = newSearcher(r); TopDocs hits = s.search(new TermQuery(new Term("binary", br)), 1); assertEquals(1, hits.totalHits.value); - Document storedDoc = s.doc(hits.scoreDocs[0].doc); + Document storedDoc = s.storedFields().document(hits.scoreDocs[0].doc); assertEquals(br, storedDoc.getField("binary").binaryValue()); r.close(); diff --git a/lucene/core/src/test/org/apache/lucene/document/TestLatLonPointDistanceSort.java b/lucene/core/src/test/org/apache/lucene/document/TestLatLonPointDistanceSort.java index 25e51891a607..f114aac85949 100644 --- a/lucene/core/src/test/org/apache/lucene/document/TestLatLonPointDistanceSort.java +++ b/lucene/core/src/test/org/apache/lucene/document/TestLatLonPointDistanceSort.java @@ -26,6 +26,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.SerialMergeScheduler; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -209,6 +210,7 @@ private void doRandomTest(int numDocs, int numQueries) throws IOException { IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); + StoredFields storedFields = reader.storedFields(); for (int i = 0; i < numQueries; i++) { double lat = GeoTestUtil.nextLatitude(); double lon = GeoTestUtil.nextLongitude(); @@ -217,7 +219,7 @@ private void doRandomTest(int numDocs, int numQueries) throws IOException { Result[] expected = new Result[reader.maxDoc()]; for (int doc = 0; doc < reader.maxDoc(); doc++) { - Document targetDoc = reader.document(doc); + Document targetDoc = storedFields.document(doc); final double distance; if (targetDoc.getField("lat") == null) { distance = missingValue; // missing diff --git a/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java b/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java index d43d2079da73..f03024a9860d 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java @@ -106,7 +106,7 @@ public void test() throws Exception { } DirectoryReader rd = DirectoryReader.open(dir); - Document sd = rd.document(numDocs - 1); + Document sd = rd.storedFields().document(numDocs - 1); assertNotNull(sd); assertEquals(1, sd.getFields().size()); BytesRef valueRef = sd.getBinaryValue("fld"); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java index c49aabfc1665..4ada596067a8 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java @@ -1547,8 +1547,9 @@ public void testFieldNamesChanged() throws IOException { IndexReader r3 = w.getReader(); w.close(); assertEquals(2, r3.numDocs()); + StoredFields storedFields = r3.storedFields(); for (int docID = 0; docID < 2; docID++) { - Document d = r3.document(docID); + Document d = storedFields.document(docID); if (d.get("id").equals("1")) { 
assertEquals("doc1 field1", d.get("f1")); } else { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java index fabb27c82be3..e014f69275ab 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java @@ -758,11 +758,12 @@ public void testSortedIndex() throws Exception { BinaryDocValues values = leafReader.getBinaryDocValues("number"); NumericDocValues sortValues = leafReader.getNumericDocValues("sort"); Bits liveDocs = leafReader.getLiveDocs(); + StoredFields storedFields = leafReader.storedFields(); long lastSortValue = Long.MIN_VALUE; for (int i = 0; i < leafReader.maxDoc(); i++) { - Document doc = leafReader.document(i); + Document doc = storedFields.document(i); OneSortDoc sortDoc = docs.get(Integer.parseInt(doc.get("id"))); assertEquals(i, values.nextDoc()); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java b/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java index 93064bd7062a..a0edb9df83a6 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java @@ -58,7 +58,7 @@ public void testBinary() throws IOException { bytes.length = 2; TopDocs docs = is.search(new TermQuery(new Term("bytes", bytes)), 5); assertEquals(1, docs.totalHits.value); - assertEquals("" + i, is.doc(docs.scoreDocs[0].doc).get("id")); + assertEquals("" + i, is.storedFields().document(docs.scoreDocs[0].doc).get("id")); } ir.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java index d85442986e9d..9168e3518e21 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java @@ -72,8 +72,9 @@ public void testFloatNorms() throws IOException { DirectoryReader open = DirectoryReader.open(dir); NumericDocValues norms = MultiDocValues.getNormValues(open, FLOAT_TEST_FIELD); assertNotNull(norms); + StoredFields storedFields = open.storedFields(); for (int i = 0; i < open.maxDoc(); i++) { - Document document = open.document(i); + Document document = storedFields.document(i); int expected = Integer.parseInt(document.get(FLOAT_TEST_FIELD).split(" ")[0]); assertEquals(i, norms.nextDoc()); assertEquals(expected, norms.longValue()); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCustomTermFreq.java b/lucene/core/src/test/org/apache/lucene/index/TestCustomTermFreq.java index 4679cc306e76..e42f33876306 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestCustomTermFreq.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestCustomTermFreq.java @@ -438,7 +438,7 @@ public void testTermVectors() throws Exception { IndexReader r = DirectoryReader.open(w); - Fields fields = r.getTermVectors(0); + Fields fields = r.termVectors().get(0); TermsEnum termsEnum = fields.terms("field").iterator(); assertTrue(termsEnum.seekExact(newBytesRef("bar"))); assertEquals(228, termsEnum.totalTermFreq()); @@ -456,7 +456,7 @@ public void testTermVectors() throws Exception { assertEquals(59, postings.freq()); assertEquals(NO_MORE_DOCS, postings.nextDoc()); - fields = r.getTermVectors(1); + fields = r.termVectors().get(1); termsEnum = fields.terms("field").iterator(); 
assertTrue(termsEnum.seekExact(newBytesRef("bar"))); assertEquals(140, termsEnum.totalTermFreq()); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java index c49493ac1662..4f6da4a767bc 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java @@ -794,11 +794,12 @@ protected void reindex( iwc.setMergePolicy(new LogByteSizeMergePolicy()); IndexWriter w = new IndexWriter(parallelDir, iwc); int maxDoc = reader.maxDoc(); + StoredFields storedFields = reader.storedFields(); // Slowly parse the stored field into a new doc values field: for (int i = 0; i < maxDoc; i++) { // TODO: is this still O(blockSize^2)? - Document oldDoc = reader.document(i); + Document oldDoc = storedFields.document(i); Document newDoc = new Document(); long value = Long.parseLong(oldDoc.get("text").split(" ")[1]); newDoc.add(new NumericDocValuesField("number", value)); @@ -850,12 +851,13 @@ protected void reindex( iwc.setMergePolicy(new LogByteSizeMergePolicy()); IndexWriter w = new IndexWriter(parallelDir, iwc); int maxDoc = reader.maxDoc(); + StoredFields storedFields = reader.storedFields(); if (oldSchemaGen <= 0) { // Must slowly parse the stored field into a new doc values field: for (int i = 0; i < maxDoc; i++) { // TODO: is this still O(blockSize^2)? - Document oldDoc = reader.document(i); + Document oldDoc = storedFields.document(i); Document newDoc = new Document(); long value = Long.parseLong(oldDoc.get("text").split(" ")[1]); newDoc.add(new NumericDocValuesField("number_" + newSchemaGen, value)); @@ -869,7 +871,7 @@ protected void reindex( for (int i = 0; i < maxDoc; i++) { // TODO: is this still O(blockSize^2)? assertEquals(i, oldValues.nextDoc()); - reader.document(i); + storedFields.document(i); Document newDoc = new Document(); newDoc.add(new NumericDocValuesField("number_" + newSchemaGen, oldValues.longValue())); w.addDocument(newDoc); @@ -904,9 +906,10 @@ protected void checkParallelReader(LeafReader r, LeafReader parR, long schemaGen return; } int maxDoc = r.maxDoc(); + StoredFields storedFields = r.storedFields(); boolean failed = false; for (int i = 0; i < maxDoc; i++) { - Document oldDoc = r.document(i); + Document oldDoc = storedFields.document(i); long value = Long.parseLong(oldDoc.get("text").split(" ")[1]); assertEquals(i, numbers.nextDoc()); if (value != numbers.longValue()) { @@ -978,12 +981,13 @@ protected void reindex( iwc.setMergePolicy(new LogByteSizeMergePolicy()); IndexWriter w = new IndexWriter(parallelDir, iwc); int maxDoc = reader.maxDoc(); + StoredFields storedFields = reader.storedFields(); if (oldSchemaGen <= 0) { // Must slowly parse the stored field into a new doc values field: for (int i = 0; i < maxDoc; i++) { // TODO: is this still O(blockSize^2)? - Document oldDoc = reader.document(i); + Document oldDoc = storedFields.document(i); Document newDoc = new Document(); long value = Long.parseLong(oldDoc.get("text").split(" ")[1]); newDoc.add(new NumericDocValuesField("number", newSchemaGen * value)); @@ -995,7 +999,7 @@ protected void reindex( assertNotNull("oldSchemaGen=" + oldSchemaGen, oldValues); for (int i = 0; i < maxDoc; i++) { // TODO: is this still O(blockSize^2)? 
- reader.document(i); + storedFields.document(i); Document newDoc = new Document(); assertEquals(i, oldValues.nextDoc()); long value = newSchemaGen * (oldValues.longValue() / oldSchemaGen); @@ -1035,9 +1039,10 @@ protected void checkParallelReader(LeafReader r, LeafReader parR, long schemaGen return; } int maxDoc = r.maxDoc(); + StoredFields storedFields = r.storedFields(); boolean failed = false; for (int i = 0; i < maxDoc; i++) { - Document oldDoc = r.document(i); + Document oldDoc = storedFields.document(i); long value = Long.parseLong(oldDoc.get("text").split(" ")[1]); value *= schemaGen; assertEquals(i, numbers.nextDoc()); @@ -1348,8 +1353,9 @@ public void testRandomMultipleSchemaGensSameField() throws Exception { NumericDocValues numbers = leaf.getNumericDocValues("number"); if (numbers != null) { int maxDoc = leaf.maxDoc(); + StoredFields storedFields = leaf.storedFields(); for (int i = 0; i < maxDoc; i++) { - Document doc = leaf.document(i); + Document doc = storedFields.document(i); long value = Long.parseLong(doc.get("text").split(" ")[1]); assertEquals(i, numbers.nextDoc()); long dvValue = numbers.longValue(); @@ -1516,9 +1522,10 @@ private static void checkAllNumberDVs( IndexReader r, String fieldName, boolean doThrow, int multiplier) throws IOException { NumericDocValues numbers = MultiDocValues.getNumericValues(r, fieldName); int maxDoc = r.maxDoc(); + StoredFields storedFields = r.storedFields(); boolean failed = false; for (int i = 0; i < maxDoc; i++) { - Document oldDoc = r.document(i); + Document oldDoc = storedFields.document(i); long value = multiplier * Long.parseLong(oldDoc.get("text").split(" ")[1]); assertEquals(i, numbers.nextDoc()); if (value != numbers.longValue()) { @@ -1560,9 +1567,10 @@ private static void testNumericDVSort(IndexSearcher s) throws IOException { TopDocs hits = s.search( new MatchAllDocsQuery(), 100, new Sort(new SortField("number", SortField.Type.LONG))); + StoredFields storedFields = s.storedFields(); long last = Long.MIN_VALUE; for (ScoreDoc scoreDoc : hits.scoreDocs) { - long value = Long.parseLong(s.doc(scoreDoc.doc).get("text").split(" ")[1]); + long value = Long.parseLong(storedFields.document(scoreDoc.doc).get("text").split(" ")[1]); assertTrue(value >= last); assertEquals(value, ((Long) ((FieldDoc) scoreDoc).fields[0]).longValue()); last = value; @@ -1581,8 +1589,9 @@ private static void testPointRangeQuery(IndexSearcher s) throws IOException { } TopDocs hits = s.search(LongPoint.newRangeQuery("number", min, max), 100); + StoredFields storedFields = s.storedFields(); for (ScoreDoc scoreDoc : hits.scoreDocs) { - long value = Long.parseLong(s.doc(scoreDoc.doc).get("text").split(" ")[1]); + long value = Long.parseLong(storedFields.document(scoreDoc.doc).get("text").split(" ")[1]); assertTrue(value >= min); assertTrue(value <= max); } @@ -1602,7 +1611,7 @@ public int compare(ScoreDoc a, ScoreDoc b) { numbers.advance(hit.doc); } assertEquals(hit.doc, numbers.docID()); - long value = Long.parseLong(s.doc(hit.doc).get("text").split(" ")[1]); + long value = Long.parseLong(storedFields.document(hit.doc).get("text").split(" ")[1]); assertEquals(value, numbers.longValue()); } } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java index 89b3b42677a7..d0c16b2667bf 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java @@ -66,16 +66,17 @@ public 
void testDocument() throws IOException { DirectoryReader reader = DirectoryReader.open(dir); assertTrue(reader != null); assertTrue(reader instanceof StandardDirectoryReader); + StoredFields storedFields = reader.storedFields(); - Document newDoc1 = reader.document(0); + Document newDoc1 = storedFields.document(0); assertTrue(newDoc1 != null); assertTrue( DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); - Document newDoc2 = reader.document(1); + Document newDoc2 = storedFields.document(1); assertTrue(newDoc2 != null); assertTrue( DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); - Terms vector = reader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY); + Terms vector = reader.termVectors().get(0).terms(DocHelper.TEXT_FIELD_2_KEY); assertNotNull(vector); reader.close(); @@ -393,7 +394,7 @@ public void testBinaryFields() throws IOException { writer.addDocument(doc); writer.close(); DirectoryReader reader = DirectoryReader.open(dir); - Document doc2 = reader.document(reader.maxDoc() - 1); + Document doc2 = reader.storedFields().document(reader.maxDoc() - 1); IndexableField[] fields = doc2.getFields("bin1"); assertNotNull(fields); assertEquals(1, fields.length); @@ -416,7 +417,7 @@ public void testBinaryFields() throws IOException { writer.forceMerge(1); writer.close(); reader = DirectoryReader.open(dir); - doc2 = reader.document(reader.maxDoc() - 1); + doc2 = reader.storedFields().document(reader.maxDoc() - 1); fields = doc2.getFields("bin1"); assertNotNull(fields); assertEquals(1, fields.length); @@ -612,10 +613,12 @@ public static void assertIndexEquals(DirectoryReader index1, DirectoryReader ind } // check stored fields + StoredFields storedFields1 = index1.storedFields(); + StoredFields storedFields2 = index2.storedFields(); for (int i = 0; i < index1.maxDoc(); i++) { if (liveDocs1 == null || liveDocs1.get(i)) { - Document doc1 = index1.document(i); - Document doc2 = index2.document(i); + Document doc1 = storedFields1.document(i); + Document doc2 = storedFields2.document(i); List field1 = doc1.getFields(); List field2 = doc2.getFields(); assertEquals( @@ -998,11 +1001,11 @@ public void testOOBDocID() throws Exception { writer.addDocument(new Document()); DirectoryReader r = DirectoryReader.open(writer); writer.close(); - r.document(0); + r.storedFields().document(0); expectThrows( IllegalArgumentException.class, () -> { - r.document(1); + r.storedFields().document(1); }); r.close(); dir.close(); @@ -1084,9 +1087,9 @@ public void testLoadCertainFields() throws Exception { DirectoryReader r = writer.getReader(); writer.close(); Set fieldsToLoad = new HashSet<>(); - assertEquals(0, r.document(0, fieldsToLoad).getFields().size()); + assertEquals(0, r.storedFields().document(0, fieldsToLoad).getFields().size()); fieldsToLoad.add("field1"); - Document doc2 = r.document(0, fieldsToLoad); + Document doc2 = r.storedFields().document(0, fieldsToLoad); assertEquals(1, doc2.getFields().size()); assertEquals("foobar", doc2.get("field1")); r.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java index 4dd322b09c92..8221d5399ab5 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java @@ -27,6 +27,7 @@ import java.util.Random; import java.util.Set; import org.apache.lucene.document.Document; 
+import org.apache.lucene.document.DocumentStoredFieldVisitor; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.NumericDocValuesField; @@ -132,7 +133,9 @@ private void doTestReopenWithCommit(Random random, Directory dir, boolean withRe if (i > 0) { int k = i - 1; int n = j + k * M; - Document prevItereationDoc = reader.document(n); + final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(); + reader.storedFields().document(n, visitor); + Document prevItereationDoc = visitor.getDocument(); assertNotNull(prevItereationDoc); String id = prevItereationDoc.get("id"); assertEquals(k + "_" + j, id); @@ -285,7 +288,7 @@ public void run() throws Exception { 1000) .scoreDocs; if (hits.length > 0) { - searcher.doc(hits[0].doc); + searcher.storedFields().document(hits[0].doc); } if (refreshed != r) { refreshed.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java index 90195ede06ae..be130c2e98a9 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java @@ -258,7 +258,10 @@ private SegmentCommitInfo merge( private void printSegment(PrintWriter out, SegmentCommitInfo si) throws Exception { SegmentReader reader = new SegmentReader(si, Version.LATEST.major, newIOContext(random())); - for (int i = 0; i < reader.numDocs(); i++) out.println(reader.document(i)); + StoredFields storedFields = reader.storedFields(); + for (int i = 0; i < reader.numDocs(); i++) { + out.println(storedFields.document(i)); + } for (FieldInfo fieldInfo : reader.getFieldInfos()) { if (fieldInfo.getIndexOptions() == IndexOptions.NONE) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java index 9f8def1a864b..3ff0664193fc 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java @@ -212,10 +212,11 @@ public void testDocValuesUnstored() throws IOException { FieldInfo dvInfo = fi.fieldInfo("dv"); assertTrue(dvInfo.getDocValuesType() != DocValuesType.NONE); NumericDocValues dv = MultiDocValues.getNumericValues(r, "dv"); + StoredFields storedFields = r.storedFields(); for (int i = 0; i < 50; i++) { assertEquals(i, dv.nextDoc()); assertEquals(i, dv.longValue()); - Document d = r.document(i); + Document d = storedFields.document(i); // cannot use d.get("dv") due to another bug! 
assertNull(d.getField("dv")); assertEquals(Integer.toString(i), d.get("docId")); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java index caf830101e6a..e8bdecf46f1b 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java @@ -82,7 +82,7 @@ public void testAddDocument() throws Exception { // After adding the document, we should be able to read it back in SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random())); assertTrue(reader != null); - Document doc = reader.document(0); + Document doc = reader.storedFields().document(0); assertTrue(doc != null); // System.out.println("Document: " + doc); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java index 7a240cef0792..0c984fce0e4a 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java @@ -90,7 +90,7 @@ public void test() throws IOException { assertTrue(dir != null); assertTrue(fieldInfos != null); IndexReader reader = DirectoryReader.open(dir); - Document doc = reader.document(0); + Document doc = reader.storedFields().document(0); assertTrue(doc != null); assertTrue(doc.getField(DocHelper.TEXT_FIELD_1_KEY) != null); @@ -114,7 +114,7 @@ public void test() throws IOException { assertTrue(field.fieldType().indexOptions() == IndexOptions.DOCS); DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY); - reader.document(0, visitor); + reader.storedFields().document(0, visitor); final List fields = visitor.getDocument().getFields(); assertEquals(1, fields.size()); assertEquals(DocHelper.TEXT_FIELD_3_KEY, fields.get(0).name()); @@ -214,9 +214,10 @@ public void testExceptions() throws Throwable { boolean exc = false; + StoredFields storedFields = reader.storedFields(); for (int i = 0; i < 2; i++) { try { - reader.document(i); + storedFields.document(i); } catch ( @SuppressWarnings("unused") IOException ioe) { @@ -224,7 +225,7 @@ public void testExceptions() throws Throwable { exc = true; } try { - reader.document(i); + storedFields.document(i); } catch ( @SuppressWarnings("unused") IOException ioe) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java index e273fb64d298..df37ac70007a 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java @@ -1770,6 +1770,7 @@ public void testRandom1() throws IOException { // Now check that the index is consistent IndexSearcher searcher = newSearcher(reader); + StoredFields storedFields = reader.storedFields(); for (int i = 0; i < numDocs; ++i) { TermQuery termQuery = new TermQuery(new Term("id", Integer.toString(i))); final TopDocs topDocs = searcher.search(termQuery, 1); @@ -1780,7 +1781,7 @@ public void testRandom1() throws IOException { NumericDocValues values = MultiDocValues.getNumericValues(reader, "id"); assertEquals(topDocs.scoreDocs[0].doc, values.advance(topDocs.scoreDocs[0].doc)); assertEquals(i, values.longValue()); - Document document = reader.document(topDocs.scoreDocs[0].doc); + Document document = storedFields.document(topDocs.scoreDocs[0].doc); 
assertEquals(Integer.toString(i), document.get("id")); } } @@ -1821,6 +1822,7 @@ public void testMultiValuedRandom1() throws IOException { DirectoryReader reader = DirectoryReader.open(w); // Now check that the index is consistent IndexSearcher searcher = newSearcher(reader); + StoredFields storedFields = reader.storedFields(); for (int i = 0; i < numDocs; ++i) { TermQuery termQuery = new TermQuery(new Term("id", Integer.toString(i))); final TopDocs topDocs = searcher.search(termQuery, 1); @@ -1831,7 +1833,7 @@ public void testMultiValuedRandom1() throws IOException { NumericDocValues values = MultiDocValues.getNumericValues(reader, "id"); assertEquals(topDocs.scoreDocs[0].doc, values.advance(topDocs.scoreDocs[0].doc)); assertEquals(i, values.longValue()); - Document document = reader.document(topDocs.scoreDocs[0].doc); + Document document = storedFields.document(topDocs.scoreDocs[0].doc); assertEquals(Integer.toString(i), document.get("id")); } } @@ -2634,7 +2636,7 @@ public void testRandom3() throws Exception { System.out.println("TEST: full index:"); SortedDocValues docValues = MultiDocValues.getSortedValues(r2, "bytes"); for(int i=0;i= 0); LeafReaderContext leafReaderContext = reader.leaves().get(i); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java index 6fdb906979ec..079448fb2f13 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java @@ -48,8 +48,9 @@ public void testMaxByteNorms() throws IOException { DirectoryReader open = DirectoryReader.open(dir); NumericDocValues normValues = MultiDocValues.getNormValues(open, BYTE_TEST_FIELD); assertNotNull(normValues); + StoredFields storedFields = open.storedFields(); for (int i = 0; i < open.maxDoc(); i++) { - Document document = open.document(i); + Document document = storedFields.document(i); int expected = Integer.parseInt(document.get(BYTE_TEST_FIELD).split(" ")[0]); assertEquals(i, normValues.nextDoc()); assertEquals(expected, normValues.longValue()); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java index 0c801fa10c32..4a54f6b1cf2d 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java @@ -755,8 +755,9 @@ public void testSegmentMerges() throws Exception { if (VERBOSE) { System.out.println("TEST: maxDoc=" + r.maxDoc()); } + StoredFields storedFields = r.storedFields(); for (int i = 0; i < r.maxDoc(); i++) { - Document rdoc = r.document(i); + Document rdoc = storedFields.document(i); assertEquals(i, ndv.nextDoc()); assertEquals("docid=" + i + " has wrong ndv value; doc=" + rdoc, value, ndv.longValue()); } @@ -904,11 +905,12 @@ public void testSortedIndex() throws Exception { NumericDocValues values = leafReader.getNumericDocValues("number"); NumericDocValues sortValues = leafReader.getNumericDocValues("sort"); Bits liveDocs = leafReader.getLiveDocs(); + StoredFields storedFields = leafReader.storedFields(); long lastSortValue = Long.MIN_VALUE; for (int i = 0; i < leafReader.maxDoc(); i++) { - Document doc = leafReader.document(i); + Document doc = storedFields.document(i); OneSortDoc sortDoc = docs.get(Integer.parseInt(doc.get("id"))); assertEquals(i, values.nextDoc()); @@ -1027,6 +1029,7 @@ public void testManyReopensAndFields() 
throws Exception { for (LeafReaderContext context : reader.leaves()) { LeafReader r = context.reader(); Bits liveDocs = r.getLiveDocs(); + StoredFields storedFields = r.storedFields(); for (int field = 0; field < fieldValues.length; field++) { String f = "f" + field; NumericDocValues ndv = r.getNumericDocValues(f); @@ -1039,13 +1042,13 @@ public void testManyReopensAndFields() throws Exception { "invalid value for docID=" + doc + " id=" - + r.document(doc).get("id") + + storedFields.document(doc).get("id") + ", field=" + f + ", reader=" + r + " doc=" - + r.document(doc), + + storedFields.document(doc), fieldValues[field], ndv.longValue()); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java index c4f90c4bb7b4..ee5fb94065e0 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java @@ -209,13 +209,13 @@ public void testCloseInnerReader() throws Exception { expectThrows( AlreadyClosedException.class, () -> { - psub.document(0); + psub.storedFields().document(0); }); expectThrows( AlreadyClosedException.class, () -> { - pr.document(0); + pr.storedFields().document(0); }); // noop: @@ -299,10 +299,10 @@ public void testIgnoreStoredFields() throws IOException { ParallelCompositeReader pr = new ParallelCompositeReader( false, new CompositeReader[] {ir1, ir2}, new CompositeReader[] {ir1}); - assertEquals("v1", pr.document(0).get("f1")); - assertEquals("v1", pr.document(0).get("f2")); - assertNull(pr.document(0).get("f3")); - assertNull(pr.document(0).get("f4")); + assertEquals("v1", pr.storedFields().document(0).get("f1")); + assertEquals("v1", pr.storedFields().document(0).get("f2")); + assertNull(pr.storedFields().document(0).get("f3")); + assertNull(pr.storedFields().document(0).get("f4")); // check that fields are there assertNotNull(MultiTerms.getTerms(pr, "f1")); assertNotNull(MultiTerms.getTerms(pr, "f2")); @@ -312,10 +312,10 @@ public void testIgnoreStoredFields() throws IOException { // no stored fields at all pr = new ParallelCompositeReader(false, new CompositeReader[] {ir2}, new CompositeReader[0]); - assertNull(pr.document(0).get("f1")); - assertNull(pr.document(0).get("f2")); - assertNull(pr.document(0).get("f3")); - assertNull(pr.document(0).get("f4")); + assertNull(pr.storedFields().document(0).get("f1")); + assertNull(pr.storedFields().document(0).get("f2")); + assertNull(pr.storedFields().document(0).get("f3")); + assertNull(pr.storedFields().document(0).get("f4")); // check that fields are there assertNull(MultiTerms.getTerms(pr, "f1")); assertNull(MultiTerms.getTerms(pr, "f2")); @@ -326,10 +326,10 @@ public void testIgnoreStoredFields() throws IOException { // without overlapping pr = new ParallelCompositeReader(true, new CompositeReader[] {ir2}, new CompositeReader[] {ir1}); - assertEquals("v1", pr.document(0).get("f1")); - assertEquals("v1", pr.document(0).get("f2")); - assertNull(pr.document(0).get("f3")); - assertNull(pr.document(0).get("f4")); + assertEquals("v1", pr.storedFields().document(0).get("f1")); + assertEquals("v1", pr.storedFields().document(0).get("f2")); + assertNull(pr.storedFields().document(0).get("f3")); + assertNull(pr.storedFields().document(0).get("f4")); // check that fields are there assertNull(MultiTerms.getTerms(pr, "f1")); assertNull(MultiTerms.getTerms(pr, "f2")); @@ -380,10 +380,12 @@ private void queryTest(Query 
query) throws IOException { ScoreDoc[] parallelHits = parallel.search(query, 1000).scoreDocs; ScoreDoc[] singleHits = single.search(query, 1000).scoreDocs; assertEquals(parallelHits.length, singleHits.length); + StoredFields parallelFields = parallel.storedFields(); + StoredFields singleFields = single.storedFields(); for (int i = 0; i < parallelHits.length; i++) { assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f); - Document docParallel = parallel.doc(parallelHits[i].doc); - Document docSingle = single.doc(singleHits[i].doc); + Document docParallel = parallelFields.document(parallelHits[i].doc); + Document docSingle = singleFields.document(singleHits[i].doc); assertEquals(docParallel.get("f1"), docSingle.get("f1")); assertEquals(docParallel.get("f2"), docSingle.get("f2")); assertEquals(docParallel.get("f3"), docSingle.get("f3")); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java index 170c86097cfe..4df09de6a286 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java @@ -144,7 +144,7 @@ public void testCloseInnerReader() throws Exception { expectThrows( AlreadyClosedException.class, () -> { - pr.document(0); + pr.storedFields().document(0); }); // noop: @@ -200,10 +200,10 @@ public void testIgnoreStoredFields() throws IOException { // with overlapping ParallelLeafReader pr = new ParallelLeafReader(false, new LeafReader[] {ir1, ir2}, new LeafReader[] {ir1}); - assertEquals("v1", pr.document(0).get("f1")); - assertEquals("v1", pr.document(0).get("f2")); - assertNull(pr.document(0).get("f3")); - assertNull(pr.document(0).get("f4")); + assertEquals("v1", pr.storedFields().document(0).get("f1")); + assertEquals("v1", pr.storedFields().document(0).get("f2")); + assertNull(pr.storedFields().document(0).get("f3")); + assertNull(pr.storedFields().document(0).get("f4")); // check that fields are there assertNotNull(pr.terms("f1")); assertNotNull(pr.terms("f2")); @@ -213,10 +213,10 @@ public void testIgnoreStoredFields() throws IOException { // no stored fields at all pr = new ParallelLeafReader(false, new LeafReader[] {ir2}, new LeafReader[0]); - assertNull(pr.document(0).get("f1")); - assertNull(pr.document(0).get("f2")); - assertNull(pr.document(0).get("f3")); - assertNull(pr.document(0).get("f4")); + assertNull(pr.storedFields().document(0).get("f1")); + assertNull(pr.storedFields().document(0).get("f2")); + assertNull(pr.storedFields().document(0).get("f3")); + assertNull(pr.storedFields().document(0).get("f4")); // check that fields are there assertNull(pr.terms("f1")); assertNull(pr.terms("f2")); @@ -226,10 +226,10 @@ public void testIgnoreStoredFields() throws IOException { // without overlapping pr = new ParallelLeafReader(true, new LeafReader[] {ir2}, new LeafReader[] {ir1}); - assertEquals("v1", pr.document(0).get("f1")); - assertEquals("v1", pr.document(0).get("f2")); - assertNull(pr.document(0).get("f3")); - assertNull(pr.document(0).get("f4")); + assertEquals("v1", pr.storedFields().document(0).get("f1")); + assertEquals("v1", pr.storedFields().document(0).get("f2")); + assertNull(pr.storedFields().document(0).get("f3")); + assertNull(pr.storedFields().document(0).get("f4")); // check that fields are there assertNull(pr.terms("f1")); assertNull(pr.terms("f2")); @@ -252,10 +252,12 @@ private void queryTest(Query query) throws IOException { ScoreDoc[] 
parallelHits = parallel.search(query, 1000).scoreDocs; ScoreDoc[] singleHits = single.search(query, 1000).scoreDocs; assertEquals(parallelHits.length, singleHits.length); + StoredFields parallelFields = parallel.storedFields(); + StoredFields singleFields = single.storedFields(); for (int i = 0; i < parallelHits.length; i++) { assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f); - Document docParallel = parallel.doc(parallelHits[i].doc); - Document docSingle = single.doc(singleHits[i].doc); + Document docParallel = parallelFields.document(parallelHits[i].doc); + Document docSingle = singleFields.document(singleHits[i].doc); assertEquals(docParallel.get("f1"), docSingle.get("f1")); assertEquals(docParallel.get("f2"), docSingle.get("f2")); assertEquals(docParallel.get("f3"), docSingle.get("f3")); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java index 53716f732963..8bcb2bff305c 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java @@ -67,7 +67,7 @@ public void testMixupDocs() throws Exception { writer.addDocument(doc); DirectoryReader reader = writer.getReader(); - Terms terms = reader.getTermVector(1, "field"); + Terms terms = reader.termVectors().get(1, "field"); assert terms != null; TermsEnum termsEnum = terms.iterator(); assertTrue(termsEnum.seekExact(new BytesRef("withPayload"))); @@ -109,7 +109,7 @@ public void testMixupMultiValued() throws Exception { doc.add(field3); writer.addDocument(doc); DirectoryReader reader = writer.getReader(); - Terms terms = reader.getTermVector(0, "field"); + Terms terms = reader.termVectors().get(0, "field"); assert terms != null; TermsEnum termsEnum = terms.iterator(); assertTrue(termsEnum.seekExact(new BytesRef("withPayload"))); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java index 8bb77fcb6989..125537b30cd9 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java @@ -156,9 +156,10 @@ public void doTestNumbers(boolean withPayloads) throws Exception { for (String term : terms) { PostingsEnum dp = MultiTerms.getTermPostingsEnum(reader, "numbers", new BytesRef(term)); + StoredFields storedFields = reader.storedFields(); int doc; while ((doc = dp.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { - String storedNumbers = reader.document(doc).get("numbers"); + String storedNumbers = storedFields.document(doc).get("numbers"); int freq = dp.freq(); for (int i = 0; i < freq; i++) { dp.nextPosition(); @@ -184,11 +185,12 @@ public void doTestNumbers(boolean withPayloads) throws Exception { for (int j = 0; j < numSkippingTests; j++) { int num = TestUtil.nextInt(random(), 100, Math.min(numDocs - 1, 999)); PostingsEnum dp = MultiTerms.getTermPostingsEnum(reader, "numbers", new BytesRef("hundred")); + StoredFields storedFields = reader.storedFields(); int doc = dp.advance(num); assertEquals(num, doc); int freq = dp.freq(); for (int i = 0; i < freq; i++) { - String storedNumbers = reader.document(doc).get("numbers"); + String storedNumbers = storedFields.document(doc).get("numbers"); dp.nextPosition(); int start = dp.startOffset(); assert start >= 0; diff --git a/lucene/core/src/test/org/apache/lucene/index/TestReadOnlyIndex.java 
b/lucene/core/src/test/org/apache/lucene/index/TestReadOnlyIndex.java index d2be8c176c59..9ea726f8abff 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestReadOnlyIndex.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestReadOnlyIndex.java @@ -89,8 +89,9 @@ private Void doTestReadOnlyIndex() throws Exception { TopDocs hits = isearcher.search(query, 1); assertEquals(1, hits.totalHits.value); // Iterate through the results: + StoredFields storedFields = isearcher.storedFields(); for (int i = 0; i < hits.scoreDocs.length; i++) { - Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc); + Document hitDoc = storedFields.document(hits.scoreDocs[i].doc); assertEquals(text, hitDoc.get("fieldname")); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java index 63b697da2c9d..72f65c67c5c8 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java @@ -117,12 +117,12 @@ public void testMerge() throws IOException { newIOContext(random())); assertTrue(mergedReader != null); assertTrue(mergedReader.numDocs() == 2); - Document newDoc1 = mergedReader.document(0); + Document newDoc1 = mergedReader.storedFields().document(0); assertTrue(newDoc1 != null); // There are 2 unstored fields on the document assertTrue( DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); - Document newDoc2 = mergedReader.document(1); + Document newDoc2 = mergedReader.storedFields().document(1); assertTrue(newDoc2 != null); assertTrue( DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); @@ -143,7 +143,7 @@ public void testMerge() throws IOException { // System.out.println("stored size: " + stored.size()); assertEquals("We do not have 3 fields that were indexed with term vector", 3, tvCount); - Terms vector = mergedReader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY); + Terms vector = mergedReader.termVectors().get(0).terms(DocHelper.TEXT_FIELD_2_KEY); assertNotNull(vector); assertEquals(3, vector.size()); TermsEnum termsEnum = vector.iterator(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java index da8abbaf5c04..bdc6395cb3bd 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java @@ -62,7 +62,7 @@ public void test() { public void testDocument() throws IOException { assertTrue(reader.numDocs() == 1); assertTrue(reader.maxDoc() >= 1); - Document result = reader.document(0); + Document result = reader.storedFields().document(0); assertTrue(result != null); // There are 2 unstored fields on the document that are not preserved across writing assertTrue( @@ -186,7 +186,7 @@ public static void checkNorms(LeafReader reader) throws IOException { } public void testTermVectors() throws IOException { - Terms result = reader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY); + Terms result = reader.termVectors().get(0).terms(DocHelper.TEXT_FIELD_2_KEY); assertNotNull(result); assertEquals(3, result.size()); TermsEnum termsEnum = result.iterator(); @@ -197,7 +197,7 @@ public void testTermVectors() throws IOException { assertTrue(freq > 0); } - Fields results = reader.getTermVectors(0); + Fields results = reader.termVectors().get(0); assertTrue(results != null); 
assertEquals("We do not have 3 term freq vectors", 3, results.size()); } @@ -208,25 +208,25 @@ public void testOutOfBoundsAccess() throws IOException { expectThrows( IndexOutOfBoundsException.class, () -> { - reader.document(-1); + reader.storedFields().document(-1); }); expectThrows( IndexOutOfBoundsException.class, () -> { - reader.getTermVectors(-1); + reader.termVectors().get(-1); }); expectThrows( IndexOutOfBoundsException.class, () -> { - reader.document(numDocs); + reader.storedFields().document(numDocs); }); expectThrows( IndexOutOfBoundsException.class, () -> { - reader.getTermVectors(numDocs); + reader.termVectors().get(numDocs); }); } } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentToThreadMapping.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentToThreadMapping.java index 8eaee902c0ee..62d62d538a03 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentToThreadMapping.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentToThreadMapping.java @@ -71,6 +71,11 @@ public Fields getTermVectors(int doc) { return null; } + @Override + public TermVectors termVectors() { + return TermVectors.EMPTY; + } + @Override public NumericDocValues getNumericDocValues(String field) { return null; @@ -123,6 +128,14 @@ protected void doClose() {} @Override public void document(int doc, StoredFieldVisitor visitor) {} + @Override + public StoredFields storedFields() { + return new StoredFields() { + @Override + public void document(int doc, StoredFieldVisitor visitor) {} + }; + } + @Override public void checkIntegrity() throws IOException {} diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java index a68974810dba..1df06462d0e5 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java @@ -220,9 +220,12 @@ public void testFieldBasedRetention() throws IOException { assertEquals(1, reader.numDocs()); assertEquals(3, reader.maxDoc()); Set versions = new HashSet<>(); - versions.add(reader.document(0, Collections.singleton("version")).get("version")); - versions.add(reader.document(1, Collections.singleton("version")).get("version")); - versions.add(reader.document(2, Collections.singleton("version")).get("version")); + versions.add( + reader.storedFields().document(0, Collections.singleton("version")).get("version")); + versions.add( + reader.storedFields().document(1, Collections.singleton("version")).get("version")); + versions.add( + reader.storedFields().document(2, Collections.singleton("version")).get("version")); assertTrue(versions.contains("5")); assertTrue(versions.contains("4")); assertTrue(versions.contains("3")); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSortingCodecReader.java b/lucene/core/src/test/org/apache/lucene/index/TestSortingCodecReader.java index db9c7cf1f511..72c864f9e2a0 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSortingCodecReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSortingCodecReader.java @@ -278,13 +278,15 @@ public void testSortOnAddIndicesRandom() throws IOException { assertEquals(1, vectorValue.length); assertEquals((float) ids.longValue(), vectorValue[0], 0.001f); - Fields termVectors = leaf.getTermVectors(idNext); + Fields termVectors = leaf.termVectors().get(idNext); assertTrue( 
termVectors .terms("term_vectors") .iterator() .seekExact(new BytesRef("test" + ids.longValue()))); - assertEquals(Long.toString(ids.longValue()), leaf.document(idNext).get("string_id")); + assertEquals( + Long.toString(ids.longValue()), + leaf.storedFields().document(idNext).get("string_id")); IndexSearcher searcher = new IndexSearcher(r); TopDocs result = searcher.search(LongPoint.newExactQuery("point_id", ids.longValue()), 1); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java index 54d62c97bafa..370c485b2691 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java @@ -69,9 +69,10 @@ public void testStressAdvance() throws Exception { final List bDocIDs = new ArrayList<>(); final DirectoryReader r = w.getReader(); + StoredFields storedFields = r.storedFields(); final int[] idToDocID = new int[r.maxDoc()]; for (int docID = 0; docID < idToDocID.length; docID++) { - int id = Integer.parseInt(r.document(docID).get("id")); + int id = Integer.parseInt(storedFields.document(docID).get("id")); if (aDocs.contains(id)) { aDocIDs.add(docID); } else { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java index e219944f71ab..eb4e9cdffa24 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java @@ -313,9 +313,10 @@ private static void printDocs(DirectoryReader r) throws Throwable { // TODO: improve this LeafReader sub = ctx.reader(); Bits liveDocs = sub.getLiveDocs(); + StoredFields storedFields = sub.storedFields(); System.out.println(" " + ((SegmentReader) sub).getSegmentInfo()); for (int docID = 0; docID < sub.maxDoc(); docID++) { - Document doc = sub.document(docID); + Document doc = storedFields.document(docID); if (liveDocs == null || liveDocs.get(docID)) { System.out.println(" docID=" + docID + " id:" + doc.get("id")); } else { @@ -415,20 +416,20 @@ public void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField) // verify stored fields are equivalent try { - verifyEquals(r1.document(id1), r2.document(id2)); + verifyEquals(r1.storedFields().document(id1), r2.storedFields().document(id2)); } catch (Throwable t) { System.out.println("FAILED id=" + term + " id1=" + id1 + " id2=" + id2 + " term=" + term); - System.out.println(" d1=" + r1.document(id1)); - System.out.println(" d2=" + r2.document(id2)); + System.out.println(" d1=" + r1.storedFields().document(id1)); + System.out.println(" d2=" + r2.storedFields().document(id2)); throw t; } try { // verify term vectors are equivalent - verifyEquals(r1.getTermVectors(id1), r2.getTermVectors(id2)); + verifyEquals(r1.termVectors().get(id1), r2.termVectors().get(id2)); } catch (Throwable e) { System.out.println("FAILED id=" + term + " id1=" + id1 + " id2=" + id2); - Fields tv1 = r1.getTermVectors(id1); + Fields tv1 = r1.termVectors().get(id1); System.out.println(" d1=" + tv1); if (tv1 != null) { PostingsEnum dpEnum = null; @@ -461,7 +462,7 @@ public void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField) } } - Fields tv2 = r2.getTermVectors(id2); + Fields tv2 = r2.termVectors().get(id2); System.out.println(" d2=" + tv2); if (tv2 != null) { PostingsEnum dpEnum = null; diff --git 
a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java index c3ed3025ff64..955c335a0851 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java @@ -445,7 +445,7 @@ public void run() { if (results.totalHits.value != 1) { System.out.println("FAIL: hits id:" + id + " val=" + val); for (ScoreDoc sd : results.scoreDocs) { - final Document doc = r.document(sd.doc); + final Document doc = r.storedFields().document(sd.doc); System.out.println( " docID=" + sd.doc @@ -456,7 +456,7 @@ public void run() { } fail("id=" + id + " reader=" + r + " totalHits=" + results.totalHits.value); } - Document doc = searcher.doc(results.scoreDocs[0].doc); + Document doc = searcher.storedFields().document(results.scoreDocs[0].doc); long foundVal = Long.parseLong(doc.get(field)); if (foundVal < Math.abs(val)) { fail("foundVal=" + foundVal + " val=" + val + " id=" + id + " reader=" + r); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java index 7f4bae3d6632..3f2eee16d561 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java @@ -57,11 +57,12 @@ private Document createDoc() { private void verifyIndex(Directory dir) throws IOException { IndexReader r = DirectoryReader.open(dir); + TermVectors termVectors = r.termVectors(); int numDocs = r.numDocs(); for (int i = 0; i < numDocs; i++) { assertNotNull( "term vectors should not have been null for document " + i, - r.getTermVectors(i).terms("c")); + termVectors.get(i).terms("c")); } r.close(); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java index 632b94bb810d..2ccbb0c45701 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java @@ -62,7 +62,7 @@ public void testDoubleOffsetCounting() throws Exception { w.close(); IndexReader r = DirectoryReader.open(dir); - Terms vector = r.getTermVectors(0).terms("field"); + Terms vector = r.termVectors().get(0).terms("field"); assertNotNull(vector); TermsEnum termsEnum = vector.iterator(); assertNotNull(termsEnum.next()); @@ -118,7 +118,7 @@ public void testDoubleOffsetCounting2() throws Exception { w.close(); IndexReader r = DirectoryReader.open(dir); - TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(); + TermsEnum termsEnum = r.termVectors().get(0).terms("field").iterator(); assertNotNull(termsEnum.next()); PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL); assertEquals(2, termsEnum.totalTermFreq()); @@ -153,7 +153,7 @@ public void testEndOffsetPositionCharAnalyzer() throws Exception { w.close(); IndexReader r = DirectoryReader.open(dir); - TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(); + TermsEnum termsEnum = r.termVectors().get(0).terms("field").iterator(); assertNotNull(termsEnum.next()); PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL); assertEquals(2, termsEnum.totalTermFreq()); @@ -191,7 +191,7 @@ public void testEndOffsetPositionWithCachingTokenFilter() throws Exception { w.close(); IndexReader r = DirectoryReader.open(dir); - TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(); 
+ TermsEnum termsEnum = r.termVectors().get(0).terms("field").iterator(); assertNotNull(termsEnum.next()); PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL); assertEquals(2, termsEnum.totalTermFreq()); @@ -231,7 +231,7 @@ public void testEndOffsetPositionStopFilter() throws Exception { w.close(); IndexReader r = DirectoryReader.open(dir); - TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(); + TermsEnum termsEnum = r.termVectors().get(0).terms("field").iterator(); assertNotNull(termsEnum.next()); PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL); assertEquals(2, termsEnum.totalTermFreq()); @@ -267,7 +267,7 @@ public void testEndOffsetPositionStandard() throws Exception { w.close(); IndexReader r = DirectoryReader.open(dir); - TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(); + TermsEnum termsEnum = r.termVectors().get(0).terms("field").iterator(); assertNotNull(termsEnum.next()); PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL); @@ -311,7 +311,7 @@ public void testEndOffsetPositionStandardEmptyField() throws Exception { w.close(); IndexReader r = DirectoryReader.open(dir); - TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(); + TermsEnum termsEnum = r.termVectors().get(0).terms("field").iterator(); assertNotNull(termsEnum.next()); PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL); @@ -353,7 +353,7 @@ public void testEndOffsetPositionStandardEmptyField2() throws Exception { w.close(); IndexReader r = DirectoryReader.open(dir); - TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(); + TermsEnum termsEnum = r.termVectors().get(0).terms("field").iterator(); assertNotNull(termsEnum.next()); PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL); @@ -411,9 +411,11 @@ public void testTermVectorCorruption() throws IOException { writer.close(); IndexReader reader = DirectoryReader.open(dir); + StoredFields storedFields = reader.storedFields(); + TermVectors termVectors = reader.termVectors(); for (int i = 0; i < reader.numDocs(); i++) { - reader.document(i); - reader.getTermVectors(i); + storedFields.document(i); + termVectors.get(i); } reader.close(); @@ -470,9 +472,9 @@ public void testTermVectorCorruption2() throws IOException { writer.close(); IndexReader reader = DirectoryReader.open(dir); - assertNull(reader.getTermVectors(0)); - assertNull(reader.getTermVectors(1)); - assertNotNull(reader.getTermVectors(2)); + assertNull(reader.termVectors().get(0)); + assertNull(reader.termVectors().get(1)); + assertNotNull(reader.termVectors().get(2)); reader.close(); } dir.close(); @@ -519,9 +521,11 @@ public void testTermVectorCorruption3() throws IOException { writer.close(); IndexReader reader = DirectoryReader.open(dir); + StoredFields storedFields = reader.storedFields(); + TermVectors termVectors = reader.termVectors(); for (int i = 0; i < 10; i++) { - reader.getTermVectors(i); - reader.document(i); + termVectors.get(i); + storedFields.document(i); } reader.close(); dir.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java index 841e55586874..17d98e249f36 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java @@ -944,6 +944,7 @@ public void testCommonPrefixTerms() throws Exception { TermsEnum termsEnum = MultiTerms.getTerms(r, "id").iterator(); PostingsEnum postingsEnum = 
null; PerThreadPKLookup pkLookup = new PerThreadPKLookup(r, "id"); + StoredFields storedFields = r.storedFields(); int iters = atLeast(numTerms * 3); List termsList = new ArrayList<>(terms); @@ -972,7 +973,7 @@ public void testCommonPrefixTerms() throws Exception { int docID = postingsEnum.nextDoc(); assertTrue(docID != PostingsEnum.NO_MORE_DOCS); assertEquals(docID, pkLookup.lookup(termBytesRef)); - Document doc = r.document(docID); + Document doc = storedFields.document(docID); assertEquals(term, doc.get("id")); if (random().nextInt(7) == 1) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java b/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java index 1efae1a6fabb..b3028f3a0c6b 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java @@ -93,9 +93,10 @@ private void checkExpecteds(BitSet expecteds) throws Exception { // Perhaps not the most efficient approach but meets our // needs here. final Bits liveDocs = MultiBits.getLiveDocs(r); + StoredFields storedFields = r.storedFields(); for (int i = 0; i < r.maxDoc(); i++) { if (liveDocs == null || liveDocs.get(i)) { - String sval = r.document(i).get(FIELD_RECORD_ID); + String sval = storedFields.document(i).get(FIELD_RECORD_ID); if (sval != null) { int val = Integer.parseInt(sval); assertTrue("Did not expect document #" + val, expecteds.get(val)); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java index 1db3b4ebc746..90eff45832bb 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java @@ -23,6 +23,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; @@ -467,8 +468,9 @@ protected void printHits(String test, ScoreDoc[] h, IndexSearcher searcher) thro DecimalFormat f = new DecimalFormat("0.000000", DecimalFormatSymbols.getInstance(Locale.ROOT)); + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < h.length; i++) { - Document d = searcher.doc(h[i].doc); + Document d = storedFields.document(h[i].doc); float score = h[i].score; System.err.println( "#" + i + ": " + f.format(score) + " - " + d.get("id") + " - " + d.get("data")); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java b/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java index d33fddf57866..444b304f603e 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java @@ -22,6 +22,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; @@ -79,8 +80,9 @@ public void testReverseDateSort() throws Exception { // Execute the search and process the search results. 
String[] actualOrder = new String[5]; ScoreDoc[] hits = searcher.search(query, 1000, sort).scoreDocs; + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < hits.length; i++) { - Document document = searcher.doc(hits[i].doc); + Document document = storedFields.document(hits[i].doc); String text = document.get(TEXT_FIELD); actualOrder[i] = text; } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java index d9d7f909b8b4..e7385131d5fd 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java @@ -36,6 +36,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.Similarity; @@ -178,7 +179,8 @@ public void testSkipToFirsttimeMiss() throws IOException { final Scorer ds = dw.scorer(context); final boolean skipOk = ds.iterator().advance(3) != DocIdSetIterator.NO_MORE_DOCS; if (skipOk) { - fail("firsttime skipTo found a match? ... " + r.document(ds.docID()).get("id")); + fail( + "firsttime skipTo found a match? ... " + r.storedFields().document(ds.docID()).get("id")); } } @@ -195,7 +197,7 @@ public void testSkipToFirsttimeHit() throws IOException { assertTrue( "firsttime skipTo found no match", ds.iterator().advance(3) != DocIdSetIterator.NO_MORE_DOCS); - assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id")); + assertEquals("found wrong docid", "d4", r.storedFields().document(ds.docID()).get("id")); } public void testSimpleEqualScores1() throws Exception { @@ -275,7 +277,7 @@ public void testSimpleTiebreaker() throws Exception { try { assertEquals("3 docs should match " + q.toString(), 3, h.length); - assertEquals("wrong first", "d2", s.doc(h[0].doc).get("id")); + assertEquals("wrong first", "d2", s.storedFields().document(h[0].doc).get("id")); float score0 = h[0].score; float score1 = h[1].score; float score2 = h[2].score; @@ -347,7 +349,7 @@ public void testBooleanOptionalNoTiebreaker() throws Exception { /* note: -1 */ assertEquals("score #" + i + " is not the same", score, h[i].score, SCORE_COMP_THRESH); } - assertEquals("wrong last", "d1", s.doc(h[h.length - 1].doc).get("id")); + assertEquals("wrong last", "d1", s.storedFields().document(h[h.length - 1].doc).get("id")); float score1 = h[h.length - 1].score; assertTrue( "d1 does not have worse score then others: " + score + " >? 
" + score1, score > score1); @@ -384,10 +386,10 @@ public void testBooleanOptionalWithTiebreaker() throws Exception { float score2 = h[2].score; float score3 = h[3].score; - String doc0 = s.doc(h[0].doc).get("id"); - String doc1 = s.doc(h[1].doc).get("id"); - String doc2 = s.doc(h[2].doc).get("id"); - String doc3 = s.doc(h[3].doc).get("id"); + String doc0 = s.storedFields().document(h[0].doc).get("id"); + String doc1 = s.storedFields().document(h[1].doc).get("id"); + String doc2 = s.storedFields().document(h[2].doc).get("id"); + String doc3 = s.storedFields().document(h[3].doc).get("id"); assertTrue("doc0 should be d2 or d4: " + doc0, doc0.equals("d2") || doc0.equals("d4")); assertTrue("doc1 should be d2 or d4: " + doc0, doc1.equals("d2") || doc1.equals("d4")); @@ -435,10 +437,10 @@ public void testBooleanOptionalWithTiebreakerAndBoost() throws Exception { float score2 = h[2].score; float score3 = h[3].score; - String doc0 = s.doc(h[0].doc).get("id"); - String doc1 = s.doc(h[1].doc).get("id"); - String doc2 = s.doc(h[2].doc).get("id"); - String doc3 = s.doc(h[3].doc).get("id"); + String doc0 = s.storedFields().document(h[0].doc).get("id"); + String doc1 = s.storedFields().document(h[1].doc).get("id"); + String doc2 = s.storedFields().document(h[2].doc).get("id"); + String doc3 = s.storedFields().document(h[3].doc).get("id"); assertEquals("doc0 should be d4: ", "d4", doc0); assertEquals("doc1 should be d3: ", "d3", doc1); @@ -580,8 +582,9 @@ protected void printHits(String test, ScoreDoc[] h, IndexSearcher searcher) thro DecimalFormat f = new DecimalFormat("0.000000000", DecimalFormatSymbols.getInstance(Locale.ROOT)); + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < h.length; i++) { - Document d = searcher.doc(h[i].doc); + Document d = storedFields.document(h[i].doc); float score = h[i].score; System.err.println("#" + i + ": " + f.format(score) + " - " + d.get("id")); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java index b385a95b0668..ebe50dae17fe 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java @@ -116,11 +116,12 @@ private void runTest(boolean reversed) throws Throwable { } /* + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < nDocsReturned; i++) { ScoreDoc scoreDoc = topDocs.scoreDocs[i]; ids[i] = scoreDoc.doc; scores[i] = scoreDoc.score; - documents[i] = searcher.doc(ids[i]); + documents[i] = storedFields.document(ids[i]); System.out.println("ids[i] = " + ids[i]); System.out.println("documents[i] = " + documents[i]); System.out.println("scores[i] = " + scores[i]); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java index 682fd94dc2ac..5ad8f5c263b5 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java @@ -31,6 +31,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.similarities.ClassicSimilarity; @@ -108,8 +109,9 @@ public void testFuzziness() throws Exception { 
hits = searcher.search(query, 1000).scoreDocs; assertEquals("3 documents should match", 3, hits.length); List order = Arrays.asList("bbbbb", "abbbb", "aabbb"); + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < hits.length; i++) { - final String term = searcher.doc(hits[i].doc).get("field"); + final String term = storedFields.document(hits[i].doc).get("field"); // System.out.println(hits[i].score); assertEquals(order.get(i), term); } @@ -121,7 +123,7 @@ public void testFuzziness() throws Exception { assertEquals("only 2 documents should match", 2, hits.length); order = Arrays.asList("bbbbb", "abbbb"); for (int i = 0; i < hits.length; i++) { - final String term = searcher.doc(hits[i].doc).get("field"); + final String term = storedFields.document(hits[i].doc).get("field"); // System.out.println(hits[i].score); assertEquals(order.get(i), term); } @@ -142,43 +144,43 @@ public void testFuzziness() throws Exception { query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMaxEdits, 0); hits = searcher.search(query, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(storedFields.document(hits[0].doc).get("field"), ("aaaaa")); // default allows for up to two edits: - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(storedFields.document(hits[1].doc).get("field"), ("aaaab")); + assertEquals(storedFields.document(hits[2].doc).get("field"), ("aaabb")); // query similar to a word in the index: query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMaxEdits, 0); hits = searcher.search(query, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(storedFields.document(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(storedFields.document(hits[1].doc).get("field"), ("aaaab")); + assertEquals(storedFields.document(hits[2].doc).get("field"), ("aaabb")); // now with prefix query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMaxEdits, 1); hits = searcher.search(query, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(storedFields.document(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(storedFields.document(hits[1].doc).get("field"), ("aaaab")); + assertEquals(storedFields.document(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMaxEdits, 2); hits = searcher.search(query, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(storedFields.document(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(storedFields.document(hits[1].doc).get("field"), ("aaaab")); + assertEquals(storedFields.document(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMaxEdits, 3); hits = searcher.search(query, 1000).scoreDocs; assertEquals(3, 
hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(storedFields.document(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(storedFields.document(hits[1].doc).get("field"), ("aaaab")); + assertEquals(storedFields.document(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMaxEdits, 4); hits = searcher.search(query, 1000).scoreDocs; assertEquals(2, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); + assertEquals(storedFields.document(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(storedFields.document(hits[1].doc).get("field"), ("aaaab")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMaxEdits, 5); hits = searcher.search(query, 1000).scoreDocs; assertEquals(0, hits.length); @@ -186,25 +188,25 @@ public void testFuzziness() throws Exception { query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMaxEdits, 0); hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(storedFields.document(hits[0].doc).get("field"), ("ddddd")); // now with prefix query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMaxEdits, 1); hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(storedFields.document(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMaxEdits, 2); hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(storedFields.document(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMaxEdits, 3); hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(storedFields.document(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMaxEdits, 4); hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(storedFields.document(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMaxEdits, 5); hits = searcher.search(query, 1000).scoreDocs; assertEquals(0, hits.length); @@ -315,15 +317,16 @@ public void testSingleQueryExactMatchScoresHighest() throws Exception { new ClassicSimilarity()); // avoid randomisation of similarity algo by test framework writer.close(); String[] searchTerms = {"smith", "smythe", "smdssasd"}; + StoredFields storedFields = reader.storedFields(); for (String searchTerm : searchTerms) { FuzzyQuery query = new FuzzyQuery(new Term("field", searchTerm), 2, 1); ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; - Document bestDoc = searcher.doc(hits[0].doc); + Document bestDoc = storedFields.document(hits[0].doc); assertTrue(hits.length > 0); String topMatch = bestDoc.get("field"); assertEquals(searchTerm, topMatch); if (hits.length > 1) { - Document worstDoc = 
searcher.doc(hits[hits.length - 1].doc); + Document worstDoc = storedFields.document(hits[hits.length - 1].doc); String worstMatch = worstDoc.get("field"); assertNotSame(searchTerm, worstMatch); } @@ -367,15 +370,15 @@ public void testMultipleQueriesIdfWorks() throws Exception { // Matches on the rare surname should be worth more than matches on the common forename assertEquals(7, hits.length); - Document bestDoc = searcher.doc(hits[0].doc); + Document bestDoc = searcher.storedFields().document(hits[0].doc); String topMatch = bestDoc.get("field"); assertTrue(topMatch.contains(rareSearchTerm)); - Document runnerUpDoc = searcher.doc(hits[1].doc); + Document runnerUpDoc = searcher.storedFields().document(hits[1].doc); String runnerUpMatch = runnerUpDoc.get("field"); assertTrue(runnerUpMatch.contains("cuttin")); - Document worstDoc = searcher.doc(hits[hits.length - 1].doc); + Document worstDoc = searcher.storedFields().document(hits[hits.length - 1].doc); String worstMatch = worstDoc.get("field"); assertTrue(worstMatch.contains("micheal")); // misspelling of common name @@ -447,9 +450,9 @@ public void testBoostOnlyRewrite() throws Exception { ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(3, hits.length); // normally, 'Lucenne' would be the first result as IDF will skew the score. - assertEquals("Lucene", reader.document(hits[0].doc).get("field")); - assertEquals("Lucene", reader.document(hits[1].doc).get("field")); - assertEquals("Lucenne", reader.document(hits[2].doc).get("field")); + assertEquals("Lucene", reader.storedFields().document(hits[0].doc).get("field")); + assertEquals("Lucene", reader.storedFields().document(hits[1].doc).get("field")); + assertEquals("Lucenne", reader.storedFields().document(hits[2].doc).get("field")); reader.close(); directory.close(); } @@ -485,7 +488,7 @@ public void testGiga() throws Exception { IndexSearcher searcher = newSearcher(r); ScoreDoc[] hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("Giga byte", searcher.doc(hits[0].doc).get("field")); + assertEquals("Giga byte", searcher.storedFields().document(hits[0].doc).get("field")); r.close(); w.close(); index.close(); @@ -504,12 +507,12 @@ public void testDistanceAsEditsSearching() throws Exception { FuzzyQuery q = new FuzzyQuery(new Term("field", "fouba"), 2); ScoreDoc[] hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("foobar", searcher.doc(hits[0].doc).get("field")); + assertEquals("foobar", searcher.storedFields().document(hits[0].doc).get("field")); q = new FuzzyQuery(new Term("field", "foubara"), 2); hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("foobar", searcher.doc(hits[0].doc).get("field")); + assertEquals("foobar", searcher.storedFields().document(hits[0].doc).get("field")); expectThrows( IllegalArgumentException.class, @@ -642,8 +645,9 @@ public void testRandom() throws Exception { new FuzzyQuery(new Term("field", queryTerm), ed, prefixLength, queueSize, true); TopDocs hits = s.search(query, terms.size()); Set actual = new HashSet<>(); + StoredFields storedFields = s.storedFields(); for (ScoreDoc hit : hits.scoreDocs) { - Document doc = s.doc(hit.doc); + Document doc = storedFields.document(hit.doc); actual.add(doc.get("field")); // System.out.println(" actual: " + doc.get("field") + " score=" + hit.score); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestIndriAndQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestIndriAndQuery.java 
index dae1b3d6b053..80f370713e5b 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestIndriAndQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestIndriAndQuery.java @@ -21,6 +21,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.similarities.IndriDirichletSimilarity; @@ -156,8 +157,9 @@ protected void printHits(String test, ScoreDoc[] h, IndexSearcher searcher) thro System.err.println("------- " + test + " -------"); + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < h.length; i++) { - Document d = searcher.doc(h[i].doc); + Document d = storedFields.document(h[i].doc); float score = h[i].score; System.err.println("#" + i + ": " + score + " - " + d.get("body")); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestKnnVectorQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestKnnVectorQuery.java index 56e5bbf63ef9..6c0399ffeacc 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestKnnVectorQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestKnnVectorQuery.java @@ -40,6 +40,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.store.Directory; @@ -704,8 +705,9 @@ public void testDeletes() throws IOException { IndexSearcher searcher = new IndexSearcher(reader); KnnVectorQuery query = new KnnVectorQuery("vector", randomVector(dim), hits); TopDocs topDocs = searcher.search(query, numDocs); + StoredFields storedFields = reader.storedFields(); for (ScoreDoc scoreDoc : topDocs.scoreDocs) { - Document doc = reader.document(scoreDoc.doc, Set.of("index")); + Document doc = storedFields.document(scoreDoc.doc, Set.of("index")); String index = doc.get("index"); assertFalse( "search returned a deleted document: " + index, @@ -868,7 +870,7 @@ private void assertMatches(IndexSearcher searcher, Query q, int expectedMatches) private void assertIdMatches(IndexReader reader, String expectedId, ScoreDoc scoreDoc) throws IOException { - String actualId = reader.document(scoreDoc.doc).get("id"); + String actualId = reader.storedFields().document(scoreDoc.doc).get("id"); assertEquals(expectedId, actualId); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java b/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java index d377018d4892..dfbc94f4b2aa 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java @@ -68,7 +68,7 @@ protected Integer lookupFromSearcher(IndexSearcher s, String id) throws IOExcept if (hits.totalHits.value == 0) { return null; } else { - Document doc = s.doc(hits.scoreDocs[0].doc); + Document doc = s.storedFields().document(hits.scoreDocs[0].doc); return (Integer) doc.getField("field").numericValue(); } } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java index 232d835c0548..245ee9d57aa0 100644 --- 
a/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java @@ -56,9 +56,9 @@ public void testQuery() throws Exception { hits = is.search(new MatchAllDocsQuery(), 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals("one", is.doc(hits[0].doc).get("key")); - assertEquals("two", is.doc(hits[1].doc).get("key")); - assertEquals("three four", is.doc(hits[2].doc).get("key")); + assertEquals("one", is.storedFields().document(hits[0].doc).get("key")); + assertEquals("two", is.storedFields().document(hits[1].doc).get("key")); + assertEquals("three four", is.storedFields().document(hits[2].doc).get("key")); // some artificial queries to trigger the use of skipTo(): diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java index 76931cfc325a..5cd653d4c517 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.store.Directory; @@ -126,11 +127,12 @@ public void run() { private void testTermVectors() throws Exception { // check: int numDocs = reader.numDocs(); + TermVectors termVectors = reader.termVectors(); for (int docId = 0; docId < numDocs; docId++) { - Fields vectors = reader.getTermVectors(docId); + Fields vectors = termVectors.get(docId); // verify vectors result verifyVectors(vectors, docId); - Terms vector = reader.getTermVectors(docId).terms("field"); + Terms vector = termVectors.get(docId).terms("field"); verifyVector(vector.iterator(), docId); } } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNearest.java b/lucene/core/src/test/org/apache/lucene/search/TestNearest.java index e35a1875c4f2..10abdc8b79fe 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestNearest.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestNearest.java @@ -29,6 +29,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.SerialMergeScheduler; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.geo.GeoTestUtil; @@ -57,7 +58,7 @@ public void testNearestNeighborWithDeletedDocs() throws Exception { // with its own points impl: IndexSearcher s = newSearcher(r, false); FieldDoc hit = (FieldDoc) LatLonPoint.nearest(s, "point", 40.0, 50.0, 1).scoreDocs[0]; - assertEquals("0", r.document(hit.doc).getField("id").stringValue()); + assertEquals("0", r.storedFields().document(hit.doc).getField("id").stringValue()); r.close(); w.deleteDocuments(new Term("id", "0")); @@ -66,7 +67,7 @@ public void testNearestNeighborWithDeletedDocs() throws Exception { // with its own points impl: s = newSearcher(r, false); hit = (FieldDoc) LatLonPoint.nearest(s, "point", 40.0, 50.0, 1).scoreDocs[0]; - assertEquals("1", r.document(hit.doc).getField("id").stringValue()); + assertEquals("1", r.storedFields().document(hit.doc).getField("id").stringValue()); r.close(); w.close(); dir.close(); @@ -89,7 +90,7 @@ public void 
testNearestNeighborWithAllDeletedDocs() throws Exception { // with its own points impl: IndexSearcher s = newSearcher(r, false); FieldDoc hit = (FieldDoc) LatLonPoint.nearest(s, "point", 40.0, 50.0, 1).scoreDocs[0]; - assertEquals("0", r.document(hit.doc).getField("id").stringValue()); + assertEquals("0", r.storedFields().document(hit.doc).getField("id").stringValue()); r.close(); w.deleteDocuments(new Term("id", "0")); @@ -120,8 +121,8 @@ public void testTieBreakByDocID() throws Exception { // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps // with its own points impl: ScoreDoc[] hits = LatLonPoint.nearest(newSearcher(r, false), "point", 45.0, 50.0, 2).scoreDocs; - assertEquals("0", r.document(hits[0].doc).getField("id").stringValue()); - assertEquals("1", r.document(hits[1].doc).getField("id").stringValue()); + assertEquals("0", r.storedFields().document(hits[0].doc).getField("id").stringValue()); + assertEquals("1", r.storedFields().document(hits[1].doc).getField("id").stringValue()); r.close(); w.close(); @@ -232,11 +233,12 @@ public int compare(FieldDoc a, FieldDoc b) { new Sort(LatLonDocValuesField.newDistanceSort("point", pointLat, pointLon))); ScoreDoc[] hits = LatLonPoint.nearest(s, "point", pointLat, pointLon, topN).scoreDocs; + StoredFields storedFields = r.storedFields(); for (int i = 0; i < topN; i++) { FieldDoc expected = expectedHits[i]; FieldDoc expected2 = (FieldDoc) fieldDocs.scoreDocs[i]; FieldDoc actual = (FieldDoc) hits[i]; - Document actualDoc = r.document(actual.doc); + Document actualDoc = storedFields.document(actual.doc); if (VERBOSE) { System.out.println("hit " + i); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java b/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java index 6867d6501d20..d7fbe694613e 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java @@ -170,8 +170,8 @@ public void testBasic() throws Exception { TopDocs hits = searcher.search(bq.build(), 10); assertEquals(2, hits.totalHits.value); - assertEquals("0", searcher.doc(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(hits.scoreDocs[1].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits.scoreDocs[1].doc).get("id")); // Now, resort using PhraseQuery: PhraseQuery pq = new PhraseQuery(5, "field", "wizard", "oz"); @@ -180,8 +180,8 @@ public void testBasic() throws Exception { // Resorting changed the order: assertEquals(2, hits2.totalHits.value); - assertEquals("1", searcher.doc(hits2.scoreDocs[0].doc).get("id")); - assertEquals("0", searcher.doc(hits2.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits2.scoreDocs[0].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits2.scoreDocs[1].doc).get("id")); r.close(); dir.close(); @@ -213,8 +213,8 @@ public void testNullScorerTermQuery() throws Exception { TopDocs hits = searcher.search(bq.build(), 10); assertEquals(2, hits.totalHits.value); - assertEquals("0", searcher.doc(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(hits.scoreDocs[1].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits.scoreDocs[1].doc).get("id")); // Now, resort using TermQuery on term 
that does not exist. TermQuery tq = new TermQuery(new Term("field", "gold")); @@ -251,8 +251,8 @@ public void testCustomCombine() throws Exception { TopDocs hits = searcher.search(bq.build(), 10); assertEquals(2, hits.totalHits.value); - assertEquals("0", searcher.doc(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(hits.scoreDocs[1].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits.scoreDocs[1].doc).get("id")); // Now, resort using PhraseQuery, but with an // opposite-world combine: @@ -273,8 +273,8 @@ protected float combine( // Resorting didn't change the order: assertEquals(2, hits2.totalHits.value); - assertEquals("0", searcher.doc(hits2.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(hits2.scoreDocs[1].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits2.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits2.scoreDocs[1].doc).get("id")); r.close(); dir.close(); @@ -304,8 +304,8 @@ public void testExplain() throws Exception { TopDocs hits = searcher.search(bq.build(), 10); assertEquals(2, hits.totalHits.value); - assertEquals("0", searcher.doc(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(hits.scoreDocs[1].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits.scoreDocs[1].doc).get("id")); // Now, resort using PhraseQuery: PhraseQuery pq = new PhraseQuery("field", "wizard", "oz"); @@ -327,8 +327,8 @@ protected float combine( // Resorting changed the order: assertEquals(2, hits2.totalHits.value); - assertEquals("1", searcher.doc(hits2.scoreDocs[0].doc).get("id")); - assertEquals("0", searcher.doc(hits2.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits2.scoreDocs[0].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits2.scoreDocs[1].doc).get("id")); int docID = hits2.scoreDocs[0].doc; Explanation explain = rescorer.explain(searcher, searcher.explain(bq.build(), docID), docID); @@ -377,8 +377,8 @@ public void testMissingSecondPassScore() throws Exception { TopDocs hits = searcher.search(bq.build(), 10); assertEquals(2, hits.totalHits.value); - assertEquals("0", searcher.doc(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(hits.scoreDocs[1].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits.scoreDocs[1].doc).get("id")); // Now, resort using PhraseQuery, no slop: PhraseQuery pq = new PhraseQuery("field", "wizard", "oz"); @@ -387,8 +387,8 @@ public void testMissingSecondPassScore() throws Exception { // Resorting changed the order: assertEquals(2, hits2.totalHits.value); - assertEquals("1", searcher.doc(hits2.scoreDocs[0].doc).get("id")); - assertEquals("0", searcher.doc(hits2.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits2.scoreDocs[0].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits2.scoreDocs[1].doc).get("id")); r.close(); dir.close(); @@ -446,8 +446,8 @@ protected float combine( @Override public int compare(Integer a, Integer b) { try { - int av = idToNum[Integer.parseInt(r.document(a).get("id"))]; - int bv = idToNum[Integer.parseInt(r.document(b).get("id"))]; + int av = 
idToNum[Integer.parseInt(r.storedFields().document(a).get("id"))]; + int bv = idToNum[Integer.parseInt(r.storedFields().document(b).get("id"))]; if (av < bv) { return -reverseInt; } else if (bv < av) { @@ -465,7 +465,7 @@ public int compare(Integer a, Integer b) { boolean fail = false; for (int i = 0; i < numHits; i++) { // System.out.println("expected=" + expected[i] + " vs " + hits2.scoreDocs[i].doc + " v=" + - // idToNum[Integer.parseInt(r.document(expected[i]).get("id"))]); + // idToNum[Integer.parseInt(r.storedFields().document(expected[i]).get("id"))]); if (expected[i].intValue() != hits2.scoreDocs[i].doc) { // System.out.println(" diff!"); fail = true; @@ -537,7 +537,9 @@ public int advance(int target) { @Override public float score() throws IOException { - int num = idToNum[Integer.parseInt(context.reader().document(docID).get("id"))]; + int num = + idToNum[ + Integer.parseInt(context.reader().storedFields().document(docID).get("id"))]; if (reverse) { // System.out.println("score doc=" + docID + " num=" + num); return num; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java b/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java index 0324ca4dc5e2..a1b7198fa3bf 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java @@ -30,6 +30,7 @@ import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; @@ -247,12 +248,13 @@ void assertQuery(Query query, Sort sort) throws Exception { if (VERBOSE) { System.out.println(" all.totalHits.value=" + all.totalHits.value); int upto = 0; + StoredFields storedFields = searcher.storedFields(); for (ScoreDoc scoreDoc : all.scoreDocs) { System.out.println( " hit " + (upto++) + ": id=" - + searcher.doc(scoreDoc.doc).get("id") + + storedFields.document(scoreDoc.doc).get("id") + " " + scoreDoc); } @@ -303,13 +305,16 @@ void assertQuery(Query query, Sort sort) throws Exception { void assertPage(int pageStart, TopDocs all, TopDocs paged) throws IOException { assertEquals(all.totalHits.value, paged.totalHits.value); + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < paged.scoreDocs.length; i++) { ScoreDoc sd1 = all.scoreDocs[pageStart + i]; ScoreDoc sd2 = paged.scoreDocs[i]; if (VERBOSE) { System.out.println(" hit " + (pageStart + i)); - System.out.println(" expected id=" + searcher.doc(sd1.doc).get("id") + " " + sd1); - System.out.println(" actual id=" + searcher.doc(sd2.doc).get("id") + " " + sd2); + System.out.println( + " expected id=" + storedFields.document(sd1.doc).get("id") + " " + sd1); + System.out.println( + " actual id=" + storedFields.document(sd2.doc).get("id") + " " + sd2); } assertEquals(sd1.doc, sd2.doc); assertEquals(sd1.score, sd2.score, 0f); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSort.java b/lucene/core/src/test/org/apache/lucene/search/TestSort.java index c7fb099db485..a160d4a204c7 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSort.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSort.java @@ -111,8 +111,8 @@ public void testString() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'bar' 
comes before 'foo' - assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value")); + assertEquals("bar", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("foo", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); ir.close(); dir.close(); @@ -139,8 +139,8 @@ public void testStringReverse() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'foo' comes after 'bar' in reverse order - assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value")); + assertEquals("foo", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("bar", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); ir.close(); dir.close(); @@ -167,8 +167,8 @@ public void testStringVal() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'bar' comes before 'foo' - assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value")); + assertEquals("bar", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("foo", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); ir.close(); dir.close(); @@ -195,8 +195,8 @@ public void testStringValReverse() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'foo' comes after 'bar' in reverse order - assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value")); + assertEquals("foo", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("bar", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); ir.close(); dir.close(); @@ -227,9 +227,9 @@ public void testInt() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // numeric order - assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("300000", searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("-1", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("4", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals("300000", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -260,9 +260,9 @@ public void testIntReverse() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // reverse numeric order - assertEquals("300000", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("300000", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("4", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals("-1", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -291,9 +291,9 @@ public void testIntMissing() throws IOException { TopDocs td = 
searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // null is treated as a 0 - assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("-1", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertNull(searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals("4", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -326,9 +326,9 @@ public void testIntMissingLast() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // null is treated as a Integer.MAX_VALUE - assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("-1", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("4", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertNull(searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -359,9 +359,9 @@ public void testLong() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // numeric order - assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("3000000000", searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("-1", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("4", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals("3000000000", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -392,9 +392,9 @@ public void testLongReverse() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // reverse numeric order - assertEquals("3000000000", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("3000000000", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("4", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals("-1", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -423,9 +423,9 @@ public void testLongMissing() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // null is treated as 0 - assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("-1", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertNull(searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals("4", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -458,9 +458,9 @@ public void testLongMissingLast() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); 
assertEquals(3, td.totalHits.value); // null is treated as Long.MAX_VALUE - assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("-1", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("4", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertNull(searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -491,9 +491,9 @@ public void testFloat() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // numeric order - assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("30.1", searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("-1.3", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("4.2", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals("30.1", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -524,9 +524,9 @@ public void testFloatReverse() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // reverse numeric order - assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("-1.3", searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("30.1", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("4.2", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals("-1.3", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -555,9 +555,9 @@ public void testFloatMissing() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // null is treated as 0 - assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("4.2", searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("-1.3", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertNull(searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals("4.2", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -590,9 +590,9 @@ public void testFloatMissingLast() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // null is treated as Float.MAX_VALUE - assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[2].doc).get("value")); + assertEquals("-1.3", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("4.2", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertNull(searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); ir.close(); dir.close(); @@ -627,10 +627,12 @@ public void testDouble() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(4, td.totalHits.value); // 
numeric order - assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value")); - assertEquals("30.1", searcher.doc(td.scoreDocs[3].doc).get("value")); + assertEquals("-1.3", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals( + "4.2333333333332", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals( + "4.2333333333333", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); + assertEquals("30.1", searcher.storedFields().document(td.scoreDocs[3].doc).get("value")); ir.close(); dir.close(); @@ -658,8 +660,8 @@ public void testDoubleSignedZero() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // numeric order - assertEquals("-0", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("+0", searcher.doc(td.scoreDocs[1].doc).get("value")); + assertEquals("-0", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals("+0", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); ir.close(); dir.close(); @@ -694,10 +696,12 @@ public void testDoubleReverse() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(4, td.totalHits.value); // numeric order - assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value")); - assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value")); + assertEquals("30.1", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals( + "4.2333333333333", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals( + "4.2333333333332", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); + assertEquals("-1.3", searcher.storedFields().document(td.scoreDocs[3].doc).get("value")); ir.close(); dir.close(); @@ -730,10 +734,12 @@ public void testDoubleMissing() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(4, td.totalHits.value); // null treated as a 0 - assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value")); - assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[3].doc).get("value")); + assertEquals("-1.3", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertNull(searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals( + "4.2333333333332", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); + assertEquals( + "4.2333333333333", searcher.storedFields().document(td.scoreDocs[3].doc).get("value")); ir.close(); dir.close(); @@ -771,10 +777,12 @@ public void testDoubleMissingLast() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(4, td.totalHits.value); // null treated as Double.MAX_VALUE - assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value")); - assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value")); - assertEquals("4.2333333333333", 
searcher.doc(td.scoreDocs[2].doc).get("value")); - assertNull(searcher.doc(td.scoreDocs[3].doc).get("value")); + assertEquals("-1.3", searcher.storedFields().document(td.scoreDocs[0].doc).get("value")); + assertEquals( + "4.2333333333332", searcher.storedFields().document(td.scoreDocs[1].doc).get("value")); + assertEquals( + "4.2333333333333", searcher.storedFields().document(td.scoreDocs[2].doc).get("value")); + assertNull(searcher.storedFields().document(td.scoreDocs[3].doc).get("value")); ir.close(); dir.close(); @@ -820,21 +828,21 @@ public void testMultiSort() throws IOException { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(4, td.totalHits.value); // 'bar' comes before 'foo' - assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value1")); - assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value1")); - assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).get("value1")); - assertEquals("foo", searcher.doc(td.scoreDocs[3].doc).get("value1")); + assertEquals("bar", searcher.storedFields().document(td.scoreDocs[0].doc).get("value1")); + assertEquals("bar", searcher.storedFields().document(td.scoreDocs[1].doc).get("value1")); + assertEquals("foo", searcher.storedFields().document(td.scoreDocs[2].doc).get("value1")); + assertEquals("foo", searcher.storedFields().document(td.scoreDocs[3].doc).get("value1")); // 0 comes before 1 - assertEquals("0", searcher.doc(td.scoreDocs[0].doc).get("value2")); - assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("value2")); - assertEquals("0", searcher.doc(td.scoreDocs[2].doc).get("value2")); - assertEquals("1", searcher.doc(td.scoreDocs[3].doc).get("value2")); + assertEquals("0", searcher.storedFields().document(td.scoreDocs[0].doc).get("value2")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[1].doc).get("value2")); + assertEquals("0", searcher.storedFields().document(td.scoreDocs[2].doc).get("value2")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[3].doc).get("value2")); // Now with overflow td = searcher.search(new MatchAllDocsQuery(), 1, sort); assertEquals(4, td.totalHits.value); - assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value1")); - assertEquals("0", searcher.doc(td.scoreDocs[0].doc).get("value2")); + assertEquals("bar", searcher.storedFields().document(td.scoreDocs[0].doc).get("value1")); + assertEquals("0", searcher.storedFields().document(td.scoreDocs[0].doc).get("value2")); ir.close(); dir.close(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortOptimization.java b/lucene/core/src/test/org/apache/lucene/search/TestSortOptimization.java index d300d5e5e035..fabe831303a1 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSortOptimization.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSortOptimization.java @@ -48,6 +48,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.VectorEncoding; @@ -599,8 +600,9 @@ public void testDocSortOptimization() throws IOException { TopDocs topDocs = searcher.search(bq.build(), manager); assertEquals(numHits, topDocs.scoreDocs.length); + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < numHits; i++) { - Document d = searcher.doc(topDocs.scoreDocs[i].doc); + Document d = 
storedFields.document(topDocs.scoreDocs[i].doc); assertEquals(Integer.toString(i + lowerRange), d.get("slf")); assertEquals("seg1", d.get("tf")); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java index 6e7e976e45ba..5cf03aea1518 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java @@ -33,6 +33,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.util.LuceneTestCase; @@ -203,6 +204,7 @@ public int compare(BytesRef a, BytesRef b) { if (VERBOSE) { System.out.println(" actual:"); + StoredFields storedFields = s.storedFields(); for (int hitIDX = 0; hitIDX < hits.scoreDocs.length; hitIDX++) { final FieldDoc fd = (FieldDoc) hits.scoreDocs[hitIDX]; BytesRef br = (BytesRef) fd.fields[0]; @@ -213,7 +215,7 @@ public int compare(BytesRef a, BytesRef b) { + ": " + (br == null ? "" : br.utf8ToString()) + " id=" - + s.doc(fd.doc).get("id")); + + storedFields.document(fd.doc).get("id")); } } for (int hitIDX = 0; hitIDX < hits.scoreDocs.length; hitIDX++) { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortRescorer.java b/lucene/core/src/test/org/apache/lucene/search/TestSortRescorer.java index b6807f5c3648..e48828e5b2f3 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSortRescorer.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSortRescorer.java @@ -85,18 +85,18 @@ public void testBasic() throws Exception { // Just first pass query TopDocs hits = searcher.search(query, 10); assertEquals(3, hits.totalHits.value); - assertEquals("3", r.document(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", r.document(hits.scoreDocs[1].doc).get("id")); - assertEquals("2", r.document(hits.scoreDocs[2].doc).get("id")); + assertEquals("3", r.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", r.storedFields().document(hits.scoreDocs[1].doc).get("id")); + assertEquals("2", r.storedFields().document(hits.scoreDocs[2].doc).get("id")); // Now, rescore: Sort sort = new Sort(new SortField("popularity", SortField.Type.INT, true)); Rescorer rescorer = new SortRescorer(sort); hits = rescorer.rescore(searcher, hits, 10); assertEquals(3, hits.totalHits.value); - assertEquals("2", r.document(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", r.document(hits.scoreDocs[1].doc).get("id")); - assertEquals("3", r.document(hits.scoreDocs[2].doc).get("id")); + assertEquals("2", r.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", r.storedFields().document(hits.scoreDocs[1].doc).get("id")); + assertEquals("3", r.storedFields().document(hits.scoreDocs[2].doc).get("id")); String expl = rescorer @@ -121,9 +121,9 @@ public void testDoubleValuesSourceSort() throws Exception { // Just first pass query TopDocs hits = searcher.search(query, 10); assertEquals(3, hits.totalHits.value); - assertEquals("3", r.document(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", r.document(hits.scoreDocs[1].doc).get("id")); - assertEquals("2", r.document(hits.scoreDocs[2].doc).get("id")); + assertEquals("3", r.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", 
r.storedFields().document(hits.scoreDocs[1].doc).get("id")); + assertEquals("2", r.storedFields().document(hits.scoreDocs[2].doc).get("id")); DoubleValuesSource source = DoubleValuesSource.fromLongField("popularity"); @@ -132,9 +132,9 @@ public void testDoubleValuesSourceSort() throws Exception { Rescorer rescorer = new SortRescorer(sort); hits = rescorer.rescore(searcher, hits, 10); assertEquals(3, hits.totalHits.value); - assertEquals("2", r.document(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", r.document(hits.scoreDocs[1].doc).get("id")); - assertEquals("3", r.document(hits.scoreDocs[2].doc).get("id")); + assertEquals("2", r.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", r.storedFields().document(hits.scoreDocs[1].doc).get("id")); + assertEquals("3", r.storedFields().document(hits.scoreDocs[2].doc).get("id")); String expl = rescorer @@ -197,8 +197,8 @@ public void testRandom() throws Exception { @Override public int compare(Integer a, Integer b) { try { - int av = idToNum[Integer.parseInt(r.document(a).get("id"))]; - int bv = idToNum[Integer.parseInt(r.document(b).get("id"))]; + int av = idToNum[Integer.parseInt(r.storedFields().document(a).get("id"))]; + int bv = idToNum[Integer.parseInt(r.storedFields().document(b).get("id"))]; if (av < bv) { return -reverseInt; } else if (bv < av) { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java b/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java index e44b7df40d4f..4d77af3ff87b 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java @@ -96,8 +96,8 @@ public void testForward() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 3 comes before 5 - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -125,8 +125,8 @@ public void testReverse() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'bar' comes before 'baz' - assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -159,9 +159,9 @@ public void testMissingFirst() throws Exception { assertEquals(3, td.totalHits.value); // 3 comes before 5 // null comes first - assertEquals("3", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id")); + assertEquals("3", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[2].doc).get("id")); ir.close(); dir.close(); @@ -193,10 +193,10 @@ public void testMissingLast() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, 
td.totalHits.value); // 3 comes before 5 - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); // null comes last - assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id")); + assertEquals("3", searcher.storedFields().document(td.scoreDocs[2].doc).get("id")); ir.close(); dir.close(); @@ -222,8 +222,8 @@ public void testSingleton() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 3 comes before 5 - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -250,8 +250,8 @@ public void testFloat() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // -5 comes before -3 - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -278,8 +278,8 @@ public void testDouble() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // -5 comes before -3 - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSelector.java b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSelector.java index 1bf887e36536..614c0376f236 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSelector.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSelector.java @@ -55,8 +55,8 @@ public void testMax() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'baz' comes before 'foo' - assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -85,8 +85,8 @@ public void testMaxReverse() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'baz' comes before 'foo' - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -120,10 +120,10 @@ public void testMaxMissingFirst() 
throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // null comes first - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); // 'baz' comes before 'foo' - assertEquals("3", searcher.doc(td.scoreDocs[1].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id")); + assertEquals("3", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[2].doc).get("id")); ir.close(); dir.close(); @@ -157,10 +157,10 @@ public void testMaxMissingLast() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // 'baz' comes before 'foo' - assertEquals("3", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("3", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); // null comes last - assertEquals("1", searcher.doc(td.scoreDocs[2].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[2].doc).get("id")); ir.close(); dir.close(); @@ -187,8 +187,8 @@ public void testMaxSingleton() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'bar' comes before 'baz' - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -218,8 +218,8 @@ public void testMiddleMin() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'b' comes before 'c' - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -249,8 +249,8 @@ public void testMiddleMinReverse() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'b' comes before 'c' - assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -285,10 +285,10 @@ public void testMiddleMinMissingFirst() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // null comes first - assertEquals("3", searcher.doc(td.scoreDocs[0].doc).get("id")); + assertEquals("3", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); // 'b' comes before 'c' - assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); + assertEquals("2", 
searcher.storedFields().document(td.scoreDocs[2].doc).get("id")); ir.close(); dir.close(); @@ -323,10 +323,10 @@ public void testMiddleMinMissingLast() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // 'b' comes before 'c' - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); // null comes last - assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id")); + assertEquals("3", searcher.storedFields().document(td.scoreDocs[2].doc).get("id")); ir.close(); dir.close(); @@ -353,8 +353,8 @@ public void testMiddleMinSingleton() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'bar' comes before 'baz' - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -384,8 +384,8 @@ public void testMiddleMax() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'b' comes before 'c' - assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -415,8 +415,8 @@ public void testMiddleMaxReverse() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'b' comes before 'c' - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -451,10 +451,10 @@ public void testMiddleMaxMissingFirst() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // null comes first - assertEquals("3", searcher.doc(td.scoreDocs[0].doc).get("id")); + assertEquals("3", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); // 'b' comes before 'c' - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); - assertEquals("1", searcher.doc(td.scoreDocs[2].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[2].doc).get("id")); ir.close(); dir.close(); @@ -489,10 +489,10 @@ public void testMiddleMaxMissingLast() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // 'b' comes before 'c' - assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("1", 
searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); // null comes last - assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id")); + assertEquals("3", searcher.storedFields().document(td.scoreDocs[2].doc).get("id")); ir.close(); dir.close(); @@ -519,8 +519,8 @@ public void testMiddleMaxSingleton() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'bar' comes before 'baz' - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java index 9b1ff79e6d02..aad6552ccf83 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java @@ -81,8 +81,8 @@ public void testForward() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'bar' comes before 'baz' - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -110,8 +110,8 @@ public void testReverse() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'bar' comes before 'baz' - assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); @@ -144,9 +144,9 @@ public void testMissingFirst() throws Exception { assertEquals(3, td.totalHits.value); // 'bar' comes before 'baz' // null comes first - assertEquals("3", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id")); + assertEquals("3", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[2].doc).get("id")); ir.close(); dir.close(); @@ -178,10 +178,10 @@ public void testMissingLast() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(3, td.totalHits.value); // 'bar' comes before 'baz' - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); // null comes last - assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id")); + assertEquals("3", searcher.storedFields().document(td.scoreDocs[2].doc).get("id")); ir.close(); dir.close(); @@ -207,8 
+207,8 @@ public void testSingleton() throws Exception { TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits.value); // 'bar' comes before 'baz' - assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id")); - assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(td.scoreDocs[0].doc).get("id")); + assertEquals("2", searcher.storedFields().document(td.scoreDocs[1].doc).get("id")); ir.close(); dir.close(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestXYPointDistanceSort.java b/lucene/core/src/test/org/apache/lucene/search/TestXYPointDistanceSort.java index 0ca587232860..d0656d22edd0 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestXYPointDistanceSort.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestXYPointDistanceSort.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.SerialMergeScheduler; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.geo.ShapeTestUtil; import org.apache.lucene.tests.index.RandomIndexWriter; @@ -206,6 +207,7 @@ private void doRandomTest(int numDocs, int numQueries) throws IOException { writer.addDocument(doc); } IndexReader reader = writer.getReader(); + StoredFields storedFields = reader.storedFields(); IndexSearcher searcher = newSearcher(reader); for (int i = 0; i < numQueries; i++) { @@ -216,7 +218,7 @@ private void doRandomTest(int numDocs, int numQueries) throws IOException { Result[] expected = new Result[reader.maxDoc()]; for (int doc = 0; doc < reader.maxDoc(); doc++) { - Document targetDoc = reader.document(doc); + Document targetDoc = storedFields.document(doc); final double distance; if (targetDoc.getField("x") == null) { distance = missingValue; // missing diff --git a/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java b/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java index b16a87c3432c..49bee3f8a80e 100644 --- a/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java +++ b/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java @@ -499,7 +499,7 @@ public void testHeartRanking() throws IOException { assertEquals( "Failed: " + sim.toString(), "2", - reader.document(topDocs.scoreDocs[0].doc).get(FIELD_ID)); + reader.storedFields().document(topDocs.scoreDocs[0].doc).get(FIELD_ID)); } } diff --git a/lucene/core/src/test/org/apache/lucene/util/hnsw/KnnGraphTester.java b/lucene/core/src/test/org/apache/lucene/util/hnsw/KnnGraphTester.java index dd44898af759..d33a97151fef 100644 --- a/lucene/core/src/test/org/apache/lucene/util/hnsw/KnnGraphTester.java +++ b/lucene/core/src/test/org/apache/lucene/util/hnsw/KnnGraphTester.java @@ -53,6 +53,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; import org.apache.lucene.search.ConstantScoreScorer; @@ -405,6 +406,7 @@ private void testSearch(Path indexPath, Path queryPath, Path outputPath, int[][] totalCpuTime = TimeUnit.NANOSECONDS.toMillis(bean.getCurrentThreadCpuTime() - cpuTimeStartNs); elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - 
start); // ns -> ms + StoredFields storedFields = reader.storedFields(); for (int i = 0; i < numIters; i++) { totalVisited += results[i].totalHits.value; for (ScoreDoc doc : results[i].scoreDocs) { @@ -412,7 +414,7 @@ private void testSearch(Path indexPath, Path queryPath, Path outputPath, int[][] // there is a bug somewhere that can result in doc=NO_MORE_DOCS! I think it happens // in some degenerate case (like input query has NaN in it?) that causes no results to // be returned from HNSW search? - doc.doc = Integer.parseInt(reader.document(doc.doc).get("id")); + doc.doc = Integer.parseInt(storedFields.document(doc.doc).get("id")); } else { System.out.println("NO_MORE_DOCS!"); } diff --git a/lucene/core/src/test/org/apache/lucene/util/hnsw/TestHnswGraph.java b/lucene/core/src/test/org/apache/lucene/util/hnsw/TestHnswGraph.java index 3e8f019b7f4a..9b3f0bb5bb13 100644 --- a/lucene/core/src/test/org/apache/lucene/util/hnsw/TestHnswGraph.java +++ b/lucene/core/src/test/org/apache/lucene/util/hnsw/TestHnswGraph.java @@ -45,6 +45,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; import org.apache.lucene.index.VectorValues; @@ -213,6 +214,7 @@ public KnnVectorsFormat getKnnVectorsFormatForField(String field) { TopDocs topDocs = searcher.search(query, 5); float lastScore = -1; + StoredFields storedFields = reader.storedFields(); for (ScoreDoc scoreDoc : topDocs.scoreDocs) { if (scoreDoc.score == lastScore) { // if we have repeated score this test is invalid @@ -220,13 +222,14 @@ public KnnVectorsFormat getKnnVectorsFormatForField(String field) { } else { lastScore = scoreDoc.score; } - Document doc = reader.document(scoreDoc.doc, Set.of("id")); + Document doc = storedFields.document(scoreDoc.doc, Set.of("id")); ids1.add(doc.get("id")); docs1.add(scoreDoc.doc); } TopDocs topDocs2 = searcher2.search(query, 5); + StoredFields storedFields2 = reader2.storedFields(); for (ScoreDoc scoreDoc : topDocs2.scoreDocs) { - Document doc = reader2.document(scoreDoc.doc, Set.of("id")); + Document doc = storedFields2.document(scoreDoc.doc, Set.of("id")); ids2.add(doc.get("id")); docs2.add(scoreDoc.doc); } diff --git a/lucene/demo/src/java/org/apache/lucene/demo/SearchFiles.java b/lucene/demo/src/java/org/apache/lucene/demo/SearchFiles.java index e6195c9a8010..73e1fbe3af1a 100644 --- a/lucene/demo/src/java/org/apache/lucene/demo/SearchFiles.java +++ b/lucene/demo/src/java/org/apache/lucene/demo/SearchFiles.java @@ -31,6 +31,7 @@ import org.apache.lucene.demo.knn.KnnVectorDict; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.queryparser.classic.QueryParser; import org.apache.lucene.search.BooleanClause; @@ -205,13 +206,14 @@ public static void doPagingSearch( end = Math.min(hits.length, start + hitsPerPage); + StoredFields storedFields = searcher.storedFields(); for (int i = start; i < end; i++) { if (raw) { // output raw format System.out.println("doc=" + hits[i].doc + " score=" + hits[i].score); continue; } - Document doc = searcher.doc(hits[i].doc); + Document doc = storedFields.document(hits[i].doc); String path = doc.get("path"); if (path != null) { System.out.println((i + 1) + ". 
" + path); diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionRescorer.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionRescorer.java index 57a7b6054dd3..9997170e04ea 100644 --- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionRescorer.java +++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionRescorer.java @@ -88,9 +88,9 @@ public void testBasic() throws Exception { // Just first pass query TopDocs hits = searcher.search(query, 10); assertEquals(3, hits.totalHits.value); - assertEquals("3", r.document(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", r.document(hits.scoreDocs[1].doc).get("id")); - assertEquals("2", r.document(hits.scoreDocs[2].doc).get("id")); + assertEquals("3", r.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", r.storedFields().document(hits.scoreDocs[1].doc).get("id")); + assertEquals("2", r.storedFields().document(hits.scoreDocs[2].doc).get("id")); // Now, rescore: @@ -102,9 +102,9 @@ public void testBasic() throws Exception { hits = rescorer.rescore(searcher, hits, 10); assertEquals(3, hits.totalHits.value); - assertEquals("2", r.document(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", r.document(hits.scoreDocs[1].doc).get("id")); - assertEquals("3", r.document(hits.scoreDocs[2].doc).get("id")); + assertEquals("2", r.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", r.storedFields().document(hits.scoreDocs[1].doc).get("id")); + assertEquals("3", r.storedFields().document(hits.scoreDocs[2].doc).get("id")); String expl = rescorer diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java index 04668aff4b1c..893bcdb5f54f 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java @@ -1218,7 +1218,8 @@ public Scorer scorer(LeafReaderContext context) throws IOException { @Override public boolean matches() throws IOException { int docID = approximation.docID(); - return (Integer.parseInt(context.reader().document(docID).get("id")) + return (Integer.parseInt( + context.reader().storedFields().document(docID).get("id")) & 1) == 0; } @@ -1323,7 +1324,7 @@ public int hashCode() { TopDocs hits = s.search(baseQuery, numDocs); Map scores = new HashMap<>(); for (ScoreDoc sd : hits.scoreDocs) { - scores.put(s.doc(sd.doc).get("id"), sd.score); + scores.put(s.storedFields().document(sd.doc).get("id"), sd.score); } if (VERBOSE) { System.out.println(" verify all facets"); @@ -1624,7 +1625,7 @@ private TestFacetResult slowDrillSidewaysSearch( Map idToDocID = new HashMap<>(); for (int i = 0; i < s.getIndexReader().maxDoc(); i++) { - idToDocID.put(s.doc(i).get("id"), i); + idToDocID.put(s.storedFields().document(i).get("id"), i); } Collections.sort(hits); @@ -1668,7 +1669,8 @@ void verifyEquals( if (VERBOSE) { System.out.println(" hit " + i + " expected=" + expected.hits.get(i).id); } - assertEquals(expected.hits.get(i).id, s.doc(actual.results.get(i).doc).get("id")); + assertEquals( + expected.hits.get(i).id, s.storedFields().document(actual.results.get(i).doc).get("id")); // Score should be IDENTICAL: assertEquals(scores.get(expected.hits.get(i).id), actual.results.get(i).score, 0.0f); } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestBlockGrouping.java 
b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestBlockGrouping.java index b60f0a4becbe..0ef446101458 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestBlockGrouping.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestBlockGrouping.java @@ -61,7 +61,7 @@ public void testSimple() throws IOException { assertEquals(topDoc.scoreDocs[0].doc, tg.groups[0].scoreDocs[0].doc); for (int i = 0; i < tg.groups.length; i++) { - String bookName = searcher.doc(tg.groups[i].scoreDocs[0].doc).get("book"); + String bookName = searcher.storedFields().document(tg.groups[i].scoreDocs[0].doc).get("book"); // The contents of each group should be equal to the results of a search for // that group alone Query filtered = @@ -99,7 +99,7 @@ public void testTopLevelSort() throws IOException { assertEquals(((FieldDoc) topDoc.scoreDocs[0]).fields[0], tg.groups[0].groupSortValues[0]); for (int i = 0; i < tg.groups.length; i++) { - String bookName = searcher.doc(tg.groups[i].scoreDocs[0].doc).get("book"); + String bookName = searcher.storedFields().document(tg.groups[i].scoreDocs[0].doc).get("book"); // The contents of each group should be equal to the results of a search for // that group alone, sorted by score Query filtered = @@ -140,7 +140,7 @@ public void testWithinGroupSort() throws IOException { assertEquals(topDoc.scoreDocs[0].score, (float) tg.groups[0].groupSortValues[0], 0); for (int i = 0; i < tg.groups.length; i++) { - String bookName = searcher.doc(tg.groups[i].scoreDocs[0].doc).get("book"); + String bookName = searcher.storedFields().document(tg.groups[i].scoreDocs[0].doc).get("book"); // The contents of each group should be equal to the results of a search for // that group alone, sorted by length Query filtered = diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDistinctValuesCollector.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDistinctValuesCollector.java index 295add673b88..012c09cb0879 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDistinctValuesCollector.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestDistinctValuesCollector.java @@ -462,7 +462,7 @@ private IndexContext createIndexContext() throws Exception { DirectoryReader reader = w.getReader(); if (VERBOSE) { for (int docID = 0; docID < reader.maxDoc(); docID++) { - Document doc = reader.document(docID); + Document doc = reader.storedFields().document(docID); System.out.println( "docID=" + docID diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java index 71cc8830aad8..ca044d913183 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java @@ -33,6 +33,8 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.StoredFields; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; @@ -179,6 +181,20 @@ public Fields getTermVectors(int docID) throws IOException { return fields; } + @Override + public TermVectors termVectors() throws IOException { + 
return new TermVectors() { + @Override + public Fields get(int docID) { + if (docID != 0) { + return null; + } else { + return fields; + } + } + }; + } + @Override public int numDocs() { return 1; @@ -192,6 +208,14 @@ public int maxDoc() { @Override public void document(int docID, StoredFieldVisitor visitor) throws IOException {} + @Override + public StoredFields storedFields() throws IOException { + return new StoredFields() { + @Override + public void document(int docID, StoredFieldVisitor visitor) throws IOException {} + }; + } + @Override public LeafMetaData getMetaData() { return new LeafMetaData(Version.LATEST.major, null, null); diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java index a3d8dbb3c35a..7f32666079dc 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java @@ -22,6 +22,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; /** @@ -44,9 +45,8 @@ private TokenSources() {} * field values were concatenated. * * @param field The field to either get term vectors from or to analyze the text from. - * @param tvFields from {@link IndexReader#getTermVectors(int)}. Possibly null. For performance, - * this instance should be re-used for the same document (e.g. when highlighting multiple - * fields). + * @param tvFields from {@link TermVectors#get(int)}. Possibly null. For performance, this + * instance should be re-used for the same document (e.g. when highlighting multiple fields). * @param text the text to analyze, failing term vector un-inversion * @param analyzer the analyzer to analyze {@code text} with, failing term vector un-inversion * @param maxStartOffset Terms with a startOffset greater than this aren't returned. Use -1 for no @@ -73,9 +73,8 @@ public static TokenStream getTokenStream( * offsets. Positions are recommended on the term vector but it isn't strictly required. * * @param field The field to get term vectors from. - * @param tvFields from {@link IndexReader#getTermVectors(int)}. Possibly null. For performance, - * this instance should be re-used for the same document (e.g. when highlighting multiple - * fields). + * @param tvFields from {@link TermVectors#get(int)}. Possibly null. For performance, this + * instance should be re-used for the same document (e.g. when highlighting multiple fields). * @param maxStartOffset Terms with a startOffset greater than this aren't returned. Use -1 for no * limit. Suggest using {@link Highlighter#getMaxDocCharsToAnalyze()} - 1 * @return a token stream from term vectors. Null if no term vectors with the right options. 
@@ -114,7 +113,7 @@ public static TokenStream getAnyTokenStream( throws IOException { TokenStream ts = null; - Fields vectors = reader.getTermVectors(docId); + Fields vectors = reader.termVectors().get(docId); if (vectors != null) { Terms vector = vectors.terms(field); if (vector != null) { @@ -142,7 +141,7 @@ public static TokenStream getAnyTokenStream( IndexReader reader, int docId, String field, Analyzer analyzer) throws IOException { TokenStream ts = null; - Fields vectors = reader.getTermVectors(docId); + Fields vectors = reader.termVectors().get(docId); if (vectors != null) { Terms vector = vectors.terms(field); if (vector != null) { @@ -203,7 +202,7 @@ public static TokenStream getTokenStream(final Terms tpv) throws IOException { public static TokenStream getTokenStreamWithOffsets(IndexReader reader, int docId, String field) throws IOException { - Fields vectors = reader.getTermVectors(docId); + Fields vectors = reader.termVectors().get(docId); if (vectors == null) { return null; } @@ -223,7 +222,7 @@ public static TokenStream getTokenStreamWithOffsets(IndexReader reader, int docI @Deprecated // maintenance reasons LUCENE-6445 public static TokenStream getTokenStream( IndexReader reader, int docId, String field, Analyzer analyzer) throws IOException { - Document doc = reader.document(docId); + Document doc = reader.storedFields().document(docId); return getTokenStream(doc, field, analyzer); } diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/matchhighlight/MatchHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/matchhighlight/MatchHighlighter.java index 72887ce0d1e8..156d0e30a116 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/matchhighlight/MatchHighlighter.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/matchhighlight/MatchHighlighter.java @@ -200,7 +200,7 @@ public Status needsField(FieldInfo fieldInfo) { } }; - leafReader.document(leafDocId, visitor); + leafReader.storedFields().document(leafDocId, visitor); return visitor.getDocument(); } } diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/matchhighlight/MatchRegionRetriever.java b/lucene/highlighter/src/java/org/apache/lucene/search/matchhighlight/MatchRegionRetriever.java index aa4068177812..8b228eebee7f 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/matchhighlight/MatchRegionRetriever.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/matchhighlight/MatchRegionRetriever.java @@ -293,7 +293,7 @@ private static final class DocumentFieldValueProvider implements FieldValueProvi public DocumentFieldValueProvider( LeafReaderContext currentContext, int docId, Set preloadFields) { - docSupplier = () -> currentContext.reader().document(docId, preloadFields); + docSupplier = () -> currentContext.reader().storedFields().document(docId, preloadFields); } @Override diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PostingsWithTermVectorsOffsetStrategy.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PostingsWithTermVectorsOffsetStrategy.java index ae9405b5ff3f..8aee67d6ac12 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PostingsWithTermVectorsOffsetStrategy.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PostingsWithTermVectorsOffsetStrategy.java @@ -35,7 +35,7 @@ public PostingsWithTermVectorsOffsetStrategy(UHComponents components) { @Override public OffsetsEnum getOffsetsEnum(LeafReader leafReader, int docId, 
String content) throws IOException { - Terms docTerms = leafReader.getTermVector(docId, getField()); + Terms docTerms = leafReader.termVectors().get(docId, getField()); if (docTerms == null) { return OffsetsEnum.EMPTY; } diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorOffsetStrategy.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorOffsetStrategy.java index 84c0120182d3..b6c8fb57d7d3 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorOffsetStrategy.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorOffsetStrategy.java @@ -40,7 +40,7 @@ public UnifiedHighlighter.OffsetSource getOffsetSource() { @Override public OffsetsEnum getOffsetsEnum(LeafReader reader, int docId, String content) throws IOException { - Terms tvTerms = reader.getTermVector(docId, getField()); + Terms tvTerms = reader.termVectors().get(docId, getField()); if (tvTerms == null) { return OffsetsEnum.EMPTY; } diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java index 44385b9085ad..615db5ecd5ff 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java @@ -46,7 +46,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.queries.spans.SpanQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; @@ -1320,6 +1322,7 @@ protected List loadFieldValues( new ArrayList<>(cacheCharsThreshold == 0 ? 1 : (int) Math.min(64, docIter.cost())); LimitedStoredFieldVisitor visitor = newLimitedStoredFieldsVisitor(fields); + StoredFields storedFields = searcher.storedFields(); int sumChars = 0; do { int docId = docIter.nextDoc(); @@ -1327,7 +1330,7 @@ protected List loadFieldValues( break; } visitor.init(); - searcher.doc(docId, visitor); + storedFields.document(docId, visitor); CharSequence[] valuesByField = visitor.getValuesByField(); docListOfFields.add(valuesByField); for (CharSequence val : valuesByField) { @@ -1418,9 +1421,9 @@ CharSequence[] getValuesByField() { } /** - * Wraps an IndexReader that remembers/caches the last call to {@link - * LeafReader#getTermVectors(int)} so that if the next call has the same ID, then it is reused. If - * TV's were column-stride (like doc-values), there would be no need for this. + * Wraps an IndexReader that remembers/caches the last call to {@link TermVectors#get(int)} so + * that if the next call has the same ID, then it is reused. If TV's were column-stride (like + * doc-values), there would be no need for this. 
*/ private static class TermVectorReusingLeafReader extends FilterLeafReader { @@ -1458,6 +1461,21 @@ public Fields getTermVectors(int docID) throws IOException { return tvFields; } + @Override + public TermVectors termVectors() throws IOException { + TermVectors orig = in.termVectors(); + return new TermVectors() { + @Override + public Fields get(int docID) throws IOException { + if (docID != lastDocId) { + lastDocId = docID; + tvFields = orig.get(docID); + } + return tvFields; + } + }; + } + @Override public CacheHelper getCoreCacheHelper() { return null; diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java index 4c6ddf3b34dd..e46ecbfb466b 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java @@ -177,23 +177,25 @@ protected Field[] getFields(IndexReader reader, int docId, final String fieldNam throws IOException { // according to javadoc, doc.getFields(fieldName) cannot be used with lazy loaded field??? final List fields = new ArrayList<>(); - reader.document( - docId, - new StoredFieldVisitor() { - - @Override - public void stringField(FieldInfo fieldInfo, String value) { - Objects.requireNonNull(value, "String value should not be null"); - FieldType ft = new FieldType(TextField.TYPE_STORED); - ft.setStoreTermVectors(fieldInfo.hasVectors()); - fields.add(new Field(fieldInfo.name, value, ft)); - } - - @Override - public Status needsField(FieldInfo fieldInfo) { - return fieldInfo.name.equals(fieldName) ? Status.YES : Status.NO; - } - }); + reader + .storedFields() + .document( + docId, + new StoredFieldVisitor() { + + @Override + public void stringField(FieldInfo fieldInfo, String value) { + Objects.requireNonNull(value, "String value should not be null"); + FieldType ft = new FieldType(TextField.TYPE_STORED); + ft.setStoreTermVectors(fieldInfo.hasVectors()); + fields.add(new Field(fieldInfo.name, value, ft)); + } + + @Override + public Status needsField(FieldInfo fieldInfo) { + return fieldInfo.name.equals(fieldName) ? 
Status.YES : Status.NO; + } + }); return fields.toArray(new Field[fields.size()]); } diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java index 3508710aed3d..93d81f234dd6 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java @@ -57,7 +57,7 @@ public FieldTermStack( // just return to make null snippet if un-matched fieldName specified when fieldMatch == true if (termSet == null) return; - final Fields vectors = reader.getTermVectors(docId); + final Fields vectors = reader.termVectors().get(docId); if (vectors == null) { // null snippet return; diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighter.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighter.java index 4779562ced2d..b8bd645935e7 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighter.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighter.java @@ -49,6 +49,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.queries.CommonTermsQuery; import org.apache.lucene.queries.function.FunctionScoreQuery; import org.apache.lucene.queries.payloads.SpanPayloadCheckQuery; @@ -139,8 +140,8 @@ public class TestHighlighter extends BaseTokenStreamTestCase implements Formatte private TokenStream getAnyTokenStream(String fieldName, int docId) throws IOException { return TokenSources.getTokenStream( fieldName, - searcher.getIndexReader().getTermVectors(docId), - searcher.doc(docId).get(fieldName), + searcher.getIndexReader().termVectors().get(docId), + searcher.storedFields().document(docId).get(fieldName), analyzer, -1); } @@ -156,7 +157,7 @@ public void testFunctionScoreQuery() throws Exception { Highlighter highlighter = new Highlighter(scorer); final int docId0 = hits.scoreDocs[0].doc; - Document doc = searcher.doc(docId0); + Document doc = searcher.storedFields().document(docId0); String storedField = doc.get(FIELD_NAME); TokenStream stream = getAnyTokenStream(FIELD_NAME, docId0); @@ -180,7 +181,7 @@ public void testQueryScorerHits() throws Exception { for (int i = 0; i < hits.scoreDocs.length; i++) { final int docId = hits.scoreDocs[i].doc; - Document doc = searcher.doc(docId); + Document doc = searcher.storedFields().document(docId); String storedField = doc.get(FIELD_NAME); TokenStream stream = getAnyTokenStream(FIELD_NAME, docId); @@ -208,7 +209,7 @@ public void testHighlightingCommonTermsQuery() throws Exception { Highlighter highlighter = new Highlighter(scorer); final int docId0 = hits.scoreDocs[0].doc; - Document doc = searcher.doc(docId0); + Document doc = searcher.storedFields().document(docId0); String storedField = doc.get(FIELD_NAME); TokenStream stream = getAnyTokenStream(FIELD_NAME, docId0); @@ -220,7 +221,7 @@ public void testHighlightingCommonTermsQuery() throws Exception { fragment); final int docId1 = hits.scoreDocs[1].doc; - doc = searcher.doc(docId1); + doc = searcher.storedFields().document(docId1); storedField = doc.get(FIELD_NAME); stream = getAnyTokenStream(FIELD_NAME, docId1); @@ -243,12 +244,12 @@ public void testHighlightingSynonymQuery() 
throws Exception { TokenStream stream = getAnyTokenStream(FIELD_NAME, 2); Fragmenter fragmenter = new SimpleSpanFragmenter(scorer); highlighter.setTextFragmenter(fragmenter); - String storedField = searcher.doc(2).get(FIELD_NAME); + String storedField = searcher.storedFields().document(2).get(FIELD_NAME); String fragment = highlighter.getBestFragment(stream, storedField); assertEquals("JFK has been shot", fragment); stream = getAnyTokenStream(FIELD_NAME, 3); - storedField = searcher.doc(3).get(FIELD_NAME); + storedField = searcher.storedFields().document(3).get(FIELD_NAME); fragment = highlighter.getBestFragment(stream, storedField); assertEquals("John Kennedy has been shot", fragment); } @@ -293,7 +294,7 @@ public boolean equals(Object obj) { Highlighter highlighter = new Highlighter(scorer); final int docId0 = hits.scoreDocs[0].doc; - Document doc = searcher.doc(docId0); + Document doc = searcher.storedFields().document(docId0); String storedField = doc.get(FIELD_NAME); TokenStream stream = getAnyTokenStream(FIELD_NAME, docId0); @@ -305,7 +306,7 @@ public boolean equals(Object obj) { fragment); final int docId1 = hits.scoreDocs[1].doc; - doc = searcher.doc(docId1); + doc = searcher.storedFields().document(docId1); storedField = doc.get(FIELD_NAME); stream = getAnyTokenStream(FIELD_NAME, docId1); @@ -373,7 +374,7 @@ public void testSimpleSpanHighlighter() throws Exception { Highlighter highlighter = new Highlighter(scorer); for (int i = 0; i < hits.totalHits.value; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.storedFields().document(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -398,7 +399,7 @@ public void testSimpleSpanHighlighterWithStopWordsStraddlingFragmentBoundaries() assertEquals("Must have one hit", 1, hits.totalHits.value); for (int i = 0; i < hits.totalHits.value; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.storedFields().document(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text); highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 36)); @@ -465,7 +466,7 @@ public void testSimpleQueryScorerPhraseHighlighting() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -498,7 +499,7 @@ public void testSimpleQueryScorerPhraseHighlighting() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -531,7 +532,7 @@ public void testSimpleQueryScorerPhraseHighlighting() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -560,7 +561,7 @@ public void testSpanRegexQuery() throws Exception { for (int i = 0; i < 
hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -587,7 +588,7 @@ public void testRegexQuery() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -614,7 +615,7 @@ public void testExternalReader() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -644,7 +645,8 @@ public void testDimensionalRangeQuery() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { String text = searcher - .doc(hits.scoreDocs[i].doc) + .storedFields() + .document(hits.scoreDocs[i].doc) .getField(NUMERIC_FIELD_NAME) .numericValue() .toString(); @@ -670,7 +672,7 @@ public void testSimpleQueryScorerPhraseHighlighting2() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -692,7 +694,7 @@ public void testSimpleQueryScorerPhraseHighlighting3() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); QueryScorer scorer = new QueryScorer(query, FIELD_NAME); @@ -726,7 +728,7 @@ public void testSimpleSpanFragmenter() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -747,7 +749,7 @@ public void testSimpleSpanFragmenter() throws Exception { highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits.value; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.storedFields().document(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text); highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 20)); @@ -775,7 +777,7 @@ public void testPosTermStdTerm() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -856,7 +858,7 @@ public void testSimpleQueryTermScorerHighlighter() throws Exception { int maxNumFragmentsRequired = 2; for (int i = 0; i < hits.totalHits.value; i++) { final int docId = 
hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -1101,7 +1103,7 @@ public void testConstantScoreMultiTermQuery() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); int maxNumFragmentsRequired = 2; @@ -1129,7 +1131,7 @@ public void testConstantScoreMultiTermQuery() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); int maxNumFragmentsRequired = 2; @@ -1157,7 +1159,7 @@ public void testConstantScoreMultiTermQuery() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); int maxNumFragmentsRequired = 2; @@ -1355,7 +1357,7 @@ public void run() throws Exception { numHighlights = 0; for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -1371,7 +1373,7 @@ public void run() throws Exception { numHighlights = 0; for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); Highlighter highlighter = getHighlighter(query, FIELD_NAME, TestHighlighter.this); @@ -1384,7 +1386,7 @@ public void run() throws Exception { numHighlights = 0; for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); @@ -1519,7 +1521,7 @@ public void run() throws Exception { // new Highlighter(TestHighlighter.this, new QueryTermScorer(query)); for (int i = 0; i < hits.totalHits.value; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.storedFields().document(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text); Highlighter highlighter = getHighlighter(query, FIELD_NAME, TestHighlighter.this); String result = highlighter.getBestFragment(tokenStream, text); @@ -1544,7 +1546,7 @@ public void run() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream 
= getAnyTokenStream(FIELD_NAME, docId); @@ -1758,7 +1760,7 @@ public void run() throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(FIELD_NAME); TokenStream tokenStream = getAnyTokenStream(FIELD_NAME, docId); Highlighter highlighter = @@ -2148,7 +2150,7 @@ private void searchIndex() throws IOException, InvalidTokenOffsetsException { TopDocs hits = searcher.search(query, 10); for (int i = 0; i < hits.totalHits.value; i++) { - Document doc = searcher.doc(hits.scoreDocs[i].doc); + Document doc = searcher.storedFields().document(hits.scoreDocs[i].doc); String result = h.getBestFragment(a, "t_text1", doc.get("t_text1")); if (VERBOSE) System.out.println("result:" + result); assertEquals("more random words for second field", result); @@ -2217,7 +2219,7 @@ public void doSearching(Query unReWrittenQuery) throws Exception { public void assertExpectedHighlightCount( final int maxNumFragmentsRequired, final int expectedHighlights) throws Exception { for (int i = 0; i < hits.totalHits.value; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.storedFields().document(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text); QueryScorer scorer = new QueryScorer(query, FIELD_NAME); Highlighter highlighter = new Highlighter(this, scorer); @@ -2482,20 +2484,17 @@ void doStandardHighlights( boolean expandMT) throws Exception { + TermVectors termVectors = searcher.getIndexReader().termVectors(); for (int i = 0; i < hits.totalHits.value; i++) { final int docId = hits.scoreDocs[i].doc; - final Document doc = searcher.doc(docId); + final Document doc = searcher.storedFields().document(docId); String text = doc.get(TestHighlighter.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; Scorer scorer = null; TokenStream tokenStream = TokenSources.getTokenStream( - TestHighlighter.FIELD_NAME, - searcher.getIndexReader().getTermVectors(docId), - text, - analyzer, - -1); + TestHighlighter.FIELD_NAME, termVectors.get(docId), text, analyzer, -1); if (mode == QUERY) { scorer = new QueryScorer(query); } else if (mode == QUERY_TERM) { diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighterPhrase.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighterPhrase.java index ebff31cf5bbb..f52f277cb37f 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighterPhrase.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestHighlighterPhrase.java @@ -79,7 +79,7 @@ public void testConcurrentPhrase() throws IOException, InvalidTokenOffsetsExcept new SimpleHTMLFormatter(), new SimpleHTMLEncoder(), new QueryScorer(phraseQuery)); final TokenStream tokenStream = - TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.getTermVectors(0), -1); + TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.termVectors().get(0), -1); assertEquals( highlighter.getBestFragment(new TokenStreamConcurrent(), TEXT), highlighter.getBestFragment(tokenStream, TEXT)); @@ -134,7 +134,7 @@ public void testConcurrentSpan() throws IOException, InvalidTokenOffsetsExceptio assertEquals(0, position); final TokenStream tokenStream = TokenSources.getTermVectorTokenStreamOrNull( - FIELD, indexReader.getTermVectors(position), -1); + 
FIELD, indexReader.termVectors().get(position), -1); assertEquals( highlighter.getBestFragment(new TokenStreamConcurrent(), TEXT), highlighter.getBestFragment(tokenStream, TEXT)); @@ -175,7 +175,7 @@ public void testSparsePhrase() throws IOException, InvalidTokenOffsetsException new Highlighter( new SimpleHTMLFormatter(), new SimpleHTMLEncoder(), new QueryScorer(phraseQuery)); final TokenStream tokenStream = - TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.getTermVectors(0), -1); + TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.termVectors().get(0), -1); assertEquals( highlighter.getBestFragment(new TokenStreamSparse(), TEXT), highlighter.getBestFragment(tokenStream, TEXT)); @@ -214,7 +214,7 @@ public void testSparsePhraseWithNoPositions() throws IOException, InvalidTokenOf new Highlighter( new SimpleHTMLFormatter(), new SimpleHTMLEncoder(), new QueryScorer(phraseQuery)); final TokenStream tokenStream = - TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.getTermVectors(0), -1); + TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.termVectors().get(0), -1); assertEquals( "the fox did not jump", highlighter.getBestFragment(tokenStream, TEXT)); } finally { @@ -260,7 +260,7 @@ public void testSparseSpan() throws IOException, InvalidTokenOffsetsException { new Highlighter( new SimpleHTMLFormatter(), new SimpleHTMLEncoder(), new QueryScorer(phraseQuery)); final TokenStream tokenStream = - TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.getTermVectors(0), -1); + TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.termVectors().get(0), -1); assertEquals( highlighter.getBestFragment(new TokenStreamSparse(), TEXT), highlighter.getBestFragment(tokenStream, TEXT)); diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestTokenSources.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestTokenSources.java index 8b622831ac14..087b460c1da0 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestTokenSources.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TestTokenSources.java @@ -133,7 +133,7 @@ public void testOverlapWithOffset() throws IOException, InvalidTokenOffsetsExcep new Highlighter( new SimpleHTMLFormatter(), new SimpleHTMLEncoder(), new QueryScorer(query)); final TokenStream tokenStream = - TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.getTermVectors(0), -1); + TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.termVectors().get(0), -1); assertEquals("the fox did not jump", highlighter.getBestFragment(tokenStream, TEXT)); } finally { indexReader.close(); @@ -176,7 +176,7 @@ public void testOverlapWithPositionsAndOffset() throws IOException, InvalidToken new Highlighter( new SimpleHTMLFormatter(), new SimpleHTMLEncoder(), new QueryScorer(query)); final TokenStream tokenStream = - TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.getTermVectors(0), -1); + TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.termVectors().get(0), -1); assertEquals("the fox did not jump", highlighter.getBestFragment(tokenStream, TEXT)); } finally { indexReader.close(); @@ -220,7 +220,7 @@ public void testOverlapWithOffsetExactPhrase() throws IOException, InvalidTokenO new Highlighter( new SimpleHTMLFormatter(), new SimpleHTMLEncoder(), new QueryScorer(phraseQuery)); final TokenStream tokenStream = - TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.getTermVectors(0), -1); + 
TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.termVectors().get(0), -1); assertEquals("the fox did not jump", highlighter.getBestFragment(tokenStream, TEXT)); } finally { indexReader.close(); @@ -265,7 +265,7 @@ public void testOverlapWithPositionsAndOffsetExactPhrase() new Highlighter( new SimpleHTMLFormatter(), new SimpleHTMLEncoder(), new QueryScorer(phraseQuery)); final TokenStream tokenStream = - TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.getTermVectors(0), -1); + TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.termVectors().get(0), -1); assertEquals("the fox did not jump", highlighter.getBestFragment(tokenStream, TEXT)); } finally { indexReader.close(); @@ -292,7 +292,7 @@ public void testTermVectorWithoutOffsetsDoesntWork() try { assertEquals(1, indexReader.numDocs()); final TokenStream tokenStream = - TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.getTermVectors(0), -1); + TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.termVectors().get(0), -1); assertNull(tokenStream); } finally { indexReader.close(); @@ -337,7 +337,7 @@ public void testPayloads() throws Exception { assertEquals(1, reader.numDocs()); TokenStream ts = - TokenSources.getTermVectorTokenStreamOrNull("field", reader.getTermVectors(0), -1); + TokenSources.getTermVectorTokenStreamOrNull("field", reader.termVectors().get(0), -1); CharTermAttribute termAtt = ts.getAttribute(CharTermAttribute.class); PositionIncrementAttribute posIncAtt = ts.getAttribute(PositionIncrementAttribute.class); @@ -421,7 +421,7 @@ public void testRandomizedRoundTrip() throws Exception { assertEquals(1, reader.numDocs()); TokenStream vectorTokenStream = - TokenSources.getTermVectorTokenStreamOrNull("field", reader.getTermVectors(0), -1); + TokenSources.getTermVectorTokenStreamOrNull("field", reader.termVectors().get(0), -1); // sometimes check payloads PayloadAttribute payloadAttribute = null; @@ -469,7 +469,7 @@ public void testMaxStartOffsetConsistency() throws IOException { reader = writer.getReader(); } try { - Fields tvFields = reader.getTermVectors(0); + Fields tvFields = reader.termVectors().get(0); for (int maxStartOffset = -1; maxStartOffset <= TEXT.length(); maxStartOffset++) { TokenStream tvStream = TokenSources.getTokenStream("fld_tv", tvFields, TEXT, analyzer, maxStartOffset); diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchRegionRetriever.java b/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchRegionRetriever.java index 008b83347471..8db2641f0897 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchRegionRetriever.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchRegionRetriever.java @@ -749,7 +749,7 @@ private List highlights( (docId, leafReader, leafDocId, fieldHighlights) -> { StringBuilder sb = new StringBuilder(); - Document document = leafReader.document(leafDocId); + Document document = leafReader.storedFields().document(leafDocId); formatter .apply(document, new TreeMap<>(fieldHighlights)) .forEach( diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighter.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighter.java index 95feb4165708..6cc524a9fac4 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighter.java +++ 
b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighter.java @@ -1032,7 +1032,7 @@ public void testMultipleDocs() throws Exception { String[] snippets = highlighter.highlight("body", query, hits); assertEquals(numDocs, snippets.length); for (int hit = 0; hit < numDocs; hit++) { - Document doc = searcher.doc(hits.scoreDocs[hit].doc); + Document doc = searcher.storedFields().document(hits.scoreDocs[hit].doc); int id = Integer.parseInt(doc.get("id")); String expected = "the answer is " + id; if ((id & 1) == 0) { diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermIntervals.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermIntervals.java index b3ff90662c51..d4d8f03f337d 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermIntervals.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermIntervals.java @@ -868,7 +868,7 @@ public void testMultipleDocs() throws Exception { String[] snippets = highlighter.highlight("body", query, hits); assertEquals(numDocs, snippets.length); for (int hit = 0; hit < numDocs; hit++) { - Document doc = searcher.doc(hits.scoreDocs[hit].doc); + Document doc = searcher.storedFields().document(hits.scoreDocs[hit].doc); int id = Integer.parseInt(doc.get("id")); String expected = "the answer is " + id; if ((id & 1) == 0) { diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermVec.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermVec.java index 27296a6b0a18..e3fba7bea728 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermVec.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermVec.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.ParallelLeafReader; import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; @@ -123,7 +124,7 @@ public void testFetchTermVecsOncePerDoc() throws IOException { assertArrayEquals(expectedSnippetsByDoc, fieldToSnippets.get(field)); } - ir.document(0); // ensure this works because the ir hasn't been closed + ir.storedFields().document(0); // ensure this works because the ir hasn't been closed ir.close(); } @@ -136,17 +137,24 @@ public LeafReader wrap(LeafReader reader) { BitSet seenDocIDs = new BitSet(); @Override - public Fields getTermVectors(int docID) throws IOException { - // if we're invoked by ParallelLeafReader then we can't do our assertion. TODO see - // LUCENE-6868 - if (callStackContains(ParallelLeafReader.class) == false - && callStackContains(CheckIndex.class) == false) { - assertFalse( - "Should not request TVs for doc more than once.", seenDocIDs.get(docID)); - seenDocIDs.set(docID); - } - - return super.getTermVectors(docID); + public TermVectors termVectors() throws IOException { + TermVectors orig = in.termVectors(); + return new TermVectors() { + @Override + public Fields get(int docID) throws IOException { + // if we're invoked by ParallelLeafReader then we can't do our assertion. 
TODO + // see + // LUCENE-6868 + if (callStackContains(ParallelLeafReader.class) == false + && callStackContains(CheckIndex.class) == false) { + assertFalse( + "Should not request TVs for doc more than once.", seenDocIDs.get(docID)); + seenDocIDs.set(docID); + } + + return orig.get(docID); + } + }; } @Override diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java index 74b4558b4033..b77e634aac20 100644 --- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java +++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java @@ -136,22 +136,22 @@ public void testEmptyChildFilter() throws Exception { assertEquals( asSet("Lisa", "Frank"), asSet( - s.doc(topDocs.scoreDocs[0].doc).get("name"), - s.doc(topDocs.scoreDocs[1].doc).get("name"))); + s.storedFields().document(topDocs.scoreDocs[0].doc).get("name"), + s.storedFields().document(topDocs.scoreDocs[1].doc).get("name"))); ParentChildrenBlockJoinQuery childrenQuery = new ParentChildrenBlockJoinQuery( parentsFilter, childQuery.build(), topDocs.scoreDocs[0].doc); TopDocs matchingChildren = s.search(childrenQuery, 1); assertEquals(1, matchingChildren.totalHits.value); - assertEquals("java", s.doc(matchingChildren.scoreDocs[0].doc).get("skill")); + assertEquals("java", s.storedFields().document(matchingChildren.scoreDocs[0].doc).get("skill")); childrenQuery = new ParentChildrenBlockJoinQuery( parentsFilter, childQuery.build(), topDocs.scoreDocs[1].doc); matchingChildren = s.search(childrenQuery, 1); assertEquals(1, matchingChildren.totalHits.value); - assertEquals("java", s.doc(matchingChildren.scoreDocs[0].doc).get("skill")); + assertEquals("java", s.storedFields().document(matchingChildren.scoreDocs[0].doc).get("skill")); r.close(); dir.close(); @@ -208,22 +208,22 @@ public void testBQShouldJoinedChild() throws Exception { assertEquals( asSet("Lisa", "Frank"), asSet( - s.doc(topDocs.scoreDocs[0].doc).get("name"), - s.doc(topDocs.scoreDocs[1].doc).get("name"))); + s.storedFields().document(topDocs.scoreDocs[0].doc).get("name"), + s.storedFields().document(topDocs.scoreDocs[1].doc).get("name"))); ParentChildrenBlockJoinQuery childrenQuery = new ParentChildrenBlockJoinQuery( parentsFilter, childQuery.build(), topDocs.scoreDocs[0].doc); TopDocs matchingChildren = s.search(childrenQuery, 1); assertEquals(1, matchingChildren.totalHits.value); - assertEquals("java", s.doc(matchingChildren.scoreDocs[0].doc).get("skill")); + assertEquals("java", s.storedFields().document(matchingChildren.scoreDocs[0].doc).get("skill")); childrenQuery = new ParentChildrenBlockJoinQuery( parentsFilter, childQuery.build(), topDocs.scoreDocs[1].doc); matchingChildren = s.search(childrenQuery, 1); assertEquals(1, matchingChildren.totalHits.value); - assertEquals("java", s.doc(matchingChildren.scoreDocs[0].doc).get("skill")); + assertEquals("java", s.storedFields().document(matchingChildren.scoreDocs[0].doc).get("skill")); r.close(); dir.close(); @@ -280,7 +280,7 @@ public void testSimple() throws Exception { // assertEquals(1, results.totalHitCount); assertEquals(1, topDocs.totalHits.value); - Document parentDoc = s.doc(topDocs.scoreDocs[0].doc); + Document parentDoc = s.storedFields().document(topDocs.scoreDocs[0].doc); assertEquals("Lisa", parentDoc.get("name")); ParentChildrenBlockJoinQuery childrenQuery = @@ -288,7 +288,7 @@ public void testSimple() throws Exception { parentsFilter, childQuery.build(), topDocs.scoreDocs[0].doc); TopDocs 
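// A minimal sketch, not from this patch: the wrapping idiom used for TermVectors above
// also works for stored fields, e.g. to count how often documents are materialized in a
// test. "fetchCount" is an assumed AtomicLong on the enclosing FilterLeafReader
// subclass; "in" is its wrapped reader.
@Override
public StoredFields storedFields() throws IOException {
  StoredFields orig = in.storedFields();
  return new StoredFields() {
    @Override
    public void document(int docID, StoredFieldVisitor visitor) throws IOException {
      fetchCount.incrementAndGet(); // assumed counter, not part of the Lucene API
      orig.document(docID, visitor);
    }
  };
}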
matchingChildren = s.search(childrenQuery, 1); assertEquals(1, matchingChildren.totalHits.value); - assertEquals("java", s.doc(matchingChildren.scoreDocs[0].doc).get("skill")); + assertEquals("java", s.storedFields().document(matchingChildren.scoreDocs[0].doc).get("skill")); // System.out.println("TEST: now test up"); @@ -301,7 +301,7 @@ public void testSimple() throws Exception { // System.out.println("FULL: " + fullChildQuery); TopDocs hits = s.search(fullChildQuery.build(), 10); assertEquals(1, hits.totalHits.value); - Document childDoc = s.doc(hits.scoreDocs[0].doc); + Document childDoc = s.storedFields().document(hits.scoreDocs[0].doc); // System.out.println("CHILD = " + childDoc + " docID=" + hits.scoreDocs[0].doc); assertEquals("java", childDoc.get("skill")); assertEquals(2007, childDoc.getField("year").numericValue()); @@ -399,7 +399,7 @@ public void testSimpleFilter() throws Exception { .build(); TopDocs ukOnly = s.search(query, 1); assertEquals("has filter - single passed", 1, ukOnly.totalHits.value); - assertEquals("Lisa", r.document(ukOnly.scoreDocs[0].doc).get("name")); + assertEquals("Lisa", r.storedFields().document(ukOnly.scoreDocs[0].doc).get("name")); query = new BooleanQuery.Builder() @@ -409,7 +409,7 @@ public void testSimpleFilter() throws Exception { // looking for US candidates TopDocs usThen = s.search(query, 1); assertEquals("has filter - single passed", 1, usThen.totalHits.value); - assertEquals("Frank", r.document(usThen.scoreDocs[0].doc).get("name")); + assertEquals("Frank", r.storedFields().document(usThen.scoreDocs[0].doc).get("name")); TermQuery us = new TermQuery(new Term("country", "United States")); assertEquals( @@ -449,7 +449,7 @@ private Document getParentDoc(IndexReader reader, BitSetProducer parents, int ch final int subIndex = ReaderUtil.subIndex(childDocID, leaves); final LeafReaderContext leaf = leaves.get(subIndex); final BitSet bits = parents.getBitSet(leaf); - return leaf.reader().document(bits.nextSetBit(childDocID - leaf.docBase)); + return leaf.reader().storedFields().document(bits.nextSetBit(childDocID - leaf.docBase)); } public void testBoostBug() throws Exception { @@ -669,7 +669,7 @@ public void testRandom() throws Exception { " docID=" + docIDX + " doc=" - + joinR.document(docIDX) + + joinR.storedFields().document(docIDX) + " deleted?=" + (liveDocs != null && liveDocs.get(docIDX) == false)); } @@ -817,7 +817,7 @@ public void testRandom() throws Exception { + parentAndChildSort); final ScoreDoc[] hits = results.scoreDocs; for (int hitIDX = 0; hitIDX < hits.length; hitIDX++) { - final Document doc = s.doc(hits[hitIDX].doc); + final Document doc = s.storedFields().document(hits[hitIDX].doc); // System.out.println(" score=" + hits[hitIDX].score + " parentID=" + doc.get("parentID") // + " childID=" + doc.get("childID") + " (docID=" + hits[hitIDX].doc + ")"); System.out.println( @@ -849,7 +849,7 @@ public void testRandom() throws Exception { ParentChildrenBlockJoinQuery childrenQuery = new ParentChildrenBlockJoinQuery(parentsFilter, childQuery, parentHit.doc); TopDocs childTopDocs = joinS.search(childrenQuery, maxNumChildrenPerParent, childSort); - final Document parentDoc = joinS.doc(parentHit.doc); + final Document parentDoc = joinS.storedFields().document(parentHit.doc); joinResults.put(Integer.valueOf(parentDoc.get("parentID")), childTopDocs); } @@ -866,7 +866,7 @@ public void testRandom() throws Exception { System.out.println( " group parentID=" + entry.getKey() + " (docID=" + entry.getKey() + ")"); for (ScoreDoc childHit : 
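// A minimal sketch, not from this patch: getParentDoc above shows the per-leaf form of
// the new API. In general, a top-level docID is resolved to its segment first and the
// leaf's own StoredFields is used with the segment-local id (usual
// org.apache.lucene.index and org.apache.lucene.document imports assumed).
static Document loadViaLeaf(IndexReader reader, int docID) throws IOException {
  List<LeafReaderContext> leaves = reader.leaves();
  LeafReaderContext leaf = leaves.get(ReaderUtil.subIndex(docID, leaves));
  return leaf.reader().storedFields().document(docID - leaf.docBase);
}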
entry.getValue().scoreDocs) { - final Document doc = joinS.doc(childHit.doc); + final Document doc = joinS.storedFields().document(childHit.doc); // System.out.println(" score=" + childHit.score + " childID=" + // doc.get("childID") + " (docID=" + childHit.doc + ")"); System.out.println( @@ -888,7 +888,7 @@ public void testRandom() throws Exception { TopDocs b = joinS.search(childJoinQuery, 10); for (ScoreDoc hit : b.scoreDocs) { Explanation explanation = joinS.explain(childJoinQuery, hit.doc); - Document document = joinS.doc(hit.doc - 1); + Document document = joinS.storedFields().document(hit.doc - 1); int childId = Integer.parseInt(document.get("childID")); // System.out.println(" hit docID=" + hit.doc + " childId=" + childId + " parentId=" + // document.get("parentID")); @@ -1040,7 +1040,7 @@ public void testRandom() throws Exception { if (VERBOSE) { System.out.println(" " + results2.totalHits.value + " totalHits:"); for (ScoreDoc sd : results2.scoreDocs) { - final Document doc = s.doc(sd.doc); + final Document doc = s.storedFields().document(sd.doc); System.out.println( " childID=" + doc.get("childID") @@ -1060,7 +1060,7 @@ public void testRandom() throws Exception { if (VERBOSE) { System.out.println(" " + joinResults2.totalHits.value + " totalHits:"); for (ScoreDoc sd : joinResults2.scoreDocs) { - final Document doc = joinS.doc(sd.doc); + final Document doc = joinS.storedFields().document(sd.doc); final Document parentDoc = getParentDoc(joinR, parentsFilter, sd.doc); System.out.println( " childID=" @@ -1088,8 +1088,8 @@ private void compareChildHits( for (int hitCount = 0; hitCount < results.scoreDocs.length; hitCount++) { ScoreDoc hit = results.scoreDocs[hitCount]; ScoreDoc joinHit = joinResults.scoreDocs[hitCount]; - Document doc1 = r.document(hit.doc); - Document doc2 = joinR.document(joinHit.doc); + Document doc1 = r.storedFields().document(hit.doc); + Document doc2 = joinR.storedFields().document(joinHit.doc); assertEquals("hit " + hitCount + " differs", doc1.get("childID"), doc2.get("childID")); // don't compare scores -- they are expected to differ @@ -1109,7 +1109,7 @@ private void compareHits( int childHitSlot = 0; TopDocs childHits = new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]); for (ScoreDoc controlHit : controlHits.scoreDocs) { - Document controlDoc = r.document(controlHit.doc); + Document controlDoc = r.storedFields().document(controlHit.doc); int parentID = Integer.parseInt(controlDoc.get("parentID")); if (parentID != currentParentID) { assertEquals(childHitSlot, childHits.scoreDocs.length); @@ -1119,7 +1119,7 @@ private void compareHits( } String controlChildID = controlDoc.get("childID"); - Document childDoc = joinR.document(childHits.scoreDocs[childHitSlot++].doc); + Document childDoc = joinR.storedFields().document(childHits.scoreDocs[childHitSlot++].doc); String childID = childDoc.get("childID"); assertEquals(controlChildID, childID); } @@ -1176,7 +1176,7 @@ public void testMultiChildTypes() throws Exception { final TopDocs topDocs = s.search(fullQuery.build(), 10); assertEquals(1, topDocs.totalHits.value); - Document parentDoc = s.doc(topDocs.scoreDocs[0].doc); + Document parentDoc = s.storedFields().document(topDocs.scoreDocs[0].doc); assertEquals("Lisa", parentDoc.get("name")); ParentChildrenBlockJoinQuery childrenQuery = @@ -1184,14 +1184,15 @@ public void testMultiChildTypes() throws Exception { parentsFilter, childJobQuery.build(), topDocs.scoreDocs[0].doc); TopDocs matchingChildren = s.search(childrenQuery, 1); assertEquals(1, 
matchingChildren.totalHits.value); - assertEquals("java", s.doc(matchingChildren.scoreDocs[0].doc).get("skill")); + assertEquals("java", s.storedFields().document(matchingChildren.scoreDocs[0].doc).get("skill")); childrenQuery = new ParentChildrenBlockJoinQuery( parentsFilter, childQualificationQuery.build(), topDocs.scoreDocs[0].doc); matchingChildren = s.search(childrenQuery, 1); assertEquals(1, matchingChildren.totalHits.value); - assertEquals("maths", s.doc(matchingChildren.scoreDocs[0].doc).get("qualification")); + assertEquals( + "maths", s.storedFields().document(matchingChildren.scoreDocs[0].doc).get("qualification")); r.close(); dir.close(); @@ -1593,7 +1594,7 @@ public void testMultiChildQueriesOfDiffParentLevels() throws Exception { for (ScoreDoc sd : hits.scoreDocs) { // since we're looking for children of jobs, all results must be qualifications - String q = r.document(sd.doc).get("qualification"); + String q = r.storedFields().document(sd.doc).get("qualification"); assertNotNull(sd.doc + " has no qualification", q); assertTrue(q + " MUST contain jv" + qjv, q.contains("jv" + qjv)); assertTrue(q + " MUST contain rv" + qrv, q.contains("rv" + qrv)); diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java index 7700d96e3b52..cf90a633df8a 100644 --- a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java +++ b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java @@ -536,7 +536,7 @@ public void testMinMaxScore() throws Exception { assertEquals(numParents, topDocs.totalHits.value); for (int i = 0; i < topDocs.scoreDocs.length; i++) { ScoreDoc scoreDoc = topDocs.scoreDocs[i]; - String id = searcher.doc(scoreDoc.doc).get("id"); + String id = searcher.storedFields().document(scoreDoc.doc).get("id"); assertEquals(lowestScoresPerParent.get(id), scoreDoc.score, 0f); } @@ -547,7 +547,7 @@ public void testMinMaxScore() throws Exception { assertEquals(numParents, topDocs.totalHits.value); for (int i = 0; i < topDocs.scoreDocs.length; i++) { ScoreDoc scoreDoc = topDocs.scoreDocs[i]; - String id = searcher.doc(scoreDoc.doc).get("id"); + String id = searcher.storedFields().document(scoreDoc.doc).get("id"); assertEquals(highestScoresPerParent.get(id), scoreDoc.score, 0f); } @@ -1550,7 +1550,7 @@ private void assertBitSet(BitSet expectedResult, BitSet actualResult, IndexSearc Locale.ROOT, "Expected doc[%d] with id value %s", doc, - indexSearcher.doc(doc).get("id"))); + indexSearcher.storedFields().document(doc).get("id"))); } System.out.println("actual cardinality:" + actualResult.cardinality()); iterator = new BitSetIterator(actualResult, actualResult.cardinality()); @@ -1562,7 +1562,7 @@ private void assertBitSet(BitSet expectedResult, BitSet actualResult, IndexSearc Locale.ROOT, "Actual doc[%d] with id value %s", doc, - indexSearcher.doc(doc).get("id"))); + indexSearcher.storedFields().document(doc).get("id"))); } } assertEquals(expectedResult, actualResult); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsImpl.java index 9e36d4ffaeb0..021e5179dc31 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/DocumentsImpl.java @@ -87,7 +87,7 @@ public List getDocumentFields(int docid) { List res = new ArrayList<>(); try { - Document doc = 
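// A minimal sketch, not from this patch: the chained s.storedFields().document(...)
// calls above re-acquire a StoredFields per hit, which works but is unnecessary when
// many hits are materialized in one loop. The searcher, query and "skill" field are
// illustrative assumptions.
StoredFields storedFields = s.storedFields();
for (ScoreDoc hit : s.search(query, 10).scoreDocs) {
  String skill = storedFields.document(hit.doc).get("skill");
  // ... assert on or collect the value as the tests above do
}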
reader.document(docid); + Document doc = reader.storedFields().document(docid); for (FieldInfo finfo : IndexUtils.getFieldInfos(reader)) { // iterate all fields for this document diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorsAdapter.java b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorsAdapter.java index bfdd227d0233..e277559196da 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorsAdapter.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/documents/TermVectorsAdapter.java @@ -51,7 +51,7 @@ final class TermVectorsAdapter { * @throws IOException - if there is a low level IO error. */ List getTermVector(int docid, String field) throws IOException { - Terms termVector = reader.getTermVector(docid, field); + Terms termVector = reader.termVectors().get(docid, field); if (termVector == null) { // no term vector available log.warning( diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java index 04a9ba42378a..fa552e65d3f0 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java @@ -250,15 +250,15 @@ private Query parseByStandardParser( @Override public Query mltQuery(int docid, MLTConfig mltConfig, Analyzer analyzer) { - MoreLikeThis mlt = new MoreLikeThis(reader); + try { + MoreLikeThis mlt = new MoreLikeThis(reader); - mlt.setAnalyzer(analyzer); - mlt.setFieldNames(mltConfig.getFieldNames()); - mlt.setMinDocFreq(mltConfig.getMinDocFreq()); - mlt.setMaxDocFreq(mltConfig.getMaxDocFreq()); - mlt.setMinTermFreq(mltConfig.getMinTermFreq()); + mlt.setAnalyzer(analyzer); + mlt.setFieldNames(mltConfig.getFieldNames()); + mlt.setMinDocFreq(mltConfig.getMinDocFreq()); + mlt.setMaxDocFreq(mltConfig.getMaxDocFreq()); + mlt.setMinTermFreq(mltConfig.getMinTermFreq()); - try { return mlt.like(docid); } catch (IOException e) { throw new LukeException("Failed to create MLT query for doc: " + docid, e); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchResults.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchResults.java index a8061c6aafba..1870392e4116 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchResults.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchResults.java @@ -27,6 +27,7 @@ import java.util.stream.Collectors; import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TotalHits; @@ -64,9 +65,12 @@ static SearchResults of( Objects.requireNonNull(docs); Objects.requireNonNull(searcher); + StoredFields storedFields = searcher.storedFields(); for (ScoreDoc sd : docs) { Document luceneDoc = - (fieldsToLoad == null) ? searcher.doc(sd.doc) : searcher.doc(sd.doc, fieldsToLoad); + (fieldsToLoad == null) + ? 
storedFields.document(sd.doc) + : storedFields.document(sd.doc, fieldsToLoad); res.hits.add(Doc.of(sd.doc, sd.score, luceneDoc)); res.offset = offset; } diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java index 4925dfb8ac01..f3628282c4a5 100644 --- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java +++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java @@ -1812,6 +1812,20 @@ public Fields getTermVectors(int docID) { } } + @Override + public TermVectors termVectors() { + return new TermVectors() { + @Override + public Fields get(int docID) { + if (docID == 0) { + return memoryFields; + } else { + return null; + } + } + }; + } + @Override public int numDocs() { if (DEBUG) System.err.println("MemoryIndexReader.numDocs"); @@ -1827,32 +1841,43 @@ public int maxDoc() { @Override public void document(int docID, StoredFieldVisitor visitor) throws IOException { if (DEBUG) System.err.println("MemoryIndexReader.document"); - for (Info info : fields.values()) { - StoredFieldVisitor.Status status = visitor.needsField(info.fieldInfo); - if (status == StoredFieldVisitor.Status.STOP) { - return; - } - if (status == StoredFieldVisitor.Status.NO) { - continue; - } - if (info.storedValues != null) { - for (Object value : info.storedValues) { - if (value instanceof BytesRef bytes) { - visitor.binaryField(info.fieldInfo, BytesRef.deepCopyOf(bytes).bytes); - } else if (value instanceof Double d) { - visitor.doubleField(info.fieldInfo, d); - } else if (value instanceof Float f) { - visitor.floatField(info.fieldInfo, f); - } else if (value instanceof Long l) { - visitor.longField(info.fieldInfo, l); - } else if (value instanceof Integer i) { - visitor.intField(info.fieldInfo, i); - } else if (value instanceof String s) { - visitor.stringField(info.fieldInfo, s); + storedFields().document(docID, visitor); + } + + @Override + public StoredFields storedFields() { + return new StoredFields() { + @Override + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + if (DEBUG) System.err.println("MemoryIndexReader.document"); + for (Info info : fields.values()) { + StoredFieldVisitor.Status status = visitor.needsField(info.fieldInfo); + if (status == StoredFieldVisitor.Status.STOP) { + return; + } + if (status == StoredFieldVisitor.Status.NO) { + continue; + } + if (info.storedValues != null) { + for (Object value : info.storedValues) { + if (value instanceof BytesRef bytes) { + visitor.binaryField(info.fieldInfo, BytesRef.deepCopyOf(bytes).bytes); + } else if (value instanceof Double d) { + visitor.doubleField(info.fieldInfo, d); + } else if (value instanceof Float f) { + visitor.floatField(info.fieldInfo, f); + } else if (value instanceof Long l) { + visitor.longField(info.fieldInfo, l); + } else if (value instanceof Integer i) { + visitor.intField(info.fieldInfo, i); + } else if (value instanceof String s) { + visitor.stringField(info.fieldInfo, s); + } + } } } } - } + }; } @Override diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java index 4913e3f7cba3..2d719bc763e1 100644 --- a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java +++ b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java @@ -155,7 +155,7 @@ public void testFieldsOnlyReturnsIndexedFields() throws IOException { IndexSearcher 
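// A minimal sketch, not from this patch: MemoryIndex holds a single document (docID 0),
// so its reader serves both accessors the same way a directory-backed reader does. The
// Document "doc" and the analyzer choice are assumed.
MemoryIndex mi = MemoryIndex.fromDocument(doc, new StandardAnalyzer());
IndexSearcher memSearcher = mi.createSearcher();
Document stored = memSearcher.storedFields().document(0);             // stored values
Fields vectors = memSearcher.getIndexReader().termVectors().get(0);   // indexed fields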
searcher = mi.createSearcher(); IndexReader reader = searcher.getIndexReader(); - assertEquals(reader.getTermVectors(0).size(), 1); + assertEquals(reader.termVectors().get(0).size(), 1); } public void testReaderConsistency() throws IOException { @@ -722,7 +722,7 @@ public void testStoredFields() throws IOException { } MemoryIndex mi = MemoryIndex.fromDocument(doc, new StandardAnalyzer()); - Document d = mi.createSearcher().doc(0); + Document d = mi.createSearcher().storedFields().document(0); assertContains(d, "long", 10L, IndexableField::numericValue); assertContains(d, "int", 1.7, IndexableField::numericValue); diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstDirectory.java b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstDirectory.java index c51ad9d27e6c..439b2ff24116 100644 --- a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstDirectory.java +++ b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstDirectory.java @@ -162,7 +162,7 @@ public void assertAgainstDirectory(MemoryIndex memory) throws Exception { } private void duellReaders(CompositeReader other, LeafReader memIndexReader) throws IOException { - Fields memFields = memIndexReader.getTermVectors(0); + Fields memFields = memIndexReader.termVectors().get(0); for (String field : FieldInfos.getIndexedFields(other)) { Terms memTerms = memFields.terms(field); Terms iwTerms = memIndexReader.terms(field); @@ -704,10 +704,10 @@ public void testDuelMemoryIndexCoreDirectoryWithArrayField() throws Exception { memIndex.addField(field_name, "foo bar foo bar foo", mockAnalyzer); // compare term vectors - Terms ramTv = reader.getTermVector(0, field_name); + Terms ramTv = reader.termVectors().get(0, field_name); IndexReader memIndexReader = memIndex.createSearcher().getIndexReader(); TestUtil.checkReader(memIndexReader); - Terms memTv = memIndexReader.getTermVector(0, field_name); + Terms memTv = memIndexReader.termVectors().get(0, field_name); compareTermVectors(ramTv, memTv, field_name); memIndexReader.close(); diff --git a/lucene/misc/src/java/org/apache/lucene/misc/document/LazyDocument.java b/lucene/misc/src/java/org/apache/lucene/misc/document/LazyDocument.java index ee3c654e1df4..63a1d1019fd9 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/document/LazyDocument.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/document/LazyDocument.java @@ -94,7 +94,7 @@ public IndexableField getField(FieldInfo fieldInfo) { synchronized Document getDocument() { if (doc == null) { try { - doc = reader.document(docID, fieldNames); + doc = reader.storedFields().document(docID, fieldNames); } catch (IOException ioe) { throw new IllegalStateException("unable to load document", ioe); } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/document/TestLazyDocument.java b/lucene/misc/src/test/org/apache/lucene/misc/document/TestLazyDocument.java index 9159ad17e09d..ceeea076a3ad 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/document/TestLazyDocument.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/document/TestLazyDocument.java @@ -93,7 +93,7 @@ public void testLazy() throws Exception { assertEquals("Too many docs", 1, hits.length); LazyTestingStoredFieldVisitor visitor = new LazyTestingStoredFieldVisitor(new LazyDocument(reader, hits[0].doc), FIELDS); - reader.document(hits[0].doc, visitor); + reader.storedFields().document(hits[0].doc, visitor); Document d = visitor.doc; int numFieldValues = 0; @@ -154,7 
+154,7 @@ public void testLazy() throws Exception { // use the same LazyDoc to ask for one more lazy field visitor = new LazyTestingStoredFieldVisitor(new LazyDocument(reader, hits[0].doc), "load_later"); - reader.document(hits[0].doc, visitor); + reader.storedFields().document(hits[0].doc, visitor); d = visitor.doc; // ensure we have all the values we expect now, and that diff --git a/lucene/misc/src/test/org/apache/lucene/misc/index/TestMultiPassIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/misc/index/TestMultiPassIndexSplitter.java index 4615ad65a359..f4a1707034e3 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/index/TestMultiPassIndexSplitter.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/index/TestMultiPassIndexSplitter.java @@ -73,7 +73,7 @@ public void testSplitRR() throws Exception { IndexReader ir; ir = DirectoryReader.open(dirs[0]); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error - Document doc = ir.document(0); + Document doc = ir.storedFields().document(0); assertEquals("0", doc.get("id")); TermsEnum te = MultiTerms.getTerms(ir, "id").iterator(); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("1"))); @@ -81,7 +81,7 @@ public void testSplitRR() throws Exception { ir.close(); ir = DirectoryReader.open(dirs[1]); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.storedFields().document(0); assertEquals("1", doc.get("id")); te = MultiTerms.getTerms(ir, "id").iterator(); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("0"))); @@ -90,7 +90,7 @@ public void testSplitRR() throws Exception { ir.close(); ir = DirectoryReader.open(dirs[2]); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.storedFields().document(0); assertEquals("2", doc.get("id")); te = MultiTerms.getTerms(ir, "id").iterator(); @@ -111,19 +111,19 @@ public void testSplitSeq() throws Exception { IndexReader ir; ir = DirectoryReader.open(dirs[0]); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - Document doc = ir.document(0); + Document doc = ir.storedFields().document(0); assertEquals("0", doc.get("id")); int start = ir.numDocs(); ir.close(); ir = DirectoryReader.open(dirs[1]); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.storedFields().document(0); assertEquals(start + "", doc.get("id")); start += ir.numDocs(); ir.close(); ir = DirectoryReader.open(dirs[2]); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.storedFields().document(0); assertEquals(start + "", doc.get("id")); // make sure the deleted doc is not here TermsEnum te = MultiTerms.getTerms(ir, "id").iterator(); diff --git a/lucene/misc/src/test/org/apache/lucene/misc/index/TestPKIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/misc/index/TestPKIndexSplitter.java index 27cf25898af4..1a286590be89 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/index/TestPKIndexSplitter.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/index/TestPKIndexSplitter.java @@ -112,7 +112,7 @@ private void checkContents(IndexReader ir, String indexname) throws Exception { final Bits liveDocs = MultiBits.getLiveDocs(ir); for (int i = 0; i < ir.maxDoc(); i++) { if (liveDocs == null || liveDocs.get(i)) { - assertEquals(indexname, ir.document(i).get("indexname")); + assertEquals(indexname, ir.storedFields().document(i).get("indexname")); } } } diff --git a/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java 
b/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java index f189699c20b3..ca63678fd2b3 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java +++ b/lucene/misc/src/test/org/apache/lucene/misc/search/TestDiversifiedTopDocsCollector.java @@ -454,7 +454,7 @@ private int getMaxNumRecordsPerArtist(ScoreDoc[] sd) throws IOException { int result = 0; HashMap artistCounts = new HashMap(); for (int i = 0; i < sd.length; i++) { - Document doc = reader.document(sd[i].doc); + Document doc = reader.storedFields().document(sd[i].doc); Record record = parsedRecords.get(doc.get("id")); Integer count = artistCounts.get(record.artist); int newCount = 1; diff --git a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java index b94756f8feff..9314d5c344a8 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.BooleanClause; @@ -291,11 +292,11 @@ public void setBoostFactor(float boostFactor) { } /** Constructor requiring an IndexReader. */ - public MoreLikeThis(IndexReader ir) { + public MoreLikeThis(IndexReader ir) throws IOException { this(ir, new ClassicSimilarity()); } - public MoreLikeThis(IndexReader ir, TFIDFSimilarity sim) { + public MoreLikeThis(IndexReader ir, TFIDFSimilarity sim) throws IOException { this.ir = ir; this.similarity = sim; } @@ -710,8 +711,9 @@ public String describeParams() { */ private PriorityQueue retrieveTerms(int docNum) throws IOException { Map> field2termFreqMap = new HashMap<>(); + TermVectors termVectors = ir.termVectors(); for (String fieldName : fieldNames) { - final Fields vectors = ir.getTermVectors(docNum); + final Fields vectors = termVectors.get(docNum); final Terms vector; if (vectors != null) { vector = vectors.terms(fieldName); @@ -721,7 +723,7 @@ private PriorityQueue retrieveTerms(int docNum) throws IOException { // field does not store term vector info if (vector == null) { - Document d = ir.document(docNum); + Document d = ir.storedFields().document(docNum); IndexableField[] fields = d.getFields(fieldName); for (IndexableField field : fields) { final String stringValue = field.stringValue(); diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TestCommonTermsQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/TestCommonTermsQuery.java index 56c498e70ff9..cca0c162113d 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/TestCommonTermsQuery.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/TestCommonTermsQuery.java @@ -88,9 +88,9 @@ public void testBasics() throws IOException { query.add(new Term("field", "right")); TopDocs search = s.search(query, 10); assertEquals(search.totalHits.value, 3); - assertEquals("0", r.document(search.scoreDocs[0].doc).get("id")); - assertEquals("2", r.document(search.scoreDocs[1].doc).get("id")); - assertEquals("3", r.document(search.scoreDocs[2].doc).get("id")); + assertEquals("0", r.storedFields().document(search.scoreDocs[0].doc).get("id")); + assertEquals("2", 
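// A minimal sketch, not from this patch: new MoreLikeThis(reader) is now declared to
// throw IOException, so construction belongs inside the same try/catch (or throws
// clause) as like(), mirroring the SearchImpl change earlier in this patch. The reader,
// analyzer, "body" field and docID are assumed.
try {
  MoreLikeThis mlt = new MoreLikeThis(reader);
  mlt.setAnalyzer(analyzer);
  mlt.setFieldNames(new String[] {"body"});
  Query likeThis = mlt.like(docID);
  // ... run likeThis on a searcher
} catch (IOException e) {
  throw new UncheckedIOException(e);
}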
r.storedFields().document(search.scoreDocs[1].doc).get("id")); + assertEquals("3", r.storedFields().document(search.scoreDocs[2].doc).get("id")); } { // only high freq @@ -101,8 +101,8 @@ public void testBasics() throws IOException { query.add(new Term("field", "end")); TopDocs search = s.search(query, 10); assertEquals(search.totalHits.value, 2); - assertEquals("0", r.document(search.scoreDocs[0].doc).get("id")); - assertEquals("2", r.document(search.scoreDocs[1].doc).get("id")); + assertEquals("0", r.storedFields().document(search.scoreDocs[0].doc).get("id")); + assertEquals("2", r.storedFields().document(search.scoreDocs[1].doc).get("id")); } { // low freq is mandatory @@ -115,7 +115,7 @@ public void testBasics() throws IOException { TopDocs search = s.search(query, 10); assertEquals(search.totalHits.value, 1); - assertEquals("0", r.document(search.scoreDocs[0].doc).get("id")); + assertEquals("0", r.storedFields().document(search.scoreDocs[0].doc).get("id")); } { // low freq is mandatory @@ -126,7 +126,7 @@ public void testBasics() throws IOException { TopDocs search = s.search(query, 10); assertEquals(search.totalHits.value, 1); - assertEquals("3", r.document(search.scoreDocs[0].doc).get("id")); + assertEquals("3", r.storedFields().document(search.scoreDocs[0].doc).get("id")); } IOUtils.close(r, w, dir, analyzer); } @@ -223,7 +223,7 @@ public void testMinShouldMatch() throws IOException { query.setLowFreqMinimumNumberShouldMatch(0.5f); TopDocs search = s.search(query, 10); assertEquals(search.totalHits.value, 1); - assertEquals("0", r.document(search.scoreDocs[0].doc).get("id")); + assertEquals("0", r.storedFields().document(search.scoreDocs[0].doc).get("id")); } { CommonTermsQuery query = @@ -237,7 +237,7 @@ public void testMinShouldMatch() throws IOException { query.setLowFreqMinimumNumberShouldMatch(2.0f); TopDocs search = s.search(query, 10); assertEquals(search.totalHits.value, 1); - assertEquals("0", r.document(search.scoreDocs[0].doc).get("id")); + assertEquals("0", r.storedFields().document(search.scoreDocs[0].doc).get("id")); } { @@ -252,9 +252,9 @@ public void testMinShouldMatch() throws IOException { query.setLowFreqMinimumNumberShouldMatch(0.49f); TopDocs search = s.search(query, 10); assertEquals(search.totalHits.value, 3); - assertEquals("0", r.document(search.scoreDocs[0].doc).get("id")); - assertEquals("2", r.document(search.scoreDocs[1].doc).get("id")); - assertEquals("3", r.document(search.scoreDocs[2].doc).get("id")); + assertEquals("0", r.storedFields().document(search.scoreDocs[0].doc).get("id")); + assertEquals("2", r.storedFields().document(search.scoreDocs[1].doc).get("id")); + assertEquals("3", r.storedFields().document(search.scoreDocs[2].doc).get("id")); } { @@ -269,9 +269,9 @@ public void testMinShouldMatch() throws IOException { query.setLowFreqMinimumNumberShouldMatch(1.0f); TopDocs search = s.search(query, 10); assertEquals(search.totalHits.value, 3); - assertEquals("0", r.document(search.scoreDocs[0].doc).get("id")); - assertEquals("2", r.document(search.scoreDocs[1].doc).get("id")); - assertEquals("3", r.document(search.scoreDocs[2].doc).get("id")); + assertEquals("0", r.storedFields().document(search.scoreDocs[0].doc).get("id")); + assertEquals("2", r.storedFields().document(search.scoreDocs[1].doc).get("id")); + assertEquals("3", r.storedFields().document(search.scoreDocs[2].doc).get("id")); assertTrue(search.scoreDocs[1].score >= search.scoreDocs[2].score); } @@ -289,14 +289,14 @@ public void testMinShouldMatch() throws IOException { TopDocs search = 
s.search(query, 10); assertEquals(search.totalHits.value, 3); assertEquals(search.scoreDocs[1].score, search.scoreDocs[2].score, 0.0f); - assertEquals("0", r.document(search.scoreDocs[0].doc).get("id")); + assertEquals("0", r.storedFields().document(search.scoreDocs[0].doc).get("id")); // doc 2 and 3 only get a score from low freq terms assertEquals( new HashSet<>(Arrays.asList("2", "3")), new HashSet<>( Arrays.asList( - r.document(search.scoreDocs[1].doc).get("id"), - r.document(search.scoreDocs[2].doc).get("id")))); + r.storedFields().document(search.scoreDocs[1].doc).get("id"), + r.storedFields().document(search.scoreDocs[2].doc).get("id")))); } { @@ -327,8 +327,8 @@ public void testMinShouldMatch() throws IOException { new HashSet<>(Arrays.asList("0", "2")), new HashSet<>( Arrays.asList( - r.document(search.scoreDocs[0].doc).get("id"), - r.document(search.scoreDocs[1].doc).get("id")))); + r.storedFields().document(search.scoreDocs[0].doc).get("id"), + r.storedFields().document(search.scoreDocs[1].doc).get("id")))); } IOUtils.close(r, w, dir, analyzer); } @@ -385,9 +385,9 @@ public void testExtend() throws IOException { query.add(new Term("field", "right")); TopDocs search = s.search(query, 10); assertEquals(search.totalHits.value, 3); - assertEquals("0", r.document(search.scoreDocs[0].doc).get("id")); - assertEquals("2", r.document(search.scoreDocs[1].doc).get("id")); - assertEquals("3", r.document(search.scoreDocs[2].doc).get("id")); + assertEquals("0", r.storedFields().document(search.scoreDocs[0].doc).get("id")); + assertEquals("2", r.storedFields().document(search.scoreDocs[1].doc).get("id")); + assertEquals("3", r.storedFields().document(search.scoreDocs[2].doc).get("id")); } { @@ -403,9 +403,9 @@ public void testExtend() throws IOException { query.add(new Term("field", "right")); TopDocs search = s.search(query, 10); assertEquals(search.totalHits.value, 3); - assertEquals("2", r.document(search.scoreDocs[0].doc).get("id")); - assertEquals("3", r.document(search.scoreDocs[1].doc).get("id")); - assertEquals("0", r.document(search.scoreDocs[2].doc).get("id")); + assertEquals("2", r.storedFields().document(search.scoreDocs[0].doc).get("id")); + assertEquals("3", r.storedFields().document(search.scoreDocs[1].doc).get("id")); + assertEquals("0", r.storedFields().document(search.scoreDocs[2].doc).get("id")); } IOUtils.close(r, w, dir, analyzer); } diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java index e935be87a436..664e759a9eeb 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java @@ -80,7 +80,7 @@ private void doTestRank(ValueSource valueSource) throws Exception { assertEquals("All docs should be matched!", N_DOCS, h.length); String prevID = "ID" + (N_DOCS + 1); // greater than all ids of docs in this test for (int i = 0; i < h.length; i++) { - String resID = s.doc(h[i].doc).get(ID_FIELD); + String resID = s.storedFields().document(h[i].doc).get(ID_FIELD); log(i + ". 
score=" + h[i].score + " - " + resID); log(s.explain(functionQuery, h[i].doc)); assertTrue( @@ -127,7 +127,7 @@ private void doTestExactScore(ValueSource valueSource) throws Exception { for (ScoreDoc aSd : sd) { float score = aSd.score; log(s.explain(functionQuery, aSd.doc)); - String id = s.getIndexReader().document(aSd.doc).get(ID_FIELD); + String id = s.getIndexReader().storedFields().document(aSd.doc).get(ID_FIELD); float expectedScore = expectedFieldScore(id); // "ID7" --> 7.0 assertEquals( "score of " + id + " shuould be " + expectedScore + " != " + score, diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java index 499f6926b3e3..a8d30bb5e48c 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java @@ -134,7 +134,7 @@ public void testSearchAfterWhenSortingByFunctionValues() throws IOException { // Verify that sorting works in general int i = 0; for (ScoreDoc hit : hits.scoreDocs) { - int valueFromDoc = Integer.parseInt(reader.document(hit.doc).get("value")); + int valueFromDoc = Integer.parseInt(reader.storedFields().document(hit.doc).get("value")); assertEquals(++i, valueFromDoc); } @@ -149,7 +149,7 @@ public void testSearchAfterWhenSortingByFunctionValues() throws IOException { // Verify that hits are actually "after" int afterValue = ((Double) afterHit.fields[0]).intValue(); for (ScoreDoc hit : hits.scoreDocs) { - int val = Integer.parseInt(reader.document(hit.doc).get("value")); + int val = Integer.parseInt(reader.storedFields().document(hit.doc).get("value")); assertTrue(afterValue <= val); assertFalse(hit.doc == afterHit.doc); } diff --git a/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java b/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java index a6e56f9188b6..e45c764eaeea 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java @@ -97,7 +97,7 @@ public void setUp() throws Exception { mlt = this.getDefaultMoreLikeThis(reader); } - private MoreLikeThis getDefaultMoreLikeThis(IndexReader reader) { + private MoreLikeThis getDefaultMoreLikeThis(IndexReader reader) throws IOException { MoreLikeThis mlt = new MoreLikeThis(reader); Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); mlt.setAnalyzer(analyzer); diff --git a/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpanPositions.java b/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpanPositions.java index ed9fec72637c..4e6c2f5b9fa3 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpanPositions.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpanPositions.java @@ -134,7 +134,7 @@ public void testPayloadsPos0() throws Exception { while (spans.nextStartPosition() != Spans.NO_MORE_POSITIONS) { count++; sawZero |= spans.startPosition() == 0; - // System.out.println(spans.doc() + " - " + spans.start() + " - " + + // System.out.println(spans.storedFields().document() + " - " + spans.start() + " - " + // spans.end()); } } diff --git a/lucene/queries/src/test/org/apache/lucene/queries/spans/TestQueryRescorerWithSpans.java 
b/lucene/queries/src/test/org/apache/lucene/queries/spans/TestQueryRescorerWithSpans.java index 29d337484258..f01732458b2e 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/spans/TestQueryRescorerWithSpans.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/spans/TestQueryRescorerWithSpans.java @@ -67,8 +67,8 @@ public void testBasic() throws Exception { TopDocs hits = searcher.search(bq.build(), 10); assertEquals(2, hits.totalHits.value); - assertEquals("0", searcher.doc(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(hits.scoreDocs[1].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits.scoreDocs[1].doc).get("id")); // Resort using SpanNearQuery: SpanTermQuery t1 = new SpanTermQuery(new Term("field", "wizard")); @@ -79,8 +79,8 @@ public void testBasic() throws Exception { // Resorting changed the order: assertEquals(2, hits3.totalHits.value); - assertEquals("1", searcher.doc(hits3.scoreDocs[0].doc).get("id")); - assertEquals("0", searcher.doc(hits3.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits3.scoreDocs[0].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits3.scoreDocs[1].doc).get("id")); r.close(); dir.close(); @@ -110,8 +110,8 @@ public void testMissingSecondPassScore() throws Exception { TopDocs hits = searcher.search(bq.build(), 10); assertEquals(2, hits.totalHits.value); - assertEquals("0", searcher.doc(hits.scoreDocs[0].doc).get("id")); - assertEquals("1", searcher.doc(hits.scoreDocs[1].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits.scoreDocs[0].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits.scoreDocs[1].doc).get("id")); // Resort using SpanNearQuery: SpanTermQuery t1 = new SpanTermQuery(new Term("field", "wizard")); @@ -122,8 +122,8 @@ public void testMissingSecondPassScore() throws Exception { // Resorting changed the order: assertEquals(2, hits3.totalHits.value); - assertEquals("1", searcher.doc(hits3.scoreDocs[0].doc).get("id")); - assertEquals("0", searcher.doc(hits3.scoreDocs[1].doc).get("id")); + assertEquals("1", searcher.storedFields().document(hits3.scoreDocs[0].doc).get("id")); + assertEquals("0", searcher.storedFields().document(hits3.scoreDocs[1].doc).get("id")); r.close(); dir.close(); diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java index 5a670b27eb4f..3e6c12d18978 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java @@ -24,6 +24,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanQuery; @@ -147,8 +148,9 @@ private void checkMatches(String qString, String expectedVals, Analyzer anAnalyz TopDocs td = searcher.search(q, 10); ScoreDoc[] sd = td.scoreDocs; + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < sd.length; i++) { - Document doc = searcher.doc(sd[i].doc); + Document doc = 
storedFields.document(sd[i].doc); String id = doc.get("id"); assertTrue(qString + "matched doc#" + id + " not expected", expecteds.contains(id)); expecteds.remove(id); diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCoreParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCoreParser.java index 95ee8cdfd609..102360fae87a 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCoreParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestCoreParser.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanQuery; @@ -346,8 +347,9 @@ protected void dumpResults(String qType, Query q, int numDocs) throws IOExceptio } if (VERBOSE) { ScoreDoc[] scoreDocs = hits.scoreDocs; + StoredFields storedFields = searcher.storedFields(); for (int i = 0; i < Math.min(numDocs, hits.totalHits.value); i++) { - Document ldoc = searcher.doc(scoreDocs[i].doc); + Document ldoc = storedFields.document(scoreDocs[i].doc); System.out.println("[" + ldoc.get("date") + "]" + ldoc.get("contents")); } System.out.println(); diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java index a5349abf0d33..66a011dacafd 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java @@ -48,6 +48,7 @@ import org.apache.lucene.index.LogMergePolicy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.SegmentCommitInfo; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.search.IndexSearcher; @@ -858,9 +859,10 @@ private void verifyAtLeastMarkerCount(int expectedAtLeastCount, DataOutput out) + hitCount); TopDocs hits = searcher.search(new TermQuery(new Term("marker", "marker")), expectedAtLeastCount); + StoredFields storedFields = searcher.storedFields(); List seen = new ArrayList<>(); for (ScoreDoc hit : hits.scoreDocs) { - Document doc = searcher.doc(hit.doc); + Document doc = storedFields.document(hit.doc); seen.add(Integer.parseInt(doc.get("docid").substring(1))); } Collections.sort(seen); diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java index 121cab098802..84dca332d42b 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java @@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -293,9 +294,10 @@ void handleOneConnection( TopDocs hits = searcher.search( new TermQuery(new Term("marker", "marker")), expectedAtLeastCount); + StoredFields storedFields = 
searcher.storedFields(); List seen = new ArrayList<>(); for (ScoreDoc hit : hits.scoreDocs) { - Document doc = searcher.doc(hit.doc); + Document doc = storedFields.document(hit.doc); seen.add(Integer.parseInt(doc.get("docid").substring(1))); } Collections.sort(seen); diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestFloatPointNearestNeighbor.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestFloatPointNearestNeighbor.java index 7df51197cc02..368bef88465d 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestFloatPointNearestNeighbor.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/document/TestFloatPointNearestNeighbor.java @@ -58,7 +58,7 @@ public void testNearestNeighborWithDeletedDocs() throws Exception { IndexSearcher s = newSearcher(r, false); FieldDoc hit = (FieldDoc) FloatPointNearestNeighbor.nearest(s, "point", 1, 40.0f, 50.0f).scoreDocs[0]; - assertEquals("0", r.document(hit.doc).getField("id").stringValue()); + assertEquals("0", r.storedFields().document(hit.doc).getField("id").stringValue()); r.close(); w.deleteDocuments(new Term("id", "0")); @@ -67,7 +67,7 @@ public void testNearestNeighborWithDeletedDocs() throws Exception { // with its own points impl: s = newSearcher(r, false); hit = (FieldDoc) LatLonPoint.nearest(s, "point", 40.0, 50.0, 1).scoreDocs[0]; - assertEquals("1", r.document(hit.doc).getField("id").stringValue()); + assertEquals("1", r.storedFields().document(hit.doc).getField("id").stringValue()); r.close(); w.close(); dir.close(); @@ -91,7 +91,7 @@ public void testNearestNeighborWithAllDeletedDocs() throws Exception { IndexSearcher s = newSearcher(r, false); FieldDoc hit = (FieldDoc) FloatPointNearestNeighbor.nearest(s, "point", 1, 40.0f, 50.0f).scoreDocs[0]; - assertEquals("0", r.document(hit.doc).getField("id").stringValue()); + assertEquals("0", r.storedFields().document(hit.doc).getField("id").stringValue()); r.close(); w.deleteDocuments(new Term("id", "0")); @@ -125,8 +125,8 @@ public void testTieBreakByDocID() throws Exception { ScoreDoc[] hits = FloatPointNearestNeighbor.nearest(newSearcher(r, false), "point", 2, 45.0f, 50.0f) .scoreDocs; - assertEquals("0", r.document(hits[0].doc).getField("id").stringValue()); - assertEquals("1", r.document(hits[1].doc).getField("id").stringValue()); + assertEquals("0", r.storedFields().document(hits[0].doc).getField("id").stringValue()); + assertEquals("1", r.storedFields().document(hits[1].doc).getField("id").stringValue()); r.close(); w.close(); @@ -233,7 +233,7 @@ public void testNearestNeighborRandom() throws Exception { for (int i = 0; i < topK; ++i) { FloatPointNearestNeighbor.NearestHit expected = expectedHits[i]; FieldDoc actual = (FieldDoc) hits[i]; - Document actualDoc = r.document(actual.doc); + Document actualDoc = r.storedFields().document(actual.doc); System.out.println("hit " + i); System.out.println( " expected id=" diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestFuzzyLikeThisQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestFuzzyLikeThisQuery.java index 42b4a7ff4f4b..2b8fca789cb9 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestFuzzyLikeThisQuery.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestFuzzyLikeThisQuery.java @@ -90,7 +90,7 @@ public void testClosestEditDistanceMatchComesFirst() throws Throwable { TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 doc", 
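// A minimal sketch, not from this patch: a StoredFields or TermVectors instance must
// only be consumed on the thread that acquired it, so each worker gets its own from the
// searcher it uses, as the primary/replica node handlers above do. The executor,
// searcher, hits and "docid" field are assumed.
executor.submit(() -> {
  StoredFields storedFields = searcher.storedFields(); // per-task, never shared
  List<String> ids = new ArrayList<>();
  for (ScoreDoc hit : hits.scoreDocs) {
    ids.add(storedFields.document(hit.doc).get("docid"));
  }
  return ids;
});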
(sd != null) && (sd.length > 0)); - Document doc = searcher.doc(sd[0].doc); + Document doc = searcher.storedFields().document(sd[0].doc); assertEquals("Should match most similar not most rare variant", "2", doc.get("id")); } @@ -106,7 +106,7 @@ public void testMultiWord() throws Throwable { TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 doc", (sd != null) && (sd.length > 0)); - Document doc = searcher.doc(sd[0].doc); + Document doc = searcher.storedFields().document(sd[0].doc); assertEquals("Should match most similar when using 2 words", "2", doc.get("id")); } @@ -124,7 +124,7 @@ public void testNonExistingField() throws Throwable { TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 doc", (sd != null) && (sd.length > 0)); - Document doc = searcher.doc(sd[0].doc); + Document doc = searcher.storedFields().document(sd[0].doc); assertEquals("Should match most similar when using 2 words", "2", doc.get("id")); } @@ -139,7 +139,7 @@ public void testNoMatchFirstWordBug() throws Throwable { TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 doc", (sd != null) && (sd.length > 0)); - Document doc = searcher.doc(sd[0].doc); + Document doc = searcher.storedFields().document(sd[0].doc); assertEquals("Should match most similar when using 2 words", "2", doc.get("id")); } diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java index d3d02b70894b..3c3c3aac1598 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java @@ -36,6 +36,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; @@ -587,8 +588,9 @@ public TokenStreamComponents createComponents(String fieldName) { private Set toDocIDs(IndexSearcher s, TopDocs hits) throws IOException { Set result = new HashSet<>(); + StoredFields storedFields = s.storedFields(); for (ScoreDoc hit : hits.scoreDocs) { - result.add(s.doc(hit.doc).get("id")); + result.add(storedFields.document(hit.doc).get("id")); } return result; } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java index ef48a1feee6f..cf8fc8b57d76 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java @@ -27,6 +27,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; @@ -106,10 +107,11 @@ protected void verifyDocumentsIndexed(int numDocs) { protected SearchResults executeQuery(Query query, int numDocs) { try { TopDocs topDocs = indexSearcher.search(query, numDocs); + StoredFields storedFields = 
indexSearcher.storedFields(); List results = new ArrayList<>(); for (ScoreDoc scoreDoc : topDocs.scoreDocs) { - results.add(new SearchResult(scoreDoc.score, indexSearcher.doc(scoreDoc.doc))); + results.add(new SearchResult(scoreDoc.score, storedFields.document(scoreDoc.doc))); } return new SearchResults(topDocs.totalHits.value, results); } catch (IOException ioe) { diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestSpatialExample.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestSpatialExample.java index 32cfb80a840d..fd0a81105c5a 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestSpatialExample.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/TestSpatialExample.java @@ -155,7 +155,7 @@ private void search() throws Exception { assertDocMatchedIds(indexSearcher, docs, 2); // Now, lets get the distance for the 1st doc via computing from stored point value: // (this computation is usually not redundant) - Document doc1 = indexSearcher.doc(docs.scoreDocs[0].doc); + Document doc1 = indexSearcher.storedFields().document(docs.scoreDocs[0].doc); String doc1Str = doc1.getField(strategy.getFieldName()).stringValue(); // assume doc1Str is "x y" as written in newSampleDocument() int spaceIdx = doc1Str.indexOf(' '); @@ -200,7 +200,13 @@ private void assertDocMatchedIds(IndexSearcher indexSearcher, TopDocs docs, int. assert docs.totalHits.relation == Relation.EQUAL_TO; int[] gotIds = new int[Math.toIntExact(docs.totalHits.value)]; for (int i = 0; i < gotIds.length; i++) { - gotIds[i] = indexSearcher.doc(docs.scoreDocs[i].doc).getField("id").numericValue().intValue(); + gotIds[i] = + indexSearcher + .storedFields() + .document(docs.scoreDocs[i].doc) + .getField("id") + .numericValue() + .intValue(); } assertArrayEquals(ids, gotIds); } diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestJtsPolygon.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestJtsPolygon.java index 6cdb3935895d..ea0bfa2fb904 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestJtsPolygon.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestJtsPolygon.java @@ -116,7 +116,7 @@ public void testBadPrefixTreePrune() throws Exception { TopDocs search = indexSearcher.search(query, 10); ScoreDoc[] scoreDocs = search.scoreDocs; for (ScoreDoc scoreDoc : scoreDocs) { - System.out.println(indexSearcher.doc(scoreDoc.doc)); + System.out.println(indexSearcher.storedFields().document(scoreDoc.doc)); } assertEquals(1, search.totalHits.value); diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestSpatialPrefixTree.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestSpatialPrefixTree.java index ad83d5c253b4..84ce4bc136fe 100644 --- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestSpatialPrefixTree.java +++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestSpatialPrefixTree.java @@ -106,7 +106,7 @@ public void testBadPrefixTreePrune() throws Exception { TopDocs search = indexSearcher.search(query, 10); ScoreDoc[] scoreDocs = search.scoreDocs; for (ScoreDoc scoreDoc : scoreDocs) { - System.out.println(indexSearcher.doc(scoreDoc.doc)); + System.out.println(indexSearcher.storedFields().document(scoreDoc.doc)); } assertEquals(1, search.totalHits.value); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java 
b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java index be0ad317f327..aa91c2af9049 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java @@ -31,6 +31,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -364,9 +365,10 @@ public String[] suggestSimilar( // go thru more than 'maxr' matches in case the distance filter triggers int stop = Math.min(hits.length, maxHits); SuggestWord sugWord = new SuggestWord(); + StoredFields storedFields = indexSearcher.storedFields(); for (int i = 0; i < stop; i++) { - sugWord.string = indexSearcher.doc(hits[i].doc).get(F_WORD); // get orig word + sugWord.string = storedFields.document(hits[i].doc).get(F_WORD); // get orig word // don't suggest a word for itself, that would be silly if (sugWord.string.equals(word)) { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java index a85d3427015f..0c34d96485ef 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java @@ -26,6 +26,7 @@ import org.apache.lucene.index.MultiBits; import org.apache.lucene.index.MultiDocValues; import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.search.spell.Dictionary; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; @@ -46,6 +47,9 @@ public class DocumentDictionary implements Dictionary { /** {@link IndexReader} to load documents from */ protected final IndexReader reader; + /** {@link StoredFields} for this reader */ + protected final StoredFields storedFields; + /** Field to read payload from */ protected final String payloadField; /** Field to read contexts from */ @@ -58,7 +62,8 @@ public class DocumentDictionary implements Dictionary { * Creates a new dictionary with the contents of the fields named field for the terms * and weightField for the weights that will be used for the corresponding terms. */ - public DocumentDictionary(IndexReader reader, String field, String weightField) { + public DocumentDictionary(IndexReader reader, String field, String weightField) + throws IOException { this(reader, field, weightField, null); } @@ -68,7 +73,8 @@ public DocumentDictionary(IndexReader reader, String field, String weightField) * and payloadField for the corresponding payloads for the entry. 
*/ public DocumentDictionary( - IndexReader reader, String field, String weightField, String payloadField) { + IndexReader reader, String field, String weightField, String payloadField) + throws IOException { this(reader, field, weightField, payloadField, null); } @@ -83,8 +89,10 @@ public DocumentDictionary( String field, String weightField, String payloadField, - String contextsField) { + String contextsField) + throws IOException { this.reader = reader; + this.storedFields = reader.storedFields(); this.field = field; this.weightField = weightField; this.payloadField = payloadField; @@ -157,7 +165,7 @@ public BytesRef next() throws IOException { continue; } - Document doc = reader.document(currentDocId, relevantFields); + Document doc = storedFields.document(currentDocId, relevantFields); BytesRef tempPayload = null; if (hasPayloads) { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java index adfd202aff11..f912c19c24b4 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java @@ -60,7 +60,8 @@ public DocumentValueSourceDictionary( String field, LongValuesSource weightsValueSource, String payload, - String contexts) { + String contexts) + throws IOException { super(reader, field, null, payload, contexts); this.weightsValueSource = weightsValueSource; } @@ -71,7 +72,8 @@ public DocumentValueSourceDictionary( * weightsValueSource supplied to determine the score. */ public DocumentValueSourceDictionary( - IndexReader reader, String field, LongValuesSource weightsValueSource, String payload) { + IndexReader reader, String field, LongValuesSource weightsValueSource, String payload) + throws IOException { super(reader, field, null, payload); this.weightsValueSource = weightsValueSource; } @@ -81,7 +83,7 @@ public DocumentValueSourceDictionary( * and uses the weightsValueSource supplied to determine the score. 
*/ public DocumentValueSourceDictionary( - IndexReader reader, String field, LongValuesSource weightsValueSource) { + IndexReader reader, String field, LongValuesSource weightsValueSource) throws IOException { super(reader, field, null, null); this.weightsValueSource = weightsValueSource; } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java index 7fa5c1be2595..463780323179 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java @@ -30,6 +30,7 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.MultiDocValues; import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.BooleanClause; @@ -225,6 +226,7 @@ protected List createResults( // we reduce the num to the one initially requested int actualNum = num / numFactor; + TermVectors termVectors = searcher.getIndexReader().termVectors(); for (int i = 0; i < hits.scoreDocs.length; i++) { FieldDoc fd = (FieldDoc) hits.scoreDocs[i]; @@ -258,7 +260,7 @@ protected List createResults( // if hit starts with the key, we don't change the score coefficient = 1; } else { - coefficient = createCoefficient(searcher, fd.doc, matchedTokens, prefixToken); + coefficient = createCoefficient(termVectors, fd.doc, matchedTokens, prefixToken); } if (weight == 0) { weight = 1; @@ -314,10 +316,10 @@ private static void boundedTreeAdd( * index. */ private double createCoefficient( - IndexSearcher searcher, int doc, Set<String> matchedTokens, String prefixToken) + TermVectors termVectors, int doc, Set<String> matchedTokens, String prefixToken) throws IOException { - Terms tv = searcher.getIndexReader().getTermVector(doc, TEXT_FIELD_NAME); + Terms tv = termVectors.get(doc, TEXT_FIELD_NAME); TermsEnum it = tv.iterator(); Integer position = Integer.MAX_VALUE; diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java index c8d959954011..9cf270b72e8a 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java @@ -49,6 +49,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; @@ -714,12 +715,13 @@ public void testReturnedDocID() throws Exception { new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_")); TopSuggestDocs suggest = indexSearcher.suggest(query, num, false); assertEquals(num, suggest.totalHits.value); + StoredFields storedFields = reader.storedFields(); for (SuggestScoreDoc suggestScoreDoc : suggest.scoreLookupDocs()) { String key = suggestScoreDoc.key.toString(); assertTrue(key.startsWith("abc_")); String substring = key.substring(4); int fieldValue = Integer.parseInt(substring); - Document doc = reader.document(suggestScoreDoc.doc); + Document doc = 
storedFields.document(suggestScoreDoc.doc); assertEquals(doc.getField("int_field").numericValue().intValue(), fieldValue); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/codecs/asserting/AssertingStoredFieldsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/tests/codecs/asserting/AssertingStoredFieldsFormat.java index ed750557c71b..56ccc9e28e05 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/tests/codecs/asserting/AssertingStoredFieldsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/tests/codecs/asserting/AssertingStoredFieldsFormat.java @@ -70,10 +70,10 @@ public void close() throws IOException { } @Override - public void visitDocument(int n, StoredFieldVisitor visitor) throws IOException { + public void document(int n, StoredFieldVisitor visitor) throws IOException { AssertingCodec.assertThread("StoredFieldsReader", creationThread); assert n >= 0 && n < maxDoc; - in.visitDocument(n, visitor); + in.document(n, visitor); } @Override diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/geo/BaseGeoPointTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/geo/BaseGeoPointTestCase.java index 20705b40ab64..d1112dcf4f44 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/tests/geo/BaseGeoPointTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/tests/geo/BaseGeoPointTestCase.java @@ -53,6 +53,7 @@ import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SerialMergeScheduler; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchNoDocsQuery; @@ -687,6 +688,7 @@ public void testMultiValued() throws Exception { boolean fail = false; + StoredFields storedFields = s.storedFields(); for (int docID = 0; docID < lats.length / 2; docID++) { double latDoc1 = lats[2 * docID]; double lonDoc1 = lons[2 * docID]; @@ -699,7 +701,7 @@ public void testMultiValued() throws Exception { boolean expected = result1 || result2; if (hits.get(docID) != expected) { - String id = s.doc(docID).get("id"); + String id = storedFields.document(docID).get("id"); if (expected) { System.out.println("TEST: id=" + id + " docID=" + docID + " should match but did not"); } else { @@ -1486,6 +1488,7 @@ public PointsReader fieldsReader(SegmentReadState readState) throws IOException IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); + StoredFields storedFields = reader.storedFields(); for (int i = 0; i < numQueries; i++) { double lat = nextLatitude(); double lon = nextLongitude(); @@ -1493,8 +1496,10 @@ public PointsReader fieldsReader(SegmentReadState readState) throws IOException BitSet expected = new BitSet(); for (int doc = 0; doc < reader.maxDoc(); doc++) { - double docLatitude = reader.document(doc).getField("lat").numericValue().doubleValue(); - double docLongitude = reader.document(doc).getField("lon").numericValue().doubleValue(); + double docLatitude = + storedFields.document(doc).getField("lat").numericValue().doubleValue(); + double docLongitude = + storedFields.document(doc).getField("lon").numericValue().doubleValue(); double distance = SloppyMath.haversinMeters(lat, lon, docLatitude, docLongitude); if (distance <= radius) { expected.set(doc); @@ -1514,8 +1519,10 @@ public PointsReader fieldsReader(SegmentReadState readState) throws IOException } catch (AssertionError e) { 
System.out.println("center: (" + lat + "," + lon + "), radius=" + radius); for (int doc = 0; doc < reader.maxDoc(); doc++) { - double docLatitude = reader.document(doc).getField("lat").numericValue().doubleValue(); - double docLongitude = reader.document(doc).getField("lon").numericValue().doubleValue(); + double docLatitude = + storedFields.document(doc).getField("lat").numericValue().doubleValue(); + double docLongitude = + storedFields.document(doc).getField("lon").numericValue().doubleValue(); double distance = SloppyMath.haversinMeters(lat, lon, docLatitude, docLongitude); System.out.println( "" + doc + ": (" + docLatitude + "," + docLongitude + "), distance=" + distance); diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/geo/BaseXYPointTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/geo/BaseXYPointTestCase.java index b7aed58b9ba8..e66fa12ce614 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/tests/geo/BaseXYPointTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/tests/geo/BaseXYPointTestCase.java @@ -53,6 +53,7 @@ import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SerialMergeScheduler; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -629,6 +630,7 @@ public void testMultiValued() throws Exception { boolean fail = false; + StoredFields storedFields = s.storedFields(); for (int docID = 0; docID < ys.length / 2; docID++) { float yDoc1 = ys[2 * docID]; float xDoc1 = xs[2 * docID]; @@ -641,7 +643,7 @@ public void testMultiValued() throws Exception { boolean expected = result1 || result2; if (hits.get(docID) != expected) { - String id = s.doc(docID).get("id"); + String id = storedFields.document(docID).get("id"); if (expected) { System.out.println("TEST: id=" + id + " docID=" + docID + " should match but did not"); } else { @@ -1330,6 +1332,7 @@ public PointsReader fieldsReader(SegmentReadState readState) throws IOException IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); + StoredFields storedFields = reader.storedFields(); for (int i = 0; i < numQueries; i++) { XYCircle circle = ShapeTestUtil.nextCircle(); float x = circle.getX(); @@ -1338,8 +1341,8 @@ public PointsReader fieldsReader(SegmentReadState readState) throws IOException BitSet expected = new BitSet(); for (int doc = 0; doc < reader.maxDoc(); doc++) { - float docX = reader.document(doc).getField("x").numericValue().floatValue(); - float docY = reader.document(doc).getField("y").numericValue().floatValue(); + float docX = storedFields.document(doc).getField("x").numericValue().floatValue(); + float docY = storedFields.document(doc).getField("y").numericValue().floatValue(); double distance = cartesianDistance(x, y, docX, docY); if (distance <= radius) { expected.set(doc); @@ -1359,8 +1362,8 @@ public PointsReader fieldsReader(SegmentReadState readState) throws IOException } catch (AssertionError e) { System.out.println("center: (" + x + "," + y + "), radius=" + radius); for (int doc = 0; doc < reader.maxDoc(); doc++) { - float docX = reader.document(doc).getField("x").numericValue().floatValue(); - float docY = reader.document(doc).getField("y").numericValue().floatValue(); + float docX = storedFields.document(doc).getField("x").numericValue().floatValue(); + float docY = 
storedFields.document(doc).getField("y").numericValue().floatValue(); double distance = cartesianDistance(x, y, docX, docY); System.out.println("" + doc + ": (" + x + "," + y + "), distance=" + distance); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/AssertingLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/AssertingLeafReader.java index bcaafc32877d..c3d1973fb484 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/AssertingLeafReader.java +++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/AssertingLeafReader.java @@ -39,7 +39,10 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.TermState; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.internal.tests.IndexPackageAccess; @@ -113,14 +116,60 @@ public Fields getTermVectors(int docID) throws IOException { return fields == null ? null : new AssertingFields(fields); } + @Override + public TermVectors termVectors() throws IOException { + return new AssertingTermVectors(super.termVectors()); + } + + @Override + public StoredFields storedFields() throws IOException { + return new AssertingStoredFields(super.storedFields()); + } + + /** Wraps a StoredFields but with additional asserts */ + public static class AssertingStoredFields extends StoredFields { + private final StoredFields in; + private final Thread creationThread = Thread.currentThread(); + + public AssertingStoredFields(StoredFields in) { + this.in = in; + } + + @Override + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + assertThread("StoredFields", creationThread); + in.document(docID, visitor); + } + } + + /** Wraps a TermVectors but with additional asserts */ + public static class AssertingTermVectors extends TermVectors { + private final TermVectors in; + private final Thread creationThread = Thread.currentThread(); + + public AssertingTermVectors(TermVectors in) { + this.in = in; + } + + @Override + public Fields get(int doc) throws IOException { + assertThread("TermVectors", creationThread); + Fields fields = in.get(doc); + return fields == null ? null : new AssertingFields(fields); + } + } + /** Wraps a Fields but with additional asserts */ public static class AssertingFields extends FilterFields { + private final Thread creationThread = Thread.currentThread(); + public AssertingFields(Fields in) { super(in); } @Override public Iterator iterator() { + assertThread("Fields", creationThread); Iterator iterator = super.iterator(); assert iterator != null; return iterator; @@ -128,6 +177,7 @@ public Iterator iterator() { @Override public Terms terms(String field) throws IOException { + assertThread("Fields", creationThread); Terms terms = super.terms(field); return terms == null ? 
null : new AssertingTerms(terms); } @@ -135,12 +185,15 @@ public Terms terms(String field) throws IOException { /** Wraps a Terms but with additional asserts */ public static class AssertingTerms extends FilterTerms { + private final Thread creationThread = Thread.currentThread(); + public AssertingTerms(Terms in) { super(in); } @Override public TermsEnum intersect(CompiledAutomaton automaton, BytesRef bytes) throws IOException { + assertThread("Terms", creationThread); TermsEnum termsEnum = in.intersect(automaton, bytes); assert termsEnum != null; assert bytes == null || bytes.isValid(); @@ -149,6 +202,7 @@ public TermsEnum intersect(CompiledAutomaton automaton, BytesRef bytes) throws I @Override public BytesRef getMin() throws IOException { + assertThread("Terms", creationThread); BytesRef v = in.getMin(); assert v == null || v.isValid(); return v; @@ -156,6 +210,7 @@ public BytesRef getMin() throws IOException { @Override public BytesRef getMax() throws IOException { + assertThread("Terms", creationThread); BytesRef v = in.getMax(); assert v == null || v.isValid(); return v; @@ -163,6 +218,7 @@ public BytesRef getMax() throws IOException { @Override public int getDocCount() throws IOException { + assertThread("Terms", creationThread); final int docCount = in.getDocCount(); assert docCount > 0; return docCount; @@ -170,6 +226,7 @@ public int getDocCount() throws IOException { @Override public long getSumDocFreq() throws IOException { + assertThread("Terms", creationThread); final long sumDf = in.getSumDocFreq(); assert sumDf >= getDocCount(); return sumDf; @@ -177,6 +234,7 @@ public long getSumDocFreq() throws IOException { @Override public long getSumTotalTermFreq() throws IOException { + assertThread("Terms", creationThread); final long sumTtf = in.getSumTotalTermFreq(); if (hasFreqs() == false) { assert sumTtf == in.getSumDocFreq(); @@ -187,6 +245,7 @@ public long getSumTotalTermFreq() throws IOException { @Override public TermsEnum iterator() throws IOException { + assertThread("Terms", creationThread); TermsEnum termsEnum = super.iterator(); assert termsEnum != null; return new AssertingTermsEnum(termsEnum, hasFreqs()); diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseDocValuesFormatTestCase.java index 5402a6c0f1c2..e5f1559b90f2 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseDocValuesFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseDocValuesFormatTestCase.java @@ -66,6 +66,7 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.TermsEnum.SeekStatus; @@ -138,6 +139,7 @@ public void testOneNumber() throws IOException { IndexReader ireader = maybeWrapWithMergingReader(DirectoryReader.open(directory)); // read-only=true IndexSearcher isearcher = new IndexSearcher(ireader); + StoredFields storedFields = isearcher.storedFields(); assertEquals(1, isearcher.count(new TermQuery(new Term("fieldname", longTerm)))); Query query = new TermQuery(new Term("fieldname", "text")); @@ -145,7 +147,7 @@ public void testOneNumber() throws IOException { assertEquals(1, hits.totalHits.value); // Iterate through the results: for (int i = 0; i < 
hits.scoreDocs.length; i++) { - Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc); + Document hitDoc = storedFields.document(hits.scoreDocs[i].doc); assertEquals(text, hitDoc.get("fieldname")); assert ireader.leaves().size() == 1; NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv"); @@ -174,6 +176,7 @@ public void testOneFloat() throws IOException { IndexReader ireader = maybeWrapWithMergingReader(DirectoryReader.open(directory)); // read-only=true IndexSearcher isearcher = new IndexSearcher(ireader); + StoredFields storedFields = isearcher.storedFields(); assertEquals(1, isearcher.count(new TermQuery(new Term("fieldname", longTerm)))); Query query = new TermQuery(new Term("fieldname", "text")); @@ -182,7 +185,7 @@ public void testOneFloat() throws IOException { // Iterate through the results: for (int i = 0; i < hits.scoreDocs.length; i++) { int docID = hits.scoreDocs[i].doc; - Document hitDoc = isearcher.doc(docID); + Document hitDoc = storedFields.document(docID); assertEquals(text, hitDoc.get("fieldname")); assert ireader.leaves().size() == 1; @@ -212,6 +215,7 @@ public void testTwoNumbers() throws IOException { IndexReader ireader = maybeWrapWithMergingReader(DirectoryReader.open(directory)); // read-only=true IndexSearcher isearcher = new IndexSearcher(ireader); + StoredFields storedFields = isearcher.storedFields(); assertEquals(1, isearcher.count(new TermQuery(new Term("fieldname", longTerm)))); Query query = new TermQuery(new Term("fieldname", "text")); @@ -220,7 +224,7 @@ public void testTwoNumbers() throws IOException { // Iterate through the results: for (int i = 0; i < hits.scoreDocs.length; i++) { int docID = hits.scoreDocs[i].doc; - Document hitDoc = isearcher.doc(docID); + Document hitDoc = storedFields.document(docID); assertEquals(text, hitDoc.get("fieldname")); assert ireader.leaves().size() == 1; NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv1"); @@ -252,6 +256,7 @@ public void testTwoBinaryValues() throws IOException { IndexReader ireader = maybeWrapWithMergingReader(DirectoryReader.open(directory)); // read-only=true IndexSearcher isearcher = new IndexSearcher(ireader); + StoredFields storedFields = isearcher.storedFields(); assertEquals(1, isearcher.count(new TermQuery(new Term("fieldname", longTerm)))); Query query = new TermQuery(new Term("fieldname", "text")); @@ -260,7 +265,7 @@ public void testTwoBinaryValues() throws IOException { // Iterate through the results: for (int i = 0; i < hits.scoreDocs.length; i++) { int hitDocID = hits.scoreDocs[i].doc; - Document hitDoc = isearcher.doc(hitDocID); + Document hitDoc = storedFields.document(hitDocID); assertEquals(text, hitDoc.get("fieldname")); assert ireader.leaves().size() == 1; BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv1"); @@ -308,6 +313,7 @@ public void testVariouslyCompressibleBinaryValues() throws IOException { IndexReader ireader = maybeWrapWithMergingReader(DirectoryReader.open(directory)); // read-only=true IndexSearcher isearcher = new IndexSearcher(ireader); + StoredFields storedFields = isearcher.storedFields(); for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); @@ -316,7 +322,7 @@ public void testVariouslyCompressibleBinaryValues() throws IOException { assertEquals(1, hits.totalHits.value); // Iterate through the results: int hitDocID = hits.scoreDocs[0].doc; - Document hitDoc = isearcher.doc(hitDocID); + Document hitDoc = storedFields.document(hitDocID); assertEquals(id, 
hitDoc.get("id")); assert ireader.leaves().size() == 1; BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv1"); @@ -346,6 +352,7 @@ public void testTwoFieldsMixed() throws IOException { IndexReader ireader = maybeWrapWithMergingReader(DirectoryReader.open(directory)); // read-only=true IndexSearcher isearcher = new IndexSearcher(ireader); + StoredFields storedFields = isearcher.storedFields(); assertEquals(1, isearcher.count(new TermQuery(new Term("fieldname", longTerm)))); Query query = new TermQuery(new Term("fieldname", "text")); @@ -354,7 +361,7 @@ public void testTwoFieldsMixed() throws IOException { // Iterate through the results: for (int i = 0; i < hits.scoreDocs.length; i++) { int docID = hits.scoreDocs[i].doc; - Document hitDoc = isearcher.doc(docID); + Document hitDoc = storedFields.document(docID); assertEquals(text, hitDoc.get("fieldname")); assert ireader.leaves().size() == 1; NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv1"); @@ -391,11 +398,12 @@ public void testThreeFieldsMixed() throws IOException { assertEquals(1, isearcher.count(new TermQuery(new Term("fieldname", longTerm)))); Query query = new TermQuery(new Term("fieldname", "text")); TopDocs hits = isearcher.search(query, 1); + StoredFields storedFields = isearcher.storedFields(); assertEquals(1, hits.totalHits.value); // Iterate through the results: for (int i = 0; i < hits.scoreDocs.length; i++) { int docID = hits.scoreDocs[i].doc; - Document hitDoc = isearcher.doc(docID); + Document hitDoc = storedFields.document(docID); assertEquals(text, hitDoc.get("fieldname")); assert ireader.leaves().size() == 1; SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv1"); @@ -433,6 +441,7 @@ public void testThreeFieldsMixed2() throws IOException { IndexReader ireader = maybeWrapWithMergingReader(DirectoryReader.open(directory)); // read-only=true IndexSearcher isearcher = new IndexSearcher(ireader); + StoredFields storedFields = isearcher.storedFields(); assertEquals(1, isearcher.count(new TermQuery(new Term("fieldname", longTerm)))); Query query = new TermQuery(new Term("fieldname", "text")); @@ -442,7 +451,7 @@ public void testThreeFieldsMixed2() throws IOException { // Iterate through the results: for (int i = 0; i < hits.scoreDocs.length; i++) { int docID = hits.scoreDocs[i].doc; - Document hitDoc = isearcher.doc(docID); + Document hitDoc = storedFields.document(docID); assertEquals(text, hitDoc.get("fieldname")); assert ireader.leaves().size() == 1; SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv2"); @@ -516,8 +525,9 @@ public void testTwoDocumentsMerged() throws IOException { maybeWrapWithMergingReader(DirectoryReader.open(directory)); // read-only=true assert ireader.leaves().size() == 1; NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv"); + StoredFields storedFields = ireader.leaves().get(0).reader().storedFields(); for (int i = 0; i < 2; i++) { - Document doc2 = ireader.leaves().get(0).reader().document(i); + Document doc2 = storedFields.document(i); long expected; if (doc2.get("id").equals("0")) { expected = -10; @@ -611,6 +621,7 @@ public void testBytes() throws IOException { IndexReader ireader = maybeWrapWithMergingReader(DirectoryReader.open(directory)); // read-only=true IndexSearcher isearcher = new IndexSearcher(ireader); + StoredFields storedFields = isearcher.storedFields(); assertEquals(1, isearcher.count(new TermQuery(new Term("fieldname", longTerm)))); Query query = new 
TermQuery(new Term("fieldname", "text")); @@ -619,7 +630,7 @@ public void testBytes() throws IOException { // Iterate through the results: for (int i = 0; i < hits.scoreDocs.length; i++) { int hitDocID = hits.scoreDocs[i].doc; - Document hitDoc = isearcher.doc(hitDocID); + Document hitDoc = storedFields.document(hitDocID); assertEquals(text, hitDoc.get("fieldname")); assert ireader.leaves().size() == 1; BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv"); @@ -655,8 +666,9 @@ public void testBytesTwoDocumentsMerged() throws IOException { maybeWrapWithMergingReader(DirectoryReader.open(directory)); // read-only=true assert ireader.leaves().size() == 1; BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv"); + StoredFields storedFields = ireader.leaves().get(0).reader().storedFields(); for (int i = 0; i < 2; i++) { - Document doc2 = ireader.leaves().get(0).reader().document(i); + Document doc2 = storedFields.document(i); String expected; if (doc2.get("id").equals("0")) { expected = "hello world 1"; @@ -725,9 +737,10 @@ public void testSortedBytes() throws IOException { assertEquals(1, hits.totalHits.value); BytesRef scratch = newBytesRef(); // Iterate through the results: + StoredFields storedFields = isearcher.storedFields(); for (int i = 0; i < hits.scoreDocs.length; i++) { int docID = hits.scoreDocs[i].doc; - Document hitDoc = isearcher.doc(docID); + Document hitDoc = storedFields.document(docID); assertEquals(text, hitDoc.get("fieldname")); assert ireader.leaves().size() == 1; SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv"); @@ -843,8 +856,9 @@ public void testSortedBytesTwoDocumentsMerged() throws IOException { assertEquals(newBytesRef("hello world 1"), scratch); scratch = dv.lookupOrd(1); assertEquals(newBytesRef("hello world 2"), scratch); + StoredFields storedFields = ireader.leaves().get(0).reader().storedFields(); for (int i = 0; i < 2; i++) { - Document doc2 = ireader.leaves().get(0).reader().document(i); + Document doc2 = storedFields.document(i); String expected; if (doc2.get("id").equals("0")) { expected = "hello world 1"; @@ -1383,8 +1397,9 @@ protected void assertDVIterate(Directory dir) throws IOException { LeafReader r = context.reader(); NumericDocValues docValues = DocValues.getNumeric(r, "dv"); docValues.nextDoc(); + StoredFields storedFields = r.storedFields(); for (int i = 0; i < r.maxDoc(); i++) { - String storedValue = r.document(i).get("stored"); + String storedValue = storedFields.document(i).get("stored"); if (storedValue == null) { assertTrue(docValues.docID() > i); } else { @@ -1448,12 +1463,13 @@ private void doTestSortedNumericsVsStoredFields(LongSupplier counts, LongSupplie TestUtil.checkReader(ir); for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); + StoredFields storedFields = r.storedFields(); SortedNumericDocValues docValues = DocValues.getSortedNumeric(r, "dv"); for (int i = 0; i < r.maxDoc(); i++) { if (i > docValues.docID()) { docValues.nextDoc(); } - String[] expected = r.document(i).getValues("stored"); + String[] expected = storedFields.document(i).getValues("stored"); if (i < docValues.docID()) { assertEquals(0, expected.length); } else { @@ -1585,10 +1601,11 @@ private void doTestBinaryVsStoredFields(double density, Supplier bytes) TestUtil.checkReader(ir); for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); + StoredFields storedFields = r.storedFields(); BinaryDocValues docValues = DocValues.getBinary(r, 
"dv"); docValues.nextDoc(); for (int i = 0; i < r.maxDoc(); i++) { - BytesRef binaryValue = r.document(i).getBinaryValue("stored"); + BytesRef binaryValue = storedFields.document(i).getBinaryValue("stored"); if (binaryValue == null) { assertTrue(docValues.docID() > i); } else { @@ -1607,10 +1624,11 @@ private void doTestBinaryVsStoredFields(double density, Supplier bytes) TestUtil.checkReader(ir); for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); + StoredFields storedFields = r.storedFields(); BinaryDocValues docValues = DocValues.getBinary(r, "dv"); docValues.nextDoc(); for (int i = 0; i < r.maxDoc(); i++) { - BytesRef binaryValue = r.document(i).getBinaryValue("stored"); + BytesRef binaryValue = storedFields.document(i).getBinaryValue("stored"); if (binaryValue == null) { assertTrue(docValues.docID() > i); } else { @@ -1711,10 +1729,11 @@ protected void doTestSortedVsStoredFields(int numDocs, double density, Supplier< TestUtil.checkReader(ir); for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); + StoredFields storedFields = r.storedFields(); SortedDocValues docValues = DocValues.getSorted(r, "dv"); docValues.nextDoc(); for (int i = 0; i < r.maxDoc(); i++) { - BytesRef binaryValue = r.document(i).getBinaryValue("stored"); + BytesRef binaryValue = storedFields.document(i).getBinaryValue("stored"); if (binaryValue == null) { assertTrue(docValues.docID() > i); } else { @@ -1733,10 +1752,11 @@ protected void doTestSortedVsStoredFields(int numDocs, double density, Supplier< TestUtil.checkReader(ir); for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); + StoredFields storedFields = r.storedFields(); SortedDocValues docValues = DocValues.getSorted(r, "dv"); docValues.nextDoc(); for (int i = 0; i < r.maxDoc(); i++) { - BytesRef binaryValue = r.document(i).getBinaryValue("stored"); + BytesRef binaryValue = storedFields.document(i).getBinaryValue("stored"); if (binaryValue == null) { assertTrue(docValues.docID() > i); } else { @@ -2310,9 +2330,10 @@ protected void doTestSortedSetVsStoredFields( TestUtil.checkReader(ir); for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); + StoredFields storedFields = r.storedFields(); SortedSetDocValues docValues = r.getSortedSetDocValues("dv"); for (int i = 0; i < r.maxDoc(); i++) { - String[] stringValues = r.document(i).getValues("stored"); + String[] stringValues = storedFields.document(i).getValues("stored"); if (docValues != null) { if (docValues.docID() < i) { docValues.nextDoc(); @@ -2344,9 +2365,10 @@ protected void doTestSortedSetVsStoredFields( TestUtil.checkReader(ir); for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); + StoredFields storedFields = r.storedFields(); SortedSetDocValues docValues = r.getSortedSetDocValues("dv"); for (int i = 0; i < r.maxDoc(); i++) { - String[] stringValues = r.document(i).getValues("stored"); + String[] stringValues = storedFields.document(i).getValues("stored"); if (docValues.docID() < i) { docValues.nextDoc(); } @@ -2742,18 +2764,19 @@ public void run() { startingGun.await(); for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); + StoredFields storedFields = r.storedFields(); BinaryDocValues binaries = r.getBinaryDocValues("dvBin"); SortedDocValues sorted = r.getSortedDocValues("dvSorted"); NumericDocValues numerics = r.getNumericDocValues("dvNum"); for (int j = 0; j < r.maxDoc(); j++) { - BytesRef binaryValue = 
r.document(j).getBinaryValue("storedBin"); + BytesRef binaryValue = storedFields.document(j).getBinaryValue("storedBin"); assertEquals(j, binaries.nextDoc()); BytesRef scratch = binaries.binaryValue(); assertEquals(binaryValue, scratch); assertEquals(j, sorted.nextDoc()); scratch = sorted.lookupOrd(sorted.ordValue()); assertEquals(binaryValue, scratch); - String expected = r.document(j).get("storedNum"); + String expected = storedFields.document(j).get("storedNum"); assertEquals(j, numerics.nextDoc()); assertEquals(Long.parseLong(expected), numerics.longValue()); } @@ -2858,6 +2881,7 @@ public void run() { startingGun.await(); for (LeafReaderContext context : ir.leaves()) { LeafReader r = context.reader(); + StoredFields storedFields = r.storedFields(); BinaryDocValues binaries = r.getBinaryDocValues("dvBin"); SortedDocValues sorted = r.getSortedDocValues("dvSorted"); NumericDocValues numerics = r.getNumericDocValues("dvNum"); @@ -2865,7 +2889,7 @@ public void run() { SortedNumericDocValues sortedNumeric = r.getSortedNumericDocValues("dvSortedNumeric"); for (int j = 0; j < r.maxDoc(); j++) { - BytesRef binaryValue = r.document(j).getBinaryValue("storedBin"); + BytesRef binaryValue = storedFields.document(j).getBinaryValue("storedBin"); if (binaryValue != null) { if (binaries != null) { assertEquals(j, binaries.nextDoc()); @@ -2877,7 +2901,7 @@ public void run() { } } - String number = r.document(j).get("storedNum"); + String number = storedFields.document(j).get("storedNum"); if (number != null) { if (numerics != null) { assertEquals(j, numerics.advance(j)); @@ -2885,7 +2909,7 @@ public void run() { } } - String[] values = r.document(j).getValues("storedSortedSet"); + String[] values = storedFields.document(j).getValues("storedSortedSet"); if (values.length > 0) { assertNotNull(sortedSet); assertEquals(j, sortedSet.nextDoc()); @@ -2897,7 +2921,7 @@ public void run() { } } - String[] numValues = r.document(j).getValues("storedSortedNumeric"); + String[] numValues = storedFields.document(j).getValues("storedSortedNumeric"); if (numValues.length > 0) { assertNotNull(sortedNumeric); assertEquals(j, sortedNumeric.nextDoc()); @@ -3646,9 +3670,10 @@ private void doTestRandomAdvance(FieldCreator fieldCreator) throws IOException { // Now search the index: IndexReader r = w.getReader(); + StoredFields storedFields = r.storedFields(); BitSet missing = new FixedBitSet(r.maxDoc()); for (int docID = 0; docID < r.maxDoc(); docID++) { - Document doc = r.document(docID); + Document doc = storedFields.document(docID); if (missingSet.contains(doc.getField("id").numericValue())) { missing.set(docID); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseKnnVectorsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseKnnVectorsFormatTestCase.java index 3accb315b059..7f7e01bbc80c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseKnnVectorsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseKnnVectorsFormatTestCase.java @@ -40,6 +40,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; @@ -724,14 +725,15 @@ public void testSortedIndex() throws Exception { try (IndexReader reader = DirectoryReader.open(iw)) { 
LeafReader leaf = getOnlyLeafReader(reader); + StoredFields storedFields = leaf.storedFields(); VectorValues vectorValues = leaf.getVectorValues(fieldName); assertEquals(2, vectorValues.dimension()); assertEquals(3, vectorValues.size()); - assertEquals("1", leaf.document(vectorValues.nextDoc()).get("id")); + assertEquals("1", storedFields.document(vectorValues.nextDoc()).get("id")); assertEquals(-1f, vectorValues.vectorValue()[0], 0); - assertEquals("2", leaf.document(vectorValues.nextDoc()).get("id")); + assertEquals("2", storedFields.document(vectorValues.nextDoc()).get("id")); assertEquals(1, vectorValues.vectorValue()[0], 0); - assertEquals("4", leaf.document(vectorValues.nextDoc()).get("id")); + assertEquals("4", storedFields.document(vectorValues.nextDoc()).get("id")); assertEquals(0, vectorValues.vectorValue()[0], 0); assertEquals(NO_MORE_DOCS, vectorValues.nextDoc()); } @@ -752,14 +754,15 @@ public void testSortedIndexBytes() throws Exception { try (IndexReader reader = DirectoryReader.open(iw)) { LeafReader leaf = getOnlyLeafReader(reader); + StoredFields storedFields = leaf.storedFields(); VectorValues vectorValues = leaf.getVectorValues(fieldName); assertEquals(2, vectorValues.dimension()); assertEquals(3, vectorValues.size()); - assertEquals("1", leaf.document(vectorValues.nextDoc()).get("id")); + assertEquals("1", storedFields.document(vectorValues.nextDoc()).get("id")); assertEquals(-1f, vectorValues.vectorValue()[0], 0); - assertEquals("2", leaf.document(vectorValues.nextDoc()).get("id")); + assertEquals("2", storedFields.document(vectorValues.nextDoc()).get("id")); assertEquals(1, vectorValues.vectorValue()[0], 0); - assertEquals("4", leaf.document(vectorValues.nextDoc()).get("id")); + assertEquals("4", storedFields.document(vectorValues.nextDoc()).get("id")); assertEquals(0, vectorValues.vectorValue()[0], 0); assertEquals(NO_MORE_DOCS, vectorValues.nextDoc()); } @@ -868,11 +871,12 @@ public void testRandom() throws Exception { continue; } totalSize += vectorValues.size(); + StoredFields storedFields = ctx.reader().storedFields(); int docId; while ((docId = vectorValues.nextDoc()) != NO_MORE_DOCS) { float[] v = vectorValues.vectorValue(); assertEquals(dimension, v.length); - String idString = ctx.reader().document(docId).getField("id").stringValue(); + String idString = storedFields.document(docId).getField("id").stringValue(); int id = Integer.parseInt(idString); if (ctx.reader().getLiveDocs() == null || ctx.reader().getLiveDocs().get(docId)) { assertArrayEquals(idString, values[id], v, 0); @@ -944,11 +948,12 @@ public void testRandomBytes() throws Exception { continue; } totalSize += vectorValues.size(); + StoredFields storedFields = ctx.reader().storedFields(); int docId; while ((docId = vectorValues.nextDoc()) != NO_MORE_DOCS) { BytesRef v = vectorValues.binaryValue(); assertEquals(dimension, v.length); - String idString = ctx.reader().document(docId).getField("id").stringValue(); + String idString = storedFields.document(docId).getField("id").stringValue(); int id = Integer.parseInt(idString); if (ctx.reader().getLiveDocs() == null || ctx.reader().getLiveDocs().get(docId)) { assertEquals(idString, 0, values[id].compareTo(v)); @@ -1059,12 +1064,13 @@ public void testRandomWithUpdatesAndGraph() throws Exception { if (vectorValues == null) { continue; } + StoredFields storedFields = ctx.reader().storedFields(); int docId; int numLiveDocsWithVectors = 0; while ((docId = vectorValues.nextDoc()) != NO_MORE_DOCS) { float[] v = vectorValues.vectorValue(); 
assertEquals(dimension, v.length); - String idString = ctx.reader().document(docId).getField("id").stringValue(); + String idString = storedFields.document(docId).getField("id").stringValue(); int id = Integer.parseInt(idString); if (liveDocs == null || liveDocs.get(docId)) { assertArrayEquals( @@ -1318,10 +1324,11 @@ public void testVectorValuesReportCorrectDocs() throws Exception { for (LeafReaderContext ctx : r.leaves()) { VectorValues vectors = ctx.reader().getVectorValues("knn_vector"); if (vectors != null) { + StoredFields storedFields = ctx.reader().storedFields(); docCount += vectors.size(); while (vectors.nextDoc() != NO_MORE_DOCS) { checksum += vectors.vectorValue()[0]; - Document doc = ctx.reader().document(vectors.docID(), Set.of("id")); + Document doc = storedFields.document(vectors.docID(), Set.of("id")); sumDocIds += Integer.parseInt(doc.get("id")); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseStoredFieldsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseStoredFieldsFormatTestCase.java index 2149a310d1e3..d7c16e5567c0 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseStoredFieldsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseStoredFieldsFormatTestCase.java @@ -59,6 +59,7 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -166,6 +167,7 @@ public void testRandomStoredFields() throws IOException { for (int x = 0; x < 2; x++) { DirectoryReader r = maybeWrapWithMergingReader(w.getReader()); IndexSearcher s = newSearcher(r); + StoredFields storedFields = r.storedFields(); if (VERBOSE) { System.out.println("TEST: cycle x=" + x + " r=" + r); @@ -179,7 +181,7 @@ public void testRandomStoredFields() throws IOException { } TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1); assertEquals(1, hits.totalHits.value); - Document doc = r.document(hits.scoreDocs[0].doc); + Document doc = storedFields.document(hits.scoreDocs[0].doc); Document docExp = docs.get(testID); for (int i = 0; i < fieldCount; i++) { assertEquals( @@ -209,7 +211,8 @@ public void testStoredFieldsOrder() throws Throwable { doc.add(newField("zzz", "1 2 3", customType)); w.addDocument(doc); IndexReader r = maybeWrapWithMergingReader(DirectoryReader.open(w)); - Document doc2 = r.document(0); + StoredFields storedFields = r.storedFields(); + Document doc2 = storedFields.document(0); Iterator it = doc2.getFields().iterator(); assertTrue(it.hasNext()); Field f = (Field) it.next(); @@ -250,7 +253,8 @@ public void testBinaryFieldOffsetLength() throws IOException { w.close(); IndexReader ir = DirectoryReader.open(dir); - Document doc2 = ir.document(0); + StoredFields storedFields = ir.storedFields(); + Document doc2 = storedFields.document(0); IndexableField f2 = doc2.getField("binary"); b = f2.binaryValue().bytes; assertTrue(b != null); @@ -314,8 +318,9 @@ public void testNumericField() throws Exception { for (LeafReaderContext ctx : r.leaves()) { final LeafReader sub = ctx.reader(); final NumericDocValues ids = DocValues.getNumeric(sub, "id"); + StoredFields storedFields = sub.storedFields(); for (int docID = 0; docID < sub.numDocs(); docID++) { - final Document doc = sub.document(docID); + final Document 
doc = storedFields.document(docID); final Field f = (Field) doc.getField("nf"); assertTrue("got f=" + f, f instanceof StoredField); assertEquals(docID, ids.nextDoc()); @@ -336,9 +341,11 @@ public void testIndexedBit() throws Exception { doc.add(new StringField("field2", "value", Field.Store.YES)); w.addDocument(doc); IndexReader r = maybeWrapWithMergingReader(w.getReader()); + StoredFields storedFields = r.storedFields(); w.close(); - assertEquals(IndexOptions.NONE, r.document(0).getField("field").fieldType().indexOptions()); - assertNotNull(r.document(0).getField("field2").fieldType().indexOptions()); + assertEquals( + IndexOptions.NONE, storedFields.document(0).getField("field").fieldType().indexOptions()); + assertNotNull(storedFields.document(0).getField("field2").fieldType().indexOptions()); r.close(); dir.close(); } @@ -379,10 +386,11 @@ public void testReadSkip() throws IOException { iw.commit(); final DirectoryReader reader = maybeWrapWithMergingReader(DirectoryReader.open(dir)); + StoredFields storedFields = reader.storedFields(); final int docID = random().nextInt(100); for (Field fld : fields) { String fldName = fld.name(); - final Document sDoc = reader.document(docID, Collections.singleton(fldName)); + final Document sDoc = storedFields.document(docID, Collections.singleton(fldName)); final IndexableField sField = sDoc.getField(fldName); if (Field.class.equals(fld.getClass())) { assertEquals(fld.binaryValue(), sField.binaryValue()); @@ -410,8 +418,9 @@ public void testEmptyDocs() throws IOException { } iw.commit(); final DirectoryReader rd = maybeWrapWithMergingReader(DirectoryReader.open(dir)); + StoredFields storedFields = rd.storedFields(); for (int i = 0; i < numDocs; ++i) { - final Document doc = rd.document(i); + final Document doc = storedFields.document(i); assertNotNull(doc); assertTrue(doc.getFields().isEmpty()); } @@ -462,12 +471,13 @@ public void run() { for (int q : queries) { final Query query = new TermQuery(new Term("fld", "" + q)); try { + StoredFields storedFields = rd.storedFields(); final TopDocs topDocs = searcher.search(query, 1); if (topDocs.totalHits.value != 1) { throw new IllegalStateException( "Expected 1 hit, got " + topDocs.totalHits.value); } - final Document sdoc = rd.document(topDocs.scoreDocs[0].doc); + final Document sdoc = storedFields.document(topDocs.scoreDocs[0].doc); if (sdoc == null || sdoc.get("fld") == null) { throw new IllegalStateException("Could not find document " + q); } @@ -574,10 +584,11 @@ public void testWriteReadMerge() throws IOException { iw.commit(); final DirectoryReader ir = maybeWrapWithMergingReader(DirectoryReader.open(dir)); + StoredFields storedFields = ir.storedFields(); assertTrue(ir.numDocs() > 0); int numDocs = 0; for (int i = 0; i < ir.maxDoc(); ++i) { - final Document doc = ir.document(i); + final Document doc = storedFields.document(i); if (doc == null) { continue; } @@ -614,6 +625,17 @@ public void document(int docID, StoredFieldVisitor visitor) throws IOException { super.document(maxDoc() - 1 - docID, visitor); } + @Override + public StoredFields storedFields() throws IOException { + StoredFields orig = in.storedFields(); + return new StoredFields() { + @Override + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + orig.document(maxDoc() - 1 - docID, visitor); + } + }; + } + @Override public CacheHelper getCoreCacheHelper() { return null; @@ -687,8 +709,9 @@ public void testMergeFilterReader() throws IOException { dir.close(); reader = 
maybeWrapWithMergingReader(w.getReader()); + StoredFields storedFields = reader.storedFields(); for (int i = 0; i < reader.maxDoc(); ++i) { - final Document doc = reader.document(i); + final Document doc = storedFields.document(i); final int id = doc.getField("id").numericValue().intValue(); final Document expected = docs[id]; assertEquals(expected.get("s"), doc.get("s")); @@ -765,11 +788,12 @@ public void testBigDocuments() throws IOException { iw.forceMerge(1); // look at what happens when big docs are merged final DirectoryReader rd = maybeWrapWithMergingReader(DirectoryReader.open(dir)); final IndexSearcher searcher = new IndexSearcher(rd); + StoredFields storedFields = rd.storedFields(); for (int i = 0; i < numDocs; ++i) { final Query query = new TermQuery(new Term("id", "" + i)); final TopDocs topDocs = searcher.search(query, 1); assertEquals("" + i, 1, topDocs.totalHits.value); - final Document doc = rd.document(topDocs.scoreDocs[0].doc); + final Document doc = storedFields.document(topDocs.scoreDocs[0].doc); assertNotNull(doc); final IndexableField[] fieldValues = doc.getFields("fld"); assertEquals(docs[i].getFields("fld").length, fieldValues.length); @@ -848,8 +872,9 @@ public void testMismatchedFields() throws Exception { iw.forceMerge(1); LeafReader ir = getOnlyLeafReader(DirectoryReader.open(iw)); + StoredFields storedFields = ir.storedFields(); for (int i = 0; i < ir.maxDoc(); i++) { - Document doc = ir.document(i); + Document doc = storedFields.document(i); assertEquals(10, doc.getFields().size()); for (int j = 0; j < 10; j++) { assertEquals(Integer.toString(j), doc.get(Integer.toString(j))); @@ -918,6 +943,7 @@ public void testRandomStoredFieldsWithIndexSort() throws Exception { return; } try (DirectoryReader reader = maybeWrapWithMergingReader(iw.getReader())) { + StoredFields actualStoredFields = reader.storedFields(); IndexSearcher searcher = new IndexSearcher(reader); int iters = TestUtil.nextInt(random(), 1, 10); for (int i = 0; i < iters; i++) { @@ -931,7 +957,7 @@ public void testRandomStoredFieldsWithIndexSort() throws Exception { docs.get(testID).getFields().stream() .filter(f -> f.fieldType().stored()) .collect(Collectors.toList()); - Document actualDoc = reader.document(hits.scoreDocs[0].doc); + Document actualDoc = actualStoredFields.document(hits.scoreDocs[0].doc); assertEquals(expectedFields.size(), actualDoc.getFields().size()); for (IndexableField expectedField : expectedFields) { IndexableField[] actualFields = actualDoc.getFields(expectedField.name()); diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseTermVectorsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseTermVectorsFormatTestCase.java index a349e4e918d7..53df7671d85f 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseTermVectorsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseTermVectorsFormatTestCase.java @@ -58,6 +58,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.TermsEnum.SeekStatus; @@ -567,16 +568,17 @@ public void testRareVectors() throws IOException { } final IndexReader reader = writer.getReader(); final int docWithVectorsID = docID(reader, "42"); + TermVectors termVectors = reader.termVectors(); for (int i = 0; i < 10; 
++i) {
       final int docID = random().nextInt(numDocs);
-      final Fields fields = reader.getTermVectors(docID);
+      final Fields fields = termVectors.get(docID);
       if (docID == docWithVectorsID) {
         assertEquals(doc, fields);
       } else {
         assertNull(fields);
       }
     }
-    final Fields fields = reader.getTermVectors(docWithVectorsID);
+    final Fields fields = termVectors.get(docWithVectorsID);
     assertEquals(doc, fields);
     reader.close();
     writer.close();
@@ -596,7 +598,7 @@ public void testHighFreqs() throws IOException {
         docFactory.newDocument(TestUtil.nextInt(random(), 1, 2), atLeast(2000), options);
     writer.addDocument(doc.toDocument());
     final IndexReader reader = writer.getReader();
-    assertEquals(doc, reader.getTermVectors(0));
+    assertEquals(doc, reader.termVectors().get(0));
     reader.close();
     writer.close();
     dir.close();
@@ -613,7 +615,7 @@ public void testLotsOfFields() throws IOException {
         docFactory.newDocument(TestUtil.nextInt(random(), 5, fieldCount), 5, options);
     writer.addDocument(doc.toDocument());
     final IndexReader reader = writer.getReader();
-    assertEquals(doc, reader.getTermVectors(0));
+    assertEquals(doc, reader.termVectors().get(0));
     reader.close();
     writer.close();
     dir.close();
@@ -637,9 +639,9 @@ public void testMixedOptions() throws IOException {
       writer.addDocument(addId(doc2.toDocument(), "2"));
       final IndexReader reader = writer.getReader();
       final int doc1ID = docID(reader, "1");
-      assertEquals(doc1, reader.getTermVectors(doc1ID));
+      assertEquals(doc1, reader.termVectors().get(doc1ID));
       final int doc2ID = docID(reader, "2");
-      assertEquals(doc2, reader.getTermVectors(doc2ID));
+      assertEquals(doc2, reader.termVectors().get(doc2ID));
       reader.close();
       writer.close();
       dir.close();
@@ -664,9 +666,10 @@ public void testRandom() throws IOException {
       writer.addDocument(addId(docs[i].toDocument(), "" + i));
     }
     final IndexReader reader = writer.getReader();
+    TermVectors termVectors = reader.termVectors();
     for (int i = 0; i < numDocs; ++i) {
       final int docID = docID(reader, "" + i);
-      assertEquals(docs[i], reader.getTermVectors(docID));
+      assertEquals(docs[i], termVectors.get(docID));
     }
     reader.close();
     writer.close();
@@ -695,9 +698,10 @@ private void doTestMerge(Sort indexSort, boolean allowDeletes) throws IOExceptio
     Runnable verifyTermVectors =
         () -> {
           try (DirectoryReader reader = maybeWrapWithMergingReader(writer.getReader())) {
+            TermVectors termVectors = reader.termVectors();
             for (String id : liveDocIDs) {
               final int docID = docID(reader, id);
-              assertEquals(docs.get(id), reader.getTermVectors(docID));
+              assertEquals(docs.get(id), termVectors.get(docID));
             }
           } catch (IOException e) {
             throw new UncheckedIOException(e);
@@ -789,9 +793,10 @@ public void testClone() throws IOException, InterruptedException {
       writer.addDocument(addId(docs[i].toDocument(), "" + i));
     }
     final IndexReader reader = writer.getReader();
+    TermVectors termVectors = reader.termVectors();
     for (int i = 0; i < numDocs; ++i) {
       final int docID = docID(reader, "" + i);
-      assertEquals(docs[i], reader.getTermVectors(docID));
+      assertEquals(docs[i], termVectors.get(docID));
     }
 
     final AtomicReference<Throwable> exception = new AtomicReference<>();
@@ -802,10 +807,11 @@ public void run() {
             try {
+              TermVectors termVectors = reader.termVectors();
               for (int i = 0; i < atLeast(100); ++i) {
                 final int idx = random().nextInt(numDocs);
                 final int docID = docID(reader, "" + idx);
-                assertEquals(docs[idx], reader.getTermVectors(docID));
+                assertEquals(docs[idx], termVectors.get(docID));
               }
             } catch (Throwable t) {
               exception.set(t);
@@ -844,7 +850,7 @@ protected TokenStreamComponents createComponents(String fieldName) {
     iw.addDocument(doc);
 
     DirectoryReader reader = DirectoryReader.open(iw);
-    Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+    Terms terms = getOnlyLeafReader(reader).termVectors().get(0, "foo");
     TermsEnum termsEnum = terms.iterator();
     assertNotNull(termsEnum);
     assertEquals(newBytesRef("bar"), termsEnum.next());
@@ -927,7 +933,7 @@ protected TokenStreamComponents createComponents(String fieldName) {
     iw.addDocument(doc);
 
     DirectoryReader reader = DirectoryReader.open(iw);
-    Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+    Terms terms = getOnlyLeafReader(reader).termVectors().get(0, "foo");
     TermsEnum termsEnum = terms.iterator();
     assertNotNull(termsEnum);
     assertEquals(newBytesRef("bar"), termsEnum.next());
@@ -1109,7 +1115,7 @@ protected TokenStreamComponents createComponents(String fieldName) {
     iw.addDocument(doc);
 
     DirectoryReader reader = DirectoryReader.open(iw);
-    Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+    Terms terms = getOnlyLeafReader(reader).termVectors().get(0, "foo");
     TermsEnum termsEnum = terms.iterator();
     assertNotNull(termsEnum);
     assertEquals(newBytesRef("bar"), termsEnum.next());
@@ -1302,7 +1308,7 @@ protected TokenStreamComponents createComponents(String fieldName) {
     iw.addDocument(doc);
 
     DirectoryReader reader = DirectoryReader.open(iw);
-    Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+    Terms terms = getOnlyLeafReader(reader).termVectors().get(0, "foo");
     TermsEnum termsEnum = terms.iterator();
     assertNotNull(termsEnum);
     assertEquals(newBytesRef("bar"), termsEnum.next());
@@ -1493,7 +1499,7 @@ public void testPostingsEnumPayloads() throws Exception {
     iw.addDocument(doc);
 
     DirectoryReader reader = DirectoryReader.open(iw);
-    Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+    Terms terms = getOnlyLeafReader(reader).termVectors().get(0, "foo");
     TermsEnum termsEnum = terms.iterator();
     assertNotNull(termsEnum);
     assertEquals(newBytesRef("bar"), termsEnum.next());
@@ -1696,7 +1702,7 @@ public void testPostingsEnumAll() throws Exception {
     iw.addDocument(doc);
 
     DirectoryReader reader = DirectoryReader.open(iw);
-    Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+    Terms terms = getOnlyLeafReader(reader).termVectors().get(0, "foo");
     TermsEnum termsEnum = terms.iterator();
     assertNotNull(termsEnum);
     assertEquals(newBytesRef("bar"), termsEnum.next());
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/FieldFilterLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/FieldFilterLeafReader.java
index 6d6156fb787c..05d32d7f373b 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/FieldFilterLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/FieldFilterLeafReader.java
@@ -32,6 +32,8 @@
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.index.StoredFields;
+import org.apache.lucene.index.TermVectors;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.util.FilterIterator;
@@ -69,57 +71,79 @@ public FieldInfos getFieldInfos() {
 
   @Override
   public Fields getTermVectors(int docID) throws IOException {
-    Fields f = super.getTermVectors(docID);
-    if (f == null) {
-      return null;
-    }
-    f = new FieldFilterFields(f);
-    // we need to check for emptyness, so we can return
-    // null:
-    return f.iterator().hasNext() ? f : null;
+    return termVectors().get(docID);
+  }
+
+  @Override
+  public TermVectors termVectors() throws IOException {
+    TermVectors orig = super.termVectors();
+    return new TermVectors() {
+      @Override
+      public Fields get(int docID) throws IOException {
+        Fields f = orig.get(docID);
+        if (f == null) {
+          return null;
+        }
+        f = new FieldFilterFields(f);
+        // we need to check for emptyness, so we can return
+        // null:
+        return f.iterator().hasNext() ? f : null;
+      }
+    };
   }
 
   @Override
   public void document(final int docID, final StoredFieldVisitor visitor) throws IOException {
-    super.document(
-        docID,
-        new StoredFieldVisitor() {
-          @Override
-          public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException {
-            visitor.binaryField(fieldInfo, value);
-          }
-
-          @Override
-          public void stringField(FieldInfo fieldInfo, String value) throws IOException {
-            visitor.stringField(
-                fieldInfo, Objects.requireNonNull(value, "String value should not be null"));
-          }
-
-          @Override
-          public void intField(FieldInfo fieldInfo, int value) throws IOException {
-            visitor.intField(fieldInfo, value);
-          }
-
-          @Override
-          public void longField(FieldInfo fieldInfo, long value) throws IOException {
-            visitor.longField(fieldInfo, value);
-          }
-
-          @Override
-          public void floatField(FieldInfo fieldInfo, float value) throws IOException {
-            visitor.floatField(fieldInfo, value);
-          }
-
-          @Override
-          public void doubleField(FieldInfo fieldInfo, double value) throws IOException {
-            visitor.doubleField(fieldInfo, value);
-          }
-
-          @Override
-          public Status needsField(FieldInfo fieldInfo) throws IOException {
-            return hasField(fieldInfo.name) ? visitor.needsField(fieldInfo) : Status.NO;
-          }
-        });
+    storedFields().document(docID, visitor);
+  }
+
+  @Override
+  public StoredFields storedFields() throws IOException {
+    StoredFields orig = super.storedFields();
+    return new StoredFields() {
+      @Override
+      public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+        orig.document(
+            docID,
+            new StoredFieldVisitor() {
+              @Override
+              public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException {
+                visitor.binaryField(fieldInfo, value);
+              }
+
+              @Override
+              public void stringField(FieldInfo fieldInfo, String value) throws IOException {
+                visitor.stringField(
+                    fieldInfo, Objects.requireNonNull(value, "String value should not be null"));
+              }
+
+              @Override
+              public void intField(FieldInfo fieldInfo, int value) throws IOException {
+                visitor.intField(fieldInfo, value);
+              }
+
+              @Override
+              public void longField(FieldInfo fieldInfo, long value) throws IOException {
+                visitor.longField(fieldInfo, value);
+              }
+
+              @Override
+              public void floatField(FieldInfo fieldInfo, float value) throws IOException {
+                visitor.floatField(fieldInfo, value);
+              }
+
+              @Override
+              public void doubleField(FieldInfo fieldInfo, double value) throws IOException {
+                visitor.doubleField(fieldInfo, value);
+              }
+
+              @Override
+              public Status needsField(FieldInfo fieldInfo) throws IOException {
+                return hasField(fieldInfo.name) ? visitor.needsField(fieldInfo) : Status.NO;
+              }
+            });
+      }
+    };
   }
 
   @Override
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/MergeReaderWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/MergeReaderWrapper.java
index 36134697e350..a4822f073ffa 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/MergeReaderWrapper.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/MergeReaderWrapper.java
@@ -38,6 +38,8 @@
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.index.StoredFields;
+import org.apache.lucene.index.TermVectors;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.VectorValues;
 import org.apache.lucene.search.TopDocs;
@@ -212,6 +214,16 @@ public Fields getTermVectors(int docID) throws IOException {
     return vectors.get(docID);
   }
 
+  @Override
+  public TermVectors termVectors() throws IOException {
+    ensureOpen();
+    if (vectors == null) {
+      return TermVectors.EMPTY;
+    } else {
+      return vectors;
+    }
+  }
+
   @Override
   public PointValues getPointValues(String fieldName) throws IOException {
     return in.getPointValues(fieldName);
@@ -242,7 +254,13 @@ public int maxDoc() {
   public void document(int docID, StoredFieldVisitor visitor) throws IOException {
     ensureOpen();
     checkBounds(docID);
-    store.visitDocument(docID, visitor);
+    store.document(docID, visitor);
+  }
+
+  @Override
+  public StoredFields storedFields() throws IOException {
+    ensureOpen();
+    return store;
   }
 
   @Override
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/MismatchedLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/MismatchedLeafReader.java
index df50df4bda5d..bbf2adbdc046 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/MismatchedLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/MismatchedLeafReader.java
@@ -27,6 +27,7 @@
 import org.apache.lucene.index.FilterLeafReader;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.index.StoredFields;
 
 /**
  * Shuffles field numbers around to try to trip bugs where field numbers are assumed to always be
@@ -51,6 +52,17 @@ public void document(int docID, StoredFieldVisitor visitor) throws IOException {
     in.document(docID, new MismatchedVisitor(visitor));
   }
 
+  @Override
+  public StoredFields storedFields() throws IOException {
+    final StoredFields inStoredFields = in.storedFields();
+    return new StoredFields() {
+      @Override
+      public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+        inStoredFields.document(docID, new MismatchedVisitor(visitor));
+      }
+    };
+  }
+
   @Override
   public CacheHelper getCoreCacheHelper() {
     return in.getCoreCacheHelper();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/ThreadedIndexingAndSearchingTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/ThreadedIndexingAndSearchingTestCase.java
index 68cdc753bb9d..f57a68184c0b 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/ThreadedIndexingAndSearchingTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/ThreadedIndexingAndSearchingTestCase.java
@@ -39,6 +39,7 @@
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.SegmentReader;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -529,9 +530,10 @@ public void runTest(String testName) throws Exception {
         final Bits liveDocs = reader.getLiveDocs();
         int sum = 0;
         final int inc = Math.max(1, maxDoc / 50);
+        StoredFields storedFields = reader.storedFields();
         for (int docID = 0; docID < maxDoc; docID += inc) {
           if (liveDocs == null || liveDocs.get(docID)) {
-            final Document doc = reader.document(docID);
+            final Document doc = storedFields.document(docID);
             sum += doc.getFields().size();
           }
         }
@@ -654,6 +656,7 @@ public void message(String component, String message) {
     // Verify: make sure each group of sub-docs are still in docID order:
     for (SubDocs subDocs : allSubDocs) {
       TopDocs hits = s.search(new TermQuery(new Term("packID", subDocs.packID)), 20);
+      StoredFields storedFields = s.storedFields();
       if (!subDocs.deleted) {
         // We sort by relevance but the scores should be identical so sort falls back to by docID:
         if (hits.totalHits.value != subDocs.subIDs.size()) {
@@ -676,7 +679,7 @@ public void message(String component, String message) {
             startDocID = docID;
           }
           lastDocID = docID;
-          final Document doc = s.doc(docID);
+          final Document doc = storedFields.document(docID);
           assertEquals(subDocs.packID, doc.get("packID"));
         }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/tests/search/QueryUtils.java
index 413a45db8d84..3c697dcfcf31 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/search/QueryUtils.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/search/QueryUtils.java
@@ -37,6 +37,8 @@
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.index.StoredFields;
+import org.apache.lucene.index.TermVectors;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.VectorValues;
 import org.apache.lucene.search.BulkScorer;
@@ -258,6 +260,11 @@ public Fields getTermVectors(int docID) throws IOException {
         return null;
       }
 
+      @Override
+      public TermVectors termVectors() {
+        return TermVectors.EMPTY;
+      }
+
       @Override
       public int numDocs() {
         return 0;
@@ -271,6 +278,14 @@ public int maxDoc() {
       @Override
       public void document(int docID, StoredFieldVisitor visitor) throws IOException {}
 
+      @Override
+      public StoredFields storedFields() {
+        return new StoredFields() {
+          @Override
+          public void document(int docID, StoredFieldVisitor visitor) throws IOException {}
+        };
+      }
+
       @Override
       protected void doClose() throws IOException {}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/store/BaseChunkedDirectoryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/store/BaseChunkedDirectoryTestCase.java
index 30e685f5edaa..195365268871 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/store/BaseChunkedDirectoryTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/store/BaseChunkedDirectoryTestCase.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -291,10 +292,11 @@ private void assertChunking(Random random, int chunkSize) throws Exception {
     IndexReader reader = writer.getReader();
     writer.close();
 
+    StoredFields storedFields = reader.storedFields();
     int numAsserts = atLeast(100);
     for (int i = 0; i < numAsserts; i++) {
       int docID = random.nextInt(numDocs);
-      assertEquals("" + docID, reader.document(docID).get("docid"));
+      assertEquals("" + docID, storedFields.document(docID).get("docid"));
     }
     reader.close();
     dir.close();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java
index 11d7d52a59f9..5384f46da5cc 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java
@@ -144,6 +144,8 @@
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.StoredFields;
+import org.apache.lucene.index.TermVectors;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
@@ -2420,9 +2422,11 @@ public void assertNormsEquals(String info, IndexReader leftReader, IndexReader r
   public void assertStoredFieldsEquals(String info, IndexReader leftReader, IndexReader rightReader)
       throws IOException {
     assert leftReader.maxDoc() == rightReader.maxDoc();
+    StoredFields leftStoredFields = leftReader.storedFields();
+    StoredFields rightStoredFields = rightReader.storedFields();
     for (int i = 0; i < leftReader.maxDoc(); i++) {
-      Document leftDoc = leftReader.document(i);
-      Document rightDoc = rightReader.document(i);
+      Document leftDoc = leftStoredFields.document(i);
+      Document rightDoc = rightStoredFields.document(i);
       // TODO: I think this is bogus because we don't document what the order should be
       // from these iterators, etc. I think the codec/IndexReader should be free to order this stuff
@@ -2465,9 +2469,11 @@ public void assertStoredFieldEquals(
   public void assertTermVectorsEquals(String info, IndexReader leftReader, IndexReader rightReader)
       throws IOException {
     assert leftReader.maxDoc() == rightReader.maxDoc();
+    TermVectors leftVectors = leftReader.termVectors();
+    TermVectors rightVectors = rightReader.termVectors();
     for (int i = 0; i < leftReader.maxDoc(); i++) {
-      Fields leftFields = leftReader.getTermVectors(i);
-      Fields rightFields = rightReader.getTermVectors(i);
+      Fields leftFields = leftVectors.get(i);
+      Fields rightFields = rightVectors.get(i);
       // Fields could be null if there are no postings,
       // but then it must be null for both
diff --git a/lucene/test-framework/src/test/org/apache/lucene/tests/analysis/TestMockAnalyzer.java b/lucene/test-framework/src/test/org/apache/lucene/tests/analysis/TestMockAnalyzer.java
index 792d58766a75..d421e8548283 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/tests/analysis/TestMockAnalyzer.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/tests/analysis/TestMockAnalyzer.java
@@ -319,7 +319,7 @@ public int getOffsetGap(String fieldName) {
     doc.add(new Field("f", "a", ft));
     writer.addDocument(doc);
    final LeafReader reader = getOnlyLeafReader(writer.getReader());
-    final Fields fields = reader.getTermVectors(0);
+    final Fields fields = reader.termVectors().get(0);
     final Terms terms = fields.terms("f");
     final TermsEnum te = terms.iterator();
     assertEquals(new BytesRef("a"), te.next());