@Override
public AtomicNumericFieldData load(LeafReaderContext context) {
    final LeafReader reader = context.reader();
    final String field = fieldName;
    switch (numericType) {
        case HALF_FLOAT:
            return new SortedNumericHalfFloatFieldData(reader, field);
        case FLOAT:
            return new SortedNumericFloatFieldData(reader, field);
        case DOUBLE:
            return new SortedNumericDoubleFieldData(reader, field);
        default:
            return new SortedNumericLongFieldData(reader, field, numericType);
    }
}
/**
 * @return the estimate for loading the entire term set into field data, or 0 if unavailable
 */
public long estimateStringFieldData() {
    try {
        LeafReader reader = context.reader();
        Terms terms = reader.terms(getFieldName());

        Fields fields = reader.fields();
        final Terms fieldTerms = fields.terms(getFieldName());

        if (fieldTerms instanceof FieldReader) {
            final Stats stats = ((FieldReader) fieldTerms).getStats();
            long totalTermBytes = stats.totalTermBytes;
            if (logger.isTraceEnabled()) {
                logger.trace("totalTermBytes: {}, terms.size(): {}, terms.getSumDocFreq(): {}",
                        totalTermBytes, terms.size(), terms.getSumDocFreq());
            }
            // Rough estimate: the raw term bytes, plus 2 bytes per term, plus 4 bytes
            // per (term, doc) entry.
            long totalBytes = totalTermBytes + (2 * terms.size()) + (4 * terms.getSumDocFreq());
            return totalBytes;
        }
    } catch (Exception e) {
        logger.warn("Unable to estimate memory overhead", e);
    }
    return 0;
}
protected TermsEnum filter(Terms terms, TermsEnum iterator, LeafReader reader) throws IOException {
    if (iterator == null) {
        return null;
    }
    int docCount = terms.getDocCount();
    if (docCount == -1) {
        docCount = reader.maxDoc();
    }
    if (docCount >= minSegmentSize) {
        // Values greater than 1.0 are absolute document frequencies; values in [0, 1]
        // are treated as ratios of the segment's docCount.
        final int minFreq = minFrequency > 1.0 ? (int) minFrequency : (int) (docCount * minFrequency);
        final int maxFreq = maxFrequency > 1.0 ? (int) maxFrequency : (int) (docCount * maxFrequency);
        if (minFreq > 1 || maxFreq < docCount) {
            iterator = new FrequencyFilter(iterator, minFreq, maxFreq);
        }
    }
    return iterator;
}
/**
 * Initialize lookup for the provided segment.
 */
PerThreadIDAndVersionLookup(LeafReader reader) throws IOException {
    TermsEnum termsEnum = null;
    NumericDocValues versions = null;

    Fields fields = reader.fields();
    if (fields != null) {
        Terms terms = fields.terms(UidFieldMapper.NAME);
        if (terms != null) {
            termsEnum = terms.iterator();
            assert termsEnum != null;
            versions = reader.getNumericDocValues(VersionFieldMapper.NAME);
            assert versions != null;
        }
    }

    this.versions = versions;
    this.termsEnum = termsEnum;
}
public void testSingleValued() throws IOException {
    Directory dir = newDirectory();
    // we need the default codec to check for singletons
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null).setCodec(TestUtil.getDefaultCodec()));
    Document doc = new Document();
    for (IndexableField f : NumberFieldMapper.NumberType.HALF_FLOAT.createFields("half_float", 3f, false, true, false)) {
        doc.add(f);
    }
    w.addDocument(doc);
    final DirectoryReader dirReader = DirectoryReader.open(w);
    LeafReader reader = getOnlyLeafReader(dirReader);
    SortedNumericDoubleValues values = new SortedNumericDVIndexFieldData.SortedNumericHalfFloatFieldData(
            reader, "half_float").getDoubleValues();
    assertNotNull(FieldData.unwrapSingleton(values));
    values.setDocument(0);
    assertEquals(1, values.count());
    assertEquals(3f, values.valueAt(0), 0f);
    IOUtils.close(dirReader, w, dir);
}
private static FixedBitSet getSeqNosSet(final IndexReader reader, final long highestSeqNo) throws IOException {
    // _seq_no are stored as doc values for the time being, so this is how we get them
    // (as opposed to using an IndexSearcher or IndexReader)
    final FixedBitSet bitSet = new FixedBitSet((int) highestSeqNo + 1);
    final List<LeafReaderContext> leaves = reader.leaves();
    if (leaves.isEmpty()) {
        return bitSet;
    }

    for (int i = 0; i < leaves.size(); i++) {
        final LeafReader leaf = leaves.get(i).reader();
        final NumericDocValues values = leaf.getNumericDocValues(SeqNoFieldMapper.NAME);
        if (values == null) {
            continue;
        }
        final Bits bits = leaf.getLiveDocs();
        for (int docID = 0; docID < leaf.maxDoc(); docID++) {
            if (bits == null || bits.get(docID)) {
                final long seqNo = values.get(docID);
                assertFalse("should not have more than one document with the same seq_no[" + seqNo + "]",
                        bitSet.get((int) seqNo));
                bitSet.set((int) seqNo);
            }
        }
    }
    return bitSet;
}
public void testAddingAClosedReader() throws Exception {
    LeafReader reader;
    try (Directory dir = newDirectory();
            RandomIndexWriter writer = new RandomIndexWriter(random(), dir)) {
        writer.addDocument(new Document());
        try (DirectoryReader dirReader = ElasticsearchDirectoryReader.wrap(writer.getReader(),
                new ShardId("index1", "_na_", 1))) {
            reader = dirReader.leaves().get(0).reader();
        }
    }
    ShardCoreKeyMap map = new ShardCoreKeyMap();
    try {
        map.add(reader);
        fail("Expected AlreadyClosedException");
    } catch (AlreadyClosedException e) {
        // What we wanted
    }
    assertEquals(0, map.size());
}
@Override
public AtomicGeoPointFieldData loadDirect(LeafReaderContext context) throws Exception {
    LeafReader reader = context.reader();

    Terms terms = reader.terms(getFieldNames().indexName());
    AtomicGeoPointFieldData data = null;
    // TODO: Use an actual estimator to estimate before loading.
    NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker(CircuitBreaker.FIELDDATA));
    if (terms == null) {
        data = AbstractAtomicGeoPointFieldData.empty(reader.maxDoc());
        estimator.afterLoad(null, data.ramBytesUsed());
        return data;
    }
    return (Version.indexCreated(indexSettings).before(Version.V_2_2_0))
            ? loadLegacyFieldData(reader, estimator, terms, data)
            : loadFieldData22(reader, estimator, terms, data);
}
/**
 * Initialize lookup for the provided segment.
 */
public PerThreadIDAndVersionLookup(LeafReader reader) throws IOException {
    TermsEnum termsEnum = null;
    NumericDocValues versions = null;
    boolean hasPayloads = false;

    Fields fields = reader.fields();
    if (fields != null) {
        Terms terms = fields.terms(UidFieldMapper.NAME);
        if (terms != null) {
            hasPayloads = terms.hasPayloads();
            termsEnum = terms.iterator();
            assert termsEnum != null;
            versions = reader.getNumericDocValues(VersionFieldMapper.NAME);
        }
    }

    this.versions = versions;
    this.termsEnum = termsEnum;
    this.hasPayloads = hasPayloads;
}
private NamedList<Object> buildEntryValue(long count, Term t, List<Entry<LeafReader, Bits>> leaves) throws IOException {
    NamedList<Object> entry = new NamedList<>();
    entry.add("count", count);
    int i = -1;
    for (Entry<LeafReader, Bits> e : leaves) {
        PostingsEnum postings = e.getKey().postings(t, PostingsEnum.PAYLOADS);
        Bits liveDocs = e.getValue();
        while (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            if (!liveDocs.get(postings.docID())) {
                continue;
            }
            i++;
            NamedList<Object> documentEntry = new NamedList<>();
            entry.add("doc" + i, documentEntry);
            for (int j = 0; j < postings.freq(); j++) {
                postings.nextPosition();
                String extra = postings.getPayload().utf8ToString();
                documentEntry.add("position" + j, extra);
            }
        }
    }
    return entry;
}
public LocalEnv(int offset, int limit, int startTermIndex, int adjust, int targetIdx, int nTerms,
        Predicate<BytesRef> termFilter, int mincount, int[] counts, CharsRefBuilder charsRef,
        boolean extend, SortedSetDocValues si, SolrIndexSearcher searcher,
        List<Entry<LeafReader, Bits>> leaves, String fieldName, T ft, NamedList res) {
    super(offset, limit, targetIdx, mincount, fieldName, ft, res);
    if (startTermIndex == -1) {
        // weird case where missing is counted at counts[0]
        this.startTermOrd = 0;
        this.endTermOrd = nTerms - 1;
    } else if (startTermIndex >= 0) {
        this.startTermOrd = startTermIndex;
        this.endTermOrd = startTermIndex + nTerms;
    } else {
        throw new IllegalStateException();
    }
    this.startTermIndex = startTermIndex;
    this.adjust = adjust;
    this.nTerms = nTerms;
    this.termFilter = termFilter;
    this.counts = counts;
    this.charsRef = charsRef;
    this.extend = extend;
    this.si = si;
    this.searcher = searcher;
    this.leaves = leaves;
}
private Collector getInsanityWrapper(final String field, Collector collector) {
    SchemaField sf = searcher.getSchema().getFieldOrNull(field);
    if (sf != null && !sf.hasDocValues() && !sf.multiValued() && sf.getType().getNumberType() != null) {
        // it's a single-valued numeric field: we must currently create insanity :(
        // there isn't a GroupedFacetCollector that works on numerics right now...
        return new FilterCollector(collector) {
            @Override
            public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
                LeafReader insane = Insanity.wrapInsanity(context.reader(), field);
                return in.getLeafCollector(insane.getContext());
            }
        };
    } else {
        return collector;
    }
}
@Test
public void shouldGetEmptyDistinctTrackFieldValuesWhenTermsNull() throws Exception {
    IndexSearcher mockIndexSearcher = mock(IndexSearcher.class);
    when(mockTrackManager.acquire()).thenReturn(mockIndexSearcher);

    IndexReader mockIndexReader = mock(IndexReader.class);
    when(mockIndexSearcher.getIndexReader()).thenReturn(mockIndexReader);

    LeafReaderContext mockLeafReaderContext = mock(LeafReaderContext.class);
    List<LeafReaderContext> mockLeafReaderContexts = Arrays.asList(mockLeafReaderContext);
    when(mockIndexReader.leaves()).thenReturn(mockLeafReaderContexts);

    LeafReader mockLeafReader = mock(LeafReader.class);
    when(mockLeafReaderContext.reader()).thenReturn(mockLeafReader);
    when(mockLeafReader.terms(anyString())).thenReturn(null);

    List<String> result = spySearchManager.getDistinctTrackFieldValues(TrackField.ALBUMID);

    assertThat("Result should be empty", result.isEmpty(), equalTo(true));
}
public static int[] get(LeafReaderContext context, String keyName) throws IOException {
    LeafReader reader = context.reader();
    NumericDocValues ndv = reader.getNumericDocValues(keyName);
    if (ndv == null) {
        return null;
    }
    CacheValue cacheValue = safeGet(reader, keyName);
    int[] keyValues = cacheValue.keyValues;
    if (!cacheValue.newValue) {
        return keyValues;
    }
    for (int i = 0; i < reader.maxDoc(); i++) {
        keyValues[i] = (int) ndv.get(i);
    }
    return keyValues;
}
private Object readDocValues(String field, DocValuesType docValType, LeafReader atomicReader) throws IOException {
    Object docVals = null;
    if (docValType == DocValuesType.NUMERIC) {
        docVals = atomicReader.getNumericDocValues(field);
    } else if (docValType == DocValuesType.BINARY) {
        docVals = atomicReader.getBinaryDocValues(field);
    } else if (docValType == DocValuesType.SORTED) {
        docVals = atomicReader.getSortedDocValues(field);
    } else if (docValType == DocValuesType.SORTED_NUMERIC) {
        docVals = atomicReader.getSortedNumericDocValues(field);
    } else if (docValType == DocValuesType.SORTED_SET) {
        docVals = atomicReader.getSortedSetDocValues(field);
    }
    return docVals;
}
private void showDocId(int docid, int docBase, String field, LeafReader atomicReader, PrintStream out,
        int segmentid) throws Exception {
    FieldInfo finfo = atomicReader.getFieldInfos().fieldInfo(field);
    if (finfo == null || finfo.getDocValuesType() == DocValuesType.NONE) {
        out.println("docvalue does not exist for field: " + field);
        return;
    }
    DocValuesType docValType = finfo.getDocValuesType();
    BytesRef bref = new BytesRef();
    showDocId(docid, docBase, readDocValues(field, docValType, atomicReader), docValType, bref, out, segmentid);
}
Query createCandidateQuery(IndexReader indexReader) throws IOException {
    List<BytesRef> extractedTerms = new ArrayList<>();
    LeafReader reader = indexReader.leaves().get(0).reader();
    Fields fields = reader.fields();
    for (String field : fields) {
        Terms terms = fields.terms(field);
        if (terms == null) {
            continue;
        }
        BytesRef fieldBr = new BytesRef(field);
        TermsEnum tenum = terms.iterator();
        for (BytesRef term = tenum.next(); term != null; term = tenum.next()) {
            BytesRefBuilder builder = new BytesRefBuilder();
            builder.append(fieldBr);
            builder.append(FIELD_VALUE_SEPARATOR);
            builder.append(term);
            extractedTerms.add(builder.toBytesRef());
        }
    }
    Query extractionSuccess = new TermInSetQuery(queryTermsField.name(), extractedTerms);
    // include extractionResultField:failed, because docs with this term have no extractedTermsField
    // and otherwise we would fail to return these docs. Docs that failed query term extraction
    // always need to be verified by MemoryIndex:
    Query extractionFailure = new TermQuery(new Term(extractionResultField.name(), EXTRACTION_FAILED));

    return new BooleanQuery.Builder()
            .add(extractionSuccess, Occur.SHOULD)
            .add(extractionFailure, Occur.SHOULD)
            .build();
}
private static PercolateQuery.QueryStore createStore(PercolatorFieldMapper.FieldType fieldType,
                                                     QueryShardContext context,
                                                     boolean mapUnmappedFieldsAsString) {
    return ctx -> {
        LeafReader leafReader = ctx.reader();
        BinaryDocValues binaryDocValues = leafReader.getBinaryDocValues(fieldType.queryBuilderField.name());
        if (binaryDocValues == null) {
            return docId -> null;
        }
        Bits bits = leafReader.getDocsWithField(fieldType.queryBuilderField.name());
        return docId -> {
            if (bits.get(docId)) {
                BytesRef qbSource = binaryDocValues.get(docId);
                if (qbSource.length > 0) {
                    XContent xContent = PercolatorFieldMapper.QUERY_BUILDER_CONTENT_TYPE.xContent();
                    try (XContentParser sourceParser = xContent.createParser(context.getXContentRegistry(),
                            qbSource.bytes, qbSource.offset, qbSource.length)) {
                        return parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
                    }
                } else {
                    return null;
                }
            } else {
                return null;
            }
        };
    };
}
public FieldMaskingReader(String field, DirectoryReader in) throws IOException {
    super(in, new FilterDirectoryReader.SubReaderWrapper() {
        @Override
        public LeafReader wrap(LeafReader reader) {
            return new FieldFilterLeafReader(reader, Collections.singleton(field), true);
        }
    });
    this.field = field;
}
@Override
public void setNextReader(LeafReader reader) throws IOException {
    DocValuesType type = getDocValuesType(reader, field);
    if (type == null || type == DocValuesType.NONE) {
        values = DocValues.emptyNumeric();
        docsWithField = new Bits.MatchNoBits(reader.maxDoc());
        return;
    }
    docsWithField = DocValues.getDocsWithField(reader, field);
    switch (type) {
        case NUMERIC:
            values = DocValues.getNumeric(reader, field);
            break;

        case SORTED_NUMERIC:
            final SortedNumericDocValues sorted = DocValues.getSortedNumeric(reader, field);
            values = DocValues.unwrapSingleton(sorted);
            if (values == null) {
                values = new NumericDocValues() {
                    @Override
                    public long get(int docID) {
                        sorted.setDocument(docID);
                        assert sorted.count() > 0;
                        if (sorted.count() > 1) {
                            throw new IllegalStateException("failed to collapse " + docID
                                    + ", the collapse field must be single valued");
                        }
                        return sorted.valueAt(0);
                    }
                };
            }
            break;

        default:
            throw new IllegalStateException("unexpected doc values type `" + type
                    + "` for field `" + field + "`");
    }
}
private static DocValuesType getDocValuesType(LeafReader in, String field) {
    FieldInfo fi = in.getFieldInfos().fieldInfo(field);
    if (fi != null) {
        return fi.getDocValuesType();
    }
    return null;
}
/**
 * Returns total in-heap bytes used by all suggesters. This method has CPU cost <code>O(numIndexedFields)</code>.
 *
 * @param fieldNamePatterns if non-null, any completion field name matching any of these patterns will break out
 *                          its in-heap bytes separately in the returned {@link CompletionStats}
 */
public static CompletionStats completionStats(IndexReader indexReader, String... fieldNamePatterns) {
    long sizeInBytes = 0;
    ObjectLongHashMap<String> completionFields = null;
    if (fieldNamePatterns != null && fieldNamePatterns.length > 0) {
        completionFields = new ObjectLongHashMap<>(fieldNamePatterns.length);
    }
    for (LeafReaderContext atomicReaderContext : indexReader.leaves()) {
        LeafReader atomicReader = atomicReaderContext.reader();
        try {
            Fields fields = atomicReader.fields();
            for (String fieldName : fields) {
                Terms terms = fields.terms(fieldName);
                if (terms instanceof CompletionTerms) {
                    // TODO: currently we load up the suggester for reporting its size
                    long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed();
                    if (fieldNamePatterns != null && fieldNamePatterns.length > 0
                            && Regex.simpleMatch(fieldNamePatterns, fieldName)) {
                        completionFields.addTo(fieldName, fstSize);
                    }
                    sizeInBytes += fstSize;
                }
            }
        } catch (IOException ioe) {
            throw new ElasticsearchException(ioe);
        }
    }
    return new CompletionStats(sizeInBytes, completionFields == null ? null : new FieldMemoryStats(completionFields));
}
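// A hedged usage sketch of completionStats (not part of the original source): request the
// aggregate suggester heap usage plus a per-field breakdown for completion fields matching
// a pattern. The pattern string and the getSizeInBytes() accessor name are assumptions.
CompletionStats stats = completionStats(indexReader, "suggest*");
long totalBytes = stats.getSizeInBytes(); // total in-heap bytes across all completion fields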
/**
 * Returns a DocIdSet per segment containing the matching docs for the specified slice.
 */
private DocIdSet build(LeafReader reader) throws IOException {
    final DocIdSetBuilder builder = new DocIdSetBuilder(reader.maxDoc());
    final Terms terms = reader.terms(getField());
    final TermsEnum te = terms.iterator();
    PostingsEnum docsEnum = null;
    for (BytesRef term = te.next(); term != null; term = te.next()) {
        int hashCode = term.hashCode();
        if (contains(hashCode)) {
            docsEnum = te.postings(docsEnum, PostingsEnum.NONE);
            builder.add(docsEnum);
        }
    }
    return builder.build();
}
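// A hedged usage sketch (not from the original source): consumers of build(...) typically
// pull the iterator, which may legally be null when the set is empty, and walk the matches.
// The leafReader local is an assumption for the sketch.
DocIdSet docIdSet = build(leafReader);
DocIdSetIterator it = docIdSet.iterator();
if (it != null) { // DocIdSet.iterator() may return null for an empty set
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
        // doc is a document id that belongs to this slice
    }
}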
public static String getParentId(ParentFieldMapper fieldMapper, LeafReader reader, int docId) {
    try {
        SortedDocValues docValues = reader.getSortedDocValues(fieldMapper.name());
        if (docValues == null) {
            // hit has no _parent field
            return null;
        }
        BytesRef parentId = docValues.get(docId);
        return parentId.length > 0 ? parentId.utf8ToString() : null;
    } catch (IOException e) {
        throw ExceptionsHelper.convertToElastic(e);
    }
}
private PostingsEnum getPostings(int luceneFlags, LeafReader reader) throws IOException {
    assert identifier.field() != null;
    assert identifier.bytes() != null;
    final Fields fields = reader.fields();
    PostingsEnum newPostings = null;
    if (fields != null) {
        final Terms terms = fields.terms(identifier.field());
        if (terms != null) {
            TermsEnum termsEnum = terms.iterator();
            if (termsEnum.seekExact(identifier.bytes())) {
                newPostings = termsEnum.postings(postings, luceneFlags);
                final Bits liveDocs = reader.getLiveDocs();
                if (liveDocs != null) {
                    newPostings = new FilterPostingsEnum(newPostings) {
                        private int doNext(int d) throws IOException {
                            for (; d != NO_MORE_DOCS; d = super.nextDoc()) {
                                if (liveDocs.get(d)) {
                                    return d;
                                }
                            }
                            return NO_MORE_DOCS;
                        }

                        @Override
                        public int nextDoc() throws IOException {
                            return doNext(super.nextDoc());
                        }

                        @Override
                        public int advance(int target) throws IOException {
                            return doNext(super.advance(target));
                        }
                    };
                }
            }
        }
    }
    return newPostings;
}
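// A hedged sketch of consuming the live-docs-filtered postings returned by getPostings
// (not from the original source; the flags and variable names below are assumptions).
PostingsEnum pe = getPostings(PostingsEnum.FREQS, leafReader);
if (pe != null) { // null when the term does not occur in this segment
    for (int doc = pe.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = pe.nextDoc()) {
        int freq = pe.freq(); // term frequency within a live document
    }
}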
@Override
public AtomicParentChildFieldData load(LeafReaderContext context) {
    final LeafReader reader = context.reader();
    return new AbstractAtomicParentChildFieldData() {

        @Override
        public Set<String> types() {
            return parentTypes;
        }

        @Override
        public SortedDocValues getOrdinalsValues(String type) {
            try {
                return DocValues.getSorted(reader, ParentFieldMapper.joinField(type));
            } catch (IOException e) {
                throw new IllegalStateException("cannot load join doc values field for type [" + type + "]", e);
            }
        }

        @Override
        public long ramBytesUsed() {
            // unknown
            return 0;
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }

        @Override
        public void close() throws ElasticsearchException {
        }
    };
}
@Override
public AtomicGeoPointFieldData load(LeafReaderContext context) {
    try {
        LeafReader reader = context.reader();
        FieldInfo info = reader.getFieldInfos().fieldInfo(fieldName);
        if (info != null) {
            checkCompatible(info);
        }
        return new LatLonPointDVAtomicFieldData(DocValues.getSortedNumeric(reader, fieldName));
    } catch (IOException e) {
        throw new IllegalStateException("Cannot load doc values", e);
    }
}
/**
 * Determine whether the BlockTreeTermsReader.FieldReader can be used
 * for estimating the field data, adding the estimate to the circuit
 * breaker if it can, otherwise wrapping the terms in a
 * RamAccountingTermsEnum to be estimated on a per-term basis.
 *
 * @param terms terms to be estimated
 * @return a possibly wrapped TermsEnum for the terms
 */
@Override
public TermsEnum beforeLoad(Terms terms) throws IOException {
    LeafReader reader = context.reader();

    TermsEnum iterator = terms.iterator();
    TermsEnum filteredIterator = filter(terms, iterator, reader);
    final boolean filtered = iterator != filteredIterator;
    iterator = filteredIterator;

    if (filtered) {
        if (logger.isTraceEnabled()) {
            logger.trace("Filter exists, can't circuit break normally, using RamAccountingTermsEnum");
        }
        return new RamAccountingTermsEnum(iterator, breaker, this, this.fieldName);
    } else {
        estimatedBytes = this.estimateStringFieldData();
        // If we weren't able to estimate, wrap in the RamAccountingTermsEnum
        if (estimatedBytes == 0) {
            iterator = new RamAccountingTermsEnum(iterator, breaker, this, this.fieldName);
        } else {
            breaker.addEstimateBytesAndMaybeBreak(estimatedBytes, fieldName);
        }
        return iterator;
    }
}
/**
 * Tries to extract a segment reader from the given index reader.
 * If no SegmentReader can be extracted, an {@link IllegalStateException} is thrown.
 */
protected static SegmentReader segmentReader(LeafReader reader) {
    if (reader instanceof SegmentReader) {
        return (SegmentReader) reader;
    } else if (reader instanceof FilterLeafReader) {
        final FilterLeafReader fReader = (FilterLeafReader) reader;
        return segmentReader(FilterLeafReader.unwrap(fReader));
    }
    // hard fail - we can't get a SegmentReader
    throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
}
/**
 * Returns whether a leaf reader comes from a merge (versus flush or addIndexes).
 */
protected static boolean isMergedSegment(LeafReader reader) {
    // We expect leaves to be segment readers
    final Map<String, String> diagnostics = segmentReader(reader).getSegmentInfo().info.getDiagnostics();
    final String source = diagnostics.get(IndexWriter.SOURCE);
    assert Arrays.asList(IndexWriter.SOURCE_ADDINDEXES_READERS, IndexWriter.SOURCE_FLUSH,
            IndexWriter.SOURCE_MERGE).contains(source) : "Unknown source " + source;
    return IndexWriter.SOURCE_MERGE.equals(source);
}
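// A minimal sketch of a hypothetical call site combining the two helpers above: warmers
// commonly skip merged segments, since their documents were already warm in the source
// segments. The method name and body are assumptions, not part of the original code.
void warmIfNeeded(LeafReader reader) {
    if (!isMergedSegment(reader)) {
        // ... warm norms / doc values / fielddata for the freshly flushed segment ...
    }
}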
private NonClosingReaderWrapper(DirectoryReader in) throws IOException {
    super(in, new SubReaderWrapper() {
        @Override
        public LeafReader wrap(LeafReader reader) {
            return reader;
        }
    });
}
/**
 * Tries to extract the shard id from a reader if possible; when it's not possible,
 * returns null.
 */
@Nullable
public static ShardId extractShardId(LeafReader reader) {
    final ElasticsearchLeafReader esReader = ElasticsearchLeafReader.getElasticsearchLeafReader(reader);
    if (esReader != null) {
        assert reader.getRefCount() > 0 : "ElasticsearchLeafReader is already closed";
        return esReader.shardId();
    }
    return null;
}
public static ElasticsearchLeafReader getElasticsearchLeafReader(LeafReader reader) {
    if (reader instanceof FilterLeafReader) {
        if (reader instanceof ElasticsearchLeafReader) {
            return (ElasticsearchLeafReader) reader;
        } else {
            // We need FilterLeafReader#getDelegate rather than FilterLeafReader#unwrap: with
            // multiple levels of filtered leaf readers, unwrap() jumps straight to the innermost
            // leaf reader and thus skips over any intermediate filtered leaf reader that may be
            // an instance of ElasticsearchLeafReader. That would cause us to miss the shardId.
            return getElasticsearchLeafReader(((FilterLeafReader) reader).getDelegate());
        }
    }
    return null;
}
private static PerThreadIDAndVersionLookup getLookupState(LeafReader reader) throws IOException {
    Object key = reader.getCoreCacheKey();
    CloseableThreadLocal<PerThreadIDAndVersionLookup> ctl = lookupStates.get(key);
    if (ctl == null) {
        // First time we are seeing this reader's core; make a new CTL:
        ctl = new CloseableThreadLocal<>();
        CloseableThreadLocal<PerThreadIDAndVersionLookup> other = lookupStates.putIfAbsent(key, ctl);
        if (other == null) {
            // Our CTL won, we must remove it when the core is closed:
            reader.addCoreClosedListener(removeLookupState);
        } else {
            // Another thread beat us to it: just use their CTL:
            ctl = other;
        }
    }

    PerThreadIDAndVersionLookup lookupState = ctl.get();
    if (lookupState == null) {
        lookupState = new PerThreadIDAndVersionLookup(reader);
        ctl.set(lookupState);
    }

    return lookupState;
}
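// A minimal sketch of the companion state getLookupState relies on; the field names are
// taken from the snippet above, but these declarations are assumptions, not the original code.
private static final ConcurrentMap<Object, CloseableThreadLocal<PerThreadIDAndVersionLookup>> lookupStates =
        new ConcurrentHashMap<>();

// Evict and close the per-thread lookups once the reader's core is closed.
private static final LeafReader.CoreClosedListener removeLookupState = key -> {
    CloseableThreadLocal<PerThreadIDAndVersionLookup> ctl = lookupStates.remove(key);
    if (ctl != null) {
        ctl.close();
    }
};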
FieldMaskingReader(String field, DirectoryReader in, AtomicInteger closeCalls) throws IOException {
    super(in, new SubReaderWrapper() {
        @Override
        public LeafReader wrap(LeafReader reader) {
            return new FieldFilterLeafReader(reader, Collections.singleton(field), true);
        }
    });
    this.closeCalls = closeCalls;
    this.field = field;
}
BrokenWrapper(DirectoryReader in, boolean hideDelegate) throws IOException {
    super(in, new SubReaderWrapper() {
        @Override
        public LeafReader wrap(LeafReader reader) {
            return reader;
        }
    });
    this.hideDelegate = hideDelegate;
}
public void testDefaultPositionIncrementGap() throws IOException {
    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("field").field("type", "text").endObject().endObject()
            .endObject().endObject().string();

    DocumentMapper mapper = indexService.mapperService().merge("type",
            new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);

    assertEquals(mapping, mapper.mappingSource().toString());

    ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
            .startObject()
            .array("field", new String[] {"a", "b"})
            .endObject()
            .bytes());

    IndexableField[] fields = doc.rootDoc().getFields("field");
    assertEquals(2, fields.length);
    assertEquals("a", fields[0].stringValue());
    assertEquals("b", fields[1].stringValue());

    IndexShard shard = indexService.getShard(0);
    shard.index(new Engine.Index(new Term("_uid", doc.uid()), doc));
    shard.refresh("test");
    try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
        LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
        TermsEnum terms = leaf.terms("field").iterator();
        assertTrue(terms.seekExact(new BytesRef("b")));
        PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS);
        assertEquals(0, postings.nextDoc());
        assertEquals(TextFieldMapper.Defaults.POSITION_INCREMENT_GAP + 1, postings.nextPosition());
    }
}
public void testPositionIncrementGap() throws IOException {
    final int positionIncrementGap = randomIntBetween(1, 1000);
    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("field")
                .field("type", "text")
                .field("position_increment_gap", positionIncrementGap)
            .endObject().endObject()
            .endObject().endObject().string();

    DocumentMapper mapper = indexService.mapperService().merge("type",
            new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);

    assertEquals(mapping, mapper.mappingSource().toString());

    ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
            .startObject()
            .array("field", new String[] {"a", "b"})
            .endObject()
            .bytes());

    IndexableField[] fields = doc.rootDoc().getFields("field");
    assertEquals(2, fields.length);
    assertEquals("a", fields[0].stringValue());
    assertEquals("b", fields[1].stringValue());

    IndexShard shard = indexService.getShard(0);
    shard.index(new Engine.Index(new Term("_uid", doc.uid()), doc));
    shard.refresh("test");
    try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
        LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
        TermsEnum terms = leaf.terms("field").iterator();
        assertTrue(terms.seekExact(new BytesRef("b")));
        PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS);
        assertEquals(0, postings.nextDoc());
        assertEquals(positionIncrementGap + 1, postings.nextPosition());
    }
}
public void testDefaults() throws IOException {
    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("field").field("type", "boolean").endObject().endObject()
            .endObject().endObject().string();

    DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));

    ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
            .startObject()
            .field("field", true)
            .endObject()
            .bytes());

    try (Directory dir = new RAMDirectory();
            IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())))) {
        w.addDocuments(doc.docs());
        try (DirectoryReader reader = DirectoryReader.open(w)) {
            final LeafReader leaf = reader.leaves().get(0).reader();
            // boolean fields are indexed and have doc values by default
            assertEquals(new BytesRef("T"), leaf.terms("field").iterator().next());
            SortedNumericDocValues values = leaf.getSortedNumericDocValues("field");
            assertNotNull(values);
            values.setDocument(0);
            assertEquals(1, values.count());
            assertEquals(1, values.valueAt(0));
        }
    }
}
protected FunctionDocSet(LeafReader reader,
                         @Nullable CollectorFieldsVisitor fieldsVisitor,
                         Input<Boolean> condition,
                         List<LuceneCollectorExpression> expressions,
                         DocIdSet docIdSet) {
    super(docIdSet);
    this.reader = reader;
    this.fieldsVisitor = fieldsVisitor;
    //noinspection SimplifiableConditionalExpression
    this.fieldsVisitorEnabled = fieldsVisitor == null ? false : fieldsVisitor.required();
    this.condition = condition;
    this.expressions = expressions;
}