Example source code for the Java class org.apache.lucene.index.LeafReaderContext
Project: elasticsearch_my
File: GeoLongitudeValueSource.java
@Override
@SuppressWarnings("rawtypes") // ValueSource uses a rawtype
public FunctionValues getValues(Map context, LeafReaderContext leaf) throws IOException {
AtomicGeoPointFieldData leafData = (AtomicGeoPointFieldData) fieldData.load(leaf);
final MultiGeoPointValues values = leafData.getGeoPointValues();
return new DoubleDocValues(this) {
@Override
public double doubleVal(int doc) {
values.setDocument(doc);
if (values.count() == 0) {
return 0.0;
} else {
return values.valueAt(0).getLon();
}
}
};
}
Project: elasticsearch_my
File: DocValuesSliceQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new RandomAccessWeight(this) {
@Override
protected Bits getMatchingDocs(final LeafReaderContext context) throws IOException {
final SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), getField());
return new Bits() {
@Override
public boolean get(int doc) {
values.setDocument(doc);
for (int i = 0; i < values.count(); i++) {
return contains(BitMixer.mix(values.valueAt(i)));
}
return contains(0);
}
@Override
public int length() {
return context.reader().maxDoc();
}
};
}
};
}
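Note that contains(...) above is inherited from the slice query's parent class and is not shown in this snippet. A hypothetical stand-in conveys the idea: a document belongs to slice id out of max slices when its mixed hash lands on that slice. (A sketch only; the real method lives in org.elasticsearch.search.slice.SliceQuery and takes a single long, with id and max as fields.)

// Hypothetical stand-in for the inherited contains(long) check
private boolean contains(long hash, int id, int max) {
    // floorMod keeps the result non-negative even for negative hashes
    return Math.floorMod(hash, (long) max) == id;
}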
Project: elasticsearch_my
File: ShardCoreKeyMapTests.java
public void testMissingShard() throws IOException {
try (Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
w.addDocument(new Document());
try (IndexReader reader = w.getReader()) {
ShardCoreKeyMap map = new ShardCoreKeyMap();
for (LeafReaderContext ctx : reader.leaves()) {
try {
map.add(ctx.reader());
fail();
} catch (IllegalArgumentException expected) {
// ok
}
}
}
}
}
Project: elasticsearch_my
File: FiltersAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
// no need to provide deleted docs to the filter
final Bits[] bits = new Bits[filters.length];
for (int i = 0; i < filters.length; ++i) {
bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx));
}
return new LeafBucketCollectorBase(sub, null) {
@Override
public void collect(int doc, long bucket) throws IOException {
boolean matched = false;
for (int i = 0; i < bits.length; i++) {
if (bits[i].get(doc)) {
collectBucket(sub, doc, bucketOrd(bucket, i));
matched = true;
}
}
if (showOtherBucket && !matched) {
collectBucket(sub, doc, bucketOrd(bucket, bits.length));
}
}
};
}
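Lucene.asSequentialAccessBits is an Elasticsearch helper that adapts a (possibly null) Scorer to the Bits interface; the adapter is only valid when get(doc) is called with non-decreasing doc IDs, which collectors guarantee. A minimal sketch of the same idea over a DocIdSetIterator:

// Sketch of a sequential-access Bits adapter; get(doc) must be called with ascending doc IDs
static Bits asSequentialAccessBits(final int maxDoc, final DocIdSetIterator it) {
    if (it == null) {
        return new Bits.MatchNoBits(maxDoc); // scorer was null: no matches in this segment
    }
    return new Bits() {
        @Override
        public boolean get(int doc) {
            try {
                if (it.docID() < doc) {
                    it.advance(doc); // skip forward; never rewinds, hence "sequential access"
                }
                return it.docID() == doc;
            } catch (IOException e) {
                throw new UncheckedIOException(e); // Bits.get cannot throw checked exceptions
            }
        }
        @Override
        public int length() {
            return maxDoc;
        }
    };
}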
Project: Elasticsearch
File: PercolateContext.java
public void initialize(Engine.Searcher docSearcher, ParsedDocument parsedDocument) {
this.docSearcher = docSearcher;
IndexReader indexReader = docSearcher.reader();
LeafReaderContext atomicReaderContext = indexReader.leaves().get(0);
LeafSearchLookup leafLookup = lookup().getLeafSearchLookup(atomicReaderContext);
leafLookup.setDocument(0);
leafLookup.source().setSource(parsedDocument.source());
Map<String, SearchHitField> fields = new HashMap<>();
for (IndexableField field : parsedDocument.rootDoc().getFields()) {
fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList()));
}
hitContext().reset(
new InternalSearchHit(0, "unknown", new Text(parsedDocument.type()), fields),
atomicReaderContext, 0, docSearcher.searcher()
);
}
Project: Elasticsearch
File: GlobalOrdinalsStringTermsAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
globalOrds = valuesSource.globalOrdinalsValues(ctx);
if (acceptedGlobalOrdinals == null && includeExclude != null) {
acceptedGlobalOrdinals = includeExclude.acceptedGlobalOrdinals(globalOrds, valuesSource);
}
if (acceptedGlobalOrdinals != null) {
globalOrds = new FilteredOrdinals(globalOrds, acceptedGlobalOrdinals);
}
return newCollector(globalOrds, sub);
}
Project: Elasticsearch
File: DiversifiedBytesHashSamplerAggregator.java
@Override
protected NumericDocValues getKeys(LeafReaderContext context) {
try {
values = valuesSource.bytesValues(context);
} catch (IOException e) {
throw new ElasticsearchException("Error reading values", e);
}
return new NumericDocValues() {
@Override
public long get(int doc) {
values.setDocument(doc);
final int valuesCount = values.count();
if (valuesCount > 1) {
throw new IllegalArgumentException("Sample diversifying key must be a single valued-field");
}
if (valuesCount == 1) {
final BytesRef bytes = values.valueAt(0);
return bytes.hashCode();
}
return 0;
}
};
}
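A caveat on the key above: since Lucene 5, BytesRef.hashCode() delegates to MurmurHash3 with a per-JVM seed (StringHelper.GOOD_FAST_HASH_SEED), so these diversity keys are stable within one JVM run but not across restarts. A quick sketch of that assumption:

// BytesRef.hashCode() == StringHelper.murmurhash3_x86_32(bytes, StringHelper.GOOD_FAST_HASH_SEED);
// the seed is randomized per JVM unless the "tests.seed" system property pins it
BytesRef key = new BytesRef("some-term");
int diversityKey = key.hashCode(); // fine as an in-process dedup key, not as a persistent one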
Project: elasticsearch_my
File: Lucene.java
/**
* Check whether one or more documents match the provided query.
*/
public static boolean exists(IndexSearcher searcher, Query query) throws IOException {
final Weight weight = searcher.createNormalizedWeight(query, false);
// the scorer API should be more efficient at stopping after the first
// match than the bulk scorer API
for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
final Scorer scorer = weight.scorer(context);
if (scorer == null) {
continue;
}
final Bits liveDocs = context.reader().getLiveDocs();
final DocIdSetIterator iterator = scorer.iterator();
for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
if (liveDocs == null || liveDocs.get(doc)) {
return true;
}
}
}
return false;
}
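A minimal usage sketch (assuming the Lucene 6.x era APIs this snippet compiles against): index one document, then probe with the early-exit existence check instead of counting all hits.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

try (Directory dir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
    Document doc = new Document();
    doc.add(new StringField("user", "kimchy", Field.Store.NO));
    writer.addDocument(doc);
    try (IndexReader reader = DirectoryReader.open(writer)) { // near-real-time reader
        IndexSearcher searcher = new IndexSearcher(reader);
        boolean found = Lucene.exists(searcher, new TermQuery(new Term("user", "kimchy")));   // true
        boolean missing = Lucene.exists(searcher, new TermQuery(new Term("user", "nobody"))); // false
    }
}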
Project: Elasticsearch
File: IncludeNestedDocsQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
final Scorer parentScorer = parentWeight.scorer(context);
// no matches
if (parentScorer == null) {
return null;
}
BitSet parents = parentsFilter.getBitSet(context);
if (parents == null) {
// No matches
return null;
}
int firstParentDoc = parentScorer.iterator().nextDoc();
if (firstParentDoc == DocIdSetIterator.NO_MORE_DOCS) {
// No matches
return null;
}
return new IncludeNestedDocsScorer(this, parentScorer, parents, firstParentDoc);
}
Project: Elasticsearch
File: LeafSearchLookup.java
public LeafSearchLookup(LeafReaderContext ctx, LeafDocLookup docMap, SourceLookup sourceLookup,
LeafFieldsLookup fieldsLookup, LeafIndexLookup indexLookup, Map<String, Object> topLevelMap) {
this.ctx = ctx;
this.docMap = docMap;
this.sourceLookup = sourceLookup;
this.fieldsLookup = fieldsLookup;
this.indexLookup = indexLookup;
ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
builder.putAll(topLevelMap);
builder.put("doc", docMap);
builder.put("_doc", docMap);
builder.put("_source", sourceLookup);
builder.put("_fields", fieldsLookup);
builder.put("_index", indexLookup);
asMap = builder.build();
}
Project: Elasticsearch
File: GeoPointArrayIndexFieldData.java
@Override
public AtomicGeoPointFieldData loadDirect(LeafReaderContext context) throws Exception {
LeafReader reader = context.reader();
Terms terms = reader.terms(getFieldNames().indexName());
AtomicGeoPointFieldData data = null;
// TODO: Use an actual estimator to estimate before loading.
NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker(CircuitBreaker.FIELDDATA));
if (terms == null) {
data = AbstractAtomicGeoPointFieldData.empty(reader.maxDoc());
estimator.afterLoad(null, data.ramBytesUsed());
return data;
}
return (Version.indexCreated(indexSettings).before(Version.V_2_2_0)) ?
loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
}
Project: elasticsearch_my
File: LeafSearchLookup.java
public LeafSearchLookup(LeafReaderContext ctx, LeafDocLookup docMap, SourceLookup sourceLookup,
LeafFieldsLookup fieldsLookup, LeafIndexLookup indexLookup, Map<String, Object> topLevelMap) {
this.ctx = ctx;
this.docMap = docMap;
this.sourceLookup = sourceLookup;
this.fieldsLookup = fieldsLookup;
this.indexLookup = indexLookup;
Map<String, Object> asMap = new HashMap<>(topLevelMap.size() + 5);
asMap.putAll(topLevelMap);
asMap.put("doc", docMap);
asMap.put("_doc", docMap);
asMap.put("_source", sourceLookup);
asMap.put("_fields", fieldsLookup);
asMap.put("_index", indexLookup);
this.asMap = unmodifiableMap(asMap);
}
Project: elasticsearch_my
File: DecayFunctionBuilder.java
@Override
protected String getDistanceString(LeafReaderContext ctx, int docId) {
StringBuilder values = new StringBuilder(mode.name());
values.append("[");
final SortedNumericDoubleValues doubleValues = fieldData.load(ctx).getDoubleValues();
doubleValues.setDocument(docId);
final int num = doubleValues.count();
if (num > 0) {
for (int i = 0; i < num; i++) {
double value = doubleValues.valueAt(i);
values.append("Math.max(Math.abs(");
values.append(value).append("(=doc value) - ");
values.append(origin).append("(=origin))) - ");
values.append(offset).append("(=offset), 0)");
if (i != num - 1) {
values.append(", ");
}
}
} else {
values.append("0.0");
}
values.append("]");
return values.toString();
}
Project: elasticsearch_my
File: GlobalOrdinalsSignificantTermsAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
return new LeafBucketCollectorBase(super.getLeafCollector(ctx, sub), null) {
@Override
public void collect(int doc, long bucket) throws IOException {
assert bucket == 0;
numCollectedDocs++;
globalOrds.setDocument(doc);
final int numOrds = globalOrds.cardinality();
for (int i = 0; i < numOrds; i++) {
final long globalOrd = globalOrds.ordAt(i);
long bucketOrd = bucketOrds.add(globalOrd);
if (bucketOrd < 0) {
bucketOrd = -1 - bucketOrd;
collectExistingBucket(sub, doc, bucketOrd);
} else {
collectBucket(sub, doc, bucketOrd);
}
}
}
};
}
Project: elasticsearch_my
File: DecayFunctionBuilder.java
@Override
protected String getDistanceString(LeafReaderContext ctx, int docId) {
StringBuilder values = new StringBuilder(mode.name());
values.append(" of: [");
final MultiGeoPointValues geoPointValues = fieldData.load(ctx).getGeoPointValues();
geoPointValues.setDocument(docId);
final int num = geoPointValues.count();
if (num > 0) {
for (int i = 0; i < num; i++) {
GeoPoint value = geoPointValues.valueAt(i);
values.append("Math.max(arcDistance(");
values.append(value).append("(=doc value),");
values.append(origin).append("(=origin)) - ").append(offset).append("(=offset), 0)");
if (i != num - 1) {
values.append(", ");
}
}
} else {
values.append("0.0");
}
values.append("]");
return values.toString();
}
Project: Elasticsearch
File: CrateDocCollector.java
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
// trigger keep-alive here as well
// in case we have a long running query without actual matches
for (LuceneCollectorExpression<?> expression : expressions) {
expression.setNextReader(context);
}
}
Project: elasticsearch_my
File: DateObjectValueSource.java
@Override
@SuppressWarnings("rawtypes") // ValueSource uses a rawtype
public FunctionValues getValues(Map context, LeafReaderContext leaf) throws IOException {
AtomicNumericFieldData leafData = (AtomicNumericFieldData) fieldData.load(leaf);
MutableDateTime joda = new MutableDateTime(0, DateTimeZone.UTC);
NumericDoubleValues docValues = multiValueMode.select(leafData.getDoubleValues(), 0d);
return new DoubleDocValues(this) {
@Override
public double doubleVal(int docId) {
long millis = (long)docValues.get(docId);
joda.setMillis(millis);
return function.applyAsInt(joda);
}
};
}
Project: elasticsearch_my
File: BestDocsDeferringCollector.java
PerParentBucketSamples(long parentBucket, Scorer scorer, LeafReaderContext readerContext) {
try {
this.parentBucket = parentBucket;
tdc = createTopDocsCollector(shardSize);
currentLeafCollector = tdc.getLeafCollector(readerContext);
setScorer(scorer);
} catch (IOException e) {
throw new ElasticsearchException("IO error creating collector", e);
}
}
Project: elasticsearch_my
File: HistogramAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);
return new LeafBucketCollectorBase(sub, values) {
@Override
public void collect(int doc, long bucket) throws IOException {
assert bucket == 0;
values.setDocument(doc);
final int valuesCount = values.count();
double previousKey = Double.NEGATIVE_INFINITY;
for (int i = 0; i < valuesCount; ++i) {
double value = values.valueAt(i);
double key = Math.floor((value - offset) / interval);
assert key >= previousKey;
if (key == previousKey) {
continue;
}
long bucketOrd = bucketOrds.add(Double.doubleToLongBits(key));
if (bucketOrd < 0) { // already seen
bucketOrd = -1 - bucketOrd;
collectExistingBucket(sub, doc, bucketOrd);
} else {
collectBucket(sub, doc, bucketOrd);
}
previousKey = key;
}
}
};
}
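The key computation is the heart of this collector: values are shifted by offset, divided by interval, and floored, so every value inside one interval-wide window collapses to the same key. A worked example with hypothetical numbers:

double interval = 10.0, offset = 5.0;
double key = Math.floor((17.0 - offset) / interval); // (17 - 5) / 10 = 1.2, floored to 1.0
// every value in [15.0, 25.0) maps to key 1.0; [5.0, 15.0) maps to 0.0, and so on;
// Double.doubleToLongBits(key) then yields a stable long for the bucket-ords hash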
Project: Elasticsearch
File: GlobalAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
return new LeafBucketCollectorBase(sub, null) {
@Override
public void collect(int doc, long bucket) throws IOException {
assert bucket == 0 : "global aggregator can only be a top level aggregator";
collectBucket(sub, doc, bucket);
}
};
}
Project: Elasticsearch
File: BitsetFilterCache.java
@Override
public BitSet getBitSet(LeafReaderContext context) throws IOException {
try {
return getAndLoadIfNotPresent(query, context);
} catch (ExecutionException e) {
throw ExceptionsHelper.convertToElastic(e);
}
}
Project: Elasticsearch
File: XUsageTrackingQueryCachingPolicy.java
@Override
public boolean shouldCache(Query query, LeafReaderContext context) throws IOException {
if (query instanceof MatchAllDocsQuery
// MatchNoDocsQuery currently rewrites to a BooleanQuery,
// but who knows, it might get its own Weight one day
|| query instanceof MatchNoDocsQuery) {
return false;
}
if (query instanceof BooleanQuery) {
BooleanQuery bq = (BooleanQuery) query;
if (bq.clauses().isEmpty()) {
return false;
}
}
if (query instanceof DisjunctionMaxQuery) {
DisjunctionMaxQuery dmq = (DisjunctionMaxQuery) query;
if (dmq.getDisjuncts().isEmpty()) {
return false;
}
}
if (segmentPolicy.shouldCache(query, context) == false) {
return false;
}
final int frequency = frequency(query);
final int minFrequency = minFrequencyToCache(query);
return frequency >= minFrequency;
}
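This class extends Lucene's UsageTrackingQueryCachingPolicy, which only starts caching a query once it has been observed often enough. Wiring such a policy into a searcher uses the standard Lucene hooks; a sketch with plain Lucene classes:

IndexSearcher searcher = new IndexSearcher(reader);
searcher.setQueryCache(new LRUQueryCache(1000, 64 * 1024 * 1024)); // max cached queries, max RAM in bytes
searcher.setQueryCachingPolicy(new UsageTrackingQueryCachingPolicy()); // or the ES subclass above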
Project: elasticsearch_my
File: AbstractTDigestPercentilesAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
final BigArrays bigArrays = context.bigArrays();
final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);
return new LeafBucketCollectorBase(sub, values) {
@Override
public void collect(int doc, long bucket) throws IOException {
states = bigArrays.grow(states, bucket + 1);
TDigestState state = states.get(bucket);
if (state == null) {
state = new TDigestState(compression);
states.set(bucket, state);
}
values.setDocument(doc);
final int valueCount = values.count();
for (int i = 0; i < valueCount; i++) {
state.add(values.valueAt(i));
}
}
};
}
Project: elasticsearch_my
File: DocumentMapper.java
/**
* Returns the best (most specific) nested {@link ObjectMapper} instance that is in the scope of the specified nested docId.
*/
public ObjectMapper findNestedObjectMapper(int nestedDocId, SearchContext sc, LeafReaderContext context) throws IOException {
ObjectMapper nestedObjectMapper = null;
for (ObjectMapper objectMapper : objectMappers().values()) {
if (!objectMapper.nested().isNested()) {
continue;
}
Query filter = objectMapper.nestedTypeFilter();
if (filter == null) {
continue;
}
// We can pass down 'null' as acceptedDocs, because nestedDocId is a doc to be fetched and
// therefore is guaranteed to be a live doc.
final Weight nestedWeight = filter.createWeight(sc.searcher(), false);
Scorer scorer = nestedWeight.scorer(context);
if (scorer == null) {
continue;
}
if (scorer.iterator().advance(nestedDocId) == nestedDocId) {
if (nestedObjectMapper == null) {
nestedObjectMapper = objectMapper;
} else {
if (nestedObjectMapper.fullPath().length() < objectMapper.fullPath().length()) {
nestedObjectMapper = objectMapper;
}
}
}
}
return nestedObjectMapper;
}
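The membership test relies on the DocIdSetIterator contract: advance(target) positions the iterator on the first doc at or beyond target, so equality means target itself matches. The same check in isolation (a sketch; weight, leafContext, and target are stand-ins):

Scorer scorer = weight.scorer(leafContext); // null when the segment has no matches at all
boolean matches = false;
if (scorer != null) {
    DocIdSetIterator it = scorer.iterator();
    matches = it.advance(target) == target; // first doc >= target equals target => target matches
}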
Project: elasticsearch_my
File: StatsAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
final BigArrays bigArrays = context.bigArrays();
final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);
return new LeafBucketCollectorBase(sub, values) {
@Override
public void collect(int doc, long bucket) throws IOException {
if (bucket >= counts.size()) {
final long from = counts.size();
final long overSize = BigArrays.overSize(bucket + 1);
counts = bigArrays.resize(counts, overSize);
sums = bigArrays.resize(sums, overSize);
mins = bigArrays.resize(mins, overSize);
maxes = bigArrays.resize(maxes, overSize);
mins.fill(from, overSize, Double.POSITIVE_INFINITY);
maxes.fill(from, overSize, Double.NEGATIVE_INFINITY);
}
values.setDocument(doc);
final int valuesCount = values.count();
counts.increment(bucket, valuesCount);
double sum = 0;
double min = mins.get(bucket);
double max = maxes.get(bucket);
for (int i = 0; i < valuesCount; i++) {
double value = values.valueAt(i);
sum += value;
min = Math.min(min, value);
max = Math.max(max, value);
}
sums.increment(bucket, sum);
mins.set(bucket, min);
maxes.set(bucket, max);
}
};
}
Project: elasticsearch_my
File: AbstractLatLonPointDVIndexFieldData.java
@Override
public AtomicGeoPointFieldData load(LeafReaderContext context) {
try {
LeafReader reader = context.reader();
FieldInfo info = reader.getFieldInfos().fieldInfo(fieldName);
if (info != null) {
checkCompatible(info);
}
return new LatLonPointDVAtomicFieldData(DocValues.getSortedNumeric(reader, fieldName));
} catch (IOException e) {
throw new IllegalStateException("Cannot load doc values", e);
}
}
Project: elasticsearch_my
File: SourceScoreOrderFragmentsBuilder.java
@Override
protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException {
// we know it's a leaf (low-level) reader with a matching docId, since that's how we invoke the highlighter
SourceLookup sourceLookup = searchContext.lookup().source();
sourceLookup.setSegmentAndDocument((LeafReaderContext) reader.getContext(), docId);
List<Object> values = sourceLookup.extractRawValues(mapper.fieldType().name());
Field[] fields = new Field[values.size()];
for (int i = 0; i < values.size(); i++) {
fields[i] = new Field(mapper.fieldType().name(), values.get(i).toString(), TextField.TYPE_NOT_STORED);
}
return fields;
}
Project: elasticsearch_my
File: ValuesSource.java
@Override
public Bits docsWithValue(LeafReaderContext context) throws IOException {
final SortedBinaryDocValues bytes = bytesValues(context);
if (org.elasticsearch.index.fielddata.FieldData.unwrapSingleton(bytes) != null) {
return org.elasticsearch.index.fielddata.FieldData.unwrapSingletonBits(bytes);
} else {
return org.elasticsearch.index.fielddata.FieldData.docsWithValue(bytes, context.reader().maxDoc());
}
}
Project: Elasticsearch
File: StringTermsAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
final SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
return new LeafBucketCollectorBase(sub, values) {
final BytesRefBuilder previous = new BytesRefBuilder();
@Override
public void collect(int doc, long bucket) throws IOException {
assert bucket == 0;
values.setDocument(doc);
final int valuesCount = values.count();
// SortedBinaryDocValues don't guarantee uniqueness so we need to take care of dups
previous.clear();
for (int i = 0; i < valuesCount; ++i) {
final BytesRef bytes = values.valueAt(i);
if (includeExclude != null && !includeExclude.accept(bytes)) {
continue;
}
if (previous.get().equals(bytes)) {
continue;
}
long bucketOrdinal = bucketOrds.add(bytes);
if (bucketOrdinal < 0) { // already seen
bucketOrdinal = -1 - bucketOrdinal;
collectExistingBucket(sub, doc, bucketOrdinal);
} else {
collectBucket(sub, doc, bucketOrdinal);
}
previous.copyBytes(bytes);
}
}
};
}
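The negative return from bucketOrds.add mirrors Lucene's BytesRefHash contract: a non-negative result is a freshly assigned ordinal, while a negative one encodes an already-present key as -1 - existingOrd. The contract in plain Lucene (assuming, as appears to hold, that Elasticsearch's BigArrays-backed BytesRefHash behaves the same way):

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;

BytesRefHash hash = new BytesRefHash();
int first = hash.add(new BytesRef("a")); // >= 0: newly inserted, this is its ordinal
int again = hash.add(new BytesRef("a")); // < 0: key already present
int existingOrd = -1 - again;            // recovers the ordinal; existingOrd == first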
Project: Elasticsearch
File: ProfileWeight.java
@Override
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
// We use the default bulk scorer instead of the specialized one. The reason
// is that Lucene's BulkScorers do everything at once: finding matches,
// scoring them and calling the collector, so they make it impossible to
// see where time is spent, which is the purpose of query profiling.
// The default bulk scorer will pull a scorer and iterate over matches,
// this might be a significantly different execution path for some queries
// like disjunctions, but in general this is what is done anyway
return super.bulkScorer(context);
}
Project: elasticsearch_my
File: ValuesSource.java
@Override
public Bits docsWithValue(LeafReaderContext context) {
final RandomAccessOrds ordinals = ordinalsValues(context);
if (DocValues.unwrapSingleton(ordinals) != null) {
return DocValues.docsWithValue(DocValues.unwrapSingleton(ordinals), context.reader().maxDoc());
} else {
return DocValues.docsWithValue(ordinals, context.reader().maxDoc());
}
}
Project: elasticsearch_my
File: SamplerAggregator.java
@Override
protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
if (bdd == null) {
throw new AggregationExecutionException("Sampler aggregation must be used with child aggregations.");
}
return bdd.getLeafCollector(ctx);
}
Project: elasticsearch-linear-regression
File: MultiValuesSource.java
public NumericDoubleValues getField(final int ordinal, LeafReaderContext ctx)
throws IOException {
if (ordinal >= names.length) { // >= : ordinal == names.length would overflow values[ordinal] below
throw new IndexOutOfBoundsException(
"ValuesSource array index " + ordinal + " out of bounds");
}
return multiValueMode.select(values[ordinal].doubleValues(ctx), Double.NEGATIVE_INFINITY);
}
Project: elasticsearch-hyperloglog
File: HyperUniqueSumAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
final BigArrays bigArrays = context.bigArrays();
final SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
return new LeafBucketCollectorBase(sub, values) {
@Override
public void collect(int doc, long bucket) throws IOException {
hyperLogLogPlusPlusObjectArray = bigArrays.grow(hyperLogLogPlusPlusObjectArray, bucket + 1);
values.setDocument(doc);
final int valuesCount = values.count();
HyperLogLogPlus hll;
for (int i = 0; i < valuesCount; i++) {
hll = deserializeHyperLogLogPlus(values.valueAt(i));
HyperLogLogPlus current = hyperLogLogPlusPlusObjectArray.get(bucket);
if (current == null) {
hyperLogLogPlusPlusObjectArray.set(bucket, hll);
} else {
try {
hyperLogLogPlusPlusObjectArray.set(bucket, (HyperLogLogPlus) hll.merge(current));
} catch (CardinalityMergeException cme) {
throw new ElasticsearchGenerationException("Failed to merge HyperLogLogPlus structures ", cme);
}
}
}
}
};
}
Project: Elasticsearch
File: BestDocsDeferringCollector.java
public PerParentBucketSamples(long parentBucket, Scorer scorer, LeafReaderContext readerContext) {
try {
this.parentBucket = parentBucket;
tdc = createTopDocsCollector(shardSize);
currentLeafCollector = tdc.getLeafCollector(readerContext);
setScorer(scorer);
} catch (IOException e) {
throw new ElasticsearchException("IO error creating collector", e);
}
}
Project: Elasticsearch
File: SignificantStringTermsAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
return new LeafBucketCollectorBase(super.getLeafCollector(ctx, sub), null) {
@Override
public void collect(int doc, long bucket) throws IOException {
super.collect(doc, bucket);
numCollectedDocs++;
}
};
}
Project: Elasticsearch
File: BitsetFilterCache.java
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
final Object coreCacheReader = context.reader().getCoreCacheKey();
final ShardId shardId = ShardUtils.extractShardId(context.reader());
if (shardId != null // can't require it because of the percolator
&& index.getName().equals(shardId.getIndex()) == false) {
// insanity
throw new IllegalStateException("Trying to load bit set for index [" + shardId.getIndex()
+ "] with cache of index [" + index.getName() + "]");
}
Cache<Query, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Query, Value>>() {
@Override
public Cache<Query, Value> call() throws Exception {
context.reader().addCoreClosedListener(BitsetFilterCache.this);
return CacheBuilder.newBuilder().build();
}
});
return filterToFbs.get(query, new Callable<Value>() {
@Override
public Value call() throws Exception {
final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
final Weight weight = searcher.createNormalizedWeight(query, false);
final Scorer s = weight.scorer(context);
final BitSet bitSet;
if (s == null) {
bitSet = null;
} else {
bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
}
Value value = new Value(bitSet, shardId);
listener.onCache(shardId, value.bitset);
return value;
}
}).bitset;
}
Project: Elasticsearch
File: FetchPhase.java
private int findRootDocumentIfNested(SearchContext context, LeafReaderContext subReaderContext, int subDocId) throws IOException {
if (context.mapperService().hasNested()) {
BitSet bits = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()).getBitSet(subReaderContext);
if (!bits.get(subDocId)) {
return bits.nextSetBit(subDocId);
}
}
return -1;
}
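Nested children are indexed immediately before their root document, and the cached bitset flags only root (non-nested) docs, so nextSetBit walks forward from a child to its enclosing root. A small sketch with a FixedBitSet and hypothetical doc IDs:

FixedBitSet roots = new FixedBitSet(8);
roots.set(3); // doc 3 is a root; docs 0..2 are its nested children
roots.set(7); // doc 7 is the next root, owning docs 4..6
int subDocId = 1;
int rootDocId = roots.nextSetBit(subDocId); // -> 3, the root that owns nested doc 1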
Project: elasticsearch_my
File: AbstractGeoPointDVIndexFieldData.java
@Override
public AtomicGeoPointFieldData load(LeafReaderContext context) {
try {
return new GeoPointDVAtomicFieldData(DocValues.getSortedNumeric(context.reader(), fieldName));
} catch (IOException e) {
throw new IllegalStateException("Cannot load doc values", e);
}
}
Project: elasticsearch_my
File: DateHistogramAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
final SortedNumericDocValues values = valuesSource.longValues(ctx);
return new LeafBucketCollectorBase(sub, values) {
@Override
public void collect(int doc, long bucket) throws IOException {
assert bucket == 0;
values.setDocument(doc);
final int valuesCount = values.count();
long previousRounded = Long.MIN_VALUE;
for (int i = 0; i < valuesCount; ++i) {
long value = values.valueAt(i);
long rounded = rounding.round(value - offset) + offset;
assert rounded >= previousRounded;
if (rounded == previousRounded) {
continue;
}
long bucketOrd = bucketOrds.add(rounded);
if (bucketOrd < 0) { // already seen
bucketOrd = -1 - bucketOrd;
collectExistingBucket(sub, doc, bucketOrd);
} else {
collectBucket(sub, doc, bucketOrd);
}
previousRounded = rounded;
}
}
};
}
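The expression rounding.round(value - offset) + offset shifts timestamps before rounding and shifts back afterward, so buckets can start at, say, 06:00 instead of midnight. With a plain day-floor standing in for ES's Rounding (which additionally handles time zones and calendar units):

long day = 86_400_000L;          // one day in millis
long offset = 6 * 3_600_000L;    // +6h: buckets run 06:00 to 06:00
long value = 1_500_000_000_000L; // 2017-07-14T02:40:00Z, a hypothetical sample
long rounded = Math.floorDiv(value - offset, day) * day + offset;
// -> 2017-07-13T06:00:00Z; every timestamp in that 06:00-to-06:00 window shares this bucket key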