Java class org.apache.lucene.index.AtomicReaderContext — example source code
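AtomicReaderContext is the per-segment (leaf) reader context in Lucene 4.x; a composite reader exposes one leaf context per segment. Before the project snippets below, here is a minimal self-contained sketch, assuming the Lucene 4.x API (pinned here to Version.LUCENE_47; all field names and values are illustrative), of how these contexts are usually obtained:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class LeavesExample {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_47,
        new StandardAnalyzer(Version.LUCENE_47));
    IndexWriter writer = new IndexWriter(dir, cfg);
    Document doc = new Document();
    doc.add(new TextField("field", "some text here", Field.Store.NO));
    writer.addDocument(doc);
    writer.close();

    DirectoryReader reader = DirectoryReader.open(dir);
    // One AtomicReaderContext per segment; ord and docBase are public fields.
    for (AtomicReaderContext context : reader.leaves()) {
      System.out.println("segment ord=" + context.ord
          + " docBase=" + context.docBase
          + " maxDoc=" + context.reader().maxDoc());
    }
    reader.close();
    dir.close();
  }
}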
Project: lams
File: PhraseQuery.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  Scorer scorer = scorer(context, context.reader().getLiveDocs());
  if (scorer != null) {
    int newDoc = scorer.advance(doc);
    if (newDoc == doc) {
      float freq = slop == 0 ? scorer.freq() : ((SloppyPhraseScorer) scorer).sloppyFreq();
      SimScorer docScorer = similarity.simScorer(stats, context);
      ComplexExplanation result = new ComplexExplanation();
      result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
          + similarity.getClass().getSimpleName() + "], result of:");
      Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
      result.addDetail(scoreExplanation);
      result.setValue(scoreExplanation.getValue());
      result.setMatch(true);
      return result;
    }
  }
  return new ComplexExplanation(false, 0.0f, "no matching term");
}
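A quick usage note: Weight.explain is rarely called directly; IndexSearcher.explain locates the leaf containing the document and delegates to it. A minimal hedged sketch, assuming the Lucene 4.x API (field and term values are illustrative):

import java.io.IOException;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;

static void printExplanation(IndexSearcher searcher, int docID) throws IOException {
  PhraseQuery pq = new PhraseQuery(); // 4.x PhraseQuery is mutable
  pq.add(new Term("field", "some"));
  pq.add(new Term("field", "text"));
  // IndexSearcher.explain finds the AtomicReaderContext containing docID
  // and calls Weight.explain(context, segment-local doc), as shown above.
  Explanation exp = searcher.explain(pq, docID);
  System.out.println(exp);
}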
Project: lams
File: TermQuery.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  Scorer scorer = scorer(context, context.reader().getLiveDocs());
  if (scorer != null) {
    int newDoc = scorer.advance(doc);
    if (newDoc == doc) {
      float freq = scorer.freq();
      SimScorer docScorer = similarity.simScorer(stats, context);
      ComplexExplanation result = new ComplexExplanation();
      result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
          + similarity.getClass().getSimpleName() + "], result of:");
      Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "termFreq=" + freq));
      result.addDetail(scoreExplanation);
      result.setValue(scoreExplanation.getValue());
      result.setMatch(true);
      return result;
    }
  }
  return new ComplexExplanation(false, 0.0f, "no matching term");
}
Project: lams
File: IndexSearcher.java
/**
 * Just like {@link #search(Weight, int, Sort, boolean, boolean)}, but you choose
 * whether or not the fields in the returned {@link FieldDoc} instances should
 * be set by specifying fillFields.
 */
protected TopFieldDocs search(List<AtomicReaderContext> leaves, Weight weight, FieldDoc after, int nDocs,
                              Sort sort, boolean fillFields, boolean doDocScores, boolean doMaxScore) throws IOException {
  // single thread
  int limit = reader.maxDoc();
  if (limit == 0) {
    limit = 1;
  }
  nDocs = Math.min(nDocs, limit);
  TopFieldCollector collector = TopFieldCollector.create(sort, nDocs, after,
                                                         fillFields, doDocScores,
                                                         doMaxScore, !weight.scoresDocsOutOfOrder());
  search(leaves, weight, collector);
  return (TopFieldDocs) collector.topDocs();
}
Project: lams
File: NearSpansOrdered.java
public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, Bits acceptDocs,
                        Map<Term,TermContext> termContexts, boolean collectPayloads) throws IOException {
  if (spanNearQuery.getClauses().length < 2) {
    throw new IllegalArgumentException("Less than 2 clauses: " + spanNearQuery);
  }
  this.collectPayloads = collectPayloads;
  allowedSlop = spanNearQuery.getSlop();
  SpanQuery[] clauses = spanNearQuery.getClauses();
  subSpans = new Spans[clauses.length];
  matchPayload = new LinkedList<>();
  subSpansByDoc = new Spans[clauses.length];
  for (int i = 0; i < clauses.length; i++) {
    subSpans[i] = clauses[i].getSpans(context, acceptDocs, termContexts);
    subSpansByDoc[i] = subSpans[i]; // used in toSameDoc()
  }
  query = spanNearQuery; // kept for toString() only.
}
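NearSpansOrdered is created for you when an in-order SpanNearQuery is executed. A hedged construction sketch (Lucene 4.x; the field and terms are illustrative):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;

static SpanNearQuery orderedNear() {
  // slop = 1, inOrder = true: execution is backed by NearSpansOrdered
  return new SpanNearQuery(new SpanQuery[] {
      new SpanTermQuery(new Term("field", "some")),
      new SpanTermQuery(new Term("field", "text"))
  }, 1, true);
}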
Project: lams
File: SpanWeight.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  SpanScorer scorer = (SpanScorer) scorer(context, context.reader().getLiveDocs());
  if (scorer != null) {
    int newDoc = scorer.advance(doc);
    if (newDoc == doc) {
      float freq = scorer.sloppyFreq();
      SimScorer docScorer = similarity.simScorer(stats, context);
      ComplexExplanation result = new ComplexExplanation();
      result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
          + similarity.getClass().getSimpleName() + "], result of:");
      Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
      result.addDetail(scoreExplanation);
      result.setValue(scoreExplanation.getValue());
      result.setMatch(true);
      return result;
    }
  }
  return new ComplexExplanation(false, 0.0f, "no matching term");
}
Project: lams
File: FilteredQuery.java
@Override
public Scorer filteredScorer(AtomicReaderContext context, Weight weight, DocIdSet docIdSet) throws IOException {
  final DocIdSetIterator filterIter = docIdSet.iterator();
  if (filterIter == null) {
    // this means the filter does not accept any documents.
    return null;
  }
  // we pass null as acceptDocs: the filter has already respected acceptDocs, no need to apply them twice
  final Scorer scorer = weight.scorer(context, null);
  if (scorer == null) {
    return null;
  }
  if (scorerFirst) {
    return new LeapFrogScorer(weight, scorer, filterIter, scorer);
  } else {
    return new LeapFrogScorer(weight, filterIter, scorer, scorer);
  }
}
Project: lams
File: CachingCollector.java
/**
 * Creates a {@link CachingCollector} which does not wrap another collector.
 * The cached documents and scores can later be {@link #replay(Collector)
 * replayed}.
 *
 * @param acceptDocsOutOfOrder
 *          whether documents are allowed to be collected out-of-order
 */
public static CachingCollector create(final boolean acceptDocsOutOfOrder, boolean cacheScores, double maxRAMMB) {
  Collector other = new Collector() {
    @Override
    public boolean acceptsDocsOutOfOrder() {
      return acceptDocsOutOfOrder;
    }

    @Override
    public void setScorer(Scorer scorer) {}

    @Override
    public void collect(int doc) {}

    @Override
    public void setNextReader(AtomicReaderContext context) {}
  };
  return create(other, cacheScores, maxRAMMB);
}
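A hedged usage sketch for the factory above (Lucene 4.x; the searcher, query, and replay target are assumed to exist): collect once while caching, then replay the cached hits into another collector without re-executing the query.

import java.io.IOException;
import org.apache.lucene.search.CachingCollector;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

static void searchAndReplay(IndexSearcher searcher, Query query, Collector replayTarget) throws IOException {
  // in-order collection, cache scores, 64 MB RAM budget for the cache
  CachingCollector cache = CachingCollector.create(false, true, 64.0);
  searcher.search(query, cache);
  if (cache.isCached()) { // false if the RAM budget was exceeded mid-collection
    cache.replay(replayTarget);
  }
}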
Project: lams
File: MultiPhraseQuery.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  Scorer scorer = scorer(context, context.reader().getLiveDocs());
  if (scorer != null) {
    int newDoc = scorer.advance(doc);
    if (newDoc == doc) {
      float freq = slop == 0 ? scorer.freq() : ((SloppyPhraseScorer) scorer).sloppyFreq();
      SimScorer docScorer = similarity.simScorer(stats, context);
      ComplexExplanation result = new ComplexExplanation();
      result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
          + similarity.getClass().getSimpleName() + "], result of:");
      Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
      result.addDetail(scoreExplanation);
      result.setValue(scoreExplanation.getValue());
      result.setMatch(true);
      return result;
    }
  }
  return new ComplexExplanation(false, 0.0f, "no matching term");
}
Project: lams
File: MultiPhraseQuery.java
public UnionDocsAndPositionsEnum(Bits liveDocs, AtomicReaderContext context, Term[] terms,
                                 Map<Term,TermContext> termContexts, TermsEnum termsEnum) throws IOException {
  List<DocsAndPositionsEnum> docsEnums = new LinkedList<>();
  for (int i = 0; i < terms.length; i++) {
    final Term term = terms[i];
    TermState termState = termContexts.get(term).get(context.ord);
    if (termState == null) {
      // the term doesn't exist in this reader
      continue;
    }
    termsEnum.seekExact(term.bytes(), termState);
    DocsAndPositionsEnum postings = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
    if (postings == null) {
      // the term exists, but has no positions
      throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + term.text() + ")");
    }
    cost += postings.cost();
    docsEnums.add(postings);
  }
  _queue = new DocsQueue(docsEnums);
  _posList = new IntQueue();
}
Project: lams
File: PayloadSpanUtil.java
private void getPayloads(Collection<byte[]> payloads, SpanQuery query) throws IOException {
  Map<Term,TermContext> termContexts = new HashMap<>();
  TreeSet<Term> terms = new TreeSet<>();
  query.extractTerms(terms);
  for (Term term : terms) {
    termContexts.put(term, TermContext.build(context, term));
  }
  for (AtomicReaderContext atomicReaderContext : context.leaves()) {
    final Spans spans = query.getSpans(atomicReaderContext, atomicReaderContext.reader().getLiveDocs(), termContexts);
    while (spans.next()) {
      if (spans.isPayloadAvailable()) {
        Collection<byte[]> payload = spans.getPayload();
        for (byte[] bytes : payload) {
          payloads.add(bytes);
        }
      }
    }
  }
}
Project: lams
File: CachingWrapperFilter.java
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
  final AtomicReader reader = context.reader();
  final Object key = reader.getCoreCacheKey();
  DocIdSet docIdSet = cache.get(key);
  if (docIdSet != null) {
    hitCount++;
  } else {
    missCount++;
    docIdSet = docIdSetToCache(filter.getDocIdSet(context, null), reader);
    assert docIdSet.isCacheable();
    cache.put(key, docIdSet);
  }
  return docIdSet == EMPTY ? null : BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
}
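Note the pattern: the inner filter is evaluated with null acceptDocs so the cached set is deletion-independent, and live docs are applied on top of the cached result. A hedged usage sketch (Lucene 4.x; the term is illustrative):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.CachingWrapperFilter;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;

// Each segment's DocIdSet is cached under that segment's core cache key,
// so the cache survives reader reopens that keep the same segments.
Filter cached = new CachingWrapperFilter(
    new QueryWrapperFilter(new TermQuery(new Term("field", "text"))));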
Project: lams
File: ConstantScoreQuery.java
@Override
public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  final DocIdSetIterator disi;
  if (filter != null) {
    assert query == null;
    final DocIdSet dis = filter.getDocIdSet(context, acceptDocs);
    if (dis == null) {
      return null;
    }
    disi = dis.iterator();
  } else {
    assert query != null && innerWeight != null;
    disi = innerWeight.scorer(context, acceptDocs);
  }
  if (disi == null) {
    return null;
  }
  return new ConstantScorer(disi, this, queryWeight);
}
Project: lams
File: ConstantScoreQuery.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  final Scorer cs = scorer(context, context.reader().getLiveDocs());
  final boolean exists = (cs != null && cs.advance(doc) == doc);
  final ComplexExplanation result = new ComplexExplanation();
  if (exists) {
    result.setDescription(ConstantScoreQuery.this.toString() + ", product of:");
    result.setValue(queryWeight);
    result.setMatch(Boolean.TRUE);
    result.addDetail(new Explanation(getBoost(), "boost"));
    result.addDetail(new Explanation(queryNorm, "queryNorm"));
  } else {
    result.setDescription(ConstantScoreQuery.this.toString() + " doesn't match id " + doc);
    result.setValue(0);
    result.setMatch(Boolean.FALSE);
  }
  return result;
}
Project: lams
File: ConstantScoreQuery.java
private Collector wrapCollector(final Collector collector) {
  return new Collector() {
    @Override
    public void setScorer(Scorer scorer) throws IOException {
      // we must wrap again here, but using the scorer passed in as parameter:
      collector.setScorer(new ConstantScorer(scorer, weight, theScore));
    }

    @Override
    public void collect(int doc) throws IOException {
      collector.collect(doc);
    }

    @Override
    public void setNextReader(AtomicReaderContext context) throws IOException {
      collector.setNextReader(context);
    }

    @Override
    public boolean acceptsDocsOutOfOrder() {
      return collector.acceptsDocsOutOfOrder();
    }
  };
}
Project: lams
File: DisjunctionMaxQuery.java
/** Create the scorer used to score our associated DisjunctionMaxQuery */
@Override
public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  List<Scorer> scorers = new ArrayList<>();
  for (Weight w : weights) {
    // we will advance() the subscorers
    Scorer subScorer = w.scorer(context, acceptDocs);
    if (subScorer != null) {
      scorers.add(subScorer);
    }
  }
  if (scorers.isEmpty()) {
    // no sub-scorers had any documents
    return null;
  } else if (scorers.size() == 1) {
    // only one sub-scorer in this segment
    return scorers.get(0);
  } else {
    return new DisjunctionMaxScorer(this, tieBreakerMultiplier, scorers.toArray(new Scorer[scorers.size()]));
  }
}
Project: lams
File: DisjunctionMaxQuery.java
/** Explain the score we computed for doc */
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  if (disjuncts.size() == 1) return weights.get(0).explain(context, doc);
  ComplexExplanation result = new ComplexExplanation();
  float max = 0.0f, sum = 0.0f;
  result.setDescription(tieBreakerMultiplier == 0.0f ? "max of:" : "max plus " + tieBreakerMultiplier + " times others of:");
  for (Weight wt : weights) {
    Explanation e = wt.explain(context, doc);
    if (e.isMatch()) {
      result.setMatch(Boolean.TRUE);
      result.addDetail(e);
      sum += e.getValue();
      max = Math.max(max, e.getValue());
    }
  }
  result.setValue(max + (sum - max) * tieBreakerMultiplier);
  return result;
}
Project: fangorn
File: IndexTestCase.java
protected DocsAndPositionsEnum getPosEnum(IndexReader r, int docid, Term t) throws IOException {
  List<AtomicReaderContext> leaves = r.getContext().leaves();
  for (AtomicReaderContext context : leaves) {
    AtomicReader reader = context.reader();
    DocsAndPositionsEnum termPositions = reader.termPositionsEnum(t);
    if (termPositions == null) {
      continue; // the term has no positions in this segment
    }
    int doc;
    while ((doc = termPositions.nextDoc()) != DocsEnum.NO_MORE_DOCS && doc != docid) {
      // advance until we reach docid or exhaust the enum
    }
    if (doc != DocsEnum.NO_MORE_DOCS) {
      return termPositions;
    }
  }
  fail("Expected positions enum for doc " + docid);
  return null; // unreachable
}
Project: linden
File: ShardWriter.java
/**
 * Process an intermediate form by carrying out, on the Lucene instance of
 * the shard, the deletes and the inserts (a ram index) in the form.
 * @param form the intermediate form containing deletes and a ram index
 * @throws IOException
 */
public void process(IntermediateForm form, FacetsConfig facetsConfig) throws IOException {
  if (facetsConfig != null) {
    DirectoryTaxonomyWriter.OrdinalMap map = new DirectoryTaxonomyWriter.MemoryOrdinalMap();
    // merge the taxonomies
    taxoWriter.addTaxonomy(form.getTaxoDirectory(), map);
    int[] ordinalMap = map.getMap();
    DirectoryReader reader = DirectoryReader.open(form.getDirectory());
    try {
      List<AtomicReaderContext> leaves = reader.leaves();
      int numReaders = leaves.size();
      AtomicReader[] wrappedLeaves = new AtomicReader[numReaders];
      for (int i = 0; i < numReaders; i++) {
        wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap, facetsConfig);
      }
      writer.addIndexes(new MultiReader(wrappedLeaves));
    } finally {
      reader.close();
    }
  } else {
    writer.addIndexes(new Directory[] { form.getDirectory() });
  }
  numForms++;
}
Project: linden
File: LindenScoreModelStrategy.java
public void preProcess(AtomicReaderContext context, LindenSchema schema, LindenScoreModel scoreModel) {
  this.context = context;
  fieldSchemaMap = new HashMap<>();
  for (LindenFieldSchema fieldSchema : schema.getFields()) {
    fieldSchemaMap.put(fieldSchema.getName(), fieldSchema);
  }
  // add the id field if the schema does not declare it explicitly
  if (!fieldSchemaMap.containsKey(schema.getId())) {
    LindenFieldSchema idFieldSchema = new LindenFieldSchema();
    idFieldSchema.setName(schema.getId());
    idFieldSchema.setType(LindenType.STRING);
    idFieldSchema.setIndexed(true);
    idFieldSchema.setOmitNorms(true);
    idFieldSchema.setOmitFreqs(true);
    idFieldSchema.setStored(true);
    idFieldSchema.setTokenized(false);
    fieldSchemaMap.put(schema.getId(), idFieldSchema);
  }
  this.scoreModel = scoreModel;
}
Project: DoSeR-Disambiguation
File: TermQuery.java
@Override
public Explanation explain(final AtomicReaderContext context, final int doc) throws IOException {
  final Scorer scorer = scorer(context, context.reader().getLiveDocs());
  if (scorer != null) {
    final int newDoc = scorer.advance(doc);
    if (newDoc == doc) {
      final float freq = scorer.freq();
      final SimScorer docScorer = similarity.simScorer(stats, context);
      final ComplexExplanation result = new ComplexExplanation();
      result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
          + similarity.getClass().getSimpleName() + "], result of:");
      final Explanation scoreExplanation = docScorer.explain(doc,
          new Explanation(freq, "termFreq=" + freq));
      result.addDetail(scoreExplanation);
      result.setValue(scoreExplanation.getValue());
      result.setMatch(true);
      return result;
    }
  }
  return new ComplexExplanation(false, 0.0f, "no matching term");
}
Project: DoSeR-Disambiguation
File: TermQuery.java
/**
 * Returns a {@link TermsEnum} positioned at this weight's Term, or null
 * if the term does not exist in the given context.
 */
private TermsEnum getTermsEnum(final AtomicReaderContext context) throws IOException {
  final TermState state = termStates.get(context.ord);
  if (state == null) { // term is not present in that reader
    assert termNotInReader(context.reader(), term)
        : "no termstate found but term exists in reader term=" + term;
    return null;
  }
  final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
  termsEnum.seekExact(term.bytes(), state);
  return termsEnum;
}
Project: DoSeR-Disambiguation
File: TermQuery.java
@Override
public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context)
      : "The top-reader used to create Weight (" + termStates.topReaderContext
          + ") is not the same as the current reader's top-reader ("
          + ReaderUtil.getTopLevelContext(context) + ")";
  final TermsEnum termsEnum = getTermsEnum(context);
  if (termsEnum == null) {
    return null;
  }
  final DocsEnum docs = termsEnum.docs(acceptDocs, null);
  assert docs != null;
  return new TermScorer(this, docs, similarity.simScorer(stats, context));
}
Project: DoSeR-Disambiguation
File: LearnToRankTermQuery.java
@Override
public Explanation explain(final AtomicReaderContext context, final int doc) throws IOException {
  final Scorer scorer = scorer(context, context.reader().getLiveDocs());
  if (scorer != null) {
    final int newDoc = scorer.advance(doc);
    if (newDoc == doc) {
      final float freq = scorer.freq();
      final SimScorer docScorer = sim.simScorer(stats, context);
      final ComplexExplanation result = new ComplexExplanation();
      result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
          + sim.getClass().getSimpleName() + "], result of:");
      final Explanation scoreExplanation = docScorer.explain(doc,
          new Explanation(freq, "termFreq=" + freq));
      result.addDetail(scoreExplanation);
      result.setValue(scoreExplanation.getValue());
      result.setMatch(true);
      return result;
    }
  }
  return new ComplexExplanation(false, 0.0f, "no matching term");
}
Project: DoSeR-Disambiguation
File: LearnToRankTermQuery.java
/**
 * Returns a {@link TermsEnum} positioned at this weight's Term, or null
 * if the term does not exist in the given context.
 */
private TermsEnum getTermsEnum(final AtomicReaderContext context) throws IOException {
  final TermState state = termStates.get(context.ord);
  if (state == null) { // term is not present in that reader
    assert termNotInReader(context.reader(), term)
        : "no termstate found but term exists in reader term=" + term;
    return null;
  }
  final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
  termsEnum.seekExact(term.bytes(), state);
  return termsEnum;
}
Project: DoSeR-Disambiguation
File: LearnToRankTermQuery.java
@Override
public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context)
      : "The top-reader used to create Weight (" + termStates.topReaderContext
          + ") is not the same as the current reader's top-reader ("
          + ReaderUtil.getTopLevelContext(context) + ")";
  final TermsEnum termsEnum = getTermsEnum(context);
  if (termsEnum == null) {
    return null;
  }
  final DocsEnum docs = termsEnum.docs(acceptDocs, null);
  assert docs != null;
  return new LearnToRankTermScorer(this, docs, sim.simScorer(stats, context));
}
Project: search
File: WeightedSpanTermExtractor.java
protected AtomicReaderContext getLeafContext() throws IOException {
  if (internalReader == null) {
    if (wrapToCaching && !(tokenStream instanceof CachingTokenFilter)) {
      assert !cachedTokenStream;
      tokenStream = new CachingTokenFilter(new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
      cachedTokenStream = true;
    }
    final MemoryIndex indexer = new MemoryIndex(true);
    indexer.addField(DelegatingAtomicReader.FIELD_NAME, tokenStream);
    tokenStream.reset();
    final IndexSearcher searcher = indexer.createSearcher();
    // a MemoryIndex has only an atomic (single-segment) context
    internalReader = new DelegatingAtomicReader(((AtomicReaderContext) searcher.getTopReaderContext()).reader());
  }
  return internalReader.getContext();
}
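The cast above works because a MemoryIndex holds a single segment, so its top-level reader context is itself atomic. A hedged sketch of the same pattern (Lucene 4.x; the field name, text, and analyzer are illustrative):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.Version;

static AtomicReaderContext memoryLeaf() {
  MemoryIndex mi = new MemoryIndex(true); // true: store offsets (used by highlighting)
  mi.addField("content", "some text here", new StandardAnalyzer(Version.LUCENE_47));
  IndexSearcher searcher = mi.createSearcher();
  // safe: a MemoryIndex exposes exactly one atomic leaf
  return (AtomicReaderContext) searcher.getTopReaderContext();
}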
Project: search
File: CustomScoreQuery.java
private Explanation doExplain(AtomicReaderContext info, int doc) throws IOException {
  Explanation subQueryExpl = subQueryWeight.explain(info, doc);
  if (!subQueryExpl.isMatch()) {
    return subQueryExpl;
  }
  // match
  Explanation[] valSrcExpls = new Explanation[valSrcWeights.length];
  for (int i = 0; i < valSrcWeights.length; i++) {
    valSrcExpls[i] = valSrcWeights[i].explain(info, doc);
  }
  Explanation customExp = CustomScoreQuery.this.getCustomScoreProvider(info).customExplain(doc, subQueryExpl, valSrcExpls);
  float sc = queryWeight * customExp.getValue();
  Explanation res = new ComplexExplanation(true, sc, CustomScoreQuery.this.toString() + ", product of:");
  res.addDetail(customExp);
  res.addDetail(new Explanation(queryWeight, "queryWeight"));
  return res;
}
Project: search
File: SolrConstantScoreQuery.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  ConstantScorer cs = new ConstantScorer(context, this, queryWeight, context.reader().getLiveDocs());
  boolean exists = cs.docIdSetIterator.advance(doc) == doc;
  ComplexExplanation result = new ComplexExplanation();
  if (exists) {
    result.setDescription("ConstantScoreQuery(" + filter + "), product of:");
    result.setValue(queryWeight);
    result.setMatch(Boolean.TRUE);
    result.addDetail(new Explanation(getBoost(), "boost"));
    result.addDetail(new Explanation(queryNorm, "queryNorm"));
  } else {
    result.setDescription("ConstantScoreQuery(" + filter + ") doesn't match id " + doc);
    result.setValue(0);
    result.setMatch(Boolean.FALSE);
  }
  return result;
}
Project: search
File: TermQuery.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  Scorer scorer = scorer(context, context.reader().getLiveDocs());
  if (scorer != null) {
    int newDoc = scorer.advance(doc);
    if (newDoc == doc) {
      float freq = scorer.freq();
      SimScorer docScorer = similarity.simScorer(stats, context);
      ComplexExplanation result = new ComplexExplanation();
      result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
          + similarity.getClass().getSimpleName() + "], result of:");
      Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "termFreq=" + freq));
      result.addDetail(scoreExplanation);
      result.setValue(scoreExplanation.getValue());
      result.setMatch(true);
      return result;
    }
  }
  return new ComplexExplanation(false, 0.0f, "no matching term");
}
Project: solrgraph
File: GraphQuery.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  // TODO: figure out what this actually is!
  final Scorer cs = scorer(context, context.reader().getLiveDocs());
  final boolean exists = (cs != null && cs.advance(doc) == doc);
  final ComplexExplanation result = new ComplexExplanation();
  if (exists) {
    result.setDescription(GraphQuery.this.toString() + ", product of:");
    result.setValue(queryWeight);
    result.setMatch(Boolean.TRUE);
    result.addDetail(new Explanation(getBoost(), "boost"));
    result.addDetail(new Explanation(queryNorm, "queryNorm"));
  } else {
    result.setDescription(GraphQuery.this.toString() + " doesn't match id " + doc);
    result.setValue(0);
    result.setMatch(Boolean.FALSE);
  }
  return result;
}
Project: search
File: SumTotalTermFreqValueSource.java
@Override
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
  long sumTotalTermFreq = 0;
  for (AtomicReaderContext readerContext : searcher.getTopReaderContext().leaves()) {
    Fields fields = readerContext.reader().fields();
    if (fields == null) continue;
    Terms terms = fields.terms(indexedField);
    if (terms == null) continue;
    long v = terms.getSumTotalTermFreq();
    if (v == -1) {
      sumTotalTermFreq = -1;
      break;
    } else {
      sumTotalTermFreq += v;
    }
  }
  final long ttf = sumTotalTermFreq;
  context.put(this, new LongDocValues(this) {
    @Override
    public long longVal(int doc) {
      return ttf;
    }
  });
}
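The -1 sentinel above means the codec does not expose the statistic for that field; once any leaf reports -1 the total is unusable, so the loop bails out. A hedged standalone version of the same per-leaf summation (Lucene 4.x):

import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms;

static long sumTotalTermFreq(IndexReader reader, String field) throws IOException {
  long sum = 0;
  for (AtomicReaderContext ctx : reader.leaves()) {
    Terms terms = ctx.reader().terms(field); // null if the segment lacks the field
    if (terms == null) continue;
    long v = terms.getSumTotalTermFreq();
    if (v == -1) return -1; // statistic not stored by the codec
    sum += v;
  }
  return sum;
}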
Project: search
File: QueryValueSource.java
public QueryDocValues(QueryValueSource vs, AtomicReaderContext readerContext, Map fcontext) throws IOException {
  super(vs);
  this.readerContext = readerContext;
  this.acceptDocs = readerContext.reader().getLiveDocs();
  this.defVal = vs.defVal;
  this.q = vs.q;
  this.fcontext = fcontext;
  Weight w = fcontext == null ? null : (Weight) fcontext.get(vs);
  if (w == null) {
    IndexSearcher weightSearcher;
    if (fcontext == null) {
      weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
    } else {
      weightSearcher = (IndexSearcher) fcontext.get("searcher");
      if (weightSearcher == null) {
        weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
      }
    }
    vs.createWeight(fcontext, weightSearcher);
    w = (Weight) fcontext.get(vs);
  }
  weight = w;
}
Project: DoSeR
File: LearnToRankTermQuery.java
/**
 * Returns a {@link TermsEnum} positioned at this weight's Term, or null
 * if the term does not exist in the given context.
 */
private TermsEnum getTermsEnum(final AtomicReaderContext context) throws IOException {
  final TermState state = termStates.get(context.ord);
  if (state == null) { // term is not present in that reader
    assert termNotInReader(context.reader(), term)
        : "no termstate found but term exists in reader term=" + term;
    return null;
  }
  final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
  termsEnum.seekExact(term.bytes(), state);
  return termsEnum;
}
Project: search
File: SpanWeight.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  SpanScorer scorer = (SpanScorer) scorer(context, context.reader().getLiveDocs());
  if (scorer != null) {
    int newDoc = scorer.advance(doc);
    if (newDoc == doc) {
      float freq = scorer.sloppyFreq();
      SimScorer docScorer = similarity.simScorer(stats, context);
      ComplexExplanation result = new ComplexExplanation();
      result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
          + similarity.getClass().getSimpleName() + "], result of:");
      Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
      result.addDetail(scoreExplanation);
      result.setValue(scoreExplanation.getValue());
      result.setMatch(true);
      return result;
    }
  }
  return new ComplexExplanation(false, 0.0f, "no matching term");
}
Project: search
File: IndexSearcher.java
/**
 * Just like {@link #search(Weight, int, Sort, boolean, boolean)}, but you choose
 * whether or not the fields in the returned {@link FieldDoc} instances should
 * be set by specifying fillFields.
 */
protected TopFieldDocs search(List<AtomicReaderContext> leaves, Weight weight, FieldDoc after, int nDocs,
                              Sort sort, boolean fillFields, boolean doDocScores, boolean doMaxScore) throws IOException {
  // single thread
  int limit = reader.maxDoc();
  if (limit == 0) {
    limit = 1;
  }
  nDocs = Math.min(nDocs, limit);
  TopFieldCollector collector = TopFieldCollector.create(sort, nDocs, after,
                                                         fillFields, doDocScores,
                                                         doMaxScore, !weight.scoresDocsOutOfOrder());
  search(leaves, weight, collector);
  return (TopFieldDocs) collector.topDocs();
}
Project: search
File: TestBooleanQuery.java
public void testInOrderWithMinShouldMatch() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newTextField("field", "some text here", Field.Store.NO));
  w.addDocument(doc);
  IndexReader r = w.getReader();
  w.close();
  IndexSearcher s = new IndexSearcher(r) {
    @Override
    protected void search(List<AtomicReaderContext> leaves, Weight weight, Collector collector) throws IOException {
      assertEquals(-1, collector.getClass().getSimpleName().indexOf("OutOfOrder"));
      super.search(leaves, weight, collector);
    }
  };
  BooleanQuery bq = new BooleanQuery();
  bq.add(new TermQuery(new Term("field", "some")), BooleanClause.Occur.SHOULD);
  bq.add(new TermQuery(new Term("field", "text")), BooleanClause.Occur.SHOULD);
  bq.add(new TermQuery(new Term("field", "here")), BooleanClause.Occur.SHOULD);
  bq.setMinimumNumberShouldMatch(2);
  s.search(bq, 10);
  r.close();
  dir.close();
}
Project: search
File: ExpressionRescorer.java
@Override
public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID) throws IOException {
  Explanation result = super.explain(searcher, firstPassExplanation, docID);
  List<AtomicReaderContext> leaves = searcher.getIndexReader().leaves();
  int subReader = ReaderUtil.subIndex(docID, leaves);
  AtomicReaderContext readerContext = leaves.get(subReader);
  int docIDInSegment = docID - readerContext.docBase;
  Map<String,Object> context = new HashMap<>();
  FakeScorer fakeScorer = new FakeScorer();
  fakeScorer.score = firstPassExplanation.getValue();
  fakeScorer.doc = docIDInSegment;
  context.put("scorer", fakeScorer);
  for (String variable : expression.variables) {
    result.addDetail(new Explanation(
        (float) bindings.getValueSource(variable).getValues(context, readerContext).doubleVal(docIDInSegment),
        "variable \"" + variable + "\""));
  }
  return result;
}
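The docBase arithmetic above is the standard way to map a top-level docID onto a leaf. A minimal hedged sketch of the same pattern (Lucene 4.x; the method name is illustrative):

import java.util.List;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.ReaderUtil;

static int toSegmentLocal(IndexReader reader, int globalDocID) {
  List<AtomicReaderContext> leaves = reader.leaves();
  int sub = ReaderUtil.subIndex(globalDocID, leaves); // binary search over docBase
  AtomicReaderContext leaf = leaves.get(sub);
  return globalDocID - leaf.docBase; // segment-local document id
}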
Project: search
File: SortingResponseWriter.java
protected void writeDoc(SortDoc sortDoc,
                        List<AtomicReaderContext> leaves,
                        FieldWriter[] fieldWriters,
                        FixedBitSet[] sets,
                        Writer out) throws IOException {
  int ord = sortDoc.ord;
  FixedBitSet set = sets[ord];
  set.clear(sortDoc.docId);
  AtomicReaderContext context = leaves.get(ord);
  boolean needsComma = false;
  for (FieldWriter fieldWriter : fieldWriters) {
    if (needsComma) {
      out.write(',');
    }
    fieldWriter.write(sortDoc.docId, context.reader(), out);
    needsComma = true;
  }
}
Project: search
File: PayloadSpanUtil.java
private void getPayloads(Collection<byte[]> payloads, SpanQuery query) throws IOException {
  Map<Term,TermContext> termContexts = new HashMap<>();
  TreeSet<Term> terms = new TreeSet<>();
  query.extractTerms(terms);
  for (Term term : terms) {
    termContexts.put(term, TermContext.build(context, term));
  }
  for (AtomicReaderContext atomicReaderContext : context.leaves()) {
    final Spans spans = query.getSpans(atomicReaderContext, atomicReaderContext.reader().getLiveDocs(), termContexts);
    while (spans.next()) {
      if (spans.isPayloadAvailable()) {
        Collection<byte[]> payload = spans.getPayload();
        for (byte[] bytes : payload) {
          payloads.add(bytes);
        }
      }
    }
  }
}
Project: search
File: AnalyzingInfixSuggester.java
@Override
public long ramBytesUsed() {
  long mem = RamUsageEstimator.shallowSizeOf(this);
  try {
    if (searcherMgr != null) {
      IndexSearcher searcher = searcherMgr.acquire();
      try {
        for (AtomicReaderContext context : searcher.getIndexReader().leaves()) {
          AtomicReader reader = FilterAtomicReader.unwrap(context.reader());
          if (reader instanceof SegmentReader) {
            // cast the unwrapped reader (casting context.reader() would fail
            // with a ClassCastException when the leaf is wrapped)
            mem += ((SegmentReader) reader).ramBytesUsed();
          }
        }
      } finally {
        searcherMgr.release(searcher);
      }
    }
    return mem;
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}