Example source code for the Java class org.apache.lucene.document.BinaryDocValuesField
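Before the project listings, here is a minimal, self-contained sketch of how a BinaryDocValuesField is typically written and then read back per document. It targets the Lucene 5.x-style API (most listings below use the slightly older 4.x API, where LeafReader was still named AtomicReader); the field names, RAMDirectory, and StandardAnalyzer choices are illustrative assumptions, not taken from any of the projects below.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;

public class BinaryDocValuesFieldDemo {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

    Document doc = new Document();
    doc.add(new StringField("id", "1", Field.Store.YES));
    // One opaque byte[] per document, stored column-stride; not searchable by itself.
    doc.add(new BinaryDocValuesField("payload", new BytesRef("arbitrary bytes")));
    writer.addDocument(doc);
    writer.close(); // commits the single segment

    DirectoryReader reader = DirectoryReader.open(dir);
    LeafReader leaf = reader.leaves().get(0).reader();
    BinaryDocValues dv = leaf.getBinaryDocValues("payload");
    BytesRef value = dv.get(0); // look up by segment-local docID
    System.out.println(value.utf8ToString());
    reader.close();
    dir.close();
  }
}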

Project: Elasticsearch    File: IdFieldMapper.java
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
    XContentParser parser = context.parser();
    if (parser.currentName() != null && parser.currentName().equals(Defaults.NAME) && parser.currentToken().isValue()) {
        // we are in the parse Phase
        String id = parser.text();
        if (context.id() != null && !context.id().equals(id)) {
            throw new MapperParsingException("Provided id [" + context.id() + "] does not match the content one [" + id + "]");
        }
        context.id(id);
    } // else we are in the pre/post parse phase

    if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
        fields.add(new Field(fieldType().names().indexName(), context.id(), fieldType()));
    }
    if (fieldType().hasDocValues()) {
        fields.add(new BinaryDocValuesField(fieldType().names().indexName(), new BytesRef(context.id())));
    }
}
Project: SOLR-5170    File: MultiPointDocValuesField.java
/**
 * Normally called by Solr's {@link org.apache.solr.update.DocumentBuilder}.
 * It will also be called by {@link org.apache.solr.update.processor.MultiValUpdateRequestProcessorFactory}
 * given a SolrInputField which has access to multiple values. This is
 * arranged to circumvent DocumentBuilder's limitation.
 */
@Override
public IndexableField createField(SchemaField field, Object value, float boost) {
  if (field.stored())
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "This field " +
        "cannot be configured as stored: " + field);
  List<Point> points;
  if (value instanceof SolrInputField) {
    SolrInputField inputField = ((SolrInputField) value);
    points = new ArrayList<Point>(inputField.getValueCount());
    for (Object iVal : inputField.getValues()) {
      points.add(pointFromValue(iVal));
    }
  } else if (value instanceof IndexableField) {//result of MultiValUpdateRequestProcessorFactory
    return (IndexableField) value;
  } else {
    points = Collections.singletonList(pointFromValue(value));
  }

  BytesRef bytes = MultiPointEncoding.pointsToBytes(points);
  return new BinaryDocValuesField(field.getName(), bytes);
}
Project: search    File: SerializedDVStrategy.java
@Override
public Field[] createIndexableFields(Shape shape) {
  int bufSize = Math.max(128, (int) (this.indexLastBufSize * 1.5));//50% headroom over last
  ByteArrayOutputStream byteStream = new ByteArrayOutputStream(bufSize);
  final BytesRef bytesRef = new BytesRef();//receiver of byteStream's bytes
  try {
    ctx.getBinaryCodec().writeShape(new DataOutputStream(byteStream), shape);
    //this is a hack to avoid redundant byte array copying by byteStream.toByteArray()
    byteStream.writeTo(new FilterOutputStream(null/*not used*/) {
      @Override
      public void write(byte[] b, int off, int len) throws IOException {
        bytesRef.bytes = b;
        bytesRef.offset = off;
        bytesRef.length = len;
      }
    });
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  this.indexLastBufSize = bytesRef.length;//cache heuristic
  return new Field[]{new BinaryDocValuesField(getFieldName(), bytesRef)};
}
Project: search    File: AnalyzingInfixSuggester.java
private Document buildDocument(BytesRef text, Set<BytesRef> contexts, long weight, BytesRef payload) throws IOException {
  String textString = text.utf8ToString();
  Document doc = new Document();
  FieldType ft = getTextFieldType();
  doc.add(new Field(TEXT_FIELD_NAME, textString, ft));
  doc.add(new Field("textgrams", textString, ft));
  doc.add(new StringField(EXACT_TEXT_FIELD_NAME, textString, Field.Store.NO));
  doc.add(new BinaryDocValuesField(TEXT_FIELD_NAME, text));
  doc.add(new NumericDocValuesField("weight", weight));
  if (payload != null) {
    doc.add(new BinaryDocValuesField("payloads", payload));
  }
  if (contexts != null) {
    for(BytesRef context : contexts) {
      // TODO: if we had a BinaryTermField we could fix
      // this "must be valid ut8f" limitation:
      doc.add(new StringField(CONTEXTS_FIELD_NAME, context.utf8ToString(), Field.Store.NO));
      doc.add(new SortedSetDocValuesField(CONTEXTS_FIELD_NAME, context));
    }
  }
  return doc;
}
Project: search    File: TestOrdinalMappingAtomicReader.java
private void buildIndexWithFacets(Directory indexDir, Directory taxoDir, boolean asc) throws IOException {
  IndexWriterConfig config = newIndexWriterConfig(null);
  RandomIndexWriter writer = new RandomIndexWriter(random(), indexDir, config);

  DirectoryTaxonomyWriter taxonomyWriter = new DirectoryTaxonomyWriter(taxoDir);
  for (int i = 1; i <= NUM_DOCS; i++) {
    Document doc = new Document();
    for (int j = i; j <= NUM_DOCS; j++) {
      int facetValue = asc ? j : NUM_DOCS - j;
      doc.add(new FacetField("tag", Integer.toString(facetValue)));
    }
    // add a facet under default dim config
    doc.add(new FacetField("id", Integer.toString(i)));

    // make sure OrdinalMappingAtomicReader ignores non-facet BinaryDocValues fields
    doc.add(new BinaryDocValuesField("bdv", new BytesRef(Integer.toString(i))));
    doc.add(new BinaryDocValuesField("cbdv", new BytesRef(Integer.toString(i*2))));
    writer.addDocument(facetConfig.build(taxonomyWriter, doc));
  }
  taxonomyWriter.commit();
  taxonomyWriter.close();
  writer.commit();
  writer.close();
}
Project: search    File: TestBinaryDocValuesUpdates.java
public void testUpdateSameDocMultipleTimes() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);

  Document doc = new Document();
  doc.add(new StringField("key", "doc", Store.NO));
  doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
  writer.addDocument(doc); // flushed document
  writer.commit();
  writer.addDocument(doc); // in-memory document

  writer.updateBinaryDocValue(new Term("key", "doc"), "bdv", toBytes(17L)); // update existing field
  writer.updateBinaryDocValue(new Term("key", "doc"), "bdv", toBytes(3L)); // update existing field 2nd time in this commit
  writer.close();

  final DirectoryReader reader = DirectoryReader.open(dir);
  final AtomicReader r = SlowCompositeReaderWrapper.wrap(reader);
  BinaryDocValues bdv = r.getBinaryDocValues("bdv");
  for (int i = 0; i < r.maxDoc(); i++) {
    assertEquals(3, getValue(bdv, i));
  }
  reader.close();
  dir.close();
}
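The TestBinaryDocValuesUpdates listings in this section call two helpers defined elsewhere in that test class, toBytes(long) and getValue(BinaryDocValues, int), which round-trip a long through a BytesRef. The sketch below is a plausible minimal stand-in under that assumption (8 big-endian bytes); the real test helpers may use a different encoding.

import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.util.BytesRef;

final class LongBytesHelpers {
  // Hypothetical stand-in for the test's toBytes(long): pack the long as 8 big-endian bytes.
  static BytesRef toBytes(long value) {
    byte[] bytes = new byte[8];
    for (int i = 7; i >= 0; i--) {
      bytes[i] = (byte) (value & 0xFF);
      value >>>= 8;
    }
    return new BytesRef(bytes);
  }

  // Hypothetical stand-in for getValue(bdv, docID): decode the per-document bytes back to a long.
  static long getValue(BinaryDocValues bdv, int docID) {
    BytesRef ref = bdv.get(docID); // random-access API used by these 4.x-era snippets
    long value = 0;
    for (int i = 0; i < ref.length; i++) {
      value = (value << 8) | (ref.bytes[ref.offset + i] & 0xFF);
    }
    return value;
  }
}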
Project: search    File: TestBinaryDocValuesUpdates.java
public void testUpdateBinaryDVFieldWithSameNameAsPostingField() throws Exception {
  // this used to fail because FieldInfos.Builder neglected to update
  // globalFieldMaps.docValueTypes map
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);

  Document doc = new Document();
  doc.add(new StringField("f", "mock-value", Store.NO));
  doc.add(new BinaryDocValuesField("f", toBytes(5L)));
  writer.addDocument(doc);
  writer.commit();
  writer.updateBinaryDocValue(new Term("f", "mock-value"), "f", toBytes(17L));
  writer.close();

  DirectoryReader r = DirectoryReader.open(dir);
  BinaryDocValues bdv = r.leaves().get(0).reader().getBinaryDocValues("f");
  assertEquals(17, getValue(bdv, 0));
  r.close();

  dir.close();
}
Project: search    File: TestBinaryDocValuesUpdates.java
public void testUpdatesOrder() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);

  Document doc = new Document();
  doc.add(new StringField("upd", "t1", Store.NO));
  doc.add(new StringField("upd", "t2", Store.NO));
  doc.add(new BinaryDocValuesField("f1", toBytes(1L)));
  doc.add(new BinaryDocValuesField("f2", toBytes(1L)));
  writer.addDocument(doc);
  writer.updateBinaryDocValue(new Term("upd", "t1"), "f1", toBytes(2L)); // update f1 to 2
  writer.updateBinaryDocValue(new Term("upd", "t1"), "f2", toBytes(2L)); // update f2 to 2
  writer.updateBinaryDocValue(new Term("upd", "t2"), "f1", toBytes(3L)); // update f1 to 3
  writer.updateBinaryDocValue(new Term("upd", "t2"), "f2", toBytes(3L)); // update f2 to 3
  writer.updateBinaryDocValue(new Term("upd", "t1"), "f1", toBytes(4L)); // update f1 to 4 (but not f2)
  writer.close();

  DirectoryReader reader = DirectoryReader.open(dir);
  assertEquals(4, getValue(reader.leaves().get(0).reader().getBinaryDocValues("f1"), 0));
  assertEquals(3, getValue(reader.leaves().get(0).reader().getBinaryDocValues("f2"), 0));
  reader.close();

  dir.close();
}
Project: search    File: TestBinaryDocValuesUpdates.java
public void testUpdateAllDeletedSegment() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);

  Document doc = new Document();
  doc.add(new StringField("id", "doc", Store.NO));
  doc.add(new BinaryDocValuesField("f1", toBytes(1L)));
  writer.addDocument(doc);
  writer.addDocument(doc);
  writer.commit();
  writer.deleteDocuments(new Term("id", "doc")); // delete all docs in the first segment
  writer.addDocument(doc);
  writer.updateBinaryDocValue(new Term("id", "doc"), "f1", toBytes(2L));
  writer.close();

  DirectoryReader reader = DirectoryReader.open(dir);
  assertEquals(1, reader.leaves().size());
  assertEquals(2L, getValue(reader.leaves().get(0).reader().getBinaryDocValues("f1"), 0));
  reader.close();

  dir.close();
}
Project: search    File: TestBinaryDocValuesUpdates.java
public void testUpdateTwoNonexistingTerms() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);

  Document doc = new Document();
  doc.add(new StringField("id", "doc", Store.NO));
  doc.add(new BinaryDocValuesField("f1", toBytes(1L)));
  writer.addDocument(doc);
  // update w/ multiple nonexisting terms in same field
  writer.updateBinaryDocValue(new Term("c", "foo"), "f1", toBytes(2L));
  writer.updateBinaryDocValue(new Term("c", "bar"), "f1", toBytes(2L));
  writer.close();

  DirectoryReader reader = DirectoryReader.open(dir);
  assertEquals(1, reader.leaves().size());
  assertEquals(1L, getValue(reader.leaves().get(0).reader().getBinaryDocValues("f1"), 0));
  reader.close();

  dir.close();
}
Project: search    File: TestDocValuesIndexing.java
public void testDifferentTypedDocValuesField() throws Exception {
  Directory d = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), d);
  Document doc = new Document();
  doc.add(new NumericDocValuesField("field", 17));
  w.addDocument(doc);

  // Index doc values are single-valued so we should not
  // be able to add same field more than once:
  doc.add(new BinaryDocValuesField("field", new BytesRef("blah")));
  try {
    w.addDocument(doc);
    fail("didn't hit expected exception");
  } catch (IllegalArgumentException iae) {
    // expected
  }

  DirectoryReader r = w.getReader();
  w.close();
  assertEquals(17, FieldCache.DEFAULT.getInts(getOnlySegmentReader(r), "field", false).get(0));
  r.close();
  d.close();
}
Project: search    File: TestDocValuesIndexing.java
public void testMixedTypesAfterReopenAppend2() throws IOException {
  assumeTrue("codec does not support SORTED_SET", defaultCodecSupportsSortedSet());
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  doc.add(new SortedSetDocValuesField("foo", new BytesRef("foo")));
  w.addDocument(doc);
  w.close();

  doc = new Document();
  w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  doc.add(new StringField("foo", "bar", Field.Store.NO));
  doc.add(new BinaryDocValuesField("foo", new BytesRef("foo")));
  try {
    // NOTE: this case follows a different code path inside
    // DefaultIndexingChain/FieldInfos, because the field (foo)
    // is first added without DocValues:
    w.addDocument(doc);
    fail("did not get expected exception");
  } catch (IllegalArgumentException iae) {
    // expected
  }
  w.forceMerge(1);
  w.close();
  dir.close();
}
Project: search    File: TestDocValuesIndexing.java
public void testSameFieldNameForPostingAndDocValue() throws Exception {
  // LUCENE-5192: FieldInfos.Builder neglected to update
  // globalFieldNumbers.docValuesType map if the field existed, resulting in
  // potentially adding the same field with different DV types.
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);

  Document doc = new Document();
  doc.add(new StringField("f", "mock-value", Store.NO));
  doc.add(new NumericDocValuesField("f", 5));
  writer.addDocument(doc);
  writer.commit();

  doc = new Document();
  doc.add(new BinaryDocValuesField("f", new BytesRef("mock")));
  try {
    writer.addDocument(doc);
    fail("should not have succeeded to add a field with different DV type than what already exists");
  } catch (IllegalArgumentException e) {
    writer.rollback();
  }

  dir.close();
}
Project: NYBC    File: TestDocValuesIndexing.java
public void testDifferentTypedDocValuesField() throws Exception {
  Directory d = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), d);
  Document doc = new Document();
  // Index doc values are single-valued so we should not
  // be able to add same field more than once:
  Field f;
  doc.add(f = new NumericDocValuesField("field", 17));
  doc.add(new BinaryDocValuesField("field", new BytesRef("blah")));
  try {
    w.addDocument(doc);
    fail("didn't hit expected exception");
  } catch (IllegalArgumentException iae) {
    // expected
  }

  doc = new Document();
  doc.add(f);
  w.addDocument(doc);
  w.forceMerge(1);
  DirectoryReader r = w.getReader();
  w.close();
  assertEquals(17, FieldCache.DEFAULT.getInts(getOnlySegmentReader(r), "field", false).get(0));
  r.close();
  d.close();
}
Project: NYBC    File: TestDocValuesIndexing.java
public void testAddBinaryTwice() throws IOException {
  Analyzer analyzer = new MockAnalyzer(random());

  Directory directory = newDirectory();
  // we don't use RandomIndexWriter because it might add more docvalues than we expect !!!!1
  IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
  iwc.setMergePolicy(newLogMergePolicy());
  IndexWriter iwriter = new IndexWriter(directory, iwc);
  Document doc = new Document();
  doc.add(new BinaryDocValuesField("dv", new BytesRef("foo!")));
  doc.add(new BinaryDocValuesField("dv", new BytesRef("bar!")));
  try {
    iwriter.addDocument(doc);
    fail("didn't hit expected exception");
  } catch (IllegalArgumentException expected) {
    // expected
  }

  iwriter.close();
  directory.close();
}
Project: NYBC    File: TestDocValuesIndexing.java
public void testTooLargeBytes() throws IOException {
  Analyzer analyzer = new MockAnalyzer(random());

  Directory directory = newDirectory();
  // we don't use RandomIndexWriter because it might add more docvalues than we expect !!!!1
  IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
  iwc.setMergePolicy(newLogMergePolicy());
  IndexWriter iwriter = new IndexWriter(directory, iwc);
  Document doc = new Document();
  byte bytes[] = new byte[100000];
  BytesRef b = new BytesRef(bytes);
  random().nextBytes(bytes);
  doc.add(new BinaryDocValuesField("dv", b));
  try {
    iwriter.addDocument(doc);
    fail("did not get expected exception");
  } catch (IllegalArgumentException expected) {
    // expected
  }
  iwriter.close();

  directory.close();
}
Project: incubator-blur    File: SecureAtomicReaderTestBase.java
private Iterable<IndexableField> getDoc(int i) {
  Document document = new Document();
  document.add(new StringField("test", "test", Store.YES));
  document.add(new StringField("info", "info", Store.YES));
  if (i == 3) {
    document.add(new StringField("shouldnotsee", "shouldnotsee", Store.YES));
  }
  if (i == 5) {
    document.add(new StringField("termmask", "term", Store.YES));
  }
  document.add(new NumericDocValuesField("number", i));
  document.add(new BinaryDocValuesField("bin", new BytesRef(Integer.toString(i).getBytes())));
  document.add(new SortedDocValuesField("sorted", new BytesRef(Integer.toString(i).getBytes())));
  document.add(new SortedSetDocValuesField("sortedset", new BytesRef(Integer.toString(i).getBytes())));
  document.add(new SortedSetDocValuesField("sortedset", new BytesRef(("0" + Integer.toString(i)).getBytes())));
  return document;
}
Project: read-open-source-code    File: SerializedDVStrategy.java
@Override
public Field[] createIndexableFields(Shape shape) {
  int bufSize = Math.max(128, (int) (this.indexLastBufSize * 1.5));//50% headroom over last
  ByteArrayOutputStream byteStream = new ByteArrayOutputStream(bufSize);
  final BytesRef bytesRef = new BytesRef();//receiver of byteStream's bytes
  try {
    ctx.getBinaryCodec().writeShape(new DataOutputStream(byteStream), shape);
    //this is a hack to avoid redundant byte array copying by byteStream.toByteArray()
    byteStream.writeTo(new FilterOutputStream(null/*not used*/) {
      @Override
      public void write(byte[] b, int off, int len) throws IOException {
        bytesRef.bytes = b;
        bytesRef.offset = off;
        bytesRef.length = len;
      }
    });
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  this.indexLastBufSize = bytesRef.length;//cache heuristic
  return new Field[]{new BinaryDocValuesField(getFieldName(), bytesRef)};
}
Project: read-open-source-code    File: AnalyzingInfixSuggester.java
private Document buildDocument(BytesRef text, Set<BytesRef> contexts, long weight, BytesRef payload) throws IOException {
  String textString = text.utf8ToString();
  Document doc = new Document();
  FieldType ft = getTextFieldType();
  doc.add(new Field(TEXT_FIELD_NAME, textString, ft));
  doc.add(new Field("textgrams", textString, ft));
  doc.add(new StringField(EXACT_TEXT_FIELD_NAME, textString, Field.Store.NO));
  doc.add(new BinaryDocValuesField(TEXT_FIELD_NAME, text));
  doc.add(new NumericDocValuesField("weight", weight));
  if (payload != null) {
    doc.add(new BinaryDocValuesField("payloads", payload));
  }
  if (contexts != null) {
    for(BytesRef context : contexts) {
      // TODO: if we had a BinaryTermField we could fix
      // this "must be valid ut8f" limitation:
      doc.add(new StringField(CONTEXTS_FIELD_NAME, context.utf8ToString(), Field.Store.NO));
      doc.add(new SortedSetDocValuesField(CONTEXTS_FIELD_NAME, context));
    }
  }
  return doc;
}
Project: Maskana-Gestor-de-Conocimiento    File: TestDocValuesIndexing.java
public void testDifferentTypedDocValuesField() throws Exception {
  Directory d = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), d);
  Document doc = new Document();
  // Index doc values are single-valued so we should not
  // be able to add same field more than once:
  Field f;
  doc.add(f = new NumericDocValuesField("field", 17));
  doc.add(new BinaryDocValuesField("field", new BytesRef("blah")));
  try {
    w.addDocument(doc);
    fail("didn't hit expected exception");
  } catch (IllegalArgumentException iae) {
    // expected
  }

  doc = new Document();
  doc.add(f);
  w.addDocument(doc);
  w.forceMerge(1);
  DirectoryReader r = w.getReader();
  w.close();
  assertEquals(17, FieldCache.DEFAULT.getInts(getOnlySegmentReader(r), "field", false).get(0));
  r.close();
  d.close();
}
Project: Maskana-Gestor-de-Conocimiento    File: TestDocValuesIndexing.java
public void testAddBinaryTwice() throws IOException {
  Analyzer analyzer = new MockAnalyzer(random());

  Directory directory = newDirectory();
  // we don't use RandomIndexWriter because it might add more docvalues than we expect !!!!1
  IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
  iwc.setMergePolicy(newLogMergePolicy());
  IndexWriter iwriter = new IndexWriter(directory, iwc);
  Document doc = new Document();
  doc.add(new BinaryDocValuesField("dv", new BytesRef("foo!")));
  doc.add(new BinaryDocValuesField("dv", new BytesRef("bar!")));
  try {
    iwriter.addDocument(doc);
    fail("didn't hit expected exception");
  } catch (IllegalArgumentException expected) {
    // expected
  }

  iwriter.close();
  directory.close();
}
Project: Maskana-Gestor-de-Conocimiento    File: TestDocValuesIndexing.java
public void testSameFieldNameForPostingAndDocValue() throws Exception {
  // LUCENE-5192: FieldInfos.Builder neglected to update
  // globalFieldNumbers.docValuesType map if the field existed, resulting in
  // potentially adding the same field with different DV types.
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);

  Document doc = new Document();
  doc.add(new StringField("f", "mock-value", Store.NO));
  doc.add(new NumericDocValuesField("f", 5));
  writer.addDocument(doc);
  writer.commit();

  doc = new Document();
  doc.add(new BinaryDocValuesField("f", new BytesRef("mock")));
  try {
    writer.addDocument(doc);
    fail("should not have succeeded to add a field with different DV type than what already exists");
  } catch (IllegalArgumentException e) {
    writer.rollback();
  }

  dir.close();
}
Project: elasticsearch_my    File: UidFieldMapper.java
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
    Field uid = new Field(NAME, Uid.createUid(context.sourceToParse().type(), context.sourceToParse().id()), Defaults.FIELD_TYPE);
    fields.add(uid);
    if (fieldType().hasDocValues()) {
        fields.add(new BinaryDocValuesField(NAME, new BytesRef(uid.stringValue())));
    }
}
Project: Elasticsearch    File: UidFieldMapper.java
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
    Field uid = new Field(NAME, Uid.createUid(context.stringBuilder(), context.type(), context.id()), Defaults.FIELD_TYPE);
    context.uid(uid);
    fields.add(uid);
    if (fieldType().hasDocValues()) {
        fields.add(new BinaryDocValuesField(NAME, new BytesRef(uid.stringValue())));
    }
}
Project: search    File: FacetsConfig.java
private void processAssocFacetFields(TaxonomyWriter taxoWriter,
    Map<String,List<AssociationFacetField>> byField, Document doc)
    throws IOException {
  for (Map.Entry<String,List<AssociationFacetField>> ent : byField.entrySet()) {
    byte[] bytes = new byte[16];
    int upto = 0;
    String indexFieldName = ent.getKey();
    for(AssociationFacetField field : ent.getValue()) {
      // NOTE: we don't add parents for associations
      checkTaxoWriter(taxoWriter);
      FacetLabel label = new FacetLabel(field.dim, field.path);
      int ordinal = taxoWriter.addCategory(label);
      if (upto + 4 > bytes.length) {
        bytes = ArrayUtil.grow(bytes, upto+4);
      }
      // big-endian:
      bytes[upto++] = (byte) (ordinal >> 24);
      bytes[upto++] = (byte) (ordinal >> 16);
      bytes[upto++] = (byte) (ordinal >> 8);
      bytes[upto++] = (byte) ordinal;
      if (upto + field.assoc.length > bytes.length) {
        bytes = ArrayUtil.grow(bytes, upto+field.assoc.length);
      }
      System.arraycopy(field.assoc.bytes, field.assoc.offset, bytes, upto, field.assoc.length);
      upto += field.assoc.length;

      // Drill down:
      for (int i = 1; i <= label.length; i++) {
        doc.add(new StringField(indexFieldName, pathToString(label.components, i), Field.Store.NO));
      }
    }
    doc.add(new BinaryDocValuesField(indexFieldName, new BytesRef(bytes, 0, upto)));
  }
}
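The loop above packs each association as a 4-byte big-endian ordinal followed by the raw association bytes, concatenated into one BinaryDocValuesField per index field. A hypothetical decoder for the common case of 4-byte int associations (the layout IntAssociationFacetField produces) could walk the packed value like this; variable-length associations would need the per-field payload length instead of the fixed stride of 8.

import org.apache.lucene.util.BytesRef;

final class AssocBytesDecoder {
  // Walks the (ordinal, association) pairs packed by processAssocFacetFields,
  // assuming every association payload is exactly 4 bytes.
  static void decode(BytesRef b) {
    for (int pos = b.offset, end = b.offset + b.length; pos < end; pos += 8) {
      int ordinal = readIntBE(b.bytes, pos);
      int assoc = readIntBE(b.bytes, pos + 4);
      System.out.println("ordinal=" + ordinal + " assoc=" + assoc);
    }
  }

  private static int readIntBE(byte[] bytes, int pos) {
    return ((bytes[pos] & 0xFF) << 24) | ((bytes[pos + 1] & 0xFF) << 16)
         | ((bytes[pos + 2] & 0xFF) << 8) | (bytes[pos + 3] & 0xFF);
  }
}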
Project: search    File: BaseDocValuesFormatTestCase.java
public void testTwoBytesOneMissing() throws IOException {
  assumeTrue("Codec does not support getDocsWithField", defaultCodecSupportsDocsWithField());
  Directory directory = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(null);
  conf.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
  Document doc = new Document();
  doc.add(new StringField("id", "0", Field.Store.YES));
  doc.add(new BinaryDocValuesField("dv1", new BytesRef()));
  iw.addDocument(doc);
  doc = new Document();
  doc.add(new StringField("id", "1", Field.Store.YES));
  iw.addDocument(doc);
  iw.forceMerge(1);
  iw.close();

  IndexReader ir = DirectoryReader.open(directory);
  assertEquals(1, ir.leaves().size());
  AtomicReader ar = ir.leaves().get(0).reader();
  BinaryDocValues dv = ar.getBinaryDocValues("dv1");
  BytesRef ref = dv.get(0);
  assertEquals(new BytesRef(), ref);
  ref = dv.get(1);
  assertEquals(new BytesRef(), ref);
  Bits docsWithField = ar.getDocsWithField("dv1");
  assertTrue(docsWithField.get(0));
  assertFalse(docsWithField.get(1));
  ir.close();
  directory.close();
}
Project: search    File: BaseDocValuesFormatTestCase.java
public void testTwoBytesOneMissingWithMerging() throws IOException {
  assumeTrue("Codec does not support getDocsWithField", defaultCodecSupportsDocsWithField());
  Directory directory = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(null);
  conf.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
  Document doc = new Document();
  doc.add(new StringField("id", "0", Field.Store.YES));
  doc.add(new BinaryDocValuesField("dv1", new BytesRef()));
  iw.addDocument(doc);
  iw.commit();
  doc = new Document();
  doc.add(new StringField("id", "1", Field.Store.YES));
  iw.addDocument(doc);
  iw.forceMerge(1);
  iw.close();

  IndexReader ir = DirectoryReader.open(directory);
  assertEquals(1, ir.leaves().size());
  AtomicReader ar = ir.leaves().get(0).reader();
  BinaryDocValues dv = ar.getBinaryDocValues("dv1");
  BytesRef ref = dv.get(0);
  assertEquals(new BytesRef(), ref);
  ref = dv.get(1);
  assertEquals(new BytesRef(), ref);
  Bits docsWithField = ar.getDocsWithField("dv1");
  assertTrue(docsWithField.get(0));
  assertFalse(docsWithField.get(1));
  ir.close();
  directory.close();
}
Project: search    File: BaseDocValuesFormatTestCase.java
public void testThreeBytesOneMissingWithMerging() throws IOException {
  assumeTrue("Codec does not support getDocsWithField", defaultCodecSupportsDocsWithField());
  Directory directory = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(null);
  conf.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
  Document doc = new Document();
  doc.add(new StringField("id", "0", Field.Store.YES));
  doc.add(new BinaryDocValuesField("dv1", new BytesRef()));
  iw.addDocument(doc);
  doc = new Document();
  doc.add(new StringField("id", "1", Field.Store.YES));
  iw.addDocument(doc);
  iw.commit();
  doc = new Document();
  doc.add(new StringField("id", "2", Field.Store.YES));
  doc.add(new BinaryDocValuesField("dv1", new BytesRef("boo")));
  iw.addDocument(doc);
  iw.forceMerge(1);
  iw.close();

  IndexReader ir = DirectoryReader.open(directory);
  assertEquals(1, ir.leaves().size());
  AtomicReader ar = ir.leaves().get(0).reader();
  BinaryDocValues dv = ar.getBinaryDocValues("dv1");
  BytesRef ref = dv.get(0);
  assertEquals(new BytesRef(), ref);
  ref = dv.get(1);
  assertEquals(new BytesRef(), ref);
  ref = dv.get(2);
  assertEquals(new BytesRef("boo"), ref);
  Bits docsWithField = ar.getDocsWithField("dv1");
  assertTrue(docsWithField.get(0));
  assertFalse(docsWithField.get(1));
  assertTrue(docsWithField.get(2));
  ir.close();
  directory.close();
}
Project: search    File: TestSortDocValues.java
/** Tests sorting on type string_val */
public void testStringVal() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new BinaryDocValuesField("value", new BytesRef("foo")));
  doc.add(newStringField("value", "foo", Field.Store.YES));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new BinaryDocValuesField("value", new BytesRef("bar")));
  doc.add(newStringField("value", "bar", Field.Store.YES));
  writer.addDocument(doc);
  IndexReader ir = writer.getReader();
  writer.close();

  IndexSearcher searcher = newSearcher(ir);
  Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));

  TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
  assertEquals(2, td.totalHits);
  // 'bar' comes before 'foo'
  assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
  assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
  assertNoFieldCaches();

  ir.close();
  dir.close();
}
Project: search    File: TestSortDocValues.java
/** Tests reverse sorting on type string_val */
public void testStringValReverse() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new BinaryDocValuesField("value", new BytesRef("bar")));
  doc.add(newStringField("value", "bar", Field.Store.YES));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new BinaryDocValuesField("value", new BytesRef("foo")));
  doc.add(newStringField("value", "foo", Field.Store.YES));
  writer.addDocument(doc);
  IndexReader ir = writer.getReader();
  writer.close();

  IndexSearcher searcher = newSearcher(ir);
  Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));

  TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
  assertEquals(2, td.totalHits);
  // 'foo' comes after 'bar' in reverse order
  assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
  assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
  assertNoFieldCaches();

  ir.close();
  dir.close();
}
Project: search    File: TestBinaryDocValuesUpdates.java
public void testMultipleBinaryDocValues() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setMaxBufferedDocs(10); // prevent merges
  IndexWriter writer = new IndexWriter(dir, conf);

  for (int i = 0; i < 2; i++) {
    Document doc = new Document();
    doc.add(new StringField("dvUpdateKey", "dv", Store.NO));
    doc.add(new BinaryDocValuesField("bdv1", toBytes(i)));
    doc.add(new BinaryDocValuesField("bdv2", toBytes(i)));
    writer.addDocument(doc);
  }
  writer.commit();

  // update all docs' bdv1 field
  writer.updateBinaryDocValue(new Term("dvUpdateKey", "dv"), "bdv1", toBytes(17L));
  writer.close();

  final DirectoryReader reader = DirectoryReader.open(dir);
  AtomicReader r = reader.leaves().get(0).reader();

  BinaryDocValues bdv1 = r.getBinaryDocValues("bdv1");
  BinaryDocValues bdv2 = r.getBinaryDocValues("bdv2");
  for (int i = 0; i < r.maxDoc(); i++) {
    assertEquals(17, getValue(bdv1, i));
    assertEquals(i, getValue(bdv2, i));
  }

  reader.close();
  dir.close();
}
Project: search    File: TestBinaryDocValuesUpdates.java
public void testDocumentWithNoValue() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);

  for (int i = 0; i < 2; i++) {
    Document doc = new Document();
    doc.add(new StringField("dvUpdateKey", "dv", Store.NO));
    if (i == 0) { // index only one document with value
      doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
    }
    writer.addDocument(doc);
  }
  writer.commit();

  // update all docs' bdv field
  writer.updateBinaryDocValue(new Term("dvUpdateKey", "dv"), "bdv", toBytes(17L));
  writer.close();

  final DirectoryReader reader = DirectoryReader.open(dir);
  AtomicReader r = reader.leaves().get(0).reader();
  BinaryDocValues bdv = r.getBinaryDocValues("bdv");
  for (int i = 0; i < r.maxDoc(); i++) {
    assertEquals(17, getValue(bdv, i));
  }

  reader.close();
  dir.close();
}
Project: search    File: TestBinaryDocValuesUpdates.java
public void testDifferentDVFormatPerField() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setCodec(new Lucene410Codec() {
    @Override
    public DocValuesFormat getDocValuesFormatForField(String field) {
      return new Lucene410DocValuesFormat();
    }
  });
  IndexWriter writer = new IndexWriter(dir, conf);

  Document doc = new Document();
  doc.add(new StringField("key", "doc", Store.NO));
  doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
  doc.add(new SortedDocValuesField("sorted", new BytesRef("value")));
  writer.addDocument(doc); // flushed document
  writer.commit();
  writer.addDocument(doc); // in-memory document

  writer.updateBinaryDocValue(new Term("key", "doc"), "bdv", toBytes(17L));
  writer.close();

  final DirectoryReader reader = DirectoryReader.open(dir);

  AtomicReader r = SlowCompositeReaderWrapper.wrap(reader);
  BinaryDocValues bdv = r.getBinaryDocValues("bdv");
  SortedDocValues sdv = r.getSortedDocValues("sorted");
  for (int i = 0; i < r.maxDoc(); i++) {
    assertEquals(17, getValue(bdv, i));
    BytesRef term = sdv.get(i);
    assertEquals(new BytesRef("value"), term);
  }

  reader.close();
  dir.close();
}
Project: search    File: TestBinaryDocValuesUpdates.java
public void testUpdateDocumentByMultipleTerms() throws Exception {
  // make sure the order of updates is respected, even when multiple terms affect same document
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);

  Document doc = new Document();
  doc.add(new StringField("k1", "v1", Store.NO));
  doc.add(new StringField("k2", "v2", Store.NO));
  doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
  writer.addDocument(doc); // flushed document
  writer.commit();
  writer.addDocument(doc); // in-memory document

  writer.updateBinaryDocValue(new Term("k1", "v1"), "bdv", toBytes(17L));
  writer.updateBinaryDocValue(new Term("k2", "v2"), "bdv", toBytes(3L));
  writer.close();

  final DirectoryReader reader = DirectoryReader.open(dir);
  final AtomicReader r = SlowCompositeReaderWrapper.wrap(reader);
  BinaryDocValues bdv = r.getBinaryDocValues("bdv");
  for (int i = 0; i < r.maxDoc(); i++) {
    assertEquals(3, getValue(bdv, i));
  }
  reader.close();
  dir.close();
}
Project: search    File: TestBinaryDocValuesUpdates.java
public void testUpdateSegmentWithPostingButNoDocValues() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // prevent merges, otherwise by the time updates are applied
  // (writer.close()), the segments might have merged and that update becomes
  // legit.
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  IndexWriter writer = new IndexWriter(dir, conf);

  // first segment with BDV
  Document doc = new Document();
  doc.add(new StringField("id", "doc0", Store.NO));
  doc.add(new StringField("bdv", "mock-value", Store.NO));
  doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
  writer.addDocument(doc);
  writer.commit();

  // second segment with no BDV
  doc = new Document();
  doc.add(new StringField("id", "doc1", Store.NO));
  doc.add(new StringField("bdv", "mock-value", Store.NO));
  writer.addDocument(doc);
  writer.commit();

  // update document in the second segment
  writer.updateBinaryDocValue(new Term("id", "doc1"), "bdv", toBytes(5L));
  writer.close();

  DirectoryReader reader = DirectoryReader.open(dir);
  for (AtomicReaderContext context : reader.leaves()) {
    AtomicReader r = context.reader();
    BinaryDocValues bdv = r.getBinaryDocValues("bdv");
    for (int i = 0; i < r.maxDoc(); i++) {
      assertEquals(5L, getValue(bdv, i));
    }
  }
  reader.close();

  dir.close();
}
Project: search    File: TestBinaryDocValuesUpdates.java
public void testUpdateOldSegments() throws Exception {
  Codec[] oldCodecs = new Codec[] { new Lucene40RWCodec(), new Lucene41RWCodec(), new Lucene42RWCodec(), new Lucene45RWCodec() };
  Directory dir = newDirectory();

  boolean oldValue = OLD_FORMAT_IMPERSONATION_IS_ACTIVE;
  // create a segment with an old Codec
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setCodec(oldCodecs[random().nextInt(oldCodecs.length)]);
  OLD_FORMAT_IMPERSONATION_IS_ACTIVE = true;
  IndexWriter writer = new IndexWriter(dir, conf);
  Document doc = new Document();
  doc.add(new StringField("id", "doc", Store.NO));
  doc.add(new BinaryDocValuesField("f", toBytes(5L)));
  writer.addDocument(doc);
  writer.close();

  conf = newIndexWriterConfig(new MockAnalyzer(random()));
  writer = new IndexWriter(dir, conf);
  writer.updateBinaryDocValue(new Term("id", "doc"), "f", toBytes(4L));
  OLD_FORMAT_IMPERSONATION_IS_ACTIVE = false;
  try {
    writer.close();
    fail("should not have succeeded to update a segment written with an old Codec");
  } catch (UnsupportedOperationException e) {
    writer.rollback(); 
  } finally {
    OLD_FORMAT_IMPERSONATION_IS_ACTIVE = oldValue;
  }

  dir.close();
}
Project: search    File: TestBinaryDocValuesUpdates.java
public void testDeleteUnusedUpdatesFiles() throws Exception {
  Directory dir = newDirectory();
  // test explicitly needs files to always be actually deleted
  if (dir instanceof MockDirectoryWrapper) {
    ((MockDirectoryWrapper)dir).setEnableVirusScanner(false);
  }
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);

  Document doc = new Document();
  doc.add(new StringField("id", "d0", Store.NO));
  doc.add(new BinaryDocValuesField("f1", toBytes(1L)));
  doc.add(new BinaryDocValuesField("f2", toBytes(1L)));
  writer.addDocument(doc);

  // update each field twice to make sure all unneeded files are deleted
  for (String f : new String[] { "f1", "f2" }) {
    writer.updateBinaryDocValue(new Term("id", "d0"), f, toBytes(2L));
    writer.commit();
    int numFiles = dir.listAll().length;

    // update again, number of files shouldn't change (old field's gen is
    // removed) 
    writer.updateBinaryDocValue(new Term("id", "d0"), f, toBytes(3L));
    writer.commit();

    assertEquals(numFiles, dir.listAll().length);
  }

  writer.close();
  dir.close();
}
Project: search    File: TestDocValuesIndexing.java
public void testAddBinaryTwice() throws IOException {
  Analyzer analyzer = new MockAnalyzer(random());

  Directory directory = newDirectory();
  // we don't use RandomIndexWriter because it might add more docvalues than we expect !!!!1
  IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
  iwc.setMergePolicy(newLogMergePolicy());
  IndexWriter iwriter = new IndexWriter(directory, iwc);
  Document doc = new Document();
  doc.add(new BinaryDocValuesField("dv", new BytesRef("foo!")));
  iwriter.addDocument(doc);

  doc.add(new BinaryDocValuesField("dv", new BytesRef("bar!")));
  try {
    iwriter.addDocument(doc);
    fail("didn't hit expected exception");
  } catch (IllegalArgumentException expected) {
    // expected
  }

  IndexReader ir = iwriter.getReader();
  assertEquals(1, ir.numDocs());
  ir.close();

  iwriter.close();
  directory.close();
}