Java Code Examples for Class org.apache.lucene.document.LongPoint

Project: Java-Data-Science-Cookbook    File: IndexFiles.java
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
    try (InputStream stream = Files.newInputStream(file)) {
        Document doc = new Document();
        Field pathField = new StringField("path", file.toString(), Field.Store.YES);
        doc.add(pathField);
        doc.add(new LongPoint("modified", lastModified));
        doc.add(new TextField("contents", new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))));

        if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
            System.out.println("adding " + file);
            writer.addDocument(doc);
        } else {
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", file.toString()), doc);
        }
    }
}
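
A minimal sketch (not part of the project above) of how the "modified" LongPoint indexed by indexDoc can later be searched; the helper name and time bounds are illustrative assumptions:

static int countModifiedSince(IndexReader reader, long sinceMillis) throws IOException {
    IndexSearcher searcher = new IndexSearcher(reader);
    // Match documents whose "modified" value lies in [sinceMillis, now].
    Query query = LongPoint.newRangeQuery("modified", sinceMillis, System.currentTimeMillis());
    return searcher.count(query);
}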
Project: elasticsearch_my    File: NumberFieldMapper.java
@Override
Query termsQuery(String field, List<Object> values) {
    long[] v = new long[values.size()];
    int upTo = 0;

    for (int i = 0; i < values.size(); i++) {
        Object value = values.get(i);
        if (!hasDecimalPart(value)) {
            v[upTo++] = parse(value, true);
        }
    }

    if (upTo == 0) {
        return Queries.newMatchNoDocsQuery("All values have a decimal part");
    }
    if (upTo != v.length) {
        v = Arrays.copyOf(v, upTo);
    }
    return LongPoint.newSetQuery(field, v);
}
Project: elasticsearch_my    File: NumberFieldMapper.java
@Override
FieldStats.Long stats(IndexReader reader, String fieldName,
                      boolean isSearchable, boolean isAggregatable) throws IOException {
    FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
    if (fi == null) {
        return null;
    }
    long size = PointValues.size(reader, fieldName);
    if (size == 0) {
        return new FieldStats.Long(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
    }
    int docCount = PointValues.getDocCount(reader, fieldName);
    byte[] min = PointValues.getMinPackedValue(reader, fieldName);
    byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
    return new FieldStats.Long(reader.maxDoc(), docCount, -1L, size,
        isSearchable, isAggregatable,
        LongPoint.decodeDimension(min, 0), LongPoint.decodeDimension(max, 0));
}
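
decodeDimension, used above to read the segment-level min and max, is the inverse of the packed byte encoding that LongPoint writes into the points index; a round-trip illustration:

byte[] packed = new byte[Long.BYTES];                 // LongPoint packs each dimension into 8 bytes
LongPoint.encodeDimension(1234L, packed, 0);
long decoded = LongPoint.decodeDimension(packed, 0);  // decoded == 1234L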
Project: elasticsearch_my    File: DateFieldMapper.java
@Override
public FieldStats.Date stats(IndexReader reader) throws IOException {
    String field = name();
    FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
    if (fi == null) {
        return null;
    }
    long size = PointValues.size(reader, field);
    if (size == 0) {
        return new FieldStats.Date(reader.maxDoc(), 0, -1, -1, isSearchable(), isAggregatable());
    }
    int docCount = PointValues.getDocCount(reader, field);
    byte[] min = PointValues.getMinPackedValue(reader, field);
    byte[] max = PointValues.getMaxPackedValue(reader, field);
    return new FieldStats.Date(reader.maxDoc(), docCount, -1L, size,
        isSearchable(), isAggregatable(),
        dateTimeFormatter(), LongPoint.decodeDimension(min, 0), LongPoint.decodeDimension(max, 0));
}
Project: elasticsearch_my    File: SeqNoFieldMapper.java
@Override
public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower,
                        boolean includeUpper, QueryShardContext context) {
    long l = Long.MIN_VALUE;
    long u = Long.MAX_VALUE;
    if (lowerTerm != null) {
        l = parse(lowerTerm);
        if (includeLower == false) {
            if (l == Long.MAX_VALUE) {
                return new MatchNoDocsQuery();
            }
            ++l;
        }
    }
    if (upperTerm != null) {
        u = parse(upperTerm);
        if (includeUpper == false) {
            if (u == Long.MIN_VALUE) {
                return new MatchNoDocsQuery();
            }
            --u;
        }
    }
    return LongPoint.newRangeQuery(name(), l, u);
}
Project: elasticsearch_my    File: DateHistogramAggregatorTests.java
public void testIntervalYear() throws IOException {
    testBothCases(LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2015-01-01"), asLong("2017-12-31")), dataset,
            aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD),
            histogram -> {
                List<Histogram.Bucket> buckets = histogram.getBuckets();
                assertEquals(3, buckets.size());

                Histogram.Bucket bucket = buckets.get(0);
                assertEquals("2015-01-01T00:00:00.000Z", bucket.getKeyAsString());
                assertEquals(3, bucket.getDocCount());

                bucket = buckets.get(1);
                assertEquals("2016-01-01T00:00:00.000Z", bucket.getKeyAsString());
                assertEquals(1, bucket.getDocCount());

                bucket = buckets.get(2);
                assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString());
                assertEquals(1, bucket.getDocCount());
            }
    );
}
Project: elasticsearch_my    File: DateFieldTypeTests.java
public void testTermQuery() {
    Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).build();
    QueryShardContext context = new QueryShardContext(0,
            new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(),
                    indexSettings),
            null, null, null, null, null, xContentRegistry(), null, null, () -> nowInMillis);
    MappedFieldType ft = createDefaultFieldType();
    ft.setName("field");
    String date = "2015-10-12T14:10:55";
    long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(date).getMillis();
    ft.setIndexOptions(IndexOptions.DOCS);
    Query expected = new IndexOrDocValuesQuery(
            LongPoint.newRangeQuery("field", instant, instant + 999),
            SortedNumericDocValuesField.newRangeQuery("field", instant, instant + 999));
    assertEquals(expected, ft.termQuery(date, context));

    ft.setIndexOptions(IndexOptions.NONE);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> ft.termQuery(date, context));
    assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
}
Project: elasticsearch_my    File: DateFieldTypeTests.java
public void testRangeQuery() throws IOException {
    Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).build();
    QueryShardContext context = new QueryShardContext(0,
            new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings),
            null, null, null, null, null, xContentRegistry(), null, null, () -> nowInMillis);
    MappedFieldType ft = createDefaultFieldType();
    ft.setName("field");
    String date1 = "2015-10-12T14:10:55";
    String date2 = "2016-04-28T11:33:52";
    long instant1 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(date1).getMillis();
    long instant2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(date2).getMillis() + 999;
    ft.setIndexOptions(IndexOptions.DOCS);
    Query expected = new IndexOrDocValuesQuery(
            LongPoint.newRangeQuery("field", instant1, instant2),
            SortedNumericDocValuesField.newRangeQuery("field", instant1, instant2));
    assertEquals(expected,
            ft.rangeQuery(date1, date2, true, true, context).rewrite(new MultiReader()));

    ft.setIndexOptions(IndexOptions.NONE);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> ft.rangeQuery(date1, date2, true, true, context));
    assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
}
Project: meresco-lucene    File: JsonQueryConverterTest.java
@Test
public void testLongRangeQuery() {
    JsonObject json = Json.createObjectBuilder()
            .add("query", Json.createObjectBuilder()
                .add("type", "RangeQuery")
                .add("rangeType", "Long")
                .add("field", "field")
                .add("lowerTerm", 1L)
                .add("upperTerm", 5L)
                .add("includeLower", JsonValue.FALSE)
                .add("includeUpper", JsonValue.TRUE))
            .build();
    QueryData q = new QueryData(new StringReader(json.toString()), queryConverter);
    Query query = LongPoint.newRangeQuery("field", 2L, 5L);
    assertEquals(query, q.query);
}
Project: meresco-lucene    File: JsonQueryConverterTest.java
@Test
public void testLongRangeQueryWithNoBounds() {
    JsonObject json = Json.createObjectBuilder()
            .add("query", Json.createObjectBuilder()
                .add("type", "RangeQuery")
                .add("rangeType", "Long")
                .add("field", "field")
                .add("lowerTerm", JsonValue.NULL)
                .add("upperTerm", JsonValue.NULL)
                .add("includeLower", JsonValue.FALSE)
                .add("includeUpper", JsonValue.TRUE))
            .build();
    QueryData q = new QueryData(new StringReader(json.toString()), queryConverter);
    Query query = LongPoint.newRangeQuery("field", Long.MIN_VALUE + 1, Long.MAX_VALUE);
    assertEquals(query, q.query);
}
Project: elasticsearch_my    File: PercolatorFieldMapperTests.java
public void testCreateCandidateQuery() throws Exception {
    addQueryMapping();

    MemoryIndex memoryIndex = new MemoryIndex(false);
    memoryIndex.addField("field1", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer());
    memoryIndex.addField("field2", "some more text", new WhitespaceAnalyzer());
    memoryIndex.addField("_field3", "unhide me", new WhitespaceAnalyzer());
    memoryIndex.addField("field4", "123", new WhitespaceAnalyzer());
    memoryIndex.addField(new LongPoint("number_field", 10L), new WhitespaceAnalyzer());

    IndexReader indexReader = memoryIndex.createSearcher().getIndexReader();

    BooleanQuery candidateQuery = (BooleanQuery) fieldType.createCandidateQuery(indexReader);
    assertEquals(2, candidateQuery.clauses().size());
    assertEquals(Occur.SHOULD, candidateQuery.clauses().get(0).getOccur());
    TermInSetQuery termsQuery = (TermInSetQuery) candidateQuery.clauses().get(0).getQuery();

    PrefixCodedTerms terms = termsQuery.getTermData();
    assertThat(terms.size(), equalTo(14L));
    PrefixCodedTerms.TermIterator termIterator = terms.iterator();
    assertTermIterator(termIterator, "_field3\u0000me", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "_field3\u0000unhide", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000brown", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000dog", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000fox", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000jumps", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000lazy", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000over", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000quick", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000the", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field2\u0000more", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field2\u0000some", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field2\u0000text", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field4\u0000123", fieldType.queryTermsField.name());

    assertEquals(Occur.SHOULD, candidateQuery.clauses().get(1).getOccur());
    assertEquals(new TermQuery(new Term(fieldType.extractionResultField.name(), EXTRACTION_FAILED)),
            candidateQuery.clauses().get(1).getQuery());
}
Project: elasticsearch_my    File: NumberFieldMapper.java
@Override
Query termQuery(String field, Object value) {
    if (hasDecimalPart(value)) {
        return Queries.newMatchNoDocsQuery("Value [" + value + "] has a decimal part");
    }
    long v = parse(value, true);
    return LongPoint.newExactQuery(field, v);
}
Project: elasticsearch_my    File: NumberFieldMapper.java
@Override
Query rangeQuery(String field, Object lowerTerm, Object upperTerm,
                 boolean includeLower, boolean includeUpper,
                 boolean hasDocValues) {
    long l = Long.MIN_VALUE;
    long u = Long.MAX_VALUE;
    if (lowerTerm != null) {
        l = parse(lowerTerm, true);
        // if the lower bound is decimal:
        // - if the bound is positive then we increment it:
        //      if lowerTerm=1.5 then the (inclusive) bound becomes 2
        // - if the bound is negative then we leave it as is:
        //      if lowerTerm=-1.5 then the (inclusive) bound becomes -1 due to the call to longValue
        boolean lowerTermHasDecimalPart = hasDecimalPart(lowerTerm);
        if ((lowerTermHasDecimalPart == false && includeLower == false) ||
                (lowerTermHasDecimalPart && signum(lowerTerm) > 0)) {
            if (l == Long.MAX_VALUE) {
                return new MatchNoDocsQuery();
            }
            ++l;
        }
    }
    if (upperTerm != null) {
        u = parse(upperTerm, true);
        boolean upperTermHasDecimalPart = hasDecimalPart(upperTerm);
        if ((upperTermHasDecimalPart == false && includeUpper == false) ||
                (upperTermHasDecimalPart && signum(upperTerm) < 0)) {
            if (u == Long.MIN_VALUE) {
                return new MatchNoDocsQuery();
            }
            --u;
        }
    }
    Query query = LongPoint.newRangeQuery(field, l, u);
    if (hasDocValues) {
        Query dvQuery = SortedNumericDocValuesField.newRangeQuery(field, l, u);
        query = new IndexOrDocValuesQuery(query, dvQuery);
    }
    return query;
}
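
The sign-dependent adjustments above follow from the fact that casting a double to long truncates toward zero. A standalone sketch of the same bound arithmetic for inclusive decimal bounds (the method is illustrative, not project API):

static Query longRange(String field, double lower, double upper) {
    long l = (long) lower;   // truncates toward zero: 1.5 -> 1, -1.5 -> -1
    if (lower > l) {         // positive decimal part, e.g. 1.5
        ++l;                 // smallest long >= 1.5 is 2
    }
    long u = (long) upper;
    if (upper < u) {         // negative decimal part, e.g. -1.5
        --u;                 // largest long <= -1.5 is -2
    }
    return LongPoint.newRangeQuery(field, l, u);
}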
Project: elasticsearch_my    File: NumberFieldMapper.java
@Override
public List<Field> createFields(String name, Number value,
                                boolean indexed, boolean docValued, boolean stored) {
    List<Field> fields = new ArrayList<>();
    if (indexed) {
        fields.add(new LongPoint(name, value.longValue()));
    }
    if (docValued) {
        fields.add(new SortedNumericDocValuesField(name, value.longValue()));
    }
    if (stored) {
        fields.add(new StoredField(name, value.longValue()));
    }
    return fields;
}
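
The point/doc-values/stored triple created above is the standard Lucene pattern for a numeric field that is searchable, sortable, and retrievable; a minimal illustration with a hypothetical field name:

Document doc = new Document();
long price = 42L;
doc.add(new LongPoint("price", price));                   // indexed for exact/range/set queries
doc.add(new SortedNumericDocValuesField("price", price)); // doc values for sorting and aggregations
doc.add(new StoredField("price", price));                 // stored for retrieval from search hits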
Project: elasticsearch_my    File: DateFieldMapper.java
Query innerRangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper,
        @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser, QueryShardContext context) {
    failIfNotIndexed();
    DateMathParser parser = forcedDateParser == null
            ? dateMathParser
            : forcedDateParser;
    long l, u;
    if (lowerTerm == null) {
        l = Long.MIN_VALUE;
    } else {
        l = parseToMilliseconds(lowerTerm, !includeLower, timeZone, parser, context);
        if (includeLower == false) {
            ++l;
        }
    }
    if (upperTerm == null) {
        u = Long.MAX_VALUE;
    } else {
        u = parseToMilliseconds(upperTerm, includeUpper, timeZone, parser, context);
        if (includeUpper == false) {
            --u;
        }
    }
    Query query = LongPoint.newRangeQuery(name(), l, u);
    if (hasDocValues()) {
        Query dvQuery = SortedNumericDocValuesField.newRangeQuery(name(), l, u);
        query = new IndexOrDocValuesQuery(query, dvQuery);
    }
    return query;
}
Project: elasticsearch_my    File: SeqNoFieldMapper.java
@Override
public Query termsQuery(List<?> values, @Nullable QueryShardContext context) {
    long[] v = new long[values.size()];
    for (int i = 0; i < values.size(); ++i) {
        v[i] = parse(values.get(i));
    }
    return LongPoint.newSetQuery(name(), v);
}
Project: elasticsearch_my    File: SeqNoFieldMapper.java
@Override
public FieldStats stats(IndexReader reader) throws IOException {
    String fieldName = name();
    long size = PointValues.size(reader, fieldName);
    if (size == 0) {
        return null;
    }
    int docCount = PointValues.getDocCount(reader, fieldName);
    byte[] min = PointValues.getMinPackedValue(reader, fieldName);
    byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
    return new FieldStats.Long(reader.maxDoc(), docCount, -1L, size, true, false,
            LongPoint.decodeDimension(min, 0), LongPoint.decodeDimension(max, 0));
}
Project: elasticsearch_my    File: SeqNoFieldMapper.java
@Override
public void postParse(ParseContext context) throws IOException {
    // In the case of nested docs, let's fill nested docs with seqNo=1 and
    // primaryTerm=0 so that Lucene doesn't write a Bitset for documents
    // that don't have the field. This is consistent with the default value
    // for efficiency.
    for (int i = 1; i < context.docs().size(); i++) {
        final Document doc = context.docs().get(i);
        doc.add(new LongPoint(NAME, 1));
        doc.add(new SortedNumericDocValuesField(NAME, 1L));
        doc.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0L));
    }
}
Project: elasticsearch_my    File: RangeQueryBuilderTests.java
public void testDateRangeQueryFormat() throws IOException {
    assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
    // We test with "01/01/2012" for gte and "2030" for lt
    String query = "{\n" +
            "    \"range\" : {\n" +
            "        \"" + DATE_FIELD_NAME + "\" : {\n" +
            "            \"gte\": \"01/01/2012\",\n" +
            "            \"lt\": \"2030\",\n" +
            "            \"format\": \"dd/MM/yyyy||yyyy\"\n" +
            "        }\n" +
            "    }\n" +
            "}";
    Query parsedQuery = parseQuery(query).toQuery(createShardContext());
    assertThat(parsedQuery, instanceOf(IndexOrDocValuesQuery.class));
    parsedQuery = ((IndexOrDocValuesQuery) parsedQuery).getIndexQuery();
    assertThat(parsedQuery, instanceOf(PointRangeQuery.class));

    assertEquals(LongPoint.newRangeQuery(DATE_FIELD_NAME,
            DateTime.parse("2012-01-01T00:00:00.000+00").getMillis(),
            DateTime.parse("2030-01-01T00:00:00.000+00").getMillis() - 1),
            parsedQuery);

    // Test Invalid format
    final String invalidQuery = "{\n" +
            "    \"range\" : {\n" +
            "        \"" + DATE_FIELD_NAME + "\" : {\n" +
            "            \"gte\": \"01/01/2012\",\n" +
            "            \"lt\": \"2030\",\n" +
            "            \"format\": \"yyyy\"\n" +
            "        }\n" +
            "    }\n" +
            "}";
    expectThrows(ElasticsearchParseException.class, () -> parseQuery(invalidQuery).toQuery(createShardContext()));
}
Project: elasticsearch_my    File: RangeQueryBuilderTests.java
public void testDateRangeBoundaries() throws IOException {
    assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
    String query = "{\n" +
            "    \"range\" : {\n" +
            "        \"" + DATE_FIELD_NAME + "\" : {\n" +
            "            \"gte\": \"2014-11-05||/M\",\n" +
            "            \"lte\": \"2014-12-08||/d\"\n" +
            "        }\n" +
            "    }\n" +
            "}\n";
    Query parsedQuery = parseQuery(query).toQuery(createShardContext());
    assertThat(parsedQuery, instanceOf(IndexOrDocValuesQuery.class));
    parsedQuery = ((IndexOrDocValuesQuery) parsedQuery).getIndexQuery();
    assertThat(parsedQuery, instanceOf(PointRangeQuery.class));
    assertEquals(LongPoint.newRangeQuery(DATE_FIELD_NAME,
            DateTime.parse("2014-11-01T00:00:00.000+00").getMillis(),
            DateTime.parse("2014-12-08T23:59:59.999+00").getMillis()),
            parsedQuery);

    query = "{\n" +
            "    \"range\" : {\n" +
            "        \"" + DATE_FIELD_NAME + "\" : {\n" +
            "            \"gt\": \"2014-11-05||/M\",\n" +
            "            \"lt\": \"2014-12-08||/d\"\n" +
            "        }\n" +
            "    }\n" +
            "}";
    parsedQuery = parseQuery(query).toQuery(createShardContext());
    assertThat(parsedQuery, instanceOf(IndexOrDocValuesQuery.class));
    parsedQuery = ((IndexOrDocValuesQuery) parsedQuery).getIndexQuery();
    assertThat(parsedQuery, instanceOf(PointRangeQuery.class));
    assertEquals(LongPoint.newRangeQuery(DATE_FIELD_NAME,
            DateTime.parse("2014-11-30T23:59:59.999+00").getMillis() + 1,
            DateTime.parse("2014-12-08T00:00:00.000+00").getMillis() - 1),
            parsedQuery);
}
Project: elasticsearch_my    File: ShadowEngineTests.java
private ParsedDocument testParsedDocument(String id, String type, String routing,
        ParseContext.Document document, BytesReference source, Mapping mappingsUpdate) {
    Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
    Field versionField = new NumericDocValuesField("_version", 0);
    SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
    document.add(uidField);
    document.add(versionField);
    document.add(seqID.seqNo);
    document.add(seqID.seqNoDocValue);
    document.add(seqID.primaryTerm);
    document.add(new LongPoint("point_field", 42)); // so that points report memory/disk usage
    return new ParsedDocument(versionField, seqID, id, type, routing, Arrays.asList(document), source, XContentType.JSON,
        mappingsUpdate);
}
Project: elasticsearch_my    File: ScaledFloatFieldTypeTests.java
public void testTermQuery() {
    ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
    ft.setName("scaled_float");
    ft.setScalingFactor(0.1 + randomDouble() * 100);
    double value = (randomDouble() * 2 - 1) * 10000;
    long scaledValue = Math.round(value * ft.getScalingFactor());
    assertEquals(LongPoint.newExactQuery("scaled_float", scaledValue), ft.termQuery(value, null));
}
Project: elasticsearch_my    File: ScaledFloatFieldTypeTests.java
public void testTermsQuery() {
    ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
    ft.setName("scaled_float");
    ft.setScalingFactor(0.1 + randomDouble() * 100);
    double value1 = (randomDouble() * 2 - 1) * 10000;
    long scaledValue1 = Math.round(value1 * ft.getScalingFactor());
    double value2 = (randomDouble() * 2 - 1) * 10000;
    long scaledValue2 = Math.round(value2 * ft.getScalingFactor());
    assertEquals(
            LongPoint.newSetQuery("scaled_float", scaledValue1, scaledValue2),
            ft.termsQuery(Arrays.asList(value1, value2), null));
}
Project: elasticsearch_my    File: ScaledFloatFieldTypeTests.java
public void testRangeQuery() throws IOException {
    // make sure the accuracy loss of scaled floats only occurs at index time
    // this test checks that searching scaled floats yields the same results as
    // searching doubles that are rounded to the closest representable scaled value
    ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
    ft.setName("scaled_float");
    ft.setScalingFactor(0.1 + randomDouble() * 100);
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
    final int numDocs = 1000;
    for (int i = 0; i < numDocs; ++i) {
        Document doc = new Document();
        double value = (randomDouble() * 2 - 1) * 10000;
        long scaledValue = Math.round(value * ft.getScalingFactor());
        double rounded = scaledValue / ft.getScalingFactor();
        doc.add(new LongPoint("scaled_float", scaledValue));
        doc.add(new DoublePoint("double", rounded));
        w.addDocument(doc);
    }
    final DirectoryReader reader = DirectoryReader.open(w);
    w.close();
    IndexSearcher searcher = newSearcher(reader);
    final int numQueries = 1000;
    for (int i = 0; i < numQueries; ++i) {
        Double l = randomBoolean() ? null : (randomDouble() * 2 - 1) * 10000;
        Double u = randomBoolean() ? null : (randomDouble() * 2 - 1) * 10000;
        boolean includeLower = randomBoolean();
        boolean includeUpper = randomBoolean();
        Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper, false);
        Query scaledFloatQ = ft.rangeQuery(l, u, includeLower, includeUpper, null);
        assertEquals(searcher.count(doubleQ), searcher.count(scaledFloatQ));
    }
    IOUtils.close(reader, dir);
}
Project: elasticsearch_my    File: DateFieldTypeTests.java
public void testIsFieldWithinQuery() throws IOException {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
    long instant1 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime("2015-10-12").getMillis();
    long instant2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime("2016-04-03").getMillis();
    Document doc = new Document();
    LongPoint field = new LongPoint("my_date", instant1);
    doc.add(field);
    w.addDocument(doc);
    field.setLongValue(instant2);
    w.addDocument(doc);
    DirectoryReader reader = DirectoryReader.open(w);
    DateFieldType ft = new DateFieldType();
    ft.setName("my_date");
    DateMathParser alternateFormat = new DateMathParser(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER);
    doTestIsFieldWithinQuery(ft, reader, null, null);
    doTestIsFieldWithinQuery(ft, reader, null, alternateFormat);
    doTestIsFieldWithinQuery(ft, reader, DateTimeZone.UTC, null);
    doTestIsFieldWithinQuery(ft, reader, DateTimeZone.UTC, alternateFormat);

    // Fields with no value indexed.
    DateFieldType ft2 = new DateFieldType();
    ft2.setName("my_date2");

    QueryRewriteContext context = new QueryRewriteContext(null, null, null, xContentRegistry(), null, null,
            () -> nowInMillis);
    assertEquals(Relation.DISJOINT, ft2.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", false, false, null, null, context));
    IOUtils.close(reader, w, dir);
}
Project: elasticsearch_my    File: NumberFieldTypeTests.java
public void testLongTermsQueryWithDecimalPart() {
    MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberType.LONG);
    ft.setName("field");
    ft.setIndexOptions(IndexOptions.DOCS);
    assertEquals(LongPoint.newSetQuery("field", 1), ft.termsQuery(Arrays.asList(1, 2.1), null));
    assertEquals(LongPoint.newSetQuery("field", 1), ft.termsQuery(Arrays.asList(1.0, 2.1), null));
    assertTrue(ft.termsQuery(Arrays.asList(1.1, 2.1), null) instanceof MatchNoDocsQuery);
}
Project: elasticsearch_my    File: NumberFieldTypeTests.java
public void testTermQuery() {
    MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
    ft.setName("field");
    ft.setIndexOptions(IndexOptions.DOCS);
    assertEquals(LongPoint.newExactQuery("field", 42), ft.termQuery("42", null));

    ft.setIndexOptions(IndexOptions.NONE);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> ft.termQuery("42", null));
    assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
}
Project: elasticsearch_my    File: NumberFieldTypeTests.java
public void testRangeQuery() {
    MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
    ft.setName("field");
    ft.setIndexOptions(IndexOptions.DOCS);
    Query expected = new IndexOrDocValuesQuery(
            LongPoint.newRangeQuery("field", 1, 3),
            SortedNumericDocValuesField.newRangeQuery("field", 1, 3));
    assertEquals(expected, ft.rangeQuery("1", "3", true, true, null));

    ft.setIndexOptions(IndexOptions.NONE);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> ft.rangeQuery("1", "3", true, true, null));
    assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
}
Project: mediaPlayerApp    File: mediaIndexer.java
/**
 * Indexes a single document.
 *
 * @throws IOException
 * @throws TikaException
 * @throws SAXException
 */
public static void indexDoc(IndexWriter writer, Path file, TextArea results, long lastModified)
        throws IOException, SAXException, TikaException {
    AutoDetectParser parser = new AutoDetectParser();
    BodyContentHandler handler = new BodyContentHandler();
    Metadata metadata = new Metadata();
    try (InputStream stream = Files.newInputStream(file)) {
        parser.parse(stream, handler, metadata);
        Document doc = new Document();
        String[] metadataNames = metadata.names();
        for (String name : metadataNames)
            doc.add(new TextField(name, metadata.get(name), Field.Store.YES));
        doc.add(new StringField("path", file.toString(), Field.Store.YES));
        doc.add(new LongPoint("modified", lastModified));
        results.appendText("Title: " + metadata.get("title") + "\n");
        results.appendText("Artists: " + metadata.get("xmpDM:artist") + "\n");
        results.appendText("Genre: " + metadata.get("xmpDM:genre") + "\n");
        results.appendText("Year: " + metadata.get("xmpDM:releaseDate") + "\n");
        if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
            // New index, so we just add the document (no old document can
            // be there):
            results.appendText("adding " + file + "\n");
            writer.addDocument(doc);
        } else {
            // Existing index (an old copy of this document may have been
            // indexed):
            results.appendText("updating " + file);
            writer.updateDocument(new Term("path", file.toString()), doc);
        }
    }
}
Project: peppol-directory    File: PDLuceneTest.java
private static void _doIndex () throws IOException
{
  /*
   * 4. add a sample document to the index
   */
  final Document doc = new Document ();

  // We add an id field that is searchable, but doesn't trigger
  // tokenization of the content
  final Field idField = new StringField ("id", "Apache Lucene 5.0.0", Field.Store.YES);
  doc.add (idField);

  // Add the birthday of the last big Lucene release, which we don't want
  // to store but still want indexed so it can be filtered on
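  // Note that GregorianCalendar months are zero-based, so (2015, 1, 20)
  // means 20 February 2015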
  doc.add (new LongPoint ("lastVersionBirthday", new GregorianCalendar (2015, 1, 20).getTimeInMillis ()));

  // The version info content should also be searchable as tokens; that's
  // why we use a TextField. Since we pass a Reader, the content is
  // not stored!
  doc.add (new TextField ("pom",
                          new BufferedReader (new InputStreamReader (new FileInputStream (new File ("pom.xml")),
                                                                     StandardCharsets.UTF_8))));

  // Existing index
  try (final PDLucene aLucene = new PDLucene ())
  {
    aLucene.updateDocument (new Term ("id", "Apache Lucene 5.0.0"), doc);
  }
}
Project: lumongo    File: BasicStorageTest.java
private static void addDoc(IndexWriter w, String title, String uid) throws IOException {
    Document doc = new Document();
    doc.add(new TextField("title", title, Field.Store.YES));
    doc.add(new TextField("uid", uid, Field.Store.YES));
    doc.add(new StringField("uid", uid, Field.Store.YES));
    doc.add(new IntPoint("testIntField", 3));
    long date = System.currentTimeMillis();
    doc.add(new LongPoint("date", date));
    doc.add(new NumericDocValuesField("date", date));
    doc.add(new SortedSetDocValuesField("category", new BytesRef("Anything")));
    Term uidTerm = new Term("uid", uid);

    w.updateDocument(uidTerm, doc);
}
Project: meresco-lucene    File: JsonQueryConverter.java
private Query createRangeQuery(JsonObject query) {
    String field = query.getString("field");
    boolean includeLower = query.getBoolean("includeLower");
    boolean includeUpper = query.getBoolean("includeUpper");
    boolean lower = query.get("lowerTerm") != JsonValue.NULL;
    boolean upper = query.get("upperTerm") != JsonValue.NULL;
    switch (query.getString("rangeType")) {
        case "String":
            return TermRangeQuery.newStringRange(field, lower ? query.getString("lowerTerm") : null, upper ? query.getString("upperTerm") : null, includeLower, includeUpper);
        case "Int":
            Integer iLowerValue = lower ? query.getInt("lowerTerm") : Integer.MIN_VALUE;
            Integer iUpperValue = upper ? query.getInt("upperTerm") : Integer.MAX_VALUE;
            if (!includeLower && iLowerValue != null)
                iLowerValue += 1;
            if (!includeUpper && iUpperValue != null)
                iUpperValue -= 1;
            return IntPoint.newRangeQuery(field, iLowerValue, iUpperValue);
        case "Long":
            Long lLowerValue = lower ? query.getJsonNumber("lowerTerm").longValue() : Long.MIN_VALUE;
            Long lUpperValue = upper ? query.getJsonNumber("upperTerm").longValue() : Long.MAX_VALUE;
            if (!includeLower && lLowerValue != null)
                lLowerValue += 1;
            if (!includeUpper && lUpperValue != null)
                lUpperValue -= 1;
            return LongPoint.newRangeQuery(field, lLowerValue, lUpperValue);
        case "Double":
            Double dLowerValue = lower ? query.getJsonNumber("lowerTerm").doubleValue() : Double.NEGATIVE_INFINITY;
            Double dUpperValue = upper ? query.getJsonNumber("upperTerm").doubleValue() : Double.POSITIVE_INFINITY;
            if (!includeLower && dLowerValue != null)
                dLowerValue = Math.nextUp(dLowerValue);
            if (!includeUpper && dUpperValue != null)
                dUpperValue = Math.nextDown(dUpperValue);
            return DoublePoint.newRangeQuery(field, dLowerValue, dUpperValue);
    }
    return null;
}
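
The exclusive-bound idioms above in one place (field names are hypothetical): integral bounds are tightened by one, while double bounds step to the adjacent representable value via Math.nextUp/Math.nextDown:

Query countGt5 = IntPoint.newRangeQuery("count", 5 + 1, Integer.MAX_VALUE);        // count > 5
Query scoreGt5 = DoublePoint.newRangeQuery("score", Math.nextUp(5.0),
        Double.POSITIVE_INFINITY);                                                 // score > 5.0
Query scoreLt5 = DoublePoint.newRangeQuery("score", Double.NEGATIVE_INFINITY,
        Math.nextDown(5.0));                                                       // score < 5.0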
Project: meresco-lucene    File: DocumentStringToDocumentTest.java
@Test
public void testLongField() {
    JsonArray json = Json.createArrayBuilder()
            .add(Json.createObjectBuilder()
                .add("type", "LongField")
                .add("name", "name")
                .add("value", 1))
            .build();
    Document result = convert(json.toString());
    assertEquals(new LongPoint("name", 1).fieldType(), result.getField("name").fieldType());
    assertEquals(1L, result.getField("name").numericValue().longValue());
}
Project: elasticsearch_my    File: CandidateQueryTests.java
public void testDuelSpecificQueries() throws Exception {
    List<ParseContext.Document> documents = new ArrayList<>();

    CommonTermsQuery commonTermsQuery = new CommonTermsQuery(BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, 128);
    commonTermsQuery.add(new Term("field", "quick"));
    commonTermsQuery.add(new Term("field", "brown"));
    commonTermsQuery.add(new Term("field", "fox"));
    addQuery(commonTermsQuery, documents);

    BlendedTermQuery blendedTermQuery = BlendedTermQuery.booleanBlendedQuery(new Term[]{new Term("field", "quick"),
            new Term("field", "brown"), new Term("field", "fox")}, false);
    addQuery(blendedTermQuery, documents);

    SpanNearQuery spanNearQuery = new SpanNearQuery.Builder("field", true)
            .addClause(new SpanTermQuery(new Term("field", "quick")))
            .addClause(new SpanTermQuery(new Term("field", "brown")))
            .addClause(new SpanTermQuery(new Term("field", "fox")))
            .build();
    addQuery(spanNearQuery, documents);

    SpanNearQuery spanNearQuery2 = new SpanNearQuery.Builder("field", true)
            .addClause(new SpanTermQuery(new Term("field", "the")))
            .addClause(new SpanTermQuery(new Term("field", "lazy")))
            .addClause(new SpanTermQuery(new Term("field", "doc")))
            .build();
    SpanOrQuery spanOrQuery = new SpanOrQuery(
            spanNearQuery,
            spanNearQuery2
    );
    addQuery(spanOrQuery, documents);

    SpanNotQuery spanNotQuery = new SpanNotQuery(spanNearQuery, spanNearQuery);
    addQuery(spanNotQuery, documents);

    long lowerLong = randomIntBetween(0, 256);
    long upperLong = lowerLong + randomIntBetween(0, 32);
    addQuery(LongPoint.newRangeQuery("long_field", lowerLong, upperLong), documents);

    indexWriter.addDocuments(documents);
    indexWriter.close();
    directoryReader = DirectoryReader.open(directory);
    IndexSearcher shardSearcher = newSearcher(directoryReader);
    // Disable query cache, because ControlQuery cannot be cached...
    shardSearcher.setQueryCache(null);

    Document document = new Document();
    document.add(new TextField("field", "the quick brown fox jumps over the lazy dog", Field.Store.NO));
    long randomLong = randomIntBetween((int) lowerLong, (int) upperLong);
    document.add(new LongPoint("long_field", randomLong));
    MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
    duelRun(queryStore, memoryIndex, shardSearcher);
}
Project: elasticsearch_my    File: DateFieldMapper.java
@Override
public Relation isFieldWithinQuery(IndexReader reader,
        Object from, Object to, boolean includeLower, boolean includeUpper,
        DateTimeZone timeZone, DateMathParser dateParser, QueryRewriteContext context) throws IOException {
    if (dateParser == null) {
        dateParser = this.dateMathParser;
    }

    long fromInclusive = Long.MIN_VALUE;
    if (from != null) {
        fromInclusive = parseToMilliseconds(from, !includeLower, timeZone, dateParser, context);
        if (includeLower == false) {
            if (fromInclusive == Long.MAX_VALUE) {
                return Relation.DISJOINT;
            }
            ++fromInclusive;
        }
    }

    long toInclusive = Long.MAX_VALUE;
    if (to != null) {
        toInclusive = parseToMilliseconds(to, includeUpper, timeZone, dateParser, context);
        if (includeUpper == false) {
            if (toInclusive == Long.MIN_VALUE) {
                return Relation.DISJOINT;
            }
            --toInclusive;
        }
    }

    // This check needs to be done after fromInclusive and toInclusive
    // are resolved so we can throw an exception if they are invalid
    // even if there are no points in the shard
    if (PointValues.size(reader, name()) == 0) {
        // no points, so nothing matches
        return Relation.DISJOINT;
    }

    long minValue = LongPoint.decodeDimension(PointValues.getMinPackedValue(reader, name()), 0);
    long maxValue = LongPoint.decodeDimension(PointValues.getMaxPackedValue(reader, name()), 0);

    if (minValue >= fromInclusive && maxValue <= toInclusive) {
        return Relation.WITHIN;
    } else if (maxValue < fromInclusive || minValue > toInclusive) {
        return Relation.DISJOINT;
    } else {
        return Relation.INTERSECTS;
    }
}
Project: elasticsearch_my    File: DateFieldMapper.java
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
    String dateAsString;
    if (context.externalValueSet()) {
        Object dateAsObject = context.externalValue();
        if (dateAsObject == null) {
            dateAsString = null;
        } else {
            dateAsString = dateAsObject.toString();
        }
    } else {
        dateAsString = context.parser().textOrNull();
    }

    if (dateAsString == null) {
        dateAsString = fieldType().nullValueAsString();
    }

    if (dateAsString == null) {
        return;
    }

    long timestamp;
    try {
        timestamp = fieldType().parse(dateAsString);
    } catch (IllegalArgumentException e) {
        if (ignoreMalformed.value()) {
            return;
        } else {
            throw e;
        }
    }

    if (context.includeInAll(includeInAll, this)) {
        context.allEntries().addText(fieldType().name(), dateAsString, fieldType().boost());
    }

    if (fieldType().indexOptions() != IndexOptions.NONE) {
        fields.add(new LongPoint(fieldType().name(), timestamp));
    }
    if (fieldType().hasDocValues()) {
        fields.add(new SortedNumericDocValuesField(fieldType().name(), timestamp));
    }
    if (fieldType().stored()) {
        fields.add(new StoredField(fieldType().name(), timestamp));
    }
}
Project: elasticsearch_my    File: SeqNoFieldMapper.java
public static SequenceID emptySeqID() {
    return new SequenceID(new LongPoint(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO),
            new SortedNumericDocValuesField(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO),
            new NumericDocValuesField(PRIMARY_TERM_NAME, 0));
}
Project: elasticsearch_my    File: SeqNoFieldMapper.java
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
    long v = parse(value);
    return LongPoint.newExactQuery(name(), v);
}
Project: elasticsearch_my    File: DateHistogramAggregatorTests.java
private void executeTestCase(boolean reduced, Query query, List<String> dataset,
                             Consumer<DateHistogramAggregationBuilder> configure,
                             Consumer<Histogram> verify) throws IOException {

    try (Directory directory = newDirectory()) {
        try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
            Document document = new Document();
            for (String date : dataset) {
                if (frequently()) {
                    indexWriter.commit();
                }

                long instant = asLong(date);
                document.add(new SortedNumericDocValuesField(DATE_FIELD, instant));
                document.add(new LongPoint(INSTANT_FIELD, instant));
                indexWriter.addDocument(document);
                document.clear();
            }
        }

        try (IndexReader indexReader = DirectoryReader.open(directory)) {
            IndexSearcher indexSearcher = newSearcher(indexReader, true, true);

            DateHistogramAggregationBuilder aggregationBuilder = new DateHistogramAggregationBuilder("_name");
            if (configure != null) {
                configure.accept(aggregationBuilder);
            }

            DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name");
            DateFieldMapper.DateFieldType fieldType = builder.fieldType();
            fieldType.setHasDocValues(true);
            fieldType.setName(aggregationBuilder.field());

            InternalDateHistogram histogram;
            if (reduced) {
                histogram = searchAndReduce(indexSearcher, query, aggregationBuilder, fieldType);
            } else {
                histogram = search(indexSearcher, query, aggregationBuilder, fieldType);
            }
            verify.accept(histogram);
        }
    }
}
Project: elasticsearch_my    File: InternalEngineTests.java
public void testSequenceIDs() throws Exception {
    Tuple<Long, Long> seqID = getSequenceID(engine, new Engine.Get(false, newUid("1")));
    // Non-existent doc returns no seqnum and no primary term
    assertThat(seqID.v1(), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO));
    assertThat(seqID.v2(), equalTo(0L));

    // create a document
    Document document = testDocumentWithTextField();
    document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE));
    ParsedDocument doc = testParsedDocument("1", "test", null, document, B_1, null);
    engine.index(indexForDoc(doc));
    engine.refresh("test");

    seqID = getSequenceID(engine, new Engine.Get(false, newUid(doc)));
    logger.info("--> got seqID: {}", seqID);
    assertThat(seqID.v1(), equalTo(0L));
    assertThat(seqID.v2(), equalTo(0L));

    // Index the same document again
    document = testDocumentWithTextField();
    document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE));
    doc = testParsedDocument("1", "test", null, document, B_1, null);
    engine.index(indexForDoc(doc));
    engine.refresh("test");

    seqID = getSequenceID(engine, new Engine.Get(false, newUid(doc)));
    logger.info("--> got seqID: {}", seqID);
    assertThat(seqID.v1(), equalTo(1L));
    assertThat(seqID.v2(), equalTo(0L));

    // Index the same document for the third time, this time changing the primary term
    document = testDocumentWithTextField();
    document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE));
    doc = testParsedDocument("1", "test", null, document, B_1, null);
    engine.index(new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 1,
                    Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY,
                    System.nanoTime(), -1, false));
    engine.refresh("test");

    seqID = getSequenceID(engine, new Engine.Get(false, newUid(doc)));
    logger.info("--> got seqID: {}", seqID);
    assertThat(seqID.v1(), equalTo(2L));
    assertThat(seqID.v2(), equalTo(1L));

    // we can query by the _seq_no
    Engine.Searcher searchResult = engine.acquireSearcher("test");
    MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
    MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(LongPoint.newExactQuery("_seq_no", 2), 1));
    searchResult.close();
}