public MockEngineSupport(EngineConfig config, Class<? extends FilterDirectoryReader> wrapper) {
  Settings settings = config.getIndexSettings().getSettings();
  shardId = config.getShardId();
  filterCache = config.getQueryCache();
  filterCachingPolicy = config.getQueryCachingPolicy();
  final long seed = config.getIndexSettings().getValue(ESIntegTestCase.INDEX_TEST_SEED_SETTING);
  Random random = new Random(seed);
  final double ratio = WRAP_READER_RATIO.get(settings);
  boolean wrapReader = random.nextDouble() < ratio;
  if (logger.isTraceEnabled()) {
    logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]",
        this.getClass().getName(), shardId, seed, wrapReader);
  }
  mockContext = new MockContext(random, wrapReader, wrapper, settings);
  this.searcherCloseable = new SearcherCloseable();
  LuceneTestCase.closeAfterSuite(searcherCloseable); // only one suite closeable per Engine
  this.disableFlushOnClose = DISABLE_FLUSH_ON_CLOSE.get(settings);
}
public static BulkByScrollTask.Status randomStatus() {
  if (randomBoolean()) {
    return randomWorkingStatus(null);
  }
  boolean canHaveNullStatuses = randomBoolean();
  List<BulkByScrollTask.StatusOrException> statuses = IntStream.range(0, between(0, 10))
      .mapToObj(i -> {
        if (canHaveNullStatuses && LuceneTestCase.rarely()) {
          return null;
        }
        if (randomBoolean()) {
          return new BulkByScrollTask.StatusOrException(new ElasticsearchException(randomAsciiOfLength(5)));
        }
        return new BulkByScrollTask.StatusOrException(randomWorkingStatus(i));
      })
      .collect(toList());
  return new BulkByScrollTask.Status(statuses, randomBoolean() ? "test" : null);
}
void doStandardHighlights(Analyzer analyzer, IndexSearcher searcher, TopDocs hits, Query query,
    Formatter formatter, boolean expandMT) throws Exception {
  for (int i = 0; i < hits.totalHits; i++) {
    String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
    int maxNumFragmentsRequired = 2;
    String fragmentSeparator = "...";
    Scorer scorer = null;
    TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, text);
    if (mode == QUERY) {
      scorer = new QueryScorer(query);
    } else if (mode == QUERY_TERM) {
      scorer = new QueryTermScorer(query);
    }
    Highlighter highlighter = new Highlighter(formatter, scorer);
    highlighter.setTextFragmenter(frag);
    String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
        fragmentSeparator);
    if (LuceneTestCase.VERBOSE) System.out.println("\t" + result);
  }
}
@BeforeClass
public static void beforeClass() throws Exception {
  final Locale locale = LuceneTestCase.randomLocale(random());
  collator = Collator.getInstance(locale);
  collator.setStrength(Collator.IDENTICAL);
  collator.setDecomposition(Collator.NO_DECOMPOSITION);
  numDocs = 1000 * RANDOM_MULTIPLIER;
  dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    String value = TestUtil.randomUnicodeString(random());
    Field field = newStringField("field", value, Field.Store.YES);
    doc.add(field);
    iw.addDocument(doc);
  }
  splitDoc = TestUtil.randomUnicodeString(random());
  reader = iw.getReader();
  iw.close();
  searcher = newSearcher(reader);
}
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  if (!LuceneTestCase.OLD_FORMAT_IMPERSONATION_IS_ACTIVE) {
    return super.fieldsConsumer(state);
  } else {
    PostingsWriterBase docs = new Lucene40PostingsWriter(state);
    // TODO: should we make the terms index more easily
    // pluggable? Ie so that this codec would record which
    // index impl was used, and switch on loading?
    // Or... you must make a new Codec for this?
    boolean success = false;
    try {
      FieldsConsumer ret = new BlockTreeTermsWriter(state, docs, minBlockSize, maxBlockSize);
      success = true;
      return ret;
    } finally {
      if (!success) {
        docs.close();
      }
    }
  }
}
@Override
public TermVectorsReader vectorsReader(Directory directory, SegmentInfo segmentInfo,
    FieldInfos fieldInfos, IOContext context) throws IOException {
  return new Lucene3xTermVectorsReader(directory, segmentInfo, fieldInfos, context) {
    @Override
    protected boolean sortTermsByUnicode() {
      // We carefully peek into the stack trace above us: if
      // we are part of a "merge", we must sort by UTF16:
      boolean unicodeSortOrder = true;
      StackTraceElement[] trace = new Exception().getStackTrace();
      for (int i = 0; i < trace.length; i++) {
        //System.out.println(trace[i].getClassName());
        if ("merge".equals(trace[i].getMethodName())) {
          unicodeSortOrder = false;
          if (LuceneTestCase.VERBOSE) {
            System.out.println("NOTE: PreFlexRW codec: forcing legacy UTF16 vector term sort order");
          }
          break;
        }
      }
      return unicodeSortOrder;
    }
  };
}
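// A standalone sketch of the stack-peeking idiom used above: walk the current
// stack and react when a frame with a given method name is an ancestor of this
// call. The helper name calledFromMethod is hypothetical, not part of any of
// the original sources.
static boolean calledFromMethod(String methodName) {
  for (StackTraceElement frame : new Exception().getStackTrace()) {
    if (methodName.equals(frame.getMethodName())) {
      return true; // e.g. calledFromMethod("merge") inside a codec callback
    }
  }
  return false;
}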
/**
 * Given an IndexSearcher, returns a new IndexSearcher whose IndexReader
 * is a MultiReader containing the Reader of the original IndexSearcher,
 * as well as several "empty" IndexReaders -- some of which will have
 * deleted documents in them. This new IndexSearcher should
 * behave exactly the same as the original IndexSearcher.
 * @param s the searcher to wrap
 * @param edge if negative, s will be the first sub; if 0, s will be in the middle; if positive, s will be the last sub
 */
public static IndexSearcher wrapUnderlyingReader(Random random, final IndexSearcher s, final int edge)
    throws IOException {
  IndexReader r = s.getIndexReader();
  // we can't put deleted docs before the nested reader, because
  // it will throw off the docIds
  IndexReader[] readers = new IndexReader[] {
    edge < 0 ? r : emptyReaders[0],
    emptyReaders[0],
    new FCInvisibleMultiReader(edge < 0 ? emptyReaders[4] : emptyReaders[0],
        emptyReaders[0],
        0 == edge ? r : emptyReaders[0]),
    0 < edge ? emptyReaders[0] : emptyReaders[7],
    emptyReaders[0],
    new FCInvisibleMultiReader(0 < edge ? emptyReaders[0] : emptyReaders[5],
        emptyReaders[0],
        0 < edge ? r : emptyReaders[0])
  };
  IndexSearcher out = LuceneTestCase.newSearcher(new FCInvisibleMultiReader(readers));
  out.setSimilarity(s.getSimilarity());
  return out;
}
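// A minimal sketch of exercising wrapUnderlyingReader across all three edge
// placements; assumes a `searcher` and `query` already exist in the test, and
// that this method lives on QueryUtils as in Lucene's test framework. Per the
// javadoc above, the wrapped searcher should return the same hits no matter
// where the real reader sits.
TopDocs expected = searcher.search(query, 10);
for (int edge : new int[] { -1, 0, 1 }) {
  IndexSearcher wrapped = QueryUtils.wrapUnderlyingReader(random, searcher, edge);
  assertEquals(expected.totalHits, wrapped.search(query, 10).totalHits);
}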
/**
 * Tests that a query matches an expected set of documents using Hits.
 *
 * <p>
 * Note that when using the Hits API, documents will only be returned
 * if they have a positive normalized score.
 * </p>
 * @param query the query to test
 * @param searcher the searcher to test the query against
 * @param defaultFieldName used for displaying the query in assertion messages
 * @param results a list of documentIds that must match the query
 * @see #checkHitCollector
 */
public static void checkHits(Random random, Query query, String defaultFieldName,
    IndexSearcher searcher, int[] results) throws IOException {
  ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
  Set<Integer> correct = new TreeSet<>();
  for (int i = 0; i < results.length; i++) {
    correct.add(Integer.valueOf(results[i]));
  }
  Set<Integer> actual = new TreeSet<>();
  for (int i = 0; i < hits.length; i++) {
    actual.add(Integer.valueOf(hits[i].doc));
  }
  Assert.assertEquals(query.toString(defaultFieldName), correct, actual);
  QueryUtils.check(random, query, searcher, LuceneTestCase.rarely(random));
}
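// A hypothetical invocation of checkHits: assert that a TermQuery matches
// exactly documents 0, 2, and 5. The field name and doc ids are illustrative;
// in Lucene's test framework this method lives on CheckHits.
CheckHits.checkHits(random(), new TermQuery(new Term("field", "foo")),
    "field", searcher, new int[] { 0, 2, 5 });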
/** create a RandomIndexWriter with the provided config */
public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c) throws IOException {
  // TODO: this should be solved in a different way; Random should not be shared (!).
  this.r = new Random(r.nextLong());
  w = mockIndexWriter(dir, c, r);
  flushAt = TestUtil.nextInt(r, 10, 1000);
  codec = w.getConfig().getCodec();
  if (LuceneTestCase.VERBOSE) {
    System.out.println("RIW dir=" + dir + " config=" + w.getConfig());
    System.out.println("codec default=" + codec.getName());
  }
  // Make sure we sometimes test indices that don't get
  // any forced merges:
  doRandomForceMerge = !(c.getMergePolicy() instanceof NoMergePolicy) && r.nextBoolean();
}
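// A hypothetical call site for the constructor above, inside a LuceneTestCase
// subclass; newDirectory, newIndexWriterConfig, and MockAnalyzer come from the
// test framework, and the exact config is illustrative only.
Directory dir = newDirectory();
RandomIndexWriter riw = new RandomIndexWriter(random(), dir,
    newIndexWriterConfig(new MockAnalyzer(random())));
riw.addDocument(new Document());
riw.close();
dir.close();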
@Override
public synchronized void sync(Collection<String> names) throws IOException {
  maybeYield();
  maybeThrowDeterministicException();
  if (crashed) {
    throw new IOException("cannot sync after crash");
  }
  // don't wear out our hardware so much in tests.
  if (LuceneTestCase.rarely(randomState) || mustSync()) {
    for (String name : names) {
      // randomly fail with IOE on any file
      maybeThrowIOException(name);
      in.sync(Collections.singleton(name));
      unSyncedFiles.remove(name);
    }
  } else {
    unSyncedFiles.removeAll(names);
  }
}
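// Sketch of the probabilistic gate used above: LuceneTestCase.rarely(Random)
// returns true only a small fraction of the time, so most sync() calls take
// the cheap branch and only occasionally pay for a real fsync. This is an
// illustrative restatement of the control flow, not additional library API.
if (LuceneTestCase.rarely(randomState)) {
  // rare: exercise the expensive, realistic fsync path
} else {
  // common: skip the fsync but keep the unSyncedFiles bookkeeping consistent
}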
/**
 * Sets up a RAMDirectory, and adds documents (using English.intToEnglish()) with three fields
 * (field, multiField, and noPayloadField) and analyzes them using the PayloadAnalyzer.
 * @param similarity The Similarity class to use in the Searcher
 * @param numDocs The number of docs to add
 * @return An IndexSearcher
 */
// TODO: randomize
public IndexSearcher setUp(Random random, Similarity similarity, int numDocs) throws IOException {
  Directory directory = new MockDirectoryWrapper(random, new RAMDirectory());
  PayloadAnalyzer analyzer = new PayloadAnalyzer();
  // TODO randomize this
  IndexWriter writer = new IndexWriter(directory,
      new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setSimilarity(similarity));
  // writer.infoStream = System.out;
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(new TextField(FIELD, English.intToEnglish(i), Field.Store.YES));
    doc.add(new TextField(MULTI_FIELD, English.intToEnglish(i) + " " + English.intToEnglish(i),
        Field.Store.YES));
    doc.add(new TextField(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES));
    writer.addDocument(doc);
  }
  reader = DirectoryReader.open(writer, true);
  writer.close();
  IndexSearcher searcher = LuceneTestCase.newSearcher(reader);
  searcher.setSimilarity(similarity);
  return searcher;
}
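// A hypothetical call site for the helper above (PayloadHelper in Lucene's
// test framework); DefaultSimilarity is an illustrative choice -- any
// Similarity works.
PayloadHelper helper = new PayloadHelper();
IndexSearcher searcher = helper.setUp(random(), new DefaultSimilarity(), 100);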
public void testEvilSearcherFactory() throws Exception {
  final Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  w.commit();
  final IndexReader other = DirectoryReader.open(dir);
  final SearcherFactory theEvilOne = new SearcherFactory() {
    @Override
    public IndexSearcher newSearcher(IndexReader ignored) {
      return LuceneTestCase.newSearcher(other);
    }
  };
  try {
    new SearcherManager(w.w, false, theEvilOne);
    fail("didn't hit expected exception");
  } catch (IllegalStateException ise) {
    // expected
  }
  w.close();
  other.close();
  dir.close();
}
protected void before() throws Throwable {
  if (!isPropertyEmpty(SysGlobals.SYSPROP_TESTFILTER())
      || !isPropertyEmpty(SysGlobals.SYSPROP_TESTCLASS())
      || !isPropertyEmpty(SysGlobals.SYSPROP_TESTMETHOD())
      || !isPropertyEmpty(SysGlobals.SYSPROP_ITERATIONS())) {
    // We're running with a complex test filter that is properly handled by classes
    // which are executed by RandomizedRunner. The "outer" classes testing LuceneTestCase
    // itself are executed by the default JUnit runner and would always be executed.
    // We thus always skip execution if any filtering is detected.
    Assume.assumeTrue(false);
  }
  // Check for zombie threads from previous suites. Don't run if zombies are around.
  RandomizedTest.assumeFalse(RandomizedRunner.hasZombieThreads());
  TestRuleIgnoreAfterMaxFailures newRule = new TestRuleIgnoreAfterMaxFailures(Integer.MAX_VALUE);
  prevRule = LuceneTestCase.replaceMaxFailureRule(newRule);
  RandomizedTest.assumeFalse(FailureMarker.hadFailures());
}
public void testNativeFSLockFactory() throws IOException {
  NativeFSLockFactory f = new NativeFSLockFactory(createTempDir(LuceneTestCase.getTestClass().getSimpleName()));
  f.setLockPrefix("test");
  Lock l = f.makeLock("commit");
  Lock l2 = f.makeLock("commit");
  assertTrue("failed to obtain lock", l.obtain());
  assertTrue("succeeded in obtaining lock twice", !l2.obtain());
  l.close();
  assertTrue("failed to obtain 2nd lock after first one was freed", l2.obtain());
  l2.close();
  // Make sure we can obtain first one again, test isLocked():
  assertTrue("failed to obtain lock", l.obtain());
  assertTrue(l.isLocked());
  assertTrue(l2.isLocked());
  l.close();
  assertFalse(l.isLocked());
  assertFalse(l2.isLocked());
}
@Test
public void testConfigSet() throws Exception {
  SolrServer server = getSolrAdmin();
  File testDir = createTempDir(LuceneTestCase.getTestClass().getSimpleName());
  File newCoreInstanceDir = new File(testDir, "newcore");
  CoreAdminRequest.Create req = new CoreAdminRequest.Create();
  req.setCoreName("corewithconfigset");
  req.setInstanceDir(newCoreInstanceDir.getAbsolutePath());
  req.setConfigSet("configset-2");
  CoreAdminResponse response = req.process(server);
  assertThat((String) response.getResponse().get("core"), is("corewithconfigset"));
  try (SolrCore core = cores.getCore("corewithconfigset")) {
    assertThat(core, is(notNullValue()));
  }
}
@Override
public Directory create(String path, DirContext dirContext) throws IOException {
  Directory dir = LuceneTestCase.newFSDirectory(new File(path));
  // we can't currently do this check because of how
  // Solr has to reboot a new Directory sometimes when replicating
  // or rolling back - the old directory is closed and the following
  // test assumes it can open an IndexWriter when that happens - we
  // have a new Directory for the same dir and still an open IW at
  // this point
  Directory cdir = reduce(dir);
  cdir = reduce(cdir);
  cdir = reduce(cdir);
  if (cdir instanceof MockDirectoryWrapper) {
    ((MockDirectoryWrapper) cdir).setAssertNoUnrefencedFilesOnClose(false);
    ((MockDirectoryWrapper) cdir).setPreventDoubleWrite(false);
    ((MockDirectoryWrapper) cdir).setEnableVirusScanner(false);
  }
  return dir;
}
public void test() throws Exception {
  File tmpdir = createTempDir(LuceneTestCase.getTestClass().getSimpleName());
  createFile(tmpdir, "a.txt", "a line one\na line two\na line three".getBytes(StandardCharsets.UTF_8), false);
  createFile(tmpdir, "b.txt", "b line one\nb line two".getBytes(StandardCharsets.UTF_8), false);
  createFile(tmpdir, "c.txt", "c line one\nc line two\nc line three\nc line four".getBytes(StandardCharsets.UTF_8), false);
  String config = generateConfig(tmpdir);
  LocalSolrQueryRequest request = lrf.makeRequest(
      "command", "full-import",
      "dataConfig", config,
      "clean", "true",
      "commit", "true",
      "synchronous", "true",
      "indent", "true");
  h.query("/dataimport", request);
  assertQ(req("*:*"), "//*[@numFound='9']");
  assertQ(req("id:?\\ line\\ one"), "//*[@numFound='3']");
  assertQ(req("id:a\\ line*"), "//*[@numFound='3']");
  assertQ(req("id:b\\ line*"), "//*[@numFound='2']");
  assertQ(req("id:c\\ line*"), "//*[@numFound='4']");
}
void doStandardHighlights(Analyzer analyzer, IndexSearcher searcher, TopDocs hits, Query query,
    Formatter formatter, boolean expandMT) throws Exception {
  for (int i = 0; i < hits.totalHits; i++) {
    String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
    int maxNumFragmentsRequired = 2;
    String fragmentSeparator = "...";
    Scorer scorer = null;
    TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
    if (mode == QUERY) {
      scorer = new QueryScorer(query);
    } else if (mode == QUERY_TERM) {
      scorer = new QueryTermScorer(query);
    }
    Highlighter highlighter = new Highlighter(formatter, scorer);
    highlighter.setTextFragmenter(frag);
    String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
        fragmentSeparator);
    if (LuceneTestCase.VERBOSE) System.out.println("\t" + result);
  }
}
@BeforeClass
public static void beforeClass() throws Exception {
  final Locale locale = LuceneTestCase.randomLocale(random());
  collator = Collator.getInstance(locale);
  collator.setStrength(Collator.IDENTICAL);
  collator.setDecomposition(Collator.NO_DECOMPOSITION);
  numDocs = 1000 * RANDOM_MULTIPLIER;
  dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    String value = _TestUtil.randomUnicodeString(random());
    Field field = newStringField("field", value, Field.Store.YES);
    doc.add(field);
    iw.addDocument(doc);
  }
  splitDoc = _TestUtil.randomUnicodeString(random());
  reader = iw.getReader();
  iw.close();
  searcher = newSearcher(reader);
}
/**
 * Tests that a query matches an expected set of documents using Hits.
 *
 * <p>
 * Note that when using the Hits API, documents will only be returned
 * if they have a positive normalized score.
 * </p>
 * @param query the query to test
 * @param searcher the searcher to test the query against
 * @param defaultFieldName used for displaying the query in assertion messages
 * @param results a list of documentIds that must match the query
 * @see #checkHitCollector
 */
public static void checkHits(Random random, Query query, String defaultFieldName,
    IndexSearcher searcher, int[] results) throws IOException {
  ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
  Set<Integer> correct = new TreeSet<Integer>();
  for (int i = 0; i < results.length; i++) {
    correct.add(Integer.valueOf(results[i]));
  }
  Set<Integer> actual = new TreeSet<Integer>();
  for (int i = 0; i < hits.length; i++) {
    actual.add(Integer.valueOf(hits[i].doc));
  }
  Assert.assertEquals(query.toString(defaultFieldName), correct, actual);
  QueryUtils.check(random, query, searcher, LuceneTestCase.rarely(random));
}
/** create a RandomIndexWriter with the provided config */
public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c) throws IOException {
  // TODO: this should be solved in a different way; Random should not be shared (!).
  this.r = new Random(r.nextLong());
  w = new MockIndexWriter(r, dir, c);
  flushAt = _TestUtil.nextInt(r, 10, 1000);
  codec = w.getConfig().getCodec();
  if (LuceneTestCase.VERBOSE) {
    System.out.println("RIW dir=" + dir + " config=" + w.getConfig());
    System.out.println("codec default=" + codec.getName());
  }
  // Make sure we sometimes test indices that don't get
  // any forced merges:
  doRandomForceMerge = r.nextBoolean();
}
public void setUp() throws Exception {
  File home = new File(LuceneTestCase.TEMP_DIR, getClass().getName() + "-" + System.currentTimeMillis());
  homeDir = new File(home, name);
  dataDir = new File(homeDir + "/collection1", "data");
  confDir = new File(homeDir + "/collection1", "conf");
  homeDir.mkdirs();
  dataDir.mkdirs();
  confDir.mkdirs();
  File f = new File(confDir, "solrconfig.xml");
  FileUtils.copyFile(SolrTestCaseJ4.getFile(getSolrConfigFile()), f);
  f = new File(confDir, "schema.xml");
  FileUtils.copyFile(SolrTestCaseJ4.getFile(getSchemaFile()), f);
}
@Override
public Directory create(String path, DirContext dirContext) throws IOException {
  Directory dir = LuceneTestCase.newFSDirectory(new File(path));
  // we can't currently do this check because of how
  // Solr has to reboot a new Directory sometimes when replicating
  // or rolling back - the old directory is closed and the following
  // test assumes it can open an IndexWriter when that happens - we
  // have a new Directory for the same dir and still an open IW at
  // this point
  Directory cdir = reduce(dir);
  cdir = reduce(cdir);
  cdir = reduce(cdir);
  if (cdir instanceof MockDirectoryWrapper) {
    ((MockDirectoryWrapper) cdir).setAssertNoUnrefencedFilesOnClose(false);
    ((MockDirectoryWrapper) cdir).setPreventDoubleWrite(false);
  }
  return dir;
}
/** create a RandomIndexWriter with the provided config */
public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c) throws IOException {
  // TODO: this should be solved in a different way; Random should not be shared (!).
  this.r = new Random(r.nextLong());
  w = mockIndexWriter(dir, c, r);
  flushAt = _TestUtil.nextInt(r, 10, 1000);
  codec = w.getConfig().getCodec();
  if (LuceneTestCase.VERBOSE) {
    System.out.println("RIW dir=" + dir + " config=" + w.getConfig());
    System.out.println("codec default=" + codec.getName());
  }
  // Make sure we sometimes test indices that don't get
  // any forced merges:
  doRandomForceMerge = !(c.getMergePolicy() instanceof NoMergePolicy) && r.nextBoolean();
}
public void testEvilSearcherFactory() throws Exception {
  final Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  w.commit();
  final IndexReader other = DirectoryReader.open(dir);
  final SearcherFactory theEvilOne = new SearcherFactory() {
    @Override
    public IndexSearcher newSearcher(IndexReader ignored) {
      return LuceneTestCase.newSearcher(other);
    }
  };
  try {
    new SearcherManager(w.w, false, theEvilOne);
    fail("didn't hit expected exception");
  } catch (IllegalStateException ise) {
    // expected
  }
  w.close();
  other.close();
  dir.close();
}
private Directory wrap(Directory dir) {
  final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, this.crashIndex);
  w.setRandomIOExceptionRate(randomIOExceptionRate);
  w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen);
  w.setThrottling(throttle);
  w.setCheckIndexOnClose(false); // we do this on the index level
  // TODO: make this test robust to virus scanner
  w.setAssertNoDeleteOpenFile(false);
  w.setUseSlowOpenClosers(false);
  LuceneTestCase.closeAfterSuite(new CloseableDirectory(w));
  return w;
}
@LuceneTestCase.AwaitsFix(bugUrl="currently can't perform phrase queries on fields that don't support positions") public void testPhraseQueryOnFieldWithNoPositions() throws Exception { List<IndexRequestBuilder> reqs = new ArrayList<>(); reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo bar", "f4", "eggplant parmesan")); reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "foo bar", "f4", "chicken parmesan")); indexRandom(true, false, reqs); SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("\"eggplant parmesan\"")).get(); assertHits(resp.getHits(), "1"); assertHitCount(resp, 1L); }
@LuceneTestCase.AwaitsFix(bugUrl="currently can't perform phrase queries on fields that don't support positions") public void testPhraseQueryOnFieldWithNoPositions() throws Exception { String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json"); prepareCreate("test").setSource(indexBody, XContentType.JSON).get(); ensureGreen("test"); List<IndexRequestBuilder> reqs = new ArrayList<>(); reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo bar", "f4", "eggplant parmesan")); reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "foo bar", "f4", "chicken parmesan")); indexRandom(true, false, reqs); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("\"eggplant parmesan\"")).get(); assertHits(resp.getHits(), "1"); assertHitCount(resp, 1L); }
private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOException {
  Directory dir = LuceneTestCase.newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
  conf.setOpenMode(OpenMode.CREATE);
  IndexWriter writer = new IndexWriter(dir, conf);
  FieldType type = new FieldType(TextField.TYPE_STORED);
  type.setStoreTermVectorOffsets(true);
  type.setStoreTermVectorPayloads(false);
  type.setStoreTermVectorPositions(true);
  type.setStoreTermVectors(true);
  type.freeze();
  Document d = new Document();
  d.add(new Field("id", "abc", StringField.TYPE_STORED));
  d.add(new Field("plaintext", "the1 quick brown fox jumps over the1 lazy dog comment", type));
  d.add(new Field("desc", "the1 quick brown fox jumps over the1 lazy dog comment", type));
  writer.updateDocument(new Term("id", "abc"), d);
  writer.commit();
  writer.close();
  DirectoryReader dr = DirectoryReader.open(dir);
  IndexSearcher s = new IndexSearcher(dr);
  TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
  ScoreDoc[] scoreDocs = search.scoreDocs;
  int doc = scoreDocs[0].doc;
  Fields termVectors = dr.getTermVectors(doc);
  EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
  outResponse.setFields(termVectors, null, flags, termVectors);
  dr.close();
  dir.close();
}