Java class org.apache.lucene.index.NoMergePolicy: example source code
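NoMergePolicy is a MergePolicy implementation that never selects segments for merging, so every flushed or committed segment is left exactly as written; tests and recovery code use it to keep the segment layout deterministic. Before the project examples below, here is a minimal self-contained sketch of the typical pattern. It assumes a Lucene 5.x/6.x classpath; the directory type and field names are illustrative, not taken from any of the projects listed here.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class NoMergePolicyExample {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new RAMDirectory()) {
            IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer())
                    .setMergePolicy(NoMergePolicy.INSTANCE); // segments are never merged
            try (IndexWriter writer = new IndexWriter(dir, config)) {
                for (int i = 0; i < 3; i++) {
                    Document doc = new Document();
                    doc.add(new StringField("id", Integer.toString(i), Field.Store.YES));
                    writer.addDocument(doc);
                    writer.commit(); // each commit leaves a separate segment on disk
                }
            }
        }
    }
}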
Project: elasticsearch_my
File: Lucene.java
/**
* This method removes all lucene files from the given directory. It will first try to delete all commit points / segments
* files to ensure that broken commits or corrupted indices will not be opened in the future. If any of the segment files
* can't be deleted, this operation fails.
*/
public static void cleanLuceneIndex(Directory directory) throws IOException {
try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
for (final String file : directory.listAll()) {
if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
directory.deleteFile(file); // remove all segments_N files
}
}
}
try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
.setMergePolicy(NoMergePolicy.INSTANCE) // no merges
.setCommitOnClose(false) // no commits
.setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append...
{
// do nothing; closing the writer kicks off IndexFileDeleter, which removes all pending files
}
}
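For context, a hedged sketch of how this helper might be invoked against an on-disk index; the shard path is purely illustrative, and the snippet assumes imports of java.nio.file.Paths and org.apache.lucene.store.FSDirectory:

// Hypothetical call site; the path below is illustrative only.
try (Directory dir = FSDirectory.open(Paths.get("/data/node0/indices/my-index/0/index"))) {
    Lucene.cleanLuceneIndex(dir); // deletes the segments_N files, then recreates an empty index
}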
Project: elasticsearch_my
File: InternalEngineTests.java
public void testSegmentsStatsIncludingFileSizes() throws Exception {
try (Store store = createStore();
Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
assertThat(engine.segmentsStats(true).getFileSizes().size(), equalTo(0));
ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
engine.index(indexForDoc(doc));
engine.refresh("test");
SegmentsStats stats = engine.segmentsStats(true);
assertThat(stats.getFileSizes().size(), greaterThan(0));
assertThat(() -> stats.getFileSizes().valuesIt(), everyItem(greaterThan(0L)));
ObjectObjectCursor<String, Long> firstEntry = stats.getFileSizes().iterator().next();
ParsedDocument doc2 = testParsedDocument("2", "test", null, testDocumentWithTextField(), B_2, null);
engine.index(indexForDoc(doc2));
engine.refresh("test");
assertThat(engine.segmentsStats(true).getFileSizes().get(firstEntry.key), greaterThan(firstEntry.value));
}
}
Project: elasticsearch_my
File: InternalEngineTests.java
public void testForceVersioningNotAllowedExceptForOlderIndices() throws Exception {
ParsedDocument doc = testParsedDocument("1", "test", null, testDocument(), B_1, null);
Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 42, VersionType.FORCE, PRIMARY, 0, -1, false);
Engine.IndexResult indexResult = engine.index(index);
assertTrue(indexResult.hasFailure());
assertThat(indexResult.getFailure(), instanceOf(IllegalArgumentException.class));
assertThat(indexResult.getFailure().getMessage(), containsString("version type [FORCE] may not be used for indices created after 6.0"));
IndexSettings oldIndexSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0_beta1)
.build());
try (Store store = createStore();
Engine engine = createEngine(oldIndexSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
index = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 84, VersionType.FORCE, PRIMARY, 0, -1, false);
Engine.IndexResult result = engine.index(index);
assertTrue(result.hasFailure());
assertThat(result.getFailure(), instanceOf(IllegalArgumentException.class));
assertThat(result.getFailure().getMessage(), containsString("version type [FORCE] may not be used for non-translog operations"));
index = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 84, VersionType.FORCE,
Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, 0, -1, false);
result = engine.index(index);
assertThat(result.getVersion(), equalTo(84L));
}
}
Project: elasticsearch_my
File: PercolateQueryTests.java
@Before
public void init() throws Exception {
directory = newDirectory();
IndexWriterConfig config = new IndexWriterConfig(new WhitespaceAnalyzer());
config.setMergePolicy(NoMergePolicy.INSTANCE);
indexWriter = new IndexWriter(directory, config);
}
Project: elasticsearch_my
File: CandidateQueryTests.java
@Before
public void init() throws Exception {
directory = newDirectory();
IndexWriterConfig config = new IndexWriterConfig(new WhitespaceAnalyzer());
config.setMergePolicy(NoMergePolicy.INSTANCE);
indexWriter = new IndexWriter(directory, config);
String indexName = "test";
IndexService indexService = createIndex(indexName, Settings.EMPTY);
mapperService = indexService.mapperService();
String mapper = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties")
.startObject("int_field").field("type", "integer").endObject()
.startObject("long_field").field("type", "long").endObject()
.startObject("half_float_field").field("type", "half_float").endObject()
.startObject("float_field").field("type", "float").endObject()
.startObject("double_field").field("type", "double").endObject()
.startObject("ip_field").field("type", "ip").endObject()
.startObject("field").field("type", "keyword").endObject()
.endObject().endObject().endObject().string();
documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, true);
String queryField = "query_field";
String mappingType = "query";
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(mappingType)
.startObject("properties").startObject(queryField).field("type", "percolator").endObject().endObject()
.endObject().endObject().string();
mapperService.merge(mappingType, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
fieldMapper = (PercolatorFieldMapper) mapperService.documentMapper(mappingType).mappers().getMapper(queryField);
fieldType = (PercolatorFieldMapper.FieldType) fieldMapper.fieldType();
queries = new ArrayList<>();
queryStore = ctx -> docId -> this.queries.get(docId);
}
Project: elasticsearch_my
File: StoreRecovery.java
void addIndices(RecoveryState.Index indexRecoveryStats, Directory target, Directory... sources) throws IOException {
target = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target);
try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(target, indexRecoveryStats),
new IndexWriterConfig(null)
.setCommitOnClose(false)
// we don't want merges to happen here - we call maybeMerge on the engine
// later once we have started it up, otherwise we would need to wait for it here
// we also don't specify a codec here; merges should use the engine's codec for this index
.setMergePolicy(NoMergePolicy.INSTANCE)
.setOpenMode(IndexWriterConfig.OpenMode.CREATE))) {
writer.addIndexes(sources);
writer.commit();
}
}
Project: elasticsearch_my
File: SearchCancellationTests.java
@BeforeClass
public static void setup() throws IOException {
dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir);
// we need at least 2 segments - so no merges should be allowed
w.w.getConfig().setMergePolicy(NoMergePolicy.INSTANCE);
w.setDoRandomForceMerge(false);
indexRandomDocuments(w, TestUtil.nextInt(random(), 2, 20));
w.flush();
indexRandomDocuments(w, TestUtil.nextInt(random(), 1, 20));
reader = w.getReader();
w.close();
}
Project: elasticsearch_my
File: QueryPhaseTests.java
private void countTestCase(boolean withDeletions) throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
final int numDocs = scaledRandomIntBetween(100, 200);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
if (randomBoolean()) {
doc.add(new StringField("foo", "bar", Store.NO));
}
if (randomBoolean()) {
doc.add(new StringField("foo", "baz", Store.NO));
}
if (withDeletions && (rarely() || i == 0)) {
doc.add(new StringField("delete", "yes", Store.NO));
}
w.addDocument(doc);
}
if (withDeletions) {
w.deleteDocuments(new Term("delete", "yes"));
}
final IndexReader reader = w.getReader();
Query matchAll = new MatchAllDocsQuery();
Query matchAllCsq = new ConstantScoreQuery(matchAll);
Query tq = new TermQuery(new Term("foo", "bar"));
Query tCsq = new ConstantScoreQuery(tq);
BooleanQuery bq = new BooleanQuery.Builder()
.add(matchAll, Occur.SHOULD)
.add(tq, Occur.MUST)
.build();
countTestCase(matchAll, reader, false);
countTestCase(matchAllCsq, reader, false);
countTestCase(tq, reader, withDeletions);
countTestCase(tCsq, reader, withDeletions);
countTestCase(bq, reader, true);
reader.close();
w.close();
dir.close();
}
Project: elasticsearch_my
File: FieldDataCacheTests.java
public void testLoadGlobal_neverCacheIfFieldIsMissing() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(null);
iwc.setMergePolicy(NoMergePolicy.INSTANCE);
IndexWriter iw = new IndexWriter(dir, iwc);
long numDocs = scaledRandomIntBetween(32, 128);
for (int i = 1; i <= numDocs; i++) {
Document doc = new Document();
doc.add(new SortedSetDocValuesField("field1", new BytesRef(String.valueOf(i))));
doc.add(new StringField("field2", String.valueOf(i), Field.Store.NO));
iw.addDocument(doc);
if (i % 24 == 0) {
iw.commit();
}
}
iw.close();
DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(dir), new ShardId("_index", "_na_", 0));
DummyAccountingFieldDataCache fieldDataCache = new DummyAccountingFieldDataCache();
// Testing SortedSetDVOrdinalsIndexFieldData:
SortedSetDVOrdinalsIndexFieldData sortedSetDVOrdinalsIndexFieldData = createSortedDV("field1", fieldDataCache);
sortedSetDVOrdinalsIndexFieldData.loadGlobal(ir);
assertThat(fieldDataCache.cachedGlobally, equalTo(1));
sortedSetDVOrdinalsIndexFieldData.loadGlobal(new FieldMaskingReader("field1", ir));
assertThat(fieldDataCache.cachedGlobally, equalTo(1));
// Testing PagedBytesIndexFieldData
PagedBytesIndexFieldData pagedBytesIndexFieldData = createPagedBytes("field2", fieldDataCache);
pagedBytesIndexFieldData.loadGlobal(ir);
assertThat(fieldDataCache.cachedGlobally, equalTo(2));
pagedBytesIndexFieldData.loadGlobal(new FieldMaskingReader("field2", ir));
assertThat(fieldDataCache.cachedGlobally, equalTo(2));
ir.close();
dir.close();
}
Project: elasticsearch_my
File: InternalEngineTests.java
public void testVerboseSegments() throws Exception {
try (Store store = createStore();
Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
List<Segment> segments = engine.segments(true);
assertThat(segments.isEmpty(), equalTo(true));
ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
engine.index(indexForDoc(doc));
engine.refresh("test");
segments = engine.segments(true);
assertThat(segments.size(), equalTo(1));
assertThat(segments.get(0).ramTree, notNullValue());
ParsedDocument doc2 = testParsedDocument("2", "test", null, testDocumentWithTextField(), B_2, null);
engine.index(indexForDoc(doc2));
engine.refresh("test");
ParsedDocument doc3 = testParsedDocument("3", "test", null, testDocumentWithTextField(), B_3, null);
engine.index(indexForDoc(doc3));
engine.refresh("test");
segments = engine.segments(true);
assertThat(segments.size(), equalTo(3));
assertThat(segments.get(0).ramTree, notNullValue());
assertThat(segments.get(1).ramTree, notNullValue());
assertThat(segments.get(2).ramTree, notNullValue());
}
}
Project: elasticsearch_my
File: InternalEngineTests.java
public void testEngineMaxTimestampIsInitialized() throws IOException {
try (Store store = createStore();
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE,
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
}
long maxTimestamp = Math.abs(randomLong());
try (Store store = createStore();
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE,
maxTimestamp, null))) {
assertEquals(maxTimestamp, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
}
}
Project: elasticsearch_my
File: ShadowEngineTests.java
public void testVerboseSegments() throws Exception {
primaryEngine.close(); // recreate without merging
primaryEngine = createInternalEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE);
List<Segment> segments = primaryEngine.segments(true);
assertThat(segments.isEmpty(), equalTo(true));
ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
primaryEngine.index(indexForDoc(doc));
primaryEngine.refresh("test");
segments = primaryEngine.segments(true);
assertThat(segments.size(), equalTo(1));
assertThat(segments.get(0).ramTree, notNullValue());
ParsedDocument doc2 = testParsedDocument("2", "test", null, testDocumentWithTextField(), B_2, null);
primaryEngine.index(indexForDoc(doc2));
primaryEngine.refresh("test");
ParsedDocument doc3 = testParsedDocument("3", "test", null, testDocumentWithTextField(), B_3, null);
primaryEngine.index(indexForDoc(doc3));
primaryEngine.refresh("test");
segments = primaryEngine.segments(true);
assertThat(segments.size(), equalTo(3));
assertThat(segments.get(0).ramTree, notNullValue());
assertThat(segments.get(1).ramTree, notNullValue());
assertThat(segments.get(2).ramTree, notNullValue());
// Now make the changes visible to the replica
primaryEngine.flush();
replicaEngine.refresh("test");
segments = replicaEngine.segments(true);
assertThat(segments.size(), equalTo(3));
assertThat(segments.get(0).ramTree, notNullValue());
assertThat(segments.get(1).ramTree, notNullValue());
assertThat(segments.get(2).ramTree, notNullValue());
}
Project: elasticsearch_my
File: ESDirectoryReaderTests.java
/** Test that core cache key (needed for NRT) is working */
public void testCoreCacheKey() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(null);
iwc.setMaxBufferedDocs(100);
iwc.setMergePolicy(NoMergePolicy.INSTANCE);
IndexWriter iw = new IndexWriter(dir, iwc);
// add two docs, id:0 and id:1
Document doc = new Document();
Field idField = new StringField("id", "", Field.Store.NO);
doc.add(idField);
idField.setStringValue("0");
iw.addDocument(doc);
idField.setStringValue("1");
iw.addDocument(doc);
// open reader
ShardId shardId = new ShardId("fake", "_na_", 1);
DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw), shardId);
assertEquals(2, ir.numDocs());
assertEquals(1, ir.leaves().size());
// delete id:0 and reopen
iw.deleteDocuments(new Term("id", "0"));
DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);
// we should have the same cache key as before
assertEquals(1, ir2.numDocs());
assertEquals(1, ir2.leaves().size());
assertSame(ir.leaves().get(0).reader().getCoreCacheKey(), ir2.leaves().get(0).reader().getCoreCacheKey());
IOUtils.close(ir, ir2, iw, dir);
}
Project: es-token-plugin
File: AllTermsTests.java
@Before
public void initSearcher() throws IOException {
dir = newDirectory();
IndexWriterConfig indexWriterConfig = newIndexWriterConfig(new WhitespaceAnalyzer());
indexWriterConfig.setMergeScheduler(NoMergeScheduler.INSTANCE);
indexWriterConfig.setMergePolicy(NoMergePolicy.INSTANCE);
w = new IndexWriter(dir, indexWriterConfig);
Document d = new Document();
d.add(new TextField(FIELD, "don't be", Field.Store.YES));
d.add(new TextField("_uid", "1", Field.Store.YES));
w.addDocument(d);
w.commit();
d = new Document();
d.add(new TextField(FIELD, "ever always forget be", Field.Store.YES));
d.add(new TextField("_uid", "2", Field.Store.YES));
w.addDocument(d);
w.commit();
d = new Document();
d.add(new TextField(FIELD, "careful careful", Field.Store.YES));
d.add(new TextField("_uid", "3", Field.Store.YES));
w.addDocument(d);
w.commit();
d = new Document();
d.add(new TextField(FIELD, "ever always careful careful don't be forget be", Field.Store.YES));
d.add(new TextField("_uid", "4", Field.Store.YES));
w.addDocument(d);
w.commit();
reader = DirectoryReader.open(w, true, true);
}
Project: sagetv
File: Wizard.java
private void reloadIndex() {
Directory diskDir;
try {
synchronized (getLock()) {
initializedIndex = false;
if(!diskIndex) {
resetIndex();
return;
}
index = new MMapDirectory(directorySnapshot, NoLockFactory.getNoLockFactory());
IndexWriterConfig iwc = new IndexWriterConfig(org.apache.lucene.util.Version.LUCENE_36,
analyzer).setOpenMode(OpenMode.CREATE_OR_APPEND);
if (diskIndexRunning) {
iwc.setRAMBufferSizeMB(INDEX_RAM_BUFFER_SIZE)
.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES) // TESTING
.setMergeScheduler(NoMergeScheduler.INSTANCE); // TESTING
}
writer = new IndexWriter(index, iwc);
initializedIndex = writer.numDocs() > 0;
reader = IndexReader.open(getWriter(), true); /* true: apply all deletes (perf hit) */
searcher = new IndexSearcher(reader);
needsReset = false;
}
} catch (IOException e) {
System.out.println("Execption reloading index from disk; assuming corrupt. e = " + e);
e.printStackTrace();
resetIndex();
}
}
Project: search
File: TestTaxonomyFacetCounts.java
public void testSegmentsWithoutCategoriesOrResults() throws Exception {
// tests the accumulator when there are segments with no results
Directory indexDir = newDirectory();
Directory taxoDir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
iwc.setMergePolicy(NoMergePolicy.INSTANCE); // prevent merges
IndexWriter indexWriter = new IndexWriter(indexDir, iwc);
TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
FacetsConfig config = new FacetsConfig();
indexTwoDocs(taxoWriter, indexWriter, config, false); // 1st segment, no content, with categories
indexTwoDocs(taxoWriter, indexWriter, null, true); // 2nd segment, with content, no categories
indexTwoDocs(taxoWriter, indexWriter, config, true); // 3rd segment ok
indexTwoDocs(taxoWriter, indexWriter, null, false); // 4th segment, no content, no categories
indexTwoDocs(taxoWriter, indexWriter, null, true); // 5th segment, with content, no categories
indexTwoDocs(taxoWriter, indexWriter, config, true); // 6th segment, with content, with categories
indexTwoDocs(taxoWriter, indexWriter, null, true); // 7th segment, with content, no categories
IOUtils.close(indexWriter, taxoWriter);
DirectoryReader indexReader = DirectoryReader.open(indexDir);
TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
IndexSearcher indexSearcher = newSearcher(indexReader);
// search for "f:a", only segments 1 and 3 should match results
Query q = new TermQuery(new Term("f", "a"));
FacetsCollector sfc = new FacetsCollector();
indexSearcher.search(q, sfc);
Facets facets = getTaxonomyFacetCounts(taxoReader, config, sfc);
FacetResult result = facets.getTopChildren(10, "A");
assertEquals("wrong number of children", 2, result.labelValues.length);
for (LabelAndValue labelValue : result.labelValues) {
assertEquals("wrong weight for child " + labelValue.label, 2, labelValue.value.intValue());
}
IOUtils.close(indexReader, taxoReader, indexDir, taxoDir);
}
Project: search
File: TestTaxonomyFacetCounts2.java
@BeforeClass
public static void beforeClassCountingFacetsAggregatorTest() throws Exception {
indexDir = newDirectory();
taxoDir = newDirectory();
// create an index which has:
// 1. Segment with no categories, but matching results
// 2. Segment w/ categories, but no results
// 3. Segment w/ categories and results
// 4. Segment w/ categories, but only some results
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
conf.setMergePolicy(NoMergePolicy.INSTANCE); // prevent merges, so we can control the index segments
IndexWriter indexWriter = new IndexWriter(indexDir, conf);
TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
allExpectedCounts = newCounts();
termExpectedCounts = newCounts();
// segment w/ no categories
indexDocsNoFacets(indexWriter);
// segment w/ categories, no content
indexDocsWithFacetsNoTerms(indexWriter, taxoWriter, allExpectedCounts);
// segment w/ categories and content
indexDocsWithFacetsAndTerms(indexWriter, taxoWriter, allExpectedCounts);
// segment w/ categories and some content
indexDocsWithFacetsAndSomeTerms(indexWriter, taxoWriter, allExpectedCounts);
IOUtils.close(indexWriter, taxoWriter);
}
Project: NYBC
File: CountingFacetsAggregatorTest.java
@BeforeClass
public static void beforeClassCountingFacetsAggregatorTest() throws Exception {
indexDir = newDirectory();
taxoDir = newDirectory();
// create an index which has:
// 1. Segment with no categories, but matching results
// 2. Segment w/ categories, but no results
// 3. Segment w/ categories and results
// 4. Segment w/ categories, but only some results
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
conf.setMergePolicy(NoMergePolicy.COMPOUND_FILES); // prevent merges, so we can control the index segments
IndexWriter indexWriter = new IndexWriter(indexDir, conf);
TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
Map<String,OrdinalPolicy> policies = new HashMap<String,CategoryListParams.OrdinalPolicy>();
policies.put(CP_B.components[0], OrdinalPolicy.ALL_PARENTS);
policies.put(CP_C.components[0], OrdinalPolicy.NO_PARENTS);
policies.put(CP_D.components[0], OrdinalPolicy.NO_PARENTS);
CategoryListParams clp = new PerDimensionOrdinalPolicy(policies);
fip = new FacetIndexingParams(clp);
allExpectedCounts = newCounts();
termExpectedCounts = newCounts();
// segment w/ no categories
indexDocsNoFacets(indexWriter);
// segment w/ categories, no content
indexDocsWithFacetsNoTerms(indexWriter, taxoWriter, allExpectedCounts);
// segment w/ categories and content
indexDocsWithFacetsAndTerms(indexWriter, taxoWriter, allExpectedCounts);
// segment w/ categories and some content
indexDocsWithFacetsAndSomeTerms(indexWriter, taxoWriter, allExpectedCounts);
IOUtils.close(indexWriter, taxoWriter);
}
Project: Maskana-Gestor-de-Conocimiento
File: CountingFacetsAggregatorTest.java
@BeforeClass
public static void beforeClassCountingFacetsAggregatorTest() throws Exception {
indexDir = newDirectory();
taxoDir = newDirectory();
// create an index which has:
// 1. Segment with no categories, but matching results
// 2. Segment w/ categories, but no results
// 3. Segment w/ categories and results
// 4. Segment w/ categories, but only some results
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
conf.setMergePolicy(NoMergePolicy.COMPOUND_FILES); // prevent merges, so we can control the index segments
IndexWriter indexWriter = new IndexWriter(indexDir, conf);
TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
Map<String,OrdinalPolicy> policies = new HashMap<String,CategoryListParams.OrdinalPolicy>();
policies.put(CP_B.components[0], OrdinalPolicy.ALL_PARENTS);
policies.put(CP_C.components[0], OrdinalPolicy.NO_PARENTS);
policies.put(CP_D.components[0], OrdinalPolicy.NO_PARENTS);
CategoryListParams clp = new PerDimensionOrdinalPolicy(policies);
fip = new FacetIndexingParams(clp);
allExpectedCounts = newCounts();
termExpectedCounts = newCounts();
// segment w/ no categories
indexDocsNoFacets(indexWriter);
// segment w/ categories, no content
indexDocsWithFacetsNoTerms(indexWriter, taxoWriter, allExpectedCounts);
// segment w/ categories and content
indexDocsWithFacetsAndTerms(indexWriter, taxoWriter, allExpectedCounts);
// segment w/ categories and some content
indexDocsWithFacetsAndSomeTerms(indexWriter, taxoWriter, allExpectedCounts);
IOUtils.close(indexWriter, taxoWriter);
}
Project: elasticsearch_my
File: MergePolicyConfig.java
MergePolicy getMergePolicy() {
return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE;
}
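A short sketch of how a caller might consume this toggle when building a writer configuration; the mergesEnabled flag, the analyzer, and the TieredMergePolicy fallback are illustrative stand-ins rather than the actual Elasticsearch wiring:

// Illustrative only: fall back to NoMergePolicy.INSTANCE when merges are disabled.
boolean mergesEnabled = false; // e.g. derived from the index.merge.enabled setting
MergePolicy policy = mergesEnabled ? new TieredMergePolicy() : NoMergePolicy.INSTANCE;
IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(policy);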
Project: elasticsearch_my
File: MergePolicySettingsTests.java
public void testNoMerges() {
MergePolicyConfig mp = new MergePolicyConfig(logger, indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build()));
assertTrue(mp.getMergePolicy() instanceof NoMergePolicy);
}
Project: elasticsearch_my
File: StoreRecoveryTests.java
public void testAddIndices() throws IOException {
Directory[] dirs = new Directory[randomIntBetween(1, 10)];
final int numDocs = randomIntBetween(50, 100);
int id = 0;
for (int i = 0; i < dirs.length; i++) {
dirs[i] = newFSDirectory(createTempDir());
IndexWriter writer = new IndexWriter(dirs[i], newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)
.setOpenMode(IndexWriterConfig.OpenMode.CREATE));
for (int j = 0; j < numDocs; j++) {
writer.addDocument(Arrays.asList(new StringField("id", Integer.toString(id++), Field.Store.YES)));
}
writer.commit();
writer.close();
}
StoreRecovery storeRecovery = new StoreRecovery(new ShardId("foo", "bar", 1), logger);
RecoveryState.Index indexStats = new RecoveryState.Index();
Directory target = newFSDirectory(createTempDir());
storeRecovery.addIndices(indexStats, target, dirs);
int numFiles = 0;
Predicate<String> filesFilter = (f) -> f.startsWith("segments") == false && f.equals("write.lock") == false
&& f.startsWith("extra") == false;
for (Directory d : dirs) {
numFiles += Arrays.asList(d.listAll()).stream().filter(filesFilter).count();
}
final long targetNumFiles = Arrays.asList(target.listAll()).stream().filter(filesFilter).count();
assertEquals(numFiles, targetNumFiles);
assertEquals(indexStats.totalFileCount(), targetNumFiles);
if (hardLinksSupported(createTempDir())) {
assertEquals(targetNumFiles, indexStats.reusedFileCount());
} else {
assertEquals(0, indexStats.reusedFileCount());
}
DirectoryReader reader = DirectoryReader.open(target);
SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(target);
for (SegmentCommitInfo info : segmentCommitInfos) { // check that we didn't merge
assertEquals("all sources must be flush", info.info.getDiagnostics().get("source"), "flush");
}
assertEquals(reader.numDeletedDocs(), 0);
assertEquals(reader.numDocs(), id);
reader.close();
target.close();
IOUtils.close(dirs);
}
Project: Elasticsearch
File: MergePolicyConfig.java
public MergePolicy getMergePolicy() {
return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE;
}
Project: search
File: CreateIndexTaskTest.java
public void testNoMergePolicy() throws Exception {
PerfRunData runData = createPerfRunData(null);
runData.getConfig().set("merge.policy", NoMergePolicy.class.getName());
new CreateIndexTask(runData).doLogic();
new CloseIndexTask(runData).doLogic();
}
Project: search
File: TestControlledRealTimeReopenThread.java
public void testThreadStarvationNoDeleteNRTReader() throws IOException, InterruptedException {
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
conf.setMergePolicy(NoMergePolicy.INSTANCE);
Directory d = newDirectory();
final CountDownLatch latch = new CountDownLatch(1);
final CountDownLatch signal = new CountDownLatch(1);
LatchedIndexWriter _writer = new LatchedIndexWriter(d, conf, latch, signal);
final TrackingIndexWriter writer = new TrackingIndexWriter(_writer);
final SearcherManager manager = new SearcherManager(_writer, false, null);
Document doc = new Document();
doc.add(newTextField("test", "test", Field.Store.YES));
writer.addDocument(doc);
manager.maybeRefresh();
Thread t = new Thread() {
@Override
public void run() {
try {
signal.await();
manager.maybeRefresh();
writer.deleteDocuments(new TermQuery(new Term("foo", "barista")));
manager.maybeRefresh(); // kick off another reopen so we inc. the internal gen
} catch (Exception e) {
e.printStackTrace();
} finally {
latch.countDown(); // let the add below finish
}
}
};
t.start();
_writer.waitAfterUpdate = true; // wait in addDocument to let some reopens go through
final long lastGen = writer.updateDocument(new Term("foo", "bar"), doc); // once this returns the doc is already reflected in the last reopen
assertFalse(manager.isSearcherCurrent()); // false since there is a delete in the queue
IndexSearcher searcher = manager.acquire();
try {
assertEquals(2, searcher.getIndexReader().numDocs());
} finally {
manager.release(searcher);
}
final ControlledRealTimeReopenThread<IndexSearcher> thread = new ControlledRealTimeReopenThread<>(writer, manager, 0.01, 0.01);
thread.start(); // start reopening
if (VERBOSE) {
System.out.println("waiting now for generation " + lastGen);
}
final AtomicBoolean finished = new AtomicBoolean(false);
Thread waiter = new Thread() {
@Override
public void run() {
try {
thread.waitForGeneration(lastGen);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new RuntimeException(ie);
}
finished.set(true);
}
};
waiter.start();
manager.maybeRefresh();
waiter.join(1000);
if (!finished.get()) {
waiter.interrupt();
fail("thread deadlocked on waitForGeneration");
}
thread.close();
thread.join();
IOUtils.close(manager, _writer, d);
}
Project: NYBC
File: CreateIndexTaskTest.java
public void testNoMergePolicy() throws Exception {
PerfRunData runData = createPerfRunData(null);
runData.getConfig().set("merge.policy", NoMergePolicy.class.getName());
new CreateIndexTask(runData).doLogic();
new CloseIndexTask(runData).doLogic();
}
Project: NYBC
File: TestStandardFacetsAccumulator.java
@Test
public void testSegmentsWithoutCategoriesOrResults() throws Exception {
// tests the accumulator when there are segments with no results
Directory indexDir = newDirectory();
Directory taxoDir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
iwc.setMergePolicy(NoMergePolicy.COMPOUND_FILES); // prevent merges
IndexWriter indexWriter = new IndexWriter(indexDir, iwc);
FacetIndexingParams fip = new FacetIndexingParams(new CategoryListParams() {
@Override
public CategoryListIterator createCategoryListIterator(int partition) throws IOException {
return new AssertingCategoryListIterator(super.createCategoryListIterator(partition));
}
});
TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
FacetFields facetFields = new FacetFields(taxoWriter, fip);
indexTwoDocs(indexWriter, facetFields, false); // 1st segment, no content, with categories
indexTwoDocs(indexWriter, null, true); // 2nd segment, with content, no categories
indexTwoDocs(indexWriter, facetFields, true); // 3rd segment ok
indexTwoDocs(indexWriter, null, false); // 4th segment, no content, no categories
indexTwoDocs(indexWriter, null, true); // 5th segment, with content, no categories
indexTwoDocs(indexWriter, facetFields, true); // 6th segment, with content, with categories
indexTwoDocs(indexWriter, null, true); // 7th segment, with content, no categories
IOUtils.close(indexWriter, taxoWriter);
DirectoryReader indexReader = DirectoryReader.open(indexDir);
TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
IndexSearcher indexSearcher = new IndexSearcher(indexReader);
// search for "f:a", only segments 1 and 3 should match results
Query q = new TermQuery(new Term("f", "a"));
FacetRequest countNoComplements = new CountFacetRequest(new CategoryPath("A"), 10);
FacetSearchParams fsp = new FacetSearchParams(fip, countNoComplements);
FacetsCollector fc = FacetsCollector.create(fsp , indexReader, taxoReader);
indexSearcher.search(q, fc);
List<FacetResult> results = fc.getFacetResults();
assertEquals("received too many facet results", 1, results.size());
FacetResultNode frn = results.get(0).getFacetResultNode();
assertEquals("wrong number of children", 2, frn.subResults.size());
for (FacetResultNode node : frn.subResults) {
assertEquals("wrong weight for child " + node.label, 2, (int) node.value);
}
IOUtils.close(indexReader, taxoReader);
IOUtils.close(indexDir, taxoDir);
}
Project: NYBC
File: TestNRTManager.java
public void testThreadStarvationNoDeleteNRTReader() throws IOException, InterruptedException {
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
conf.setMergePolicy(random().nextBoolean() ? NoMergePolicy.COMPOUND_FILES : NoMergePolicy.NO_COMPOUND_FILES);
Directory d = newDirectory();
final CountDownLatch latch = new CountDownLatch(1);
final CountDownLatch signal = new CountDownLatch(1);
LatchedIndexWriter _writer = new LatchedIndexWriter(d, conf, latch, signal);
final NRTManager.TrackingIndexWriter writer = new NRTManager.TrackingIndexWriter(_writer);
final NRTManager manager = new NRTManager(writer, null, false);
Document doc = new Document();
doc.add(newTextField("test", "test", Field.Store.YES));
long gen = writer.addDocument(doc);
manager.maybeRefresh();
assertFalse(gen < manager.getCurrentSearchingGen());
Thread t = new Thread() {
@Override
public void run() {
try {
signal.await();
manager.maybeRefresh();
writer.deleteDocuments(new TermQuery(new Term("foo", "barista")));
manager.maybeRefresh(); // kick off another reopen so we inc. the internal gen
} catch (Exception e) {
e.printStackTrace();
} finally {
latch.countDown(); // let the add below finish
}
}
};
t.start();
_writer.waitAfterUpdate = true; // wait in addDocument to let some reopens go through
final long lastGen = writer.updateDocument(new Term("foo", "bar"), doc); // once this returns the doc is already reflected in the last reopen
assertFalse(manager.isSearcherCurrent()); // false since there is a delete in the queue
IndexSearcher searcher = manager.acquire();
try {
assertEquals(2, searcher.getIndexReader().numDocs());
} finally {
manager.release(searcher);
}
NRTManagerReopenThread thread = new NRTManagerReopenThread(manager, 0.01, 0.01);
thread.start(); // start reopening
if (VERBOSE) {
System.out.println("waiting now for generation " + lastGen);
}
final AtomicBoolean finished = new AtomicBoolean(false);
Thread waiter = new Thread() {
@Override
public void run() {
manager.waitForGeneration(lastGen);
finished.set(true);
}
};
waiter.start();
manager.maybeRefresh();
waiter.join(1000);
if (!finished.get()) {
waiter.interrupt();
fail("thread deadlocked on waitForGeneration");
}
thread.close();
thread.join();
IOUtils.close(manager, _writer, d);
}
Project: Maskana-Gestor-de-Conocimiento
File: CreateIndexTaskTest.java
public void testNoMergePolicy() throws Exception {
PerfRunData runData = createPerfRunData(null);
runData.getConfig().set("merge.policy", NoMergePolicy.class.getName());
new CreateIndexTask(runData).doLogic();
new CloseIndexTask(runData).doLogic();
}
Project: Maskana-Gestor-de-Conocimiento
File: TestStandardFacetsAccumulator.java
@Test
public void testSegmentsWithoutCategoriesOrResults() throws Exception {
// tests the accumulator when there are segments with no results
Directory indexDir = newDirectory();
Directory taxoDir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
iwc.setMergePolicy(NoMergePolicy.COMPOUND_FILES); // prevent merges
IndexWriter indexWriter = new IndexWriter(indexDir, iwc);
FacetIndexingParams fip = new FacetIndexingParams(new CategoryListParams() {
@Override
public CategoryListIterator createCategoryListIterator(int partition) throws IOException {
return new AssertingCategoryListIterator(super.createCategoryListIterator(partition));
}
});
TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
FacetFields facetFields = new FacetFields(taxoWriter, fip);
indexTwoDocs(indexWriter, facetFields, false); // 1st segment, no content, with categories
indexTwoDocs(indexWriter, null, true); // 2nd segment, with content, no categories
indexTwoDocs(indexWriter, facetFields, true); // 3rd segment ok
indexTwoDocs(indexWriter, null, false); // 4th segment, no content, no categories
indexTwoDocs(indexWriter, null, true); // 5th segment, with content, no categories
indexTwoDocs(indexWriter, facetFields, true); // 6th segment, with content, with categories
indexTwoDocs(indexWriter, null, true); // 7th segment, with content, no categories
IOUtils.close(indexWriter, taxoWriter);
DirectoryReader indexReader = DirectoryReader.open(indexDir);
TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
IndexSearcher indexSearcher = newSearcher(indexReader);
// search for "f:a", only segments 1 and 3 should match results
Query q = new TermQuery(new Term("f", "a"));
FacetRequest countNoComplements = new CountFacetRequest(new CategoryPath("A"), 10);
FacetSearchParams fsp = new FacetSearchParams(fip, countNoComplements);
FacetsCollector fc = FacetsCollector.create(fsp , indexReader, taxoReader);
indexSearcher.search(q, fc);
List<FacetResult> results = fc.getFacetResults();
assertEquals("received too many facet results", 1, results.size());
FacetResultNode frn = results.get(0).getFacetResultNode();
assertEquals("wrong number of children", 2, frn.subResults.size());
for (FacetResultNode node : frn.subResults) {
assertEquals("wrong weight for child " + node.label, 2, (int) node.value);
}
IOUtils.close(indexReader, taxoReader);
IOUtils.close(indexDir, taxoDir);
}
Project: Maskana-Gestor-de-Conocimiento
File: TestControlledRealTimeReopenThread.java
public void testThreadStarvationNoDeleteNRTReader() throws IOException, InterruptedException {
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
conf.setMergePolicy(random().nextBoolean() ? NoMergePolicy.COMPOUND_FILES : NoMergePolicy.NO_COMPOUND_FILES);
Directory d = newDirectory();
final CountDownLatch latch = new CountDownLatch(1);
final CountDownLatch signal = new CountDownLatch(1);
LatchedIndexWriter _writer = new LatchedIndexWriter(d, conf, latch, signal);
final TrackingIndexWriter writer = new TrackingIndexWriter(_writer);
final SearcherManager manager = new SearcherManager(_writer, false, null);
Document doc = new Document();
doc.add(newTextField("test", "test", Field.Store.YES));
writer.addDocument(doc);
manager.maybeRefresh();
Thread t = new Thread() {
@Override
public void run() {
try {
signal.await();
manager.maybeRefresh();
writer.deleteDocuments(new TermQuery(new Term("foo", "barista")));
manager.maybeRefresh(); // kick off another reopen so we inc. the internal gen
} catch (Exception e) {
e.printStackTrace();
} finally {
latch.countDown(); // let the add below finish
}
}
};
t.start();
_writer.waitAfterUpdate = true; // wait in addDocument to let some reopens go through
final long lastGen = writer.updateDocument(new Term("foo", "bar"), doc); // once this returns the doc is already reflected in the last reopen
assertFalse(manager.isSearcherCurrent()); // false since there is a delete in the queue
IndexSearcher searcher = manager.acquire();
try {
assertEquals(2, searcher.getIndexReader().numDocs());
} finally {
manager.release(searcher);
}
final ControlledRealTimeReopenThread<IndexSearcher> thread = new ControlledRealTimeReopenThread<IndexSearcher>(writer, manager, 0.01, 0.01);
thread.start(); // start reopening
if (VERBOSE) {
System.out.println("waiting now for generation " + lastGen);
}
final AtomicBoolean finished = new AtomicBoolean(false);
Thread waiter = new Thread() {
@Override
public void run() {
try {
thread.waitForGeneration(lastGen);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new RuntimeException(ie);
}
finished.set(true);
}
};
waiter.start();
manager.maybeRefresh();
waiter.join(1000);
if (!finished.get()) {
waiter.interrupt();
fail("thread deadlocked on waitForGeneration");
}
thread.close();
thread.join();
IOUtils.close(manager, _writer, d);
}