Java Examples for org.apache.lucene.index.LogDocMergePolicy
Project: community-edition-old
File: IndexInfo.java
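LogDocMergePolicy is the LogMergePolicy variant that sizes segments by document count rather than by bytes on disk, which makes merge behavior predictable when documents are roughly uniform. Before the project examples below, here is a minimal configuration sketch using the IndexWriterConfig-based API (Lucene 5.x-style signatures; the index path is a hypothetical example):

    import java.nio.file.Paths;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.LogDocMergePolicy;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class LogDocMergePolicyExample {
        public static void main(String[] args) throws Exception {
            LogDocMergePolicy mp = new LogDocMergePolicy();
            mp.setMergeFactor(10);              // merge once 10 segments exist at a level
            mp.setMaxMergeDocs(100_000);        // never merge segments above this doc count
            mp.setCalibrateSizeByDeletes(true); // discount deleted docs when sizing segments

            IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer())
                    .setMergePolicy(mp);
            try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
                 IndexWriter writer = new IndexWriter(dir, iwc)) {
                // add documents here; merges follow the doc-count policy above
            }
        }
    }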
/**
 * Make a Lucene index writer.
 *
 * @param location File
 * @param analyzer Analyzer
 * @return IndexWriter
 * @throws IOException
 */
private IndexWriter makeDeltaIndexWriter(File location, Analyzer analyzer) throws IOException
{
    IndexWriter writer;
    if (!IndexReader.indexExists(location))
    {
        writer = new IndexWriter(location, analyzer, true, MaxFieldLength.LIMITED);
    }
    else
    {
        writer = new IndexWriter(location, analyzer, false, MaxFieldLength.LIMITED);
    }
    writer.setUseCompoundFile(writerUseCompoundFile);
    writer.setMaxBufferedDocs(writerMaxBufferedDocs);
    writer.setRAMBufferSizeMB(writerRamBufferSizeMb);
    writer.setMergeFactor(writerMergeFactor);
    writer.setMaxMergeDocs(writerMaxMergeDocs);
    writer.setWriteLockTimeout(writeLockTimeout);
    writer.setMaxFieldLength(maxFieldLength);
    writer.setTermIndexInterval(termIndexInterval);
    writer.setMergeScheduler(new SerialMergeScheduler());
    writer.setMergePolicy(new LogDocMergePolicy());
    return writer;
}
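The constructor and writer setters above come from the Lucene 2.x/3.x API and were removed in Lucene 4.x, where per-writer tuning moved onto IndexWriterConfig. A rough sketch of the merge-related settings in the modern API, assuming Lucene 5.x-style signatures and the same writer* fields in scope (this is not the project's actual migration):

    LogDocMergePolicy mergePolicy = new LogDocMergePolicy();
    mergePolicy.setMergeFactor(writerMergeFactor);
    mergePolicy.setMaxMergeDocs(writerMaxMergeDocs);

    IndexWriterConfig conf = new IndexWriterConfig(analyzer)
            .setMaxBufferedDocs(writerMaxBufferedDocs)
            .setRAMBufferSizeMB(writerRamBufferSizeMb)
            .setMergeScheduler(new SerialMergeScheduler())
            .setMergePolicy(mergePolicy)
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    IndexWriter writer = new IndexWriter(FSDirectory.open(location.toPath()), conf);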
Project: geolocator-3.0
File: GetWriter.java
public static IndexWriter getIndexWriter(String indexdirectory, double buffersize)
        throws IOException {
    Directory dir;
    if (OSUtil.isWindows())
        dir = FSDirectory.open(new File(indexdirectory));
    else
        dir = NIOFSDirectory.open(new File(indexdirectory));
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_45);
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_45, analyzer);
    config.setOpenMode(OpenMode.CREATE_OR_APPEND);
    config.setRAMBufferSizeMB(buffersize);
    LogDocMergePolicy mergePolicy = new LogDocMergePolicy();
    mergePolicy.setMergeFactor(3);
    config.setMergePolicy(mergePolicy);
    IndexWriter writer = new IndexWriter(dir, config);
    return writer;
}
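A hypothetical call site (the directory path and buffer size below are made up for illustration):

    IndexWriter writer = GetWriter.getIndexWriter("/data/geo-index", 64.0);
    // ... index documents ...
    writer.close();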
Project: search
File: LuceneTestCase.java
public static LogMergePolicy newLogMergePolicy(Random r) {
    LogMergePolicy logmp = r.nextBoolean() ? new LogDocMergePolicy() : new LogByteSizeMergePolicy();
    logmp.setCalibrateSizeByDeletes(r.nextBoolean());
    if (rarely(r)) {
        logmp.setMergeFactor(TestUtil.nextInt(r, 2, 9));
    } else {
        logmp.setMergeFactor(TestUtil.nextInt(r, 10, 50));
    }
    configureRandom(r, logmp);
    return logmp;
}
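In a LuceneTestCase subclass this helper is typically wired into a writer config; a hedged sketch, assuming a Directory named dir is in scope:

    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()))
            .setMergePolicy(newLogMergePolicy(random()));
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);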
Project: search
File: TestPerFieldPostingsFormat2.java
private IndexWriter newWriter(Directory dir, IndexWriterConfig conf)
        throws IOException {
    LogDocMergePolicy logDocMergePolicy = new LogDocMergePolicy();
    logDocMergePolicy.setNoCFSRatio(0.0); // make sure we use plain files
    conf.setMergePolicy(logDocMergePolicy);
    final IndexWriter writer = new IndexWriter(dir, conf);
    return writer;
}
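Note that setNoCFSRatio(0.0) tells the policy never to pack merged segments into compound (.cfs) files, so every codec file stays a separate plain file on disk; a ratio of 1.0 would allow compound files for all merged segments.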
Project: search
File: TestIndexSearcher.java
@BeforeClass
public static void beforeClass() throws Exception {
    // we need a consistent segmentation because the reopen test validation
    // depends on merges not happening when it doesn't expect them
    System.setProperty("solr.tests.mergePolicy", LogDocMergePolicy.class.getName());
    initCore("solrconfig.xml", "schema.xml");
}
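This works because the test solrconfig.xml resolves the merge policy class from that system property, typically via a placeholder along the lines of <mergePolicy class="${solr.tests.mergePolicy}"/> (the exact element is an assumption about the config file, which is not shown here).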
Project: search
File: TestGroupingSearch.java
@BeforeClass
public static void beforeTests() throws Exception {
    // force LogDocMergePolicy so that we get a predictable doc order
    // when doing unsorted group collection
    System.setProperty("solr.tests.mergePolicy", LogDocMergePolicy.class.getName());
    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
    initCore("solrconfig.xml", "schema12.xml");
}
Project: search
File: TestMergePolicyConfig.java
public void testLogMergePolicyConfig() throws Exception {
    final Class<? extends LogMergePolicy> mpClass = random().nextBoolean()
        ? LogByteSizeMergePolicy.class : LogDocMergePolicy.class;
    System.setProperty("solr.test.log.merge.policy", mpClass.getName());
    initCore("solrconfig-logmergepolicy.xml", "schema-minimal.xml");
    IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getLatestSchema());

    // verify some props set to -1 get lucene internal defaults
    assertEquals(-1, solrConfig.indexConfig.maxBufferedDocs);
    assertEquals(IndexWriterConfig.DISABLE_AUTO_FLUSH, iwc.getMaxBufferedDocs());
    assertEquals(-1, solrConfig.indexConfig.maxIndexingThreads);
    assertEquals(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, iwc.getMaxThreadStates());
    assertEquals(-1, solrConfig.indexConfig.ramBufferSizeMB, 0.0D);
    assertEquals(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB, iwc.getRAMBufferSizeMB(), 0.0D);

    LogMergePolicy logMP = assertAndCast(mpClass, iwc.getMergePolicy());
    // set by legacy <mergeFactor> setting
    assertEquals(11, logMP.getMergeFactor());
    // set by legacy <maxMergeDocs> setting
    assertEquals(456, logMP.getMaxMergeDocs());
}
Project: NYBC
File: TestPerFieldPostingsFormat2.java
private IndexWriter newWriter(Directory dir, IndexWriterConfig conf)
        throws IOException {
    LogDocMergePolicy logDocMergePolicy = new LogDocMergePolicy();
    logDocMergePolicy.setUseCompoundFile(false); // make sure we use plain files
    conf.setMergePolicy(logDocMergePolicy);
    final IndexWriter writer = new IndexWriter(dir, conf);
    return writer;
}
Project: oodt
File: LuceneWorkflowInstanceRepository.java
@Override
public synchronized boolean clearWorkflowInstances() throws InstanceRepositoryException {
    IndexWriter writer = null;
    try {
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
        config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
        LogMergePolicy lmp = new LogDocMergePolicy();
        lmp.setMergeFactor(mergeFactor);
        config.setMergePolicy(lmp);
        writer = new IndexWriter(indexDir, config);
        LOG.log(Level.FINE, "LuceneWorkflowEngine: remove all workflow instances");
        writer.deleteDocuments(new Term("myfield", "myvalue"));
    } catch (IOException e) {
        LOG.log(Level.SEVERE, e.getMessage());
        LOG.log(Level.WARNING,
                "Exception removing workflow instances from index: Message: " + e.getMessage());
        throw new InstanceRepositoryException(e.getMessage());
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception ignore) {
            }
            writer = null;
        }
    }
    return true;
}
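The manual close-and-swallow pattern in the finally block predates Java 7; a hedged sketch of the same cleanup using try-with-resources (IndexWriter implements Closeable; field names as in the original):

    IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    LogMergePolicy lmp = new LogDocMergePolicy();
    lmp.setMergeFactor(mergeFactor);
    config.setMergePolicy(lmp);
    try (IndexWriter writer = new IndexWriter(indexDir, config)) {
        writer.deleteDocuments(new Term("myfield", "myvalue"));
    } catch (IOException e) {
        throw new InstanceRepositoryException(e.getMessage());
    }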
Project: Maskana-Gestor-de-Conocimiento
File: TestPerFieldPostingsFormat2.java
private IndexWriter newWriter(Directory dir, IndexWriterConfig conf)
        throws IOException {
    LogDocMergePolicy logDocMergePolicy = new LogDocMergePolicy();
    logDocMergePolicy.setNoCFSRatio(0.0); // make sure we use plain files
    conf.setMergePolicy(logDocMergePolicy);
    final IndexWriter writer = new IndexWriter(dir, conf);
    return writer;
}
Project: elasticsearch_my
File: InternalEngineTests.java
public void testRenewSyncFlush() throws Exception {
    final int iters = randomIntBetween(2, 5); // run this a couple of times to get some coverage
    for (int i = 0; i < iters; i++) {
        try (Store store = createStore();
             InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
                     new LogDocMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
            final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
            Engine.Index doc1 = indexForDoc(testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null));
            engine.index(doc1);
            assertEquals(engine.getLastWriteNanos(), doc1.startTime());
            engine.flush();
            Engine.Index doc2 = indexForDoc(testParsedDocument("2", "test", null, testDocumentWithTextField(), B_1, null));
            engine.index(doc2);
            assertEquals(engine.getLastWriteNanos(), doc2.startTime());
            engine.flush();
            final boolean forceMergeFlushes = randomBoolean();
            final ParsedDocument parsedDoc3 = testParsedDocument("3", "test", null, testDocumentWithTextField(), B_1, null);
            if (forceMergeFlushes) {
                engine.index(new Engine.Index(newUid(parsedDoc3), parsedDoc3, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0,
                        Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY,
                        System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(), -1, false));
            } else {
                engine.index(indexForDoc(parsedDoc3));
            }
            Engine.CommitId commitID = engine.flush();
            assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID),
                    Engine.SyncedFlushResult.SUCCESS);
            assertEquals(3, engine.segments(false).size());
            engine.forceMerge(forceMergeFlushes, 1, false, false, false);
            if (forceMergeFlushes == false) {
                engine.refresh("make all segments visible");
                assertEquals(4, engine.segments(false).size());
                assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
                assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
                assertTrue(engine.tryRenewSyncCommit());
                assertEquals(1, engine.segments(false).size());
            } else {
                assertBusy(() -> assertEquals(1, engine.segments(false).size()));
            }
            assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
            assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
            if (randomBoolean()) {
                Engine.Index doc4 = indexForDoc(testParsedDocument("4", "test", null, testDocumentWithTextField(), B_1, null));
                engine.index(doc4);
                assertEquals(engine.getLastWriteNanos(), doc4.startTime());
            } else {
                Engine.Delete delete = new Engine.Delete(doc1.type(), doc1.id(), doc1.uid());
                engine.delete(delete);
                assertEquals(engine.getLastWriteNanos(), delete.startTime());
            }
            assertFalse(engine.tryRenewSyncCommit());
            engine.flush(false, true); // we might hit a concurrent flush from a finishing merge here - just wait if ongoing...
            assertNull(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID));
            assertNull(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
        }
    }
}
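The test pins the engine to LogDocMergePolicy because its doc-count-based segment selection is deterministic, which is what makes the exact segment-count assertions (3, then 4, then 1) reliable across runs.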
Project: search
File: SolrCmdDistributorTest.java
@BeforeClass
public static void beforeClass() throws Exception {
    // we can't use the Randomized merge policy because the test depends on
    // being able to call optimize to have all deletes expunged.
    System.setProperty("solr.tests.mergePolicy", LogDocMergePolicy.class.getName());
}
Project: meresco-lucene
File: LuceneSettings.java
public void updateSettings(Reader reader) throws Exception {
    JsonObject object = (JsonObject) Json.createReader(reader).read();
    for (String key : object.keySet()) {
        switch (key) {
        case "commitCount":
            commitCount = object.getInt(key);
            break;
        case "commitTimeout":
            commitTimeout = object.getInt(key);
            break;
        case "lruTaxonomyWriterCacheSize":
            lruTaxonomyWriterCacheSize = object.getInt(key);
            break;
        case "mergePolicy":
            JsonObject policy = object.getJsonObject("mergePolicy");
            switch (policy.getString("type")) {
            case "TieredMergePolicy":
                this.mergePolicy = getTieredMergePolicy(policy.getJsonNumber("segmentsPerTier").doubleValue(), policy.getInt("maxMergeAtOnce"));
                break;
            case "LogDocMergePolicy": {
                LogDocMergePolicy mp;
                this.mergePolicy = mp = new LogDocMergePolicy();
                mp.setMaxMergeDocs(policy.getInt("maxMergeDocs"));
                mp.setMergeFactor(policy.getInt("mergeFactor"));
                break;
            }
            default:
                throw new RuntimeException("Unsupported mergePolicy: " + policy.getString("type"));
            }
            break;
        case "numberOfConcurrentTasks":
            numberOfConcurrentTasks = object.getInt(key);
            break;
        case "cacheFacetOrdinals":
            this.cacheFacetOrdinals = object.getBoolean("cacheFacetOrdinals");
            break;
        case "analyzer":
            analyzer = getAnalyzer(object.getJsonObject(key));
            break;
        case "similarity":
            similarity = getSimilarity(object.getJsonObject(key));
            break;
        case "drilldownFields":
            this.updateDrilldownFields(object.getJsonArray(key));
            break;
        case "clustering":
            ClusterConfig clusterConfig = ClusterConfig.parseFromJsonObject(object.getJsonObject(key));
            if (clusterConfig != null) {
                this.clusterConfig = clusterConfig;
            }
            break;
        }
    }
}
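A hedged example of a settings payload this parser accepts for the LogDocMergePolicy branch, driven directly by the keys read above (the numeric values and the luceneSettings instance name are illustrative):

    String json = "{\"mergePolicy\": {\"type\": \"LogDocMergePolicy\","
            + " \"maxMergeDocs\": 100000, \"mergeFactor\": 10}}";
    luceneSettings.updateSettings(new StringReader(json));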