/**
 * Reads the blob with the specified name without resolving the blobName using the {@link #blobName} method.
 *
 * @param blobContainer blob container
 * @param blobName      blob name
 */
public T readBlob(BlobContainer blobContainer, String blobName) throws IOException {
    try (InputStream inputStream = blobContainer.readBlob(blobName)) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        Streams.copy(inputStream, out);
        final byte[] bytes = out.toByteArray();
        final String resourceDesc = "ChecksumBlobStoreFormat.readBlob(blob=\"" + blobName + "\")";
        try (ByteArrayIndexInput indexInput = new ByteArrayIndexInput(resourceDesc, bytes)) {
            CodecUtil.checksumEntireFile(indexInput);
            CodecUtil.checkHeader(indexInput, codec, VERSION, VERSION);
            long filePointer = indexInput.getFilePointer();
            long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer;
            BytesReference bytesReference = new BytesArray(bytes, (int) filePointer, (int) contentSize);
            return read(bytesReference);
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            // we trick this into a dedicated exception with the original stacktrace
            throw new CorruptStateException(ex);
        }
    }
}
private void outputBatch(IndexReader indexReader, TopDocs topDocs, int batch, String outputBaseFormat,
                         String pmidOutputFormat, String xmlOutputFormat) throws CorruptIndexException, IOException {
    String outputBasePath = String.format(outputBaseFormat, batch);
    OutputDirectory outputBaseDir = new OutputDirectory(outputBasePath);
    int start = batch * batchSize;
    int end = Math.min(start + batchSize, topDocs.totalHits);
    if (pmidOutputFormat != null) {
        try (PrintStream out = open(batch, outputBaseDir, pmidOutputFormat)) {
            for (int d = start; d < end; ++d) {
                outputBatchDocument(indexReader, topDocs, out, PubMedIndexField.PMID, d);
            }
        }
    }
    if (xmlOutputFormat != null) {
        try (PrintStream out = open(batch, outputBaseDir, xmlOutputFormat)) {
            out.println(XML_HEADER);
            for (int d = start; d < end; ++d) {
                outputBatchDocument(indexReader, topDocs, out, PubMedIndexField.XML, d);
            }
            out.println(XML_FOOTER);
        }
    }
}
public void testCanOpenIndex() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    IndexWriterConfig iwc = newIndexWriterConfig();
    Path tempDir = createTempDir();
    final BaseDirectoryWrapper dir = newFSDirectory(tempDir);
    assertFalse(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    IndexWriter writer = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    writer.commit();
    writer.close();
    assertTrue(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    DirectoryService directoryService = new DirectoryService(shardId, INDEX_SETTINGS) {
        @Override
        public Directory newDirectory() throws IOException {
            return dir;
        }
    };
    Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
    store.markStoreCorrupted(new CorruptIndexException("foo", "bar"));
    assertFalse(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    store.close();
}
private List<ShardStateAction.ShardEntry> createNonExistentShards(ClusterState currentState, String reason) {
    // add shards from a non-existent index
    String nonExistentIndexUUID = "non-existent";
    Index index = new Index("non-existent", nonExistentIndexUUID);
    List<String> nodeIds = new ArrayList<>();
    for (ObjectCursor<String> nodeId : currentState.nodes().getNodes().keys()) {
        nodeIds.add(nodeId.value); // use the cursor's value, not the cursor's toString()
    }
    List<ShardRouting> nonExistentShards = new ArrayList<>();
    nonExistentShards.add(nonExistentShardRouting(index, nodeIds, true));
    for (int i = 0; i < numberOfReplicas; i++) {
        nonExistentShards.add(nonExistentShardRouting(index, nodeIds, false));
    }
    List<ShardStateAction.ShardEntry> existingShards = createExistingShards(currentState, reason);
    List<ShardStateAction.ShardEntry> shardsWithMismatchedAllocationIds = new ArrayList<>();
    for (ShardStateAction.ShardEntry existingShard : existingShards) {
        shardsWithMismatchedAllocationIds.add(new ShardStateAction.ShardEntry(existingShard.shardId,
            UUIDs.randomBase64UUID(), 0L, existingShard.message, existingShard.failure));
    }
    List<ShardStateAction.ShardEntry> tasks = new ArrayList<>();
    nonExistentShards.forEach(shard -> tasks.add(new ShardStateAction.ShardEntry(shard.shardId(),
        shard.allocationId().getId(), 0L, reason, new CorruptIndexException("simulated", nonExistentIndexUUID))));
    tasks.addAll(shardsWithMismatchedAllocationIds);
    return tasks;
}
/**
 * Query method.
 *
 * @throws IOException
 * @throws CorruptIndexException
 * @throws ParseException
 */
public List Search(String searchString, LuceneResultCollector luceneResultCollector)
        throws CorruptIndexException, IOException, ParseException {
    // Approach 1: parse the query string with the configured analyzer
    System.out.println(this.indexSettings.getAnalyzer().getClass() + " ---- analyzer in use");
    QueryParser q = new QueryParser(Version.LUCENE_44, "summary", this.indexSettings.getAnalyzer());
    String search = new String(searchString.getBytes("ISO-8859-1"), "UTF-8");
    System.out.println(search + " ---- search term");
    Query query = q.parse(search);
    // Approach 2: a raw term query
    /*
    Term t = new Term("title", searchString);
    TermQuery query = new TermQuery(t);
    */
    System.out.println(query.toString() + " -------- query.toString()");
    ScoreDoc[] docs = this.indexSearcher.search(query, 100).scoreDocs;
    System.out.println("Total: " + docs.length + " hits");
    List result = luceneResultCollector.collect(docs, this.indexSearcher);
    return result;
}
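// Hedged sketch of why Search() above re-decodes the query string: if a UTF-8 request
// parameter was decoded by the servlet container as ISO-8859-1, each original byte
// becomes exactly one char, so getBytes("ISO-8859-1") recovers the raw UTF-8 bytes and
// new String(..., "UTF-8") decodes them correctly. Purely illustrative (plain JDK);
// configuring the container's request encoding is the cleaner fix.
import java.nio.charset.StandardCharsets;

final class EncodingRoundTrip {
    public static void main(String[] args) {
        String original = "搜索";                                            // what the user typed
        byte[] utf8 = original.getBytes(StandardCharsets.UTF_8);             // bytes on the wire
        String misDecoded = new String(utf8, StandardCharsets.ISO_8859_1);   // what a Latin-1 container hands us
        String repaired = new String(misDecoded.getBytes(StandardCharsets.ISO_8859_1), StandardCharsets.UTF_8);
        System.out.println(original.equals(repaired));                       // prints: true
    }
}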
private void skipField(int bits) throws IOException {
    final int numeric = bits & FIELD_IS_NUMERIC_MASK;
    if (numeric != 0) {
        switch (numeric) {
            case FIELD_IS_NUMERIC_INT:
            case FIELD_IS_NUMERIC_FLOAT:
                fieldsStream.readInt();
                return;
            case FIELD_IS_NUMERIC_LONG:
            case FIELD_IS_NUMERIC_DOUBLE:
                fieldsStream.readLong();
                return;
            default:
                throw new CorruptIndexException("Invalid numeric type: " + Integer.toHexString(numeric));
        }
    } else {
        final int length = fieldsStream.readVInt();
        fieldsStream.seek(fieldsStream.getFilePointer() + length);
    }
}
private NumericDocValues loadByteField(FieldInfo field, IndexInput input) throws IOException {
    CodecUtil.checkHeader(input, Lucene40DocValuesFormat.INTS_CODEC_NAME,
        Lucene40DocValuesFormat.INTS_VERSION_START, Lucene40DocValuesFormat.INTS_VERSION_CURRENT);
    int valueSize = input.readInt();
    if (valueSize != 1) {
        throw new CorruptIndexException("invalid valueSize: " + valueSize);
    }
    int maxDoc = state.segmentInfo.getDocCount();
    final byte[] values = new byte[maxDoc];
    input.readBytes(values, 0, values.length);
    ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(values));
    return new NumericDocValues() {
        @Override
        public long get(int docID) {
            return values[docID];
        }
    };
}
private NumericDocValues loadShortField(FieldInfo field, IndexInput input) throws IOException {
    CodecUtil.checkHeader(input, Lucene40DocValuesFormat.INTS_CODEC_NAME,
        Lucene40DocValuesFormat.INTS_VERSION_START, Lucene40DocValuesFormat.INTS_VERSION_CURRENT);
    int valueSize = input.readInt();
    if (valueSize != 2) {
        throw new CorruptIndexException("invalid valueSize: " + valueSize);
    }
    int maxDoc = state.segmentInfo.getDocCount();
    final short[] values = new short[maxDoc];
    for (int i = 0; i < values.length; i++) {
        values[i] = input.readShort();
    }
    ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(values));
    return new NumericDocValues() {
        @Override
        public long get(int docID) {
            return values[docID];
        }
    };
}
private NumericDocValues loadIntField(FieldInfo field, IndexInput input) throws IOException {
    CodecUtil.checkHeader(input, Lucene40DocValuesFormat.INTS_CODEC_NAME,
        Lucene40DocValuesFormat.INTS_VERSION_START, Lucene40DocValuesFormat.INTS_VERSION_CURRENT);
    int valueSize = input.readInt();
    if (valueSize != 4) {
        throw new CorruptIndexException("invalid valueSize: " + valueSize);
    }
    int maxDoc = state.segmentInfo.getDocCount();
    final int[] values = new int[maxDoc];
    for (int i = 0; i < values.length; i++) {
        values[i] = input.readInt();
    }
    ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(values));
    return new NumericDocValues() {
        @Override
        public long get(int docID) {
            return values[docID];
        }
    };
}
private NumericDocValues loadLongField(FieldInfo field, IndexInput input) throws IOException {
    CodecUtil.checkHeader(input, Lucene40DocValuesFormat.INTS_CODEC_NAME,
        Lucene40DocValuesFormat.INTS_VERSION_START, Lucene40DocValuesFormat.INTS_VERSION_CURRENT);
    int valueSize = input.readInt();
    if (valueSize != 8) {
        throw new CorruptIndexException("invalid valueSize: " + valueSize);
    }
    int maxDoc = state.segmentInfo.getDocCount();
    final long[] values = new long[maxDoc];
    for (int i = 0; i < values.length; i++) {
        values[i] = input.readLong();
    }
    ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(values));
    return new NumericDocValues() {
        @Override
        public long get(int docID) {
            return values[docID];
        }
    };
}
private NumericDocValues loadFloatField(FieldInfo field, IndexInput input) throws IOException {
    CodecUtil.checkHeader(input, Lucene40DocValuesFormat.FLOATS_CODEC_NAME,
        Lucene40DocValuesFormat.FLOATS_VERSION_START, Lucene40DocValuesFormat.FLOATS_VERSION_CURRENT);
    int valueSize = input.readInt();
    if (valueSize != 4) {
        throw new CorruptIndexException("invalid valueSize: " + valueSize);
    }
    int maxDoc = state.segmentInfo.getDocCount();
    final int[] values = new int[maxDoc]; // raw float bits
    for (int i = 0; i < values.length; i++) {
        values[i] = input.readInt();
    }
    ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(values));
    return new NumericDocValues() {
        @Override
        public long get(int docID) {
            return values[docID];
        }
    };
}
private NumericDocValues loadDoubleField(FieldInfo field, IndexInput input) throws IOException {
    CodecUtil.checkHeader(input, Lucene40DocValuesFormat.FLOATS_CODEC_NAME,
        Lucene40DocValuesFormat.FLOATS_VERSION_START, Lucene40DocValuesFormat.FLOATS_VERSION_CURRENT);
    int valueSize = input.readInt();
    if (valueSize != 8) {
        throw new CorruptIndexException("invalid valueSize: " + valueSize);
    }
    int maxDoc = state.segmentInfo.getDocCount();
    final long[] values = new long[maxDoc]; // raw double bits
    for (int i = 0; i < values.length; i++) {
        values[i] = input.readLong();
    }
    ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(values));
    return new NumericDocValues() {
        @Override
        public long get(int docID) {
            return values[docID];
        }
    };
}
private static DocValuesType getDocValuesType(IndexInput input, byte b) throws IOException {
    if (b == 0) {
        return null;
    } else if (b == 1) {
        return DocValuesType.NUMERIC;
    } else if (b == 2) {
        return DocValuesType.BINARY;
    } else if (b == 3) {
        return DocValuesType.SORTED;
    } else if (b == 4) {
        return DocValuesType.SORTED_SET;
    } else if (b == 5) {
        return DocValuesType.SORTED_NUMERIC;
    } else {
        throw new CorruptIndexException("invalid docvalues byte: " + b + " (resource=" + input + ")");
    }
}
private void readSortedField(int fieldNumber, IndexInput meta, FieldInfos infos) throws IOException {
    // sorted = binary + numeric
    if (meta.readVInt() != fieldNumber) {
        throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt (resource=" + meta + ")");
    }
    if (meta.readByte() != Lucene45DocValuesFormat.BINARY) {
        throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt (resource=" + meta + ")");
    }
    BinaryEntry b = readBinaryEntry(meta);
    binaries.put(fieldNumber, b);
    if (meta.readVInt() != fieldNumber) {
        throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt (resource=" + meta + ")");
    }
    if (meta.readByte() != Lucene45DocValuesFormat.NUMERIC) {
        throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt (resource=" + meta + ")");
    }
    NumericEntry n = readNumericEntry(meta);
    ords.put(fieldNumber, n);
}
private void readFields(IndexInput meta, FieldInfos infos) throws IOException {
    int fieldNumber = meta.readVInt();
    while (fieldNumber != -1) {
        FieldInfo info = infos.fieldInfo(fieldNumber);
        if (info == null) {
            throw new CorruptIndexException("Invalid field number: " + fieldNumber + " (resource=" + meta + ")");
        } else if (!info.hasNorms()) {
            throw new CorruptIndexException("Invalid field: " + info.name + " (resource=" + meta + ")");
        }
        NormsEntry entry = new NormsEntry();
        entry.format = meta.readByte();
        entry.offset = meta.readLong();
        switch (entry.format) {
            case CONST_COMPRESSED:
            case UNCOMPRESSED:
            case TABLE_COMPRESSED:
            case DELTA_COMPRESSED:
                break;
            default:
                throw new CorruptIndexException("Unknown format: " + entry.format + ", input=" + meta);
        }
        norms.put(fieldNumber, entry);
        fieldNumber = meta.readVInt();
    }
}
/**
 * Decompress the chunk.
 */
void decompress() throws IOException {
    // decompress data
    final int chunkSize = chunkSize();
    if (version >= VERSION_BIG_CHUNKS && chunkSize >= 2 * CompressingStoredFieldsReader.this.chunkSize) {
        bytes.offset = bytes.length = 0;
        for (int decompressed = 0; decompressed < chunkSize; ) {
            final int toDecompress = Math.min(chunkSize - decompressed, CompressingStoredFieldsReader.this.chunkSize);
            decompressor.decompress(fieldsStream, toDecompress, 0, toDecompress, spare);
            bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + spare.length);
            System.arraycopy(spare.bytes, spare.offset, bytes.bytes, bytes.length, spare.length);
            bytes.length += spare.length;
            decompressed += toDecompress;
        }
    } else {
        decompressor.decompress(fieldsStream, chunkSize, 0, chunkSize, bytes);
    }
    if (bytes.length != chunkSize) {
        throw new CorruptIndexException("Corrupted: expected chunk size = " + chunkSize() + ", got " + bytes.length
            + " (resource=" + fieldsStream + ")");
    }
}
private SegmentInfo readUpgradedSegmentInfo(String name, Directory dir, IndexInput input) throws IOException {
    CodecUtil.checkHeader(input, Lucene3xSegmentInfoFormat.UPGRADED_SI_CODEC_NAME,
        Lucene3xSegmentInfoFormat.UPGRADED_SI_VERSION_START, Lucene3xSegmentInfoFormat.UPGRADED_SI_VERSION_CURRENT);
    final Version version;
    try {
        version = Version.parse(input.readString());
    } catch (ParseException pe) {
        throw new CorruptIndexException("unable to parse version string (input: " + input + "): " + pe.getMessage(), pe);
    }
    final int docCount = input.readInt();
    final Map<String, String> attributes = input.readStringStringMap();
    final boolean isCompoundFile = input.readByte() == SegmentInfo.YES;
    final Map<String, String> diagnostics = input.readStringStringMap();
    final Set<String> files = input.readStringSet();
    SegmentInfo info = new SegmentInfo(dir, version, name, docCount, isCompoundFile, null, diagnostics,
        Collections.unmodifiableMap(attributes));
    info.setFiles(files);
    return info;
}
public final void visitDocument(int n, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
    seekIndex(n);
    fieldsStream.seek(indexStream.readLong());
    final int numFields = fieldsStream.readVInt();
    for (int fieldIDX = 0; fieldIDX < numFields; fieldIDX++) {
        int fieldNumber = fieldsStream.readVInt();
        FieldInfo fieldInfo = fieldInfos.fieldInfo(fieldNumber);
        int bits = fieldsStream.readByte() & 0xFF;
        assert bits <= (FIELD_IS_NUMERIC_MASK | FIELD_IS_BINARY) : "bits=" + Integer.toHexString(bits);
        switch (visitor.needsField(fieldInfo)) {
            case YES:
                readField(visitor, fieldInfo, bits);
                break;
            case NO:
                skipField(bits);
                break;
            case STOP:
                return;
        }
    }
}
private void readSortedField(int fieldNumber, IndexInput meta, FieldInfos infos) throws IOException {
    // sorted = binary + numeric
    if (meta.readVInt() != fieldNumber) {
        throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt (resource=" + meta + ")");
    }
    if (meta.readByte() != Lucene410DocValuesFormat.BINARY) {
        throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt (resource=" + meta + ")");
    }
    BinaryEntry b = readBinaryEntry(meta);
    binaries.put(fieldNumber, b);
    if (meta.readVInt() != fieldNumber) {
        throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt (resource=" + meta + ")");
    }
    if (meta.readByte() != Lucene410DocValuesFormat.NUMERIC) {
        throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt (resource=" + meta + ")");
    }
    NumericEntry n = readNumericEntry(meta);
    ords.put(fieldNumber, n);
}
/**
 * Like {@link #checkHeader(DataInput,String,int,int)} except this
 * version assumes the first int has already been read
 * and validated from the input.
 */
public static int checkHeaderNoMagic(DataInput in, String codec, int minVersion, int maxVersion) throws IOException {
    final String actualCodec = in.readString();
    if (!actualCodec.equals(codec)) {
        throw new CorruptIndexException("codec mismatch: actual codec=" + actualCodec
            + " vs expected codec=" + codec + " (resource: " + in + ")");
    }
    final int actualVersion = in.readInt();
    if (actualVersion < minVersion) {
        throw new IndexFormatTooOldException(in, actualVersion, minVersion, maxVersion);
    }
    if (actualVersion > maxVersion) {
        throw new IndexFormatTooNewException(in, actualVersion, minVersion, maxVersion);
    }
    return actualVersion;
}
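// A minimal sketch (plain JDK, not the Lucene API) of the full header layout that
// checkHeader() validates before delegating to checkHeaderNoMagic() above: a 4-byte
// magic, the codec name, then a version int. CODEC_MAGIC matches Lucene's CodecUtil
// (0x3FD76C17); readUTF() is an assumption for brevity -- Lucene writes strings in its
// own VInt-length-prefixed format, not modified UTF.
import java.io.DataInput;
import java.io.IOException;

final class HeaderSketch {
    static final int CODEC_MAGIC = 0x3FD76C17;

    static int checkHeader(DataInput in, String codec, int minVersion, int maxVersion) throws IOException {
        final int magic = in.readInt();
        if (magic != CODEC_MAGIC) {
            throw new IOException("codec header mismatch: actual header=" + Integer.toHexString(magic));
        }
        // from here on the logic matches checkHeaderNoMagic() above
        final String actualCodec = in.readUTF(); // assumption: simplified string encoding
        if (!actualCodec.equals(codec)) {
            throw new IOException("codec mismatch: actual codec=" + actualCodec + " vs expected codec=" + codec);
        }
        final int actualVersion = in.readInt();
        if (actualVersion < minVersion || actualVersion > maxVersion) {
            throw new IOException("unsupported version: " + actualVersion + " (expected " + minVersion + ".." + maxVersion + ")");
        }
        return actualVersion;
    }
}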
/**
 * Reads the blob with the specified name without resolving the blobName using the {@link #blobName} method.
 *
 * @param blobContainer blob container
 * @param blobName      blob name
 */
public T readBlob(BlobContainer blobContainer, String blobName) throws IOException {
    try (InputStream inputStream = blobContainer.readBlob(blobName)) {
        byte[] bytes = ByteStreams.toByteArray(inputStream);
        final String resourceDesc = "ChecksumBlobStoreFormat.readBlob(blob=\"" + blobName + "\")";
        try (ByteArrayIndexInput indexInput = new ByteArrayIndexInput(resourceDesc, bytes)) {
            CodecUtil.checksumEntireFile(indexInput);
            CodecUtil.checkHeader(indexInput, codec, VERSION, VERSION);
            long filePointer = indexInput.getFilePointer();
            long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer;
            BytesReference bytesReference = new BytesArray(bytes, (int) filePointer, (int) contentSize);
            return read(bytesReference);
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            // we trick this into a dedicated exception with the original stacktrace
            throw new CorruptStateException(ex);
        }
    }
}
/**
 * Reads the state from a given file and compares the expected version against the actual version of
 * the state.
 */
public final T read(Path file) throws IOException {
    try (Directory dir = newDirectory(file.getParent())) {
        try (final IndexInput indexInput = dir.openInput(file.getFileName().toString(), IOContext.DEFAULT)) {
            // We checksum the entire file before we even go and parse it. If it's corrupted we barf right here.
            CodecUtil.checksumEntireFile(indexInput);
            CodecUtil.checkHeader(indexInput, STATE_FILE_CODEC, STATE_FILE_VERSION, STATE_FILE_VERSION);
            final XContentType xContentType = XContentType.values()[indexInput.readInt()];
            indexInput.readLong(); // version currently unused
            long filePointer = indexInput.getFilePointer();
            long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer;
            try (IndexInput slice = indexInput.slice("state_xcontent", filePointer, contentSize)) {
                try (XContentParser parser = XContentFactory.xContent(xContentType)
                        .createParser(new InputStreamIndexInput(slice, contentSize))) {
                    return fromXContent(parser);
                }
            }
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            // we trick this into a dedicated exception with the original stacktrace
            throw new CorruptStateException(ex);
        }
    }
}
/**
 * Restores a file.
 * This is an asynchronous method: upon completion of the operation the latch is counted down and any
 * failures are added to the {@code failures} list.
 *
 * @param fileInfo file to be restored
 * @param store    store to restore the file into
 */
private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, final Store store) throws IOException {
    boolean success = false;
    try (InputStream partSliceStream = new PartSliceStream(blobContainer, fileInfo)) {
        final InputStream stream;
        if (restoreRateLimiter == null) {
            stream = partSliceStream;
        } else {
            stream = new RateLimitingInputStream(partSliceStream, restoreRateLimiter, restoreRateLimitingTimeInNanos::inc);
        }
        try (IndexOutput indexOutput = store.createVerifyingOutput(fileInfo.physicalName(), fileInfo.metadata(), IOContext.DEFAULT)) {
            final byte[] buffer = new byte[BUFFER_SIZE];
            int length;
            while ((length = stream.read(buffer)) > 0) {
                indexOutput.writeBytes(buffer, 0, length);
                recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.name(), length);
            }
            Store.verify(indexOutput);
            indexOutput.close();
            store.directory().sync(Collections.singleton(fileInfo.physicalName()));
            success = true;
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            try {
                store.markStoreCorrupted(ex);
            } catch (IOException e) {
                logger.warn("store cannot be marked as corrupted", e);
            }
            throw ex;
        } finally {
            if (success == false) {
                store.deleteQuiet(fileInfo.physicalName());
            }
        }
    }
}
/**
 * Returns the last committed segments info for this store.
 *
 * @throws IOException if the index is corrupted or the segments file is not present
 */
public SegmentInfos readLastCommittedSegmentsInfo() throws IOException {
    failIfCorrupted();
    try {
        return readSegmentsInfo(null, directory());
    } catch (CorruptIndexException ex) {
        markStoreCorrupted(ex);
        throw ex;
    }
}
public static void checkIntegrity(final StoreFileMetaData md, final Directory directory) throws IOException {
    try (IndexInput input = directory.openInput(md.name(), IOContext.READONCE)) {
        if (input.length() != md.length()) { // first check the length no matter how old this file is
            throw new CorruptIndexException("expected length=" + md.length() + " != actual length: " + input.length()
                + " : file truncated?", input);
        }
        // throw exception if the file is corrupt
        String checksum = Store.digestToString(CodecUtil.checksumEntireFile(input));
        // throw exception if metadata is inconsistent
        if (!checksum.equals(md.checksum())) {
            throw new CorruptIndexException("inconsistent metadata: lucene checksum=" + checksum
                + ", metadata checksum=" + md.checksum(), input);
        }
    }
}
private static void checksumFromLuceneFile(Directory directory, String file, Map<String, StoreFileMetaData> builder,
                                           Logger logger, Version version, boolean readFileAsHash) throws IOException {
    final String checksum;
    final BytesRefBuilder fileHash = new BytesRefBuilder();
    try (IndexInput in = directory.openInput(file, IOContext.READONCE)) {
        final long length;
        try {
            length = in.length();
            if (length < CodecUtil.footerLength()) {
                // truncated files trigger IAE if we seek negative... these files are really corrupted though
                throw new CorruptIndexException("Can't retrieve checksum from file: " + file
                    + " file length must be >= " + CodecUtil.footerLength() + " but was: " + in.length(), in);
            }
            if (readFileAsHash) {
                final VerifyingIndexInput verifyingIndexInput = new VerifyingIndexInput(in);
                // additional safety: we checksum the entire file we read the hash for...
                hashFile(fileHash, new InputStreamIndexInput(verifyingIndexInput, length), length);
                checksum = digestToString(verifyingIndexInput.verify());
            } else {
                checksum = digestToString(CodecUtil.retrieveChecksum(in));
            }
        } catch (Exception ex) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("Can't retrieve checksum from file [{}]", file), ex);
            throw ex;
        }
        builder.put(file, new StoreFileMetaData(file, length, checksum, version, fileHash.get()));
    }
}
@Override
public void verify() throws IOException {
    String footerDigest = null;
    if (metadata.checksum().equals(actualChecksum) && writtenBytes == metadata.length()) {
        ByteArrayIndexInput indexInput = new ByteArrayIndexInput("checksum", this.footerChecksum);
        footerDigest = digestToString(indexInput.readLong());
        if (metadata.checksum().equals(footerDigest)) {
            return;
        }
    }
    throw new CorruptIndexException("verification failed (hardware problem?) : expected=" + metadata.checksum()
        + " actual=" + actualChecksum + " footer=" + footerDigest + " writtenLength=" + writtenBytes
        + " expectedLength=" + metadata.length() + " (resource=" + metadata.toString() + ")",
        "VerifyingIndexOutput(" + metadata.name() + ")");
}
private void readAndCompareChecksum() throws IOException {
    actualChecksum = digestToString(getChecksum());
    if (!metadata.checksum().equals(actualChecksum)) {
        throw new CorruptIndexException("checksum failed (hardware problem?) : expected=" + metadata.checksum()
            + " actual=" + actualChecksum + " (resource=" + metadata.toString() + ")",
            "VerifyingIndexOutput(" + metadata.name() + ")");
    }
}
public long verify() throws CorruptIndexException {
    long storedChecksum = getStoredChecksum();
    if (getChecksum() == storedChecksum) {
        return storedChecksum;
    }
    throw new CorruptIndexException("verification failed : calculated=" + Store.digestToString(getChecksum())
        + " stored=" + Store.digestToString(storedChecksum), this);
}
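// Hedged sketch (plain JDK, not the Lucene API) of the 16-byte footer that verify()
// checks against: a footer magic int, an algorithm-ID int (0 = CRC32), and the
// checksum itself as a long covering every preceding byte. The constants are meant to
// mirror Lucene's CodecUtil (FOOTER_MAGIC = ~CODEC_MAGIC, footerLength() = 16); treat
// this as an illustration of the layout, not a drop-in verifier.
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.zip.CRC32;

final class FooterSketch {
    static final int FOOTER_MAGIC = ~0x3FD76C17; // bitwise complement of the header magic
    static final int FOOTER_LENGTH = 16;         // magic (4) + algorithm id (4) + checksum (8)

    static long verifyFooter(byte[] file) throws IOException {
        if (file.length < FOOTER_LENGTH) {
            throw new IOException("file truncated: length=" + file.length);
        }
        ByteBuffer footer = ByteBuffer.wrap(file, file.length - FOOTER_LENGTH, FOOTER_LENGTH);
        if (footer.getInt() != FOOTER_MAGIC || footer.getInt() != 0) {
            throw new IOException("corrupt footer");
        }
        long stored = footer.getLong();
        CRC32 crc = new CRC32();
        crc.update(file, 0, file.length - 8); // the checksum covers everything before the checksum itself
        if (crc.getValue() != stored) {
            throw new IOException("verification failed : calculated=" + crc.getValue() + " stored=" + stored);
        }
        return stored;
    }
}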
/**
 * Reads the state from a given file and compares the expected version against the actual version of
 * the state.
 */
public final T read(NamedXContentRegistry namedXContentRegistry, Path file) throws IOException {
    try (Directory dir = newDirectory(file.getParent())) {
        try (IndexInput indexInput = dir.openInput(file.getFileName().toString(), IOContext.DEFAULT)) {
            // We checksum the entire file before we even go and parse it. If it's corrupted we barf right here.
            CodecUtil.checksumEntireFile(indexInput);
            final int fileVersion = CodecUtil.checkHeader(indexInput, STATE_FILE_CODEC,
                MIN_COMPATIBLE_STATE_FILE_VERSION, STATE_FILE_VERSION);
            final XContentType xContentType = XContentType.values()[indexInput.readInt()];
            if (fileVersion == STATE_FILE_VERSION_ES_2X_AND_BELOW) {
                // format version 0 wrote a version that always came from the content state file and was never used
                indexInput.readLong(); // version currently unused
            }
            long filePointer = indexInput.getFilePointer();
            long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer;
            try (IndexInput slice = indexInput.slice("state_xcontent", filePointer, contentSize)) {
                try (XContentParser parser = XContentFactory.xContent(xContentType)
                        .createParser(namedXContentRegistry, new InputStreamIndexInput(slice, contentSize))) {
                    return fromXContent(parser);
                }
            }
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            // we trick this into a dedicated exception with the original stacktrace
            throw new CorruptStateException(ex);
        }
    }
}
public void testFailShard() throws Exception {
    IndexShard shard = newStartedShard();
    final ShardPath shardPath = shard.shardPath();
    assertNotNull(shardPath);
    // fail shard
    shard.failShard("test shard fail", new CorruptIndexException("", ""));
    closeShards(shard);
    // check state file still exists
    ShardStateMetaData shardStateMetaData = load(logger, shardPath.getShardStatePath());
    assertEquals(shardStateMetaData, getShardStateMetadata(shard));
    // but index can't be opened for a failed shard
    assertThat("store index should be corrupted",
        Store.canOpenIndex(logger, shardPath.resolveIndex(), shard.shardId(),
            (shardId, lockTimeoutMS) -> new DummyShardLock(shardId)),
        equalTo(false));
}
public void testVerifyingIndexOutputOnEmptyFile() throws IOException {
    Directory dir = newDirectory();
    IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(
        new StoreFileMetaData("foo.bar", 0, Store.digestToString(0)),
        dir.createOutput("foo1.bar", IOContext.DEFAULT));
    try {
        Store.verify(verifyingOutput);
        fail("should be a corrupted index");
    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
        // ok
    }
    IOUtils.close(verifyingOutput, dir);
}
public void testVerifyingIndexOutputWithBogusInput() throws IOException {
    Directory dir = newDirectory();
    int length = scaledRandomIntBetween(10, 1024);
    IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(
        new StoreFileMetaData("foo1.bar", length, ""),
        dir.createOutput("foo1.bar", IOContext.DEFAULT));
    try {
        while (length > 0) {
            verifyingOutput.writeByte((byte) random().nextInt());
            length--;
        }
        fail("should be a corrupted index");
    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
        // ok
    }
    IOUtils.close(verifyingOutput, dir);
}
public void testVerifyingIndexInput() throws IOException {
    Directory dir = newDirectory();
    IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT);
    int iters = scaledRandomIntBetween(10, 100);
    for (int i = 0; i < iters; i++) {
        BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
        output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
    }
    CodecUtil.writeFooter(output);
    output.close();

    // check file
    IndexInput indexInput = dir.openInput("foo.bar", IOContext.DEFAULT);
    long checksum = CodecUtil.retrieveChecksum(indexInput);
    indexInput.seek(0);
    IndexInput verifyingIndexInput = new Store.VerifyingIndexInput(dir.openInput("foo.bar", IOContext.DEFAULT));
    readIndexInputFullyWithRandomSeeks(verifyingIndexInput);
    Store.verify(verifyingIndexInput);
    assertThat(checksum, equalTo(((ChecksumIndexInput) verifyingIndexInput).getChecksum()));
    IOUtils.close(indexInput, verifyingIndexInput);

    // corrupt file and check again
    corruptFile(dir, "foo.bar", "foo1.bar");
    verifyingIndexInput = new Store.VerifyingIndexInput(dir.openInput("foo1.bar", IOContext.DEFAULT));
    readIndexInputFullyWithRandomSeeks(verifyingIndexInput);
    try {
        Store.verify(verifyingIndexInput);
        fail("should be a corrupted index");
    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
        // ok
    }
    IOUtils.close(verifyingIndexInput);
    IOUtils.close(dir);
}
/**
 * Tests that when a node reports a store exception while fetching shard data, the shard is moved to
 * ignore unassigned.
 */
public void testStoreException() {
    final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(),
        CLUSTER_RECOVERED, "allocId1");
    testAllocator.addData(node1, "allocId1", randomBoolean(), new CorruptIndexException("test", "test"));
    testAllocator.allocateUnassigned(allocation);
    assertThat(allocation.routingNodesChanged(), equalTo(true));
    assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
    assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
    assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
private static List<ShardStateAction.ShardEntry> toTasks(ClusterState currentState, List<ShardRouting> shards,
                                                         String indexUUID, String message) {
    return shards
        .stream()
        .map(shard -> new ShardStateAction.ShardEntry(
            shard.shardId(),
            shard.allocationId().getId(),
            randomBoolean() ? 0L : currentState.metaData().getIndexSafe(shard.index()).primaryTerm(shard.id()),
            message,
            new CorruptIndexException("simulated", indexUUID)))
        .collect(Collectors.toList());
}
public List collect(ScoreDoc[] result, IndexSearcher indexSearcher) throws CorruptIndexException, IOException {
    List posts = new ArrayList();
    for (int i = 0; i < result.length; i++) {
        Post post = new Post();
        Document doc = indexSearcher.doc(result[i].doc); // fetch the document once instead of once per field
        post.setBookID(doc.get("bookID"));
        System.out.println("Returned result ====== " + doc.get("bookID"));
        post.setBookTitle(doc.get("bookTitle"));
        post.setSummary(doc.get("summary"));
        post.setBookStyle(doc.get("bookStyle"));
        post.setAuthors(doc.get("authors"));
        posts.add(post);
    }
    return posts;
}
private void readField(StoredFieldVisitor visitor, FieldInfo info, int bits) throws IOException {
    final int numeric = bits & FIELD_IS_NUMERIC_MASK;
    if (numeric != 0) {
        switch (numeric) {
            case FIELD_IS_NUMERIC_INT:
                visitor.intField(info, fieldsStream.readInt());
                return;
            case FIELD_IS_NUMERIC_LONG:
                visitor.longField(info, fieldsStream.readLong());
                return;
            case FIELD_IS_NUMERIC_FLOAT:
                visitor.floatField(info, Float.intBitsToFloat(fieldsStream.readInt()));
                return;
            case FIELD_IS_NUMERIC_DOUBLE:
                visitor.doubleField(info, Double.longBitsToDouble(fieldsStream.readLong()));
                return;
            default:
                throw new CorruptIndexException("Invalid numeric type: " + Integer.toHexString(numeric));
        }
    } else {
        final int length = fieldsStream.readVInt();
        byte[] bytes = new byte[length];
        fieldsStream.readBytes(bytes, 0, length);
        if ((bits & FIELD_IS_BINARY) != 0) {
            visitor.binaryField(info, bytes);
        } else {
            visitor.stringField(info, new String(bytes, 0, bytes.length, StandardCharsets.UTF_8));
        }
    }
}
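// Hedged sketch of how the stored-fields "bits" byte drives readField()/skipField()
// above. The constant values are my reading of Lucene 4.0's stored fields writer and
// should be treated as assumptions: one flag bit marks binary fields, and a 3-bit
// numeric type code (0 = not numeric) sits above it, matching FIELD_IS_NUMERIC_MASK.
final class FieldBitsSketch {
    static final int FIELD_IS_BINARY = 1 << 1;          // assumed value
    static final int NUMERIC_BIT_SHIFT = 3;             // assumed value
    static final int FIELD_IS_NUMERIC_MASK = 0x07 << NUMERIC_BIT_SHIFT;
    static final int FIELD_IS_NUMERIC_INT = 1 << NUMERIC_BIT_SHIFT;
    static final int FIELD_IS_NUMERIC_LONG = 2 << NUMERIC_BIT_SHIFT;
    static final int FIELD_IS_NUMERIC_FLOAT = 3 << NUMERIC_BIT_SHIFT;
    static final int FIELD_IS_NUMERIC_DOUBLE = 4 << NUMERIC_BIT_SHIFT;

    /** Maps a bits byte to the value type a reader would dispatch on. */
    static String describe(int bits) {
        switch (bits & FIELD_IS_NUMERIC_MASK) {
            case 0:                       return (bits & FIELD_IS_BINARY) != 0 ? "binary" : "string";
            case FIELD_IS_NUMERIC_INT:    return "int";
            case FIELD_IS_NUMERIC_LONG:   return "long";
            case FIELD_IS_NUMERIC_FLOAT:  return "float";
            case FIELD_IS_NUMERIC_DOUBLE: return "double";
            default:                      return "corrupt";
        }
    }
}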
@Override
public Bits readLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context) throws IOException {
    String filename = IndexFileNames.fileNameFromGeneration(info.info.name, DELETES_EXTENSION, info.getDelGen());
    final BitVector liveDocs = new BitVector(dir, filename, context);
    if (liveDocs.length() != info.info.getDocCount()) {
        throw new CorruptIndexException("liveDocs.length()=" + liveDocs.length()
            + " info.docCount=" + info.info.getDocCount() + " (filename=" + filename + ")");
    }
    if (liveDocs.count() != info.info.getDocCount() - info.getDelCount()) {
        throw new CorruptIndexException("liveDocs.count()=" + liveDocs.count()
            + " info.docCount=" + info.info.getDocCount() + " info.getDelCount()=" + info.getDelCount()
            + " (filename=" + filename + ")");
    }
    return liveDocs;
}