/**
 * Create a file in GridFS with the given filename and write
 * some test data to it.
 * @param filename the name of the file to create
 * @param size the number of bytes to write
 * @param vertx the Vert.x instance
 * @param handler a handler that will be called when the file
 * has been written
 */
private void prepareData(String filename, int size, Vertx vertx,
        Handler<AsyncResult<String>> handler) {
    vertx.<String>executeBlocking(f -> {
        try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
            MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
            GridFSBucket gridFS = GridFSBuckets.create(db);
            try (GridFSUploadStream os = gridFS.openUploadStream(filename)) {
                for (int i = 0; i < size; ++i) {
                    os.write((byte)(i & 0xFF));
                }
            }
        }
        f.complete(filename);
    }, handler);
}
@Override
protected void validateAfterStoreAdd(TestContext context, Vertx vertx,
        String path, Handler<AsyncResult<Void>> handler) {
    vertx.executeBlocking(f -> {
        try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
            MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
            GridFSBucket gridFS = GridFSBuckets.create(db);

            GridFSFindIterable files = gridFS.find();
            GridFSFile file = files.first();
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            gridFS.downloadToStream(file.getFilename(), baos);
            String contents = new String(baos.toByteArray(), StandardCharsets.UTF_8);
            context.assertEquals(CHUNK_CONTENT, contents);
        }
        f.complete();
    }, handler);
}
@Override
public InputStream getAssociatedDocumentStream(String uniqueId, String fileName) {
    GridFSBucket gridFS = createGridFSConnection();
    GridFSFile file = gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY,
            getGridFsId(uniqueId, fileName))).first();

    if (file == null) {
        return null;
    }

    InputStream is = gridFS.openDownloadStream(file.getObjectId());

    Document metadata = file.getMetadata();
    if (metadata.containsKey(COMPRESSED_FLAG)) {
        boolean compressed = (boolean) metadata.remove(COMPRESSED_FLAG);
        if (compressed) {
            is = new InflaterInputStream(is);
        }
    }

    return is;
}
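The COMPRESSED_FLAG handling above implies the write side deflates the payload with zlib before uploading. A self-contained sketch of that round trip using only java.util.zip (class name illustrative; the GridFS plumbing is omitted):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

public class ZlibRoundTrip {
    public static void main(String[] args) throws IOException {
        byte[] original = "associated document content".getBytes(StandardCharsets.UTF_8);

        // compress with zlib, as a store would before uploading to GridFS
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        try (DeflaterOutputStream dos = new DeflaterOutputStream(compressed)) {
            dos.write(original);
        }

        // decompress on download, mirroring the InflaterInputStream wrapping above
        try (InputStream is = new InflaterInputStream(
                new ByteArrayInputStream(compressed.toByteArray()))) {
            byte[] restored = is.readAllBytes();
            System.out.println(new String(restored, StandardCharsets.UTF_8));
        }
    }
}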
private void uploadStream(SmofGridRef ref, String name, InputStream stream) {
    final String bucketName = ref.getBucketName();
    Preconditions.checkNotNull(bucketName, "No bucket specified");

    final GridFSUploadOptions options = new GridFSUploadOptions().metadata(ref.getMetadata());
    final GridFSBucket bucket = pool.getBucket(bucketName);
    final ObjectId id = bucket.uploadFromStream(name, stream, options);
    ref.setId(id);
}
@Override
public InputStream download(SmofGridRef ref) {
    final String bucketName = ref.getBucketName();
    final ObjectId id = ref.getId();
    Preconditions.checkArgument(id != null, "No download source found");
    Preconditions.checkArgument(bucketName != null, "No bucket specified");

    final GridFSBucket bucket = pool.getBucket(bucketName);
    return bucket.openDownloadStream(id);
}
@Override
public void drop(SmofGridRef ref) {
    final String bucketName = ref.getBucketName();
    final ObjectId id = ref.getId();
    Preconditions.checkArgument(id != null, "No file to drop");
    Preconditions.checkArgument(bucketName != null, "No bucket specified");

    final GridFSBucket bucket = pool.getBucket(bucketName);
    bucket.delete(id);
}
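The three SmofGridRef operations above map one-to-one onto the plain driver's GridFSBucket calls. A minimal self-contained sketch of the same upload/download/delete cycle, assuming a local mongod and an illustrative database and bucket name (the MongoClients factory here is the modern sync API; the test snippets elsewhere in this section use the legacy MongoClient constructor):

import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;
import com.mongodb.client.gridfs.model.GridFSUploadOptions;
import org.bson.Document;
import org.bson.types.ObjectId;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class GridFSRoundTrip {
    public static void main(String[] args) throws Exception {
        MongoDatabase db = MongoClients.create("mongodb://localhost:27017")
                .getDatabase("test");
        GridFSBucket bucket = GridFSBuckets.create(db, "attachments");

        // upload with metadata, as uploadStream(...) does above
        GridFSUploadOptions options = new GridFSUploadOptions()
                .metadata(new Document("source", "example"));
        byte[] data = "hello gridfs".getBytes(StandardCharsets.UTF_8);
        ObjectId id = bucket.uploadFromStream("greeting.txt",
                new ByteArrayInputStream(data), options);

        // download by id, as download(...) does above
        try (InputStream is = bucket.openDownloadStream(id)) {
            System.out.println(new String(is.readAllBytes(), StandardCharsets.UTF_8));
        }

        // delete the file document and its chunks, as drop(...) does above
        bucket.delete(id);
    }
}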
private void deleteLog(Date olderThan) {
    MongoCollection<Log> logCollection = mongoService.getMongoClient()
            .getDatabase(database).getCollection(collection, Log.class);
    Bson filter = Filters.lt("timeStamp", olderThan);

    // delete the GridFS files referenced by each matching log entry first
    logCollection.find(filter).forEach((Block<? super Log>) log ->
            log.getLogFiles().forEach(logFile -> {
                GridFSBucket gridFSBucket = GridFSBuckets.create(
                        mongoService.getMongoClient().getDatabase(database),
                        logFile.getBucket());
                gridFSBucket.delete(logFile.getFileObjectId());
            }));

    // then remove the log documents themselves
    logCollection.deleteMany(filter);
}
public BucketStreamResource(MongoDatabase database, String bucket, ObjectId objectId) {
    super((StreamSource) () -> {
        GridFSBucket gridFSBucket = GridFSBuckets.create(database, bucket);
        return gridFSBucket.openDownloadStream(objectId);
    }, gridFSFile(database, bucket, objectId).getFilename());
    this.database = database;
    this.bucket = bucket;
    this.objectId = objectId;
}
/**
 * Connect to MongoDB and get the GridFS chunk size
 * @param vertx the Vert.x instance
 * @param handler a handler that will be called with the chunk size
 */
private void getChunkSize(Vertx vertx, Handler<AsyncResult<Integer>> handler) {
    vertx.<Integer>executeBlocking(f -> {
        try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
            MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
            GridFSBucket gridFS = GridFSBuckets.create(db);
            f.complete(gridFS.getChunkSizeBytes());
        }
    }, handler);
}
@Override
protected void prepareData(TestContext context, Vertx vertx, String path,
        Handler<AsyncResult<String>> handler) {
    String filename = PathUtils.join(path, ID);
    vertx.<String>executeBlocking(f -> {
        try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
            MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
            GridFSBucket gridFS = GridFSBuckets.create(db);
            byte[] contents = CHUNK_CONTENT.getBytes(StandardCharsets.UTF_8);
            gridFS.uploadFromStream(filename, new ByteArrayInputStream(contents));
            f.complete(filename);
        }
    }, handler);
}
@Override
protected void validateAfterStoreDelete(TestContext context, Vertx vertx,
        String path, Handler<AsyncResult<Void>> handler) {
    vertx.executeBlocking(f -> {
        try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
            MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
            GridFSBucket gridFS = GridFSBuckets.create(db);

            GridFSFindIterable files = gridFS.find();
            context.assertTrue(Iterables.isEmpty(files));
        }
        f.complete();
    }, handler);
}
protected void initDB(String dbName) {
    // set up the persistence layer:
    // connect to the local MongoDB instance
    MongoClient m = new MongoClient();
    // get the DB with the given name
    MongoDatabase chatDB = m.getDatabase(dbName);
    // initialize our collections
    DocumentCollections.init(this, chatDB);
    // set up GridFS for storing files
    GridFSBucket fs = GridFSBuckets.create(chatDB, "persistedPages");
    // the base class UIEngine needs the GridFS bucket for persisting sessions
    super.initDB(chatDB, fs);
}
@Override
public OperationResult deleteFile(
        final Database db,
        final String dbName,
        final String bucketName,
        final BsonValue fileId,
        final String requestEtag,
        final boolean checkEtag) {
    final String bucket = extractBucketName(bucketName);

    GridFSBucket gridFSBucket = GridFSBuckets.create(
            db.getDatabase(dbName), bucket);

    GridFSFile file = gridFSBucket
            .find(eq("_id", fileId))
            .limit(1).iterator().tryNext();

    if (file == null) {
        return new OperationResult(HttpStatus.SC_NOT_FOUND);
    } else if (checkEtag) {
        Object oldEtag = file.getMetadata().get("_etag");

        if (oldEtag != null) {
            if (requestEtag == null) {
                return new OperationResult(HttpStatus.SC_CONFLICT, oldEtag);
            } else if (!Objects.equals(oldEtag.toString(), requestEtag)) {
                return new OperationResult(
                        HttpStatus.SC_PRECONDITION_FAILED, oldEtag);
            }
        }
    }

    gridFSBucket.delete(fileId);
    return new OperationResult(HttpStatus.SC_NO_CONTENT);
}
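For the optimistic-locking branch above to ever fire, the upload side has to stamp an _etag into the file metadata. A hedged sketch of that write path (the helper name and the ObjectId-as-etag choice are illustrative, not taken from the original code):

import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.model.GridFSUploadOptions;
import org.bson.Document;
import org.bson.types.ObjectId;

import java.io.InputStream;

public class EtaggedUpload {
    /** Uploads a file and stamps a fresh ObjectId into its metadata as the _etag. */
    public static ObjectId uploadWithEtag(GridFSBucket bucket, String filename,
                                          InputStream content) {
        Document metadata = new Document("_etag", new ObjectId());
        GridFSUploadOptions options = new GridFSUploadOptions().metadata(metadata);
        return bucket.uploadFromStream(filename, content, options);
    }
}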
@Override
public void handleRequest(
        HttpServerExchange exchange,
        RequestContext context) throws Exception {
    if (context.isInError()) {
        next(exchange, context);
        return;
    }

    LOGGER.trace("GET " + exchange.getRequestURL());

    final String bucket = extractBucketName(context.getCollectionName());
    GridFSBucket gridFSBucket = GridFSBuckets.create(
            MongoDBClientSingleton.getInstance().getClient()
                    .getDatabase(context.getDBName()),
            bucket);

    GridFSFile dbsfile = gridFSBucket
            .find(eq("_id", context.getDocumentId()))
            .limit(1).iterator().tryNext();

    if (dbsfile == null) {
        fileNotFound(context, exchange);
    } else if (!checkEtag(exchange, dbsfile)) {
        sendBinaryContent(context, gridFSBucket, dbsfile, exchange);
    }

    next(exchange, context);
}
@Override
public void deleteAllDocuments() {
    GridFSBucket gridFS = createGridFSConnection();
    gridFS.drop();

    MongoDatabase db = mongoClient.getDatabase(database);
    MongoCollection<Document> coll = db.getCollection(rawCollectionName);
    coll.deleteMany(new Document());
}
@Override
public List<AssociatedDocument> getAssociatedDocuments(String uniqueId, FetchType fetchType)
        throws Exception {
    GridFSBucket gridFS = createGridFSConnection();
    List<AssociatedDocument> assocDocs = new ArrayList<>();
    if (!FetchType.NONE.equals(fetchType)) {
        GridFSFindIterable files = gridFS.find(
                new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId));
        for (GridFSFile file : files) {
            AssociatedDocument ad = loadGridFSToAssociatedDocument(gridFS, file, fetchType);
            assocDocs.add(ad);
        }
    }
    return assocDocs;
}
@Override
public AssociatedDocument getAssociatedDocument(String uniqueId, String fileName,
        FetchType fetchType) throws Exception {
    GridFSBucket gridFS = createGridFSConnection();
    if (!FetchType.NONE.equals(fetchType)) {
        GridFSFile file = gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY,
                getGridFsId(uniqueId, fileName))).first();
        if (null != file) {
            return loadGridFSToAssociatedDocument(gridFS, file, fetchType);
        }
    }
    return null;
}
private AssociatedDocument loadGridFSToAssociatedDocument(GridFSBucket gridFS,
        GridFSFile file, FetchType fetchType) throws IOException {
    AssociatedDocument.Builder aBuilder = AssociatedDocument.newBuilder();
    aBuilder.setFilename(file.getFilename());

    Document metadata = file.getMetadata();

    boolean compressed = false;
    if (metadata.containsKey(COMPRESSED_FLAG)) {
        compressed = (boolean) metadata.remove(COMPRESSED_FLAG);
    }

    long timestamp = (long) metadata.remove(TIMESTAMP);

    aBuilder.setCompressed(compressed);
    aBuilder.setTimestamp(timestamp);
    aBuilder.setDocumentUniqueId((String) metadata.remove(DOCUMENT_UNIQUE_ID_KEY));

    for (String field : metadata.keySet()) {
        aBuilder.addMetadata(Metadata.newBuilder().setKey(field)
                .setValue((String) metadata.get(field)));
    }

    if (FetchType.FULL.equals(fetchType)) {
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        gridFS.downloadToStream(file.getObjectId(), byteArrayOutputStream);

        byte[] bytes = byteArrayOutputStream.toByteArray();
        if (null != bytes) {
            if (compressed) {
                bytes = CommonCompression.uncompressZlib(bytes);
            }
            aBuilder.setDocument(ByteString.copyFrom(bytes));
        }
    }
    aBuilder.setIndexName(indexName);
    return aBuilder.build();
}
@Override
public List<String> getAssociatedFilenames(String uniqueId) throws Exception {
    GridFSBucket gridFS = createGridFSConnection();
    ArrayList<String> fileNames = new ArrayList<>();
    gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId))
            .forEach((Consumer<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile ->
                    fileNames.add(gridFSFile.getFilename()));
    return fileNames;
}
@Override
public void deleteAssociatedDocument(String uniqueId, String fileName) {
    GridFSBucket gridFS = createGridFSConnection();
    gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY,
            getGridFsId(uniqueId, fileName)))
            .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile ->
                    gridFS.delete(gridFSFile.getObjectId()));
}
@Override
public GridFSFile loadFileMetadata(SmofGridRef ref) {
    final GridFSBucket bucket = pool.getBucket(ref.getBucketName());
    return bucket.find(Filters.eq(Element.ID, ref.getId())).first();
}
public void loadBucket(String bucketName) {
    final GridFSBucket bucket = GridFSBuckets.create(database, bucketName);
    dispatcher.put(bucketName, bucket);
}
@Override
public void put(String bucketName, GridFSBucket bucket) {
    collections.put(bucketName, bucket);
}
@Override
public void dropBucket(String bucketName) {
    final GridFSBucket bucket = collections.getBucket(bucketName);
    bucket.drop();
    collections.dropBucket(bucketName);
}
@Override
public void put(String bucketName, GridFSBucket bucket) {
    fsBuckets.put(bucketName, bucket);
}
@Override
public GridFSBucket getBucket(String bucketName) {
    return fsBuckets.get(bucketName);
}
@Override
public void dropAllBuckets() {
    fsBuckets.values().forEach(GridFSBucket::drop);
    fsBuckets.clear();
}
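Taken together, put, getBucket, dropBucket, and dropAllBuckets suggest a map-backed pool. A minimal sketch of one possible implementation, with lazy creation added as an assumption (the original may require explicit loadBucket calls instead):

import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class SimpleBucketPool {
    private final MongoDatabase database;
    private final Map<String, GridFSBucket> buckets = new ConcurrentHashMap<>();

    public SimpleBucketPool(MongoDatabase database) {
        this.database = database;
    }

    /** Returns the cached bucket, creating and caching it on first access. */
    public GridFSBucket getBucket(String bucketName) {
        return buckets.computeIfAbsent(bucketName,
                name -> GridFSBuckets.create(database, name));
    }

    /** Drops every bucket's backing collections and empties the pool. */
    public void dropAllBuckets() {
        buckets.values().forEach(GridFSBucket::drop);
        buckets.clear();
    }
}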
/**
 * The actual test method. Writes <code>size</code> bytes of test data to a
 * temporary file in GridFS and reads it again through
 * {@link MongoDBChunkReadStream}. Finally, checks if the file has been read correctly.
 * @param size the number of bytes to write/read
 * @param chunkSize the GridFS chunk size
 * @param vertx the Vert.x instance
 * @param context the current test context
 */
private void doRead(int size, int chunkSize, Vertx vertx, TestContext context) {
    Async async = context.async();

    // create a test file in GridFS
    prepareData("test_" + size + ".bin", size, vertx, context.asyncAssertSuccess(filename -> {
        // connect to GridFS
        com.mongodb.async.client.MongoClient client = createAsyncClient();
        com.mongodb.async.client.MongoDatabase db =
                client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
        com.mongodb.async.client.gridfs.GridFSBucket gridfs =
                com.mongodb.async.client.gridfs.GridFSBuckets.create(db);

        // open the test file
        GridFSDownloadStream is = gridfs.openDownloadStream(filename);
        MongoDBChunkReadStream rs = new MongoDBChunkReadStream(is, size, chunkSize,
                vertx.getOrCreateContext());

        // read from the test file
        rs.exceptionHandler(context::fail);

        int[] pos = { 0 };

        rs.endHandler(v -> {
            // the file has been completely read
            rs.close();
            context.assertEquals(size, pos[0]);
            async.complete();
        });

        rs.handler(buf -> {
            // check number of read bytes
            if (size - pos[0] > chunkSize) {
                context.assertEquals(chunkSize, buf.length());
            } else {
                context.assertEquals(size - pos[0], buf.length());
            }

            // check file contents
            for (int i = pos[0]; i < pos[0] + buf.length(); ++i) {
                context.assertEquals((byte)(i & 0xFF), buf.getByte(i - pos[0]));
            }

            pos[0] += buf.length();
        });
    }));
}
public void initDB(MongoDatabase db, GridFSBucket gridFS) {
    this.gridFS = gridFS;
    persistedPages = new PersistedPages(engine, db, gridFS);
    super.initDB(db);
}
public PersistedPages(UIEngine engine, MongoDatabase db, GridFSBucket gridFS) {
    super(engine, db, "persistedPages");
    this.gridFS = gridFS;
    ensureIndex(false, true, USERID, CREATION_TIME);
}
public GridFSBucket createBucket(String bucketName) {
    return GridFSBuckets.create(this.getMongoDatabase(), bucketName);
}
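Buckets created this way use the driver's default chunk size; it can be overridden per bucket or per upload. A brief sketch of both options (the 1 MiB value is just an example):

import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;
import com.mongodb.client.gridfs.model.GridFSUploadOptions;

public class ChunkSizeExamples {
    /** A bucket whose uploads default to 1 MiB chunks. */
    public static GridFSBucket oneMiBBucket(MongoDatabase db, String name) {
        return GridFSBuckets.create(db, name).withChunkSizeBytes(1024 * 1024);
    }

    /** A per-upload override via GridFSUploadOptions. */
    public static GridFSUploadOptions oneMiBUpload() {
        return new GridFSUploadOptions().chunkSizeBytes(1024 * 1024);
    }
}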
private void sendBinaryContent(
        final RequestContext context,
        final GridFSBucket gridFSBucket,
        final GridFSFile file,
        final HttpServerExchange exchange)
        throws IOException {
    LOGGER.trace("Filename = {}", file.getFilename());
    LOGGER.trace("Content length = {}", file.getLength());

    if (file.getMetadata() != null
            && file.getMetadata().get("contentType") != null) {
        exchange.getResponseHeaders().put(Headers.CONTENT_TYPE,
                file.getMetadata().get("contentType").toString());
    } else {
        exchange.getResponseHeaders().put(
                Headers.CONTENT_TYPE,
                APPLICATION_OCTET_STREAM);
    }

    exchange.getResponseHeaders().put(
            Headers.CONTENT_LENGTH,
            file.getLength());

    exchange.getResponseHeaders().put(
            Headers.CONTENT_DISPOSITION,
            String.format("inline; filename=\"%s\"", extractFilename(file)));

    exchange.getResponseHeaders().put(
            Headers.CONTENT_TRANSFER_ENCODING,
            CONTENT_TRANSFER_ENCODING_BINARY);

    ResponseHelper.injectEtagHeader(exchange, file.getMetadata());

    context.setResponseStatusCode(HttpStatus.SC_OK);

    gridFSBucket.downloadToStream(
            file.getId(),
            exchange.getOutputStream());
}
private GridFSBucket createGridFSConnection() {
    MongoDatabase db = mongoClient.getDatabase(database);
    return GridFSBuckets.create(db, ASSOCIATED_FILES);
}
public void getAssociatedDocuments(OutputStream outputstream, Document filter)
        throws IOException {
    Charset charset = StandardCharsets.UTF_8;
    GridFSBucket gridFS = createGridFSConnection();
    GridFSFindIterable gridFSFiles = gridFS.find(filter);
    outputstream.write("{\n".getBytes(charset));
    outputstream.write(" \"associatedDocs\": [\n".getBytes(charset));

    boolean first = true;
    for (GridFSFile gridFSFile : gridFSFiles) {
        if (first) {
            first = false;
        } else {
            outputstream.write(",\n".getBytes(charset));
        }

        Document metadata = gridFSFile.getMetadata();

        String uniqueId = metadata.getString(DOCUMENT_UNIQUE_ID_KEY);
        String uniqueIdKeyValue = " { \"uniqueId\": \"" + uniqueId + "\", ";
        outputstream.write(uniqueIdKeyValue.getBytes(charset));

        String filename = gridFSFile.getFilename();
        String filenameKeyValue = "\"filename\": \"" + filename + "\", ";
        outputstream.write(filenameKeyValue.getBytes(charset));

        Date uploadDate = gridFSFile.getUploadDate();
        String uploadDateKeyValue = "\"uploadDate\": {\"$date\":" + uploadDate.getTime() + "}";
        outputstream.write(uploadDateKeyValue.getBytes(charset));

        metadata.remove(TIMESTAMP);
        metadata.remove(COMPRESSED_FLAG);
        metadata.remove(DOCUMENT_UNIQUE_ID_KEY);
        metadata.remove(FILE_UNIQUE_ID_KEY);

        if (!metadata.isEmpty()) {
            String metaJson = metadata.toJson();
            String metaString = ", \"meta\": " + metaJson;
            outputstream.write(metaString.getBytes(charset));
        }
        outputstream.write(" }".getBytes(charset));
    }
    outputstream.write("\n ]\n}".getBytes(charset));
}
@Override
public void deleteAssociatedDocuments(String uniqueId) {
    GridFSBucket gridFS = createGridFSConnection();
    gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId))
            .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile ->
                    gridFS.delete(gridFSFile.getObjectId()));
}
void put(String bucketName, GridFSBucket bucket);
GridFSBucket getBucket(String bucketName);
public abstract GridFSBucket getFileSystem();
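A minimal sketch of what a concrete override of getFileSystem() could look like; the class name and the choice of the default "fs" bucket are hypothetical:

import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;

public class DefaultFileSystemProvider {
    private final MongoDatabase database;

    public DefaultFileSystemProvider(MongoDatabase database) {
        this.database = database;
    }

    /** A concrete getFileSystem() backed by the default "fs" bucket. */
    public GridFSBucket getFileSystem() {
        return GridFSBuckets.create(database);
    }
}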