Java class com.amazonaws.services.s3.model.ObjectMetadata usage examples
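Nearly every snippet on this page follows the same pattern: create an ObjectMetadata, describe the payload (content length, content type, server-side encryption, user metadata), and attach it to a PutObjectRequest. The following minimal sketch condenses that common pattern; the bucket name, key, and client construction are illustrative assumptions, not taken from any of the projects below.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class ObjectMetadataSketch {
    public static void main(String[] args) {
        // Hypothetical bucket and key; real values would come from your own configuration.
        String bucket = "example-bucket";
        String key = "example/key.txt";
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

        // The recurring pattern: describe the payload before streaming it to S3.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);             // required for stream uploads
        metadata.setContentType("text/plain");                 // optional content type
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); // optional SSE
        metadata.addUserMetadata("origin", "sketch");          // optional x-amz-meta-* entry

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        s3.putObject(new PutObjectRequest(bucket, key,
                new ByteArrayInputStream(payload), metadata));
    }
}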
Project: s3-proxy-chunk-upload
File: AssetsS3.java
/**
 * Uploads a file to the assets bucket.
 *
 * @param fileName File name
 * @param file File
 * @param contentType Content type for file
 * @return true if the upload succeeded, false otherwise
 */
public static boolean uploadFile(String fileName, File file, String contentType) {
    try {
        if (S3Module.amazonS3 != null) {
            String bucket = S3Module.s3Bucket;
            ObjectMetadata metaData = new ObjectMetadata();
            if (contentType != null) {
                metaData.setContentType(contentType);
            }
            PutObjectRequest putObj = new PutObjectRequest(bucket, fileName, file);
            putObj.setMetadata(metaData);
            putObj.withCannedAcl(CannedAccessControlList.PublicRead);
            S3Module.amazonS3.putObject(putObj);
            return true;
        } else {
            Logger.error("Could not save because amazonS3 was null");
            return false;
        }
    } catch (Exception e) {
        Logger.error("S3 Upload - " + e.getMessage());
        return false;
    }
}
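A hypothetical call site for this helper; the file path, key, and content type below are made up for illustration:

File avatar = new File("/tmp/avatar.png");   // illustrative local file
boolean ok = AssetsS3.uploadFile("avatars/user-42.png", avatar, "image/png");
if (!ok) {
    Logger.error("Avatar upload failed");
}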
Project: S3Mock
File: AmazonClientUploadIT.java
/**
 * Tests if the metadata of an existing file can be retrieved.
 */
@Test
public void shouldGetObjectMetadata() {
    final String nonExistingFileName = "nonExistingFileName";
    final File uploadFile = new File(UPLOAD_FILE_NAME);
    s3Client.createBucket(BUCKET_NAME);
    final PutObjectResult putObjectResult =
        s3Client.putObject(new PutObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME, uploadFile));
    final ObjectMetadata metadataExisting =
        s3Client.getObjectMetadata(BUCKET_NAME, UPLOAD_FILE_NAME);
    assertThat("The ETags should be identical!", metadataExisting.getETag(),
        is(putObjectResult.getETag()));
    thrown.expect(AmazonS3Exception.class);
    thrown.expectMessage(containsString("Status Code: 404"));
    s3Client.getObjectMetadata(BUCKET_NAME, nonExistingFileName);
}
Project: qpp-conversion-tool
File: StorageServiceImpl.java
/**
 * Stores the {@link InputStream} as an object in the S3 bucket.
 *
 * @param keyName The requested key name for the object.
 * @param inStream The {@link InputStream} to write out to an object in S3.
 * @param size The size of the {@link InputStream}.
 * @return A {@link CompletableFuture} that will eventually contain the S3 object key.
 */
@Override
public CompletableFuture<String> store(String keyName, InputStream inStream, long size) {
    final String bucketName = environment.getProperty(Constants.BUCKET_NAME_ENV_VARIABLE);
    final String kmsKey = environment.getProperty(Constants.KMS_KEY_ENV_VARIABLE);
    if (Strings.isNullOrEmpty(bucketName) || Strings.isNullOrEmpty(kmsKey)) {
        API_LOG.warn("No bucket name or KMS key specified.");
        return CompletableFuture.completedFuture("");
    }
    ObjectMetadata s3ObjectMetadata = new ObjectMetadata();
    s3ObjectMetadata.setContentLength(size);
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, keyName, inStream, s3ObjectMetadata)
        .withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(kmsKey));
    API_LOG.info("Writing object {} to S3 bucket {}", keyName, bucketName);
    return actOnItem(putObjectRequest);
}
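A sketch of how a caller might consume the returned CompletableFuture; the storageService variable and the follow-up logging are illustrative, not taken from the project:

storageService.store("submissions/qpp-123.xml", inputStream, contentLength)   // inputStream/contentLength are assumed to exist
        .thenAccept(objectKey -> API_LOG.info("Stored submission under S3 key {}", objectKey))
        .exceptionally(throwable -> {
            API_LOG.error("S3 store failed", throwable);
            return null;
        });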
Project: elasticsearch_my
File: S3BlobContainer.java
@Override
public void move(String sourceBlobName, String targetBlobName) throws IOException {
    try {
        CopyObjectRequest request = new CopyObjectRequest(blobStore.bucket(), buildKey(sourceBlobName),
            blobStore.bucket(), buildKey(targetBlobName));
        if (blobStore.serverSideEncryption()) {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            request.setNewObjectMetadata(objectMetadata);
        }
        SocketAccess.doPrivilegedVoid(() -> {
            blobStore.client().copyObject(request);
            blobStore.client().deleteObject(blobStore.bucket(), buildKey(sourceBlobName));
        });
    } catch (AmazonS3Exception e) {
        throw new IOException(e);
    }
}
Project: S3Mock
File: AmazonClientUploadIT.java
/**
 * Puts an object, copies that object to a new bucket, downloads the object from the new
 * bucket, and compares checksums of the original and the copied object.
 *
 * @throws Exception if an Exception occurs
 */
@Test
public void shouldCopyObjectEncrypted() throws Exception {
    final File uploadFile = new File(UPLOAD_FILE_NAME);
    final String sourceKey = UPLOAD_FILE_NAME;
    final String destinationBucketName = "destinationBucket";
    final String destinationKey = "copyOf/" + sourceKey;
    s3Client.putObject(new PutObjectRequest(BUCKET_NAME, sourceKey, uploadFile));
    final CopyObjectRequest copyObjectRequest =
        new CopyObjectRequest(BUCKET_NAME, sourceKey, destinationBucketName, destinationKey);
    copyObjectRequest.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(TEST_ENC_KEYREF));
    final CopyObjectResult copyObjectResult = s3Client.copyObject(copyObjectRequest);
    final ObjectMetadata metadata =
        s3Client.getObjectMetadata(destinationBucketName, destinationKey);
    final InputStream uploadFileIS = new FileInputStream(uploadFile);
    final String uploadHash = HashUtil.getDigest(TEST_ENC_KEYREF, uploadFileIS);
    assertThat("ETag should match", copyObjectResult.getETag(), is(uploadHash));
    assertThat("Files should have the same length", metadata.getContentLength(),
        is(uploadFile.length()));
}
Project: gradle-s3-build-cache
File: AwsS3BuildCacheService.java
@Override
public void store(BuildCacheKey key, BuildCacheEntryWriter writer) {
    logger.info("Start storing cache entry '{}' in S3 bucket", key.getHashCode());
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentType(BUILD_CACHE_CONTENT_TYPE);
    try (ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        writer.writeTo(os);
        meta.setContentLength(os.size());
        try (InputStream is = new ByteArrayInputStream(os.toByteArray())) {
            PutObjectRequest request = getPutObjectRequest(key, meta, is);
            if (this.reducedRedundancy) {
                request.withStorageClass(StorageClass.ReducedRedundancy);
            }
            s3.putObject(request);
        }
    } catch (IOException e) {
        throw new BuildCacheException("Error while storing cache object in S3 bucket", e);
    }
}
Project: S3Mock
File: AmazonClientUploadIT.java
/**
 * Tests if an object can be uploaded with KMS encryption.
 */
@Test
public void shouldUploadWithEncryption() {
    final File uploadFile = new File(UPLOAD_FILE_NAME);
    final String objectKey = UPLOAD_FILE_NAME;
    s3Client.createBucket(BUCKET_NAME);
    final PutObjectRequest putObjectRequest =
        new PutObjectRequest(BUCKET_NAME, objectKey, uploadFile);
    putObjectRequest.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(TEST_ENC_KEYREF));
    s3Client.putObject(putObjectRequest);
    final GetObjectMetadataRequest getObjectMetadataRequest =
        new GetObjectMetadataRequest(BUCKET_NAME, objectKey);
    final ObjectMetadata objectMetadata = s3Client.getObjectMetadata(getObjectMetadataRequest);
    assertThat(objectMetadata.getContentLength(), is(uploadFile.length()));
}
Project: urlshortner
File: UrlShortnerService.java
/**
 * Deletes the stored URL for the given code.
 *
 * @param code the short URL code
 */
public void deleteShortUrl(String code) {
    try {
        // get the object
        ObjectMetadata metaData = this.s3Client.getObjectMetadata(this.bucket, code);
        String url = metaData.getUserMetaDataOf("url");
        logger.info("The url to be deleted {}", url);
        this.s3Client.deleteObject(this.bucket, code);
        this.s3Client.deleteObject(this.bucket + "-dummy", code + Base64.encodeBase64String(url.getBytes()));
    } catch (AmazonS3Exception ex) {
        if (ex.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
            return;
        }
        logger.warn("Unable to get object status", ex);
        throw ex;
    }
}
Project: nexus-blobstore-s3
File: S3BlobStore.java
@Override
@Guarded(by = STARTED)
public Blob create(final InputStream blobData, final Map<String, String> headers) {
    checkNotNull(blobData);
    return create(headers, destination -> {
        try (InputStream data = blobData) {
            MetricsInputStream input = new MetricsInputStream(data);
            TransferManager transferManager = new TransferManager(s3);
            transferManager.upload(getConfiguredBucket(), destination, input, new ObjectMetadata())
                .waitForCompletion();
            return input.getMetrics();
        } catch (InterruptedException e) {
            throw new BlobStoreException("error uploading blob", e, null);
        }
    });
}
Project: hollow-reference-implementation
File: S3Publisher.java
/**
 * Write a list of all of the state versions to S3.
 * @param newVersion the version to add to the snapshot index
 */
private synchronized void updateSnapshotIndex(Long newVersion) {
    /// insert the new version into the list
    int idx = Collections.binarySearch(snapshotIndex, newVersion);
    int insertionPoint = Math.abs(idx) - 1;
    snapshotIndex.add(insertionPoint, newVersion);
    /// build a binary representation of the list -- gap encoded variable-length integers
    byte[] idxBytes = buidGapEncodedVarIntSnapshotIndex();
    /// indicate the Content-Length
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader("Content-Length", (long)idxBytes.length);
    /// upload the new file content.
    try (InputStream is = new ByteArrayInputStream(idxBytes)) {
        Upload upload = s3TransferManager.upload(bucketName, getSnapshotIndexObjectName(blobNamespace), is, metadata);
        upload.waitForCompletion();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
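The comment above mentions a gap-encoded variable-length integer representation. A minimal, self-contained sketch of that idea (an illustration, not the project's buidGapEncodedVarIntSnapshotIndex implementation) stores each version as the difference to its predecessor, using seven data bits per byte with the high bit as a continuation flag:

import java.io.ByteArrayOutputStream;
import java.util.List;

final class GapEncodedVarIntSketch {
    static byte[] encode(List<Long> sortedVersions) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        long previous = 0;
        for (long version : sortedVersions) {
            long gap = version - previous;   // gaps between versions are small, so they encode compactly
            previous = version;
            while ((gap & ~0x7FL) != 0) {    // emit the low 7 bits and set the continuation bit
                out.write((int) ((gap & 0x7F) | 0x80));
                gap >>>= 7;
            }
            out.write((int) gap);            // final byte, continuation bit clear
        }
        return out.toByteArray();
    }
}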
Project: hadoop
File: S3AFileSystem.java
private void createEmptyObject(final String bucketName, final String objectName)
        throws AmazonClientException, AmazonServiceException {
    final InputStream im = new InputStream() {
        @Override
        public int read() throws IOException {
            return -1;
        }
    };
    final ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(0L);
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
    putObjectRequest.setCannedAcl(cannedACL);
    s3.putObject(putObjectRequest);
    statistics.incrementWriteOps(1);
}
Project: ibm-cos-sdk-java
File: AmazonS3Client.java
@Override
public PutObjectResult putObject(String bucketName, String key, String content)
        throws AmazonServiceException, SdkClientException {
    rejectNull(bucketName, "Bucket name must be provided");
    rejectNull(key, "Object key must be provided");
    rejectNull(content, "String content must be provided");
    byte[] contentBytes = content.getBytes(StringUtils.UTF8);
    InputStream is = new ByteArrayInputStream(contentBytes);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType("text/plain");
    metadata.setContentLength(contentBytes.length);
    return putObject(new PutObjectRequest(bucketName, key, is, metadata));
}
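A hypothetical use of this String convenience overload, assuming an AmazonS3 client named s3Client and an existing bucket (both illustrative):

PutObjectResult result = s3Client.putObject("example-bucket", "notes/hello.txt", "Hello, world!");
System.out.println("Stored object with ETag " + result.getETag());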
Project: ibm-cos-sdk-java
File: CopyCallable.java
private void populateMetadataWithEncryptionParams(ObjectMetadata source, ObjectMetadata destination) {
    Map<String, String> userMetadataSource = source.getUserMetadata();
    Map<String, String> userMetadataDestination = destination.getUserMetadata();
    String[] headersToCopy = { Headers.CRYPTO_CEK_ALGORITHM,
        Headers.CRYPTO_IV, Headers.CRYPTO_KEY, Headers.CRYPTO_KEY_V2,
        Headers.CRYPTO_KEYWRAP_ALGORITHM, Headers.CRYPTO_TAG_LENGTH,
        Headers.MATERIALS_DESCRIPTION,
        Headers.UNENCRYPTED_CONTENT_LENGTH,
        Headers.UNENCRYPTED_CONTENT_MD5 };
    if (userMetadataSource != null) {
        if (userMetadataDestination == null) {
            userMetadataDestination = new HashMap<String, String>();
            destination.setUserMetadata(userMetadataDestination);
        }
        String headerValue;
        for (String header : headersToCopy) {
            headerValue = userMetadataSource.get(header);
            if (headerValue != null) {
                userMetadataDestination.put(header, headerValue);
            }
        }
    }
}
Project: Reer
File: S3Client.java
public void put(InputStream inputStream, Long contentLength, URI destination) {
    checkRequiredJigsawModuleIsOnPath();
    try {
        S3RegionalResource s3RegionalResource = new S3RegionalResource(destination);
        String bucketName = s3RegionalResource.getBucketName();
        String s3BucketKey = s3RegionalResource.getKey();
        configureClient(s3RegionalResource);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(contentLength);
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, s3BucketKey, inputStream, objectMetadata);
        LOGGER.debug("Attempting to put resource:[{}] into s3 bucket [{}]", s3BucketKey, bucketName);
        amazonS3Client.putObject(putObjectRequest);
    } catch (AmazonClientException e) {
        throw ResourceExceptions.putFailed(destination, e);
    }
}
Project: pprxmtr
File: S3.java
public static void storeFileInBucket(String filename, InputStream is, ObjectMetadata metadata) {
    String bucketName = properties.getProperty(S3_BUCKET_NAME);
    try {
        LOG.info("Storing file {} in S3 bucket {}.", filename, bucketName);
        S3.putObject(bucketName, filename, is, metadata);
        S3.setObjectAcl(bucketName, filename, CannedAccessControlList.PublicRead);
    } catch (AmazonServiceException e) {
        LOG.error("Exception occurred when storing file in S3 bucket {}.", bucketName, e);
    }
}
Project: elasticsearch_my
File: DefaultS3OutputStream.java
protected String doInitialize(S3BlobStore blobStore, String bucketName, String blobName, boolean serverSideEncryption) {
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, blobName)
        .withCannedACL(blobStore.getCannedACL())
        .withStorageClass(blobStore.getStorageClass());
    if (serverSideEncryption) {
        ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setObjectMetadata(md);
    }
    return blobStore.client().initiateMultipartUpload(request).getUploadId();
}
Project: elasticsearch_my
File: MockAmazonS3.java
@Override
public ObjectMetadata getObjectMetadata(
        GetObjectMetadataRequest getObjectMetadataRequest)
        throws AmazonClientException, AmazonServiceException {
    String blobName = getObjectMetadataRequest.getKey();
    if (!blobs.containsKey(blobName)) {
        throw new AmazonS3Exception("[" + blobName + "] does not exist.");
    }
    return new ObjectMetadata(); // nothing is done with it
}
Project: ooso
File: CommonUtilitiesTest.java
@Test
public void storeObjectTest() throws Exception {
    S3Object object = s3Client.getObject(DUMMY_BUCKET_NAME, "pref1/dummy1");
    ObjectMetadata objectMetadata = object.getObjectMetadata();
    BufferedReader reader = new BufferedReader(new InputStreamReader(object.getObjectContent()));
    String storedContent = reader.readLine();
    reader.close();
    assertEquals(objectMetadata.getContentType(), Commons.TEXT_TYPE);
    assertEquals(objectMetadata.getContentLength(), KEY_CONTENT_MAPPING.get("pref1/dummy1").getBytes().length);
    assertEquals(storedContent, KEY_CONTENT_MAPPING.get("pref1/dummy1"));
}
Project: ibm-cos-sdk-java
File: S3CryptoModuleBase.java
protected final PutObjectRequest createInstructionPutRequest(
        String bucketName, String key, ContentCryptoMaterial cekMaterial) {
    byte[] bytes = cekMaterial.toJsonString(cryptoConfig.getCryptoMode())
        .getBytes(UTF8);
    InputStream is = new ByteArrayInputStream(bytes);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bytes.length);
    metadata.addUserMetadata(Headers.CRYPTO_INSTRUCTION_FILE, "");
    InstructionFileId ifileId = new S3ObjectId(bucketName, key)
        .instructionFileId();
    return new PutObjectRequest(ifileId.getBucket(), ifileId.getKey(),
        is, metadata);
}
Project: S3Decorators
File: FailsafeS3DecoratorTest.java
@Override
public ObjectMetadata getObjectMetadata(String bucketName, String key) throws AmazonServiceException {
    AmazonS3Exception exception = new AmazonS3Exception("Internal Error");
    exception.setStatusCode(500);
    exception.setErrorType(ErrorType.Service);
    throw exception;
}
Project: radosgw-admin4j
File: RgwAdminImplTest.java
@Test
public void removeBucket() throws Exception {
    String bucketName = "testremovebkbk" + UUID.randomUUID().toString();
    // remove a bucket that does not exist
    Thread.sleep(3000);
    RGW_ADMIN.removeBucket(bucketName);
    testWithAUser(
        v -> {
            String userId = "testremovebk" + UUID.randomUUID().toString();
            User response = RGW_ADMIN.createUser(userId);
            AmazonS3 s3 =
                createS3(
                    response.getS3Credentials().get(0).getAccessKey(),
                    response.getS3Credentials().get(0).getSecretKey());
            s3.createBucket(bucketName);
            ByteArrayInputStream input = new ByteArrayInputStream("Hello World!".getBytes());
            s3.putObject(bucketName, "hello.txt", input, new ObjectMetadata());
            RGW_ADMIN.removeBucket(bucketName);
            try {
                s3.headBucket(new HeadBucketRequest(bucketName));
                fail();
            } catch (Exception e) {
                assertTrue("Not Found".equals(((AmazonS3Exception) e).getErrorMessage()));
            }
        });
}
Project: S3Decorators
File: FailsafeS3DecoratorTest.java
@Override
public ObjectMetadata getObjectMetadata(String bucketName, String key) throws AmazonServiceException {
    AmazonS3Exception exception = new AmazonS3Exception("Not authed");
    exception.setStatusCode(403);
    exception.setErrorType(ErrorType.Client);
    throw exception;
}
Project: S3Decorators
File: HystrixS3DecoratorTest.java
@Override
public ObjectMetadata getObjectMetadata(String bucketName, String key) throws AmazonServiceException {
    AmazonS3Exception exception = new AmazonS3Exception("Bad Auth");
    exception.setStatusCode(403);
    exception.setErrorType(ErrorType.Client);
    throw exception;
}
Project: S3Mock
File: AmazonClientUploadIT.java
/**
 * Verifies multipart copy.
 *
 * @throws InterruptedException if the transfer is interrupted
 */
@Test
public void multipartCopy() throws InterruptedException, IOException, NoSuchAlgorithmException {
    final int contentLen = 3 * _1MB;
    final ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(contentLen);
    final String assumedSourceKey = UUID.randomUUID().toString();
    final Bucket sourceBucket = s3Client.createBucket(UUID.randomUUID().toString());
    final Bucket targetBucket = s3Client.createBucket(UUID.randomUUID().toString());
    final TransferManager transferManager = createTransferManager(_2MB, _1MB, _2MB, _1MB);
    final InputStream sourceInputStream = randomInputStream(contentLen);
    final Upload upload = transferManager
        .upload(sourceBucket.getName(), assumedSourceKey,
            sourceInputStream, objectMetadata);
    final UploadResult uploadResult = upload.waitForUploadResult();
    assertThat(uploadResult.getKey(), is(assumedSourceKey));
    final String assumedDestinationKey = UUID.randomUUID().toString();
    final Copy copy =
        transferManager.copy(sourceBucket.getName(), assumedSourceKey, targetBucket.getName(),
            assumedDestinationKey);
    final CopyResult copyResult = copy.waitForCopyResult();
    assertThat(copyResult.getDestinationKey(), is(assumedDestinationKey));
    final S3Object copiedObject = s3Client.getObject(targetBucket.getName(), assumedDestinationKey);
    assertThat("Hashes for source and target S3Object do not match.",
        HashUtil.getDigest(copiedObject.getObjectContent()) + "-1",
        is(uploadResult.getETag()));
}
Project: circus-train
File: CopyMapper.java
private S3UploadDescriptor describeUpload(FileStatus sourceFileStatus, Path targetPath) throws IOException {
    URI targetUri = targetPath.toUri();
    String bucketName = PathUtil.toBucketName(targetUri);
    String key = PathUtil.toBucketKey(targetUri);
    Path sourcePath = sourceFileStatus.getPath();
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(sourceFileStatus.getLen());
    if (conf.getBoolean(ConfigurationVariable.S3_SERVER_SIDE_ENCRYPTION)) {
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    return new S3UploadDescriptor(sourcePath, bucketName, key, metadata);
}
Project: yass-android
File: YassBroadcastReceiver.java
@Override
public Long doInBackground(Void... unused) {
    Log.d(TAG, "Upload Blob Task:");
    MainActivity.YassPreferences preferences = new MainActivity.YassPreferences(context);
    ConnectivityManager cm =
        (ConnectivityManager) context.getSystemService(Context.CONNECTIVITY_SERVICE);
    NetworkInfo activeNetwork = cm.getActiveNetworkInfo();
    boolean isConnected = activeNetwork != null &&
        activeNetwork.isConnectedOrConnecting();
    if (!isConnected) {
        Log.d(TAG, "Skipping camera upload because network is disconnected");
        return null;
    }
    if (preferences.cameraUploadOnlyOnWifi && activeNetwork.getType() != ConnectivityManager.TYPE_WIFI) {
        Log.d(TAG, "Skipping camera upload because Wifi is disconnected");
        return null;
    }
    long serial;
    Uri uri;
    String fileName;
    long fileSize;
    SQLiteDatabase db = new YassDbHelper(context).getReadableDatabase();
    try {
        String[] projection = {
            "serial",
            "file_uri",
            "file_name",
            "file_size"
        };
        String selection = null;
        String[] selectionArgs = null;
        String groupBy = null;
        String having = null;
        String orderBy = "serial ASC";
        String limit = "1";
        Cursor cursor = db.query("camera_uploads", projection, selection, selectionArgs,
            groupBy, having, orderBy, limit);
        try {
            if (!cursor.moveToNext()) {
                Log.d(TAG, "Did not find image to upload");
                return null;
            }
            serial = cursor.getLong(cursor.getColumnIndexOrThrow("serial"));
            uri = Uri.parse(cursor.getString(cursor.getColumnIndexOrThrow("file_uri")));
            fileName = cursor.getString(cursor.getColumnIndexOrThrow("file_name"));
            fileSize = cursor.getLong(cursor.getColumnIndexOrThrow("file_size"));
        } finally {
            cursor.close();
        }
    } finally {
        db.close();
    }
    Log.d(TAG, "Found image to upload: " + fileName);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(fileSize);
    metadata.setContentType(context.getContentResolver().getType(uri));
    PutObjectResult result;
    try (InputStream is = context.getContentResolver().openInputStream(uri)) {
        // TODO: limited to 5 GB
        result = MainActivity.getS3Client(preferences).putObject(preferences.bucketName,
            "Camera Uploads/" + fileName, is, metadata);
    } catch (AmazonClientException | IOException e) {
        Log.e(TAG, "Could not upload file: " + e.getMessage());
        return null;
    }
    return serial;
}
Project: ibm-cos-sdk-java
File: S3CryptoModuleBase.java
/**
 * Returns the plaintext length from the request and metadata; or -1 if
 * unknown.
 */
protected final long plaintextLength(AbstractPutObjectRequest request,
        ObjectMetadata metadata) {
    if (request.getFile() != null) {
        return request.getFile().length();
    } else if (request.getInputStream() != null
            && metadata.getRawMetadataValue(Headers.CONTENT_LENGTH) != null) {
        return metadata.getContentLength();
    }
    return -1;
}
Project: urlshortner
File: UrlShortnerService.java
private void createDummyRecord(String url, String code) {
    // Add a dummy pointer object
    byte[] dummyFileContentBytes = code.getBytes(StandardCharsets.UTF_8);
    InputStream fileInputStream = new ByteArrayInputStream(dummyFileContentBytes);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType("text/html");
    metadata.setContentLength(dummyFileContentBytes.length);
    PutObjectRequest putObjectRequest = new PutObjectRequest(this.bucket + "-dummy", code
        + Base64.encodeBase64String(url.getBytes()), fileInputStream, metadata);
    this.s3Client.putObject(putObjectRequest);
}
Project: aws-photosharing-example
File: UploadThread.java
@Override
public void run() {
    ObjectMetadata meta_data = new ObjectMetadata();
    if (p_content_type != null)
        meta_data.setContentType(p_content_type);
    meta_data.setContentLength(p_size);
    PutObjectRequest putObjectRequest = new PutObjectRequest(p_bucket_name, p_s3_key, p_file_stream, meta_data);
    putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    PutObjectResult res = s3Client.putObject(putObjectRequest);
}
Project: opentest
File: GetS3Metadata.java
@Override
public void run() {
    super.run();
    String awsCredentialsProfile = this.readStringArgument("awsProfile", "default");
    String bucket = this.readStringArgument("bucket");
    String objectKey = this.readStringArgument("objectKey");
    AmazonS3 s3Client = new AmazonS3Client(new ProfileCredentialsProvider(awsCredentialsProfile));
    ObjectMetadata metadata = s3Client.getObjectMetadata(
        new GetObjectMetadataRequest(bucket, objectKey));
    try {
        Date expirationTime = metadata.getExpirationTime();
        if (expirationTime != null) {
            this.writeOutput("expirationTime", metadata.getExpirationTime().getTime());
        } else {
            this.writeOutput("expirationTime", null);
        }
        this.writeOutput("lastModified", metadata.getLastModified().getTime());
        this.writeOutput("userMetadata", metadata.getUserMetadata());
        this.writeOutput("size", metadata.getContentLength());
        this.writeOutput("storageClass", metadata.getStorageClass());
        this.writeOutput("versionId", metadata.getVersionId());
    } catch (Exception ex) {
        throw new RuntimeException(String.format(
            "Failed to get object metadata for object key %s in bucket %s",
            objectKey,
            bucket), ex);
    }
}
Project: nexus-blobstore-s3
File: S3PropertiesFile.java
public void store() throws IOException {
    log.debug("Storing: {}/{}", bucket, key);
    ByteArrayOutputStream bufferStream = new ByteArrayOutputStream();
    store(bufferStream, null);
    byte[] buffer = bufferStream.toByteArray();
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(buffer.length);
    s3.putObject(bucket, key, new ByteArrayInputStream(buffer), metadata);
}
Project: s3-channels
File: S3ReadableObjectChannelBuilderTest.java
@BeforeAll
static void initMock() {
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentLength(100);
    when(amazonS3.getObjectMetadata(anyString(), anyString()))
        .thenReturn(meta);
}
Project: github-bucket
File: RepositoryS3.java
private boolean walk(Iterator<S3ObjectSummary> iter, ObjectId file, String path) throws IOException {
    byte[] content;
    byte[] newHash;
    LOG.debug("Start processing file: {}", path);
    try (DigestInputStream is = new DigestInputStream(repository.open(file).openStream(), DigestUtils.getMd5Digest())) {
        // Get content
        content = IOUtils.toByteArray(is);
        // Get hash
        newHash = is.getMessageDigest().digest();
    }
    if (isUploadFile(iter, path, Hex.encodeHexString(newHash))) {
        LOG.info("Uploading file: {}", path);
        ObjectMetadata bucketMetadata = new ObjectMetadata();
        bucketMetadata.setContentMD5(Base64.encodeAsString(newHash));
        bucketMetadata.setContentLength(content.length);
        // Give Tika a few hints for the content detection
        Metadata tikaMetadata = new Metadata();
        tikaMetadata.set(Metadata.RESOURCE_NAME_KEY, FilenameUtils.getName(FilenameUtils.normalize(path)));
        // Fire!
        try (InputStream bis = TikaInputStream.get(content, tikaMetadata)) {
            bucketMetadata.setContentType(TIKA_DETECTOR.detect(bis, tikaMetadata).toString());
            s3.putObject(bucket.getName(), path, bis, bucketMetadata);
            return true;
        }
    }
    LOG.info("Skipping file (same checksum): {}", path);
    return false;
}
Project: hollow-reference-implementation
File: S3Publisher.java
public void publishSnapshot(Blob blob) {
    String objectName = getS3ObjectName(blobNamespace, "snapshot", blob.getToVersion());
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata("to_state", String.valueOf(blob.getToVersion()));
    metadata.setHeader("Content-Length", blob.getFile().length());
    uploadFile(blob.getFile(), objectName, metadata);
    /// now we update the snapshot index
    updateSnapshotIndex(blob.getToVersion());
}
Project: hollow-reference-implementation
File: S3Publisher.java
public void publishDelta(Blob blob) {
    String objectName = getS3ObjectName(blobNamespace, "delta", blob.getFromVersion());
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata("from_state", String.valueOf(blob.getFromVersion()));
    metadata.addUserMetadata("to_state", String.valueOf(blob.getToVersion()));
    metadata.setHeader("Content-Length", blob.getFile().length());
    uploadFile(blob.getFile(), objectName, metadata);
}
Project: hollow-reference-implementation
File: S3Publisher.java
public void publishReverseDelta(Blob blob) {
    String objectName = getS3ObjectName(blobNamespace, "reversedelta", blob.getFromVersion());
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata("from_state", String.valueOf(blob.getFromVersion()));
    metadata.addUserMetadata("to_state", String.valueOf(blob.getToVersion()));
    metadata.setHeader("Content-Length", blob.getFile().length());
    uploadFile(blob.getFile(), objectName, metadata);
}
Project: hollow-reference-implementation
File: S3Publisher.java
private void uploadFile(File file, String s3ObjectName, ObjectMetadata metadata) {
    try (InputStream is = new BufferedInputStream(new FileInputStream(file))) {
        Upload upload = s3TransferManager.upload(bucketName, s3ObjectName, is, metadata);
        upload.waitForCompletion();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Project: hollow-reference-implementation
File: S3BlobRetriever.java
private Blob knownDeltaBlob(String fileType, long fromVersion) {
    String objectName = S3Publisher.getS3ObjectName(blobNamespace, fileType, fromVersion);
    ObjectMetadata objectMetadata = s3.getObjectMetadata(bucketName, objectName);
    long fromState = Long.parseLong(objectMetadata.getUserMetaDataOf("from_state"));
    long toState = Long.parseLong(objectMetadata.getUserMetaDataOf("to_state"));
    return new S3Blob(objectName, fromState, toState);
}
Project: ibm-cos-sdk-java
File: SkipMd5CheckStrategy.java
private boolean skipClientSideValidationPerResponse(ObjectMetadata metadata) {
    if (metadata == null) {
        return true;
    }
    // If the ETag is not provided or was computed from a multipart upload then skip the check;
    // the ETag won't be the MD5 of the original content
    if (metadata.getETag() == null || isMultipartUploadETag(metadata.getETag())) {
        return true;
    }
    return metadataInvolvesSse(metadata);
}
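For context, a multipart ETag is not an MD5 of the content; it typically looks like an MD5-of-part-MD5s followed by a dash and the part count. A check along these lines (an assumption about what isMultipartUploadETag does, not the SDK's actual code) is enough to rule out MD5 validation:

// Illustrative only: multipart ETags look like "9b2cf535f27731c974343645a3985328-12".
static boolean looksLikeMultipartETag(String eTag) {
    return eTag != null && eTag.contains("-");
}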
Project: hadoop
File: S3AFastOutputStream.java
private void putObject() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket,
            key);
    }
    final ObjectMetadata om = createDefaultMetadata();
    om.setContentLength(buffer.size());
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
        new ByteArrayInputStream(buffer.toByteArray()), om);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setGeneralProgressListener(progressListener);
    ListenableFuture<PutObjectResult> putObjectResult =
        executorService.submit(new Callable<PutObjectResult>() {
            @Override
            public PutObjectResult call() throws Exception {
                return client.putObject(putObjectRequest);
            }
        });
    // wait for completion
    try {
        putObjectResult.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload:" + ie, ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw new IOException("Regular upload failed", ee.getCause());
    }
}