Java code examples for class com.amazonaws.services.s3.model.PartETag
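PartETag pairs a part number with the ETag that Amazon S3 returns for that part of a multipart upload; the full list must be handed back to S3 when the upload is completed. The project snippets below show how real codebases collect and use PartETag. As a point of reference, here is a minimal hedged sketch of the whole lifecycle against the AWS SDK for Java v1 (bucket, key, and file path are placeholders):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class PartETagLifecycleSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        String bucket = "example-bucket";         // placeholder
        String key = "example-key";               // placeholder
        File file = new File("/tmp/example.bin"); // placeholder, assumed non-empty
        long partSize = 5L * 1024 * 1024;         // 5 MB: S3's minimum size for non-final parts

        // 1. Initiate the multipart upload and keep its id.
        String uploadId = s3.initiateMultipartUpload(
                new InitiateMultipartUploadRequest(bucket, key)).getUploadId();

        // 2. Upload the parts; part numbers are 1-based, and each upload yields one PartETag.
        List<PartETag> partETags = new ArrayList<PartETag>();
        long position = 0;
        for (int partNumber = 1; position < file.length(); partNumber++) {
            long size = Math.min(partSize, file.length() - position);
            partETags.add(s3.uploadPart(new UploadPartRequest()
                    .withBucketName(bucket).withKey(key)
                    .withUploadId(uploadId)
                    .withPartNumber(partNumber)
                    .withFile(file).withFileOffset(position)
                    .withPartSize(size)).getPartETag());
            position += size;
        }

        // 3. Hand every collected PartETag back so S3 can assemble the final object.
        s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
    }
}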
Project: elasticsearch_my
File: DefaultS3OutputStream.java
private void uploadMultipart(byte[] bytes, int off, int len, boolean lastPart) throws IOException {
try (ByteArrayInputStream is = new ByteArrayInputStream(bytes, off, len)) {
int retry = 0;
while (retry <= getNumberOfRetries()) {
try {
PartETag partETag = doUploadMultipart(getBlobStore(), getBucketName(), getBlobName(), multipartId, is, len, lastPart);
multiparts.add(partETag);
multipartChunks++;
return;
} catch (AmazonClientException e) {
if (getBlobStore().shouldRetry(e) && retry < getNumberOfRetries()) {
is.reset();
retry++;
} else {
abortMultipart();
throw e;
}
}
}
}
}
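The PartETags accumulated in `multiparts` are later passed to the completion call. A hedged sketch of that counterpart (the method name doCompleteMultipart and its exact signature are assumptions, mirroring the doUploadMultipart method shown further below):

// Hypothetical completion counterpart to doUploadMultipart; signature is an assumption.
protected void doCompleteMultipart(S3BlobStore blobStore, String bucketName, String blobName,
                                   String uploadId, List<PartETag> parts) throws AmazonS3Exception {
    CompleteMultipartUploadRequest request =
            new CompleteMultipartUploadRequest(bucketName, blobName, uploadId, parts);
    blobStore.client().completeMultipartUpload(request);
}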
Project: hadoop
File: S3AFastOutputStream.java
public void uploadPartAsync(ByteArrayInputStream inputStream,
int partSize) {
final int currentPartNumber = partETagsFutures.size() + 1;
final UploadPartRequest request =
new UploadPartRequest().withBucketName(bucket).withKey(key)
.withUploadId(uploadId).withInputStream(inputStream)
.withPartNumber(currentPartNumber).withPartSize(partSize);
request.setGeneralProgressListener(progressListener);
ListenableFuture<PartETag> partETagFuture =
executorService.submit(new Callable<PartETag>() {
@Override
public PartETag call() throws Exception {
if (LOG.isDebugEnabled()) {
LOG.debug("Uploading part {} for id '{}'", currentPartNumber,
uploadId);
}
return client.uploadPart(request).getPartETag();
}
});
partETagsFutures.add(partETagFuture);
}
Project: hadoop
File: S3AFastOutputStream.java
public List<PartETag> waitForAllPartUploads() throws IOException {
try {
return Futures.allAsList(partETagsFutures).get();
} catch (InterruptedException ie) {
LOG.warn("Interrupted partUpload:" + ie, ie);
Thread.currentThread().interrupt();
} catch (ExecutionException ee) {
//there is no way of recovering so abort
//cancel all partUploads
for (ListenableFuture<PartETag> future : partETagsFutures) {
future.cancel(true);
}
//abort multipartupload
this.abort();
throw new IOException("Part upload failed in multi-part upload with " +
    "id '" + uploadId + "':" + ee, ee);
}
//should not happen?
return null;
}
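Note that after an interruption this method falls through to `return null` (hence the "//should not happen?" comment), so a defensive caller should check for that. A short hedged sketch of such a guard, using the multiPartUpload field seen in the close() method later in this file:

// Defensive use of waitForAllPartUploads(): the null check covers the interrupted path.
List<PartETag> partETags = multiPartUpload.waitForAllPartUploads();
if (partETags == null) {
    throw new IOException("Multi-part upload interrupted before all parts completed");
}
multiPartUpload.complete(partETags);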
Project: ibm-cos-sdk-java
File: CompleteMultipartUpload.java
/**
* Collects the Part ETags for initiating the complete multi-part upload
* request. This is blocking as it waits until all the upload part threads
* complete.
*/
private List<PartETag> collectPartETags() {
final List<PartETag> partETags = new ArrayList<PartETag>();
partETags.addAll(eTagsBeforeResume);
for (Future<PartETag> future : futures) {
try {
partETags.add(future.get());
} catch (Exception e) {
throw new SdkClientException(
"Unable to complete multi-part upload. Individual part upload failed : "
+ e.getCause().getMessage(), e.getCause());
}
}
return partETags;
}
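For resumed uploads, the list merges the ETags recorded before the resume (eTagsBeforeResume) with the freshly completed futures; the combined, gap-free list is what completion needs. A hedged sketch of how the result feeds the complete call (this paraphrases the surrounding call() method using the fields shown in the constructor below; it is not the SDK's exact code):

// Hedged paraphrase: feed the collected PartETags straight into completion.
CompleteMultipartUploadResult result = s3.completeMultipartUpload(
        new CompleteMultipartUploadRequest(
                origReq.getBucketName(), origReq.getKey(), uploadId, collectPartETags()));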
Project: ibm-cos-sdk-java
File: UploadCallable.java
/**
* Submits a callable for each part to upload to our thread pool and records its corresponding Future.
*/
private void uploadPartsInParallel(UploadPartRequestFactory requestFactory,
String uploadId) {
Map<Integer,PartSummary> partNumbers = identifyExistingPartsForResume(uploadId);
while (requestFactory.hasMoreRequests()) {
if (threadPool.isShutdown()) throw new CancellationException("TransferManager has been shutdown");
UploadPartRequest request = requestFactory.getNextUploadPartRequest();
if (partNumbers.containsKey(request.getPartNumber())) {
PartSummary summary = partNumbers.get(request.getPartNumber());
eTagsToSkip.add(new PartETag(request.getPartNumber(), summary.getETag()));
transferProgress.updateProgress(summary.getSize());
continue;
}
futures.add(threadPool.submit(new UploadPartCallable(s3, request)));
}
}
Project: ibm-cos-sdk-java
File: RequestXmlFactory.java
/**
* Converts the specified list of PartETags to an XML fragment that can be
* sent to the CompleteMultipartUpload operation of Amazon S3.
*
* @param partETags
* The list of part ETags containing the data to include in the
* new XML fragment.
*
* @return A byte array containing the data
*/
public static byte[] convertToXmlByteArray(List<PartETag> partETags) {
XmlWriter xml = new XmlWriter();
xml.start("CompleteMultipartUpload");
if (partETags != null) {
List<PartETag> sortedPartETags = new ArrayList<PartETag>(partETags);
Collections.sort(sortedPartETags, new Comparator<PartETag>() {
public int compare(PartETag tag1, PartETag tag2) {
if (tag1.getPartNumber() < tag2.getPartNumber()) return -1;
if (tag1.getPartNumber() > tag2.getPartNumber()) return 1;
return 0;
}
});
for (PartETag partEtag : sortedPartETags) {
xml.start("Part");
xml.start("PartNumber").value(Integer.toString(partEtag.getPartNumber())).end();
xml.start("ETag").value(partEtag.getETag()).end();
xml.end();
}
}
xml.end();
return xml.getBytes();
}
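For a two-part upload, the fragment produced by convertToXmlByteArray looks like the following (a hedged illustration, assuming RequestXmlFactory is visible to the caller; the ETag strings are made up and whitespace is added for readability):

// Illustrative call; the ETag values are placeholders.
List<PartETag> tags = Arrays.asList(
        new PartETag(2, "\"etag-of-part-2\""),
        new PartETag(1, "\"etag-of-part-1\""));
byte[] xml = RequestXmlFactory.convertToXmlByteArray(tags);
// new String(xml) yields (note the sort puts part 1 first):
// <CompleteMultipartUpload>
//   <Part><PartNumber>1</PartNumber><ETag>"etag-of-part-1"</ETag></Part>
//   <Part><PartNumber>2</PartNumber><ETag>"etag-of-part-2"</ETag></Part>
// </CompleteMultipartUpload>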
Project: stocator
File: COSBlockOutputStream.java
/**
* Block awaiting all outstanding uploads to complete.
*
* @return list of results
* @throws IOException IO Problems
*/
private List<PartETag> waitForAllPartUploads() throws IOException {
LOG.debug("Waiting for {} uploads to complete", partETagsFutures.size());
try {
return Futures.allAsList(partETagsFutures).get();
} catch (InterruptedException ie) {
LOG.warn("Interrupted partUpload", ie);
Thread.currentThread().interrupt();
return null;
} catch (ExecutionException ee) {
// there is no way of recovering so abort
// cancel all partUploads
LOG.debug("While waiting for upload completion", ee);
LOG.debug("Cancelling futures");
for (ListenableFuture<PartETag> future : partETagsFutures) {
future.cancel(true);
}
// abort multipartupload
abort();
throw extractException("Multi-part upload with id '" + uploadId + "' to " + key, key, ee);
}
}
Project: stocator
File: COSBlockOutputStream.java
/**
* This completes a multipart upload. Sometimes it fails; here retries are
* handled to avoid losing all data on a transient failure.
*
* @param partETags list of partial uploads
* @throws IOException on any problem
*/
private CompleteMultipartUploadResult complete(List<PartETag> partETags) throws IOException {
int retryCount = 0;
AmazonClientException lastException;
String operation = String.format("Completing multi-part upload for key '%s',"
+ " id '%s' with %s partitions ",
key, uploadId, partETags.size());
do {
try {
LOG.debug(operation);
return writeOperationHelper.completeMultipartUpload(uploadId, partETags);
} catch (AmazonClientException e) {
lastException = e;
}
} while (shouldRetry(operation, lastException, retryCount++));
// this point is only reached if the operation failed more than
// the allowed retry count
throw translateException(operation, key, lastException);
}
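The retry policy itself lives in shouldRetry. A plausible shape for it (this sketch is an assumption, not stocator's actual code; retryLimit and retryBackoffMillis are assumed configuration fields) bounds the attempt count and backs off between tries:

// Hypothetical retry gate: returns true while another attempt is allowed.
private boolean shouldRetry(String operation, AmazonClientException e, int retryCount)
        throws IOException {
    if (retryCount >= retryLimit) { // retryLimit: assumed field
        return false;
    }
    LOG.debug("Retrying {} after failure (attempt {}): {}", operation, retryCount, e.toString());
    try {
        Thread.sleep(retryBackoffMillis); // assumed fixed backoff
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted while retrying " + operation, ie);
    }
    return true;
}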
Project: elasticsearch_my
File: DefaultS3OutputStream.java
protected PartETag doUploadMultipart(S3BlobStore blobStore, String bucketName, String blobName, String uploadId, InputStream is,
int length, boolean lastPart) throws AmazonS3Exception {
UploadPartRequest request = new UploadPartRequest()
.withBucketName(bucketName)
.withKey(blobName)
.withUploadId(uploadId)
.withPartNumber(multipartChunks)
.withInputStream(is)
.withPartSize(length)
.withLastPart(lastPart);
UploadPartResult response = blobStore.client().uploadPart(request);
return response.getPartETag();
}
Project: elasticsearch_my
File: MockDefaultS3OutputStream.java
@Override
protected PartETag doUploadMultipart(S3BlobStore blobStore, String bucketName, String blobName, String uploadId, InputStream is, int length, boolean lastPart) throws AmazonS3Exception {
try {
long copied = Streams.copy(is, out);
if (copied != length) {
throw new AmazonS3Exception("Not all the bytes were copied");
}
return new PartETag(numberOfUploadRequests++, RandomizedTest.randomAsciiOfLength(50));
} catch (IOException e) {
throw new AmazonS3Exception(e.getMessage());
}
}
Project: hadoop
File: S3AFastOutputStream.java
@Override
public synchronized void close() throws IOException {
if (closed) {
return;
}
closed = true;
try {
if (multiPartUpload == null) {
putObject();
} else {
if (buffer.size() > 0) {
//send last part
multiPartUpload.uploadPartAsync(new ByteArrayInputStream(buffer
.toByteArray()), buffer.size());
}
final List<PartETag> partETags = multiPartUpload
.waitForAllPartUploads();
multiPartUpload.complete(partETags);
}
statistics.incrementWriteOps(1);
// This will delete unnecessary fake parent directories
fs.finishedWrite(key);
if (LOG.isDebugEnabled()) {
LOG.debug("Upload complete for bucket '{}' key '{}'", bucket, key);
}
} finally {
buffer = null;
super.close();
}
}
Project: hadoop
File: S3AFastOutputStream.java
public MultiPartUpload(String uploadId) {
this.uploadId = uploadId;
this.partETagsFutures = new ArrayList<ListenableFuture<PartETag>>();
if (LOG.isDebugEnabled()) {
LOG.debug("Initiated multi-part upload for bucket '{}' key '{}' with " +
"id '{}'", bucket, key, uploadId);
}
}
Project: hadoop
File: S3AFastOutputStream.java
public void complete(List<PartETag> partETags) {
if (LOG.isDebugEnabled()) {
LOG.debug("Completing multi-part upload for key '{}', id '{}'", key,
uploadId);
}
final CompleteMultipartUploadRequest completeRequest =
new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags);
client.completeMultipartUpload(completeRequest);
}
Project: ibm-cos-sdk-java
File: AmazonS3Client.java
/**
* Used for performance testing purposes only. Hence package private.
 * This method is subject to removal at any time without notice.
*/
CompleteMultipartUploadResult uploadObject(final UploadObjectRequest req)
throws IOException, InterruptedException, ExecutionException {
// Set up the pipeline for concurrent encrypt and upload
// Set up a thread pool for this pipeline
ExecutorService es = req.getExecutorService();
final boolean defaultExecutorService = es == null;
if (es == null)
es = Executors.newFixedThreadPool(clientConfiguration.getMaxConnections());
UploadObjectObserver observer = req.getUploadObjectObserver();
if (observer == null)
observer = new UploadObjectObserver();
// initialize the observer
observer.init(req, this, this, es);
// Initiate upload
observer.onUploadInitiation(req);
final List<PartETag> partETags = new ArrayList<PartETag>();
MultiFileOutputStream mfos = req.getMultiFileOutputStream();
if (mfos == null)
mfos = new MultiFileOutputStream();
try {
// initialize the multi-file output stream
mfos.init(observer, req.getPartSize(), req.getDiskLimit());
// Kicks off the encryption-upload pipeline;
// Note mfos is automatically closed upon method completion.
putLocalObject(req, mfos);
// block till all parts have been uploaded
for (Future<UploadPartResult> future: observer.getFutures()) {
UploadPartResult partResult = future.get();
partETags.add(new PartETag(partResult.getPartNumber(), partResult.getETag()));
}
} finally {
if (defaultExecutorService)
es.shutdownNow(); // shut down the locally created thread pool
mfos.cleanup(); // delete left-over temp files
}
// Complete upload
return observer.onCompletion(partETags);
}
Project: ibm-cos-sdk-java
File: CompleteMultipartUpload.java
public CompleteMultipartUpload(String uploadId, AmazonS3 s3,
PutObjectRequest putObjectRequest, List<Future<PartETag>> futures,
List<PartETag> eTagsBeforeResume, ProgressListenerChain progressListenerChain,
UploadMonitor monitor) {
this.uploadId = uploadId;
this.s3 = s3;
this.origReq = putObjectRequest;
this.futures = futures;
this.eTagsBeforeResume = eTagsBeforeResume;
this.listener = progressListenerChain;
this.monitor = monitor;
}
Project: ibm-cos-sdk-java
File: UploadMonitor.java
/**
* Cancels the inflight transfers if they are not completed.
*/
private void cancelFutures() {
cancelFuture();
for (Future<PartETag> f : futures) {
f.cancel(true);
}
multipartUploadCallable.getFutures().clear();
futures.clear();
}
Project: ibm-cos-sdk-java
File: UploadCallable.java
/**
* Uploads all parts in the request in serial in this thread, then completes
* the upload and returns the result.
*/
private UploadResult uploadPartsInSeries(UploadPartRequestFactory requestFactory) {
final List<PartETag> partETags = new ArrayList<PartETag>();
while (requestFactory.hasMoreRequests()) {
if (threadPool.isShutdown()) throw new CancellationException("TransferManager has been shutdown");
UploadPartRequest uploadPartRequest = requestFactory.getNextUploadPartRequest();
// Mark the stream in case we need to reset it
InputStream inputStream = uploadPartRequest.getInputStream();
if (inputStream != null && inputStream.markSupported()) {
if (uploadPartRequest.getPartSize() >= Integer.MAX_VALUE) {
inputStream.mark(Integer.MAX_VALUE);
} else {
inputStream.mark((int)uploadPartRequest.getPartSize());
}
}
partETags.add(s3.uploadPart(uploadPartRequest).getPartETag());
}
CompleteMultipartUploadRequest req =
new CompleteMultipartUploadRequest(
origReq.getBucketName(), origReq.getKey(), multipartUploadId,
partETags)
.withRequesterPays(origReq.isRequesterPays())
.withGeneralProgressListener(origReq.getGeneralProgressListener())
.withRequestMetricCollector(origReq.getRequestMetricCollector())
;
CompleteMultipartUploadResult res = s3.completeMultipartUpload(req);
UploadResult uploadResult = new UploadResult();
uploadResult.setBucketName(res.getBucketName());
uploadResult.setKey(res.getKey());
uploadResult.setETag(res.getETag());
uploadResult.setVersionId(res.getVersionId());
return uploadResult;
}
Project: ibm-cos-sdk-java
File: CompleteMultipartCopy.java
public CompleteMultipartCopy(String uploadId, AmazonS3 s3,
CopyObjectRequest copyObjectRequest, List<Future<PartETag>> futures,
ProgressListenerChain progressListenerChain, CopyMonitor monitor) {
this.uploadId = uploadId;
this.s3 = s3;
this.origReq = copyObjectRequest;
this.futures = futures;
this.listener = progressListenerChain;
this.monitor = monitor;
}
Project: ibm-cos-sdk-java
File: CompleteMultipartCopy.java
/**
* Collects the Part ETags for initiating the complete multi-part copy
* request. This is blocking as it waits until all the upload part threads
* complete.
*/
private List<PartETag> collectPartETags() {
final List<PartETag> partETags = new ArrayList<PartETag>();
for (Future<PartETag> future : futures) {
try {
partETags.add(future.get());
} catch (Exception e) {
throw new SdkClientException("Unable to copy part: "
+ e.getCause().getMessage(), e.getCause());
}
}
return partETags;
}
Project: stocator
File: COSAPIClient.java
/**
* Complete a multipart upload operation.
* @param uploadId multipart operation Id
* @param partETags list of partial uploads
* @return the result
* @throws AmazonClientException on problems
*/
CompleteMultipartUploadResult completeMultipartUpload(String uploadId,
List<PartETag> partETags) throws AmazonClientException {
LOG.debug("Completing multipart upload {} with {} parts",
uploadId, partETags.size());
return mClient.completeMultipartUpload(
new CompleteMultipartUploadRequest(mBucket,
key,
uploadId,
partETags));
}
Project: stocator
File: COSBlockOutputStream.java
/**
 * Upload a block of data asynchronously. This takes the block, queues its
 * upload on the executor, and records the resulting PartETag future.
*
* @param block block to upload
* @throws IOException upload failure
*/
private void uploadBlockAsync(final COSDataBlocks.DataBlock block) throws IOException {
LOG.debug("Queueing upload of {}", block);
final int size = block.dataSize();
final COSDataBlocks.BlockUploadData uploadData = block.startUpload();
final int currentPartNumber = partETagsFutures.size() + 1;
final UploadPartRequest request = writeOperationHelper.newUploadPartRequest(uploadId,
currentPartNumber, size,
uploadData.getUploadStream(), uploadData.getFile());
ListenableFuture<PartETag> partETagFuture = executorService.submit(new Callable<PartETag>() {
@Override
public PartETag call() throws Exception {
// this is the queued upload operation
LOG.debug("Uploading part {} for id '{}'", currentPartNumber, uploadId);
// do the upload
PartETag partETag;
try {
partETag = fs.uploadPart(request).getPartETag();
LOG.debug("Completed upload of {} to part {}", block, partETag.getETag());
} finally {
// close the stream and block
closeAll(LOG, uploadData, block);
}
return partETag;
}
});
partETagsFutures.add(partETagFuture);
}
Project: backuprotator
File: AWSHandler.java
static List<PartETag> GetETags(List<CopyPartResult> responses)
{
List<PartETag> etags = new ArrayList<PartETag>();
for (CopyPartResult response : responses)
{
etags.add(new PartETag(response.getPartNumber(), response.getETag()));
}
return etags;
}
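A hedged sketch of how GetETags would be used after a server-side multipart copy (uploadId, numParts, partSize, objectSize, and the bucket/key names are placeholders, and the caller is assumed to see the package-private GetETags):

// Copy each part server-side, then complete the copy with the collected ETags.
List<CopyPartResult> responses = new ArrayList<CopyPartResult>();
for (int partNumber = 1; partNumber <= numParts; partNumber++) { // numParts: assumed
    long first = (partNumber - 1) * partSize;
    long last = Math.min(first + partSize, objectSize) - 1;
    responses.add(s3.copyPart(new CopyPartRequest()
            .withSourceBucketName("src-bucket").withSourceKey("src-key")
            .withDestinationBucketName("dst-bucket").withDestinationKey("dst-key")
            .withUploadId(uploadId)
            .withPartNumber(partNumber)
            .withFirstByte(first).withLastByte(last)));
}
s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
        "dst-bucket", "dst-key", uploadId, AWSHandler.GetETags(responses)));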
Project: cloudkeeper
File: S3ConnectionImpl.java
@Override
public CompletableFuture<CompleteMultipartUploadResult> completeMultipartUpload(String bucketName, String key,
String uploadId, List<PartETag> partETags) {
CompleteMultipartUploadRequest request
= new CompleteMultipartUploadRequest(bucketName, key, uploadId, partETags);
return CompletableFuture.supplyAsync(() -> s3Client.completeMultipartUpload(request), executorService);
}
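Because the wrapper returns a CompletableFuture, completion can be chained without blocking. A small usage sketch (connection, LOG, and the bucket/key names are placeholders):

// Chain the asynchronous completion; names are illustrative.
connection.completeMultipartUpload("example-bucket", "example-key", uploadId, partETags)
    .thenAccept(result -> LOG.info("Upload complete, final ETag {}", result.getETag()))
    .exceptionally(throwable -> {
        LOG.error("Completing multipart upload failed", throwable);
        return null;
    });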
Project: apex-malhar
File: S3FileMerger.java
/**
 * Records an uploaded block's PartETag and merges the file once all of its blocks have been uploaded.
 * @param tuple uploaded block metadata
*/
protected void processUploadBlock(S3BlockUploadOperator.UploadBlockMetadata tuple)
{
List<PartETag> listOfUploads = uploadParts.get(tuple.getKeyName());
if (listOfUploads == null) {
listOfUploads = new ArrayList<>();
uploadParts.put(tuple.getKeyName(), listOfUploads);
}
listOfUploads.add(tuple.getPartETag());
if (fileMetadatas.get(tuple.getKeyName()) != null) {
verifyAndEmitFileMerge(tuple.getKeyName());
}
}
Project: apex-malhar
File: S3FileMerger.java
/**
 * Sends the CompleteMultipartUploadRequest to S3 once all blocks of a file have been uploaded.
* @param keyName file to upload into S3
*/
private void verifyAndEmitFileMerge(String keyName)
{
if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
return;
}
S3InitiateFileUploadOperator.UploadFileMetadata uploadFileMetadata = fileMetadatas.get(keyName);
List<PartETag> partETags = uploadParts.get(keyName);
if (partETags == null || uploadFileMetadata == null ||
uploadFileMetadata.getFileMetadata().getNumberOfBlocks() != partETags.size()) {
return;
}
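// A file with at most one block has no multipart transaction to complete; just record success.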
if (partETags.size() <= 1) {
uploadedFiles.add(keyName);
LOG.debug("Uploaded file {} successfully", keyName);
return;
}
CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName,
keyName, uploadFileMetadata.getUploadId(), partETags);
CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
if (result.getETag() != null) {
uploadedFiles.add(keyName);
LOG.debug("Uploaded file {} successfully", keyName);
}
}