// Encrypted-client completion of a multipart upload. Refuses to complete
// unless the final part was flagged (the cipher stream must be closed on the
// last part), and in InstructionFile storage mode writes the instruction
// file only AFTER S3 accepts the completion, so a failed complete never
// leaves a dangling instruction file.
@Override
public CompleteMultipartUploadResult completeMultipartUploadSecurely(
        CompleteMultipartUploadRequest req) {
    appendUserAgent(req, USER_AGENT);
    String uploadId = req.getUploadId();
    // uploadContext is null when this client did not initiate the upload
    // (e.g. a plain, non-encrypted upload) -- then no final-part check applies.
    final T uploadContext = multipartUploadContexts.get(uploadId);
    if (uploadContext != null && !uploadContext.hasFinalPartBeenSeen()) {
        throw new SdkClientException(
                "Unable to complete an encrypted multipart upload without being told which part was the last. "
                + "Without knowing which part was the last, the encrypted data in Amazon S3 is incomplete and corrupt.");
    }
    CompleteMultipartUploadResult result = s3.completeMultipartUpload(req);
    // In InstructionFile mode, we want to write the instruction file only
    // after the whole upload has completed correctly.
    if (uploadContext != null
            && cryptoConfig.getStorageMode() == InstructionFile) {
        // Put the instruction file into S3
        s3.putObject(createInstructionPutRequest(
                uploadContext.getBucketName(), uploadContext.getKey(),
                uploadContext.getContentCryptoMaterial()));
    }
    multipartUploadContexts.remove(uploadId);
    return result;
}
/** * This completes a multipart upload. Sometimes it fails; here retries are * handled to avoid losing all data on a transient failure. * * @param partETags list of partial uploads * @throws IOException on any problem */ private CompleteMultipartUploadResult complete(List<PartETag> partETags) throws IOException { int retryCount = 0; AmazonClientException lastException; String operation = String.format("Completing multi-part upload for key '%s'," + " id '%s' with %s partitions ", key, uploadId, partETags.size()); do { try { LOG.debug(operation); return writeOperationHelper.completeMultipartUpload(uploadId, partETags); } catch (AmazonClientException e) { lastException = e; } } while (shouldRetry(operation, lastException, retryCount++)); // this point is only reached if the operation failed more than // the allowed retry count throw translateException(operation, key, lastException); }
/**
 * Used for performance testing purposes only. Hence package private.
 * This method is subject to removal anytime without notice.
 *
 * Runs a concurrent encrypt-and-upload pipeline: parts are staged through a
 * MultiFileOutputStream while an UploadObjectObserver uploads them, then the
 * multipart upload is completed with the collected part ETags.
 */
CompleteMultipartUploadResult uploadObject(final UploadObjectRequest req)
        throws IOException, InterruptedException, ExecutionException {
    // Set up the pipeline for concurrent encrypt and upload
    // Set up a thread pool for this pipeline
    ExecutorService es = req.getExecutorService();
    // Remember whether we created the executor: only a locally created one
    // may be shut down here; a caller-supplied one is the caller's to manage.
    final boolean defaultExecutorService = es == null;
    if (es == null)
        es = Executors.newFixedThreadPool(clientConfiguration.getMaxConnections());
    UploadObjectObserver observer = req.getUploadObjectObserver();
    if (observer == null)
        observer = new UploadObjectObserver();
    // initialize the observer
    observer.init(req, this, this, es);
    // Initiate upload
    observer.onUploadInitiation(req);
    final List<PartETag> partETags = new ArrayList<PartETag>();
    MultiFileOutputStream mfos = req.getMultiFileOutputStream();
    if (mfos == null)
        mfos = new MultiFileOutputStream();
    try {
        // initialize the multi-file output stream
        mfos.init(observer, req.getPartSize(), req.getDiskLimit());
        // Kicks off the encryption-upload pipeline;
        // Note mfos is automatically closed upon method completion.
        putLocalObject(req, mfos);
        // block till all part have been uploaded
        for (Future<UploadPartResult> future: observer.getFutures()) {
            UploadPartResult partResult = future.get();
            partETags.add(new PartETag(partResult.getPartNumber(), partResult.getETag()));
        }
    } finally {
        if (defaultExecutorService)
            es.shutdownNow(); // shut down the locally created thread pool
        mfos.cleanup(); // delete left-over temp files
    }
    // Complete upload
    return observer.onCompletion(partETags);
}
/**
 * Completes the multipart upload and translates the S3 response into an
 * {@code UploadResult}, notifying listeners on failure.
 */
@Override
public UploadResult call() throws Exception {
    final CompleteMultipartUploadResult completion;
    try {
        // Carry over requester-pays, progress-listener and metric-collector
        // settings from the original request.
        completion = s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(
                        origReq.getBucketName(), origReq.getKey(), uploadId,
                        collectPartETags())
                    .withRequesterPays(origReq.isRequesterPays())
                    .withGeneralProgressListener(origReq.getGeneralProgressListener())
                    .withRequestMetricCollector(origReq.getRequestMetricCollector()));
    } catch (Exception e) {
        // Surface the failure to progress listeners before rethrowing.
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }
    final UploadResult uploadResult = new UploadResult();
    uploadResult.setBucketName(origReq.getBucketName());
    uploadResult.setKey(origReq.getKey());
    uploadResult.setETag(completion.getETag());
    uploadResult.setVersionId(completion.getVersionId());
    monitor.uploadComplete();
    return uploadResult;
}
/** * Uploads all parts in the request in serial in this thread, then completes * the upload and returns the result. */ private UploadResult uploadPartsInSeries(UploadPartRequestFactory requestFactory) { final List<PartETag> partETags = new ArrayList<PartETag>(); while (requestFactory.hasMoreRequests()) { if (threadPool.isShutdown()) throw new CancellationException("TransferManager has been shutdown"); UploadPartRequest uploadPartRequest = requestFactory.getNextUploadPartRequest(); // Mark the stream in case we need to reset it InputStream inputStream = uploadPartRequest.getInputStream(); if (inputStream != null && inputStream.markSupported()) { if (uploadPartRequest.getPartSize() >= Integer.MAX_VALUE) { inputStream.mark(Integer.MAX_VALUE); } else { inputStream.mark((int)uploadPartRequest.getPartSize()); } } partETags.add(s3.uploadPart(uploadPartRequest).getPartETag()); } CompleteMultipartUploadRequest req = new CompleteMultipartUploadRequest( origReq.getBucketName(), origReq.getKey(), multipartUploadId, partETags) .withRequesterPays(origReq.isRequesterPays()) .withGeneralProgressListener(origReq.getGeneralProgressListener()) .withRequestMetricCollector(origReq.getRequestMetricCollector()) ; CompleteMultipartUploadResult res = s3.completeMultipartUpload(req); UploadResult uploadResult = new UploadResult(); uploadResult.setBucketName(res.getBucketName()); uploadResult.setKey(res.getKey()); uploadResult.setETag(res.getETag()); uploadResult.setVersionId(res.getVersionId()); return uploadResult; }
/**
 * Completes the multipart copy and translates the S3 response into a
 * {@code CopyResult}, notifying listeners on failure.
 */
@Override
public CopyResult call() throws Exception {
    final CompleteMultipartUploadResult completion;
    try {
        completion = s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(
                        origReq.getDestinationBucketName(),
                        origReq.getDestinationKey(), uploadId,
                        collectPartETags())
                    .withRequesterPays(origReq.isRequesterPays())
                    .withGeneralProgressListener(origReq.getGeneralProgressListener())
                    .withRequestMetricCollector(origReq.getRequestMetricCollector()));
    } catch (Exception e) {
        // Surface the failure to progress listeners before rethrowing.
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }
    final CopyResult copyResult = new CopyResult();
    copyResult.setSourceBucketName(origReq.getSourceBucketName());
    copyResult.setSourceKey(origReq.getSourceKey());
    copyResult.setDestinationBucketName(completion.getBucketName());
    copyResult.setDestinationKey(completion.getKey());
    copyResult.setETag(completion.getETag());
    copyResult.setVersionId(completion.getVersionId());
    monitor.copyComplete();
    return copyResult;
}
/**
 * Dispatches completion to the crypto module matching the configured
 * default crypto mode (encryption-only vs. authenticated encryption).
 */
@Override
public CompleteMultipartUploadResult completeMultipartUploadSecurely(
        CompleteMultipartUploadRequest req)
        throws SdkClientException, AmazonServiceException {
    if (defaultCryptoMode == EncryptionOnly) {
        return eo.completeMultipartUploadSecurely(req);
    }
    return ae.completeMultipartUploadSecurely(req);
}
/**
 * Complete a multipart upload operation against the configured bucket/key.
 *
 * @param uploadId multipart operation Id
 * @param partETags list of partial uploads
 * @return the result
 * @throws AmazonClientException on problems
 */
CompleteMultipartUploadResult completeMultipartUpload(String uploadId,
        List<PartETag> partETags) throws AmazonClientException {
    LOG.debug("Completing multipart upload {} with {} parts", uploadId,
            partETags.size());
    CompleteMultipartUploadRequest request =
            new CompleteMultipartUploadRequest(mBucket, key, uploadId, partETags);
    return mClient.completeMultipartUpload(request);
}
/**
 * Moves a file between buckets: copies it (multipart for objects over 5 GB,
 * single copy otherwise) and deletes the source only after a successful copy.
 * In simulation mode, or for an inaccessible storage class, only logs.
 *
 * @param srcBucket    source bucket/path
 * @param targetBucket destination bucket/path
 * @param fileName     object name (appended to each bucket's path)
 * @param log          logbook receiving a move entry (recorded even in simulation)
 */
public void moveFile(Bucket srcBucket, Bucket targetBucket, String fileName, Logbook log) {
    log.addEntry(new FileMoveLogEntry(srcBucket, targetBucket, fileName));
    ObjectMetadata metadata = getMetaData(srcBucket, fileName);
    if (!simulation && isAccessibleStorageClass(metadata)) {
        // Fix: log the target bucket's name (was the Bucket object itself,
        // inconsistent with the simulation branch below).
        logger.trace("Not simulation , so moving files from {}/{} to {}",
                srcBucket.getName(), srcBucket.getPath(), targetBucket.getName());
        long size = getFileSize(metadata);
        String srcKey = srcBucket.getPath() + SEPARATOR + fileName;
        boolean copied;
        if (size > FIVE_GB) {
            // Single-request copy is capped at 5 GB; larger objects go
            // through multipart upload-part-copy.
            logger.trace("Big file multi attachement copy chosen, file size {}", size);
            copied = copyMultipartFile(srcBucket, targetBucket, fileName, size) != null;
        } else {
            logger.trace("Small file movement chosen, file size {}", size);
            copied = s3Client.copyObject(srcBucket.getName(), srcKey,
                    targetBucket.getName(),
                    targetBucket.getPath() + SEPARATOR + fileName) != null;
        }
        // Delete the source only after a successful copy (single shared path
        // replaces the previously duplicated delete calls).
        if (copied) {
            s3Client.deleteObject(srcBucket.getName(), srcKey);
        }
    } else {
        logger.trace("Simulation mode or wrong storage class, moving files from {} to {}",
                srcBucket.getPath(), targetBucket.getName());
    }
}
/**
 * Asynchronously completes a multipart upload, executing the blocking SDK
 * call on the configured executor service (not the common fork-join pool).
 */
@Override
public CompletableFuture<CompleteMultipartUploadResult> completeMultipartUpload(String bucketName,
        String key, String uploadId, List<PartETag> partETags) {
    final CompleteMultipartUploadRequest completeRequest =
            new CompleteMultipartUploadRequest(bucketName, key, uploadId, partETags);
    return CompletableFuture.supplyAsync(
            () -> s3Client.completeMultipartUpload(completeRequest),
            executorService);
}
/**
 * Send the CompleteMultipartUploadRequest to S3 if all the blocks of a file are uploaded into S3.
 * @param keyName file to upload into S3
 */
private void verifyAndEmitFileMerge(String keyName) {
    // Skip while replaying already-committed windows so recovery stays
    // idempotent.
    if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
        return;
    }
    S3InitiateFileUploadOperator.UploadFileMetadata uploadFileMetadata = fileMetadatas.get(keyName);
    List<PartETag> partETags = uploadParts.get(keyName);
    // Not all blocks have arrived yet (or metadata is missing); try again
    // on a later call.
    if (partETags == null || uploadFileMetadata == null ||
            uploadFileMetadata.getFileMetadata().getNumberOfBlocks() != partETags.size()) {
        return;
    }
    // Single-block files are presumably uploaded via a plain putObject
    // upstream, so there is no multipart upload to complete here --
    // NOTE(review): confirm against the initiate/upload operators.
    if (partETags.size() <= 1) {
        uploadedFiles.add(keyName);
        LOG.debug("Uploaded file {} successfully", keyName);
        return;
    }
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
            bucketName, keyName, uploadFileMetadata.getUploadId(), partETags);
    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    // A non-null ETag indicates S3 accepted the assembled object.
    if (result.getETag() != null) {
        uploadedFiles.add(keyName);
        LOG.debug("Uploaded file {} successfully", keyName);
    }
}
/**
 * Test double for multipart completion: materializes the "merged" object on
 * the local filesystem and returns a stub result whose ETag carries the
 * output directory path.
 */
private CompleteMultipartUploadResult completeMultiPart() throws IOException {
    File mergedFile = new File(outputDir + File.separator + FILE);
    FileUtils.copyFile(inputFile, mergedFile);
    CompleteMultipartUploadResult uploadResult = new CompleteMultipartUploadResult();
    uploadResult.setETag(outputDir);
    return uploadResult;
}
/**
 * Pure pass-through: forwards completion to the wrapped client unchanged.
 */
@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request)
        throws AmazonClientException, AmazonServiceException {
    CompleteMultipartUploadResult delegated = delegate.completeMultipartUpload(request);
    return delegated;
}
// Multipart-upload completion is intentionally unsupported by this
// implementation; callers must not route multipart traffic here.
@Override
public CompleteMultipartUploadResult completeMultipartUpload(
        CompleteMultipartUploadRequest completeMultipartUploadRequest)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
/**
 * Completes a multipart upload by POSTing the part-ETag manifest to S3.
 * S3 can return a 200 response whose body is an error (result is null);
 * in that case the request is retried per the retry predicate, and the
 * embedded exception is thrown once retries are exhausted.
 */
@Override
public CompleteMultipartUploadResult completeMultipartUpload(
        CompleteMultipartUploadRequest completeMultipartUploadRequest)
        throws SdkClientException, AmazonServiceException {
    completeMultipartUploadRequest = beforeClientExecution(completeMultipartUploadRequest);
    rejectNull(completeMultipartUploadRequest,
            "The request parameter must be specified when completing a multipart upload");
    String bucketName = completeMultipartUploadRequest.getBucketName();
    String key = completeMultipartUploadRequest.getKey();
    String uploadId = completeMultipartUploadRequest.getUploadId();
    rejectNull(bucketName,
            "The bucket name parameter must be specified when completing a multipart upload");
    rejectNull(key,
            "The key parameter must be specified when completing a multipart upload");
    rejectNull(uploadId,
            "The upload ID parameter must be specified when completing a multipart upload");
    rejectNull(completeMultipartUploadRequest.getPartETags(),
            "The part ETags parameter must be specified when completing a multipart upload");
    int retries = 0;
    CompleteMultipartUploadHandler handler;
    do {
        // Build a fresh request each attempt (the content stream is consumed).
        Request<CompleteMultipartUploadRequest> request = createRequest(bucketName, key,
                completeMultipartUploadRequest, HttpMethodName.POST);
        request.addParameter("uploadId", uploadId);
        populateRequesterPaysHeader(request, completeMultipartUploadRequest.isRequesterPays());
        // Serialize the part list to the XML body S3 expects.
        byte[] xml = RequestXmlFactory.convertToXmlByteArray(
                completeMultipartUploadRequest.getPartETags());
        request.addHeader("Content-Type", "application/xml");
        request.addHeader("Content-Length", String.valueOf(xml.length));
        request.setContent(new ByteArrayInputStream(xml));
        @SuppressWarnings("unchecked")
        ResponseHeaderHandlerChain<CompleteMultipartUploadHandler> responseHandler =
                new ResponseHeaderHandlerChain<CompleteMultipartUploadHandler>(
                        // xml payload unmarshaller
                        new Unmarshallers.CompleteMultipartUploadResultUnmarshaller(),
                        // header handlers
                        new ServerSideEncryptionHeaderHandler<CompleteMultipartUploadHandler>(),
                        new ObjectExpirationHeaderHandler<CompleteMultipartUploadHandler>(),
                        new S3VersionHeaderHandler<CompleteMultipartUploadHandler>(),
                        new S3RequesterChargedHeaderHandler<CompleteMultipartUploadHandler>());
        handler = invoke(request, responseHandler, bucketName, key);
        // A parsed result means genuine success; a null result means the
        // 200 response carried an error body.
        if (handler.getCompleteMultipartUploadResult() != null) {
            return handler.getCompleteMultipartUploadResult();
        }
    } while (shouldRetryCompleteMultipartUpload(completeMultipartUploadRequest,
            handler.getAmazonS3Exception(), retries++));
    throw handler.getAmazonS3Exception();
}
/**
 * Completes a previously initiated multipart upload.
 *
 * @param req the complete-multipart-upload request
 * @return the completion result returned by S3
 */
public abstract CompleteMultipartUploadResult completeMultipartUpload(
        CompleteMultipartUploadRequest req);
/**
 * Securely completes a multipart upload through the client-side encryption
 * pipeline.
 *
 * @param req the complete-multipart-upload request
 * @return the completion result returned by S3
 */
public abstract CompleteMultipartUploadResult completeMultipartUploadSecurely(
        CompleteMultipartUploadRequest req);
/**
 * Completes the multipart upload identified by the request's upload ID.
 *
 * @param req the complete-multipart-upload request
 * @return the completion result returned by the service
 */
public CompleteMultipartUploadResult completeMultipartUpload(
        CompleteMultipartUploadRequest req);
/**
 * Routes completion through the encryption pipeline instead of the plain
 * client.
 */
@Override
public CompleteMultipartUploadResult completeMultipartUpload(
        CompleteMultipartUploadRequest req) {
    CompleteMultipartUploadResult secureResult = crypto.completeMultipartUploadSecurely(req);
    return secureResult;
}
/**
 * Bypasses the encrypting override and invokes the plain superclass
 * completion path.
 */
@Override
public CompleteMultipartUploadResult completeMultipartUpload(
        CompleteMultipartUploadRequest req) {
    CompleteMultipartUploadResult plainResult =
            AmazonS3EncryptionClient.super.completeMultipartUpload(req);
    return plainResult;
}
/**
 * Forwards completion to the current delegate; {@code call(...)} applies
 * the class's common invocation/error handling around the lambda.
 */
@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) {
    return call(() -> {
        // Resolve the delegate at invocation time, exactly as before.
        return getDelegate().completeMultipartUpload(request);
    });
}
/**
 * Copies an object too large for a single-request copy by driving a
 * multipart upload whose parts are server-side copies of 5 MB byte ranges
 * of the source object.
 *
 * @return the completion result from S3
 */
private CompleteMultipartUploadResult copyMultipartFile(Bucket srcBucket, Bucket targetBucket, String fileName, long size) {
    // Collect the copy-part responses; their ETags are needed to complete.
    List<CopyPartResult> copyResponses = new ArrayList<CopyPartResult>();
    // Initiate the multipart upload on the target object.
    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(
            targetBucket.getName(), targetBucket.getPath() + SEPARATOR + fileName);
    InitiateMultipartUploadResult initResult = s3Client.initiateMultipartUpload(initiateRequest);
    // Copy the source in fixed-size ranges; part numbers start at 1.
    long partSize = 5 * (long)Math.pow(2.0, 20.0); // 5 MB
    long bytePosition = 0;
    for (int i = 1; bytePosition < size; i++) {
        // The last part's range is clamped to the final byte of the object.
        CopyPartRequest copyRequest = new CopyPartRequest()
                .withDestinationBucketName(targetBucket.getName())
                .withDestinationKey(targetBucket.getPath() + SEPARATOR + fileName)
                .withSourceBucketName(srcBucket.getName())
                .withSourceKey(srcBucket.getPath() + SEPARATOR + fileName)
                .withUploadId(initResult.getUploadId())
                .withFirstByte(bytePosition)
                .withLastByte(bytePosition + partSize -1 >= size ? size - 1 : bytePosition + partSize - 1)
                .withPartNumber(i);
        copyResponses.add(s3Client.copyPart(copyRequest));
        bytePosition += partSize;
    }
    // Complete the copy by assembling the copied parts' ETags.
    CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
            targetBucket.getName(),
            targetBucket.getPath() + SEPARATOR + fileName,
            initResult.getUploadId(),
            GetETags(copyResponses));
    CompleteMultipartUploadResult completeUploadResponse = s3Client.completeMultipartUpload(completeRequest);
    return completeUploadResponse;
}
// Stub implementation: completion is a no-op and yields no result.
// NOTE(review): callers must tolerate the null return.
@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request)
        throws AmazonClientException {
    return null;
}
// Verifies multipart copy: puts a source object, server-side-copies it as a
// single part into a new multipart upload, completes it, and asserts the
// target matches the source byte-for-byte.
@Test
public void testMultipartCopy() throws Exception {
    // B2 requires two parts to issue an MPU
    assumeTrue(!blobStoreType.equals("b2"));
    String sourceBlobName = "testMultipartCopy-source";
    String targetBlobName = "testMultipartCopy-target";
    // Seed the source object.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(BYTE_SOURCE.size());
    client.putObject(containerName, sourceBlobName, BYTE_SOURCE.openStream(), metadata);
    // Initiate the multipart upload on the target.
    InitiateMultipartUploadRequest initiateRequest =
            new InitiateMultipartUploadRequest(containerName, targetBlobName);
    InitiateMultipartUploadResult initResult = client.initiateMultipartUpload(initiateRequest);
    String uploadId = initResult.getUploadId();
    // Server-side copy of the whole source as a single part.
    CopyPartRequest copyRequest = new CopyPartRequest()
            .withDestinationBucketName(containerName)
            .withDestinationKey(targetBlobName)
            .withSourceBucketName(containerName)
            .withSourceKey(sourceBlobName)
            .withUploadId(uploadId)
            .withFirstByte(0L)
            .withLastByte(BYTE_SOURCE.size() - 1)
            .withPartNumber(1);
    CopyPartResult copyPartResult = client.copyPart(copyRequest);
    CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
            containerName, targetBlobName, uploadId,
            ImmutableList.of(copyPartResult.getPartETag()));
    CompleteMultipartUploadResult completeUploadResponse =
            client.completeMultipartUpload(completeRequest);
    // The target must match the source in length and content.
    S3Object object = client.getObject(containerName, targetBlobName);
    assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
            BYTE_SOURCE.size());
    try (InputStream actual = object.getObjectContent();
            InputStream expected = BYTE_SOURCE.openStream()) {
        assertThat(actual).hasContentEqualTo(expected);
    }
}
// Auto-generated placeholder: completion is not implemented and returns no
// result. NOTE(review): callers must tolerate the null return.
@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request)
        throws AmazonClientException, AmazonServiceException {
    // TODO Auto-generated method stub
    return null;
}
// Handles the upload action: lets the user pick a file, then uploads it to
// S3 in 5 MB multipart chunks and prints the final ETag to stdout.
// NOTE(review): a failed part upload leaves the multipart upload
// un-aborted on S3 (consider abortMultipartUpload), and only
// AmazonS3Exception is caught -- other runtime failures propagate.
public void actionPerformed(ActionEvent ae) {
    JFileChooser fileChooser = new JFileChooser();
    int showOpenDialog = fileChooser.showOpenDialog(frame);
    // User cancelled the file dialog -- nothing to do.
    if (showOpenDialog != JFileChooser.APPROVE_OPTION) return;
    createAmazonS3Bucket();
    File fileToUpload = fileChooser.getSelectedFile();
    initiateRequest = new InitiateMultipartUploadRequest(AWSResources.S3_BUCKET_NAME,fileToUpload.getName());
    initResult = AWSResources.S3.initiateMultipartUpload(initiateRequest);
    uploadId = initResult.getUploadId();
    try {
        long objectSize = fileToUpload.length();
        long partSize = 5 * (long)Math.pow(2.0, 20.0); //5MB
        long bytePosition = 0;
        List<UploadPartResult> uploadResponses = new ArrayList<UploadPartResult>();
        for (int i = 1; bytePosition < objectSize; i++) {
            System.out.print("Uploading:" + i +"\n");
            // Shrink the final part to the bytes remaining in the file.
            partSize=Math.min(partSize, (objectSize - bytePosition));
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(AWSResources.S3_BUCKET_NAME)
                    .withKey(fileToUpload.getName())
                    .withFile(fileToUpload)
                    .withPartSize(partSize)
                    .withUploadId(uploadId)
                    .withFileOffset(bytePosition)
                    .withPartNumber(i);
            //uploadRequest.setProgressListener(new ProgressListener(fileToUpload, i, partSize));
            uploadResponses.add(AWSResources.S3.uploadPart(uploadRequest));
            bytePosition += partSize;
        }
        // Assemble the uploaded parts into the final object.
        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(AWSResources.S3_BUCKET_NAME,fileToUpload.getName(),uploadId,GetETags(uploadResponses));
        CompleteMultipartUploadResult completeUploadResult = AWSResources.S3.completeMultipartUpload(completeRequest);
        System.out.println(completeUploadResult.getETag());
    } catch (AmazonS3Exception ex){
        System.out.println(ex.getErrorMessage());
    }
}
/**
 * Notified from
 * {@link AmazonS3EncryptionClient#uploadObject(UploadObjectRequest)} when
 * all parts have been successfully uploaded to S3. Finishes the upload by
 * issuing a complete-multipart-upload request with the given list of etags.
 *
 * @param partETags
 *            all the etags returned from S3 for the previous part uploads.
 *
 * @return the completed multi-part upload result
 */
public CompleteMultipartUploadResult onCompletion(List<PartETag> partETags) {
    CompleteMultipartUploadRequest completeRequest =
            new CompleteMultipartUploadRequest(
                    req.getBucketName(), req.getKey(), uploadId, partETags);
    return s3.completeMultipartUpload(completeRequest);
}
/**
 * Completes a multi-part upload.
 *
 * @param bucketName bucket holding the in-progress upload
 * @param key object key of the in-progress upload
 * @param uploadId identifier returned when the upload was initiated
 * @param partETags ETags of all uploaded parts, in part order
 * @return Future that will be completed with a complete-multipart-upload result on success, and an
 * {@link com.amazonaws.AmazonClientException} in case of transmission failure. The future may also be completed
 * with another runtime exception; however, this indicates a logical bug (programming error).
 * @see AmazonS3#completeMultipartUpload(com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)
 */
CompletableFuture<CompleteMultipartUploadResult> completeMultipartUpload(String bucketName,
        String key, String uploadId, List<PartETag> partETags);