/** * Determine if an interrupted exception is caused by the client execution timer * interrupting the current thread or some other task interrupting the thread for another * purpose. * * @return {@link ClientExecutionTimeoutException} if the {@link InterruptedException} was * caused by the {@link ClientExecutionTimer}. Otherwise re-interrupts the current thread * and returns a {@link SdkClientException} wrapping an {@link InterruptedException} */ private RuntimeException handleInterruptedException(InterruptedException e) { if (e instanceof SdkInterruptedException) { if (((SdkInterruptedException) e).getResponse() != null) { ((SdkInterruptedException) e).getResponse().getHttpResponse().getHttpRequest().abort(); } } if (executionContext.getClientExecutionTrackerTask().hasTimeoutExpired()) { // Clear the interrupt status Thread.interrupted(); return new ClientExecutionTimeoutException(); } else { Thread.currentThread().interrupt(); return new AbortedException(e); } }
@Test(expected = AbortedException.class)
public void clientExecutionTimeoutEnabled_aborted_exception_occurs_timeout_not_expired() throws Exception {
    // Retries are disabled so the AbortedException from the single attempt surfaces directly.
    final ClientConfiguration clientConfig = new ClientConfiguration()
            .withClientExecutionTimeout(CLIENT_EXECUTION_TIMEOUT)
            .withMaxErrorRetry(0);
    ConnectionManagerAwareHttpClient spyHttpClient = createRawHttpClientSpy(clientConfig);
    // Simulate the request aborting before the client execution timer expires.
    doThrow(new AbortedException())
            .when(spyHttpClient)
            .execute(any(HttpRequestBase.class), any(HttpContext.class));
    httpClient = new AmazonHttpClient(clientConfig, spyHttpClient, null);
    execute(httpClient, createMockGetRequest());
}
@Override public void run() { while(!closed.get()) { try { process(client.receiveMessage(request).getMessages()); status.lazySet(CheckResult.OK); failureBackoff = DEFAULT_BACKOFF; } catch (AbortedException ae) { status.lazySet(CheckResult.failed(ae)); } catch (Exception e) { logger.log(Level.WARNING, "sqs receive failed", e); status.lazySet(CheckResult.failed(e)); // backoff on failures to avoid pinging SQS in a tight loop if there are failures. try { Thread.sleep(failureBackoff); } catch (InterruptedException ie) {} finally { failureBackoff = Math.max (failureBackoff * 2, MAX_BACKOFF); } } } }
private void closeStream() { if (in != null) { try { if (in instanceof S3ObjectInputStream) { ((S3ObjectInputStream) in).abort(); } else { in.close(); } } catch (IOException | AbortedException ignored) { // thrown if the current thread is in the interrupted state } in = null; STATS.connectionReleased(); } }
/**
 * Executes the request and returns the result.
 *
 * @throws SdkClientException if no execution context was supplied
 */
private Response<Output> execute() {
    if (null == executionContext) {
        throw new SdkClientException(
                "Internal SDK Error: No execution context parameter specified.");
    }
    try {
        return executeWithTimer();
    } catch (AbortedException abort) {
        throw handleAbortedException(abort);
    } catch (InterruptedException interrupt) {
        throw handleInterruptedException(interrupt);
    }
}
/** * Aborts with subclass specific abortion logic executed if needed. * Note the interrupted status of the thread is cleared by this method. * @throws AbortedException if found necessary. */ protected final void abortIfNeeded() { if (shouldAbort()) { abort(); // execute subclass specific abortion logic throw new AbortedException(); } }
/** * Aborts with subclass specific abortion logic executed if needed. * Note the interrupted status of the thread is cleared by this method. * @throws AbortedException if found necessary. */ protected final void abortIfNeeded() { if (shouldAbort()) { try { abort(); // execute subclass specific abortion logic } catch (IOException e) { LogFactory.getLog(getClass()).debug("FYI", e); } throw new AbortedException(); } }
/** * Blocks the running thread if running out of disk space. * * @throws AbortedException * if the running thread is interrupted while acquiring a * semaphore */ private void blockIfNecessary() { if (diskPermits == null || diskLimit == Long.MAX_VALUE) return; try { diskPermits.acquire(); } catch (InterruptedException e) { // don't want to re-interrupt so it won't cause SDK stream to be // closed in case the thread is reused for a different request throw new AbortedException(e); } }
@Test public void whenShortTimeoutSet_timesOut() throws Exception { // By setting a timeout of 1ms, it's not physically possible to complete both the us-west-2 and eu-central-1 // requests due to speed of light limits. KmsMasterKeyProvider mkp = KmsMasterKeyProvider.builder() .withClientBuilder( AWSKMSClientBuilder.standard() .withClientConfiguration( new ClientConfiguration() .withRequestTimeout(1) ) ) .withKeysForEncryption(Arrays.asList(KMSTestFixtures.TEST_KEY_IDS)) .build(); try { new AwsCrypto().encryptData(mkp, new byte[1]); fail("Expected exception"); } catch (Exception e) { if (e instanceof AbortedException) { // ok - one manifestation of a timeout } else if (e.getCause() instanceof HttpRequestTimeoutException) { // ok - another kind of timeout } else { throw e; } } }
/**
 * Encrypts and writes the given items to DynamoDB in batches, retrying any
 * unprocessed items with backoff until the whole batch is accepted.
 * Throws AbortedException (with the interrupt status restored) if the backoff
 * sleep is interrupted.
 */
@Override
public void putItems(T... items) {
    // No-op on null or empty input.
    if ( null == items || 0 == items.length ) return;
    // Write in chunks of DDB_MAX_BATCH_WRITE_ITEM — presumably the DynamoDB
    // BatchWriteItem per-request limit; confirm against the constant's definition.
    for ( int chunk=0; chunk < items.length; chunk += DDB_MAX_BATCH_WRITE_ITEM ) {
        TableWriteItems request = new TableWriteItems(_tableName);
        int max = Math.min(items.length-chunk, DDB_MAX_BATCH_WRITE_ITEM);
        for ( int idx=0; idx < max; idx++ ) {
            // Each item is encrypted before being added to the batch request.
            request.addItemToPut(_encryption.encrypt(toItem(items[chunk+idx])));
        }
        BatchWriteItemOutcome response = maybeBackoff(false, () -> _dynamodb.batchWriteItem(request));
        // Keep resubmitting whatever DynamoDB reports as unprocessed until none remain.
        while ( true ) {
            if ( null == response.getUnprocessedItems() ) break;
            List<WriteRequest> unprocessed = response.getUnprocessedItems().get(_tableName);
            if ( null == unprocessed || unprocessed.size() == 0 ) {
                // NOTE(review): resetPTE/gotPTE appear to track throttling state — confirm.
                resetPTE(null);
                break;
            }
            if(LOG.isDebugEnabled()) LOG.debug("putItems() unprocessed: "+unprocessed.size());
            gotPTE(false);
            try {
                Thread.sleep(backoffSleep(false));
            } catch ( InterruptedException ex ) {
                // Restore the interrupt status before surfacing the abort.
                Thread.currentThread().interrupt();
                throw new AbortedException(ex);
            }
            // Copied to a local so the lambda below captures an effectively-final reference.
            Map<String,List<WriteRequest>> unproc = response.getUnprocessedItems();
            response = maybeBackoff(false, () -> _dynamodb.batchWriteItemUnprocessed(unproc));
        }
    }
}
/**
 * Deletes the items identified by the given keys from DynamoDB in batches,
 * retrying any unprocessed deletes with backoff until the whole batch is accepted.
 * Null keys are skipped. Throws AbortedException (with the interrupt status
 * restored) if the backoff sleep is interrupted.
 */
@Override
public void deleteItems(IndexKey... keys) {
    // No-op on null or empty input.
    if ( null == keys || 0 == keys.length ) return;
    // Delete in chunks of DDB_MAX_BATCH_WRITE_ITEM — presumably the DynamoDB
    // BatchWriteItem per-request limit; confirm against the constant's definition.
    for ( int chunk=0; chunk < keys.length; chunk += DDB_MAX_BATCH_WRITE_ITEM ) {
        TableWriteItems request = new TableWriteItems(_tableName);
        int max = Math.min(keys.length-chunk, DDB_MAX_BATCH_WRITE_ITEM);
        for ( int idx=0; idx < max; idx++ ) {
            IndexKey key = keys[chunk+idx];
            // Silently skip null entries rather than failing the whole batch.
            if ( null == key ) continue;
            request.addPrimaryKeyToDelete(toPrimaryKey(key));
        }
        BatchWriteItemOutcome response = maybeBackoff(false, () -> _dynamodb.batchWriteItem(request));
        // Keep resubmitting whatever DynamoDB reports as unprocessed until none remain.
        while ( true ) {
            if ( null == response.getUnprocessedItems() ) break;
            List<WriteRequest> unprocessed = response.getUnprocessedItems().get(_tableName);
            if ( null == unprocessed || unprocessed.size() == 0 ) {
                // NOTE(review): resetPTE/gotPTE appear to track throttling state — confirm.
                resetPTE(null);
                break;
            }
            if(LOG.isDebugEnabled()) LOG.debug("deleteItems() unprocessed: "+unprocessed.size());
            gotPTE(false);
            try {
                Thread.sleep(backoffSleep(false));
            } catch ( InterruptedException ex ) {
                // Restore the interrupt status before surfacing the abort.
                Thread.currentThread().interrupt();
                throw new AbortedException(ex);
            }
            // Copied to a local so the lambda below captures an effectively-final reference.
            Map<String,List<WriteRequest>> unproc = response.getUnprocessedItems();
            response = maybeBackoff(false, () -> _dynamodb.batchWriteItemUnprocessed(unproc));
        }
    }
}
/**
 * Classifies a read failure into the matching metric counter.
 * Checks are ordered most-specific first; anything unrecognized lands in
 * the catch-all counter.
 */
public void newReadError(Exception e) {
    if (e instanceof SocketException) {
        socketExceptions.update(1);
        return;
    }
    if (e instanceof SocketTimeoutException) {
        socketTimeoutExceptions.update(1);
        return;
    }
    if (e instanceof AbortedException) {
        awsAbortedExceptions.update(1);
        return;
    }
    otherReadErrors.update(1);
}
/**
 * Deletes a message from the given SQS queue. A client-side abort is logged and
 * ignored; any other client error is wrapped in a MessagingException.
 *
 * @throws MessagingException if the delete fails for a reason other than an abort
 */
private void deleteMessageFromQueue(String messageReceiptHandle, String messageQueueUrl)
        throws MessagingException {
    final DeleteMessageRequest deleteRequest =
            new DeleteMessageRequest(messageQueueUrl, messageReceiptHandle);
    try {
        amazonSQS.deleteMessage(deleteRequest);
    } catch (AbortedException e) {
        // Caught before the broader AmazonClientException so aborts stay best-effort.
        LOG.info("Client abort delete message.");
    } catch (AmazonClientException ase) {
        throw new MessagingException("Failed to delete message with receipt handle " + messageReceiptHandle + " from queue " + messageQueueUrl, ase);
    }
}
/**
 * Determine if an aborted exception is caused by the client execution timer interrupting
 * the current thread. If so throws {@link ClientExecutionTimeoutException} else throws the
 * original {@link AbortedException}
 *
 * @param ae aborted exception that occurred
 * @return {@link ClientExecutionTimeoutException} if the {@link AbortedException} was
 * caused by the {@link ClientExecutionTimer}. Otherwise throws the original {@link
 * AbortedException}
 */
private RuntimeException handleAbortedException(final AbortedException ae) {
    final boolean timerFired =
            executionContext.getClientExecutionTrackerTask().hasTimeoutExpired();
    return timerFired ? new ClientExecutionTimeoutException() : ae;
}