/**
 * This method differentiates OpenraspDailyRollingFileAppender from its
 * super class.
 *
 * <p>Before actually logging, this method will check whether it is
 * time to do a rollover. If it is, it will schedule the next
 * rollover time and then rollover.
 */
protected void subAppend(LoggingEvent event) {
    long n = System.currentTimeMillis();
    if (n >= nextCheck) {
        now.setTime(n);
        nextCheck = rc.getNextCheckMillis(now);
        try {
            rollOver();
        } catch (IOException ioe) {
            if (ioe instanceof InterruptedIOException) {
                Thread.currentThread().interrupt();
            }
            LogLog.error("rollOver() failed.", ioe);
        }
    }
    super.subAppend(event);
}
/**
 * Creates a directory for a filesystem and configuration object. Assumes the caller has
 * already checked whether the directory exists.
 *
 * @param fs   the filesystem to create the directory on
 * @param conf the configuration holding the retry settings
 * @param dir  the directory to create
 * @return the result of fs.mkdirs(). If the underlying fs throws an IOException, it checks
 *         whether the directory exists or not, and returns true if it exists.
 * @throws IOException if the directory cannot be created after all retries
 */
private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
    throws IOException {
  int i = 0;
  IOException lastIOE = null;
  int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
    DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
  int baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
    DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
  do {
    try {
      return fs.mkdirs(dir);
    } catch (IOException ioe) {
      lastIOE = ioe;
      if (fs.exists(dir)) return true; // directory is present
      try {
        sleepBeforeRetry("Create Directory", i + 1, baseSleepBeforeRetries,
          hdfsClientRetriesNumber);
      } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
      }
    }
  } while (++i <= hdfsClientRetriesNumber);
  throw new IOException("Exception in createDir", lastIOE);
}
@Override
public boolean retryRequest(IOException exception, int executionCount, HttpContext context) {
    if (executionCount >= 3) { // give up after 3 retries
        return false;
    }
    if (exception instanceof NoHttpResponseException) { // retry if the server dropped the connection
        return true;
    }
    if (exception instanceof SSLHandshakeException) { // do not retry SSL handshake failures
        return false;
    }
    if (exception instanceof InterruptedIOException) { // timeout
        return true;
    }
    if (exception instanceof UnknownHostException) { // target host unreachable
        return false;
    }
    if (exception instanceof ConnectTimeoutException) { // connection timed out
        return false;
    }
    if (exception instanceof SSLException) { // other SSL failures
        return false;
    }
    HttpClientContext clientContext = HttpClientContext.adapt(context);
    HttpRequest request = clientContext.getRequest();
    // Retry only if the request is idempotent (no enclosed entity).
    if (!(request instanceof HttpEntityEnclosingRequest)) {
        return true;
    }
    return false;
}
/**
 * Returns once the peer is ready to receive {@code count} bytes.
 *
 * @throws IOException if the stream was finished or closed, or the
 *     thread was interrupted.
 */
private void waitUntilWritable(int count, boolean last) throws IOException {
  try {
    while (unacknowledgedBytes + count >= writeWindowSize) {
      SpdyStream.this.wait(); // Wait until we receive a WINDOW_UPDATE.

      // The stream may have been closed or reset while we were waiting!
      if (!last && closed) {
        throw new IOException("stream closed");
      } else if (finished) {
        throw new IOException("stream finished");
      } else if (errorCode != null) {
        throw new IOException("stream was reset: " + errorCode);
      }
    }
  } catch (InterruptedException e) {
    throw new InterruptedIOException();
  }
}
@Test public void interruptReadingResponseBody() throws Exception {
  int responseBodySize = 2 * 1024 * 1024; // 2 MiB
  server.enqueue(new MockResponse()
      .setBody(new Buffer().write(new byte[responseBodySize]))
      .throttleBody(64 * 1024, 125, TimeUnit.MILLISECONDS)); // ~512 KiB per second
  server.start();

  interruptLater(500);

  HttpURLConnection connection = new OkUrlFactory(client).open(server.url("/").url());
  InputStream responseBody = connection.getInputStream();
  byte[] buffer = new byte[1024];
  try {
    while (responseBody.read(buffer) != -1) {
    }
    fail("Expected thread to be interrupted");
  } catch (InterruptedIOException expected) {
  }

  responseBody.close();
}
@Override public void connect() throws IOException {
  if (executed) return;

  Call call = buildCall();
  executed = true;
  call.enqueue(this);

  synchronized (lock) {
    try {
      while (connectPending && response == null && callFailure == null) {
        lock.wait(); // Wait until the network interceptor is reached or the call fails.
      }
      if (callFailure != null) {
        throw propagate(callFailure);
      }
    } catch (InterruptedException e) {
      throw new InterruptedIOException();
    }
  }
}
@Override
public void onError(Throwable e) {
    LogUtils.e("Retrofit", e.getMessage());
    dismissProgress();
    if (e instanceof HttpException) {
        // HTTP error
        onException(ExceptionReason.BAD_NETWORK);
    } else if (e instanceof ConnectException || e instanceof UnknownHostException) {
        // connection error
        onException(CONNECT_ERROR);
    } else if (e instanceof InterruptedIOException) {
        // connection timeout
        onException(CONNECT_TIMEOUT);
    } else if (e instanceof JsonParseException
            || e instanceof JSONException
            || e instanceof ParseException) {
        // parse error
        onException(PARSE_ERROR);
    } else {
        onException(UNKNOWN_ERROR);
    }
}
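/*
 * The onError handler above maps transport failures to an ExceptionReason passed to onException.
 * The enum itself is not part of this collection; the definition below is a hypothetical, minimal
 * reconstruction consistent with the constants used above (the real project may attach messages
 * or codes to each constant).
 */
public enum ExceptionReason {
    BAD_NETWORK,     // HTTP status errors (HttpException)
    CONNECT_ERROR,   // ConnectException / UnknownHostException
    CONNECT_TIMEOUT, // InterruptedIOException, i.e. socket or connect timeouts
    PARSE_ERROR,     // JSON / date parsing failures
    UNKNOWN_ERROR    // anything else
}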
@Override
public void awaitReadable(long l, TimeUnit timeUnit) throws IOException {
    if (Thread.currentThread() == getIoThread()) {
        throw UndertowMessages.MESSAGES.awaitCalledFromIoThread();
    }
    if (data == null) {
        synchronized (lock) {
            if (data == null) {
                try {
                    waiters++;
                    lock.wait(timeUnit.toMillis(l));
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new InterruptedIOException();
                } finally {
                    waiters--;
                }
            }
        }
    }
}
/**
 * This test sets off a blocking thread and then interrupts it, before
 * checking that the thread was interrupted.
 *
 * @throws Throwable any exception other than the one that was expected
 */
@Test(timeout = 10000)
public void testInterruptedWaitForProxy() throws Throwable {
  RpcThread worker = new RpcThread(100);
  worker.start();
  Thread.sleep(1000);
  assertTrue("worker hasn't started", worker.waitStarted);
  worker.interrupt();
  worker.join();
  Throwable caught = worker.getCaught();
  assertNotNull("No exception was raised", caught);
  // Look for the root cause here, which can be wrapped
  // as part of the NetUtils work. Having this test look
  // at the type of exception there would be brittle to improvements
  // in exception diagnostics.
  Throwable cause = caught.getCause();
  if (cause == null) {
    // no inner cause, use outer exception as root cause.
    cause = caught;
  }
  if (!(cause instanceof InterruptedIOException)
      && !(cause instanceof ClosedByInterruptException)) {
    throw caught;
  }
}
/**
 * Creates a directory. Assumes the caller has already checked whether the directory exists.
 *
 * @param dir the directory to create
 * @return the result of fs.mkdirs(). If the underlying fs throws an IOException, it checks
 *         whether the directory exists or not, and returns true if it exists.
 * @throws IOException if the directory cannot be created after all retries
 */
boolean createDir(Path dir) throws IOException {
  int i = 0;
  IOException lastIOE = null;
  do {
    try {
      return fs.mkdirs(dir);
    } catch (IOException ioe) {
      lastIOE = ioe;
      if (fs.exists(dir)) return true; // directory is present
      try {
        sleepBeforeRetry("Create Directory", i + 1);
      } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
      }
    }
  } while (++i <= hdfsClientRetriesNumber);
  throw new IOException("Exception in createDir", lastIOE);
}
/**
 * Blocks until a write entry has been assigned by the WAL subsystem.
 * @return A WriteEntry gotten from the local WAL subsystem. Must be completed by calling
 *         mvcc#complete or mvcc#completeAndWait.
 * @throws InterruptedIOException if interrupted while waiting for the sequence number
 * @see #setWriteEntry(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry)
 */
@InterfaceAudience.Private // For internal use only.
public MultiVersionConcurrencyControl.WriteEntry getWriteEntry() throws InterruptedIOException {
  try {
    this.seqNumAssignedLatch.await();
  } catch (InterruptedException ie) {
    // If interrupted... clear out our entry else we can block up mvcc.
    MultiVersionConcurrencyControl mvcc = getMvcc();
    LOG.debug("mvcc=" + mvcc + ", writeEntry=" + this.writeEntry);
    if (mvcc != null) {
      if (this.writeEntry != null) {
        mvcc.complete(this.writeEntry);
      }
    }
    InterruptedIOException iie = new InterruptedIOException();
    iie.initCause(ie);
    throw iie;
  }
  return this.writeEntry;
}
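/*
 * Several snippets in this collection repeat the same three-step idiom: create an
 * InterruptedIOException, attach the InterruptedException via initCause, and rethrow. As a
 * hypothetical refactoring (the class and method names below are assumptions, not part of any of
 * the original codebases), the idiom can be factored into one helper that also restores the
 * thread's interrupt status, similar to what the Ratis snippet's IOUtils.toInterruptedIOException
 * call already provides.
 */
final class InterruptUtils {
  private InterruptUtils() {
  }

  /** Wraps an InterruptedException as an InterruptedIOException, preserving the cause and interrupt flag. */
  static InterruptedIOException toInterruptedIOException(String message, InterruptedException cause) {
    Thread.currentThread().interrupt(); // keep the interruption visible to callers further up the stack
    InterruptedIOException iioe = new InterruptedIOException(message);
    iioe.initCause(cause);
    return iioe;
  }
}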
/**
 * Process an event for the current in-progress request using the specified input and
 * output streams.
 *
 * @throws IOException error during an I/O operation
 */
@Override
public SocketState event(SocketStatus status) throws IOException {
    RequestInfo rp = request.getRequestProcessor();

    try {
        rp.setStage(org.apache.coyote.Constants.STAGE_SERVICE);
        if (!getAdapter().event(request, response, status)) {
            setErrorState(ErrorState.CLOSE_NOW, null);
        }
    } catch (InterruptedIOException e) {
        setErrorState(ErrorState.CLOSE_NOW, e);
    } catch (Throwable t) {
        ExceptionUtils.handleThrowable(t);
        // 500 - Internal Server Error
        response.setStatus(500);
        setErrorState(ErrorState.CLOSE_NOW, t);
        getAdapter().log(request, response, 0);
        log.error(sm.getString("http11processor.request.process"), t);
    }

    rp.setStage(org.apache.coyote.Constants.STAGE_ENDED);

    if (getErrorState().isError() || status == SocketStatus.STOP) {
        return SocketState.CLOSED;
    } else if (!comet) {
        inputBuffer.nextRequest();
        outputBuffer.nextRequest();
        return SocketState.OPEN;
    } else {
        return SocketState.LONG;
    }
}
/**
 * Display an exception prefaced with the command name. Also increments
 * the error count for the command, which will result in a non-zero exit
 * code.
 * @param e exception to display
 */
public void displayError(Exception e) {
  // build up a list of exceptions that occurred
  exceptions.add(e);
  // use a runtime exception so it rips up through the stack and exits out
  if (e instanceof InterruptedIOException) {
    throw new CommandInterruptException();
  }

  String errorMessage = e.getLocalizedMessage();
  if (errorMessage == null) {
    // this is an unexpected condition, so dump the whole exception since
    // it's probably a nasty internal error where the backtrace would be
    // useful
    errorMessage = StringUtils.stringifyException(e);
    LOG.debug(errorMessage);
  } else {
    errorMessage = errorMessage.split("\n", 2)[0];
  }
  displayError(errorMessage);
}
@Test
public void testInterruptedRename() throws Exception {
  FSDataOutputStream out = mock(FSDataOutputStream.class);
  whenFsCreate().thenReturn(out);
  when(mockFs.getFileStatus(eq(tmpPath))).thenReturn(fileStat);
  when(mockFs.rename(eq(tmpPath), eq(path))).thenThrow(
      new InterruptedIOException());
  FSInputStream in = mock(FSInputStream.class);
  when(in.read(any(byte[].class), anyInt(), anyInt())).thenReturn(-1);

  tryCopyStream(in, false);
  verify(mockFs).delete(eq(tmpPath), anyBoolean());
  verify(mockFs).rename(eq(tmpPath), eq(path));
  verify(mockFs, never()).delete(eq(path), anyBoolean());
  verify(mockFs, never()).close();
}
private void waitTableDisabled(final long deadlineTs)
    throws IOException, TimeoutException {
  waitForState(deadlineTs, new WaitForStateCallable() {
    @Override
    public boolean checkState(int tries) throws IOException {
      return getAdmin().isTableDisabled(tableName);
    }

    @Override
    public void throwInterruptedException() throws InterruptedIOException {
      throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
    }

    @Override
    public void throwTimeoutException(long elapsedTime) throws TimeoutException {
      throw new TimeoutException("Table " + tableName + " not yet disabled after "
          + elapsedTime + "msec");
    }
  });
}
/**
 * Returns once the input stream is either readable or finished. Throws
 * a {@link SocketTimeoutException} if the read timeout elapses before
 * that happens.
 */
private void waitUntilReadable() throws IOException {
  long start = 0;
  long remaining = 0;
  if (readTimeoutMillis != 0) {
    start = (System.nanoTime() / 1000000);
    remaining = readTimeoutMillis;
  }
  try {
    while (pos == -1 && !finished && !closed && errorCode == null) {
      if (readTimeoutMillis == 0) {
        SpdyStream.this.wait();
      } else if (remaining > 0) {
        SpdyStream.this.wait(remaining);
        remaining = start + readTimeoutMillis - (System.nanoTime() / 1000000);
      } else {
        throw new SocketTimeoutException();
      }
    }
  } catch (InterruptedException e) {
    throw new InterruptedIOException();
  }
}
@Test
public void close_wrapsInterruptedAndSetsFlag() throws Exception {
  LazyRegisterEventProcessorFactoryWithHost lazy =
      new TestLazyRegisterEventProcessorFactoryWithHost() {
        @Override
        void unregisterEventProcessorFactoryFromHost() throws InterruptedException {
          throw new InterruptedException();
        }
      };
  lazy.get();

  try {
    lazy.close();
  } catch (InterruptedIOException e) {
    assertThat(e).hasCauseInstanceOf(InterruptedException.class);
    assertThat(Thread.currentThread().isInterrupted()).isTrue();
  }
}
/**
 * Performs the decoding.
 * @param toDecode the string to decode
 * @return the object resulting from the decoding
 * @throws InterruptedIOException if the string to decode is missing
 * @throws IllegalArgumentException if the decoded string contains an error
 */
public Object decode(String toDecode) throws InterruptedIOException, IllegalArgumentException {
    JsonParser parser = new JsonParser();
    if (toDecode == null) {
        throw new InterruptedIOException("Error");
    }
    System.out.println(toDecode);
    String method = parser.parse(toDecode).getAsJsonObject().get("methodInvoked").getAsString();
    if (commands.containsKey(method)) {
        return commands.get(method).execute(toDecode);
    } else {
        throw new IllegalArgumentException("Unknown communication!");
    }
}
@Override
public void send(byte[] packet, int len) throws IOException {
    while (!stopped.get()) {
        Tunnel tunnel = null;
        try {
            tunnel = provider.getCurrentTunnel();
            tunnel.send(packet, len);
            return;
        } catch (IOException | InterruptedException e) {
            Log.e(TAG, "Cannot send to tunnel", e);
            if (tunnel != null) {
                provider.invalidateTunnel(tunnel);
            }
        }
    }
    throw new InterruptedIOException("Persistent tunnel stopped");
}
protected void doException(final Exception e) throws Exception {
    if (parent.launchLocation != null) {
        final ArrayList<StackTraceElement> stack =
                new ArrayList<>(Arrays.asList(e.getStackTrace()));
        stack.addAll(Arrays.asList(parent.launchLocation));
        e.setStackTrace(stack.toArray(new StackTraceElement[stack.size()]));
    }
    postToUiThreadAndWait(new Callable<Object>() {
        public Object call() throws Exception {
            if (e instanceof InterruptedException || e instanceof InterruptedIOException) {
                parent.onInterrupted(e);
            } else {
                parent.onException(e);
            }
            return null;
        }
    });
}
private RaftClientReply sendRequestWithRetry(Supplier<RaftClientRequest> supplier)
    throws InterruptedIOException, StateMachineException, GroupMismatchException {
  for (;;) {
    final RaftClientRequest request = supplier.get();
    final RaftClientReply reply = sendRequest(request);
    if (reply != null) {
      return reply;
    }
    // sleep and then retry
    try {
      retryInterval.sleep();
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
      throw IOUtils.toInterruptedIOException("Interrupted when sending " + request, ie);
    }
  }
}
/**
 * Renames a directory. Assumes the caller has already checked that the source directory exists.
 *
 * @param srcpath the path to rename
 * @param dstPath the destination path
 * @return true if the rename is successful.
 * @throws IOException if the rename fails after all retries
 */
public boolean rename(Path srcpath, Path dstPath) throws IOException {
  IOException lastIOE = null;
  int i = 0;
  do {
    try {
      return fs.rename(srcpath, dstPath);
    } catch (IOException ioe) {
      lastIOE = ioe;
      if (!fs.exists(srcpath) && fs.exists(dstPath)) return true; // successful move
      // dir is not there, retry after some time.
      try {
        sleepBeforeRetry("Rename Directory", i + 1);
      } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
      }
    }
  } while (++i <= hdfsClientRetriesNumber);

  throw new IOException("Exception in rename", lastIOE);
}
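/*
 * The retry helpers above (createDirOnFileSystem, createDir, rename) delegate their backoff to a
 * sleepBeforeRetry method that is not shown in this collection. The sketch below is a minimal
 * guess at what such a helper could look like, assuming simple linear backoff capped by the
 * configured retry count; the signature and backoff policy are assumptions, not the original
 * HBase implementation, and the two-argument overload presumably forwards instance-level settings.
 */
private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries,
    int hdfsClientRetriesNumber) throws InterruptedException {
  if (sleepMultiplier > hdfsClientRetriesNumber) {
    // Out of retries; let the caller's do/while condition end the loop.
    return;
  }
  // Linear backoff: the Nth attempt sleeps N * base milliseconds.
  Thread.sleep((long) baseSleepBeforeRetries * sleepMultiplier);
}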
/**
 * Randomly pick a connection and process the batch of actions for a given table.
 * @param actions the actions
 * @param tableName table name
 * @param results the results array
 * @param callback the callback invoked for each batch result
 * @throws IOException
 */
@SuppressWarnings("deprecation")
public <R> void processBatchCallback(List<? extends Row> actions, TableName tableName,
    Object[] results, Batch.Callback<R> callback) throws IOException {
  // Currently used by RegionStateStore
  // A deprecated method is used as multiple threads accessing RegionStateStore do a single put
  // and htable is not thread safe. Alternative would be to create an Htable instance for each
  // put but that is not very efficient.
  // See HBASE-11610 for more details.
  try {
    hConnections[ThreadLocalRandom.current().nextInt(noOfConnections)].processBatchCallback(
        actions, tableName, this.batchPool, results, callback);
  } catch (InterruptedException e) {
    throw new InterruptedIOException(e.getMessage());
  }
}
@Override // AbstractDelegationTokenManager
protected void logExpireToken(final DelegationTokenIdentifier dtId) throws IOException {
  synchronized (noInterruptsLock) {
    // The edit logging code will fail catastrophically if it
    // is interrupted during a logSync, since the interrupt
    // closes the edit log files. Doing this inside the
    // above lock and then checking interruption status
    // prevents this bug.
    if (Thread.interrupted()) {
      throw new InterruptedIOException("Interrupted before expiring delegation token");
    }
    namesystem.logExpireDelegationToken(dtId);
  }
}
public HttpResponse execute(HttpHost target, HttpRequest request, HttpContext context)
    throws IOException {
  for (int c = 1;; c++) {
    HttpResponse response = backend.execute(target, request, context);
    if (retryStrategy.retryRequest(response, c, context)) {
      long nextInterval = retryStrategy.getRetryInterval();
      try {
        log.trace("Wait for " + nextInterval);
        Thread.sleep(nextInterval);
      } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
      }
    } else {
      return response;
    }
  }
}
/** Wait until the async process does not have more than max tasks in progress. */
private void waitForMaximumCurrentTasks(int max) throws InterruptedIOException {
  long lastLog = EnvironmentEdgeManager.currentTime();
  long currentInProgress, oldInProgress = Long.MAX_VALUE;
  while ((currentInProgress = this.tasksInProgress.get()) > max) {
    if (oldInProgress != currentInProgress) { // Wait for in progress to change.
      long now = EnvironmentEdgeManager.currentTime();
      if (now > lastLog + 10000) {
        lastLog = now;
        LOG.info("#" + id + ", waiting for some tasks to finish. Expected max="
            + max + ", tasksInProgress=" + currentInProgress);
      }
    }
    oldInProgress = currentInProgress;
    try {
      synchronized (this.tasksInProgress) {
        if (tasksInProgress.get() != oldInProgress) break;
        this.tasksInProgress.wait(100);
      }
    } catch (InterruptedException e) {
      throw new InterruptedIOException("#" + id + ", interrupted."
          + " currentNumberOfTask=" + currentInProgress);
    }
  }
}
/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets. */
private DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno, boolean lastPacketInBlock) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException("seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
      getChecksumSize(), lastPacketInBlock);
}
@Override
public void awaitWritable(long l, TimeUnit timeUnit) throws IOException {
    if (Thread.currentThread() == getIoThread()) {
        throw UndertowMessages.MESSAGES.awaitCalledFromIoThread();
    }
    synchronized (lock) {
        if (anyAreSet(state, STATE_CLOSED) || broken) {
            return;
        }
        if (readyForFlush) {
            try {
                waiterCount++;
                if (readyForFlush && !anyAreSet(state, STATE_CLOSED) && !broken) {
                    lock.wait(timeUnit.toMillis(l));
                }
            } catch (InterruptedException e) {
                throw new InterruptedIOException();
            } finally {
                waiterCount--;
            }
        }
    }
}
/**
 * Returns the stream's response headers, blocking if necessary if they
 * have not been received yet.
 */
public synchronized List<String> getResponseHeaders() throws IOException {
  long remaining = 0;
  long start = 0;
  if (readTimeoutMillis != 0) {
    start = (System.nanoTime() / 1000000);
    remaining = readTimeoutMillis;
  }
  try {
    while (responseHeaders == null && errorCode == null) {
      if (readTimeoutMillis == 0) { // No timeout configured.
        wait();
      } else if (remaining > 0) {
        wait(remaining);
        remaining = start + readTimeoutMillis - (System.nanoTime() / 1000000);
      } else {
        throw new SocketTimeoutException("Read response header timeout. readTimeoutMillis: "
            + readTimeoutMillis);
      }
    }
    if (responseHeaders != null) {
      return responseHeaders;
    }
    throw new IOException("stream was reset: " + errorCode);
  } catch (InterruptedException e) {
    InterruptedIOException rethrow = new InterruptedIOException();
    rethrow.initCause(e);
    throw rethrow;
  }
}
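/*
 * waitUntilReadable and getResponseHeaders above share the same monitor-wait-with-deadline idiom:
 * record a start time, wait for the remaining window, and recompute the remainder after every
 * wakeup so spurious notifications do not extend the deadline. Below is a generic sketch of that
 * idiom; the helper name and parameters are illustrative only (not original OkHttp code), the
 * caller is assumed to hold the monitor of `lock`, and java.util.function.BooleanSupplier plus the
 * java.io/java.net exception types are assumed to be imported.
 */
private static void awaitWithDeadline(Object lock, long timeoutMillis, BooleanSupplier done)
    throws IOException {
  long start = System.nanoTime() / 1000000;
  long remaining = timeoutMillis;
  try {
    while (!done.getAsBoolean()) {
      if (timeoutMillis == 0) {
        lock.wait(); // no timeout configured: wait indefinitely
      } else if (remaining > 0) {
        lock.wait(remaining); // wait out what is left of the window
        remaining = start + timeoutMillis - System.nanoTime() / 1000000;
      } else {
        throw new SocketTimeoutException(); // deadline exceeded
      }
    }
  } catch (InterruptedException e) {
    throw new InterruptedIOException(); // surface interruption as an I/O failure
  }
}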
static InetAddress getAddressByName(String host) {
  try {
    return InetAddress.getByName(host);
  } catch (Exception e) {
    if (e instanceof InterruptedIOException || e instanceof InterruptedException) {
      Thread.currentThread().interrupt();
    }
    LogLog.error("Could not find address of [" + host + "].", e);
    return null;
  }
}
private void handleException(final IOException ioe) {
  // If we couldn't read the request, return.
  if (msg == null) {
    return;
  }
  // If we have been aborted by another thread, return.
  if (mode == ABORT_MODE) {
    return;
  }
  // If the request was successfully completed but the exception happened later, return.
  if (mode == PIPE_MODE) {
    return;
  }

  int error_code = SocksProxyBase.SOCKS_FAILURE;

  if (ioe instanceof SocksException) {
    error_code = ((SocksException) ioe).errCode;
  } else if (ioe instanceof NoRouteToHostException) {
    error_code = SocksProxyBase.SOCKS_HOST_UNREACHABLE;
  } else if (ioe instanceof ConnectException) {
    error_code = SocksProxyBase.SOCKS_CONNECTION_REFUSED;
  } else if (ioe instanceof InterruptedIOException) {
    error_code = SocksProxyBase.SOCKS_TTL_EXPIRE;
  }

  if ((error_code > SocksProxyBase.SOCKS_ADDR_NOT_SUPPORTED) || (error_code < 0)) {
    error_code = SocksProxyBase.SOCKS_FAILURE;
  }

  sendErrorMessage(error_code);
}
public PairOfSameType<Region> stepsAfterPONR(final Server server,
    final RegionServerServices services, final PairOfSameType<Region> regions, User user)
    throws IOException {
  openDaughters(server, services, regions.getFirst(), regions.getSecond());
  if (useCoordinatedStateManager(server)) {
    ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
        .getSplitTransactionCoordination().completeSplitTransaction(services,
            regions.getFirst(), regions.getSecond(), std, parent);
  }

  transition(SplitTransactionPhase.BEFORE_POST_SPLIT_HOOK);

  // Coprocessor callback
  if (parent.getCoprocessorHost() != null) {
    if (user == null) {
      this.parent.getCoprocessorHost().postSplit(regions.getFirst(), regions.getSecond());
    } else {
      try {
        user.getUGI().doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            parent.getCoprocessorHost().postSplit(regions.getFirst(), regions.getSecond());
            return null;
          }
        });
      } catch (InterruptedException ie) {
        InterruptedIOException iioe = new InterruptedIOException();
        iioe.initCause(ie);
        throw iioe;
      }
    }
  }

  transition(SplitTransactionPhase.AFTER_POST_SPLIT_HOOK);

  return regions;
}
@Override
public Result[] call(int timeout) throws IOException {
  if (this.closed) return null;
  if (Thread.interrupted()) {
    throw new InterruptedIOException();
  }
  ScanRequest request = RequestConverter.buildScanRequest(getLocation()
      .getRegionInfo().getRegionName(), getScan(), getCaching(), true);
  ScanResponse response = null;
  controller = controllerFactory.newController();
  try {
    controller.setPriority(getTableName());
    controller.setCallTimeout(timeout);
    response = getStub().scan(controller, request);
    Result[] results = ResponseConverter.getResults(controller.cellScanner(), response);
    if (response.hasMoreResultsInRegion()) {
      setHasMoreResultsContext(true);
      setServerHasMoreResults(response.getMoreResultsInRegion());
    } else {
      setHasMoreResultsContext(false);
    }
    // We need to update result metrics since we are overriding call()
    updateResultsMetrics(results);
    return results;
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
/**
 * Reports a failure to complete a connection. Determines the next {@link ConnectionSpec} to try,
 * if any.
 *
 * @return {@code true} if the connection should be retried using
 *     {@link #configureSecureSocket(SSLSocket)} or {@code false} if not
 */
public boolean connectionFailed(IOException e) {
  // Any future attempt to connect using this strategy will be a fallback attempt.
  isFallback = true;

  if (!isFallbackPossible) {
    return false;
  }

  // If there was a protocol problem, don't recover.
  if (e instanceof ProtocolException) {
    return false;
  }

  // If there was an interruption or timeout (SocketTimeoutException), don't recover.
  // For the socket connect timeout case we do not try the same host with a different
  // ConnectionSpec: we assume it is unreachable.
  if (e instanceof InterruptedIOException) {
    return false;
  }

  // Look for known client-side or negotiation errors that are unlikely to be fixed by trying
  // again with a different connection spec.
  if (e instanceof SSLHandshakeException) {
    // If the problem was a CertificateException from the X509TrustManager, do not retry.
    if (e.getCause() instanceof CertificateException) {
      return false;
    }
  }
  if (e instanceof SSLPeerUnverifiedException) {
    // e.g. a certificate pinning error.
    return false;
  }

  // On Android, SSLProtocolExceptions can be caused by TLS_FALLBACK_SCSV failures, which means
  // we retry those when we probably should not.
  return (e instanceof SSLHandshakeException || e instanceof SSLProtocolException);
}
@Test public void getResponseHeadersTimesOut() throws Exception {
  // write the mocking script
  peer.sendFrame().settings(new Settings());
  peer.acceptFrame(); // ACK
  peer.acceptFrame(); // SYN_STREAM
  peer.acceptFrame(); // RST_STREAM
  peer.play();

  // play it back
  Http2Connection connection = connect(peer);
  Http2Stream stream = connection.newStream(headerEntries("b", "banana"), false);
  stream.readTimeout().timeout(500, TimeUnit.MILLISECONDS);
  long startNanos = System.nanoTime();
  try {
    stream.takeResponseHeaders();
    fail();
  } catch (InterruptedIOException expected) {
  }
  long elapsedNanos = System.nanoTime() - startNanos;
  awaitWatchdogIdle();
  assertEquals(500d, TimeUnit.NANOSECONDS.toMillis(elapsedNanos), 200d /* 200ms delta */);
  assertEquals(0, connection.openStreamCount());

  // verify the peer received what was expected
  assertEquals(Http2.TYPE_HEADERS, peer.takeFrame().type);
  assertEquals(Http2.TYPE_RST_STREAM, peer.takeFrame().type);
}
public static CloseableHttpClient createHttpClient() {
    ConnectionSocketFactory plainsf = PlainConnectionSocketFactory.getSocketFactory();
    ConnectionSocketFactory sslsf = new EasySSLConnectionSocketFactory();
            // SSLConnectionSocketFactory.getSocketFactory();
    Registry<ConnectionSocketFactory> registry = RegistryBuilder.<ConnectionSocketFactory>create()
            .register("http", plainsf)
            .register("https", sslsf)
            .build();
    PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(registry);
    // raise the maximum total number of connections to 200
    cm.setMaxTotal(200);
    // raise the default maximum number of connections per route to 20
    cm.setDefaultMaxPerRoute(20);

    // request retry handling
    HttpRequestRetryHandler httpRequestRetryHandler = new HttpRequestRetryHandler() {
        public boolean retryRequest(IOException exception, int executionCount, HttpContext context) {
            if (executionCount >= 5) { // give up after 5 retries
                return false;
            }
            if (exception instanceof NoHttpResponseException) { // retry if the server dropped the connection
                return true;
            }
            if (exception instanceof SSLHandshakeException) { // do not retry SSL handshake failures
                return false;
            }
            if (exception instanceof InterruptedIOException) { // timeout
                return false;
            }
            if (exception instanceof UnknownHostException) { // target host unreachable
                return false;
            }
            if (exception instanceof ConnectTimeoutException) { // connection timed out
                return false;
            }
            if (exception instanceof SSLException) { // other SSL failures
                return false;
            }
            HttpClientContext clientContext = HttpClientContext.adapt(context);
            HttpRequest request = clientContext.getRequest();
            // Retry only if the request is idempotent (no enclosed entity).
            if (!(request instanceof HttpEntityEnclosingRequest)) {
                return true;
            }
            return false;
        }
    };

    CloseableHttpClient httpClient = HttpClients.custom()
            .setConnectionManager(cm)
            .setRetryHandler(httpRequestRetryHandler)
            .build();
    return httpClient;
}
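/*
 * A minimal usage sketch for the factory above. The client is expected to be created once via
 * createHttpClient() and reused, so the pooling configured above pays off. HttpGet,
 * CloseableHttpResponse, and EntityUtils are standard Apache HttpClient 4.x classes; the method
 * name and URL parameter below are illustrative, not part of the original code.
 */
public static String fetch(CloseableHttpClient client, String url) throws IOException {
    HttpGet get = new HttpGet(url);
    try (CloseableHttpResponse response = client.execute(get)) {
        // Consuming the entity and closing the response returns the connection to the pool.
        return EntityUtils.toString(response.getEntity());
    }
}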
private void waitForTableEnabled(final long deadlineTs)
    throws IOException, TimeoutException {
  waitForState(deadlineTs, new WaitForStateCallable() {
    @Override
    public boolean checkState(int tries) throws IOException {
      try {
        if (getAdmin().isTableAvailable(desc.getTableName())) {
          return true;
        }
      } catch (TableNotFoundException tnfe) {
        LOG.debug("Table " + desc.getTableName() + " was not enabled, sleeping. tries=" + tries);
      }
      return false;
    }

    @Override
    public void throwInterruptedException() throws InterruptedIOException {
      throw new InterruptedIOException("Interrupted when waiting for table "
          + desc.getTableName() + " to be enabled");
    }

    @Override
    public void throwTimeoutException(long elapsedTime) throws TimeoutException {
      throw new TimeoutException("Table " + desc.getTableName() + " not enabled after "
          + elapsedTime + "msec");
    }
  });
}
@Test(timeout = 30000)
public void testWriteLockExcludesWriters() throws Exception {
  final String testName = "testWriteLockExcludesWriters";
  final ZKInterProcessReadWriteLock readWriteLock = getReadWriteLock(testName);
  List<Future<Void>> results = Lists.newArrayList();
  for (int i = 0; i < NUM_THREADS; ++i) {
    final String threadDesc = testName + i;
    results.add(executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws IOException {
        ZKInterProcessWriteLock writeLock = readWriteLock.writeLock(Bytes.toBytes(threadDesc));
        try {
          writeLock.acquire();
          try {
            // No one else should hold the lock
            assertTrue(isLockHeld.compareAndSet(false, true));
            Thread.sleep(1000);
            // No one else should have released the lock
            assertTrue(isLockHeld.compareAndSet(true, false));
          } finally {
            isLockHeld.set(false);
            writeLock.release();
          }
        } catch (InterruptedException e) {
          LOG.warn(threadDesc + " interrupted", e);
          Thread.currentThread().interrupt();
          throw new InterruptedIOException();
        }
        return null;
      }
    }));
  }
  MultithreadedTestUtil.assertOnFutures(results);
}