protected void checkCassandraException(Exception e) {
    // Count the exception, then classify it: known Cassandra driver exceptions are
    // rethrown as ReportedFailedException so the tuple can be failed and retried;
    // anything else is treated as an unexpected runtime error.
    _mexceptions.incr();
    if (e instanceof AlreadyExistsException ||
            e instanceof AuthenticationException ||
            e instanceof DriverException ||
            e instanceof DriverInternalError ||
            e instanceof InvalidConfigurationInQueryException ||
            e instanceof InvalidQueryException ||
            e instanceof InvalidTypeException ||
            e instanceof QueryExecutionException ||
            e instanceof QueryTimeoutException ||
            e instanceof QueryValidationException ||
            e instanceof ReadTimeoutException ||
            e instanceof SyntaxError ||
            e instanceof TraceRetrievalException ||
            e instanceof TruncateException ||
            e instanceof UnauthorizedException ||
            e instanceof UnavailableException ||
            e instanceof WriteTimeoutException) {
        throw new ReportedFailedException(e);
    } else {
        throw new RuntimeException(e);
    }
}
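// A minimal sketch (not from the original project) of how a Storm component might route
// driver errors through checkCassandraException(...): everything except that helper
// (the execute method, `session`, `collector`, keyspace/table/field names) is illustrative.
@Override
public void execute(Tuple tuple) {
    Statement insert = QueryBuilder.insertInto("mykeyspace", "events")
            .value("id", tuple.getStringByField("id"))
            .value("payload", tuple.getStringByField("payload"));
    try {
        session.execute(insert);
        collector.ack(tuple);
    } catch (Exception e) {
        // Known driver exceptions become ReportedFailedException, so the tuple is
        // failed (and may be replayed) instead of crashing the worker.
        checkCassandraException(e);
    }
}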
@Override
public void commit(Long txid) {
    DriverException lastException = null;
    // Read the current value. If the update fails to apply, the state may already have
    // changed, so recompute the new state and try to apply it again.
    for (Map.Entry<K, V> entry : aggregateValues.entrySet()) {
        int attempts = 0;
        boolean applied = false;
        while (!applied && attempts < maxAttempts) {
            try {
                applied = updateState(entry, txid);
            } catch (QueryExecutionException e) {
                lastException = e;
                LOG.warn("Caught [{}] on attempt {} for {}-{}",
                        e.getMessage(), attempts, txid, partitionIndex);
            }
            attempts++;
        }
        if (!applied) {
            String message = "Ran out of attempts [" + attempts + "] max of ["
                    + maxAttempts + "] " + txid + "-" + partitionIndex;
            if (lastException != null) {
                throw new CassandraCqlIncrementalStateException(message, lastException);
            } else {
                throw new CassandraCqlIncrementalStateException(message);
            }
        }
    }
}
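// A minimal sketch (an assumption, not the original updateState) of the kind of
// compare-and-set write the retry loop above expects: a conditional CQL update whose
// wasApplied() result tells commit() whether the write won the race. The table and
// column names ("aggregates", "key", "value", "txid") and the currentValue/combine/
// serialize helpers are all hypothetical.
private boolean updateState(Map.Entry<K, V> entry, Long txid) {
    V previous = currentValue(entry.getKey());    // hypothetical read of the stored value
    V next = combine(previous, entry.getValue()); // hypothetical aggregation step
    Statement cas = QueryBuilder.update("aggregates")
            .with(QueryBuilder.set("value", serialize(next)))
            .and(QueryBuilder.set("txid", txid))
            .where(QueryBuilder.eq("key", serialize(entry.getKey())))
            .onlyIf(QueryBuilder.eq("value", serialize(previous)));
    // execute() may throw QueryExecutionException, which commit() catches and retries.
    return session.execute(cas).wasApplied();
}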
public ResultSet executeSync(Statement cql) {
    if (logCql) {
        logger.debug("Executing QueryBuilder Query: {}", cql.toString());
    }
    // Just run a normal execute without a prepared statement.
    try {
        return session.execute(cql);
    } catch (NoHostAvailableException | QueryExecutionException e) {
        throw new RhombusTimeoutException(e);
    }
}
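// A hypothetical caller of executeSync(...): keyspace, table, and column names are
// illustrative, as are `id` and the process(row) handler.
Statement select = QueryBuilder.select().all()
        .from("mykeyspace", "object_index")
        .where(QueryBuilder.eq("id", id))
        .limit(50);
try {
    ResultSet rs = executeSync(select);
    for (Row row : rs) {
        process(row); // hypothetical per-row handler
    }
} catch (RhombusTimeoutException e) {
    // Both NoHostAvailableException and QueryExecutionException surface here.
    logger.warn("Query failed: no host available or query execution error", e);
}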
public void executeBatch(List<CQLStatementIterator> statementIterators) {
    // Bind every statement from every iterator into a single unlogged batch, then execute it.
    BatchStatement batchStatement = new BatchStatement(BatchStatement.Type.UNLOGGED);
    for (CQLStatementIterator statementIterator : statementIterators) {
        while (statementIterator.hasNext()) {
            CQLStatement statement = statementIterator.next();
            batchStatement.add(getBoundStatement(session, statement));
        }
    }
    try {
        session.execute(batchStatement);
    } catch (NoHostAvailableException | QueryExecutionException e) {
        throw new RhombusTimeoutException(e);
    }
}
@Override
protected void writeSchedule(
        ScheduleUpdate update,
        Map<ScheduleRef.Entry, EquivalentScheduleEntry> content
) throws WriteException {
    DateTime now = clock.now();

    ImmutableMultimap<LocalDate, BroadcastRow> currentBroadcastRows = resolveBroadcasts(
            update.getSource(),
            update.getSchedule().getChannel(),
            update.getSchedule().getInterval()
    );

    ImmutableList<EquivalentScheduleEntry> updateEntries = getEquivalentScheduleEntries(
            update.getSchedule(),
            content
    );

    ImmutableSet<BroadcastRef> updateBroadcastRefs = updateEntries
            .stream()
            .map(EquivalentScheduleEntry::getBroadcast)
            .map(Broadcast::toRef)
            .collect(MoreCollectors.toImmutableSet());

    Set<BroadcastRow> staleBroadcasts = getStaleBroadcasts(
            updateBroadcastRefs,
            currentBroadcastRows
    );

    List<Statement> deletes = deleteStatements(
            update.getSource(),
            update.getSchedule().getChannel(),
            staleBroadcasts
    );

    log.info(
            "Processing equivalent schedule update for {} {} {}: content: {}, "
                    + "currentEntries:{}, update:{}, stale broadcasts from update:{}, "
                    + "stale broadcasts:{}",
            update.getSource(),
            update.getSchedule().getChannel().longValue(),
            update.getSchedule().getInterval(),
            content,
            currentBroadcastRows.values()
                    .stream()
                    .map(BroadcastRow::toString)
                    .collect(Collectors.joining(",")),
            updateLog(updateBroadcastRefs),
            updateLog(update.getStaleBroadcasts()),
            staleBroadcasts.stream()
                    .map(BroadcastRow::toString)
                    .collect(Collectors.joining(","))
    );

    ImmutableList<Statement> updates = updateEntries
            .stream()
            .flatMap(entry -> statementsForEntry(update.getSource(), entry, now).stream())
            .collect(MoreCollectors.toImmutableList());

    if (updates.isEmpty() && deletes.isEmpty()) {
        return;
    }

    BatchStatement updateBatch = new BatchStatement();
    updateBatch.addAll(Iterables.concat(updates, deletes));

    try {
        session.execute(updateBatch.setConsistencyLevel(write));
        log.info(
                "Processed equivalent schedule update for {} {} {}, updates: {}, deletes: {}",
                update.getSource(),
                update.getSchedule().getChannel().longValue(),
                update.getSchedule().getInterval(),
                updates.size(),
                deletes.size()
        );
    } catch (NoHostAvailableException | QueryExecutionException e) {
        throw new WriteException(e);
    }
}