@Test
public void testShouldFailToConnectWithOlderProtocolVersion() {
  try (BoundNode node = server.register(NodeSpec.builder().build());
      Cluster cluster = defaultBuilder(node).withProtocolVersion(ProtocolVersion.V2).build()) {
    // Since simulacron does not support < V3, an exception should be thrown if we try to force
    // an older version.
    try {
      cluster.connect();
      fail("Expected an UnsupportedProtocolVersionException");
    } catch (UnsupportedProtocolVersionException e) {
      // expected
    }

    // Should get a query log indicating an invalid protocol version was used.
    assertThat(node.getLogs().getQueryLogs()).hasSize(1);
    QueryLog log = node.getLogs().getQueryLogs().get(0);
    Frame frame = log.getFrame();
    assertThat(frame.protocolVersion).isEqualTo(2);
    assertThat(frame.warnings).hasSize(1);
    assertThat(frame.warnings.get(0))
        .isEqualTo(
            "This message contains a non-supported protocol version by this node. STARTUP is inferred, but may not reflect the actual message sent.");
    assertThat(frame.message).isInstanceOf(Startup.class);
  }
}
/**
 * Creates a cluster object.
 */
public void buildCluster() {
  try {
    if (protocolVersion != null && !protocolVersion.isEmpty()) {
      ProtocolVersion version = getCassandraProtocolVersion();
      cluster = Cluster.builder().addContactPoint(node).withCredentials(userName, password)
          .withProtocolVersion(version).build();
    } else {
      cluster = Cluster.builder().addContactPoint(node).withCredentials(userName, password).build();
    }
  } catch (DriverException ex) {
    throw new RuntimeException("Error creating cluster", ex);
  } catch (Throwable t) {
    DTThrowable.rethrow(t);
  }
}
@Override
public Session call() throws Exception {
  try {
    return new Cluster.Builder()
        .addContactPoints(nodes.split(","))
        .withPort(Integer.valueOf(cqlPort))
        .withProtocolVersion(ProtocolVersion.V3)
        .withoutJMXReporting()
        .build().connect();
  } catch (Exception e) {
    if (attempts != 0) {
      logger.attemptToConnectToCassandraFailed(attempts, e);
      attempts--;
      Thread.sleep(interval);
      return call();
    } else {
      logger.cannotConnectToCassandra(e);
      return null;
    }
  }
}
private Observable.Transformer<BoundStatement, Integer> applyMicroBatching() {
  return tObservable -> tObservable
      .groupBy(b -> {
        ByteBuffer routingKey = b.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, codecRegistry);
        Token token = metadata.newToken(routingKey);
        for (TokenRange tokenRange : session.getCluster().getMetadata().getTokenRanges()) {
          if (tokenRange.contains(token)) {
            return tokenRange;
          }
        }
        log.warn("Unable to find any Cassandra node to insert token " + token.toString());
        return session.getCluster().getMetadata().getTokenRanges().iterator().next();
      })
      .flatMap(g -> g.compose(new BoundBatchStatementTransformer()))
      .flatMap(batch -> rxSession
          .execute(batch)
          .compose(applyInsertRetryPolicy())
          .map(resultSet -> batch.size())
      );
}
private <T extends Serializable> void doSendMessage(T body, List<String> regions) throws IOException {
  createQueueIfNecessary();

  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  ObjectOutputStream oos = new ObjectOutputStream(bos);
  oos.writeObject(body);
  oos.flush();
  oos.close();
  ByteBuffer byteBuffer = ByteBuffer.wrap(bos.toByteArray());

  queueMessageManager.sendMessages(
      scope.getName(),
      regions,
      null, // delay millis
      null, // expiration seconds
      "application/octet-stream",
      DataType.serializeValue(byteBuffer, ProtocolVersion.NEWEST_SUPPORTED));
}
@Override
public CassandraSession newSession(CassandraSinkConnectorConfig config) {
  Cluster.Builder clusterBuilder = Cluster.builder()
      .withPort(config.port)
      .addContactPoints(config.contactPoints)
      .withProtocolVersion(ProtocolVersion.NEWEST_SUPPORTED);
  if (config.securityEnabled) {
    clusterBuilder.withCredentials(config.username, config.password);
  }
  if (config.sslEnabled) {
    final SslContextBuilder sslContextBuilder = SslContextBuilder.forClient();
    sslContextBuilder.sslProvider(config.sslProvider);
    final SslContext context;
    try {
      context = sslContextBuilder.build();
    } catch (SSLException e) {
      throw new ConnectException(e);
    }
    final SSLOptions sslOptions = new RemoteEndpointAwareNettySSLOptions(context);
    clusterBuilder.withSSL(sslOptions);
  }
  clusterBuilder.withCompression(config.compression);
  Cluster cluster = clusterBuilder.build();
  log.info("Creating session");
  final Session session = cluster.newSession();
  return new CassandraSessionImpl(config, cluster, session);
}
@Test
public void testSerialize() {
  Address address = randomAddress();
  AddressCodec codec = new AddressCodec();
  ByteBuffer buffer = codec.serialize(address, ProtocolVersion.NEWEST_SUPPORTED);
  Address address1 = codec.deserialize(buffer, ProtocolVersion.NEWEST_SUPPORTED);
  Assert.assertEquals(address, address1);
}
/**
 * @param ip contact point IP address
 * @param port native transport port, as a string
 * @param poolingOptions pooling options for the cluster
 * @return a configured {@link Cluster}
 */
private static Cluster createCluster(String ip, String port, PoolingOptions poolingOptions) {
  return Cluster.builder().addContactPoint(ip).withPort(Integer.parseInt(port))
      .withProtocolVersion(ProtocolVersion.V3).withRetryPolicy(DefaultRetryPolicy.INSTANCE)
      .withTimestampGenerator(new AtomicMonotonicTimestampGenerator())
      .withPoolingOptions(poolingOptions).build();
}
@Before
public void init() throws Exception {
  initCassandraFS();
  Cluster cluster = Cluster.builder().addContactPoints("localhost").withProtocolVersion(ProtocolVersion.V4)
      .build();
  session = cluster.connect();
  session.execute("USE test;");
}
@Before
public void init() throws Exception {
  String cassandraServer = System.getProperty("CassandraNode");
  if (cassandraServer == null || cassandraServer.length() == 0) {
    cassandraServer = "localhost";
  }
  Cluster cluster = Cluster.builder().addContactPoints(cassandraServer).withProtocolVersion(ProtocolVersion.V4)
      .build();
  session = cluster.connect();
  session.execute("USE test;");
}
@Override
protected Object handleInvocation(Object proxy, Method method, Object[] args) throws Throwable {
  // Only join traces, don't start them. This prevents LocalCollector's thread from amplifying.
  if (brave.serverSpanThreadBinder().getCurrentServerSpan() != null
      && brave.serverSpanThreadBinder().getCurrentServerSpan().getSpan() != null
      // Only trace named statements for now, since that's what we use
      && method.getName().equals("executeAsync") && args[0] instanceof NamedBoundStatement) {
    NamedBoundStatement statement = (NamedBoundStatement) args[0];
    SpanId spanId = brave.clientTracer().startNewSpan(statement.name);
    // o.a.c.tracing.Tracing.newSession must use the same format for the key zipkin
    if (version.compareTo(ProtocolVersion.V4) >= 0) {
      statement.enableTracing();
      statement.setOutgoingPayload(singletonMap("zipkin", ByteBuffer.wrap(spanId.bytes())));
    }
    brave.clientTracer().setClientSent(); // start the span and store it
    brave.clientTracer()
        .submitBinaryAnnotation("cql.query", statement.preparedStatement().getQueryString());
    cache.put(statement, brave.clientSpanThreadBinder().getCurrentClientSpan());
    // let go of the client span as it is only used for the RPC (will have no local children)
    brave.clientSpanThreadBinder().setCurrentSpan(null);
    return new BraveResultSetFuture(target.executeAsync(statement), brave);
  }
  try {
    return method.invoke(target, args);
  } catch (InvocationTargetException e) {
    if (e.getCause() instanceof RuntimeException) throw e.getCause();
    throw e;
  }
}
@Override
protected Object handleInvocation(Object proxy, Method method, Object[] args) throws Throwable {
  // Only join traces, don't start them. This prevents LocalCollector's thread from amplifying.
  if (brave.serverSpanThreadBinder().getCurrentServerSpan() != null
      && brave.serverSpanThreadBinder().getCurrentServerSpan().getSpan() != null
      && method.getName().equals("executeAsync") && args[0] instanceof BoundStatement) {
    BoundStatement statement = (BoundStatement) args[0];
    // via an internal class z.s.cassandra3.NamedBoundStatement, toString() is a nice name
    SpanId spanId = brave.clientTracer().startNewSpan(statement.toString());
    // o.a.c.tracing.Tracing.newSession must use the same format for the key zipkin
    if (version.compareTo(ProtocolVersion.V4) >= 0) {
      statement.enableTracing();
      statement.setOutgoingPayload(singletonMap("zipkin", ByteBuffer.wrap(spanId.bytes())));
    }
    brave.clientTracer().setClientSent(); // start the span and store it
    brave.clientTracer()
        .submitBinaryAnnotation("cql.query", statement.preparedStatement().getQueryString());
    cache.put(statement, brave.clientSpanThreadBinder().getCurrentClientSpan());
    // let go of the client span as it is only used for the RPC (will have no local children)
    brave.clientSpanThreadBinder().setCurrentSpan(null);
    return new BraveResultSetFuture(target.executeAsync(statement), brave);
  }
  try {
    return method.invoke(target, args);
  } catch (InvocationTargetException e) {
    if (e.getCause() instanceof RuntimeException) throw e.getCause();
    throw e;
  }
}
public CqlDeltaIterator(Iterator<Row> iterator, final int blockIndex, final int changeIdIndex,
    final int contentIndex, boolean reversed, int prefixLength, ProtocolVersion protocolVersion,
    CodecRegistry codecRegistry) {
  super(iterator, reversed, prefixLength);
  _blockIndex = blockIndex;
  _changeIdIndex = changeIdIndex;
  _contentIndex = contentIndex;
  _protocolVersion = protocolVersion;
  _codecRegistry = codecRegistry;
}
@Override
public long deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) {
  if (bytes == null || bytes.remaining() == 0) {
    return 0;
  }
  if (bytes.remaining() != 8) {
    throw new InvalidTypeException("Invalid 64-bits long value, expecting 8 bytes but got " + bytes.remaining());
  }
  return bytes.getLong(bytes.position());
}
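Only the deserialize half appears above; for reference, a minimal serializeNoBoxing sketch for the same 8-byte (bigint) layout. It mirrors what the DataStax driver's own LongCodec does, but it is not part of the snippet above:

public ByteBuffer serializeNoBoxing(long value, ProtocolVersion protocolVersion) {
  // Write the value as a big-endian 8-byte buffer, the layout deserializeNoBoxing above expects.
  ByteBuffer bytes = ByteBuffer.allocate(8);
  bytes.putLong(0, value);
  return bytes;
}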
private ProtocolVersion getCassandraProtocolVersion() {
  switch (protocolVersion.toUpperCase()) {
    case "V1":
      return ProtocolVersion.V1;
    case "V2":
      return ProtocolVersion.V2;
    case "V3":
      return ProtocolVersion.V3;
    default:
      throw new RuntimeException("Unsupported Cassandra Protocol Version.");
  }
}
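The switch above stops at V3, while several other snippets in this collection run against V4. A hedged sketch of the same mapping extended with a V4 case, assuming the driver on the classpath defines ProtocolVersion.V4 (drivers 3.x do); this is an illustrative variant, not the original method:

private ProtocolVersion getCassandraProtocolVersion() {
  switch (protocolVersion.toUpperCase()) {
    case "V1":
      return ProtocolVersion.V1;
    case "V2":
      return ProtocolVersion.V2;
    case "V3":
      return ProtocolVersion.V3;
    case "V4":
      // assumes a driver version that defines ProtocolVersion.V4
      return ProtocolVersion.V4;
    default:
      throw new RuntimeException("Unsupported Cassandra Protocol Version.");
  }
}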
@Test
public void testCassandraProtocolVersion() {
  TestOutputOperator outputOperator = setupForOutputOperatorTest();
  outputOperator.getStore().setProtocolVersion("v2");
  outputOperator.setup(context);
  Configuration config = outputOperator.getStore().getCluster().getConfiguration();
  Assert.assertEquals("Protocol version was not set to V2.", ProtocolVersion.V2,
      config.getProtocolOptions().getProtocolVersion());
}
@Override
public String deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException {
  if (bytes == null || bytes.remaining() == 0)
    return null;
  // Copy only the remaining bytes (array() would expose the whole backing array, ignoring
  // position/limit) and decode them as UTF-8, the encoding Cassandra uses for text values.
  byte[] data = new byte[bytes.remaining()];
  bytes.duplicate().get(data);
  return new String(data, StandardCharsets.UTF_8);
}
@Override
public ByteBuffer serialize(String value, ProtocolVersion protocolVersion) {
  if (value == null)
    return null;
  Days days = daysBetween(EPOCH, LocalDate.parse(value));
  int unsigned = fromSignedToUnsignedInt(days.getDays());
  return cint().serializeNoBoxing(unsigned, protocolVersion);
}
@Override
public String deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) {
  if (bytes == null || bytes.remaining() == 0)
    return null;
  int unsigned = cint().deserializeNoBoxing(bytes, protocolVersion);
  int signed = fromUnsignedToSignedInt(unsigned);
  return FORMATTER.print(EPOCH.plusDays(signed));
}
@Override
public byte[] deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException {
  if (bytes == null || bytes.remaining() == 0)
    return null;
  // Copy only the remaining bytes; duplicate().array() would return the whole backing array
  // (and fails for direct or read-only buffers).
  byte[] data = new byte[bytes.remaining()];
  bytes.duplicate().get(data);
  return data;
}
@Override
public Timestamp deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) {
  if (bytes == null || bytes.remaining() == 0)
    return null;
  long millis = bigint().deserializeNoBoxing(bytes, protocolVersion);
  return new Timestamp(millis);
}
@Override
public Time deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException {
  if (bytes == null || bytes.remaining() == 0)
    return null;
  long nanosOfDay = bigint().deserializeNoBoxing(bytes, protocolVersion);
  return new Time(LocalTime.fromMillisOfDay(nanosOfDay / 1000000L).toDateTimeToday().getMillis());
}
@Override
public String deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException {
  if (bytes == null || bytes.remaining() == 0)
    return null;
  long nanosOfDay = bigint().deserializeNoBoxing(bytes, protocolVersion);
  return LocalTime.fromMillisOfDay(nanosOfDay / 1000000L).toString();
}
@Override
public ByteBuffer serialize(Date value, ProtocolVersion protocolVersion) {
  if (value == null)
    return null;
  Days days = daysBetween(EPOCH, LocalDate.fromDateFields(value));
  int unsigned = fromSignedToUnsignedInt(days.getDays());
  return cint().serializeNoBoxing(unsigned, protocolVersion);
}
@Override
public Date deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) {
  if (bytes == null || bytes.remaining() == 0)
    return null;
  int unsigned = cint().deserializeNoBoxing(bytes, protocolVersion);
  int signed = fromUnsignedToSignedInt(unsigned);
  return new Date(EPOCH.plusDays(signed).toDate().getTime());
}
@Override
public ByteBuffer serialize(String value, ProtocolVersion protocolVersion) {
  if (value == null) {
    return null;
  }
  // Normalize "yyyy-MM-dd HH:mm:ss..." to ISO-8601 ("yyyy-MM-ddTHH:mm:ss...Z") so Instant.parse accepts it.
  if (value.indexOf(' ') == 10 && value.indexOf('Z') < 0) {
    StringBuilder builder = new StringBuilder(value).append('Z');
    builder.setCharAt(10, 'T');
    value = builder.toString();
  }
  return bigint().serializeNoBoxing(Instant.parse(value).getMillis(), protocolVersion);
}
@Override
public String deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) {
  if (bytes == null || bytes.remaining() == 0)
    return null;
  long millis = bigint().deserializeNoBoxing(bytes, protocolVersion);
  return new Instant(millis).toString();
}
private void startServerIfNotRunning() throws IOException, TTransportException, InterruptedException {
  try {
    session = new Cluster.Builder()
        .addContactPoints("localhost")
        .withProtocolVersion(ProtocolVersion.V3)
        .build().connect();
  } catch (NoHostAvailableException e) {
    String cassandraYmlLocation = findPathForCassandraYaml("./cassandra.yml");
    if (null == cassandraYmlLocation || cassandraYmlLocation.isEmpty()) {
      cassandraYmlLocation = findPathForCassandraYaml("./secret-store-api/target/test-classes/cassandra.yml");
    }
    if (null == cassandraYmlLocation || cassandraYmlLocation.isEmpty()) {
      throw new IllegalArgumentException("Could not find a cassandra.yml");
    }
    System.setProperty("cassandra.config", "file://" + cassandraYmlLocation);
    EmbeddedCassandraService service = new EmbeddedCassandraService();
    service.start();
    session = new Cluster.Builder()
        .addContactPoints("localhost")
        .withPort(9142)
        .withProtocolVersion(ProtocolVersion.V3)
        .build().connect();
  }
}
@SuppressWarnings("unchecked") @BeforeClass public static void setUpClass() throws InterruptedException, TTransportException, ConfigurationException, IOException { EmbeddedCassandraServerHelper.startEmbeddedCassandra(CASSANDRA_STARTUP_TIMEOUT); cluster = Cluster.builder() .addContactPoint("127.0.0.1") .withPort(CASSANDRA_NATIVE_PORT) .withProtocolVersion(ProtocolVersion.V4) .build(); session = cluster.connect(); }
@Test
public void testWriteEmptyBatch() throws InterruptedException, StageException {
  final String tableName = "test.trips";
  List<CassandraFieldMappingConfig> fieldMappings = ImmutableList.of(
      new CassandraFieldMappingConfig("[0]", "driver_id"),
      new CassandraFieldMappingConfig("[1]", "trip_id"),
      new CassandraFieldMappingConfig("[2]", "time"),
      new CassandraFieldMappingConfig("[3]", "x"),
      new CassandraFieldMappingConfig("[4]", "y"),
      new CassandraFieldMappingConfig("[5]", "time_id"),
      new CassandraFieldMappingConfig("[6]", "unique_id")
  );

  CassandraTargetConfig conf = new CassandraTargetConfig();
  conf.contactPoints.add("localhost");
  conf.port = CASSANDRA_NATIVE_PORT;
  conf.protocolVersion = ProtocolVersion.V4;
  conf.authProviderOption = AuthProviderOption.NONE;
  conf.compression = CassandraCompressionCodec.NONE;
  conf.columnNames = fieldMappings;
  conf.qualifiedTableName = tableName;

  Target target = new CassandraTarget(conf);
  TargetRunner targetRunner = new TargetRunner.Builder(CassandraDTarget.class, target).build();

  List<Record> emptyBatch = ImmutableList.of();
  targetRunner.runInit();
  targetRunner.runWrite(emptyBatch);
  targetRunner.runDestroy();
}
@Test(expected = StageException.class)
public void testMalformedTableName() throws Exception {
  List<CassandraFieldMappingConfig> fieldMappings = ImmutableList.of(
      new CassandraFieldMappingConfig("/driver", "driver_id"),
      new CassandraFieldMappingConfig("/trip", "trip_id"),
      new CassandraFieldMappingConfig("/time", "time"),
      new CassandraFieldMappingConfig("/x", "x"),
      new CassandraFieldMappingConfig("/y", "y"),
      new CassandraFieldMappingConfig("/time_id", "time_id"),
      new CassandraFieldMappingConfig("/unique_id", "unique_id")
  );

  CassandraTargetConfig conf = new CassandraTargetConfig();
  conf.contactPoints.add("localhost");
  conf.port = CASSANDRA_NATIVE_PORT;
  conf.protocolVersion = ProtocolVersion.V4;
  conf.authProviderOption = AuthProviderOption.NONE;
  conf.compression = CassandraCompressionCodec.NONE;
  conf.columnNames = fieldMappings;
  conf.qualifiedTableName = "tableName";

  Target target = new CassandraTarget(conf);
  TargetRunner targetRunner = new TargetRunner.Builder(CassandraDTarget.class, target).build();
  targetRunner.runInit();
  fail("should have thrown a StageException!");
}
@Override
public ByteBuffer serialize(Long paramT, ProtocolVersion paramProtocolVersion) throws InvalidTypeException {
  if (paramT == null) {
    return null;
  }
  return ByteBufferUtil.bytes(paramT);
}
@Override
public Long deserialize(ByteBuffer paramByteBuffer, ProtocolVersion paramProtocolVersion) throws InvalidTypeException {
  if (paramByteBuffer == null) {
    return null;
  }
  // always duplicate the ByteBuffer instance before consuming it!
  return ByteBufferUtil.toLong(paramByteBuffer.duplicate());
}
@Override
public ByteBuffer serialize(BigDecimal paramT, ProtocolVersion paramProtocolVersion) throws InvalidTypeException {
  if (paramT == null) {
    return null;
  }
  return ByteBufferUtil.bytes(paramT.longValue());
}
@Override
public BigDecimal deserialize(ByteBuffer paramByteBuffer, ProtocolVersion paramProtocolVersion) throws InvalidTypeException {
  if (paramByteBuffer == null) {
    return null;
  }
  // always duplicate the ByteBuffer instance before consuming it!
  Long value = ByteBufferUtil.toLong(paramByteBuffer.duplicate());
  return new BigDecimal(value);
}
@Override
public ByteBuffer serialize(Integer paramT, ProtocolVersion paramProtocolVersion) throws InvalidTypeException {
  if (paramT == null) {
    return null;
  }
  return ByteBufferUtil.bytes(paramT);
}
@Override
public Integer deserialize(ByteBuffer paramByteBuffer, ProtocolVersion paramProtocolVersion) throws InvalidTypeException {
  if (paramByteBuffer == null) {
    return null;
  }
  // always duplicate the ByteBuffer instance before consuming it!
  Long value = ByteBufferUtil.toLong(paramByteBuffer.duplicate());
  return value.intValue();
}
@Override
public ByteBuffer serialize(Double paramT, ProtocolVersion paramProtocolVersion) throws InvalidTypeException {
  if (paramT == null) {
    return null;
  }
  return ByteBufferUtil.bytes(paramT.floatValue());
}
@Override
public Double deserialize(ByteBuffer paramByteBuffer, ProtocolVersion paramProtocolVersion) throws InvalidTypeException {
  if (paramByteBuffer == null) {
    return null;
  }
  // always duplicate the ByteBuffer instance before consuming it!
  Float value = ByteBufferUtil.toFloat(paramByteBuffer.duplicate());
  return value.doubleValue();
}
@Override
public ByteBuffer serialize(Long paramT, ProtocolVersion paramProtocolVersion) throws InvalidTypeException {
  if (paramT == null) {
    return null;
  }
  return ByteBufferUtil.bytes(paramT.intValue());
}
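Only the serialize half of this Long-to-4-byte mapping appears in the collection. A minimal matching deserialize sketch, assuming the underlying column really is a 4-byte CQL int and following the duplicate-before-read convention of the neighbouring codecs (ByteBufferUtil.toInt is from org.apache.cassandra.utils.ByteBufferUtil):

@Override
public Long deserialize(ByteBuffer paramByteBuffer, ProtocolVersion paramProtocolVersion) throws InvalidTypeException {
  if (paramByteBuffer == null || paramByteBuffer.remaining() == 0) {
    return null;
  }
  // always duplicate the ByteBuffer instance before consuming it, then widen the 4-byte int to a Long
  return (long) ByteBufferUtil.toInt(paramByteBuffer.duplicate());
}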