/**
 * Kafka Connect lifecycle hook: parses the connector configuration and, when
 * {@code keyspaceCreateEnabled} is set, issues an idempotent CREATE KEYSPACE.
 *
 * @param settings raw connector settings supplied by the Connect framework
 * @throws ConnectException if the keyspace DDL fails with an {@link IOException}
 */
@Override
public void start(Map<String, String> settings) {
  this.config = new CassandraSinkConnectorConfig(settings);
  this.settings = settings;

  // Nothing else to do unless we have been asked to manage the keyspace.
  if (!this.config.keyspaceCreateEnabled) {
    return;
  }

  // SimpleStrategy with a replication factor of 3 and durable writes;
  // ifNotExists() makes the statement safe to run on every restart.
  final Map<String, Object> replication =
      ImmutableMap.of("class", "SimpleStrategy", "replication_factor", 3);
  final KeyspaceOptions keyspaceDdl =
      SchemaBuilder.createKeyspace(this.config.keyspace)
          .ifNotExists()
          .with()
          .durableWrites(true)
          .replication(replication);

  try (CassandraSession session = this.sessionFactory.newSession(this.config)) {
    log.info("start() - Executing\n{}", keyspaceDdl);
    session.executeStatement(keyspaceDdl);
  } catch (IOException ex) {
    throw new ConnectException("Exception thrown while managing keyspace.", ex);
  }
}
/**
 * Declares every configuration key this connector accepts, with its type,
 * default value, validator (where one exists), importance, and doc string.
 * The returned {@link ConfigDef} is consumed by {@code CassandraSinkConnectorConfig}.
 */
public static ConfigDef config() {
  return new ConfigDef()
      // --- Cluster connectivity ---
      .define(CONTACT_POINTS_CONFIG, ConfigDef.Type.LIST, ImmutableList.of("localhost"), ConfigDef.Importance.MEDIUM, CONTACT_POINTS_DOC)
      .define(PORT_CONFIG, ConfigDef.Type.INT, 9042, ValidPort.of(), ConfigDef.Importance.MEDIUM, PORT_DOC)
      .define(CONSISTENCY_LEVEL_CONFIG, ConfigDef.Type.STRING, ConsistencyLevel.LOCAL_QUORUM.toString(), ValidEnum.of(ConsistencyLevel.class), ConfigDef.Importance.MEDIUM, CONSISTENCY_LEVEL_DOC)
      // --- Authentication / transport security ---
      .define(USERNAME_CONFIG, ConfigDef.Type.STRING, "cassandra", ConfigDef.Importance.MEDIUM, USERNAME_DOC)
      .define(PASSWORD_CONFIG, ConfigDef.Type.PASSWORD, "cassandra", ConfigDef.Importance.MEDIUM, PASSWORD_DOC)
      .define(SECURITY_ENABLE_CONFIG, ConfigDef.Type.BOOLEAN, false, ConfigDef.Importance.MEDIUM, SECURITY_ENABLE_DOC)
      // Client-side wire compression; valid values are the keys of the CLIENT_COMPRESSION lookup map.
      .define(COMPRESSION_CONFIG, ConfigDef.Type.STRING, "NONE", ConfigDef.ValidString.in(CLIENT_COMPRESSION.keySet().stream().toArray(String[]::new)), ConfigDef.Importance.MEDIUM, COMPRESSION_DOC)
      .define(SSL_ENABLED_CONFIG, ConfigDef.Type.BOOLEAN, false, ConfigDef.Importance.MEDIUM, SSL_ENABLED_DOC)
      .define(SSL_PROVIDER_CONFIG, ConfigDef.Type.STRING, SslProvider.JDK.toString(), ValidEnum.of(SslProvider.class), ConfigDef.Importance.MEDIUM, SSL_PROVIDER_DOC)
      // --- Write behavior ---
      .define(DELETES_ENABLE_CONFIG, ConfigDef.Type.BOOLEAN, true, ConfigDef.Importance.MEDIUM, DELETES_ENABLE_DOC)
      // No default here: the keyspace must be supplied by the user (hence Importance.HIGH).
      .define(KEYSPACE_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, KEYSPACE_DOC)
      .define(KEYSPACE_CREATE_ENABLED_CONFIG, ConfigDef.Type.BOOLEAN, true, ConfigDef.Importance.HIGH, KEYSPACE_CREATE_ENABLED_DOC)
      .define(TABLE_MANAGE_ENABLED_CONFIG, ConfigDef.Type.BOOLEAN, true, ConfigDef.Importance.MEDIUM, SCHEMA_MANAGE_CREATE_DOC)
      // --- DDL options applied when this connector creates tables ---
      .define(TABLE_CREATE_COMPRESSION_ENABLED_CONFIG, ConfigDef.Type.BOOLEAN, true, ConfigDef.Importance.MEDIUM, TABLE_CREATE_COMPRESSION_ENABLED_DOC)
      // NOTE(review): table compression defaults to enabled while the algorithm
      // defaults to "NONE" — confirm that combination is intentional.
      .define(TABLE_CREATE_COMPRESSION_ALGORITHM_CONFIG, ConfigDef.Type.STRING, "NONE",
          ConfigDef.ValidString.in(TABLE_COMPRESSION.keySet().stream().toArray(String[]::new)), ConfigDef.Importance.MEDIUM, TABLE_CREATE_COMPRESSION_ALGORITHM_DOC)
      .define(TABLE_CREATE_CACHING_CONFIG, ConfigDef.Type.STRING, SchemaBuilder.Caching.NONE.toString(), ValidEnum.of(SchemaBuilder.Caching.class), ConfigDef.Importance.MEDIUM, TABLE_CREATE_CACHING_DOC);
}
/** * Build schema programmatically * <p> * DDL equivalent: * * <pre> * CREATE TABLE messages ( * sessionId uuid, * seqNo bigint, * message blob, * PRIMARY KEY (sessionId, seqNo ) ); * </pre> * * @throws StoreException if the store is not open * */ public void buildSchema() throws StoreException { if (session != null) { // Appropriate for a local test only session.execute(new SimpleStatement("CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE_NAME + " WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }")); System.out.format("Keyspace %s available\n", KEYSPACE_NAME); Create create = SchemaBuilder.createTable(KEYSPACE_NAME, TABLE_NAME).ifNotExists() .addPartitionKey(SESSION_ID_COLNAME, DataType.uuid()) .addClusteringColumn(SEQ_NO_COLNAME, DataType.bigint()) .addColumn(MESSAGE_COLNAME, DataType.blob()); ResultSet resultSet = session.execute(create); System.out.format("Table %s available\n", TABLE_NAME); } else { throw new StoreException("Schema not created; store not open"); } }
/**
 * Demonstrates the driver's fluent builders: prints a CREATE TABLE statement
 * derived from the sample field model, then an INSERT populated from its values.
 */
private static void cqlBuilders() {
  FieldModel fieldModel = SampleModels.wrapper();

  // Table DDL: cluster on login, partition on snapshot_id; one column per
  // remaining field in the model.
  Create tableDdl = SchemaBuilder.createTable("Field")
      .addClusteringColumn(LOGIN.name(), text())
      .addPartitionKey("snapshot_id", timeuuid());
  fieldModel.getFieldInfos().stream()
      .filter(info -> info.id() != LOGIN)
      .forEach(info -> tableDdl.addColumn(info.id().name(), cqlType(info)));
  Create.Options ddlWithOrdering = tableDdl.withOptions().clusteringOrder(LOGIN.name(), DESC);
  System.out.println(ddlWithOrdering);

  // INSERT statement with one bound value per model entry.
  Insert insertStmt = QueryBuilder.insertInto("Field");
  fieldModel.stream().forEach(entry -> insertStmt.value(entry.getKey().name(), entry.getValue()));
  System.out.println(insertStmt.getQueryString(codecRegistry()));
}
/**
 * Asynchronously creates the session table if it does not already exist.
 * <p>
 * This is fire-and-forget: the DDL outcome is only logged, and callers are not
 * blocked on (or notified of) completion.
 *
 * @param session driver session used to execute the DDL
 * @param table   name of the session table to create
 * @param log     logger that receives the success/failure outcome
 */
private static void createTableIfNotExists(final com.datastax.driver.core.Session session,
    final String table, final Logger log) {
  // Schema: id (partition key), three timestamps, and a free-form string map.
  Create createTable = SchemaBuilder.createTable(table)
      .addPartitionKey(ID, DataType.varchar())
      .addColumn(CREATED_AT, DataType.timestamp())
      .addColumn(ACCESSED_AT, DataType.timestamp())
      .addColumn(SAVED_AT, DataType.timestamp())
      .addColumn(ATTRIBUTES, DataType.map(DataType.varchar(), DataType.varchar()))
      .ifNotExists();

  // Pass an explicit executor: the two-argument Futures.addCallback overload is
  // deprecated and removed in recent Guava releases. directExecutor() is fine
  // here because the callbacks only log.
  Futures.addCallback(session.executeAsync(createTable), new FutureCallback<ResultSet>() {
    @Override
    public void onSuccess(final ResultSet result) {
      log.debug("Session table successfully created");
    }

    @Override
    public void onFailure(final Throwable x) {
      log.error("Create session table resulted in exception", x);
    }
  }, com.google.common.util.concurrent.MoreExecutors.directExecutor());
}
public CassandraSinkConnectorConfig(Map<?, ?> originals) { super(config(), originals); this.port = getInt(PORT_CONFIG); final List<String> contactPoints = this.getList(CONTACT_POINTS_CONFIG); this.contactPoints = contactPoints.toArray(new String[contactPoints.size()]); this.consistencyLevel = ConfigUtils.getEnum(ConsistencyLevel.class, this, CONSISTENCY_LEVEL_CONFIG); // this.compression = ConfigUtils.getEnum(ProtocolOptions.Compression.class, this, COMPRESSION_CONFIG); this.username = getString(USERNAME_CONFIG); this.password = getPassword(PASSWORD_CONFIG).value(); this.securityEnabled = getBoolean(SECURITY_ENABLE_CONFIG); this.sslEnabled = getBoolean(SSL_ENABLED_CONFIG); this.deletesEnabled = getBoolean(DELETES_ENABLE_CONFIG); final String keyspace = getString(KEYSPACE_CONFIG); if (Strings.isNullOrEmpty(keyspace)) { this.keyspace = null; } else { this.keyspace = keyspace; } final String compression = getString(COMPRESSION_CONFIG); this.compression = CLIENT_COMPRESSION.get(compression); this.sslProvider = ConfigUtils.getEnum(SslProvider.class, this, SSL_PROVIDER_CONFIG); this.keyspaceCreateEnabled = getBoolean(KEYSPACE_CREATE_ENABLED_CONFIG); this.tableManageEnabled = getBoolean(TABLE_MANAGE_ENABLED_CONFIG); this.tableCompressionEnabled = getBoolean(TABLE_CREATE_COMPRESSION_ENABLED_CONFIG); this.tableCompressionAlgorithm = ConfigUtils.getEnum(TableOptions.CompressionOptions.Algorithm.class, this, TABLE_CREATE_COMPRESSION_ALGORITHM_CONFIG); this.tableCaching = ConfigUtils.getEnum(SchemaBuilder.Caching.class, this, TABLE_CREATE_CACHING_CONFIG); }
/**
 * Test-suite teardown: drops the per-run keyspace and releases driver resources.
 * A no-op when the suite was skipped ({@code _runTests} is false).
 */
@AfterClass
public void tearDown() {
  if (_runTests) {
    _session.execute(SchemaBuilder.dropKeyspace(_keyspaceName));
    _session.close();
    _cluster.close();
  }
}
/**
 * Lazily creates the stash token range table the first time it is needed.
 * Uses double-checked locking on {@code _verifiedStashTokenRangeTableExists} so at most
 * one thread issues the CREATE TABLE; later calls are a cheap flag read.
 * NOTE(review): the first flag read happens outside the lock — double-checked locking
 * is only sound if the field is declared volatile; confirm at the field declaration.
 */
private void ensureStashTokenRangeTableExists() {
  if (!_verifiedStashTokenRangeTableExists) {
    synchronized(this) {
      if (!_verifiedStashTokenRangeTableExists) {
        // Primary key is ((stash_id, data_center), placement, range_token, is_start_token).
        // Note that Cassandra performs unsigned byte comparison for "range_token" and sorts False before
        // True for "is_start_token". The latter is necessary because it sorts two tables with
        // adjacent UUIDs correctly, returning the exclusive "to" token for the previous table before the
        // inclusive "from" token for the next table.
        _placementCache.get(_systemTablePlacement)
            .getKeyspace()
            .getCqlSession()
            .execute(SchemaBuilder.createTable(STASH_TOKEN_RANGE_TABLE)
                .ifNotExists()
                .addPartitionKey(STASH_ID_COLUMN, DataType.text())
                .addPartitionKey(DATA_CENTER_COLUMN, DataType.text())
                .addClusteringColumn(PLACEMENT_COLUMN, DataType.text())
                .addClusteringColumn(RANGE_TOKEN_COLUMN, DataType.blob())
                .addClusteringColumn(IS_START_TOKEN_COLUMN, DataType.cboolean())
                .addColumn(TABLE_JSON_COLUMN, DataType.text())
                .withOptions()
                // The following cluster orders should be the defaults but for clarity let's be explicit
                .clusteringOrder(PLACEMENT_COLUMN, SchemaBuilder.Direction.ASC)
                .clusteringOrder(RANGE_TOKEN_COLUMN, SchemaBuilder.Direction.ASC)
                .clusteringOrder(IS_START_TOKEN_COLUMN, SchemaBuilder.Direction.ASC)
                .compactStorage()
                // Rows expire automatically after TTL seconds.
                .defaultTimeToLive(TTL));
        _verifiedStashTokenRangeTableExists = true;
      }
    }
  }
}
/**
 * Drops the message table if it exists.
 *
 * @throws StoreException if the store is not open
 */
public void dropSchema() throws StoreException {
  // Guard clause: a session is only available while the store is open.
  if (session == null) {
    throw new StoreException("Schema not dropped; store not open");
  }
  Drop dropDdl = SchemaBuilder.dropTable(KEYSPACE_NAME, TABLE_NAME).ifExists();
  session.execute(dropDdl);
  System.out.println("Schema dropped");
}
/**
 * Builds a CREATE TABLE statement from the sample field model and prints
 * the generated CQL for inspection.
 */
@Test
public void simpleCassandraSchema() {
  FieldModel fieldModel = SampleModels.wrapper();

  // Cluster on login, partition on snapshot_id, then one column per
  // remaining field in the model.
  Create ddl = SchemaBuilder.createTable("fields_model")
      .addClusteringColumn(LOGIN.name(), text())
      .addPartitionKey("snapshot_id", timeuuid());
  fieldModel.getFieldInfos().stream()
      .filter(fieldInfo -> fieldInfo.id() != LOGIN)
      .forEach(fieldInfo -> ddl.addColumn(fieldInfo.id().name(), cqlType(fieldInfo)));

  Options ddlWithOrdering = ddl.withOptions().clusteringOrder(LOGIN.name(), DESC);
  print(ddlWithOrdering.getQueryString());
}