/**
 * Cassandra {@link com.datastax.driver.core.Statement}s can be used together with {@link CassandraTemplate} and the
 * mapping layer: a QueryBuilder INSERT is executed through the template, then the row is read
 * back both by id and via a raw SELECT statement mapped onto entities.
 */
@Test
public void insertAndSelect() {

    Insert insert = QueryBuilder.insertInto("users").value("user_id", 42L) //
            .value("uname", "heisenberg") //
            .value("fname", "Walter") //
            .value("lname", "White") //
            .ifNotExists(); // lightweight transaction: only insert when the row is absent

    template.execute(insert);

    // Read back through the mapping layer by primary key.
    User user = template.selectOneById(User.class, 42L);
    assertThat(user.getUsername(), is(equalTo("heisenberg")));

    // A raw SELECT statement can also be mapped onto entities.
    List<User> users = template.select(QueryBuilder.select().from("users"), User.class);
    assertThat(users, hasSize(1));
    assertThat(users.get(0), is(equalTo(user)));
}
/**
 * Persists the given event entity, assigning a time-based UUID id when none is set.
 *
 * @param entity      the event to store; its id may be populated as a side effect
 * @param ifNotExists when {@code true} the write is issued as a lightweight transaction
 *                    (INSERT ... IF NOT EXISTS)
 * @return the saved event when the write was applied, or empty when the conditional
 *         insert lost (a row with the same primary key already existed)
 */
private Optional<Event> save(EventEntity entity, boolean ifNotExists) {
    if (entity.getId() == null) {
        // Time-based UUIDs are chronologically sortable.
        entity.setId(UUIDs.timeBased());
    }
    Insert insert = QueryBuilder.insertInto(getColumnFamilyName())
            .value(ModelConstants.ID_PROPERTY, entity.getId())
            .value(ModelConstants.EVENT_TENANT_ID_PROPERTY, entity.getTenantId())
            .value(ModelConstants.EVENT_ENTITY_TYPE_PROPERTY, entity.getEntityType())
            .value(ModelConstants.EVENT_ENTITY_ID_PROPERTY, entity.getEntityId())
            .value(ModelConstants.EVENT_TYPE_PROPERTY, entity.getEventType())
            .value(ModelConstants.EVENT_UID_PROPERTY, entity.getEventUid())
            .value(ModelConstants.EVENT_BODY_PROPERTY, entity.getBody());
    if (ifNotExists) {
        insert = insert.ifNotExists();
    }
    ResultSet rs = executeWrite(insert);
    if (rs.wasApplied()) {
        return Optional.of(DaoUtil.getData(entity));
    } else {
        // wasApplied() is false only when the conditional write did not take effect.
        return Optional.empty();
    }
}
/**
 * Builds and prepares the INSERT for one index table. The partition key columns are
 * contributed by the {@link IndexSupport} implementation; the "ts" and "trace_id" bind
 * markers are common to all index tables.
 *
 * @param session     session used to prepare (and later execute) the statement
 * @param indexTtl    optional TTL; when non-null a "ttl_" bind marker is added via USING TTL
 * @param sharedState optional shared state across indexers, may be null
 * @param index       declares the target table and its partition key columns
 */
Indexer(Session session, @Nullable Integer indexTtl,
        @Nullable ConcurrentMap<PartitionKeyToTraceId, Pair<Long>> sharedState, IndexSupport index) {
    this.index = index;
    // e.g. "ServiceNameIndex" -> "service-name-index"; presumably used to label this
    // indexer's bound statements — TODO confirm against callers.
    this.boundName = UPPER_CAMEL.to(LOWER_HYPHEN, index.getClass().getSimpleName());
    Insert insert = index.declarePartitionKey(QueryBuilder.insertInto(index.table())
            .value("ts", QueryBuilder.bindMarker("ts"))
            .value("trace_id", QueryBuilder.bindMarker("trace_id")));
    if (indexTtl != null) {
        // using() mutates the underlying insert in place, so the returned Options
        // view can be discarded here; the prepared statement still carries USING TTL.
        insert.using(QueryBuilder.ttl(QueryBuilder.bindMarker("ttl_")));
    }
    this.prepared = session.prepare(insert);
    this.indexTtl = indexTtl;
    this.session = session;
    this.timestampCodec = new TimestampCodec(session);
    this.sharedState = sharedState;
}
/**
 * Verifies that a CassandraRule with a managed keyspace and table can round-trip a row:
 * insert (key, value), select it back by key, and compare.
 *
 * NOTE(review): the rule lifecycle is driven manually, so rule.after() is skipped when an
 * assertion fails — consider try/finally or @Rule; left unchanged here.
 */
@Test
public void managed() throws Throwable {
    final CassandraRule rule = CassandraRule.newBuilder()
            .withManagedKeyspace()
            .withManagedTable(TABLE_SCHEMA)
            .build();
    rule.before();
    final Session session = rule.getSession();

    Insert insert = QueryBuilder.insertInto("mytable")
            .value("key", KEY)
            .value("value", VALUE);
    session.execute(insert);

    Statement select = QueryBuilder.select()
            .from("mytable")
            .where(QueryBuilder.eq("key", KEY))
            .limit(1);
    Row result = session.execute(select).all().get(0);

    assertEquals(KEY, result.getString("key"));
    assertEquals(VALUE, result.getBytes("value"));
    rule.after();
}
/**
 * Demonstrates the driver's CQL builders: builds a CREATE TABLE for the Field model
 * (clustered on login, partitioned by snapshot_id) and an INSERT populated from the
 * sample model, printing both generated statements.
 */
private static void cqlBuilders() {
    FieldModel model = SampleModels.wrapper();

    Create create = SchemaBuilder.createTable("Field").addClusteringColumn(LOGIN.name(), text())
            .addPartitionKey("snapshot_id", timeuuid());
    // Every field except the clustering column becomes a regular column of its CQL type.
    model.getFieldInfos().stream().filter(f -> f.id() != LOGIN)
            .forEach(f -> create.addColumn(f.id().name(), cqlType(f)));
    Create.Options createWithOptions = create.withOptions().clusteringOrder(LOGIN.name(), DESC);
    System.out.println(createWithOptions);

    Insert insert = QueryBuilder.insertInto("Field");
    // value() mutates the insert, so it can be populated from a forEach.
    model.stream().forEach(e -> insert.value(e.getKey().name(), e.getValue()));
    System.out.println(insert.getQueryString(codecRegistry()));
}
/**
 * getAppliedMigrations should issue "SELECT name,status FROM test.migrations;" and return
 * the names of applied migrations.
 */
@Test
public void getAppliedMigrations_shouldQueryAndSortByName() throws Exception {
    ResultSet appliedMigrationResultSet = mock(ResultSet.class);
    Row row = mock(Row.class);
    // Match the session call by comparing the statement's rendered CQL text.
    when(session.execute(argThat(new CustomMatcher<Insert>("Get applied migrations") {
        @Override
        public boolean matches(Object item) {
            return "SELECT name,status FROM test.migrations;".equals(item.toString());
        }
    }))).thenReturn(appliedMigrationResultSet);
    when(appliedMigrationResultSet.all()).thenReturn(Collections.singletonList(row));
    when(row.getString(0)).thenReturn("001_initial_migration.cql");
    when(row.getString(1)).thenReturn("APPLIED");

    assertThat(client.getAppliedMigrations()).containsOnly("001_initial_migration.cql");
}
/**
 * getAppliedMigrations should only return migrations whose status is APPLIED; FAILED
 * (and other non-applied) rows are filtered out.
 */
@Test
public void getAppliedMigrations_shouldIgnoreFailedAndIncompleteMigrations() throws Exception {
    ResultSet appliedMigrationResultSet = mock(ResultSet.class);
    Row row1 = mock(Row.class);
    Row row2 = mock(Row.class);
    // Match the session call by comparing the statement's rendered CQL text.
    when(session.execute(argThat(new CustomMatcher<Insert>("Get applied migrations") {
        @Override
        public boolean matches(Object item) {
            return "SELECT name,status FROM test.migrations;".equals(item.toString());
        }
    }))).thenReturn(appliedMigrationResultSet);
    when(appliedMigrationResultSet.all()).thenReturn(Arrays.asList(row1, row2));
    when(row1.getString(0)).thenReturn("001_initial_migration.cql");
    when(row1.getString(1)).thenReturn("APPLIED");
    when(row2.getString(0)).thenReturn("002_add_stuff.cql");
    when(row2.getString(1)).thenReturn("FAILED");

    assertThat(client.getAppliedMigrations()).containsOnly("001_initial_migration.cql");
}
/**
 * Sends the given INSERT statements to Cassandra asynchronously as a single batch
 * (logged or unlogged per configuration), updating monitoring counters. The request
 * list is always cleared afterwards, even on failure.
 *
 * @param insertRequest statements to batch; emptied before this method returns
 */
private void runBatchInsert(List<Insert> insertRequest) {
    try {
        Batch batch;
        if (config.getLoggedBatch()) {
            batch = QueryBuilder.batch(insertRequest
                    .toArray(new RegularStatement[insertRequest.size()]));
        } else {
            batch = QueryBuilder.unloggedBatch(insertRequest
                    .toArray(new RegularStatement[insertRequest.size()]));
        }
        totalCassandraInsertRequest.addAndGet(insertRequest.size());
        ResultSetFuture future = cassandraSession.executeAsync(batch);
        CallBackListener listener = new CallBackListener(future, null);
        future.addListener(listener, pool);
        incrementBatchInsertCounter();
        pendingRequestCounter.incrementAndGet();
    } catch (Throwable ex) {
        // Pass the throwable itself so the stack trace is preserved; the previous
        // "..." + ex.getMessage() form lost it (and misspelled "publishing").
        LOGGER.error("Error publishing metrics in MetricCassandraCollector", ex);
        cassandraErrorCount.increment();
        registerError(ex);
    } finally {
        // Caller reuses the list; always drain it regardless of outcome.
        insertRequest.clear();
    }
}
/**
 * Prepares the metadata SELECT-by-key and full-row INSERT statements used by this class;
 * writes are issued at LOCAL_ONE.
 */
private void createPreparedStatements() {
    // SELECT * FROM metrics_metadata WHERE key = ?
    final Select.Where selectByKey = select()
            .all()
            .from(CassandraModel.CF_METRICS_METADATA_NAME)
            .where(eq(KEY, bindMarker()));
    getValue = DatastaxIO.getSession().prepare(selectByKey);

    // INSERT INTO metrics_metadata (key, column1, value) VALUES (?, ?, ?)
    final Insert insertRow = insertInto(CassandraModel.CF_METRICS_METADATA_NAME)
            .value(KEY, bindMarker())
            .value(COLUMN1, bindMarker())
            .value(VALUE, bindMarker());
    putValue = DatastaxIO.getSession().prepare(insertRow);
    putValue.setConsistencyLevel(ConsistencyLevel.LOCAL_ONE);
}
/**
 * Create all prepared statements use in this class for metrics_locator.
 */
private void createPreparedStatements() {

    // create a generic select statement for retrieving from metrics_locator
    Select.Where select = QueryBuilder
            .select()
            .all()
            .from(CassandraModel.CF_METRICS_LOCATOR_NAME)
            .where(eq(KEY, bindMarker()));
    getValue = DatastaxIO.getSession().prepare(select);

    // create a generic insert statement for inserting into metrics_locator;
    // rows expire after the tenant locator TTL
    Insert insert = QueryBuilder.insertInto(CassandraModel.CF_METRICS_LOCATOR_NAME)
            .using(ttl(TenantTtlProvider.LOCATOR_TTL))
            .value(KEY, bindMarker())
            .value(COLUMN1, bindMarker())
            .value(VALUE, bindMarker());
    // Writes are issued at LOCAL_ONE.
    putValue = DatastaxIO.getSession()
            .prepare(insert)
            .setConsistencyLevel(ConsistencyLevel.LOCAL_ONE);
}
/**
 * Prepares the generic SELECT-by-key and TTL'd INSERT statements for the
 * metrics_delayed_locator table; writes are issued at LOCAL_ONE.
 */
public DDelayedLocatorIO() {

    // create a generic select statement for retrieving from metrics_delayed_locator
    Select.Where select = QueryBuilder
            .select()
            .all()
            .from(CassandraModel.CF_METRICS_DELAYED_LOCATOR_NAME)
            .where(eq(KEY, bindMarker()));
    getValue = DatastaxIO.getSession().prepare(select);

    // create a generic insert statement for inserting into metrics_delayed_locator;
    // rows expire after the delayed-locator TTL
    Insert insert = QueryBuilder.insertInto(CassandraModel.CF_METRICS_DELAYED_LOCATOR_NAME)
            .using(ttl(TenantTtlProvider.DELAYED_LOCATOR_TTL))
            .value(KEY, bindMarker())
            .value(COLUMN1, bindMarker())
            .value(VALUE, bindMarker());
    putValue = DatastaxIO.getSession()
            .prepare(insert)
            .setConsistencyLevel(ConsistencyLevel.LOCAL_ONE);
}
/**
 * Builds a conditional (IF NOT EXISTS) INSERT carrying every column of the given
 * entity event.
 *
 * @param entityEvent event whose fields populate the insert
 * @return the built insert statement
 */
private Insert createInsertQuery(EntityEvent entityEvent) {
    final Insert eventInsert = QueryBuilder.insertInto(tableName)
            .value(ENTITY_ID, entityEvent.getEventKey().getEntityId())
            .value(VERSION, entityEvent.getEventKey().getVersion())
            .value(OP_ID, entityEvent.getOpId())
            .value(OP_DATE, entityEvent.getOpDate())
            .value(EVENT_TYPE, entityEvent.getEventType())
            .value(STATUS, entityEvent.getStatus().name())
            .value(AUDIT_INFO, entityEvent.getAuditInfo())
            .value(EVENT_DATA, entityEvent.getEventData());
    eventInsert.ifNotExists();
    return eventInsert;
}
/**
 * Stores a refresh token and its associated authentication in two tables via a single
 * logged batch. Both objects are java-serialized into byte buffers. When the token has
 * an expiration, both rows are written with a matching TTL so Cassandra discards them.
 */
@Override
public void storeRefreshToken(OAuth2RefreshToken refreshToken, OAuth2Authentication authentication) {
    List<RegularStatement> statementList = new ArrayList<RegularStatement>();

    byte[] serializedRefreshToken = SerializationUtils.serialize(refreshToken);
    ByteBuffer bufferedRefreshToken = ByteBuffer.wrap(serializedRefreshToken);

    byte[] serializedAuthentication = SerializationUtils.serialize(authentication);
    ByteBuffer bufferedAuthentication = ByteBuffer.wrap(serializedAuthentication);

    WriteOptions refreshWriteOptions = new WriteOptions();
    if (refreshToken instanceof ExpiringOAuth2RefreshToken) {
        ExpiringOAuth2RefreshToken expiringRefreshToken = (ExpiringOAuth2RefreshToken) refreshToken;
        Date expiration = expiringRefreshToken.getExpiration();
        if (expiration != null) {
            // Remaining lifetime in whole seconds becomes the row TTL.
            int seconds = Long.valueOf((expiration.getTime() - System.currentTimeMillis()) / 1000L).intValue();
            refreshWriteOptions.setTtl(seconds);
        }
    }

    // Insert into RefreshToken table
    Insert accessInsert = CassandraTemplate.createInsertQuery(RefreshToken.TABLE,
            new RefreshToken(refreshToken.getValue(), bufferedRefreshToken),
            refreshWriteOptions, cassandraTemplate.getConverter());
    statementList.add(accessInsert);

    // Insert into RefreshTokenAuthentication table
    Insert authInsert = CassandraTemplate.createInsertQuery(RefreshTokenAuthentication.TABLE,
            new RefreshTokenAuthentication(refreshToken.getValue(), bufferedAuthentication),
            refreshWriteOptions, cassandraTemplate.getConverter());
    statementList.add(authInsert);

    // Logged batch keeps the two writes atomic.
    Batch batch = QueryBuilder.batch(statementList.toArray(new RegularStatement[statementList.size()]));
    cassandraTemplate.execute(batch);
}
/**
 * Inserts the given timeline entry into the tenant's keyspace.
 *
 * @param xmTimeline entry to persist; its tenant name is used as the keyspace
 * @param table      target table name
 * @return whether the write was applied
 */
private boolean insertTimeline(XmTimeline xmTimeline, String table) {
    Insert insert = QueryBuilder.insertInto(xmTimeline.getTenant(), table)
            .value(LOGIN_COL, xmTimeline.getLogin())
            .value(USER_KEY_COL, xmTimeline.getUserKey())
            .value(START_DATE_COL, xmTimeline.getStartDate())
            .value(RID_COL, xmTimeline.getRid())
            .value(TENANT_COL, xmTimeline.getTenant())
            .value(MS_NAME_COL, xmTimeline.getMsName())
            .value(OPERATION_COL, xmTimeline.getOperationName())
            .value(ENTITY_ID_COL, xmTimeline.getEntityId())
            .value(ENTITY_KEY_COL, xmTimeline.getEntityKey())
            .value(ENTITY_TYPE_KEY_COL, xmTimeline.getEntityTypeKey())
            .value(OPERATION_URL_COL, xmTimeline.getOperationUrl())
            .value(HTTP_METHOD_COL, xmTimeline.getHttpMethod())
            .value(REQUEST_BODY_COL, xmTimeline.getRequestBody())
            .value(REQUEST_LENGTH_COL, xmTimeline.getRequestLength())
            .value(RESPONSE_BODY_COL, xmTimeline.getResponseBody())
            .value(RESPONSE_LENGTH_COL, xmTimeline.getResponseLength())
            .value(REQUEST_HEADERS_COL, xmTimeline.getRequestHeaders())
            .value(RESPONSE_HEADERS_COL, xmTimeline.getResponseHeaders())
            .value(HTTP_STATUS_CODE_COL, xmTimeline.getHttpStatusCode())
            .value(CHANNEL_TYPE_COL, xmTimeline.getChannelType())
            .value(EXEC_TIME_COL, xmTimeline.getExecTime());
    ResultSet resultSet = session.execute(insert);
    return resultSet.wasApplied();
}
/**
 * Writes the given events to Cassandra as a single batch at the configured consistency
 * level. Unmappable events are skipped with a warning. As soon as one event lacks the
 * table's primary key, processing of the remaining events stops.
 *
 * NOTE(review): the primary-key branch uses {@code break}, not {@code continue}, so later
 * events are silently dropped — confirm this early-exit is intentional.
 */
public void save(List<Event> events) {
    final BatchStatement batch = new BatchStatement();
    for (final Event event : events) {
        final Map<String, Object> parsedEvent = parse(event);
        if (parsedEvent.isEmpty()) {
            log.warn("Event {} could not be mapped", event);
            continue;
        }
        if (!hasPrimaryKey(parsedEvent)) {
            break;
        }
        final Insert insert = QueryBuilder.insertInto(table);
        // value() mutates the insert in place; no reassignment needed.
        for (final Map.Entry<String, Object> entry : parsedEvent.entrySet()) {
            insert.value(entry.getKey(), entry.getValue());
        }
        if (log.isTraceEnabled()) {
            log.trace("Preparing insert for table {}: {}", table.getName(), insert.getQueryString());
        }
        batch.add(insert);
    }
    if (batch.getStatements().isEmpty()) {
        log.warn("No event produced insert query for table {}", table.getName());
        return;
    }
    batch.setConsistencyLevel(consistencyLevel);
    session.execute(batch);
}
/**
 * Generate Insert CQL with a bind marker for every column.
 *
 * @param table       target table name
 * @param columns     columns to insert; each becomes a positional bind marker
 * @param ifNotExists when true, append IF NOT EXISTS (lightweight transaction)
 * @param ttl         optional TTL in seconds; null means no USING TTL clause
 * @return the built insert statement
 */
public static Insert generateInsert(String table, String[] columns, boolean ifNotExists, Integer ttl) {
    Insert insert = insertInto(table);
    for (String column : columns) {
        insert = insert.value(column, bindMarker());
    }
    if (ifNotExists) {
        insert = insert.ifNotExists();
    }
    if (ttl != null) {
        // using() mutates the insert in place (its return is an Options view),
        // so the result does not need to be reassigned.
        insert.using(ttl(ttl));
    }
    return insert;
}
/** Builds and prepares the full-row INSERT at the configured write consistency level. */
private void initInsertStatement() {
    final Insert allColumnsInsert =
            applyConsistencyLevel(generateInsert(table, getAllColumns(), false, ttl), writeConsistencyLevel);
    LOGGER.debug("Generated Insert {}", allColumnsInsert);
    insertStatement = getSession().prepare(allColumnsInsert);
}
/**
 * Demonstrates a plain QueryBuilder INSERT for the fields model: a random snapshot id
 * plus all model fields added in bulk via values(names, values), then printed as CQL.
 */
@Test
public void simpleCasandraInsert() {
    FieldModel model = SampleModels.wrapper();

    Insert insertRequest = QueryBuilder.insertInto("fields_model");
    insertRequest.value("snapshot_id", UUID.randomUUID());
    // Bulk variant: parallel lists of column names and values.
    insertRequest.values(
            model.stream().map(e -> e.getKey().name()).collect(toList()),
            model.stream().map(Entry::getValue).collect(toList()));

    print(insertRequest.getQueryString(codecRegistry()));
}
/**
 * Maps a (word, source) key pair and its count to a Cassandra INSERT statement.
 *
 * @param keys  keys.get(0) is the word, keys.get(1) is the source
 * @param value the count to store
 * @return the insert statement for the word-count table
 */
@Override
public Statement map(List<String> keys, Number value) {
    final Insert wordCountInsert = QueryBuilder.insertInto(KEYSPACE_NAME, TABLE_NAME)
            .value(WORD_KEY_NAME, keys.get(0))
            .value(SOURCE_KEY_NAME, keys.get(1))
            .value(VALUE_NAME, value);
    return wordCountInsert;
}
/**
 * acquireLock should report success when the conditional INSERT into the leases table
 * is applied.
 */
@Test
public void acquireLock_shouldReturnTrueIfInsertSucceeds() throws Exception {
    ResultSet leaseResultSet = mock(ResultSet.class);
    // Match the lease insert by its CQL prefix and bound values.
    when(session.execute(argThat(new CustomMatcher<Insert>("Get lease insert statement") {
        @Override
        public boolean matches(Object item) {
            String cql = item.toString();
            return cql.startsWith("INSERT INTO test.leases") && cql.contains("VALUES ('migration',");
        }
    }))).thenReturn(leaseResultSet);
    when(leaseResultSet.wasApplied()).thenReturn(true);

    assertThat(client.acquireLock()).isTrue();
}
/**
 * acquireLock should report failure when the conditional INSERT into the leases table
 * is not applied (another holder already has the lease).
 */
@Test
public void acquireLock_shouldReturnFalseIfInsertFails() throws Exception {
    ResultSet leaseResultSet = mock(ResultSet.class);
    // Match the lease insert by its CQL prefix and bound values.
    when(session.execute(argThat(new CustomMatcher<Insert>("Get lease insert statement") {
        @Override
        public boolean matches(Object item) {
            String cql = item.toString();
            return cql.startsWith("INSERT INTO test.leases") && cql.contains("VALUES ('migration',");
        }
    }))).thenReturn(leaseResultSet);
    when(leaseResultSet.wasApplied()).thenReturn(false);

    assertThat(client.acquireLock()).isFalse();
}
/** Seeds the test1 table with the (web, google, google.com) row. */
@Override
public void migrate(Session session) throws Exception {
    final Insert seedRow = QueryBuilder.insertInto("test1")
            .value("space", "web")
            .value("key", "google")
            .value("value", "google.com");
    session.execute(seedRow);
}
/** Seeds the test1 table with the (web, facebook, facebook.com) row. */
@Override
public void migrate(Session session) throws Exception {
    final Insert seedRow = QueryBuilder.insertInto("test1")
            .value("space", "web")
            .value("key", "facebook")
            .value("value", "facebook.com");
    session.execute(seedRow);
}
/**
 * Builds a prepared INSERT for the given write data and binds the collected values
 * asynchronously.
 *
 * <p>Every mutated column becomes a bind marker, with its value converted through the UDT
 * mapper (null for absent Optionals). IF NOT EXISTS plus an optional serial consistency
 * level are applied for conditional writes; when a TTL is set, its bind marker is appended
 * last so the bind-value list stays in marker order.
 */
private static ListenableFuture<Statement> toInsertStatementAsync(WriteQueryData data,
        ExecutionSpec executionSpec, UDTValueMapper udtValueMapper, DBSession dbSession) {
    // Qualify with the keyspace only when one was supplied.
    final Insert insert = (data.getTablename().getKeyspacename() == null)
            ? insertInto(data.getTablename().getTablename())
            : insertInto(data.getTablename().getKeyspacename(), data.getTablename().getTablename());

    final List<Object> values = Lists.newArrayList();

    for (Entry<String, Optional<Object>> entry : data.getValuesToMutate().entrySet()) {
        insert.value(entry.getKey(), bindMarker());
        values.add(udtValueMapper.toStatementValue(data.getTablename(), entry.getKey(),
                entry.getValue().orNull()));
    }

    if (data.getIfNotExits() != null) {
        insert.ifNotExists();
        if (executionSpec.getSerialConsistencyLevel() != null) {
            insert.setSerialConsistencyLevel(executionSpec.getSerialConsistencyLevel());
        }
    }

    if (executionSpec.getTtl() != null) {
        // The TTL marker is added after all column markers, so its value is bound last.
        insert.using(ttl(bindMarker()));
        values.add((Integer) executionSpec.getTtl());
    }

    final ListenableFuture<PreparedStatement> preparedStatementFuture = dbSession.prepareAsync(insert);
    return dbSession.bindAsync(preparedStatementFuture, values.toArray());
}
/**
 * Maps the RDD to (keys, values) cell tuples and writes them to Cassandra in batches of
 * the configured page size, creating the output table first when needed.
 *
 * NOTE(review): the whole mapped RDD is collected to the driver before writing, so this
 * is memory-bound for large RDDs — confirm that is acceptable for the expected data size.
 *
 * @throws IllegalArgumentException when the config is not a write configuration
 */
public static <W> void doCql3SaveToCassandra(RDD<W> rdd, ICassandraDeepJobConfig<W> writeConfig,
        Function1<W, Tuple2<Cells, Cells>> transformer) {
    if (!writeConfig.getIsWriteConfig()) {
        throw new IllegalArgumentException("Provided configuration object is not suitable for writing");
    }
    Tuple2<Map<String, ByteBuffer>, Map<String, ByteBuffer>> tuple = new Tuple2<>(null, null);

    RDD<Tuple2<Cells, Cells>> mappedRDD = rdd.map(transformer,
            ClassTag$.MODULE$.<Tuple2<Cells, Cells>>apply(tuple.getClass()));

    // Derive the output table schema from the first mapped element.
    ((CassandraDeepJobConfig) writeConfig).createOutputTableIfNeeded(mappedRDD.first());

    final int pageSize = writeConfig.getBatchSize();
    int offset = 0;

    List<Tuple2<Cells, Cells>> elements = Arrays.asList((Tuple2<Cells, Cells>[]) mappedRDD.collect());

    List<Tuple2<Cells, Cells>> split;
    do {
        // Page through the collected elements; the final page may be short or empty.
        split = elements.subList(pageSize * (offset++), Math.min(pageSize * offset, elements.size()));

        Batch batch = QueryBuilder.batch();

        for (Tuple2<Cells, Cells> t : split) {
            Tuple2<String[], Object[]> bindVars = Utils.prepareTuple4CqlDriver(t);

            Insert insert = QueryBuilder
                    .insertInto(quote(writeConfig.getKeyspace()), quote(writeConfig.getTable()))
                    .values(bindVars._1(), bindVars._2());

            batch.add(insert);
        }
        writeConfig.getSession().execute(batch);

        // Stop once a page comes back short (or empty) — everything has been written.
    } while (!split.isEmpty() && split.size() == pageSize);
}
/**
 * Records that the player unlocked the given badge, stamping the unlock time with the
 * current date.
 */
@Override
public void unlockBadge(long playerId, UUID badgeId) {
    final Insert unlockRow = QueryBuilder
            .insertInto(session.getLoggedKeyspace(), "player_badges")
            .value("player_id", playerId)
            .value("badge_id", badgeId)
            .value("unlocked", new Date());
    session.execute(unlockRow);
}
/**
 * Builds an INSERT into the streaming keyspace for the given stream, appending a
 * time-based UUID for the timestamp column. All identifiers are quoted to preserve case.
 *
 * @param streamName          target table (quoted)
 * @param columns             column name/value pairs to insert
 * @param timestampColumnName column that receives the generated time-based UUID
 * @return the built insert statement
 */
public Insert createInsertStatement(String streamName, List<ColumnNameTypeValue> columns,
        String timestampColumnName) {
    final Insert statement =
            QueryBuilder.insertInto(addQuotes(STREAMING.STREAMING_KEYSPACE_NAME), addQuotes(streamName));
    for (final ColumnNameTypeValue column : columns) {
        statement.value(addQuotes(column.getColumn()), column.getValue());
    }
    statement.value(addQuotes(timestampColumnName), UUIDs.timeBased());
    return statement;
}
/**
 * createInsertStatement should target the streaming keyspace, with the keyspace name
 * quoted to preserve case.
 */
@Test
public void testCreateInsertStatement() throws Exception {
    Insert insert = service.createInsertStatement(TABLE, columns, "timestamp");
    assertEquals("Expected keyspace not found",
            "\"" + STREAMING.STREAMING_KEYSPACE_NAME + "\"", insert.getKeyspace());
}
/**
 * Writes the given events to Cassandra as a single batch at the configured consistency
 * level. Unmappable events are skipped with a warning. As soon as one event lacks the
 * table's primary key, processing of the remaining events stops.
 *
 * NOTE(review): the primary-key branch uses {@code break}, not {@code continue}, so later
 * events are silently dropped — confirm this early-exit is intentional.
 */
public void save(final List<Event> events) {
    final BatchStatement batch = new BatchStatement();
    for (final Event event : events) {
        final Map<String, Object> parsedEvent = parse(event);
        if (parsedEvent.isEmpty()) {
            log.warn("Event {} could not be mapped. Suggestion: Cassandra is case sensitive, so maybe you can check field names.", event);
            continue;
        }
        if (!hasPrimaryKey(parsedEvent)) {
            break;
        }
        final Insert insert = QueryBuilder.insertInto(table);
        // value() mutates the insert in place; no reassignment needed.
        for (final Map.Entry<String, Object> entry : parsedEvent.entrySet()) {
            insert.value(entry.getKey(), entry.getValue());
        }
        if (log.isTraceEnabled()) {
            log.trace("Preparing insert for table {}: {}", table.getName(), insert.getQueryString());
        }
        batch.add(insert);
    }
    if (batch.getStatements().isEmpty()) {
        log.warn("No event produced insert query for table {}", table.getName());
        return;
    }
    batch.setConsistencyLevel(consistencyLevel);
    session.execute(batch);
}
/**
 * Builds the CQL string for inserting a session row, using positional '?' placeholders
 * so the caller can prepare it.
 *
 * @param table   target table name
 * @param timeout row TTL in seconds; values &lt;= 0 mean no USING TTL clause
 * @return the raw CQL INSERT string
 */
private static String insertSQL(final String table, final int timeout) {
    Insert insertInto = insertInto(table)
            .value(ID, raw("?"))
            .value(CREATED_AT, raw("?"))
            .value(ACCESSED_AT, raw("?"))
            .value(SAVED_AT, raw("?"))
            .value(ATTRIBUTES, raw("?"));
    if (timeout > 0) {
        // using() mutates the insert in place; the returned Options view is discarded.
        insertInto.using(ttl(timeout));
    }
    return insertInto.getQueryString();
}
/**
 * Runs the script against the session if it has not already been applied, then records
 * its execution (name + timestamp) in the bookkeeping table.
 *
 * @throws IOException when the script contents cannot be read
 */
private void apply(SchemaUpdatingScript script) throws IOException {
    if (!isNotApplied(script)) {
        return;
    }
    LOG.info("Applying script " + script);
    session.execute(script.readCQLContents());
    final Insert executionRecord = QueryBuilder.insertInto(TABLE_NAME)
            .value("key", script.name)
            .value("executed", new Date());
    session.execute(executionRecord);
    LOG.debug("Script " + script + " successfully applied.");
}
/**
 * Appends a USING TTL bind marker ("ttl_") to the insert when a TTL is configured;
 * otherwise returns the insert untouched.
 */
private RegularStatement maybeUseTtl(Insert value) {
    if (indexTtl == null) {
        return value;
    }
    return value.using(QueryBuilder.ttl(QueryBuilder.bindMarker("ttl_")));
}
/** Adds this index table's partition key columns (service_name, bucket) as bind markers. */
@Override
public Insert declarePartitionKey(Insert insert) {
    final Insert withServiceName = insert.value("service_name", QueryBuilder.bindMarker("service_name"));
    return withServiceName.value("bucket", QueryBuilder.bindMarker("bucket"));
}
/** Adds this index table's single partition key column (service_span_name) as a bind marker. */
@Override
public Insert declarePartitionKey(Insert insert) {
    return insert.value("service_span_name", QueryBuilder.bindMarker("service_span_name"));
}
/** Adds this index table's partition key columns (annotation, bucket) as bind markers. */
@Override
public Insert declarePartitionKey(Insert insert) {
    final Insert withAnnotation = insert.value("annotation", QueryBuilder.bindMarker("annotation"));
    return withAnnotation.value("bucket", QueryBuilder.bindMarker("bucket"));
}
/**
 * Builds and prepares the primary-key-only INSERT (IF NOT EXISTS) at the configured
 * write consistency level.
 */
protected void initInsertStatement() {
    final Insert pkOnlyInsert =
            applyConsistencyLevel(generateInsert(table, pkColumns, true, ttl), writeConsistencyLevel);
    LOGGER.debug("Generated Insert {}", pkOnlyInsert);
    insertStatement = getSession().prepare(pkOnlyInsert);
}
/**
 * Persists a new entity (assigning it a random UUID id) together with its index rows and
 * one-to-many join rows, all in a single batch executed at the given consistency level.
 *
 * <p>Preconditions: the store is open, the object's class is annotated with the entity
 * annotation, and its id property is still null. Related entities must already have been
 * persisted (non-null ids), otherwise an IllegalStateException is thrown.
 *
 * @return the same object, with its id populated and the instance cached
 */
@Override
public <T> T create(T object, ConsistencyLevel consistency) {
    checkNotNull(object, "object argument");
    checkNotNull(consistency, "consistency argument");
    checkState(m_isOpen, format("%s is closed", getClass().getSimpleName()));
    checkArgument(
            object.getClass().isAnnotationPresent(ENTITY),
            format("%s not annotated with @%s", getClass().getSimpleName(), ENTITY.getCanonicalName()));

    Schema schema = getSchema(object);

    // Creating means assigning the id ourselves; a pre-set id indicates misuse.
    checkArgument(
            schema.getID().getValue(object) == null,
            format("property annotated with @%s must be null", ID.getCanonicalName()));

    // Object persistence (incl. indices)
    UUID id = UUID.randomUUID();
    Batch batch = batch();
    Insert insertStatement = insertInto(schema.getTableName()).value(schema.getID().getName(), id);

    for (ColumnSpec colSpec : schema.getColumns()) {
        insertStatement.value(colSpec.getName(), colSpec.getValue(object));

        // Indexed columns get a reverse-lookup row: column value -> entity id.
        if (colSpec.isIndexed()) {
            String tableName = indexTableName(schema.getTableName(), colSpec.getName());
            batch.add(
                    insertInto(tableName)
                            .value(colSpec.getName(), colSpec.getValue(object))
                            .value(joinColumnName(schema.getTableName()), id)
            );
        }
    }

    batch.add(insertStatement);

    // One-to-Many relationship persistence: one join-table row per related entity.
    for (OneToManySpec relationSpec : schema.getOneToManys()) {
        Schema s = relationSpec.getSchema();

        Object relations = relationSpec.getValue(object);

        if (relations == null) {
            continue;
        }

        for (Object item : (Collection<?>) relations) {
            UUID relationID = (UUID) s.getID().getValue(item);

            if (relationID == null) {
                throw new IllegalStateException(
                        "encountered relation with null ID property (entity not persisted?)");
            }

            String joinTable = joinTableName(schema.getTableName(), s.getTableName());
            batch.add(
                    insertInto(joinTable)
                            .value(joinColumnName(schema.getTableName()), id)
                            .value(joinColumnName(s.getTableName()), relationID)
            );
        }
    }

    executeStatement(batch, consistency);

    // Only now is the id made visible on the object, and the instance cached.
    schema.getID().setValue(object, id);
    cacheInstance(object);

    return object;
}
/**
 * Insert into cassandra.
 *
 * @param entityId  entity id
 * @param entityKey entity key
 * @param tenant    tenant name, used as the keyspace
 * @return the result of insert ({@code wasApplied()} on the driver result set)
 */
public boolean insertKeyById(Long entityId, String entityKey, String tenant) {
    final Insert keyByIdInsert = QueryBuilder.insertInto(tenant, TABLE_KEY_ID)
            .value(ENTITY_ID_COL, entityId)
            .value(ENTITY_KEY_COL, entityKey);
    final ResultSet resultSet = session.execute(keyByIdInsert);
    return resultSet.wasApplied();
}
/**
 * Contributes this index table's partition key columns (as bind markers) to the given
 * INSERT and returns the resulting statement.
 */
Insert declarePartitionKey(Insert insert);