private void connect(String seeds) {
    // Create the builder up front so the non-SSL path is also covered.
    builder = Cluster.builder();
    if (getWithSSL()) {
        LOGGER.info("SSL mode enabled");
        try {
            SSLOptions sslOptions = new SSLOptions(SSLContext.getDefault(), CIPHERS);
            builder = builder.withSSL(sslOptions);
        } catch (NoSuchAlgorithmException e) {
            LOGGER.error("Unable to set up SSL options for Cassandra", e);
        }
    }
    String[] contactPoints = seeds.split(",");
    for (String contactPoint : contactPoints) {
        LOGGER.info("Adding Cassandra contact point " + contactPoint);
        builder.addContactPoints(contactPoint);
    }
    cluster = builder.build();
    Metadata metadata = cluster.getMetadata();
    for (Host host : metadata.getAllHosts()) {
        LOGGER.info("Datacenter: " + host.getDatacenter() + "; Host: " + host.getAddress()
                + "; Rack: " + host.getRack());
    }
    // Connect once, after logging the topology, rather than once per host.
    session = cluster.connect();
}
@Override
public ResultSet<CassandraDBContext> execute(Query<CassandraDBContext> query) throws QueryExecutionException {
    try (Cluster cassandraConnection = buildConnection()) {
        final Metadata metadata = cassandraConnection.getMetadata();
        System.out.printf("Connected to cluster: %s%n", metadata.getClusterName());
        for (final Host host : metadata.getAllHosts()) {
            System.out.printf("Datacenter: %s; Host: %s; Rack: %s%n",
                    host.getDatacenter(), host.getAddress(), host.getRack());
        }
        try (Session session = cassandraConnection.connect()) {
            String queryToExecute = query.getQuery();
            System.out.println(queryToExecute);
            com.datastax.driver.core.ResultSet resultSet = session.execute(queryToExecute);
            printResultSet(resultSet);
            ExecutionInfo executionInfo = resultSet.getExecutionInfo();
            System.out.println(executionInfo);
        }
    }
    // There is no result set for this use case.
    return new CassandraResultSet();
}
private void connectToMultipleAddresses(String address) {
    PoolingOptions poolingOptions = new PoolingOptions()
            .setConnectionsPerHost(HostDistance.LOCAL, 4, 10)
            .setConnectionsPerHost(HostDistance.REMOTE, 2, 4);
    String[] music_hosts = address.split(",");
    if (cluster == null) {
        logger.debug("Initializing MUSIC Client with endpoints " + address);
        cluster = Cluster.builder()
                .withPort(9042)
                .withPoolingOptions(poolingOptions)
                .withoutMetrics()
                .addContactPoints(music_hosts)
                .build();
        Metadata metadata = cluster.getMetadata();
        logger.debug("Connected to cluster: " + metadata.getClusterName() + " at address: " + address);
    }
    session = cluster.connect();
}
@SuppressWarnings("unused")
private void connectToCassaCluster(String address) {
    PoolingOptions poolingOptions = new PoolingOptions()
            .setConnectionsPerHost(HostDistance.LOCAL, 4, 10)
            .setConnectionsPerHost(HostDistance.REMOTE, 2, 4);
    Iterator<String> it = getAllPossibleLocalIps().iterator();
    logger.debug("Iterating through possible ips: " + getAllPossibleLocalIps());
    while (it.hasNext()) {
        try {
            cluster = Cluster.builder()
                    .withPort(9042)
                    .withPoolingOptions(poolingOptions)
                    .withoutMetrics()
                    .addContactPoint(address)
                    .build();
            //cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(Integer.MAX_VALUE);
            Metadata metadata = cluster.getMetadata();
            logger.debug("Connected to cluster: " + metadata.getClusterName() + " at address: " + address);
            session = cluster.connect();
            break;
        } catch (NoHostAvailableException e) {
            // On failure, retry with the next candidate local IP.
            address = it.next();
        }
    }
}
private void connectToCassaCluster() {
    Iterator<String> it = getAllPossibleLocalIps().iterator();
    String address = "localhost";
    logger.debug("Connecting to cassa cluster: Iterating through possible ips: " + getAllPossibleLocalIps());
    while (it.hasNext()) {
        try {
            cluster = Cluster.builder().withPort(9042).addContactPoint(address).build();
            //cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(Integer.MAX_VALUE);
            Metadata metadata = cluster.getMetadata();
            logger.debug("Connected to cassa cluster " + metadata.getClusterName() + " at " + address);
            /*
            for (Host host : metadata.getAllHosts()) {
                System.out.printf("Datacenter: %s; Host broadcast: %s; Rack: %s\n",
                        host.getDatacenter(), host.getBroadcastAddress(), host.getRack());
            }
            */
            session = cluster.connect();
            break;
        } catch (NoHostAvailableException e) {
            address = it.next();
        }
    }
}
private void connectToCassaCluster() {
    Iterator<String> it = getAllPossibleLocalIps().iterator();
    String address = "localhost";
    // logger.debug("Connecting to cassa cluster: Iterating through possible ips:" + getAllPossibleLocalIps());
    while (it.hasNext()) {
        try {
            cluster = Cluster.builder().withPort(9042).addContactPoint(address).build();
            //cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(Integer.MAX_VALUE);
            Metadata metadata = cluster.getMetadata();
            // logger.debug("Connected to cassa cluster " + metadata.getClusterName() + " at " + address);
            /*
            for (Host host : metadata.getAllHosts()) {
                System.out.printf("Datacenter: %s; Host broadcast: %s; Rack: %s\n",
                        host.getDatacenter(), host.getBroadcastAddress(), host.getRack());
            }
            */
            session = cluster.connect();
            break;
        } catch (NoHostAvailableException e) {
            address = it.next();
        }
    }
}
@Test
public void testClusterHintsPollerWhenNodeDown() throws UnknownHostException {
    ClusterHintsPoller clusterHintsPoller = new ClusterHintsPoller();
    Session mockSession = mock(Session.class);
    Cluster mockCluster = mock(Cluster.class);
    Metadata mockMetadata = mock(Metadata.class);
    when(mockCluster.getMetadata()).thenReturn(mockMetadata);
    when(mockCluster.getClusterName()).thenReturn("test-cluster");
    Host node1 = mock(Host.class);
    when(node1.getAddress()).thenReturn(InetAddress.getByName("127.0.0.1"));
    Host node2 = mock(Host.class);
    when(node2.getAddress()).thenReturn(InetAddress.getByName("127.0.0.2"));
    Host node3 = mock(Host.class);
    when(node3.getAddress()).thenReturn(InetAddress.getByName("127.0.0.3"));
    when(mockSession.getCluster()).thenReturn(mockCluster);

    // The first node queried is down
    when(mockSession.execute(any(Statement.class)))
            .thenThrow(new NoHostAvailableException(ImmutableMap.<InetSocketAddress, Throwable>of()));
    when(mockMetadata.getAllHosts()).thenReturn(ImmutableSet.of(node1, node2, node3));

    HintsPollerResult actualResult = clusterHintsPoller.getOldestHintsInfo(mockSession);

    // Make sure HintsPollerResult fails
    assertFalse(actualResult.areAllHostsPolling(), "Result should show hosts failing");
    assertEquals(actualResult.getHostFailure(), ImmutableSet.of(InetAddress.getByName("127.0.0.1")),
            "Node 1 should return with host failure");
}
/**
 * Ensures that the Mock Cassandra instance is up and running. Will reinit
 * the database every time it is called.
 *
 * @param cassandraKeyspace Cassandra keyspace to setup.
 * @return A cluster object.
 * @throws ConfigurationException
 * @throws IOException
 * @throws InterruptedException
 * @throws TTransportException
 */
public static Cluster ensureMockCassandraRunningAndEstablished(String cassandraKeyspace)
        throws ConfigurationException, IOException, InterruptedException, TTransportException {
    Cluster cluster;
    long timeout = 60000;
    EmbeddedCassandraServerHelper.startEmbeddedCassandra(timeout);
    cluster = Cluster.builder().addContactPoints("127.0.0.1").withPort(9142).build();
    //Thread.sleep(20000);//time to let cassandra startup
    final Metadata metadata = cluster.getMetadata();
    Session session = cluster.connect();
    Utils.initDatabase(DB_CQL, session);
    session = cluster.connect(cassandraKeyspace);
    logger.info("Connected to cluster: " + metadata.getClusterName() + '\n');
    return cluster;
}
public static Cluster loadTablesFromRemote(String host, int port, String cfidOverrides) throws IOException {
    Map<String, UUID> cfs = parseOverrides(cfidOverrides);
    Cluster.Builder builder = Cluster.builder().addContactPoints(host).withPort(port);
    Cluster cluster = builder.build();
    Metadata metadata = cluster.getMetadata();
    IPartitioner partitioner = FBUtilities.newPartitioner(metadata.getPartitioner());
    if (DatabaseDescriptor.getPartitioner() == null)
        DatabaseDescriptor.setPartitionerUnsafe(partitioner);
    for (com.datastax.driver.core.KeyspaceMetadata ksm : metadata.getKeyspaces()) {
        if (!ksm.getName().equals("system")) {
            for (TableMetadata tm : ksm.getTables()) {
                String name = ksm.getName() + "." + tm.getName();
                try {
                    CassandraUtils.tableFromCQL(
                            new ByteArrayInputStream(tm.asCQLQuery().getBytes()),
                            cfs.get(name) != null ? cfs.get(name) : tm.getId());
                } catch (SyntaxException e) {
                    // ignore tables that we can't parse (probably DSE)
                    logger.debug("Ignoring table " + name + " due to syntax exception " + e.getMessage());
                }
            }
        }
    }
    return cluster;
}
/**
 * Constructor.
 *
 * @param nodes a list of one or more Cassandra nodes to connect to. Note
 *              that not all Cassandra nodes in the cluster need be
 *              supplied; one will suffice. However, if that node is
 *              unavailable the connection attempt will fail, even if the
 *              others are available.
 */
public StoreConnection(List<String> nodes) {
    Cluster.Builder builder = Cluster.builder();
    for (String node : nodes) {
        builder.addContactPoint(node);
    }
    cluster = builder.build();
    Metadata metadata = cluster.getMetadata();
    System.out.printf("Connected to cluster: %s%n", metadata.getClusterName());
    for (Host host : metadata.getAllHosts()) {
        System.out.printf("Datacenter: %s; Host: %s; Rack: %s%n",
                host.getDatacenter(), host.getAddress(), host.getRack());
    }
    session = cluster.connect();
}
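A minimal usage sketch for the constructor above; the node addresses are placeholders rather than values from the original project:

// Hypothetical contact points; supplying more than one makes the initial
// connection resilient to a single seed node being down at startup.
StoreConnection connection = new StoreConnection(
        Arrays.asList("10.0.0.11", "10.0.0.12"));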
public SmartThriftClient(StressSettings settings, String keyspace, Metadata metadata) {
    this.metadata = metadata;
    this.keyspace = keyspace;
    this.settings = settings;
    if (!settings.node.isWhiteList) {
        whiteset = null;
        whitelist = null;
    } else {
        whiteset = settings.node.resolveAllSpecified();
        whitelist = Arrays.asList(whiteset.toArray(new InetAddress[0]));
    }
}
@Test
@ConditionalIgnoreRule.IgnoreIf(condition = NoRunningCassandraDbForTests.class)
public void connectsToTheCassandraCluster() throws Exception {
    Cluster cluster = Cluster.builder()
            .addContactPoint(new NoRunningCassandraDbForTests().getHost())
            .withPort(9042)
            .withClusterName("BluePrint")
            .build();
    Metadata metadata = cluster.getMetadata();
    LOG.info("Clustername:" + metadata.getClusterName());
    LOG.info("Partitioner:" + metadata.getPartitioner());
    LOG.info("Hosts:" + metadata.getAllHosts());
    LOG.info("KeySpaces:" + metadata.getKeyspaces());
    Session session = cluster.connect("system"); //system keyspace should always be present
    assertNotNull(session);
    session.close();
}
public List<Column> getSchema(String keySpace, String tableName) {
    Metadata m = session.getCluster().getMetadata();
    KeyspaceMetadata km = m.getKeyspace(keySpace);
    if (km == null)
        return null;
    TableMetadata tm = km.getTable(tableName);
    if (tm == null)
        return null;

    // build schema
    List<Column> columns = new LinkedList<Column>();
    for (ColumnMetadata cm : tm.getColumns()) {
        if (!meta.contains(cm.getName()))
            columns.add(Column.newBuilder().setName(cm.getName())
                    .setType(toSimbaType(cm.getType().toString())).build());
    }
    return columns;
}
private void fetchKeys() {
    // get CF meta data
    TableMetadata tableMetadata = session.getCluster()
            .getMetadata()
            .getKeyspace(Metadata.quote(keyspace))
            .getTable(Metadata.quote(cfName));
    if (tableMetadata == null) {
        throw new RuntimeException("No table metadata found for " + keyspace + "." + cfName);
    }
    // Here we assume that tableMetadata.getPartitionKey() always
    // returns the list of columns in order of component_index
    for (ColumnMetadata partitionKey : tableMetadata.getPartitionKey()) {
        partitionKeys.add(partitionKey.getName());
    }
}
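As a side note on the snippet above, Metadata.quote wraps an identifier in double quotes so that case-sensitive keyspace and table names are looked up exactly as stored. A small illustration; the keyspace name is a made-up example:

// "MyKeyspace" is a hypothetical, case-sensitive keyspace name.
String quoted = Metadata.quote("MyKeyspace"); // yields "\"MyKeyspace\""
KeyspaceMetadata ks = session.getCluster().getMetadata().getKeyspace(quoted);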
@Provides
@Singleton
Cluster provideCluster() {
    try {
        Cluster cluster = Cluster.builder()
                .addContactPointsWithPorts(Arrays.asList(
                        // new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9042) // mvn cassandra:run + nodetool enablebinary
                        new InetSocketAddress(InetAddress.getByName("localhost"), 9142) // cassandraunit
                ))
                .withRetryPolicy(DowngradingConsistencyRetryPolicy.INSTANCE)
                .withReconnectionPolicy(new ExponentialReconnectionPolicy(100L, 5000L))
                .build();
        Metadata metadata = cluster.getMetadata();
        LOGGER.info("Connected to cluster: '{}'", metadata.getClusterName());
        metadata.getAllHosts()
                .forEach(host -> LOGGER.info("Datacenter: '{}'; Host: '{}'; Rack: '{}'",
                        new Object[] { host.getDatacenter(), host.getAddress(), host.getRack() }));
        return cluster;
    } catch (UnknownHostException e) {
        LOGGER.error("Can't connect to Cassandra", e);
        return null;
    }
}
public void logCluster(Cluster cluster) {
    try {
        if (cluster != null && !cluster.isClosed()) {
            String clusterName = cluster.getClusterName();
            Metadata metadata = cluster.getMetadata();
            Set<Host> allHosts = metadata.getAllHosts();
            StringBuilder b = new StringBuilder(
                    "\nCassandra Cluster '" + clusterName + "' details (via native client driver) are:");
            for (Host host : allHosts) {
                b.append(ClusterProbe.prettyHost(host));
            }
            LOG.info(b.toString());
        } else {
            LOG.warn("Null or closed cluster");
        }
    } catch (Throwable t) {
        // logging is best-effort; ignore any failure here
    }
}
@Test(groups = {"system"})
public void testMetadata() throws Exception {
    Metadata metadata = cluster.getMetadata();
    assertTrue(metadata.getClusterName().length() > 0);
    if (LOG.isDebugEnabled()) {
        LOG.debug(String.format("Connected to cluster: %s\n", metadata.getClusterName()));
    }
    assertTrue(metadata.getAllHosts().size() > 0);
    for (Host host : metadata.getAllHosts()) {
        assertTrue(host.getDatacenter().length() > 0);
        assertNotNull(host.getAddress());
        assertTrue(host.getRack().length() > 0);
        if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("Datacenter: %s; Host: %s; Rack: %s\n",
                    host.getDatacenter(), host.getAddress(), host.getRack()));
        }
    }
}
private void createKeySpace() {
    try (Session session = cluster.connect()) { // session without an associated keyspace
        Metadata clusterMetadata = cluster.getMetadata();
        boolean keyspaceExists = clusterMetadata.getKeyspace(configuration.keyspace()) != null;
        if (!keyspaceExists) {
            log.debug("Creating a keyspace " + configuration.keyspace());
            session.execute("CREATE KEYSPACE IF NOT EXISTS " + configuration.keyspace()
                    + " WITH replication = " + configuration.replicationStrategy() + ";");
        }
        boolean entryTableExists = clusterMetadata.getKeyspace(configuration.keyspace())
                .getTable(configuration.entryTable()) != null;
        if (!entryTableExists) {
            log.debug("Creating an entry table " + configuration.entryTable());
            session.execute("CREATE TABLE " + configuration.keyspace() + "." + configuration.entryTable() + " ("
                    + "key blob PRIMARY KEY,"
                    + "value blob,"
                    + "metadata blob) WITH COMPRESSION = " + configuration.compression() + ";");
        }
    } catch (Exception e) {
        throw log.errorCreatingKeyspace(e);
    }
}
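For reference, configuration.replicationStrategy() above is expected to return a CQL replication map. The concrete value is project configuration; a typical single-datacenter development setting might look like the following (the values are illustrative assumptions, not taken from the original project):

// Hypothetical replication setting; with it, the keyspace statement above becomes roughly:
// CREATE KEYSPACE IF NOT EXISTS my_keyspace WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
String replicationStrategy = "{'class': 'SimpleStrategy', 'replication_factor': 1}";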
/**
 * Performs an analysis of the given keyspace in a Cassandra cluster
 * {@link Cluster} instance and detects the Cassandra type structure based
 * on the metadata provided by the DataStax Cassandra Java client.
 *
 * @see #detectTable(TableMetadata)
 *
 * @param cluster
 *            the cluster to inspect
 * @param keyspaceName
 *            the name of the keyspace to inspect
 * @return a mutable schema instance, useful for further fine tuning by the
 *         user.
 */
public static SimpleTableDef[] detectSchema(Cluster cluster, String keyspaceName) {
    final Metadata metadata = cluster.getMetadata();
    final KeyspaceMetadata keyspace = metadata.getKeyspace(keyspaceName);
    if (keyspace == null) {
        throw new IllegalArgumentException("Keyspace '" + keyspaceName + "' does not exist in the database");
    }
    final Collection<TableMetadata> tables = keyspace.getTables();
    final SimpleTableDef[] result = new SimpleTableDef[tables.size()];
    int i = 0;
    for (final TableMetadata tableMetaData : tables) {
        final SimpleTableDef table = detectTable(tableMetaData);
        result[i] = table;
        i++;
    }
    return result;
}
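A short usage sketch for detectSchema; the contact point and keyspace name below are placeholders rather than values taken from the original project:

// Hypothetical caller: inspect the tables of a keyspace named "my_keyspace".
Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
try {
    SimpleTableDef[] tableDefs = detectSchema(cluster, "my_keyspace");
    for (SimpleTableDef tableDef : tableDefs) {
        System.out.println(tableDef.getName());
    }
} finally {
    cluster.close();
}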
@Before
public void setUp() throws Exception {
    Metadata metadata = cluster.getMetadata();
    System.out.printf("Connected to cluster: %s\n", metadata.getClusterName());
    for (Host host : metadata.getAllHosts()) {
        System.out.printf("Datacenter: %s; Host: %s; Rack: %s\n",
                host.getDatacenter(), host.getAddress(), host.getRack());
    }
    session = cluster.connect();
    repository.createKeyspace();
    repository.createAoisTable();
    repository.createAclsTable();
    repository.createChilrenTable();
    SecurityContextHolder.getContext().setAuthentication(
            new UsernamePasswordAuthenticationToken(sid1, "password",
                    Arrays.asList(new SimpleGrantedAuthority[] {
                            new SimpleGrantedAuthority(ROLE_ADMIN) })));
}
@Before
public void setUp() throws Exception {
    Metadata metadata = cluster.getMetadata();
    System.out.printf("Connected to cluster: %s\n", metadata.getClusterName());
    for (Host host : metadata.getAllHosts()) {
        System.out.printf("Datacenter: %s; Host: %s; Rack: %s\n",
                host.getDatacenter(), host.getAddress(), host.getRack());
    }
    session = cluster.connect();
    service.createKeyspace();
    service.createAoisTable();
    service.createAclsTable();
    service.createChilrenTable();
    SecurityContextHolder.getContext().setAuthentication(
            new UsernamePasswordAuthenticationToken(sid1, "password",
                    Arrays.asList(new SimpleGrantedAuthority[] {
                            new SimpleGrantedAuthority(ROLE_ADMIN) })));
}
@Before
public void setUp() throws Exception {
    Metadata metadata = cluster.getMetadata();
    System.out.printf("Connected to cluster: %s\n", metadata.getClusterName());
    for (Host host : metadata.getAllHosts()) {
        System.out.printf("Datacenter: %s; Host: %s; Rack: %s\n",
                host.getDatacenter(), host.getAddress(), host.getRack());
    }
    session = cluster.connect();
    service.createKeyspace();
    service.createAoisTable();
    service.createAclsTable();
    service.createChilrenTable();
    SecurityContextHolder.getContext().setAuthentication(
            new UsernamePasswordAuthenticationToken(sid1, "password",
                    Arrays.asList(new SimpleGrantedAuthority[] {
                            new SimpleGrantedAuthority("ROLE_USER") })));
}
private static Cluster getCluster() {
    if (cluster == null) {
        synchronized (SessionManager.class) {
            if (cluster == null) {
                PoolingOptions poolingOptions = new PoolingOptions();
                poolingOptions
                        .setMaxRequestsPerConnection(HostDistance.REMOTE, max)
                        .setMaxRequestsPerConnection(HostDistance.LOCAL, max)
                        .setMaxQueueSize(max * 10)
                        .setCoreConnectionsPerHost(HostDistance.LOCAL, 1)
                        .setMaxConnectionsPerHost(HostDistance.LOCAL, 2)
                        .setCoreConnectionsPerHost(HostDistance.REMOTE, 1)
                        .setMaxConnectionsPerHost(HostDistance.REMOTE, 2);
                SocketOptions socketOptions = new SocketOptions();
                socketOptions.setConnectTimeoutMillis(60000);
                socketOptions.setReadTimeoutMillis(60000);
                cluster = Cluster.builder()
                        .addContactPoint(url)
                        .withPoolingOptions(poolingOptions)
                        .withSocketOptions(socketOptions)
                        .build();
                Metadata metadata = cluster.getMetadata();
                Set<Host> allHosts = metadata.getAllHosts();
                for (Host host : allHosts) {
                    System.out.println("host:" + host.getAddress());
                }
            }
        }
    }
    // Note: for this double-checked locking pattern to be safe, the cluster field should be declared volatile.
    return cluster;
}
private void connectToCassaCluster(String address) {
    cluster = Cluster.builder().withPort(9042).addContactPoint(address).build();
    Metadata metadata = cluster.getMetadata();
    logger.debug("Connected to cassa cluster " + metadata.getClusterName() + " at " + address);
    /*
    for (Host host : metadata.getAllHosts()) {
        System.out.printf("Datacenter: %s; Host broadcast: %s; Rack: %s\n",
                host.getDatacenter(), host.getBroadcastAddress(), host.getRack());
    }
    */
    session = cluster.connect();
}
public ArrayList<String> getAllNodePublicIps() {
    Metadata metadata = cluster.getMetadata();
    ArrayList<String> nodePublicIps = new ArrayList<String>();
    for (Host host : metadata.getAllHosts()) {
        // Note: Host.getBroadcastAddress() may be null when the driver does not know the node's broadcast address.
        nodePublicIps.add(host.getBroadcastAddress().getHostAddress());
    }
    return nodePublicIps;
}
private void connectToCassaCluster(String address) {
    cluster = Cluster.builder().withPort(9042).addContactPoint(address).build();
    Metadata metadata = cluster.getMetadata();
    /*
    for (Host host : metadata.getAllHosts()) {
        System.out.printf("Datacenter: %s; Host broadcast: %s; Rack: %s\n",
                host.getDatacenter(), host.getBroadcastAddress(), host.getRack());
    }
    */
    session = cluster.connect();
}
public void connect(String node) {
    cassandraCluster = Cluster.builder().addContactPoint(node).build();
    Metadata metadata = cassandraCluster.getMetadata();
    System.out.printf("Connected to cluster: %s\n", metadata.getClusterName());
    for (Host host : metadata.getAllHosts()) {
        System.out.printf("Datacenter: %s; Host: %s; Rack: %s\n",
                host.getDatacenter(), host.getAddress(), host.getRack());
    }
}
private Cluster createClusterMock() {
    Metadata metadata = Mockito.mock(Metadata.class);
    Mockito.when(metadata.getPartitioner()).thenReturn(MURMUR3_PARTITIONER);
    Cluster cluster = Mockito.mock(Cluster.class);
    Mockito.when(cluster.getMetadata()).thenReturn(metadata);
    return cluster;
}
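A sketch of how such a mock could be exercised in a unit test; the test name and assertion are illustrative, and MURMUR3_PARTITIONER is assumed to be a constant holding the partitioner class name:

@Test
public void returnsConfiguredPartitioner() {
    // The mocked Cluster reports the Murmur3 partitioner without any live connection.
    Cluster cluster = createClusterMock();
    assertEquals(MURMUR3_PARTITIONER, cluster.getMetadata().getPartitioner());
}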
/**
 * Private constructor as this is a singleton object
 */
private Fixtures(String seedsList, boolean mockCassandra) throws Exception {
    cassandraKeyspace = "docussandra";
    cassandraSeeds = seedsList.split(",");
    Cluster cluster;
    if (mockCassandra) // using cassandra-unit for testing
    {
        cluster = Fixtures.ensureMockCassandraRunningAndEstablished(cassandraKeyspace);
    } else if (seedsList.startsWith("172.17.")) {
        cluster = Fixtures.ensureDockerCassandraRunningAndEstablished(cassandraKeyspace, cassandraSeeds[0]);
    } else // using a remote or local server for testing
    {
        cluster = Cluster.builder().addContactPoints(cassandraSeeds).build();
    }
    final Metadata metadata = cluster.getMetadata();
    session = cluster.connect(this.getCassandraKeyspace());
    logger.info("Connected to cluster: " + metadata.getClusterName() + '\n');
    indexRepo = new IndexRepositoryImpl(session);
    cleanUpInstance = new ITableRepositoryImpl(getSession());
    databaseRepo = new DatabaseRepositoryImpl(getSession());
    docRepo = new DocumentRepositoryImpl(getSession());
    tableRepo = new TableRepositoryImpl(getSession());
    indexStatusRepo = new IndexStatusRepositoryImpl(getSession());
    // set up bus just like rest express would
    EventBus bus = new LocalEventBusBuilder()
            .subscribe(new IndexCreatedHandler(indexRepo, indexStatusRepo, docRepo))
            .build();
    DomainEvents.addBus("local", bus);
}
/**
 * Ensures that the Docker Cassandra instance is up and running. Will reinit
 * the database every time it is called.
 *
 * @param cassandraKeyspace Cassandra keyspace to setup.
 * @param seed the seed node address to connect to.
 * @return A cluster object.
 * @throws ConfigurationException
 * @throws IOException
 * @throws InterruptedException
 * @throws TTransportException
 */
public static Cluster ensureDockerCassandraRunningAndEstablished(String cassandraKeyspace, String seed)
        throws ConfigurationException, IOException, InterruptedException, TTransportException {
    Cluster cluster = Cluster.builder().addContactPoints(seed).withPort(9042).build();
    //Thread.sleep(20000);//time to let cassandra startup
    final Metadata metadata = cluster.getMetadata();
    Session session = cluster.connect();
    Utils.initDatabase(DB_CQL, session);
    session = cluster.connect(cassandraKeyspace);
    logger.info("Connected to cluster: " + metadata.getClusterName() + '\n');
    return cluster;
}
public void initialize(String node) {
    cluster = Cluster.builder().addContactPoint(node).build();
    final Metadata metadata = cluster.getMetadata();
    log.info("Connected to cluster: {}", metadata.getClusterName());
    metadata.getAllHosts().stream()
            .forEach((host) -> {
                log.info("Datacenter: {}; Host: {}; Rack: {}",
                        host.getDatacenter(), host.getAddress(), host.getRack());
            });
}
public CassandraConn(String node, String keyspace, String username, String password) {
    // Configure request-per-connection thresholds and pool sizes.
    PoolingOptions pools = new PoolingOptions();
    pools.setMaxSimultaneousRequestsPerConnectionThreshold(HostDistance.LOCAL, maxRequestPerConnection);
    pools.setMinSimultaneousRequestsPerConnectionThreshold(HostDistance.LOCAL, minRequestPerConnection);
    pools.setCoreConnectionsPerHost(HostDistance.LOCAL, coreConnectionLocalPerHost);
    pools.setMaxConnectionsPerHost(HostDistance.LOCAL, maxConnectionLocalPerHost);
    pools.setCoreConnectionsPerHost(HostDistance.REMOTE, coreConnectionRemotePerHost);
    pools.setMaxConnectionsPerHost(HostDistance.REMOTE, maxConnectionRemotePerHost);
    cluster = Cluster.builder()
            .addContactPoint(node)
            .withPoolingOptions(pools)
            .withCredentials(username, password)
            .withSocketOptions(new SocketOptions().setTcpNoDelay(true))
            .build();
    Metadata metadata = cluster.getMetadata();
    System.out.printf("Connected to cluster: %s\n", metadata.getClusterName());
    for (Host host : metadata.getAllHosts()) {
        System.out.printf("Datacenter: %s; Host: %s; Rack: %s\n",
                host.getDatacenter(), host.getAddress(), host.getRack());
    }
    session = cluster.connect(keyspace);
}
public List<String> getTableNames(String keySpace) {
    Metadata clusterMetadata = cluster.getMetadata();
    KeyspaceMetadata keyspaceMetadata = clusterMetadata.getKeyspace(keySpace);
    if (keyspaceMetadata == null) {
        return ImmutableList.of();
    }
    return keyspaceMetadata.getTables().stream()
            .map(TableMetadata::getName)
            .collect(toList());
}
private void initialize() {
    Session session = cluster.connect();
    Metadata metadata = cluster.getMetadata();
    KeyspaceMetadata keyspaceMetadata = metadata.getKeyspace(KEYSPACE);
    if (keyspaceMetadata == null) {
        session.execute("CREATE KEYSPACE " + KEYSPACE
                + " WITH REPLICATION = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' }"
                + " AND DURABLE_WRITES = true;");
    }
    session = cluster.connect(KEYSPACE);
    metadata = cluster.getMetadata();
    keyspaceMetadata = metadata.getKeyspace(KEYSPACE);
    TableMetadata dataTableMetadata = keyspaceMetadata.getTable(DATA_TABLE);
    if (dataTableMetadata == null) {
        session.execute("CREATE TABLE " + KEYSPACE + "." + DATA_TABLE + " ("
                + DATA_TABLE_ATTRIBUTE_1 + " ascii,"
                + DATA_TABLE_ATTRIBUTE_2 + " timeuuid,"
                + DATA_TABLE_ATTRIBUTE_3 + " blob,"
                + "PRIMARY KEY (" + DATA_TABLE_ATTRIBUTE_1 + ", " + DATA_TABLE_ATTRIBUTE_2 + "))"
                + " WITH COMPACT STORAGE"
                + " AND read_repair_chance = 0.0"
                + " AND dclocal_read_repair_chance = 0.1"
                + " AND gc_grace_seconds = 864000"
                + " AND bloom_filter_fp_chance = 0.01"
                + " AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' }"
                + " AND comment = ''"
                + " AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' }"
                + " AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor' }"
                + " AND default_time_to_live = 0"
                + " AND speculative_retry = 'NONE'"
                + " AND min_index_interval = 128"
                + " AND max_index_interval = 2048;");
    }
}
private String getConnectionInfo(Metadata metadata) {
    StringBuilder sb = new StringBuilder();
    sb.append("Connected to cluster: ");
    sb.append(metadata.getClusterName());
    sb.append("\n");
    for (Host host : metadata.getAllHosts()) {
        sb.append("Data center: ");
        sb.append(host.getDatacenter());
        sb.append("; Host: ");
        sb.append(host.getAddress());
    }
    return sb.toString();
}
@Test
public void testKeyspaceQuoting() throws Exception {
    CassandraManagedConnectionFactory config = new CassandraManagedConnectionFactory();
    config.setKeyspace("\"x\"");
    Metadata metadata = Mockito.mock(Metadata.class);
    CassandraConnectionImpl cci = new CassandraConnectionImpl(config, metadata);
    KeyspaceMetadata key_metadata = Mockito.mock(KeyspaceMetadata.class);
    Mockito.stub(metadata.getKeyspace("x")).toReturn(key_metadata);
    assertNotNull(cci.keyspaceInfo());
}