public void start() {
    int mb = 1024 * 1024;
    LOG.info("Max memory: {} mb", Runtime.getRuntime().maxMemory() / mb);
    LOG.info("Starting up Kinesis Consumer... (may take a few seconds)");

    AmazonKinesisClient kinesisClient = new AmazonKinesisClient(kinesisCfg.getKinesisCredentialsProvider(),
            kinesisCfg.getKinesisClientConfiguration());
    AmazonDynamoDBClient dynamoDBClient = new AmazonDynamoDBClient(kinesisCfg.getDynamoDBCredentialsProvider(),
            kinesisCfg.getDynamoDBClientConfiguration());
    AmazonCloudWatch cloudWatchClient = new AmazonCloudWatchClient(kinesisCfg.getCloudWatchCredentialsProvider(),
            kinesisCfg.getCloudWatchClientConfiguration());

    Worker worker = new Worker.Builder()
            .recordProcessorFactory(() -> new RecordProcessor(unitOfWorkListener, exceptionStrategy, metricsCallback, dry))
            .config(kinesisCfg)
            .kinesisClient(kinesisClient)
            .dynamoDBClient(dynamoDBClient)
            .cloudWatchClient(cloudWatchClient)
            .build();
    worker.run();
}
private double getInstanceAverageLoad(AmazonCloudWatchClient cloudWatchClient, String instanceId) {
    long offsetInMilliseconds = 1000 * 60 * 60;
    GetMetricStatisticsRequest request = new GetMetricStatisticsRequest()
            .withStartTime(new Date(new Date().getTime() - offsetInMilliseconds))
            .withNamespace("AWS/EC2")
            .withPeriod(60 * 60)
            .withDimensions(new Dimension().withName("InstanceId").withValue(instanceId))
            .withMetricName("CPUUtilization")
            .withStatistics("Average", "Maximum")
            .withEndTime(new Date());
    GetMetricStatisticsResult getMetricStatisticsResult = cloudWatchClient.getMetricStatistics(request);

    // With a one-hour window and a one-hour period, at most one datapoint is expected,
    // so the last value seen is the hourly average. A typed list replaces the raw List and cast.
    double avgCPUUtilization = 0;
    List<Datapoint> dataPoints = getMetricStatisticsResult.getDatapoints();
    for (Datapoint dp : dataPoints) {
        avgCPUUtilization = dp.getAverage();
    }
    return avgCPUUtilization;
}
private CloudWatchReporter(MetricRegistry registry, AmazonCloudWatchClient client, String namespace,
                           TimeUnit rateUnit, TimeUnit durationUnit, boolean reportAggregates,
                           MetricFilter filter, Map<String, String> dimensions) {
    super(registry, "cloudwatch-reporter", filter, rateUnit, durationUnit);
    this.client = client;
    this.namespace = namespace;
    this.dimensions = new ArrayList<>();
    this.reportAggregates = reportAggregates;
    for (Map.Entry<String, String> me : dimensions.entrySet()) {
        this.dimensions.add(new Dimension().withName(me.getKey()).withValue(me.getValue()));
    }
}
public KafkaDynamoStreamAdapter(String regionName, String srcTable, IRecordProcessorFactory processorFactory) {
    sourceTable = srcTable;
    credentialsProvider = new DefaultAWSCredentialsProviderChain();
    recordProcessorFactory = processorFactory;

    adapterClient = new AmazonDynamoDBStreamsAdapterClient(credentialsProvider, new ClientConfiguration());
    dynamoDBClient = new AmazonDynamoDBClient(credentialsProvider, new ClientConfiguration());
    cloudWatchClient = new AmazonCloudWatchClient(credentialsProvider, new ClientConfiguration());

    if ("local".equalsIgnoreCase(regionName)) {
        setClientEndpoints(localddbEndpoint);
    } else if (regionName != null) {
        Region region = Region.getRegion(Regions.fromName(regionName));
        adapterClient.setRegion(region);
        dynamoDBClient.setRegion(region);
        cloudWatchClient.setRegion(region);
    }
}
/**
 * Collect data for CloudWatch.
 *
 * @param stats current statistics object.
 * @param account currently used credentials object.
 * @param region currently used aws region.
 */
public static void scanCloudWatch(AwsStats stats, AwsAccount account, Regions region) {
    LOG.debug("Scan for CloudWatch in region " + region.getName() + " in account " + account.getAccountId());

    try {
        AmazonCloudWatchClient cw = new AmazonCloudWatchClient(account.getCredentials());
        cw.setRegion(Region.getRegion(region));

        int totalMetrics = 0;
        for (Metric m : cw.listMetrics().getMetrics()) {
            AwsResource res = new AwsResource(m.getMetricName(), account.getAccountId(), AwsResourceType.CloudWatch, region);
            stats.add(res);
            totalMetrics++;
        }

        LOG.info(totalMetrics + " CloudWatch metrics in region " + region.getName() + " in account " + account.getAccountId());
    } catch (AmazonServiceException ase) {
        LOG.error("Exception scanning CloudWatch: " + ase.getMessage());
    } catch (Exception ex) {
        LOG.error("Exception scanning CloudWatch: " + ex.getMessage());
    }
}
public void monitorCPUUsage() {
    AmazonCloudWatchClient cloudClient = new AmazonCloudWatchClient(getAwsCredentials());

    GetMetricStatisticsRequest request = new GetMetricStatisticsRequest();
    Calendar cal = Calendar.getInstance();
    request.setEndTime(cal.getTime());
    cal.add(Calendar.MINUTE, -5);
    request.setStartTime(cal.getTime());
    request.setNamespace("AWS/EC2");

    // CloudWatch only accepts the exact statistic names; "Maximium" in the original was a typo.
    List<String> statistics = new ArrayList<String>();
    statistics.add("Maximum");
    statistics.add("Average");
    request.setStatistics(statistics);
    request.setMetricName("CPUUtilization");
    request.setPeriod(300);

    Dimension dimension = new Dimension();
    dimension.setName("InstanceId");
    dimension.setValue("i-d93fa2a4");
    List<Dimension> dimensions = new ArrayList<Dimension>();
    dimensions.add(dimension);
    request.setDimensions(dimensions);

    GetMetricStatisticsResult result = cloudClient.getMetricStatistics(request);
    List<Datapoint> dataPoints = result.getDatapoints();
    for (Datapoint dataPoint : dataPoints) {
        System.out.println(dataPoint.getAverage());
    }
}
/***
 * All input parameters are required.
 *
 * @param topic
 * @param config
 * @param client
 * @param dynamoClient
 * @param cloudwatchClient
 * @param mapper
 */
public KinesisConsumer(final String topic, final KinesisClientLibConfiguration config,
        final AmazonKinesisClient client, final AmazonDynamoDBClient dynamoClient,
        final AmazonCloudWatchClient cloudwatchClient, final ObjectMapper mapper) {
    this.topic = Preconditions.checkNotNull(topic, "A valid kinesis topic is required");
    this.config = Preconditions.checkNotNull(config, "KinesisClientLibConfiguration is required");
    this.client = Preconditions.checkNotNull(client, "AmazonKinesisClient is required");
    this.dynamoClient = Preconditions.checkNotNull(dynamoClient, "AmazonDynamoDBClient is required");
    this.cloudwatchClient = Preconditions.checkNotNull(cloudwatchClient, "AmazonCloudWatchClient is required");
    this.mapper = Preconditions.checkNotNull(mapper, "ObjectMapper is required");
}
public GetStatisticsTask(PulseInstance pulse, String namespace, AWSProperties properties) {
    this.pulse = pulse;
    this.namespace = namespace;
    this.properties = properties;
    if (provider == null) {
        provider = AWSCredentialUtil.getCredentialProvider(
                properties.getCredential().getAccesskey(),
                properties.getCredential().getSecretkey());
    }
    cloudWatch = new AmazonCloudWatchClient(provider);
    cloudWatch.setEndpoint("monitoring." + properties.getRegion() + ".amazonaws.com");
}
/**
 * Create client using aws credentials provider. This is the preferred way for creating clients.
 */
@Override
protected AmazonCloudWatchClient createClient(ProcessContext processContext,
        AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) {
    getLogger().info("Creating client using aws credentials provider");
    return new AmazonCloudWatchClient(awsCredentialsProvider, clientConfiguration);
}
@Override
public void setConf(AbstractConfig config) {
    this.config = (CloudwatchReporterConfig) config;
    this.client = new AmazonCloudWatchClient();
    if (this.config.getRegion() != null) {
        this.client = this.client.withRegion(this.config.getRegion());
    }
}
@Provides
@Singleton
protected AmazonCloudWatch provideAmazonCloudWatch(Region region, AWSCredentialsProvider credentialsProvider) {
    AmazonCloudWatch cloudWatch = new AmazonCloudWatchClient(credentialsProvider);
    cloudWatch.setRegion(region);
    return cloudWatch;
}
/**
 * Set up the builder.
 *
 * @param registry which registry to report from.
 * @param client a client to use to push the metrics to CloudWatch.
 */
public Builder(MetricRegistry registry, AmazonCloudWatchClient client) {
    this.registry = registry;
    this.client = client;
    this.rateUnit = TimeUnit.SECONDS;
    this.durationUnit = TimeUnit.MILLISECONDS;
    this.filter = MetricFilter.ALL;
    this.dimensions = new HashMap<>();
}
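A minimal usage sketch of this builder, based on the fluent dimension/namespace setters exercised in the TryThisOut example further down; the namespace and dimension values here are placeholders:

// Sketch only: assumes the Builder exposes dimension(), namespace(), and build()
// as shown in the TryThisOut example below.
AmazonCloudWatchClient client = new AmazonCloudWatchClient();
CloudWatchReporter reporter = new CloudWatchReporter.Builder(registry, client)
        .dimension("instanceId", EC2MetadataUtils.getInstanceId())
        .namespace("some-namespace")
        .build();
reporter.start(5, TimeUnit.SECONDS); // push the registry's metrics every 5 seconds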
private void init() {
    String region = ConstrettoConfig.getString("cloudwatch.region");
    int intervalSeconds = ConstrettoConfig.getInt("cloudwatch.metrics.intervalSeconds");
    namespace = ConstrettoConfig.getString("cloudwatch.metrics.namespace");
    awsClient = new AmazonCloudWatchClient().withRegion(Region.getRegion(Regions.fromName(region)));
    log.info("Created CloudWatch metrics publisher for AWS region {}, using namespace {}", region, namespace);

    // Start thread which regularly publishes metrics.
    Executors.newSingleThreadScheduledExecutor()
            .scheduleWithFixedDelay(new Worker(), intervalSeconds, intervalSeconds, TimeUnit.SECONDS);
}
private List<List<Dimension>> getDimensions(MetricRule rule, AmazonCloudWatchClient client) {
    List<List<Dimension>> dimensions = new ArrayList<List<Dimension>>();
    if (rule.awsDimensions == null) {
        dimensions.add(new ArrayList<Dimension>());
        return dimensions;
    }

    ListMetricsRequest request = new ListMetricsRequest();
    request.setNamespace(rule.awsNamespace);
    request.setMetricName(rule.awsMetricName);
    List<DimensionFilter> dimensionFilters = new ArrayList<DimensionFilter>();
    for (String dimension : rule.awsDimensions) {
        dimensionFilters.add(new DimensionFilter().withName(dimension));
    }
    request.setDimensions(dimensionFilters);

    String nextToken = null;
    do {
        request.setNextToken(nextToken);
        ListMetricsResult result = client.listMetrics(request);
        cloudwatchRequests.inc();
        for (Metric metric : result.getMetrics()) {
            if (metric.getDimensions().size() != dimensionFilters.size()) {
                // AWS returns all the metrics with dimensions beyond the ones we ask for,
                // so filter them out.
                continue;
            }
            if (useMetric(rule, metric)) {
                dimensions.add(metric.getDimensions());
            }
        }
        nextToken = result.getNextToken();
    } while (nextToken != null);

    return dimensions;
}
/**
 * Create the observer with a given configuration.
 */
public CloudWatchObserver(PluginConfig config, PushManager pushManager) {
    this.config = config;
    this.pushManager = pushManager;

    final AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    final AmazonCloudWatchClient cloudWatchClient = new AmazonCloudWatchClient(credentialsProvider);
    final String region = NetflixEnvironment.region();
    if (region != null) {
        cloudWatchClient.setEndpoint(String.format("monitoring.%s.amazonaws.com", region));
    }

    this.cloudWatchMetricObserver =
            new CloudWatchMetricObserver(getName(), config.getCloudwatchNamespace(), cloudWatchClient);
}
@Override
public AmazonCloudWatchClient getClient() {
    AmazonCloudWatchClient client = new AmazonCloudWatchClient(new Aws().getAwsCredentials());
    String region = getUserProperty(AWS_REGION);
    String endpoint = "https://monitoring." + region + ".amazonaws.com"; //$NON-NLS-1$ //$NON-NLS-2$
    client.setEndpoint(endpoint);
    return client;
}
protected MetricsEmittingBasicClickEventsToKinesis(BlockingQueue<ClickEvent> inputQueue) {
    super(inputQueue);

    kinesis = new AmazonKinesisClient().withRegion(Regions.fromName(REGION));
    cw = new AmazonCloudWatchClient().withRegion(Regions.fromName(REGION));
}
/*** * Implementations that subclass {@link AwsApplicationFactory} should * override this method to create implementation specific factories for * events, http, queues, etc.. where an AWS service is not being used. * * @param config */ protected void createConfiguredFactories(final AwsConfig config) { /*** Create any clients that have been configured **/ if (config != null) { if (config.getKinesis() != null || config.getDynamo() != null) { /** both kinesis and dynamodb rely on the AmazonDynamoDbClient **/ this.dynamoClient = new AmazonDynamoDBClient(); /** Kinesis KCL uses the cloudwatchClient **/ if (this.config.getKinesis() != null) { this.kinesisClient = new AmazonKinesisClient(); this.cloudwatchClient = new AmazonCloudWatchClient(); events = new KinesisEventFactory(kinesisClient, this.dynamoClient, this.cloudwatchClient, this.mapper); } } if (config.getS3() != null) { this.s3Client = new AmazonS3Client(); files = new S3BlobFactory(this.s3Client, config.getS3().getDefaultDrive()); } if (config.getDynamo() != null) { databases = new DynamoDbMapperFactory(this.dynamoClient); } if (config.getSqs() != null) { this.sqsClient = new AmazonSQSClient(); queues = new SqsFactory(this.sqsClient, config.getSqs()); } } }
/***
 * Run the command to consume from a kinesis topic. This requires a configuration file that is able to load
 * an instance of KinesisConsumerConfig.
 */
@Override
protected void run(final Bootstrap<AppConfiguration> bootstrap, final Namespace namespace,
        final AppConfiguration configuration) throws Exception {
    Preconditions.checkNotNull(configuration.getCommands(),
            "the commands section must be configured in your yml configuration file");
    Preconditions.checkNotNull(configuration.getCommands().getKinesisConsumer(),
            "the kinesis consumer must be configured in your configuration file to run the kinesis consumer");
    Preconditions.checkNotNull(configuration.getCommands().getKinesisConsumer().getClientId(),
            "Kinesis consumer client id is required");
    Preconditions.checkNotNull(configuration.getCommands().getKinesisConsumer().getTopic(),
            "Kinesis consumer topic is required in your yaml configuration file");

    final KinesisConsumerConfig config = configuration.getCommands().getKinesisConsumer();
    final AmazonKinesisClient kinesis = new AmazonKinesisClient();
    final AmazonDynamoDBClient dynamodb = new AmazonDynamoDBClient();
    final AmazonCloudWatchClient cloudwatch = new AmazonCloudWatchClient();

    /** start the kinesis consumer **/
    try (EventFactory events = new KinesisEventFactory(kinesis, dynamodb, cloudwatch, bootstrap.getObjectMapper())) {
        final KinesisConsumerConfig kinesisConfig = configuration.getCommands().getKinesisConsumer();
        LOGGER.info("configuring kinesis consumer for clientId: {} - topicId: {}", config.getClientId(), config.getTopic());
        final TopicConsumer consumer = new TopicConsumer(
                events.createSubscriber(kinesisConfig.getClientId(), kinesisConfig.getTopic()),
                bootstrap.getObjectMapper());
        final Thread thread = new Thread(consumer);
        LOGGER.info("starting consumer for client: {} - topic: {}", config.getClientId(), config.getTopic());
        thread.start();
        thread.join();
    } catch (Exception ex) {
        LOGGER.error(ex.getMessage(), ex);
    } finally {
        LOGGER.info("shutting down consumer - client: {} - topic: {}", config.getClientId(), config.getTopic());
        kinesis.shutdown();
        dynamodb.shutdown();
        cloudwatch.shutdown();
    }
}
public static AmazonCloudWatchClient createCloudWatchClient() {
    BasicCredentialsProvider credentials = BasicCredentialsProvider.standard();
    AmazonCloudWatchClient client = !credentials.isValid() ? null
            : (AmazonCloudWatchClient) AmazonCloudWatchClientBuilder.standard()
                    .withCredentials(credentials)
                    .withRegion("eu-west-1")
                    .build();
    return client;
}
/**
 * Create client using AWSCredentials.
 *
 * @deprecated use {@link #createClient(ProcessContext, AWSCredentialsProvider, ClientConfiguration)} instead
 */
@Override
protected AmazonCloudWatchClient createClient(ProcessContext processContext, AWSCredentials awsCredentials,
        ClientConfiguration clientConfiguration) {
    getLogger().debug("Creating client with aws credentials");
    return new AmazonCloudWatchClient(awsCredentials, clientConfiguration);
}
protected PutMetricDataResult putMetricData(PutMetricDataRequest metricDataRequest) throws AmazonClientException {
    final AmazonCloudWatchClient client = getClient();
    final PutMetricDataResult result = client.putMetricData(metricDataRequest);
    return result;
}
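A request for the helper above could be assembled as in the following sketch using the standard SDK v1 model classes; the namespace, metric name, and value are placeholders rather than values from the surrounding project:

// Hypothetical caller of putMetricData(); "MyApp/Demo" and "ProcessedRecords" are placeholders.
PutMetricDataRequest request = new PutMetricDataRequest()
        .withNamespace("MyApp/Demo")
        .withMetricData(new MetricDatum()
                .withMetricName("ProcessedRecords")
                .withUnit(StandardUnit.Count)
                .withValue(42.0));
putMetricData(request);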
public PutMetricAlarmResult putAlarmData(PutMetricAlarmRequest metricAlarmRequest) {
    final AmazonCloudWatchClient client = getClient();
    final PutMetricAlarmResult result = client.putMetricAlarm(metricAlarmRequest);
    return result;
}
private static void init() throws Exception {
    // Fetch the metrics history kept in the Load Balancer's persistent storage and cache it
    // in internal memory for faster access.
    internalStorage = Request.getRanksFromPersistentMemory();
    if (internalStorage == null)
        internalStorage = new HashMap<String, BigInteger>();

    Connection.setupServer();

    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
                "Cannot load the credentials from the credential profiles file. " +
                "Please make sure that your credentials file is at the correct " +
                "location (~/.aws/credentials), and is in valid format.", e);
    }
    ec2 = new AmazonEC2Client(credentials);
    cloudWatch = new AmazonCloudWatchClient(credentials);

    try {
        /* Using AWS Ireland. Pick the zone where you have AMI, key and secgroup. */
        ec2.setEndpoint("ec2.eu-west-1.amazonaws.com");
        cloudWatch.setEndpoint("monitoring.eu-west-1.amazonaws.com");

        describeInstancesResult = ec2.describeInstances();
        reservations = describeInstancesResult.getReservations();
        listOfActiveInstances = new ArrayList<InstanceObject>();

        // How it works: collect all instances into an auxiliary list, then keep in the
        // final list only the instances that are actually running.
        ArrayList<Instance> auxListOfInstances = new ArrayList<Instance>();
        for (Reservation reservation : reservations) {
            auxListOfInstances.addAll(reservation.getInstances());
        }

        lockListOfActiveInstances.lock();
        for (Instance instance : auxListOfInstances) {
            if (AutoScaling.checkInstanceState(instance)) { // if the instance is running, add it
                // skip the Load Balancer's own instance id
                if (!instance.getInstanceId().equals(Connection.checkInstanceId())) {
                    InstanceObject instanceObject = new InstanceObject(instance);
                    listOfActiveInstances.add(instanceObject);
                }
            }
        }
        lockListOfActiveInstances.unlock();

        instanceDimension = new Dimension();
        instanceDimension.setName("InstanceId");
        List<Dimension> dims = new ArrayList<Dimension>();
        dims.add(instanceDimension);
    } catch (AmazonServiceException ase) {
        lockListOfActiveInstances.unlock();
        System.out.println("Caught Exception: " + ase.getMessage());
        System.out.println("Response Status Code: " + ase.getStatusCode());
        System.out.println("Error Code: " + ase.getErrorCode());
        System.out.println("Request ID: " + ase.getRequestId());
    }

    // Thread responsible for monitoring the instances (plays the Auto Scaling role).
    Thread thread = new ThreadScaling();
    thread.start();

    // A second thread performs the health checks of the initial instances.
    Thread threadHealth = new ThreadHealthChecks();
    threadHealth.start();
}
/**
 * @param namespace the namespace of the metric
 * @param name the name of the metric field
 */
public CloudwatchMetric(String namespace, String name) {
    this.client = new AmazonCloudWatchClient();
    this.namespace = namespace;
    this.name = name;
}
public CloudwatchReporter(AmazonCloudWatchClient client, final String namespace, final List<StatFilter> statFilters) {
    this.client = client;
    this.namespace = namespace;
    this.statFilters = statFilters;
}
public TryThisOut() {
    registry = new MetricRegistry();
    gauge = new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return gaugeValue;
        }
    };
    registry.register("gauge", gauge);
    counter = new Counter();
    registry.register("counter", counter);
    histogram = new Histogram(new SlidingTimeWindowReservoir(10, TimeUnit.SECONDS));
    registry.register("histogram", histogram);
    meter = new Meter();
    registry.register("meter", meter);
    timer = new Timer();
    registry.register("timer", timer);

    String instanceId = EC2MetadataUtils.getInstanceId();

    // try out the default constructor.
    AmazonCloudWatchClient client = new AmazonCloudWatchClient();
    reporter = new CloudWatchReporter.Builder(registry, client)
            .dimension("instanceId", instanceId)
            .namespace("some-namespace")
            .build();
    reporter.start(5, TimeUnit.SECONDS);

    executorService = Executors.newSingleThreadScheduledExecutor();
    executorService.scheduleAtFixedRate(new Runnable() {
        private int counterVal;
        private Random random = new Random();

        @Override
        public void run() {
            gaugeValue = counterVal++;
            counter.inc();
            Timer.Context context = timer.time();
            meter.mark();
            histogram.update((int) (random.nextGaussian() * 10));
            context.stop();
        }
    }, 0, 1, TimeUnit.SECONDS);
}
public AmazonCloudWatchClient getCloudWatchClient() { return cloudWatchClient; }
public void setCloudWatchClient(AmazonCloudWatchClient cloudWatchClient) { this.cloudWatchClient = cloudWatchClient; }
public KouplerMetrics(KinesisEventProducer producer, KinesisProducerConfiguration config, String appName) {
    this(producer, appName);
    cloudWatch = new AmazonCloudWatchClient();
    Region region = Region.getRegion(Regions.fromName(config.getRegion()));
    cloudWatch.setRegion(region);
}
protected CloudWatchCollector(String jsonConfig, AmazonCloudWatchClient client) { this((Map<String, Object>)new Yaml().load(jsonConfig), client); }
private CloudWatchCollector(Map<String, Object> config, AmazonCloudWatchClient client) { loadConfig(config, client); }
protected void loadConfig(Reader in, AmazonCloudWatchClient client) throws IOException { loadConfig((Map<String, Object>)new Yaml().load(in), client); }
private void loadConfig(ArrayList<MetricRule> rules, AmazonCloudWatchClient client) {
    synchronized (activeConfig) {
        activeConfig.client = client;
        activeConfig.rules = rules;
    }
}
@Before
public void setUp() {
    client = Mockito.mock(AmazonCloudWatchClient.class);
    registry = new CollectorRegistry();
}
public void run() throws Exception {
    adapterClient = new AmazonDynamoDBStreamsAdapterClient(new ClientConfiguration());
    adapterClient.setEndpoint(streamsEndpoint);
    dynamoDBClient = new AmazonDynamoDBClient(new ClientConfiguration());
    dynamoDBClient.setEndpoint(dynamodbEndpoint);
    cloudWatchClient = new AmazonCloudWatchClient(dynamoDBCredentials, new ClientConfiguration());

    TcpDiscoverySpi spi = new TcpDiscoverySpi();
    TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder();
    List<String> hostList = Arrays.asList(Properties.getString("hostList").split(","));
    ipFinder.setAddresses(hostList);
    spi.setIpFinder(ipFinder);

    IgniteConfiguration cfg = new IgniteConfiguration();
    cfg.setDiscoverySpi(spi);
    cfg.setClientMode(true);
    cfg.setPeerClassLoadingEnabled(true);

    @SuppressWarnings("unused")
    Ignite ignite = Ignition.start(cfg);
    cache = Ignition.ignite().cache(Properties.getString("cacheName"));
    LOG.info(">>> cache acquired");

    recordProcessorFactory = new StreamsRecordProcessorFactory(cache);
    workerConfig = new KinesisClientLibConfiguration(Properties.getString("applicationName"), streamArn,
            streamsCredentials, "ddbstreamsworker")
            .withMaxRecords(Integer.parseInt(Properties.getString("maxRecords")))
            .withInitialPositionInStream(InitialPositionInStream.valueOf(Properties.getString("initialPositionInStream")));

    LOG.info("Creating worker for stream: " + streamArn);
    worker = new Worker(recordProcessorFactory, workerConfig, adapterClient, dynamoDBClient, cloudWatchClient);
    LOG.info("Starting worker...");

    int exitCode = 0;
    try {
        worker.run();
    } catch (Throwable t) {
        LOG.error("Caught throwable while processing data.");
        t.printStackTrace();
        exitCode = 1;
    }
    System.exit(exitCode);
}
public KinesisEventFactory(final AmazonKinesisClient kinesisClient, final AmazonDynamoDBClient dynamoDbClient,
        final AmazonCloudWatchClient cloudwatchClient, final ObjectMapper mapper) {
    this.mapper = mapper;
    this.kinesisClient = kinesisClient;
    this.dynamoDbClient = dynamoDbClient;
    this.cloudwatchClient = cloudwatchClient;
}
/**
 * @param args
 */
public static void main(String[] args) throws Exception {
    System.out.println("Starting demo...");

    String srcTable = tablePrefix + "-src";
    String destTable = tablePrefix + "-dest";
    streamsCredentials = new ProfileCredentialsProvider();
    dynamoDBCredentials = new ProfileCredentialsProvider();
    recordProcessorFactory = new StreamsRecordProcessorFactory(dynamoDBCredentials, dynamodbEndpoint, serviceName, destTable);

    /* ===== REQUIRED =====
     * Users will have to explicitly instantiate and configure the adapter, then pass it to
     * the KCL worker.
     */
    adapterClient = new AmazonDynamoDBStreamsAdapterClient(streamsCredentials, new ClientConfiguration());
    adapterClient.setEndpoint(streamsEndpoint);

    dynamoDBClient = new AmazonDynamoDBClient(dynamoDBCredentials, new ClientConfiguration());
    dynamoDBClient.setEndpoint(dynamodbEndpoint);

    cloudWatchClient = new AmazonCloudWatchClient(dynamoDBCredentials, new ClientConfiguration());

    setUpTables();

    workerConfig = new KinesisClientLibConfiguration("streams-adapter-demo", streamArn, streamsCredentials, "streams-demo-worker")
            .withMaxRecords(1)
            .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    System.out.println("Creating worker for stream: " + streamArn);
    worker = new Worker(recordProcessorFactory, workerConfig, adapterClient, dynamoDBClient, cloudWatchClient);
    System.out.println("Starting worker...");
    Thread t = new Thread(worker);
    t.start();

    Thread.sleep(25000);
    worker.shutdown();
    t.join();

    if (StreamsAdapterDemoHelper.scanTable(dynamoDBClient, srcTable).getItems()
            .equals(StreamsAdapterDemoHelper.scanTable(dynamoDBClient, destTable).getItems())) {
        System.out.println("Scan result is equal.");
    } else {
        System.out.println("Tables are different!");
    }

    System.out.println("Done.");
    cleanupAndExit(0);
}
@Test
public void testKeyValueOperations() throws Exception {
    AmazonCloudWatchClient cwClient = provider.getClient();
    Assume.assumeNotNull("AWS client not null", cwClient);

    List<Metric> staleMetrics = cwClient.listMetrics(new ListMetricsRequest().withNamespace(NAMESPACE)).getMetrics()
            .stream()
            .filter(metric -> !metric.getMetricName().startsWith(CloudWatchIntegrationTest.class.getSimpleName())
                    || System.currentTimeMillis() - AWSUtils.toEpochMillis(metric.getMetricName()) > AWSUtils.TWO_WEEKS)
            .collect(Collectors.toList());

    if (staleMetrics.size() > 0) {
        Assert.fail("Found '" + CloudWatchIntegrationTest.class.getName() + "-*' metrics older than two weeks: " + staleMetrics);
    }

    WildFlyCamelContext camelctx = new WildFlyCamelContext();
    camelctx.getNamingContext().bind("cwClient", cwClient);
    camelctx.addRoutes(new RouteBuilder() {
        @Override
        public void configure() throws Exception {
            from("direct:metrics").to("aws-cw://" + NAMESPACE + "?amazonCwClient=#cwClient");
        }
    });

    camelctx.start();
    try {
        Map<String, Object> headers = new HashMap<>();
        headers.put(CwConstants.METRIC_NAME, METRIC_NAME);
        headers.put(CwConstants.METRIC_DIMENSION_NAME, DIM_NAME);
        headers.put(CwConstants.METRIC_DIMENSION_VALUE, DIM_VALUE);

        ListMetricsRequest request = new ListMetricsRequest().withNamespace(NAMESPACE).withMetricName(METRIC_NAME)
                .withDimensions(new DimensionFilter().withName(DIM_NAME).withValue(DIM_VALUE));

        List<Metric> metrics = Collections.emptyList();
        ProducerTemplate producer = camelctx.createProducerTemplate();
        for (int i = 100; i < 105 && metrics.size() == 0; i++) {
            producer.sendBodyAndHeaders("direct:metrics", new Double(i), headers);
            metrics = cwClient.listMetrics(request).getMetrics();
            System.out.println("metrics #" + i + ": " + metrics);
            Thread.sleep(1000);
        }

        // It may take several minutes for the metric to show up
        // Assert.assertEquals(1, metrics.size());
    } finally {
        camelctx.stop();
    }
}
CloudWatchClientProvider(AmazonCloudWatchClient client) { this.client = client; }