Java Code Examples for com.facebook.presto.spi.Connector
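The snippets below are collected from several open-source projects and show how implementations of the com.facebook.presto.spi.Connector interface are instantiated, usually inside a ConnectorFactory.create(connectorId, config) method. Most follow the same pattern: build an Airlift Bootstrap around a handful of Guice modules, feed it the catalog configuration properties, and pull the Connector (or its constituent services) out of the resulting Injector.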
Project: presto
File: ExampleConnectorFactory.java
@Override
public Connector create(final String connectorId, Map<String, String> requiredConfig)
{
    // optionalConfig and typeManager are fields of the enclosing factory class
    requireNonNull(requiredConfig, "requiredConfig is null");
    requireNonNull(optionalConfig, "optionalConfig is null");
    try {
        // A plugin is not required to use Guice; it is just very convenient
        Bootstrap app = new Bootstrap(
                new JsonModule(),
                new ExampleModule(connectorId, typeManager));
        Injector injector = app
                .strictConfig()
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(requiredConfig)
                .setOptionalConfigurationProperties(optionalConfig)
                .initialize();
        return injector.getInstance(ExampleConnector.class);
    }
    catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
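A minimal sketch of how such a factory is typically driven, given an already-constructed factory instance; this mirrors the test snippets further down this page, and the connector id and the metadata-uri property are illustrative assumptions, not configuration keys confirmed by this snippet:

// Hypothetical caller; the property name and value are illustrative only.
Connector connector = factory.create("example", ImmutableMap.<String, String>builder()
        .put("metadata-uri", "http://localhost:8080/example-metadata.json")  // assumed key
        .build());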
Project: presto
File: JdbcConnectorFactory.java
@Override
public Connector create(String connectorId, Map<String, String> requiredConfig)
{
    requireNonNull(requiredConfig, "requiredConfig is null");
    requireNonNull(optionalConfig, "optionalConfig is null");
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        Bootstrap app = new Bootstrap(new JdbcModule(connectorId), module);
        Injector injector = app
                .strictConfig()
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(requiredConfig)
                .setOptionalConfigurationProperties(optionalConfig)
                .initialize();
        return injector.getInstance(JdbcConnector.class);
    }
    catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
Project: presto
File: TestHiveConnectorFactory.java
private static void assertCreateConnector(String metastoreUri)
{
    HiveConnectorFactory connectorFactory = new HiveConnectorFactory(
            "hive-test",
            ImmutableMap.<String, String>builder()
                    .put("node.environment", "test")
                    .put("hive.metastore.uri", metastoreUri)
                    .build(),
            HiveConnector.class.getClassLoader(),
            null,
            new TypeRegistry(),
            new GroupByHashPageIndexerFactory());
    Connector connector = connectorFactory.create("hive-test", ImmutableMap.<String, String>of());
    assertInstanceOf(connector.getMetadata(), ClassLoaderSafeConnectorMetadata.class);
    assertInstanceOf(connector.getSplitManager(), ClassLoaderSafeConnectorSplitManager.class);
    assertInstanceOf(connector.getPageSourceProvider(), ConnectorPageSourceProvider.class);
}
Project: presto-riak
File: RiakConnectorFactory.java
@Override
public Connector create(final String connectorId, Map<String, String> requiredConfig) {
    checkNotNull(requiredConfig, "requiredConfig is null");
    checkNotNull(optionalConfig, "optionalConfig is null");
    try {
        // A plugin is not required to use Guice; it is just very convenient
        Bootstrap app = new Bootstrap(
                new JsonModule(),
                new RiakModule(connectorId, typeManager));
        Injector injector = app
                .strictConfig()
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(requiredConfig)
                .setOptionalConfigurationProperties(optionalConfig)
                .initialize();
        return injector.getInstance(RiakConnector.class);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
Project: presto
File: RedisConnectorFactory.java
@Override
public Connector create(String connectorId, Map<String, String> config)
{
    requireNonNull(connectorId, "connectorId is null");
    requireNonNull(config, "config is null");
    try {
        Bootstrap app = new Bootstrap(
                new JsonModule(),
                new RedisConnectorModule(),
                binder -> {
                    binder.bind(RedisConnectorId.class).toInstance(new RedisConnectorId(connectorId));
                    binder.bind(TypeManager.class).toInstance(typeManager);
                    binder.bind(NodeManager.class).toInstance(nodeManager);
                    if (tableDescriptionSupplier.isPresent()) {
                        binder.bind(new TypeLiteral<Supplier<Map<SchemaTableName, RedisTableDescription>>>() {}).toInstance(tableDescriptionSupplier.get());
                    }
                    else {
                        binder.bind(new TypeLiteral<Supplier<Map<SchemaTableName, RedisTableDescription>>>() {})
                                .to(RedisTableDescriptionSupplier.class)
                                .in(Scopes.SINGLETON);
                    }
                });
        Injector injector = app.strictConfig()
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(config)
                .setOptionalConfigurationProperties(optionalConfig)
                .initialize();
        return injector.getInstance(RedisConnector.class);
    }
    catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
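The binder lambda above illustrates a common testing seam: when a table-description Supplier has been handed to the factory (typically by tests), it is bound directly as an instance; otherwise the binding falls back to the production RedisTableDescriptionSupplier in SINGLETON scope.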
Project: presto
File: TestRedisPlugin.java
@Test
public void testStartup()
{
    ConnectorFactory factory = testConnectorExists();
    Connector c = factory.create("test-connector", ImmutableMap.<String, String>builder()
            .put("redis.table-names", "test")
            .put("redis.nodes", "localhost:6379")
            .build());
    assertNotNull(c);
}
Project: cloudata
File: SqlEngine.java
public SqlEngine(StructuredStore store, ExecutorService executor) {
    this.store = store;
    this.executor = executor;

    // Wire up the Presto engine components by hand rather than through Guice
    MetadataManager metadataManager = new MetadataManager();
    SplitManager splitManager = new SplitManager(Sets.<ConnectorSplitManager> newHashSet());
    this.dataStreamManager = new DataStreamManager();
    HandleResolver handleResolver = new HandleResolver();
    Map<String, ConnectorFactory> connectorFactories = Maps.newHashMap();
    Map<String, Connector> globalConnectors = Maps.newHashMap();
    RecordSinkManager recordSinkManager = new RecordSinkManager();
    Map<String, ConnectorOutputHandleResolver> handleIdResolvers = Maps.newHashMap();
    OutputTableHandleResolver outputTableHandleResolver = new OutputTableHandleResolver(handleIdResolvers);
    this.connectorManager = new ConnectorManager(metadataManager, splitManager, dataStreamManager,
            recordSinkManager, handleResolver, outputTableHandleResolver, connectorFactories, globalConnectors);

    // NodeManager nodeManager = new InMemoryNodeManager();
    PlanOptimizersFactory planOptimizersFactory = new PlanOptimizersFactory(metadataManager, splitManager);
    List<PlanOptimizer> planOptimizers = planOptimizersFactory.get();
    this.metadataManager = metadataManager;
    this.planOptimizers = planOptimizers;
    this.periodicImportManager = new StubPeriodicImportManager();
    this.storageManager = new StubStorageManager();

    // Register the Cloudata connector factory and open a connection for the catalog
    NodeManager nodeManager = new InMemoryNodeManager();
    CloudataConnectorFactory cloudataConnectorFactory = new CloudataConnectorFactory(nodeManager,
            Maps.<String, String> newHashMap(), store);
    connectorManager.addConnectorFactory(cloudataConnectorFactory);
    connectorManager.createConnection(catalogName, CloudataConnectorFactory.PROVIDER_ID,
            Maps.<String, String> newHashMap());
    this.cloudataConnector = cloudataConnectorFactory.get(catalogName);
}
Project: cloudata
File: CloudataConnectorFactory.java
@Override
public Connector create(final String connectorId, Map<String, String> requiredConfig) {
    checkNotNull(requiredConfig, "requiredConfig is null");
    checkNotNull(optionalConfig, "optionalConfig is null");
    try {
        // // A plugin is not required to use Guice; it is just very convenient
        // Bootstrap app = new Bootstrap(new JsonModule(), new ExampleModule(connectorId));
        //
        // Injector injector = app.strictConfig().doNotInitializeLogging()
        //         .setRequiredConfigurationProperties(requiredConfig)
        //         .setOptionalConfigurationProperties(optionalConfig).initialize();

        ClassToInstanceMap<Object> services = ImmutableClassToInstanceMap.builder()
                .put(ConnectorMetadata.class, new CloudataConnectorMetadata(connectorId, store))
                .put(ConnectorSplitManager.class, new CloudataSplitManager(nodeManager, connectorId))
                .put(ConnectorRecordSetProvider.class, new CloudataConnectorRecordSetProvider())
                .put(ConnectorHandleResolver.class, new CloudataConnectorHandleResolver())
                .build();

        CloudataConnector connector = new CloudataConnector(store, services);
        connectors.put(connectorId, connector);
        return connector;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
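Instead of asking an Injector for the connector, this factory assembles its services into a Guava ClassToInstanceMap. A minimal sketch of how CloudataConnector could resolve a service from that map; the services field and this accessor are assumptions, not code shown on this page:

// Hypothetical accessor; ClassToInstanceMap.getInstance is type-safe, so no cast is needed.
public ConnectorMetadata getMetadata() {
    return services.getInstance(ConnectorMetadata.class);
}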
Project: presto
File: HiveConnectorFactory.java
@Override
public Connector create(String connectorId, Map<String, String> config)
{
    requireNonNull(config, "config is null");
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        Bootstrap app = new Bootstrap(
                new NodeModule(),
                new MBeanModule(),
                new JsonModule(),
                new HiveClientModule(connectorId, metastore, typeManager, pageIndexerFactory),
                installModuleIf(
                        SecurityConfig.class,
                        security -> ALLOW_ALL_ACCESS_CONTROL.equalsIgnoreCase(security.getSecuritySystem()),
                        new NoSecurityModule()),
                installModuleIf(
                        SecurityConfig.class,
                        security -> "read-only".equalsIgnoreCase(security.getSecuritySystem()),
                        new ReadOnlySecurityModule()),
                installModuleIf(
                        SecurityConfig.class,
                        security -> "sql-standard".equalsIgnoreCase(security.getSecuritySystem()),
                        new SqlStandardSecurityModule()),
                binder -> {
                    MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer();
                    binder.bind(MBeanServer.class).toInstance(new RebindSafeMBeanServer(platformMBeanServer));
                });

        Injector injector = app
                .strictConfig()
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(config)
                .setOptionalConfigurationProperties(optionalConfig)
                .initialize();

        LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class);
        ConnectorMetadata metadata = injector.getInstance(ConnectorMetadata.class);
        ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class);
        ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class);
        ConnectorPageSinkProvider pageSinkProvider = injector.getInstance(ConnectorPageSinkProvider.class);
        HiveSessionProperties hiveSessionProperties = injector.getInstance(HiveSessionProperties.class);
        HiveTableProperties hiveTableProperties = injector.getInstance(HiveTableProperties.class);
        ConnectorAccessControl accessControl = injector.getInstance(ConnectorAccessControl.class);

        return new HiveConnector(
                lifeCycleManager,
                new ClassLoaderSafeConnectorMetadata(metadata, classLoader),
                new ClassLoaderSafeConnectorSplitManager(splitManager, classLoader),
                new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader),
                new ClassLoaderSafeConnectorPageSinkProvider(pageSinkProvider, classLoader),
                ImmutableSet.of(),
                hiveSessionProperties.getSessionProperties(),
                hiveTableProperties.getTableProperties(),
                accessControl);
    }
    catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
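The three installModuleIf calls install exactly one security module, keyed off SecurityConfig. A minimal sketch of steering that choice through the factory's config map; the hive.security property name is an assumption (only the values read-only and sql-standard appear literally in the snippet, with allow-all presumably behind the ALLOW_ALL_ACCESS_CONTROL constant):

// Hypothetical configuration; "hive.security" is an assumed key.
Connector connector = connectorFactory.create("hive", ImmutableMap.<String, String>builder()
        .put("hive.metastore.uri", "thrift://localhost:9083")
        .put("hive.security", "sql-standard")
        .build());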
Project: presto-kafka-connector
File: KafkaConnectorFactory.java
@Override
public Connector create(String connectorId, Map<String, String> config)
{
    checkNotNull(config, "config is null");
    try {
        KafkaClientModule kafkaClientModule = new KafkaClientModule(connectorId);
        Bootstrap app = new Bootstrap(
                new NodeModule(),
                new JsonModule(),
                kafkaClientModule);
        Injector injector = app.strictConfig().doNotInitializeLogging()
                .setRequiredConfigurationProperties(config)
                .quiet()
                .requireExplicitBindings(false)
                .setOptionalConfigurationProperties(optionalConfig).initialize();

        KafkaClientConfig clientConfig = KafkaClientConfig.INSTANCE;
        KafkaPluginConfig pluginConfig = KafkaPluginConfig.INSTANCE;
        KafkaConnectorId kafkaConnectorId = KafkaConnectorId.INSTANCE;
        KafkaHiveClient hiveClient = new KafkaHiveClient(kafkaConnectorId, clientConfig, pluginConfig);
        KafkaMetadata kafkaMetadata = new KafkaMetadata(hiveClient, kafkaConnectorId);
        KafkaSplitManager kafkaSplitManager = new KafkaSplitManager(hiveClient, kafkaConnectorId, clientConfig);
        KafkaRecordSetProvider kafkaRecordSetProvider = new KafkaRecordSetProvider(kafkaConnectorId);
        KafkaHandleResolver kafkaHandleResolver = new KafkaHandleResolver(kafkaConnectorId);

        ConnectorMetadata connMetadata = new ClassLoaderSafeConnectorMetadata(kafkaMetadata, classLoader);
        ConnectorSplitManager connSplitManager = new ClassLoaderSafeConnectorSplitManager(kafkaSplitManager, classLoader);
        ConnectorRecordSetProvider connRecordSetProvider = new ClassLoaderSafeConnectorRecordSetProvider(kafkaRecordSetProvider, classLoader);
        ConnectorHandleResolver connHandleResolver = new ClassLoaderSafeConnectorHandleResolver(kafkaHandleResolver, classLoader);

        return new KafkaConnector(connMetadata, connSplitManager,
                connRecordSetProvider, connHandleResolver);

        // return injector.getInstance(KafkaConnector.class);
        // KafkaMetadata kafkaMetadata = injector.getInstance(KafkaMetadata.class);
        // KafkaSplitManager kafkaSplitManager = injector.getInstance(KafkaSplitManager.class);
        // KafkaRecordSetProvider kafkaRecordSetProvider = injector.getInstance(KafkaRecordSetProvider.class);
        // KafkaHandleResolver kafkaHandleResolver = injector.getInstance(KafkaHandleResolver.class);
        // return new KafkaConnector(kafkaMetadata, kafkaSplitManager,
        //         kafkaRecordSetProvider, kafkaHandleResolver);
        // return new KafkaConnector(
        //         new ClassLoaderSafeConnectorMetadata(kafkaMetadata, classLoader),
        //         new ClassLoaderSafeConnectorSplitManager(kafkaSplitManager, classLoader),
        //         new ClassLoaderSafeConnectorRecordSetProvider(kafkaRecordSetProvider, classLoader),
        //         new ClassLoaderSafeConnectorHandleResolver(kafkaHandleResolver, classLoader));
    } catch (Exception e) {
        e.printStackTrace();
        throw Throwables.propagate(e);
    }
}