本文整理汇总了Java中org.apache.storm.utils.NimbusClient类的典型用法代码示例。如果您正苦于以下问题：Java NimbusClient类的具体用法？Java NimbusClient怎么用？Java NimbusClient使用的例子？那么恭喜您，这里精选的类代码示例或许可以为您提供帮助。
NimbusClient类属于org.apache.storm.utils包,在下文中一共展示了NimbusClient类的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: main
import org.apache.storm.utils.NimbusClient; //导入依赖的package包/类
/**
 * Builds and submits the fast word-count topology, then polls Nimbus for
 * metrics every 30 seconds for 5 minutes before killing the topology.
 *
 * @param args optional; args[0] overrides the default topology name "wc-test"
 * @throws Exception if topology submission, the Nimbus client, or sleeping fails
 */
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new FastRandomSentenceSpout(), 4);
    builder.setBolt("split", new SplitSentence(), 4).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 4).fieldsGrouping("split", new Fields("word"));
    Config conf = new Config();
    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
    String name = "wc-test";
    if (args != null && args.length > 0) {
        name = args[0];
    }
    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());
    // Parameterized map instead of a raw type for the cluster config.
    Map<String, Object> clusterConf = Utils.readStormConfig();
    clusterConf.putAll(Utils.readCommandLineOpts());
    Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
    // Poll metrics every 30s; 10 iterations == 5 minutes total.
    for (int i = 0; i < 10; i++) {
        Thread.sleep(30 * 1000);
        printMetrics(client, name);
    }
    kill(client, name);
}
开发者ID:ziyunhx,项目名称:storm-net-adapter,代码行数:32,代码来源:FastWordCountTopology.java
示例2: C
import org.apache.storm.utils.NimbusClient; //导入依赖的package包/类
/**
 * Creates a cluster handle from the merged Storm configuration: a
 * LocalCluster when the "run.local" flag is true, otherwise a Thrift
 * client connected to the remote Nimbus.
 *
 * @param conf extra entries layered over the default Storm config; may be null
 */
public C(Map conf) {
    Map stormConf = Utils.readStormConfig();
    if (conf != null) {
        stormConf.putAll(conf);
    }
    // Flag may be absent; only an explicit Boolean TRUE selects local mode.
    Boolean localFlag = (Boolean) stormConf.get("run.local");
    if (localFlag != null && localFlag.booleanValue()) {
        _local = new LocalCluster();
    } else {
        _client = NimbusClient.getConfiguredClient(stormConf).getClient();
    }
}
开发者ID:ziyunhx,项目名称:storm-net-adapter,代码行数:13,代码来源:ThroughputVsLatency.java
示例3: main
import org.apache.storm.utils.NimbusClient; //导入依赖的package包/类
/**
 * Builds and submits the in-order delivery test topology, then polls Nimbus
 * for metrics every 30 seconds for 25 minutes before killing the topology.
 *
 * @param args optional; args[0] overrides the default topology name "in-order-test"
 * @throws Exception if topology submission, the Nimbus client, or sleeping fails
 */
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new InOrderSpout(), 8);
    builder.setBolt("count", new Check(), 8).fieldsGrouping("spout", new Fields("c1"));
    Config conf = new Config();
    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
    String name = "in-order-test";
    if (args != null && args.length > 0) {
        name = args[0];
    }
    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());
    // Parameterized map instead of a raw type for the cluster config.
    Map<String, Object> clusterConf = Utils.readStormConfig();
    clusterConf.putAll(Utils.readCommandLineOpts());
    Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
    // Poll metrics every 30s; 50 iterations == 25 minutes total
    // (the original comment claimed 50 minutes, which was wrong: 50 * 30s = 1500s).
    for (int i = 0; i < 50; i++) {
        Thread.sleep(30 * 1000);
        printMetrics(client, name);
    }
    kill(client, name);
}
开发者ID:ziyunhx,项目名称:storm-net-adapter,代码行数:30,代码来源:InOrderDeliveryTest.java
示例4: getNimbusClient
import org.apache.storm.utils.NimbusClient; //导入依赖的package包/类
/**
 * Builds a Thrift client for the Nimbus daemon described by the given config.
 *
 * @param stormConfig Storm configuration used to locate and connect to Nimbus
 * @return a connected Nimbus Thrift client
 */
public Nimbus.Client getNimbusClient(Config stormConfig) {
    NimbusClient configured = NimbusClient.getConfiguredClient(stormConfig);
    return configured.getClient();
}
开发者ID:MBtech,项目名称:stormbenchmark,代码行数:4,代码来源:BasicMetricsCollector.java
示例5: testStormNimbusClient
import org.apache.storm.utils.NimbusClient; //导入依赖的package包/类
/**
 * Verifies that a NimbusClient built from the local cluster's configuration
 * can fetch a non-empty Nimbus configuration string.
 */
@Test
public void testStormNimbusClient() throws Exception {
    Config conf = stormLocalCluster.getStormConf();
    String nimbusConf = NimbusClient.getConfiguredClient(conf).getClient().getNimbusConf();
    assertTrue(nimbusConf.length() > 0);
}
开发者ID:sakserv,项目名称:hadoop-mini-clusters,代码行数:7,代码来源:StormLocalClusterIntegrationTest.java
示例6: runOnStormCluster
import org.apache.storm.utils.NimbusClient; //导入依赖的package包/类
/**
 * Submits the topology to the Storm cluster under the fixed name TOPO_NAME
 * and returns a Nimbus client built from the locally-read cluster config.
 *
 * @param conf     topology configuration used for submission
 * @param topology the topology to run
 * @return a connected Nimbus Thrift client for the target cluster
 * @throws AlreadyAliveException   if a topology with TOPO_NAME is already running
 * @throws InvalidTopologyException if the topology is malformed
 * @throws AuthorizationException  if the caller lacks permission to submit
 */
private static Nimbus.Client runOnStormCluster(Config conf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    StormSubmitter.submitTopologyWithProgressBar(TOPO_NAME, conf, topology);
    Map<String, Object> clusterConf = Utils.readStormConfig();
    return NimbusClient.getConfiguredClient(clusterConf).getClient();
}
开发者ID:hortonworks,项目名称:streamline,代码行数:6,代码来源:WindowedQueryBolt_TestTopology.java
示例7: main
import org.apache.storm.utils.NimbusClient; //导入依赖的package包/类
/**
 * Reads files from an HDFS source directory with HdfsSpout and feeds each line
 * to a simple pass-through bolt; fully-consumed files are moved to the archive
 * directory and unreadable ones to the bad-files directory. Prints topology
 * metrics every 30 seconds and kills the topology after 20 minutes.
 *
 * <p>NOTE(review): the original Javadoc claimed the topology copies content to
 * a destinationDir, but no destination directory is configured anywhere below.
 */
public static void main(String[] args) throws Exception {
    // 0 - validate args
    if (args.length < 7) {
        System.err.println("Please check command line arguments.");
        System.err.println("Usage :");
        // Fixed: the 7th argument is the spout parallelism; the usage line
        // previously (and wrongly) advertised a "destinationDir" here.
        System.err.println(HdfsSpoutTopology.class.toString() + " topologyName hdfsUri fileFormat sourceDir sourceArchiveDir badDir spoutCount.");
        System.err.println(" topologyName - topology name.");
        System.err.println(" hdfsUri - hdfs name node URI");
        System.err.println(" fileFormat - Set to 'TEXT' for reading text files or 'SEQ' for sequence files.");
        System.err.println(" sourceDir - read files from this HDFS dir using HdfsSpout.");
        System.err.println(" archiveDir - after a file in sourceDir is read completely, it is moved to this HDFS location.");
        System.err.println(" badDir - files that cannot be read properly will be moved to this HDFS location.");
        System.err.println(" spoutCount - Num of spout instances.");
        System.err.println();
        System.exit(-1);
    }
    // 1 - parse cmd line args
    String topologyName = args[0];
    String hdfsUri = args[1];
    String fileFormat = args[2];
    String sourceDir = args[3];
    String sourceArchiveDir = args[4];
    String badDir = args[5];
    int spoutNum = Integer.parseInt(args[6]);
    // 2 - create and configure spout and bolt
    ConstBolt bolt = new ConstBolt();
    HdfsSpout spout = new HdfsSpout().withOutputFields("line");
    Config conf = new Config();
    conf.put(Configs.SOURCE_DIR, sourceDir);
    conf.put(Configs.ARCHIVE_DIR, sourceArchiveDir);
    conf.put(Configs.BAD_DIR, badDir);
    conf.put(Configs.READER_TYPE, fileFormat);
    conf.put(Configs.HDFS_URI, hdfsUri);
    conf.setDebug(true); // was set twice in the original; once is enough
    conf.setNumAckers(1);
    conf.setMaxTaskParallelism(1);
    // 3 - Create and configure topology
    // The original also called conf.setNumWorkers(1) earlier, which this call
    // immediately overrode; the dead call has been removed.
    conf.setNumWorkers(WORKER_NUM);
    conf.registerMetricsConsumer(LoggingMetricsConsumer.class);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(BOLT_ID, bolt, 1).shuffleGrouping(SPOUT_ID);
    // 4 - submit topology, wait for a few min and terminate it
    Map<String, Object> clusterConf = Utils.readStormConfig();
    StormSubmitter.submitTopologyWithProgressBar(topologyName, conf, builder.createTopology());
    Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
    // 5 - Print metrics every 30 sec; 40 iterations == 20 minutes, then kill
    for (int i = 0; i < 40; i++) {
        Thread.sleep(30 * 1000);
        FastWordCountTopology.printMetrics(client, topologyName);
    }
    FastWordCountTopology.kill(client, topologyName);
}
开发者ID:ziyunhx,项目名称:storm-net-adapter,代码行数:64,代码来源:HdfsSpoutTopology.java
注：本文中的org.apache.storm.utils.NimbusClient类示例整理自Github/MSDocs等源码及文档管理平台，相关代码片段筛选自各路编程大神贡献的开源项目，源码版权归原作者所有，传播和使用请参考对应项目的License；未经允许，请勿转载。
请发表评论