
Java Units Class Code Examples


This article collects typical usage examples of the Java class org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units. If you have been wondering what the Units class is, how it is used, or where to find examples of it, the curated code samples below should help.



The Units enum is nested inside the FileSizeRotationPolicy class of the org.apache.storm.hdfs.bolt.rotation package. Six code examples using the Units class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
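
Before turning to the examples, a minimal self-contained sketch of what the enum controls may help. This sketch is illustrative rather than taken from any of the projects below: the class name UnitsDemo and the 5 MB threshold are made up, it assumes the storm-hdfs dependency is on the classpath, and the listed enum constants (KB, MB, GB, TB) reflect the storm-hdfs source at the time of writing.

import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units;

public class UnitsDemo {
    public static void main(String[] args) {
        // Units supplies the size unit for the float threshold passed to
        // FileSizeRotationPolicy (assumed constants: KB, MB, GB, TB).
        // Both policies below express the same 5 MB rotation threshold.
        FileRotationPolicy fiveMb     = new FileSizeRotationPolicy(5.0f, Units.MB);
        FileRotationPolicy fiveMbAsKb = new FileSizeRotationPolicy(5120.0f, Units.KB);
        System.out.println("Created rotation policies: " + fiveMb + ", " + fiveMbAsKb);
    }
}

The examples below follow this pattern: pair a float count with a Units constant and hand the resulting FileSizeRotationPolicy to an HdfsBolt (or SequenceFileBolt) via withRotationPolicy(...); example 5 swaps in a time-based FileTimeRotationPolicy instead.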

Example 1: createHdfsBolt

import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units; // import the required package/class
/**
 * Create bolt which will persist ticks to HDFS.
 */
private static HdfsBolt createHdfsBolt() {

  // Use "|" instead of "," for field delimiter:
  RecordFormat format = new DelimitedRecordFormat()
    .withFieldDelimiter("|");
  // sync the filesystem after every 100 tuples:
  SyncPolicy syncPolicy = new CountSyncPolicy(100);

  // Rotate files when they reach 5MB:
  FileRotationPolicy rotationPolicy = 
    new FileSizeRotationPolicy(5.0f, Units.MB);

  // Write records to <user>/stock-ticks/ directory in HDFS:
  FileNameFormat fileNameFormat = new DefaultFileNameFormat()
    .withPath("stock-ticks/");

  HdfsBolt hdfsBolt = new HdfsBolt()
    .withFsUrl("hdfs://localhost:8020")
    .withFileNameFormat(fileNameFormat)
    .withRecordFormat(format)
    .withRotationPolicy(rotationPolicy)
    .withSyncPolicy(syncPolicy);

  return hdfsBolt;
}
 
Developer ID: amitchmca, Project: hadooparchitecturebook, Lines of code: 29, Source file: MovingAvgLocalTopologyRunner.java


Example 2: WARCHdfsBolt

import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units; // import the required package/class
public WARCHdfsBolt() {
    super();
    FileSizeRotationPolicy rotpol = new FileSizeRotationPolicy(1.0f,
            Units.GB);
    withRecordFormat(new WARCRecordFormat());
    withRotationPolicy(rotpol);
    // dummy sync policy
    withSyncPolicy(new CountSyncPolicy(10));
    // default local filesystem
    withFsUrl("file:///");
}
 
Developer ID: DigitalPebble, Project: storm-crawler, Lines of code: 12, Source file: WARCHdfsBolt.java


Example 3: main

import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units; // import the required package/class
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {

		String zkIp = "localhost";

		String nimbusHost = "sandbox.hortonworks.com";

		String zookeeperHost = zkIp +":2181";

		ZkHosts zkHosts = new ZkHosts(zookeeperHost);
		List<String> zkServers = new ArrayList<String>();
		zkServers.add(zkIp);
		// Kafka spout config: read the "spertus-weather-events" topic, storing consumer offsets under /spertus-weather-events in ZooKeeper
		SpoutConfig kafkaConfig = new SpoutConfig(zkHosts, "spertus-weather-events", "/spertus-weather-events","test_id");
		kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
		kafkaConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
		kafkaConfig.zkServers = zkServers;
		kafkaConfig.zkRoot = "/spertus-weather-events";
		kafkaConfig.zkPort = 2181;
		kafkaConfig.forceFromStart = true;
		KafkaSpout kafkaSpout = new KafkaSpout(kafkaConfig);

		TopologyBuilder builder = new TopologyBuilder();

		// HdfsBolt: write pipe-delimited records to HDFS on the sandbox host, syncing every 10 tuples and rotating files at 5 MB
		HdfsBolt hdfsBolt = new HdfsBolt().withFsUrl("hdfs://sandbox.hortonworks.com:8020")
				.withFileNameFormat(new DefaultFileNameFormat().withPath("/tmp/test"))
				.withRecordFormat(new DelimitedRecordFormat().withFieldDelimiter("|"))
				.withSyncPolicy(new CountSyncPolicy(10))
				.withRotationPolicy(new FileSizeRotationPolicy(5.0f, Units.MB));
		builder.setSpout("raw-weather-events", kafkaSpout, 1);
		builder.setBolt("filter-airports", new FilterAirportsBolt(), 1).shuffleGrouping("raw-weather-events");
		//        builder.setBolt("test-bolt", new TestBolt(), 1).shuffleGrouping("raw-weather-events");
		//        builder.setBolt("hdfs-bolt", hdfsBolt, 1).shuffleGrouping("raw-weather-events");


		Map conf = new HashMap();
		conf.put(backtype.storm.Config.TOPOLOGY_WORKERS, 4);
		conf.put(backtype.storm.Config.TOPOLOGY_DEBUG, true);
		if (args != null && args.length > 0) {
			StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
		}   else {
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("weather-topology", conf, builder.createTopology());
		}
	}
 
Developer ID: mspertus, Project: Big-Data-tutorial, Lines of code: 44, Source file: WeatherTopology.java


Example 4: initializeHDFSBolt

import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units; // import the required package/class
private boolean initializeHDFSBolt(String topology_name, String name) {
	try {

		String messageUpstreamComponent = messageComponents
				.get(messageComponents.size() - 1);

		System.out.println("[OpenSOC] ------" + name
				+ " is initializing from " + messageUpstreamComponent);

		RecordFormat format = new DelimitedRecordFormat()
				.withFieldDelimiter(
						config.getString("bolt.hdfs.field.delimiter")
								.toString()).withFields(
						new Fields("message"));

		// sync the file system after every x number of tuples
		SyncPolicy syncPolicy = new CountSyncPolicy(Integer.valueOf(config
				.getString("bolt.hdfs.batch.size").toString()));

		// rotate files when they reach certain size
		FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(
				Float.valueOf(config.getString(
						"bolt.hdfs.file.rotation.size.in.mb").toString()),
				Units.MB);

		FileNameFormat fileNameFormat = new DefaultFileNameFormat()
				.withPath(config.getString("bolt.hdfs.wip.file.path")
						.toString());

		// Post rotate action
		MoveFileAction moveFileAction = (new MoveFileAction())
				.toDestination(config.getString(
						"bolt.hdfs.finished.file.path").toString());

		HdfsBolt hdfsBolt = new HdfsBolt()
				.withFsUrl(
						config.getString("bolt.hdfs.file.system.url")
								.toString())
				.withFileNameFormat(fileNameFormat)
				.withRecordFormat(format)
				.withRotationPolicy(rotationPolicy)
				.withSyncPolicy(syncPolicy)
				.addRotationAction(moveFileAction);
		if (config.getString("bolt.hdfs.compression.codec.class") != null) {
			hdfsBolt.withCompressionCodec(config.getString(
					"bolt.hdfs.compression.codec.class").toString());
		}

		builder.setBolt(name, hdfsBolt,
				config.getInt("bolt.hdfs.parallelism.hint"))
				.shuffleGrouping(messageUpstreamComponent, "message")
				.setNumTasks(config.getInt("bolt.hdfs.num.tasks"));

	} catch (Exception e) {
		e.printStackTrace();
		System.exit(0);
	}

	return true;
}
 
Developer ID: OpenSOC, Project: opensoc-streaming, Lines of code: 61, Source file: TopologyRunner.java


Example 5: configureHDFSBolt

import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units; // import the required package/class
public void configureHDFSBolt(TopologyBuilder builder) {
	// Use a comma as the field delimiter
	
	String rootPath = topologyConfig.getProperty("hdfs.path");
	String prefix = topologyConfig.getProperty("hdfs.file.prefix");
	String fsUrl = topologyConfig.getProperty("hdfs.url");
	String sourceMetastoreUrl = topologyConfig.getProperty("hive.metastore.url");
	String hiveStagingTableName = topologyConfig.getProperty("hive.staging.table.name");
	String databaseName = topologyConfig.getProperty("hive.database.name");
	Float rotationTimeInMinutes = Float.valueOf(topologyConfig.getProperty("hdfs.file.rotation.time.minutes"));
	
	RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(",");

	//Synchronize data buffer with the filesystem every 1000 tuples
	SyncPolicy syncPolicy = new CountSyncPolicy(1000);

	// Rotate data files when they reach five MB
	//FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
	
	//Rotate every X minutes
	FileTimeRotationPolicy rotationPolicy = new FileTimeRotationPolicy(rotationTimeInMinutes, FileTimeRotationPolicy.Units.MINUTES);
	
	//Hive Partition Action
	HiveTablePartitionAction hivePartitionAction = new HiveTablePartitionAction(sourceMetastoreUrl, hiveStagingTableName, databaseName, fsUrl);
	
	//MoveFileAction moveFileAction = new MoveFileAction().toDestination(rootPath + "/working");


	
	FileNameFormat fileNameFormat = new DefaultFileNameFormat()
			.withPath(rootPath + "/staging")
			.withPrefix(prefix);

	// Instantiate the HdfsBolt
	HdfsBolt hdfsBolt = new HdfsBolt()
			 .withFsUrl(fsUrl)
	         .withFileNameFormat(fileNameFormat)
	         .withRecordFormat(format)
	         .withRotationPolicy(rotationPolicy)
	         .withSyncPolicy(syncPolicy)
	         .addRotationAction(hivePartitionAction);
	
	int hdfsBoltCount = Integer.valueOf(topologyConfig.getProperty("hdfsbolt.thread.count"));
	builder.setBolt("hdfs_bolt", hdfsBolt, hdfsBoltCount).shuffleGrouping("kafkaSpout");
}
 
Developer ID: patw, Project: storm-sample, Lines of code: 46, Source file: TruckEventProcessorKafkaTopology.java


Example 6: main

import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units; // import the required package/class
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setNumWorkers(1);

    SentenceSpout spout = new SentenceSpout();

    // sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);

    // rotate files when they reach 5MB
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);

    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("/source/")
            .withExtension(".seq");

    // create sequence format instance.
    DefaultSequenceFormat format = new DefaultSequenceFormat("timestamp", "sentence");

    SequenceFileBolt bolt = new SequenceFileBolt()
            .withFsUrl(args[0])
            .withFileNameFormat(fileNameFormat)
            .withSequenceFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy)
            .withCompressionType(SequenceFile.CompressionType.RECORD)
            .withCompressionCodec("deflate")
            .addRotationAction(new MoveFileAction().toDestination("/dest/"));




    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout(SENTENCE_SPOUT_ID, spout, 1);
    // SentenceSpout --> MyBolt
    builder.setBolt(BOLT_ID, bolt, 4)
            .shuffleGrouping(SENTENCE_SPOUT_ID);


    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();

        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        waitForSeconds(120);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
        System.exit(0);
    } else if(args.length == 2) {
        StormSubmitter.submitTopology(args[1], config, builder.createTopology());
    }
}
 
Developer ID: ptgoetz, Project: storm-hdfs, Lines of code: 53, Source file: SequenceFileTopology.java



Note: The org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units class examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please follow each project's license when using or redistributing the code, and do not republish without permission.

