This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter. If you are wondering what FirstKeyValueMatchingQualifiersFilter is for and how it is used in practice, the curated code examples below may help.
FirstKeyValueMatchingQualifiersFilter belongs to the org.apache.hadoop.hbase.filter package. Four code examples of the class are shown below, ordered by popularity by default.
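Before the examples, a minimal sketch of how the filter is typically attached to a Scan may help. The filter takes a Set<byte[]> of qualifiers and, once a cell with one of those qualifiers has been returned for a row, skips the rest of that row. The family and qualifier names below ("cf", "q1", "q2") and the class name FilterSketch are made up for illustration.

import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterSketch {
  // Builds a Scan that stops reading a row as soon as one of the named
  // qualifiers has been emitted for that row.
  public static Scan buildScan() {
    Set<byte[]> qualifiers = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    qualifiers.add(Bytes.toBytes("q1"));
    qualifiers.add(Bytes.toBytes("q2"));

    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"));
    scan.setFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
    return scan;
  }
}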
Example 1: testPartialResultsWithColumnFilter
import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter; // import the required package/class

/**
 * Test partial Result re-assembly in the presence of different filters. The Results from the
 * partial scanner should match the Results returned from a scanner that receives all of the
 * results in one RPC to the server. The partial scanner is tested with a variety of different
 * result sizes (all of which are less than the size necessary to fetch an entire row).
 * @throws Exception
 */
@Test
public void testPartialResultsWithColumnFilter() throws Exception {
  testPartialResultsWithColumnFilter(new FirstKeyOnlyFilter());
  testPartialResultsWithColumnFilter(new ColumnPrefixFilter(Bytes.toBytes("testQualifier5")));
  testPartialResultsWithColumnFilter(new ColumnRangeFilter(Bytes.toBytes("testQualifer1"), true,
      Bytes.toBytes("testQualifier7"), true));

  Set<byte[]> qualifiers = new LinkedHashSet<>();
  qualifiers.add(Bytes.toBytes("testQualifier5"));
  testPartialResultsWithColumnFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
}
Developer: fengchen8086, Project: ditb, Code lines: 19, Source: TestPartialResultsFromClientSide.java
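The overloaded helper testPartialResultsWithColumnFilter(Filter) that the test above delegates to is not included in the snippet. The sketch below shows what such a comparison can look like; it is an assumption reconstructed from the javadoc above, not the project's actual helper, and TABLE is assumed to be a test-class field of type Table populated with multi-cell rows.

private void testPartialResultsWithColumnFilter(Filter filter) throws Exception {
  // Scanner whose max result size is smaller than a full row, so the server
  // returns partial Results that the client reassembles into whole rows.
  Scan partialScan = new Scan();
  partialScan.setFilter(filter);
  partialScan.setMaxResultSize(1024); // arbitrary small size for illustration

  // Scanner that fetches everything in a single RPC.
  Scan oneRpcScan = new Scan();
  oneRpcScan.setFilter(filter);
  oneRpcScan.setMaxResultSize(Long.MAX_VALUE);

  try (ResultScanner partial = TABLE.getScanner(partialScan);
       ResultScanner whole = TABLE.getScanner(oneRpcScan)) {
    Result reassembled;
    while ((reassembled = partial.next()) != null) {
      Result oneRpc = whole.next();
      Result.compareResults(reassembled, oneRpc); // throws if the rows differ
    }
    assertNull(whole.next()); // both scanners must be exhausted together
  }
}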
Example 2: createSubmittableJob
import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter; // import the required package/class

/**
 * Sets up the actual job.
 *
 * @param conf The current configuration.
 * @param args The command line parameters.
 * @return The newly created job.
 * @throws IOException When setting up the job fails.
 */
public static Job createSubmittableJob(Configuration conf, String[] args)
    throws IOException {
  String tableName = args[0];
  String startKey = null;
  String endKey = null;
  StringBuilder sb = new StringBuilder();

  final String rangeSwitch = "--range=";

  // First argument is the table name; parsing starts from the second.
  for (int i = 1; i < args.length; i++) {
    if (args[i].startsWith(rangeSwitch)) {
      String[] startEnd = args[i].substring(rangeSwitch.length()).split(",", 2);
      if (startEnd.length != 2 || startEnd[1].contains(",")) {
        printUsage("Please specify range in such format as \"--range=a,b\" " +
            "or, with only one boundary, \"--range=,b\" or \"--range=a,\"");
        return null;
      }
      startKey = startEnd[0];
      endKey = startEnd[1];
    } else {
      // if there is no switch, assume column names
      sb.append(args[i]);
      sb.append(" ");
    }
  }

  Job job = new Job(conf, NAME + "_" + tableName);
  job.setJarByClass(RowCounter.class);
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  Set<byte[]> qualifiers = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  if (startKey != null && !startKey.equals("")) {
    scan.setStartRow(Bytes.toBytes(startKey));
  }
  if (endKey != null && !endKey.equals("")) {
    scan.setStopRow(Bytes.toBytes(endKey));
  }
  scan.setFilter(new FirstKeyOnlyFilter());
  if (sb.length() > 0) {
    for (String columnName : sb.toString().trim().split(" ")) {
      String family = StringUtils.substringBefore(columnName, ":");
      String qualifier = StringUtils.substringAfter(columnName, ":");
      if (StringUtils.isBlank(qualifier)) {
        scan.addFamily(Bytes.toBytes(family));
      } else {
        scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
      }
    }
  }
  // The specified column may or may not be part of the first key value for the row.
  // Hence do not use FirstKeyOnlyFilter if the scan has columns; instead use
  // FirstKeyValueMatchingQualifiersFilter.
  // Note: in this variant the qualifiers set is never populated, so the
  // FirstKeyOnlyFilter branch below is always the one taken.
  if (qualifiers.size() == 0) {
    scan.setFilter(new FirstKeyOnlyFilter());
  } else {
    scan.setFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
  }
  job.setOutputFormatClass(NullOutputFormat.class);
  TableMapReduceUtil.initTableMapperJob(tableName, scan,
      RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job);
  job.setNumReduceTasks(0);
  return job;
}
Developer: tenggyut, Project: HIndex, Code lines: 76, Source: RowCounter.java
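createSubmittableJob only builds the Job; a driver still has to prepare the configuration, parse the command line, and submit the job. A minimal driver sketch is shown below; the project's actual main() and printUsage() may differ, so treat the argument handling as an assumption.

public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // Let Hadoop consume generic options (-D, -conf, ...) before our own parsing.
  String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
  if (otherArgs.length < 1) {
    printUsage("Wrong number of parameters: " + otherArgs.length);
    System.exit(-1);
  }
  Job job = createSubmittableJob(conf, otherArgs);
  if (job == null) {
    System.exit(-1); // createSubmittableJob already printed the usage message
  }
  // Exit 0 only if the MapReduce job completes successfully.
  System.exit(job.waitForCompletion(true) ? 0 : 1);
}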
Example 3: createSubmittableJob
import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter; // import the required package/class

/**
 * Sets up the actual job.
 *
 * @param conf The current configuration.
 * @param args The command line parameters.
 * @return The newly created job.
 * @throws IOException When setting up the job fails.
 */
public static Job createSubmittableJob(Configuration conf, String[] args)
    throws IOException {
  String tableName = args[0];
  String startKey = null;
  String endKey = null;
  StringBuilder sb = new StringBuilder();

  final String rangeSwitch = "--range=";

  // First argument is the table name; parsing starts from the second.
  for (int i = 1; i < args.length; i++) {
    if (args[i].startsWith(rangeSwitch)) {
      String[] startEnd = args[i].substring(rangeSwitch.length()).split(",", 2);
      if (startEnd.length != 2 || startEnd[1].contains(",")) {
        printUsage("Please specify range in such format as \"--range=a,b\" " +
            "or, with only one boundary, \"--range=,b\" or \"--range=a,\"");
        return null;
      }
      startKey = startEnd[0];
      endKey = startEnd[1];
    } else {
      // if there is no switch, assume column names
      sb.append(args[i]);
      sb.append(" ");
    }
  }

  Job job = new Job(conf, NAME + "_" + tableName);
  job.setJarByClass(RowCounter.class);
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  Set<byte[]> qualifiers = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  if (startKey != null && !startKey.equals("")) {
    scan.setStartRow(Bytes.toBytes(startKey));
  }
  if (endKey != null && !endKey.equals("")) {
    scan.setStopRow(Bytes.toBytes(endKey));
  }
  if (sb.length() > 0) {
    for (String columnName : sb.toString().trim().split(" ")) {
      String family = StringUtils.substringBefore(columnName, ":");
      String qualifier = StringUtils.substringAfter(columnName, ":");
      if (StringUtils.isBlank(qualifier)) {
        scan.addFamily(Bytes.toBytes(family));
      } else {
        scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
      }
    }
  }
  // The specified column may or may not be part of the first key value for the row.
  // Hence do not use FirstKeyOnlyFilter if the scan has columns; instead use
  // FirstKeyValueMatchingQualifiersFilter.
  // Note: in this variant the qualifiers set is never populated, so the
  // FirstKeyOnlyFilter branch below is always the one taken.
  if (qualifiers.size() == 0) {
    scan.setFilter(new FirstKeyOnlyFilter());
  } else {
    scan.setFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
  }
  job.setOutputFormatClass(NullOutputFormat.class);
  TableMapReduceUtil.initTableMapperJob(tableName, scan,
      RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job);
  job.setNumReduceTasks(0);
  return job;
}
Developer: shenli-uiuc, Project: PyroDB, Code lines: 75, Source: RowCounter.java
Example 4: createSubmittableJob
import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter; // import the required package/class

/**
 * Sets up the actual job.
 *
 * @param conf The current configuration.
 * @param args The command line parameters.
 * @return The newly created job.
 * @throws IOException When setting up the job fails.
 */
public static Job createSubmittableJob(Configuration conf, String[] args)
    throws IOException {
  String tableName = args[0];
  String startKey = null;
  String endKey = null;
  StringBuilder sb = new StringBuilder();

  final String rangeSwitch = "--range=";

  // First argument is the table name; parsing starts from the second.
  for (int i = 1; i < args.length; i++) {
    if (args[i].startsWith(rangeSwitch)) {
      String[] startEnd = args[i].substring(rangeSwitch.length()).split(",", 2);
      if (startEnd.length != 2 || startEnd[1].contains(",")) {
        printUsage("Please specify range in such format as \"--range=a,b\" " +
            "or, with only one boundary, \"--range=,b\" or \"--range=a,\"");
        return null;
      }
      startKey = startEnd[0];
      endKey = startEnd[1];
    } else {
      // if there is no switch, assume column names
      sb.append(args[i]);
      sb.append(" ");
    }
  }

  Job job = new Job(conf, NAME + "_" + tableName);
  job.setJarByClass(RowCounter.class);
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  Set<byte[]> qualifiers = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  if (startKey != null && !startKey.equals("")) {
    scan.setStartRow(Bytes.toBytes(startKey));
  }
  if (endKey != null && !endKey.equals("")) {
    scan.setStopRow(Bytes.toBytes(endKey));
  }
  // Default filter; it is unconditionally replaced below once the column
  // arguments have been parsed.
  scan.setFilter(new FirstKeyOnlyFilter());
  if (sb.length() > 0) {
    for (String columnName : sb.toString().trim().split(" ")) {
      String[] fields = columnName.split(":");
      if (fields.length == 1) {
        scan.addFamily(Bytes.toBytes(fields[0]));
      } else {
        byte[] qualifier = Bytes.toBytes(fields[1]);
        qualifiers.add(qualifier);
        scan.addColumn(Bytes.toBytes(fields[0]), qualifier);
      }
    }
  }
  // The specified column may or may not be part of the first key value for the row.
  // Hence do not use FirstKeyOnlyFilter if the scan has columns; instead use
  // FirstKeyValueMatchingQualifiersFilter.
  if (qualifiers.size() == 0) {
    scan.setFilter(new FirstKeyOnlyFilter());
  } else {
    scan.setFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
  }
  job.setOutputFormatClass(NullOutputFormat.class);
  TableMapReduceUtil.initTableMapperJob(tableName, scan,
      RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job);
  job.setNumReduceTasks(0);
  return job;
}
Developer: cloud-software-foundation, Project: c5, Code lines: 75, Source: RowCounter.java
Note: The org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by their respective developers, and copyright remains with the original authors; please consult each project's license before distributing or using the code, and do not republish without permission.