This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.FSDataInputStreamWrapper. If you have been wondering what exactly the FSDataInputStreamWrapper class is for, how to use it, or where to find usage examples, the curated class code examples below may help.
The FSDataInputStreamWrapper class belongs to the org.apache.hadoop.hbase.io package. A total of 20 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
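Before the individual examples, here is a minimal, self-contained sketch (not taken from the projects below) of how the wrapper is typically constructed and queried. It uses only methods that also appear in the examples that follow — the constructor taking a FileSystem and a Path, shouldUseHBaseChecksum(), getStream(boolean), and close(); the file path and the surrounding setup are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;

public class FSDataInputStreamWrapperSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(args[0]); // path to an existing HFile (hypothetical)
    FSDataInputStreamWrapper wrapper = new FSDataInputStreamWrapper(fs, path);
    try {
      // Initially the wrapper reads through the filesystem-checksummed stream.
      System.out.println("shouldUseHBaseChecksum = " + wrapper.shouldUseHBaseChecksum());
      // getStream(false) returns the stream backed by filesystem-level checksums;
      // getStream(true) is the stream used once the reader switches to HBase-level checksums.
      long fileSize = fs.getFileStatus(path).getLen();
      System.out.println("file size = " + fileSize + ", stream = " + wrapper.getStream(false));
    } finally {
      wrapper.close();
    }
  }
}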
Example 1: pickReaderVersion
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* Method returns the reader given the specified arguments.
* TODO This is a bad abstraction. See HBASE-6635.
*
* @param path hfile's path
* @param fsdis stream of path's file
* @param size max size of the trailer.
* @param cacheConf Cache configuration values, cannot be null.
* @param hfs
* @return an appropriate instance of HFileReader
* @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SF_SWITCH_FALLTHROUGH", justification = "Intentional")
private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis, long size,
CacheConfig cacheConf, HFileSystem hfs, Configuration conf) throws IOException {
FixedFileTrailer trailer = null;
try {
boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
assert !isHBaseChecksum; // Initially we must read with FS checksum.
trailer = FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
switch (trailer.getMajorVersion()) {
case 2:
return new HFileReaderV2(path, trailer, fsdis, size, cacheConf, hfs, conf);
case 3:
return new HFileReaderV3(path, trailer, fsdis, size, cacheConf, hfs, conf);
default:
throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion());
}
} catch (Throwable t) {
try {
fsdis.close();
} catch (Throwable t2) {
LOG.warn("Error closing fsdis FSDataInputStreamWrapper", t2);
}
throw new CorruptHFileException("Problem reading HFile Trailer from file " + path, t);
}
}
Author: fengchen8086, Project: ditb, Lines: 38, Source: HFile.java
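As a follow-up to Example 1: pickReaderVersion is a private factory, so callers do not invoke it directly; in HBase releases of roughly this vintage it is normally reached through the public HFile.createReader entry point, which wraps the file in an FSDataInputStreamWrapper and then selects the reader from the trailer's major version. Below is a hedged, self-contained sketch of that path (not taken from the article's source projects); the HFile path passed on the command line is hypothetical, and the createReader call uses the four-argument HBase 1.x-era signature.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;

public class OpenHFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path hfilePath = new Path(args[0]); // path to an existing HFile (hypothetical)
    // createReader builds the FSDataInputStreamWrapper internally and delegates to
    // the version-specific reader chosen from the fixed file trailer.
    HFile.Reader reader = HFile.createReader(fs, hfilePath, new CacheConfig(conf), conf);
    try {
      reader.loadFileInfo();
      System.out.println("entries = " + reader.getEntries());
    } finally {
      reader.close();
    }
  }
}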
Example 2: testNewBlocksHaveDefaultChecksum
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
@Test
public void testNewBlocksHaveDefaultChecksum() throws IOException {
Path path = new Path(TEST_UTIL.getDataTestDir(), "default_checksum");
FSDataOutputStream os = fs.create(path);
HFileContext meta = new HFileContextBuilder().build();
HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
DataOutputStream dos = hbw.startWriting(BlockType.DATA);
for (int i = 0; i < 1000; ++i)
dos.writeInt(i);
hbw.writeHeaderAndData(os);
int totalSize = hbw.getOnDiskSizeWithHeader();
os.close();
// Use hbase checksums.
assertEquals(true, hfs.useHBaseChecksum());
FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(
is, totalSize, (HFileSystem) fs, path, meta);
HFileBlock b = hbr.readBlockData(0, -1, -1, false);
assertEquals(b.getChecksumType(), ChecksumType.getDefaultChecksumType().getCode());
}
Author: fengchen8086, Project: ditb, Lines: 24, Source: TestChecksum.java
Example 3: cacheBlocks
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* Read all blocks from {@code path} to populate {@code blockCache}.
*/
private static void cacheBlocks(Configuration conf, CacheConfig cacheConfig, FileSystem fs,
Path path, HFileContext cxt) throws IOException {
FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, path);
long fileSize = fs.getFileStatus(path).getLen();
FixedFileTrailer trailer =
FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize);
HFileReaderV2 reader = new HFileReaderV2(path, trailer, fsdis, fileSize, cacheConfig,
fsdis.getHfs(), conf);
reader.loadFileInfo();
long offset = trailer.getFirstDataBlockOffset(),
max = trailer.getLastDataBlockOffset();
List<HFileBlock> blocks = new ArrayList<HFileBlock>(4);
HFileBlock block;
while (offset <= max) {
block = reader.readBlock(offset, -1, /* cacheBlock */ true, /* pread */ false,
/* isCompaction */ false, /* updateCacheMetrics */ true, null, null);
offset += block.getOnDiskSizeWithHeader();
blocks.add(block);
}
LOG.info("read " + Iterables.toString(blocks));
}
Author: fengchen8086, Project: ditb, Lines: 25, Source: TestLazyDataBlockDecompression.java
Example 4: pickReaderVersion
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* Method returns the reader given the specified arguments.
* TODO This is a bad abstraction. See HBASE-6635.
*
* @param path hfile's path
* @param fsdis stream of path's file
* @param size max size of the trailer.
* @param cacheConf Cache configuration values, cannot be null.
* @param hfs
* @return an appropriate instance of HFileReader
* @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
*/
private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis,
long size, CacheConfig cacheConf, HFileSystem hfs, Configuration conf) throws IOException {
FixedFileTrailer trailer = null;
try {
boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
assert !isHBaseChecksum; // Initially we must read with FS checksum.
trailer = FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
switch (trailer.getMajorVersion()) {
case 2:
return new HFileReaderV2(path, trailer, fsdis, size, cacheConf, hfs, conf);
case 3:
return new HFileReaderV3(path, trailer, fsdis, size, cacheConf, hfs, conf);
default:
throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion());
}
} catch (Throwable t) {
try {
fsdis.close();
} catch (Throwable t2) {
LOG.warn("Error closing fsdis FSDataInputStreamWrapper", t2);
}
throw new CorruptHFileException("Problem reading HFile Trailer from file " + path, t);
}
}
Author: grokcoder, Project: pbase, Lines: 37, Source: HFile.java
Example 5: postStoreFileReaderOpen
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* @param fs filesystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
* @param size Full size of the file
* @param cacheConf
* @param r original reference file. This will be non-null only when reading a split file.
* @param reader the base reader instance
* @return The reader to use
* @throws IOException
*/
public StoreFile.Reader postStoreFileReaderOpen(final FileSystem fs, final Path p,
final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
final Reference r, StoreFile.Reader reader) throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env : coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
Thread currentThread = Thread.currentThread();
ClassLoader cl = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(env.getClassLoader());
reader = ((RegionObserver) env.getInstance()).postStoreFileReaderOpen(ctx, fs, p, in,
size, cacheConf, r, reader);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
} finally {
currentThread.setContextClassLoader(cl);
}
if (ctx.shouldComplete()) {
break;
}
}
}
return reader;
}
Author: tenggyut, Project: HIndex, Lines: 37, Source: RegionCoprocessorHost.java
Example 6: preStoreFileReaderOpen
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
@Override
public StoreFile.Reader preStoreFileReaderOpen(
ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs, Path p,
FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, Reference r,
StoreFile.Reader reader) throws IOException {
try {
long delay = rnd.nextInt(3);
LOG.info("@@@ Delaying region " +
ctx.getEnvironment().getRegion().getRegionInfo().
getRegionNameAsString() + " for " + delay + " seconds...");
Thread.sleep(delay * 1000);
} catch (InterruptedException ie) {
LOG.error(ie);
}
return reader;
}
Author: lhfei, Project: hbase-in-action, Lines: 17, Source: DelayRegionCloseObserver.java
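As a usage note for Example 6: an observer like DelayRegionCloseObserver only receives the FSDataInputStreamWrapper in its pre/postStoreFileReaderOpen hooks once it has been registered as a region coprocessor on a table (or globally). Below is a hedged sketch of a per-table registration using the HBase 1.x client API (not taken from the article's source projects); the table name, column family, and the observer's fully qualified class name are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RegisterObserverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo_table"));
      desc.addFamily(new HColumnDescriptor("cf"));
      // Once registered, the observer's pre/postStoreFileReaderOpen hooks see the
      // FSDataInputStreamWrapper for every store file the region opens.
      desc.addCoprocessor("com.example.DelayRegionCloseObserver"); // hypothetical FQCN
      admin.createTable(desc);
    }
  }
}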
Example 7: preStoreFileReaderOpen
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* @param fs filesystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
* @param size Full size of the file
* @param cacheConf
* @param r original reference file. This will be non-null only when reading a split file.
* @return a Reader instance to use instead of the base reader if overriding
* default behavior, null otherwise
* @throws IOException
*/
public StoreFileReader preStoreFileReaderOpen(final FileSystem fs, final Path p,
final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
final Reference r) throws IOException {
if (coprocEnvironments.isEmpty()) {
return null;
}
return execOperationWithResult(
new ObserverOperationWithResult<RegionObserver, StoreFileReader>(regionObserverGetter, null) {
@Override
public StoreFileReader call(RegionObserver observer) throws IOException {
return observer.preStoreFileReaderOpen(this, fs, p, in, size, cacheConf, r,
getResult());
}
});
}
Author: apache, Project: hbase, Lines: 27, Source: RegionCoprocessorHost.java
Example 8: postStoreFileReaderOpen
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* @param fs filesystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
* @param size Full size of the file
* @param cacheConf
* @param r original reference file. This will be non-null only when reading a split file.
* @param reader the base reader instance
* @return The reader to use
* @throws IOException
*/
public StoreFileReader postStoreFileReaderOpen(final FileSystem fs, final Path p,
final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
final Reference r, final StoreFileReader reader) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return reader;
}
return execOperationWithResult(
new ObserverOperationWithResult<RegionObserver, StoreFileReader>(regionObserverGetter, reader) {
@Override
public StoreFileReader call(RegionObserver observer) throws IOException {
return observer.postStoreFileReaderOpen(this, fs, p, in, size, cacheConf, r,
getResult());
}
});
}
Author: apache, Project: hbase, Lines: 27, Source: RegionCoprocessorHost.java
Example 9: FSReaderImpl
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
FSReaderImpl(FSDataInputStreamWrapper stream, long fileSize, HFileSystem hfs, Path path,
HFileContext fileContext) throws IOException {
this.fileSize = fileSize;
this.hfs = hfs;
if (path != null) {
this.pathName = path.toString();
}
this.fileContext = fileContext;
this.hdrSize = headerSize(fileContext.isUseHBaseChecksum());
this.streamWrapper = stream;
// Older versions of HBase didn't support checksum.
this.streamWrapper.prepareForBlockReader(!fileContext.isUseHBaseChecksum());
defaultDecodingCtx = new HFileBlockDefaultDecodingContext(fileContext);
encodedBlockDecodingCtx = defaultDecodingCtx;
}
Author: apache, Project: hbase, Lines: 17, Source: HFileBlock.java
Example 10: testNewBlocksHaveDefaultChecksum
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
@Test
public void testNewBlocksHaveDefaultChecksum() throws IOException {
Path path = new Path(TEST_UTIL.getDataTestDir(), "default_checksum");
FSDataOutputStream os = fs.create(path);
HFileContext meta = new HFileContextBuilder().build();
HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
DataOutputStream dos = hbw.startWriting(BlockType.DATA);
for (int i = 0; i < 1000; ++i)
dos.writeInt(i);
hbw.writeHeaderAndData(os);
int totalSize = hbw.getOnDiskSizeWithHeader();
os.close();
// Use hbase checksums.
assertEquals(true, hfs.useHBaseChecksum());
FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(
is, totalSize, (HFileSystem) fs, path, meta);
HFileBlock b = hbr.readBlockData(0, -1, false, false);
assertEquals(b.getChecksumType(), ChecksumType.getDefaultChecksumType().getCode());
}
Author: apache, Project: hbase, Lines: 24, Source: TestChecksum.java
Example 11: cacheBlocks
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* Read all blocks from {@code path} to populate {@code blockCache}.
*/
private static void cacheBlocks(Configuration conf, CacheConfig cacheConfig, FileSystem fs,
Path path, HFileContext cxt) throws IOException {
FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, path);
long fileSize = fs.getFileStatus(path).getLen();
FixedFileTrailer trailer =
FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize);
HFile.Reader reader = new HFileReaderImpl(path, trailer, fsdis, fileSize, cacheConfig,
fsdis.getHfs(), conf);
reader.loadFileInfo();
long offset = trailer.getFirstDataBlockOffset(),
max = trailer.getLastDataBlockOffset();
List<HFileBlock> blocks = new ArrayList<>(4);
HFileBlock block;
while (offset <= max) {
block = reader.readBlock(offset, -1, /* cacheBlock */ true, /* pread */ false,
/* isCompaction */ false, /* updateCacheMetrics */ true, null, null);
offset += block.getOnDiskSizeWithHeader();
blocks.add(block);
}
LOG.info("read " + Iterables.toString(blocks));
}
Author: apache, Project: hbase, Lines: 25, Source: TestLazyDataBlockDecompression.java
Example 12: pickReaderVersion
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* Method returns the reader given the specified arguments.
* TODO This is a bad abstraction. See HBASE-6635.
*
* @param path hfile's path
* @param fsdis stream of path's file
* @param size max size of the trailer.
* @param cacheConf Cache configuration values, cannot be null.
* @param hfs
* @return an appropriate instance of HFileReader
* @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
*/
private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis,
long size, CacheConfig cacheConf, HFileSystem hfs) throws IOException {
FixedFileTrailer trailer = null;
try {
boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
assert !isHBaseChecksum; // Initially we must read with FS checksum.
trailer = FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
switch (trailer.getMajorVersion()) {
case 2:
return new HFileReaderV2(
path, trailer, fsdis, size, cacheConf, hfs);
default:
throw new CorruptHFileException("Invalid HFile version " + trailer.getMajorVersion());
}
} catch (Throwable t) {
try {
fsdis.close();
} catch (Throwable t2) {
LOG.warn("Error closing fsdis FSDataInputStreamWrapper", t2);
}
throw new CorruptHFileException("Problem reading HFile Trailer from file " + path, t);
}
}
Author: cloud-software-foundation, Project: c5, Lines: 36, Source: HFile.java
Example 13: preStoreFileReaderOpen
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* @param fs filesystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
* @param size Full size of the file
* @param cacheConf
* @param r original reference file. This will be non-null only when reading a split file.
* @return a Reader instance to use instead of the base reader if overriding
* default behavior, null otherwise
* @throws IOException
*/
public StoreFile.Reader preStoreFileReaderOpen(final FileSystem fs, final Path p,
final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
final Reference r) throws IOException {
return execOperationWithResult(null,
coprocessors.isEmpty() ? null : new RegionOperationWithResult<StoreFile.Reader>() {
@Override
public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
setResult(oserver.preStoreFileReaderOpen(ctx, fs, p, in, size, cacheConf, r, getResult()));
}
});
}
Author: fengchen8086, Project: ditb, Lines: 24, Source: RegionCoprocessorHost.java
Example 14: postStoreFileReaderOpen
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* @param fs filesystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
* @param size Full size of the file
* @param cacheConf
* @param r original reference file. This will be non-null only when reading a split file.
* @param reader the base reader instance
* @return The reader to use
* @throws IOException
*/
public StoreFile.Reader postStoreFileReaderOpen(final FileSystem fs, final Path p,
final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
final Reference r, final StoreFile.Reader reader) throws IOException {
return execOperationWithResult(reader,
coprocessors.isEmpty() ? null : new RegionOperationWithResult<StoreFile.Reader>() {
@Override
public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
setResult(oserver.postStoreFileReaderOpen(ctx, fs, p, in, size, cacheConf, r, getResult()));
}
});
}
Author: fengchen8086, Project: ditb, Lines: 24, Source: RegionCoprocessorHost.java
Example 15: FSReaderImpl
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
public FSReaderImpl(FSDataInputStreamWrapper stream, long fileSize, HFileSystem hfs, Path path,
HFileContext fileContext) throws IOException {
super(fileSize, hfs, path, fileContext);
this.streamWrapper = stream;
// Older versions of HBase didn't support checksum.
this.streamWrapper.prepareForBlockReader(!fileContext.isUseHBaseChecksum());
defaultDecodingCtx = new HFileBlockDefaultDecodingContext(fileContext);
encodedBlockDecodingCtx = defaultDecodingCtx;
}
Author: fengchen8086, Project: ditb, Lines: 10, Source: HFileBlock.java
Example 16: createHFileContext
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
protected HFileContext createHFileContext(FSDataInputStreamWrapper fsdis, long fileSize,
HFileSystem hfs, Path path, FixedFileTrailer trailer) throws IOException {
return new HFileContextBuilder()
.withIncludesMvcc(this.includesMemstoreTS)
.withCompression(this.compressAlgo)
.withHBaseCheckSum(trailer.getMinorVersion() >= MINOR_VERSION_WITH_CHECKSUM)
.build();
}
Author: fengchen8086, Project: ditb, Lines: 9, Source: HFileReaderV2.java
Example 17: preStoreFileReaderOpen
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
@Override
public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
Reference r, Reader reader) throws IOException {
ctPreStoreFileReaderOpen.incrementAndGet();
return null;
}
Author: fengchen8086, Project: ditb, Lines: 8, Source: SimpleRegionObserver.java
Example 18: postStoreFileReaderOpen
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
@Override
public Reader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
Reference r, Reader reader) throws IOException {
ctPostStoreFileReaderOpen.incrementAndGet();
return reader;
}
Author: fengchen8086, Project: ditb, Lines: 8, Source: SimpleRegionObserver.java
Example 19: testAllChecksumTypes
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* Test all checksum types by writing and reading back blocks.
*/
@Test
public void testAllChecksumTypes() throws IOException {
List<ChecksumType> cktypes = new ArrayList<>(Arrays.asList(ChecksumType.values()));
for (Iterator<ChecksumType> itr = cktypes.iterator(); itr.hasNext(); ) {
ChecksumType cktype = itr.next();
Path path = new Path(TEST_UTIL.getDataTestDir(), "checksum" + cktype.getName());
FSDataOutputStream os = fs.create(path);
HFileContext meta = new HFileContextBuilder()
.withChecksumType(cktype).build();
HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
DataOutputStream dos = hbw.startWriting(BlockType.DATA);
for (int i = 0; i < 1000; ++i)
dos.writeInt(i);
hbw.writeHeaderAndData(os);
int totalSize = hbw.getOnDiskSizeWithHeader();
os.close();
// Use hbase checksums.
assertEquals(true, hfs.useHBaseChecksum());
FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(
is, totalSize, (HFileSystem) fs, path, meta);
HFileBlock b = hbr.readBlockData(0, -1, -1, false);
ByteBuffer data = b.getBufferWithoutHeader();
for (int i = 0; i < 1000; i++) {
assertEquals(i, data.getInt());
}
boolean exception_thrown = false;
try {
data.getInt();
} catch (BufferUnderflowException e) {
exception_thrown = true;
}
assertTrue(exception_thrown);
assertEquals(0, HFile.getChecksumFailuresCount());
}
}
Author: fengchen8086, Project: ditb, Lines: 43, Source: TestChecksum.java
Example 20: createReaderFromStream
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; // import the required package/class
/**
* This factory method is used only by unit tests
*/
static Reader createReaderFromStream(Path path,
FSDataInputStream fsdis, long size, CacheConfig cacheConf, Configuration conf)
throws IOException {
FSDataInputStreamWrapper wrapper = new FSDataInputStreamWrapper(fsdis);
return pickReaderVersion(path, wrapper, size, cacheConf, null, conf);
}
Author: grokcoder, Project: pbase, Lines: 10, Source: HFile.java
Note: The org.apache.hadoop.hbase.io.FSDataInputStreamWrapper class examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms; the snippets were selected from open-source projects contributed by their respective developers. Copyright of the source code remains with the original authors; for redistribution and use, please refer to each project's license. Do not repost without permission.