本文整理汇总了Java中org.apache.hadoop.nfs.nfs3.response.WccAttr类的典型用法代码示例。如果您正苦于以下问题:Java WccAttr类的具体用法?Java WccAttr怎么用?Java WccAttr使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
WccAttr类属于org.apache.hadoop.nfs.nfs3.response包,在下文中一共展示了WccAttr类的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: getWccAttr
import org.apache.hadoop.nfs.nfs3.response.WccAttr; //导入依赖的package包/类
/**
 * Builds the pre/post-operation weak-cache-consistency attributes for the
 * inode addressed by {@code fileIdPath}.
 *
 * @param client DFS client used to look up the file status
 * @param fileIdPath HDFS file-id path identifying the inode
 * @return the WccAttr snapshot, or {@code null} if the inode no longer exists
 * @throws IOException if the namenode lookup fails
 */
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
    throws IOException {
  HdfsFileStatus status = getFileStatus(client, fileIdPath);
  if (status == null) {
    // Inode vanished between operations; caller is expected to handle null.
    return null;
  }
  final long reportedSize;
  if (status.isDir()) {
    // Directories report a synthesized size derived from the child count.
    reportedSize = getDirSize(status.getChildrenNum());
  } else {
    reportedSize = status.getLen();
  }
  // HDFS exposes only a modification time, so it fills both the mtime and
  // ctime slots of the NFSv3 wcc_attr structure.
  return new WccAttr(reportedSize,
      new NfsTime(status.getModificationTime()),
      new NfsTime(status.getModificationTime()));
}
开发者ID:naver,项目名称:hadoop,代码行数:13,代码来源:Nfs3Utils.java
示例2: createWccData
import org.apache.hadoop.nfs.nfs3.response.WccAttr; //导入依赖的package包/类
/**
 * Assembles an NFSv3 {@code wcc_data} from the supplied pre-operation
 * attributes and freshly fetched post-operation attributes.
 *
 * @param preOpAttr attributes captured before the operation ran
 * @param dfsClient DFS client used for the post-operation lookup
 * @param fileIdPath HDFS file-id path of the target inode
 * @param iug id-mapping provider for translating owner/group identifiers
 * @return the combined pre/post operation attribute pair
 * @throws IOException if fetching the post-operation attributes fails
 */
public static WccData createWccData(final WccAttr preOpAttr,
    DFSClient dfsClient, final String fileIdPath,
    final IdMappingServiceProvider iug)
    throws IOException {
  // Post-op attributes are always re-read from the namenode rather than
  // derived locally, so they reflect the operation's actual outcome.
  final Nfs3FileAttributes postOpAttr = getFileAttr(dfsClient, fileIdPath, iug);
  return new WccData(preOpAttr, postOpAttr);
}
开发者ID:naver,项目名称:hadoop,代码行数:8,代码来源:Nfs3Utils.java
示例3: getWccAttr
import org.apache.hadoop.nfs.nfs3.response.WccAttr; //导入依赖的package包/类
/**
 * Produces the weak-cache-consistency attribute snapshot (size plus times)
 * for the file or directory identified by {@code fileIdPath}.
 *
 * @param client DFS client used to resolve the file status
 * @param fileIdPath HDFS file-id path of the inode
 * @return the snapshot, or {@code null} when the inode cannot be found
 * @throws IOException on namenode communication failure
 */
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
    throws IOException {
  HdfsFileStatus status = getFileStatus(client, fileIdPath);
  if (status == null) {
    return null;
  }
  // Directories use the synthesized NFS directory size; files use length.
  long length = status.isDir()
      ? Nfs3FileAttributes.getDirSize(status.getChildrenNum())
      : status.getLen();
  // Only mtime is tracked by HDFS, so it doubles as ctime in the reply.
  return new WccAttr(length,
      new NfsTime(status.getModificationTime()),
      new NfsTime(status.getModificationTime()));
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:13,代码来源:Nfs3Utils.java
示例4: getWccAttr
import org.apache.hadoop.nfs.nfs3.response.WccAttr; //导入依赖的package包/类
/**
 * Captures the wcc_attr (size, mtime, ctime) of the inode named by the
 * given file-id path, for use in NFSv3 weak cache consistency replies.
 *
 * @param client DFS client performing the lookup
 * @param fileIdPath HDFS file-id path of the inode
 * @return the attribute snapshot, or {@code null} if the inode is gone
 * @throws IOException if the status lookup fails
 */
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
    throws IOException {
  HdfsFileStatus fileStatus = getFileStatus(client, fileIdPath);
  if (fileStatus == null) {
    return null;
  }
  final long wccSize;
  if (fileStatus.isDir()) {
    // Directory "size" is synthesized from the number of children.
    wccSize = Nfs3FileAttributes.getDirSize(fileStatus.getChildrenNum());
  } else {
    wccSize = fileStatus.getLen();
  }
  // HDFS has no distinct ctime, so the modification time is reused.
  return new WccAttr(wccSize,
      new NfsTime(fileStatus.getModificationTime()),
      new NfsTime(fileStatus.getModificationTime()));
}
开发者ID:hopshadoop,项目名称:hops,代码行数:14,代码来源:Nfs3Utils.java
示例5: receivedNewWriteInternal
import org.apache.hadoop.nfs.nfs3.response.WccAttr; //导入依赖的package包/类
// Handles a WRITE3 request for a byte range not previously seen on this
// open file: queues it in the write-back cache and, when the write is
// UNSTABLE and cannot be flushed sequentially yet, replies to the client
// immediately so it keeps streaming.
// NOTE(review): preOpAttr is read from latestAttr before the write is
// cached — presumably so the reply's WccData reflects pre-write state.
private void receivedNewWriteInternal(DFSClient dfsClient,
WRITE3Request request, Channel channel, int xid,
AsyncDataService asyncDataService, IdUserGroup iug) {
WriteStableHow stableHow = request.getStableHow();
// Snapshot pre-operation attributes for the reply's wcc_data.
WccAttr preOpAttr = latestAttr.getWccAttr();
int count = request.getCount();
WriteCtx writeCtx = addWritesToCache(request, channel, xid);
if (writeCtx == null) {
// offset < nextOffset: range overlaps data already written — treat as a
// (possible) retransmission/overwrite.
processOverWrite(dfsClient, request, channel, xid, iug);
} else {
// The write is added to pendingWrites.
// Check and start writing back if necessary.
boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
if (!startWriting) {
// offset > nextOffset (out-of-order write): check if we need to dump
// cached data to disk to bound memory use.
checkDump();
// In test, noticed some Linux client sends a batch (e.g., 1MB)
// of reordered writes and won't send more writes until it gets
// responses of the previous batch. So here send response immediately
// for unstable non-sequential write.
if (request.getStableHow() == WriteStableHow.UNSTABLE) {
if (LOG.isDebugEnabled()) {
LOG.debug("UNSTABLE write request, send response for offset: "
+ writeCtx.getOffset());
}
WccData fileWcc = new WccData(preOpAttr, latestAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
Nfs3Utils
.writeChannel(channel, response.writeHeaderAndResponse(new XDR(),
xid, new VerifierNone()), xid);
// Mark as replied so the flush path does not answer a second time.
writeCtx.setReplied(true);
}
}
}
}
开发者ID:chendave,项目名称:hadoop-TCP,代码行数:40,代码来源:OpenFileCtx.java
示例6: receivedNewWriteInternal
import org.apache.hadoop.nfs.nfs3.response.WccAttr; //导入依赖的package包/类
// Handles a WRITE3 request for a previously unseen byte range: caches the
// write and, when it cannot be flushed sequentially yet, downgrades any
// stable request to UNSTABLE and replies immediately so the client keeps
// sending (an eventual COMMIT will provide durability).
private void receivedNewWriteInternal(DFSClient dfsClient,
WRITE3Request request, Channel channel, int xid,
AsyncDataService asyncDataService, IdMappingServiceProvider iug) {
WriteStableHow stableHow = request.getStableHow();
// Snapshot pre-operation attributes for the reply's wcc_data.
WccAttr preOpAttr = latestAttr.getWccAttr();
int count = request.getCount();
WriteCtx writeCtx = addWritesToCache(request, channel, xid);
if (writeCtx == null) {
// offset < nextOffset: overlaps already-written data, so take the
// overwrite/retransmission path instead.
processOverWrite(dfsClient, request, channel, xid, iug);
} else {
// The write is added to pendingWrites.
// Check and start writing back if necessary.
boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
if (!startWriting) {
// offset > nextOffset (out-of-order write): check if we need to dump
// cached data; this call may block until the dump completes.
waitForDump();
// In test, noticed some Linux client sends a batch (e.g., 1MB)
// of reordered writes and won't send more writes until it gets
// responses of the previous batch. So here send response immediately
// for unstable non-sequential write.
if (stableHow != WriteStableHow.UNSTABLE) {
LOG.info("Have to change stable write to unstable write: "
+ request.getStableHow());
// Data is not yet durable, so the reply must not promise stability.
stableHow = WriteStableHow.UNSTABLE;
}
if (LOG.isDebugEnabled()) {
LOG.debug("UNSTABLE write request, send response for offset: "
+ writeCtx.getOffset());
}
WccData fileWcc = new WccData(preOpAttr, latestAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
// Record latency from when the write context was created.
RpcProgramNfs3.metrics.addWrite(Nfs3Utils
.getElapsedTime(writeCtx.startTime));
Nfs3Utils
.writeChannel(channel, response.serialize(new XDR(),
xid, new VerifierNone()), xid);
// Mark as replied so the flush path does not answer a second time.
writeCtx.setReplied(true);
}
}
}
开发者ID:naver,项目名称:hadoop,代码行数:46,代码来源:OpenFileCtx.java
示例7: getWccAttr
import org.apache.hadoop.nfs.nfs3.response.WccAttr; //导入依赖的package包/类
/**
 * Returns a {@link WccAttr} snapshot of these attributes, carrying the
 * current size, modification time, and change time.
 */
public WccAttr getWccAttr() {
  final long currentSize = size;
  return new WccAttr(currentSize, mtime, ctime);
}
开发者ID:naver,项目名称:hadoop,代码行数:4,代码来源:Nfs3FileAttributes.java
示例8: createWccData
import org.apache.hadoop.nfs.nfs3.response.WccAttr; //导入依赖的package包/类
/**
 * Combines previously captured pre-operation attributes with freshly
 * fetched post-operation attributes into an NFSv3 {@code wcc_data}.
 *
 * @param preOpAttr attributes snapshotted before the operation
 * @param dfsClient DFS client used to re-read the inode's attributes
 * @param fileIdPath HDFS file-id path of the target inode
 * @param iug id/user/group mapper for owner translation
 * @return the pre/post attribute pair
 * @throws IOException if the post-operation attribute fetch fails
 */
public static WccData createWccData(final WccAttr preOpAttr,
    DFSClient dfsClient, final String fileIdPath, final IdUserGroup iug)
    throws IOException {
  // Re-read from the namenode so the post-op view is authoritative.
  final Nfs3FileAttributes postOpAttr = getFileAttr(dfsClient, fileIdPath, iug);
  return new WccData(preOpAttr, postOpAttr);
}
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:7,代码来源:Nfs3Utils.java
示例9: receivedNewWriteInternal
import org.apache.hadoop.nfs.nfs3.response.WccAttr; //导入依赖的package包/类
// Handles a WRITE3 request for a byte range not seen before on this open
// file: caches it for asynchronous write-back and, when the range is
// non-sequential, downgrades stable writes to UNSTABLE and acknowledges
// immediately so the client continues streaming.
private void receivedNewWriteInternal(DFSClient dfsClient,
WRITE3Request request, Channel channel, int xid,
AsyncDataService asyncDataService, IdUserGroup iug) {
WriteStableHow stableHow = request.getStableHow();
// Snapshot pre-operation attributes for the reply's wcc_data.
WccAttr preOpAttr = latestAttr.getWccAttr();
int count = request.getCount();
WriteCtx writeCtx = addWritesToCache(request, channel, xid);
if (writeCtx == null) {
// offset < nextOffset: overlaps already-written data — handle as a
// (possible) retransmission/overwrite instead of caching it.
processOverWrite(dfsClient, request, channel, xid, iug);
} else {
// The write is added to pendingWrites.
// Check and start writing back if necessary.
boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
if (!startWriting) {
// offset > nextOffset (out-of-order write): check if we need to dump
// cached data to disk to bound memory use.
checkDump();
// In test, noticed some Linux client sends a batch (e.g., 1MB)
// of reordered writes and won't send more writes until it gets
// responses of the previous batch. So here send response immediately
// for unstable non-sequential write.
if (stableHow != WriteStableHow.UNSTABLE) {
LOG.info("Have to change stable write to unstable write:" +
request.getStableHow());
// Data is not durable yet, so the reply must not claim stability.
stableHow = WriteStableHow.UNSTABLE;
}
if (LOG.isDebugEnabled()) {
LOG.debug("UNSTABLE write request, send response for offset: " +
writeCtx.getOffset());
}
WccData fileWcc = new WccData(preOpAttr, latestAttr);
WRITE3Response response =
new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, count, stableHow,
Nfs3Constant.WRITE_COMMIT_VERF);
Nfs3Utils.writeChannel(channel,
response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
xid);
// Mark as replied so the flush path does not answer a second time.
writeCtx.setReplied(true);
}
}
}
开发者ID:hopshadoop,项目名称:hops,代码行数:45,代码来源:OpenFileCtx.java
注:本文中的org.apache.hadoop.nfs.nfs3.response.WccAttr类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论