This article collects typical usage examples of the Java class org.apache.hadoop.nfs.nfs3.Nfs3Constant. If you have been wondering what Nfs3Constant is for and how to use it, the curated examples below should help.
The Nfs3Constant class belongs to the org.apache.hadoop.nfs.nfs3 package. Eighteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
Example 1: getAccessRights
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
public static int getAccessRights(int mode, int type) {
int rtn = 0;
if (isSet(mode, Nfs3Constant.ACCESS_MODE_READ)) {
rtn |= Nfs3Constant.ACCESS3_READ;
// LOOKUP is only meaningful for dir
if (type == NfsFileType.NFSDIR.toValue()) {
rtn |= Nfs3Constant.ACCESS3_LOOKUP;
}
}
if (isSet(mode, Nfs3Constant.ACCESS_MODE_WRITE)) {
rtn |= Nfs3Constant.ACCESS3_MODIFY;
rtn |= Nfs3Constant.ACCESS3_EXTEND;
// Set the delete bit; UNIX may ignore it for a regular file, since
// deletion is governed by the parent directory's permissions
rtn |= Nfs3Constant.ACCESS3_DELETE;
}
if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) {
if (type == NfsFileType.NFSREG.toValue()) {
rtn |= Nfs3Constant.ACCESS3_EXECUTE;
} else {
rtn |= Nfs3Constant.ACCESS3_LOOKUP;
}
}
return rtn;
}
Developer: naver, Project: hadoop, Lines: 26, Source: Nfs3Utils.java
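A minimal caller sketch for the method above (the mode value is illustrative, and it assumes isSet tests bits with a bitwise AND, which the constant names suggest):

int mode = Nfs3Constant.ACCESS_MODE_READ | Nfs3Constant.ACCESS_MODE_EXECUTE;
int rights = Nfs3Utils.getAccessRights(mode, NfsFileType.NFSDIR.toValue());
// For a directory, both READ and EXECUTE contribute the LOOKUP bit.
assert (rights & Nfs3Constant.ACCESS3_LOOKUP) != 0;
assert (rights & Nfs3Constant.ACCESS3_READ) != 0;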
Example 2: addDeprecatedKeys
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
private static void addDeprecatedKeys() {
Configuration.addDeprecations(new DeprecationDelta[] {
new DeprecationDelta("nfs3.server.port",
NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY),
new DeprecationDelta("nfs3.mountd.port",
NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY),
new DeprecationDelta("dfs.nfs.exports.cache.size",
Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY),
new DeprecationDelta("dfs.nfs.exports.cache.expirytime.millis",
Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY),
new DeprecationDelta("hadoop.nfs.userupdate.milly",
IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY),
new DeprecationDelta("nfs.usergroup.update.millis",
IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY),
new DeprecationDelta("nfs.static.mapping.file",
IdMappingConstant.STATIC_ID_MAPPING_FILE_KEY),
new DeprecationDelta("dfs.nfs3.enableDump",
NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY),
new DeprecationDelta("dfs.nfs3.dump.dir",
NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY),
new DeprecationDelta("dfs.nfs3.max.open.files",
NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY),
new DeprecationDelta("dfs.nfs3.stream.timeout",
NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY),
new DeprecationDelta("dfs.nfs3.export.point",
NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY),
new DeprecationDelta("nfs.allow.insecure.ports",
NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_KEY),
new DeprecationDelta("dfs.nfs.keytab.file",
NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY),
new DeprecationDelta("dfs.nfs.kerberos.principal",
NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY),
new DeprecationDelta("dfs.nfs.rtmax",
NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY),
new DeprecationDelta("dfs.nfs.wtmax",
NfsConfigKeys.DFS_NFS_MAX_WRITE_TRANSFER_SIZE_KEY),
new DeprecationDelta("dfs.nfs.dtmax",
NfsConfigKeys.DFS_NFS_MAX_READDIR_TRANSFER_SIZE_KEY) });
}
Developer: naver, Project: hadoop, Lines: 40, Source: NfsConfiguration.java
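Once these deltas are registered, a configuration that still carries a legacy key resolves through the new one. A small sketch, assuming NfsConfiguration runs addDeprecatedKeys in a static initializer (which the method's placement suggests):

NfsConfiguration conf = new NfsConfiguration();
conf.set("nfs3.server.port", "2049"); // legacy key name
// The value is readable under the new key via the DeprecationDelta.
assert "2049".equals(conf.get(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY));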
Example 3: testCreate
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
@Test(timeout = 60000)
public void testCreate() throws Exception {
HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
long dirId = status.getFileId();
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
CREATE3Request req = new CREATE3Request(handle, "fubar",
Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
req.serialize(xdr_req);
// Attempt by an unprivileged user should fail.
CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus());
// Attempt by a privileged user should pass.
CREATE3Response response2 = nfsd.create(xdr_req.asReadOnlyWrap(),
securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
response2.getStatus());
}
Developer: naver, Project: hadoop, Lines: 24, Source: TestRpcProgramNfs3.java
Example 4: deserialize
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
public static CREATE3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
int mode = xdr.readInt();
SetAttr3 objAttr = new SetAttr3();
long verf = 0;
if ((mode == Nfs3Constant.CREATE_UNCHECKED)
|| (mode == Nfs3Constant.CREATE_GUARDED)) {
objAttr.deserialize(xdr);
} else if (mode == Nfs3Constant.CREATE_EXCLUSIVE) {
verf = xdr.readHyper();
} else {
throw new IOException("Wrong create mode:" + mode);
}
return new CREATE3Request(handle, name, mode, objAttr, verf);
}
Developer: naver, Project: hadoop, Lines: 17, Source: CREATE3Request.java
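Paired with CREATE3Request.serialize (used in Example 3), this gives a simple round trip. A sketch, assuming serialize writes exactly the layout deserialize reads, to be run inside a method that declares throws IOException:

XDR xdr = new XDR();
new CREATE3Request(new FileHandle(1L), "newfile",
    Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0).serialize(xdr);
CREATE3Request parsed = CREATE3Request.deserialize(xdr.asReadOnlyWrap());
// parsed now carries the same handle, name and mode that were written.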
Example 5: getInstance
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
public static synchronized NfsExports getInstance(Configuration conf) {
if (exports == null) {
String matchHosts = conf.get(
CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY,
CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);
int cacheSize = conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY,
Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT);
long expirationPeriodNano = conf.getLong(
Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
try {
exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
} catch (IllegalArgumentException e) {
LOG.error("Invalid NFS Exports provided: ", e);
return exports;
}
}
return exports;
}
Developer: naver, Project: hadoop, Lines: 20, Source: NfsExports.java
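The expiry is configured in milliseconds while the access cache expects nanoseconds, hence the * 1000 * 1000 above. An equivalent sketch with java.util.concurrent.TimeUnit makes the conversion explicit:

long expirationPeriodNano = TimeUnit.MILLISECONDS.toNanos(
    conf.getLong(Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
        Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT));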
Example 6: RpcProgramNfs3
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
public RpcProgramNfs3(List<String> exports, Configuration config)
throws IOException {
super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM,
Nfs3Constant.VERSION, Nfs3Constant.VERSION, 100);
config.set(FsPermission.UMASK_LABEL, "000");
iug = new IdUserGroup();
writeManager = new WriteManager(iug, config);
clientCache = new DFSClientCache(config);
superUserClient = new DFSClient(NameNode.getAddress(config), config);
replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
DFSConfigKeys.DFS_REPLICATION_DEFAULT);
blockSize = config.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
bufferSize = config.getInt("io.file.buffer.size", 4096);
writeDumpDir = config.get("dfs.nfs3.dump.dir", "/tmp/.hdfs-nfs");
boolean enableDump = config.getBoolean("dfs.nfs3.enableDump", true);
if (!enableDump) {
writeDumpDir = null;
} else {
clearDirectory(writeDumpDir);
}
}
Developer: ict-carch, Project: hadoop-plus, Lines: 25, Source: RpcProgramNfs3.java
Example 7: getAccessRights
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
public static int getAccessRights(int mode, int type) {
int rtn = 0;
if (isSet(mode, Nfs3Constant.ACCESS_MODE_READ)) {
rtn |= Nfs3Constant.ACCESS3_READ;
// LOOKUP is only meaningful for dir
if (type == NfsFileType.NFSDIR.toValue()) {
rtn |= Nfs3Constant.ACCESS3_LOOKUP;
}
}
if (isSet(mode, Nfs3Constant.ACCESS_MODE_WRITE)) {
rtn |= Nfs3Constant.ACCESS3_MODIFY;
rtn |= Nfs3Constant.ACCESS3_EXTEND;
// Set the delete bit; UNIX may ignore it for a regular file, since
// deletion is governed by the parent directory's permissions
rtn |= Nfs3Constant.ACCESS3_DELETE;
}
if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) {
if (type == NfsFileType.NFSREG.toValue()) {
rtn |= Nfs3Constant.ACCESS3_EXECUTE;
}
}
return rtn;
}
Developer: ict-carch, Project: hadoop-plus, Lines: 24, Source: Nfs3Utils.java
Example 8: create
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
static XDR create() {
XDR request = new XDR();
RpcCall.write(request, 0x8000004c, Nfs3Constant.PROGRAM,
Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3_CREATE);
// credentials
request.writeInt(0); // auth null
request.writeInt(0); // length zero
// verifier
request.writeInt(0); // auth null
request.writeInt(0); // length zero
SetAttr3 objAttr = new SetAttr3();
CREATE3Request createReq = new CREATE3Request(new FileHandle("/"),
"out-of-order-write" + System.currentTimeMillis(), 0, objAttr, 0);
createReq.serialize(request);
return request;
}
Developer: ict-carch, Project: hadoop-plus, Lines: 19, Source: TestOutOfOrderWrite.java
Example 9: write
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
static XDR write(FileHandle handle, int xid, long offset, int count,
byte[] data) {
XDR request = new XDR();
RpcCall.write(request, xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
Nfs3Constant.NFSPROC3_WRITE);
// credentials
request.writeInt(0); // auth null
request.writeInt(0); // length zero
// verifier
request.writeInt(0); // auth null
request.writeInt(0); // length zero
WRITE3Request write1 = new WRITE3Request(handle, offset, count,
WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
write1.serialize(request);
return request;
}
Developer: ict-carch, Project: hadoop-plus, Lines: 18, Source: TestOutOfOrderWrite.java
Example 10: main
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
public static void main(String[] args) throws InterruptedException {
PortmapMapping mapEntry = new PortmapMapping(RpcProgramMountd.PROGRAM,
RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
RpcProgramMountd.PORT);
XDR mappingRequest = PortmapRequest.create(mapEntry);
RegistrationClient registrationClient = new RegistrationClient(
"localhost", Nfs3Constant.SUN_RPCBIND, mappingRequest);
registrationClient.run();
Thread t1 = new Runtest1();
//Thread t2 = testa.new Runtest2();
t1.start();
//t2.start();
t1.join();
//t2.join();
//testDump();
}
Developer: ict-carch, Project: hadoop-plus, Lines: 18, Source: TestPortmapRegister.java
Example 11: streamCleanup
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
/**
* Check stream status to decide if it should be closed
*
* @return true if the stream should be removed; false if it should be kept
*/
public synchronized boolean streamCleanup(long fileId, long streamTimeout) {
Preconditions.checkState(
streamTimeout >= Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
if (!activeState) {
return true;
}
boolean flag = false;
// Check the stream timeout
if (checkStreamTimeout(streamTimeout)) {
if (LOG.isDebugEnabled()) {
LOG.debug("stream can be closed for fileId:" + fileId);
}
flag = true;
}
return flag;
}
Developer: hopshadoop, Project: hops, Lines: 23, Source: OpenFileCtx.java
Example 12: WriteManager
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
WriteManager(IdUserGroup iug, final Configuration config) {
this.iug = iug;
this.config = config;
streamTimeout = config.getLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT,
Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
LOG.info("Stream timeout is " + streamTimeout + "ms.");
if (streamTimeout < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
LOG.info("Reset stream timeout to minimum value " +
Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + "ms.");
streamTimeout = Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT;
}
maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
LOG.info("Maximum open streams is " + maxStreams);
this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
}
Developer: hopshadoop, Project: hops, Lines: 17, Source: WriteManager.java
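Both knobs can be tuned through the Configuration before constructing the manager; a timeout below the minimum is clamped as shown above. A hypothetical sketch, valid from within the same package since the constructor is package-private:

Configuration conf = new Configuration();
conf.setLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT, 60 * 1000L); // 60 seconds
conf.setInt(Nfs3Constant.MAX_OPEN_FILES, 256);
WriteManager wm = new WriteManager(new IdUserGroup(), conf);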
Example 13: NfsExports
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
/**
* Constructor.
* @param cacheSize The size of the access privilege cache.
* @param expirationPeriodNano The expiration period of cache entries, in nanoseconds.
* @param matchHosts A string specifying one or more host match entries.
*/
NfsExports(int cacheSize, long expirationPeriodNano, String matchHosts) {
this.cacheExpirationPeriod = expirationPeriodNano;
accessCache = new LightWeightCache<AccessCacheEntry, AccessCacheEntry>(
cacheSize, cacheSize, expirationPeriodNano, 0);
String[] matchStrings = matchHosts.split(
Nfs3Constant.EXPORTS_ALLOWED_HOSTS_SEPARATOR);
mMatches = new ArrayList<Match>(matchStrings.length);
for(String mStr : matchStrings) {
if (LOG.isDebugEnabled()) {
LOG.debug("Processing match string '" + mStr + "'");
}
mStr = mStr.trim();
if(!mStr.isEmpty()) {
mMatches.add(getMatch(mStr));
}
}
}
Developer: chendave, Project: hadoop-TCP, Lines: 24, Source: NfsExports.java
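In practice callers go through NfsExports.getInstance (Example 5); a direct-construction sketch from within the same package is shown below, assuming ';' is the value of Nfs3Constant.EXPORTS_ALLOWED_HOSTS_SEPARATOR and using java.util.concurrent.TimeUnit for the nanosecond expiry:

NfsExports exports = new NfsExports(1024,
    TimeUnit.MINUTES.toNanos(15), // cache expiry in nanoseconds
    "192.168.0.0/22 rw ; host1.test.org ro");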
Example 14: streamCleanup
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
/**
* Check stream status to decide if it should be closed
* @return true if the stream should be removed; false if it should be kept
*/
public synchronized boolean streamCleanup(long fileId, long streamTimeout) {
Preconditions
.checkState(streamTimeout >= Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
if (!activeState) {
return true;
}
boolean flag = false;
// Check the stream timeout
if (checkStreamTimeout(streamTimeout)) {
if (LOG.isDebugEnabled()) {
LOG.debug("stream can be closed for fileId:" + fileId);
}
flag = true;
}
return flag;
}
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 22, Source: OpenFileCtx.java
Example 15: WriteManager
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
WriteManager(IdUserGroup iug, final Configuration config) {
this.iug = iug;
this.config = config;
streamTimeout = config.getLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT,
Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
LOG.info("Stream timeout is " + streamTimeout + "ms.");
if (streamTimeout < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
LOG.info("Reset stream timeout to minimum value "
+ Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + "ms.");
streamTimeout = Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT;
}
maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
LOG.info("Maximum open streams is "+ maxStreams);
this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
}
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 17, Source: WriteManager.java
Example 16: create
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
static XDR create() {
XDR request = new XDR();
RpcCall.getInstance(0x8000004c, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(),
new VerifierNone()).write(request);
SetAttr3 objAttr = new SetAttr3();
CREATE3Request createReq = new CREATE3Request(new FileHandle("/"),
"out-of-order-write" + System.currentTimeMillis(), 0, objAttr, 0);
createReq.serialize(request);
return request;
}
Developer: naver, Project: hadoop, Lines: 13, Source: TestOutOfOrderWrite.java
Example 17: write
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
static XDR write(FileHandle handle, int xid, long offset, int count,
byte[] data) {
XDR request = new XDR();
RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
Nfs3Constant.NFSPROC3.WRITE.getValue(), new CredentialsNone(),
new VerifierNone()).write(request);
WRITE3Request write1 = new WRITE3Request(handle, offset, count,
WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
write1.serialize(request);
return request;
}
Developer: naver, Project: hadoop, Lines: 13, Source: TestOutOfOrderWrite.java
Example 18: testIdempotent
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the required package/class
@Test(timeout=1000)
public void testIdempotent() {
Object[][] procedures = {
{ Nfs3Constant.NFSPROC3.NULL, 1 },
{ Nfs3Constant.NFSPROC3.GETATTR, 1 },
{ Nfs3Constant.NFSPROC3.SETATTR, 1 },
{ Nfs3Constant.NFSPROC3.LOOKUP, 1 },
{ Nfs3Constant.NFSPROC3.ACCESS, 1 },
{ Nfs3Constant.NFSPROC3.READLINK, 1 },
{ Nfs3Constant.NFSPROC3.READ, 1 },
{ Nfs3Constant.NFSPROC3.WRITE, 1 },
{ Nfs3Constant.NFSPROC3.CREATE, 0 },
{ Nfs3Constant.NFSPROC3.MKDIR, 0 },
{ Nfs3Constant.NFSPROC3.SYMLINK, 0 },
{ Nfs3Constant.NFSPROC3.MKNOD, 0 },
{ Nfs3Constant.NFSPROC3.REMOVE, 0 },
{ Nfs3Constant.NFSPROC3.RMDIR, 0 },
{ Nfs3Constant.NFSPROC3.RENAME, 0 },
{ Nfs3Constant.NFSPROC3.LINK, 0 },
{ Nfs3Constant.NFSPROC3.READDIR, 1 },
{ Nfs3Constant.NFSPROC3.READDIRPLUS, 1 },
{ Nfs3Constant.NFSPROC3.FSSTAT, 1 },
{ Nfs3Constant.NFSPROC3.FSINFO, 1 },
{ Nfs3Constant.NFSPROC3.PATHCONF, 1 },
{ Nfs3Constant.NFSPROC3.COMMIT, 1 } };
for (Object[] procedure : procedures) {
boolean idempotent = procedure[1].equals(Integer.valueOf(1));
Nfs3Constant.NFSPROC3 proc = (Nfs3Constant.NFSPROC3)procedure[0];
if (idempotent) {
Assert.assertTrue(("Procedure " + proc + " should be idempotent"),
proc.isIdempotent());
} else {
Assert.assertFalse(("Procedure " + proc + " should be non-idempotent"),
proc.isIdempotent());
}
}
}
Developer: naver, Project: hadoop, Lines: 38, Source: TestRpcProgramNfs3.java
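The mapping follows NFSv3 semantics: procedures that can safely be re-executed after a lost reply are idempotent, while the mutating ones (CREATE, MKDIR, RENAME, and so on) are not. A sketch of how a server might consult the flag (the reply cache itself is hypothetical):

Nfs3Constant.NFSPROC3 proc = Nfs3Constant.NFSPROC3.CREATE;
if (!proc.isIdempotent()) {
  // A duplicate-request (reply) cache would be checked here before
  // re-executing the call for a retransmitted RPC.
}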
Note: The org.apache.hadoop.nfs.nfs3.Nfs3Constant class examples in this article are collected from GitHub, MSDocs, and other source-code and documentation platforms; the snippets were selected from open-source projects contributed by their developers. Copyright of the source code belongs to the original authors; consult the corresponding project's License before distributing or reusing it. Do not reproduce without permission.