本文整理汇总了Java中org.apache.hadoop.net.unix.DomainSocket类的典型用法代码示例。如果您正苦于以下问题:Java DomainSocket类的具体用法?Java DomainSocket怎么用?Java DomainSocket使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
DomainSocket类属于org.apache.hadoop.net.unix包,在下文中一共展示了DomainSocket类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: nextDomainPeer
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
/**
 * Get the next DomainPeer-- either from the cache or by creating it.
 *
 * @return the next DomainPeer, or null if we could not construct one.
 */
private BlockReaderPeer nextDomainPeer() {
  // Prefer a cached domain peer while cache retries remain.
  if (remainingCacheTries > 0) {
    final Peer cached = clientContext.getPeerCache().get(datanode, true);
    if (cached != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("nextDomainPeer: reusing existing peer " + cached);
      }
      return new BlockReaderPeer(cached, true);
    }
  }
  // No cached peer: create a fresh domain socket connection.
  final DomainSocket sock = clientContext.getDomainSocketFactory().
      createSocket(pathInfo, conf.socketTimeout);
  if (sock == null) {
    return null;
  }
  return new BlockReaderPeer(new DomainPeer(sock), false);
}
开发者ID:naver,项目名称:hadoop,代码行数:21,代码来源:BlockReaderFactory.java
示例2: DomainSocketFactory
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
public DomainSocketFactory(Conf conf) {
  // Determine which (if any) feature wants a domain socket.
  String feature = null;
  if (conf.isShortCircuitLocalReads() && !conf.isUseLegacyBlockReaderLocal()) {
    feature = "The short-circuit local reads feature";
  } else if (conf.isDomainSocketDataTraffic()) {
    feature = "UNIX domain socket data traffic";
  }
  if (feature == null) {
    PerformanceAdvisory.LOG.debug(
        "Both short-circuit local reads and UNIX domain socket are disabled.");
    return;
  }
  // A feature is enabled: it needs both a configured path and the native
  // DomainSocket code to have loaded successfully.
  if (conf.getDomainSocketPath().isEmpty()) {
    throw new HadoopIllegalArgumentException(feature + " is enabled but "
        + DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
  }
  if (DomainSocket.getLoadingFailureReason() != null) {
    LOG.warn(feature + " cannot be used because "
        + DomainSocket.getLoadingFailureReason());
  } else {
    LOG.debug(feature + " is enabled.");
  }
}
开发者ID:naver,项目名称:hadoop,代码行数:26,代码来源:DomainSocketFactory.java
示例3: getPathInfo
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
/**
 * Get information about a domain socket path.
 *
 * @param addr The inet address to use.
 * @param conf The client configuration.
 *
 * @return Information about the socket path.
 */
public PathInfo getPathInfo(InetSocketAddress addr, DFSClient.Conf conf) {
  // Domain sockets require a configured path.
  if (conf.getDomainSocketPath().isEmpty()) {
    return PathInfo.NOT_CONFIGURED;
  }
  // If neither data traffic nor (non-legacy) short-circuit reads would use
  // the socket, don't create it.
  final boolean shortCircuitUsable =
      conf.isShortCircuitLocalReads() && !conf.isUseLegacyBlockReaderLocal();
  if (!conf.isDomainSocketDataTraffic() && !shortCircuitUsable) {
    return PathInfo.NOT_CONFIGURED;
  }
  // If the DomainSocket code is not loaded, we can't create DomainSocket
  // objects.
  if (DomainSocket.getLoadingFailureReason() != null) {
    return PathInfo.NOT_CONFIGURED;
  }
  // UNIX domain sockets can only be used to talk to local peers.
  if (!DFSClient.isLocalAddress(addr)) {
    return PathInfo.NOT_CONFIGURED;
  }
  final String effectivePath = DomainSocket.getEffectivePath(
      conf.getDomainSocketPath(), addr.getPort());
  final PathState cachedState = pathMap.getIfPresent(effectivePath);
  return (cachedState == null)
      ? new PathInfo(effectivePath, PathState.VALID)
      : new PathInfo(effectivePath, cachedState);
}
开发者ID:naver,项目名称:hadoop,代码行数:34,代码来源:DomainSocketFactory.java
示例4: createSocket
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
public DomainSocket createSocket(PathInfo info, int socketTimeout) {
  Preconditions.checkArgument(info.getPathState() != PathState.UNUSABLE);
  DomainSocket sock = null;
  boolean success = false;
  try {
    sock = DomainSocket.connect(info.getPath());
    sock.setAttribute(DomainSocket.RECEIVE_TIMEOUT, socketTimeout);
    success = true;
    return sock;
  } catch (IOException e) {
    LOG.warn("error creating DomainSocket", e);
    return null;
  } finally {
    if (!success) {
      // Connection or setup failed: close any half-open socket and mark
      // the path unusable so future attempts skip it.
      if (sock != null) {
        IOUtils.closeQuietly(sock);
      }
      pathMap.put(info.getPath(), PathState.UNUSABLE);
    }
  }
}
开发者ID:naver,项目名称:hadoop,代码行数:23,代码来源:DomainSocketFactory.java
示例5: handle
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
/**
 * Handle the closure of the UNIX domain socket associated with this shared
 * memory segment by marking this segment as stale.
 *
 * If there are no slots associated with this shared memory segment, it will
 * be freed immediately in this function.
 */
@Override
public boolean handle(DomainSocket sock) {
  manager.unregisterShm(getShmId());
  synchronized (this) {
    Preconditions.checkState(!disconnected);
    disconnected = true;
    // Invalidate every live slot, remembering whether any existed.
    boolean sawSlot = false;
    final Iterator<Slot> it = slotIterator();
    while (it.hasNext()) {
      it.next().makeInvalid();
      sawSlot = true;
    }
    if (!sawSlot) {
      // Nothing references this segment any more: free it right away.
      free();
    }
  }
  return true;
}
开发者ID:naver,项目名称:hadoop,代码行数:26,代码来源:DfsClientShm.java
示例6: accept
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
@Override
public Peer accept() throws IOException, SocketTimeoutException {
  final DomainSocket connSock = sock.accept();
  Peer peer = null;
  boolean success = false;
  try {
    peer = new DomainPeer(connSock);
    success = true;
    return peer;
  } finally {
    if (!success) {
      // Wrapping failed: release both the half-built peer and the
      // freshly accepted socket.
      if (peer != null) {
        peer.close();
      }
      connSock.close();
    }
  }
}
开发者ID:naver,项目名称:hadoop,代码行数:17,代码来源:DomainPeerServer.java
示例7: createShortCircuitConf
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
/** Build a Configuration enabling short-circuit reads for the given test. */
private static Configuration createShortCircuitConf(String testName,
    TemporarySocketDirectory sockDir) {
  final Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_CONTEXT, testName);
  conf.setLong(DFS_BLOCK_SIZE_KEY, 4096);
  final File sockPath = new File(sockDir.getDir(), testName);
  conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, sockPath.getAbsolutePath());
  conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  DomainSocket.disableBindPathValidation();
  // Skip entirely when native domain-socket support failed to load.
  Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
  return conf;
}
开发者ID:naver,项目名称:hadoop,代码行数:17,代码来源:TestShortCircuitCache.java
示例8: DomainSocketFactory
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
public DomainSocketFactory(ShortCircuitConf conf) {
  // Determine which (if any) feature wants a domain socket.
  String feature = null;
  if (conf.isShortCircuitLocalReads() && !conf.isUseLegacyBlockReaderLocal()) {
    feature = "The short-circuit local reads feature";
  } else if (conf.isDomainSocketDataTraffic()) {
    feature = "UNIX domain socket data traffic";
  }
  if (feature == null) {
    PerformanceAdvisory.LOG.debug(
        "Both short-circuit local reads and UNIX domain socket are disabled.");
    return;
  }
  // A feature is enabled: it needs both a configured path and the native
  // DomainSocket code to have loaded successfully.
  if (conf.getDomainSocketPath().isEmpty()) {
    throw new HadoopIllegalArgumentException(feature + " is enabled but "
        + HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
  }
  if (DomainSocket.getLoadingFailureReason() != null) {
    LOG.warn(feature + " cannot be used because "
        + DomainSocket.getLoadingFailureReason());
  } else {
    LOG.debug(feature + " is enabled.");
  }
}
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:26,代码来源:DomainSocketFactory.java
示例9: getPathInfo
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
/**
 * Get information about a domain socket path.
 *
 * @param addr The inet address to use.
 * @param conf The client configuration.
 *
 * @return Information about the socket path.
 */
public PathInfo getPathInfo(InetSocketAddress addr, ShortCircuitConf conf) {
  // Domain sockets require a configured path.
  if (conf.getDomainSocketPath().isEmpty()) {
    return PathInfo.NOT_CONFIGURED;
  }
  // If neither data traffic nor (non-legacy) short-circuit reads would use
  // the socket, don't create it.
  final boolean shortCircuitUsable =
      conf.isShortCircuitLocalReads() && !conf.isUseLegacyBlockReaderLocal();
  if (!conf.isDomainSocketDataTraffic() && !shortCircuitUsable) {
    return PathInfo.NOT_CONFIGURED;
  }
  // If the DomainSocket code is not loaded, we can't create DomainSocket
  // objects.
  if (DomainSocket.getLoadingFailureReason() != null) {
    return PathInfo.NOT_CONFIGURED;
  }
  // UNIX domain sockets can only be used to talk to local peers.
  if (!DFSUtilClient.isLocalAddress(addr)) {
    return PathInfo.NOT_CONFIGURED;
  }
  final String effectivePath = DomainSocket.getEffectivePath(
      conf.getDomainSocketPath(), addr.getPort());
  final PathState cachedState = pathMap.getIfPresent(effectivePath);
  return (cachedState == null)
      ? new PathInfo(effectivePath, PathState.VALID)
      : new PathInfo(effectivePath, cachedState);
}
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:34,代码来源:DomainSocketFactory.java
示例10: createShortCircuitConf
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
/** Build a Configuration enabling short-circuit reads for the given test. */
private static Configuration createShortCircuitConf(String testName,
    TemporarySocketDirectory sockDir) {
  final Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_CONTEXT, testName);
  conf.setLong(DFS_BLOCK_SIZE_KEY, 4096);
  final File sockPath = new File(sockDir.getDir(), testName);
  conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, sockPath.getAbsolutePath());
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
      false);
  conf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  DomainSocket.disableBindPathValidation();
  // Skip entirely when native domain-socket support failed to load.
  Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
  return conf;
}
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:17,代码来源:TestShortCircuitCache.java
示例11: testSkipWithLocalBlockReader
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
@Test(timeout=60000)
public void testSkipWithLocalBlockReader() throws IOException {
  // Requires native domain-socket support; skip otherwise.
  Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
  final TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  DomainSocket.disableBindPathValidation();
  final Configuration conf = new Configuration();
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
  final File sockFile = new File(sockDir.getDir(),
      "TestShortCircuitLocalRead._PORT.sock");
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      sockFile.getAbsolutePath());
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    DFSInputStream.tcpReadsDisabledForTesting = true;
    testSkipInner(cluster);
  } finally {
    // Restore the static flag and release cluster/socket-dir resources.
    DFSInputStream.tcpReadsDisabledForTesting = false;
    cluster.shutdown();
    sockDir.close();
  }
}
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:21,代码来源:TestDFSInputStream.java
示例12: sendShmSuccessResponse
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
/**
 * Write a SUCCESS shared-memory response, then pass the segment's file
 * descriptor to the client over the UNIX domain socket.
 */
private void sendShmSuccessResponse(DomainSocket sock, NewShmInfo shmInfo)
    throws IOException {
  DataNodeFaultInjector.get().sendShortCircuitShmResponse();
  ShortCircuitShmResponseProto.newBuilder().setStatus(SUCCESS).
      setId(PBHelper.convert(shmInfo.shmId)).build().
      writeDelimitedTo(socketOut);
  // Send the file descriptor for the shared memory segment, accompanied
  // by a single zero byte of regular payload.
  final byte[] payload = { (byte) 0 };
  final FileDescriptor[] fds = { shmInfo.stream.getFD() };
  sock.sendFileDescriptors(fds, payload, 0, payload.length);
}
开发者ID:naver,项目名称:hadoop,代码行数:13,代码来源:DataXceiver.java
示例13: getDomainPeerServer
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
/**
 * Create the domain peer server for local (UNIX domain socket) data
 * transfer, or return null when no socket path is configured.
 */
static DomainPeerServer getDomainPeerServer(Configuration conf,
    int port) throws IOException {
  final String domainSocketPath =
      conf.getTrimmed(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
  if (domainSocketPath.isEmpty()) {
    // Warn when (non-legacy) short-circuit reads are on but no socket
    // path was configured, since that silently disables the feature.
    if (conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
        DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT)) {
      if (!conf.getBoolean(
          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT)) {
        LOG.warn("Although short-circuit local reads are configured, " +
            "they are disabled because you didn't configure " +
            DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
      }
    }
    return null;
  }
  if (DomainSocket.getLoadingFailureReason() != null) {
    throw new RuntimeException("Although a UNIX domain socket " +
        "path is configured as " + domainSocketPath + ", we cannot " +
        "start a localDataXceiverServer because " +
        DomainSocket.getLoadingFailureReason());
  }
  final DomainPeerServer server =
      new DomainPeerServer(domainSocketPath, port);
  server.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  return server;
}
开发者ID:naver,项目名称:hadoop,代码行数:29,代码来源:DataNode.java
示例14: handle
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
/**
 * Handle closure of the UNIX domain socket associated with this registered
 * shared-memory segment by removing the segment from the registry.
 */
@Override
public boolean handle(DomainSocket sock) {
  // NOTE(review): the registry lock is taken before the segment lock here;
  // presumably this matches the lock order used elsewhere -- confirm before
  // changing.
  synchronized (registry) {
    synchronized (this) {
      registry.removeShm(this);
    }
  }
  return true;
}
开发者ID:naver,项目名称:hadoop,代码行数:10,代码来源:ShortCircuitRegistry.java
示例15: createNewMemorySegment
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
/**
 * Handle a DFSClient request to create a new memory segment.
 *
 * @param clientName Client name as reported by the client.
 * @param sock The DomainSocket to associate with this memory
 *             segment. When this socket is closed, or the
 *             other side writes anything to the socket, the
 *             segment will be closed. This can happen at any
 *             time, including right after this function returns.
 * @return A NewShmInfo object. The caller must close the
 *         NewShmInfo object once they are done with it.
 * @throws IOException If the new memory segment could not be created.
 */
public NewShmInfo createNewMemorySegment(String clientName,
    DomainSocket sock) throws IOException {
  NewShmInfo info = null;
  RegisteredShm shm = null;
  ShmId shmId = null;
  synchronized (this) {
    if (!enabled) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("createNewMemorySegment: ShortCircuitRegistry is " +
            "not enabled.");
      }
      throw new UnsupportedOperationException();
    }
    FileInputStream fis = null;
    try {
      // Pick a random id not already used by an existing segment.
      do {
        shmId = ShmId.createRandom();
      } while (segments.containsKey(shmId));
      fis = shmFactory.createDescriptor(clientName, SHM_LENGTH);
      shm = new RegisteredShm(clientName, shmId, fis, this);
    } finally {
      // If RegisteredShm construction failed, close the descriptor so it
      // does not leak; on success, the stream is handed on via NewShmInfo.
      if (shm == null) {
        IOUtils.closeQuietly(fis);
      }
    }
    info = new NewShmInfo(shmId, fis);
    segments.put(shmId, shm);
  }
  // Drop the registry lock to prevent deadlock.
  // After this point, RegisteredShm#handle may be called at any time.
  watcher.add(sock, shm);
  if (LOG.isTraceEnabled()) {
    LOG.trace("createNewMemorySegment: created " + info.shmId);
  }
  return info;
}
开发者ID:naver,项目名称:hadoop,代码行数:50,代码来源:ShortCircuitRegistry.java
示例16: init
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
@BeforeClass
public static void init() {
  sockDir = new TemporarySocketDirectory();
  DomainSocket.disableBindPathValidation();
  // Remember the current cache manipulator (see prevCacheManipulator),
  // then install a stub whose mlock only logs the identifier.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  final CacheManipulator loggingMlock = new CacheManipulator() {
    @Override
    public void mlock(String identifier,
        ByteBuffer mmap, long length) throws IOException {
      LOG.info("mlocking " + identifier);
    }
  };
  NativeIO.POSIX.setCacheManipulator(loggingMlock);
}
开发者ID:naver,项目名称:hadoop,代码行数:14,代码来源:TestEnhancedByteBufferAccess.java
示例17: ShortCircuitTestContext
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
public ShortCircuitTestContext(String testName) {
  this.testName = testName;
  this.sockDir = new TemporarySocketDirectory();
  DomainSocket.disableBindPathValidation();
  // Record the prior value of the static flag so it can be put back later.
  this.formerTcpReadsDisabled = DFSInputStream.tcpReadsDisabledForTesting;
  // Skip when native domain-socket support is unavailable.
  Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
开发者ID:naver,项目名称:hadoop,代码行数:8,代码来源:DFSTestUtil.java
示例18: setupCluster
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
@BeforeClass
static public void setupCluster() throws Exception {
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final HdfsConfiguration conf = new HdfsConfiguration();
  // Exercise the legacy local block reader: empty socket path, legacy
  // reader on, domain-socket data traffic off.
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, "");
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  // Grant the current user local block path access for the legacy reader.
  conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
      UserGroupInformation.getCurrentUser().getShortUserName());
  DomainSocket.disableBindPathValidation();
  setupCluster(1, conf);
}
开发者ID:naver,项目名称:hadoop,代码行数:16,代码来源:TestParallelShortCircuitLegacyRead.java
示例19: setupCluster
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
@BeforeClass
static public void setupCluster() throws Exception {
  // Bail out quietly when native domain-socket support is unavailable.
  if (DomainSocket.getLoadingFailureReason() != null) {
    return;
  }
  sockDir = new TemporarySocketDirectory();
  final HdfsConfiguration conf = new HdfsConfiguration();
  final File sockFile = new File(sockDir.getDir(),
      "TestParallelShortCircuitReadUnCached._PORT.sock");
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      sockFile.getAbsolutePath());
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  // Enabling data transfer encryption should have no effect when using
  // short-circuit local reads. This is a regression test for HDFS-5353.
  conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.setBoolean(
      DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
  // We want to test reading from stale sockets.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
      5 * 60 * 1000);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);
  // Avoid using the FileInputStreamCache.
  conf.setInt(
      DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_KEY, 0);
  DomainSocket.disableBindPathValidation();
  DFSInputStream.tcpReadsDisabledForTesting = true;
  setupCluster(1, conf);
}
开发者ID:naver,项目名称:hadoop,代码行数:30,代码来源:TestParallelShortCircuitReadUnCached.java
示例20: getDomainSocket
import org.apache.hadoop.net.unix.DomainSocket; //导入依赖的package包/类
@Override
public DomainSocket getDomainSocket() {
  if (!hasDomain) {
    return null;
  }
  // Return a mock which throws an exception whenever any function is
  // called.
  final Answer<Object> alwaysThrow = new Answer<Object>() {
    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
      throw new RuntimeException("injected fault.");
    }
  };
  return Mockito.mock(DomainSocket.class, alwaysThrow);
}
开发者ID:naver,项目名称:hadoop,代码行数:14,代码来源:TestPeerCache.java
注:本文中的org.apache.hadoop.net.unix.DomainSocket类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论