
Java Connection Class Code Examples


This article collects typical usage examples of the Java class org.apache.hadoop.ipc.Server.Connection. If you are wondering what the Connection class does, how to use it, or what working examples look like, the curated examples below should help.



The Connection class belongs to the org.apache.hadoop.ipc.Server package. The following sections show 17 code examples of the Connection class, sorted by popularity by default.
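
Before the individual examples, here is a minimal sketch (not taken from any of the projects below) of the pattern most of them share: Connection is an inner class of org.apache.hadoop.ipc.Server, and callers enumerate a running server's open connections and inspect them. The sketch assumes the accessors it uses (getConnections, getServiceClass, getHostAddress, all of which appear in the examples) are visible from the calling package; note that getConnections() returns an array in some Hadoop versions (Examples 2-4) and a List in others (Example 10).

import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.Server.Connection;

// Sketch only: print, for each open connection of a started Server, the fields the
// examples below typically inspect.
final class ConnectionDump {
  static void dump(Server server) {
    for (Connection c : server.getConnections()) {
      System.out.println(c.getHostAddress() + " serviceClass=" + c.getServiceClass());
    }
  }
}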

Example 1: cleanupIdleConnections

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
synchronized private void cleanupIdleConnections(String serverName) {
  long currentTime = System.currentTimeMillis();
  for (int i = 0; i < connectionArray.size();) {
    Connection c = connectionArray.get(i);
    if (c.timedOut(currentTime)) {
      if (Server.LOG.isDebugEnabled()) {
        Server.LOG.debug(serverName + ": disconnecting client "
            + c.getHostAddress());
      }
      closeConnectionWithoutException(c);
      replaceConnectionWithTheLastOne(i);
    } else {
      i++;
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 17, Source: ConnectionSet.java
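
The loop above only advances i when nothing was removed, because replaceConnectionWithTheLastOne(i) frees slot i by moving the last element into it. The helper's body is not shown in this article; the following is a hypothetical sketch of the O(1) swap-remove idiom it presumably implements, assuming connectionArray is a java.util.List.

// Hypothetical sketch, not from hadoop-EAR: remove slot i in O(1) by overwriting it
// with the last element of connectionArray and then dropping the duplicated tail.
private void replaceConnectionWithTheLastOne(int i) {
  int lastIndex = connectionArray.size() - 1;
  if (i < lastIndex) {
    connectionArray.set(i, connectionArray.get(lastIndex));
  }
  connectionArray.remove(lastIndex);
}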


Example 2: callAndVerify

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
/**
 * Make a call from a client and verify if header info is changed in server side
 */
private static void callAndVerify(Server server, InetSocketAddress addr,
    int serviceClass, boolean noChanged) throws IOException{
  Client client = new Client(LongWritable.class, conf);

  call(client, addr, serviceClass, conf);
  Connection connection = server.getConnections()[0];
  int serviceClass2 = connection.getServiceClass();
  assertFalse(noChanged ^ serviceClass == serviceClass2);
  client.stop();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 14, Source: TestIPC.java
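
Since == binds tighter than ^ in Java, the assertion shared by Examples 2, 3, 4 and 10 already parses as assertFalse(noChanged ^ (serviceClass == serviceClass2)): the service class observed on the connection must equal the one the client sent exactly when noChanged is true. A tiny restatement with explicit parentheses, shown only for clarity and not part of TestIPC:

import static org.junit.Assert.assertFalse;

// Not from TestIPC: the same check as Examples 2-4 and 10, with explicit parentheses.
final class ServiceClassAssertion {
  static void verify(boolean noChanged, int sentServiceClass, int observedServiceClass) {
    assertFalse(noChanged ^ (sentServiceClass == observedServiceClass));
  }
}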


Example 3: callAndVerify

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
/**
 * Make a call from a client and verify if header info is changed in server side
 */
private void callAndVerify(Server server, InetSocketAddress addr,
    int serviceClass, boolean noChanged) throws IOException{
  Client client = new Client(LongWritable.class, conf);

  client.call(new LongWritable(RANDOM.nextLong()),
      addr, null, null, MIN_SLEEP_TIME, serviceClass, conf);
  Connection connection = server.getConnections()[0];
  int serviceClass2 = connection.getServiceClass();
  assertFalse(noChanged ^ serviceClass == serviceClass2);
  client.stop();
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: TestIPC.java


Example 4: callAndVerify

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
/**
 * Make a call from a client and verify if header info is changed in server side
 */
private void callAndVerify(Server server, InetSocketAddress addr,
    int serviceClass, boolean noChanged) throws Exception{
  Client client = new Client(LongWritable.class, conf);

  client.call(new LongWritable(RANDOM.nextLong()),
      addr, null, null, MIN_SLEEP_TIME, serviceClass, conf);
  Connection connection = server.getConnections()[0];
  int serviceClass2 = connection.getServiceClass();
  assertFalse(noChanged ^ serviceClass == serviceClass2);
  client.stop();
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 15, Source: TestIPC.java


Example 5: removeConnection

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
void removeConnection(Connection c) {
  if (c == null) {
    return;
  }
  ConnectionBucket bucket = getBucket(c);
  bucket.removeConnection(c);
  rpcMetrics.numOpenConnections.inc(-1);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 9, Source: ConnectionSet.java


Example 6: getBucketIndexFromConnection

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
int getBucketIndexFromConnection(Connection c) {
  String connString = null;
  if (c == null || (connString = c.toString()) == null) {
    return 0;
  }
  int hashCode = Math.abs(connString.hashCode());
  return hashCode % numBuckets;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 9, Source: ConnectionSet.java
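
A general Java caveat about this idiom (not something hadoop-EAR changes): Math.abs(Integer.MIN_VALUE) is still Integer.MIN_VALUE, so for that one hash value the computed index can be negative when numBuckets is not a power of two. A defensive variant, shown only as a sketch, uses Math.floorMod (Java 8+), which always yields a value in [0, numBuckets):

// Sketch only, not from hadoop-EAR: same bucketing, but safe for Integer.MIN_VALUE hashes.
int getBucketIndexFromConnectionSafe(Connection c) {
  String connString = (c == null) ? null : c.toString();
  if (connString == null) {
    return 0;
  }
  return Math.floorMod(connString.hashCode(), numBuckets);
}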


Example 7: cleanConnections

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
synchronized private void cleanConnections() {
  for (Connection c : connectionArray) {
    closeConnectionWithoutException(c);
  }
  numConnections.addAndGet(-connectionArray.size());
  connectionArray.clear();
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 8, Source: ConnectionSet.java


Example 8: isConnectionsClean

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
private synchronized boolean isConnectionsClean() {
  for (Connection c : connectionArray) {
    if (!c.responseQueue.isEmpty()) {
      return false;
    }
  }
  return true;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 9, Source: ConnectionSet.java


Example 9: closeConnectionWithoutException

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
private void closeConnectionWithoutException(Connection c) {
  try {
    c.close();
  } catch (IOException e) {
    if (Server.LOG.isDebugEnabled()) {
      Server.LOG.debug("IOException when closing connection", e);
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 10, Source: ConnectionSet.java


Example 10: callAndVerify

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
/**
 * Make a call from a client and verify if header info is changed in server side
 */
private void callAndVerify(Server server, InetSocketAddress addr,
    int serviceClass, boolean noChanged) throws IOException{
  Client client = new Client(LongWritable.class, conf);

  client.call(new LongWritable(RANDOM.nextLong()),
      addr, null, null, MIN_SLEEP_TIME, serviceClass, conf);
  Connection connection = server.getConnections().get(0);
  int serviceClass2 = connection.getServiceClass();
  assertFalse(noChanged ^ serviceClass == serviceClass2);
  client.stop();
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 15, Source: TestIPC.java


Example 11: doDigestRpc

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
private void doDigestRpc(Server server, TestTokenSecretManager sm)
    throws Exception {
  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  addr = NetUtils.getConnectAddress(server);
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
      .getUserName()));
  Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId, sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);

  TestRpcService proxy = null;
  try {
    proxy = getClient(addr, conf);
    AuthMethod authMethod = convert(
        proxy.getAuthMethod(null, newEmptyRequest()));
    assertEquals(TOKEN, authMethod);
    //QOP must be auth
    assertEquals(expectedQop.saslQop,
                 RPC.getConnectionIdForProxy(proxy).getSaslQop());
    int n = 0;
    for (Connection connection : server.getConnections()) {
      // only qop auth should dispose of the sasl server
      boolean hasServer = (connection.saslServer != null);
      assertTrue("qop:" + expectedQop + " hasServer:" + hasServer,
          (expectedQop == QualityOfProtection.AUTHENTICATION) ^ hasServer);
      n++;
    }
    assertTrue(n > 0);
    proxy.ping(null, newEmptyRequest());
  } finally {
    stop(server, proxy);
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 34, Source: TestSaslRPC.java


Example 12: create

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
@InterfaceAudience.Private
@InterfaceStability.Unstable
public SaslServer create(final Connection connection,
                         final Map<String,?> saslProperties,
                         SecretManager<TokenIdentifier> secretManager
    ) throws IOException, InterruptedException {
  UserGroupInformation ugi = null;
  final CallbackHandler callback;
  switch (authMethod) {
    case TOKEN: {
      callback = new SaslDigestCallbackHandler(secretManager, connection);
      break;
    }
    case KERBEROS: {
      ugi = UserGroupInformation.getCurrentUser();
      if (serverId.isEmpty()) {
        throw new AccessControlException(
            "Kerberos principal name does NOT have the expected "
                + "hostname part: " + ugi.getUserName());
      }
      callback = new SaslGssCallbackHandler();
      break;
    }
    default:
      // we should never be able to get here
      throw new AccessControlException(
          "Server does not support SASL " + authMethod);
  }
  
  final SaslServer saslServer;
  if (ugi != null) {
    saslServer = ugi.doAs(
      new PrivilegedExceptionAction<SaslServer>() {
        @Override
        public SaslServer run() throws SaslException  {
          return saslFactory.createSaslServer(mechanism, protocol, serverId,
              saslProperties, callback);
        }
      });
  } else {
    saslServer = saslFactory.createSaslServer(mechanism, protocol, serverId,
        saslProperties, callback);
  }
  if (saslServer == null) {
    throw new AccessControlException(
        "Unable to find SASL server implementation for " + mechanism);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Created SASL server with mechanism = " + mechanism);
  }
  return saslServer;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 53, Source: SaslRpcServer.java


Example 13: SaslDigestCallbackHandler

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
public SaslDigestCallbackHandler(
    SecretManager<TokenIdentifier> secretManager,
    Server.Connection connection) {
  this.secretManager = secretManager;
  this.connection = connection;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 7, Source: SaslRpcServer.java


Example 14: addConnection

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
void addConnection(Connection c) {
  ConnectionBucket bucket = getBucket(c);
  bucket.addConnection(c);
  rpcMetrics.numOpenConnections.inc(1);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 6, Source: ConnectionSet.java


Example 15: getBucket

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
ConnectionBucket getBucket(Connection c) {
  return getBucket(getBucketIndexFromConnection(c));
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 4, Source: ConnectionSet.java


Example 16: create

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
@InterfaceAudience.Private
@InterfaceStability.Unstable
public SaslServer create(Connection connection,
                         SecretManager<TokenIdentifier> secretManager
    ) throws IOException, InterruptedException {
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  final CallbackHandler callback;
  switch (authMethod) {
    case TOKEN: {
      secretManager.checkAvailableForRead();
      callback = new SaslDigestCallbackHandler(secretManager, connection);
      break;
    }
    case KERBEROS: {
      if (serverId.isEmpty()) {
        throw new AccessControlException(
            "Kerberos principal name does NOT have the expected "
                + "hostname part: " + ugi.getUserName());
      }
      callback = new SaslGssCallbackHandler();
      break;
    }
    default:
      // we should never be able to get here
      throw new AccessControlException(
          "Server does not support SASL " + authMethod);
  }
  
  SaslServer saslServer = ugi.doAs(
      new PrivilegedExceptionAction<SaslServer>() {
        @Override
        public SaslServer run() throws SaslException  {
          return Sasl.createSaslServer(mechanism, protocol, serverId,
              SaslRpcServer.SASL_PROPS, callback);
        }
      });
  if (saslServer == null) {
    throw new AccessControlException(
        "Unable to find SASL server implementation for " + mechanism);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Created SASL server with mechanism = " + mechanism);
  }
  return saslServer;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 46, Source: SaslRpcServer.java


Example 17: create

import org.apache.hadoop.ipc.Server.Connection; // import the required package/class
@InterfaceAudience.Private
@InterfaceStability.Unstable
public SaslServer create(Connection connection,
                         SecretManager<TokenIdentifier> secretManager
    ) throws IOException, InterruptedException {
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  final CallbackHandler callback;
  switch (authMethod) {
    case TOKEN: {
      callback = new SaslDigestCallbackHandler(secretManager, connection);
      break;
    }
    case KERBEROS: {
      if (serverId.isEmpty()) {
        throw new AccessControlException(
            "Kerberos principal name does NOT have the expected "
                + "hostname part: " + ugi.getUserName());
      }
      callback = new SaslGssCallbackHandler();
      break;
    }
    default:
      // we should never be able to get here
      throw new AccessControlException(
          "Server does not support SASL " + authMethod);
  }
  
  SaslServer saslServer = ugi.doAs(
      new PrivilegedExceptionAction<SaslServer>() {
        @Override
        public SaslServer run() throws SaslException  {
          return Sasl.createSaslServer(mechanism, protocol, serverId,
              SaslRpcServer.SASL_PROPS, callback);
        }
      });
  if (saslServer == null) {
    throw new AccessControlException(
        "Unable to find SASL server implementation for " + mechanism);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Created SASL server with mechanism = " + mechanism);
  }
  return saslServer;
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 45, Source: SaslRpcServer.java



Note: The org.apache.hadoop.ipc.Server.Connection class examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or reuse should follow the License of the corresponding project. Do not republish without permission.

