This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.ExceptionUtil. If you have been wondering what ExceptionUtil is for, how to use it, or what working examples look like, the selected code samples below should help.
The ExceptionUtil class belongs to the org.apache.hadoop.hbase.util package. The sections below present 20 code examples of the class, sorted by popularity by default.
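Before looking at the individual examples, here is a minimal sketch of the pattern they all share: ExceptionUtil detects interrupt-related failures (isInterrupt), normalizes them into an InterruptedIOException (asInterrupt), or rethrows them immediately (rethrowIfInterrupt). The snippet is illustrative only; doSomeHBaseCall() is a hypothetical placeholder for any HBase client call, and the method signatures used are the ones that appear in the examples below.
import java.io.IOException;
import java.io.InterruptedIOException;
import org.apache.hadoop.hbase.util.ExceptionUtil;

public class ExceptionUtilSketch {

  // Hypothetical placeholder for an HBase client call that may fail.
  static void doSomeHBaseCall() throws IOException {
    throw new InterruptedIOException("simulated interrupt");
  }

  public static void main(String[] args) throws IOException {
    try {
      doSomeHBaseCall();
    } catch (IOException e) {
      if (ExceptionUtil.isInterrupt(e)) {
        // Normalize interrupt-flavored failures so callers can tell them
        // apart from ordinary I/O errors.
        throw ExceptionUtil.asInterrupt(e);
      }
      // Convenience form: rethrows immediately when the cause is an interrupt,
      // otherwise does nothing and normal handling continues.
      ExceptionUtil.rethrowIfInterrupt(e);
      throw e;
    }
  }
}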
Example 1: callWithoutRetries
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Call the server once only.
* {@link RetryingCallable} has a strange shape so we can do retries. Use this invocation if you
* want to do a single call only (A call to {@link RetryingCallable#call(int)} will not likely
* succeed).
* @return an object of type T
* @throws IOException if a remote or network exception occurs
* @throws RuntimeException other unspecified error
*/
public T callWithoutRetries(RetryingCallable<T> callable, int callTimeout)
throws IOException, RuntimeException {
// The code of this method should be shared with withRetries.
this.globalStartTime = EnvironmentEdgeManager.currentTime();
try {
callable.prepare(false);
return callable.call(callTimeout);
} catch (Throwable t) {
Throwable t2 = translateException(t);
ExceptionUtil.rethrowIfInterrupt(t2);
// It would be nice to clear the location cache here.
if (t2 instanceof IOException) {
throw (IOException)t2;
} else {
throw new RuntimeException(t2);
}
}
}
Developer: fengchen8086, Project: ditb, Lines: 28, Source: RpcRetryingCaller.java
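The RetryingCallable passed in above is an interface whose shape can be read off the examples in this article: prepare, call, throwable, sleep and getExceptionMessageAdditionalDetail. The sketch below shows what a minimal HBase 1.x-era implementation might look like; it is a hedged illustration rather than a reference — exact signatures differ between HBase versions, and the body of call() simply returns a literal where a real callable would issue its RPC.
// Minimal, illustrative RetryingCallable for the HBase 1.x-era API shown above
// (signatures inferred from the examples in this article; they vary by version).
RetryingCallable<String> callable = new RetryingCallable<String>() {
  @Override
  public void prepare(boolean reload) throws IOException {
    // Locate the region/server here; 'reload' requests a fresh lookup after a failure.
  }

  @Override
  public String call(int callTimeout) throws Exception {
    // A real callable would issue its RPC here, bounded by callTimeout.
    return "result";
  }

  @Override
  public void throwable(Throwable t, boolean retrying) {
    // Inspect the failure, e.g. to clear cached locations before a retry.
  }

  @Override
  public String getExceptionMessageAdditionalDetail() {
    return "example callable"; // extra context appended to timeout messages
  }

  @Override
  public long sleep(long pause, int tries) {
    return pause * (tries + 1); // simple linear back-off, for illustration only
  }
};
// With a caller obtained from RpcRetryingCallerFactory (construction omitted),
// the single-shot path is then: caller.callWithoutRetries(callable, 60000);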
Example 2: handleConnectionFailure
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Handle connection failures
*
* If the current number of retries is equal to the max number of retries,
* stop retrying and throw the exception; Otherwise backoff N seconds and
* try connecting again.
*
* This Method is only called from inside setupIOstreams(), which is
* synchronized. Hence the sleep is synchronized; the locks will be retained.
*
* @param curRetries current number of retries
* @param maxRetries max number of retries allowed
* @param ioe failure reason
* @throws IOException if max number of retries is reached
*/
private void handleConnectionFailure(int curRetries, int maxRetries, IOException ioe)
throws IOException {
closeConnection();
// throw the exception if the maximum number of retries is reached
if (curRetries >= maxRetries || ExceptionUtil.isInterrupt(ioe)) {
throw ioe;
}
// otherwise back off and retry
try {
Thread.sleep(failureSleep);
} catch (InterruptedException ie) {
ExceptionUtil.rethrowIfInterrupt(ie);
}
LOG.info("Retrying connect to server: " + remoteId.getAddress() +
" after sleeping " + failureSleep + "ms. Already tried " + curRetries +
" time(s).");
}
Developer: fengchen8086, Project: ditb, Lines: 36, Source: RpcClientImpl.java
Example 3: callWithoutRetries
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Call the server once only.
* {@link RetryingCallable} has a strange shape so we can do retries. Use this invocation if you
* want to do a single call only (A call to {@link RetryingCallable#call(int)} will not likely
* succeed).
* @return an object of type T
* @throws IOException if a remote or network exception occurs
* @throws RuntimeException other unspecified error
*/
public T callWithoutRetries(RetryingCallable<T> callable, int callTimeout)
throws IOException, RuntimeException {
// The code of this method should be shared with withRetries.
this.globalStartTime = EnvironmentEdgeManager.currentTime();
try {
callable.prepare(false); // preparation for the call
return callable.call(callTimeout); // the actual call
} catch (Throwable t) {
Throwable t2 = translateException(t);
ExceptionUtil.rethrowIfInterrupt(t2);
// It would be nice to clear the location cache here.
if (t2 instanceof IOException) {
throw (IOException)t2;
} else {
throw new RuntimeException(t2);
}
}
}
Developer: grokcoder, Project: pbase, Lines: 28, Source: RpcRetryingCaller.java
Example 4: callWithoutRetries
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Call the server once only.
* {@link RetryingCallable} has a strange shape so we can do retries. Use this invocation if you
* want to do a single call only (A call to {@link RetryingCallable#call()} will not likely
* succeed).
* @return an object of type T
* @throws IOException if a remote or network exception occurs
* @throws RuntimeException other unspecified error
*/
public T callWithoutRetries(RetryingCallable<T> callable)
throws IOException, RuntimeException {
// The code of this method should be shared with withRetries.
this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis();
try {
beforeCall();
callable.prepare(false);
return callable.call();
} catch (Throwable t) {
Throwable t2 = translateException(t);
ExceptionUtil.rethrowIfInterrupt(t2);
// It would be nice to clear the location cache here.
if (t2 instanceof IOException) {
throw (IOException)t2;
} else {
throw new RuntimeException(t2);
}
} finally {
afterCall();
}
}
Developer: tenggyut, Project: HIndex, Lines: 31, Source: RpcRetryingCaller.java
Example 5: callWithoutRetries
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
@Override
public T callWithoutRetries(RetryingCallable<T> callable, int callTimeout)
throws IOException, RuntimeException {
// The code of this method should be shared with withRetries.
try {
callable.prepare(false);
return callable.call(callTimeout);
} catch (Throwable t) {
Throwable t2 = translateException(t);
ExceptionUtil.rethrowIfInterrupt(t2);
// It would be nice to clear the location cache here.
if (t2 instanceof IOException) {
throw (IOException)t2;
} else {
throw new RuntimeException(t2);
}
}
}
Developer: apache, Project: hbase, Lines: 19, Source: RpcRetryingCallerImpl.java
Example 6: handleConnectionFailure
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Handle connection failures If the current number of retries is equal to the max number of
* retries, stop retrying and throw the exception; Otherwise backoff N seconds and try connecting
* again. This Method is only called from inside setupIOstreams(), which is synchronized. Hence
* the sleep is synchronized; the locks will be retained.
* @param curRetries current number of retries
* @param maxRetries max number of retries allowed
* @param ioe failure reason
* @throws IOException if max number of retries is reached
*/
private void handleConnectionFailure(int curRetries, int maxRetries, IOException ioe)
throws IOException {
closeSocket();
// throw the exception if the maximum number of retries is reached
if (curRetries >= maxRetries || ExceptionUtil.isInterrupt(ioe)) {
throw ioe;
}
// otherwise back off and retry
try {
Thread.sleep(this.rpcClient.failureSleep);
} catch (InterruptedException ie) {
ExceptionUtil.rethrowIfInterrupt(ie);
}
if (LOG.isInfoEnabled()) {
LOG.info("Retrying connect to server: " + remoteId.getAddress() +
" after sleeping " + this.rpcClient.failureSleep + "ms. Already tried " + curRetries +
" time(s).");
}
}
Developer: apache, Project: hbase, Lines: 33, Source: BlockingRpcConnection.java
Example 7: callWithoutRetries
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Call the server once only.
* {@link RetryingCallable} has a strange shape so we can do retries. Use this invocation if you
* want to do a single call only (A call to {@link RetryingCallable#call(int)} will not likely
* succeed).
* @return an object of type T
* @throws IOException if a remote or network exception occurs
* @throws RuntimeException other unspecified error
*/
public T callWithoutRetries(RetryingCallable<T> callable, int callTimeout)
throws IOException, RuntimeException {
// The code of this method should be shared with withRetries.
this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis();
try {
callable.prepare(false);
return callable.call(callTimeout);
} catch (Throwable t) {
Throwable t2 = translateException(t);
ExceptionUtil.rethrowIfInterrupt(t2);
// It would be nice to clear the location cache here.
if (t2 instanceof IOException) {
throw (IOException)t2;
} else {
throw new RuntimeException(t2);
}
}
}
Developer: shenli-uiuc, Project: PyroDB, Lines: 28, Source: RpcRetryingCaller.java
Example 8: run
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
@Override
public void run() {
try {
LOG.info("SplitLogWorker " + server.getServerName() + " starting");
coordination.registerListener();
// wait for Coordination Engine is ready
boolean res = false;
while (!res && !coordination.isStop()) {
res = coordination.isReady();
}
if (!coordination.isStop()) {
coordination.taskLoop();
}
} catch (Throwable t) {
if (ExceptionUtil.isInterrupt(t)) {
LOG.info("SplitLogWorker interrupted. Exiting. " + (coordination.isStop() ? "" :
" (ERROR: exitWorker is not set, exiting anyway)"));
} else {
// only a logical error can get us here. Printing it out
// to make debugging easier
LOG.error("unexpected error ", t);
}
} finally {
coordination.removeListener();
LOG.info("SplitLogWorker " + server.getServerName() + " exiting");
}
}
Developer: fengchen8086, Project: ditb, Lines: 28, Source: SplitLogWorker.java
Example 9: getRemoteException
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Return the IOException thrown by the remote server wrapped in
* ServiceException as cause.
*
* @param se ServiceException that wraps IO exception thrown by the server
* @return Exception wrapped in ServiceException or
* a new IOException that wraps the unexpected ServiceException.
*/
public static IOException getRemoteException(ServiceException se) {
Throwable e = se.getCause();
if (e == null) {
return new IOException(se);
}
if (ExceptionUtil.isInterrupt(e)) {
return ExceptionUtil.asInterrupt(e);
}
if (e instanceof RemoteException) {
e = ((RemoteException) e).unwrapRemoteException();
}
return e instanceof IOException ? (IOException) e : new IOException(se);
}
Developer: fengchen8086, Project: ditb, Lines: 22, Source: ProtobufUtil.java
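A typical call site for this helper looks like the fragment below. It is a sketch only: someRpcCall, controller and request are hypothetical placeholders for a protobuf-generated blocking stub method and its arguments; the point is that the ServiceException thrown by the stub is unwrapped back into the IOException the server actually raised.
// Illustrative fragment: 'stub', 'controller' and 'request' are hypothetical placeholders.
try {
  return stub.someRpcCall(controller, request);
} catch (ServiceException se) {
  // Unwrap the server-side IOException (or surface an interrupt) rather than
  // leaking the protobuf ServiceException to callers.
  throw ProtobufUtil.getRemoteException(se);
}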
Example 10: makeStubNoRetries
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Create a stub. Try once only. It is not typed because there is no common type to
* protobuf services nor their interfaces. Let the caller do appropriate casting.
* @return A stub for master services.
* @throws IOException
* @throws KeeperException
* @throws ServiceException
*/
private Object makeStubNoRetries() throws IOException, KeeperException, ServiceException {
ZooKeeperKeepAliveConnection zkw;
try {
zkw = getKeepAliveZooKeeperWatcher();
} catch (IOException e) {
ExceptionUtil.rethrowIfInterrupt(e);
throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
}
try {
checkIfBaseNodeAvailable(zkw);
ServerName sn = MasterAddressTracker.getMasterAddress(zkw);
if (sn == null) {
String msg = "ZooKeeper available but no active master location found";
LOG.info(msg);
throw new MasterNotRunningException(msg);
}
if (isDeadServer(sn)) {
throw new MasterNotRunningException(sn + " is dead.");
}
// Use the security info interface name as our stub key
String key = getStubKey(getServiceName(),
sn.getHostname(), sn.getPort(), hostnamesCanChange);
connectionLock.putIfAbsent(key, key);
Object stub = null;
synchronized (connectionLock.get(key)) {
stub = stubs.get(key);
if (stub == null) {
BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout);
stub = makeStub(channel);
isMasterRunning();
stubs.put(key, stub);
}
}
return stub;
} finally {
zkw.close();
}
}
Developer: fengchen8086, Project: ditb, Lines: 47, Source: ConnectionManager.java
Example 11: setFailed
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Set failed
*
* @param exception to set
*/
public void setFailed(IOException exception) {
if (ExceptionUtil.isInterrupt(exception)) {
exception = ExceptionUtil.asInterrupt(exception);
}
if (exception instanceof RemoteException) {
exception = ((RemoteException) exception).unwrapRemoteException();
}
this.setFailure(exception);
}
Developer: fengchen8086, Project: ditb, Lines: 16, Source: AsyncCall.java
Example 12: makeStubNoRetries
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Create a stub. Try once only. It is not typed because there is no common type to
* protobuf services nor their interfaces. Let the caller do appropriate casting.
*
* @return A stub for master services.
* @throws IOException
* @throws KeeperException
* @throws ServiceException
*/
private Object makeStubNoRetries() throws IOException, KeeperException, ServiceException {
ZooKeeperKeepAliveConnection zkw;
try {
zkw = getKeepAliveZooKeeperWatcher();
} catch (IOException e) {
ExceptionUtil.rethrowIfInterrupt(e);
throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
}
try {
checkIfBaseNodeAvailable(zkw);
ServerName sn = MasterAddressTracker.getMasterAddress(zkw);
if (sn == null) {
String msg = "ZooKeeper available but no active master location found";
LOG.info(msg);
throw new MasterNotRunningException(msg);
}
if (isDeadServer(sn)) {
throw new MasterNotRunningException(sn + " is dead.");
}
// Use the security info interface name as our stub key
String key = getStubKey(getServiceName(), sn.getHostAndPort());
connectionLock.putIfAbsent(key, key);
Object stub = null;
synchronized (connectionLock.get(key)) {
stub = stubs.get(key);
if (stub == null) {
BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout);
stub = makeStub(channel);
isMasterRunning();
stubs.put(key, stub);
}
}
return stub;
} finally {
zkw.close();
}
}
Developer: grokcoder, Project: pbase, Lines: 47, Source: ConnectionManager.java
Example 13: makeStubNoRetries
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Create a stub. Try once only. It is not typed because there is no common type to
* protobuf services nor their interfaces. Let the caller do appropriate casting.
* @return A stub for master services.
* @throws IOException
* @throws KeeperException
* @throws ServiceException
*/
private Object makeStubNoRetries() throws IOException, KeeperException, ServiceException {
ZooKeeperKeepAliveConnection zkw;
try {
zkw = getKeepAliveZooKeeperWatcher();
} catch (IOException e) {
ExceptionUtil.rethrowIfInterrupt(e);
throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
}
try {
checkIfBaseNodeAvailable(zkw);
ServerName sn = MasterAddressTracker.getMasterAddress(zkw);
if (sn == null) {
String msg = "ZooKeeper available but no active master location found";
LOG.info(msg);
throw new MasterNotRunningException(msg);
}
if (isDeadServer(sn)) {
throw new MasterNotRunningException(sn + " is dead.");
}
// Use the security info interface name as our stub key
String key = getStubKey(getServiceName(), sn.getHostAndPort());
connectionLock.putIfAbsent(key, key);
Object stub = null;
synchronized (connectionLock.get(key)) {
stub = stubs.get(key);
if (stub == null) {
BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn,
user, rpcTimeout);
stub = makeStub(channel);
isMasterRunning();
stubs.put(key, stub);
}
}
return stub;
} finally {
zkw.close();
}
}
Developer: tenggyut, Project: HIndex, Lines: 47, Source: HConnectionManager.java
Example 14: getServiceException
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Return the Exception thrown by the remote server wrapped in
* ServiceException as cause. RemoteException are left untouched.
*
* @param e ServiceException that wraps IO exception thrown by the server
* @return Exception wrapped in ServiceException.
*/
public static IOException getServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {
Throwable t = e.getCause();
if (ExceptionUtil.isInterrupt(t)) {
return ExceptionUtil.asInterrupt(t);
}
return t instanceof IOException ? (IOException) t : new HBaseIOException(t);
}
Developer: apache, Project: hbase, Lines: 15, Source: ProtobufUtil.java
Example 15: makeIOExceptionOfException
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
private static IOException makeIOExceptionOfException(Exception e) {
Throwable t = e;
if (e instanceof ServiceException ||
e instanceof org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) {
t = e.getCause();
}
if (ExceptionUtil.isInterrupt(t)) {
return ExceptionUtil.asInterrupt(t);
}
if (t instanceof RemoteException) {
t = ((RemoteException)t).unwrapRemoteException();
}
return t instanceof IOException? (IOException)t: new HBaseIOException(t);
}
Developer: apache, Project: hbase, Lines: 15, Source: ProtobufUtil.java
Example 16: makeIOExceptionOfException
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
private static IOException makeIOExceptionOfException(Exception e) {
Throwable t = e;
if (e instanceof ServiceException) {
t = e.getCause();
}
if (ExceptionUtil.isInterrupt(t)) {
return ExceptionUtil.asInterrupt(t);
}
if (t instanceof RemoteException) {
t = ((RemoteException)t).unwrapRemoteException();
}
return t instanceof IOException? (IOException)t: new HBaseIOException(t);
}
Developer: apache, Project: hbase, Lines: 14, Source: ProtobufUtil.java
Example 17: makeStubNoRetries
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Create a stub. Try once only. It is not typed because there is no common type to
* protobuf services nor their interfaces. Let the caller do appropriate casting.
* @return A stub for master services.
* @throws IOException
* @throws KeeperException
* @throws ServiceException
*/
private Object makeStubNoRetries() throws IOException, KeeperException, ServiceException {
ZooKeeperKeepAliveConnection zkw;
try {
zkw = getKeepAliveZooKeeperWatcher();
} catch (IOException e) {
ExceptionUtil.rethrowIfInterrupt(e);
throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
}
try {
checkIfBaseNodeAvailable(zkw);
ServerName sn = MasterAddressTracker.getMasterAddress(zkw);
if (sn == null) {
String msg = "ZooKeeper available but no active master location found";
LOG.info(msg);
throw new MasterNotRunningException(msg);
}
if (isDeadServer(sn)) {
throw new MasterNotRunningException(sn + " is dead.");
}
// Use the security info interface name as our stub key
String key = getStubKey(getServiceName(), sn.getHostAndPort());
connectionLock.putIfAbsent(key, key);
Object stub = null;
synchronized (connectionLock.get(key)) {
stub = stubs.get(key);
if (stub == null) {
BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout);
stub = makeStub(channel);
isMasterRunning();
stubs.put(key, stub);
}
}
return stub;
} finally {
zkw.close();
}
}
Developer: shenli-uiuc, Project: PyroDB, Lines: 46, Source: ConnectionManager.java
Example 18: nextScanner
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
@Override
protected boolean nextScanner(int nbRows, final boolean done)
throws IOException {
// Close the previous scanner if it's open
if (this.callable != null) {
this.callable.setClose();
// callWithoutRetries is at this layer. Within the ScannerCallableWithReplicas,
// we do a callWithRetries
this.caller.callWithoutRetries(callable, scannerTimeout);
this.callable = null;
}
// Where to start the next scanner
byte[] localStartKey;
boolean locateTheClosestFrontRow = true;
// if we're at start of table, close and return false to stop iterating
if (this.currentRegion != null) {
byte[] startKey = this.currentRegion.getStartKey();
if (startKey == null
|| Bytes.equals(startKey, HConstants.EMPTY_BYTE_ARRAY)
|| checkScanStopRow(startKey) || done) {
close();
if (LOG.isDebugEnabled()) {
LOG.debug("Finished " + this.currentRegion);
}
return false;
}
localStartKey = startKey;
if (LOG.isDebugEnabled()) {
LOG.debug("Finished " + this.currentRegion);
}
} else {
localStartKey = this.scan.getStartRow();
if (!Bytes.equals(localStartKey, HConstants.EMPTY_BYTE_ARRAY)) {
locateTheClosestFrontRow = false;
}
}
if (LOG.isDebugEnabled() && this.currentRegion != null) {
// Only worth logging if NOT first region in scan.
LOG.debug("Advancing internal scanner to startKey at '"
+ Bytes.toStringBinary(localStartKey) + "'");
}
try {
// In reversed scan, we want to locate the previous region through current
// region's start key. In order to get that previous region, first we
// create a closest row before the start key of current region, then
// locate all the regions from the created closest row to start key of
// current region, thus the last one of located regions should be the
// previous region of current region. The related logic of locating
// regions is implemented in ReversedScannerCallable
byte[] locateStartRow = locateTheClosestFrontRow ? createClosestRowBefore(localStartKey)
: null;
callable = getScannerCallable(localStartKey, nbRows, locateStartRow);
// Open a scanner on the region server starting at the
// beginning of the region
// callWithoutRetries is at this layer. Within the ScannerCallableWithReplicas,
// we do a callWithRetries
this.caller.callWithoutRetries(callable, scannerTimeout);
this.currentRegion = callable.getHRegionInfo();
if (this.scanMetrics != null) {
this.scanMetrics.countOfRegions.incrementAndGet();
}
} catch (IOException e) {
ExceptionUtil.rethrowIfInterrupt(e);
close();
throw e;
}
return true;
}
Developer: fengchen8086, Project: ditb, Lines: 71, Source: ReversedClientScanner.java
Example 19: nextScanner
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
@Override
protected boolean nextScanner(int nbRows, final boolean done)
throws IOException {
// Close the previous scanner if it's open
if (this.callable != null) {
this.callable.setClose();
this.caller.callWithRetries(callable);
this.callable = null;
}
// Where to start the next scanner
byte[] localStartKey;
boolean locateTheClosestFrontRow = true;
// if we're at start of table, close and return false to stop iterating
if (this.currentRegion != null) {
byte[] startKey = this.currentRegion.getStartKey();
if (startKey == null
|| Bytes.equals(startKey, HConstants.EMPTY_BYTE_ARRAY)
|| checkScanStopRow(startKey) || done) {
close();
if (LOG.isDebugEnabled()) {
LOG.debug("Finished " + this.currentRegion);
}
return false;
}
localStartKey = startKey;
if (LOG.isDebugEnabled()) {
LOG.debug("Finished " + this.currentRegion);
}
} else {
localStartKey = this.scan.getStartRow();
if (!Bytes.equals(localStartKey, HConstants.EMPTY_BYTE_ARRAY)) {
locateTheClosestFrontRow = false;
}
}
if (LOG.isDebugEnabled() && this.currentRegion != null) {
// Only worth logging if NOT first region in scan.
LOG.debug("Advancing internal scanner to startKey at '"
+ Bytes.toStringBinary(localStartKey) + "'");
}
try {
// In reversed scan, we want to locate the previous region through current
// region's start key. In order to get that previous region, first we
// create a closest row before the start key of current region, then
// locate all the regions from the created closest row to start key of
// current region, thus the last one of located regions should be the
// previous region of current region. The related logic of locating
// regions is implemented in ReversedScannerCallable
byte[] locateStartRow = locateTheClosestFrontRow ? createClosestRowBefore(localStartKey)
: null;
callable = getScannerCallable(localStartKey, nbRows, locateStartRow);
// Open a scanner on the region server starting at the
// beginning of the region
this.caller.callWithRetries(callable);
this.currentRegion = callable.getHRegionInfo();
if (this.scanMetrics != null) {
this.scanMetrics.countOfRegions.incrementAndGet();
}
} catch (IOException e) {
ExceptionUtil.rethrowIfInterrupt(e);
close();
throw e;
}
return true;
}
Developer: tenggyut, Project: HIndex, Lines: 67, Source: ReversedClientScanner.java
Example 20: callWithRetries
import org.apache.hadoop.hbase.util.ExceptionUtil; // import the required package/class
/**
* Retries if invocation fails.
* @param callTimeout Timeout for this call
* @param callable The {@link RetryingCallable} to run.
* @return an object of type T
* @throws IOException if a remote or network exception occurs
* @throws RuntimeException other unspecified error
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings
(value = "SWL_SLEEP_WITH_LOCK_HELD", justification = "na")
public synchronized T callWithRetries(RetryingCallable<T> callable, int callTimeout)
throws IOException, RuntimeException {
this.callTimeout = callTimeout;
List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions =
new ArrayList<RetriesExhaustedException.ThrowableWithExtraContext>();
this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis();
for (int tries = 0;; tries++) {
long expectedSleep = 0;
try {
beforeCall();
callable.prepare(tries != 0); // if called with false, check table status on ZK
return callable.call();
} catch (Throwable t) {
if (LOG.isTraceEnabled()) {
LOG.trace("Call exception, tries=" + tries + ", retries=" + retries + ", retryTime=" +
(EnvironmentEdgeManager.currentTimeMillis() - this.globalStartTime) + "ms", t);
}
// translateException throws exception when should not retry: i.e. when request is bad.
t = translateException(t);
callable.throwable(t, retries != 1);
RetriesExhaustedException.ThrowableWithExtraContext qt =
new RetriesExhaustedException.ThrowableWithExtraContext(t,
EnvironmentEdgeManager.currentTimeMillis(), toString());
exceptions.add(qt);
ExceptionUtil.rethrowIfInterrupt(t);
if (tries >= retries - 1) {
throw new RetriesExhaustedException(tries, exceptions);
}
// If the server is dead, we need to wait a little before retrying, to give
// a chance to the regions to be
// tries hasn't been bumped up yet so we use "tries + 1" to get right pause time
expectedSleep = callable.sleep(pause, tries + 1);
// If, after the planned sleep, there won't be enough time left, we stop now.
long duration = singleCallDuration(expectedSleep);
if (duration > this.callTimeout) {
String msg = "callTimeout=" + this.callTimeout + ", callDuration=" + duration +
": " + callable.getExceptionMessageAdditionalDetail();
throw (SocketTimeoutException)(new SocketTimeoutException(msg).initCause(t));
}
} finally {
afterCall();
}
try {
Thread.sleep(expectedSleep);
} catch (InterruptedException e) {
throw new InterruptedIOException("Interrupted after " + tries + " tries on " + retries);
}
}
}
Developer: tenggyut, Project: HIndex, Lines: 61, Source: RpcRetryingCaller.java
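The retries count and base pause used by the retrying caller come from client configuration. Below is a short hedged sketch of setting them, assuming the standard HBase client keys "hbase.client.retries.number" and "hbase.client.pause"; the values shown are arbitrary examples, not recommendations.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

Configuration conf = HBaseConfiguration.create();
// Maximum number of retry attempts made by the retrying caller (example value only).
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
// Base pause in milliseconds; the per-attempt sleep is derived from it via back-off (example value only).
conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 100);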
Note: The org.apache.hadoop.hbase.util.ExceptionUtil examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.