This article collects typical usage examples of the Java class org.apache.hadoop.hbase.ClusterId. If you are wondering what the ClusterId class is for, how to use it, or are looking for concrete usage examples, the selected code samples below may help.
The ClusterId class belongs to the org.apache.hadoop.hbase package. A total of 19 code examples of the ClusterId class are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
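Before the individual examples, the snippet below is a minimal sketch of the core ClusterId API that the examples rely on: the no-argument constructor that generates a fresh identifier, toByteArray() for the serialized form written to the hbase.id file and ZooKeeper, and parseFrom() to recover it. The import location of DeserializationException is an assumption and may differ between HBase versions.

import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.exceptions.DeserializationException; // location may vary by HBase version

public class ClusterIdRoundTrip {
  public static void main(String[] args) throws DeserializationException {
    // A new ClusterId carries a freshly generated unique identifier.
    ClusterId id = new ClusterId();

    // Serialize to the byte form used for the hbase.id file and the ZooKeeper znode.
    byte[] bytes = id.toByteArray();

    // Parse the bytes back and compare the string representations.
    ClusterId parsed = ClusterId.parseFrom(bytes);
    System.out.println(id.toString().equals(parsed.toString())); // expected: true
  }
}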
Example 1: setUp

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
@Before
public void setUp() throws Exception {
  zkw = HBaseTestingUtility.getZooKeeperWatcher(utility);
  String fakeRs1 = ZKUtil.joinZNode(zkw.rsZNode, "hostname1.example.org:1234");
  try {
    ZKClusterId.setClusterId(zkw, new ClusterId());
    rp = ReplicationFactory.getReplicationPeers(zkw, conf, zkw);
    rp.init();
    rt = ReplicationFactory.getReplicationTracker(zkw, rp, conf, zkw, new DummyServer(fakeRs1));
  } catch (Exception e) {
    fail("Exception during test setup: " + e);
  }
  rsRemovedCount = new AtomicInteger(0);
  rsRemovedData = "";
  plChangedCount = new AtomicInteger(0);
  plChangedData = new ArrayList<String>();
  peerRemovedCount = new AtomicInteger(0);
  peerRemovedData = "";
}

Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestReplicationTrackerZKImpl.java
Example 2: setUp

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
@Before
public void setUp() throws Exception {
  zkw = HBaseTestingUtility.getZooKeeperWatcher(utility);
  String fakeRs1 = ZNodePaths.joinZNode(zkw.znodePaths.rsZNode, "hostname1.example.org:1234");
  try {
    ZKClusterId.setClusterId(zkw, new ClusterId());
    rp = ReplicationFactory.getReplicationPeers(zkw, conf);
    rp.init();
    rt = ReplicationFactory.getReplicationTracker(zkw, new DummyServer(fakeRs1),
      new DummyServer(fakeRs1));
  } catch (Exception e) {
    fail("Exception during test setup: " + e);
  }
  rsRemovedCount = new AtomicInteger(0);
  rsRemovedData = "";
}

Developer: apache, Project: hbase, Lines: 17, Source: TestReplicationTrackerZKImpl.java
Example 3: rewriteAsPb

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
/**
 * Rewrites the cluster ID file at <code>p</code> in the protobuf format.
 * @param fs the root directory FileSystem
 * @param rootdir the path to the HBase root directory
 * @param p the path of the existing cluster ID file
 * @param cid the cluster ID to rewrite
 * @throws IOException if the rename, rewrite, or delete fails
 */
private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
    final ClusterId cid)
    throws IOException {
  // Rewrite the file as pb. Move aside the old one first, write new
  // then delete the moved-aside file.
  Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
  if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
  setClusterId(fs, rootdir, cid, 100);
  if (!fs.delete(movedAsideName, false)) {
    throw new IOException("Failed delete of " + movedAsideName);
  }
  LOG.debug("Rewrote the hbase.id file as pb");
}

Developer: fengchen8086, Project: ditb, Lines: 18, Source: FSUtils.java
Example 4: initPeerClusterState

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
private static String initPeerClusterState(String baseZKNode)
    throws IOException, KeeperException {
  // Add a dummy region server and set up the cluster id
  Configuration testConf = new Configuration(conf);
  testConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, baseZKNode);
  ZooKeeperWatcher zkw1 = new ZooKeeperWatcher(testConf, "test1", null);
  String fakeRs = ZKUtil.joinZNode(zkw1.rsZNode, "hostname1.example.org:1234");
  ZKUtil.createWithParents(zkw1, fakeRs);
  ZKClusterId.setClusterId(zkw1, new ClusterId());
  return ZKConfig.getZooKeeperClusterKey(testConf);
}

Developer: fengchen8086, Project: ditb, Lines: 12, Source: TestReplicationStateZKImpl.java
Example 5: initPeerClusterState

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
private static String initPeerClusterState(String baseZKNode)
    throws IOException, KeeperException {
  // Add a dummy region server and set up the cluster id
  Configuration testConf = new Configuration(conf);
  testConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, baseZKNode);
  ZooKeeperWatcher zkw1 = new ZooKeeperWatcher(testConf, "test1", null);
  String fakeRs = ZKUtil.joinZNode(zkw1.rsZNode, "hostname1.example.org:1234");
  ZKUtil.createWithParents(zkw1, fakeRs);
  ZKClusterId.setClusterId(zkw1, new ClusterId());
  return ZKUtil.getZooKeeperClusterKey(testConf);
}

Developer: grokcoder, Project: pbase, Lines: 12, Source: TestReplicationStateZKImpl.java
Example 6: getClusterId

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
public String getClusterId() {
  if (fileSystemManager == null) {
    return "";
  }
  ClusterId id = fileSystemManager.getClusterId();
  if (id == null) {
    return "";
  }
  return id.toString();
}

Developer: tenggyut, Project: HIndex, Lines: 11, Source: HMaster.java
Example 7: setClusterId

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
/**
 * Writes a new unique identifier for this cluster to the "hbase.id" file
 * in the HBase root directory
 * @param fs the root directory FileSystem
 * @param rootdir the path to the HBase root directory
 * @param clusterId the unique identifier to store
 * @param wait how long (in milliseconds) to wait between retries
 * @throws IOException if writing to the FileSystem fails and no positive wait value was given
 */
public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
    int wait) throws IOException {
  while (true) {
    try {
      Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
      FSDataOutputStream s = fs.create(filePath);
      try {
        s.write(clusterId.toByteArray());
      } finally {
        s.close();
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Created cluster ID file at " + filePath.toString() + " with ID: " + clusterId);
      }
      return;
    } catch (IOException ioe) {
      if (wait > 0) {
        LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
          ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
        try {
          Thread.sleep(wait);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          break;
        }
      } else {
        throw ioe;
      }
    }
  }
}

Developer: tenggyut, Project: HIndex, Lines: 41, Source: FSUtils.java
Example 8: readClusterIdZNode

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
public static String readClusterIdZNode(ZooKeeperWatcher watcher)
    throws KeeperException {
  if (ZKUtil.checkExists(watcher, watcher.clusterIdZNode) != -1) {
    byte [] data = ZKUtil.getData(watcher, watcher.clusterIdZNode);
    if (data != null) {
      try {
        return ClusterId.parseFrom(data).toString();
      } catch (DeserializationException e) {
        throw ZKUtil.convert(e);
      }
    }
  }
  return null;
}

Developer: tenggyut, Project: HIndex, Lines: 15, Source: ZKClusterId.java
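Taken together, Example 8 and the ZKClusterId.setClusterId calls in Examples 1, 4, and 5 form a write/read round trip against ZooKeeper. Below is a minimal sketch of that round trip; it assumes a reachable ZooKeeper quorum in the configuration and an HBase version that still exposes the ZooKeeperWatcher class used above (later releases renamed it to ZKWatcher, as in Example 9).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class ZkClusterIdRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Requires a running ZooKeeper quorum, e.g. a mini ZK cluster in tests.
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "cluster-id-demo", null);
    try {
      // Publish a fresh cluster id under the cluster-id znode ...
      ZKClusterId.setClusterId(zkw, new ClusterId());
      // ... and read it back as a string via the helper from Example 8.
      String idFromZk = ZKClusterId.readClusterIdZNode(zkw);
      System.out.println("cluster id in ZooKeeper: " + idFromZk);
    } finally {
      zkw.close();
    }
  }
}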
Example 9: initPeerClusterState

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
private static String initPeerClusterState(String baseZKNode)
    throws IOException, KeeperException {
  // Add a dummy region server and set up the cluster id
  Configuration testConf = new Configuration(conf);
  testConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, baseZKNode);
  ZKWatcher zkw1 = new ZKWatcher(testConf, "test1", null);
  String fakeRs = ZNodePaths.joinZNode(zkw1.znodePaths.rsZNode, "hostname1.example.org:1234");
  ZKUtil.createWithParents(zkw1, fakeRs);
  ZKClusterId.setClusterId(zkw1, new ClusterId());
  return ZKConfig.getZooKeeperClusterKey(testConf);
}

Developer: apache, Project: hbase, Lines: 12, Source: TestReplicationStateZKImpl.java
Example 10: getClusterId

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
private static String getClusterId(byte[] data) throws DeserializationException {
  if (data == null || data.length == 0) {
    return null;
  }
  data = removeMetaData(data);
  return ClusterId.parseFrom(data).toString();
}

Developer: apache, Project: hbase, Lines: 8, Source: ZKAsyncRegistry.java
Example 11: setClusterId

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
/**
 * Writes a new unique identifier for this cluster to the "hbase.id" file
 * in the HBase root directory
 * @param fs the root directory FileSystem
 * @param rootdir the path to the HBase root directory
 * @param clusterId the unique identifier to store
 * @param wait how long (in milliseconds) to wait between retries
 * @throws IOException if writing to the FileSystem fails and no positive wait value was given
 */
public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
    int wait) throws IOException {
  while (true) {
    try {
      Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
      FSDataOutputStream s = fs.create(filePath);
      try {
        s.write(clusterId.toByteArray());
      } finally {
        s.close();
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Created cluster ID file at " + filePath.toString() + " with ID: " + clusterId);
      }
      return;
    } catch (IOException ioe) {
      if (wait > 0) {
        LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
          ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
        try {
          Thread.sleep(wait);
        } catch (InterruptedException e) {
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
      } else {
        throw ioe;
      }
    }
  }
}

Developer: shenli-uiuc, Project: PyroDB, Lines: 40, Source: FSUtils.java
Example 12: setClusterId

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
/**
 * Writes a new unique identifier for this cluster to the "hbase.id" file
 * in the HBase root directory
 * @param fs the root directory FileSystem
 * @param rootdir the path to the HBase root directory
 * @param clusterId the unique identifier to store
 * @param wait how long (in milliseconds) to wait between retries
 * @throws IOException if writing to the FileSystem fails and no positive wait value was given
 */
public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
    int wait) throws IOException {
  while (true) {
    try {
      Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
      FSDataOutputStream s = fs.create(filePath);
      try {
        s.write(clusterId.toByteArray());
      } finally {
        s.close();
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Created cluster ID file at " + filePath.toString() + " with ID: " + clusterId);
      }
      return;
    } catch (IOException ioe) {
      if (wait > 0) {
        LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
          ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
        try {
          Thread.sleep(wait);
        } catch (InterruptedException ie) {
          Thread.interrupted();
          break;
        }
      } else {
        throw ioe;
      }
    }
  }
}

Developer: cloud-software-foundation, Project: c5, Lines: 41, Source: FSUtils.java
Example 13: getClusterId

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
/**
 * Returns the value of the unique cluster ID stored for this HBase instance.
 * @param fs the root directory FileSystem
 * @param rootdir the path to the HBase root directory
 * @return the unique cluster identifier
 * @throws IOException if reading the cluster ID file fails
 */
public static ClusterId getClusterId(FileSystem fs, Path rootdir)
    throws IOException {
  Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
  ClusterId clusterId = null;
  FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
  if (status != null) {
    int len = Ints.checkedCast(status.getLen());
    byte [] content = new byte[len];
    FSDataInputStream in = fs.open(idPath);
    try {
      in.readFully(content);
    } catch (EOFException eof) {
      LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
    } finally{
      in.close();
    }
    try {
      clusterId = ClusterId.parseFrom(content);
    } catch (DeserializationException e) {
      throw new IOException("content=" + Bytes.toString(content), e);
    }
    // If not pb'd, make it so.
    if (!ProtobufUtil.isPBMagicPrefix(content)) rewriteAsPb(fs, rootdir, idPath, clusterId);
    return clusterId;
  } else {
    LOG.warn("Cluster ID file does not exist at " + idPath.toString());
  }
  return clusterId;
}

Developer: daidong, Project: DominoHBase, Lines: 37, Source: FSUtils.java
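Examples 7 and 13 are the write and read sides of the hbase.id file. The sketch below calls them in sequence; the local filesystem and the /tmp root directory are hypothetical choices for illustration only, and the FSUtils signatures are assumed to match the versions shown above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.util.FSUtils;

public class FsClusterIdRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);           // hypothetical: local FS instead of HDFS
    Path rootdir = new Path("/tmp/hbase-rootdir-demo");  // hypothetical root directory
    fs.mkdirs(rootdir);

    // Write hbase.id under the root directory, waiting 100 ms between retries on failure.
    FSUtils.setClusterId(fs, rootdir, new ClusterId(), 100);

    // Read it back; per Example 13 this returns null and logs a warning if the file is missing.
    ClusterId stored = FSUtils.getClusterId(fs, rootdir);
    System.out.println("cluster id on the filesystem: " + stored);
  }
}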
Example 14: rewriteAsPb

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
/**
 * Rewrites the cluster ID file at <code>p</code> in the protobuf format.
 * @param fs the root directory FileSystem
 * @param rootdir the path to the HBase root directory
 * @param p the path of the existing cluster ID file
 * @param cid the cluster ID to rewrite
 * @throws IOException if the rename, rewrite, or delete fails
 */
private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
    final ClusterId cid)
    throws IOException {
  // Rewrite the file as pb. Move aside the old one first, write new
  // then delete the moved-aside file.
  Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
  if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
  setClusterId(fs, rootdir, cid, 100);
  if (!fs.delete(movedAsideName, false)) {
    throw new IOException("Failed delete of " + movedAsideName);
  }
  LOG.debug("Rewrote the hbase.id file as pb");
}

Developer: daidong, Project: DominoHBase, Lines: 18, Source: FSUtils.java
Example 15: getClusterId

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
/**
 * @return The unique identifier generated for this cluster
 */
public ClusterId getClusterId() {
  return clusterId;
}

Developer: fengchen8086, Project: ditb, Lines: 7, Source: MasterFileSystem.java
Example 16: setClusterId

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
/**
 * Writes a new unique identifier for this cluster to the "hbase.id" file
 * in the HBase root directory
 * @param fs the root directory FileSystem
 * @param rootdir the path to the HBase root directory
 * @param clusterId the unique identifier to store
 * @param wait how long (in milliseconds) to wait between retries
 * @throws IOException if writing to the FileSystem fails and no positive wait value was given
 */
public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
    int wait) throws IOException {
  while (true) {
    try {
      Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
      Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY +
        Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME);
      // Write the id file to a temporary location
      FSDataOutputStream s = fs.create(tempIdFile);
      try {
        s.write(clusterId.toByteArray());
        s.close();
        s = null;
        // Move the temporary file to its normal location. Throw an IOE if
        // the rename failed
        if (!fs.rename(tempIdFile, idFile)) {
          throw new IOException("Unable to move temp version file to " + idFile);
        }
      } finally {
        // Attempt to close the stream if still open on the way out
        try {
          if (s != null) s.close();
        } catch (IOException ignore) { }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId);
      }
      return;
    } catch (IOException ioe) {
      if (wait > 0) {
        LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
          ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
        try {
          Thread.sleep(wait);
        } catch (InterruptedException e) {
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
      } else {
        throw ioe;
      }
    }
  }
}

Developer: fengchen8086, Project: ditb, Lines: 53, Source: FSUtils.java
Example 17: setClusterId

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
public static void setClusterId(ZooKeeperWatcher watcher, ClusterId id)
    throws KeeperException {
  ZKUtil.createSetData(watcher, watcher.clusterIdZNode, id.toByteArray());
}

Developer: fengchen8086, Project: ditb, Lines: 5, Source: ZKClusterId.java
Example 18: checkRootDir

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
/**
 * Get the rootdir. Make sure it is wholesome and exists before returning.
 * @param rd the candidate hbase.rootdir path
 * @param c the configuration
 * @param fs the FileSystem holding the root directory
 * @return hbase.rootdir (after checks for existence and bootstrapping if
 * needed, populating the directory with the necessary bootup files).
 * @throws IOException
 */
@SuppressWarnings("deprecation")
private Path checkRootDir(final Path rd, final Configuration c,
    final FileSystem fs)
    throws IOException {
  // If FS is in safe mode wait till out of it.
  FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
  // Filesystem is good. Go ahead and check for hbase.rootdir.
  try {
    if (!fs.exists(rd)) {
      fs.mkdirs(rd);
      // DFS leaves safe mode with 0 DNs when there are 0 blocks.
      // We used to handle this by checking the current DN count and waiting until
      // it is nonzero. With security, the check for datanode count doesn't work --
      // it is a privileged op. So instead we adopt the strategy of the jobtracker
      // and simply retry file creation during bootstrap indefinitely. As soon as
      // there is one datanode it will succeed. Permission problems should have
      // already been caught by mkdirs above.
      FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
        10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
        HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    } else {
      if (!fs.isDirectory(rd)) {
        throw new IllegalArgumentException(rd.toString() + " is not a directory");
      }
      // as above
      FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
        10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
        HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    }
  } catch (DeserializationException de) {
    LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR, de);
    IOException ioe = new IOException();
    ioe.initCause(de);
    throw ioe;
  } catch (IllegalArgumentException iae) {
    LOG.fatal("Please fix invalid configuration for "
      + HConstants.HBASE_DIR + " " + rd.toString(), iae);
    throw iae;
  }
  // Make sure cluster ID exists
  if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
      HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
    FSUtils.setClusterId(fs, rd, new ClusterId(), c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
  }
  clusterId = FSUtils.getClusterId(fs, rd);
  // Make sure the meta region directory exists!
  if (!FSUtils.metaRegionExists(fs, rd)) {
    bootstrap(rd, c);
  } else {
    // Migrate table descriptor files if necessary
    org.apache.hadoop.hbase.util.FSTableDescriptorMigrationToSubdir
      .migrateFSTableDescriptorsIfNecessary(fs, rd);
  }
  // Create tableinfo-s for hbase:meta if not already there.
  // meta table is a system table, so descriptors are predefined,
  // we should get them from registry.
  FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
  fsd.createTableDescriptor(
    new HTableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
  return rd;
}

Developer: grokcoder, Project: pbase, Lines: 75, Source: MasterFileSystem.java
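The cluster-ID portion of checkRootDir boils down to a check-then-create pattern: write hbase.id only if it does not already exist, then read it back. A minimal sketch of just that pattern follows, assuming the FSUtils.checkClusterIdExists/setClusterId/getClusterId signatures used in the example above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.FSUtils;

public class EnsureClusterId {
  /** Creates hbase.id under rootdir only if it is missing, then returns the stored id. */
  public static ClusterId ensureClusterId(FileSystem fs, Path rootdir, Configuration c)
      throws IOException {
    int wait = c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
    if (!FSUtils.checkClusterIdExists(fs, rootdir, wait)) {
      FSUtils.setClusterId(fs, rootdir, new ClusterId(), wait);
    }
    return FSUtils.getClusterId(fs, rootdir);
  }
}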
Example 19: setUpBeforeClass

import org.apache.hadoop.hbase.ClusterId; // import the dependent package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = HBaseConfiguration.create();
  conf.set("replication.replicationsource.implementation",
    ReplicationSourceDummy.class.getCanonicalName());
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY,
    HConstants.REPLICATION_ENABLE_DEFAULT);
  conf.setLong("replication.sleep.before.failover", 2000);
  conf.setInt("replication.source.maxretriesmultiplier", 10);
  utility = new HBaseTestingUtility(conf);
  utility.startMiniZKCluster();
  zkw = new ZooKeeperWatcher(conf, "test", null);
  ZKUtil.createWithParents(zkw, "/hbase/replication");
  ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1");
  ZKUtil.setData(zkw, "/hbase/replication/peers/1",
    Bytes.toBytes(conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
      + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":/1"));
  ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/peer-state");
  ZKUtil.setData(zkw, "/hbase/replication/peers/1/peer-state",
    ReplicationStateZKBase.ENABLED_ZNODE_BYTES);
  ZKUtil.createWithParents(zkw, "/hbase/replication/state");
  ZKUtil.setData(zkw, "/hbase/replication/state", ReplicationStateZKBase.ENABLED_ZNODE_BYTES);
  ZKClusterId.setClusterId(zkw, new ClusterId());
  FSUtils.setRootDir(utility.getConfiguration(), utility.getDataTestDir());
  fs = FileSystem.get(conf);
  oldLogDir = new Path(utility.getDataTestDir(),
    HConstants.HREGION_OLDLOGDIR_NAME);
  logDir = new Path(utility.getDataTestDir(),
    HConstants.HREGION_LOGDIR_NAME);
  replication = new Replication(new DummyServer(), fs, logDir, oldLogDir);
  manager = replication.getReplicationManager();
  manager.addSource(slaveId);
  htd = new HTableDescriptor(test);
  HColumnDescriptor col = new HColumnDescriptor("f1");
  col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  htd.addFamily(col);
  col = new HColumnDescriptor("f2");
  col.setScope(HConstants.REPLICATION_SCOPE_LOCAL);
  htd.addFamily(col);
  hri = new HRegionInfo(htd.getTableName(), r1, r2);
}

Developer: grokcoder, Project: pbase, Lines: 48, Source: TestReplicationSourceManager.java
Note: The org.apache.hadoop.hbase.ClusterId examples in this article were collected from source-code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by various developers; the source code copyright belongs to the original authors. Please refer to the corresponding project's license before distributing or using it, and do not repost without permission.