This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress. If you are unsure what StartupProgress does or how to use it, the curated examples below should help.
The StartupProgress class belongs to the org.apache.hadoop.hdfs.server.namenode.startupprogress package. Fifteen code examples are shown below, sorted by popularity by default. Upvote the ones you like or find useful; your feedback helps the system recommend better Java code examples.
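All of the examples below follow the same reporting pattern: obtain the NameNode's StartupProgress, begin a Step within a Phase, declare the total amount of work with setTotal, and increment a Counter as each unit of work completes. Before diving into the real snippets, here is a minimal, self-contained sketch of that pattern; the phase, step type, and work loop are illustrative placeholders chosen for this sketch, not taken from any one example below.

import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;

public class StartupProgressSketch {
  public static void main(String[] args) {
    // Inside the NameNode this instance comes from NameNode.getStartupProgress();
    // here we construct one directly, as the servlet test in Example 8 does.
    StartupProgress prog = new StartupProgress();

    // Hypothetical workload: pretend we are loading 100 inodes.
    final long totalWork = 100;
    Step step = new Step(StepType.INODES);

    prog.beginPhase(Phase.LOADING_FSIMAGE);
    prog.beginStep(Phase.LOADING_FSIMAGE, step);
    prog.setTotal(Phase.LOADING_FSIMAGE, step, totalWork);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
    for (long i = 0; i < totalWork; i++) {
      // ... do one unit of real work here ...
      counter.increment();
    }
    prog.endStep(Phase.LOADING_FSIMAGE, step);
    prog.endPhase(Phase.LOADING_FSIMAGE);
  }
}

Ending every step and phase is what lets consumers such as the startup-progress servlet and JSP helpers report 100% overall completion, which Example 13 verifies.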
Example 1: saveCurrentTokens
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
/**
 * Private helper method to save delegation keys and tokens in fsimage
 */
private synchronized void saveCurrentTokens(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(currentTokens.size());
  Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
      .iterator();
  while (iter.hasNext()) {
    DelegationTokenIdentifier id = iter.next();
    id.write(out);
    DelegationTokenInformation info = currentTokens.get(id);
    out.writeLong(info.getRenewDate());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
Contributor: naver | Project: hadoop | Lines: 23 | Source: DelegationTokenSecretManager.java
Example 2: saveAllKeys
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  // The total should reflect allKeys, which this step writes,
  // not currentTokens (a bug in the original snippet).
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, allKeys.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(allKeys.size());
  Iterator<Integer> iter = allKeys.keySet().iterator();
  while (iter.hasNext()) {
    Integer key = iter.next();
    allKeys.get(key).write(out);
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
Contributor: naver | Project: hadoop | Lines: 17 | Source: DelegationTokenSecretManager.java
Example 3: loadCurrentTokens
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
/**
 * Private helper method to load delegation tokens from fsimage
 */
private synchronized void loadCurrentTokens(DataInput in)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfTokens = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfTokens);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfTokens; i++) {
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(in);
    long expiryTime = in.readLong();
    addPersistedDelegationToken(id, expiryTime);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
Contributor: naver | Project: hadoop | Lines: 21 | Source: DelegationTokenSecretManager.java
Example 4: loadAllKeys
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
/**
 * Private helper method to load delegation keys from fsimage.
 * @throws IOException on error
 */
private synchronized void loadAllKeys(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfKeys = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfKeys; i++) {
    DelegationKey value = new DelegationKey();
    value.readFields(in);
    addKey(value);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
Contributor: naver | Project: hadoop | Lines: 20 | Source: DelegationTokenSecretManager.java
Example 5: loadFSEdits
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
/**
 * Load an edit log and apply the changes to the in-memory structure.
 * This is where we apply edits that we've been writing to disk all
 * along.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
    StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = createStartupProgressStep(edits);
  prog.beginStep(Phase.LOADING_EDITS, step);
  fsNamesys.writeLock();
  try {
    long startTime = monotonicNow();
    FSImage.LOG.info("Start loading edits file " + edits.getName());
    long numEdits = loadEditRecords(edits, false, expectedStartingTxId,
        startOpt, recovery);
    FSImage.LOG.info("Edits file " + edits.getName()
        + " of size " + edits.length() + " edits # " + numEdits
        + " loaded in " + (monotonicNow() - startTime) / 1000 + " seconds");
    return numEdits;
  } finally {
    edits.close();
    fsNamesys.writeUnlock();
    prog.endStep(Phase.LOADING_EDITS, step);
  }
}
Contributor: naver | Project: hadoop | Lines: 27 | Source: FSEditLogLoader.java
Example 6: incrementSafeBlockCount
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
/**
 * Increment the number of safe blocks if the current block has
 * reached minimal replication.
 * @param replication current replication
 */
private synchronized void incrementSafeBlockCount(short replication) {
  if (replication == safeReplication) {
    this.blockSafe++;
    // Report startup progress only if we haven't completed startup yet.
    StartupProgress prog = NameNode.getStartupProgress();
    if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
      if (this.awaitingReportedBlocksCounter == null) {
        this.awaitingReportedBlocksCounter = prog.getCounter(Phase.SAFEMODE,
            STEP_AWAITING_REPORTED_BLOCKS);
      }
      this.awaitingReportedBlocksCounter.increment();
    }
    checkMode();
  }
}
Contributor: naver | Project: hadoop | Lines: 23 | Source: FSNamesystem.java
Example 7: loadDirectives
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
/**
 * Load cache directives from the fsimage
 */
private void loadDirectives(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_ENTRIES);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numDirectives = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numDirectives; i++) {
    CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
    // Get the pool reference by looking it up in the map
    final String poolName = info.getPool();
    CacheDirective directive =
        new CacheDirective(info.getId(), info.getPath().toUri().getPath(),
            info.getReplication(), info.getExpiration().getAbsoluteMillis());
    addCacheDirective(poolName, directive);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
Contributor: naver | Project: hadoop | Lines: 23 | Source: CacheManager.java
Example 8: setUp
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
@Before
public void setUp() throws Exception {
  startupProgress = new StartupProgress();
  ServletContext context = mock(ServletContext.class);
  when(context.getAttribute(NameNodeHttpServer.STARTUP_PROGRESS_ATTRIBUTE_KEY))
      .thenReturn(startupProgress);
  servlet = mock(StartupProgressServlet.class);
  when(servlet.getServletContext()).thenReturn(context);
  doCallRealMethod().when(servlet).doGet(any(HttpServletRequest.class),
      any(HttpServletResponse.class));
  req = mock(HttpServletRequest.class);
  respOut = new ByteArrayOutputStream();
  PrintWriter writer = new PrintWriter(respOut);
  resp = mock(HttpServletResponse.class);
  when(resp.getWriter()).thenReturn(writer);
}
Contributor: naver | Project: hadoop | Lines: 17 | Source: TestStartupProgressServlet.java
Example 9: loadSecretManagerSection
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
private void loadSecretManagerSection(InputStream in, StartupProgress prog,
    Step currentStep) throws IOException {
  SecretManagerSection s = SecretManagerSection.parseDelimitedFrom(in);
  int numKeys = s.getNumKeys(), numTokens = s.getNumTokens();
  ArrayList<SecretManagerSection.DelegationKey> keys = Lists
      .newArrayListWithCapacity(numKeys);
  ArrayList<SecretManagerSection.PersistToken> tokens = Lists
      .newArrayListWithCapacity(numTokens);
  for (int i = 0; i < numKeys; ++i) {
    keys.add(SecretManagerSection.DelegationKey.parseDelimitedFrom(in));
  }
  prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numTokens);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
  for (int i = 0; i < numTokens; ++i) {
    tokens.add(SecretManagerSection.PersistToken.parseDelimitedFrom(in));
    counter.increment();
  }
  fsn.loadSecretManagerState(s, keys, tokens);
}
Contributor: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 22 | Source: FSImageFormatProtobuf.java
Example 10: loadCacheManagerSection
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
private void loadCacheManagerSection(InputStream in, StartupProgress prog,
    Step currentStep) throws IOException {
  CacheManagerSection s = CacheManagerSection.parseDelimitedFrom(in);
  int numPools = s.getNumPools();
  ArrayList<CachePoolInfoProto> pools = Lists
      .newArrayListWithCapacity(numPools);
  ArrayList<CacheDirectiveInfoProto> directives = Lists
      .newArrayListWithCapacity(s.getNumDirectives());
  prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numPools);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
  for (int i = 0; i < numPools; ++i) {
    pools.add(CachePoolInfoProto.parseDelimitedFrom(in));
    counter.increment();
  }
  for (int i = 0; i < s.getNumDirectives(); ++i) {
    directives.add(CacheDirectiveInfoProto.parseDelimitedFrom(in));
  }
  fsn.getCacheManager().loadState(
      new CacheManager.PersistState(s, pools, directives));
}
Contributor: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 20 | Source: FSImageFormatProtobuf.java
Example 11: loadINodeSection
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
void loadINodeSection(InputStream in, StartupProgress prog,
    Step currentStep) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  fsn.dir.resetLastInodeId(s.getLastInodeId());
  long numInodes = s.getNumInodes();
  LOG.info("Loading " + numInodes + " INodes.");
  prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
  for (int i = 0; i < numInodes; ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getId() == INodeId.ROOT_INODE_ID) {
      loadRootINode(p);
    } else {
      INode n = loadINode(p);
      dir.addToInodeMap(n);
    }
    counter.increment();
  }
}
Contributor: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 20 | Source: FSImageFormatPBINode.java
Example 12: loadFSEdits
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
/**
 * Load an edit log and apply the changes to the in-memory structure.
 * This is where we apply edits that we've been writing to disk all
 * along.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
    StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = createStartupProgressStep(edits);
  prog.beginStep(Phase.LOADING_EDITS, step);
  fsNamesys.writeLock();
  try {
    long startTime = now();
    FSImage.LOG.info("Start loading edits file " + edits.getName());
    long numEdits = loadEditRecords(edits, false, expectedStartingTxId,
        startOpt, recovery);
    FSImage.LOG.info("Edits file " + edits.getName()
        + " of size " + edits.length() + " edits # " + numEdits
        + " loaded in " + (now() - startTime) / 1000 + " seconds");
    return numEdits;
  } finally {
    edits.close();
    fsNamesys.writeUnlock();
    prog.endStep(Phase.LOADING_EDITS, step);
  }
}
Contributor: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 27 | Source: FSEditLogLoader.java
Example 13: testGenerateStartupProgress
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
@Test
public void testGenerateStartupProgress() throws Exception {
  cluster.waitClusterUp();
  NamenodeJspHelper.HealthJsp jsp = new NamenodeJspHelper.HealthJsp();
  StartupProgress prog = NameNode.getStartupProgress();
  JspWriter out = mock(JspWriter.class);
  jsp.generateStartupProgress(out, prog);
  ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
  verify(out, atLeastOnce()).println(captor.capture());
  List<String> contents = captor.getAllValues();
  // Verify 100% overall completion and all phases mentioned in output.
  Assert.assertTrue(containsMatch(contents, "Elapsed Time\\:"));
  Assert.assertTrue(containsMatch(contents, "Percent Complete\\:.*?100\\.00%"));
  Assert.assertTrue(containsMatch(contents, LOADING_FSIMAGE.getDescription()));
  Assert.assertTrue(containsMatch(contents, LOADING_EDITS.getDescription()));
  Assert.assertTrue(containsMatch(contents,
      SAVING_CHECKPOINT.getDescription()));
  Assert.assertTrue(containsMatch(contents, SAFEMODE.getDescription()));
}
Contributor: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 21 | Source: TestNameNodeJspHelper.java
Example 14: loadAllKeys
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
/**
 * Private helper method to load delegation keys from fsimage.
 * @param in the DataInput to read keys from
 * @throws IOException on read failure
 */
private synchronized void loadAllKeys(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfKeys = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfKeys; i++) {
    DelegationKey value = new DelegationKey();
    value.readFields(in);
    addKey(value);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
Contributor: ict-carch | Project: hadoop-plus | Lines: 21 | Source: DelegationTokenSecretManager.java
Example 15: loadFSEdits
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the required package/class
/**
 * Load an edit log and apply the changes to the in-memory structure.
 * This is where we apply edits that we've been writing to disk all
 * along.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
    MetaRecoveryContext recovery) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = createStartupProgressStep(edits);
  prog.beginStep(Phase.LOADING_EDITS, step);
  fsNamesys.writeLock();
  try {
    long startTime = now();
    FSImage.LOG.info("Start loading edits file " + edits.getName());
    long numEdits = loadEditRecords(edits, false,
        expectedStartingTxId, recovery);
    FSImage.LOG.info("Edits file " + edits.getName()
        + " of size " + edits.length() + " edits # " + numEdits
        + " loaded in " + (now() - startTime) / 1000 + " seconds");
    return numEdits;
  } finally {
    edits.close();
    fsNamesys.writeUnlock();
    prog.endStep(Phase.LOADING_EDITS, step);
  }
}
Contributor: ict-carch | Project: hadoop-plus | Lines: 27 | Source: FSEditLogLoader.java
Note: the org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.