本文整理汇总了Java中org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData类的典型用法代码示例。如果您正苦于以下问题:Java PersistedRecoveryPaxosData类的具体用法?Java PersistedRecoveryPaxosData怎么用?Java PersistedRecoveryPaxosData使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
PersistedRecoveryPaxosData类属于org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos包,在下文中一共展示了PersistedRecoveryPaxosData类的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: completeHalfDoneAcceptRecovery
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData; //导入依赖的package包/类
/**
* In the case the node crashes in between downloading a log segment
* and persisting the associated paxos recovery data, the log segment
* will be left in its temporary location on disk. Given the paxos data,
* we can check if this was indeed the case, and "roll forward"
* the atomic operation.
*
* See the inline comments in
* {@link #acceptRecovery(RequestInfo, SegmentStateProto, URL)} for more
* details.
*
* @throws IOException if the temporary file is unable to be renamed into
* place
*/
/**
 * Roll forward an interrupted acceptRecovery(): if the node crashed after
 * downloading a log segment but before persisting the matching paxos
 * recovery data, the downloaded segment is still sitting at its temporary
 * path. Given the persisted paxos data we can detect that situation and
 * complete the atomic rename.
 *
 * See the inline comments in
 * {@link #acceptRecovery(RequestInfo, SegmentStateProto, URL)} for more
 * details.
 *
 * @param paxosData previously persisted recovery data, or null if none
 * @throws IOException if the temporary file cannot be renamed into place
 */
private void completeHalfDoneAcceptRecovery(
    PersistedRecoveryPaxosData paxosData) throws IOException {
  if (paxosData == null) {
    return;
  }

  final long startTxId = paxosData.getSegmentState().getStartTxId();
  final long acceptedEpoch = paxosData.getAcceptedInEpoch();

  File tmpSegment = storage.getSyncLogTemporaryFile(startTxId, acceptedEpoch);
  if (!tmpSegment.exists()) {
    // No leftover temporary segment: the previous recovery completed fully.
    return;
  }

  File finalSegment = storage.getInProgressEditLog(startTxId);
  LOG.info("Rolling forward previously half-completed synchronization: " +
      tmpSegment + " -> " + finalSegment);
  FileUtil.replaceFile(tmpSegment, finalSegment);
}
开发者ID:naver,项目名称:hadoop,代码行数:33,代码来源:Journal.java
示例2: getPersistedPaxosData
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData; //导入依赖的package包/类
/**
* Retrieve the persisted data for recovering the given segment from disk.
*/
/**
 * Retrieve the persisted data for recovering the given segment from disk.
 *
 * @param segmentTxId the first transaction ID of the segment being recovered
 * @return the previously persisted recovery data, or {@code null} if no
 *         paxos file exists for this segment
 * @throws IOException if the paxos file cannot be read
 * @throws IllegalStateException if the file's contents do not match the
 *         requested segment
 */
private PersistedRecoveryPaxosData getPersistedPaxosData(long segmentTxId)
    throws IOException {
  File f = storage.getPaxosFile(segmentTxId);
  if (!f.exists()) {
    // Default instance has no fields filled in (they're optional)
    return null;
  }
  // try-with-resources closes the stream even if parsing throws, replacing
  // the manual IOUtils.closeStream() in a finally block.
  try (InputStream in = new FileInputStream(f)) {
    PersistedRecoveryPaxosData ret =
        PersistedRecoveryPaxosData.parseDelimitedFrom(in);
    Preconditions.checkState(ret != null &&
        ret.getSegmentState().getStartTxId() == segmentTxId,
        "Bad persisted data for segment %s: %s",
        segmentTxId, ret);
    return ret;
  }
}
开发者ID:naver,项目名称:hadoop,代码行数:24,代码来源:Journal.java
示例3: completeHalfDoneAcceptRecovery
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData; //导入依赖的package包/类
/**
* In the case the node crashes in between downloading a log segment
* and persisting the associated paxos recovery data, the log segment
* will be left in its temporary location on disk. Given the paxos data,
* we can check if this was indeed the case, and "roll forward"
* the atomic operation.
*
* See the inline comments in
* {@link #acceptRecovery(RequestInfo, SegmentStateProto, URL)} for more
* details.
*
* @throws IOException if the temporary file is unable to be renamed into
* place
*/
/**
 * Roll forward an interrupted acceptRecovery(): a crash between downloading
 * a log segment and persisting the associated paxos recovery data leaves
 * the segment at its temporary location on disk. Given the paxos data we
 * can detect that case and finish the atomic rename here.
 *
 * See the inline comments in
 * {@link #acceptRecovery(RequestInfo, SegmentStateProto, URL)} for more
 * details.
 *
 * @param paxosData previously persisted recovery data, or null if none
 * @throws IOException if the temporary file cannot be renamed into place
 */
private void completeHalfDoneAcceptRecovery(
    PersistedRecoveryPaxosData paxosData) throws IOException {
  if (paxosData == null) {
    return;
  }

  final long startTxId = paxosData.getSegmentState().getStartTxId();
  final long acceptedEpoch = paxosData.getAcceptedInEpoch();

  File tmpSegment =
      journalStorage.getSyncLogTemporaryFile(startTxId, acceptedEpoch);
  if (!tmpSegment.exists()) {
    // No leftover temporary segment: the previous recovery completed fully.
    return;
  }

  File finalSegment = journalStorage.getInProgressEditLog(startTxId);
  LOG.info("Rolling forward previously half-completed synchronization: " +
      tmpSegment + " -> " + finalSegment);
  FileUtil.replaceFile(tmpSegment, finalSegment);
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:33,代码来源:Journal.java
示例4: getPersistedPaxosData
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData; //导入依赖的package包/类
/**
* Retrieve the persisted data for recovering the given segment from disk.
*/
/**
 * Retrieve the persisted data for recovering the given segment from disk.
 *
 * @param segmentTxId the first transaction ID of the segment being recovered
 * @return the previously persisted recovery data, or {@code null} if no
 *         paxos file exists for this segment
 * @throws IOException if the paxos file cannot be read
 * @throws IllegalStateException if the file's contents do not match the
 *         requested segment
 */
private PersistedRecoveryPaxosData getPersistedPaxosData(long segmentTxId)
    throws IOException {
  File f = journalStorage.getPaxosFile(segmentTxId);
  if (!f.exists()) {
    // Default instance has no fields filled in (they're optional)
    return null;
  }
  // try-with-resources closes the stream even if parsing throws, replacing
  // the manual IOUtils.closeStream() in a finally block.
  try (InputStream in = new FileInputStream(f)) {
    PersistedRecoveryPaxosData ret =
        PersistedRecoveryPaxosData.parseDelimitedFrom(in);
    Preconditions.checkState(ret != null &&
        ret.getSegmentState().getStartTxId() == segmentTxId,
        "Bad persisted data for segment %s: %s",
        segmentTxId, ret);
    return ret;
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:24,代码来源:Journal.java
示例5: persistPaxosData
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData; //导入依赖的package包/类
/**
* Persist data for recovering the given segment from disk.
*/
/**
 * Persist data for recovering the given segment from disk.
 *
 * Writes the protobuf (delimited) followed by a human-readable dump of the
 * same data; the text portion exists only to aid debugging and is never
 * parsed back. The write is atomic: the file is only moved into place if
 * every write succeeds, otherwise the temporary output is aborted.
 *
 * @param segmentTxId the first transaction ID of the segment
 * @param newData the recovery data to persist
 * @throws IOException if the data cannot be written
 */
private void persistPaxosData(long segmentTxId,
    PersistedRecoveryPaxosData newData) throws IOException {
  File paxosFile = storage.getPaxosFile(segmentTxId);
  AtomicFileOutputStream out = new AtomicFileOutputStream(paxosFile);
  boolean committed = false;
  try {
    newData.writeDelimitedTo(out);
    out.write('\n');
    // Human-readable rendering after the protobuf, for debugging only.
    // Note: the writer intentionally is not closed, since closing it would
    // close the underlying stream; we flush it and let the stream's own
    // close/abort handle the file.
    OutputStreamWriter debugWriter =
        new OutputStreamWriter(out, Charsets.UTF_8);
    debugWriter.write(String.valueOf(newData));
    debugWriter.write('\n');
    debugWriter.flush();
    out.flush();
    committed = true;
  } finally {
    if (!committed) {
      // Abandon the temporary file so a partial write never replaces
      // previously persisted data.
      out.abort();
    } else {
      IOUtils.closeStream(out);
    }
  }
}
开发者ID:naver,项目名称:hadoop,代码行数:30,代码来源:Journal.java
示例6: prepareRecovery
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData; //导入依赖的package包/类
/**
* @see QJournalProtocol#prepareRecovery(RequestInfo, long)
*/
/**
 * Answer a Paxos "prepare" for segment recovery: report any previously
 * accepted recovery proposal, the local view of the segment, the last
 * writer epoch, and (if known) the highest committed txid.
 *
 * @see QJournalProtocol#prepareRecovery(RequestInfo, long)
 */
public synchronized PrepareRecoveryResponseProto prepareRecovery(
    RequestInfo reqInfo, long segmentTxId) throws IOException {
  checkJournalStorageFormatted();
  checkRequest(reqInfo);
  abortCurSegment();

  PrepareRecoveryResponseProto response = new PrepareRecoveryResponseProto();

  // Finish any rename that a prior acceptRecovery() left half-done before
  // reporting our state.
  PersistedRecoveryPaxosData accepted = getPersistedPaxosData(segmentTxId);
  completeHalfDoneAcceptRecovery(accepted);

  SegmentStateProto segInfo = getSegmentInfo(segmentTxId);
  boolean hasFinalizedSegment = segInfo != null && !segInfo.getIsInProgress();

  if (accepted != null && !hasFinalizedSegment) {
    // A previously accepted recovery proposal takes precedence over our
    // local, still in-progress view of the segment.
    response.setAcceptedInEpoch(accepted.getAcceptedInEpoch());
    response.setSegmentState(accepted.getSegmentState());
  } else if (segInfo != null) {
    response.setSegmentState(segInfo);
  }

  response.setLastWriterEpoch(lastWriterEpoch.get());
  if (committedTxnId.get() != HdfsConstants.INVALID_TXID) {
    response.setLastCommittedTxId(committedTxnId.get());
  }

  LOG.info("Prepared recovery for segment " + segmentTxId + ": " + response);
  return response;
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:36,代码来源:Journal.java
示例7: persistPaxosData
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData; //导入依赖的package包/类
/**
* Persist data for recovering the given segment from disk.
*/
/**
 * Persist data for recovering the given segment from disk.
 *
 * Writes the protobuf (delimited) followed by a human-readable dump of the
 * same data; the text portion exists only to aid debugging and is never
 * parsed back. The write is atomic: the file is only moved into place if
 * every write succeeds, otherwise the temporary output is aborted.
 *
 * @param segmentTxId the first transaction ID of the segment
 * @param newData the recovery data to persist
 * @throws IOException if the data cannot be written
 */
private void persistPaxosData(long segmentTxId,
    PersistedRecoveryPaxosData newData) throws IOException {
  File paxosFile = journalStorage.getPaxosFile(segmentTxId);
  AtomicFileOutputStream out = new AtomicFileOutputStream(paxosFile);
  boolean committed = false;
  try {
    newData.writeDelimitedTo(out);
    out.write('\n');
    // Human-readable rendering after the protobuf, for debugging only.
    // Note: the writer intentionally is not closed, since closing it would
    // close the underlying stream; we flush it and let the stream's own
    // close/abort handle the file.
    OutputStreamWriter debugWriter =
        new OutputStreamWriter(out, Charsets.UTF_8);
    debugWriter.write(String.valueOf(newData));
    debugWriter.write('\n');
    debugWriter.flush();
    out.flush();
    committed = true;
  } finally {
    if (!committed) {
      // Abandon the temporary file so a partial write never replaces
      // previously persisted data.
      out.abort();
    } else {
      IOUtils.closeStream(out);
    }
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:30,代码来源:Journal.java
注:本文中的org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论