This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff. If you have been wondering what ChildrenDiff does, how to use it, or where to find real-world examples, the hand-picked class examples below should help.
ChildrenDiff is an inner class of DirectoryWithSnapshotFeature in the package org.apache.hadoop.hdfs.server.namenode.snapshot. Fourteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
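Before the examples, here is a minimal sketch of the typical access pattern: a ChildrenDiff is obtained from a directory's DirectoryDiff list and exposes the created/deleted child lists that the examples below assert against. The accessors used (getDiffs, getChildrenDiff, getList, getSnapshotId) all appear in the examples; the wrapper class ChildrenDiffSketch and its printDiffSummary method are hypothetical, and the sketch assumes the directory already carries snapshot diffs and runs with the same package-level access the tests below have.

import java.util.List;

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;

// Sketch only: assumes `dir` already has snapshot diffs recorded against it.
public class ChildrenDiffSketch {
  static void printDiffSummary(INodeDirectory dir) {
    // Each DirectoryDiff records the directory's child changes relative to one snapshot.
    for (DirectoryDiff dirDiff : dir.getDiffs().asList()) {
      ChildrenDiff diff = dirDiff.getChildrenDiff();
      List<INode> created = diff.getList(ListType.CREATED);
      List<INode> deleted = diff.getList(ListType.DELETED);
      System.out.println("snapshotId=" + dirDiff.getSnapshotId()
          + " created=" + created.size() + " deleted=" + deleted.size());
    }
  }
}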
Example 1: generateReport
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* Generate a {@link SnapshotDiffReport} based on detailed diff information.
* @return A {@link SnapshotDiffReport} describing the difference
*/
public SnapshotDiffReport generateReport() {
  List<DiffReportEntry> diffReportList = new ArrayList<DiffReportEntry>();
  for (INode node : diffMap.keySet()) {
    diffReportList.add(new DiffReportEntry(DiffType.MODIFY, diffMap
        .get(node)));
    if (node.isDirectory()) {
      ChildrenDiff dirDiff = dirDiffMap.get(node);
      List<DiffReportEntry> subList = dirDiff.generateReport(
          diffMap.get(node), isFromEarlier());
      diffReportList.addAll(subList);
    }
  }
  return new SnapshotDiffReport(snapshotRoot.getFullPathName(),
      Snapshot.getSnapshotName(from), Snapshot.getSnapshotName(to),
      diffReportList);
}
Developer: Seagate, Project: hadoop-on-lustre2, Lines of code: 21, Source file: INodeDirectorySnapshottable.java
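The report produced above is also available to ordinary clients through the public HDFS API, so the NameNode internals are not needed just to read a diff. A minimal sketch, assuming fs.defaultFS points at an HDFS cluster and that the snapshottable directory /dir1 already has snapshots s1 and s2 (all three names are placeholders):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;

public class SnapshotDiffClientSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      // The cast is only valid when fs.defaultFS is an hdfs:// URI.
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      SnapshotDiffReport report = dfs.getSnapshotDiffReport(new Path("/dir1"), "s1", "s2");
      for (DiffReportEntry entry : report.getDiffList()) {
        System.out.println(entry);  // entries like "M .", "+ ./foo", "- ./bar"
      }
    }
  }
}

This is the same report that the hdfs snapshotDiff shell command prints.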
Example 2: testRenameUndo_1
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* Test the undo section of rename. Before the rename, we create the renamed
* file/dir before taking the snapshot.
*/
@Test
public void testRenameUndo_1() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
final Path dir2file = new Path(sdir2, "file");
DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2 = spy(dir2);
doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
Mockito.anyInt());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
final Path newfoo = new Path(sdir2, "foo");
boolean result = hdfs.rename(foo, newfoo);
assertFalse(result);
// check the current internal details
INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList<INode> dir1Children = dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir1Children.size());
assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
assertEquals(1, dir1Diffs.size());
assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
// after the undo of rename, both the created and deleted list of sdir1
// should be empty
ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
INode fooNode = fsdir.getINode4Write(foo.toString());
assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
assertEquals(1, fooDiffs.size());
assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
assertTrue(fooNode_s1 == fooNode);
// check sdir2
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList<INode> dir2Children = dir2Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
Developer: naver, Project: hadoop, Lines of code: 68, Source file: TestRenameWithSnapshots.java
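The created/deleted size checks recur in every test below; a small hedged helper (the class ChildrenDiffAssert and its assertSizes method are hypothetical, not part of Hadoop) keeps them in one place:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;

// Hypothetical test helper for the repeated created/deleted assertions.
final class ChildrenDiffAssert {
  private ChildrenDiffAssert() {}

  static void assertSizes(ChildrenDiff diff, int expectedCreated, int expectedDeleted) {
    assertEquals(expectedCreated, diff.getList(ListType.CREATED).size());
    assertEquals(expectedDeleted, diff.getList(ListType.DELETED).size());
  }
}

With it, testRenameUndo_1 above would call assertSizes(childrenDiff, 0, 0) (the failed rename is fully undone because foo predates snapshot s1), while testRenameUndo_2 below would call assertSizes(childrenDiff, 1, 0) (foo was created after s1, so the undo puts it back in the created list).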
Example 3: testRenameUndo_2
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* Test the undo section of rename. Before the rename, we create the renamed
* file/dir after taking the snapshot.
*/
@Test
public void testRenameUndo_2() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path dir2file = new Path(sdir2, "file");
DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
// create foo after taking snapshot
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2 = spy(dir2);
doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
Mockito.anyInt());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
final Path newfoo = new Path(sdir2, "foo");
boolean result = hdfs.rename(foo, newfoo);
assertFalse(result);
// check the current internal details
INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList<INode> dir1Children = dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir1Children.size());
assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
assertEquals(1, dir1Diffs.size());
assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
// after the undo of rename, the created list of sdir1 should contain
// 1 element
ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
INode fooNode = fsdir.getINode4Write(foo.toString());
assertTrue(fooNode instanceof INodeDirectory);
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
assertFalse(hdfs.exists(foo_s1));
// check sdir2
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList<INode> dir2Children = dir2Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
Developer: naver, Project: hadoop, Lines of code: 67, Source file: TestRenameWithSnapshots.java
Example 4: testRenameDirAndDeleteSnapshot_3
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* After the following operations:
* Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
* -> delete snapshot s on dst tree
*
* Make sure we destroy everything created after the rename under the renamed
* dir.
*/
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
final Path foo2 = new Path(sdir2, "foo");
hdfs.rename(foo, foo2);
// create two new files under foo2
final Path bar2 = new Path(foo2, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
final Path bar3 = new Path(foo2, "bar3");
DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
// create a new snapshot on sdir2
hdfs.createSnapshot(sdir2, "s3");
// delete foo2
hdfs.delete(foo2, true);
// delete s3
hdfs.deleteSnapshot(sdir2, "s3");
// check
final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(3, q1.getNameSpace());
final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(1, q2.getNameSpace());
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
foo.getName());
INode fooRef = fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(1, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
assertEquals(0, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
restartClusterAndCheckImage(true);
}
Developer: naver, Project: hadoop, Lines of code: 70, Source file: TestRenameWithSnapshots.java
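Outside the MiniDFSCluster fixture, the operation sequence this test exercises maps onto the public DistributedFileSystem API as in the sketch below. All paths and snapshot names are placeholders, /dir1/foo is assumed to exist, and fs.defaultFS is assumed to point at HDFS; as in the test, the source tree needs its own earlier snapshot so the renamed directory stays referenced.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RenameThenDeleteSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf)) {
      Path srcDir = new Path("/dir1");
      Path dstDir = new Path("/dir2");
      dfs.allowSnapshot(srcDir);
      dfs.createSnapshot(srcDir, "s1");                             // snapshot on the source tree
      dfs.allowSnapshot(dstDir);
      dfs.rename(new Path(srcDir, "foo"), new Path(dstDir, "foo")); // rename a dir into the dst tree
      dfs.createSnapshot(dstDir, "s3");                             // create snapshot s on the dst tree
      dfs.delete(new Path(dstDir, "foo"), true);                    // delete the renamed dir
      dfs.deleteSnapshot(dstDir, "s3");                             // delete snapshot s on the dst tree
      // Everything created under the renamed dir after the rename should now be gone,
      // which is what the test verifies through quota counts and the ChildrenDiff lists.
    }
  }
}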
Example 5: testRenameDirAndDeleteSnapshot_4
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* After the following operations:
* Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
* again -> delete snapshot s on dst tree
*
* Make sure we only delete the snapshot s under the renamed dir.
*/
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
final Path foo2 = new Path(sdir2, "foo");
hdfs.rename(foo, foo2);
// create two new files under foo2
final Path bar2 = new Path(foo2, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
final Path bar3 = new Path(foo2, "bar3");
DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
// create a new snapshot on sdir2
hdfs.createSnapshot(sdir2, "s3");
// rename foo2 again
hdfs.rename(foo2, foo);
// delete snapshot s3
hdfs.deleteSnapshot(sdir2, "s3");
// check
final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
// sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(7, q1.getNameSpace());
final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(1, q2.getNameSpace());
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
foo.getName());
final INode fooRef = fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(2, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(3, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
assertEquals(bar2.getName(), children.get(1).getLocalName());
assertEquals(bar3.getName(), children.get(2).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
// bar2 and bar3 in the created list
assertEquals(2, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
final INode fooRef2 = fsdir.getINode4Write(foo.toString());
assertTrue(fooRef2 instanceof INodeReference.DstReference);
INodeReference.WithCount wc2 =
(WithCount) fooRef2.asReference().getReferredINode();
assertSame(wc, wc2);
assertSame(fooRef2, wc.getParentReference());
restartClusterAndCheckImage(true);
}
Developer: naver, Project: hadoop, Lines of code: 80, Source file: TestRenameWithSnapshots.java
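Several assertions above resolve snapshot copies through SnapshotTestHelper.getSnapshotPath; outside the test harness the same read-only view is reachable through the .snapshot path component. A minimal sketch, with placeholder paths and snapshot name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotPathSketch {
  public static void main(String[] args) throws Exception {
    try (FileSystem fs = FileSystem.get(new Configuration())) {
      // <snapshottableDir>/.snapshot/<snapshotName>/<relativePath>
      Path fooInS1 = new Path("/dir1/.snapshot/s1/foo");
      for (FileStatus status : fs.listStatus(fooInS1)) {
        System.out.println(status.getPath() + " len=" + status.getLen());
      }
    }
  }
}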
Example 6: testRenameDirAndDeleteSnapshot_3
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* After the following operations:
* Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
* -> delete snapshot s on dst tree
*
* Make sure we destroy everything created after the rename under the renamed
* dir.
*/
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
final Path foo2 = new Path(sdir2, "foo");
hdfs.rename(foo, foo2);
// create two new files under foo2
final Path bar2 = new Path(foo2, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
final Path bar3 = new Path(foo2, "bar3");
DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
// create a new snapshot on sdir2
hdfs.createSnapshot(sdir2, "s3");
// delete foo2
hdfs.delete(foo2, true);
// delete s3
hdfs.deleteSnapshot(sdir2, "s3");
// check
final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(3, q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(1, q2.get(Quota.NAMESPACE));
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
foo.getName());
INode fooRef = fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(1, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
assertEquals(0, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
restartClusterAndCheckImage(true);
}
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 70, Source file: TestRenameWithSnapshots.java
Example 7: testRenameDirAndDeleteSnapshot_4
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* After the following operations:
* Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
* again -> delete snapshot s on dst tree
*
* Make sure we only delete the snapshot s under the renamed dir.
*/
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
final Path foo2 = new Path(sdir2, "foo");
hdfs.rename(foo, foo2);
// create two new files under foo2
final Path bar2 = new Path(foo2, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
final Path bar3 = new Path(foo2, "bar3");
DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
// create a new snapshot on sdir2
hdfs.createSnapshot(sdir2, "s3");
// rename foo2 again
hdfs.rename(foo2, foo);
// delete snapshot s3
hdfs.deleteSnapshot(sdir2, "s3");
// check
final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
// sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(7, q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(1, q2.get(Quota.NAMESPACE));
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
foo.getName());
final INode fooRef = fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(2, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(3, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
assertEquals(bar2.getName(), children.get(1).getLocalName());
assertEquals(bar3.getName(), children.get(2).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
// bar2 and bar3 in the created list
assertEquals(2, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
final INode fooRef2 = fsdir.getINode4Write(foo.toString());
assertTrue(fooRef2 instanceof INodeReference.DstReference);
INodeReference.WithCount wc2 =
(WithCount) fooRef2.asReference().getReferredINode();
assertSame(wc, wc2);
assertSame(fooRef2, wc.getParentReference());
restartClusterAndCheckImage(true);
}
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 80, Source file: TestRenameWithSnapshots.java
Example 8: testRenameDirAndDeleteSnapshot_3
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* After the following operations:
* Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
* -> delete snapshot s on dst tree
*
* Make sure we destroy everything created after the rename under the renamed
* dir.
*/
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
final Path foo2 = new Path(sdir2, "foo");
hdfs.rename(foo, foo2);
// create two new files under foo2
final Path bar2 = new Path(foo2, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
final Path bar3 = new Path(foo2, "bar3");
DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
// create a new snapshot on sdir2
hdfs.createSnapshot(sdir2, "s3");
// delete foo2
hdfs.delete(foo2, true);
// delete s3
hdfs.deleteSnapshot(sdir2, "s3");
// check
final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4, q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2, q2.get(Quota.NAMESPACE));
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
foo.getName());
INode fooRef = fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(1, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
assertEquals(0, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
restartClusterAndCheckImage(true);
}
Developer: yncxcw, Project: FlexMap, Lines of code: 70, Source file: TestRenameWithSnapshots.java
Example 9: testRenameDirAndDeleteSnapshot_4
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* After the following operations:
* Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
* again -> delete snapshot s on dst tree
*
* Make sure we only delete the snapshot s under the renamed dir.
*/
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
final Path foo2 = new Path(sdir2, "foo");
hdfs.rename(foo, foo2);
// create two new files under foo2
final Path bar2 = new Path(foo2, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
final Path bar3 = new Path(foo2, "bar3");
DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
// create a new snapshot on sdir2
hdfs.createSnapshot(sdir2, "s3");
// rename foo2 again
hdfs.rename(foo2, foo);
// delete snapshot s3
hdfs.deleteSnapshot(sdir2, "s3");
// check
final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
// sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(9, q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2, q2.get(Quota.NAMESPACE));
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
foo.getName());
final INode fooRef = fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(2, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(3, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
assertEquals(bar2.getName(), children.get(1).getLocalName());
assertEquals(bar3.getName(), children.get(2).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
// bar2 and bar3 in the created list
assertEquals(2, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
final INode fooRef2 = fsdir.getINode4Write(foo.toString());
assertTrue(fooRef2 instanceof INodeReference.DstReference);
INodeReference.WithCount wc2 =
(WithCount) fooRef2.asReference().getReferredINode();
assertSame(wc, wc2);
assertSame(fooRef2, wc.getParentReference());
restartClusterAndCheckImage(true);
}
Developer: yncxcw, Project: FlexMap, Lines of code: 80, Source file: TestRenameWithSnapshots.java
Example 10: addDirDiff
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/** Add a dir-diff pair */
private void addDirDiff(INodeDirectory dir, byte[][] relativePath,
    ChildrenDiff diff) {
  dirDiffMap.put(dir, diff);
  diffMap.put(dir, relativePath);
}
Developer: Seagate, Project: hadoop-on-lustre2, Lines of code: 7, Source file: INodeDirectorySnapshottable.java
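addDirDiff and generateReport (example 1) work as a pair: one map records each changed inode's path relative to the snapshot root, the other records the ChildrenDiff of each changed directory, and the report is built by walking the first map and expanding directories through the second. A self-contained illustration of that two-map bookkeeping with plain collections — the String/List stand-ins are hypothetical and only mirror the shape of the real INode/ChildrenDiff types:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class DiffBookkeepingSketch {
  // node -> path relative to the snapshot root (plays the role of diffMap)
  private final Map<String, String> diffMap = new LinkedHashMap<>();
  // directory -> pre-rendered child diff entries (plays the role of dirDiffMap)
  private final Map<String, List<String>> dirDiffMap = new LinkedHashMap<>();

  void addDirDiff(String dir, String relativePath, List<String> childEntries) {
    dirDiffMap.put(dir, childEntries);
    diffMap.put(dir, relativePath);
  }

  List<String> generateReport() {
    List<String> report = new ArrayList<>();
    for (Map.Entry<String, String> e : diffMap.entrySet()) {
      report.add("M " + e.getValue());               // the changed inode itself
      List<String> children = dirDiffMap.get(e.getKey());
      if (children != null) {
        report.addAll(children);                     // per-child created/deleted entries
      }
    }
    return report;
  }
}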
Example 11: testRenameUndo_1
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* Test the undo section of rename. Before the rename, we create the renamed
* file/dir before taking the snapshot.
*/
@Test
public void testRenameUndo_1() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
final Path dir2file = new Path(sdir2, "file");
DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2 = spy(dir2);
doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
Mockito.anyInt());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
final Path newfoo = new Path(sdir2, "foo");
boolean result = hdfs.rename(foo, newfoo);
assertFalse(result);
// check the current internal details
INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
.getINode4Write(sdir1.toString());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList<INode> dir1Children = dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir1Children.size());
assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
assertEquals(1, dir1Diffs.size());
assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
// after the undo of rename, both the created and deleted list of sdir1
// should be empty
ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
INode fooNode = fsdir.getINode4Write(foo.toString());
assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
assertEquals(1, fooDiffs.size());
assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
assertTrue(fooNode_s1 == fooNode);
// check sdir2
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList<INode> dir2Children = dir2Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
Developer: Seagate, Project: hadoop-on-lustre2, Lines of code: 68, Source file: TestRenameWithSnapshots.java
Example 12: testRenameUndo_2
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* Test the undo section of rename. Before the rename, we create the renamed
* file/dir after taking the snapshot.
*/
@Test
public void testRenameUndo_2() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path dir2file = new Path(sdir2, "file");
DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
// create foo after taking snapshot
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2 = spy(dir2);
doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
Mockito.anyInt());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
final Path newfoo = new Path(sdir2, "foo");
boolean result = hdfs.rename(foo, newfoo);
assertFalse(result);
// check the current internal details
INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
.getINode4Write(sdir1.toString());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList<INode> dir1Children = dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir1Children.size());
assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
assertEquals(1, dir1Diffs.size());
assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
// after the undo of rename, the created list of sdir1 should contain
// 1 element
ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
INode fooNode = fsdir.getINode4Write(foo.toString());
assertTrue(fooNode instanceof INodeDirectory);
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
assertFalse(hdfs.exists(foo_s1));
// check sdir2
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList<INode> dir2Children = dir2Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
Developer: Seagate, Project: hadoop-on-lustre2, Lines of code: 67, Source file: TestRenameWithSnapshots.java
Example 13: testRenameDirAndDeleteSnapshot_3
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* After the following operations:
* Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
* -> delete snapshot s on dst tree
*
* Make sure we destroy everything created after the rename under the renamed
* dir.
*/
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
final Path foo2 = new Path(sdir2, "foo");
hdfs.rename(foo, foo2);
// create two new files under foo2
final Path bar2 = new Path(foo2, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
final Path bar3 = new Path(foo2, "bar3");
DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
// create a new snapshot on sdir2
hdfs.createSnapshot(sdir2, "s3");
// delete foo2
hdfs.delete(foo2, true);
// delete s3
hdfs.deleteSnapshot(sdir2, "s3");
// check
final INodeDirectorySnapshottable dir1Node =
(INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4, q1.get(Quota.NAMESPACE));
final INodeDirectorySnapshottable dir2Node =
(INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2, q2.get(Quota.NAMESPACE));
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
foo.getName());
INode fooRef = fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(1, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
assertEquals(0, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
restartClusterAndCheckImage(true);
}
Developer: Seagate, Project: hadoop-on-lustre2, Lines of code: 70, Source file: TestRenameWithSnapshots.java
Example 14: testRenameDirAndDeleteSnapshot_4
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; //import the required package/class
/**
* After the following operations:
* Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
* again -> delete snapshot s on dst tree
*
* Make sure we only delete the snapshot s under the renamed dir.
*/
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
final Path foo2 = new Path(sdir2, "foo");
hdfs.rename(foo, foo2);
// create two new files under foo2
final Path bar2 = new Path(foo2, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
final Path bar3 = new Path(foo2, "bar3");
DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
// create a new snapshot on sdir2
hdfs.createSnapshot(sdir2, "s3");
// rename foo2 again
hdfs.rename(foo2, foo);
// delete snapshot s3
hdfs.deleteSnapshot(sdir2, "s3");
// check
final INodeDirectorySnapshottable dir1Node =
(INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
// sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(9, q1.get(Quota.NAMESPACE));
final INodeDirectorySnapshottable dir2Node =
(INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2, q2.get(Quota.NAMESPACE));
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
foo.getName());
final INode fooRef = fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(2, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(3, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
assertEquals(bar2.getName(), children.get(1).getLocalName());
assertEquals(bar3.getName(), children.get(2).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
// bar2 and bar3 in the created list
assertEquals(2, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
final INode fooRef2 = fsdir.getINode4Write(foo.toString());
assertTrue(fooRef2 instanceof INodeReference.DstReference);
INodeReference.WithCount wc2 =
(WithCount) fooRef2.asReference().getReferredINode();
assertSame(wc, wc2);
assertSame(fooRef2, wc.getParentReference());
restartClusterAndCheckImage(true);
}
Developer: Seagate, Project: hadoop-on-lustre2, Lines of code: 80, Source file: TestRenameWithSnapshots.java
Note: the org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff class examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors. Please follow the corresponding project's License when redistributing or using the code, and do not reproduce it without permission.