This article collects typical usage examples of the Java class org.apache.cassandra.db.compaction.CompactionController. If you are unsure what the CompactionController class is for, how to use it, or where to find working examples, the curated code samples below should help.
The CompactionController class belongs to the org.apache.cassandra.db.compaction package. A total of 20 code examples of the class are shown below, sorted by popularity by default.
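Before the individual examples, here is a minimal sketch of the pattern most of them share: build a CompactionController over a set of sstables and drive a CompactionIterator with it. This sketch assumes the Cassandra 3.x-style API used in the later examples (getLiveSSTables, getCompactionStrategyManager, CompactionIterator); the keyspace and table names are hypothetical placeholders, and the node is assumed to be initialized as in a unit-test environment. It is illustrative only, not a drop-in implementation.

import java.util.HashSet;
import java.util.Set;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.db.compaction.AbstractCompactionStrategy;
import org.apache.cassandra.db.compaction.CompactionController;
import org.apache.cassandra.db.compaction.CompactionIterator;
import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.io.sstable.format.SSTableReader;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.cassandra.utils.UUIDGen;

public class CompactionControllerSketch
{
    // Runs all live sstables of a table through a compaction-style merge.
    // "my_keyspace" and "my_table" are placeholder names for illustration only.
    public static void mergeAllLiveSSTables()
    {
        ColumnFamilyStore cfs = Keyspace.open("my_keyspace").getColumnFamilyStore("my_table");
        Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
        int nowInSec = FBUtilities.nowInSeconds();

        // The controller supplies purge decisions (gcBefore) to the merge; the iterator
        // produces the merged partitions that a real compaction would write out.
        try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
             CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
        {
            while (ci.hasNext())
                ci.next(); // a real caller would append each merged partition to an SSTableWriter/SSTableRewriter
        }
    }
}

As in the examples below, the controller is constructed from the column family store, the set of sstables being compacted, and a gcBefore value that determines which tombstones may be purged.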
Example 1: testNumberOfFiles_abort
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void testNumberOfFiles_abort() throws Exception
{
    testNumberOfFiles_abort(new RewriterTest()
    {
        public void run(ISSTableScanner scanner, CompactionController controller, SSTableReader sstable, ColumnFamilyStore cfs, SSTableRewriter rewriter)
        {
            int files = 1;
            while (scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                if (rewriter.currentWriter().getFilePointer() > 25000000)
                {
                    rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory));
                    files++;
                    assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
                }
            }
            rewriter.abort();
        }
    });
}
Developer: vcostet, Project: cassandra-kmean, Lines: 23, Source: SSTableRewriterTest.java
Example 2: testNumberOfFiles_abort3
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void testNumberOfFiles_abort3() throws Exception
{
    testNumberOfFiles_abort(new RewriterTest()
    {
        public void run(ISSTableScanner scanner, CompactionController controller, SSTableReader sstable, ColumnFamilyStore cfs, SSTableRewriter rewriter)
        {
            int files = 1;
            while (scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                if (files == 1 && rewriter.currentWriter().getFilePointer() > 10000000)
                {
                    rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory));
                    files++;
                    assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
                }
            }
            rewriter.abort();
        }
    });
}
Developer: vcostet, Project: cassandra-kmean, Lines: 23, Source: SSTableRewriterTest.java
Example 3: PurgeStatisticBackend
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
public PurgeStatisticBackend(ColumnFamilyStore cfs, Collection<SSTableReader> sstables, RateLimiter rateLimiter) {
    bytesRead = 0;
    readerQueue = new PriorityQueue<>(sstables.size());
    for (SSTableReader sstable : sstables) {
        length += sstable.uncompressedLength();
        ScannerWrapper scanner = new ScannerWrapper(sstable.descriptor.generation, sstable.getScanner(rateLimiter));
        if (scanner.next()) {
            readerQueue.add(scanner);
        }
    }
    this.controller = new CompactionController(cfs, null, cfs.gcBefore(Util.NOW));
}
Developer: instaclustr, Project: cassandra-sstable-tools, Lines: 13, Source: PurgeStatisticBackend.java
Example 4: basicTest
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void basicTest() throws InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.truncateBlocking();
    for (int j = 0; j < 100; j++)
    {
        ByteBuffer key = ByteBufferUtil.bytes(String.valueOf(j));
        Mutation rm = new Mutation(KEYSPACE, key);
        rm.add(CF, Util.cellname("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
        rm.apply();
    }
    cfs.forceBlockingFlush();
    Set<SSTableReader> sstables = new HashSet<>(cfs.getSSTables());
    assertEquals(1, sstables.size());
    SSTableRewriter writer = new SSTableRewriter(cfs, sstables, 1000, false);
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategy().getScanners(sstables))
    {
        ISSTableScanner scanner = scanners.scanners.get(0);
        CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(System.currentTimeMillis()));
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory));
        while (scanner.hasNext())
        {
            AbstractCompactedRow row = new LazilyCompactedRow(controller, Arrays.asList(scanner.next()));
            writer.append(row);
        }
    }
    Collection<SSTableReader> newsstables = writer.finish();
    cfs.getDataTracker().markCompactedSSTablesReplaced(sstables, newsstables, OperationType.COMPACTION);
    Thread.sleep(100);
    validateCFS(cfs);
    int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list(), 0, 0);
    assertEquals(1, filecounts);
}
Developer: vcostet, Project: cassandra-kmean, Lines: 37, Source: SSTableRewriterTest.java
Example 5: basicTest2
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void basicTest2() throws InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.truncateBlocking();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = new HashSet<>(cfs.getSSTables());
    assertEquals(1, sstables.size());
    SSTableRewriter.overrideOpenInterval(10000000);
    SSTableRewriter writer = new SSTableRewriter(cfs, sstables, 1000, false);
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategy().getScanners(sstables))
    {
        ISSTableScanner scanner = scanners.scanners.get(0);
        CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(System.currentTimeMillis()));
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory));
        while (scanner.hasNext())
        {
            AbstractCompactedRow row = new LazilyCompactedRow(controller, Arrays.asList(scanner.next()));
            writer.append(row);
        }
    }
    Collection<SSTableReader> newsstables = writer.finish();
    cfs.getDataTracker().markCompactedSSTablesReplaced(sstables, newsstables, OperationType.COMPACTION);
    Thread.sleep(100);
    validateCFS(cfs);
    int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list(), 0, 0);
    assertEquals(1, filecounts);
}
Developer: vcostet, Project: cassandra-kmean, Lines: 32, Source: SSTableRewriterTest.java
Example 6: testNumberOfFiles_abort2
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void testNumberOfFiles_abort2() throws Exception
{
    testNumberOfFiles_abort(new RewriterTest()
    {
        public void run(ISSTableScanner scanner, CompactionController controller, SSTableReader sstable, ColumnFamilyStore cfs, SSTableRewriter rewriter)
        {
            int files = 1;
            while (scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                if (rewriter.currentWriter().getFilePointer() > 25000000)
                {
                    rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory));
                    files++;
                    assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
                }
                if (files == 3)
                {
                    // testing to abort when we have nothing written in the new file
                    rewriter.abort();
                    break;
                }
            }
        }
    });
}
Developer: vcostet, Project: cassandra-kmean, Lines: 28, Source: SSTableRewriterTest.java
Example 7: basicTest
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void basicTest() throws InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    for (int j = 0; j < 100; j++)
    {
        new RowUpdateBuilder(cfs.metadata, j, String.valueOf(j))
            .clustering("0")
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .apply();
    }
    cfs.forceBlockingFlush();
    Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    assertEquals(sstables.iterator().next().bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.getCount());
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, false);
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
    {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext())
        {
            writer.append(ci.next());
        }
        writer.finish();
    }
    LifecycleTransaction.waitForDeletions();
    validateCFS(cfs);
    int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list());
    assertEquals(1, filecounts);
    truncate(cfs);
}
Developer: scylladb, Project: scylla-tools-java, Lines: 40, Source: SSTableRewriterTest.java
Example 8: basicTest2
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void basicTest2() throws InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, false, 10000000, false);
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
    {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext())
        {
            writer.append(ci.next());
        }
        writer.finish();
    }
    LifecycleTransaction.waitForDeletions();
    validateCFS(cfs);
    int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list());
    assertEquals(1, filecounts);
}
Developer: scylladb, Project: scylla-tools-java, Lines: 32, Source: SSTableRewriterTest.java
Example 9: testNumberOfFiles_abort
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void testNumberOfFiles_abort() throws Exception
{
    testNumberOfFiles_abort(new RewriterTest()
    {
        public void run(ISSTableScanner scanner,
                        CompactionController controller,
                        SSTableReader sstable,
                        ColumnFamilyStore cfs,
                        SSTableRewriter rewriter,
                        LifecycleTransaction txn)
        {
            try (CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
            {
                int files = 1;
                while (ci.hasNext())
                {
                    rewriter.append(ci.next());
                    if (rewriter.currentWriter().getFilePointer() > 25000000)
                    {
                        rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory, txn));
                        files++;
                        assertEquals(cfs.getLiveSSTables().size(), files); // we have one original file plus the ones we have switched out.
                    }
                }
                rewriter.abort();
            }
        }
    });
}
Developer: scylladb, Project: scylla-tools-java, Lines: 31, Source: SSTableRewriterTest.java
Example 10: testNumberOfFiles_abort2
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void testNumberOfFiles_abort2() throws Exception
{
    testNumberOfFiles_abort(new RewriterTest()
    {
        public void run(ISSTableScanner scanner,
                        CompactionController controller,
                        SSTableReader sstable,
                        ColumnFamilyStore cfs,
                        SSTableRewriter rewriter,
                        LifecycleTransaction txn)
        {
            try (CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
            {
                int files = 1;
                while (ci.hasNext())
                {
                    rewriter.append(ci.next());
                    if (rewriter.currentWriter().getFilePointer() > 25000000)
                    {
                        rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory, txn));
                        files++;
                        assertEquals(cfs.getLiveSSTables().size(), files); // we have one original file plus the ones we have switched out.
                    }
                    if (files == 3)
                    {
                        // testing to abort when we have nothing written in the new file
                        rewriter.abort();
                        break;
                    }
                }
            }
        }
    });
}
Developer: scylladb, Project: scylla-tools-java, Lines: 36, Source: SSTableRewriterTest.java
Example 11: testNumberOfFiles_abort3
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void testNumberOfFiles_abort3() throws Exception
{
    testNumberOfFiles_abort(new RewriterTest()
    {
        public void run(ISSTableScanner scanner,
                        CompactionController controller,
                        SSTableReader sstable,
                        ColumnFamilyStore cfs,
                        SSTableRewriter rewriter,
                        LifecycleTransaction txn)
        {
            try (CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
            {
                int files = 1;
                while (ci.hasNext())
                {
                    rewriter.append(ci.next());
                    if (files == 1 && rewriter.currentWriter().getFilePointer() > 10000000)
                    {
                        rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory, txn));
                        files++;
                        assertEquals(cfs.getLiveSSTables().size(), files); // we have one original file plus the ones we have switched out.
                    }
                }
                rewriter.abort();
            }
        }
    });
}
Developer: scylladb, Project: scylla-tools-java, Lines: 31, Source: SSTableRewriterTest.java
Example 12: testNumberOfFiles_truncate
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void testNumberOfFiles_truncate() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, false, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
    {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext())
        {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
            {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                assertEquals(cfs.getLiveSSTables().size(), files); // we have one original file plus the ones we have switched out.
            }
        }
        sstables = rewriter.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
Developer: scylladb, Project: scylla-tools-java, Lines: 40, Source: SSTableRewriterTest.java
Example 13: testCanonicalView
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void testCanonicalView() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = Sets.newHashSet(s);
    assertEquals(1, sstables.size());
    boolean checked = false;
    try (ISSTableScanner scanner = sstables.iterator().next().getScanner();
         CompactionController controller = new CompactionController(cfs, sstables, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, false, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
    {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext())
        {
            writer.append(ci.next());
            if (!checked && writer.currentWriter().getFilePointer() > 15000000)
            {
                checked = true;
                ColumnFamilyStore.ViewFragment viewFragment = cfs.select(View.selectFunction(SSTableSet.CANONICAL));
                // canonical view should have only one SSTable which is not opened early.
                assertEquals(1, viewFragment.sstables.size());
                SSTableReader sstable = viewFragment.sstables.get(0);
                assertEquals(s.descriptor, sstable.descriptor);
                assertTrue("Found early opened SSTable in canonical view: " + sstable.getFilename(), sstable.openReason != SSTableReader.OpenReason.EARLY);
            }
        }
    }
    truncateCF();
    validateCFS(cfs);
}
Developer: scylladb, Project: scylla-tools-java, Lines: 39, Source: SSTableRewriterTest.java
Example 14: testTwoWriters
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
/**
 * emulates anticompaction - writing from one source sstable to two new sstables
 *
 * @throws IOException
 */
@Test
public void testTwoWriters() throws IOException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = Sets.newHashSet(s);
    assertEquals(1, sstables.size());
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, false, false);
         SSTableRewriter writer2 = new SSTableRewriter(txn, 1000, false, false);
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
    {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        writer2.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext())
        {
            if (writer.currentWriter().getFilePointer() < 15000000)
                writer.append(ci.next());
            else
                writer2.append(ci.next());
        }
        for (int i = 0; i < 5000; i++)
            assertFalse(Util.getOnlyPartition(Util.cmd(cfs, ByteBufferUtil.bytes(i)).build()).isEmpty());
    }
    truncateCF();
    validateCFS(cfs);
}
Developer: scylladb, Project: scylla-tools-java, Lines: 41, Source: SSTableRewriterTest.java
Example 15: EchoedRow
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
public EchoedRow(CompactionController controller, SSTableIdentityIterator row)
{
    super(row.getKey());
    this.row = row;
    this.gcBefore = controller.gcBefore;
    // Reset SSTableIdentityIterator because we have no guarantee that the filePointer hasn't moved since the Iterator was built
    row.reset();
}
Developer: devdattakulkarni, Project: Cassandra-KVPM, Lines: 9, Source: EchoedRow.java
Example 16: LazilyCompactedRow
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
public LazilyCompactedRow(CompactionController controller, List<? extends OnDiskAtomIterator> rows) {
    super(controller, rows);
}
Developer: instaclustr, Project: cassandra-sstable-tools, Lines: 4, Source: PurgeStatisticBackend.java
Example 17: getPositionsTest
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void getPositionsTest() throws InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.truncateBlocking();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = new HashSet<>(cfs.getSSTables());
    assertEquals(1, sstables.size());
    SSTableRewriter.overrideOpenInterval(10000000);
    SSTableRewriter writer = new SSTableRewriter(cfs, sstables, 1000, false);
    boolean checked = false;
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategy().getScanners(sstables))
    {
        ISSTableScanner scanner = scanners.scanners.get(0);
        CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(System.currentTimeMillis()));
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory));
        while (scanner.hasNext())
        {
            AbstractCompactedRow row = new LazilyCompactedRow(controller, Arrays.asList(scanner.next()));
            writer.append(row);
            if (!checked && writer.currentWriter().getFilePointer() > 15000000)
            {
                checked = true;
                for (SSTableReader sstable : cfs.getSSTables())
                {
                    if (sstable.openReason == SSTableReader.OpenReason.EARLY)
                    {
                        SSTableReader c = sstables.iterator().next();
                        Collection<Range<Token>> r = Arrays.asList(new Range<>(cfs.partitioner.getMinimumToken(), cfs.partitioner.getMinimumToken()));
                        List<Pair<Long, Long>> tmplinkPositions = sstable.getPositionsForRanges(r);
                        List<Pair<Long, Long>> compactingPositions = c.getPositionsForRanges(r);
                        assertEquals(1, tmplinkPositions.size());
                        assertEquals(1, compactingPositions.size());
                        assertEquals(0, tmplinkPositions.get(0).left.longValue());
                        // make sure we have no overlap between the early opened file and the compacting one:
                        assertEquals(tmplinkPositions.get(0).right.longValue(), compactingPositions.get(0).left.longValue());
                        assertEquals(c.uncompressedLength(), compactingPositions.get(0).right.longValue());
                    }
                }
            }
        }
    }
    assertTrue(checked);
    Collection<SSTableReader> newsstables = writer.finish();
    cfs.getDataTracker().markCompactedSSTablesReplaced(sstables, newsstables, OperationType.COMPACTION);
    Thread.sleep(100);
    validateCFS(cfs);
    int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list(), 0, 0);
    assertEquals(1, filecounts);
    cfs.truncateBlocking();
    Thread.sleep(1000); // make sure the deletion tasks have run etc
    validateCFS(cfs);
}
Developer: vcostet, Project: cassandra-kmean, Lines: 57, Source: SSTableRewriterTest.java
Example 18: testNumberOfFilesAndSizes
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void testNumberOfFilesAndSizes() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.truncateBlocking();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    long startStorageMetricsLoad = StorageMetrics.load.count();
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    SSTableRewriter.overrideOpenInterval(10000000);
    SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0))
    {
        while (scanner.hasNext())
        {
            rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
            {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
                files++;
                assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
                assertEquals(s.bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.count());
                assertEquals(s.bytesOnDisk(), cfs.metric.totalDiskSpaceUsed.count());
            }
        }
    }
    List<SSTableReader> sstables = rewriter.finish();
    cfs.getDataTracker().markCompactedSSTablesReplaced(compacting, sstables, OperationType.COMPACTION);
    long sum = 0;
    for (SSTableReader x : cfs.getSSTables())
        sum += x.bytesOnDisk();
    assertEquals(sum, cfs.metric.liveDiskSpaceUsed.count());
    assertEquals(startStorageMetricsLoad - s.bytesOnDisk() + sum, StorageMetrics.load.count());
    assertEquals(files, sstables.size());
    assertEquals(files, cfs.getSSTables().size());
    Thread.sleep(1000);
    // tmplink and tmp files should be gone:
    assertEquals(sum, cfs.metric.totalDiskSpaceUsed.count());
    assertFileCounts(s.descriptor.directory.list(), 0, 0);
    validateCFS(cfs);
}
Developer: vcostet, Project: cassandra-kmean, Lines: 49, Source: SSTableRewriterTest.java
Example 19: testNumberOfFiles_dont_clean_readers
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void testNumberOfFiles_dont_clean_readers() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.truncateBlocking();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    SSTableRewriter.overrideOpenInterval(10000000);
    SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0))
    {
        while (scanner.hasNext())
        {
            rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
            {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
                files++;
                assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
            }
        }
        List<SSTableReader> sstables = rewriter.finish();
        assertEquals(files, sstables.size());
        assertEquals(files, cfs.getSSTables().size());
        assertEquals(1, cfs.getDataTracker().getView().shadowed.size());
        cfs.getDataTracker().markCompactedSSTablesReplaced(compacting, sstables, OperationType.COMPACTION);
        assertEquals(files, cfs.getSSTables().size());
        assertEquals(0, cfs.getDataTracker().getView().shadowed.size());
        Thread.sleep(1000);
        assertFileCounts(s.descriptor.directory.list(), 0, 0);
        validateCFS(cfs);
    }
    catch (Throwable t)
    {
        rewriter.abort();
        throw t;
    }
}
Developer: vcostet, Project: cassandra-kmean, Lines: 48, Source: SSTableRewriterTest.java
Example 20: testNumberOfFiles_finish_empty_new_writer
import org.apache.cassandra.db.compaction.CompactionController; // import the required package/class
@Test
public void testNumberOfFiles_finish_empty_new_writer() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.truncateBlocking();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    SSTableRewriter.overrideOpenInterval(10000000);
    SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0))
    {
        while (scanner.hasNext())
        {
            rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
            if (rewriter.currentWriter().getFilePointer() > 25000000)
            {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
                files++;
                assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
            }
            if (files == 3)
            {
                // testing to finish when we have nothing written in the new file
                List<SSTableReader> sstables = rewriter.finish();
                cfs.getDataTracker().markCompactedSSTablesReplaced(compacting, sstables, OperationType.COMPACTION);
                break;
            }
        }
        Thread.sleep(1000);
        assertEquals(files - 1, cfs.getSSTables().size()); // we never wrote anything to the last file
        assertFileCounts(s.descriptor.directory.list(), 0, 0);
        validateCFS(cfs);
    }
    catch (Throwable t)
    {
        rewriter.abort();
        throw t;
    }
}
Developer: vcostet, Project: cassandra-kmean, Lines: 49, Source: SSTableRewriterTest.java
Note: the org.apache.cassandra.db.compaction.CompactionController examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.