Java Code Examples: org.apache.cassandra.db.commitlog.CommitLog

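This page collects usage examples of org.apache.cassandra.db.commitlog.CommitLog from Apache Cassandra itself and from projects built on top of it. The examples exercise the most common operations on the CommitLog singleton: appending mutations with add(), replaying segments with recover(), resetting state in tests with resetUnsafe(), marking segments clean with discardCompletedSegments(), and rolling segments with forceNewSegment()/forceRecycleAllSegments(). As a quick orientation, the sketch below condenses the append-and-replay pattern that recurs in Examples 6, 13 and 15. It is only a sketch: it assumes the 2.1-era API used throughout this page and a unit-test environment where the Keyspace1/Standard1 schema is already loaded (Util is Cassandra's test helper); the class and method names CommitLogSketch and appendAndReplay are illustrative, not part of Cassandra.

import java.nio.ByteBuffer;

import org.apache.cassandra.Util;                    // Cassandra test helper (cellname(), dk(), ...)
import org.apache.cassandra.db.Mutation;
import org.apache.cassandra.db.commitlog.CommitLog;
import org.apache.cassandra.utils.ByteBufferUtil;

public class CommitLogSketch
{
    // Minimal append-then-replay cycle, mirroring the tests below.
    public static void appendAndReplay() throws Exception
    {
        CommitLog.instance.resetUnsafe();            // test-only: start from a clean commit log

        Mutation rm = new Mutation("Keyspace1", ByteBufferUtil.bytes("k"));
        rm.add("Standard1", Util.cellname("c1"), ByteBuffer.allocate(100), 0);
        CommitLog.instance.add(rm);                  // append the serialized mutation to the active segment

        CommitLog.instance.recover();                // replay on-disk segments back into the memtables
    }
}
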
Example 1
public void applyModels() throws IOException
{
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(cfm.tableName);
    ksm = makeNewKeyspaceDefinition(ksm);
    try
    {
        CFMetaData.map(cfm);
    }
    catch (ConfigurationException ex)
    {
        throw new IOException(ex);
    }
    Table.open(cfm.tableName); // make sure it's init-ed w/ the old definitions first, since we're going to call initCf on the new one manually
    DatabaseDescriptor.setTableDefinition(ksm, newVersion);
    if (!clientMode)
        Table.open(ksm.name).initCf(cfm.cfId, cfm.cfName);
 
    if (!clientMode)
        CommitLog.instance().forceNewSegment();
}
 
Example 2
public void applyModels() throws IOException
{
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(cfm.tableName);
    ksm = makeNewKeyspaceDefinition(ksm);
    try
    {
        CFMetaData.map(cfm);
    }
    catch (ConfigurationException ex)
    {
        throw new IOException(ex);
    }
    Table.open(cfm.tableName); // make sure it's init-ed w/ the old definitions first, since we're going to call initCf on the new one manually
    DatabaseDescriptor.setTableDefinition(ksm, newVersion);
    // these definitions could have come from somewhere else.
    CFMetaData.fixMaxId();
    if (!clientMode)
        Table.open(ksm.name).initCf(cfm.cfId, cfm.cfName);
 
    if (!clientMode)
        CommitLog.instance().forceNewSegment();
}
 
Example 3
public void applyModels() throws IOException
{
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(cfm.tableName);
    ksm = makeNewKeyspaceDefinition(ksm);
    try
    {
        CFMetaData.map(cfm);
    }
    catch (ConfigurationException ex)
    {
        throw new IOException(ex);
    }
    Table.open(cfm.tableName); 
    DatabaseDescriptor.setTableDefinition(ksm, newVersion);
    if (!clientMode)
        Table.open(ksm.name).initCf(cfm.cfId, cfm.cfName);
 
    if (!clientMode)
        CommitLog.instance().forceNewSegment();
}
 
Example 4
public void applyModels() throws IOException
{
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(cfm.tableName);
    ksm = makeNewKeyspaceDefinition(ksm);
    try
    {
        CFMetaData.map(cfm);
    }
    catch (ConfigurationException ex)
    {
        throw new IOException(ex);
    }
    Table.open(cfm.tableName); 
    DatabaseDescriptor.setTableDefinition(ksm, newVersion);
    CFMetaData.fixMaxId();
    if (!clientMode)
        Table.open(ksm.name).initCf(cfm.cfId, cfm.cfName);
 
    if (!clientMode)
        CommitLog.instance().forceNewSegment();
}
 
Example 5
public void applyModels() throws IOException
{
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(cfm.tableName);
    ksm = makeNewKeyspaceDefinition(ksm);
    try
    {
        CFMetaData.map(cfm);
    }
    catch (ConfigurationException ex)
    {
        throw new IOException(ex);
    }
    Table.open(cfm.tableName); 
    DatabaseDescriptor.setTableDefinition(ksm, newVersion);
    CFMetaData.fixMaxId();
    if (!clientMode)
        Table.open(ksm.name).initCf(cfm.cfId, cfm.cfName);
 
    if (!clientMode)
        CommitLog.instance().forceNewSegment();
}
 
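Examples 1 through 5 are variants of the same schema-migration applyModels() method taken from different Cassandra revisions; they differ only in whether CFMetaData.fixMaxId() is called before re-initializing the column family. Every variant ends (outside client mode) with CommitLog.instance().forceNewSegment(), so that writes made after the schema change land in a fresh commit log segment.
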
Example 6
@Test
public void testExceedRecordLimit() throws Exception
{
    CommitLog.instance.resetUnsafe();
    try
    {
        Mutation rm = new Mutation("Keyspace1", bytes("k"));
        rm.add("Standard1", Util.cellname("c1"), ByteBuffer.allocate(1 + getMaxRecordDataSize()), 0);
        CommitLog.instance.add(rm);
        throw new AssertionError("mutation larger than limit was accepted");
    }
    catch (IllegalArgumentException e)
    {
        // IAE is thrown on too-large mutations
    }
}
 
Example 7
@Test
public void testCommitFailurePolicy_stop() throws ConfigurationException
{
    // Need storage service active so the stop policy can shut down gossip
    StorageService.instance.initServer();
    Assert.assertTrue(Gossiper.instance.isEnabled());

    Config.CommitFailurePolicy oldPolicy = DatabaseDescriptor.getCommitFailurePolicy();
    try
    {
        DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.stop);
        CommitLog.handleCommitError("Test stop error", new Throwable());
        Assert.assertFalse(Gossiper.instance.isEnabled());
    }
    finally
    {
        DatabaseDescriptor.setCommitFailurePolicy(oldPolicy);
    }
}
 
Example 8
@Test
public void testCommitFailurePolicy_die()
{
    KillerForTests killerForTests = new KillerForTests();
    JVMStabilityInspector.Killer originalKiller = JVMStabilityInspector.replaceKiller(killerForTests);
    Config.CommitFailurePolicy oldPolicy = DatabaseDescriptor.getCommitFailurePolicy();
    try
    {
        DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.die);
        CommitLog.handleCommitError("Testing die policy", new Throwable());
        Assert.assertTrue(killerForTests.wasKilled());
    }
    finally
    {
        DatabaseDescriptor.setCommitFailurePolicy(oldPolicy);
        JVMStabilityInspector.replaceKiller(originalKiller);
    }
}
 
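Examples 7 and 8 exercise the commit_failure_policy setting (Config.CommitFailurePolicy): under the stop policy, CommitLog.handleCommitError() is expected to disable gossip so the node stops participating in the cluster, and under the die policy it is expected to kill the JVM, which the test observes through the KillerForTests it swaps into JVMStabilityInspector.
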
Example 9
@Test
public void testTruncate() throws IOException
{
    Keyspace keyspace = Keyspace.open("Keyspace1");
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard1");

    Mutation rm;
    ColumnFamily cf;

    // add a single cell
    cf = ArrayBackedSortedColumns.factory.create("Keyspace1", "Standard1");
    cf.addColumn(column("col1", "val1", 1L));
    rm = new Mutation("Keyspace1", ByteBufferUtil.bytes("keymulti"), cf);
    rm.apply();

    // Make sure data was written
    assertNotNull(getFromTable(keyspace, "Standard1", "keymulti", "col1"));

    // and now truncate it
    cfs.truncateBlocking();
    CommitLog.instance.resetUnsafe();
    CommitLog.instance.recover();

    // and validate truncation.
    assertNull(getFromTable(keyspace, "Standard1", "keymulti", "col1"));
}
 
Example 10
public static void prepareServer()
{
    // Cleanup first
    cleanupAndLeaveDirs();

    CommitLog.instance.allocator.enableReserveSegmentCreation();

    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler()
    {
        public void uncaughtException(Thread t, Throwable e)
        {
            logger.error("Fatal exception in thread " + t, e);
        }
    });

    Keyspace.setInitialized();
}
 
Example 11
private static void cleanupAndLeaveDirs() throws IOException
{
    mkdirs();
    cleanup();
    mkdirs();
    CommitLog.instance.resetUnsafe(); // cleanup screws w/ CommitLog, this brings it back to safe state
}
 
Example 12
/**
 * This method appends a row to the global CommitLog, then updates memtables and indexes.
 *
 * @param mutation       the row to write.  Must not be modified after calling apply, since commitlog append
 *                       may happen concurrently, depending on the CL Executor type.
 * @param writeCommitLog false to disable commitlog append entirely
 * @param updateIndexes  false to disable index updates (used by CollationController "defragmenting")
 */
public void apply(Mutation mutation, boolean writeCommitLog, boolean updateIndexes)
{
    try (OpOrder.Group opGroup = writeOrder.start())
    {
        // write the mutation to the commitlog and memtables
        ReplayPosition replayPosition = null;
        if (writeCommitLog)
        {
            Tracing.trace("Appending to commitlog");
            replayPosition = CommitLog.instance.add(mutation);
        }

        DecoratedKey key = StorageService.getPartitioner().decorateKey(mutation.key());
        for (ColumnFamily cf : mutation.getColumnFamilies())
        {
            ColumnFamilyStore cfs = columnFamilyStores.get(cf.id());
            if (cfs == null)
            {
                logger.error("Attempting to mutate non-existant column family {}", cf.id());
                continue;
            }

            Tracing.trace("Adding to {} memtable", cf.metadata().cfName);
            SecondaryIndexManager.Updater updater = updateIndexes
                                                  ? cfs.indexManager.updaterFor(key, cf, opGroup)
                                                  : SecondaryIndexManager.nullUpdater;
            cfs.apply(key, cf, updater, opGroup, replayPosition);
        }
    }
}
 
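Example 12 is the keyspace write path: inside an OpOrder group, the mutation is first appended to the commit log (when writeCommitLog is true), and the returned ReplayPosition is then handed to each ColumnFamilyStore via cfs.apply() along with the memtable and index updates. That position is what later lets the flush code (Examples 22 and 23) tell the commit log which segments no longer contain un-flushed data.
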
Example 13
@Test
public void testOne() throws IOException
{
    CommitLog.instance.resetUnsafe();
    Keyspace keyspace1 = Keyspace.open("Keyspace1");
    Keyspace keyspace2 = Keyspace.open("Keyspace2");

    Mutation rm;
    DecoratedKey dk = Util.dk("keymulti");
    ColumnFamily cf;

    cf = ArrayBackedSortedColumns.factory.create("Keyspace1", "Standard1");
    cf.addColumn(column("col1", "val1", 1L));
    rm = new Mutation("Keyspace1", dk.getKey(), cf);
    rm.apply();

    cf = ArrayBackedSortedColumns.factory.create("Keyspace2", "Standard3");
    cf.addColumn(column("col2", "val2", 1L));
    rm = new Mutation("Keyspace2", dk.getKey(), cf);
    rm.apply();

    keyspace1.getColumnFamilyStore("Standard1").clearUnsafe();
    keyspace2.getColumnFamilyStore("Standard3").clearUnsafe();

    CommitLog.instance.resetUnsafe(); // disassociate segments from live CL
    CommitLog.instance.recover();

    assertColumns(Util.getColumnFamily(keyspace1, dk, "Standard1"), "col1");
    assertColumns(Util.getColumnFamily(keyspace2, dk, "Standard3"), "col2");
}
 
Example 14
@Test
public void testMissingHeader() throws IOException
{
    Keyspace keyspace1 = Keyspace.open("Keyspace1");
    Keyspace keyspace2 = Keyspace.open("Keyspace2");

    Mutation rm;
    DecoratedKey dk = Util.dk("keymulti");
    ColumnFamily cf;

    cf = ArrayBackedSortedColumns.factory.create("Keyspace1", "Standard1");
    cf.addColumn(column("col1", "val1", 1L));
    rm = new Mutation("Keyspace1", dk.getKey(), cf);
    rm.apply();

    cf = ArrayBackedSortedColumns.factory.create("Keyspace2", "Standard3");
    cf.addColumn(column("col2", "val2", 1L));
    rm = new Mutation("Keyspace2", dk.getKey(), cf);
    rm.apply();

    keyspace1.getColumnFamilyStore("Standard1").clearUnsafe();
    keyspace2.getColumnFamilyStore("Standard3").clearUnsafe();

    // nuke the header
    for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles())
    {
        if (file.getName().endsWith(".header"))
            FileUtils.deleteWithConfirm(file);
    }

    CommitLog.instance.resetUnsafe(); // disassociate segments from live CL
    CommitLog.instance.recover();

    assertColumns(Util.getColumnFamily(keyspace1, dk, "Standard1"), "col1");
    assertColumns(Util.getColumnFamily(keyspace2, dk, "Standard3"), "col2");
}
 
Example 15
@Test
public void testEqualRecordLimit() throws Exception
{
    CommitLog.instance.resetUnsafe();

    Mutation rm = new Mutation("Keyspace1", bytes("k"));
    rm.add("Standard1", Util.cellname("c1"), ByteBuffer.allocate(getMaxRecordDataSize()), 0);
    CommitLog.instance.add(rm);
}
 
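Taken together with Example 6, this pins down the mutation size limit: a value buffer of exactly getMaxRecordDataSize() bytes is accepted, while one extra byte makes CommitLog.add() reject the mutation with an IllegalArgumentException.
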
Example 16
protected void testRecovery(byte[] logData) throws Exception
{
    File logFile = tmpFile();
    try (OutputStream lout = new FileOutputStream(logFile))
    {
        lout.write(logData);
        // statics make it annoying to test things correctly
        CommitLog.instance.recover(new File[]{ logFile }); // CASSANDRA-1119 / CASSANDRA-1179: throw on failure
    }
}
 
Example 17
@Test
public void testTruncateWithoutSnapshot()  throws ExecutionException, InterruptedException
{
    CommitLog.instance.resetUnsafe();
    boolean prev = DatabaseDescriptor.isAutoSnapshot();
    DatabaseDescriptor.setAutoSnapshot(false);
    ColumnFamilyStore cfs1 = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
    ColumnFamilyStore cfs2 = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard2");

    final Mutation rm1 = new Mutation("Keyspace1", bytes("k"));
    rm1.add("Standard1", Util.cellname("c1"), ByteBuffer.allocate(100), 0);
    rm1.apply();
    cfs1.truncateBlocking();
    DatabaseDescriptor.setAutoSnapshot(prev);
    // mutations of a quarter segment each spill the commit log into a second segment
    final Mutation rm2 = new Mutation("Keyspace1", bytes("k"));
    rm2.add("Standard2", Util.cellname("c1"), ByteBuffer.allocate(DatabaseDescriptor.getCommitLogSegmentSize() / 4), 0);

    for (int i = 0 ; i < 5 ; i++)
        CommitLog.instance.add(rm2);

    Assert.assertEquals(2, CommitLog.instance.activeSegments());

    // mark the system tables and Standard2 clean up to the current position; with Standard1
    // truncated (and no snapshot taken), the older segment should now be discardable
    ReplayPosition position = CommitLog.instance.getContext();
    for (Keyspace ks : Keyspace.system())
        for (ColumnFamilyStore syscfs : ks.getColumnFamilyStores())
            CommitLog.instance.discardCompletedSegments(syscfs.metadata.cfId, position);
    CommitLog.instance.discardCompletedSegments(cfs2.metadata.cfId, position);
    Assert.assertEquals(1, CommitLog.instance.activeSegments());
}
 
Example 18
@Test
public void testTruncateWithoutSnapshotNonDurable()  throws ExecutionException, InterruptedException
{
    CommitLog.instance.resetUnsafe();
    boolean prevAutoSnapshot = DatabaseDescriptor.isAutoSnapshot();
    DatabaseDescriptor.setAutoSnapshot(false);
    Keyspace notDurableKs = Keyspace.open("NoCommitlogSpace");
    Assert.assertFalse(notDurableKs.metadata.durableWrites);
    ColumnFamilyStore cfs = notDurableKs.getColumnFamilyStore("Standard1");
    CellNameType type = notDurableKs.getColumnFamilyStore("Standard1").getComparator();
    Mutation rm;
    DecoratedKey dk = Util.dk("key1");

    // add data
    rm = new Mutation("NoCommitlogSpace", dk.getKey());
    rm.add("Standard1", Util.cellname("Column1"), ByteBufferUtil.bytes("abcd"), 0);
    rm.apply();

    ReadCommand command = new SliceByNamesReadCommand("NoCommitlogSpace", dk.getKey(), "Standard1", System.currentTimeMillis(), new NamesQueryFilter(FBUtilities.singleton(Util.cellname("Column1"), type)));
    Row row = command.getRow(notDurableKs);
    Cell col = row.cf.getColumn(Util.cellname("Column1"));
    Assert.assertEquals(col.value(), ByteBuffer.wrap("abcd".getBytes()));
    cfs.truncateBlocking();
    DatabaseDescriptor.setAutoSnapshot(prevAutoSnapshot);

    // the row must be gone after truncation, even though this keyspace never wrote to the commit log
    row = command.getRow(notDurableKs);
    Assert.assertEquals(null, row.cf);
}
 
Example 19
public static void cleanupAndLeaveDirs()
{
    mkdirs();
    cleanup();
    mkdirs();
    CommitLog.instance.resetUnsafe(); // cleanup screws w/ CommitLog, this brings it back to safe state
}
 
Example 20
private static void cleanupAndLeaveDirs() throws IOException
{
    mkdirs();
    cleanup();
    mkdirs();
    CommitLog.instance.resetUnsafe(true); // cleanup screws w/ CommitLog, this brings it back to safe state
}
 
Example 21
@Test
public void testProcessCommitLogs() throws Exception {
    int commitLogRowSize = 10;
    context.getCassandraClient().execute("CREATE TABLE IF NOT EXISTS " + keyspaceTable("cdc_table") + " (a int, b int, PRIMARY KEY(a)) WITH cdc = true;");
    context.getSchemaHolder().refreshSchemas();

    // programmatically add insertion and deletion events to the commit log, because running an 'INSERT' or 'DELETE'
    // CQL statement against the embedded Cassandra does not modify the commit log file on disk
    CFMetaData cfMetaData = Schema.instance.getCFMetaData(TEST_KEYSPACE, "cdc_table");
    for (int i = 0; i < commitLogRowSize; i++) {
        SimpleBuilders.PartitionUpdateBuilder puBuilder = new SimpleBuilders.PartitionUpdateBuilder(cfMetaData, i);
        Row row = puBuilder.row().add("b", i).build();
        PartitionUpdate pu = PartitionUpdate.singleRowUpdate(cfMetaData, puBuilder.build().partitionKey(), row);
        Mutation m = new Mutation(pu);
        CommitLog.instance.add(m);
    }
    CommitLog.instance.sync(true);

    // check to make sure there are no records in the queue to begin with
    ChangeEventQueue<Event> queue = context.getQueue();
    assertEquals(queue.totalCapacity(), queue.remainingCapacity());

    // process the logs in commit log directory
    File cdcLoc = new File(DatabaseDescriptor.getCommitLogLocation());
    File[] commitLogs = CommitLogUtil.getCommitLogs(cdcLoc);
    for (File commitLog : commitLogs) {
        commitLogProcessor.processCommitLog(commitLog);
    }

    // verify the commit log has been processed and records have been enqueued
    List<Event> events = queue.poll();
    int eofEventSize = commitLogs.length;
    assertEquals(commitLogRowSize + eofEventSize, events.size());
    for (int i = 0; i < events.size(); i++) {
        Event event = events.get(i);
        if (event instanceof Record) {
            Record record = (Record) events.get(i);
            assertEquals(record.getEventType(), Event.EventType.CHANGE_EVENT);
            assertEquals(record.getSource().cluster, DatabaseDescriptor.getClusterName());
            assertFalse(record.getSource().snapshot);
            assertEquals(record.getSource().keyspaceTable.name(), keyspaceTable("cdc_table"));
            assertTrue(record.getSource().offsetPosition.fileName.contains(String.valueOf(CommitLog.instance.getCurrentPosition().segmentId)));
        }
        else if (event instanceof EOFEvent) {
            EOFEvent eofEvent = (EOFEvent) event;
            assertTrue(eofEvent.success);
        }
        else {
            throw new Exception("unexpected event type");
        }
    }

    deleteTestKeyspaceTables();
}
 
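Example 21 comes from a change-data-capture style integration test: since CQL statements against the embedded node do not directly touch the on-disk commit log, the test builds PartitionUpdates programmatically, appends them with CommitLog.instance.add(), forces a sync, and then runs a commit log processor over the segment files, verifying that one change event per row (plus one end-of-file event per segment) ends up in the queue.
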
Example 22
public void run()
{
    writeBarrier.await();

    /**
     * we can flush 2is as soon as the barrier completes, as they will be consistent with (or ahead of) the
     * flushed memtables and CL position, which is as good as we can guarantee.
     * TODO: SecondaryIndex should support setBarrier(), so custom implementations can co-ordinate exactly
     * with CL as we do with memtables/CFS-backed SecondaryIndexes.
     */

    if (flushSecondaryIndexes)
    {
        for (SecondaryIndex index : indexManager.getIndexesNotBackedByCfs())
        {
            // flush any non-cfs backed indexes
            logger.info("Flushing SecondaryIndex {}", index);
            index.forceBlockingFlush();
        }
    }

    try
    {
        // we wait on the latch for the lastReplayPosition to be set, and so that waiters
        // on this task can rely on all prior flushes being complete
        latch.await();
    }
    catch (InterruptedException e)
    {
        throw new IllegalStateException();
    }

    // must check lastReplayPosition != null because Flush may find that all memtables are clean
    // and so not set a lastReplayPosition
    if (lastReplayPosition != null)
    {
        CommitLog.instance.discardCompletedSegments(metadata.cfId, lastReplayPosition);
    }

    metric.pendingFlushes.dec();
}
 
Example 23
private Flush(boolean truncate)
{
    // if true, we won't flush, we'll just wait for any outstanding writes, switch the memtable, and discard
    this.truncate = truncate;

    metric.pendingFlushes.inc();
    /**
     * To ensure correctness of switch without blocking writes, run() needs to wait for all write operations
     * started prior to the switch to complete. We do this by creating a Barrier on the writeOrdering
     * that all write operations register themselves with, and assigning this barrier to the memtables,
     * after which we *.issue()* the barrier. This barrier is used to direct write operations started prior
     * to the barrier.issue() into the memtable we have switched out, and any started after to its replacement.
     * In doing so it also tells the write operations to update the lastReplayPosition of the memtable, so
     * that we know the CL position we are dirty to, which can be marked clean when we complete.
     */
    writeBarrier = keyspace.writeOrder.newBarrier();
    memtables = new ArrayList<>();

    // submit flushes for the memtable for any indexed sub-cfses, and our own
    AtomicReference<ReplayPosition> lastReplayPositionHolder = new AtomicReference<>();
    for (ColumnFamilyStore cfs : concatWithIndexes())
    {
        // switch all memtables, regardless of their dirty status, setting the barrier
        // so that we can reach a coordinated decision about cleanliness once they
        // are no longer possible to be modified
        Memtable mt = cfs.data.switchMemtable(truncate);
        mt.setDiscarding(writeBarrier, lastReplayPositionHolder);
        memtables.add(mt);
    }

    // we now attempt to define the lastReplayPosition; we do this by grabbing the current limit from the CL
    // and attempting to set the holder to this value. at the same time all writes to the memtables are
    // also maintaining this value, so if somebody sneaks ahead of us somehow (should be rare) we simply retry,
    // so that we know all operations prior to the position have not reached it yet
    ReplayPosition lastReplayPosition;
    while (true)
    {
        lastReplayPosition = new Memtable.LastReplayPosition(CommitLog.instance.getContext());
        ReplayPosition currentLast = lastReplayPositionHolder.get();
        if ((currentLast == null || currentLast.compareTo(lastReplayPosition) <= 0)
            && lastReplayPositionHolder.compareAndSet(currentLast, lastReplayPosition))
            break;
    }

    // we then issue the barrier; this lets us wait for all operations started prior to the barrier to complete;
    // since this happens after wiring up the lastReplayPosition, we also know all operations with earlier
    // replay positions have also completed, i.e. the memtables are done and ready to flush
    writeBarrier.issue();
    postFlush = new PostFlush(!truncate, writeBarrier, lastReplayPosition);
}
 
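Examples 22 and 23 are the two halves of a memtable flush: the Flush constructor (Example 23) switches the memtables, captures the commit log position from CommitLog.instance.getContext() as the lastReplayPosition, and issues the write barrier; the post-flush task (Example 22) waits on that barrier and, once the lastReplayPosition is known, calls CommitLog.instance.discardCompletedSegments() so segments containing only flushed data can be recycled.
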
Example 24
private static void dropKeyspace(String ksName)
{
    KSMetaData ksm = Schema.instance.getKSMetaData(ksName);
    String snapshotName = Keyspace.getTimestampedSnapshotName(ksName);

    CompactionManager.instance.interruptCompactionFor(ksm.cfMetaData().values(), true);

    Keyspace keyspace = Keyspace.open(ksm.name);

    // remove all cfs from the keyspace instance.
    List<UUID> droppedCfs = new ArrayList<>();
    for (CFMetaData cfm : ksm.cfMetaData().values())
    {
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfm.cfName);

        Schema.instance.purge(cfm);

        if (!StorageService.instance.isClientMode())
        {
            if (DatabaseDescriptor.isAutoSnapshot())
                cfs.snapshot(snapshotName);
            Keyspace.open(ksm.name).dropCf(cfm.cfId);
        }

        droppedCfs.add(cfm.cfId);
    }

    // remove the keyspace from the static instances.
    Keyspace.clear(ksm.name);
    Schema.instance.clearKeyspaceDefinition(ksm);

    keyspace.writeOrder.awaitNewBarrier();

    // force a new segment in the CL
    CommitLog.instance.forceRecycleAllSegments(droppedCfs);

    if (!StorageService.instance.isClientMode())
    {
        MigrationManager.instance.notifyDropKeyspace(ksm);
    }
}
 
Example 25
@Test
public void testNothingToRecover() throws IOException
{
    CommitLog.instance.resetUnsafe();
    CommitLog.instance.recover();
}
 
Example 26
@Test
public void testRecoveryWithEmptyLog() throws Exception
{
    CommitLog.instance.recover(new File[]{ tmpFile() });
}