Java Source Code Examples: org.elasticsearch.index.translog.Translog

Example 1
public Location writeToLocal(BytesReference data) throws IOException {
    final long position;
    final long generation;
    try (ReleasableLock lock = writeLock.acquire()) {
        ensureOpen();
        if (writtenOffset > TRANSLOG_ROLLING_SIZE_BYTES) {
            IOUtils.close(writeChannel);
            tmpTranslogGeneration.incrementAndGet();
            writeChannel = FileChannel.open(this.translogPath.resolve(getFileNameFromId(tmpTranslogGeneration.get())), StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE);
            writtenOffset = 0;
        }
        generation = tmpTranslogGeneration.get();
        position = writtenOffset;
        data.writeTo(writeChannel);
        writtenOffset = writtenOffset + data.length();
    }
    return new Translog.Location(generation, position, data.length());
}
 
Example 2
public Translog.Operation readFromLocal(Location location) {
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        long fileGeneration = location.generation;
        FileChannel readChannel = readChannels.get(fileGeneration);
        if (readChannel == null) {
            readChannel = openReader(fileGeneration);
        }
        if (readChannel == null) {
            // throw exception since could not find reader
            throw new ElasticsearchException("could not open reader for file generation {}", fileGeneration);
        }
        ByteBuffer buffer = ByteBuffer.allocate(location.size);
        Channels.readFromFileChannelWithEofException(readChannel, location.translogLocation, buffer);
        buffer.flip();
        ByteBufferStreamInput in = new ByteBufferStreamInput(buffer);
        Translog.Operation.Type type = Translog.Operation.Type.fromId(in.readByte());
        Translog.Operation operation = DistributedTranslog.newOperationFromType(type);
        operation.readFrom(in);
        in.close();
        return operation;
    } catch (IOException e) {
        logger.error("errors while read from local", e);
        throw new ElasticsearchException("failed to read source from translog location " + location, e);
    }
}
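
The two methods above form a write/read pair keyed by a Translog.Location (generation, position, size). A minimal round-trip sketch, assuming a DistributedTranslog-style instance named localTranslog that exposes both methods, and a payload that already contains a serialized operation (type byte followed by the operation body):
// hedged round-trip sketch; localTranslog and the payload format are assumptions, not part of the code above
private Translog.Operation writeAndReadBack(BytesReference serializedOp) throws IOException {
    Translog.Location location = localTranslog.writeToLocal(serializedOp);   // append and remember where it landed
    return localTranslog.readFromLocal(location);                            // read it back by generation/position/size
}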
 
Example 3
/**
 * Perform phase2 of the recovery process
 * <p/>
 * Phase2 takes a snapshot of the current translog *without* acquiring the
 * write lock (however, the translog snapshot is a point-in-time view of
 * the translog). It then sends each translog operation to the target node
 * so it can be replayed into the new shard.
 */
public void phase2(Translog.Snapshot snapshot) {
    if (shard.state() == IndexShardState.CLOSED) {
        throw new IndexShardClosedException(request.shardId());
    }
    cancellableThreads.checkForCancel();

    StopWatch stopWatch = new StopWatch().start();

    logger.trace("{} recovery [phase2] to {}: sending transaction log operations", request.shardId(), request.targetNode());
    // Send all the snapshot's translog operations to the target
    int totalOperations = sendSnapshot(snapshot);
    stopWatch.stop();
    logger.trace("{} recovery [phase2] to {}: took [{}]", request.shardId(), request.targetNode(), stopWatch.totalTime());
    response.phase2Time = stopWatch.totalTime().millis();
    response.phase2Operations = totalOperations;
}
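
phase2 delegates the actual shipping of operations to sendSnapshot, which is not shown here. The following is only a rough sketch of the shape of that loop; sendChunkToTarget and CHUNK_SIZE are hypothetical placeholders, not the real RecoverySourceHandler code:
// drain the snapshot and ship operations to the target in batches (sketch only)
private int sendSnapshotSketch(Translog.Snapshot snapshot) throws IOException {
    int totalOps = 0;
    final List<Translog.Operation> chunk = new ArrayList<>();
    Translog.Operation op;
    while ((op = snapshot.next()) != null) {        // next() returns null once the snapshot is exhausted
        chunk.add(op);
        totalOps++;
        if (chunk.size() >= CHUNK_SIZE) {           // CHUNK_SIZE: placeholder batching threshold
            sendChunkToTarget(chunk);               // hypothetical helper wrapping the transport call
            chunk.clear();
        }
    }
    if (chunk.isEmpty() == false) {
        sendChunkToTarget(chunk);                   // flush the final partial batch
    }
    return totalOps;
}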
 
Example 4
protected void prepareTargetForTranslog(final Translog.View translogView) {
    StopWatch stopWatch = new StopWatch().start();
    logger.trace("{} recovery [phase1] to {}: prepare remote engine for translog", request.shardId(), request.targetNode());
    final long startEngineStart = stopWatch.totalTime().millis();
    cancellableThreads.execute(new Interruptable() {
        @Override
        public void run() throws InterruptedException {
            // Send a request preparing the new shard's translog to receive
            // operations. This ensures the shard engine is started and disables
            // garbage collection (not the JVM's GC!) of tombstone deletes
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG,
                    new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), translogView.totalOperations()),
                    TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
        }
    });

    stopWatch.stop();

    response.startTime = stopWatch.totalTime().millis() - startEngineStart;
    logger.trace("{} recovery [phase1] to {}: remote engine start took [{}]",
            request.shardId(), request.targetNode(), stopWatch.totalTime());
}
 
Example 5
@Override
public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService,
                                            long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException {
    // TODO: Should we defer the refresh until we really need it?
    ensureOpen();
    refreshIfNeeded(source, toSeqNo);
    Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL);
    try {
        LuceneChangesSnapshot snapshot = new LuceneChangesSnapshot(
            searcher, mapperService, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, fromSeqNo, toSeqNo, requiredFullRange);
        searcher = null;
        return snapshot;
    } catch (Exception e) {
        try {
            maybeFailEngine("acquire changes snapshot", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw e;
    } finally {
        IOUtils.close(searcher);
    }
}
 
Example 6
private static Settings.Builder setRandomIndexTranslogSettings(Random random, Settings.Builder builder) {
    if (random.nextBoolean()) {
        builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
                new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 300), ByteSizeUnit.MB));
    }
    if (random.nextBoolean()) {
        builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
                new ByteSizeValue(1, ByteSizeUnit.PB)); // just don't flush
    }
    if (random.nextBoolean()) {
        builder.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(),
                RandomPicks.randomFrom(random, Translog.Durability.values()));
    }

    if (random.nextBoolean()) {
        builder.put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(),
                RandomNumbers.randomIntBetween(random, 100, 5000), TimeUnit.MILLISECONDS);
    }

    return builder;
}
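
For reference, a non-randomized equivalent of the three settings touched above; the keys are the ones behind the IndexSettings constants used here and the values are purely illustrative:
Settings settings = Settings.builder()
    .put("index.translog.flush_threshold_size", "512mb")   // INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING
    .put("index.translog.durability", "async")             // INDEX_TRANSLOG_DURABILITY_SETTING (Translog.Durability)
    .put("index.translog.sync_interval", "5s")             // INDEX_TRANSLOG_SYNC_INTERVAL_SETTING
    .build();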
 
Example 7
protected void recoverFromTranslog(EngineConfig engineConfig, Translog.TranslogGeneration translogGeneration) throws IOException {
    int opsRecovered = 0;
    final TranslogRecoveryPerformer handler = engineConfig.getTranslogRecoveryPerformer();
    try (Translog.Snapshot snapshot = translog.newSnapshot()) {
        opsRecovered = handler.recoveryFromSnapshot(this, snapshot);
    } catch (Throwable e) {
        throw new EngineException(shardId, "failed to recover from translog", e);
    }

    // flush if we recovered something or if we have references to older translogs
    // note: if opsRecovered == 0 and we have older translogs it means they are corrupted or 0 length.
    if (opsRecovered > 0) {
        logger.trace("flushing post recovery from translog. ops recovered [{}]. committed translog id [{}]. current id [{}]",
                opsRecovered, translogGeneration == null ? null : translogGeneration.translogFileGeneration,
                translog.currentFileGeneration());
        flush(true, true);
    } else if (translog.isCurrent(translogGeneration) == false) {
        commitIndexWriter(indexWriter, translog, lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID));
    }
}
 
Example 8
/**
 * Reads the current stored translog ID from the IW commit data. If the id is not found, recommits the current
 * translog id into lucene and returns null.
 */
@Nullable
private Translog.TranslogGeneration loadTranslogIdFromCommit(IndexWriter writer) throws IOException {
    // commit on a just opened writer will commit even if there are no changes done to it
    // we rely on that for the commit data translog id key
    final Map<String, String> commitUserData = writer.getCommitData();
    if (commitUserData.containsKey("translog_id")) {
        assert commitUserData.containsKey(Translog.TRANSLOG_UUID_KEY) == false : "legacy commit contains translog UUID";
        return new Translog.TranslogGeneration(null, Long.parseLong(commitUserData.get("translog_id")));
    } else if (commitUserData.containsKey(Translog.TRANSLOG_GENERATION_KEY)) {
        if (commitUserData.containsKey(Translog.TRANSLOG_UUID_KEY) == false) {
            throw new IllegalStateException("commit doesn't contain translog UUID");
        }
        final String translogUUID = commitUserData.get(Translog.TRANSLOG_UUID_KEY);
        final long translogGen = Long.parseLong(commitUserData.get(Translog.TRANSLOG_GENERATION_KEY));
        return new Translog.TranslogGeneration(translogUUID, translogGen);
    }
    return null;
}
 
Example 9
private void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException {
    try {
        Translog.TranslogGeneration translogGeneration = translog.getGeneration();
        logger.trace("committing writer with translog id [{}]  and sync id [{}] ", translogGeneration.translogFileGeneration, syncId);
        Map<String, String> commitData = new HashMap<>(2);
        commitData.put(Translog.TRANSLOG_GENERATION_KEY, Long.toString(translogGeneration.translogFileGeneration));
        commitData.put(Translog.TRANSLOG_UUID_KEY, translogGeneration.translogUUID);
        if (syncId != null) {
            commitData.put(Engine.SYNC_COMMIT_ID, syncId);
        }
        indexWriter.setCommitData(commitData);
        writer.commit();
    } catch (Throwable ex) {
        failEngine("lucene commit failed", ex);
        throw ex;
    }
}
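
The keys written here are exactly what loadTranslogIdFromCommit in the previous example reads back. A short verification sketch, continuing in the same context and using only the IndexWriter getCommitData() call already shown above:
Map<String, String> userData = writer.getCommitData();
long committedGeneration = Long.parseLong(userData.get(Translog.TRANSLOG_GENERATION_KEY));
String committedTranslogUUID = userData.get(Translog.TRANSLOG_UUID_KEY);
String syncCommitId = userData.get(Engine.SYNC_COMMIT_ID);   // null unless a sync id was passed in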
 
Example 10
public int recoveryFromSnapshot(Engine engine, Translog.Snapshot snapshot) throws IOException {
    Translog.Operation operation;
    int opsRecovered = 0;
    while ((operation = snapshot.next()) != null) {
        try {
            performRecoveryOperation(engine, operation, true);
            opsRecovered++;
        } catch (ElasticsearchException e) {
            if (e.status() == RestStatus.BAD_REQUEST) {
                // mainly for MapperParsingException and Failure to detect xcontent
                logger.info("ignoring recovery of a corrupt translog entry", e);
            } else {
                throw e;
            }
        }
    }
    return opsRecovered;
}
 
Example 11
SnapshotSender(Logger logger, SyncAction syncAction, ResyncTask task, ShardId shardId, String primaryAllocationId, long primaryTerm,
               Translog.Snapshot snapshot, int chunkSizeInBytes, long startingSeqNo, long maxSeqNo,
               long maxSeenAutoIdTimestamp, ActionListener<Void> listener) {
    this.logger = logger;
    this.syncAction = syncAction;
    this.task = task;
    this.shardId = shardId;
    this.primaryAllocationId = primaryAllocationId;
    this.primaryTerm = primaryTerm;
    this.snapshot = snapshot;
    this.chunkSizeInBytes = chunkSizeInBytes;
    this.startingSeqNo = startingSeqNo;
    this.maxSeqNo = maxSeqNo;
    this.maxSeenAutoIdTimestamp = maxSeenAutoIdTimestamp;
    this.listener = listener;
    task.setTotalOperations(snapshot.totalOperations());
}
 
Example 12
@Override
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Throwable {

    // validate, if routing is required, that we got routing
    IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
    MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
    if (mappingMd != null && mappingMd.routing().required()) {
        if (request.routing() == null) {
            throw new RoutingMissingException(request.shardId().getIndex(), request.type(), request.id());
        }
    }

    IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    IndexShard indexShard = indexService.shardSafe(request.shardId().id());
    indexShard.checkDiskSpace(fsService);
    final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard, mappingUpdatedAction);
    final IndexResponse response = result.response;
    final Translog.Location location = result.location;
    processAfterWrite(request.refresh(), indexShard, location);
    return new Tuple<>(response, request);
}
 
Example 13
@Override
public void indexTranslogOperations(List<Translog.Operation> operations,
                                    int totalTranslogOps,
                                    long maxSeenAutoIdTimestampOnPrimary,
                                    long maxSeqNoOfDeletesOrUpdatesOnPrimary,
                                    ActionListener<Long> listener) {
    final RecoveryTranslogOperationsRequest request = new RecoveryTranslogOperationsRequest(
        recoveryId,
        shardId,
        operations,
        totalTranslogOps,
        maxSeenAutoIdTimestampOnPrimary,
        maxSeqNoOfDeletesOrUpdatesOnPrimary);
    transportService.submitRequest(
        targetNode,
        PeerRecoveryTargetService.Actions.TRANSLOG_OPS,
        request,
        translogOpsRequestOptions,
        new ActionListenerResponseHandler<>(
            ActionListener.wrap(
                r -> listener.onResponse(r.localCheckpoint), listener::onFailure),
            RecoveryTranslogOperationsResponse::new,
            ThreadPool.Names.GENERIC)
    );
}
 
Example 14
@Override
public void indexTranslogOperations(List<Translog.Operation> operations,
                                    int totalTranslogOps,
                                    long timestamp,
                                    long msu,
                                    ActionListener<Long> listener) {
    // intentionally left empty in this implementation (presumably a test stub); note that the listener is never completed
}
 
Example 15
private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy,
                                    LongSupplier globalCheckpointSupplier) throws IOException {

    final TranslogConfig translogConfig = engineConfig.getTranslogConfig();
    final String translogUUID = loadTranslogUUIDFromLastCommit();
    // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong!
    return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier,
        engineConfig.getPrimaryTermSupplier());
}
 
Example 16
protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException {
    TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE);
    String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId,
        primaryTermSupplier.getAsLong());
    return new Translog(translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS),
        () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier, seqNo -> {});
}
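
A test-style usage sketch for the helper above, assuming an ESTestCase-like createTempDir() is available: a freshly created translog opens cleanly and yields an empty snapshot.
try (Translog translog = createTranslog(createTempDir(), () -> 1L)) {   // primary term supplier fixed at 1
    try (Translog.Snapshot snapshot = translog.newSnapshot()) {
        assert snapshot.totalOperations() == 0;                          // nothing has been written yet
    }
}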
 
Example 17
/**
 * Reads the current stored translog ID from the last commit data.
 */
@Nullable
private String loadTranslogUUIDFromLastCommit() throws IOException {
    final Map<String, String> commitUserData = store.readLastCommittedSegmentsInfo().getUserData();
    if (commitUserData.containsKey(Translog.TRANSLOG_GENERATION_KEY) == false) {
        throw new IllegalStateException("commit doesn't contain translog generation id");
    }
    return commitUserData.get(Translog.TRANSLOG_UUID_KEY);
}
 
Example 18
private static void trimUnsafeCommits(EngineConfig engineConfig) throws IOException {
    final Store store = engineConfig.getStore();
    final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
    final Path translogPath = engineConfig.getTranslogConfig().getTranslogPath();
    final long globalCheckpoint = Translog.readGlobalCheckpoint(translogPath, translogUUID);
    final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogPath, translogUUID);
    store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, engineConfig.getIndexSettings().getIndexVersionCreated());
}
 
Example 19
private void maybeFSyncTranslogs() {
    if (indexSettings.getTranslogDurability() == Translog.Durability.ASYNC) {
        for (IndexShard shard : this.shards.values()) {
            try {
                if (shard.isSyncNeeded()) {
                    shard.sync();
                }
            } catch (AlreadyClosedException ex) {
                // fine - continue;
            } catch (IOException e) {
                logger.warn("failed to sync translog", e);
            }
        }
    }
}
 
Example 20
/**
 * Add a listener for refreshes, calling it immediately if the location is already visible. If this runs out of listener slots then it
 * forces a refresh and calls the listener immediately as well.
 *
 * @param location the location to listen for
 * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with
 *        false otherwise.
 * @return did we call the listener (true) or register the listener to call later (false)?
 */
public boolean addOrNotify(Translog.Location location, Consumer<Boolean> listener) {
    requireNonNull(listener, "listener cannot be null");
    requireNonNull(location, "location cannot be null");

    if (lastRefreshedLocation != null && lastRefreshedLocation.compareTo(location) >= 0) {
        // Location already visible, just call the listener
        listener.accept(false);
        return true;
    }
    synchronized (this) {
        List<Tuple<Translog.Location, Consumer<Boolean>>> listeners = refreshListeners;
        if (listeners == null) {
            if (closed) {
                throw new IllegalStateException("can't wait for refresh on a closed index");
            }
            listeners = new ArrayList<>();
            refreshListeners = listeners;
        }
        if (listeners.size() < getMaxRefreshListeners.getAsInt()) {
            // We have a free slot so register the listener
            listeners.add(new Tuple<>(location, listener));
            return false;
        }
    }
    // No free slot so force a refresh and call the listener in this thread
    forceRefresh.run();
    listener.accept(true);
    return true;
}
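
A usage sketch for addOrNotify, assuming the caller obtained a Translog.Location from an indexing operation (the accessor name below is an assumption) and wants to respond only once that location is visible to search:
Translog.Location location = indexResult.getTranslogLocation();   // assumed accessor on the indexing result
boolean calledInline = refreshListeners.addOrNotify(location, forcedRefresh -> {
    // forcedRefresh == true means the listener slots were full and a refresh was forced
    sendResponse(forcedRefresh);                                   // hypothetical callback back to the client
});
// calledInline == false means the listener was parked and will run after a later refresh covers the location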
 
Example 21
/**
 * This method is now effectively a no-op, since translog recovery from the remote node is skipped.
 */
public int performBatchRecovery(Iterable<Translog.Operation> operations) {
    if (state != IndexShardState.RECOVERING) {
        throw new IndexShardNotRecoveringException(shardId, state);
    }

    // This will activate our shard so we get our fair share of the indexing buffer during recovery:
    markLastWrite();
    return 0;
}
 
Example 22
public static Operation getOperationFromLogRecord(LogRecordWithDLSN logRecord) throws IOException {
    ByteBuffer bf = ByteBuffer.wrap(logRecord.getPayload());
    ByteBufferStreamInput in = new ByteBufferStreamInput(bf);
    Translog.Operation.Type type = Translog.Operation.Type.fromId(in.readByte());
    Translog.Operation operation = newOperationFromType(type);
    operation.readFrom(in);
    in.close();
    return operation;
}
 
Example 23
/**
 * Returns a new empty translog operation for the given {@link Translog.Operation.Type}
 */
public static Translog.Operation newOperationFromType(Translog.Operation.Type type) throws IOException {
    switch (type) {
        case CREATE:
            return new Translog.Create();
        case DELETE:
            return new Translog.Delete();
        case SAVE:
            return new Translog.Index();
        default:
            throw new IOException("No type for [" + type + "]");
    }
}
 
Example 24
public ResyncReplicationRequest(final ShardId shardId, final long trimAboveSeqNo, final long maxSeenAutoIdTimestampOnPrimary,
                                final Translog.Operation[] operations) {
    super(shardId);
    this.trimAboveSeqNo = trimAboveSeqNo;
    this.maxSeenAutoIdTimestampOnPrimary = maxSeenAutoIdTimestampOnPrimary;
    this.operations = operations;
}
 
Example 25
@Override
public RecoveryResponse recoverToTarget() {
    boolean engineClosed = false;
    try {
        logger.trace("{} recovery [phase1] to {}: skipping phase 1 for shared filesystem", request.shardId(), request.targetNode());
        if (isPrimaryRelocation()) {
            logger.debug("[phase1] closing engine on primary for shared filesystem recovery");
            try {
                // if we relocate we need to close the engine in order to open a new
                // IndexWriter on the other end of the relocation
                engineClosed = true;
                shard.engine().flushAndClose();
            } catch (IOException e) {
                logger.warn("close engine failed", e);
                shard.failShard("failed to close engine (phase1)", e);
            }
        }
        prepareTargetForTranslog(Translog.View.EMPTY_VIEW);
        finalizeRecovery();
        return response;
    } catch (Throwable t) {
        if (engineClosed) {
            // If the relocation fails then the primary is closed and can't be
            // used anymore... (because it's closed) that's a problem, so in
            // that case, fail the shard to reallocate a new IndexShard and
            // create a new IndexWriter
            logger.info("recovery failed for primary shadow shard, failing shard");
            // pass the failure as null, as we want to ensure the store is not marked as corrupted
            shard.failShard("primary relocation failed on shared filesystem caused by: [" + t.getMessage() + "]", null);
        } else {
            logger.info("recovery failed on shared filesystem", t);
        }
        throw t;
    }
}
 
Example 26
/**
 * Creates a new history snapshot for reading operations since the provided seqno.
 * The returned snapshot can be retrieved from either Lucene index or translog files.
 */
@Override
public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException {
    if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
        return newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false);
    } else {
        return getTranslog().newSnapshotFromMinSeqNo(startingSeqNo);
    }
}
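
A usage sketch for draining all history operations with seqNo >= startingSeqNo, regardless of whether they come from the Lucene index (soft deletes) or from translog files; process() is a hypothetical per-operation handler and "peer-recovery" is just a source label:
try (Translog.Snapshot snapshot = engine.readHistoryOperations("peer-recovery", mapperService, startingSeqNo)) {
    Translog.Operation op;
    while ((op = snapshot.next()) != null) {
        process(op);
    }
}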
 
Example 27
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    recoveryId = in.readLong();
    shardId = ShardId.readShardId(in);
    operations = Translog.readOperations(in);
    totalTranslogOps = in.readVInt();
}
 
Example 28
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeLong(recoveryId);
    shardId.writeTo(out);
    Translog.writeOperations(out, operations);
    out.writeVInt(totalTranslogOps);
}
 