Java source code examples: org.apache.lucene.store.RateLimiter
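
The examples below revolve around Lucene's RateLimiter and its built-in SimpleRateLimiter implementation: a limiter is constructed with a rate in MB/sec and consulted by calling pause(bytes) once enough bytes have been transferred. As a self-contained orientation, here is a minimal sketch of that basic usage (the file names and buffer size are illustrative, not taken from the examples):

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.lucene.store.RateLimiter;

public class RateLimitedCopy {
  public static void main(String[] args) throws IOException {
    // Throttle the copy to roughly 10 MB/sec; the rate can be changed later via setMBPerSec().
    RateLimiter limiter = new RateLimiter.SimpleRateLimiter(10.0);

    long bytesSinceLastPause = 0;
    byte[] buffer = new byte[8192];
    try (InputStream in = Files.newInputStream(Paths.get("source.bin"));
         OutputStream out = Files.newOutputStream(Paths.get("target.bin"))) {
      int n;
      while ((n = in.read(buffer)) != -1) {
        out.write(buffer, 0, n);
        bytesSinceLastPause += n;
        // Only ask the limiter to pause once enough bytes have accumulated;
        // pause() returns the time actually slept, in nanoseconds.
        if (bytesSinceLastPause > limiter.getMinPauseCheckBytes()) {
          long pausedNanos = limiter.pause(bytesSinceLastPause);
          bytesSinceLastPause = 0;
        }
      }
    }
  }
}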

Example 1
public SimpleReplicaNode(Random random, int id, int tcpPort, Path indexPath, long curPrimaryGen, int primaryTCPPort,
                         SearcherFactory searcherFactory, boolean doCheckIndexOnClose) throws IOException {
  super(id, getDirectory(random, id, indexPath, doCheckIndexOnClose), searcherFactory, System.out);
  this.tcpPort = tcpPort;
  this.random = new Random(random.nextLong());

  // Random IO throttling on file copies: 5 - 20 MB/sec:
  double mbPerSec = 5 * (1.0 + 3*random.nextDouble());
  message(String.format(Locale.ROOT, "top: will rate limit file fetch to %.2f MB/sec", mbPerSec));
  fetchRateLimiter = new RateLimiter.SimpleRateLimiter(mbPerSec);
  this.curPrimaryTCPPort = primaryTCPPort;
  
  start(curPrimaryGen);

  // Handles fetching files from primary:
  jobs = new Jobs(this);
  jobs.setName("R" + id + ".copyJobs");
  jobs.setDaemon(true);
  jobs.start();
}
 
Example 2
public DirectoryFileStream(SolrParams solrParams) {
  params = solrParams;
  delPolicy = core.getDeletionPolicy();

  fileName = validateFilenameOrError(params.get(FILE));
  cfileName = validateFilenameOrError(params.get(CONF_FILE_SHORT));
  tlogFileName = validateFilenameOrError(params.get(TLOG_FILE));
  
  sOffset = params.get(OFFSET);
  sLen = params.get(LEN);
  compress = params.get(COMPRESSION);
  useChecksum = params.getBool(CHECKSUM, false);
  indexGen = params.getLong(GENERATION);
  if (useChecksum) {
    checksum = new Adler32();
  }
  //No throttle if MAX_WRITE_PER_SECOND is not specified
  double maxWriteMBPerSec = params.getDouble(MAX_WRITE_PER_SECOND, Double.MAX_VALUE);
  rateLimiter = new RateLimiter.SimpleRateLimiter(maxWriteMBPerSec);
}
 
Example 3
/**
 * Configures RateLimiter based on repository and global settings
 *
 * @param repositorySettings repository settings
 * @param setting            setting to use to configure rate limiter
 * @param defaultRate        default limiting rate
 * @return rate limiter, or null if no throttling is needed
 */
private RateLimiter getRateLimiter(RepositorySettings repositorySettings, String setting, ByteSizeValue defaultRate) {
    ByteSizeValue maxSnapshotBytesPerSec = repositorySettings.settings().getAsBytesSize(setting,
            settings.getAsBytesSize(setting, defaultRate));
    if (maxSnapshotBytesPerSec.bytes() <= 0) {
        return null;
    } else {
        return new RateLimiter.SimpleRateLimiter(maxSnapshotBytesPerSec.mbFrac());
    }
}
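
Because this method returns null when throttling is disabled, callers must guard every use of the limiter. One way a snapshot read path might consume the return value is by wrapping the blob stream with the RateLimitingInputStream shown in examples 4 and 9, but only when a limiter is present; the helper below is an illustrative sketch (hypothetical name), not the repository's actual code:

// Illustrative helper: wrap the stream only when throttling is configured.
private InputStream maybeRateLimit(InputStream stream, RateLimiter limiter,
                                   RateLimitingInputStream.Listener listener) {
    return limiter == null ? stream : new RateLimitingInputStream(stream, limiter, listener);
}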
 
Example 4
/**
 * Called by {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository} on repository startup
 *
 * @param blobStore blob store
 * @param basePath  base path to blob store
 * @param chunkSize chunk size
 * @param snapshotRateLimiter rate limiter used to throttle snapshot reads
 * @param restoreRateLimiter  rate limiter used to throttle restore reads
 * @param rateLimiterListener listener notified whenever either limiter pauses
 * @param compress            true if metadata blobs written by this repository should be compressed
 */
public void initialize(BlobStore blobStore, BlobPath basePath, ByteSizeValue chunkSize,
                       RateLimiter snapshotRateLimiter, RateLimiter restoreRateLimiter,
                       final RateLimiterListener rateLimiterListener, boolean compress) {
    this.blobStore = blobStore;
    this.basePath = basePath;
    this.chunkSize = chunkSize;
    this.snapshotRateLimiter = snapshotRateLimiter;
    this.restoreRateLimiter = restoreRateLimiter;
    this.rateLimiterListener = rateLimiterListener;
    this.snapshotThrottleListener = new RateLimitingInputStream.Listener() {
        @Override
        public void onPause(long nanos) {
            rateLimiterListener.onSnapshotPause(nanos);
        }
    };
    this.restoreThrottleListener = new RateLimitingInputStream.Listener() {
        @Override
        public void onPause(long nanos) {
            rateLimiterListener.onRestorePause(nanos);
        }
    };
    this.compress = compress;
    indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher, isCompress());
    indexShardSnapshotLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher);
    indexShardSnapshotsFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_INDEX_CODEC, SNAPSHOT_INDEX_NAME_FORMAT, BlobStoreIndexShardSnapshots.PROTO, parseFieldMatcher, isCompress());
}
 
Example 5
@Override
public void writeFileChunk(StoreFileMetaData fileMetaData,
                           long position,
                           BytesReference content,
                           boolean lastChunk,
                           int totalTranslogOps,
                           ActionListener<Void> listener) {
    // Pause using the rate limiter, if desired, to throttle the recovery
    final long throttleTimeInNanos;
    // always fetch the ratelimiter - it might be updated in real-time on the recovery settings
    final RateLimiter rl = recoverySettings.rateLimiter();
    if (rl != null) {
        long bytes = bytesSinceLastPause.addAndGet(content.length());
        if (bytes > rl.getMinPauseCheckBytes()) {
            // Time to pause
            bytesSinceLastPause.addAndGet(-bytes);
            try {
                throttleTimeInNanos = rl.pause(bytes);
                onSourceThrottle.accept(throttleTimeInNanos);
            } catch (IOException e) {
                throw new ElasticsearchException("failed to pause recovery", e);
            }
        } else {
            throttleTimeInNanos = 0;
        }
    } else {
        throttleTimeInNanos = 0;
    }

    transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.FILE_CHUNK,
        new RecoveryFileChunkRequest(recoveryId, shardId, fileMetaData, position, content, lastChunk,
            totalTranslogOps,
            /* We send estimateTotalOperations with every request since we collect stats on the target, and that way
             * we can see how many translog ops accumulate while copying files across the network. A future optimization
             * would be to restart the file copy (with new deltas) if too many translog ops pile up.
             */
            throttleTimeInNanos), fileChunkRequestOptions, new ActionListenerResponseHandler<>(
            ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure), in -> TransportResponse.Empty.INSTANCE));
}
 
Example 6
@Override
public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel, Task task) throws Exception {
    try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) {
        final RecoveryTarget recoveryTarget = recoveryRef.target();
        final RecoveryState.Index indexState = recoveryTarget.state().getIndex();
        if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
            indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
        }

        RateLimiter rateLimiter = recoverySettings.rateLimiter();
        if (rateLimiter != null) {
            long bytes = bytesSinceLastPause.addAndGet(request.content().length());
            if (bytes > rateLimiter.getMinPauseCheckBytes()) {
                // Time to pause
                bytesSinceLastPause.addAndGet(-bytes);
                long throttleTimeInNanos = rateLimiter.pause(bytes);
                indexState.addTargetThrottling(throttleTimeInNanos);
                recoveryTarget.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
            }
        }

        final ActionListener<TransportResponse> listener =
            new HandledTransportAction.ChannelActionListener<>(channel, Actions.FILE_CHUNK, request);
        recoveryTarget.writeFileChunk(
            request.metadata(),
            request.position(),
            request.content(),
            request.lastChunk(),
            request.totalTranslogOps(),
            ActionListener.wrap(
                nullVal -> listener.onResponse(TransportResponse.Empty.INSTANCE),
                listener::onFailure)
        );
    }
}
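
Examples 5 and 6 share the same accumulate-then-pause pattern: each chunk's length is added to a shared AtomicLong, and once the running total exceeds getMinPauseCheckBytes() the accumulated amount is subtracted back (rather than reset to zero) so that bytes recorded concurrently by other chunks are not lost. Condensed into one hypothetical helper (the field and method names are illustrative, assuming java.util.concurrent.atomic.AtomicLong is imported):

private final AtomicLong bytesSinceLastPause = new AtomicLong();

// Illustrative sketch of the shared pattern from examples 5 and 6.
private long maybePause(RateLimiter limiter, int chunkLength) throws IOException {
    if (limiter == null) {
        return 0;
    }
    long bytes = bytesSinceLastPause.addAndGet(chunkLength);
    if (bytes <= limiter.getMinPauseCheckBytes()) {
        return 0;
    }
    // Subtract what we are about to account for instead of resetting to zero,
    // so concurrent additions from other chunks are preserved.
    bytesSinceLastPause.addAndGet(-bytes);
    return limiter.pause(bytes);
}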
 
Example 7
/**
 * Configures RateLimiter based on repository and global settings
 *
 * @param repositorySettings repository settings
 * @param setting            setting to use to configure rate limiter
 * @param defaultRate        default limiting rate
 * @return rate limiter, or null if no throttling is needed
 */
private RateLimiter getRateLimiter(Settings repositorySettings, String setting, ByteSizeValue defaultRate) {
    ByteSizeValue maxSnapshotBytesPerSec = repositorySettings.getAsBytesSize(setting,
            settings.getAsBytesSize(setting, defaultRate));
    if (maxSnapshotBytesPerSec.getBytes() <= 0) {
        return null;
    } else {
        return new RateLimiter.SimpleRateLimiter(maxSnapshotBytesPerSec.getMbFrac());
    }
}
 
Example 8
public RateLimiter rateLimiter() {
    return rateLimiter;
}
 
Example 9
public RateLimitingInputStream(InputStream delegate, RateLimiter rateLimiter, Listener listener) {
    super(delegate);
    this.rateLimiter = rateLimiter;
    this.listener = listener;
}
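
Example 9 shows only the constructor of the wrapper. The interesting part of such a class is its read path, which counts the bytes returned by the delegate, pauses on the limiter once enough have accumulated, and reports the pause to the listener. A sketch of how that could look (an illustration of the idea, not the actual Elasticsearch implementation):

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.lucene.store.RateLimiter;

public class RateLimitingInputStream extends FilterInputStream {

    public interface Listener {
        void onPause(long nanos);
    }

    private final RateLimiter rateLimiter;
    private final Listener listener;
    private long bytesSinceLastRateLimit;

    public RateLimitingInputStream(InputStream delegate, RateLimiter rateLimiter, Listener listener) {
        super(delegate);
        this.rateLimiter = rateLimiter;
        this.listener = listener;
    }

    private void maybePause(int bytesRead) throws IOException {
        bytesSinceLastRateLimit += bytesRead;
        if (bytesSinceLastRateLimit > rateLimiter.getMinPauseCheckBytes()) {
            long pauseNanos = rateLimiter.pause(bytesSinceLastRateLimit);
            bytesSinceLastRateLimit = 0;
            if (pauseNanos > 0) {
                listener.onPause(pauseNanos);
            }
        }
    }

    @Override
    public int read() throws IOException {
        int b = in.read();
        if (b != -1) {
            maybePause(1);
        }
        return b;
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        int n = in.read(b, off, len);
        if (n > 0) {
            maybePause(n);
        }
        return n;
    }
}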