Python source code examples: concurrent.futures.FIRST_COMPLETED
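concurrent.futures.FIRST_COMPLETED is the return_when constant that makes concurrent.futures.wait() (and, since it shares the same string value, asyncio.wait()) return as soon as any of the given futures finishes or is cancelled. As a quick orientation before the collected examples, here is a minimal self-contained sketch; it is not taken from any of the projects below:

import time
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED

def slow(seconds):
    time.sleep(seconds)
    return seconds

with ThreadPoolExecutor(max_workers=2) as pool:
    futures = {pool.submit(slow, s) for s in (0.1, 2.0)}
    # wait() returns as soon as the 0.1 s task finishes; the 2 s task is still pending
    done, not_done = wait(futures, return_when=FIRST_COMPLETED)
    print(len(done), len(not_done))  # typically prints: 1 1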
Example 1
def perform_requests(self):
    signal.signal(signal.SIGINT, self.exit_fast)
    signal.signal(signal.SIGTERM, self.exit_fast)

    self.state = b'E'
    for q_batch in self.get_batch():
        for (_, _) in self.split_batch(q_batch):
            if self.state != b"R":
                self.state = b'R'
            yield
            continue

    # wait for all batches to finish before returning
    self.state = b'W'
    while self.futures:
        f_len = len(self.futures)
        self.futures = [i for i in self.futures if not i.done()]
        if f_len != len(self.futures):
            self.ui.debug('Waiting for final requests to finish. '
                          'remaining requests: {}'
                          ''.format(len(self.futures)))
        wait(self.futures, return_when=FIRST_COMPLETED)

    self.state = b'D'
    yield True
Example 2
async def _get_layers(self):
    """
    Wait for renderers to produce new layers, yields until at least one
    layer is active.
    """
    # schedule tasks to wait on each renderer queue
    for r_idx, _ in enumerate(self.layers):
        if _.waiter is None or _.waiter.done():
            _.waiter = ensure_future(self._dequeue(r_idx))

    # async wait for at least one completion
    waiters = [layer.waiter for layer in self.layers]
    if not waiters:
        return
    await asyncio.wait(waiters, return_when=futures.FIRST_COMPLETED)

    # check the rest without waiting
    for r_idx, _ in enumerate(self.layers):
        if _.waiter is not None and not _.waiter.done():
            self._dequeue_nowait(r_idx)
Example 3
def _create_and_install_waiters(fs, return_when):
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    else:
        pending_count = sum(
            f._state not in [CANCELLED_AND_NOTIFIED, FINISHED]
            for f in fs)

        if return_when == FIRST_EXCEPTION:
            waiter = _AllCompletedWaiter(pending_count,
                                         stop_on_exception=True)
        elif return_when == ALL_COMPLETED:
            waiter = _AllCompletedWaiter(pending_count,
                                         stop_on_exception=False)
        else:
            raise ValueError("Invalid return condition: %r" % return_when)

    for f in fs:
        f._waiters.append(waiter)

    return waiter
Example 4
def _run(self):
    first_completed = concurrent.FIRST_COMPLETED

    if self._get_max_tasks() < 1:
        raise RuntimeError("Executor has no workers")

    try:
        while not self.goal(self.learner):
            futures = self._get_futures()
            done, _ = concurrent.wait(futures, return_when=first_completed)
            self._process_futures(done)
    finally:
        remaining = self._remove_unfinished()
        if remaining:
            concurrent.wait(remaining)
        self._cleanup()
Example 5
async def _run(self):
    first_completed = asyncio.FIRST_COMPLETED

    if self._get_max_tasks() < 1:
        raise RuntimeError("Executor has no workers")

    try:
        while not self.goal(self.learner):
            futures = self._get_futures()
            # note: the explicit loop= argument was deprecated in Python 3.8
            # and removed from asyncio.wait() in Python 3.10
            done, _ = await asyncio.wait(
                futures, return_when=first_completed, loop=self.ioloop
            )
            self._process_futures(done)
    finally:
        remaining = self._remove_unfinished()
        if remaining:
            await asyncio.wait(remaining)
        self._cleanup()
Example 6
def process(self, queue, workflow):
    while queue.__futures__:
        done, _ = wait(queue.__futures__, return_when=FIRST_COMPLETED)
        queue.progress(done)
    return workflow.result()
Example 7
def start(self, jobs=None):
    """
    Engine starts to run jobs.
    :param jobs: A list containing at least one job
    :return:
    """
    try:
        if not jobs:
            logger.warning("CloudConnectEngine just exits with no jobs to run")
            return
        for job in jobs:
            self._add_job(job)
        while not self._shutdown:
            logger.info("CloudConnectEngine starts to run...")
            if not self._pending_job_results:
                logger.info("CloudConnectEngine has no more jobs to run")
                break
            # split the intermediate results into done jobs and
            # not yet done jobs
            done_and_not_done_jobs = cf.wait(self._pending_job_results,
                                             return_when=cf.FIRST_COMPLETED)
            self._pending_job_results = done_and_not_done_jobs.not_done
            done_job_results = done_and_not_done_jobs.done
            for future in done_job_results:
                # get the result of each done job and add new jobs to the
                # engine if the result spawns more jobs
                result = future.result()
                if result:
                    if isinstance(result, Iterable):
                        for temp in result:
                            self._add_job(temp)
                    else:
                        self._add_job(result)
    except:
        logger.exception("CloudConnectEngine encountered exception")
    finally:
        self._teardown()
Example 8
def wait_any_request(requests, do_raise=False, timeout=None):
    if not AsyncArctic._wait_until_scheduled(requests, timeout):
        raise AsyncArcticException("Timed-out while waiting for request to be scheduled")
    while requests and not any(r.is_completed for r in requests):
        AsyncArctic.wait_tasks(
            tuple(r.future for r in requests if not r.is_completed and r.future is not None),
            timeout=timeout, return_when=FIRST_COMPLETED, raise_exceptions=do_raise)
Example 9
def test_first_completed(self):
    future1 = self.executor.submit(mul, 21, 2)
    future2 = self.executor.submit(time.sleep, 1.5)

    done, not_done = futures.wait(
            [CANCELLED_FUTURE, future1, future2],
            return_when=futures.FIRST_COMPLETED)

    self.assertEqual(set([future1]), done)
    self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
Example 10
def test_first_completed_some_already_completed(self):
    future1 = self.executor.submit(time.sleep, 1.5)

    finished, pending = futures.wait(
            [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
            return_when=futures.FIRST_COMPLETED)

    self.assertEqual(
            set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
            finished)
    self.assertEqual(set([future1]), pending)
Example 11
def test_wait(self):
    done, not_done = futures.wait([QUEUED_RESULT, FINISHED_RESULT],
                                  return_when=futures.FIRST_COMPLETED)
    self.assertEqual(set([FINISHED_RESULT]), done)
    self.assertEqual(set([QUEUED_RESULT]), not_done)
Example 12
def run(self):
    # init listeners, add them to the event loop
    for s in self.sockets:
        s.setblocking(False)
        self.poller.register(s, selectors.EVENT_READ, self.accept)

    timeout = self.cfg.timeout or 0.5

    while self.alive:
        # notify the arbiter we are alive
        self.notify()

        # can we accept more connections?
        if self.nr < self.worker_connections:
            # wait for an event
            events = self.poller.select(0.02)
            for key, mask in events:
                callback = key.data
                callback(key.fileobj)

        if not self.is_parent_alive():
            break

        # handle keepalive timeouts
        self.murder_keepalived()

        # if we are handling fewer connections than the maximum we can run
        # at the same time, there is no need to wait for one to finish
        if len(self.futures) < self.cfg.threads:
            continue

        result = futures.wait(self.futures, timeout=timeout,
                              return_when=futures.FIRST_COMPLETED)

        if not result.done:
            break
        else:
            [self.futures.remove(f) for f in result.done]

    self.tpool.shutdown(False)
    self.poller.close()
Example 13
def wait_for_any(fs, timeout=None):
    """Wait for one (**any**) of the futures to complete.

    Works correctly with both green and non-green futures (but not both
    together, since this can't be guaranteed to avoid dead-lock due to how
    the waiting implementations are different when green threads are being
    used).

    Returns pair (done futures, not done futures).
    """
    return _wait_for(fs, futures.FIRST_COMPLETED, _wait_for_any_green,
                     'wait_for_any', timeout=timeout)
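A hypothetical usage sketch for wait_for_any above (the executor and the sleeping task are illustrative, not from the original project); it relies only on the documented return value, a pair of done and not-done futures:

from concurrent import futures as cf
import time

with cf.ThreadPoolExecutor(max_workers=2) as pool:
    fs = [pool.submit(time.sleep, t) for t in (0.1, 1.0)]
    # returns once the 0.1 s sleep has finished; the 1.0 s sleep is usually still pending
    done, not_done = wait_for_any(fs)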
Example 14
async def __anext__(self) -> Any:
    if self.is_closed:
        if not isasyncgen(self.iterator):
            raise StopAsyncIteration
        value = await self.iterator.__anext__()
        result = self.callback(value)
    else:
        aclose = ensure_future(self._close_event.wait())
        anext = ensure_future(self.iterator.__anext__())

        pending: Set[Future] = (
            await wait([aclose, anext], return_when=FIRST_COMPLETED)
        )[1]
        for task in pending:
            task.cancel()

        if aclose.done():
            raise StopAsyncIteration

        error = anext.exception()
        if error:
            if not self.reject_callback or isinstance(
                error, (StopAsyncIteration, GeneratorExit)
            ):
                raise error
            result = self.reject_callback(error)
        else:
            value = anext.result()
            result = self.callback(value)

    return await result if isawaitable(result) else result
Example 15
def test_first_completed(self):
    future1 = self.executor.submit(mul, 21, 2)
    future2 = self.executor.submit(time.sleep, 1.5)

    done, not_done = futures.wait([CANCELLED_FUTURE, future1, future2],
                                  return_when=futures.FIRST_COMPLETED)

    assert set([future1]) == done
    assert set([CANCELLED_FUTURE, future2]) == not_done
Example 16
def test_first_completed_some_already_completed(self):
    future1 = self.executor.submit(time.sleep, 1.5)

    finished, pending = futures.wait([CANCELLED_AND_NOTIFIED_FUTURE,
                                      SUCCESSFUL_FUTURE, future1],
                                     return_when=futures.FIRST_COMPLETED)

    assert (set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]) ==
            finished)
    assert set([future1]) == pending
Example 17
def _batched_pool_runner(pool, batch_size, f, iterable):
    it = iter(iterable)
    # submit the first batch of tasks
    futures = set(pool.submit(f, x) for x in islice(it, batch_size))
    while futures:
        done, futures = wait(futures, return_when=FIRST_COMPLETED)
        # top the pool back up with as many new tasks as just completed
        futures.update(pool.submit(f, x) for x in islice(it, len(done)))
        for d in done:
            yield d
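A hypothetical usage sketch for the batched pool runner above (the executor, batch size, and workload are illustrative, not from the original project): at most batch_size tasks are in flight at any time, and futures are yielded as they complete:

from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=4) as pool:
    for fut in _batched_pool_runner(pool, 4, abs, range(-100, 0)):
        print(fut.result())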
Example 18
def _wait_on_in_progress_entries(self):
    "Waits on any in-progress entry to finish."
    futures = [entry.future for entry in self._in_progress_entries.values()]
    finished_futures, _ = wait(futures, return_when=FIRST_COMPLETED)
    for finished_future in finished_futures:
        task_key = finished_future.result()
        entry = self._entries_by_task_key[task_key]
        entry.state.sync_after_subprocess_completion()
        self._mark_entry_completed(entry)
Example 19
def batched_pool_runner(f, iterable, pool, batch_size):
    # http://code.activestate.com/lists/python-list/666786/
    it = iter(iterable)
    # Submit the first batch of tasks.
    futures = set(pool.submit(f, x) for x in islice(it, batch_size))
    while futures:
        done, futures = wait(futures, return_when=FIRST_COMPLETED)
        # Replenish submitted tasks up to the number that completed.
        futures.update(pool.submit(f, x) for x in islice(it, len(done)))
        yield from done
Example 20
def AwaitAnyTask(self):
    completed_tasks = None
    while not completed_tasks:
        completed_tasks, _ = futures.wait(
            self._active_futures, timeout=_LONG_TIMEOUT,
            return_when=futures.FIRST_COMPLETED)

    future = completed_tasks.pop()
    task_id = self._active_futures.pop(future)
    task = self.tasks[task_id]
    task.return_value, task.traceback = future.result()
    return task_id
Example 21
def thread_exector(thread, res):
    """
    Start the thread pool.
    :param thread: thread pool object
    :param res: custom ThreadProxy object
    :return:
    """
    tasks = [thread.submit(res.get_test_proxy, proxy) for proxy in res.proxy_list]
    # wait(tasks, return_when=FIRST_COMPLETED)
    thread.shutdown()
    result = [obj for obj in as_completed(tasks)]
    return result
Example 22
def _get_results(FetcherClass, services, kwargs, num_results=None, fast=0, verbose=False, timeout=None):
    """
    Does the fetching in multiple threads if needed. Used by paranoid and fast mode.
    """
    results = []
    if not num_results or fast:
        num_results = len(services)

    with futures.ThreadPoolExecutor(max_workers=len(services)) as executor:
        fetches = {}
        for service in services[:num_results]:
            tail = [x for x in services if x is not service]
            random.shuffle(tail)
            srv = FetcherClass(services=[service] + tail, verbose=verbose, timeout=timeout)
            fetches[executor.submit(srv.action, **kwargs)] = srv

        if fast == 1:
            raise NotImplementedError
            # this code is a work in progress. futures.FIRST_COMPLETED works differently than I thought...
            to_iterate, still_going = futures.wait(fetches, return_when=futures.FIRST_COMPLETED)
            for x in still_going:
                try:
                    x.result(timeout=1.001)
                except futures._base.TimeoutError:
                    pass
        elif fast > 1:
            raise Exception("fast level greater than 1 not yet implemented")
        else:
            to_iterate = futures.as_completed(fetches)

        for future in to_iterate:
            service = fetches[future]
            results.append([service, future.result()])

    return results
Example 23
def run(self):
    # init listeners, add them to the event loop
    for sock in self.sockets:
        sock.setblocking(False)

        # a race condition during graceful shutdown may make the listener
        # name unavailable in the request handler so capture it once here
        server = sock.getsockname()
        acceptor = partial(self.accept, server)
        self.poller.register(sock, selectors.EVENT_READ, acceptor)

    while self.alive:
        # notify the arbiter we are alive
        self.notify()

        # can we accept more connections?
        if self.nr_conns < self.worker_connections:
            # wait for an event
            events = self.poller.select(1.0)
            for key, mask in events:
                callback = key.data
                callback(key.fileobj)

            # check (but do not wait) for finished requests
            result = futures.wait(self.futures, timeout=0,
                                  return_when=futures.FIRST_COMPLETED)
        else:
            # wait for a request to finish
            result = futures.wait(self.futures, timeout=1.0,
                                  return_when=futures.FIRST_COMPLETED)

        # clean up finished requests
        for fut in result.done:
            self.futures.remove(fut)

        if not self.is_parent_alive():
            break

        # handle keepalive timeouts
        self.murder_keepalived()

    self.tpool.shutdown(False)
    self.poller.close()

    for s in self.sockets:
        s.close()

    futures.wait(self.futures, timeout=self.cfg.graceful_timeout)