Python source code examples: asyncio.Task()
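The snippets below are collected from open-source projects. As a baseline before the real-world examples, here is a minimal, self-contained sketch (names are illustrative, not taken from any project below) of wrapping a coroutine in a task and awaiting its result:

import asyncio

async def worker(n):
    # Simulate some asynchronous work, then return a result.
    await asyncio.sleep(0.1)
    return n * 2

async def main():
    # Scheduling the coroutine as a task starts it running concurrently;
    # on Python 3.7+, asyncio.create_task() is the preferred spelling.
    task = asyncio.ensure_future(worker(21))
    result = await task
    assert result == 42

asyncio.run(main())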
Example 1
def __init__(self, bot: Bot):
    super().__init__()
    self.bot = bot

    # Categories
    self.available_category: discord.CategoryChannel = None
    self.in_use_category: discord.CategoryChannel = None
    self.dormant_category: discord.CategoryChannel = None

    # Queues
    self.channel_queue: asyncio.Queue[discord.TextChannel] = None
    self.name_queue: t.Deque[str] = None

    self.name_positions = self.get_names()
    self.last_notification: t.Optional[datetime] = None

    # Asyncio stuff
    self.queue_tasks: t.List[asyncio.Task] = []
    self.ready = asyncio.Event()
    self.on_message_lock = asyncio.Lock()
    self.init_task = self.bot.loop.create_task(self.init_cog())
Example 2
def _patch_asyncio() -> None:
    # This patches asyncio to add a sync_wait method to the event
    # loop. This method can then be called from within a task,
    # including from a synchronous function called by a task. Sadly it
    # requires the pure-Python Task and Future implementations, which
    # incurs some performance cost.
    asyncio.Task = asyncio.tasks._CTask = asyncio.tasks.Task = asyncio.tasks._PyTask  # type: ignore
    asyncio.Future = (  # type: ignore
        asyncio.futures._CFuture  # type: ignore
    ) = asyncio.futures.Future = asyncio.futures._PyFuture  # type: ignore  # noqa

    current_policy = asyncio.get_event_loop_policy()
    if hasattr(asyncio, "unix_events"):
        target_policy = asyncio.unix_events._UnixDefaultEventLoopPolicy
    else:
        target_policy = object  # type: ignore

    if not isinstance(current_policy, target_policy):
        raise RuntimeError("Flask Patching only works with the default event loop policy")

    _patch_loop()
    _patch_task()
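The multiple assignment swaps in the pure-Python Task and Future implementations so that follow-up patches (like `_patch_task()` in the next example) can override their private methods, which the C accelerated versions do not allow. A quick sanity check, assuming the patch above has run:

import asyncio
assert asyncio.Task is asyncio.tasks._PyTask
assert asyncio.Future is asyncio.futures._PyFuture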
Example 3
def _patch_task() -> None:
    # Patch the asyncio task to allow it to be re-entered.
    def enter_task(loop, task):  # type: ignore
        asyncio.tasks._current_tasks[loop] = task

    asyncio.tasks._enter_task = enter_task  # type: ignore

    def leave_task(loop, task):  # type: ignore
        del asyncio.tasks._current_tasks[loop]

    asyncio.tasks._leave_task = leave_task  # type: ignore

    def step(self, exception=None):  # type: ignore
        current_task = asyncio.tasks._current_tasks.get(self._loop)
        try:
            self._Task__step_orig(exception)
        finally:
            if current_task is None:
                asyncio.tasks._current_tasks.pop(self._loop, None)
            else:
                asyncio.tasks._current_tasks[self._loop] = current_task

    asyncio.Task._Task__step_orig = asyncio.Task._Task__step  # type: ignore
    asyncio.Task._Task__step = step  # type: ignore
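The wrapper around `__step` matters because a re-entered loop can step one task while another is logically current; saving and restoring the `_current_tasks` entry keeps `asyncio.current_task()` consistent across the nested call.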
Example 4
def wait_for_iterator(self, connection_observer, connection_observer_future):
    """
    Version of wait_for() intended to be used by Python 3 to implement an awaitable object.

    Note: there is no timeout parameter here. If you want to await with a timeout, use the asyncio machinery,
    e.g.: await asyncio.wait_for(connection_observer, timeout=10)

    :param connection_observer: The observer we are awaiting.
    :param connection_observer_future: Future of connection-observer returned from submit().
    :return: iterator
    """
    self.logger.debug("go foreground: {!r}".format(connection_observer))

    # Assuming that connection_observer.start() / runner.submit(connection_observer)
    # has already scheduled the future via asyncio.ensure_future.
    assert asyncio.futures.isfuture(connection_observer_future)

    return connection_observer_future.__iter__()
    # Note: even though the code is this simple, we can't move it inside ConnectionObserver.__await__(),
    # since different runners may provide different iterators implementing the awaitable.
    # Here we know connection_observer_future is an asyncio.Future (precisely an asyncio.tasks.Task)
    # and we know it has an __await__() method.
Example 5
def __new__(cls, name, bases, attrs, **kwargs):
    """ put the :class:`~peony.commands.tasks.Task`s in the right place """
    tasks = {'tasks': set()}

    for base in bases:
        if hasattr(base, '_tasks'):
            for key, value in base._tasks.items():
                tasks[key] |= value

    for attr in attrs.values():
        if isinstance(attr, task):
            tasks['tasks'].add(attr)

    attrs['_tasks'] = tasks
    attrs['_streams'] = EventStreams()

    return super().__new__(cls, name, bases, attrs)
Example 6
def test_changes_continuous_reading(self):
    ids = [utils.uuid() for _ in range(3)]

    @asyncio.coroutine
    def task():
        for idx in ids:
            yield from self.db[idx].update({})

    asyncio.Task(task())

    with (yield from self.db.changes(feed='continuous',
                                     timeout=1000)) as feed:
        while True:
            self.assertTrue(feed.is_active())
            event = yield from feed.next()
            if event is None:
                break
            self.assertIsInstance(event, dict)
            self.assertIn(event['id'], ids)
        self.assertFalse(feed.is_active())
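Both this test and the next use the legacy generator-based coroutine style (`@asyncio.coroutine` with `yield from`), which was deprecated in Python 3.8 and removed in 3.11. A sketch of the modern equivalent for the helper above (`db` and `ids` stand in for the test fixtures):

async def update_docs():
    for idx in ids:
        await db[idx].update({})

asyncio.ensure_future(update_docs())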
Example 7
def test_changes_eventsource(self):
    ids = [utils.uuid() for _ in range(3)]

    @asyncio.coroutine
    def task():
        for idx in ids:
            yield from self.db[idx].update({})

    asyncio.Task(task())

    with (yield from self.db.changes(feed='eventsource',
                                     timeout=1000)) as feed:
        while True:
            self.assertTrue(feed.is_active())
            event = yield from feed.next()
            if event is None:
                break
            self.assertIsInstance(event, dict)
            self.assertIn(event['id'], ids)
Example 8
def _patch_asyncio():
    """
    Patch asyncio module to use pure Python tasks and futures,
    use module level _current_tasks, all_tasks and patch run method.
    """
    def run(future, *, debug=False):
        loop = asyncio.get_event_loop()
        loop.set_debug(debug)
        return loop.run_until_complete(future)

    if sys.version_info >= (3, 6, 0):
        asyncio.Task = asyncio.tasks._CTask = asyncio.tasks.Task = \
            asyncio.tasks._PyTask
        asyncio.Future = asyncio.futures._CFuture = asyncio.futures.Future = \
            asyncio.futures._PyFuture
    if sys.version_info < (3, 7, 0):
        asyncio.tasks._current_tasks = asyncio.tasks.Task._current_tasks  # noqa
        asyncio.all_tasks = asyncio.tasks.Task.all_tasks  # noqa
    if not hasattr(asyncio, '_run_orig'):
        asyncio._run_orig = getattr(asyncio, 'run', None)
        asyncio.run = run
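Note that `asyncio.run()` has been part of the standard library since Python 3.7; this shim stores any existing implementation in `asyncio._run_orig` and replaces it with a variant that reuses the current event loop instead of creating a fresh one.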
Example 9
def __init__(self,
             impl: SubscriberImpl[MessageClass],
             loop: asyncio.AbstractEventLoop,
             queue_capacity: typing.Optional[int]):
    """
    Do not call this directly! Use :meth:`Presentation.make_subscriber`.
    """
    if queue_capacity is None:
        queue_capacity = 0  # This case is defined by the Queue API. Means unlimited.
    else:
        queue_capacity = int(queue_capacity)
        if queue_capacity < 1:
            raise ValueError(f'Invalid queue capacity: {queue_capacity}')

    self._closed = False
    self._impl = impl
    self._loop = loop
    self._maybe_task: typing.Optional[asyncio.Task[None]] = None
    self._rx: _Listener[MessageClass] = _Listener(asyncio.Queue(maxsize=queue_capacity, loop=loop))
    impl.add_listener(self._rx)

# ---------------------------------------- HANDLER-BASED API ----------------------------------------
Example 10
def __init__(self,
             dtype: typing.Type[ServiceClass],
             input_transport_session: pyuavcan.transport.InputSession,
             output_transport_session_factory: OutputTransportSessionFactory,
             finalizer: TypedSessionFinalizer,
             loop: asyncio.AbstractEventLoop):
    """
    Do not call this directly! Use :meth:`Presentation.get_server`.
    """
    self._dtype = dtype
    self._input_transport_session = input_transport_session
    self._output_transport_session_factory = output_transport_session_factory
    self._finalizer = finalizer
    self._loop = loop

    self._output_transport_sessions: typing.Dict[int, pyuavcan.transport.OutputSession] = {}
    self._maybe_task: typing.Optional[asyncio.Task[None]] = None
    self._closed = False
    self._send_timeout = DEFAULT_SERVICE_REQUEST_TIMEOUT

    self._served_request_count = 0
    self._deserialization_failure_count = 0
    self._malformed_request_count = 0

# ---------------------------------------- MAIN API ----------------------------------------
Example 11
def create_utilitary_tasks(loop):
    tasks = []

    stats_printer_task = asyncio.Task(stats_printer())
    tasks.append(stats_printer_task)

    if config.USE_MIDDLE_PROXY:
        middle_proxy_updater_task = asyncio.Task(update_middle_proxy_info())
        tasks.append(middle_proxy_updater_task)

        if config.GET_TIME_PERIOD:
            time_get_task = asyncio.Task(get_srv_time())
            tasks.append(time_get_task)

    get_cert_len_task = asyncio.Task(get_mask_host_cert_len())
    tasks.append(get_cert_len_task)

    clear_resolving_cache_task = asyncio.Task(clear_ip_resolving_cache())
    tasks.append(clear_resolving_cache_task)

    return tasks
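The `asyncio.Task(coro)` constructor still works here, but since Python 3.7 the documented way to schedule a coroutine is `asyncio.create_task()` (or `loop.create_task()`), which also validates that a running loop exists. For example, the first task above could be written as:

stats_printer_task = loop.create_task(stats_printer())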
Example 12
def _patch_task(self, task):
    # In Python 3.8 we'll need proper API on asyncio.Task to
    # make TaskGroups possible. We need to be able to access
    # information about task cancellation, more specifically,
    # we need a flag to say if a task was cancelled or not.
    # We also need to be able to flip that flag.

    def _task_cancel(task, orig_cancel):
        task.__cancel_requested__ = True
        return orig_cancel()

    if hasattr(task, '__cancel_requested__'):
        return

    task.__cancel_requested__ = False
    # confirm that we were successful at adding the new attribute:
    assert not task.__cancel_requested__

    orig_cancel = task.cancel
    task.cancel = functools.partial(_task_cancel, task, orig_cancel)
Example 13
async def test_proxy_connect_http(loop):
    tr, proto = mock.Mock(name='transport'), mock.Mock(name='protocol')
    loop_mock = mock.Mock()
    loop_mock.getaddrinfo = make_mocked_coro([
        [0, 0, 0, 0, ['127.0.0.1', 1080]]])
    loop_mock.create_connection = make_mocked_coro((tr, proto))
    loop_mock.create_task.return_value = asyncio.Task(
        make_mocked_coro([
            {'host': 'host', 'port': 80, 'family': 1,
             'hostname': 'hostname', 'flags': 11, 'proto': 'proto'}])())

    req = ProxyClientRequest(
        'GET', URL('http://python.org'), loop=loop,
        proxy=URL('http://127.0.0.1'))
    connector = ProxyConnector(loop=loop_mock)

    await connector.connect(req, [], ClientTimeout())
Example 14
def __init__(
    self, max_active: int = 0, timed: bool = False, trace_fn: Callable = None
):
    """
    Initialize the task queue.

    Args:
        max_active: The maximum number of tasks to automatically run
        timed: A flag indicating that timing should be collected for tasks
        trace_fn: A callback for all completed tasks
    """
    self.loop = asyncio.get_event_loop()
    self.active_tasks = []
    self.pending_tasks = []
    self.timed = timed
    self.total_done = 0
    self.total_failed = 0
    self.total_started = 0
    self._trace_fn = trace_fn
    self._cancelled = False
    self._drain_evt = asyncio.Event()
    self._drain_task: asyncio.Task = None
    self._max_active = max_active
Example 15
def add_active(
    self,
    task: asyncio.Task,
    task_complete: Callable = None,
    ident: str = None,
    timing: dict = None,
) -> asyncio.Task:
    """
    Register an active async task with an optional completion callback.

    Args:
        task: The asyncio task instance
        task_complete: An optional callback to run on completion
        ident: A string identifier for the task
        timing: An optional dictionary of timing information
    """
    self.active_tasks.append(task)
    task.add_done_callback(
        lambda fut: self.completed_task(task, task_complete, ident, timing)
    )
    self.total_started += 1
    return task
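The `add_done_callback` pattern above is the standard way to observe task completion without awaiting the task directly; the callback always receives the finished future as its argument. A minimal standalone sketch of the same pattern (names are illustrative):

import asyncio

async def job():
    return "ok"

def on_done(fut: asyncio.Future):
    # Runs inside the event loop once the task finishes or is cancelled.
    print("finished:", fut.result())

async def main():
    task = asyncio.ensure_future(job())
    task.add_done_callback(on_done)
    await task

asyncio.run(main())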
Example 16
def __init__(
    self,
    context: InjectionContext,
    message: OutboundMessage,
    target: ConnectionTarget,
    transport_id: str,
):
    """Initialize the queued outbound message."""
    self.context = context
    self.endpoint = target and target.endpoint
    self.error: Exception = None
    self.message = message
    self.payload: Union[str, bytes] = None
    self.retries = None
    self.retry_at: float = None
    self.state = self.STATE_NEW
    self.target = target
    self.task: asyncio.Task = None
    self.transport_id: str = transport_id
Example 17
def __init__(
    self, context: InjectionContext, handle_not_delivered: Callable = None
):
    """
    Initialize an `OutboundTransportManager` instance.

    Args:
        context: The application context
        handle_not_delivered: An optional handler for undelivered messages
    """
    self.context = context
    self.loop = asyncio.get_event_loop()
    self.handle_not_delivered = handle_not_delivered
    self.outbound_buffer = []
    self.outbound_event = asyncio.Event()
    self.outbound_new = []
    self.registered_schemes = {}
    self.registered_transports = {}
    self.running_transports = {}
    self.task_queue = TaskQueue(max_active=200)
    self._process_task: asyncio.Task = None

    if self.context.settings.get("transport.max_outbound_retry"):
        self.MAX_RETRY_COUNT = self.context.settings["transport.max_outbound_retry"]
Example 18
def crawl(self):
    """Run the crawler until all finished."""
    try:
        workers = [asyncio.Task(self.work(), loop=self.loop)
                   for _ in range(self.max_tasks)]
        self.t0 = time.time()
        # yield from asyncio.gather(*workers, loop=self.loop, return_exceptions=True)
        yield from router.quit_event.wait()
        for w in workers:
            w.cancel()
        self.t1 = time.time()
    except asyncio.CancelledError:
        logger.warning('canceling the crawler')
    finally:
        logger.warning('closing the crawler')
        yield from self.close()
Example 19
async def enqueue(self, job: Job) -> None:
    """
    Enqueue a job into the job queue of the client.

    .. important:: Only the following jobs shall be enqueued:
                   - Messages from the server towards this client.
                   - Messages from other clients **towards** this
                     client (i.e. relayed messages).
                   - Delayed close operations towards this client.

    .. note:: Coroutines will be closed and :class:`asyncio.Task`s
              will be cancelled when the job queue has been closed
              or cancelled. The awaitable must be prepared for that.

    Arguments:
        - `job`: A coroutine or a :class:`asyncio.Task`.
    """
    if self._state == JobQueueState.open:
        await self._queue.put(job)
    else:
        util.cancel_awaitable(job, self._log)
Example 20
def __init__(
    self,
    app: ASGIFramework,
    loop: asyncio.AbstractEventLoop,
    config: Config,
    reader: asyncio.StreamReader,
    writer: asyncio.StreamWriter,
) -> None:
    self.app = app
    self.config = config
    self.loop = loop
    self.protocol: ProtocolWrapper
    self.reader = reader
    self.writer = writer
    self.send_lock = asyncio.Lock()
    self.timeout_lock = asyncio.Lock()

    self._keep_alive_timeout_handle: Optional[asyncio.Task] = None
Example 21
def test_missing_pong_disconnect():
    peer1, peer2 = create_peers()

    f1 = asyncio.Task(standards.initial_handshake(peer1, VERSION_MSG))
    f2 = asyncio.Task(standards.initial_handshake(peer2, VERSION_MSG_2))
    asyncio.get_event_loop().run_until_complete(asyncio.wait([f1, f2]))

    standards.install_ping_manager(peer1, heartbeat_rate=0.5, missing_pong_disconnect_timeout=0.01)
    asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.6))

    next_message = peer1.new_get_next_message_f()

    ## make sure peer1 is disconnected
    got_eof = False
    try:
        asyncio.get_event_loop().run_until_complete(asyncio.wait_for(next_message(), timeout=0.2))
    except EOFError:
        got_eof = True
    assert got_eof
Example 22
def test_BitcoinPeerProtocol_multiplex():
    peer = BitcoinPeerProtocol(MAGIC_HEADER)
    pt = PeerTransport(None)
    peer.connection_made(pt)

    next_message_list = [peer.new_get_next_message_f() for i in range(50)]

    COUNT = 0

    @asyncio.coroutine
    def async_test(next_message):
        name, data = yield from next_message()
        assert name == 'version'
        assert data == VERSION_MSG
        name, data = yield from next_message()
        assert name == 'verack'
        assert data == {}
        nonlocal COUNT
        COUNT += 1

    peer.data_received(VERSION_MSG_BIN)
    peer.data_received(VERACK_MSG_BIN)

    asyncio.get_event_loop().run_until_complete(asyncio.wait([asyncio.Task(async_test(nm)) for nm in next_message_list]))
    assert COUNT == 50
Example 23
def dns_bootstrap_host_port_q(network_info, getaddrinfo=asyncio.get_event_loop().getaddrinfo):
    """
    Accepts network_info type (from pycoinnet.helpers.networks) and returns an asyncio.queue.
    You MUST call queue.task.close() on the return value when you're done with it.
    """
    dns_bootstrap = network_info["DNS_BOOTSTRAP"]
    superpeer_ip_queue = asyncio.Queue()

    @asyncio.coroutine
    def bootstrap_superpeer_addresses(dns_bootstrap):
        for h in dns_bootstrap:
            try:
                r = yield from getaddrinfo(h, network_info["DEFAULT_PORT"])
                results = set(t[-1][:2] for t in r)
                for t in results:
                    yield from superpeer_ip_queue.put(t)
                    logging.debug("got address %s", t)
            except Exception:
                logging.exception("problem in bootstrap_superpeer_addresses")
        yield from superpeer_ip_queue.put(None)

    superpeer_ip_queue.task = asyncio.Task(bootstrap_superpeer_addresses(dns_bootstrap))
    return superpeer_ip_queue
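Attaching the task to the queue object is what lets callers honor the docstring's cleanup requirement: holding a reference to the background DNS task means the consumer can stop it once it has the addresses it needs. (The docstring says `close()`; for an `asyncio.Task` the corresponding call is `cancel()`.)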
Example 24
def __init__(self, *, parent, loop):
    self._parent = parent
    self._transport = None
    self.data = None
    self.http_parser = HttpRequestParser(self)
    self.request = None
    self._loop = loop
    self._task: asyncio.Task = None
Example 25
async def crawl(self):
    workers = [asyncio.Task(self.work())
               for _ in range(self._max_tasks)]

    # When all work is done, exit.
    log.info(f"DomainCrawler - await queue.join - count: {len(self._obj_dict)}")
    await self._q.join()
    log.info(f"DomainCrawler - join complete - count: {len(self._obj_dict)}")

    for w in workers:
        w.cancel()
    log.debug("DomainCrawler - workers canceled")
Example 26
async def crawl(self):
    workers = [asyncio.Task(self.work())
               for _ in range(self._max_tasks)]

    # When all work is done, exit.
    log.info(f"FolderCrawler max_tasks {self._max_tasks} = await queue.join - count: {len(self._domain_dict)}")
    await self._q.join()
    log.info(f"FolderCrawler - join complete - count: {len(self._domain_dict)}")

    for w in workers:
        w.cancel()
    log.debug("FolderCrawler - workers canceled")
Example 27
def __init__(self):
    # Keep track of the child cog's name so the logs are clear.
    self.cog_name = self.__class__.__name__

    self._scheduled_tasks: t.Dict[t.Hashable, asyncio.Task] = {}
Example 28
def _task_done_callback(self, task_id: t.Hashable, done_task: asyncio.Task) -> None:
    """
    Delete the task and raise its exception if one exists.

    If `done_task` and the task associated with `task_id` are different, then the latter
    will not be deleted. In this case, a new task was likely rescheduled with the same ID.
    """
    log.trace(f"{self.cog_name}: performing done callback for task #{task_id} {id(done_task)}.")

    scheduled_task = self._scheduled_tasks.get(task_id)

    if scheduled_task and done_task is scheduled_task:
        # A task for the ID exists and it's the same as the done task.
        # Since this is the done callback, the task is already done so no need to cancel it.
        log.trace(f"{self.cog_name}: deleting task #{task_id} {id(done_task)}.")
        del self._scheduled_tasks[task_id]
    elif scheduled_task:
        # A new task was likely rescheduled with the same ID.
        log.debug(
            f"{self.cog_name}: the scheduled task #{task_id} {id(scheduled_task)} "
            f"and the done task {id(done_task)} differ."
        )
    elif not done_task.cancelled():
        log.warning(
            f"{self.cog_name}: task #{task_id} not found while handling task {id(done_task)}! "
            f"A task somehow got unscheduled improperly (i.e. deleted but not cancelled)."
        )

    with contextlib.suppress(asyncio.CancelledError):
        exception = done_task.exception()
        # Log the exception if one exists.
        if exception:
            log.error(
                f"{self.cog_name}: error in task #{task_id} {id(done_task)}!",
                exc_info=exception
            )
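Retrieving the result or exception of a cancelled task raises `asyncio.CancelledError`, which is why the call above is wrapped in `contextlib.suppress`; for a task that finished normally, `exception()` returns `None`. A minimal illustration of the same retrieval pattern (names are illustrative):

import asyncio
import contextlib

async def boom():
    raise RuntimeError("failed")

async def main():
    task = asyncio.ensure_future(boom())
    await asyncio.sleep(0)  # give the task one step so it can finish
    with contextlib.suppress(asyncio.CancelledError):
        exc = task.exception()  # the RuntimeError, or None on success
        if exc:
            print("task failed:", exc)

asyncio.run(main())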
Example 29
async def test_xread_blocking(redis, create_redis, server, server_bin):
    """Test the blocking read features"""
    fields = OrderedDict((
        (b'field1', b'value1'),
        (b'field2', b'value2'),
    ))
    other_redis = await create_redis(
        server.tcp_address)

    # create blocking task in separate connection
    consumer = other_redis.xread(['test_stream'], timeout=1000)

    producer_task = asyncio.Task(
        add_message_with_sleep(redis, 'test_stream', fields))
    results = await asyncio.gather(consumer, producer_task)

    received_messages, sent_message_id = results
    assert len(received_messages) == 1
    assert sent_message_id

    received_stream, received_message_id, received_fields \
        = received_messages[0]

    assert received_stream == b'test_stream'
    assert sent_message_id == received_message_id
    assert fields == received_fields

    # Test that we get nothing back from an empty stream
    results = await redis.xread(['another_stream'], timeout=100)
    assert results == []

    other_redis.close()
Example 30
def cancel_remaining_feeders(loop, logger_name="moler.runner.asyncio", in_shutdown=False):
    remaining = [task for task in asyncio.Task.all_tasks(loop=loop) if (not task.done()) and (is_feeder(task))]
    if remaining:
        logger = logging.getLogger(logger_name)
        loop_id = instance_id(loop)
        log_level = logging.WARNING if in_shutdown else logging.DEBUG
        logger.log(level=log_level, msg="cancelling all remaining feeders of loop {}:".format(loop_id))
        remaining_tasks = asyncio.gather(*remaining, loop=loop, return_exceptions=True)
        for feeder in remaining:
            logger.log(level=log_level, msg=" remaining {}:{}".format(instance_id(feeder), feeder))
        remaining_tasks.cancel()
        if not loop.is_running():
            # Keep the event loop running until it is either destroyed or all tasks have really terminated
            loop.run_until_complete(remaining_tasks)
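`asyncio.Task.all_tasks()` and the `loop=` argument to `asyncio.gather()` were deprecated in Python 3.7/3.8 and removed in 3.9/3.10 respectively; on modern interpreters the equivalent enumeration would be:

remaining = [task for task in asyncio.all_tasks(loop) if not task.done() and is_feeder(task)]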