Python source examples: asyncio.LimitOverrunError()
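asyncio.LimitOverrunError is raised by asyncio.StreamReader.readuntil() when the data scanned while looking for the separator exceeds the reader's limit. The exception message says whether the separator has been seen yet, and its consumed attribute holds the number of bytes already scanned; the buffered data itself is left in place so the caller can recover. Before the project examples below, here is a minimal self-contained sketch (standalone, not taken from any of them) showing both failure modes:

import asyncio

async def main():
    reader = asyncio.StreamReader(limit=3)  # tiny limit for demonstration
    reader.feed_data(b'some dataAA')
    try:
        await reader.readuntil(b'AAA')
    except asyncio.LimitOverrunError as e:
        # Separator not fully seen yet, and more than `limit` bytes scanned.
        print(e, '| consumed =', e.consumed)

    reader.feed_data(b'A')
    try:
        await reader.readuntil(b'AAA')
    except asyncio.LimitOverrunError as e:
        # Separator found, but the chunk before it is longer than the limit.
        print(e, '| consumed =', e.consumed)

asyncio.run(main())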
Example 1
# CancelledError, IncompleteReadError and LimitOverrunError come from
# asyncio; ConnectionResetError is a builtin.
from asyncio import CancelledError, IncompleteReadError, LimitOverrunError

async def run(self):
    await self._client_connected()

    while not self.__writer.is_closing():
        try:
            data = await self.__reader.readuntil(
                separator=Spheniscidae.Delimiter)

            if data:
                await self.__data_received(data)
            else:
                self.__writer.close()

            await self.__writer.drain()
        except IncompleteReadError:
            self.__writer.close()
        except CancelledError:
            self.__writer.close()
        except ConnectionResetError:
            self.__writer.close()
        except LimitOverrunError:
            self.__writer.close()
        except BaseException as e:
            self.logger.exception(e.__traceback__)

    await self._client_disconnected()
Example 2
def test_readuntil_limit_found_sep(self):
    stream = asyncio.StreamReader(loop=self.loop, limit=3)
    stream.feed_data(b'some dataAA')

    with self.assertRaisesRegex(asyncio.LimitOverrunError,
                                'not found') as cm:
        self.loop.run_until_complete(stream.readuntil(b'AAA'))

    self.assertEqual(b'some dataAA', stream._buffer)

    stream.feed_data(b'A')
    with self.assertRaisesRegex(asyncio.LimitOverrunError,
                                'is found') as cm:
        self.loop.run_until_complete(stream.readuntil(b'AAA'))

    self.assertEqual(b'some dataAAA', stream._buffer)
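The two regexes match the two messages readuntil() can produce: 'Separator is not found, and chunk exceed the limit' while the separator is still missing, and 'Separator is found, but chunk is longer than limit' once it finally appears beyond the limit. Both assertions on stream._buffer pass because readuntil() leaves the buffered data untouched when it raises; that guarantee is what makes the recovery strategies in Examples 3, 7 and 8 possible.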
Example 3
async def readuntil(self, separator=b"\n"):
    # Re-implement `readuntil` to work around self._limit.
    # The limit is still useful to prevent the internal buffer
    # from growing too large when it's not necessary, but it
    # needs to be disabled when the user code is purposely
    # reading from stdin.
    while True:
        try:
            return await super().readuntil(separator)
        except asyncio.LimitOverrunError as e:
            if self._buffer.startswith(separator, e.consumed):
                chunk = self._buffer[: e.consumed + len(separator)]
                del self._buffer[: e.consumed + len(separator)]
                self._maybe_resume_transport()
                return bytes(chunk)
            await self._wait_for_data("readuntil")
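A hedged sketch of this override in use; the subclass name UnlimitedReader is invented here for illustration, and like the example above it relies on StreamReader internals (_buffer, _maybe_resume_transport, _wait_for_data):

import asyncio

class UnlimitedReader(asyncio.StreamReader):
    async def readuntil(self, separator=b"\n"):
        # Same workaround as above: on overrun, hand back the oversized
        # chunk if the separator has arrived, otherwise wait for more data.
        while True:
            try:
                return await super().readuntil(separator)
            except asyncio.LimitOverrunError as e:
                if self._buffer.startswith(separator, e.consumed):
                    chunk = self._buffer[: e.consumed + len(separator)]
                    del self._buffer[: e.consumed + len(separator)]
                    self._maybe_resume_transport()
                    return bytes(chunk)
                await self._wait_for_data("readuntil")

async def main():
    reader = UnlimitedReader(limit=8)  # tiny limit for demonstration
    reader.feed_data(b'a line much longer than the limit\n')
    print(await reader.readuntil(b'\n'))  # succeeds despite the overrun

asyncio.run(main())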
Example 4
async def read_stream(self, stream):
    while self.is_active:
        try:
            line = await stream.readline()
        except (asyncio.LimitOverrunError, ValueError):
            # Line exceeded the reader's limit; skip it and keep reading.
            continue
        if line:
            line = line.decode("utf-8")[:-1]  # strip the trailing newline
            print(f"[Cluster {self.id}] {line}")
        else:
            break
Example 5
async def async_run_command(self, command, first_try=True):
    """Run a command through a Telnet connection.

    Connect to the Telnet server if not currently connected, otherwise
    use the existing connection.
    """
    await self.async_connect()
    try:
        # `with (await lock)` is the old, pre-`async with` way of
        # acquiring an asyncio.Lock.
        with (await self._io_lock):
            self._writer.write('{}\n'.format(
                "%s && %s" % (
                    _PATH_EXPORT_COMMAND, command)).encode('ascii'))
            data = ((await asyncio.wait_for(self._reader.readuntil(
                self._prompt_string), 9)).split(b'\n')[1:-1])
    except (BrokenPipeError, LimitOverrunError):
        # LimitOverrunError here is asyncio's, imported by the original module.
        if first_try:
            return await self.async_run_command(command, False)
        else:
            _LOGGER.warning("Connection lost to host.")
            return []
    except TimeoutError:
        _LOGGER.error("Host timeout.")
        return []
    finally:
        self._writer.close()

    return [line.decode('utf-8') for line in data]
Example 6
def test_LimitOverrunError_pickleable(self):
    e = asyncio.LimitOverrunError('message', 10)
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        with self.subTest(pickle_protocol=proto):
            e2 = pickle.loads(pickle.dumps(e, protocol=proto))
            self.assertEqual(str(e), str(e2))
            self.assertEqual(e.consumed, e2.consumed)
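The round trip preserves consumed because the exception defines __reduce__ to re-create itself with both constructor arguments, which plain Exception pickling would not do; the definition in CPython's asyncio/exceptions.py is essentially:

class LimitOverrunError(Exception):
    """Reached the buffer limit while looking for a separator."""

    def __init__(self, message, consumed):
        super().__init__(message)
        self.consumed = consumed  # number of bytes scanned so far

    def __reduce__(self):
        return type(self), (self.args[0], self.consumed)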
Example 7
async def connection_loop(execute_rpc: Callable[[Any], Any],
                          reader: asyncio.StreamReader,
                          writer: asyncio.StreamWriter) -> None:
    # TODO: we should look into using an io.StringIO here for more efficient
    # writing to the end of the string.
    raw_request = ''
    while True:
        request_bytes = b''
        try:
            request_bytes = await reader.readuntil(b'}')
        except asyncio.LimitOverrunError as e:
            logger.info("Client request was too long. Erasing buffer and restarting...")
            request_bytes = await reader.read(e.consumed)
            await write_error(
                writer,
                f"reached limit: {e.consumed} bytes, starting with '{request_bytes[:20]!r}'",
            )
            continue

        raw_request += request_bytes.decode()

        bad_prefix, raw_request = strip_non_json_prefix(raw_request)
        if bad_prefix:
            logger.info("Client started request with non json data: %r", bad_prefix)
            await write_error(writer, 'Cannot parse json: ' + bad_prefix)

        try:
            request = json.loads(raw_request)
        except json.JSONDecodeError:
            # invalid json request, keep reading data until a valid json is formed
            logger.debug("Invalid JSON, waiting for rest of message: %r", raw_request)
            continue

        # reset the buffer for the next message
        raw_request = ''

        if not request:
            logger.debug("Client sent empty request")
            await write_error(writer, 'Invalid Request: empty')
            continue

        try:
            result = await execute_rpc(request)
        except Exception as e:
            logger.exception("Unrecognized exception while executing RPC")
            await write_error(writer, "unknown failure: " + str(e))
        else:
            if not result.endswith(NEW_LINE):
                result += NEW_LINE

            writer.write(result.encode())

        await writer.drain()
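A note on the recovery above: with the one-byte separator b'}', e.consumed covers everything readuntil() has already scanned, so reader.read(e.consumed) drains exactly those bytes and the next iteration starts from a clean offset; without that drain, the very next readuntil() call would overrun the limit again immediately.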
Example 8
async def stream(self, chunk_size: int = 1 * 1024 * 1024,
                 chunk_timeout: int = 10, complete_timeout: int = 300):
    """
    :param chunk_size: maximum number of bytes yielded at a time
    :param chunk_timeout: timeout for reading a single chunk
    :param complete_timeout: overall time budget for the whole body
    :return: an async generator of body chunks
    """
    if self._parser_status not in (ResponseStatus.PENDING_HEADERS,
                                   ResponseStatus.PENDING_BODY):
        raise StreamAlreadyConsumed
    if self._parser_status == ResponseStatus.PENDING_HEADERS:
        await wait_for(self.receive_headers(), chunk_timeout)
    try:
        length = int(self.headers['Content-Length'])
        remaining = length
        while remaining:
            bytes_to_read = min(remaining, chunk_size)
            task = self._connection.read_exactly(bytes_to_read)
            start_time = time.time()
            self._parser.feed(await wait_for(task, min(chunk_timeout, complete_timeout)))
            complete_timeout -= time.time() - start_time
            remaining -= bytes_to_read
            yield bytes(self._content)
            self._content = bytearray()
    except KeyError:
        # No Content-Length header: fall back to chunked transfer encoding.
        if self._headers.get('Transfer-Encoding') == 'chunked':
            self._parser_status = ResponseStatus.CHUNKED_TRANSFER
            while self._parser_status == ResponseStatus.CHUNKED_TRANSFER:
                task = self._connection.read_until(b'\r\n')
                start_time = time.time()
                try:
                    self._parser.feed(await wait_for(task, min(chunk_timeout, complete_timeout)))
                except asyncio.LimitOverrunError as error:
                    # Chunk is larger than the reader's limit: read the
                    # already-scanned bytes directly and keep feeding the parser.
                    self._parser.feed(await self._connection.read_exactly(error.consumed))
                complete_timeout -= time.time() - start_time
                while len(self._content) >= chunk_size:
                    yield self._content[:chunk_size]
                    self._content = self._content[chunk_size:]
            while len(self._content) > 0:
                yield self._content[:chunk_size]
                self._content = self._content[chunk_size:]
        else:
            raise Exception('Invalid response.')
    except ValueError:
        raise Exception('Invalid content-length header.')