Python source code examples: elasticsearch.ConnectionTimeout()
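elasticsearch.ConnectionTimeout is the exception elasticsearch-py raises when a request exceeds its timeout. It subclasses elasticsearch.ConnectionError (and therefore elasticsearch.TransportError), and its status_code is typically the string "TIMEOUT" rather than an HTTP status code, which Example 5 below relies on. Before the collected examples, a minimal sketch of catching it directly, assuming an elasticsearch-py 7.x style client; the host URL and timeout value are placeholders:

import elasticsearch

es = elasticsearch.Elasticsearch(["http://localhost:9200"])
try:
    # a deliberately tiny per-request timeout to provoke a timeout
    es.cluster.health(request_timeout=0.001)
except elasticsearch.ConnectionTimeout as e:
    # ConnectionTimeout is a ConnectionError, so more general handlers also catch it
    print("request timed out:", e)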
Example 1
async def test_does_not_retry_on_timeout_if_not_wanted(self):
    import elasticsearch
    delegate = mock.Mock(side_effect=as_future(exception=elasticsearch.ConnectionTimeout(408, "timed out")))
    es = None
    params = {
        "retries": 3,
        "retry-wait-period": 0.01,
        "retry-on-timeout": False,
        "retry-on-error": True
    }

    retrier = runner.Retry(delegate)

    # retry-on-timeout is disabled, so the timeout must propagate after a single attempt
    with self.assertRaises(elasticsearch.ConnectionTimeout):
        await retrier(es, params)

    delegate.assert_called_once_with(es, params)
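Examples 1 and 6 pass the result of Rally's as_future test helper to mock objects so that a synchronous mock can stand in for an awaitable Elasticsearch call. A rough sketch of such a helper, assuming it simply wraps a value or exception in an already-completed asyncio future (the real helper lives in Rally's test utilities and may differ):

import asyncio

def as_future(result=None, exception=None):
    # build an already-resolved future so that awaiting the mocked call
    # either yields the canned result or raises the canned exception
    f = asyncio.Future()
    if exception is not None:
        f.set_exception(exception)
    else:
        f.set_result(result)
    return f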
Example 2
async def test_perform_request_ssl_error(auto_close, loop):
    for exc, expected in [
        (aiohttp.ClientConnectorCertificateError(mock.Mock(), mock.Mock()), SSLError),  # noqa
        (aiohttp.ClientConnectorSSLError(mock.Mock(), mock.Mock()), SSLError),
        (aiohttp.ClientSSLError(mock.Mock(), mock.Mock()), SSLError),
        (aiohttp.ClientError('Other'), ConnectionError),
        (asyncio.TimeoutError, ConnectionTimeout),
    ]:
        session = aiohttp.ClientSession(loop=loop)

        async def coro(*args, **kwargs):
            raise exc

        session._request = coro

        conn = auto_close(AIOHttpConnection(session=session, loop=loop,
                                            use_ssl=True))
        with pytest.raises(expected):
            await conn.perform_request('HEAD', '/')
Example 3
def test_timeout_is_properly_raised(connection, server):
    @asyncio.coroutine
    def slow_request():
        yield from asyncio.sleep(0.01)
        return {}
    server.register_response('/_search', slow_request())

    with raises(ConnectionTimeout):
        yield from connection.perform_request('GET', '/_search', timeout=0.0001)
Example 4
@asyncio.coroutine
def main_loop(self, method, url, params, body, headers=None, ignore=(), timeout=None):
    for attempt in range(self.max_retries + 1):
        connection = self.get_connection()

        try:
            status, headers, data = yield from connection.perform_request(
                method, url, params, body, headers=headers, ignore=ignore, timeout=timeout)
        except TransportError as e:
            if method == 'HEAD' and e.status_code == 404:
                return False

            retry = False
            if isinstance(e, ConnectionTimeout):
                retry = self.retry_on_timeout
            elif isinstance(e, ConnectionError):
                retry = True
            elif e.status_code in self.retry_on_status:
                retry = True

            if retry:
                # only mark as dead if we are retrying
                self.mark_dead(connection)
                # raise exception on last retry
                if attempt == self.max_retries:
                    raise
            else:
                raise
        else:
            if method == 'HEAD':
                return 200 <= status < 300

            # connection didn't fail, confirm it's live status
            self.connection_pool.mark_live(connection)

            if data:
                data = self.deserializer.loads(data, headers.get('content-type'))

            return data
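The flags main_loop consults (retry_on_timeout, max_retries, retry_on_status) are ordinary transport options in elasticsearch-py, so whether a ConnectionTimeout is retried or raised immediately is decided at client construction time. A small configuration sketch; the host and values are illustrative only:

from elasticsearch import Elasticsearch

es = Elasticsearch(
    ["http://localhost:9200"],
    max_retries=3,                     # upper bound on re-attempts in main_loop
    retry_on_timeout=True,             # retry ConnectionTimeout instead of raising immediately
    retry_on_status=(502, 503, 504),   # HTTP statuses that also trigger a retry
)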
Example 5
async def execute_single(runner, es, params, on_error):
    """
    Invokes the given runner once and provides the runner's return value in a uniform structure.

    :return: a triple of: total number of operations, unit of operations, a dict of request meta data (may be None).
    """
    import elasticsearch
    fatal_error = False
    try:
        async with runner:
            return_value = await runner(es, params)
        if isinstance(return_value, tuple) and len(return_value) == 2:
            total_ops, total_ops_unit = return_value
            request_meta_data = {"success": True}
        elif isinstance(return_value, dict):
            total_ops = return_value.pop("weight", 1)
            total_ops_unit = return_value.pop("unit", "ops")
            request_meta_data = return_value
            if "success" not in request_meta_data:
                request_meta_data["success"] = True
        else:
            total_ops = 1
            total_ops_unit = "ops"
            request_meta_data = {"success": True}
    except elasticsearch.TransportError as e:
        # we *specifically* want to distinguish connection refused (a node died?) from connection timeouts
        # pylint: disable=unidiomatic-typecheck
        if type(e) is elasticsearch.ConnectionError:
            fatal_error = True

        total_ops = 0
        total_ops_unit = "ops"
        request_meta_data = {
            "success": False,
            "error-type": "transport"
        }
        # The ES client will sometimes return a string like "N/A" or "TIMEOUT" for connection errors.
        if isinstance(e.status_code, int):
            request_meta_data["http-status"] = e.status_code
        # connection timeout errors don't provide a helpful description
        if isinstance(e, elasticsearch.ConnectionTimeout):
            request_meta_data["error-description"] = "network connection timed out"
        elif e.info:
            request_meta_data["error-description"] = "%s (%s)" % (e.error, e.info)
        else:
            request_meta_data["error-description"] = e.error
    except KeyError as e:
        logging.getLogger(__name__).exception("Cannot execute runner [%s]; most likely due to missing parameters.", str(runner))
        msg = "Cannot execute [%s]. Provided parameters are: %s. Error: [%s]." % (str(runner), list(params.keys()), str(e))
        raise exceptions.SystemSetupError(msg)

    if not request_meta_data["success"]:
        if on_error == "abort" or (on_error == "continue-on-non-fatal" and fatal_error):
            msg = "Request returned an error. Error type: %s" % request_meta_data.get("error-type", "Unknown")
            description = request_meta_data.get("error-description")
            if description:
                msg += ", Description: %s" % description
            raise exceptions.RallyAssertionError(msg)

    return total_ops, total_ops_unit, request_meta_data
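As the docstring states, execute_single always returns a (total_ops, unit, meta) triple, even when the request failed, so callers inspect meta["success"] rather than catching the client exception themselves. A hypothetical call-site sketch; runner, es and params stand in for real Rally objects and are not defined here:

async def run_once(runner, es, params):
    total_ops, unit, meta = await execute_single(runner, es, params, on_error="continue-on-non-fatal")
    if not meta["success"]:
        # a ConnectionTimeout surfaces as error-type "transport" with a generic description
        print("request failed:", meta.get("error-description"))
    return total_ops, unit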
Example 6
async def test_scroll_query_cannot_clear_scroll(self, es):
    import elasticsearch
    # page 1
    search_response = {
        "_scroll_id": "some-scroll-id",
        "timed_out": False,
        "took": 53,
        "hits": {
            "total": {
                "value": 1,
                "relation": "eq"
            },
            "hits": [
                {
                    "title": "some-doc-1"
                }
            ]
        }
    }

    es.transport.perform_request.return_value = as_future(io.StringIO(json.dumps(search_response)))
    # clearing the scroll fails with a timeout, but the query itself must still succeed
    es.clear_scroll.return_value = as_future(exception=elasticsearch.ConnectionTimeout())

    query_runner = runner.Query()

    params = {
        "pages": 5,
        "results-per-page": 100,
        "index": "unittest",
        "cache": False,
        "body": {
            "query": {
                "match_all": {}
            }
        }
    }

    async with query_runner:
        results = await query_runner(es, params)

    self.assertEqual(1, results["weight"])
    self.assertEqual(1, results["pages"])
    self.assertEqual(1, results["hits"])
    self.assertEqual("eq", results["hits_relation"])
    self.assertEqual("pages", results["unit"])
    self.assertEqual(53, results["took"])
    self.assertFalse("error-type" in results)

    es.clear_scroll.assert_called_once_with(body={"scroll_id": ["some-scroll-id"]})
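The mock setup in Examples 1 and 6 predates unittest.mock.AsyncMock; on Python 3.8+ the same ConnectionTimeout propagation can be exercised with just the standard library. A minimal, self-contained sketch; the client object and its search call are stand-ins, not part of the examples above:

import asyncio
import unittest.mock as mock

import elasticsearch


async def provoke_timeout():
    es = mock.AsyncMock()
    # awaiting the mocked call raises the configured exception
    es.search.side_effect = elasticsearch.ConnectionTimeout("TIMEOUT", "timed out", None)
    try:
        await es.search(index="unittest", body={"query": {"match_all": {}}})
    except elasticsearch.ConnectionTimeout:
        return True
    return False


if __name__ == "__main__":
    assert asyncio.run(provoke_timeout())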