From eacac7cff6044cb19f6f90f6e3a36b59e87a4e47 Mon Sep 17 00:00:00 2001 From: Yeray Diaz Diaz Date: Mon, 13 Jan 2020 12:22:33 +0000 Subject: [PATCH 1/8] Add tenacity source to _vendor --- src/pip/_vendor/tenacity/LICENSE | 202 ++++++++++ src/pip/_vendor/tenacity/__init__.py | 486 +++++++++++++++++++++++ src/pip/_vendor/tenacity/_asyncio.py | 66 +++ src/pip/_vendor/tenacity/_utils.py | 154 +++++++ src/pip/_vendor/tenacity/after.py | 35 ++ src/pip/_vendor/tenacity/before.py | 32 ++ src/pip/_vendor/tenacity/before_sleep.py | 37 ++ src/pip/_vendor/tenacity/compat.py | 301 ++++++++++++++ src/pip/_vendor/tenacity/nap.py | 34 ++ src/pip/_vendor/tenacity/retry.py | 187 +++++++++ src/pip/_vendor/tenacity/stop.py | 103 +++++ src/pip/_vendor/tenacity/tornadoweb.py | 53 +++ src/pip/_vendor/tenacity/wait.py | 195 +++++++++ 13 files changed, 1885 insertions(+) create mode 100644 src/pip/_vendor/tenacity/LICENSE create mode 100644 src/pip/_vendor/tenacity/__init__.py create mode 100644 src/pip/_vendor/tenacity/_asyncio.py create mode 100644 src/pip/_vendor/tenacity/_utils.py create mode 100644 src/pip/_vendor/tenacity/after.py create mode 100644 src/pip/_vendor/tenacity/before.py create mode 100644 src/pip/_vendor/tenacity/before_sleep.py create mode 100644 src/pip/_vendor/tenacity/compat.py create mode 100644 src/pip/_vendor/tenacity/nap.py create mode 100644 src/pip/_vendor/tenacity/retry.py create mode 100644 src/pip/_vendor/tenacity/stop.py create mode 100644 src/pip/_vendor/tenacity/tornadoweb.py create mode 100644 src/pip/_vendor/tenacity/wait.py diff --git a/src/pip/_vendor/tenacity/LICENSE b/src/pip/_vendor/tenacity/LICENSE new file mode 100644 index 00000000000..7a4a3ea2424 --- /dev/null +++ b/src/pip/_vendor/tenacity/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/src/pip/_vendor/tenacity/__init__.py b/src/pip/_vendor/tenacity/__init__.py new file mode 100644 index 00000000000..3718a5e5362 --- /dev/null +++ b/src/pip/_vendor/tenacity/__init__.py @@ -0,0 +1,486 @@ +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Julien Danjou +# Copyright 2017 Elisey Zanko +# Copyright 2016 Étienne Bersac +# Copyright 2016 Joshua Harlow +# Copyright 2013-2014 Ray Holder +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +try: + import asyncio +except ImportError: + asyncio = None + +try: + import tornado +except ImportError: + tornado = None + +import sys +import threading +from concurrent import futures + +import six + +from tenacity import _utils +from tenacity import compat as _compat + +# Import all built-in retry strategies for easier usage. +from .retry import retry_all # noqa +from .retry import retry_always # noqa +from .retry import retry_any # noqa +from .retry import retry_if_exception # noqa +from .retry import retry_if_exception_type # noqa +from .retry import retry_if_not_result # noqa +from .retry import retry_if_result # noqa +from .retry import retry_never # noqa +from .retry import retry_unless_exception_type # noqa +from .retry import retry_if_exception_message # noqa +from .retry import retry_if_not_exception_message # noqa + +# Import all nap strategies for easier usage. +from .nap import sleep # noqa +from .nap import sleep_using_event # noqa + +# Import all built-in stop strategies for easier usage. +from .stop import stop_after_attempt # noqa +from .stop import stop_after_delay # noqa +from .stop import stop_all # noqa +from .stop import stop_any # noqa +from .stop import stop_never # noqa +from .stop import stop_when_event_set # noqa + +# Import all built-in wait strategies for easier usage. +from .wait import wait_chain # noqa +from .wait import wait_combine # noqa +from .wait import wait_exponential # noqa +from .wait import wait_fixed # noqa +from .wait import wait_incrementing # noqa +from .wait import wait_none # noqa +from .wait import wait_random # noqa +from .wait import wait_random_exponential # noqa +from .wait import wait_random_exponential as wait_full_jitter # noqa + +# Import all built-in before strategies for easier usage. +from .before import before_log # noqa +from .before import before_nothing # noqa + +# Import all built-in after strategies for easier usage. +from .after import after_log # noqa +from .after import after_nothing # noqa + +# Import all built-in after strategies for easier usage. +from .before_sleep import before_sleep_log # noqa +from .before_sleep import before_sleep_nothing # noqa + + +def retry(*dargs, **dkw): + """Wrap a function with a new `Retrying` object. + + :param dargs: positional arguments passed to Retrying object + :param dkw: keyword arguments passed to the Retrying object + """ + # support both @retry and @retry() as valid syntax + if len(dargs) == 1 and callable(dargs[0]): + return retry()(dargs[0]) + else: + def wrap(f): + if asyncio and asyncio.iscoroutinefunction(f): + r = AsyncRetrying(*dargs, **dkw) + elif tornado and hasattr(tornado.gen, 'is_coroutine_function') \ + and tornado.gen.is_coroutine_function(f): + r = TornadoRetrying(*dargs, **dkw) + else: + r = Retrying(*dargs, **dkw) + + return r.wraps(f) + + return wrap + + +class TryAgain(Exception): + """Always retry the executed function when raised.""" + + +NO_RESULT = object() + + +class DoAttempt(object): + pass + + +class DoSleep(float): + pass + + +class BaseAction(object): + """Base class for representing actions to take by retry object. 
+ + Concrete implementations must define: + - __init__: to initialize all necessary fields + - REPR_ATTRS: class variable specifying attributes to include in repr(self) + - NAME: for identification in retry object methods and callbacks + """ + + REPR_FIELDS = () + NAME = None + + def __repr__(self): + state_str = ', '.join('%s=%r' % (field, getattr(self, field)) + for field in self.REPR_FIELDS) + return '%s(%s)' % (type(self).__name__, state_str) + + def __str__(self): + return repr(self) + + +class RetryAction(BaseAction): + REPR_FIELDS = ('sleep',) + NAME = 'retry' + + def __init__(self, sleep): + self.sleep = float(sleep) + + +_unset = object() + + +class RetryError(Exception): + """Encapsulates the last attempt instance right before giving up.""" + + def __init__(self, last_attempt): + self.last_attempt = last_attempt + super(RetryError, self).__init__(last_attempt) + + def reraise(self): + if self.last_attempt.failed: + raise self.last_attempt.result() + raise self + + def __str__(self): + return "{0}[{1}]".format(self.__class__.__name__, self.last_attempt) + + +class AttemptManager(object): + """Manage attempt context.""" + + def __init__(self, retry_state): + self.retry_state = retry_state + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_value, traceback): + if isinstance(exc_value, BaseException): + self.retry_state.set_exception((exc_type, exc_value, traceback)) + return True # Swallow exception. + else: + # We don't have the result, actually. + self.retry_state.set_result(None) + + +class BaseRetrying(object): + + def __init__(self, + sleep=sleep, + stop=stop_never, wait=wait_none(), + retry=retry_if_exception_type(), + before=before_nothing, + after=after_nothing, + before_sleep=None, + reraise=False, + retry_error_cls=RetryError, + retry_error_callback=None): + self.sleep = sleep + self._stop = stop + self._wait = wait + self._retry = retry + self._before = before + self._after = after + self._before_sleep = before_sleep + self.reraise = reraise + self._local = threading.local() + self.retry_error_cls = retry_error_cls + self._retry_error_callback = retry_error_callback + + # This attribute was moved to RetryCallState and is deprecated on + # Retrying objects but kept for backward compatibility. 
+ self.fn = None + + @_utils.cached_property + def stop(self): + return _compat.stop_func_accept_retry_state(self._stop) + + @_utils.cached_property + def wait(self): + return _compat.wait_func_accept_retry_state(self._wait) + + @_utils.cached_property + def retry(self): + return _compat.retry_func_accept_retry_state(self._retry) + + @_utils.cached_property + def before(self): + return _compat.before_func_accept_retry_state(self._before) + + @_utils.cached_property + def after(self): + return _compat.after_func_accept_retry_state(self._after) + + @_utils.cached_property + def before_sleep(self): + return _compat.before_sleep_func_accept_retry_state(self._before_sleep) + + @_utils.cached_property + def retry_error_callback(self): + return _compat.retry_error_callback_accept_retry_state( + self._retry_error_callback) + + def copy(self, sleep=_unset, stop=_unset, wait=_unset, + retry=_unset, before=_unset, after=_unset, before_sleep=_unset, + reraise=_unset): + """Copy this object with some parameters changed if needed.""" + if before_sleep is _unset: + before_sleep = self.before_sleep + return self.__class__( + sleep=self.sleep if sleep is _unset else sleep, + stop=self.stop if stop is _unset else stop, + wait=self.wait if wait is _unset else wait, + retry=self.retry if retry is _unset else retry, + before=self.before if before is _unset else before, + after=self.after if after is _unset else after, + before_sleep=before_sleep, + reraise=self.reraise if after is _unset else reraise, + ) + + def __repr__(self): + attrs = dict( + _utils.visible_attrs(self, attrs={'me': id(self)}), + __class__=self.__class__.__name__, + ) + return ("<%(__class__)s object at 0x%(me)x (stop=%(stop)s, " + "wait=%(wait)s, sleep=%(sleep)s, retry=%(retry)s, " + "before=%(before)s, after=%(after)s)>") % (attrs) + + @property + def statistics(self): + """Return a dictionary of runtime statistics. + + This dictionary will be empty when the controller has never been + ran. When it is running or has ran previously it should have (but + may not) have useful and/or informational keys and values when + running is underway and/or completed. + + .. warning:: The keys in this dictionary **should** be some what + stable (not changing), but there existence **may** + change between major releases as new statistics are + gathered or removed so before accessing keys ensure that + they actually exist and handle when they do not. + + .. note:: The values in this dictionary are local to the thread + running call (so if multiple threads share the same retrying + object - either directly or indirectly) they will each have + there own view of statistics they have collected (in the + future we may provide a way to aggregate the various + statistics from each thread). + """ + try: + return self._local.statistics + except AttributeError: + self._local.statistics = {} + return self._local.statistics + + def wraps(self, f): + """Wrap a function for retrying. + + :param f: A function to wraps for retrying. 
+ """ + @_utils.wraps(f) + def wrapped_f(*args, **kw): + return self.call(f, *args, **kw) + + def retry_with(*args, **kwargs): + return self.copy(*args, **kwargs).wraps(f) + + wrapped_f.retry = self + wrapped_f.retry_with = retry_with + + return wrapped_f + + def begin(self, fn): + self.statistics.clear() + self.statistics['start_time'] = _utils.now() + self.statistics['attempt_number'] = 1 + self.statistics['idle_for'] = 0 + self.fn = fn + + def iter(self, retry_state): # noqa + fut = retry_state.outcome + if fut is None: + if self.before is not None: + self.before(retry_state) + return DoAttempt() + + is_explicit_retry = retry_state.outcome.failed \ + and isinstance(retry_state.outcome.exception(), TryAgain) + if not (is_explicit_retry or self.retry(retry_state=retry_state)): + return fut.result() + + if self.after is not None: + self.after(retry_state=retry_state) + + self.statistics['delay_since_first_attempt'] = \ + retry_state.seconds_since_start + if self.stop(retry_state=retry_state): + if self.retry_error_callback: + return self.retry_error_callback(retry_state=retry_state) + retry_exc = self.retry_error_cls(fut) + if self.reraise: + raise retry_exc.reraise() + six.raise_from(retry_exc, fut.exception()) + + if self.wait: + sleep = self.wait(retry_state=retry_state) + else: + sleep = 0.0 + retry_state.next_action = RetryAction(sleep) + retry_state.idle_for += sleep + self.statistics['idle_for'] += sleep + self.statistics['attempt_number'] += 1 + + if self.before_sleep is not None: + self.before_sleep(retry_state=retry_state) + + return DoSleep(sleep) + + def __iter__(self): + self.begin(None) + + retry_state = RetryCallState(self, fn=None, args=(), kwargs={}) + while True: + do = self.iter(retry_state=retry_state) + if isinstance(do, DoAttempt): + yield AttemptManager(retry_state=retry_state) + elif isinstance(do, DoSleep): + retry_state.prepare_for_next_attempt() + self.sleep(do) + else: + break + + +class Retrying(BaseRetrying): + """Retrying controller.""" + + def call(self, fn, *args, **kwargs): + self.begin(fn) + + retry_state = RetryCallState( + retry_object=self, fn=fn, args=args, kwargs=kwargs) + while True: + do = self.iter(retry_state=retry_state) + if isinstance(do, DoAttempt): + try: + result = fn(*args, **kwargs) + except BaseException: + retry_state.set_exception(sys.exc_info()) + else: + retry_state.set_result(result) + elif isinstance(do, DoSleep): + retry_state.prepare_for_next_attempt() + self.sleep(do) + else: + return do + + __call__ = call + + +class Future(futures.Future): + """Encapsulates a (future or past) attempted call to a target function.""" + + def __init__(self, attempt_number): + super(Future, self).__init__() + self.attempt_number = attempt_number + + @property + def failed(self): + """Return whether a exception is being held in this future.""" + return self.exception() is not None + + @classmethod + def construct(cls, attempt_number, value, has_exception): + """Construct a new Future object.""" + fut = cls(attempt_number) + if has_exception: + fut.set_exception(value) + else: + fut.set_result(value) + return fut + + +class RetryCallState(object): + """State related to a single call wrapped with Retrying.""" + + def __init__(self, retry_object, fn, args, kwargs): + #: Retry call start timestamp + self.start_time = _utils.now() + #: Retry manager object + self.retry_object = retry_object + #: Function wrapped by this retry call + self.fn = fn + #: Arguments of the function wrapped by this retry call + self.args = args + #: Keyword arguments of the 
function wrapped by this retry call + self.kwargs = kwargs + + #: The number of the current attempt + self.attempt_number = 1 + #: Last outcome (result or exception) produced by the function + self.outcome = None + #: Timestamp of the last outcome + self.outcome_timestamp = None + #: Time spent sleeping in retries + self.idle_for = 0 + #: Next action as decided by the retry manager + self.next_action = None + + @property + def seconds_since_start(self): + if self.outcome_timestamp is None: + return None + return self.outcome_timestamp - self.start_time + + def prepare_for_next_attempt(self): + self.outcome = None + self.outcome_timestamp = None + self.attempt_number += 1 + self.next_action = None + + def set_result(self, val): + ts = _utils.now() + fut = Future(self.attempt_number) + fut.set_result(val) + self.outcome, self.outcome_timestamp = fut, ts + + def set_exception(self, exc_info): + ts = _utils.now() + fut = Future(self.attempt_number) + _utils.capture(fut, exc_info) + self.outcome, self.outcome_timestamp = fut, ts + + +if asyncio: + from tenacity._asyncio import AsyncRetrying + +if tornado: + from tenacity.tornadoweb import TornadoRetrying diff --git a/src/pip/_vendor/tenacity/_asyncio.py b/src/pip/_vendor/tenacity/_asyncio.py new file mode 100644 index 00000000000..035699d2700 --- /dev/null +++ b/src/pip/_vendor/tenacity/_asyncio.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 Étienne Bersac +# Copyright 2016 Julien Danjou +# Copyright 2016 Joshua Harlow +# Copyright 2013-2014 Ray Holder +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import asyncio +except ImportError: + asyncio = None + +import sys + +from tenacity import BaseRetrying +from tenacity import DoAttempt +from tenacity import DoSleep +from tenacity import RetryCallState + + +if asyncio: + class AsyncRetrying(BaseRetrying): + + def __init__(self, + sleep=asyncio.sleep, + **kwargs): + super(AsyncRetrying, self).__init__(**kwargs) + self.sleep = sleep + + def wraps(self, fn): + fn = super().wraps(fn) + # Ensure wrapper is recognized as a coroutine function. 
+ fn._is_coroutine = asyncio.coroutines._is_coroutine + return fn + + @asyncio.coroutine + def call(self, fn, *args, **kwargs): + self.begin(fn) + + retry_state = RetryCallState( + retry_object=self, fn=fn, args=args, kwargs=kwargs) + while True: + do = self.iter(retry_state=retry_state) + if isinstance(do, DoAttempt): + try: + result = yield from fn(*args, **kwargs) + except BaseException: + retry_state.set_exception(sys.exc_info()) + else: + retry_state.set_result(result) + elif isinstance(do, DoSleep): + retry_state.prepare_for_next_attempt() + yield from self.sleep(do) + else: + return do diff --git a/src/pip/_vendor/tenacity/_utils.py b/src/pip/_vendor/tenacity/_utils.py new file mode 100644 index 00000000000..6703bd9c903 --- /dev/null +++ b/src/pip/_vendor/tenacity/_utils.py @@ -0,0 +1,154 @@ +# Copyright 2016 Julien Danjou +# Copyright 2016 Joshua Harlow +# Copyright 2013-2014 Ray Holder +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import sys +import time +from functools import update_wrapper + +import six + +# sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint... +try: + MAX_WAIT = sys.maxint / 2 +except AttributeError: + MAX_WAIT = 1073741823 + + +if six.PY2: + from functools import WRAPPER_ASSIGNMENTS, WRAPPER_UPDATES + + def wraps(fn): + """Do the same as six.wraps but only copy attributes that exist. + + For example, object instances don't have __name__ attribute, so + six.wraps fails. This is fixed in Python 3 + (https://bugs.python.org/issue3445), but didn't get backported to six. + + Also, see https://github.com/benjaminp/six/issues/250. + """ + def filter_hasattr(obj, attrs): + return tuple(a for a in attrs if hasattr(obj, a)) + return six.wraps( + fn, + assigned=filter_hasattr(fn, WRAPPER_ASSIGNMENTS), + updated=filter_hasattr(fn, WRAPPER_UPDATES)) + + def capture(fut, tb): + # TODO(harlowja): delete this in future, since its + # has to repeatedly calculate this crap. + fut.set_exception_info(tb[1], tb[2]) + + def getargspec(func): + # This was deprecated in Python 3. + return inspect.getargspec(func) +else: + from functools import wraps # noqa + + def capture(fut, tb): + fut.set_exception(tb[1]) + + def getargspec(func): + return inspect.getfullargspec(func) + + +def visible_attrs(obj, attrs=None): + if attrs is None: + attrs = {} + for attr_name, attr in inspect.getmembers(obj): + if attr_name.startswith("_"): + continue + attrs[attr_name] = attr + return attrs + + +def find_ordinal(pos_num): + # See: https://en.wikipedia.org/wiki/English_numerals#Ordinal_numbers + if pos_num == 0: + return "th" + elif pos_num == 1: + return 'st' + elif pos_num == 2: + return 'nd' + elif pos_num == 3: + return 'rd' + elif pos_num >= 4 and pos_num <= 20: + return 'th' + else: + return find_ordinal(pos_num % 10) + + +def to_ordinal(pos_num): + return "%i%s" % (pos_num, find_ordinal(pos_num)) + + +def get_callback_name(cb): + """Get a callback fully-qualified name. + + If no name can be produced ``repr(cb)`` is called and returned. 
+ """ + segments = [] + try: + segments.append(cb.__qualname__) + except AttributeError: + try: + segments.append(cb.__name__) + if inspect.ismethod(cb): + try: + # This attribute doesn't exist on py3.x or newer, so + # we optionally ignore it... (on those versions of + # python `__qualname__` should have been found anyway). + segments.insert(0, cb.im_class.__name__) + except AttributeError: + pass + except AttributeError: + pass + if not segments: + return repr(cb) + else: + try: + # When running under sphinx it appears this can be none? + if cb.__module__: + segments.insert(0, cb.__module__) + except AttributeError: + pass + return ".".join(segments) + + +try: + now = time.monotonic # noqa +except AttributeError: + from monotonic import monotonic as now # noqa + + +class cached_property(object): + """A property that is computed once per instance. + + Upon being computed it replaces itself with an ordinary attribute. Deleting + the attribute resets the property. + + Source: https://github.com/bottlepy/bottle/blob/1de24157e74a6971d136550afe1b63eec5b0df2b/bottle.py#L234-L246 + """ # noqa: E501 + + def __init__(self, func): + update_wrapper(self, func) + self.func = func + + def __get__(self, obj, cls): + if obj is None: + return self + value = obj.__dict__[self.func.__name__] = self.func(obj) + return value diff --git a/src/pip/_vendor/tenacity/after.py b/src/pip/_vendor/tenacity/after.py new file mode 100644 index 00000000000..55522c99b6a --- /dev/null +++ b/src/pip/_vendor/tenacity/after.py @@ -0,0 +1,35 @@ +# Copyright 2016 Julien Danjou +# Copyright 2016 Joshua Harlow +# Copyright 2013-2014 Ray Holder +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from tenacity import _utils + + +def after_nothing(retry_state): + """After call strategy that does nothing.""" + + +def after_log(logger, log_level, sec_format="%0.3f"): + """After call strategy that logs to some logger the finished attempt.""" + log_tpl = ("Finished call to '%s' after " + str(sec_format) + "(s), " + "this was the %s time calling it.") + + def log_it(retry_state): + logger.log(log_level, log_tpl, + _utils.get_callback_name(retry_state.fn), + retry_state.seconds_since_start, + _utils.to_ordinal(retry_state.attempt_number)) + + return log_it diff --git a/src/pip/_vendor/tenacity/before.py b/src/pip/_vendor/tenacity/before.py new file mode 100644 index 00000000000..54259dddff8 --- /dev/null +++ b/src/pip/_vendor/tenacity/before.py @@ -0,0 +1,32 @@ +# Copyright 2016 Julien Danjou +# Copyright 2016 Joshua Harlow +# Copyright 2013-2014 Ray Holder +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from tenacity import _utils + + +def before_nothing(retry_state): + """Before call strategy that does nothing.""" + + +def before_log(logger, log_level): + """Before call strategy that logs to some logger the attempt.""" + def log_it(retry_state): + logger.log(log_level, + "Starting call to '%s', this is the %s time calling it.", + _utils.get_callback_name(retry_state.fn), + _utils.to_ordinal(retry_state.attempt_number)) + + return log_it diff --git a/src/pip/_vendor/tenacity/before_sleep.py b/src/pip/_vendor/tenacity/before_sleep.py new file mode 100644 index 00000000000..b5fd6016c4f --- /dev/null +++ b/src/pip/_vendor/tenacity/before_sleep.py @@ -0,0 +1,37 @@ +# Copyright 2016 Julien Danjou +# Copyright 2016 Joshua Harlow +# Copyright 2013-2014 Ray Holder +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from tenacity import _utils + + +def before_sleep_nothing(retry_state): + """Before call strategy that does nothing.""" + + +def before_sleep_log(logger, log_level): + """Before call strategy that logs to some logger the attempt.""" + def log_it(retry_state): + if retry_state.outcome.failed: + verb, value = 'raised', retry_state.outcome.exception() + else: + verb, value = 'returned', retry_state.outcome.result() + + logger.log(log_level, + "Retrying %s in %s seconds as it %s %s.", + _utils.get_callback_name(retry_state.fn), + getattr(retry_state.next_action, 'sleep'), + verb, value) + return log_it diff --git a/src/pip/_vendor/tenacity/compat.py b/src/pip/_vendor/tenacity/compat.py new file mode 100644 index 00000000000..026c248d5eb --- /dev/null +++ b/src/pip/_vendor/tenacity/compat.py @@ -0,0 +1,301 @@ +"""Utilities for providing backward compatibility.""" + +import inspect +from fractions import Fraction +from warnings import warn + +import six + +from tenacity import _utils + + +def warn_about_non_retry_state_deprecation(cbname, func, stacklevel): + msg = ( + '"%s" function must accept single "retry_state" parameter,' + ' please update %s' % (cbname, _utils.get_callback_name(func))) + warn(msg, DeprecationWarning, stacklevel=stacklevel + 1) + + +def warn_about_dunder_non_retry_state_deprecation(fn, stacklevel): + msg = ( + '"%s" method must be called with' + ' single "retry_state" parameter' % (_utils.get_callback_name(fn))) + warn(msg, DeprecationWarning, stacklevel=stacklevel + 1) + + +def func_takes_retry_state(func): + if not six.callable(func): + raise Exception(func) + return False + if not inspect.isfunction(func) and not inspect.ismethod(func): + # func is a callable object rather than a function/method + func = func.__call__ + func_spec = _utils.getargspec(func) + return 'retry_state' in func_spec.args + + +_unset = object() + + +def _make_unset_exception(func_name, **kwargs): + missing = [] + for k, v in six.iteritems(kwargs): + if v is _unset: + missing.append(k) + missing_str = ', '.join(repr(s) for s in missing) + return TypeError(func_name + ' func missing 
parameters: ' + missing_str) + + +def _set_delay_since_start(retry_state, delay): + # Ensure outcome_timestamp - start_time is *exactly* equal to the delay to + # avoid complexity in test code. + retry_state.start_time = Fraction(retry_state.start_time) + retry_state.outcome_timestamp = (retry_state.start_time + Fraction(delay)) + assert retry_state.seconds_since_start == delay + + +def make_retry_state(previous_attempt_number, delay_since_first_attempt, + last_result=None): + """Construct RetryCallState for given attempt number & delay. + + Only used in testing and thus is extra careful about timestamp arithmetics. + """ + required_parameter_unset = (previous_attempt_number is _unset or + delay_since_first_attempt is _unset) + if required_parameter_unset: + raise _make_unset_exception( + 'wait/stop', + previous_attempt_number=previous_attempt_number, + delay_since_first_attempt=delay_since_first_attempt) + + from tenacity import RetryCallState + retry_state = RetryCallState(None, None, (), {}) + retry_state.attempt_number = previous_attempt_number + if last_result is not None: + retry_state.outcome = last_result + else: + retry_state.set_result(None) + _set_delay_since_start(retry_state, delay_since_first_attempt) + return retry_state + + +def func_takes_last_result(waiter): + """Check if function has a "last_result" parameter. + + Needed to provide backward compatibility for wait functions that didn't + take "last_result" in the beginning. + """ + if not six.callable(waiter): + return False + if not inspect.isfunction(waiter) and not inspect.ismethod(waiter): + # waiter is a class, check dunder-call rather than dunder-init. + waiter = waiter.__call__ + waiter_spec = _utils.getargspec(waiter) + return 'last_result' in waiter_spec.args + + +def stop_dunder_call_accept_old_params(fn): + """Decorate cls.__call__ method to accept old "stop" signature.""" + @_utils.wraps(fn) + def new_fn(self, + previous_attempt_number=_unset, + delay_since_first_attempt=_unset, + retry_state=None): + if retry_state is None: + from tenacity import RetryCallState + retry_state_passed_as_non_kwarg = ( + previous_attempt_number is not _unset and + isinstance(previous_attempt_number, RetryCallState)) + if retry_state_passed_as_non_kwarg: + retry_state = previous_attempt_number + else: + warn_about_dunder_non_retry_state_deprecation(fn, stacklevel=2) + retry_state = make_retry_state( + previous_attempt_number=previous_attempt_number, + delay_since_first_attempt=delay_since_first_attempt) + return fn(self, retry_state=retry_state) + return new_fn + + +def stop_func_accept_retry_state(stop_func): + """Wrap "stop" function to accept "retry_state" parameter.""" + if not six.callable(stop_func): + return stop_func + + if func_takes_retry_state(stop_func): + return stop_func + + @_utils.wraps(stop_func) + def wrapped_stop_func(retry_state): + warn_about_non_retry_state_deprecation( + 'stop', stop_func, stacklevel=4) + return stop_func( + retry_state.attempt_number, + retry_state.seconds_since_start, + ) + return wrapped_stop_func + + +def wait_dunder_call_accept_old_params(fn): + """Decorate cls.__call__ method to accept old "wait" signature.""" + @_utils.wraps(fn) + def new_fn(self, + previous_attempt_number=_unset, + delay_since_first_attempt=_unset, + last_result=None, + retry_state=None): + if retry_state is None: + from tenacity import RetryCallState + retry_state_passed_as_non_kwarg = ( + previous_attempt_number is not _unset and + isinstance(previous_attempt_number, RetryCallState)) + if 
retry_state_passed_as_non_kwarg: + retry_state = previous_attempt_number + else: + warn_about_dunder_non_retry_state_deprecation(fn, stacklevel=2) + retry_state = make_retry_state( + previous_attempt_number=previous_attempt_number, + delay_since_first_attempt=delay_since_first_attempt, + last_result=last_result) + return fn(self, retry_state=retry_state) + return new_fn + + +def wait_func_accept_retry_state(wait_func): + """Wrap wait function to accept "retry_state" parameter.""" + if not six.callable(wait_func): + return wait_func + + if func_takes_retry_state(wait_func): + return wait_func + + if func_takes_last_result(wait_func): + @_utils.wraps(wait_func) + def wrapped_wait_func(retry_state): + warn_about_non_retry_state_deprecation( + 'wait', wait_func, stacklevel=4) + return wait_func( + retry_state.attempt_number, + retry_state.seconds_since_start, + last_result=retry_state.outcome, + ) + else: + @_utils.wraps(wait_func) + def wrapped_wait_func(retry_state): + warn_about_non_retry_state_deprecation( + 'wait', wait_func, stacklevel=4) + return wait_func( + retry_state.attempt_number, + retry_state.seconds_since_start, + ) + return wrapped_wait_func + + +def retry_dunder_call_accept_old_params(fn): + """Decorate cls.__call__ method to accept old "retry" signature.""" + @_utils.wraps(fn) + def new_fn(self, attempt=_unset, retry_state=None): + if retry_state is None: + from tenacity import RetryCallState + if attempt is _unset: + raise _make_unset_exception('retry', attempt=attempt) + retry_state_passed_as_non_kwarg = ( + attempt is not _unset and + isinstance(attempt, RetryCallState)) + if retry_state_passed_as_non_kwarg: + retry_state = attempt + else: + warn_about_dunder_non_retry_state_deprecation(fn, stacklevel=2) + retry_state = RetryCallState(None, None, (), {}) + retry_state.outcome = attempt + return fn(self, retry_state=retry_state) + return new_fn + + +def retry_func_accept_retry_state(retry_func): + """Wrap "retry" function to accept "retry_state" parameter.""" + if not six.callable(retry_func): + return retry_func + + if func_takes_retry_state(retry_func): + return retry_func + + @_utils.wraps(retry_func) + def wrapped_retry_func(retry_state): + warn_about_non_retry_state_deprecation( + 'retry', retry_func, stacklevel=4) + return retry_func(retry_state.outcome) + return wrapped_retry_func + + +def before_func_accept_retry_state(fn): + """Wrap "before" function to accept "retry_state".""" + if not six.callable(fn): + return fn + + if func_takes_retry_state(fn): + return fn + + @_utils.wraps(fn) + def wrapped_before_func(retry_state): + # func, trial_number, trial_time_taken + warn_about_non_retry_state_deprecation('before', fn, stacklevel=4) + return fn( + retry_state.fn, + retry_state.attempt_number, + ) + return wrapped_before_func + + +def after_func_accept_retry_state(fn): + """Wrap "after" function to accept "retry_state".""" + if not six.callable(fn): + return fn + + if func_takes_retry_state(fn): + return fn + + @_utils.wraps(fn) + def wrapped_after_sleep_func(retry_state): + # func, trial_number, trial_time_taken + warn_about_non_retry_state_deprecation('after', fn, stacklevel=4) + return fn( + retry_state.fn, + retry_state.attempt_number, + retry_state.seconds_since_start) + return wrapped_after_sleep_func + + +def before_sleep_func_accept_retry_state(fn): + """Wrap "before_sleep" function to accept "retry_state".""" + if not six.callable(fn): + return fn + + if func_takes_retry_state(fn): + return fn + + @_utils.wraps(fn) + def 
wrapped_before_sleep_func(retry_state): + # retry_object, sleep, last_result + warn_about_non_retry_state_deprecation( + 'before_sleep', fn, stacklevel=4) + return fn( + retry_state.retry_object, + sleep=getattr(retry_state.next_action, 'sleep'), + last_result=retry_state.outcome) + return wrapped_before_sleep_func + + +def retry_error_callback_accept_retry_state(fn): + if not six.callable(fn): + return fn + + if func_takes_retry_state(fn): + return fn + + @_utils.wraps(fn) + def wrapped_retry_error_callback(retry_state): + warn_about_non_retry_state_deprecation( + 'retry_error_callback', fn, stacklevel=4) + return fn(retry_state.outcome) + return wrapped_retry_error_callback diff --git a/src/pip/_vendor/tenacity/nap.py b/src/pip/_vendor/tenacity/nap.py new file mode 100644 index 00000000000..e5bd8dc485e --- /dev/null +++ b/src/pip/_vendor/tenacity/nap.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 Étienne Bersac +# Copyright 2016 Julien Danjou +# Copyright 2016 Joshua Harlow +# Copyright 2013-2014 Ray Holder +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time + +#: Default sleep strategy. +sleep = time.sleep + + +class sleep_using_event(object): + """Sleep strategy that waits on an event to be set.""" + + def __init__(self, event): + self.event = event + + def __call__(self, timeout): + # NOTE(harlowja): this may *not* actually wait for timeout + # seconds if the event is set (ie this may eject out early). + self.event.wait(timeout=timeout) diff --git a/src/pip/_vendor/tenacity/retry.py b/src/pip/_vendor/tenacity/retry.py new file mode 100644 index 00000000000..8e4fab322cb --- /dev/null +++ b/src/pip/_vendor/tenacity/retry.py @@ -0,0 +1,187 @@ +# Copyright 2016 Julien Danjou +# Copyright 2016 Joshua Harlow +# Copyright 2013-2014 Ray Holder +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import abc +import re + +import six + +from tenacity import compat as _compat + + +@six.add_metaclass(abc.ABCMeta) +class retry_base(object): + """Abstract base class for retry strategies.""" + + @abc.abstractmethod + def __call__(self, retry_state): + pass + + def __and__(self, other): + return retry_all(self, other) + + def __or__(self, other): + return retry_any(self, other) + + +class _retry_never(retry_base): + """Retry strategy that never rejects any result.""" + + def __call__(self, retry_state): + return False + + +retry_never = _retry_never() + + +class _retry_always(retry_base): + """Retry strategy that always rejects any result.""" + + def __call__(self, retry_state): + return True + + +retry_always = _retry_always() + + +class retry_if_exception(retry_base): + """Retry strategy that retries if an exception verifies a predicate.""" + + def __init__(self, predicate): + self.predicate = predicate + + @_compat.retry_dunder_call_accept_old_params + def __call__(self, retry_state): + if retry_state.outcome.failed: + return self.predicate(retry_state.outcome.exception()) + + +class retry_if_exception_type(retry_if_exception): + """Retries if an exception has been raised of one or more types.""" + + def __init__(self, exception_types=Exception): + self.exception_types = exception_types + super(retry_if_exception_type, self).__init__( + lambda e: isinstance(e, exception_types)) + + +class retry_unless_exception_type(retry_if_exception): + """Retries until an exception is raised of one or more types.""" + + def __init__(self, exception_types=Exception): + self.exception_types = exception_types + super(retry_unless_exception_type, self).__init__( + lambda e: not isinstance(e, exception_types)) + + @_compat.retry_dunder_call_accept_old_params + def __call__(self, retry_state): + # always retry if no exception was raised + if not retry_state.outcome.failed: + return True + return self.predicate(retry_state.outcome.exception()) + + +class retry_if_result(retry_base): + """Retries if the result verifies a predicate.""" + + def __init__(self, predicate): + self.predicate = predicate + + @_compat.retry_dunder_call_accept_old_params + def __call__(self, retry_state): + if not retry_state.outcome.failed: + return self.predicate(retry_state.outcome.result()) + + +class retry_if_not_result(retry_base): + """Retries if the result refutes a predicate.""" + + def __init__(self, predicate): + self.predicate = predicate + + @_compat.retry_dunder_call_accept_old_params + def __call__(self, retry_state): + if not retry_state.outcome.failed: + return not self.predicate(retry_state.outcome.result()) + + +class retry_if_exception_message(retry_if_exception): + """Retries if an exception message equals or matches.""" + + def __init__(self, message=None, match=None): + if message and match: + raise TypeError( + "{}() takes either 'message' or 'match', not both".format( + self.__class__.__name__)) + + # set predicate + if message: + def message_fnc(exception): + return message == str(exception) + predicate = message_fnc + elif match: + prog = re.compile(match) + + def match_fnc(exception): + return prog.match(str(exception)) + predicate = match_fnc + else: + raise TypeError( + "{}() missing 1 required argument 'message' or 'match'". 
+ format(self.__class__.__name__)) + + super(retry_if_exception_message, self).__init__(predicate) + + +class retry_if_not_exception_message(retry_if_exception_message): + """Retries until an exception message equals or matches.""" + + def __init__(self, *args, **kwargs): + super(retry_if_not_exception_message, self).__init__(*args, **kwargs) + # invert predicate + if_predicate = self.predicate + self.predicate = lambda *args_, **kwargs_: not if_predicate( + *args_, **kwargs_) + + @_compat.retry_dunder_call_accept_old_params + def __call__(self, retry_state): + if not retry_state.outcome.failed: + return True + return self.predicate(retry_state.outcome.exception()) + + +class retry_any(retry_base): + """Retries if any of the retries condition is valid.""" + + def __init__(self, *retries): + self.retries = tuple(_compat.retry_func_accept_retry_state(r) + for r in retries) + + @_compat.retry_dunder_call_accept_old_params + def __call__(self, retry_state): + return any(r(retry_state) for r in self.retries) + + +class retry_all(retry_base): + """Retries if all the retries condition are valid.""" + + def __init__(self, *retries): + self.retries = tuple(_compat.retry_func_accept_retry_state(r) + for r in retries) + + @_compat.retry_dunder_call_accept_old_params + def __call__(self, retry_state): + return all(r(retry_state) for r in self.retries) diff --git a/src/pip/_vendor/tenacity/stop.py b/src/pip/_vendor/tenacity/stop.py new file mode 100644 index 00000000000..b58740925cd --- /dev/null +++ b/src/pip/_vendor/tenacity/stop.py @@ -0,0 +1,103 @@ +# Copyright 2016 Julien Danjou +# Copyright 2016 Joshua Harlow +# Copyright 2013-2014 Ray Holder +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import abc + +import six + +from tenacity import compat as _compat + + +@six.add_metaclass(abc.ABCMeta) +class stop_base(object): + """Abstract base class for stop strategies.""" + + @abc.abstractmethod + def __call__(self, retry_state): + pass + + def __and__(self, other): + return stop_all(self, other) + + def __or__(self, other): + return stop_any(self, other) + + +class stop_any(stop_base): + """Stop if any of the stop condition is valid.""" + + def __init__(self, *stops): + self.stops = tuple(_compat.stop_func_accept_retry_state(stop_func) + for stop_func in stops) + + @_compat.stop_dunder_call_accept_old_params + def __call__(self, retry_state): + return any(x(retry_state) for x in self.stops) + + +class stop_all(stop_base): + """Stop if all the stop conditions are valid.""" + + def __init__(self, *stops): + self.stops = tuple(_compat.stop_func_accept_retry_state(stop_func) + for stop_func in stops) + + @_compat.stop_dunder_call_accept_old_params + def __call__(self, retry_state): + return all(x(retry_state) for x in self.stops) + + +class _stop_never(stop_base): + """Never stop.""" + + @_compat.stop_dunder_call_accept_old_params + def __call__(self, retry_state): + return False + + +stop_never = _stop_never() + + +class stop_when_event_set(stop_base): + """Stop when the given event is set.""" + + def __init__(self, event): + self.event = event + + @_compat.stop_dunder_call_accept_old_params + def __call__(self, retry_state): + return self.event.is_set() + + +class stop_after_attempt(stop_base): + """Stop when the previous attempt >= max_attempt.""" + + def __init__(self, max_attempt_number): + self.max_attempt_number = max_attempt_number + + @_compat.stop_dunder_call_accept_old_params + def __call__(self, retry_state): + return retry_state.attempt_number >= self.max_attempt_number + + +class stop_after_delay(stop_base): + """Stop when the time from the first attempt >= limit.""" + + def __init__(self, max_delay): + self.max_delay = max_delay + + @_compat.stop_dunder_call_accept_old_params + def __call__(self, retry_state): + return retry_state.seconds_since_start >= self.max_delay diff --git a/src/pip/_vendor/tenacity/tornadoweb.py b/src/pip/_vendor/tenacity/tornadoweb.py new file mode 100644 index 00000000000..43b4caf9541 --- /dev/null +++ b/src/pip/_vendor/tenacity/tornadoweb.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 Elisey Zanko +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys + +from tenacity import BaseRetrying +from tenacity import DoAttempt +from tenacity import DoSleep +from tenacity import RetryCallState + +from tornado import gen + + +class TornadoRetrying(BaseRetrying): + + def __init__(self, + sleep=gen.sleep, + **kwargs): + super(TornadoRetrying, self).__init__(**kwargs) + self.sleep = sleep + + @gen.coroutine + def call(self, fn, *args, **kwargs): + self.begin(fn) + + retry_state = RetryCallState( + retry_object=self, fn=fn, args=args, kwargs=kwargs) + while True: + do = self.iter(retry_state=retry_state) + if isinstance(do, DoAttempt): + try: + result = yield fn(*args, **kwargs) + except BaseException: + retry_state.set_exception(sys.exc_info()) + else: + retry_state.set_result(result) + elif isinstance(do, DoSleep): + retry_state.prepare_for_next_attempt() + yield self.sleep(do) + else: + raise gen.Return(do) diff --git a/src/pip/_vendor/tenacity/wait.py b/src/pip/_vendor/tenacity/wait.py new file mode 100644 index 00000000000..d3c835f26e8 --- /dev/null +++ b/src/pip/_vendor/tenacity/wait.py @@ -0,0 +1,195 @@ +# Copyright 2016 Julien Danjou +# Copyright 2016 Joshua Harlow +# Copyright 2013-2014 Ray Holder +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import random + +import six + +from tenacity import _utils +from tenacity import compat as _compat + + +@six.add_metaclass(abc.ABCMeta) +class wait_base(object): + """Abstract base class for wait strategies.""" + + @abc.abstractmethod + def __call__(self, retry_state): + pass + + def __add__(self, other): + return wait_combine(self, other) + + def __radd__(self, other): + # make it possible to use multiple waits with the built-in sum function + if other == 0: + return self + return self.__add__(other) + + +class wait_fixed(wait_base): + """Wait strategy that waits a fixed amount of time between each retry.""" + + def __init__(self, wait): + self.wait_fixed = wait + + @_compat.wait_dunder_call_accept_old_params + def __call__(self, retry_state): + return self.wait_fixed + + +class wait_none(wait_fixed): + """Wait strategy that doesn't wait at all before retrying.""" + + def __init__(self): + super(wait_none, self).__init__(0) + + +class wait_random(wait_base): + """Wait strategy that waits a random amount of time between min/max.""" + + def __init__(self, min=0, max=1): # noqa + self.wait_random_min = min + self.wait_random_max = max + + @_compat.wait_dunder_call_accept_old_params + def __call__(self, retry_state): + return (self.wait_random_min + + (random.random() * + (self.wait_random_max - self.wait_random_min))) + + +class wait_combine(wait_base): + """Combine several waiting strategies.""" + + def __init__(self, *strategies): + self.wait_funcs = tuple(_compat.wait_func_accept_retry_state(strategy) + for strategy in strategies) + + @_compat.wait_dunder_call_accept_old_params + def __call__(self, retry_state): + return sum(x(retry_state=retry_state) for x in self.wait_funcs) + + +class wait_chain(wait_base): + """Chain two or more waiting strategies. 
+ + If all strategies are exhausted, the very last strategy is used + thereafter. + + For example:: + + @retry(wait=wait_chain(*[wait_fixed(1) for i in range(3)] + + [wait_fixed(2) for j in range(5)] + + [wait_fixed(5) for k in range(4))) + def wait_chained(): + print("Wait 1s for 3 attempts, 2s for 5 attempts and 5s + thereafter.") + """ + + def __init__(self, *strategies): + self.strategies = [_compat.wait_func_accept_retry_state(strategy) + for strategy in strategies] + + @_compat.wait_dunder_call_accept_old_params + def __call__(self, retry_state): + wait_func_no = min(max(retry_state.attempt_number, 1), + len(self.strategies)) + wait_func = self.strategies[wait_func_no - 1] + return wait_func(retry_state=retry_state) + + +class wait_incrementing(wait_base): + """Wait an incremental amount of time after each attempt. + + Starting at a starting value and incrementing by a value for each attempt + (and restricting the upper limit to some maximum value). + """ + + def __init__(self, start=0, increment=100, max=_utils.MAX_WAIT): # noqa + self.start = start + self.increment = increment + self.max = max + + @_compat.wait_dunder_call_accept_old_params + def __call__(self, retry_state): + result = self.start + ( + self.increment * (retry_state.attempt_number - 1) + ) + return max(0, min(result, self.max)) + + +class wait_exponential(wait_base): + """Wait strategy that applies exponential backoff. + + It allows for a customized multiplier and an ability to restrict the + upper and lower limits to some maximum and minimum value. + + The intervals are fixed (i.e. there is no jitter), so this strategy is + suitable for balancing retries against latency when a required resource is + unavailable for an unknown duration, but *not* suitable for resolving + contention between multiple processes for a shared resource. Use + wait_random_exponential for the latter case. + """ + + def __init__(self, multiplier=1, max=_utils.MAX_WAIT, exp_base=2, min=0): # noqa + self.multiplier = multiplier + self.min = min + self.max = max + self.exp_base = exp_base + + @_compat.wait_dunder_call_accept_old_params + def __call__(self, retry_state): + try: + exp = self.exp_base ** (retry_state.attempt_number - 1) + result = self.multiplier * exp + except OverflowError: + return self.max + return max(max(0, self.min), min(result, self.max)) + + +class wait_random_exponential(wait_exponential): + """Random wait with exponentially widening window. + + An exponential backoff strategy used to mediate contention between multiple + uncoordinated processes for a shared resource in distributed systems. This + is the sense in which "exponential backoff" is meant in e.g. Ethernet + networking, and corresponds to the "Full Jitter" algorithm described in + this blog post: + + https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ + + Each retry occurs at a random time in a geometrically expanding interval. + It allows for a custom multiplier and an ability to restrict the upper + limit of the random interval to some maximum value. + + Example:: + + wait_random_exponential(multiplier=0.5, # initial window 0.5s + max=60) # max 60s timeout + + When waiting for an unavailable resource to become available again, as + opposed to trying to resolve contention for a shared resource, the + wait_exponential strategy (which uses a fixed interval) may be preferable. 
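To make the full-jitter behaviour described above concrete, here is a small standalone sketch that mirrors the computation performed by wait_exponential and wait_random_exponential (the function name and numbers are illustrative, not part of the vendored code)::

    import random

    def full_jitter_sleep(attempt_number, multiplier=0.5, exp_base=2, max_wait=60):
        # Upper bound grows as multiplier * exp_base ** (attempt - 1), capped
        # at max_wait; the actual sleep is drawn uniformly from [0, upper].
        upper = min(multiplier * exp_base ** (attempt_number - 1), max_wait)
        return random.uniform(0, upper)

    # Windows widen as 0.5s, 1s, 2s, 4s, ... whereas wait_exponential alone
    # would sleep for exactly those upper bounds (no jitter).
    for attempt in range(1, 5):
        print(attempt, round(full_jitter_sleep(attempt), 3))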
+ + """ + + @_compat.wait_dunder_call_accept_old_params + def __call__(self, retry_state): + high = super(wait_random_exponential, self).__call__( + retry_state=retry_state) + return random.uniform(0, high) From 73d57558aa4c3bf4a9adab9db7a75242cf7cfe36 Mon Sep 17 00:00:00 2001 From: Yeray Diaz Diaz Date: Mon, 13 Jan 2020 12:39:09 +0000 Subject: [PATCH 2/8] Replace usage of retrying with tenacity --- src/pip/_internal/utils/filesystem.py | 4 +- src/pip/_internal/utils/misc.py | 4 +- src/pip/_vendor/__init__.py | 2 +- src/pip/_vendor/retrying.LICENSE | 202 ------------------- src/pip/_vendor/retrying.py | 267 -------------------------- src/pip/_vendor/retrying.pyi | 1 - src/pip/_vendor/vendor.txt | 2 +- 7 files changed, 6 insertions(+), 476 deletions(-) delete mode 100644 src/pip/_vendor/retrying.LICENSE delete mode 100644 src/pip/_vendor/retrying.py delete mode 100644 src/pip/_vendor/retrying.pyi diff --git a/src/pip/_internal/utils/filesystem.py b/src/pip/_internal/utils/filesystem.py index 7e1e3c8c7a5..d590689cdfa 100644 --- a/src/pip/_internal/utils/filesystem.py +++ b/src/pip/_internal/utils/filesystem.py @@ -10,7 +10,7 @@ # NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is # why we ignore the type on this import. -from pip._vendor.retrying import retry # type: ignore +from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed # type: ignore from pip._vendor.six import PY2 from pip._internal.utils.compat import get_path_uid @@ -101,7 +101,7 @@ def adjacent_tmp_file(path): os.fsync(result.file.fileno()) -_replace_retry = retry(stop_max_delay=1000, wait_fixed=250) +_replace_retry = retry(stop=stop_after_delay(1000), wait=wait_fixed(250)) if PY2: @_replace_retry diff --git a/src/pip/_internal/utils/misc.py b/src/pip/_internal/utils/misc.py index 4a581601991..0a273fff4b0 100644 --- a/src/pip/_internal/utils/misc.py +++ b/src/pip/_internal/utils/misc.py @@ -20,7 +20,7 @@ from pip._vendor import pkg_resources # NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is # why we ignore the type on this import. 
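For readers following the call-site changes in this patch, the translation from retrying keyword arguments to tenacity strategy objects looks roughly like the sketch below (the function name and values are illustrative; tenacity's stop_after_delay and wait_fixed take seconds rather than milliseconds, a detail a later commit in this series corrects)::

    from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed

    # retrying (removed):  @retry(stop_max_delay=3000, wait_fixed=500)  # ms
    # tenacity (added):    strategy objects instead of flat keyword arguments
    @retry(stop=stop_after_delay(3), wait=wait_fixed(0.5))  # seconds
    def remove_tree_with_retries():
        """Hypothetical stand-in for the pip helpers patched above."""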
-from pip._vendor.retrying import retry # type: ignore +from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed # type: ignore from pip._vendor.six import PY2, text_type from pip._vendor.six.moves import input from pip._vendor.six.moves.urllib import parse as urllib_parse @@ -129,7 +129,7 @@ def get_prog(): # Retry every half second for up to 3 seconds -@retry(stop_max_delay=3000, wait_fixed=500) +@retry(stop=stop_after_delay(3000), wait=wait_fixed(500)) def rmtree(dir, ignore_errors=False): # type: (str, bool) -> None shutil.rmtree(dir, ignore_errors=ignore_errors, diff --git a/src/pip/_vendor/__init__.py b/src/pip/_vendor/__init__.py index a0fcb8e2cc4..ec885c108fd 100644 --- a/src/pip/_vendor/__init__.py +++ b/src/pip/_vendor/__init__.py @@ -75,7 +75,6 @@ def vendored(modulename): vendored("pkg_resources") vendored("progress") vendored("pytoml") - vendored("retrying") vendored("requests") vendored("requests.exceptions") vendored("requests.packages") @@ -106,4 +105,5 @@ def vendored(modulename): vendored("requests.packages.urllib3.util.ssl_") vendored("requests.packages.urllib3.util.timeout") vendored("requests.packages.urllib3.util.url") + vendored("tenacity") vendored("urllib3") diff --git a/src/pip/_vendor/retrying.LICENSE b/src/pip/_vendor/retrying.LICENSE deleted file mode 100644 index 7a4a3ea2424..00000000000 --- a/src/pip/_vendor/retrying.LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/src/pip/_vendor/retrying.py b/src/pip/_vendor/retrying.py deleted file mode 100644 index 6d1e627aae8..00000000000 --- a/src/pip/_vendor/retrying.py +++ /dev/null @@ -1,267 +0,0 @@ -## Copyright 2013-2014 Ray Holder -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. -## You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, -## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -## See the License for the specific language governing permissions and -## limitations under the License. - -import random -from pip._vendor import six -import sys -import time -import traceback - - -# sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint... 
-MAX_WAIT = 1073741823 - - -def retry(*dargs, **dkw): - """ - Decorator function that instantiates the Retrying object - @param *dargs: positional arguments passed to Retrying object - @param **dkw: keyword arguments passed to the Retrying object - """ - # support both @retry and @retry() as valid syntax - if len(dargs) == 1 and callable(dargs[0]): - def wrap_simple(f): - - @six.wraps(f) - def wrapped_f(*args, **kw): - return Retrying().call(f, *args, **kw) - - return wrapped_f - - return wrap_simple(dargs[0]) - - else: - def wrap(f): - - @six.wraps(f) - def wrapped_f(*args, **kw): - return Retrying(*dargs, **dkw).call(f, *args, **kw) - - return wrapped_f - - return wrap - - -class Retrying(object): - - def __init__(self, - stop=None, wait=None, - stop_max_attempt_number=None, - stop_max_delay=None, - wait_fixed=None, - wait_random_min=None, wait_random_max=None, - wait_incrementing_start=None, wait_incrementing_increment=None, - wait_exponential_multiplier=None, wait_exponential_max=None, - retry_on_exception=None, - retry_on_result=None, - wrap_exception=False, - stop_func=None, - wait_func=None, - wait_jitter_max=None): - - self._stop_max_attempt_number = 5 if stop_max_attempt_number is None else stop_max_attempt_number - self._stop_max_delay = 100 if stop_max_delay is None else stop_max_delay - self._wait_fixed = 1000 if wait_fixed is None else wait_fixed - self._wait_random_min = 0 if wait_random_min is None else wait_random_min - self._wait_random_max = 1000 if wait_random_max is None else wait_random_max - self._wait_incrementing_start = 0 if wait_incrementing_start is None else wait_incrementing_start - self._wait_incrementing_increment = 100 if wait_incrementing_increment is None else wait_incrementing_increment - self._wait_exponential_multiplier = 1 if wait_exponential_multiplier is None else wait_exponential_multiplier - self._wait_exponential_max = MAX_WAIT if wait_exponential_max is None else wait_exponential_max - self._wait_jitter_max = 0 if wait_jitter_max is None else wait_jitter_max - - # TODO add chaining of stop behaviors - # stop behavior - stop_funcs = [] - if stop_max_attempt_number is not None: - stop_funcs.append(self.stop_after_attempt) - - if stop_max_delay is not None: - stop_funcs.append(self.stop_after_delay) - - if stop_func is not None: - self.stop = stop_func - - elif stop is None: - self.stop = lambda attempts, delay: any(f(attempts, delay) for f in stop_funcs) - - else: - self.stop = getattr(self, stop) - - # TODO add chaining of wait behaviors - # wait behavior - wait_funcs = [lambda *args, **kwargs: 0] - if wait_fixed is not None: - wait_funcs.append(self.fixed_sleep) - - if wait_random_min is not None or wait_random_max is not None: - wait_funcs.append(self.random_sleep) - - if wait_incrementing_start is not None or wait_incrementing_increment is not None: - wait_funcs.append(self.incrementing_sleep) - - if wait_exponential_multiplier is not None or wait_exponential_max is not None: - wait_funcs.append(self.exponential_sleep) - - if wait_func is not None: - self.wait = wait_func - - elif wait is None: - self.wait = lambda attempts, delay: max(f(attempts, delay) for f in wait_funcs) - - else: - self.wait = getattr(self, wait) - - # retry on exception filter - if retry_on_exception is None: - self._retry_on_exception = self.always_reject - else: - self._retry_on_exception = retry_on_exception - - # TODO simplify retrying by Exception types - # retry on result filter - if retry_on_result is None: - self._retry_on_result = self.never_reject - else: - 
self._retry_on_result = retry_on_result - - self._wrap_exception = wrap_exception - - def stop_after_attempt(self, previous_attempt_number, delay_since_first_attempt_ms): - """Stop after the previous attempt >= stop_max_attempt_number.""" - return previous_attempt_number >= self._stop_max_attempt_number - - def stop_after_delay(self, previous_attempt_number, delay_since_first_attempt_ms): - """Stop after the time from the first attempt >= stop_max_delay.""" - return delay_since_first_attempt_ms >= self._stop_max_delay - - def no_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): - """Don't sleep at all before retrying.""" - return 0 - - def fixed_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): - """Sleep a fixed amount of time between each retry.""" - return self._wait_fixed - - def random_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): - """Sleep a random amount of time between wait_random_min and wait_random_max""" - return random.randint(self._wait_random_min, self._wait_random_max) - - def incrementing_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): - """ - Sleep an incremental amount of time after each attempt, starting at - wait_incrementing_start and incrementing by wait_incrementing_increment - """ - result = self._wait_incrementing_start + (self._wait_incrementing_increment * (previous_attempt_number - 1)) - if result < 0: - result = 0 - return result - - def exponential_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): - exp = 2 ** previous_attempt_number - result = self._wait_exponential_multiplier * exp - if result > self._wait_exponential_max: - result = self._wait_exponential_max - if result < 0: - result = 0 - return result - - def never_reject(self, result): - return False - - def always_reject(self, result): - return True - - def should_reject(self, attempt): - reject = False - if attempt.has_exception: - reject |= self._retry_on_exception(attempt.value[1]) - else: - reject |= self._retry_on_result(attempt.value) - - return reject - - def call(self, fn, *args, **kwargs): - start_time = int(round(time.time() * 1000)) - attempt_number = 1 - while True: - try: - attempt = Attempt(fn(*args, **kwargs), attempt_number, False) - except: - tb = sys.exc_info() - attempt = Attempt(tb, attempt_number, True) - - if not self.should_reject(attempt): - return attempt.get(self._wrap_exception) - - delay_since_first_attempt_ms = int(round(time.time() * 1000)) - start_time - if self.stop(attempt_number, delay_since_first_attempt_ms): - if not self._wrap_exception and attempt.has_exception: - # get() on an attempt with an exception should cause it to be raised, but raise just in case - raise attempt.get() - else: - raise RetryError(attempt) - else: - sleep = self.wait(attempt_number, delay_since_first_attempt_ms) - if self._wait_jitter_max: - jitter = random.random() * self._wait_jitter_max - sleep = sleep + max(0, jitter) - time.sleep(sleep / 1000.0) - - attempt_number += 1 - - -class Attempt(object): - """ - An Attempt encapsulates a call to a target function that may end as a - normal return value from the function or an Exception depending on what - occurred during the execution. - """ - - def __init__(self, value, attempt_number, has_exception): - self.value = value - self.attempt_number = attempt_number - self.has_exception = has_exception - - def get(self, wrap_exception=False): - """ - Return the return value of this Attempt instance or raise an Exception. 
- If wrap_exception is true, this Attempt is wrapped inside of a - RetryError before being raised. - """ - if self.has_exception: - if wrap_exception: - raise RetryError(self) - else: - six.reraise(self.value[0], self.value[1], self.value[2]) - else: - return self.value - - def __repr__(self): - if self.has_exception: - return "Attempts: {0}, Error:\n{1}".format(self.attempt_number, "".join(traceback.format_tb(self.value[2]))) - else: - return "Attempts: {0}, Value: {1}".format(self.attempt_number, self.value) - - -class RetryError(Exception): - """ - A RetryError encapsulates the last Attempt instance right before giving up. - """ - - def __init__(self, last_attempt): - self.last_attempt = last_attempt - - def __str__(self): - return "RetryError[{0}]".format(self.last_attempt) diff --git a/src/pip/_vendor/retrying.pyi b/src/pip/_vendor/retrying.pyi deleted file mode 100644 index 90f20c6dbc1..00000000000 --- a/src/pip/_vendor/retrying.pyi +++ /dev/null @@ -1 +0,0 @@ -from retrying import * \ No newline at end of file diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index d19ef2e51a8..1af84ff065f 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -17,7 +17,7 @@ requests==2.22.0 chardet==3.0.4 idna==2.8 urllib3==1.25.6 -retrying==1.3.3 +tenacity==6.0.0 setuptools==41.4.0 six==1.12.0 webencodings==0.5.1 From 7d54fdb653cd009ba930e0c62b81559c0b1e98a9 Mon Sep 17 00:00:00 2001 From: Yeray Diaz Diaz Date: Mon, 13 Jan 2020 13:02:38 +0000 Subject: [PATCH 3/8] Adapt tenacity code to vendoring --- src/pip/_vendor/tenacity/__init__.py | 10 +++++----- src/pip/_vendor/tenacity/_asyncio.py | 8 ++++---- src/pip/_vendor/tenacity/_utils.py | 2 +- src/pip/_vendor/tenacity/after.py | 2 +- src/pip/_vendor/tenacity/before.py | 2 +- src/pip/_vendor/tenacity/before_sleep.py | 2 +- src/pip/_vendor/tenacity/compat.py | 12 ++++++------ src/pip/_vendor/tenacity/retry.py | 4 ++-- src/pip/_vendor/tenacity/stop.py | 4 ++-- src/pip/_vendor/tenacity/tornadoweb.py | 8 ++++---- src/pip/_vendor/tenacity/wait.py | 6 +++--- 11 files changed, 30 insertions(+), 30 deletions(-) diff --git a/src/pip/_vendor/tenacity/__init__.py b/src/pip/_vendor/tenacity/__init__.py index 3718a5e5362..d4c2931d79c 100644 --- a/src/pip/_vendor/tenacity/__init__.py +++ b/src/pip/_vendor/tenacity/__init__.py @@ -31,10 +31,10 @@ import threading from concurrent import futures -import six +import pip._vendor.six as six -from tenacity import _utils -from tenacity import compat as _compat +from . import _utils +from . import compat as _compat # Import all built-in retry strategies for easier usage. from .retry import retry_all # noqa @@ -480,7 +480,7 @@ def set_exception(self, exc_info): if asyncio: - from tenacity._asyncio import AsyncRetrying + from ._asyncio import AsyncRetrying if tornado: - from tenacity.tornadoweb import TornadoRetrying + from .tornadoweb import TornadoRetrying diff --git a/src/pip/_vendor/tenacity/_asyncio.py b/src/pip/_vendor/tenacity/_asyncio.py index 035699d2700..2904513bc17 100644 --- a/src/pip/_vendor/tenacity/_asyncio.py +++ b/src/pip/_vendor/tenacity/_asyncio.py @@ -23,10 +23,10 @@ import sys -from tenacity import BaseRetrying -from tenacity import DoAttempt -from tenacity import DoSleep -from tenacity import RetryCallState +from . import BaseRetrying +from . import DoAttempt +from . import DoSleep +from . 
import RetryCallState if asyncio: diff --git a/src/pip/_vendor/tenacity/_utils.py b/src/pip/_vendor/tenacity/_utils.py index 6703bd9c903..cf91710f369 100644 --- a/src/pip/_vendor/tenacity/_utils.py +++ b/src/pip/_vendor/tenacity/_utils.py @@ -19,7 +19,7 @@ import time from functools import update_wrapper -import six +import pip._vendor.six as six # sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint... try: diff --git a/src/pip/_vendor/tenacity/after.py b/src/pip/_vendor/tenacity/after.py index 55522c99b6a..ce875248184 100644 --- a/src/pip/_vendor/tenacity/after.py +++ b/src/pip/_vendor/tenacity/after.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tenacity import _utils +from . import _utils def after_nothing(retry_state): diff --git a/src/pip/_vendor/tenacity/before.py b/src/pip/_vendor/tenacity/before.py index 54259dddff8..068e116b045 100644 --- a/src/pip/_vendor/tenacity/before.py +++ b/src/pip/_vendor/tenacity/before.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tenacity import _utils +from . import _utils def before_nothing(retry_state): diff --git a/src/pip/_vendor/tenacity/before_sleep.py b/src/pip/_vendor/tenacity/before_sleep.py index b5fd6016c4f..9e7aa513f17 100644 --- a/src/pip/_vendor/tenacity/before_sleep.py +++ b/src/pip/_vendor/tenacity/before_sleep.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tenacity import _utils +from . import _utils def before_sleep_nothing(retry_state): diff --git a/src/pip/_vendor/tenacity/compat.py b/src/pip/_vendor/tenacity/compat.py index 026c248d5eb..bccbd23b943 100644 --- a/src/pip/_vendor/tenacity/compat.py +++ b/src/pip/_vendor/tenacity/compat.py @@ -4,9 +4,9 @@ from fractions import Fraction from warnings import warn -import six +import pip._vendor.six as six -from tenacity import _utils +from . import _utils def warn_about_non_retry_state_deprecation(cbname, func, stacklevel): @@ -68,7 +68,7 @@ def make_retry_state(previous_attempt_number, delay_since_first_attempt, previous_attempt_number=previous_attempt_number, delay_since_first_attempt=delay_since_first_attempt) - from tenacity import RetryCallState + from . import RetryCallState retry_state = RetryCallState(None, None, (), {}) retry_state.attempt_number = previous_attempt_number if last_result is not None: @@ -102,7 +102,7 @@ def new_fn(self, delay_since_first_attempt=_unset, retry_state=None): if retry_state is None: - from tenacity import RetryCallState + from . import RetryCallState retry_state_passed_as_non_kwarg = ( previous_attempt_number is not _unset and isinstance(previous_attempt_number, RetryCallState)) @@ -145,7 +145,7 @@ def new_fn(self, last_result=None, retry_state=None): if retry_state is None: - from tenacity import RetryCallState + from . import RetryCallState retry_state_passed_as_non_kwarg = ( previous_attempt_number is not _unset and isinstance(previous_attempt_number, RetryCallState)) @@ -196,7 +196,7 @@ def retry_dunder_call_accept_old_params(fn): @_utils.wraps(fn) def new_fn(self, attempt=_unset, retry_state=None): if retry_state is None: - from tenacity import RetryCallState + from . 
import RetryCallState if attempt is _unset: raise _make_unset_exception('retry', attempt=attempt) retry_state_passed_as_non_kwarg = ( diff --git a/src/pip/_vendor/tenacity/retry.py b/src/pip/_vendor/tenacity/retry.py index 8e4fab322cb..9a214459c64 100644 --- a/src/pip/_vendor/tenacity/retry.py +++ b/src/pip/_vendor/tenacity/retry.py @@ -17,9 +17,9 @@ import abc import re -import six +import pip._vendor.six as six -from tenacity import compat as _compat +from . import compat as _compat @six.add_metaclass(abc.ABCMeta) diff --git a/src/pip/_vendor/tenacity/stop.py b/src/pip/_vendor/tenacity/stop.py index b58740925cd..c82e90888a8 100644 --- a/src/pip/_vendor/tenacity/stop.py +++ b/src/pip/_vendor/tenacity/stop.py @@ -15,9 +15,9 @@ # limitations under the License. import abc -import six +import pip._vendor.six as six -from tenacity import compat as _compat +from . import compat as _compat @six.add_metaclass(abc.ABCMeta) diff --git a/src/pip/_vendor/tenacity/tornadoweb.py b/src/pip/_vendor/tenacity/tornadoweb.py index 43b4caf9541..25099f3b23d 100644 --- a/src/pip/_vendor/tenacity/tornadoweb.py +++ b/src/pip/_vendor/tenacity/tornadoweb.py @@ -15,10 +15,10 @@ import sys -from tenacity import BaseRetrying -from tenacity import DoAttempt -from tenacity import DoSleep -from tenacity import RetryCallState +from . import BaseRetrying +from . import DoAttempt +from . import DoSleep +from . import RetryCallState from tornado import gen diff --git a/src/pip/_vendor/tenacity/wait.py b/src/pip/_vendor/tenacity/wait.py index d3c835f26e8..48e01852420 100644 --- a/src/pip/_vendor/tenacity/wait.py +++ b/src/pip/_vendor/tenacity/wait.py @@ -17,10 +17,10 @@ import abc import random -import six +import pip._vendor.six as six -from tenacity import _utils -from tenacity import compat as _compat +from . import _utils +from . 
import compat as _compat @six.add_metaclass(abc.ABCMeta) From 4ee21b5819f9b1756f58ef02e361ee399942b940 Mon Sep 17 00:00:00 2001 From: Yeray Diaz Diaz Date: Mon, 13 Jan 2020 15:52:53 +0000 Subject: [PATCH 4/8] Tenacity API fixes: - Use seconds instead of milliseconds - Reraise the original exception instead of RetryError --- src/pip/_internal/utils/filesystem.py | 4 +++- src/pip/_internal/utils/misc.py | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/pip/_internal/utils/filesystem.py b/src/pip/_internal/utils/filesystem.py index d590689cdfa..65f316f2d54 100644 --- a/src/pip/_internal/utils/filesystem.py +++ b/src/pip/_internal/utils/filesystem.py @@ -101,7 +101,9 @@ def adjacent_tmp_file(path): os.fsync(result.file.fileno()) -_replace_retry = retry(stop=stop_after_delay(1000), wait=wait_fixed(250)) +# Tenacity raises RetryError by default, explictly raise the original exception +_replace_retry = retry( + reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25)) if PY2: @_replace_retry diff --git a/src/pip/_internal/utils/misc.py b/src/pip/_internal/utils/misc.py index 0a273fff4b0..42566670b2c 100644 --- a/src/pip/_internal/utils/misc.py +++ b/src/pip/_internal/utils/misc.py @@ -129,7 +129,8 @@ def get_prog(): # Retry every half second for up to 3 seconds -@retry(stop=stop_after_delay(3000), wait=wait_fixed(500)) +# Tenacity raises RetryError by default, explictly raise the original exception +@retry(reraise=True, stop=stop_after_delay(3), wait=wait_fixed(0.5)) def rmtree(dir, ignore_errors=False): # type: (str, bool) -> None shutil.rmtree(dir, ignore_errors=ignore_errors, From 910fe22065e5c15146bc3ab08caaa5c631c6e254 Mon Sep 17 00:00:00 2001 From: Yeray Diaz Diaz Date: Mon, 13 Jan 2020 16:00:36 +0000 Subject: [PATCH 5/8] Fix isort issues --- src/pip/_internal/utils/filesystem.py | 10 +++++++--- src/pip/_internal/utils/misc.py | 10 +++++++--- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/src/pip/_internal/utils/filesystem.py b/src/pip/_internal/utils/filesystem.py index 65f316f2d54..3610f6eedd3 100644 --- a/src/pip/_internal/utils/filesystem.py +++ b/src/pip/_internal/utils/filesystem.py @@ -8,10 +8,14 @@ from contextlib import contextmanager from tempfile import NamedTemporaryFile -# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is -# why we ignore the type on this import. -from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed # type: ignore from pip._vendor.six import PY2 +# NOTE: tenacity is not annotated in typeshed as on 2020-01-13, which is +# why we ignore the type on this import. +from pip._vendor.tenacity import ( # type: ignore + retry, + stop_after_delay, + wait_fixed, +) from pip._internal.utils.compat import get_path_uid from pip._internal.utils.typing import MYPY_CHECK_RUNNING, cast diff --git a/src/pip/_internal/utils/misc.py b/src/pip/_internal/utils/misc.py index 42566670b2c..87bef54ee20 100644 --- a/src/pip/_internal/utils/misc.py +++ b/src/pip/_internal/utils/misc.py @@ -18,13 +18,17 @@ from collections import deque from pip._vendor import pkg_resources -# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is -# why we ignore the type on this import. 
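The reraise=True flag introduced here changes what surfaces once retries are exhausted: by default tenacity wraps the final failure in RetryError, while reraise=True re-raises the original exception. A hedged sketch of the difference against the vendored copy (the failing functions are placeholders)::

    from pip._vendor.tenacity import RetryError, retry, stop_after_attempt

    @retry(stop=stop_after_attempt(2))                # default behaviour
    def always_fails():
        raise OSError("boom")

    @retry(reraise=True, stop=stop_after_attempt(2))  # behaviour pip opts into
    def always_fails_reraise():
        raise OSError("boom")

    try:
        always_fails()
    except RetryError as exc:
        _ = exc.last_attempt   # the OSError is wrapped inside the RetryError

    try:
        always_fails_reraise()
    except OSError:
        pass                   # the original exception propagates unchanged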
-from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed # type: ignore from pip._vendor.six import PY2, text_type from pip._vendor.six.moves import input from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote +# NOTE: tenacity is not annotated in typeshed as on 2020-01-13, which is +# why we ignore the type on this import. +from pip._vendor.tenacity import ( # type: ignore + retry, + stop_after_delay, + wait_fixed, +) from pip import __version__ from pip._internal.exceptions import CommandError From 23c8808dcd5bb028874cfe5e96dce007ec49ac83 Mon Sep 17 00:00:00 2001 From: Yeray Diaz Diaz Date: Tue, 14 Jan 2020 11:40:22 +0000 Subject: [PATCH 6/8] Replace manually built tenacity with output from vendoring --- src/pip/_vendor/tenacity.pyi | 1 + src/pip/_vendor/tenacity/__init__.py | 10 +++++----- src/pip/_vendor/tenacity/_asyncio.py | 8 ++++---- src/pip/_vendor/tenacity/_utils.py | 2 +- src/pip/_vendor/tenacity/after.py | 2 +- src/pip/_vendor/tenacity/before.py | 2 +- src/pip/_vendor/tenacity/before_sleep.py | 2 +- src/pip/_vendor/tenacity/compat.py | 12 ++++++------ src/pip/_vendor/tenacity/retry.py | 4 ++-- src/pip/_vendor/tenacity/stop.py | 4 ++-- src/pip/_vendor/tenacity/tornadoweb.py | 8 ++++---- src/pip/_vendor/tenacity/wait.py | 6 +++--- 12 files changed, 31 insertions(+), 30 deletions(-) create mode 100644 src/pip/_vendor/tenacity.pyi diff --git a/src/pip/_vendor/tenacity.pyi b/src/pip/_vendor/tenacity.pyi new file mode 100644 index 00000000000..baf1de9dd9f --- /dev/null +++ b/src/pip/_vendor/tenacity.pyi @@ -0,0 +1 @@ +from tenacity import * \ No newline at end of file diff --git a/src/pip/_vendor/tenacity/__init__.py b/src/pip/_vendor/tenacity/__init__.py index d4c2931d79c..410c87cdb37 100644 --- a/src/pip/_vendor/tenacity/__init__.py +++ b/src/pip/_vendor/tenacity/__init__.py @@ -31,10 +31,10 @@ import threading from concurrent import futures -import pip._vendor.six as six +from pip._vendor import six -from . import _utils -from . import compat as _compat +from pip._vendor.tenacity import _utils +from pip._vendor.tenacity import compat as _compat # Import all built-in retry strategies for easier usage. from .retry import retry_all # noqa @@ -480,7 +480,7 @@ def set_exception(self, exc_info): if asyncio: - from ._asyncio import AsyncRetrying + from pip._vendor.tenacity._asyncio import AsyncRetrying if tornado: - from .tornadoweb import TornadoRetrying + from pip._vendor.tenacity.tornadoweb import TornadoRetrying diff --git a/src/pip/_vendor/tenacity/_asyncio.py b/src/pip/_vendor/tenacity/_asyncio.py index 2904513bc17..e7e2408b092 100644 --- a/src/pip/_vendor/tenacity/_asyncio.py +++ b/src/pip/_vendor/tenacity/_asyncio.py @@ -23,10 +23,10 @@ import sys -from . import BaseRetrying -from . import DoAttempt -from . import DoSleep -from . import RetryCallState +from pip._vendor.tenacity import BaseRetrying +from pip._vendor.tenacity import DoAttempt +from pip._vendor.tenacity import DoSleep +from pip._vendor.tenacity import RetryCallState if asyncio: diff --git a/src/pip/_vendor/tenacity/_utils.py b/src/pip/_vendor/tenacity/_utils.py index cf91710f369..365b11d4b16 100644 --- a/src/pip/_vendor/tenacity/_utils.py +++ b/src/pip/_vendor/tenacity/_utils.py @@ -19,7 +19,7 @@ import time from functools import update_wrapper -import pip._vendor.six as six +from pip._vendor import six # sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint... 
try: diff --git a/src/pip/_vendor/tenacity/after.py b/src/pip/_vendor/tenacity/after.py index ce875248184..8b6082c683a 100644 --- a/src/pip/_vendor/tenacity/after.py +++ b/src/pip/_vendor/tenacity/after.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import _utils +from pip._vendor.tenacity import _utils def after_nothing(retry_state): diff --git a/src/pip/_vendor/tenacity/before.py b/src/pip/_vendor/tenacity/before.py index 068e116b045..3eab08afb9d 100644 --- a/src/pip/_vendor/tenacity/before.py +++ b/src/pip/_vendor/tenacity/before.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import _utils +from pip._vendor.tenacity import _utils def before_nothing(retry_state): diff --git a/src/pip/_vendor/tenacity/before_sleep.py b/src/pip/_vendor/tenacity/before_sleep.py index 9e7aa513f17..90d7392008b 100644 --- a/src/pip/_vendor/tenacity/before_sleep.py +++ b/src/pip/_vendor/tenacity/before_sleep.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import _utils +from pip._vendor.tenacity import _utils def before_sleep_nothing(retry_state): diff --git a/src/pip/_vendor/tenacity/compat.py b/src/pip/_vendor/tenacity/compat.py index bccbd23b943..f5cca5ac310 100644 --- a/src/pip/_vendor/tenacity/compat.py +++ b/src/pip/_vendor/tenacity/compat.py @@ -4,9 +4,9 @@ from fractions import Fraction from warnings import warn -import pip._vendor.six as six +from pip._vendor import six -from . import _utils +from pip._vendor.tenacity import _utils def warn_about_non_retry_state_deprecation(cbname, func, stacklevel): @@ -68,7 +68,7 @@ def make_retry_state(previous_attempt_number, delay_since_first_attempt, previous_attempt_number=previous_attempt_number, delay_since_first_attempt=delay_since_first_attempt) - from . import RetryCallState + from pip._vendor.tenacity import RetryCallState retry_state = RetryCallState(None, None, (), {}) retry_state.attempt_number = previous_attempt_number if last_result is not None: @@ -102,7 +102,7 @@ def new_fn(self, delay_since_first_attempt=_unset, retry_state=None): if retry_state is None: - from . import RetryCallState + from pip._vendor.tenacity import RetryCallState retry_state_passed_as_non_kwarg = ( previous_attempt_number is not _unset and isinstance(previous_attempt_number, RetryCallState)) @@ -145,7 +145,7 @@ def new_fn(self, last_result=None, retry_state=None): if retry_state is None: - from . import RetryCallState + from pip._vendor.tenacity import RetryCallState retry_state_passed_as_non_kwarg = ( previous_attempt_number is not _unset and isinstance(previous_attempt_number, RetryCallState)) @@ -196,7 +196,7 @@ def retry_dunder_call_accept_old_params(fn): @_utils.wraps(fn) def new_fn(self, attempt=_unset, retry_state=None): if retry_state is None: - from . import RetryCallState + from pip._vendor.tenacity import RetryCallState if attempt is _unset: raise _make_unset_exception('retry', attempt=attempt) retry_state_passed_as_non_kwarg = ( diff --git a/src/pip/_vendor/tenacity/retry.py b/src/pip/_vendor/tenacity/retry.py index 9a214459c64..c6ef6e2ef79 100644 --- a/src/pip/_vendor/tenacity/retry.py +++ b/src/pip/_vendor/tenacity/retry.py @@ -17,9 +17,9 @@ import abc import re -import pip._vendor.six as six +from pip._vendor import six -from . 
import compat as _compat +from pip._vendor.tenacity import compat as _compat @six.add_metaclass(abc.ABCMeta) diff --git a/src/pip/_vendor/tenacity/stop.py b/src/pip/_vendor/tenacity/stop.py index c82e90888a8..a00c259b426 100644 --- a/src/pip/_vendor/tenacity/stop.py +++ b/src/pip/_vendor/tenacity/stop.py @@ -15,9 +15,9 @@ # limitations under the License. import abc -import pip._vendor.six as six +from pip._vendor import six -from . import compat as _compat +from pip._vendor.tenacity import compat as _compat @six.add_metaclass(abc.ABCMeta) diff --git a/src/pip/_vendor/tenacity/tornadoweb.py b/src/pip/_vendor/tenacity/tornadoweb.py index 25099f3b23d..eee1491ab77 100644 --- a/src/pip/_vendor/tenacity/tornadoweb.py +++ b/src/pip/_vendor/tenacity/tornadoweb.py @@ -15,10 +15,10 @@ import sys -from . import BaseRetrying -from . import DoAttempt -from . import DoSleep -from . import RetryCallState +from pip._vendor.tenacity import BaseRetrying +from pip._vendor.tenacity import DoAttempt +from pip._vendor.tenacity import DoSleep +from pip._vendor.tenacity import RetryCallState from tornado import gen diff --git a/src/pip/_vendor/tenacity/wait.py b/src/pip/_vendor/tenacity/wait.py index 48e01852420..8ce205e58cf 100644 --- a/src/pip/_vendor/tenacity/wait.py +++ b/src/pip/_vendor/tenacity/wait.py @@ -17,10 +17,10 @@ import abc import random -import pip._vendor.six as six +from pip._vendor import six -from . import _utils -from . import compat as _compat +from pip._vendor.tenacity import _utils +from pip._vendor.tenacity import compat as _compat @six.add_metaclass(abc.ABCMeta) From 504775f2606ee3ba6ac950eac9a93c41a50b1f83 Mon Sep 17 00:00:00 2001 From: Yeray Diaz Diaz Date: Tue, 14 Jan 2020 11:46:13 +0000 Subject: [PATCH 7/8] Add monotonic to vendor.txt, futures cannot be installed in 3.8 --- src/pip/_vendor/tenacity/_utils.py | 2 +- src/pip/_vendor/vendor.txt | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/pip/_vendor/tenacity/_utils.py b/src/pip/_vendor/tenacity/_utils.py index 365b11d4b16..0ae90e305da 100644 --- a/src/pip/_vendor/tenacity/_utils.py +++ b/src/pip/_vendor/tenacity/_utils.py @@ -131,7 +131,7 @@ def get_callback_name(cb): try: now = time.monotonic # noqa except AttributeError: - from monotonic import monotonic as now # noqa + from pip._vendor.monotonic import monotonic as now # noqa class cached_property(object): diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index 1af84ff065f..0213e00268a 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -4,8 +4,10 @@ colorama==0.4.1 contextlib2==0.6.0 distlib==0.2.9.post0 distro==1.4.0 +futures==3.3.0; python_version == "2.7" html5lib==1.0.1 ipaddress==1.0.22 # Only needed on 2.6 and 2.7 +monotonic==1.5 msgpack==0.6.2 packaging==20.0 pep517==0.7.0 From afb692c3bb42c1ca1fba5251efaaa2c8a7c5123e Mon Sep 17 00:00:00 2001 From: Yeray Diaz Diaz Date: Tue, 14 Jan 2020 11:57:23 +0000 Subject: [PATCH 8/8] Add futures and monotonic --- pyproject.toml | 2 +- src/pip/_vendor/futures/LICENSE | 48 ++ src/pip/_vendor/futures/__init__.py | 23 + src/pip/_vendor/futures/_base.py | 667 ++++++++++++++++++ src/pip/_vendor/futures/process.py | 363 ++++++++++ src/pip/_vendor/futures/thread.py | 170 +++++ src/pip/_vendor/monotonic.LICENSE | 202 ++++++ src/pip/_vendor/monotonic.py | 169 +++++ src/pip/_vendor/monotonic.pyi | 1 + src/pip/_vendor/tenacity/__init__.py | 6 +- src/pip/_vendor/vendor.txt | 2 +- .../vendoring/patches/tenacity.patch | 17 + 12 files changed, 1667 insertions(+), 3 deletions(-) 
create mode 100644 src/pip/_vendor/futures/LICENSE create mode 100644 src/pip/_vendor/futures/__init__.py create mode 100644 src/pip/_vendor/futures/_base.py create mode 100644 src/pip/_vendor/futures/process.py create mode 100644 src/pip/_vendor/futures/thread.py create mode 100644 src/pip/_vendor/monotonic.LICENSE create mode 100644 src/pip/_vendor/monotonic.py create mode 100644 src/pip/_vendor/monotonic.pyi create mode 100644 tools/automation/vendoring/patches/tenacity.patch diff --git a/pyproject.toml b/pyproject.toml index 01fae701523..7c675fa382d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,7 @@ destination = "src/pip/_vendor/" requirements = "src/pip/_vendor/vendor.txt" namespace = "pip._vendor" -protected-files = ["__init__.py", "README.rst", "vendor.txt"] +protected-files = ["__init__.py", "README.rst", "vendor.txt", "futures"] patches-dir = "tools/automation/vendoring/patches" [tool.vendoring.transformations] diff --git a/src/pip/_vendor/futures/LICENSE b/src/pip/_vendor/futures/LICENSE new file mode 100644 index 00000000000..a8d65b16b60 --- /dev/null +++ b/src/pip/_vendor/futures/LICENSE @@ -0,0 +1,48 @@ +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python +alone or in any derivative version, provided, however, that PSF's +License Agreement and PSF's notice of copyright, i.e., "Copyright (c) +2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights +Reserved" are retained in Python alone or in any derivative version +prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. 
By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. diff --git a/src/pip/_vendor/futures/__init__.py b/src/pip/_vendor/futures/__init__.py new file mode 100644 index 00000000000..428b14bdfe4 --- /dev/null +++ b/src/pip/_vendor/futures/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Execute computations asynchronously using threads or processes.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +from concurrent.futures._base import (FIRST_COMPLETED, + FIRST_EXCEPTION, + ALL_COMPLETED, + CancelledError, + TimeoutError, + Future, + Executor, + wait, + as_completed) +from concurrent.futures.thread import ThreadPoolExecutor + +try: + from concurrent.futures.process import ProcessPoolExecutor +except ImportError: + # some platforms don't have multiprocessing + pass diff --git a/src/pip/_vendor/futures/_base.py b/src/pip/_vendor/futures/_base.py new file mode 100644 index 00000000000..510ffa53f77 --- /dev/null +++ b/src/pip/_vendor/futures/_base.py @@ -0,0 +1,667 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +import collections +import logging +import threading +import itertools +import time +import types + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +FIRST_COMPLETED = 'FIRST_COMPLETED' +FIRST_EXCEPTION = 'FIRST_EXCEPTION' +ALL_COMPLETED = 'ALL_COMPLETED' +_AS_COMPLETED = '_AS_COMPLETED' + +# Possible future states (for internal use by the futures package). +PENDING = 'PENDING' +RUNNING = 'RUNNING' +# The future was cancelled by the user... +CANCELLED = 'CANCELLED' +# ...and _Waiter.add_cancelled() was called by a worker. +CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' +FINISHED = 'FINISHED' + +_FUTURE_STATES = [ + PENDING, + RUNNING, + CANCELLED, + CANCELLED_AND_NOTIFIED, + FINISHED +] + +_STATE_TO_DESCRIPTION_MAP = { + PENDING: "pending", + RUNNING: "running", + CANCELLED: "cancelled", + CANCELLED_AND_NOTIFIED: "cancelled", + FINISHED: "finished" +} + +# Logger for internal use by the futures package. 
+LOGGER = logging.getLogger("concurrent.futures") + +class Error(Exception): + """Base class for all future-related exceptions.""" + pass + +class CancelledError(Error): + """The Future was cancelled.""" + pass + +class TimeoutError(Error): + """The operation exceeded the given deadline.""" + pass + +class _Waiter(object): + """Provides the event that wait() and as_completed() block on.""" + def __init__(self): + self.event = threading.Event() + self.finished_futures = [] + + def add_result(self, future): + self.finished_futures.append(future) + + def add_exception(self, future): + self.finished_futures.append(future) + + def add_cancelled(self, future): + self.finished_futures.append(future) + +class _AsCompletedWaiter(_Waiter): + """Used by as_completed().""" + + def __init__(self): + super(_AsCompletedWaiter, self).__init__() + self.lock = threading.Lock() + + def add_result(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _FirstCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_COMPLETED).""" + + def add_result(self, future): + super(_FirstCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + super(_FirstCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + super(_FirstCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _AllCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" + + def __init__(self, num_pending_calls, stop_on_exception): + self.num_pending_calls = num_pending_calls + self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() + super(_AllCompletedWaiter, self).__init__() + + def _decrement_pending_calls(self): + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() + + def add_result(self, future): + super(_AllCompletedWaiter, self).add_result(future) + self._decrement_pending_calls() + + def add_exception(self, future): + super(_AllCompletedWaiter, self).add_exception(future) + if self.stop_on_exception: + self.event.set() + else: + self._decrement_pending_calls() + + def add_cancelled(self, future): + super(_AllCompletedWaiter, self).add_cancelled(future) + self._decrement_pending_calls() + +class _AcquireFutures(object): + """A context manager that does an ordered acquire of Future conditions.""" + + def __init__(self, futures): + self.futures = sorted(futures, key=id) + + def __enter__(self): + for future in self.futures: + future._condition.acquire() + + def __exit__(self, *args): + for future in self.futures: + future._condition.release() + +def _create_and_install_waiters(fs, return_when): + if return_when == _AS_COMPLETED: + waiter = _AsCompletedWaiter() + elif return_when == FIRST_COMPLETED: + waiter = _FirstCompletedWaiter() + else: + pending_count = sum( + f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) + + if return_when == FIRST_EXCEPTION: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) + elif return_when == ALL_COMPLETED: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) + else: + raise ValueError("Invalid return condition: %r" % return_when) + + for f in 
fs: + f._waiters.append(waiter) + + return waiter + + +def _yield_finished_futures(fs, waiter, ref_collect): + """ + Iterate on the list *fs*, yielding finished futures one by one in + reverse order. + Before yielding a future, *waiter* is removed from its waiters + and the future is removed from each set in the collection of sets + *ref_collect*. + + The aim of this function is to avoid keeping stale references after + the future is yielded and before the iterator resumes. + """ + while fs: + f = fs[-1] + for futures_set in ref_collect: + futures_set.remove(f) + with f._condition: + f._waiters.remove(waiter) + del f + # Careful not to keep a reference to the popped value + yield fs.pop() + + +def as_completed(fs, timeout=None): + """An iterator over the given futures that yields each as it completes. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + iterate over. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator that yields the given Futures as they complete (finished or + cancelled). If any given Futures are duplicated, they will be returned + once. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + """ + if timeout is not None: + end_time = timeout + time.time() + + fs = set(fs) + total_futures = len(fs) + with _AcquireFutures(fs): + finished = set( + f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + pending = fs - finished + waiter = _create_and_install_waiters(fs, _AS_COMPLETED) + finished = list(finished) + try: + for f in _yield_finished_futures(finished, waiter, + ref_collect=(fs,)): + f = [f] + yield f.pop() + + while pending: + if timeout is None: + wait_timeout = None + else: + wait_timeout = end_time - time.time() + if wait_timeout < 0: + raise TimeoutError( + '%d (of %d) futures unfinished' % ( + len(pending), total_futures)) + + waiter.event.wait(wait_timeout) + + with waiter.lock: + finished = waiter.finished_futures + waiter.finished_futures = [] + waiter.event.clear() + + # reverse to keep finishing order + finished.reverse() + for f in _yield_finished_futures(finished, waiter, + ref_collect=(fs, pending)): + f = [f] + yield f.pop() + + finally: + # Remove waiter from unfinished futures + for f in fs: + with f._condition: + f._waiters.remove(waiter) + +DoneAndNotDoneFutures = collections.namedtuple( + 'DoneAndNotDoneFutures', 'done not_done') +def wait(fs, timeout=None, return_when=ALL_COMPLETED): + """Wait for the futures in the given sequence to complete. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + wait upon. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + return_when: Indicates when this function should return. The options + are: + + FIRST_COMPLETED - Return when any future finishes or is + cancelled. + FIRST_EXCEPTION - Return when any future finishes by raising an + exception. If no future raises an exception + then it is equivalent to ALL_COMPLETED. + ALL_COMPLETED - Return when all futures finish or are cancelled. + + Returns: + A named 2-tuple of sets. The first set, named 'done', contains the + futures that completed (is finished or cancelled) before the wait + completed. The second set, named 'not_done', contains uncompleted + futures. 
+ """ + with _AcquireFutures(fs): + done = set(f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + not_done = set(fs) - done + + if (return_when == FIRST_COMPLETED) and done: + return DoneAndNotDoneFutures(done, not_done) + elif (return_when == FIRST_EXCEPTION) and done: + if any(f for f in done + if not f.cancelled() and f.exception() is not None): + return DoneAndNotDoneFutures(done, not_done) + + if len(done) == len(fs): + return DoneAndNotDoneFutures(done, not_done) + + waiter = _create_and_install_waiters(fs, return_when) + + waiter.event.wait(timeout) + for f in fs: + with f._condition: + f._waiters.remove(waiter) + + done.update(waiter.finished_futures) + return DoneAndNotDoneFutures(done, set(fs) - done) + +class Future(object): + """Represents the result of an asynchronous computation.""" + + def __init__(self): + """Initializes the future. Should not be called by clients.""" + self._condition = threading.Condition() + self._state = PENDING + self._result = None + self._exception = None + self._traceback = None + self._waiters = [] + self._done_callbacks = [] + + def _invoke_callbacks(self): + for callback in self._done_callbacks: + try: + callback(self) + except Exception: + LOGGER.exception('exception calling callback for %r', self) + except BaseException: + # Explicitly let all other new-style exceptions through so + # that we can catch all old-style exceptions with a simple + # "except:" clause below. + # + # All old-style exception objects are instances of + # types.InstanceType, but "except types.InstanceType:" does + # not catch old-style exceptions for some reason. Thus, the + # only way to catch all old-style exceptions without catching + # any new-style exceptions is to filter out the new-style + # exceptions, which all derive from BaseException. + raise + except: + # Because of the BaseException clause above, this handler only + # executes for old-style exception objects. + LOGGER.exception('exception calling callback for %r', self) + + def __repr__(self): + with self._condition: + if self._state == FINISHED: + if self._exception: + return '<%s at %#x state=%s raised %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._exception.__class__.__name__) + else: + return '<%s at %#x state=%s returned %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._result.__class__.__name__) + return '<%s at %#x state=%s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state]) + + def cancel(self): + """Cancel the future if possible. + + Returns True if the future was cancelled, False otherwise. A future + cannot be cancelled if it is running or has already completed. 
+ """ + with self._condition: + if self._state in [RUNNING, FINISHED]: + return False + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + return True + + self._state = CANCELLED + self._condition.notify_all() + + self._invoke_callbacks() + return True + + def cancelled(self): + """Return True if the future was cancelled.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] + + def running(self): + """Return True if the future is currently executing.""" + with self._condition: + return self._state == RUNNING + + def done(self): + """Return True of the future was cancelled or finished executing.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] + + def __get_result(self): + if self._exception: + if isinstance(self._exception, types.InstanceType): + # The exception is an instance of an old-style class, which + # means type(self._exception) returns types.ClassType instead + # of the exception's actual class type. + exception_type = self._exception.__class__ + else: + exception_type = type(self._exception) + raise exception_type, self._exception, self._traceback + else: + return self._result + + def add_done_callback(self, fn): + """Attaches a callable that will be called when the future finishes. + + Args: + fn: A callable that will be called with this future as its only + argument when the future completes or is cancelled. The callable + will always be called by a thread in the same process in which + it was added. If the future has already completed or been + cancelled then the callable will be called immediately. These + callables are called in the order that they were added. + """ + with self._condition: + if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: + self._done_callbacks.append(fn) + return + fn(self) + + def result(self, timeout=None): + """Return the result of the call that the future represents. + + Args: + timeout: The number of seconds to wait for the result if the future + isn't done. If None, then there is no limit on the wait time. + + Returns: + The result of the call that the future represents. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + Exception: If the call raised then that exception will be raised. + """ + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + else: + raise TimeoutError() + + def exception_info(self, timeout=None): + """Return a tuple of (exception, traceback) raised by the call that the + future represents. + + Args: + timeout: The number of seconds to wait for the exception if the + future isn't done. If None, then there is no limit on the wait + time. + + Returns: + The exception raised by the call that the future represents or None + if the call completed without raising. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. 
+ """ + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception, self._traceback + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception, self._traceback + else: + raise TimeoutError() + + def exception(self, timeout=None): + """Return the exception raised by the call that the future represents. + + Args: + timeout: The number of seconds to wait for the exception if the + future isn't done. If None, then there is no limit on the wait + time. + + Returns: + The exception raised by the call that the future represents or None + if the call completed without raising. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + """ + return self.exception_info(timeout)[0] + + # The following methods should only be used by Executors and in tests. + def set_running_or_notify_cancel(self): + """Mark the future as running or process any cancel notifications. + + Should only be used by Executor implementations and unit tests. + + If the future has been cancelled (cancel() was called and returned + True) then any threads waiting on the future completing (though calls + to as_completed() or wait()) are notified and False is returned. + + If the future was not cancelled then it is put in the running state + (future calls to running() will return True) and True is returned. + + This method should be called by Executor implementations before + executing the work associated with this future. If this method returns + False then the work should not be executed. + + Returns: + False if the Future was cancelled, True otherwise. + + Raises: + RuntimeError: if this method was already called or if set_result() + or set_exception() was called. + """ + with self._condition: + if self._state == CANCELLED: + self._state = CANCELLED_AND_NOTIFIED + for waiter in self._waiters: + waiter.add_cancelled(self) + # self._condition.notify_all() is not necessary because + # self.cancel() triggers a notification. + return False + elif self._state == PENDING: + self._state = RUNNING + return True + else: + LOGGER.critical('Future %s in unexpected state: %s', + id(self), + self._state) + raise RuntimeError('Future in unexpected state') + + def set_result(self, result): + """Sets the return value of work associated with the future. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + self._result = result + self._state = FINISHED + for waiter in self._waiters: + waiter.add_result(self) + self._condition.notify_all() + self._invoke_callbacks() + + def set_exception_info(self, exception, traceback): + """Sets the result of the future as being the given exception + and traceback. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + self._exception = exception + self._traceback = traceback + self._state = FINISHED + for waiter in self._waiters: + waiter.add_exception(self) + self._condition.notify_all() + self._invoke_callbacks() + + def set_exception(self, exception): + """Sets the result of the future as being the given exception. + + Should only be used by Executor implementations and unit tests. 
+ """ + self.set_exception_info(exception, None) + +class Executor(object): + """This is an abstract base class for concrete asynchronous executors.""" + + def submit(self, fn, *args, **kwargs): + """Submits a callable to be executed with the given arguments. + + Schedules the callable to be executed as fn(*args, **kwargs) and returns + a Future instance representing the execution of the callable. + + Returns: + A Future representing the given call. + """ + raise NotImplementedError() + + def map(self, fn, *iterables, **kwargs): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + timeout = kwargs.get('timeout') + if timeout is not None: + end_time = timeout + time.time() + + fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)] + + # Yield must be hidden in closure so that the futures are submitted + # before the first iterator value is required. + def result_iterator(): + try: + # reverse to keep finishing order + fs.reverse() + while fs: + # Careful not to keep a reference to the popped future + if timeout is None: + yield fs.pop().result() + else: + yield fs.pop().result(end_time - time.time()) + finally: + for future in fs: + future.cancel() + return result_iterator() + + def shutdown(self, wait=True): + """Clean-up the resources associated with the Executor. + + It is safe to call this method several times. Otherwise, no other + methods can be called after this one. + + Args: + wait: If True then shutdown will not return until all running + futures have finished executing and the resources used by the + executor have been reclaimed. + """ + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown(wait=True) + return False diff --git a/src/pip/_vendor/futures/process.py b/src/pip/_vendor/futures/process.py new file mode 100644 index 00000000000..fa5b96fd390 --- /dev/null +++ b/src/pip/_vendor/futures/process.py @@ -0,0 +1,363 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. + +The follow diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | => | | => | Call Q | => | | +| | +----------+ | | +-----------+ | | +| | | ... | | | | ... | | | +| | | 6 | | | | 5, call() | | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | | | | 4, result | | | +| | | ... 
| | | | 3, except | | | ++----------+ +------------+ +--------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). +- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Request Q" +""" + +import atexit +from concurrent.futures import _base +import Queue as queue +import multiprocessing +import threading +import weakref +import sys + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +# Workers are created as daemon threads and processes. This is done to allow the +# interpreter to exit when there are still idle processes in a +# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, +# allowing workers to die with the interpreter has two undesirable properties: +# - The workers would still be running during interpretor shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads/processes finish. + +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) if _threads_queues else () + for t, q in items: + q.put(None) + for t, q in items: + t.join(sys.maxint) + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled). +EXTRA_QUEUED_CALLS = 1 + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + +class _ResultItem(object): + def __init__(self, work_id, exception=None, result=None): + self.work_id = work_id + self.exception = exception + self.result = result + +class _CallItem(object): + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + +def _process_worker(call_queue, result_queue): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A multiprocessing.Queue of _CallItems that will be read and + evaluated by the worker. + result_queue: A multiprocessing.Queue of _ResultItems that will written + to by the worker. + shutdown: A multiprocessing.Event that will be set as a signal to the + worker that it should exit when call_queue is empty. 
+ """ + while True: + call_item = call_queue.get(block=True) + if call_item is None: + # Wake up queue management thread + result_queue.put(None) + return + try: + r = call_item.fn(*call_item.args, **call_item.kwargs) + except: + e = sys.exc_info()[1] + result_queue.put(_ResultItem(call_item.work_id, + exception=e)) + else: + result_queue.put(_ResultItem(call_item.work_id, + result=r)) + +def _add_call_item_to_queue(pending_work_items, + work_ids, + call_queue): + """Fills call_queue with _WorkItems from pending_work_items. + + This function never blocks. + + Args: + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids + are consumed and the corresponding _WorkItems from + pending_work_items are transformed into _CallItems and put in + call_queue. + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems. + """ + while True: + if call_queue.full(): + return + try: + work_id = work_ids.get(block=False) + except queue.Empty: + return + else: + work_item = pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + call_queue.put(_CallItem(work_id, + work_item.fn, + work_item.args, + work_item.kwargs), + block=True) + else: + del pending_work_items[work_id] + continue + +def _queue_management_worker(executor_reference, + processes, + pending_work_items, + work_ids_queue, + call_queue, + result_queue): + """Manages the communication between this process and the worker processes. + + This function is run in a local thread. + + Args: + executor_reference: A weakref.ref to the ProcessPoolExecutor that owns + this thread. Used to determine if the ProcessPoolExecutor has been + garbage collected and that this function can exit. + process: A list of the multiprocessing.Process instances used as + workers. + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems for processing by the process workers. + result_queue: A multiprocessing.Queue of _ResultItems generated by the + process workers. + """ + nb_shutdown_processes = [0] + def shutdown_one_process(): + """Tell a worker to terminate, which will in turn wake us again""" + call_queue.put(None) + nb_shutdown_processes[0] += 1 + while True: + _add_call_item_to_queue(pending_work_items, + work_ids_queue, + call_queue) + + result_item = result_queue.get(block=True) + if result_item is not None: + work_item = pending_work_items[result_item.work_id] + del pending_work_items[result_item.work_id] + + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + # Delete references to object. See issue16284 + del work_item + # Check whether we should start shutting down. + executor = executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this worker has been collected OR + # - The executor that owns this worker has been shutdown. + if _shutdown or executor is None or executor._shutdown_thread: + # Since no new work items can be added, it is safe to shutdown + # this thread if there are no pending work items. 
+ if not pending_work_items: + while nb_shutdown_processes[0] < len(processes): + shutdown_one_process() + # If .join() is not called on the created processes then + # some multiprocessing.Queue methods may deadlock on Mac OS + # X. + for p in processes: + p.join() + call_queue.close() + return + del executor + +_system_limits_checked = False +_system_limited = None +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked: + if _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + import os + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # indetermine limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max + raise NotImplementedError(_system_limited) + + +class ProcessPoolExecutor(_base.Executor): + def __init__(self, max_workers=None): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: The maximum number of processes that can be used to + execute the given calls. If None or not given then as many + worker processes will be created as the machine has processors. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = multiprocessing.cpu_count() + else: + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + self._max_workers = max_workers + + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. But don't make it too big + # because futures in the call queue cannot be cancelled. + self._call_queue = multiprocessing.Queue(self._max_workers + + EXTRA_QUEUED_CALLS) + self._result_queue = multiprocessing.Queue() + self._work_ids = queue.Queue() + self._queue_management_thread = None + self._processes = set() + + # Shutdown is a two-step process. + self._shutdown_thread = False + self._shutdown_lock = threading.Lock() + self._queue_count = 0 + self._pending_work_items = {} + + def _start_queue_management_thread(self): + # When the executor gets lost, the weakref callback will wake up + # the queue management thread. 
+ def weakref_cb(_, q=self._result_queue): + q.put(None) + if self._queue_management_thread is None: + self._queue_management_thread = threading.Thread( + target=_queue_management_worker, + args=(weakref.ref(self, weakref_cb), + self._processes, + self._pending_work_items, + self._work_ids, + self._call_queue, + self._result_queue)) + self._queue_management_thread.daemon = True + self._queue_management_thread.start() + _threads_queues[self._queue_management_thread] = self._result_queue + + def _adjust_process_count(self): + for _ in range(len(self._processes), self._max_workers): + p = multiprocessing.Process( + target=_process_worker, + args=(self._call_queue, + self._result_queue)) + p.start() + self._processes.add(p) + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._shutdown_thread: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._result_queue.put(None) + + self._start_queue_management_thread() + self._adjust_process_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown_thread = True + if self._queue_management_thread: + # Wake up queue management thread + self._result_queue.put(None) + if wait: + self._queue_management_thread.join(sys.maxint) + # To reduce the risk of openning too many files, remove references to + # objects that use file descriptors. + self._queue_management_thread = None + self._call_queue = None + self._result_queue = None + self._processes = None + shutdown.__doc__ = _base.Executor.shutdown.__doc__ + +atexit.register(_python_exit) diff --git a/src/pip/_vendor/futures/thread.py b/src/pip/_vendor/futures/thread.py new file mode 100644 index 00000000000..b5f832ffbfb --- /dev/null +++ b/src/pip/_vendor/futures/thread.py @@ -0,0 +1,170 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ThreadPoolExecutor.""" + +import atexit +from concurrent.futures import _base +import itertools +import Queue as queue +import threading +import weakref +import sys + +try: + from multiprocessing import cpu_count +except ImportError: + # some platforms don't have multiprocessing + def cpu_count(): + return None + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +# Workers are created as daemon threads. This is done to allow the interpreter +# to exit when there are still idle threads in a ThreadPoolExecutor's thread +# pool (i.e. shutdown() was not called). However, allowing workers to die with +# the interpreter has two undesirable properties: +# - The workers would still be running during interpretor shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads finish. 
+ +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) if _threads_queues else () + for t, q in items: + q.put(None) + for t, q in items: + t.join(sys.maxint) + +atexit.register(_python_exit) + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self): + if not self.future.set_running_or_notify_cancel(): + return + + try: + result = self.fn(*self.args, **self.kwargs) + except: + e, tb = sys.exc_info()[1:] + self.future.set_exception_info(e, tb) + else: + self.future.set_result(result) + +def _worker(executor_reference, work_queue): + try: + while True: + work_item = work_queue.get(block=True) + if work_item is not None: + work_item.run() + # Delete references to object. See issue16284 + del work_item + + # attempt to increment idle count + executor = executor_reference() + if executor is not None: + executor._idle_semaphore.release() + del executor + continue + executor = executor_reference() + # Exit if: + # - The interpreter is shutting down OR + # - The executor that owns the worker has been collected OR + # - The executor that owns the worker has been shutdown. + if _shutdown or executor is None or executor._shutdown: + # Notice other workers + work_queue.put(None) + return + del executor + except: + _base.LOGGER.critical('Exception in worker', exc_info=True) + + +class ThreadPoolExecutor(_base.Executor): + + # Used to assign unique thread names when thread_name_prefix is not supplied. + _counter = itertools.count().next + + def __init__(self, max_workers=None, thread_name_prefix=''): + """Initializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + """ + if max_workers is None: + # Use this number because ThreadPoolExecutor is often + # used to overlap I/O instead of CPU work. + max_workers = (cpu_count() or 1) * 5 + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + self._max_workers = max_workers + self._work_queue = queue.Queue() + self._idle_semaphore = threading.Semaphore(0) + self._threads = set() + self._shutdown = False + self._shutdown_lock = threading.Lock() + self._thread_name_prefix = (thread_name_prefix or + ("ThreadPoolExecutor-%d" % self._counter())) + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._shutdown: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._work_queue.put(w) + self._adjust_thread_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def _adjust_thread_count(self): + # if idle threads are available, don't spin new threads + if self._idle_semaphore.acquire(False): + return + + # When the executor gets lost, the weakref callback will wake up + # the worker threads. 
+ def weakref_cb(_, q=self._work_queue): + q.put(None) + + num_threads = len(self._threads) + if num_threads < self._max_workers: + thread_name = '%s_%d' % (self._thread_name_prefix or self, + num_threads) + t = threading.Thread(name=thread_name, target=_worker, + args=(weakref.ref(self, weakref_cb), + self._work_queue)) + t.daemon = True + t.start() + self._threads.add(t) + _threads_queues[t] = self._work_queue + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown = True + self._work_queue.put(None) + if wait: + for t in self._threads: + t.join(sys.maxint) + shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/src/pip/_vendor/monotonic.LICENSE b/src/pip/_vendor/monotonic.LICENSE new file mode 100644 index 00000000000..e06d2081865 --- /dev/null +++ b/src/pip/_vendor/monotonic.LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/src/pip/_vendor/monotonic.py b/src/pip/_vendor/monotonic.py new file mode 100644 index 00000000000..4ad147bae80 --- /dev/null +++ b/src/pip/_vendor/monotonic.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- +""" + monotonic + ~~~~~~~~~ + + This module provides a ``monotonic()`` function which returns the + value (in fractional seconds) of a clock which never goes backwards. + + On Python 3.3 or newer, ``monotonic`` will be an alias of + ``time.monotonic`` from the standard library. On older versions, + it will fall back to an equivalent implementation: + + +-------------+----------------------------------------+ + | Linux, BSD | ``clock_gettime(3)`` | + +-------------+----------------------------------------+ + | Windows | ``GetTickCount`` or ``GetTickCount64`` | + +-------------+----------------------------------------+ + | OS X | ``mach_absolute_time`` | + +-------------+----------------------------------------+ + + If no suitable implementation exists for the current platform, + attempting to import this module (or to import from it) will + cause a ``RuntimeError`` exception to be raised. + + + Copyright 2014, 2015, 2016 Ori Livneh + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +""" +import time + + +__all__ = ('monotonic',) + + +try: + monotonic = time.monotonic +except AttributeError: + import ctypes + import ctypes.util + import os + import sys + import threading + try: + if sys.platform == 'darwin': # OS X, iOS + # See Technical Q&A QA1398 of the Mac Developer Library: + # + libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True) + + class mach_timebase_info_data_t(ctypes.Structure): + """System timebase info. 
Defined in .""" + _fields_ = (('numer', ctypes.c_uint32), + ('denom', ctypes.c_uint32)) + + mach_absolute_time = libc.mach_absolute_time + mach_absolute_time.restype = ctypes.c_uint64 + + timebase = mach_timebase_info_data_t() + libc.mach_timebase_info(ctypes.byref(timebase)) + ticks_per_second = timebase.numer / timebase.denom * 1.0e9 + + def monotonic(): + """Monotonic clock, cannot go backward.""" + return mach_absolute_time() / ticks_per_second + + elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'): + if sys.platform.startswith('cygwin'): + # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since + # version 1.7.6. Using raw WinAPI for maximum version compatibility. + + # Ugly hack using the wrong calling convention (in 32-bit mode) + # because ctypes has no windll under cygwin (and it also seems that + # the code letting you select stdcall in _ctypes doesn't exist under + # the preprocessor definitions relevant to cygwin). + # This is 'safe' because: + # 1. The ABI of GetTickCount and GetTickCount64 is identical for + # both calling conventions because they both have no parameters. + # 2. libffi masks the problem because after making the call it doesn't + # touch anything through esp and epilogue code restores a correct + # esp from ebp afterwards. + try: + kernel32 = ctypes.cdll.kernel32 + except OSError: # 'No such file or directory' + kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll') + else: + kernel32 = ctypes.windll.kernel32 + + GetTickCount64 = getattr(kernel32, 'GetTickCount64', None) + if GetTickCount64: + # Windows Vista / Windows Server 2008 or newer. + GetTickCount64.restype = ctypes.c_ulonglong + + def monotonic(): + """Monotonic clock, cannot go backward.""" + return GetTickCount64() / 1000.0 + + else: + # Before Windows Vista. + GetTickCount = kernel32.GetTickCount + GetTickCount.restype = ctypes.c_uint32 + + get_tick_count_lock = threading.Lock() + get_tick_count_last_sample = 0 + get_tick_count_wraparounds = 0 + + def monotonic(): + """Monotonic clock, cannot go backward.""" + global get_tick_count_last_sample + global get_tick_count_wraparounds + + with get_tick_count_lock: + current_sample = GetTickCount() + if current_sample < get_tick_count_last_sample: + get_tick_count_wraparounds += 1 + get_tick_count_last_sample = current_sample + + final_milliseconds = get_tick_count_wraparounds << 32 + final_milliseconds += get_tick_count_last_sample + return final_milliseconds / 1000.0 + + else: + try: + clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'), + use_errno=True).clock_gettime + except Exception: + clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'), + use_errno=True).clock_gettime + + class timespec(ctypes.Structure): + """Time specification, as described in clock_gettime(3).""" + _fields_ = (('tv_sec', ctypes.c_long), + ('tv_nsec', ctypes.c_long)) + + if sys.platform.startswith('linux'): + CLOCK_MONOTONIC = 1 + elif sys.platform.startswith('freebsd'): + CLOCK_MONOTONIC = 4 + elif sys.platform.startswith('sunos5'): + CLOCK_MONOTONIC = 4 + elif 'bsd' in sys.platform: + CLOCK_MONOTONIC = 3 + elif sys.platform.startswith('aix'): + CLOCK_MONOTONIC = ctypes.c_longlong(10) + + def monotonic(): + """Monotonic clock, cannot go backward.""" + ts = timespec() + if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)): + errno = ctypes.get_errno() + raise OSError(errno, os.strerror(errno)) + return ts.tv_sec + ts.tv_nsec / 1.0e9 + + # Perform a sanity-check. 
+ if monotonic() - monotonic() > 0: + raise ValueError('monotonic() is not monotonic!') + + except Exception as e: + raise RuntimeError('no suitable implementation for this system: ' + repr(e)) diff --git a/src/pip/_vendor/monotonic.pyi b/src/pip/_vendor/monotonic.pyi new file mode 100644 index 00000000000..9d058c41660 --- /dev/null +++ b/src/pip/_vendor/monotonic.pyi @@ -0,0 +1 @@ +from monotonic import * \ No newline at end of file diff --git a/src/pip/_vendor/tenacity/__init__.py b/src/pip/_vendor/tenacity/__init__.py index 410c87cdb37..8041cbe228f 100644 --- a/src/pip/_vendor/tenacity/__init__.py +++ b/src/pip/_vendor/tenacity/__init__.py @@ -29,7 +29,11 @@ import sys import threading -from concurrent import futures + +if sys.version_info[0] == 2: + from pip._vendor import futures +else: + from concurrent import futures from pip._vendor import six diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index 0213e00268a..75ca01664b0 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -4,7 +4,7 @@ colorama==0.4.1 contextlib2==0.6.0 distlib==0.2.9.post0 distro==1.4.0 -futures==3.3.0; python_version == "2.7" +futures==3.3.0; python_version == "2.7" # futures cannot be installed in Python 3 html5lib==1.0.1 ipaddress==1.0.22 # Only needed on 2.6 and 2.7 monotonic==1.5 diff --git a/tools/automation/vendoring/patches/tenacity.patch b/tools/automation/vendoring/patches/tenacity.patch new file mode 100644 index 00000000000..50631a3a3e9 --- /dev/null +++ b/tools/automation/vendoring/patches/tenacity.patch @@ -0,0 +1,17 @@ +diff --git a/src/pip/_vendor/tenacity/__init__.py b/src/pip/_vendor/tenacity/__init__.py +index 410c87cd..8041cbe2 100644 +--- a/src/pip/_vendor/tenacity/__init__.py ++++ b/src/pip/_vendor/tenacity/__init__.py +@@ -29,7 +29,11 @@ except ImportError: + + import sys + import threading +-from concurrent import futures ++ ++if sys.version_info[0] == 2: ++ from pip._vendor import futures ++else: ++ from concurrent import futures + + from pip._vendor import six +
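
The conditional import that this patch adds to tenacity/__init__.py (and records in tools/automation/vendoring/patches/tenacity.patch) keeps call sites identical across interpreters: Python 3 keeps using the stdlib concurrent.futures, while Python 2 falls back to the backport vendored above. A minimal sketch of the executor API that either branch provides, assuming the vendored package imports cleanly as pip._vendor.futures:

    import sys

    # The same version gate the patch adds to tenacity/__init__.py: Python 2
    # gets the vendored backport, Python 3 the standard library.
    if sys.version_info[0] == 2:
        from pip._vendor import futures
    else:
        from concurrent import futures

    def square(n):
        return n * n

    # Executor.submit(), wait() and Future.result() behave the same in both
    # branches; ALL_COMPLETED is re-exported by the vendored __init__.py.
    with futures.ThreadPoolExecutor(max_workers=4) as pool:
        fs = [pool.submit(square, i) for i in range(8)]
        done, not_done = futures.wait(fs, return_when=futures.ALL_COMPLETED)
        print(sorted(f.result() for f in done))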
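A hedged sketch of the consumer-facing API that vendoring tenacity is meant to provide. The names retry, stop_after_attempt and wait_fixed are tenacity's usual public interface and are assumed here, not defined anywhere in this diff; on Python 2 the Future plumbing behind the decorator now comes from pip._vendor.futures:

    import random

    from pip._vendor.tenacity import retry, stop_after_attempt, wait_fixed

    @retry(stop=stop_after_attempt(3), wait=wait_fixed(0.5))
    def flaky_operation():
        # Stand-in for any transiently failing call, e.g. a network fetch.
        if random.random() < 0.5:
            raise IOError("transient failure")
        return "ok"

    # Retries up to three times, sleeping 0.5 seconds between attempts
    # before giving up.
    print(flaky_operation())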
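Finally, the vendored monotonic.py documents its own fallback chain in its module docstring, so consumers only ever see a single monotonic() callable. A minimal sketch of reading such a clock, assuming the pip._vendor.monotonic module path created by this diff:

    import time

    try:
        from time import monotonic                    # Python 3.3+
    except ImportError:
        from pip._vendor.monotonic import monotonic   # vendored fallback

    start = monotonic()
    time.sleep(0.1)
    elapsed = monotonic() - start
    # elapsed is roughly 0.1 and can never be negative, even if the wall
    # clock is adjusted meanwhile; that is the property the sanity check at
    # the bottom of monotonic.py guards.
    print(elapsed)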