diff --git a/ChangeLog b/ChangeLog
index cb92a70d11..2384d221b9 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -15,6 +15,11 @@ Release Date: TBA
 
 * Added more supported parameters to ``subprocess.check_output``
 
+* Fix recursion errors with pandas
+
+  Fixes PyCQA/pylint#2843
+  Fixes PyCQA/pylint#2811
+
 * Added exception inference for `UnicodeDecodeError`
 
   Close PyCQA/pylint#3639
diff --git a/astroid/context.py b/astroid/context.py
index 40cebf222b..10cc688f6f 100644
--- a/astroid/context.py
+++ b/astroid/context.py
@@ -105,17 +105,6 @@ def clone(self):
         clone.extra_context = self.extra_context
         return clone
 
-    def cache_generator(self, key, generator):
-        """Cache result of generator into dictionary
-
-        Used to cache inference results"""
-        results = []
-        for result in generator:
-            results.append(result)
-            yield result
-
-        self.inferred[key] = tuple(results)
-
     @contextlib.contextmanager
     def restore_path(self):
         path = set(self.path)
diff --git a/astroid/node_classes.py b/astroid/node_classes.py
index 59b1e31c79..e9d75d7907 100644
--- a/astroid/node_classes.py
+++ b/astroid/node_classes.py
@@ -354,19 +354,37 @@ def infer(self, context=None, **kwargs):
             # explicit_inference is not bound, give it self explicitly
             try:
                 # pylint: disable=not-callable
-                return self._explicit_inference(self, context, **kwargs)
+                yield from self._explicit_inference(self, context, **kwargs)
+                return
             except exceptions.UseInferenceDefault:
                 pass
 
         if not context:
-            return self._infer(context, **kwargs)
+            yield from self._infer(context, **kwargs)
+            return
 
         key = (self, context.lookupname, context.callcontext, context.boundnode)
         if key in context.inferred:
-            return iter(context.inferred[key])
+            yield from context.inferred[key]
+            return
+
+        generator = self._infer(context, **kwargs)
+        results = []
+
+        # Limit inference amount to help with performance issues with
+        # exponentially exploding possible results.
+        limit = MANAGER.max_inferable_values
+        for i, result in enumerate(generator):
+            if i >= limit:
+                yield util.Uninferable
+                break
+            results.append(result)
+            yield result
 
-        gen = context.cache_generator(key, self._infer(context, **kwargs))
-        return util.limit_inference(gen, MANAGER.max_inferable_values)
+        # Cache generated results for subsequent inferences of the
+        # same node using the same context
+        context.inferred[key] = tuple(results)
+        return
 
     def _repr_name(self):
         """Get a name for nice representation.
diff --git a/astroid/util.py b/astroid/util.py
index 3ab7561553..14ec43c685 100644
--- a/astroid/util.py
+++ b/astroid/util.py
@@ -7,7 +7,6 @@
 # For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
 
 import warnings
-from itertools import islice
 import importlib
 
 import lazy_object_proxy
@@ -139,26 +138,3 @@ def proxy_alias(alias_name, node_type):
         },
     )
     return proxy(lambda: node_type)
-
-
-def limit_inference(iterator, size):
-    """Limit inference amount.
-
-    Limit inference amount to help with performance issues with
-    exponentially exploding possible results.
-
-    :param iterator: Inference generator to limit
-    :type iterator: Iterator(NodeNG)
-
-    :param size: Maximum mount of nodes yielded plus an
-        Uninferable at the end if limit reached
-    :type size: int
-
-    :yields: A possibly modified generator
-    :rtype param: Iterable
-    """
-    yield from islice(iterator, size)
-    has_more = next(iterator, False)
-    if has_more is not False:
-        yield Uninferable
-    return
diff --git a/tests/unittest_inference.py b/tests/unittest_inference.py
index d99298bcaa..140648d64e 100644
--- a/tests/unittest_inference.py
+++ b/tests/unittest_inference.py
@@ -428,7 +428,7 @@ def test_del1(self):
             del undefined_attr
         """
         delete = extract_node(code, __name__)
-        self.assertRaises(InferenceError, delete.infer)
+        self.assertRaises(InferenceError, next, delete.infer())
 
     def test_del2(self):
         code = """
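
Below is a minimal caller-level sketch of the behaviour the node_classes.py hunk introduces: inference results are cached on the InferenceContext and reused, and inference stops at MANAGER.max_inferable_values with a trailing Uninferable. It assumes astroid with this patch applied; the snippet itself (the module text, variable names, and assertion) is illustrative and not part of the diff.

import astroid
from astroid.context import InferenceContext

# Extract the trailing `x` expression from a tiny module; #@ marks the node.
node = astroid.extract_node(
    """
    x = 1
    x  #@
    """
)

# First call with an explicit context runs _infer() and stores the results
# under (node, lookupname, callcontext, boundnode) in context.inferred.
context = InferenceContext()
first = list(node.infer(context=context))

# A second call with the same context is answered from the cached tuple
# instead of re-running _infer().
second = list(node.infer(context=context))
assert [c.value for c in first] == [c.value for c in second] == [1]

# If a node had more possible values than MANAGER.max_inferable_values,
# infer() would stop early and yield util.Uninferable as its last item.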