Skip to content

Commit 8bda66a

Browse files
authored
lru_cache preserves signature of wrapped function (#6221)
Update pytype for a fix to ParamSpec as argument to Generic
1 parent 59dfea0 commit 8bda66a

File tree

2 files changed

+10
-8
lines changed

2 files changed

+10
-8
lines changed

requirements-tests-py3.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
mypy==0.910
2-
pytype==2021.10.25
2+
pytype==2021.11.2
33
typed-ast==1.4.3
44
black==21.9b0
55
flake8==4.0.1

stdlib/functools.pyi

+9-7
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ import sys
22
import types
33
from _typeshed import SupportsItems, SupportsLessThan
44
from typing import Any, Callable, Generic, Hashable, Iterable, NamedTuple, Sequence, Set, Sized, Tuple, Type, TypeVar, overload
5+
from typing_extensions import ParamSpec
56

67
if sys.version_info >= (3, 9):
78
from types import GenericAlias
@@ -10,6 +11,7 @@ _AnyCallable = Callable[..., Any]
1011

1112
_T = TypeVar("_T")
1213
_S = TypeVar("_S")
14+
_P = ParamSpec("_P")
1315

1416
@overload
1517
def reduce(function: Callable[[_T, _S], _T], sequence: Iterable[_S], initial: _T) -> _T: ...
@@ -22,20 +24,20 @@ class _CacheInfo(NamedTuple):
2224
maxsize: int
2325
currsize: int
2426

25-
class _lru_cache_wrapper(Generic[_T]):
26-
__wrapped__: Callable[..., _T]
27-
def __call__(self, *args: Hashable, **kwargs: Hashable) -> _T: ...
27+
class _lru_cache_wrapper(Generic[_P, _T]): # type: ignore
28+
__wrapped__: Callable[_P, _T] # type: ignore
29+
def __call__(self, *args: _P.args, **kwargs: _P.kwargs) -> _T: ... # type: ignore
2830
def cache_info(self) -> _CacheInfo: ...
2931
def cache_clear(self) -> None: ...
3032

3133
if sys.version_info >= (3, 8):
3234
@overload
33-
def lru_cache(maxsize: int | None = ..., typed: bool = ...) -> Callable[[Callable[..., _T]], _lru_cache_wrapper[_T]]: ...
35+
def lru_cache(maxsize: int | None = ..., typed: bool = ...) -> Callable[[Callable[_P, _T]], _lru_cache_wrapper[_P, _T]]: ... # type: ignore
3436
@overload
35-
def lru_cache(maxsize: Callable[..., _T], typed: bool = ...) -> _lru_cache_wrapper[_T]: ...
37+
def lru_cache(maxsize: Callable[_P, _T], typed: bool = ...) -> _lru_cache_wrapper[_P, _T]: ... # type: ignore
3638

3739
else:
38-
def lru_cache(maxsize: int | None = ..., typed: bool = ...) -> Callable[[Callable[..., _T]], _lru_cache_wrapper[_T]]: ...
40+
def lru_cache(maxsize: int | None = ..., typed: bool = ...) -> Callable[[Callable[_P, _T]], _lru_cache_wrapper[_P, _T]]: ... # type: ignore
3941

4042
WRAPPER_ASSIGNMENTS: Sequence[str]
4143
WRAPPER_UPDATES: Sequence[str]
@@ -115,7 +117,7 @@ if sys.version_info >= (3, 8):
115117
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
116118

117119
if sys.version_info >= (3, 9):
118-
def cache(__user_function: Callable[..., _T]) -> _lru_cache_wrapper[_T]: ...
120+
def cache(__user_function: Callable[_P, _T]) -> _lru_cache_wrapper[_P, _T]: ... # type: ignore
119121

120122
def _make_key(
121123
args: Tuple[Hashable, ...],

0 commit comments

Comments (0)