@@ -1,24 +1,14 @@
+from __future__ import annotations
+
 import enum
 import functools
 import operator
 from collections import Counter, defaultdict
 from contextlib import suppress
 from dataclasses import dataclass, field
 from datetime import timedelta
 from html import escape
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Dict,
-    Hashable,
-    Iterable,
-    List,
-    Mapping,
-    Optional,
-    Tuple,
-    Union,
-)
+from typing import TYPE_CHECKING, Any, Callable, Hashable, Iterable, Mapping

 import numpy as np
 import pandas as pd
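Note: this hunk carries the mechanism behind the whole commit. With PEP 563's `from __future__ import annotations`, every annotation is stored as a plain string and never evaluated at import time, which is what makes the PEP 585 builtin generics (`dict`, `list`, `tuple`) and PEP 604 unions (`X | None`) safe to write in annotations on interpreters older than 3.9/3.10, so the `Dict`/`List`/`Tuple`/`Optional`/`Union` imports can be dropped. A minimal standalone sketch (not xarray code):

```python
# Why the future import makes the new spellings safe on old Pythons:
# annotations become strings and are never executed at runtime.
from __future__ import annotations


def lookup(table: dict[str, int], key: str | None = None) -> int | None:
    # `dict[str, int]` and `str | None` above are stored as strings,
    # so they never evaluate, even on Python 3.8.
    return table.get(key) if key is not None else None


print(lookup.__annotations__)
# {'table': 'dict[str, int]', 'key': 'str | None', 'return': 'int | None'}
```

The future import only covers annotations: a generic or union used as a runtime value (e.g. subscripting `dict[...]` in an expression, or `X | Y` passed to `isinstance`) would still need the `typing` equivalents on older interpreters.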
@@ -59,12 +49,12 @@ class IndexSelResult:

    """

-    dim_indexers: Dict[Any, Any]
-    indexes: Dict[Any, "Index"] = field(default_factory=dict)
-    variables: Dict[Any, "Variable"] = field(default_factory=dict)
-    drop_coords: List[Hashable] = field(default_factory=list)
-    drop_indexes: List[Hashable] = field(default_factory=list)
-    rename_dims: Dict[Any, Hashable] = field(default_factory=dict)
+    dim_indexers: dict[Any, Any]
+    indexes: dict[Any, Index] = field(default_factory=dict)
+    variables: dict[Any, Variable] = field(default_factory=dict)
+    drop_coords: list[Hashable] = field(default_factory=list)
+    drop_indexes: list[Hashable] = field(default_factory=list)
+    rename_dims: dict[Any, Hashable] = field(default_factory=dict)

    def as_tuple(self):
        """Unlike ``dataclasses.astuple``, return a shallow copy.
@@ -82,7 +72,7 @@ def as_tuple(self):
        )


-def merge_sel_results(results: List[IndexSelResult]) -> IndexSelResult:
+def merge_sel_results(results: list[IndexSelResult]) -> IndexSelResult:
    all_dims_count = Counter([dim for res in results for dim in res.dim_indexers])
    duplicate_dims = {k: v for k, v in all_dims_count.items() if v > 1}

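Note on the context lines above: `merge_sel_results` detects conflicts by flattening every result's `dim_indexers` keys into a `Counter`; any dimension counted more than once was matched by two different indexes. A toy version, with plain dicts standing in for `IndexSelResult`:

```python
from collections import Counter

results = [{"x": 0, "y": 1}, {"y": 2}]
all_dims_count = Counter(dim for res in results for dim in res)
duplicate_dims = {k: v for k, v in all_dims_count.items() if v > 1}
print(duplicate_dims)  # {'y': 2} -> 'y' was matched by two indexes
```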
@@ -124,13 +114,13 @@ def group_indexers_by_index(
    obj: T_Xarray,
    indexers: Mapping[Any, Any],
    options: Mapping[str, Any],
-) -> List[Tuple["Index", Dict[Any, Any]]]:
+) -> list[tuple[Index, dict[Any, Any]]]:
    """Returns a list of unique indexes and their corresponding indexers."""
    unique_indexes = {}
-    grouped_indexers: Mapping[Union[int, None], Dict] = defaultdict(dict)
+    grouped_indexers: Mapping[int | None, dict] = defaultdict(dict)

    for key, label in indexers.items():
-        index: "Index" = obj.xindexes.get(key, None)
+        index: Index = obj.xindexes.get(key, None)

        if index is not None:
            index_id = id(index)
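Note: the loop above buckets indexers by the identity of their index object, so each unique index can later resolve all of its own labels in one call; keys with no index share the `None` bucket. A toy version with stand-in objects:

```python
from __future__ import annotations

from collections import defaultdict

index_a = object()
xindexes = {"x": index_a, "y": index_a}  # two coords sharing one index
indexers = {"x": 0, "y": 1, "z": 2}      # "z" has no index

grouped: dict[int | None, dict] = defaultdict(dict)
for key, label in indexers.items():
    index = xindexes.get(key)
    grouped[id(index) if index is not None else None][key] = label

print(list(grouped.values()))  # [{'x': 0, 'y': 1}, {'z': 2}]
```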
@@ -787,7 +777,7 @@ class IndexingSupport(enum.Enum):

def explicit_indexing_adapter(
    key: ExplicitIndexer,
-    shape: Tuple[int, ...],
+    shape: tuple[int, ...],
    indexing_support: IndexingSupport,
    raw_indexing_method: Callable,
) -> Any:
@@ -821,8 +811,8 @@ def explicit_indexing_adapter(


def decompose_indexer(
-    indexer: ExplicitIndexer, shape: Tuple[int, ...], indexing_support: IndexingSupport
-) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
+    indexer: ExplicitIndexer, shape: tuple[int, ...], indexing_support: IndexingSupport
+) -> tuple[ExplicitIndexer, ExplicitIndexer]:
    if isinstance(indexer, VectorizedIndexer):
        return _decompose_vectorized_indexer(indexer, shape, indexing_support)
    if isinstance(indexer, (BasicIndexer, OuterIndexer)):
@@ -848,9 +838,9 @@ def _decompose_slice(key, size):

def _decompose_vectorized_indexer(
    indexer: VectorizedIndexer,
-    shape: Tuple[int, ...],
+    shape: tuple[int, ...],
    indexing_support: IndexingSupport,
-) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
+) -> tuple[ExplicitIndexer, ExplicitIndexer]:
    """
    Decompose vectorized indexer to the successive two indexers, where the
    first indexer will be used to index backend arrays, while the second one
@@ -929,10 +919,10 @@ def _decompose_vectorized_indexer(


def _decompose_outer_indexer(
-    indexer: Union[BasicIndexer, OuterIndexer],
-    shape: Tuple[int, ...],
+    indexer: BasicIndexer | OuterIndexer,
+    shape: tuple[int, ...],
    indexing_support: IndexingSupport,
-) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
+) -> tuple[ExplicitIndexer, ExplicitIndexer]:
    """
    Decompose outer indexer to the successive two indexers, where the
    first indexer will be used to index backend arrays, while the second one
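Note: as the docstrings in these two hunks say, the `_decompose_*` helpers split one indexer into a backend part (restricted to whatever indexing the storage layer supports) and an in-memory numpy part, and composing the two must reproduce the original selection. A minimal numpy-only illustration of that contract:

```python
# A pointwise (vectorized) selection reproduced by first loading an
# orthogonal (outer) block, then picking elements from it in memory.
import numpy as np

array = np.arange(36).reshape(6, 6)
rows = np.array([0, 3])
cols = np.array([2, 4])

# backend step: outer indexing only, as a restricted backend might allow
loaded = array[rows[:, None], cols[None, :]]   # 2x2 block
# in-memory step: vectorized pick on the loaded block
result = loaded[np.arange(2), np.arange(2)]

assert (result == array[rows, cols]).all()     # same as direct indexing
```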
@@ -973,7 +963,7 @@ def _decompose_outer_indexer(
        return indexer, BasicIndexer(())
    assert isinstance(indexer, (OuterIndexer, BasicIndexer))

-    backend_indexer: List[Any] = []
+    backend_indexer: list[Any] = []
    np_indexer = []
    # make indexer positive
    pos_indexer: list[np.ndarray | int | np.number] = []
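Note: the `# make indexer positive` step normalizes negative positions before the decomposition logic runs, so later bounds reasoning can assume non-negative keys. Roughly, for one array key (illustrative, not the exact xarray code):

```python
import numpy as np

size = 6
k = np.array([-1, 2, -3])
# negative labels wrap around the axis; normalize to 0 <= k < size
pos_k = np.where(k < 0, k + size, k)
print(pos_k)  # [5 2 3]
```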
@@ -1395,7 +1385,7 @@ def __array__(self, dtype: DTypeLike = None) -> np.ndarray:
        return np.asarray(array.values, dtype=dtype)

    @property
-    def shape(self) -> Tuple[int]:
+    def shape(self) -> tuple[int]:
        return (len(self.array),)

    def _convert_scalar(self, item):
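Note: `tuple[int]` here is not a typo for `tuple[int, ...]`: without the ellipsis it promises a tuple of exactly one `int`, which matches this adapter wrapping an always-one-dimensional `pandas.Index`, while the `shape` parameters earlier in the diff use `tuple[int, ...]` for arbitrary rank. For contrast:

```python
from __future__ import annotations

one_d: tuple[int] = (5,)         # exactly one element, like this adapter's shape
any_d: tuple[int, ...] = (2, 3)  # any number of ints, like the backend shapes
```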
@@ -1420,13 +1410,13 @@ def _convert_scalar(self, item):

    def __getitem__(
        self, indexer
-    ) -> Union[
-        "PandasIndexingAdapter",
-        NumpyIndexingAdapter,
-        np.ndarray,
-        np.datetime64,
-        np.timedelta64,
-    ]:
+    ) -> (
+        PandasIndexingAdapter
+        | NumpyIndexingAdapter
+        | np.ndarray
+        | np.datetime64
+        | np.timedelta64
+    ):
        key = indexer.tuple
        if isinstance(key, tuple) and len(key) == 1:
            # unpack key so it can index a pandas.Index object (pandas.Index
@@ -1449,7 +1439,7 @@ def transpose(self, order) -> pd.Index:
    def __repr__(self) -> str:
        return f"{type(self).__name__}(array={self.array!r}, dtype={self.dtype!r})"

-    def copy(self, deep: bool = True) -> "PandasIndexingAdapter":
+    def copy(self, deep: bool = True) -> PandasIndexingAdapter:
        # Not the same as just writing `self.array.copy(deep=deep)`, as
        # shallow copies of the underlying numpy.ndarrays become deep ones
        # upon pickling
@@ -1476,7 +1466,7 @@ def __init__(
        self,
        array: pd.MultiIndex,
        dtype: DTypeLike = None,
-        level: Optional[str] = None,
+        level: str | None = None,
    ):
        super().__init__(array, dtype)
        self.level = level
@@ -1535,7 +1525,7 @@ def _repr_html_(self) -> str:
        array_repr = short_numpy_repr(self._get_array_subset())
        return f"<pre>{escape(array_repr)}</pre>"

-    def copy(self, deep: bool = True) -> "PandasMultiIndexingAdapter":
+    def copy(self, deep: bool = True) -> PandasMultiIndexingAdapter:
        # see PandasIndexingAdapter.copy
        array = self.array.copy(deep=True) if deep else self.array
        return type(self)(array, self._dtype, self.level)