@@ -231,9 +231,9 @@ def find_group_cohorts(labels, chunks, merge: bool = True):
 
 
 def rechunk_for_cohorts(
-    array,
+    array: DaskArray,
     axis: T_Axis,
-    labels,
+    labels: np.ndarray,
     force_new_chunk_at,
     chunksize=None,
     ignore_old_chunks=False,
@@ -326,7 +326,7 @@ def rechunk_for_cohorts(
     return array.rechunk({axis: newchunks})
 
 
-def rechunk_for_blockwise(array, axis: T_Axis, labels):
+def rechunk_for_blockwise(array: DaskArray, axis: T_Axis, labels: np.ndarray):
     """
     Rechunks array so that group boundaries line up with chunk boundaries, allowing
     embarassingly parallel group reductions.
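
(A minimal usage sketch for the annotated signature above, assuming the function is importable from flox.core as in this diff and that dask and numpy are installed; after rechunking, each label's members fall inside a single chunk, so a groupby reduction can run blockwise without a cross-chunk combine.)

import dask.array as da
import numpy as np

from flox.core import rechunk_for_blockwise

labels = np.repeat(np.arange(4), 25)                   # 4 groups, 25 members each
array = da.random.random((10, 100), chunks=(10, 30))   # chunk edges cut across groups

rechunked = rechunk_for_blockwise(array, axis=1, labels=labels)
print(rechunked.chunks[1])                             # chunk boundaries now align with label boundaries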
@@ -863,9 +863,9 @@ def _conc2(x_chunk, key1, key2=slice(None), axis: T_Axes = None) -> np.ndarray:
     # return concatenate3(mapped)
 
 
-def reindex_intermediates(x, agg, unique_groups):
+def reindex_intermediates(x: IntermediateDict, agg: Aggregation, unique_groups) -> IntermediateDict:
     new_shape = x["groups"].shape[:-1] + (len(unique_groups),)
-    newx = {"groups": np.broadcast_to(unique_groups, new_shape)}
+    newx: IntermediateDict = {"groups": np.broadcast_to(unique_groups, new_shape)}
     newx["intermediates"] = tuple(
         reindex_(
             v, from_=np.atleast_1d(x["groups"].squeeze()), to=pd.Index(unique_groups), fill_value=f
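
(For context, reindex_intermediates pads each block's per-group intermediates out to the full set of unique_groups so that intermediates from different blocks line up on a common group axis. A conceptual sketch of that reindexing step in plain numpy/pandas, not flox's internal reindex_ helper; the fill value below plays the role of f, the aggregation's identity.)

import numpy as np
import pandas as pd

# Groups present in one block, and that block's per-group sums.
block_groups = np.array([0, 2])
block_sums = np.array([10.0, 5.0])

# Union of groups across all blocks; groups missing from this block
# are filled with the aggregation's identity (0 for "sum").
unique_groups = pd.Index([0, 1, 2, 3])
positions = unique_groups.get_indexer(block_groups)

reindexed = np.full(len(unique_groups), 0.0)
reindexed[positions] = block_sums
print(reindexed)  # [10.  0.  5.  0.]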
@@ -875,7 +875,7 @@ def reindex_intermediates(x, agg, unique_groups):
     return newx
 
 
-def listify_groups(x):
+def listify_groups(x: IntermediateDict):
     return list(np.atleast_1d(x["groups"].squeeze()))
 
 