@@ -185,16 +185,16 @@ def _peaks_from_sh_parallel(args):
185
185
absolute_threshold , min_separation_angle ,
186
186
npeaks , normalize_peaks , chunk_id , is_symmetric ) = args
187
187
188
- peak_dirs , peak_values , peak_indices = _peaks_from_sh_2d (
188
+ peak_dirs , peak_values , peak_indices = _peaks_from_sh_loop (
189
189
shm_coeff , B , sphere , relative_peak_threshold ,
190
190
absolute_threshold , min_separation_angle , npeaks ,
191
191
normalize_peaks , is_symmetric )
192
192
return chunk_id , peak_dirs , peak_values , peak_indices
193
193
194
194
195
- def _peaks_from_sh_2d (shm_coeff , B , sphere , relative_peak_threshold ,
196
- absolute_threshold , min_separation_angle , npeaks ,
197
- normalize_peaks , is_symmetric ):
195
+ def _peaks_from_sh_loop (shm_coeff , B , sphere , relative_peak_threshold ,
196
+ absolute_threshold , min_separation_angle , npeaks ,
197
+ normalize_peaks , is_symmetric ):
198
198
"""
199
199
Loops on 2D (ravelled) data and fits each voxel separately.
200
200
See peaks_from_sh for a complete description of parameters.
@@ -307,7 +307,7 @@ def peaks_from_sh(shm_coeff, sphere, mask=None, relative_peak_threshold=0.5,
307
307
# (codecov does not deal well with multiprocessing)
308
308
if nbr_processes == 1 :
309
309
(tmp_peak_dirs_array , tmp_peak_values_array ,
310
- tmp_peak_indices_array ) = _peaks_from_sh_2d (
310
+ tmp_peak_indices_array ) = _peaks_from_sh_loop (
311
311
shm_coeff , B , sphere , relative_peak_threshold ,
312
312
absolute_threshold , min_separation_angle , npeaks ,
313
313
normalize_peaks , is_symmetric )
@@ -358,13 +358,13 @@ def _maps_from_sh_parallel(args):
358
358
(shm_coeff , peak_values , peak_indices , B , sphere ,
359
359
gfa_thr , chunk_id ) = args
360
360
361
- res = _maps_from_sh_2d (shm_coeff , peak_values , peak_indices , B ,
362
- sphere , gfa_thr )
361
+ res = _maps_from_sh_loop (shm_coeff , peak_values , peak_indices , B ,
362
+ sphere , gfa_thr )
363
363
return chunk_id , * res
364
364
365
365
366
- def _maps_from_sh_2d (shm_coeff , peak_values , peak_indices , B , sphere ,
367
- gfa_thr ):
366
+ def _maps_from_sh_loop (shm_coeff , peak_values , peak_indices , B , sphere ,
367
+ gfa_thr ):
368
368
"""
369
369
Loops on 2D (ravelled) data and fits each voxel separately.
370
370
For a more complete description of parameters, see maps_from_sh.
@@ -462,7 +462,7 @@ def maps_from_sh(shm_coeff, peak_values, peak_indices, sphere,
462
462
if nbr_processes == 1 :
463
463
(tmp_nufo_map_array , tmp_afd_max_array , tmp_afd_sum_array ,
464
464
tmp_rgb_map_array , tmp_gfa_map_array , tmp_qa_map_array ,
465
- all_time_max_odf , all_time_global_max ) = _maps_from_sh_2d (
465
+ all_time_max_odf , all_time_global_max ) = _maps_from_sh_loop (
466
466
shm_coeff , peak_values , peak_indices ,
467
467
B , sphere , gfa_thr )
468
468
else :
@@ -539,12 +539,12 @@ def maps_from_sh(shm_coeff, peak_values, peak_indices, sphere,
539
539
540
540
def _convert_sh_basis_parallel (args ):
541
541
(sh , B_in , invB_out , chunk_id ) = args
542
- sh = _convert_sh_basis_2d (sh , B_in , invB_out )
542
+ sh = _convert_sh_basis_loop (sh , B_in , invB_out )
543
543
544
544
return chunk_id , sh
545
545
546
546
547
- def _convert_sh_basis_2d (sh , B_in , invB_out ):
547
+ def _convert_sh_basis_loop (sh , B_in , invB_out ):
548
548
"""
549
549
Loops on 2D (ravelled) data and fits each voxel separately.
550
550
For a more complete description of parameters, see convert_sh_basis.
@@ -625,7 +625,7 @@ def convert_sh_basis(shm_coeff, sphere, mask=None,
625
625
# Separating the case nbr_processes=1 to help get good coverage metrics
626
626
# (codecov does not deal well with multiprocessing)
627
627
if nbr_processes == 1 :
628
- tmp_shm_coeff_array = _convert_sh_basis_2d (shm_coeff , B_in , invB_out )
628
+ tmp_shm_coeff_array = _convert_sh_basis_loop (shm_coeff , B_in , invB_out )
629
629
else :
630
630
# Separate the data in chunks of len(nbr_processes).
631
631
shm_coeff_chunks = np .array_split (shm_coeff , nbr_processes )
@@ -653,17 +653,24 @@ def convert_sh_basis(shm_coeff, sphere, mask=None,
653
653
654
654
655
655
def _convert_sh_to_sf_parallel(args):
    """
    Unpack one chunk's arguments, convert it, and tag the result.

    Wrapper used as the multiprocessing.Pool.map target: ``args`` is the
    tuple (sh, B_in, new_output_dim, chunk_id). Returns (chunk_id, sf) so
    the caller can re-assemble chunks in their original order.
    """
    sh, B_in, new_output_dim, chunk_id = args
    return chunk_id, _convert_sh_to_sf_loop(sh, new_output_dim, B_in)
659
+
660
+
661
+ def _convert_sh_to_sf_loop (sh , new_output_dim , B_in ):
662
+ """
663
+ Loops on 2D data and fits each voxel separately.
664
+ See convert_sh_to_sf for more information.
665
+ """
666
+ # Data: Ravelled 4D data. Shape [N, X] where N is the number of voxels.
660
667
sf = np .zeros ((sh .shape [0 ], new_output_dim ), dtype = np .float32 )
661
668
662
669
for idx in range (sh .shape [0 ]):
663
670
if sh [idx ].any ():
664
671
sf [idx ] = np .dot (sh [idx ], B_in )
665
672
666
- return chunk_id , sf
673
+ return sf
667
674
668
675
669
676
def convert_sh_to_sf (shm_coeff , sphere , mask = None , dtype = "float32" ,
@@ -716,30 +723,40 @@ def convert_sh_to_sf(shm_coeff, sphere, mask=None, dtype="float32",
716
723
if mask is None :
717
724
mask = np .sum (shm_coeff , axis = 3 ).astype (bool )
718
725
726
+ output_dim = len (sphere .vertices )
727
+ new_shape = data_shape [:3 ] + (output_dim ,)
728
+
719
729
# Ravel the first 3 dimensions while keeping the 4th intact, like a list of
720
- # 1D time series voxels. Then separate it in chunks of len(nbr_processes).
730
+ # 1D time series voxels.
721
731
shm_coeff = shm_coeff [mask ].reshape (
722
732
(np .count_nonzero (mask ), data_shape [3 ]))
723
- shm_coeff_chunks = np .array_split (shm_coeff , nbr_processes )
724
- chunk_len = np .cumsum ([0 ] + [len (c ) for c in shm_coeff_chunks ])
725
-
726
- pool = multiprocessing .Pool (nbr_processes )
727
- results = pool .map (_convert_sh_to_sf_parallel ,
728
- zip (shm_coeff_chunks ,
729
- itertools .repeat (B_in ),
730
- itertools .repeat (len (sphere .vertices )),
731
- np .arange (len (shm_coeff_chunks ))))
732
- pool .close ()
733
- pool .join ()
734
-
735
- # Re-assemble the chunk together in the original shape.
736
- new_shape = data_shape [:3 ] + (len (sphere .vertices ),)
737
- sf_array = np .zeros (new_shape , dtype = dtype )
738
- tmp_sf_array = np .zeros ((np .count_nonzero (mask ), new_shape [3 ]),
739
- dtype = dtype )
740
- for i , new_sf in results :
741
- tmp_sf_array [chunk_len [i ]:chunk_len [i + 1 ], :] = new_sf
742
733
734
+ # Separating the case nbr_processes=1 to help get good coverage metrics
735
+ # (codecov does not deal well with multiprocessing)
736
+ if nbr_processes == 1 :
737
+ tmp_sf_array = _convert_sh_to_sf_loop (shm_coeff , output_dim , B_in )
738
+ else :
739
+ # Separate the data in chunks of len(nbr_processes).
740
+ shm_coeff_chunks = np .array_split (shm_coeff , nbr_processes )
741
+
742
+ pool = multiprocessing .Pool (nbr_processes )
743
+ results = pool .map (_convert_sh_to_sf_parallel ,
744
+ zip (shm_coeff_chunks ,
745
+ itertools .repeat (B_in ),
746
+ itertools .repeat (output_dim ),
747
+ np .arange (len (shm_coeff_chunks ))))
748
+ pool .close ()
749
+ pool .join ()
750
+
751
+ # Re-assemble the chunk together.
752
+ chunk_len = np .cumsum ([0 ] + [len (c ) for c in shm_coeff_chunks ])
753
+ tmp_sf_array = np .zeros ((np .count_nonzero (mask ), new_shape [3 ]),
754
+ dtype = dtype )
755
+ for i , new_sf in results :
756
+ tmp_sf_array [chunk_len [i ]:chunk_len [i + 1 ], :] = new_sf
757
+
758
+ # Bring back to the original shape
759
+ sf_array = np .zeros (new_shape , dtype = dtype )
743
760
sf_array [mask ] = tmp_sf_array
744
761
745
762
return sf_array
0 commit comments