@@ -540,19 +540,6 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
         goto error;
     }
 
-
-    /* determine the bitflag belonging to the threadlevel_support provided */
-    memset(&threadlevel_bf, 0, sizeof(uint8_t));
-    OMPI_THREADLEVEL_SET_BITFLAG(ompi_mpi_thread_provided, threadlevel_bf);
-
-    /* add this bitflag to the modex */
-    OPAL_MODEX_SEND_STRING(ret, OPAL_PMIX_GLOBAL,
-                           "MPI_THREAD_LEVEL", &threadlevel_bf, sizeof(uint8_t));
-    if (OPAL_SUCCESS != ret) {
-        error = "ompi_mpi_init: modex send thread level";
-        goto error;
-    }
-
     /* initialize datatypes. This step should be done early as it will
      * create the local convertor and local arch used in the proc
      * init.
@@ -568,25 +555,6 @@
         goto error;
     }
 
-    /* Initialize the op framework. This has to be done *after*
-       ddt_init, but befor mca_coll_base_open, since some collective
-       modules (e.g., the hierarchical coll component) may need ops in
-       their query function. */
-    if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_op_base_framework, 0))) {
-        error = "ompi_op_base_open() failed";
-        goto error;
-    }
-    if (OMPI_SUCCESS !=
-        (ret = ompi_op_base_find_available(OPAL_ENABLE_PROGRESS_THREADS,
-                                           ompi_mpi_thread_multiple))) {
-        error = "ompi_op_base_find_available() failed";
-        goto error;
-    }
-    if (OMPI_SUCCESS != (ret = ompi_op_init())) {
-        error = "ompi_op_init() failed";
-        goto error;
-    }
-
     /* Open up MPI-related MCA components */
 
     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&opal_allocator_base_framework, 0))) {
@@ -601,6 +569,24 @@
         error = "mca_mpool_base_open() failed";
         goto error;
     }
+
+    /* We need to initialize the PML before mca_bml_base_open() and
+       ompi_op_base_find_available(), since PML selection may modify
+       ompi_mpi_thread_multiple, which is used by both. */
+
+    if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_pml_base_framework, 0))) {
+        error = "mca_pml_base_open() failed";
+        goto error;
+    }
+
+    /* Select which PML component to use */
+    if (OMPI_SUCCESS !=
+        (ret = mca_pml_base_select(OPAL_ENABLE_PROGRESS_THREADS,
+                                   ompi_mpi_thread_multiple))) {
+        error = "mca_pml_base_select() failed";
+        goto error;
+    }
+
     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_bml_base_framework, 0))) {
         error = "mca_bml_base_open() failed";
         goto error;
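
The rationale for this hunk, spelled out: mca_pml_base_select() may lower ompi_mpi_thread_multiple (for instance when the chosen PML cannot run at MPI_THREAD_MULTIPLE), so any framework that reads that flag has to run after PML selection. A minimal sketch of the hazard, using hypothetical names (sketch_pml_select and pml_supports_thread_multiple are illustrative stand-ins, not OMPI API):

#include <stdbool.h>

/* Set from the user's MPI_Init_thread() request before selection runs. */
static bool ompi_mpi_thread_multiple;

/* A PML that cannot support MPI_THREAD_MULTIPLE may downgrade the flag
 * during selection; the parameter is a stand-in for that capability check. */
static int sketch_pml_select(bool pml_supports_thread_multiple)
{
    if (ompi_mpi_thread_multiple && !pml_supports_thread_multiple) {
        ompi_mpi_thread_multiple = false;  /* provided level drops below MULTIPLE */
    }
    return 0;
}

/* Any framework opened before sketch_pml_select() would read a stale value
 * of ompi_mpi_thread_multiple -- hence the reordering in this commit. */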
@@ -609,10 +595,26 @@
         error = "mca_bml_base_init() failed";
         goto error;
     }
-    if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_pml_base_framework, 0))) {
-        error = "mca_pml_base_open() failed";
+
+    /* Initialize the op framework. This has to be done *after*
+       ddt_init, but before mca_coll_base_open, since some collective
+       modules (e.g., the hierarchical coll component) may need ops in
+       their query function. */
+    if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_op_base_framework, 0))) {
+        error = "ompi_op_base_open() failed";
+        goto error;
+    }
+    if (OMPI_SUCCESS !=
+        (ret = ompi_op_base_find_available(OPAL_ENABLE_PROGRESS_THREADS,
+                                           ompi_mpi_thread_multiple))) {
+        error = "ompi_op_base_find_available() failed";
+        goto error;
+    }
+    if (OMPI_SUCCESS != (ret = ompi_op_init())) {
+        error = "ompi_op_init() failed";
         goto error;
     }
+
     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_coll_base_framework, 0))) {
         error = "mca_coll_base_open() failed";
         goto error;
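
The relocated op-framework block keeps the usual three-phase MCA bring-up visible throughout this function: open the framework, query which components can run, then build the predefined objects. A rough sketch of that staging with the same goto-based error path used in ompi_mpi_init(); the demo_* functions are hypothetical stand-ins, not OMPI API:

#include <stdio.h>

/* Hypothetical hooks standing in for a framework's bring-up phases. */
static int demo_framework_open(void) { return 0; }  /* register + load components */
static int demo_find_available(void) { return 0; }  /* query which components can run */
static int demo_init(void)           { return 0; }  /* build the predefined objects */

static int sketch_bring_up_framework(void)
{
    int ret;
    const char *error = NULL;

    if (0 != (ret = demo_framework_open())) {
        error = "demo_framework_open() failed";
        goto err;
    }
    if (0 != (ret = demo_find_available())) {
        error = "demo_find_available() failed";
        goto err;
    }
    if (0 != (ret = demo_init())) {
        error = "demo_init() failed";
        goto err;
    }
    return 0;

err:
    /* Mirrors the single error label at the bottom of ompi_mpi_init(). */
    fprintf(stderr, "%s (ret = %d)\n", error, ret);
    return ret;
}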
@@ -630,21 +632,24 @@
     }
 #endif
 
+    /* determine the bitflag belonging to the threadlevel_support provided */
+    memset(&threadlevel_bf, 0, sizeof(uint8_t));
+    OMPI_THREADLEVEL_SET_BITFLAG(ompi_mpi_thread_provided, threadlevel_bf);
+
+    /* add this bitflag to the modex */
+    OPAL_MODEX_SEND_STRING(ret, OPAL_PMIX_GLOBAL,
+                           "MPI_THREAD_LEVEL", &threadlevel_bf, sizeof(uint8_t));
+    if (OPAL_SUCCESS != ret) {
+        error = "ompi_mpi_init: modex send thread level";
+        goto error;
+    }
+
     /* In order to reduce the common case for MPI apps (where they
        don't use MPI-2 IO or MPI-1 topology functions), the io and
        topo frameworks are initialized lazily, at the first use of
        relevant functions (e.g., MPI_FILE_*, MPI_CART_*, MPI_GRAPH_*),
        so they are not opened here. */
 
-    /* Select which MPI components to use */
-
-    if (OMPI_SUCCESS !=
-        (ret = mca_pml_base_select(OPAL_ENABLE_PROGRESS_THREADS,
-                                   ompi_mpi_thread_multiple))) {
-        error = "mca_pml_base_select() failed";
-        goto error;
-    }
-
     /* check for timing request - get stop time and report elapsed time if so */
     OPAL_TIMING_MNEXT((&tm,"time to execute modex"));
 
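For context on the relocated modex block: the provided thread level is packed into a single uint8_t bitflag and published through the modex under the "MPI_THREAD_LEVEL" key, so peers can compare thread support. A hedged sketch of one plausible encoding, assuming one bit per MPI_THREAD_* level (the real OMPI_THREADLEVEL_SET_BITFLAG macro may differ):

#include <stdint.h>
#include <string.h>

/* Illustrative stand-ins for the MPI_THREAD_* levels, in increasing order. */
enum { THREAD_SINGLE, THREAD_FUNNELED, THREAD_SERIALIZED, THREAD_MULTIPLE };

/* Assumption: each level maps to one bit of the flag byte. */
#define SKETCH_THREADLEVEL_SET_BITFLAG(level, bf) ((bf) |= (uint8_t)(1u << (level)))

static uint8_t encode_thread_level(int provided)
{
    uint8_t threadlevel_bf;
    memset(&threadlevel_bf, 0, sizeof(uint8_t));
    SKETCH_THREADLEVEL_SET_BITFLAG(provided, threadlevel_bf);
    return threadlevel_bf;  /* byte published under the "MPI_THREAD_LEVEL" key */
}

Moving this send after PML selection matters because, as noted above, selection can lower ompi_mpi_thread_provided; publishing beforehand would advertise a thread level the process no longer guarantees.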