|
/*
 * Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2011 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2013 Los Alamos National Security, LLC.
 *                         All rights reserved.
 * Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2011      Oak Ridge National Labs. All rights reserved.
 * Copyright (c) 2013-2020 Intel, Inc. All rights reserved.
 * Copyright (c) 2015      Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2019      IBM Corporation. All rights reserved.
 * Copyright (c) 2021-2022 Nanook Consulting. All rights reserved.
 * Copyright (c) 2022      Triad National Security, LLC.
 *                         All rights reserved.
 *
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 */

/*
 * This test simulates the way Open MPI uses PMIx_Group_construct to
 * implement the MPI-4 functions:
 * - MPI_Comm_create_from_group
 * - MPI_Intercomm_create_from_groups
 */
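
/*
 * Test flow:
 *  - construct a group spanning all procs in the job, asking the host to
 *    assign a group context ID and attaching a per-proc local CID as
 *    group info
 *  - destruct the group
 *  - retrieve each peer's local CID with PMIx_Get, passing the assigned
 *    group context ID as a directive
 */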

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

#include <pmix.h>
#include "examples.h"

static pmix_proc_t myproc;
static uint32_t get_timeout = 600; /* default 600 secs to get remote data */

static void notification_fn(size_t evhdlr_registration_id, pmix_status_t status,
                            const pmix_proc_t *source, pmix_info_t info[], size_t ninfo,
                            pmix_info_t results[], size_t nresults,
                            pmix_event_notification_cbfunc_fn_t cbfunc, void *cbdata)
{
    EXAMPLES_HIDE_UNUSED_PARAMS(evhdlr_registration_id, source,
                                info, ninfo, results, nresults,
                                cbfunc, cbdata);

    fprintf(stderr, "Client %s:%d NOTIFIED with status %d\n", myproc.nspace, myproc.rank, status);
}

static void op_callbk(pmix_status_t status, void *cbdata)
{
    mylock_t *lock = (mylock_t *) cbdata;

    fprintf(stderr, "Client %s:%d OP CALLBACK CALLED WITH STATUS %d\n", myproc.nspace, myproc.rank,
            status);
    lock->status = status;
    DEBUG_WAKEUP_THREAD(lock);
}

static void errhandler_reg_callbk(pmix_status_t status, size_t errhandler_ref, void *cbdata)
{
    mylock_t *lock = (mylock_t *) cbdata;

    fprintf(stderr,
            "Client %s:%d ERRHANDLER REGISTRATION CALLBACK CALLED WITH STATUS %d, ref=%lu\n",
            myproc.nspace, myproc.rank, status, (unsigned long) errhandler_ref);
    lock->status = status;
    DEBUG_WAKEUP_THREAD(lock);
}

int main(int argc, char **argv)
{
    int rc;
    pmix_value_t *val = NULL;
    pmix_proc_t proc, *procs;
    uint32_t nprocs, n;
    mylock_t lock;
    pmix_info_t *results, *info, tinfo[2];
    size_t nresults, cid, lcid, ninfo;
    pmix_data_array_t darray;
    void *grpinfo, *list;

    EXAMPLES_HIDE_UNUSED_PARAMS(argc, argv);

    /* init us */
    if (PMIX_SUCCESS != (rc = PMIx_Init(&myproc, NULL, 0))) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Init failed: %s\n", myproc.nspace, myproc.rank,
                PMIx_Error_string(rc));
        exit(0);
    }
    fprintf(stderr, "Client ns %s rank %d pid %lu: Running\n", myproc.nspace, myproc.rank,
            (unsigned long) getpid());

    PMIX_PROC_CONSTRUCT(&proc);
    PMIX_LOAD_PROCID(&proc, myproc.nspace, PMIX_RANK_WILDCARD);

    /* get our job size */
    if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val))) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Get job size failed: %s\n", myproc.nspace,
                myproc.rank, PMIx_Error_string(rc));
        goto done;
    }
    nprocs = val->data.uint32;
    PMIX_VALUE_RELEASE(val);
    /* and our job-level context ID */
    if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_GROUP_CONTEXT_ID, NULL, 0, &val))) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Get job context ID failed: %s\n", myproc.nspace,
                myproc.rank, PMIx_Error_string(rc));
        goto done;
    }
    cid = 0;
    PMIX_VALUE_GET_NUMBER(rc, val, cid, size_t);
    PMIX_VALUE_RELEASE(val);
    fprintf(stderr, "Client %s:%d job size %u CID %u\n", myproc.nspace, myproc.rank, nprocs,
            (unsigned) cid);

    /* register our default errhandler */
    DEBUG_CONSTRUCT_LOCK(&lock);
    PMIx_Register_event_handler(NULL, 0, NULL, 0, notification_fn, errhandler_reg_callbk,
                                (void *) &lock);
    DEBUG_WAIT_THREAD(&lock);
    rc = lock.status;
    DEBUG_DESTRUCT_LOCK(&lock);
    if (PMIX_SUCCESS != rc) {
        goto done;
    }

    /* call fence to sync */
    PMIX_PROC_CONSTRUCT(&proc);
    PMIX_LOAD_PROCID(&proc, myproc.nspace, PMIX_RANK_WILDCARD);
    if (PMIX_SUCCESS != (rc = PMIx_Fence(&proc, 1, NULL, 0))) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Fence failed: %d\n", myproc.nspace, myproc.rank,
                rc);
        goto done;
    }

    PMIX_PROC_CREATE(procs, nprocs);
    for (n = 0; n < nprocs; n++) {
        PMIX_PROC_LOAD(&procs[n], myproc.nspace, n);
    }

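    /* assemble the directives for the group construct, starting with a
     * request that the host assign a context ID to the new group */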
    grpinfo = PMIx_Info_list_start();
    rc = PMIx_Info_list_add(grpinfo, PMIX_GROUP_ASSIGN_CONTEXT_ID, NULL, PMIX_BOOL);
    if (PMIX_SUCCESS != rc) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Info_list_add failed: %s\n",
                myproc.nspace, myproc.rank, PMIx_Error_string(rc));
        goto done;
    }

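    /* each proc contributes its own local CID (1234 + rank here) as group
     * info so that peers can look it up later - this mirrors the way
     * Open MPI exchanges per-process communicator IDs */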
    list = PMIx_Info_list_start();
    lcid = 1234UL + (unsigned long) myproc.rank;
    rc = PMIx_Info_list_add(list, PMIX_GROUP_LOCAL_CID, &lcid, PMIX_SIZE);
    if (PMIX_SUCCESS != rc) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Info_list_add failed: %s\n",
                myproc.nspace, myproc.rank, PMIx_Error_string(rc));
        goto done;
    }
    rc = PMIx_Info_list_convert(list, &darray);
    if (PMIX_SUCCESS != rc) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Info_list_convert failed: %s\n",
                myproc.nspace, myproc.rank, PMIx_Error_string(rc));
        goto done;
    }
    rc = PMIx_Info_list_add(grpinfo, PMIX_GROUP_INFO, &darray, PMIX_DATA_ARRAY);
    if (PMIX_SUCCESS != rc) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Info_list_add failed: %s\n",
                myproc.nspace, myproc.rank, PMIx_Error_string(rc));
        goto done;
    }
    PMIx_Info_list_release(list);
    PMIX_DATA_ARRAY_DESTRUCT(&darray);

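    /* convert the completed directive list into the pmix_info_t array
     * expected by PMIx_Group_construct */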
    rc = PMIx_Info_list_convert(grpinfo, &darray);
    if (PMIX_SUCCESS != rc) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Info_list_convert failed: %s\n",
                myproc.nspace, myproc.rank, PMIx_Error_string(rc));
        goto done;
    }
    info = (pmix_info_t *) darray.array;
    ninfo = darray.size;
    PMIx_Info_list_release(grpinfo);

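    /* collective group construct across all procs in the job - blocks until
     * every listed participant has called it; the results array carries the
     * context ID assigned by the host */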
    rc = PMIx_Group_construct("ourgroup", procs, nprocs, info, ninfo, &results, &nresults);
    PMIX_DATA_ARRAY_DESTRUCT(&darray);
    if (PMIX_SUCCESS != rc) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Group_construct failed: %s\n",
                myproc.nspace, myproc.rank, PMIx_Error_string(rc));
        goto done;
    }
    /* we should have a single results object */
    if (NULL != results) {
        cid = 0;
        PMIX_VALUE_GET_NUMBER(rc, &results[0].value, cid, size_t);
        fprintf(stderr, "%d Group construct complete with status %s KEY %s CID %lu\n",
                myproc.rank, PMIx_Error_string(rc), results[0].key, (unsigned long) cid);
    } else {
        fprintf(stderr, "%d Group construct complete, but no CID returned\n", myproc.rank);
        goto done;
    }
    PMIX_PROC_FREE(procs, nprocs);

    /*
     * destruct the group
     */
    rc = PMIx_Group_destruct("ourgroup", NULL, 0);
    if (PMIX_SUCCESS != rc) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Group_destruct failed: %s\n", myproc.nspace,
                myproc.rank, PMIx_Error_string(rc));
        goto done;
    }

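    /* direct the subsequent Gets at the group's info by passing the context
     * ID returned from the construct call, and bound the wait for remote
     * data with a timeout */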
    PMIX_INFO_CONSTRUCT(&tinfo[0]);
    PMIX_INFO_LOAD(&tinfo[0], PMIX_GROUP_CONTEXT_ID, &cid, PMIX_SIZE);
    PMIX_INFO_CONSTRUCT(&tinfo[1]);
    PMIX_INFO_LOAD(&tinfo[1], PMIX_TIMEOUT, &get_timeout, PMIX_UINT32);

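    /* verify that each rank's published local CID matches the value
     * it stored (1234 + rank) */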
    for (n = 0; n < nprocs; n++) {
        proc.rank = n;
        if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_GROUP_LOCAL_CID, tinfo, 2, &val))) {
            fprintf(stderr, "Client ns %s rank %d: PMIx_Get of LOCAL CID for rank %d failed: %s\n",
                    myproc.nspace, myproc.rank, n, PMIx_Error_string(rc));
            continue;
        }
        /* the value was stored as PMIX_SIZE, so that is the type we expect back */
        if (PMIX_SIZE != val->type) {
            fprintf(stderr, "%s:%d: PMIx_Get LOCAL CID for rank %d returned wrong type: %s\n",
                    myproc.nspace, myproc.rank, n, PMIx_Data_type_string(val->type));
            PMIX_VALUE_RELEASE(val);
            continue;
        }
        if ((1234UL + (unsigned long) n) != val->data.size) {
            fprintf(stderr, "%s:%d: PMIx_Get LOCAL CID for rank %d returned wrong value: %lu\n",
                    myproc.nspace, myproc.rank, n, (unsigned long) val->data.size);
            PMIX_VALUE_RELEASE(val);
            continue;
        }
        PMIX_VALUE_RELEASE(val);
    }

done:
    /* finalize us */
    DEBUG_CONSTRUCT_LOCK(&lock);
    PMIx_Deregister_event_handler(1, op_callbk, &lock);
    DEBUG_WAIT_THREAD(&lock);
    DEBUG_DESTRUCT_LOCK(&lock);

    if (PMIX_SUCCESS != (rc = PMIx_Finalize(NULL, 0))) {
        fprintf(stderr, "Client ns %s rank %d: PMIx_Finalize failed: %s\n", myproc.nspace,
                myproc.rank, PMIx_Error_string(rc));
    }
    fprintf(stderr, "%s:%d COMPLETE\n", myproc.nspace, myproc.rank);
    fflush(stderr);
    return (0);
}