-
Notifications
You must be signed in to change notification settings - Fork 9
/
Copy pathmpi-omp.c
338 lines (314 loc) · 9.8 KB
/
mpi-omp.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
/*
* Copyright (c) 2013-2019 Triad National Security, LLC
* All rights reserved.
*
* This file is part of the libquo project. See the LICENSE file at the
* top-level directory of this distribution.
*/
/**
* a simple demo code that quiesces all the MPI processes on the node where
* MPI_COMM_WORLD rank 0 resides with a QUO_barrier and expands the cpuset of
* rank 0 to accommodate OpenMP threading.
*/
#include "quo.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <unistd.h>
#include "mpi.h"
typedef struct context_t {
    /* my rank in MPI_COMM_WORLD */
    int rank;
    /* total number of ranks in MPI_COMM_WORLD */
    int nranks;
    /* number of compute nodes in our job */
    int nnodes;
    /* number of ranks that share this node with me (includes myself) */
    int nnoderanks;
    /* my node-local rank (QUO id); 0 identifies the node's "leader" */
    int noderank;
    /* whether or not MPI_Init has completed -- gates MPI_Finalize in fini() */
    bool mpi_inited;
    /* number of sockets on the node */
    int nsockets;
    /* number of cores on the node */
    int ncores;
    /* number of pus (processing units - e.g hw threads) */
    int npus;
    /* quo major version */
    int qv;
    /* quo minor version */
    int qsv;
    /* pointer to initial stringification of our cpuset (heap-allocated by
     * QUO_stringify_cbind; freed in fini()) */
    char *cbindstr;
    /* flag indicating whether or not we are initially bound */
    int bound;
    /* our quo context (the thing that gets passed around all over the place).
     * filler words that make this comment line look mo better... */
    QUO_context quo;
} context_t;
/**
 * rudimentary "pretty print" helper: barrier first, then stagger each rank's
 * wake-up by rank so output tends to appear in rank order. demo-only.
 */
static inline void
demo_emit_sync(const context_t *c)
{
    const int stagger_us = c->rank * 1000; /* 1 ms per rank */
    MPI_Barrier(MPI_COMM_WORLD);
    usleep(stagger_us);
}
/**
 * tears down a context: releases the quo context, finalizes MPI (if we
 * initialized it), and frees all heap storage. safe to call with NULL or
 * with a partially-initialized context. returns 0 on success, 1 on error.
 */
static int
fini(context_t *c)
{
    if (!c) return 1;
    int nerrs = 0;
    /* only destruct a quo context that was actually created; handing a NULL
     * handle to QUO_free would just manufacture a spurious error on early
     * init-failure paths. */
    if (c->quo && QUO_SUCCESS != QUO_free(c->quo)) nerrs++;
    /* finalize mpi AFTER QUO_free - we may mpi in our destruct */
    if (c->mpi_inited) MPI_Finalize();
    /* free(NULL) is a no-op, so no guard needed */
    free(c->cbindstr);
    free(c);
    return (nerrs ? 1 : 0);
}
/**
 * allocates and initializes a context: initializes MPI, gathers basic job
 * info, and creates the libquo context. on success stores the new context
 * in *c and returns 0; on failure cleans up after itself and returns 1.
 *
 * i'm being really sloppy here. ideally, one should probably save the rc and
 * then display or do some other cool thing with it. don't be like this code. if
 * QUO_construct or QUO_init fail, then someone using this could just continue
 * without the awesomeness that is libquo. they cleanup after themselves, so
 * things *should* be in an okay state after an early failure. the failures may
 * be part of a larger problem, however. */
static int
init(context_t **c)
{
    context_t *newc = NULL;
    /* alloc our context */
    if (NULL == (newc = calloc(1, sizeof(*newc)))) goto err;
    /* libquo requires that MPI be initialized before its init is called */
    if (MPI_SUCCESS != MPI_Init(NULL, NULL)) goto err;
    /* record this IMMEDIATELY so fini() finalizes MPI even if a later step
     * fails -- setting it only after QUO_create would leak an initialized
     * MPI runtime on QUO_version/QUO_create failure. */
    newc->mpi_inited = true;
    /* gather some basic job info from our mpi lib */
    if (MPI_SUCCESS != MPI_Comm_size(MPI_COMM_WORLD, &(newc->nranks))) goto err;
    /* ...and more */
    if (MPI_SUCCESS != MPI_Comm_rank(MPI_COMM_WORLD, &(newc->rank))) goto err;
    /* can be called at any point -- even before init and construct. */
    if (QUO_SUCCESS != QUO_version(&(newc->qv), &(newc->qsv))) goto err;
    /* relatively expensive call. you only really want to do this once at the
     * beginning of time and pass the context all over the place within your
     * code. */
    if (QUO_SUCCESS != QUO_create(&newc->quo, MPI_COMM_WORLD)) goto err;
    *c = newc;
    return 0;
err:
    (void)fini(newc);
    return 1;
}
/**
 * gathers system and job topology info from libquo into the context:
 * socket/core/pu counts, initial binding state, node and node-rank counts.
 * returns 0 on success; on any libquo failure prints the offending call
 * to stderr and returns 1.
 */
static int
sys_grok(context_t *c)
{
    const char *offender = NULL;
    /* this interface is more powerful, but the other n* calls can be more
     * convenient. at any rate, this is an example of the
     * QUO_nobjs_in_type_by_type interface to get the number of sockets on
     * the machine. note: you can also use the QUO_nsockets or
     * QUO_nobjs_by_type to get the same info. */
    if (QUO_SUCCESS != QUO_nobjs_in_type_by_type(c->quo,
                                                 QUO_OBJ_MACHINE,
                                                 0,
                                                 QUO_OBJ_SOCKET,
                                                 &c->nsockets)) {
        offender = "QUO_nobjs_in_type_by_type";
    }
    else if (QUO_SUCCESS != QUO_ncores(c->quo, &c->ncores)) {
        offender = "QUO_ncores";
    }
    else if (QUO_SUCCESS != QUO_npus(c->quo, &c->npus)) {
        offender = "QUO_npus";
    }
    else if (QUO_SUCCESS != QUO_bound(c->quo, &c->bound)) {
        offender = "QUO_bound";
    }
    else if (QUO_SUCCESS != QUO_stringify_cbind(c->quo, &c->cbindstr)) {
        offender = "QUO_stringify_cbind";
    }
    else if (QUO_SUCCESS != QUO_nnodes(c->quo, &c->nnodes)) {
        offender = "QUO_nnodes";
    }
    else if (QUO_SUCCESS != QUO_nqids(c->quo, &c->nnoderanks)) {
        offender = "QUO_nqids";
    }
    else if (QUO_SUCCESS != QUO_id(c->quo, &c->noderank)) {
        offender = "QUO_id";
    }
    if (offender) {
        fprintf(stderr, "%s: %s failure :-(\n", __func__, offender);
        return 1;
    }
    return 0;
}
/**
 * prints this process's current cpuset string and bound/unbound status,
 * synchronized across ranks via demo_emit_sync. returns 0 on success,
 * 1 on libquo failure (after reporting the failing call to stderr).
 */
static int
emit_bind_state(const context_t *c)
{
    char *binding = NULL;
    int is_bound = 0;
    const char *offender = NULL;

    demo_emit_sync(c);
    if (QUO_SUCCESS != QUO_stringify_cbind(c->quo, &binding)) {
        offender = "QUO_stringify_cbind";
    }
    else if (QUO_SUCCESS != QUO_bound(c->quo, &is_bound)) {
        offender = "QUO_bound";
    }
    else {
        printf("### process %d rank %d [%s] bound: %s\n",
               (int)getpid(), c->rank, binding, is_bound ? "true" : "false");
        fflush(stdout);
    }
    demo_emit_sync(c);
    free(binding); /* free(NULL) is a no-op */
    if (offender) {
        fprintf(stderr, "%s: %s failure :-(\n", __func__, offender);
        return 1;
    }
    return 0;
}
/**
 * emits per-node system basics (quo version, node/rank/socket/core/pu
 * counts). exactly one process per node -- the one with node rank 0 --
 * does the printing; everyone then syncs. always returns 0.
 */
static int
emit_node_basics(const context_t *c)
{
    /* only the node's rank-0 process emits this info */
    const bool i_am_node_leader = (0 == c->noderank);
    if (i_am_node_leader) {
        printf("### quo version: %d.%d ###\n", c->qv, c->qsv);
        printf("### nnodes: %d\n", c->nnodes);
        printf("### nnoderanks: %d\n", c->nnoderanks);
        printf("### nsockets: %d\n", c->nsockets);
        printf("### ncores: %d\n", c->ncores);
        printf("### npus: %d\n", c->npus);
        fflush(stdout);
    }
    demo_emit_sync(c);
    return 0;
}
/**
 * expands the caller's cpuset to all available resources on the node by
 * pushing a machine-wide binding policy. returns 0 on success, 1 on failure.
 */
static int
bindup_node(const context_t *c)
{
    /* if you are going to change bindings often, then cache this */
    const int qrc = QUO_bind_push(c->quo, QUO_BIND_PUSH_OBJ,
                                  QUO_OBJ_MACHINE, -1);
    return (QUO_SUCCESS == qrc) ? 0 : 1;
}
/**
 * reverts the binding policy pushed by bindup_node to the previous policy.
 * returns 0 on success, 1 on failure.
 */
static int
binddown_node(const context_t *c)
{
    return (QUO_SUCCESS == QUO_bind_pop(c->quo)) ? 0 : 1;
}
/**
 * stand-in for the real OpenMP workload: announces the rank's current
 * cpuset, then sleeps to simulate threaded computation. returns 0 on
 * success, 1 if the cpuset could not be stringified.
 */
int
do_omp_things(const context_t *c)
{
    char *cpuset_str = NULL;

    printf("rank %d about to do OpenMP things!\n", c->rank);
    if (QUO_SUCCESS != QUO_stringify_cbind(c->quo, &cpuset_str)) {
        return 1;
    }
    printf("rank %d's cpuset: %s\n", c->rank, cpuset_str);
    free(cpuset_str);

    printf("rank %d is now threading up life in OMP land...\n", c->rank);
    sleep(2); /* do real work here... */
    printf("rank %d is now done threading up life in OMP land...\n", c->rank);
    return 0;
}
/**
 * orchestrates the OMP region: rank 0 expands its binding, runs the
 * threaded work, and reverts; its node-mates quiesce on a cheap QUO_barrier;
 * everyone then meets at a global MPI barrier. returns 0 on success.
 */
int
enter_omp_region(const context_t *c)
{
    /* FIXME - assumes that ranks are assigned by filling in a node at a
     * time. Easy to create a new, more general version, but this is a demo
     * code, so why are you doing weird mapping things anyway!?!
     */
    const bool on_rank_0s_node = (c->rank < c->nnoderanks);

    if (on_rank_0s_node) {
        if (0 == c->rank) {
            fprintf(stdout, "getting ready for OMP region...\n");
            /* change policy before the OMP calculation */
            if (bindup_node(c)) return 1;
            /* do the calculation */
            if (do_omp_things(c)) return 1;
            /* revert to old binding policy */
            if (binddown_node(c)) return 1;
        }
        /* node-mates wait for rank 0 completion. QUO_barrier because it's
         * cheaper than MPI_Barrier on a node. */
        if (QUO_SUCCESS != QUO_barrier(c->quo)) return 1;
    }
    /* everyone -- on or off rank 0's node -- meets at the global barrier */
    return (MPI_SUCCESS == MPI_Barrier(MPI_COMM_WORLD)) ? 0 : 1;
}
/**
 * demo driver: initialize MPI + libquo, report node topology and binding
 * state, run the OMP-region demo, then tear everything down. exits with
 * EXIT_SUCCESS on success, EXIT_FAILURE after reporting the failing step.
 */
int
main(void)
{
    int erc = EXIT_SUCCESS;
    const char *failed_step = NULL;
    context_t *context = NULL;

    /* unbuffered stdout so demo output appears promptly */
    setbuf(stdout, NULL);

    /* init code -- after this, libquo is ready for service */
    if (init(&context)) {
        failed_step = "init";
    }
    /* first gather some info so we can base our decisions on our current
     * situation. */
    else if (sys_grok(context)) {
        failed_step = "sys_grok";
    }
    else if (emit_node_basics(context)) {
        failed_step = "emit_node_basics";
    }
    else if (emit_bind_state(context)) {
        failed_step = "emit_bind_state";
    }
    /* change binding policies to accommodate OMP threads on node 0 */
    else if (enter_omp_region(context)) {
        failed_step = "enter_omp_region";
    }

    if (NULL != failed_step) {
        fprintf(stderr, "XXX %s failure in: %s\n", __FILE__, failed_step);
        erc = EXIT_FAILURE;
    }
    (void)fini(context);
    return erc;
}