Cyclops Tensor Framework
parallel arithmetic on multidimensional arrays
common.cxx
/*Copyright (c) 2011, Edgar Solomonik, all rights reserved.*/

#include "common.h"
#include "../shared/util.h"
#include <random>

#ifdef USE_MPI_CPP
#define MPI_CXX_DOUBLE_COMPLEX MPI::DOUBLE_COMPLEX
#endif

namespace CTF {
  int DGTOG_SWITCH = 1;
}

namespace CTF_int {
  // 64-bit Mersenne Twister (the std::mt19937_64 parameterization)
  std::mersenne_twister_engine<std::uint_fast64_t, 64, 312, 156, 31,
                               0xb5026f5aa96619e9, 29,
                               0x5555555555555555, 17,
                               0x71d67fffeda60000, 37,
                               0xfff7eee000000000, 43, 6364136223846793005> rng;

  // initialize the random number generator with a rank-dependent seed
  void init_rng(int rank){
    rng.seed(rank);
  }

  // return a new random double in [0,1)
  double get_rand48(){
    return ((double)rng()-(double)rng.min())/rng.max();
  }

  //static double init_mdl[] = {COST_LATENCY, COST_LATENCY, COST_NETWBW};
  LinModel<3> alltoall_mdl(alltoall_mdl_init,"alltoall_mdl");
  LinModel<3> alltoallv_mdl(alltoallv_mdl_init,"alltoallv_mdl");

#ifdef BGQ
  //static double init_lg_mdl[] = {COST_LATENCY, COST_LATENCY, 0.0, COST_NETWBW + 2.0*COST_MEMBW};
#else
  //static double init_lg_mdl[] = {COST_LATENCY, COST_LATENCY, COST_NETWBW + 2.0*COST_MEMBW, 0.0};
#endif
  LinModel<3> red_mdl(red_mdl_init,"red_mdl");
  LinModel<3> red_mdl_cst(red_mdl_cst_init,"red_mdl_cst");
  LinModel<3> allred_mdl(allred_mdl_init,"allred_mdl");
  LinModel<3> allred_mdl_cst(allred_mdl_cst_init,"allred_mdl_cst");
  LinModel<3> bcast_mdl(bcast_mdl_init,"bcast_mdl");

  // convert index labels (e.g. characters) to contiguous integer ids;
  // writes the ids to *iidx and returns the number of distinct labels
  template <typename type>
  int conv_idx(int          order,
               type const * cidx,
               int **       iidx){
    int i, j, n;
    type c;

    *iidx = (int*)CTF_int::alloc(sizeof(int)*order);

    n = 0;
    for (i=0; i<order; i++){
      c = cidx[i];
      for (j=0; j<i; j++){
        if (c == cidx[j]){
          (*iidx)[i] = (*iidx)[j];
          break;
        }
      }
      if (j==i){
        (*iidx)[i] = n;
        n++;
      }
    }
    return n;
  }

  template <typename type>
  int conv_idx(int          order_A,
               type const * cidx_A,
               int **       iidx_A,
               int          order_B,
               type const * cidx_B,
               int **       iidx_B){
    int i, j, n;
    type c;

    *iidx_B = (int*)CTF_int::alloc(sizeof(int)*order_B);

    n = conv_idx(order_A, cidx_A, iidx_A);
    for (i=0; i<order_B; i++){
      c = cidx_B[i];
      for (j=0; j<order_A; j++){
        if (c == cidx_A[j]){
          (*iidx_B)[i] = (*iidx_A)[j];
          break;
        }
      }
      if (j==order_A){
        for (j=0; j<i; j++){
          if (c == cidx_B[j]){
            (*iidx_B)[i] = (*iidx_B)[j];
            break;
          }
        }
        if (j==i){
          (*iidx_B)[i] = n;
          n++;
        }
      }
    }
    return n;
  }

  template <typename type>
  int conv_idx(int          order_A,
               type const * cidx_A,
               int **       iidx_A,
               int          order_B,
               type const * cidx_B,
               int **       iidx_B,
               int          order_C,
               type const * cidx_C,
               int **       iidx_C){
    int i, j, n;
    type c;

    *iidx_C = (int*)CTF_int::alloc(sizeof(int)*order_C);

    n = conv_idx(order_A, cidx_A, iidx_A,
                 order_B, cidx_B, iidx_B);

    for (i=0; i<order_C; i++){
      c = cidx_C[i];
      for (j=0; j<order_B; j++){
        if (c == cidx_B[j]){
          (*iidx_C)[i] = (*iidx_B)[j];
          break;
        }
      }
      if (j==order_B){
        for (j=0; j<order_A; j++){
          if (c == cidx_A[j]){
            (*iidx_C)[i] = (*iidx_A)[j];
            break;
          }
        }
        if (j==order_A){
          for (j=0; j<i; j++){
            if (c == cidx_C[j]){
              (*iidx_C)[i] = (*iidx_C)[j];
              break;
            }
          }
          if (j==i){
            (*iidx_C)[i] = n;
            n++;
          }
        }
      }
    }
    return n;
  }

  template int conv_idx<int>(int, int const *, int **);
  template int conv_idx<char>(int, char const *, int **);
  template int conv_idx<int>(int, int const *, int **, int, int const *, int **);
  template int conv_idx<char>(int, char const *, int **, int, char const *, int **);
  template int conv_idx<int>(int, int const *, int **, int, int const *, int **, int, int const *, int **);
  template int conv_idx<char>(int, char const *, int **, int, char const *, int **, int, char const *, int **);
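
  /* Editor's note: a minimal usage sketch (not part of the original source),
     showing how the char overloads map a contraction spec such as
     C["ij"] = A["ik"]*B["kj"] onto integer index ids:

       int *iA, *iB, *iC;
       int n = conv_idx<char>(2, "ik", &iA,
                              2, "kj", &iB,
                              2, "ij", &iC);
       // n == 3 distinct indices: iA = {0,1}, iB = {1,2}, iC = {0,2}
       CTF_int::cdealloc(iA); CTF_int::cdealloc(iB); CTF_int::cdealloc(iC);
  */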

  int64_t total_flop_count = 0;

  void flops_add(int64_t n){
    total_flop_count+=n;
  }

  int64_t get_flops(){
    return total_flop_count;
  }

  void handler() {
#if (!BGP && !BGQ && !HOPPER)
    int i, size;
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    if (rank == 0){
      void *array[51];

      // get void*'s for all entries on the stack
      size = backtrace(array, 50);

      // print out all the frames to stderr
      // (backtrace_symbols_fd writes directly to the given fd; the original
      //  call to backtrace_symbols discarded its result and printed nothing)
      backtrace_symbols_fd(array, size, 2);
      char syscom[2048*size];
      for (i=1; i<size; ++i)
      {
        char buf[2048];
        char buf2[2048];
        int bufsize = 2048;
        int sz = readlink("/proc/self/exe", buf, bufsize-1);
        if (sz < 0) sz = 0;
        buf[sz] = '\0';
        // separate the chained addr2line invocations with ';' so the
        // concatenated string remains a valid shell command
        snprintf(buf2, sizeof(buf2), "addr2line %p -e %s; ", array[i], buf);
        if (i==1)
          strcpy(syscom,buf2);
        else
          strcat(syscom,buf2);
      }
      assert(system(syscom)==0);
    }
    // deliberately dereference NULL so the process aborts with a core dump
    int *iiarr = NULL;
    iiarr[0]++;
    printf("%d",iiarr[0]);
#endif
  }

  CommData::CommData(){
    alive   = 0;
    created = 0;
  }

  CommData::~CommData(){
    deactivate();
  }

  // copies do not take ownership of the communicator (created = 0),
  // so destroying a copy never frees the underlying MPI_Comm
  CommData::CommData(CommData const & other){
    cm      = other.cm;
    alive   = other.alive;
    rank    = other.rank;
    np      = other.np;
    color   = other.color;
    created = 0;
  }

  CommData& CommData::operator=(CommData const & other){
    cm      = other.cm;
    alive   = other.alive;
    rank    = other.rank;
    np      = other.np;
    color   = other.color;
    created = 0;
    return *this;
  }

  CommData::CommData(MPI_Comm cm_){
    cm = cm_;
    MPI_Comm_rank(cm, &rank);
    MPI_Comm_size(cm, &np);
    alive   = 1;
    created = 0;
  }

  // deferred construction: records rank/color/np but does not create a
  // communicator until activate() is called
  CommData::CommData(int rank_, int color_, int np_){
    rank    = rank_;
    color   = color_;
    np      = np_;
    alive   = 0;
    created = 0;
  }

  CommData::CommData(int rank_, int color_, CommData parent){
    rank  = rank_;
    color = color_;
    ASSERT(parent.alive);
    MPI_Comm_split(parent.cm, color, rank_, &cm);
    MPI_Comm_size(cm, &np);
    alive   = 1;
    created = 1;
  }

  void CommData::activate(MPI_Comm parent){
    if (!alive){
      alive   = 1;
      created = 1;
      MPI_Comm_split(parent, color, rank, &cm);
      int np_;
      MPI_Comm_size(cm, &np_);
      ASSERT(np_ == np);
    }
  }

  void CommData::deactivate(){
    if (alive){
      alive = 0;
      if (created){
        int is_finalized;
        MPI_Finalized(&is_finalized);
        if (!is_finalized) MPI_Comm_free(&cm);
      }
      created = 0;
    }
  }
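
  /* Editor's note: a minimal lifecycle sketch (hypothetical, not part of the
     original source), assuming a world size divisible by 4:

       CommData world(MPI_COMM_WORLD);                  // wraps, does not own
       CommData row(world.rank % 4, world.rank / 4, 4); // deferred: alive == 0
       row.activate(world.cm);                          // MPI_Comm_split happens here
       // ... use row.cm ...
       row.deactivate();                                // frees cm only if it created it
  */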

  double CommData::estimate_bcast_time(int64_t msg_sz){
    double ps[] = {1.0, log2((double)np), (double)msg_sz};
    return bcast_mdl.est_time(ps);
  }

  double CommData::estimate_allred_time(int64_t msg_sz, MPI_Op op){
    double ps[] = {1.0, log2((double)np), (double)msg_sz*log2((double)(np))};
    if (op >= MPI_MAX && op <= MPI_REPLACE)
      return allred_mdl.est_time(ps);
    else
      return allred_mdl_cst.est_time(ps);
  }

  double CommData::estimate_red_time(int64_t msg_sz, MPI_Op op){
    double ps[] = {1.0, log2((double)np), (double)msg_sz*log2((double)(np))};
    if (op >= MPI_MAX && op <= MPI_REPLACE)
      return red_mdl.est_time(ps);
    else
      return red_mdl_cst.est_time(ps);
  }
/*
  double CommData::estimate_csrred_time(int64_t msg_sz, MPI_Op op){
    double ps[] = {1.0, log2((double)np), (double)msg_sz};
    if (op >= MPI_MAX && op <= MPI_REPLACE)
      return csrred_mdl.est_time(ps);
    else
      return csrred_mdl_cst.est_time(ps);
  }*/

  double CommData::estimate_alltoall_time(int64_t chunk_sz) {
    double ps[] = {1.0, log2((double)np), log2((double)np)*np*chunk_sz};
    return alltoall_mdl.est_time(ps);
  }

  double CommData::estimate_alltoallv_time(int64_t tot_sz) {
    double ps[] = {1.0, log2((double)np), log2((double)np)*tot_sz};
    return alltoallv_mdl.est_time(ps);
  }
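
  /* Editor's note: each LinModel<3> estimate above is, under the model's
     linearity assumption, a dot product of three learned coefficients
     c = (c0, c1, c2) with the feature vector ps:

       t_est = c0 * 1  +  c1 * log2(np)  +  c2 * (data-volume term)

     i.e. a latency / tree-depth / bandwidth decomposition of the collective.
     The coefficients are refit from the (exe_time, features) observations
     that the TUNE-instrumented wrappers below record via observe(). */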

  void CommData::bcast(void * buf, int64_t count, MPI_Datatype mdtype, int root){
#ifdef TUNE
    MPI_Barrier(cm);

    int tsize_;
    MPI_Type_size(mdtype, &tsize_);
    double tps_[] = {0.0, 1.0, log2(np), ((double)count)*tsize_};
    if (!bcast_mdl.should_observe(tps_)) return;

    double st_time = MPI_Wtime();
#endif
    MPI_Bcast(buf, count, mdtype, root, cm);
#ifdef TUNE
    MPI_Barrier(cm);
    double exe_time = MPI_Wtime()-st_time;
    int tsize;
    MPI_Type_size(mdtype, &tsize);
    double tps[] = {exe_time, 1.0, log2(np), ((double)count)*tsize};
    bcast_mdl.observe(tps);
#endif
  }

  void CommData::allred(void * inbuf, void * outbuf, int64_t count, MPI_Datatype mdtype, MPI_Op op){
#ifdef TUNE
    MPI_Barrier(cm);

    int tsize_;
    MPI_Type_size(mdtype, &tsize_);
    double tps_[] = {0.0, 1.0, log2(np), ((double)count)*tsize_*std::max(.5,(double)log2(np))};
    bool bsr = true;
    if (op >= MPI_MAX && op <= MPI_REPLACE)
      bsr = allred_mdl.should_observe(tps_);
    else
      bsr = allred_mdl_cst.should_observe(tps_);
    if (!bsr) return;
#endif

    double st_time = MPI_Wtime();
    MPI_Allreduce(inbuf, outbuf, count, mdtype, op, cm);
#ifdef TUNE
    MPI_Barrier(cm);
#endif
    double exe_time = MPI_Wtime()-st_time;
    int tsize;
    MPI_Type_size(mdtype, &tsize);
    double tps[] = {exe_time, 1.0, log2(np), ((double)count)*tsize*std::max(.5,(double)log2(np))};
    if (op >= MPI_MAX && op <= MPI_REPLACE)
      allred_mdl.observe(tps);
    else
      allred_mdl_cst.observe(tps);
  }

  void CommData::red(void * inbuf, void * outbuf, int64_t count, MPI_Datatype mdtype, MPI_Op op, int root){
#ifdef TUNE
    MPI_Barrier(cm);

    // change-of-observe
    int tsize_;
    MPI_Type_size(mdtype, &tsize_);
    double tps_[] = {0.0, 1.0, log2(np), ((double)count)*tsize_*std::max(.5,(double)log2(np))};
    bool bsr = true;
    if (op >= MPI_MAX && op <= MPI_REPLACE)
      bsr = red_mdl.should_observe(tps_);
    else
      bsr = red_mdl_cst.should_observe(tps_);
    if (!bsr) return;
#endif

    double st_time = MPI_Wtime();
    MPI_Reduce(inbuf, outbuf, count, mdtype, op, root, cm);
#ifdef TUNE
    MPI_Barrier(cm);
#endif
    double exe_time = MPI_Wtime()-st_time;
    int tsize;
    MPI_Type_size(mdtype, &tsize);
    double tps[] = {exe_time, 1.0, log2(np), ((double)count)*tsize*std::max(.5,(double)log2(np))};
    if (op >= MPI_MAX && op <= MPI_REPLACE)
      red_mdl.observe(tps);
    else
      red_mdl_cst.observe(tps);
  }


  void CommData::all_to_allv(void *          send_buffer,
                             int64_t const * send_counts,
                             int64_t const * send_displs,
                             int64_t         datum_size,
                             void *          recv_buffer,
                             int64_t const * recv_counts,
                             int64_t const * recv_displs){

#ifdef TUNE
    MPI_Barrier(cm);
    // change-of-observe
    int64_t tot_sz_ = std::max(send_displs[np-1]+send_counts[np-1], recv_displs[np-1]+recv_counts[np-1])*datum_size;
    double tps_[] = {0.0, 1.0, log2(np), (double)tot_sz_};
    if (!alltoallv_mdl.should_observe(tps_)) return;
#endif

    double st_time = MPI_Wtime();
    int num_nnz_trgt = 0;
    int num_nnz_recv = 0;
    for (int p=0; p<np; p++){
      if (send_counts[p] != 0) num_nnz_trgt++;
      if (recv_counts[p] != 0) num_nnz_recv++;
    }
    double frac_nnz = ((double)num_nnz_trgt)/np;
    double tot_frac_nnz;
    MPI_Allreduce(&frac_nnz, &tot_frac_nnz, 1, MPI_DOUBLE, MPI_SUM, cm);
    tot_frac_nnz = tot_frac_nnz / np;

    int64_t max_displs = std::max(recv_displs[np-1], send_displs[np-1]);
    int64_t tot_max_displs;

    MPI_Allreduce(&max_displs, &tot_max_displs, 1, MPI_INT64_T, MPI_MAX, cm);

    // fall back to point-to-point messages when displacements exceed the
    // 32-bit range MPI_Alltoallv can address, when the datum size has no
    // matching built-in MPI datatype, or when the exchange is sparse enough
    // that posting only the nonzero sends/receives is cheaper
    if (tot_max_displs >= INT32_MAX ||
        (datum_size != 4 && datum_size != 8 && datum_size != 16) ||
        (tot_frac_nnz <= .25 && tot_frac_nnz*np < 100)){
      MPI_Datatype mdt;
      MPI_Type_contiguous(datum_size, MPI_CHAR, &mdt);
      MPI_Type_commit(&mdt);
      MPI_Request reqs[num_nnz_recv+num_nnz_trgt];
      MPI_Status stat[num_nnz_recv+num_nnz_trgt];
      int nnr = 0;
      for (int p=0; p<np; p++){
        if (recv_counts[p] != 0){
          MPI_Irecv(((char*)recv_buffer)+recv_displs[p]*datum_size,
                    recv_counts[p],
                    mdt, p, p, cm, reqs+nnr);
          nnr++;
        }
      }
      int nns = 0;
      for (int lp=0; lp<np; lp++){
        // stagger send targets by rank to avoid hot-spotting a single peer
        int p = (lp+rank)%np;
        if (send_counts[p] != 0){
          MPI_Isend(((char*)send_buffer)+send_displs[p]*datum_size,
                    send_counts[p],
                    mdt, p, rank, cm, reqs+nnr+nns);
          nns++;
        }
      }
      MPI_Waitall(num_nnz_recv+num_nnz_trgt, reqs, stat);
      MPI_Type_free(&mdt);
    } else {
      int * i32_send_counts, * i32_send_displs;
      int * i32_recv_counts, * i32_recv_displs;

      CTF_int::mst_alloc_ptr(np*sizeof(int), (void**)&i32_send_counts);
      CTF_int::mst_alloc_ptr(np*sizeof(int), (void**)&i32_send_displs);
      CTF_int::mst_alloc_ptr(np*sizeof(int), (void**)&i32_recv_counts);
      CTF_int::mst_alloc_ptr(np*sizeof(int), (void**)&i32_recv_displs);

      for (int p=0; p<np; p++){
        i32_send_counts[p] = send_counts[p];
        i32_send_displs[p] = send_displs[p];
        i32_recv_counts[p] = recv_counts[p];
        i32_recv_displs[p] = recv_displs[p];
      }
      switch (datum_size){
        case 4:
          MPI_Alltoallv(send_buffer, i32_send_counts, i32_send_displs, MPI_FLOAT,
                        recv_buffer, i32_recv_counts, i32_recv_displs, MPI_FLOAT, cm);
          break;
        case 8:
          MPI_Alltoallv(send_buffer, i32_send_counts, i32_send_displs, MPI_DOUBLE,
                        recv_buffer, i32_recv_counts, i32_recv_displs, MPI_DOUBLE, cm);
          break;
        case 16:
          MPI_Alltoallv(send_buffer, i32_send_counts, i32_send_displs, MPI_CXX_DOUBLE_COMPLEX,
                        recv_buffer, i32_recv_counts, i32_recv_displs, MPI_CXX_DOUBLE_COMPLEX, cm);
          break;
        default:
          ABORT;
          break;
      }
      CTF_int::cdealloc(i32_send_counts);
      CTF_int::cdealloc(i32_send_displs);
      CTF_int::cdealloc(i32_recv_counts);
      CTF_int::cdealloc(i32_recv_displs);
    }
#ifdef TUNE
    MPI_Barrier(cm);
#endif
    double exe_time = MPI_Wtime()-st_time;
    int64_t tot_sz = std::max(send_displs[np-1]+send_counts[np-1], recv_displs[np-1]+recv_counts[np-1])*datum_size;
    double tps[] = {exe_time, 1.0, log2(np), (double)tot_sz};
    alltoallv_mdl.observe(tps);
  }

  // decompose linear index idx into multi-index idx_arr (column-major:
  // the first mode varies fastest)
  void cvrt_idx(int         order,
                int const * lens,
                int64_t     idx,
                int *       idx_arr){
    int i;
    int64_t cidx = idx;
    for (i=0; i<order; i++){
      idx_arr[i] = cidx%lens[i];
      cidx = cidx/lens[i];
    }
  }

  void cvrt_idx(int         order,
                int const * lens,
                int64_t     idx,
                int **      idx_arr){
    (*idx_arr) = (int*)CTF_int::alloc(order*sizeof(int));
    cvrt_idx(order, lens, idx, *idx_arr);
  }

  // fold multi-index idx_arr back into a linear index
  void cvrt_idx(int         order,
                int const * lens,
                int const * idx_arr,
                int64_t *   idx){
    int i;
    int64_t lda = 1;
    *idx = 0;
    for (i=0; i<order; i++){
      (*idx) += idx_arr[i]*lda;
      lda *= lens[i];
    }
  }
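
  /* Editor's note: the two directions are inverses; with int lens[2] = {3,4}
     the linearization is idx = idx_arr[0] + idx_arr[1]*3, so

       int arr[2];
       cvrt_idx(2, lens, 5, arr);     // arr = {2, 1}, since 5 = 2 + 1*3
       int64_t idx;
       cvrt_idx(2, lens, arr, &idx);  // idx = 5 again
  */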
/*
#define USE_CUST_DBL_CMPLX 0

#if USE_CUST_DBL_CMPLX
  MPI_Datatype MPI_DBL_CMPLX;
  bool dbl_cmplx_type_created = 0;
#else
  MPI_Datatype MPI_DBL_CMPLX = MPI_CXX_DOUBLE_COMPLEX;
  bool dbl_cmplx_type_created = 1;
#endif

  MPI_Datatype get_dbl_cmplx_type(){
    if (dbl_cmplx_type_created){
      MPI_Type_contiguous(2, MPI_DOUBLE, &MPI_DBL_CMPLX);
      MPI_Type_commit(&MPI_DBL_CMPLX);
      MPI_DBL_CMPLX = dt;
    }
    return MPI_DBL_CMPLX;
  }*/

  extern MPI_Datatype MPI_CTF_DOUBLE_COMPLEX;

  // gives a datatype for an arbitrary datum_size; returns true if a new
  // type was committed (the caller is then responsible for freeing it);
  // errors if count exceeds 32 bits
  bool get_mpi_dt(int64_t count, int64_t datum_size, MPI_Datatype & dt){
    ASSERT(count <= INT_MAX);
    bool is_new = false;
    switch (datum_size){
      case 1:
        dt = MPI_CHAR;
        break;
      case 4:
        dt = MPI_INT;
        break;
      case 8:
        dt = MPI_DOUBLE;
        break;
      case 16:
        dt = MPI_CTF_DOUBLE_COMPLEX;
        break;
      default:
        MPI_Type_contiguous(datum_size, MPI_CHAR, &dt);
        MPI_Type_commit(&dt);
        is_new = true;
        break;
    }
    return is_new;
  }
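
  /* Editor's note: a minimal usage sketch (hypothetical, not part of the
     original source):

       MPI_Datatype dt;
       bool is_new = get_mpi_dt(count, datum_size, dt);
       MPI_Bcast(buf, count, dt, 0, comm);
       if (is_new) MPI_Type_free(&dt);  // free only types built by get_mpi_dt
  */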

}