Cyclops Tensor Framework
parallel arithmetic on multidimensional arrays
readall_test.cxx
/*Copyright (c) 2011, Edgar Solomonik, all rights reserved.*/

#include <ctf.hpp>

using namespace CTF;

int readall_test(int     n,
                 int     m,
                 World & dw){
  int rank, i, num_pes, pass;

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &num_pes);

  int shapeN4[] = {NS,NS,NS,NS};
  int sizeN4[]  = {n,m,n,m};

  // Create a distributed order-4 tensor of dimensions n*m*n*m, initialized with zeros
  Tensor<> A(4, sizeN4, shapeN4, dw);

  std::vector<double>  vals;
  std::vector<int64_t> inds;
  if (rank == 0){
    // Build a serial copy of the tensor on a single-process world owned by rank 0
    World sw(MPI_COMM_SELF);

    Tensor<> sA(4, sizeN4, shapeN4, sw);

    if (rank == 0){ // redundant check: this branch already runs on rank 0 only
      // Write a random value at every global index of the serial tensor
      srand48(13*rank);
      for (i=0; i<n*m*n*m; i++){
        vals.push_back(drand48());
        inds.push_back(i);
      }
    }

    sA[inds] = vals;

    // Accumulate the serial tensor into the distributed tensor A
    A.add_from_subworld(&sA);
  } else
    A.add_from_subworld(NULL);

  double * vs;
  int64_t  ns;

  // Gather the entire contents of A onto every process
  A.read_all(&ns, &vs);

  assert(ns == n*n*m*m);

  // On rank 0, compare the gathered data against the values originally written
  pass = 1;
  if (rank == 0){
    for (i=0; i<ns; i++){
      if (fabs(vs[i]-vals[i])>1.E-10)
        pass = 0;
    }
  }
  delete [] vs;

  if (pass){
    if (rank == 0)
      printf("{ A.read_all() returns the values written to A } passed \n");
  } else {
    if (rank == 0)
      printf("{ A.read_all() returns the values written to A } failed \n");
  }

  return pass;
}

#ifndef TEST_SUITE
// Returns the argument following a command-line flag, or NULL if the flag is absent
char* getCmdOption(char ** begin,
                   char ** end,
                   const std::string & option){
  char ** itr = std::find(begin, end, option);
  if (itr != end && ++itr != end){
    return *itr;
  }
  return 0;
}

int main(int argc, char ** argv){
  int rank, np, n, m;
  int in_num = argc;
  char ** input_str = argv;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &np);

  // Parse the tensor edge lengths: -n (default 7) and -m (default 9)
  if (getCmdOption(input_str, input_str+in_num, "-n")){
    n = atoi(getCmdOption(input_str, input_str+in_num, "-n"));
    if (n < 0) n = 7;
  } else n = 7;

  if (getCmdOption(input_str, input_str+in_num, "-m")){
    m = atoi(getCmdOption(input_str, input_str+in_num, "-m"));
    if (m < 0) m = 9;
  } else m = 9;

  {
    // Scope ensures the World is destroyed before MPI_Finalize is called
    World dw(argc, argv);
    readall_test(n, m, dw);
  }

  MPI_Finalize();
  return 0;
}
#endif
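
When TEST_SUITE is not defined, this file builds into a standalone MPI program that accepts -n and -m to set the two distinct tensor edge lengths (defaulting to 7 and 9). Assuming the binary is named readall_test (an assumption, not stated in this file), a typical run would look like:

  mpirun -np 4 ./readall_test -n 7 -m 9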

Referenced CTF interfaces:

World (world.h): an instance of the CTF library (world) on an MPI communicator.
Tensor (tensor.h): an instance of a tensor within a CTF world.
Tensor::read_all (tensor.cxx): collects the entire tensor data on each process (not memory scalable).
Tensor::add_from_subworld (tensor.cxx): accumulates this tensor from a tensor object defined on a different world.
NS (common.h): non-symmetric shape specifier used for each tensor dimension.
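
For reference, the following is a minimal sketch of the read_all pattern exercised above, assuming only the CTF interfaces listed here (World, Tensor, Tensor::read_all). The file name, tensor order, and edge lengths are illustrative and not taken from the original test; the point is that read_all hands every rank a full, caller-owned copy of the tensor data, which is why it is described as not memory scalable.

/* readall_sketch.cxx (hypothetical): gather a whole CTF tensor on every rank */
#include <ctf.hpp>
#include <assert.h>
using namespace CTF;

int main(int argc, char ** argv){
  MPI_Init(&argc, &argv);
  {
    World dw(argc, argv);

    int lens[] = {4, 4};
    int sym[]  = {NS, NS};
    Tensor<> T(2, lens, sym, dw);  // dense 4x4 tensor of doubles, zero-initialized

    int64_t npair;
    double * data;
    T.read_all(&npair, &data);     // replicates all 16 entries on each process
    assert(npair == 16);

    delete [] data;                // the caller frees the buffer returned by read_all
  }
  MPI_Finalize();
  return 0;
}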