Cyclops Tensor Framework
parallel arithmetic on multidimensional arrays
readwrite_test.cxx
/*Copyright (c) 2011, Edgar Solomonik, all rights reserved.*/

#include <ctf.hpp>

using namespace CTF;

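/**
 * \brief writes the main diagonal of 4D tensors with NS, SY, SH, and AS
 *        symmetry via bulk (index,value) writes, then checks the data
 *        with global reductions and self-contractions
 */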
int readwrite_test(int     n,
                   World & dw){
  int rank, i, num_pes, pass;
  double sum;

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &num_pes);

  int shape_NS4[] = {NS,NS,NS,NS};
  int shape_SY4[] = {SY,NS,SY,NS};
  int shape_SH4[] = {SH,NS,SH,NS};
  int shape_AS4[] = {AS,NS,AS,NS};
  int sizeN4[]    = {n,n,n,n};

  // Creates distributed tensors initialized with zeros
  Tensor<> A_NS(4, sizeN4, shape_NS4, dw);
  Tensor<> A_SY(4, sizeN4, shape_SY4, dw);
  Tensor<> A_SH(4, sizeN4, shape_SH4, dw);
  Tensor<> A_AS(4, sizeN4, shape_AS4, dw);

  std::vector<int64_t> indices;
  std::vector<double>  vals;

  if (rank == 0){
    for (i=0; i<n; i++){
      // main diagonal
      indices.push_back(i+i*n+i*n*n+i*n*n*n);
      vals.push_back((double)(i+1));
    }
  }
  pass = 1;

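  // bulk accumulate-write: every rank participates in the write, but only
  // rank 0 contributes (index,value) pairs, so globally the diagonal becomes
  // 1..n and the tensor sum should be n*(n+1)/2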
  A_NS[indices]+=vals;

  sum = A_NS.reduce(CTF::OP_SUM);

  if (fabs(sum-n*(n+1.)/2.)>1.E-10){
    pass = 0;
#ifndef TEST_SUITE
    if (rank == 0){
      printf("Nonsymmetric diagonal write failed!\n");
    }
#endif
  }

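  // the same diagonal write into the symmetric (SY) tensor; each diagonal
  // element is stored once in the packed layout, so the sum is unchanged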
  A_SY[indices]+=vals;

  sum = A_SY.reduce(CTF::OP_SUM);


  if (fabs(sum-n*(n+1.)/2.)>1.E-10){
    pass = 0;
#ifndef TEST_SUITE
    if (rank == 0){
      printf("Symmetric diagonal write failed, err = %lf\n",sum-n*(n+1.)/2.);
    }
#endif
  }


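  // for an antisymmetric (AS) tensor the diagonal is identically zero, so
  // these writes are discarded and the sum must remain exactly 0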
  A_AS[indices]+=vals;

  sum = A_AS.reduce(CTF::OP_SUM);

  if (sum != 0.0){
    pass = 0;
#ifndef TEST_SUITE
    if (rank == 0){
      printf("Antisymmetric diagonal write failed!\n");
    }
#endif
  }

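  // a symmetric-hollow (SH) tensor also has a structurally zero diagonal,
  // so the diagonal write is discarded here as well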
  A_SH[indices]+=vals;

  sum = A_SH.reduce(CTF::OP_SUM);

  if (sum != 0.0){
    pass = 0;
#ifndef TEST_SUITE
    if (rank == 0){
      printf("Symmetric-hollow diagonal write failed!\n");
    }
#endif
  }

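  // overwrite (rather than accumulate) the SY diagonal with sqrt(i+1), so
  // that squaring the tensor in the contractions below again sums to n*(n+1)/2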
  for (i=0; i<(int)vals.size(); i++){
    vals[i] = sqrt(vals[i]);
  }
  A_SY[indices]=vals;

  sum = 0.0;

  A_NS["ijkl"]=0.0;
  A_NS["ijkl"]=A_SY["ijkl"];
  sum += A_NS["ijkl"]*A_NS["ijkl"];

  if (fabs(sum-n*(n+1.)/2.)>1.E-10){
    pass = 0;
#ifndef TEST_SUITE
    if (rank == 0){
      printf("Nonsymmetric self contraction failed, err = %lf\n",sum-n*(n+1.)/2.);
    }
#endif
  }

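  // contract the symmetric tensor with itself (a variant via the copy B_SY is
  // left commented out); the squared diagonal again sums to n*(n+1)/2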
  Tensor<> B_SY(4, sizeN4, shape_SY4, dw);

/*B_SY["ijkl"]=A_SY["ijkl"];
  sum = A_SY["ijkl"]*B_SY["ijkl"];*/
  sum = A_SY["ijkl"]*A_SY["ijkl"];

  if (fabs(sum-n*(n+1.)/2.)>1.E-10){
    pass = 0;
#ifndef TEST_SUITE
    if (rank == 0){
      printf("Symmetric self contraction failed, err = %lf\n",sum-n*(n+1.)/2.);
    }
#endif
  }

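  // take the minimum of the per-rank pass flags, so the test only reports
  // success if every rank passed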
  if (rank == 0){
    MPI_Reduce(MPI_IN_PLACE, &pass, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
    if (pass){
      printf("{ diagonal write test } passed\n");
    } else {
      printf("{ diagonal write test } failed\n");
    }
  } else
    MPI_Reduce(&pass, MPI_IN_PLACE, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);

  return pass;
}


#ifndef TEST_SUITE

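// returns the argument that follows 'option' on the command line, or 0 (NULL)
// if the option is absent or has no argument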
char* getCmdOption(char ** begin,
                   char ** end,
                   const std::string & option){
  char ** itr = std::find(begin, end, option);
  if (itr != end && ++itr != end){
    return *itr;
  }
  return 0;
}


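// standalone driver; a typical invocation (binary name and MPI launcher may
// differ in your build) would look like: mpirun -np 4 ./readwrite_test -n 7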
int main(int argc, char ** argv){
  int rank, np, n;
  int in_num = argc;
  char ** input_str = argv;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &np);

  if (getCmdOption(input_str, input_str+in_num, "-n")){
    n = atoi(getCmdOption(input_str, input_str+in_num, "-n"));
    if (n < 0) n = 7;
  } else n = 7;


  {
    World dw(MPI_COMM_WORLD, argc, argv);

    if (rank == 0) printf("Testing reading and writing functions in CTF\n");
    readwrite_test(n, dw);
  }


  MPI_Finalize();
  return 0;
}
#endif