Cyclops Tensor Framework
parallel arithmetic on multidimensional arrays
ccsdt_t3_to_t2.cxx
Go to the documentation of this file.
1 
9 #include <ctf.hpp>
10 
11 using namespace CTF;
12 
/**
 * \brief Tests a CCSDT-style contraction that folds a rank-6 tensor (T3-like)
 *        into a rank-4 tensor (T2-like), performed twice: once on tensors with
 *        antisymmetric (AS) packed symmetry and once on plain nonsymmetric (NS)
 *        tensors. The two results must agree, which validates CTF's packed
 *        antisymmetric contraction kernels against the unpacked reference.
 *
 * \param[in] n  first mode dimension (presumably # occupied orbitals in the
 *               CCSDT context — TODO confirm against caller)
 * \param[in] m  second mode dimension (presumably # virtual orbitals)
 * \param[in] dw CTF World (MPI communicator wrapper) on which tensors live
 * \return 1 if the AS and NS contractions match (pass), 0 otherwise
 */
int ccsdt_t3_to_t2(int n,
                   int m,
                   World & dw){

  int rank, num_pes;

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &num_pes);

#if DEBUG >= 1
  if (rank == 0)
    printf("n = %d\n", n);
#endif

  // Symmetry descriptors, one entry per tensor mode: AS marks a mode that is
  // antisymmetric with the following mode; NS marks an independent mode.
  int shapeAS4[] = {AS,NS,AS,NS};
  int shapeAS6[] = {AS,AS,NS,AS,AS,NS};
  int shapeHS6[] = {AS,AS,NS,NS,NS,NS};
  int shapeTS6[] = {AS,NS,NS,NS,NS,NS};
  int shapeTS4[] = {AS,NS,NS,NS};
  int shapeNS4[] = {NS,NS,NS,NS};
  //int shapeNS6[] = {NS,NS,NS,NS,NS,NS};
  // Edge-length vectors for the rank-4 (n,n,n,m), rank-4 (m,m,n,n) and
  // rank-6 (m,m,m,n,n,n) tensors below.
  int nnnm[] = {n,n,n,m};
  int mmnn[] = {m,m,n,n};
  int mmmnnn[] = {m,m,m,n,n,n};

  //* Creates distributed tensors initialized with zeros
  // AS_* use packed (anti)symmetric storage; NS_* are the unpacked references.
  // NOTE(review): AS_A/NS_C use the partially-antisymmetric TS shapes rather
  // than the fully antisymmetric ones — presumably intentional to exercise
  // mixed-symmetry paths; confirm against the CTF test suite conventions.
  Tensor<> AS_A(4, nnnm, shapeTS4, dw, "AS_A", 1);
  Tensor<> AS_B(6, mmmnnn, shapeAS6, dw, "AS_B", 1);
  Tensor<> HS_B(6, mmmnnn, shapeHS6, dw);   // only used in the disabled #if 0 path below
  Tensor<> AS_C(4, mmnn, shapeAS4, dw, "AS_C", 1);
  Tensor<> NS_A(4, nnnm, shapeNS4, dw, "NS_A", 1);
  Tensor<> NS_B(6, mmmnnn, shapeTS6, dw, "NS_B", 1);
  Tensor<> NS_C(4, mmnn, shapeTS4, dw);

#if DEBUG >= 1
  if (rank == 0)
    printf("tensor creation succeed\n");
#endif

  //* Writes noise to local data based on global index
  srand48(2013);
  AS_A.fill_random(0.,1.);
  AS_B.fill_random(0.,1.);
  AS_C.fill_random(0.,1.);


  // Copy the (anti)symmetric data into the nonsymmetric/looser-symmetry
  // tensors; CTF unpacks the symmetric storage during these assignments.
  NS_A["abij"] = AS_A["abij"];
  NS_B["abcijk"] = AS_B["abcijk"];
/*  printf("norm of NS_B is %lf of AS_B is %lf, should be same\n",
         NS_B.reduce(OP_SQN<>RM2), AS_B.reduce(OP_SQN<>RM2));*/
  NS_C["abij"] += AS_C["abij"];
  /*printf("norm of NS_C is %lf of AS_C is %lf, should be same\n",
         NS_C.reduce(OP_SQN<>RM2), AS_C.reduce(OP_SQN<>RM2));*/
#if 0
  // Disabled alternative: explicitly antisymmetrize NS_A/NS_B by summing
  // signed index permutations instead of relying on CTF's symmetric unpack.
  NS_A["abij"] -= AS_A["baij"];
  NS_A["abij"] += AS_A["abij"];

  HS_B["abcijk"] -= AS_B["abcjik"];
  HS_B["abcijk"] -= AS_B["abckji"];
  HS_B["abcijk"] -= AS_B["abcikj"];
  HS_B["abcijk"] += AS_B["abcijk"];
  HS_B["abcijk"] += AS_B["abckij"];
  HS_B["abcijk"] += AS_B["abcjki"];

  NS_B["ijkabc"] += HS_B["ijkabc"];
  NS_B["ikjabc"] -= HS_B["ijkabc"];
  NS_B["kjiabc"] -= HS_B["ijkabc"];
  NS_C["abij"] += AS_C["abij"];
#endif

  /*printf("norm of AS_A %lf\n", AS_A.norm2());
  printf("norm of NS_A %lf\n", NS_A.norm2());
  printf("norm of AS_B %lf\n", AS_B.norm2());
  printf("norm of NS_B %lf\n", NS_B.norm2());
  printf("norm of AS_C %lf\n", AS_C.norm2());
  printf("norm of NS_C %lf\n", NS_C.norm2());*/

  // The contraction under test: fold rank-6 B into rank-4 C via A.
  // Packed antisymmetric version:
  AS_C["abij"] += 0.5*AS_A["mnje"]*AS_B["abeimn"];

  // Nonsymmetric reference: same contraction ...
  NS_C["abij"] += 0.5*NS_A["mnje"]*NS_B["abeimn"];

  // ... plus explicit antisymmetrization over (i,j), which the AS version
  // gets implicitly from its packed symmetry.
  NS_C["abji"] -= 0.5*NS_A["mnje"]*NS_B["abeimn"];

  double nrm_AS, nrm_NS;


  int pass = 1;


  // Cross-check: Frobenius norm computed via self-contraction must match
  // the built-in norm2() on both the packed and unpacked tensors.
  nrm_AS = sqrt((double)(AS_C["ijkl"]*AS_C["ijkl"]));
  nrm_NS = sqrt((double)(NS_C["ijkl"]*NS_C["ijkl"]));
#if DEBUG >= 1
  if (rank == 0) printf("triangular norm of AS_C = %lf NS_C = %lf\n", nrm_AS, nrm_NS);
#endif
  double cnrm_AS = AS_C.norm2();
  double cnrm_NS = NS_C.norm2();
  if (fabs(nrm_AS-cnrm_AS) >= 1.E-6) {
    printf("ERROR: AS norm not working!\n");
    pass = 0;
  }
  if (fabs(nrm_NS-cnrm_NS) >= 1.E-6) {
    printf("ERROR: NS norm not working!\n");
    pass = 0;
  }
#if DEBUG >= 1
  if (rank == 0) printf("norm of AS_C = %lf NS_C = %lf\n", nrm_AS, nrm_NS);
#endif
  // Main check: the AS and NS results must be identical, so their
  // difference must have (near-)zero norm.
  NS_C["abij"] -= AS_C["abij"];
#if 0
  NS_C["abij"] += AS_C["abji"];
#endif



  double nrm = NS_C.norm2();
#if DEBUG >= 1
  if (rank == 0){
    printf("norm of NS_C after contraction should be zero, is = %lf\n", nrm);
  }
#endif
  if (fabs(nrm) > 1.E-6) pass = 0;

  // Reduce pass/fail across all ranks (MPI_MIN: any failing rank fails the
  // test); only rank 0 prints the verdict.
  if (rank == 0){
    MPI_Reduce(MPI_IN_PLACE, &pass, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
    if (pass)
      printf("{ AS_C[\"abij\"] += 0.5*AS_A[\"mnje\"]*AS_B[\"abeimn\"] } passed\n");
    else
      printf("{ AS_C[\"abij\"] += 0.5*AS_A[\"mnje\"]*AS_B[\"abeimn\"] } failed\n");
  } else
    MPI_Reduce(&pass, MPI_IN_PLACE, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);

  return pass;
}
146 
147 #ifndef TEST_SUITE
148 
/**
 * \brief Searches an argv-style range for a command-line flag and returns
 *        the argument that immediately follows it.
 *
 * \param[in] begin  start of the argument array (e.g. argv)
 * \param[in] end    one past the last argument (e.g. argv + argc)
 * \param[in] option flag to look for (e.g. "-n")
 * \return pointer to the value following the flag, or nullptr when the flag
 *         is absent or is the final argument (no value after it)
 */
char* getCmdOption(char ** begin,
                   char ** end,
                   const std::string & option){
  char ** itr = std::find(begin, end, option);
  if (itr != end && ++itr != end){
    return *itr;
  }
  return nullptr;  // was literal 0 — nullptr makes the pointer intent explicit
}
158 
159 
160 int main(int argc, char ** argv){
161  int rank, np, niter, n, m;
162  int in_num = argc;
163  char ** input_str = argv;
164 
165  MPI_Init(&argc, &argv);
166  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
167  MPI_Comm_size(MPI_COMM_WORLD, &np);
168 
169  if (getCmdOption(input_str, input_str+in_num, "-n")){
170  n = atoi(getCmdOption(input_str, input_str+in_num, "-n"));
171  if (n < 0) n = 6;
172  } else n = 6;
173  if (getCmdOption(input_str, input_str+in_num, "-m")){
174  m = atoi(getCmdOption(input_str, input_str+in_num, "-m"));
175  if (m < 0) m = 7;
176  } else m = 7;
177 
178  if (getCmdOption(input_str, input_str+in_num, "-niter")){
179  niter = atoi(getCmdOption(input_str, input_str+in_num, "-niter"));
180  if (niter < 0) niter = 3;
181  } else niter = 3;
182 
183 
184 
185  {
186  World dw(argc, argv);
187  int pass = ccsdt_t3_to_t2(n, m, dw);
188  assert(pass);
189  }
190 
191 
192  MPI_Finalize();
193  return 0;
194 }
200 #endif
201 
char * getCmdOption(char **begin, char **end, const std::string &option)
def rank(self)
Definition: core.pyx:312
Definition: common.h:37
an instance of the CTF library (world) on a MPI communicator
Definition: world.h:19
string
Definition: core.pyx:456
void fill_random(dtype rmin, dtype rmax)
fills local unique tensor elements to random values in the range [min,max] works only for dtype in {f...
Definition: tensor.cxx:928
int ccsdt_t3_to_t2(int n, int m, World &dw)
int main(int argc, char **argv)
Definition: apsp.cxx:17
an instance of a tensor within a CTF world
Definition: tensor.h:74
Definition: common.h:37
def np(self)
Definition: core.pyx:315