#include <ctf.hpp>
#include <algorithm>
#include <cmath>
using namespace CTF;

double f2(double a, double b){
  // bivariate scalar function applied elementwise to pairs of tensor entries
  // (body reconstructed; any scalar function of two doubles fits the check below)
  return a*b + b*a;
}

int bivar_function(int n, World & dw){
  int shapeN4[] = {NS,NS,NS,NS};
  int sizeN4[]  = {n+1,n,n+2,n+3};

  // construct two nonsymmetric 4D tensors on the world dw and fill them with
  // random entries (Tensor constructor arguments and fill_random range assumed)
  Tensor<> A(4, sizeN4, shapeN4, dw);
  Tensor<> B(4, sizeN4, shapeN4, dw);
  A.fill_random(-.5, .5);
  B.fill_random(-.5, .5);

  // gather the full contents of A and B on every process before the update
  int64_t nall_A, nall_B;
  double * all_start_data_A;
  A.read_all(&nall_A, &all_start_data_A);
  double * all_start_data_B;
  B.read_all(&nall_B, &all_start_data_B);

  // wrap f2 as an elementwise function of two tensor operands
  // (CTF::Bivar_Function wrapper assumed) and apply A <- .5*A + f2(A,B)
  CTF::Bivar_Function<> bfun(&f2);
  .5*A["ijkl"] += bfun(A["ijkl"],B["ijkl"]);

  // gather the updated contents of A
  int64_t nall2_A;
  double * all_end_data_A;
  A.read_all(&nall2_A, &all_end_data_A);

  // compare every entry of A against the reference value computed locally
  int pass = (nall_A == nall2_A);
  for (int64_t i=0; i<nall_A; i++){
    if (fabs(.5*all_start_data_A[i] + f2(all_start_data_A[i],all_start_data_B[i]) - all_end_data_A[i]) >= 1.E-6) pass = 0;
  }
  MPI_Allreduce(MPI_IN_PLACE, &pass, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);

  if (dw.rank == 0){
    if (pass)
      printf("{ A[\"ijkl\"] = f2(A[\"ijkl\"], B[\"ijkl\"]) } passed\n");
    else
      printf("{ A[\"ijkl\"] = f2(A[\"ijkl\"], B[\"ijkl\"]) } failed\n");
  }

  delete [] all_start_data_A;
  delete [] all_end_data_A;
  delete [] all_start_data_B;

  return pass;
}
char * getCmdOption(char ** begin, char ** end, const std::string & option){
  char ** itr = std::find(begin, end, option);
  if (itr != end && ++itr != end){
    return *itr;
  }
  return 0;
}

int main(int argc, char ** argv){
  int rank, np, n;
  int const in_num = argc;
  char ** input_str = argv;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &np);

  // read the tensor dimension from the -n option (fallback value assumed)
  if (getCmdOption(input_str, input_str+in_num, "-n"))
    n = atoi(getCmdOption(input_str, input_str+in_num, "-n"));
  else n = 7;

  {
    World dw(MPI_COMM_WORLD, argc, argv);
    if (rank == 0)
      printf("Computing bivar_function A_ijkl = f(B_ijkl, A_ijkl)\n");
    bivar_function(n, dw);
  }

  MPI_Finalize();
  return 0;
}
Referenced API (a minimal usage sketch follows this list):

- int main(int argc, char **argv)
- int bivar_function(int n, World &dw)
- void read_all(int64_t *npair, dtype **data, bool unpack=false) -- collects the entire tensor data on each process (not memory scalable)
- CTF::World -- an instance of the CTF library (world) on an MPI communicator
- double f2(double a, double b)
- char * getCmdOption(char **begin, char **end, const std::string &option)
- void fill_random(dtype rmin, dtype rmax) -- fills local unique tensor elements with random values in the range [min,max]; works only for dtype in {f...
- int rank -- rank of local processor
- CTF::Tensor -- an instance of a tensor within a CTF world
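
The sketch below is a minimal, assumed usage pattern for the calls referenced above (fill_random, read_all, and an elementwise bivariate function), not a copy of the example: the 2D tensor sizes, the helper sum2, and the CTF::Bivar_Function wrapper type are illustrative choices. It fills two matrices with random values, applies sum2 elementwise, and verifies the update entry by entry, mirroring the self-check in bivar_function().

#include <ctf.hpp>
#include <cmath>
#include <cstdio>
using namespace CTF;

// illustrative bivariate function (hypothetical helper, not part of the example)
double sum2(double a, double b){ return a + b; }

int main(int argc, char ** argv){
  MPI_Init(&argc, &argv);
  {
    World dw(MPI_COMM_WORLD, argc, argv);   // CTF world on the given MPI communicator

    int lens[] = {4, 4};
    int syms[] = {NS, NS};
    Tensor<> X(2, lens, syms, dw);
    Tensor<> Y(2, lens, syms, dw);
    X.fill_random(-.5, .5);                 // random values in [-.5,.5]
    Y.fill_random(-.5, .5);

    // gather the full contents of X and Y on every process (not memory scalable)
    int64_t nx, ny;
    double * xdat, * ydat;
    X.read_all(&nx, &xdat);
    Y.read_all(&ny, &ydat);

    // assumed elementwise wrapper, used the same way as bfun in the example
    CTF::Bivar_Function<> fsum(&sum2);
    X["ij"] += fsum(X["ij"], Y["ij"]);

    int64_t nx2;
    double * xdat2;
    X.read_all(&nx2, &xdat2);

    // each entry should now satisfy X_new = X_old + sum2(X_old, Y_old)
    int ok = (nx == nx2);
    for (int64_t i = 0; ok && i < nx; i++)
      if (fabs(xdat[i] + sum2(xdat[i], ydat[i]) - xdat2[i]) >= 1.E-6) ok = 0;
    if (dw.rank == 0) printf("elementwise update %s\n", ok ? "matches" : "differs");

    delete [] xdat; delete [] ydat; delete [] xdat2;
  }
  MPI_Finalize();
  return 0;
}

As in the listing above, the tensor being updated also appears as an operand on the right-hand side; the check assumes the old values of X are the ones read when forming the elementwise result.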