int rank, i, num_pes, pass;

MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &num_pes);

int shape_NS4[] = {NS, NS, NS, NS};
int shape_SY4[] = {SY, NS, SY, NS};
int shape_SH4[] = {SH, NS, SH, NS};
int shape_AS4[] = {SH, NS, SH, NS}; // note: declared with the same symmetric-hollow shape as shape_SH4
int sizeN4[]    = {n, n, n, n};

Tensor<> A_NS(4, sizeN4, shape_NS4, dw);
Tensor<> A_SY(4, sizeN4, shape_SY4, dw);
Tensor<> A_SH(4, sizeN4, shape_SH4, dw);
Tensor<> A_AS(4, sizeN4, shape_AS4, dw);
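
Here the Tensor<> constructor takes the order (4), the edge lengths, a per-mode symmetry descriptor (NS nonsymmetric, SY symmetric with the next mode, SH symmetric-hollow, i.e. symmetric with a zero diagonal, AS antisymmetric), and the world over which the tensor is distributed. A minimal 2-D sketch of the same constructor (the function name is illustrative, not part of the test):

#include <ctf.hpp>
using namespace CTF;

// Declare a symmetric n-by-n matrix: SY on mode 0 marks it symmetric with
// the next mode, so only the unique part is stored and entry (i,j)
// mirrors (j,i).
void declare_symmetric_matrix(int n, World & dw){
  int size2[]  = {n, n};
  int shape2[] = {SY, NS};
  Tensor<> S(2, size2, shape2, dw);
}
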
std::vector<int64_t> indices;
std::vector<double>  vals;
// ... (inside a loop over diagonal entries i)
indices.push_back(i + i*n + i*n*n + i*n*n*n);
vals.push_back((double)(i+1));
// ...
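
The pushed global index i + i*n + i*n*n + i*n*n*n is the column-major linearization of the diagonal entry (i,i,i,i) of an n*n*n*n tensor. Index/value pairs like these are handed to CTF's bulk write(npair, global_idx, data), which all ranks call together, each contributing its own pairs. A sketch of that pattern (the helper name and the round-robin split across ranks are assumptions, since the test's surrounding loop is elided above):

#include <ctf.hpp>
#include <vector>
using namespace CTF;

void write_diagonal(Tensor<> & A, int64_t n, int rank, int np){
  std::vector<int64_t> idx;
  std::vector<double>  val;
  // Round-robin split: each rank contributes a disjoint set of pairs.
  for (int64_t i = rank; i < n; i += np){
    idx.push_back(i + i*n + i*n*n + i*n*n*n); // global offset of (i,i,i,i)
    val.push_back((double)(i + 1));           // diagonal value i+1
  }
  // Collective bulk write: every rank participates with its own pairs.
  A.write((int64_t)idx.size(), idx.data(), val.data());
}
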
if (fabs(sum - n*(n+1.)/2.) > 1.E-10){
  // ...
  printf("Nonsymmetric diagonal write failed!\n");
}

if (fabs(sum - n*(n+1.)/2.) > 1.E-10){
  // ...
  printf("Symmetric diagonal write failed!, err = %lf\n", sum - n*(n+1.)/2.);
}

// ...
printf("Asymmetric diagonal write failed!\n");
// ...
printf("Symmetric-hollow diagonal write failed!\n");
// ...

for (i = 0; i < (int)vals.size(); i++){
  vals[i] = sqrt(vals[i]);
}
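
The square roots only take effect once the values are stored back into the tensor; in CTF that round trip is a bulk read over a list of global indices followed by a write over the same list. A sketch of the round trip (reusing the indices vector built earlier; not a verbatim fragment of the test):

#include <ctf.hpp>
#include <cmath>
#include <vector>
using namespace CTF;

void sqrt_entries(Tensor<> & A, std::vector<int64_t> & indices){
  std::vector<double> vals(indices.size());
  A.read((int64_t)indices.size(), indices.data(), vals.data());  // fetch entries
  for (size_t i = 0; i < vals.size(); i++)
    vals[i] = std::sqrt(vals[i]);                                // local transform
  A.write((int64_t)indices.size(), indices.data(), vals.data()); // store back
}
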
// ...
A_NS["ijkl"] = A_SY["ijkl"];
sum += A_NS["ijkl"] * A_NS["ijkl"];

if (fabs(sum - n*(n+1.)/2.) > 1.E-10){
  // ...
  printf("Nonsymmetric self contraction failed!, err = %lf\n", sum - n*(n+1.)/2.);
}

Tensor<> B_SY(4, sizeN4, shape_SY4, dw);
// ...
sum = A_SY["ijkl"] * A_SY["ijkl"];

if (fabs(sum - n*(n+1.)/2.) > 1.E-10){
  // ...
  printf("Symmetric self contraction failed!, err = %lf\n", sum - n*(n+1.)/2.);
}
if (rank == 0){
  MPI_Reduce(MPI_IN_PLACE, &pass, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
  if (pass)
    printf("{ diagonal write test } passed\n");
  else
    printf("{ diagonal write test } failed\n");
} else
  MPI_Reduce(&pass, MPI_IN_PLACE, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
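
Both MPI_Reduce calls run the same MPI_MIN reduction; the root passes MPI_IN_PLACE as the send buffer so pass is reduced in place, while non-root ranks send their flag (the receive buffer is ignored away from the root). If every rank needs the verdict, the branch can be replaced by a single MPI_Allreduce; a sketch:

#include <mpi.h>

// Returns 1 only if every rank's local flag is 1 (MPI_MIN over 0/1 flags).
int all_ranks_pass(int local_pass){
  int global_pass = 0;
  MPI_Allreduce(&local_pass, &global_pass, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);
  return global_pass;
}
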
char ** itr = std::find(begin, end, option);
if (itr != end && ++itr != end){
  return *itr; // the option's value follows the flag in argv
}
return 0;
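
std::find compares each argv entry against the option string; on a match, the following entry is returned as the option's value, and a null pointer signals an absent flag, so callers should guard the atoi. A usage sketch (the fallback value of 7 is an assumption, not taken from the source):

#include <cstdlib>

int parse_edge_length(int argc, char ** argv){
  char * arg = getCmdOption(argv, argv + argc, "-n");
  int n = (arg != 0) ? atoi(arg) : 7; // assumed default when -n is absent
  return (n > 0) ? n : 7;             // also guards against non-numeric input
}
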
int main(int argc, char ** argv){
  int rank, np, n;
  int in_num = argc;
  char ** input_str = argv;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &np);
  // ...
  n = atoi(getCmdOption(input_str, input_str + in_num, "-n"));
  // ...
  {
    World dw(MPI_COMM_WORLD, argc, argv);
    // ...
    if (rank == 0) printf("Testing reading and writing functions in CTF\n");
    // ...
  }
  // ...
}
 
Reference signatures:

  int readwrite_test(int n, World & dw)
  int main(int argc, char ** argv)
  char * getCmdOption(char ** begin, char ** end, const std::string & option)

  World  -- an instance of the CTF library (world) on an MPI communicator
  Tensor -- an instance of a tensor within a CTF world
  dtype reduce(OP op) -- performs a reduction on the tensor
  def sum(tensor, init_A, axis=None, dtype=None, out=None, keepdims=None)
    -- the corresponding reduction in the CTF Python interface