// NOTE(review): non-contiguous extract — the numbers fused into each line
// ("17", "19", ...) appear to be original source line numbers, and many
// interior lines are missing; this fragment is not compilable as shown.
// Fragment of readall_test(int n, int m, World &dw) (signature per the doc
// text later in this file): fills a 4-index tensor (dims {n,m,n,m}, all
// non-symmetric per shapeN4={NS,NS,NS,NS}) with drand48() values on a
// MPI_COMM_SELF subworld, reads it all back, and compares element-wise.
17   int rank, i, num_pes, pass;
// Query this process's rank and the communicator size.
    19   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    20   MPI_Comm_size(MPI_COMM_WORLD, &num_pes);
// Symmetry descriptor: NS in every mode (no symmetry) — presumably CTF's NS
// enum; TODO confirm against the CTF headers.
    23   int shapeN4[] = {
NS,
NS,
NS,NS};
// Edge lengths of the 4 tensor modes: n, m, n, m.
    24   int sizeN4[] = {n,m,n,m};
// Flat buffers for the generated values and their global indices.
    29   std::vector<double> vals;
    30   std::vector<int64_t> inds;
// A single-process CTF world; NOTE(review): lines between 32 and 39 are
// missing from this view, so how sw is used cannot be confirmed here.
    32     World sw(MPI_COMM_SELF);
// Generate one pseudo-random value per element of the n*m*n*m tensor.
    39       for (i=0; i<n*m*n*m; i++){
    40         vals.push_back(drand48());
// After read-back (read_all, per the doc text below — collects the entire
// tensor on each process), ns must equal the full element count.
    56   assert(ns == n*n*m*m);
// Element-wise comparison with an absolute tolerance of 1e-10; the branch
// taken on mismatch is in lines omitted from this view.
    62       if (fabs(vs[i]-vals[i])>1.E-10)
// Success / failure report for the identity being tested.
    70       printf(
"{ sum(ai)A[\"aiai\"]=sum(ai)mA[\"ai\"] } passed \n");
    73       printf(
"{ sum(ai)A[\"aiai\"]=sum(ai)mA[\"ai\"] } failed \n");
// Fragment of getCmdOption(char **begin, char **end, const std::string
// &option) — the signature and the return statement lie outside this view.
// Locate the option string among the argv-style pointers [begin, end);
// if found AND a following argument exists, the body (omitted here)
// presumably returns that following argument — TODO confirm.
    85   char ** itr = std::find(begin, end, option);
    86   if (itr != end && ++itr != end){
// NOTE(review): fragment of main — original lines 94-95, 97, 101-102,
// 104-107, 109-114 and the closing brace are missing from this view;
// rank, np and in_num are declared in those omitted lines.
    93 int main(
int argc, 
char ** argv){
// Alias argv for the command-line option scanner.
    96   char ** input_str = argv;
    98   MPI_Init(&argc, &argv);
    99   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   100   MPI_Comm_size(MPI_COMM_WORLD, &np);
// Parse -n; NOTE(review): atoi(nullptr) is undefined if "-n" is absent —
// presumably a guard exists in the omitted lines (97/101-102); confirm.
   103     n = atoi(
getCmdOption(input_str, input_str+in_num, 
"-n"));
// Parse -m; same nullptr caveat as for -n above.
   108     m = atoi(
getCmdOption(input_str, input_str+in_num, 
"-m"));
// Construct the CTF world on which the test runs (doc text below: "an
// instance of the CTF library (world) on an MPI communicator").
   115     World dw(argc, argv);
 
void read_all(int64_t *npair, dtype **data, bool unpack=false)
collects the entire tensor data on each process (not memory scalable) 
an instance of the CTF library (world) on an MPI communicator 
char * getCmdOption(char **begin, char **end, const std::string &option)
int main(int argc, char **argv)
void add_from_subworld(Tensor< dtype > *tsr, dtype alpha, dtype beta)
accumulates this tensor from a tensor object defined on a different world 
int readall_test(int n, int m, World &dw)
an instance of a tensor within a CTF world