Import the numpy library and create a random matrix

In [1]:
import numpy as np
n = 4
A = np.random.random((n,n))
A
Out[1]:
array([[ 0.98560831,  0.11823295,  0.72342078,  0.95922546],
       [ 0.20515565,  0.37738114,  0.38818925,  0.33459847],
       [ 0.78185553,  0.95349057,  0.74539072,  0.34848531],
       [ 0.68354569,  0.66207449,  0.56496249,  0.43039886]])
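
The entries of A depend on the random state, so the outputs below will differ from run to run; to make runs reproducible, one can seed the generator first (a minimal sketch, not used in the cells above):

# sketch: fix the seed before drawing A so results are reproducible
np.random.seed(0)
A = np.random.random((n,n))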

Create another matrix and multiply the two

In [2]:
B = np.random.random((n,n))
np.dot(A,B)
Out[2]:
array([[ 1.37028366,  1.67135975,  1.38344438,  1.3174106 ],
       [ 0.69550475,  0.85317029,  0.51779038,  0.87487786],
       [ 1.66146624,  1.87982872,  1.3491488 ,  1.88574364],
       [ 1.33055926,  1.5305887 ,  1.10025433,  1.4429062 ]])

Import the Cyclops Tensor Framework (CTF) library and convert the numpy matrices to CTF tensors

In [3]:
import ctf
tA = ctf.astensor(A)
tB = ctf.astensor(B)
ctf.dot(tA,tB)
Out[3]:
array([[ 1.37028366,  1.67135975,  1.38344438,  1.3174106 ],
       [ 0.69550475,  0.85317029,  0.51779038,  0.87487786],
       [ 1.66146624,  1.87982872,  1.3491488 ,  1.88574364],
       [ 1.33055926,  1.5305887 ,  1.10025433,  1.4429062 ]])
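
ctf.astensor copies the numpy array into a (possibly distributed) CTF tensor, and to_nparray converts back; as a quick sanity check (a minimal sketch), the two products should agree to rounding error:

# sketch: compare the CTF product against the numpy product
print(np.allclose(ctf.dot(tA,tB).to_nparray(), np.dot(A,B)))  # expect True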

Use CTF's index-based notation to perform the multiplication

In [4]:
tC = ctf.zeros((n,n))
tC.i("ij") << tA.i("ik")*tB.i("kj")
tC
Out[4]:
array([[ 1.37028366,  1.67135975,  1.38344438,  1.3174106 ],
       [ 0.69550475,  0.85317029,  0.51779038,  0.87487786],
       [ 1.66146624,  1.87982872,  1.3491488 ,  1.88574364],
       [ 1.33055926,  1.5305887 ,  1.10025433,  1.4429062 ]])
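
Note that << accumulates into the left-hand side rather than overwriting it, which is why tC is initialized with ctf.zeros (and why set_zero is used before reusing a tensor below); a sketch illustrating the semantics, under that assumption:

# sketch: repeating the contraction doubles every entry of tC,
# since << behaves like +=
tC.i("ij") << tA.i("ik")*tB.i("kj")
print(np.allclose(tC.to_nparray(), 2.*np.dot(A,B)))  # expect True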

The particular characters 'i', 'j', 'k' don't matter; we can replace them with 'z', '?', '+'

In [5]:
tC = ctf.zeros((n,n))
tC.i("z?") << tA.i("z+")*tB.i("+?")
tC
Out[5]:
array([[ 1.37028366,  1.67135975,  1.38344438,  1.3174106 ],
       [ 0.69550475,  0.85317029,  0.51779038,  0.87487786],
       [ 1.66146624,  1.87982872,  1.3491488 ,  1.88574364],
       [ 1.33055926,  1.5305887 ,  1.10025433,  1.4429062 ]])

numpy has similar functionality via einsum, and CTF provides a matching einsum interface

In [6]:
np.einsum("ik,kj->ij",A,B)
Out[6]:
array([[ 1.37028366,  1.67135975,  1.38344438,  1.3174106 ],
       [ 0.69550475,  0.85317029,  0.51779038,  0.87487786],
       [ 1.66146624,  1.87982872,  1.3491488 ,  1.88574364],
       [ 1.33055926,  1.5305887 ,  1.10025433,  1.4429062 ]])
In [7]:
ctf.einsum("ik,kj->ij",tA,tB)
Out[7]:
array([[ 1.37028366,  1.67135975,  1.38344438,  1.3174106 ],
       [ 0.69550475,  0.85317029,  0.51779038,  0.87487786],
       [ 1.66146624,  1.87982872,  1.3491488 ,  1.88574364],
       [ 1.33055926,  1.5305887 ,  1.10025433,  1.4429062 ]])
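
The einsum string is not limited to matrix multiplication; a few more one-liners in the same notation (a minimal numpy sketch):

# sketch: other contractions expressible in einsum notation
print(np.einsum("ii->", A))       # trace of A
print(np.einsum("ij->ji", A))     # transpose of A
print(np.einsum("ij,ij->", A, B)) # Frobenius inner product of A and B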

This notation can be used to contract tensor networks, for instance the tensor train, also known as a matrix product state (MPS)

In [10]:
k = 2 # rank
W = ctf.tensor([k,n,k])
V = ctf.tensor([k,n])
W.fill_random(-1.0,1.0)
V.fill_random(-1.0,1.0)
Z = ctf.tensor([n,n,n,n,n,n])
Z.i("ijklmn") << V.i("ai")*W.i("ajb")*W.i("bkc")*W.i("cld")*W.i("dme")*V.i("en")
print(Z[1:3,0,2,1,0:3,1].reshape((3,2)))
array([[ 0.07366687,  0.12803461],
       [ 0.06423109,  0.02295772],
       [ 0.05320817,  0.02112921]])
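
Rather than handing the whole train to a single contraction, the same result can be built pairwise, absorbing one core at a time from the left; a sketch using ctf.einsum (a reconstruction for illustration, checked against Z above):

# sketch: pairwise left-to-right contraction of the same tensor train
T = ctf.einsum("ai,ajb->ijb", V, W)        # absorb the left boundary vector
T = ctf.einsum("ijb,bkc->ijkc", T, W)      # absorb the second core
T = ctf.einsum("ijkc,cld->ijkld", T, W)    # third core
T = ctf.einsum("ijkld,dme->ijklme", T, W)  # fourth core
T = ctf.einsum("ijklme,en->ijklmn", T, V)  # absorb the right boundary vector
print(np.allclose(T.to_nparray(), Z.to_nparray()))  # expect True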

Using np.einsum, the same contraction looks as follows

In [11]:
V2 = V.to_nparray()
W2 = W.to_nparray()
Z2 = np.einsum("ai,ajb,bkc,cld,dme,en->ijklmn",V2,W2,W2,W2,W2,V2)
print(Z2[1:3,0,2,1,0:3,1].reshape((3,2)))

#the same contraction works with ctf.einsum
Z = ctf.einsum("ai,ajb,bkc,cld,dme,en->ijklmn",V,W,W,W,W,V)
print(Z[1:3,0,2,1,0:3,1].reshape((3,2)))
[[ 0.07366687  0.12803461]
 [ 0.06423109  0.02295772]
 [ 0.05320817  0.02112921]]
array([[ 0.07366687,  0.12803461],
       [ 0.06423109,  0.02295772],
       [ 0.05320817,  0.02112921]])

To contract together a CP (canonical polyadic) decomposition, we need Hadamard products, since the same index appears in more than two operands

In [13]:
U = ctf.tensor([k,n])
U.fill_random(-1.0,1.0)

Z.set_zero()

#note that the `a` index appears in multiple operands
Z.i("ijklmn") << U.i("ai")*U.i("aj")*U.i("ak")*U.i("al")*U.i("am")*U.i("an")
print(Z[1:3,0,2,1,0:3,1].reshape((3,2)))

U2 = U.to_nparray()
Z2 = np.einsum("ai,aj,ak,al,am,an->ijklmn",U2,U2,U2,U2,U2,U2)
print(Z2[1:3,0,2,1,0:3,1].reshape((3,2)))
array([[-0.06735958,  0.01278285],
       [-0.05052931,  0.02043648],
       [-0.05052931,  0.04237852]])
[[-0.06735958  0.01278285]
 [-0.05052931  0.02043648]
 [-0.05052931  0.04237852]]
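
The repeated index a realizes a rank-k CP form: each term is a 6-way outer product of the a-th rows of U, summed over a. A dense numpy sketch making that explicit:

# sketch: the CP contraction as an explicit sum of outer products
Z3 = np.zeros([n]*6)
for a in range(k):
    u = U2[a]  # a-th row of U
    Z3 += np.einsum("i,j,k,l,m,n->ijklmn", u, u, u, u, u, u)
print(np.allclose(Z3, Z2))  # expect True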

Let's test the performance of the MPS contraction for different ranks

In [16]:
import time
for k in np.arange(4)*2+2:
    t = time.time()
    W = ctf.tensor([k,n,k])
    V = ctf.tensor([k,n])
    W.fill_random(-1.0,1.0)
    V.fill_random(-1.0,1.0)
    Z = ctf.tensor([n,n,n,n,n,n])
    Z.i("ijklmn") << V.i("ai")*W.i("ajb")*W.i("bkc")*W.i("cld")*W.i("dme")*V.i("en")
    V2 = V.to_nparray()
    W2 = W.to_nparray()
    #time CTF, including all initialization and conversions
    print("ctf   k =",k,"took",time.time()-t,"seconds.")
    t2 = time.time()
    Z2 = np.einsum("ai,ajb,bkc,cld,dme,en->ijklmn",V2,W2,W2,W2,W2,V2)
    print("numpy k =",k,"took",time.time()-t2,"seconds.")
ctf   k = 2 took 0.028398990631103516 seconds.
numpy k = 2 took 0.012132644653320312 seconds.
ctf   k = 4 took 0.17014527320861816 seconds.
numpy k = 4 took 0.22895264625549316 seconds.
ctf   k = 6 took 0.88639235496521 seconds.
numpy k = 6 took 1.2816572189331055 seconds.
ctf   k = 8 took 4.034878253936768 seconds.
numpy k = 8 took 4.610719680786133 seconds.
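
The numpy timings above use np.einsum's default left-to-right evaluation; on numpy >= 1.12 one can pass optimize=True to let einsum pick a pairwise contraction path, which should narrow or reverse the gap at larger k (a sketch, not timed here):

t2 = time.time()
Z2 = np.einsum("ai,ajb,bkc,cld,dme,en->ijklmn",
               V2, W2, W2, W2, W2, V2, optimize=True)
print("numpy (optimized path) k =", k, "took", time.time()-t2, "seconds.")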

Let's create a sparse tensor of total size 4^12

In [17]:
Z = ctf.tensor([n,n,n,n,n,n,n,n,n,n,n,n],sp=1)
Z.fill_sp_random(-1.,1.,.00001)
Z.read_local_nnz()
Out[17]:
(array([   22719,    93826,   119244,   267725,   487669,   660105,
          695677,   874440,   901502,   950291,  1007954,  1066395,
         1129878,  1143853,  1151885,  1220807,  1719826,  1763154,
         1789555,  1867359,  1973550,  2109998,  2130630,  2386222,
         2578764,  2696878,  2741576,  2843608,  3136093,  3228922,
         3596926,  3702374,  3710983,  3786229,  4054600,  4095468,
         4347769,  4359880,  4450764,  4461709,  4707221,  4863154,
         4870663,  4877028,  5190057,  5348657,  5416971,  5641826,
         5852037,  5919829,  5969084,  6000715,  6221829,  6442957,
         6605931,  6648639,  6760104,  6791844,  6920001,  7004972,
         7012191,  7088057,  7196409,  7229832,  7344236,  7432969,
         7564478,  7588891,  7738676,  7760252,  7999727,  8086936,
         8183463,  8208168,  8213808,  8220184,  8247167,  8387430,
         8420390,  8512627,  8705658,  8752247,  8873669,  8882706,
         8910018,  8989521,  9222430,  9287663,  9358465,  9524597,
         9659059,  9784230,  9845343,  9852590, 10012972, 10135988,
        10185155, 10388874, 10390673, 10414781, 10656508, 10835353,
        10887611, 10919483, 11084657, 11166668, 11198566, 11469205,
        11677888, 11809105, 11915094, 12156202, 12182735, 12316791,
        12369070, 12398402, 12582018, 12613426, 12617735, 12646964,
        12717177, 12829999, 12918714, 12984461, 13078439, 13083780,
        13118705, 13168316, 13223392, 13315052, 13380976, 13385728,
        13411032, 13533053, 13542486, 13699515, 13761475, 13768326,
        13925901, 13945787, 14092825, 14384865, 14387447, 14440351,
        14596777, 15240705, 15346351, 15439621, 15485513, 15499413,
        15524847, 15575624, 15644233, 15647797, 15698120, 15834954,
        15851639, 15920878, 15963911, 16337559, 16383041, 16402975,
        16430688, 16459050, 16485735, 16519772, 16670754, 16773130]),
 array([  2.41706157e-01,   2.85507392e-01,   9.45811642e-01,
         -6.29857654e-02,  -6.92117908e-01,  -3.61112264e-01,
         -2.45027253e-01,  -3.35055194e-02,   2.45278750e-01,
          4.37365737e-01,   4.71210465e-01,  -8.12253899e-01,
          1.99272988e-01,  -8.69470655e-01,   5.04959966e-01,
         -6.24239758e-01,  -1.18672754e-01,  -8.99437754e-01,
         -8.15395844e-01,   6.11418405e-03,   3.35063494e-01,
         -3.56088426e-01,  -4.41384487e-01,   7.40688938e-01,
          1.16661058e-01,   7.71723092e-01,  -2.08927677e-01,
          7.26091140e-01,   5.75076775e-01,  -2.09235954e-01,
         -5.28038780e-01,   7.65463486e-01,  -3.16505701e-01,
          7.36242614e-01,  -1.41295006e-01,  -6.45607734e-01,
         -4.85414158e-01,  -6.58832872e-01,   1.80866040e-01,
         -2.12375720e-01,  -8.12315466e-01,  -6.61687871e-01,
         -7.91374524e-02,  -1.90880098e-01,   1.07838061e-02,
          4.56079279e-01,  -6.95154160e-01,   5.51827027e-02,
          7.85025635e-02,  -6.41215730e-01,   1.80977893e-01,
         -3.03505976e-01,  -9.98693567e-01,   1.72304698e-01,
         -6.00374651e-01,   8.78012388e-01,  -2.46541714e-01,
          3.55897231e-01,  -5.13395838e-01,  -4.52045046e-02,
          2.12674844e-01,   4.78170465e-01,  -4.96938964e-01,
         -9.83421009e-02,   2.71229570e-01,   5.02987106e-01,
         -8.44587797e-01,  -4.71540398e-01,  -8.86649545e-01,
         -3.45946681e-01,   3.66275212e-01,  -8.27225395e-01,
         -3.56237615e-02,  -6.00036901e-01,   5.30091035e-04,
          3.60575560e-01,   7.67483185e-01,   7.94566994e-01,
          9.44212514e-01,  -6.35637298e-01,  -2.29659624e-01,
          2.19477468e-01,  -9.93226716e-01,  -5.75841422e-01,
         -8.49432233e-01,   9.49630764e-01,  -5.40079074e-01,
          4.47997121e-01,   2.73651359e-02,  -5.02473548e-01,
         -2.80452622e-01,  -7.09080529e-01,  -9.24967912e-01,
         -9.46692576e-01,  -1.35548546e-01,   2.58813320e-01,
         -6.92616336e-01,   5.06478188e-02,  -1.21707364e-02,
          4.96705213e-01,   6.91683478e-01,  -3.60499183e-01,
          5.30815706e-01,  -9.19494073e-01,   9.44431033e-01,
          9.87062384e-01,  -3.90290950e-01,  -8.49149940e-01,
          5.73370892e-01,   1.30951761e-01,   5.67025023e-01,
         -2.41052157e-01,  -9.84064998e-01,   6.52927370e-01,
          6.52429828e-01,  -5.77458946e-01,  -6.27856801e-01,
         -8.17120549e-01,   6.99533487e-01,  -6.06198963e-01,
          6.21661287e-01,   4.49496247e-01,   8.03890750e-01,
         -5.05434800e-01,  -4.54322835e-02,  -3.74197209e-01,
          7.40632646e-01,  -3.30008941e-01,  -4.51150160e-02,
         -2.53014712e-01,  -1.84984745e-01,  -6.66873096e-01,
         -9.03632733e-01,  -6.94942679e-01,  -9.32791010e-01,
          4.02814977e-01,  -3.69578128e-01,   1.68881875e-01,
         -2.48198963e-01,  -3.20909474e-01,  -1.97472906e-01,
          8.99067350e-01,  -3.10284321e-02,  -5.68772334e-01,
          1.05657231e-01,  -1.03456736e-02,  -1.14807806e-02,
         -1.03060939e-01,   3.49132817e-01,  -8.51822496e-02,
          1.76678233e-02,   2.85024126e-01,   2.96907155e-01,
          3.78841943e-01,  -4.55546632e-01,  -8.42670793e-01,
         -6.36168307e-02,   9.68319383e-01,  -3.58805647e-02,
          5.70181182e-01,   4.92576563e-01,  -9.60409180e-01,
          4.45420765e-01,  -1.10185185e-01,  -3.61614948e-01,
          5.49515626e-01,  -8.51610221e-01,  -3.18823302e-01]))
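
With density 1e-5, the expected number of nonzeros is .00001 * 4**12 ≈ 168, matching the 168 index/value pairs above. read_local_nnz returns only the nonzeros stored on the calling process, so on a single process this is the global count (a quick sketch):

# sketch: compare the observed nonzero count with the requested density
inds, vals = Z.read_local_nnz()
print(len(vals), "nonzeros vs. expected", .00001 * 4**12)
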
In [18]:
#create a random vector in a sparse representation
v = ctf.tensor([n],sp=1)
v.fill_sp_random(0.,1.,1.)

#create an order 12 sparse tensor
Z = ctf.tensor([n,n,n,n,n,n,n,n,n,n,n,n],sp=1)

#fill tensor so that .001% of entries are nonzero
Z.fill_sp_random(0.,1.,.00001)

#add 1 to each entry of the 12-way diagonal (<< accumulates)
Z.i("iiiiiiiiiiii") << 1.

str12 = "1234567890ab"
for i in range(1,12)[::-1]:
    #normalize tensor
    Z.i(str12[0:i]).scl(1./ctf.sum(Z))
    #create tensor with one less dimension
    Z_new = ctf.tensor([n for j in range(i)],sp=1)
    #contract tensor over its last mode with a vector
    Z_new.i(str12[0:i]) << Z.i(str12[0:i+1])*v.i(str12[i])
    #replace old tensor with lower-dimensional one
    Z = Z_new

#read the nonzeros from Z stored on this processor
inds, vals = Z.read_local_nnz()
print(Z.ndim)
print(inds,vals)
1
[0 1 2 3] [ 0.00282648  0.02617537  0.54507062  0.00662197]
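
Each sparse contraction step can be mirrored densely with numpy as a sanity check; a minimal sketch of one such step on a small order-4 tensor (the names T and T3 are illustrative, not from the cells above):

# sketch: one sparse contraction step cross-checked against dense numpy
T = ctf.tensor([n]*4, sp=1)
T.fill_sp_random(0., 1., .1)
T3 = ctf.tensor([n]*3, sp=1)
T3.i("abc") << T.i("abcd")*v.i("d")  # contract last mode with v
print(np.allclose(T3.to_nparray(),
                  np.einsum("abcd,d->abc", T.to_nparray(), v.to_nparray())))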