#ifndef __INT_SYM_SEQ_SUM_H__
#define __INT_SYM_SEQ_SUM_H__

/** \brief performs symmetric summation with unblocked reference kernel */
int sym_seq_sum_ref(char const * alpha, char const * A,
                    algstrct const * sr_A, int order_A,
                    int const * edge_len_A, int const * sym_A,
                    int const * idx_map_A, char const * beta,
                    char * B, algstrct const * sr_B,
                    int order_B, int const * edge_len_B,
                    int const * sym_B, int const * idx_map_B);

/** \brief performs symmetric summation with custom elementwise function */
int sym_seq_sum_cust(char const * alpha, char const * A,
                     algstrct const * sr_A, int order_A,
                     int const * edge_len_A, int const * sym_A,
                     int const * idx_map_A, char const * beta,
                     char * B, algstrct const * sr_B,
                     int order_B, int const * edge_len_B,
                     int const * sym_B, int const * idx_map_B,
                     univar_function const * func);

/** \brief performs symmetric summation with blocked daxpy */
int sym_seq_sum_inr(char const * alpha, char const * A,
                    algstrct const * sr_A, int order_A,
                    int const * edge_len_A, int const * sym_A,
                    int const * idx_map_A, char const * beta,
                    char * B, algstrct const * sr_B,
                    int order_B, int const * edge_len_B,
                    int const * sym_B, int const * idx_map_B,
                    int inr_stride);

#endif /* __INT_SYM_SEQ_SUM_H__ */
int sym_seq_sum_ref(char const *alpha, char const *A, algstrct const *sr_A, int order_A, int const *edge_len_A, int const *sym_A, int const *idx_map_A, char const *beta, char *B, algstrct const *sr_B, int order_B, int const *edge_len_B, int const *sym_B, int const *idx_map_B)
performs symmetric summation with an unblocked reference kernel
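To make the semantics concrete, here is a minimal standalone sketch, not CTF's actual implementation: it is specialized to double instead of the algstrct char*-based element interface and ignores the sym_A/sym_B symmetry packing. The helper name seq_sum_ref_sketch and the num_idx/idx_len parameters are invented for illustration. Each dimension d of a tensor is mapped by idx_map to one global index; for every assignment of the global index set, the kernel performs B[off_B] += alpha * A[off_A], after scaling B once by beta.

#include <cstdio>
#include <vector>

// Plain-double sketch of the unblocked reference summation (hypothetical
// helper; real kernel operates on algstrct elements and symmetric packing).
void seq_sum_ref_sketch(double alpha, double const * A,
                        int order_A, int const * edge_len_A, int const * idx_map_A,
                        double beta, double * B,
                        int order_B, int const * edge_len_B, int const * idx_map_B,
                        int num_idx, int const * idx_len) {
  long nB = 1;
  for (int d = 0; d < order_B; ++d) nB *= edge_len_B[d];
  for (long i = 0; i < nB; ++i) B[i] *= beta;      // apply beta once per output
  std::vector<int> idx(num_idx, 0);                // odometer over global indices
  for (;;) {
    long off_A = 0, str_A = 1, off_B = 0, str_B = 1;
    for (int d = 0; d < order_A; ++d) { off_A += idx[idx_map_A[d]] * str_A; str_A *= edge_len_A[d]; }
    for (int d = 0; d < order_B; ++d) { off_B += idx[idx_map_B[d]] * str_B; str_B *= edge_len_B[d]; }
    B[off_B] += alpha * A[off_A];                  // the unblocked inner update
    int d = 0;
    while (d < num_idx && ++idx[d] == idx_len[d]) idx[d++] = 0;
    if (d == num_idx) break;                       // odometer wrapped: done
  }
}

int main() {
  // B["i"] = 0.0*B["i"] + 1.0*A["ij"]: sum a 2x3 A over its second index
  double A[6] = {1, 2, 3, 4, 5, 6};   // element (i,j) stored at i + 2*j
  double B[2] = {0, 0};
  int edge_A[2] = {2, 3}, map_A[2] = {0, 1};
  int edge_B[1] = {2},    map_B[1] = {0};
  int idx_len[2] = {2, 3};
  seq_sum_ref_sketch(1.0, A, 2, edge_A, map_A, 0.0, B, 1, edge_B, map_B, 2, idx_len);
  std::printf("B = {%g, %g}\n", B[0], B[1]);  // prints B = {9, 12}
}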
int sym_seq_sum_cust(char const *alpha, char const *A, algstrct const *sr_A, int order_A, int const *edge_len_A, int const *sym_A, int const *idx_map_A, char const *beta, char *B, algstrct const *sr_B, int order_B, int const *edge_len_B, int const *sym_B, int const *idx_map_B, univar_function const *func)
performs symmetric summation with a custom elementwise function
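A hedged sketch of how the custom variant differs: relative to the reference loop sketched above, only the inner update changes, applying a user-supplied univariate function to the A operand before accumulation. The univar_fn typedef is a plain-double stand-in for the univar_function type in the signature (whose real interface is algstrct-aware and not shown here), and the placement of alpha relative to func is an assumption.

// Hypothetical stand-in for the univar_function parameter.
typedef double (*univar_fn)(double);

// Inner update of the custom kernel: B accumulates func(A) rather than A.
// Whether alpha is applied before or after func is an assumption here.
inline void cust_update(double alpha, double a, double * b, univar_fn func) {
  *b += alpha * func(a);
}

static double square(double x) { return x * x; }

For example, replacing "B[off_B] += alpha * A[off_A];" in the earlier sketch with cust_update(alpha, A[off_A], &B[off_B], square) turns the summation into an accumulation of squared entries of A.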
int sym_seq_sum_inr(char const *alpha, char const *A, algstrct const *sr_A, int order_A, int const *edge_len_A, int const *sym_A, int const *idx_map_A, char const *beta, char *B, algstrct const *sr_B, int order_B, int const *edge_len_B, int const *sym_B, int const *idx_map_B, int inr_stride)
performs symmetric summation with a blocked daxpy kernel
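The blocked variant amortizes loop overhead when the innermost dimension is contiguous in both operands: a run of inr_stride consecutive elements is handled by one BLAS daxpy call instead of inr_stride scalar updates. A sketch under that contiguity assumption (the helper name blocked_update is invented for illustration):

#include <cblas.h>  // any CBLAS provider (OpenBLAS, MKL, ...)

// One inner-block update of the blocked kernel:
//   B[off_B + k] += alpha * A[off_A + k],  k = 0 .. inr_stride-1,
// issued as a single unit-stride daxpy.
static void blocked_update(double alpha,
                           double const * A, long off_A,
                           double * B, long off_B,
                           int inr_stride) {
  cblas_daxpy(inr_stride, alpha, A + off_A, 1, B + off_B, 1);
}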