| /isa-l/include/ |
| H A D | erasure_code.h |
    117  ec_encode_data_base(int len, int srcs, int dests, unsigned char *v, unsigned char **src,
    178  gf_vect_dot_prod_base(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    202  gf_vect_dot_prod(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    229  gf_vect_mad(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    239  gf_vect_mad_base(int len, int vec, int vec_i, unsigned char *v, unsigned char *src,
    327  gf_vect_dot_prod_sse(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    349  gf_vect_dot_prod_avx(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    371  gf_vect_dot_prod_avx2(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    394  gf_2vect_dot_prod_sse(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    417  gf_2vect_dot_prod_avx(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    [all …]
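The prototypes above are the public Reed-Solomon entry points of erasure_code.h. As a rough orientation, a minimal encode call might look like the sketch below; it assumes the usual isa-l helpers gf_gen_rs_matrix() and ec_init_tables() from the same header, and the K/P/LEN values are purely illustrative.

```c
/* Minimal sketch of encoding with the isa-l erasure-code API.
 * Assumes erasure_code.h declares gf_gen_rs_matrix(), ec_init_tables()
 * and ec_encode_data(); fragment counts and length are illustrative. */
#include "erasure_code.h"

#define K   10    /* data fragments   */
#define P   4     /* parity fragments */
#define LEN 4096  /* bytes per fragment */

int encode_example(unsigned char *data[K], unsigned char *parity[P])
{
        unsigned char encode_matrix[(K + P) * K];
        unsigned char gftbls[K * P * 32];

        /* Build a (K+P) x K generator matrix, then expand its parity rows
         * into the 32-byte-per-coefficient tables the SIMD kernels expect. */
        gf_gen_rs_matrix(encode_matrix, K + P, K);
        ec_init_tables(K, P, &encode_matrix[K * K], gftbls);

        /* Compute the P parity buffers from the K source buffers. */
        ec_encode_data(LEN, K, P, gftbls, data, parity);
        return 0;
}
```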
|
| H A D | gf_vect_mul.h |
    67   gf_vect_mul_sse(int len, unsigned char *gftbl, void *src, void *dest);
    88   gf_vect_mul_avx(int len, unsigned char *gftbl, void *src, void *dest);
    113  gf_vect_mul(int len, unsigned char *gftbl, void *src, void *dest);
    147  gf_vect_mul_base(int len, unsigned char *a, unsigned char *src, unsigned char *dest);
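gf_vect_mul() multiplies a whole buffer by a single GF(2^8) constant that has been expanded into a 32-byte lookup table. A minimal sketch of that flow, assuming the gf_vect_mul_init() helper from the same header and that len must be a multiple of 32 for the SIMD variants:

```c
/* Sketch: scale a buffer by the GF(2^8) constant 0x1d with gf_vect_mul().
 * gf_vect_mul_init() expands the constant into the 32-byte table; the
 * constant and the multiple-of-32 length requirement are assumptions
 * stated here, not verified against this header's comments. */
#include "gf_vect_mul.h"

int scale_buffer(unsigned char *src, unsigned char *dest, int len)
{
        unsigned char gftbl[32];

        gf_vect_mul_init(0x1d, gftbl);             /* constant -> lookup table */
        return gf_vect_mul(len, gftbl, src, dest); /* dest[i] = 0x1d * src[i] in GF(2^8) */
}
```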
|
| /isa-l/raid/ |
| H A D | raid_base.c |
    48   unsigned long **src = (unsigned long **) array;  in pq_gen_base() local
    55   q = p = src[vects - 3][i];  in pq_gen_base()
    58   p ^= s = src[j][i];  in pq_gen_base()
    64   src[vects - 2][i] = p; // second to last pointer is p  in pq_gen_base()
    65   src[vects - 1][i] = q; // last pointer is q  in pq_gen_base()
    75   unsigned char **src = (unsigned char **) array;  in pq_check_base() local
    81   q = p = src[vects - 3][i];  in pq_check_base()
    84   s = src[j][i];  in pq_check_base()
    91   if (src[vects - 2][i] != p) // second to last pointer is p  in pq_check_base()
    93   if (src[vects - 1][i] != q) // last pointer is q  in pq_check_base()
    [all …]
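As the comments in the hits note, pq_gen_base() treats the last two pointers of the array as the P and Q destinations. A byte-wise reference for the same RAID-6 P/Q relation might look like the sketch below; the GF(2^8) multiply-by-2 is written out explicitly, the real routine works on machine words, and the coefficient ordering here is illustrative rather than a claim about the library's internal convention.

```c
/* Byte-wise sketch of RAID-6 P/Q generation over GF(2^8), polynomial 0x11d.
 * array holds vects-2 data vectors followed by P and Q (last two pointers),
 * mirroring the convention in pq_gen_base(). Illustrative only. */
static unsigned char gf_mul2(unsigned char a)
{
        return (unsigned char) ((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
}

void pq_gen_sketch(int vects, int len, unsigned char **array)
{
        for (int i = 0; i < len; i++) {
                unsigned char p = 0, q = 0;

                for (int j = 0; j < vects - 2; j++) {   /* data vectors only */
                        q = gf_mul2(q) ^ array[j][i];   /* Q: Horner-style accumulation */
                        p ^= array[j][i];               /* P: plain XOR parity */
                }
                array[vects - 2][i] = p;  /* second-to-last pointer is P */
                array[vects - 1][i] = q;  /* last pointer is Q */
        }
}
```

pq_check_base() follows the same recurrence but compares the recomputed P and Q against the stored vectors instead of writing them.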
|
| /isa-l/crc/ |
| H A D | crc16_t10dif_copy_test.c |
    55   crc16_t10dif_copy_ref(uint16_t seed, uint8_t *dst, uint8_t *src, uint64_t len);
    77   crc_copy_check(const char *description, u8 *dst, u8 *src, u8 dst_fill_val, int len, int tot)  in crc_copy_check() argument
    88   u16 crc_dut = crc16_t10dif_copy(seed, dst, src, len);  in crc_copy_check()
    89   u16 crc_ref = crc16_t10dif(seed, src, len);  in crc_copy_check()
    94   } else if (memcmp(dst, src, len)) {  in crc_copy_check()
    102  crc_dut = crc16_t10dif_copy_ref(seed, dst, src, len);  in crc_copy_check()
    103  crc_ref = crc16_t10dif_ref(seed, src, len);  in crc_copy_check()
    108  } else if (memcmp(dst, src, len)) {  in crc_copy_check()
    125  u8 *src, *dst;  in main() local
    139  src = src_raw;  in main()
    [all …]
|
| H A D | crc16_t10dif_copy_perf.c |
    60   void *src, *dst;  in main() local
    66   if (posix_memalign(&src, 1024, TEST_LEN)) {  in main()
    78   memset(src, 0, TEST_LEN);  in main()
    79   BENCHMARK(&start, BENCHMARK_TIME, crc = crc16_t10dif_copy(TEST_SEED, dst, src, TEST_LEN));  in main()
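The test above validates crc16_t10dif_copy() by comparing its result against a plain crc16_t10dif() over the source and then memcmp-ing the copy. A standalone use of the copy-with-CRC call follows the same pattern; this sketch assumes the declarations come from isa-l's crc.h and uses an arbitrary illustrative seed.

```c
/* Sketch: copy a buffer while computing its CRC16-T10DIF, cross-checked the
 * same way crc16_t10dif_copy_test.c does. Seed value is illustrative. */
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include "crc.h"

void copy_with_crc(uint8_t *dst, const uint8_t *src, uint64_t len)
{
        uint16_t seed = 0x1234;
        uint16_t crc_copy = crc16_t10dif_copy(seed, dst, (uint8_t *) src, len);
        uint16_t crc_ref  = crc16_t10dif(seed, (uint8_t *) src, len);

        assert(crc_copy == crc_ref);        /* same CRC as a plain pass over src */
        assert(memcmp(dst, src, len) == 0); /* dst now holds a byte-exact copy */
}
```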
|
| /isa-l/erasure_code/ppc64le/ |
| H A D | ec_base_vsx.c |
    5    gf_vect_dot_prod(int len, int vlen, unsigned char *v, unsigned char **src, unsigned char *dest)  in gf_vect_dot_prod() argument
    7    gf_vect_dot_prod_vsx(len, vlen, v, src, dest);  in gf_vect_dot_prod()
    11   gf_vect_mad(int len, int vec, int vec_i, unsigned char *v, unsigned char *src, unsigned char *dest)  in gf_vect_mad() argument
    13   gf_vect_mad_vsx(len, vec, vec_i, v, src, dest);  in gf_vect_mad()
    17   ec_encode_data(int len, int srcs, int dests, unsigned char *v, unsigned char **src,  in ec_encode_data() argument
    21   ec_encode_data_base(len, srcs, dests, v, src, dest);  in ec_encode_data()
    26   gf_6vect_dot_prod_vsx(len, srcs, v, src, dest);  in ec_encode_data()
    33   gf_6vect_dot_prod_vsx(len, srcs, v, src, dest);  in ec_encode_data()
    36   gf_5vect_dot_prod_vsx(len, srcs, v, src, dest);  in ec_encode_data()
    39   gf_4vect_dot_prod_vsx(len, srcs, v, src, dest);  in ec_encode_data()
    [all …]
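The hits suggest that the VSX ec_encode_data() falls back to ec_encode_data_base() for small inputs and otherwise produces output vectors in groups, using the widest gf_Nvect_dot_prod_vsx kernel available. A hedged sketch of that grouping pattern follows; the cutoff, loop structure, and table stride are assumptions about the common isa-l dispatch idiom, not the literal contents of this file.

```c
/* Sketch of a group-of-outputs dispatch, in the style suggested by the
 * gf_6vect/5vect/4vect_dot_prod_vsx calls above. Thresholds are assumed. */
#include "erasure_code.h"
#include "ec_base_vsx.h"

void ec_encode_data_vsx_sketch(int len, int k, int rows, unsigned char *g_tbls,
                               unsigned char **data, unsigned char **coding)
{
        if (len < 64) {                      /* assumed short-buffer cutoff */
                ec_encode_data_base(len, k, rows, g_tbls, data, coding);
                return;
        }
        while (rows >= 6) {                  /* widest kernel: six outputs at once */
                gf_6vect_dot_prod_vsx(len, k, g_tbls, data, coding);
                g_tbls += 6 * k * 32;        /* 32 table bytes per (src, dest) pair */
                coding += 6;
                rows -= 6;
        }
        switch (rows) {                      /* remaining outputs */
        case 5: gf_5vect_dot_prod_vsx(len, k, g_tbls, data, coding); break;
        case 4: gf_4vect_dot_prod_vsx(len, k, g_tbls, data, coding); break;
        case 3: gf_3vect_dot_prod_vsx(len, k, g_tbls, data, coding); break;
        case 2: gf_2vect_dot_prod_vsx(len, k, g_tbls, data, coding); break;
        case 1: gf_vect_dot_prod_vsx(len, k, g_tbls, data, *coding); break;
        default: break;
        }
}
```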
|
| H A D | ec_base_vsx.h |
    69   gf_vect_mul_vsx(int len, unsigned char *gftbls, unsigned char *src, unsigned char *dest);
    90   gf_vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    113  gf_2vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    136  gf_3vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    159  gf_4vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    182  gf_5vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    205  gf_6vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    230  gf_vect_mad_vsx(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    254  gf_2vect_mad_vsx(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    279  gf_3vect_mad_vsx(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    [all …]
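Across the _vsx, _neon, _sse/_avx and _base families, the two underlying GF(2^8) primitives are the same: a dot product that builds each destination from every source, and a multiply-accumulate (mad) that folds one source into existing destinations. A scalar reference for the single-destination forms, with a hypothetical gf_mul_byte() standing in for the table-driven multiply the library actually uses:

```c
/* Scalar reference semantics for gf_vect_dot_prod / gf_vect_mad. In the real
 * API the coefficients arrive pre-expanded as 32-byte gftbls entries; the
 * raw-byte coef arrays and gf_mul_byte() here are illustrative stand-ins. */
extern unsigned char gf_mul_byte(unsigned char a, unsigned char b);

void gf_vect_dot_prod_ref(int len, int vlen, const unsigned char *coef,
                          unsigned char **src, unsigned char *dest)
{
        for (int i = 0; i < len; i++) {
                unsigned char acc = 0;
                for (int j = 0; j < vlen; j++)
                        acc ^= gf_mul_byte(coef[j], src[j][i]);
                dest[i] = acc;                     /* dest = sum_j coef[j] * src[j] */
        }
}

void gf_vect_mad_ref(int len, unsigned char coef, const unsigned char *src,
                     unsigned char *dest)
{
        for (int i = 0; i < len; i++)
                dest[i] ^= gf_mul_byte(coef, src[i]);   /* dest += coef * src */
}
```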
|
| H A D | gf_6vect_dot_prod_vsx.c |
    4    gf_6vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls, unsigned char **src,  in gf_6vect_dot_prod_vsx() argument
    16   gf_vect_mul_vsx(len, &gftbls[0 * 32 * vlen], src[0], (unsigned char *) dest[0]);  in gf_6vect_dot_prod_vsx()
    17   gf_vect_mul_vsx(len, &gftbls[1 * 32 * vlen], src[0], (unsigned char *) dest[1]);  in gf_6vect_dot_prod_vsx()
    18   gf_vect_mul_vsx(len, &gftbls[2 * 32 * vlen], src[0], (unsigned char *) dest[2]);  in gf_6vect_dot_prod_vsx()
    19   gf_vect_mul_vsx(len, &gftbls[3 * 32 * vlen], src[0], (unsigned char *) dest[3]);  in gf_6vect_dot_prod_vsx()
    20   gf_vect_mul_vsx(len, &gftbls[4 * 32 * vlen], src[0], (unsigned char *) dest[4]);  in gf_6vect_dot_prod_vsx()
    21   gf_vect_mul_vsx(len, &gftbls[5 * 32 * vlen], src[0], (unsigned char *) dest[5]);  in gf_6vect_dot_prod_vsx()
    24   gf_6vect_mad_vsx(len, vlen, j, gftbls, src[j], dest);  in gf_6vect_dot_prod_vsx()
    38   gf_vect_dot_prod_base(head, vlen, &gftbls[0 * 32 * vlen], src, t0);  in gf_6vect_dot_prod_vsx()
    39   gf_vect_dot_prod_base(head, vlen, &gftbls[1 * 32 * vlen], src, t1);  in gf_6vect_dot_prod_vsx()
    [all …]
|
| H A D | gf_5vect_dot_prod_vsx.c |
    4    gf_5vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls, unsigned char **src,  in gf_5vect_dot_prod_vsx() argument
    15   gf_vect_mul_vsx(len, &gftbls[0 * 32 * vlen], src[0], (unsigned char *) dest[0]);  in gf_5vect_dot_prod_vsx()
    16   gf_vect_mul_vsx(len, &gftbls[1 * 32 * vlen], src[0], (unsigned char *) dest[1]);  in gf_5vect_dot_prod_vsx()
    17   gf_vect_mul_vsx(len, &gftbls[2 * 32 * vlen], src[0], (unsigned char *) dest[2]);  in gf_5vect_dot_prod_vsx()
    18   gf_vect_mul_vsx(len, &gftbls[3 * 32 * vlen], src[0], (unsigned char *) dest[3]);  in gf_5vect_dot_prod_vsx()
    19   gf_vect_mul_vsx(len, &gftbls[4 * 32 * vlen], src[0], (unsigned char *) dest[4]);  in gf_5vect_dot_prod_vsx()
    22   gf_5vect_mad_vsx(len, vlen, j, gftbls, src[j], dest);  in gf_5vect_dot_prod_vsx()
    35   gf_vect_dot_prod_base(head, vlen, &gftbls[0 * 32 * vlen], src, t0);  in gf_5vect_dot_prod_vsx()
    36   gf_vect_dot_prod_base(head, vlen, &gftbls[1 * 32 * vlen], src, t1);  in gf_5vect_dot_prod_vsx()
    37   gf_vect_dot_prod_base(head, vlen, &gftbls[2 * 32 * vlen], src, t2);  in gf_5vect_dot_prod_vsx()
    [all …]
|
| H A D | gf_4vect_dot_prod_vsx.c |
    4    gf_4vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls, unsigned char **src,  in gf_4vect_dot_prod_vsx() argument
    15   gf_vect_mul_vsx(len, &gftbls[0 * 32 * vlen], src[0], (unsigned char *) dest[0]);  in gf_4vect_dot_prod_vsx()
    16   gf_vect_mul_vsx(len, &gftbls[1 * 32 * vlen], src[0], (unsigned char *) dest[1]);  in gf_4vect_dot_prod_vsx()
    17   gf_vect_mul_vsx(len, &gftbls[2 * 32 * vlen], src[0], (unsigned char *) dest[2]);  in gf_4vect_dot_prod_vsx()
    18   gf_vect_mul_vsx(len, &gftbls[3 * 32 * vlen], src[0], (unsigned char *) dest[3]);  in gf_4vect_dot_prod_vsx()
    21   gf_4vect_mad_vsx(len, vlen, j, gftbls, src[j], dest);  in gf_4vect_dot_prod_vsx()
    33   gf_vect_dot_prod_base(head, vlen, &gftbls[0 * 32 * vlen], src, t0);  in gf_4vect_dot_prod_vsx()
    34   gf_vect_dot_prod_base(head, vlen, &gftbls[1 * 32 * vlen], src, t1);  in gf_4vect_dot_prod_vsx()
    35   gf_vect_dot_prod_base(head, vlen, &gftbls[2 * 32 * vlen], src, t2);  in gf_4vect_dot_prod_vsx()
    36   gf_vect_dot_prod_base(head, vlen, &gftbls[3 * 32 * vlen], src, t3);  in gf_4vect_dot_prod_vsx()
    [all …]
|
| H A D | gf_3vect_dot_prod_vsx.c |
    4    gf_3vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls, unsigned char **src,  in gf_3vect_dot_prod_vsx() argument
    15   gf_vect_mul_vsx(len, &gftbls[0 * 32 * vlen], src[0], (unsigned char *) dest[0]);  in gf_3vect_dot_prod_vsx()
    16   gf_vect_mul_vsx(len, &gftbls[1 * 32 * vlen], src[0], (unsigned char *) dest[1]);  in gf_3vect_dot_prod_vsx()
    17   gf_vect_mul_vsx(len, &gftbls[2 * 32 * vlen], src[0], (unsigned char *) dest[2]);  in gf_3vect_dot_prod_vsx()
    20   gf_3vect_mad_vsx(len, vlen, j, gftbls, src[j], dest);  in gf_3vect_dot_prod_vsx()
    31   gf_vect_dot_prod_base(head, vlen, &gftbls[0 * 32 * vlen], src, t0);  in gf_3vect_dot_prod_vsx()
    32   gf_vect_dot_prod_base(head, vlen, &gftbls[1 * 32 * vlen], src, t1);  in gf_3vect_dot_prod_vsx()
    33   gf_vect_dot_prod_base(head, vlen, &gftbls[2 * 32 * vlen], src, t2);  in gf_3vect_dot_prod_vsx()
    56   s = (unsigned char *) src[j];  in gf_3vect_dot_prod_vsx()
|
| H A D | gf_2vect_dot_prod_vsx.c |
    4    gf_2vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls, unsigned char **src,  in gf_2vect_dot_prod_vsx() argument
    15   gf_vect_mul_vsx(len, &gftbls[0 * 32 * vlen], src[0], (unsigned char *) dest[0]);  in gf_2vect_dot_prod_vsx()
    16   gf_vect_mul_vsx(len, &gftbls[1 * 32 * vlen], src[0], (unsigned char *) dest[1]);  in gf_2vect_dot_prod_vsx()
    19   gf_2vect_mad_vsx(len, vlen, j, gftbls, src[j], dest);  in gf_2vect_dot_prod_vsx()
    29   gf_vect_dot_prod_base(head, vlen, &gftbls[0 * 32 * vlen], src, t0);  in gf_2vect_dot_prod_vsx()
    30   gf_vect_dot_prod_base(head, vlen, &gftbls[1 * 32 * vlen], src, t1);  in gf_2vect_dot_prod_vsx()
    48   s = (unsigned char *) src[j];  in gf_2vect_dot_prod_vsx()
|
| /isa-l/igzip/ |
| H A D | stdmac.asm |
    179  ;; rolx64 dst, src, amount
    187  ;; rolx32 dst, src, amount
    212  %define %%src %2
    214  vmovdqu %%dest, %%src
    216  movdqu %%dest, %%src
    222  %define %%src %2
    224  vmovdqa %%dest, %%src
    226  movdqa %%dest, %%src
    232  %define %%src %2
    234  vmovd %%dest, %%src
    [all …]
|
| /isa-l/erasure_code/aarch64/ |
| H A D | ec_aarch64_highlevel_func.c |
    33   gf_vect_dot_prod_neon(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    36   gf_2vect_dot_prod_neon(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    39   gf_3vect_dot_prod_neon(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    42   gf_4vect_dot_prod_neon(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    45   gf_5vect_dot_prod_neon(int len, int vlen, unsigned char *gftbls, unsigned char **src,
    48   gf_vect_mad_neon(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    51   gf_2vect_mad_neon(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    54   gf_3vect_mad_neon(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    57   gf_4vect_mad_neon(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    60   gf_5vect_mad_neon(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    [all …]
|
| /isa-l/erasure_code/ |
| H A D | ec_base_aliases.c |
    33   gf_vect_dot_prod(int len, int vlen, unsigned char *v, unsigned char **src, unsigned char *dest)  in gf_vect_dot_prod() argument
    35   gf_vect_dot_prod_base(len, vlen, v, src, dest);  in gf_vect_dot_prod()
    39   gf_vect_mad(int len, int vec, int vec_i, unsigned char *v, unsigned char *src, unsigned char *dest)  in gf_vect_mad() argument
    41   gf_vect_mad_base(len, vec, vec_i, v, src, dest);  in gf_vect_mad()
    45   ec_encode_data(int len, int srcs, int dests, unsigned char *v, unsigned char **src,  in ec_encode_data() argument
    48   ec_encode_data_base(len, srcs, dests, v, src, dest);  in ec_encode_data()
    59   gf_vect_mul(int len, unsigned char *a, void *src, void *dest)  in gf_vect_mul() argument
    61   return gf_vect_mul_base(len, a, (unsigned char *) src, (unsigned char *) dest);  in gf_vect_mul()
|
| H A D | ec_highlevel_func.c |
    165  gf_vect_mad_avx512(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    168  gf_2vect_mad_avx512(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    171  gf_3vect_mad_avx512(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    174  gf_4vect_mad_avx512(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    177  gf_5vect_mad_avx512(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    180  gf_6vect_mad_avx512(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    278  gf_vect_mad_avx512_gfni(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    281  gf_2vect_mad_avx512_gfni(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    284  gf_3vect_mad_avx512_gfni(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    287  gf_4vect_mad_avx512_gfni(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
    [all …]
|
| /isa-l/crc/aarch64/ |
| H A D | crc32c_mix_neoverse_n1.S |
    43   .macro crc32_u64 dst,src,data
    44   crc32cx \dst,\src,\data
    46   .macro crc32_u32 dst,src,data
    47   crc32cw \dst,\src,\data
    49   .macro crc32_u16 dst,src,data
    50   crc32ch \dst,\src,\data
    52   .macro crc32_u8 dst,src,data
    53   crc32cb \dst,\src,\data
|
| H A D | crc32_mix_neoverse_n1.S |
    42   .macro crc32_u64 dst,src,data
    43   crc32x \dst,\src,\data
    45   .macro crc32_u32 dst,src,data
    46   crc32w \dst,\src,\data
    48   .macro crc32_u16 dst,src,data
    49   crc32h \dst,\src,\data
    51   .macro crc32_u8 dst,src,data
    52   crc32b \dst,\src,\data
|
| H A D | crc32_iscsi_crc_ext.S |
    44   .macro crc32_u64 dst,src,data
    45   crc32cx \dst,\src,\data
    47   .macro crc32_u32 dst,src,data
    48   crc32cw \dst,\src,\data
    50   .macro crc32_u16 dst,src,data
    51   crc32ch \dst,\src,\data
    53   .macro crc32_u8 dst,src,data
    54   crc32cb \dst,\src,\data
|
| H A D | crc32_gzip_refl_crc_ext.S |
    45   .macro crc32_u64 dst,src,data
    46   crc32x \dst,\src,\data
    48   .macro crc32_u32 dst,src,data
    49   crc32w \dst,\src,\data
    51   .macro crc32_u16 dst,src,data
    52   crc32h \dst,\src,\data
    54   .macro crc32_u8 dst,src,data
    55   crc32b \dst,\src,\data
|
| H A D | crc32_mix_default.S |
    36   .macro crc32_u64 dst,src,data
    37   crc32x \dst,\src,\data
    40   .macro crc32_u32 dst,src,data
    41   crc32w \dst,\src,\data
    44   .macro crc32_u16 dst,src,data
    45   crc32h \dst,\src,\data
    48   .macro crc32_u8 dst,src,data
    49   crc32b \dst,\src,\data
|
| H A D | crc32c_mix_default.S |
    34   .macro crc32_u64 dst,src,data
    35   crc32cx \dst,\src,\data
    38   .macro crc32_u32 dst,src,data
    39   crc32cw \dst,\src,\data
    42   .macro crc32_u16 dst,src,data
    43   crc32ch \dst,\src,\data
    46   .macro crc32_u8 dst,src,data
    47   crc32cb \dst,\src,\data
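All six assembly files above define the same thin crc32_u8/u16/u32/u64 macro layer; the only difference is whether the macros expand to the CRC32 (crc32x/w/h/b) or CRC32C (crc32cx/w/h/b) instruction forms. From C the same instructions are reachable through the ACLE intrinsics, roughly as in this sketch (compile for a CRC-capable target, e.g. -march=armv8-a+crc).

```c
/* Sketch: per-width CRC32C steps via ACLE intrinsics instead of the
 * crc32_u* assembly macros. Assumes <arm_acle.h> and a +crc target. */
#include <arm_acle.h>
#include <stdint.h>

uint32_t crc32c_fold(uint32_t crc, uint64_t qword, uint8_t byte)
{
        crc = __crc32cd(crc, qword);   /* crc32cx: fold in 8 bytes */
        crc = __crc32cb(crc, byte);    /* crc32cb: fold in 1 byte  */
        return crc;
}
```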
|
| /isa-l/mem/ |
| H A D | mem_zero_detect_avx.asm |
    65   %define src arg0
    84   vmovdqu ymm0, [src+pos]
    85   vmovdqu ymm1, [src+pos+1*32]
    86   vmovdqu ymm2, [src+pos+2*32]
    87   vmovdqu ymm3, [src+pos+3*32]
    101  vmovdqu ymm0, [src+len]
    102  vmovdqu ymm1, [src+len+1*32]
    103  vmovdqu ymm2, [src+len+2*32]
    104  vmovdqu ymm3, [src+len+3*32]
    124  vmovdqu ymm0, [src]
    [all …]
|
| H A D | mem_zero_detect_sse.asm |
    65   %define src arg0
    84   movdqu xmm0, [src+pos]
    85   movdqu xmm1, [src+pos+1*16]
    86   movdqu xmm2, [src+pos+2*16]
    87   movdqu xmm3, [src+pos+3*16]
    101  movdqu xmm0, [src+len]
    102  movdqu xmm1, [src+len+1*16]
    103  movdqu xmm2, [src+len+2*16]
    104  movdqu xmm3, [src+len+3*16]
    124  movdqu xmm0, [src]
    [all …]
|
| H A D | mem_zero_detect_avx2.asm |
    65   %define src arg0
    88   vmovdqu ymm0, [src]
    89   vpor ymm0, ymm0, [src+32]
    90   vmovdqu ymm1, [src+64]
    91   vpor ymm1, ymm1, [src+96]
    98   add src, tmp1
    109  vmovdqu ymm0, [src]
    110  vpor ymm0, ymm0,[src+32]
    111  vmovdqu ymm1, [src+64]
    112  vpor ymm1, ymm1, [src
    [all …]
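The SSE/AVX/AVX2 variants above all OR wide chunks of the buffer together and test the accumulated value, so the routine only reports whether any non-zero byte exists. A portable scalar sketch of that contract (isa-l's real prototype and return convention should be taken from its headers; this is illustrative):

```c
/* Scalar sketch of the zero-detect contract: return 0 iff every byte of src
 * is zero. The SIMD versions accumulate 16/32-byte chunks with por/vpor
 * before a single test, which is what the loads and ORs above implement. */
#include <stddef.h>

int mem_zero_detect_sketch(const void *src, size_t len)
{
        const unsigned char *p = src;
        unsigned char acc = 0;

        for (size_t i = 0; i < len; i++)
                acc |= p[i];          /* accumulate, mirroring the vpor chains */
        return acc != 0;
}
```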