1*81ad6265SDimitry Andric #include "blake3_impl.h"
2*81ad6265SDimitry Andric
3*81ad6265SDimitry Andric #include <immintrin.h>
4*81ad6265SDimitry Andric
5*81ad6265SDimitry Andric #define DEGREE 8
6*81ad6265SDimitry Andric
loadu(const uint8_t src[32])7*81ad6265SDimitry Andric INLINE __m256i loadu(const uint8_t src[32]) {
8*81ad6265SDimitry Andric return _mm256_loadu_si256((const __m256i *)src);
9*81ad6265SDimitry Andric }
10*81ad6265SDimitry Andric
storeu(__m256i src,uint8_t dest[16])11*81ad6265SDimitry Andric INLINE void storeu(__m256i src, uint8_t dest[16]) {
12*81ad6265SDimitry Andric _mm256_storeu_si256((__m256i *)dest, src);
13*81ad6265SDimitry Andric }
14*81ad6265SDimitry Andric
addv(__m256i a,__m256i b)15*81ad6265SDimitry Andric INLINE __m256i addv(__m256i a, __m256i b) { return _mm256_add_epi32(a, b); }
16*81ad6265SDimitry Andric
17*81ad6265SDimitry Andric // Note that clang-format doesn't like the name "xor" for some reason.
xorv(__m256i a,__m256i b)18*81ad6265SDimitry Andric INLINE __m256i xorv(__m256i a, __m256i b) { return _mm256_xor_si256(a, b); }
19*81ad6265SDimitry Andric
set1(uint32_t x)20*81ad6265SDimitry Andric INLINE __m256i set1(uint32_t x) { return _mm256_set1_epi32((int32_t)x); }
21*81ad6265SDimitry Andric
rot16(__m256i x)22*81ad6265SDimitry Andric INLINE __m256i rot16(__m256i x) {
23*81ad6265SDimitry Andric return _mm256_shuffle_epi8(
24*81ad6265SDimitry Andric x, _mm256_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2,
25*81ad6265SDimitry Andric 13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2));
26*81ad6265SDimitry Andric }
27*81ad6265SDimitry Andric
rot12(__m256i x)28*81ad6265SDimitry Andric INLINE __m256i rot12(__m256i x) {
29*81ad6265SDimitry Andric return _mm256_or_si256(_mm256_srli_epi32(x, 12), _mm256_slli_epi32(x, 32 - 12));
30*81ad6265SDimitry Andric }
31*81ad6265SDimitry Andric
rot8(__m256i x)32*81ad6265SDimitry Andric INLINE __m256i rot8(__m256i x) {
33*81ad6265SDimitry Andric return _mm256_shuffle_epi8(
34*81ad6265SDimitry Andric x, _mm256_set_epi8(12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1,
35*81ad6265SDimitry Andric 12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1));
36*81ad6265SDimitry Andric }
37*81ad6265SDimitry Andric
rot7(__m256i x)38*81ad6265SDimitry Andric INLINE __m256i rot7(__m256i x) {
39*81ad6265SDimitry Andric return _mm256_or_si256(_mm256_srli_epi32(x, 7), _mm256_slli_epi32(x, 32 - 7));
40*81ad6265SDimitry Andric }
41*81ad6265SDimitry Andric
// One round of the BLAKE3 compression function, applied to 8 independent
// states at once (one state per 32-bit lane of each vector). v[0..15] is the
// transposed 16-word state, m[0..15] the transposed message block, and r the
// round number used to index MSG_SCHEDULE. The statement order below encodes
// the four column G functions followed by the four diagonal G functions; do
// not reorder.
INLINE void round_fn(__m256i v[16], __m256i m[16], size_t r) {
  // --- Column step: G(v0,v4,v8,v12), G(v1,v5,v9,v13), G(v2,v6,v10,v14),
  // G(v3,v7,v11,v15), interleaved four-wide. First half of each G: mix in
  // the even-indexed message words, then add/xor/rot16 ... rot12.
  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
  v[0] = addv(v[0], v[4]);
  v[1] = addv(v[1], v[5]);
  v[2] = addv(v[2], v[6]);
  v[3] = addv(v[3], v[7]);
  v[12] = xorv(v[12], v[0]);
  v[13] = xorv(v[13], v[1]);
  v[14] = xorv(v[14], v[2]);
  v[15] = xorv(v[15], v[3]);
  v[12] = rot16(v[12]);
  v[13] = rot16(v[13]);
  v[14] = rot16(v[14]);
  v[15] = rot16(v[15]);
  v[8] = addv(v[8], v[12]);
  v[9] = addv(v[9], v[13]);
  v[10] = addv(v[10], v[14]);
  v[11] = addv(v[11], v[15]);
  v[4] = xorv(v[4], v[8]);
  v[5] = xorv(v[5], v[9]);
  v[6] = xorv(v[6], v[10]);
  v[7] = xorv(v[7], v[11]);
  v[4] = rot12(v[4]);
  v[5] = rot12(v[5]);
  v[6] = rot12(v[6]);
  v[7] = rot12(v[7]);
  // Second half of each column G: mix in the odd-indexed message words, then
  // add/xor/rot8 ... rot7.
  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
  v[0] = addv(v[0], v[4]);
  v[1] = addv(v[1], v[5]);
  v[2] = addv(v[2], v[6]);
  v[3] = addv(v[3], v[7]);
  v[12] = xorv(v[12], v[0]);
  v[13] = xorv(v[13], v[1]);
  v[14] = xorv(v[14], v[2]);
  v[15] = xorv(v[15], v[3]);
  v[12] = rot8(v[12]);
  v[13] = rot8(v[13]);
  v[14] = rot8(v[14]);
  v[15] = rot8(v[15]);
  v[8] = addv(v[8], v[12]);
  v[9] = addv(v[9], v[13]);
  v[10] = addv(v[10], v[14]);
  v[11] = addv(v[11], v[15]);
  v[4] = xorv(v[4], v[8]);
  v[5] = xorv(v[5], v[9]);
  v[6] = xorv(v[6], v[10]);
  v[7] = xorv(v[7], v[11]);
  v[4] = rot7(v[4]);
  v[5] = rot7(v[5]);
  v[6] = rot7(v[6]);
  v[7] = rot7(v[7]);

  // --- Diagonal step: G(v0,v5,v10,v15), G(v1,v6,v11,v12), G(v2,v7,v8,v13),
  // G(v3,v4,v9,v14). Because the state is kept transposed, the "rotation" of
  // rows appears here as shifted index patterns rather than shuffles.
  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
  v[0] = addv(v[0], v[5]);
  v[1] = addv(v[1], v[6]);
  v[2] = addv(v[2], v[7]);
  v[3] = addv(v[3], v[4]);
  v[15] = xorv(v[15], v[0]);
  v[12] = xorv(v[12], v[1]);
  v[13] = xorv(v[13], v[2]);
  v[14] = xorv(v[14], v[3]);
  v[15] = rot16(v[15]);
  v[12] = rot16(v[12]);
  v[13] = rot16(v[13]);
  v[14] = rot16(v[14]);
  v[10] = addv(v[10], v[15]);
  v[11] = addv(v[11], v[12]);
  v[8] = addv(v[8], v[13]);
  v[9] = addv(v[9], v[14]);
  v[5] = xorv(v[5], v[10]);
  v[6] = xorv(v[6], v[11]);
  v[7] = xorv(v[7], v[8]);
  v[4] = xorv(v[4], v[9]);
  v[5] = rot12(v[5]);
  v[6] = rot12(v[6]);
  v[7] = rot12(v[7]);
  v[4] = rot12(v[4]);
  // Second half of each diagonal G.
  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
  v[0] = addv(v[0], v[5]);
  v[1] = addv(v[1], v[6]);
  v[2] = addv(v[2], v[7]);
  v[3] = addv(v[3], v[4]);
  v[15] = xorv(v[15], v[0]);
  v[12] = xorv(v[12], v[1]);
  v[13] = xorv(v[13], v[2]);
  v[14] = xorv(v[14], v[3]);
  v[15] = rot8(v[15]);
  v[12] = rot8(v[12]);
  v[13] = rot8(v[13]);
  v[14] = rot8(v[14]);
  v[10] = addv(v[10], v[15]);
  v[11] = addv(v[11], v[12]);
  v[8] = addv(v[8], v[13]);
  v[9] = addv(v[9], v[14]);
  v[5] = xorv(v[5], v[10]);
  v[6] = xorv(v[6], v[11]);
  v[7] = xorv(v[7], v[8]);
  v[4] = xorv(v[4], v[9]);
  v[5] = rot7(v[5]);
  v[6] = rot7(v[6]);
  v[7] = rot7(v[7]);
  v[4] = rot7(v[4]);
}
157*81ad6265SDimitry Andric
// Transpose an 8x8 matrix of 32-bit words held in eight __m256i rows, in
// place. Used to convert between "one hash per vector" and "one word per
// lane across 8 hashes" layouts.
INLINE void transpose_vecs(__m256i vecs[DEGREE]) {
  // Interleave 32-bit lanes. The low unpack is lanes 00/11/44/55, and the high
  // is 22/33/66/77.
  __m256i ab_0145 = _mm256_unpacklo_epi32(vecs[0], vecs[1]);
  __m256i ab_2367 = _mm256_unpackhi_epi32(vecs[0], vecs[1]);
  __m256i cd_0145 = _mm256_unpacklo_epi32(vecs[2], vecs[3]);
  __m256i cd_2367 = _mm256_unpackhi_epi32(vecs[2], vecs[3]);
  __m256i ef_0145 = _mm256_unpacklo_epi32(vecs[4], vecs[5]);
  __m256i ef_2367 = _mm256_unpackhi_epi32(vecs[4], vecs[5]);
  __m256i gh_0145 = _mm256_unpacklo_epi32(vecs[6], vecs[7]);
  __m256i gh_2367 = _mm256_unpackhi_epi32(vecs[6], vecs[7]);

  // Interleave 64-bit lanes. The low unpack is lanes 00/22 and the high is
  // 11/33.
  __m256i abcd_04 = _mm256_unpacklo_epi64(ab_0145, cd_0145);
  __m256i abcd_15 = _mm256_unpackhi_epi64(ab_0145, cd_0145);
  __m256i abcd_26 = _mm256_unpacklo_epi64(ab_2367, cd_2367);
  __m256i abcd_37 = _mm256_unpackhi_epi64(ab_2367, cd_2367);
  __m256i efgh_04 = _mm256_unpacklo_epi64(ef_0145, gh_0145);
  __m256i efgh_15 = _mm256_unpackhi_epi64(ef_0145, gh_0145);
  __m256i efgh_26 = _mm256_unpacklo_epi64(ef_2367, gh_2367);
  __m256i efgh_37 = _mm256_unpackhi_epi64(ef_2367, gh_2367);

  // Interleave 128-bit lanes. 0x20 selects the low half of each source,
  // 0x31 the high half.
  vecs[0] = _mm256_permute2x128_si256(abcd_04, efgh_04, 0x20);
  vecs[1] = _mm256_permute2x128_si256(abcd_15, efgh_15, 0x20);
  vecs[2] = _mm256_permute2x128_si256(abcd_26, efgh_26, 0x20);
  vecs[3] = _mm256_permute2x128_si256(abcd_37, efgh_37, 0x20);
  vecs[4] = _mm256_permute2x128_si256(abcd_04, efgh_04, 0x31);
  vecs[5] = _mm256_permute2x128_si256(abcd_15, efgh_15, 0x31);
  vecs[6] = _mm256_permute2x128_si256(abcd_26, efgh_26, 0x31);
  vecs[7] = _mm256_permute2x128_si256(abcd_37, efgh_37, 0x31);
}
191*81ad6265SDimitry Andric
transpose_msg_vecs(const uint8_t * const * inputs,size_t block_offset,__m256i out[16])192*81ad6265SDimitry Andric INLINE void transpose_msg_vecs(const uint8_t *const *inputs,
193*81ad6265SDimitry Andric size_t block_offset, __m256i out[16]) {
194*81ad6265SDimitry Andric out[0] = loadu(&inputs[0][block_offset + 0 * sizeof(__m256i)]);
195*81ad6265SDimitry Andric out[1] = loadu(&inputs[1][block_offset + 0 * sizeof(__m256i)]);
196*81ad6265SDimitry Andric out[2] = loadu(&inputs[2][block_offset + 0 * sizeof(__m256i)]);
197*81ad6265SDimitry Andric out[3] = loadu(&inputs[3][block_offset + 0 * sizeof(__m256i)]);
198*81ad6265SDimitry Andric out[4] = loadu(&inputs[4][block_offset + 0 * sizeof(__m256i)]);
199*81ad6265SDimitry Andric out[5] = loadu(&inputs[5][block_offset + 0 * sizeof(__m256i)]);
200*81ad6265SDimitry Andric out[6] = loadu(&inputs[6][block_offset + 0 * sizeof(__m256i)]);
201*81ad6265SDimitry Andric out[7] = loadu(&inputs[7][block_offset + 0 * sizeof(__m256i)]);
202*81ad6265SDimitry Andric out[8] = loadu(&inputs[0][block_offset + 1 * sizeof(__m256i)]);
203*81ad6265SDimitry Andric out[9] = loadu(&inputs[1][block_offset + 1 * sizeof(__m256i)]);
204*81ad6265SDimitry Andric out[10] = loadu(&inputs[2][block_offset + 1 * sizeof(__m256i)]);
205*81ad6265SDimitry Andric out[11] = loadu(&inputs[3][block_offset + 1 * sizeof(__m256i)]);
206*81ad6265SDimitry Andric out[12] = loadu(&inputs[4][block_offset + 1 * sizeof(__m256i)]);
207*81ad6265SDimitry Andric out[13] = loadu(&inputs[5][block_offset + 1 * sizeof(__m256i)]);
208*81ad6265SDimitry Andric out[14] = loadu(&inputs[6][block_offset + 1 * sizeof(__m256i)]);
209*81ad6265SDimitry Andric out[15] = loadu(&inputs[7][block_offset + 1 * sizeof(__m256i)]);
210*81ad6265SDimitry Andric for (size_t i = 0; i < 8; ++i) {
211*81ad6265SDimitry Andric _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
212*81ad6265SDimitry Andric }
213*81ad6265SDimitry Andric transpose_vecs(&out[0]);
214*81ad6265SDimitry Andric transpose_vecs(&out[8]);
215*81ad6265SDimitry Andric }
216*81ad6265SDimitry Andric
// Build the per-lane block counters: lane i gets the 64-bit value
// counter + i when increment_counter is true, or just counter in every lane
// when false. The low and high 32-bit halves are returned separately.
INLINE void load_counters(uint64_t counter, bool increment_counter,
                          __m256i *out_lo, __m256i *out_hi) {
  // mask is all-ones when incrementing, all-zeros otherwise, so add1 is
  // either (0..7) or zero in every lane.
  const __m256i mask = _mm256_set1_epi32(-(int32_t)increment_counter);
  const __m256i add0 = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
  const __m256i add1 = _mm256_and_si256(mask, add0);
  __m256i l = _mm256_add_epi32(_mm256_set1_epi32((int32_t)counter), add1);
  // Detect unsigned overflow of the low half. AVX2 has only signed compares,
  // so XOR both sides with 0x80000000 to turn the signed compare into an
  // unsigned one: a lane overflowed iff add1 > l (unsigned).
  __m256i carry = _mm256_cmpgt_epi32(_mm256_xor_si256(add1, _mm256_set1_epi32(0x80000000)),
                                     _mm256_xor_si256(   l, _mm256_set1_epi32(0x80000000)));
  // Overflowed lanes of carry are -1; subtracting -1 adds the carry bit to
  // the high half.
  __m256i h = _mm256_sub_epi32(_mm256_set1_epi32((int32_t)(counter >> 32)), carry);
  *out_lo = l;
  *out_hi = h;
}
229*81ad6265SDimitry Andric
// Hash exactly 8 inputs in parallel, `blocks` 64-byte blocks each, writing
// the eight 32-byte chaining values contiguously to `out`. All 8 hashes share
// the same key, flags, and block count; if increment_counter is set, input i
// uses counter + i, otherwise all use `counter`.
static
void blake3_hash8_avx2(const uint8_t *const *inputs, size_t blocks,
                       const uint32_t key[8], uint64_t counter,
                       bool increment_counter, uint8_t flags,
                       uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
  // Chaining values, word-sliced: h_vecs[w] holds word w for all 8 hashes.
  __m256i h_vecs[8] = {
      set1(key[0]), set1(key[1]), set1(key[2]), set1(key[3]),
      set1(key[4]), set1(key[5]), set1(key[6]), set1(key[7]),
  };
  __m256i counter_low_vec, counter_high_vec;
  load_counters(counter, increment_counter, &counter_low_vec,
                &counter_high_vec);
  // CHUNK_START-style flags apply only to the first block.
  uint8_t block_flags = flags | flags_start;

  for (size_t block = 0; block < blocks; block++) {
    // flags_end (e.g. CHUNK_END) applies only to the last block.
    if (block + 1 == blocks) {
      block_flags |= flags_end;
    }
    __m256i block_len_vec = set1(BLAKE3_BLOCK_LEN);
    __m256i block_flags_vec = set1(block_flags);
    __m256i msg_vecs[16];
    transpose_msg_vecs(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);

    // Initialize the 16-word state: chaining value, IV constants, counter,
    // block length, and flags -- all word-sliced across the 8 hashes.
    __m256i v[16] = {
        h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
        h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
        set1(IV[0]), set1(IV[1]), set1(IV[2]), set1(IV[3]),
        counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
    };
    // The 7 rounds of the BLAKE3 compression function.
    round_fn(v, msg_vecs, 0);
    round_fn(v, msg_vecs, 1);
    round_fn(v, msg_vecs, 2);
    round_fn(v, msg_vecs, 3);
    round_fn(v, msg_vecs, 4);
    round_fn(v, msg_vecs, 5);
    round_fn(v, msg_vecs, 6);
    // Finalize: the new chaining value is the XOR of the two state halves.
    h_vecs[0] = xorv(v[0], v[8]);
    h_vecs[1] = xorv(v[1], v[9]);
    h_vecs[2] = xorv(v[2], v[10]);
    h_vecs[3] = xorv(v[3], v[11]);
    h_vecs[4] = xorv(v[4], v[12]);
    h_vecs[5] = xorv(v[5], v[13]);
    h_vecs[6] = xorv(v[6], v[14]);
    h_vecs[7] = xorv(v[7], v[15]);

    block_flags = flags;
  }

  // Un-slice the chaining values and emit 8 contiguous 32-byte outputs.
  transpose_vecs(h_vecs);
  storeu(h_vecs[0], &out[0 * sizeof(__m256i)]);
  storeu(h_vecs[1], &out[1 * sizeof(__m256i)]);
  storeu(h_vecs[2], &out[2 * sizeof(__m256i)]);
  storeu(h_vecs[3], &out[3 * sizeof(__m256i)]);
  storeu(h_vecs[4], &out[4 * sizeof(__m256i)]);
  storeu(h_vecs[5], &out[5 * sizeof(__m256i)]);
  storeu(h_vecs[6], &out[6 * sizeof(__m256i)]);
  storeu(h_vecs[7], &out[7 * sizeof(__m256i)]);
}
288*81ad6265SDimitry Andric
289*81ad6265SDimitry Andric #if !defined(BLAKE3_NO_SSE41)
290*81ad6265SDimitry Andric void blake3_hash_many_sse41(const uint8_t *const *inputs, size_t num_inputs,
291*81ad6265SDimitry Andric size_t blocks, const uint32_t key[8],
292*81ad6265SDimitry Andric uint64_t counter, bool increment_counter,
293*81ad6265SDimitry Andric uint8_t flags, uint8_t flags_start,
294*81ad6265SDimitry Andric uint8_t flags_end, uint8_t *out);
295*81ad6265SDimitry Andric #else
296*81ad6265SDimitry Andric void blake3_hash_many_portable(const uint8_t *const *inputs, size_t num_inputs,
297*81ad6265SDimitry Andric size_t blocks, const uint32_t key[8],
298*81ad6265SDimitry Andric uint64_t counter, bool increment_counter,
299*81ad6265SDimitry Andric uint8_t flags, uint8_t flags_start,
300*81ad6265SDimitry Andric uint8_t flags_end, uint8_t *out);
301*81ad6265SDimitry Andric #endif
302*81ad6265SDimitry Andric
blake3_hash_many_avx2(const uint8_t * const * inputs,size_t num_inputs,size_t blocks,const uint32_t key[8],uint64_t counter,bool increment_counter,uint8_t flags,uint8_t flags_start,uint8_t flags_end,uint8_t * out)303*81ad6265SDimitry Andric void blake3_hash_many_avx2(const uint8_t *const *inputs, size_t num_inputs,
304*81ad6265SDimitry Andric size_t blocks, const uint32_t key[8],
305*81ad6265SDimitry Andric uint64_t counter, bool increment_counter,
306*81ad6265SDimitry Andric uint8_t flags, uint8_t flags_start,
307*81ad6265SDimitry Andric uint8_t flags_end, uint8_t *out) {
308*81ad6265SDimitry Andric while (num_inputs >= DEGREE) {
309*81ad6265SDimitry Andric blake3_hash8_avx2(inputs, blocks, key, counter, increment_counter, flags,
310*81ad6265SDimitry Andric flags_start, flags_end, out);
311*81ad6265SDimitry Andric if (increment_counter) {
312*81ad6265SDimitry Andric counter += DEGREE;
313*81ad6265SDimitry Andric }
314*81ad6265SDimitry Andric inputs += DEGREE;
315*81ad6265SDimitry Andric num_inputs -= DEGREE;
316*81ad6265SDimitry Andric out = &out[DEGREE * BLAKE3_OUT_LEN];
317*81ad6265SDimitry Andric }
318*81ad6265SDimitry Andric #if !defined(BLAKE3_NO_SSE41)
319*81ad6265SDimitry Andric blake3_hash_many_sse41(inputs, num_inputs, blocks, key, counter,
320*81ad6265SDimitry Andric increment_counter, flags, flags_start, flags_end, out);
321*81ad6265SDimitry Andric #else
322*81ad6265SDimitry Andric blake3_hash_many_portable(inputs, num_inputs, blocks, key, counter,
323*81ad6265SDimitry Andric increment_counter, flags, flags_start, flags_end,
324*81ad6265SDimitry Andric out);
325*81ad6265SDimitry Andric #endif
326*81ad6265SDimitry Andric }
327