/*
 * Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aes_sse2_enc.c,v 1.1 2020/06/29 23:47:54 riastradh Exp $");

#include <sys/types.h>

#include "aes_sse2_impl.h"

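/*
 * This is the encryption half of BearSSL's constant-time bitsliced
 * AES (aes_ct64, by the copyright holder above), adapted to SSE2.
 * The state is kept as eight 64-bit bit-planes: plane i holds bit i
 * of every byte of up to four blocks processed in parallel.  Each
 * 128-bit register q[i] packs plane i in its low half and plane i+4
 * in its high half; the interleave/ortho helpers declared in
 * aes_sse2_impl.h convert blocks to and from this representation.
 *
 * add_round_key XORs the bitsliced round key into the state.  The
 * subkey words sk[0..7] are the eight bit-planes of the round key,
 * packed to match the state layout (_mm_set_epi64x takes the high
 * half first, hence sk[i+4] before sk[i]).
 */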
static inline void
add_round_key(__m128i q[static 4], const uint64_t sk[static 8])
{
	q[0] ^= _mm_set_epi64x(sk[4], sk[0]);
	q[1] ^= _mm_set_epi64x(sk[5], sk[1]);
	q[2] ^= _mm_set_epi64x(sk[6], sk[2]);
	q[3] ^= _mm_set_epi64x(sk[7], sk[3]);
}

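/*
 * ShiftRows on a single pair of bit-planes.  Within each 64-bit plane
 * the state is laid out as four 16-bit row groups, four bits per
 * column (one bit from each interleaved block).  Row r must rotate by
 * r columns, i.e. by 4*r bits within its own 16-bit group; each
 * mask/shift pair below moves one of the two fragments of such a
 * rotation, and the fragments are ORed back together.  Row 0 (the low
 * 16 bits, y0) stays in place.
 */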
static inline __m128i
shift_row(__m128i q)
{
	__m128i x, y0, y1, y2, y3, y4, y5, y6;

	x = q;
	y0 = x & _mm_set1_epi64x(0x000000000000FFFF);
	y1 = x & _mm_set1_epi64x(0x00000000FFF00000);
	y2 = x & _mm_set1_epi64x(0x00000000000F0000);
	y3 = x & _mm_set1_epi64x(0x0000FF0000000000);
	y4 = x & _mm_set1_epi64x(0x000000FF00000000);
	y5 = x & _mm_set1_epi64x(0xF000000000000000);
	y6 = x & _mm_set1_epi64x(0x0FFF000000000000);
	y1 = _mm_srli_epi64(y1, 4);
	y2 = _mm_slli_epi64(y2, 12);
	y3 = _mm_srli_epi64(y3, 8);
	y4 = _mm_slli_epi64(y4, 8);
	y5 = _mm_srli_epi64(y5, 12);
	y6 = _mm_slli_epi64(y6, 4);
	return y0 | y1 | y2 | y3 | y4 | y5 | y6;
}

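/*
 * ShiftRows permutes bytes the same way in every bit-plane, so it is
 * applied to each of the four registers (all eight planes)
 * independently.
 */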
static inline void
shift_rows(__m128i q[static 4])
{

	q[0] = shift_row(q[0]);
	q[1] = shift_row(q[1]);
	q[2] = shift_row(q[2]);
	q[3] = shift_row(q[3]);
}

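/*
 * Rotate each 64-bit plane by 32 bits, i.e. swap its 32-bit halves.
 * In the row-major plane layout this shifts the state by two rows;
 * mix_columns uses it to fold in the remaining two bytes of each
 * column sum.
 */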
static inline __m128i
rotr32(__m128i x)
{
	return _mm_slli_epi64(x, 32) | _mm_srli_epi64(x, 32);
}

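/*
 * MixColumns in bitsliced form.  r_i is plane i rotated by 16 bits,
 * i.e. the state shifted by one row, so q_i ^ r_i pairs each byte
 * with its neighbour in the same column, and rotr32(q_i ^ r_i)
 * supplies the column's other two bytes.  Doubling in GF(2^8) is a
 * plane shift: plane i feeds plane i+1, with plane 7 feeding back
 * into planes 0, 1, 3 and 4 (x^8 = x^4 + x^3 + x + 1); the s_i
 * expressions combine the shift, the feedback and the plain XOR
 * terms of the MixColumns matrix.  All eight planes interact, so the
 * high halves are first brought down with _mm_shuffle_epi32(., 0x0e)
 * and the results repacked with _mm_unpacklo_epi64 at the end.
 */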
static inline void
mix_columns(__m128i q[static 4])
{
	__m128i q0, q1, q2, q3, q4, q5, q6, q7;
	__m128i r0, r1, r2, r3, r4, r5, r6, r7;
	__m128i s0, s1, s2, s3, s4, s5, s6, s7;

	q0 = q[0];
	q1 = q[1];
	q2 = q[2];
	q3 = q[3];
	r0 = _mm_srli_epi64(q0, 16) | _mm_slli_epi64(q0, 48);
	r1 = _mm_srli_epi64(q1, 16) | _mm_slli_epi64(q1, 48);
	r2 = _mm_srli_epi64(q2, 16) | _mm_slli_epi64(q2, 48);
	r3 = _mm_srli_epi64(q3, 16) | _mm_slli_epi64(q3, 48);

	q7 = _mm_shuffle_epi32(q3, 0x0e);
	q6 = _mm_shuffle_epi32(q2, 0x0e);
	q5 = _mm_shuffle_epi32(q1, 0x0e);
	q4 = _mm_shuffle_epi32(q0, 0x0e);

	r7 = _mm_shuffle_epi32(r3, 0x0e);
	r6 = _mm_shuffle_epi32(r2, 0x0e);
	r5 = _mm_shuffle_epi32(r1, 0x0e);
	r4 = _mm_shuffle_epi32(r0, 0x0e);

	s0 = q7 ^ r7 ^ r0 ^ rotr32(q0 ^ r0);
	s1 = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr32(q1 ^ r1);
	s2 = q1 ^ r1 ^ r2 ^ rotr32(q2 ^ r2);
	s3 = q2 ^ r2 ^ q7 ^ r7 ^ r3 ^ rotr32(q3 ^ r3);
	s4 = q3 ^ r3 ^ q7 ^ r7 ^ r4 ^ rotr32(q4 ^ r4);
	s5 = q4 ^ r4 ^ r5 ^ rotr32(q5 ^ r5);
	s6 = q5 ^ r5 ^ r6 ^ rotr32(q6 ^ r6);
	s7 = q6 ^ r6 ^ r7 ^ rotr32(q7 ^ r7);

	q[0] = _mm_unpacklo_epi64(s0, s4);
	q[1] = _mm_unpacklo_epi64(s1, s5);
	q[2] = _mm_unpacklo_epi64(s2, s6);
	q[3] = _mm_unpacklo_epi64(s3, s7);
}

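/*
 * Encrypt up to four blocks in the bitsliced representation: an
 * initial AddRoundKey, num_rounds - 1 full rounds (S-box, ShiftRows,
 * MixColumns, AddRoundKey), and a final round that omits MixColumns.
 * num_rounds is 10, 12 or 14 for AES-128/192/256.  skey must provide
 * eight 64-bit bitsliced subkey words per round key, 8*(num_rounds+1)
 * words in all, which is why each round advances the pointer by
 * u << 3.
 */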
void
aes_sse2_bitslice_encrypt(unsigned num_rounds,
	const uint64_t *skey, __m128i q[static 4])
{
	unsigned u;

	add_round_key(q, skey);
	for (u = 1; u < num_rounds; u ++) {
		aes_sse2_bitslice_Sbox(q);
		shift_rows(q);
		mix_columns(q);
		add_round_key(q, skey + (u << 3));
	}
	aes_sse2_bitslice_Sbox(q);
	shift_rows(q);
	add_round_key(q, skey + (num_rounds << 3));
}