/**********************************************************************
  Copyright(c) 2011-2019 Intel Corporation All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/

#include <string.h>
#include <assert.h> /* for the block-size assertion in sm3_update() */
#include "sm3_mb.h"
#include "sm3_mb_internal.h"
#include "memcpy_inline.h"
#include "endian_helper.h"

#ifdef _MSC_VER
#include <intrin.h>
#define inline __inline
#endif

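/*
 * GCC 11+ is asked not to apply inter-procedural optimizations to the helpers
 * below (noipa), most likely so the explicit clearing of intermediate values
 * in sm3_single() and sm3_compress_step_func() is not optimized away.
 */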
#if (__GNUC__ >= 11)
#define OPT_FIX __attribute__((noipa))
#else
#define OPT_FIX
#endif

/* Rotate a 32-bit word left by r bits; masking keeps the shifts defined when r == 0 */
#define rol32(x, r) (((x) << ((r) & 31)) | ((x) >> ((32 - (r)) & 31)))

static void
sm3_init(ISAL_SM3_HASH_CTX *ctx, const void *buffer, uint32_t len);
static void OPT_FIX
sm3_update(ISAL_SM3_HASH_CTX *ctx, const void *buffer, uint32_t len);
static void OPT_FIX
sm3_final(ISAL_SM3_HASH_CTX *ctx);
static void OPT_FIX
sm3_single(const volatile void *data, uint32_t digest[]);
static inline void
hash_init_digest(ISAL_SM3_WORD_T *digest);

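/* Permutation functions P0 and P1 from the SM3 specification (GB/T 32905-2016) */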
static inline uint32_t
P0(uint32_t X)
{
        return (X ^ (rol32(X, 9)) ^ (rol32(X, 17)));
}

static inline uint32_t
P1(uint32_t X)
{
        return (X ^ (rol32(X, 15)) ^ (rol32(X, 23)));
}

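/* Boolean functions FF_j and GG_j; both switch definitions at round 16 */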
static inline uint32_t
sm3_ff(int j, uint32_t x, uint32_t y, uint32_t z)
{
        return j < 16 ? (x ^ y ^ z) : ((x & y) | (x & z) | (y & z));
}

static inline uint32_t
sm3_gg(int j, uint32_t x, uint32_t y, uint32_t z)
{
        return j < 16 ? (x ^ y ^ z) : ((x & y) | ((~x) & z));
}

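/*
 * Message expansion: the 512-bit block bi[] is expanded into W[0..67], and
 * W_B[j] = W[j] ^ W[j + 4] for j = 0..63, as defined by the SM3 specification.
 */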
static inline void
sm3_message_schedule(uint32_t bi[], volatile uint32_t W[], volatile uint32_t W_B[])
{
        int j;
        volatile uint32_t tmp;

        /* W[0..15]: the sixteen big-endian message words of the block */
        for (j = 0; j <= 15; j++) {
                W[j] = to_be32(bi[j]);
        }

        /* W[16..67]: expansion of the message words */
        for (; j <= 67; j++) {
                tmp = W[j - 16] ^ W[j - 9] ^ rol32(W[j - 3], 15);
                W[j] = P1(tmp) ^ (rol32(W[j - 13], 7)) ^ W[j - 6];
        }

        /* W'[j] = W[j] ^ W[j + 4] */
        for (j = 0; j < 64; j++) {
                W_B[j] = W[j] ^ W[j + 4];
        }

        /* Scrub the temporary */
        tmp = 0;
}

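/*
 * One round of the SM3 compression function. The round constant T_j is
 * 0x79cc4519 for rounds 0-15 and 0x7a879d8a for rounds 16-63, rotated left
 * by j mod 32 bits.
 */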
static inline void
sm3_compress_step_func(int j, volatile uint32_t *a_p, volatile uint32_t *b_p,
                       volatile uint32_t *c_p, volatile uint32_t *d_p, volatile uint32_t *e_p,
                       volatile uint32_t *f_p, volatile uint32_t *g_p, volatile uint32_t *h_p,
                       volatile uint32_t W[], volatile uint32_t W_B[])
{
        volatile uint32_t SS1, SS2, TT1, TT2;
        uint32_t T = j < 16 ? 0x79cc4519 : 0x7a879d8a;

        SS1 = rol32(rol32(*a_p, 12) + *e_p + rol32(T, (j % 32)), 7);
        SS2 = SS1 ^ rol32(*a_p, 12);
        TT1 = sm3_ff(j, *a_p, *b_p, *c_p) + *d_p + SS2 + W_B[j];
        TT2 = sm3_gg(j, *e_p, *f_p, *g_p) + *h_p + SS1 + W[j];
        *d_p = *c_p;
        *c_p = rol32(*b_p, 9);
        *b_p = *a_p;
        *a_p = TT1;
        *h_p = *g_p;
        *g_p = rol32(*f_p, 19);
        *f_p = *e_p;
        *e_p = P0(TT2);

        /* Scrub intermediate values */
        SS1 = 0;
        SS2 = 0;
        TT1 = 0;
        TT2 = 0;
}

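/*
 * Base (single-buffer) context manager. Each submitted job is processed to
 * completion before the call returns, so init has no state to set up and
 * flush never has a pending job to hand back.
 */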
void
_sm3_ctx_mgr_init_base(ISAL_SM3_HASH_CTX_MGR *mgr)
{
}

ISAL_SM3_HASH_CTX *
_sm3_ctx_mgr_submit_base(ISAL_SM3_HASH_CTX_MGR *mgr, ISAL_SM3_HASH_CTX *ctx, const void *buffer,
                         uint32_t len, ISAL_HASH_CTX_FLAG flags)
{

        if (flags & (~ISAL_HASH_ENTIRE)) {
                // User should not pass anything other than FIRST, UPDATE, LAST or ENTIRE
                ctx->error = ISAL_HASH_CTX_ERROR_INVALID_FLAGS;
                return ctx;
        }

        if ((ctx->status & ISAL_HASH_CTX_STS_PROCESSING) && (flags == ISAL_HASH_ENTIRE)) {
                // Cannot submit a new entire job to a currently processing job.
                ctx->error = ISAL_HASH_CTX_ERROR_ALREADY_PROCESSING;
                return ctx;
        }

        if ((ctx->status & ISAL_HASH_CTX_STS_COMPLETE) && !(flags & ISAL_HASH_FIRST)) {
                // Cannot update a finished job.
                ctx->error = ISAL_HASH_CTX_ERROR_ALREADY_COMPLETED;
                return ctx;
        }

        if (flags == ISAL_HASH_FIRST) {
                sm3_init(ctx, buffer, len);
                sm3_update(ctx, buffer, len);
        }

        if (flags == ISAL_HASH_UPDATE) {
                sm3_update(ctx, buffer, len);
        }

        if (flags == ISAL_HASH_LAST) {
                sm3_update(ctx, buffer, len);
                sm3_final(ctx);
        }

        if (flags == ISAL_HASH_ENTIRE) {
                sm3_init(ctx, buffer, len);
                sm3_update(ctx, buffer, len);
                sm3_final(ctx);
        }

        return ctx;
}

ISAL_SM3_HASH_CTX *
_sm3_ctx_mgr_flush_base(ISAL_SM3_HASH_CTX_MGR *mgr)
{
        return NULL;
}

static void
sm3_init(ISAL_SM3_HASH_CTX *ctx, const void *buffer, uint32_t len)
{
        // Init digest
        hash_init_digest(ctx->job.result_digest);

        // Reset byte counter
        ctx->total_length = 0;

        // Clear extra blocks
        ctx->partial_block_buffer_length = 0;

        // If we made it here, there were no errors during this call to submit
        ctx->error = ISAL_HASH_CTX_ERROR_NONE;

        // Mark it as processing
        ctx->status = ISAL_HASH_CTX_STS_PROCESSING;
}

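/*
 * Serial update: append input to the partial-block buffer and hash each
 * complete ISAL_SM3_BLOCK_SIZE-byte block with sm3_single().
 */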
static void
sm3_update(ISAL_SM3_HASH_CTX *ctx, const void *buffer, uint32_t len)
{
        uint32_t remain_len = len;
        uint32_t *digest = ctx->job.result_digest;

        // Advance byte counter
        ctx->total_length += len;

        // If there is anything currently buffered in the extra blocks, append to it until it
        // contains a whole block. Or if the user's buffer contains less than a whole block, append
        // as much as possible to the extra block.
        if ((ctx->partial_block_buffer_length) | (remain_len < ISAL_SM3_BLOCK_SIZE)) {
                // Compute how many bytes to copy from user buffer into extra block
                uint32_t copy_len = ISAL_SM3_BLOCK_SIZE - ctx->partial_block_buffer_length;
                if (remain_len < copy_len) {
                        copy_len = remain_len;
                }

                if (copy_len) {
                        // Copy and update relevant pointers and counters
                        memcpy_fixedlen(
                                &ctx->partial_block_buffer[ctx->partial_block_buffer_length],
                                buffer, copy_len);

                        ctx->partial_block_buffer_length += copy_len;
                        remain_len -= copy_len;
                        buffer = (void *) ((uint8_t *) buffer + copy_len);
                }
                // The extra block should never contain more than 1 block here
                assert(ctx->partial_block_buffer_length <= ISAL_SM3_BLOCK_SIZE);

                // If the extra block buffer contains exactly 1 block, it can be hashed.
                if (ctx->partial_block_buffer_length >= ISAL_SM3_BLOCK_SIZE) {
                        ctx->partial_block_buffer_length = 0;
                        sm3_single(ctx->partial_block_buffer, digest);
                }
        }
        // If the extra blocks are empty, begin hashing what remains in the user's buffer.
        if (ctx->partial_block_buffer_length == 0) {
                while (remain_len >= ISAL_SM3_BLOCK_SIZE) {
                        sm3_single(buffer, digest);
                        buffer = (void *) ((uint8_t *) buffer + ISAL_SM3_BLOCK_SIZE);
                        remain_len -= ISAL_SM3_BLOCK_SIZE;
                }
        }

        if (remain_len > 0) {
                memcpy_fixedlen(&ctx->partial_block_buffer, buffer, remain_len);
                ctx->partial_block_buffer_length = remain_len;
        }

        ctx->status = ISAL_HASH_CTX_STS_IDLE;
        return;
}

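/*
 * Finalize: pad the message with 0x80, zeros and the 64-bit big-endian bit
 * length, which may spill into a second block, then produce the digest.
 */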
static void
sm3_final(ISAL_SM3_HASH_CTX *ctx)
{
        const void *buffer = ctx->partial_block_buffer;
        uint32_t i = ctx->partial_block_buffer_length;
        uint8_t buf[2 * ISAL_SM3_BLOCK_SIZE];
        uint32_t *digest = ctx->job.result_digest;
        uint32_t j;

        // Copy the buffered bytes, append the 0x80 padding byte and zero-fill the rest
        memcpy(buf, buffer, i);
        buf[i++] = 0x80;
        for (j = i; j < (2 * ISAL_SM3_BLOCK_SIZE); j++) {
                buf[j] = 0;
        }

        // Use a second block if the length field no longer fits in the first one
        if (i > ISAL_SM3_BLOCK_SIZE - ISAL_SM3_PADLENGTHFIELD_SIZE) {
                i = 2 * ISAL_SM3_BLOCK_SIZE;
        } else {
                i = ISAL_SM3_BLOCK_SIZE;
        }

        // Append the total message length in bits as a 64-bit big-endian value
        *(uint64_t *) (buf + i - 8) = to_be64((uint64_t) ctx->total_length * 8);

        sm3_single(buf, digest);
        if (i == 2 * ISAL_SM3_BLOCK_SIZE) {
                sm3_single(buf + ISAL_SM3_BLOCK_SIZE, digest);
        }

        /* Byte-swap the digest words into the expected output byte order */
        for (j = 0; j < ISAL_SM3_DIGEST_NWORDS; j++) {
                digest[j] = byteswap32(digest[j]);
        }
        ctx->status = ISAL_HASH_CTX_STS_COMPLETE;
}

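/*
 * Compress one 64-byte block into the digest. The working variables are
 * volatile and cleared on exit, which keeps intermediate state from
 * lingering or being optimized away (see OPT_FIX above).
 */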
static void
sm3_single(const volatile void *data, uint32_t digest[])
{
        volatile uint32_t a, b, c, d, e, f, g, h;
        volatile uint32_t W[68], W_bar[64];
        int j;

        // Load the working variables from the current digest value
        a = digest[0];
        b = digest[1];
        c = digest[2];
        d = digest[3];
        e = digest[4];
        f = digest[5];
        g = digest[6];
        h = digest[7];

        // Expand the block, then run the 64 compression rounds
        sm3_message_schedule((uint32_t *) data, W, W_bar);
        for (j = 0; j < 64; j++) {
                sm3_compress_step_func(j, &a, &b, &c, &d, &e, &f, &g, &h, W, W_bar);
        }

        // V_(i+1) = ABCDEFGH XOR V_i
        digest[0] ^= a;
        digest[1] ^= b;
        digest[2] ^= c;
        digest[3] ^= d;
        digest[4] ^= e;
        digest[5] ^= f;
        digest[6] ^= g;
        digest[7] ^= h;

        // Scrub the message schedule and working variables
        memset((void *) W, 0, sizeof(W));
        memset((void *) W_bar, 0, sizeof(W_bar));

        a = 0;
        b = 0;
        c = 0;
        d = 0;
        e = 0;
        f = 0;
        g = 0;
        h = 0;
}

static inline void
hash_init_digest(ISAL_SM3_WORD_T *digest)
{
        static const ISAL_SM3_WORD_T hash_initial_digest[ISAL_SM3_DIGEST_NWORDS] = {
                ISAL_SM3_INITIAL_DIGEST
        };
        memcpy_fixedlen(digest, hash_initial_digest, sizeof(hash_initial_digest));
}

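/* Version markers (slver), kept for consistency with the rest of the library */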
struct slver {
        uint16_t snum;
        uint8_t ver;
        uint8_t core;
};
struct slver sm3_ctx_mgr_init_base_slver_0000;
struct slver sm3_ctx_mgr_init_base_slver = { 0x2303, 0x00, 0x00 };

struct slver sm3_ctx_mgr_submit_base_slver_0000;
struct slver sm3_ctx_mgr_submit_base_slver = { 0x2304, 0x00, 0x00 };

struct slver sm3_ctx_mgr_flush_base_slver_0000;
struct slver sm3_ctx_mgr_flush_base_slver = { 0x2305, 0x00, 0x00 };