1 /*
2  * Support for VIA PadLock Advanced Cryptography Engine (ACE)
3  * Written by Michal Ludvig <michal@logix.cz>
4  *            http://www.logix.cz/michal
5  *
6  * Big thanks to Andy Polyakov for a help with optimization,
7  * assembler fixes, port to MS Windows and a lot of other
8  * valuable work on this engine!
9  */

/* ====================================================================
 * Copyright (c) 1999-2001 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */


#include <stdio.h>
#include <string.h>

#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/dso.h>
#include <openssl/engine.h>
#include <openssl/evp.h>
#ifndef OPENSSL_NO_AES
#include <openssl/aes.h>
#endif
#include <openssl/rand.h>
#include <openssl/err.h>

#ifndef OPENSSL_NO_HW
#ifndef OPENSSL_NO_HW_PADLOCK

/* Attempt to have a single source for both 0.9.7 and 0.9.8 :-) */
#if (OPENSSL_VERSION_NUMBER >= 0x00908000L)
#  ifndef OPENSSL_NO_DYNAMIC_ENGINE
#    define DYNAMIC_ENGINE
#  endif
#elif (OPENSSL_VERSION_NUMBER >= 0x00907000L)
#  ifdef ENGINE_DYNAMIC_SUPPORT
#    define DYNAMIC_ENGINE
#  endif
#else
#  error "Only OpenSSL >= 0.9.7 is supported"
#endif

/* VIA PadLock AES is available *ONLY* on some x86 CPUs.
   Not only does it not exist elsewhere, it cannot even
   be compiled on other platforms!

   In addition, because of the heavy use of inline assembler,
   the compiler choice is limited to GCC and Microsoft C. */
#undef COMPILE_HW_PADLOCK
#if !defined(I386_ONLY) && !defined(OPENSSL_NO_INLINE_ASM)
# if (defined(__GNUC__) && __GNUC__>=2 && \
	(defined(__i386__) || defined(__i386) || \
	 defined(__x86_64__) || defined(__x86_64)) \
     ) || \
     (defined(_MSC_VER) && defined(_M_IX86))
#  define COMPILE_HW_PADLOCK
static ENGINE *ENGINE_padlock (void);
# endif
#endif

void ENGINE_load_padlock (void)
{
/* On non-x86 CPUs it just returns. */
#ifdef COMPILE_HW_PADLOCK
	ENGINE *toadd = ENGINE_padlock ();
	if (!toadd) return;
	ENGINE_add (toadd);
	ENGINE_free (toadd);
	ERR_clear_error ();
#endif
}
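
/* A minimal usage sketch (not part of this file): an application that
 * links this engine statically would typically do something like the
 * following to make PadLock the default provider. Exact setup varies
 * between OpenSSL versions; treat this as an illustration only.
 *
 *	ENGINE *e;
 *	ENGINE_load_padlock();
 *	e = ENGINE_by_id("padlock");
 *	if (e != NULL && ENGINE_init(e)) {
 *		ENGINE_set_default(e, ENGINE_METHOD_ALL);
 *		ENGINE_finish(e);
 *	}
 *	if (e != NULL) ENGINE_free(e);
 */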

#ifdef COMPILE_HW_PADLOCK
/* We do these includes here to avoid header problems on platforms that
   do not have the VIA padlock anyway... */
#include <stdlib.h>
#ifdef _WIN32
# include <malloc.h>
# ifndef alloca
#  define alloca _alloca
# endif
#elif defined(__GNUC__)
# ifndef alloca
#  define alloca(s) __builtin_alloca((s))
# endif
#endif

/* Functions for ENGINE detection and control */
static int padlock_available(void);
static int padlock_init(ENGINE *e);

/* RNG Stuff */
static RAND_METHOD padlock_rand;

/* Cipher Stuff */
#ifndef OPENSSL_NO_AES
static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid);
#endif

/* Engine names */
static const char *padlock_id = "padlock";
static char padlock_name[100];

/* Available features */
static int padlock_use_ace = 0;	/* Advanced Cryptography Engine */
static int padlock_use_rng = 0;	/* Random Number Generator */
#ifndef OPENSSL_NO_AES
static int padlock_aes_align_required = 1;
#endif

/* ===== Engine "management" functions ===== */

/* Prepare the ENGINE structure for registration */
static int
padlock_bind_helper(ENGINE *e)
{
	/* Check available features */
	padlock_available();

#if 1	/* disable RNG for now, see commentary in vicinity of RNG code */
	padlock_use_rng=0;
#endif

	/* Generate a nice engine name with available features */
	BIO_snprintf(padlock_name, sizeof(padlock_name),
		"VIA PadLock (%s, %s)",
		 padlock_use_rng ? "RNG" : "no-RNG",
		 padlock_use_ace ? "ACE" : "no-ACE");

	/* Register everything or return with an error */
	if (!ENGINE_set_id(e, padlock_id) ||
	    !ENGINE_set_name(e, padlock_name) ||

	    !ENGINE_set_init_function(e, padlock_init) ||
#ifndef OPENSSL_NO_AES
	    (padlock_use_ace && !ENGINE_set_ciphers (e, padlock_ciphers)) ||
#endif
	    (padlock_use_rng && !ENGINE_set_RAND (e, &padlock_rand))) {
		return 0;
	}

	/* Everything looks good */
	return 1;
}

/* Constructor */
static ENGINE *
ENGINE_padlock(void)
{
	ENGINE *eng = ENGINE_new();

	if (!eng) {
		return NULL;
	}

	if (!padlock_bind_helper(eng)) {
		ENGINE_free(eng);
		return NULL;
	}

	return eng;
}

/* Check availability of the engine */
static int
padlock_init(ENGINE *e)
{
	return (padlock_use_rng || padlock_use_ace);
}

/* This stuff is needed if this ENGINE is being compiled into a self-contained
 * shared-library.
 */
#ifdef DYNAMIC_ENGINE
static int
padlock_bind_fn(ENGINE *e, const char *id)
{
	if (id && (strcmp(id, padlock_id) != 0)) {
		return 0;
	}

	if (!padlock_bind_helper(e))  {
		return 0;
	}

	return 1;
}

IMPLEMENT_DYNAMIC_CHECK_FN()
IMPLEMENT_DYNAMIC_BIND_FN (padlock_bind_fn)
#endif /* DYNAMIC_ENGINE */

/* ===== Here comes the "real" engine ===== */

#ifndef OPENSSL_NO_AES
/* Some AES-related constants */
#define AES_BLOCK_SIZE		16
#define AES_KEY_SIZE_128	16
#define AES_KEY_SIZE_192	24
#define AES_KEY_SIZE_256	32

/* Here we store the status information relevant to the
   current context. */
/* BIG FAT WARNING:
 * 	Inline assembler in PADLOCK_XCRYPT_ASM()
 * 	depends on the order of items in this structure.
 * 	Don't blindly modify, reorder, etc!
 */
struct padlock_cipher_data
{
	unsigned char iv[AES_BLOCK_SIZE];	/* Initialization vector */
	union {	unsigned int pad[4];
		struct {
			int rounds:4;
			int dgst:1;	/* n/a in C3 */
			int align:1;	/* n/a in C3 */
			int ciphr:1;	/* n/a in C3 */
			unsigned int keygen:1;
			int interm:1;
			unsigned int encdec:1;
			int ksize:2;
		} b;
	} cword;		/* Control word */
	AES_KEY ks;		/* Encryption key */
};
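
/* For reference, padlock_aes_init_key() below fills the control word as
 * rounds = 10 + (key_len - 128) / 32 and ksize = (key_len - 128) / 64,
 * i.e. AES-128 -> rounds 10 / ksize 0, AES-192 -> 12 / 1, and
 * AES-256 -> 14 / 2, matching the standard AES round counts. */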

/*
 * Essentially this variable belongs in thread local storage.
 * Having this variable global, on the other hand, can only cause
 * a few bogus key reloads [if any at all on a single-CPU system],
 * so we accept the penalty...
 */
static volatile struct padlock_cipher_data *padlock_saved_context;
#endif

/*
 * =======================================================
 * Inline assembler section(s).
 * =======================================================
 * The order of arguments is chosen to facilitate a Windows port
 * using the __fastcall calling convention. If you wish to add
 * more routines, keep in mind that the first __fastcall
 * argument is passed in %ecx and the second in %edx.
 * =======================================================
 */
#if defined(__GNUC__) && __GNUC__>=2
#if defined(__i386__) || defined(__i386)
/*
 * As for the excessive "push %ebx"/"pop %ebx" found all over:
 * when generating position-independent code GCC won't let
 * us use "b" in assembler templates nor even respect "ebx"
 * in the "clobber description." Therefore the trouble...
 */

/* Helper function - check if a CPUID instruction
   is available on this CPU */
static int
padlock_insn_cpuid_available(void)
{
	int result = -1;

	/* We're checking whether bit #21 of EFLAGS
	   can be toggled. If yes, CPUID is available. */
	asm volatile (
		"pushf\n"
		"popl %%eax\n"
		"xorl $0x200000, %%eax\n"
		"movl %%eax, %%ecx\n"
		"andl $0x200000, %%ecx\n"
		"pushl %%eax\n"
		"popf\n"
		"pushf\n"
		"popl %%eax\n"
		"andl $0x200000, %%eax\n"
		"xorl %%eax, %%ecx\n"
		"movl %%ecx, %0\n"
		: "=r" (result) : : "eax", "ecx");

	return (result == 0);
}

/* Load supported features of the CPU to see if
   the PadLock is available. */
static int
padlock_available(void)
{
	char vendor_string[16];
	unsigned int eax, edx;

	/* First check if the CPUID instruction is available at all... */
	if (! padlock_insn_cpuid_available())
		return 0;

	/* Are we running on the Centaur (VIA) CPU? */
	eax = 0x00000000;
	vendor_string[12] = 0;
	asm volatile (
		"pushl	%%ebx\n"
		"cpuid\n"
		"movl	%%ebx,(%%edi)\n"
		"movl	%%edx,4(%%edi)\n"
		"movl	%%ecx,8(%%edi)\n"
		"popl	%%ebx"
		: "+a"(eax) : "D"(vendor_string) : "ecx", "edx");
	if (strcmp(vendor_string, "CentaurHauls") != 0)
		return 0;

	/* Check for Centaur Extended Feature Flags presence */
	eax = 0xC0000000;
	asm volatile ("pushl %%ebx; cpuid; popl	%%ebx"
		: "+a"(eax) : : "ecx", "edx");
	if (eax < 0xC0000001)
		return 0;

	/* Read the Centaur Extended Feature Flags */
	eax = 0xC0000001;
	asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
		: "+a"(eax), "=d"(edx) : : "ecx");

	/* Fill in the feature flags: EDX bit 6 = ACE present, bit 7 = ACE
	   enabled; bit 2 = RNG present, bit 3 = RNG enabled. Both bits
	   of each pair must be set. */
	padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
	padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));

	return padlock_use_ace + padlock_use_rng;
}

/* Force key reload from memory to the CPU microcode.
   Loading EFLAGS from the stack clears EFLAGS[30]
   which does the trick. */
static inline void
padlock_reload_key(void)
{
	asm volatile ("pushfl; popfl");
}

#ifndef OPENSSL_NO_AES
/*
 * This is heuristic key context tracing. At first glance one might
 * think that atomic swap instructions are needed here, but they are
 * not actually necessary. The point is that if padlock_saved_context
 * was changed by another thread after we read it and before we
 * compare it with cdata, our key *shall* be reloaded upon the thread
 * context switch and we are therefore safe in either case...
 */
static inline void
padlock_verify_context(struct padlock_cipher_data *cdata)
{
	asm volatile (
	"pushfl\n"
"	btl	$30,(%%esp)\n"
"	jnc	1f\n"
"	cmpl	%2,%1\n"
"	je	1f\n"
"	popfl\n"
"	subl	$4,%%esp\n"
"1:	addl	$4,%%esp\n"
"	movl	%2,%0"
	:"+m"(padlock_saved_context)
	: "r"(padlock_saved_context), "r"(cdata) : "cc");
}
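
/* Reading the asm above (our interpretation, not VIA's official
 * description): the saved EFLAGS word is tested for bit 30, the
 * key-loaded indication. If that bit is clear, or if the saved context
 * already matches cdata, the saved word is simply discarded (the
 * "addl $4"). Otherwise "popfl" reloads EFLAGS from the stack, which
 * clears EFLAGS[30] and thereby forces a key reload, and the subl/addl
 * pair rebalances the stack. Finally cdata is recorded as the current
 * context. */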

/* Template for padlock_xcrypt_* modes */
/* BIG FAT WARNING:
 * 	The offsets used with 'leal' instructions
 * 	describe items of the 'padlock_cipher_data'
 * 	structure.
 */
#define PADLOCK_XCRYPT_ASM(name,rep_xcrypt)	\
static inline void *name(size_t cnt,		\
	struct padlock_cipher_data *cdata,	\
	void *out, const void *inp) 		\
{	void *iv; 				\
	asm volatile ( "pushl	%%ebx\n"	\
		"	leal	16(%0),%%edx\n"	\
		"	leal	32(%0),%%ebx\n"	\
			rep_xcrypt "\n"		\
		"	popl	%%ebx"		\
		: "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp) \
		: "0"(cdata), "1"(cnt), "2"(out), "3"(inp)  \
		: "edx", "cc", "memory");	\
	return iv;				\
}
#endif

#elif defined(__x86_64__) || defined(__x86_64)

/* Load supported features of the CPU to see if
   the PadLock is available. */
static int
padlock_available(void)
{
	char vendor_string[16];
	unsigned int eax, edx;

	/* Are we running on the Centaur (VIA) CPU? */
	eax = 0x00000000;
	vendor_string[12] = 0;
	asm volatile (
		"cpuid\n"
		"movl	%%ebx,(%1)\n"
		"movl	%%edx,4(%1)\n"
		"movl	%%ecx,8(%1)\n"
		: "+a"(eax) : "r"(vendor_string) : "rbx", "rcx", "rdx");
	if (strcmp(vendor_string, "CentaurHauls") != 0)
		return 0;

	/* Check for Centaur Extended Feature Flags presence */
	eax = 0xC0000000;
	asm volatile ("cpuid"
		: "+a"(eax) : : "rbx", "rcx", "rdx");
	if (eax < 0xC0000001)
		return 0;

	/* Read the Centaur Extended Feature Flags */
	eax = 0xC0000001;
	asm volatile ("cpuid"
		: "+a"(eax), "=d"(edx) : : "rbx", "rcx");

	/* Fill in the feature flags: EDX bit 6 = ACE present, bit 7 = ACE
	   enabled; bit 2 = RNG present, bit 3 = RNG enabled. Both bits
	   of each pair must be set. */
	padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
	padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));

	return padlock_use_ace + padlock_use_rng;
}

/* Force key reload from memory to the CPU microcode.
   Loading EFLAGS from the stack clears EFLAGS[30]
   which does the trick. */
static inline void
padlock_reload_key(void)
{
	asm volatile ("pushfq; popfq");
}

#ifndef OPENSSL_NO_AES
/*
 * This is heuristic key context tracing. At first glance one might
 * think that atomic swap instructions are needed here, but they are
 * not actually necessary. The point is that if padlock_saved_context
 * was changed by another thread after we read it and before we
 * compare it with cdata, our key *shall* be reloaded upon the thread
 * context switch and we are therefore safe in either case...
 */
static inline void
padlock_verify_context(struct padlock_cipher_data *cdata)
{
	asm volatile (
	"pushfq\n"
"	btl	$30,(%%rsp)\n"
"	jnc	1f\n"
"	cmpq	%2,%1\n"
"	je	1f\n"
"	popfq\n"
"	subq	$8,%%rsp\n"
"1:	addq	$8,%%rsp\n"
"	movq	%2,%0"
	:"+m"(padlock_saved_context)
	: "r"(padlock_saved_context), "r"(cdata) : "cc");
}

/* Template for padlock_xcrypt_* modes */
/* BIG FAT WARNING:
 * 	The offsets used with 'leaq' instructions
 * 	describe items of the 'padlock_cipher_data'
 * 	structure.
 */
#define PADLOCK_XCRYPT_ASM(name,rep_xcrypt)	\
static inline void *name(size_t cnt,		\
	struct padlock_cipher_data *cdata,	\
	void *out, const void *inp) 		\
{	void *iv; 				\
	asm volatile ( "leaq	16(%0),%%rdx\n"	\
		"	leaq	32(%0),%%rbx\n"	\
			rep_xcrypt "\n"		\
		: "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp) \
		: "0"(cdata), "1"(cnt), "2"(out), "3"(inp)  \
		: "rbx", "rdx", "cc", "memory");	\
	return iv;				\
}
#endif

#endif	/* cpu */

#ifndef OPENSSL_NO_AES
/* Generate all functions with appropriate opcodes */
PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb, ".byte 0xf3,0x0f,0xa7,0xc8")	/* rep xcryptecb */
PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc, ".byte 0xf3,0x0f,0xa7,0xd0")	/* rep xcryptcbc */
PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb, ".byte 0xf3,0x0f,0xa7,0xe0")	/* rep xcryptcfb */
PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb, ".byte 0xf3,0x0f,0xa7,0xe8")	/* rep xcryptofb */
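
/* The .byte sequences above encode the PadLock instructions directly,
 * since older assemblers don't know them: 0xf3 is the REP prefix and
 * 0x0f,0xa7 is the two-byte XCRYPT opcode; the final byte selects the
 * chaining mode (0xc8 ECB, 0xd0 CBC, 0xe0 CFB, 0xe8 OFB), matching the
 * REP_XCRYPT macro in the MSC section below. */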

/* Our own htonl()/ntohl() */
static inline void
padlock_bswapl(AES_KEY *ks)
{
	size_t i = sizeof(ks->rd_key)/sizeof(ks->rd_key[0]);
	unsigned int *key = ks->rd_key;

	while (i--) {
		asm volatile ("bswapl %0" : "+r"(*key));
		key++;
	}
}
#endif

/* The RNG call itself */
static inline unsigned int
padlock_xstore(void *addr, unsigned int edx_in)
{
	unsigned int eax_out;

	asm volatile (".byte 0x0f,0xa7,0xc0"	/* xstore */
	    : "=a"(eax_out),"=m"(*(unsigned *)addr)
	    : "D"(addr), "d" (edx_in)
	    );

	return eax_out;
}

/* Why not inline 'rep movsd'? I failed to find information on what
 * value of the Direction Flag one can expect and consequently have to
 * apply the "better-safe-than-sorry" approach and assume "undefined."
 * I could explicitly clear it and restore the original value upon
 * return from padlock_aes_cipher, but it's presumably too much
 * trouble for too little gain...
 *
 * In case you wonder: the 'rep xcrypt*' instructions above are *not*
 * affected by the Direction Flag and the pointers advance toward
 * larger addresses unconditionally.
 */
static inline unsigned char *
padlock_memcpy(void *dst,const void *src,size_t n)
{
	size_t       *d=dst;
	const size_t *s=src;

	n /= sizeof(*d);
	do { *d++ = *s++; } while (--n);

	return dst;
}
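
/* Note that this copies in word-sized units and uses a do/while loop,
 * so it implicitly assumes n is a nonzero multiple of sizeof(size_t);
 * the callers below only ever pass multiples of AES_BLOCK_SIZE. */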

#elif defined(_MSC_VER)
/*
 * Unlike GCC these are real functions. In order to minimize the impact
 * on performance we adhere to the __fastcall calling convention in
 * order to get the first two arguments passed through %ecx and %edx.
 * Which suits very well, as the instructions in question use
 * both %ecx and %edx as input:-)
 */
#define REP_XCRYPT(code)		\
	_asm _emit 0xf3			\
	_asm _emit 0x0f _asm _emit 0xa7	\
	_asm _emit code

/* BIG FAT WARNING:
 * 	The offsets used with 'lea' instructions
 * 	describe items of the 'padlock_cipher_data'
 * 	structure.
 */
#define PADLOCK_XCRYPT_ASM(name,code)	\
static void * __fastcall 		\
	name (size_t cnt, void *cdata,	\
	void *outp, const void *inp)	\
{	_asm	mov	eax,edx		\
	_asm	lea	edx,[eax+16]	\
	_asm	lea	ebx,[eax+32]	\
	_asm	mov	edi,outp	\
	_asm	mov	esi,inp		\
	REP_XCRYPT(code)		\
}

PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb,0xc8)
PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc,0xd0)
PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb,0xe0)
PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb,0xe8)

static int __fastcall
padlock_xstore(void *outp,unsigned int code)
{	_asm	mov	edi,ecx
	_asm _emit 0x0f _asm _emit 0xa7 _asm _emit 0xc0
}

static void __fastcall
padlock_reload_key(void)
{	_asm pushfd _asm popfd		}

static void __fastcall
padlock_verify_context(void *cdata)
{	_asm	{
		pushfd
		bt	DWORD PTR[esp],30
		jnc	skip
		cmp	ecx,padlock_saved_context
		je	skip
		popfd
		sub	esp,4
	skip:	add	esp,4
		mov	padlock_saved_context,ecx
		}
}

static int
padlock_available(void)
{	_asm	{
		pushfd
		pop	eax
		mov	ecx,eax
		xor	eax,1<<21
		push	eax
		popfd
		pushfd
		pop	eax
		xor	eax,ecx
		bt	eax,21
		jnc	noluck
		mov	eax,0
		cpuid
		xor	eax,eax
		cmp	ebx,'tneC'
		jne	noluck
		cmp	edx,'Hrua'
		jne	noluck
		cmp	ecx,'slua'
		jne	noluck
		mov	eax,0xC0000000
		cpuid
		mov	edx,eax
		xor	eax,eax
		cmp	edx,0xC0000001
		jb	noluck
		mov	eax,0xC0000001
		cpuid
		xor	eax,eax
		bt	edx,6
		jnc	skip_a
		bt	edx,7
		jnc	skip_a
		mov	padlock_use_ace,1
		inc	eax
	skip_a:	bt	edx,2
		jnc	skip_r
		bt	edx,3
		jnc	skip_r
		mov	padlock_use_rng,1
		inc	eax
	skip_r:
	noluck:
		}
}

static void __fastcall
padlock_bswapl(void *key)
{	_asm	{
		pushfd
		cld
		mov	esi,ecx
		mov	edi,ecx
		mov	ecx,60
	up:	lodsd
		bswap	eax
		stosd
		loop	up
		popfd
		}
}

/* MS actually specifies the status of the Direction Flag, and the
 * compiler even manages to compile the following as 'rep movsd'
 * all by itself...
 */
#define padlock_memcpy(o,i,n) ((unsigned char *)memcpy((o),(i),(n)&~3U))
#endif

/* ===== AES encryption/decryption ===== */
#ifndef OPENSSL_NO_AES

#if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb)
#define NID_aes_128_cfb	NID_aes_128_cfb128
#endif

#if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb)
#define NID_aes_128_ofb	NID_aes_128_ofb128
#endif

#if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb)
#define NID_aes_192_cfb	NID_aes_192_cfb128
#endif

#if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb)
#define NID_aes_192_ofb	NID_aes_192_ofb128
#endif

#if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb)
#define NID_aes_256_cfb	NID_aes_256_cfb128
#endif

#if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb)
#define NID_aes_256_ofb	NID_aes_256_ofb128
#endif

/* List of supported ciphers. */
static int padlock_cipher_nids[] = {
	NID_aes_128_ecb,
	NID_aes_128_cbc,
	NID_aes_128_cfb,
	NID_aes_128_ofb,

	NID_aes_192_ecb,
	NID_aes_192_cbc,
	NID_aes_192_cfb,
	NID_aes_192_ofb,

	NID_aes_256_ecb,
	NID_aes_256_cbc,
	NID_aes_256_cfb,
	NID_aes_256_ofb,
};
static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids)/
				      sizeof(padlock_cipher_nids[0]));

/* Function prototypes ... */
static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
				const unsigned char *iv, int enc);
static int padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
			      const unsigned char *in, size_t nbytes);

#define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) +		\
	( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F )	)
#define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\
	NEAREST_ALIGNED(ctx->cipher_data))
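
/* NEAREST_ALIGNED rounds a pointer up to the next 16-byte boundary:
 * e.g. 0x1001 gains (0x10 - 0x01) & 0x0F = 0x0F to become 0x1010,
 * while an already-aligned 0x1000 gains (0x10 - 0x00) & 0x0F = 0.
 * This is safe because cipher_data is over-allocated by 16 bytes in
 * DECLARE_AES_EVP below. */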

#define EVP_CIPHER_block_size_ECB	AES_BLOCK_SIZE
#define EVP_CIPHER_block_size_CBC	AES_BLOCK_SIZE
#define EVP_CIPHER_block_size_OFB	1
#define EVP_CIPHER_block_size_CFB	1

/* Declaring so many ciphers by hand would be a pain.
   Instead introduce a bit of preprocessor magic :-) */
#define	DECLARE_AES_EVP(ksize,lmode,umode)	\
static const EVP_CIPHER padlock_aes_##ksize##_##lmode = {	\
	NID_aes_##ksize##_##lmode,		\
	EVP_CIPHER_block_size_##umode,	\
	AES_KEY_SIZE_##ksize,		\
	AES_BLOCK_SIZE,			\
	0 | EVP_CIPH_##umode##_MODE,	\
	padlock_aes_init_key,		\
	padlock_aes_cipher,		\
	NULL,				\
	sizeof(struct padlock_cipher_data) + 16,	\
	EVP_CIPHER_set_asn1_iv,		\
	EVP_CIPHER_get_asn1_iv,		\
	NULL,				\
	NULL				\
}
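
/* For instance, DECLARE_AES_EVP(128,cbc,CBC) expands to an EVP_CIPHER
 * named padlock_aes_128_cbc with nid NID_aes_128_cbc, a 16-byte block,
 * a 16-byte key and the EVP_CIPH_CBC_MODE flag. Note the "+ 16" in the
 * ctx_size field: it leaves room for the 16-byte realignment performed
 * by ALIGNED_CIPHER_DATA() above. */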

DECLARE_AES_EVP(128,ecb,ECB);
DECLARE_AES_EVP(128,cbc,CBC);
DECLARE_AES_EVP(128,cfb,CFB);
DECLARE_AES_EVP(128,ofb,OFB);

DECLARE_AES_EVP(192,ecb,ECB);
DECLARE_AES_EVP(192,cbc,CBC);
DECLARE_AES_EVP(192,cfb,CFB);
DECLARE_AES_EVP(192,ofb,OFB);

DECLARE_AES_EVP(256,ecb,ECB);
DECLARE_AES_EVP(256,cbc,CBC);
DECLARE_AES_EVP(256,cfb,CFB);
DECLARE_AES_EVP(256,ofb,OFB);

static int
padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid)
{
	/* No specific cipher => return a list of supported nids ... */
	if (!cipher) {
		*nids = padlock_cipher_nids;
		return padlock_cipher_nids_num;
	}

	/* ... or the requested "cipher" otherwise */
	switch (nid) {
	  case NID_aes_128_ecb:
	    *cipher = &padlock_aes_128_ecb;
	    break;
	  case NID_aes_128_cbc:
	    *cipher = &padlock_aes_128_cbc;
	    break;
	  case NID_aes_128_cfb:
	    *cipher = &padlock_aes_128_cfb;
	    break;
	  case NID_aes_128_ofb:
	    *cipher = &padlock_aes_128_ofb;
	    break;

	  case NID_aes_192_ecb:
	    *cipher = &padlock_aes_192_ecb;
	    break;
	  case NID_aes_192_cbc:
	    *cipher = &padlock_aes_192_cbc;
	    break;
	  case NID_aes_192_cfb:
	    *cipher = &padlock_aes_192_cfb;
	    break;
	  case NID_aes_192_ofb:
	    *cipher = &padlock_aes_192_ofb;
	    break;

	  case NID_aes_256_ecb:
	    *cipher = &padlock_aes_256_ecb;
	    break;
	  case NID_aes_256_cbc:
	    *cipher = &padlock_aes_256_cbc;
	    break;
	  case NID_aes_256_cfb:
	    *cipher = &padlock_aes_256_cfb;
	    break;
	  case NID_aes_256_ofb:
	    *cipher = &padlock_aes_256_ofb;
	    break;

	  default:
	    /* Sorry, we don't support this NID */
	    *cipher = NULL;
	    return 0;
	}

	return 1;
}

/* Prepare the encryption key for PadLock usage */
static int
padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key,
		      const unsigned char *iv, int enc)
{
	struct padlock_cipher_data *cdata;
	int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8;

	if (key==NULL) return 0;	/* ERROR */

	cdata = ALIGNED_CIPHER_DATA(ctx);
	memset(cdata, 0, sizeof(struct padlock_cipher_data));

	/* Prepare Control word. */
	if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE)
		cdata->cword.b.encdec = 0;
	else
		cdata->cword.b.encdec = (ctx->encrypt == 0);
	cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
	cdata->cword.b.ksize = (key_len - 128) / 64;

	switch(key_len) {
		case 128:
			/* PadLock can generate an extended key for
			   AES128 in hardware */
			memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
			cdata->cword.b.keygen = 0;
			break;

		case 192:
		case 256:
			/* Generate an extended AES key in software.
			   Needed for AES192/AES256 */
			/* Well, the above applies to Stepping 8 CPUs
			   and is listed as hardware errata. They most
			   likely will fix it at some point and then
			   a check for stepping would be due here. */
			if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB_MODE ||
			    EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE ||
			    enc)
				AES_set_encrypt_key(key, key_len, &cdata->ks);
			else
				AES_set_decrypt_key(key, key_len, &cdata->ks);
#ifndef AES_ASM
			/* OpenSSL C functions use a byte-swapped extended key. */
			padlock_bswapl(&cdata->ks);
#endif
			cdata->cword.b.keygen = 1;
			break;

		default:
			/* ERROR */
			return 0;
	}

	/*
	 * This is done to cover for cases when the user reuses the
	 * context for a new key. The catch is that if we don't do
	 * this, padlock_aes_cipher might proceed with the old key...
	 */
	padlock_reload_key ();

	return 1;
}

/*
 * Simplified version of padlock_aes_cipher() used when
 * 1) both the input and output buffers are at aligned addresses, or
 * 2) running on a newer CPU that doesn't require aligned buffers.
 */
static int
padlock_aes_cipher_omnivorous(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
		const unsigned char *in_arg, size_t nbytes)
{
	struct padlock_cipher_data *cdata;
	void  *iv;

	cdata = ALIGNED_CIPHER_DATA(ctx);
	padlock_verify_context(cdata);

	switch (EVP_CIPHER_CTX_mode(ctx)) {
	case EVP_CIPH_ECB_MODE:
		padlock_xcrypt_ecb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
		break;

	case EVP_CIPH_CBC_MODE:
		memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		iv = padlock_xcrypt_cbc(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
		memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
		break;

	case EVP_CIPH_CFB_MODE:
		memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		iv = padlock_xcrypt_cfb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
		memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
		break;

	case EVP_CIPH_OFB_MODE:
		memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		padlock_xcrypt_ofb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
		memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
		break;

	default:
		return 0;
	}

	memset(cdata->iv, 0, AES_BLOCK_SIZE);

	return 1;
}
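
/* The final memset above wipes the per-context IV copy once the
 * xcrypt call is done; presumably this is hygiene so that no chaining
 * state lingers in memory longer than necessary (the up-to-date IV
 * has already been copied back into ctx->iv where needed). */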

#ifndef  PADLOCK_CHUNK
# define PADLOCK_CHUNK	512	/* Must be a power of 2 larger than 16 */
#endif
#if PADLOCK_CHUNK<16 || PADLOCK_CHUNK&(PADLOCK_CHUNK-1)
# error "insane PADLOCK_CHUNK..."
#endif
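
/* The sanity check relies on the usual power-of-two test: for any
 * nonzero x, x & (x-1) clears the lowest set bit, so the result is 0
 * exactly when x has a single bit set, e.g. 512 & 511 == 0 but
 * 384 & 383 == 256 != 0. */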

/* Re-align the arguments to 16-byte boundaries and run the
   encryption function itself. This function is not AES-specific. */
static int
padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
		   const unsigned char *in_arg, size_t nbytes)
{
	struct padlock_cipher_data *cdata;
	const  void *inp;
	unsigned char  *out;
	void  *iv;
	int    inp_misaligned, out_misaligned, realign_in_loop;
	size_t chunk, allocated=0;

	/* ctx->num is maintained in byte-oriented modes,
	   such as CFB and OFB... */
	if ((chunk = ctx->num)) { /* borrow chunk variable */
		unsigned char *ivp=ctx->iv;

		switch (EVP_CIPHER_CTX_mode(ctx)) {
		case EVP_CIPH_CFB_MODE:
			if (chunk >= AES_BLOCK_SIZE)
				return 0; /* bogus value */

			if (ctx->encrypt)
				while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
					ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
					chunk++, nbytes--;
				}
			else	while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
					unsigned char c = *(in_arg++);
					*(out_arg++) = c ^ ivp[chunk];
					ivp[chunk++] = c, nbytes--;
				}

			ctx->num = chunk%AES_BLOCK_SIZE;
			break;
		case EVP_CIPH_OFB_MODE:
			if (chunk >= AES_BLOCK_SIZE)
				return 0; /* bogus value */

			while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
				*(out_arg++) = *(in_arg++) ^ ivp[chunk];
				chunk++, nbytes--;
			}

			ctx->num = chunk%AES_BLOCK_SIZE;
			break;
		}
	}

	if (nbytes == 0)
		return 1;
#if 0
	if (nbytes % AES_BLOCK_SIZE)
		return 0; /* are we expected to do tail processing? */
#else
	/* nbytes is always a multiple of AES_BLOCK_SIZE in ECB and CBC
	   modes and an arbitrary value in byte-oriented modes, such as
	   CFB and OFB... */
#endif

	/* VIA promises CPUs that won't require alignment in the future.
	   For now padlock_aes_align_required is initialized to 1 and
	   the condition is never met... */
	/* The C7 core is capable of managing unaligned input in
	   non-ECB[!] modes, but the performance penalty appears to be
	   approximately the same as for the software alignment below,
	   i.e. ~3x. They promise to improve it in the future, but for
	   now we can just as well pretend that it can only handle
	   aligned input... */
	if (!padlock_aes_align_required && (nbytes%AES_BLOCK_SIZE)==0)
		return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes);

	inp_misaligned = (((size_t)in_arg) & 0x0F);
	out_misaligned = (((size_t)out_arg) & 0x0F);

	/* Note that even if the output is aligned and the input is not,
	 * I still prefer to loop instead of copying the whole input
	 * and then encrypting in one stroke. This is done in order
	 * to improve L1 cache utilization... */
	realign_in_loop = out_misaligned|inp_misaligned;

	if (!realign_in_loop && (nbytes%AES_BLOCK_SIZE)==0)
		return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes);

	/* this takes one "if" out of the loops */
	chunk  = nbytes;
	chunk %= PADLOCK_CHUNK;
	if (chunk==0) chunk = PADLOCK_CHUNK;

	if (out_misaligned) {
		/* optimize for small input */
		allocated = (chunk<nbytes?PADLOCK_CHUNK:nbytes);
		out = alloca(0x10 + allocated);
		out = NEAREST_ALIGNED(out);
	}
	else
		out = out_arg;

	cdata = ALIGNED_CIPHER_DATA(ctx);
	padlock_verify_context(cdata);

	switch (EVP_CIPHER_CTX_mode(ctx)) {
	case EVP_CIPH_ECB_MODE:
		do	{
			if (inp_misaligned)
				inp = padlock_memcpy(out, in_arg, chunk);
			else
				inp = in_arg;
			in_arg += chunk;

			padlock_xcrypt_ecb(chunk/AES_BLOCK_SIZE, cdata, out, inp);

			if (out_misaligned)
				out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
			else
				out     = out_arg+=chunk;

			nbytes -= chunk;
			chunk   = PADLOCK_CHUNK;
		} while (nbytes);
		break;

	case EVP_CIPH_CBC_MODE:
		memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		goto cbc_shortcut;
		do	{
			if (iv != cdata->iv)
				memcpy(cdata->iv, iv, AES_BLOCK_SIZE);
			chunk = PADLOCK_CHUNK;
		cbc_shortcut: /* optimize for small input */
			if (inp_misaligned)
				inp = padlock_memcpy(out, in_arg, chunk);
			else
				inp = in_arg;
			in_arg += chunk;

			iv = padlock_xcrypt_cbc(chunk/AES_BLOCK_SIZE, cdata, out, inp);

			if (out_misaligned)
				out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
			else
				out     = out_arg+=chunk;

		} while (nbytes -= chunk);
		memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
		break;

	case EVP_CIPH_CFB_MODE:
		memcpy (iv = cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		chunk &= ~(AES_BLOCK_SIZE-1);
		if (chunk)	goto cfb_shortcut;
		else		goto cfb_skiploop;
		do	{
			if (iv != cdata->iv)
				memcpy(cdata->iv, iv, AES_BLOCK_SIZE);
			chunk = PADLOCK_CHUNK;
		cfb_shortcut: /* optimize for small input */
			if (inp_misaligned)
				inp = padlock_memcpy(out, in_arg, chunk);
			else
				inp = in_arg;
			in_arg += chunk;

			iv = padlock_xcrypt_cfb(chunk/AES_BLOCK_SIZE, cdata, out, inp);

			if (out_misaligned)
				out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
			else
				out     = out_arg+=chunk;

			nbytes -= chunk;
		} while (nbytes >= AES_BLOCK_SIZE);

		cfb_skiploop:
		if (nbytes) {
			unsigned char *ivp = cdata->iv;

			if (iv != ivp) {
				memcpy(ivp, iv, AES_BLOCK_SIZE);
				iv = ivp;
			}
			ctx->num = nbytes;
			if (cdata->cword.b.encdec) {
				cdata->cword.b.encdec=0;
				padlock_reload_key();
				padlock_xcrypt_ecb(1,cdata,ivp,ivp);
				cdata->cword.b.encdec=1;
				padlock_reload_key();
				while(nbytes) {
					unsigned char c = *(in_arg++);
					*(out_arg++) = c ^ *ivp;
					*(ivp++) = c, nbytes--;
				}
			}
			else {	padlock_reload_key();
				padlock_xcrypt_ecb(1,cdata,ivp,ivp);
				padlock_reload_key();
				while (nbytes) {
					*ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
					ivp++, nbytes--;
				}
			}
		}

		memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
		break;

	case EVP_CIPH_OFB_MODE:
		memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
		chunk &= ~(AES_BLOCK_SIZE-1);
		if (chunk) do	{
			if (inp_misaligned)
				inp = padlock_memcpy(out, in_arg, chunk);
			else
				inp = in_arg;
			in_arg += chunk;

			padlock_xcrypt_ofb(chunk/AES_BLOCK_SIZE, cdata, out, inp);

			if (out_misaligned)
				out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
			else
				out     = out_arg+=chunk;

			nbytes -= chunk;
			chunk   = PADLOCK_CHUNK;
		} while (nbytes >= AES_BLOCK_SIZE);

		if (nbytes) {
			unsigned char *ivp = cdata->iv;

			ctx->num = nbytes;
			padlock_reload_key();	/* empirically found */
			padlock_xcrypt_ecb(1,cdata,ivp,ivp);
			padlock_reload_key();	/* empirically found */
			while (nbytes) {
				*(out_arg++) = *(in_arg++) ^ *ivp;
				ivp++, nbytes--;
			}
		}

		memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
		break;

	default:
		return 0;
	}

	/* Clean the realign buffer if it was used */
	if (out_misaligned) {
		volatile unsigned long *p=(void *)out;
		size_t   n = allocated/sizeof(*p);
		while (n--) *p++=0;
	}

	memset(cdata->iv, 0, AES_BLOCK_SIZE);

	return 1;
}

#endif /* OPENSSL_NO_AES */

/* ===== Random Number Generator ===== */
/*
 * This code is not engaged. The reason is that it does not comply
 * with recommendations for VIA RNG usage for secure applications
 * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it
 * provide meaningful error control...
 */
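/* A note on the xstore interface used below: the low two bits of EDX
 * select the "quality factor" (divisor); with EDX=0 the instruction
 * stores 8 bytes per call, with EDX=3 a single byte, which is why the
 * two loops below advance by 8 and by 1 respectively. The returned
 * EAX carries the status tested below: bits 0-4 = bytes stored,
 * bit 6 = RNG enabled, bits 10 and up = failed health tests. */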
/* Wrapper that provides an interface between the API and
   the raw PadLock RNG */
static int
padlock_rand_bytes(unsigned char *output, int count)
{
	unsigned int eax, buf;

	while (count >= 8) {
		eax = padlock_xstore(output, 0);
		if (!(eax&(1<<6)))	return 0; /* RNG disabled */
		/* this ---vv--- covers DC bias, Raw Bits and String Filter */
		if (eax&(0x1F<<10))	return 0;
		if ((eax&0x1F)==0)	continue; /* no data, retry... */
		if ((eax&0x1F)!=8)	return 0; /* fatal failure...  */
		output += 8;
		count  -= 8;
	}
	while (count > 0) {
		eax = padlock_xstore(&buf, 3);
		if (!(eax&(1<<6)))	return 0; /* RNG disabled */
		/* this ---vv--- covers DC bias, Raw Bits and String Filter */
		if (eax&(0x1F<<10))	return 0;
		if ((eax&0x1F)==0)	continue; /* no data, retry... */
		if ((eax&0x1F)!=1)	return 0; /* fatal failure...  */
		*output++ = (unsigned char)buf;
		count--;
	}
	*(volatile unsigned int *)&buf=0;

	return 1;
}

/* Dummy but necessary function */
static int
padlock_rand_status(void)
{
	return 1;
}

/* Prepare structure for registration */
static RAND_METHOD padlock_rand = {
	NULL,			/* seed */
	padlock_rand_bytes,	/* bytes */
	NULL,			/* cleanup */
	NULL,			/* add */
	padlock_rand_bytes,	/* pseudorand */
	padlock_rand_status,	/* rand status */
};

#else  /* !COMPILE_HW_PADLOCK */
#ifndef OPENSSL_NO_DYNAMIC_ENGINE
OPENSSL_EXPORT
int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns) { return 0; }
IMPLEMENT_DYNAMIC_CHECK_FN()
#endif
#endif /* COMPILE_HW_PADLOCK */

#endif /* !OPENSSL_NO_HW_PADLOCK */
#endif /* !OPENSSL_NO_HW */