/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * The basic framework for this code came from the reference
 * implementation for MD5. That implementation is Copyright (C)
 * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
 *
 * License to copy and use this software is granted provided that it
 * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
 * Algorithm" in all material mentioning or referencing this software
 * or this function.
 *
 * License is also granted to make and use derivative works provided
 * that such works are identified as "derived from the RSA Data
 * Security, Inc. MD5 Message-Digest Algorithm" in all material
 * mentioning or referencing the derived work.
 *
 * RSA Data Security, Inc. makes no representations concerning either
 * the merchantability of this software or the suitability of this
 * software for any particular purpose. It is provided "as is"
 * without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this
 * documentation and/or software.
 *
 * NOTE: Cleaned-up and optimized version of SHA1, based on the FIPS 180-1
 * standard, available at http://www.itl.nist.gov/fipspubs/fip180-1.htm
 * Not as fast as one would like -- further optimizations are encouraged
 * and appreciated.
 */

#ifndef _KERNEL
#include <stdint.h>
#include <strings.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/systeminfo.h>
#endif /* !_KERNEL */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/sha1.h>
#include <sys/sha1_consts.h>

#ifdef _LITTLE_ENDIAN
#include <sys/byteorder.h>
#define HAVE_HTONL
#endif

static void Encode(uint8_t *, const uint32_t *, size_t);

#if defined(__sparc)

#define SHA1_TRANSFORM(ctx, in) \
	SHA1Transform((ctx)->state[0], (ctx)->state[1], (ctx)->state[2], \
	    (ctx)->state[3], (ctx)->state[4], (ctx), (in))

static void SHA1Transform(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t,
    SHA1_CTX *, const uint8_t *);

#elif defined(__amd64)

#define SHA1_TRANSFORM(ctx, in) sha1_block_data_order((ctx), (in), 1)
#define SHA1_TRANSFORM_BLOCKS(ctx, in, num) sha1_block_data_order((ctx), \
	(in), (num))

void sha1_block_data_order(SHA1_CTX *ctx, const void *inpp, size_t num_blocks);

#else

#define SHA1_TRANSFORM(ctx, in) SHA1Transform((ctx), (in))

static void SHA1Transform(SHA1_CTX *, const uint8_t *);

#endif

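/*
 * PADDING holds the start of the FIPS 180-1 message padding: a single 1 bit
 * (the 0x80 byte) followed by zero bytes. SHA1Final() feeds a prefix of this
 * array to SHA1Update() to bring the buffered message length up to
 * 56 bytes mod 64 before the 64-bit bit count is appended.
 */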
static uint8_t PADDING[64] = { 0x80, /* all zeros */ };

/*
 * F, G, and H are the basic SHA1 functions.
 */
#define F(b, c, d) (((b) & (c)) | ((~b) & (d)))
#define G(b, c, d) ((b) ^ (c) ^ (d))
#define H(b, c, d) (((b) & (c)) | (((b)|(c)) & (d)))
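/*
 * F() is used in rounds 0-19, G() (parity) in rounds 20-39 and 60-79, and
 * H() (majority) in rounds 40-59; SHA1_CONST(0) through SHA1_CONST(3) supply
 * the matching round constants in SHA1Transform() below.
 */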

/*
 * ROTATE_LEFT rotates x left n bits.
 */

#if defined(__GNUC__) && defined(_LP64)
static __inline__ uint64_t
ROTATE_LEFT(uint64_t value, uint32_t n)
{
	uint32_t t32;

	t32 = (uint32_t)value;
	return ((t32 << n) | (t32 >> (32 - n)));
}

#else

#define ROTATE_LEFT(x, n) \
	(((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n))))

#endif
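
/*
 * As used in this file the rotation is always over a 32-bit quantity:
 * for example, ROTATE_LEFT(0x80000001, 1) yields 0x00000003, with the
 * high bit wrapping around into bit 0.
 */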


/*
 * SHA1Init()
 *
 * purpose: initializes the sha1 context and begins an sha1 digest operation
 * input: SHA1_CTX * : the context to initialize.
 * output: void
 */

void
SHA1Init(SHA1_CTX *ctx)
{
	ctx->count[0] = ctx->count[1] = 0;

	/*
	 * load magic initialization constants. Tell lint
	 * that these constants are unsigned by using U.
	 */

	ctx->state[0] = 0x67452301U;
	ctx->state[1] = 0xefcdab89U;
	ctx->state[2] = 0x98badcfeU;
	ctx->state[3] = 0x10325476U;
	ctx->state[4] = 0xc3d2e1f0U;
}

#ifdef VIS_SHA1
#ifdef _KERNEL

#include <sys/regset.h>
#include <sys/vis.h>
#include <sys/fpu/fpusystm.h>

/* the alignment for block stores to save fp registers */
#define VIS_ALIGN (64)

extern int sha1_savefp(kfpu_t *, int);
extern void sha1_restorefp(kfpu_t *);

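/*
 * Tunable: total input length in bytes (rounded down to whole 64-byte
 * blocks) at or above which sha1_savefp() is told that saving the
 * floating-point state for the VIS code path is worthwhile; see its use
 * in SHA1Update() below.
 */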
uint32_t vis_sha1_svfp_threshold = 128;

#endif /* _KERNEL */

/*
 * VIS SHA-1 consts.
 */
static uint64_t VIS[] = {
	0x8000000080000000ULL,
	0x0002000200020002ULL,
	0x5a8279996ed9eba1ULL,
	0x8f1bbcdcca62c1d6ULL,
	0x012389ab456789abULL};

extern void SHA1TransformVIS(uint64_t *, uint32_t *, uint32_t *, uint64_t *);


/*
 * SHA1Update()
 *
 * purpose: continues an sha1 digest operation, using the message block
 *          to update the context.
 * input: SHA1_CTX * : the context to update
 *        void * : the message block
 *        size_t : the length of the message block in bytes
 * output: void
 */

void
SHA1Update(SHA1_CTX *ctx, const void *inptr, size_t input_len)
{
	uint32_t i, buf_index, buf_len;
	uint64_t X0[40], input64[8];
	const uint8_t *input = inptr;
#ifdef _KERNEL
	int usevis = 0;
#else
	int usevis = 1;
#endif /* _KERNEL */

	/* check for noop */
	if (input_len == 0)
		return;

	/* compute number of bytes mod 64 */
	buf_index = (ctx->count[1] >> 3) & 0x3F;

	/* update number of bits */
	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
		ctx->count[0]++;

	ctx->count[0] += (input_len >> 29);
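
	/*
	 * ctx->count is the running message length in bits, kept as a
	 * 64-bit value split across two 32-bit words: count[1] holds the
	 * low 32 bits and count[0] the high 32 bits.  The comparison above
	 * detects a carry out of count[1], and (input_len >> 29) adds the
	 * bits of input_len that overflow past the low word.
	 */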

	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {
#ifdef _KERNEL
		kfpu_t *fpu;
		if (fpu_exists) {
			uint8_t fpua[sizeof (kfpu_t) + GSR_SIZE + VIS_ALIGN];
			uint32_t len = (input_len + buf_index) & ~0x3f;
			int svfp_ok;

			fpu = (kfpu_t *)P2ROUNDUP((uintptr_t)fpua, 64);
			svfp_ok = ((len >= vis_sha1_svfp_threshold) ? 1 : 0);
			usevis = fpu_exists && sha1_savefp(fpu, svfp_ok);
		} else {
			usevis = 0;
		}
#endif /* _KERNEL */

		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA1Transform() if
		 * buf_index != 0. if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA1Update().
		 */

		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
			if (usevis) {
				SHA1TransformVIS(X0,
				    ctx->buf_un.buf32,
				    &ctx->state[0], VIS);
			} else {
				SHA1_TRANSFORM(ctx, ctx->buf_un.buf8);
			}
			i = buf_len;
		}

		/*
		 * VIS SHA-1: uses the VIS 1.0 instructions to accelerate
		 * SHA-1 processing. This is achieved by "offloading" the
		 * computation of the message schedule (MS) to the VIS units.
		 * This allows the VIS computation of the message schedule
		 * to be performed in parallel with the standard integer
		 * processing of the remainder of the SHA-1 computation.
		 * This improves performance by up to around 1.37X, compared
		 * to an optimized integer-only implementation.
		 *
		 * The VIS implementation of SHA1Transform has a different API
		 * to the standard integer version:
		 *
		 *	void SHA1TransformVIS(
		 *		uint64_t *,	// Pointer to MS for ith block
		 *		uint32_t *,	// Pointer to ith block of message data
		 *		uint32_t *,	// Pointer to SHA state i.e ctx->state
		 *		uint64_t *,	// Pointer to various VIS constants
		 *	)
		 *
		 * Note: the message data must be 4-byte aligned.
		 *
		 * Function requires VIS 1.0 support.
		 *
		 * Handling is provided to deal with arbitrary byte alignment
		 * of the input data but the performance gains are reduced
		 * for alignments other than 4-bytes.
		 */
		if (usevis) {
			if (!IS_P2ALIGNED(&input[i], sizeof (uint32_t))) {
				/*
				 * Main processing loop - input misaligned
				 */
				for (; i + 63 < input_len; i += 64) {
					bcopy(&input[i], input64, 64);
					SHA1TransformVIS(X0,
					    (uint32_t *)input64,
					    &ctx->state[0], VIS);
				}
			} else {
				/*
				 * Main processing loop - input 8-byte aligned
				 */
				for (; i + 63 < input_len; i += 64) {
					SHA1TransformVIS(X0,
					    /* LINTED E_BAD_PTR_CAST_ALIGN */
					    (uint32_t *)&input[i], /* CSTYLED */
					    &ctx->state[0], VIS);
				}

			}
#ifdef _KERNEL
			sha1_restorefp(fpu);
#endif /* _KERNEL */
		} else {
			for (; i + 63 < input_len; i += 64) {
				SHA1_TRANSFORM(ctx, &input[i]);
			}
		}

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive nop.
		 */

		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}

#else /* VIS_SHA1 */

void
SHA1Update(SHA1_CTX *ctx, const void *inptr, size_t input_len)
{
	uint32_t i, buf_index, buf_len;
	const uint8_t *input = inptr;
#if defined(__amd64)
	uint32_t block_count;
#endif /* __amd64 */

	/* check for noop */
	if (input_len == 0)
		return;

	/* compute number of bytes mod 64 */
	buf_index = (ctx->count[1] >> 3) & 0x3F;

	/* update number of bits */
	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
		ctx->count[0]++;

	ctx->count[0] += (input_len >> 29);

	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {

		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA1Transform() if
		 * buf_index != 0. if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA1Update().
		 */

		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
			SHA1_TRANSFORM(ctx, ctx->buf_un.buf8);
			i = buf_len;
		}

#if !defined(__amd64)
		for (; i + 63 < input_len; i += 64)
			SHA1_TRANSFORM(ctx, &input[i]);
#else
		block_count = (input_len - i) >> 6;
		if (block_count > 0) {
			SHA1_TRANSFORM_BLOCKS(ctx, &input[i], block_count);
			i += block_count << 6;
		}
#endif /* !__amd64 */

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive nop.
		 */

		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}

#endif /* VIS_SHA1 */

/*
 * SHA1Final()
 *
 * purpose: ends an sha1 digest operation, finalizing the message digest and
 *          zeroing the context.
 * input: uchar_t * : A buffer to store the digest.
 *                  : The function actually uses void* because many
 *                  : callers pass things other than uchar_t here.
 *        SHA1_CTX * : the context to finalize, save, and zero
 * output: void
 */

void
SHA1Final(void *digest, SHA1_CTX *ctx)
{
	uint8_t bitcount_be[sizeof (ctx->count)];
	uint32_t index = (ctx->count[1] >> 3) & 0x3f;

	/* store bit count, big endian */
	Encode(bitcount_be, ctx->count, sizeof (bitcount_be));

	/* pad out to 56 mod 64 */
	SHA1Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
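
	/*
	 * If index < 56 there is room left in the current block for both
	 * the padding and the 8-byte length, so (56 - index) pad bytes are
	 * added; otherwise (120 - index) pad bytes spill into a second
	 * block.  Either way the buffered length is now 56 mod 64, so the
	 * appended bit count below completes a final 64-byte block.
	 */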

	/* append length (before padding) */
	SHA1Update(ctx, bitcount_be, sizeof (bitcount_be));

	/* store state in digest */
	Encode(digest, ctx->state, sizeof (ctx->state));

	/* zeroize sensitive information */
	bzero(ctx, sizeof (*ctx));
}
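
/*
 * Sketch of typical use of this interface (the data pointer and length are
 * whatever the caller has on hand):
 *
 *	SHA1_CTX ctx;
 *	uint8_t digest[20];
 *
 *	SHA1Init(&ctx);
 *	SHA1Update(&ctx, data, data_len);	// repeat per chunk of input
 *	SHA1Final(digest, &ctx);
 *
 * SHA1Final() zeroizes the context, so SHA1Init() must be called again
 * before the context is reused for another digest.
 */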


#if !defined(__amd64)

typedef uint32_t sha1word;

/*
 * sparc optimization:
 *
 * on the sparc, we can load big endian 32-bit data easily. note that
 * special care must be taken to ensure the address is 32-bit aligned.
 * in the interest of speed, we don't check to make sure, since
 * careful programming can guarantee this for us.
 */

#if defined(_BIG_ENDIAN)
#define LOAD_BIG_32(addr) (*(uint32_t *)(addr))

#elif defined(HAVE_HTONL)
#define LOAD_BIG_32(addr) htonl(*((uint32_t *)(addr)))

#else
/* little endian -- will work on big endian, but slowly */
#define LOAD_BIG_32(addr) \
	(((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3])
#endif /* _BIG_ENDIAN */
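
/*
 * In all three variants LOAD_BIG_32() reads four bytes as a big-endian
 * 32-bit word: the byte sequence 0x01 0x02 0x03 0x04 loads as 0x01020304
 * regardless of the host's native byte order.
 */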

/*
 * SHA1Transform()
 */
#if defined(W_ARRAY)
#define W(n) w[n]
#else /* !defined(W_ARRAY) */
#define W(n) w_ ## n
#endif /* !defined(W_ARRAY) */


#if defined(__sparc)

/*
 * sparc register window optimization:
 *
 * `a', `b', `c', `d', and `e' are passed into SHA1Transform
 * explicitly since it increases the number of registers available to
 * the compiler. under this scheme, these variables can be held in
 * %i0 - %i4, which leaves more local and out registers available.
 *
 * purpose: sha1 transformation -- updates the digest based on `block'
 * input: uint32_t : bytes 1 - 4 of the digest
 *	  uint32_t : bytes 5 - 8 of the digest
 *	  uint32_t : bytes 9 - 12 of the digest
 *	  uint32_t : bytes 13 - 16 of the digest
 *	  uint32_t : bytes 17 - 20 of the digest
 *	  SHA1_CTX * : the context to update
 *	  uint8_t [64]: the block to use to update the digest
 * output: void
 */

void
SHA1Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e,
    SHA1_CTX *ctx, const uint8_t blk[64])
{
	/*
	 * sparc optimization:
	 *
	 * while it is somewhat counter-intuitive, on sparc, it is
	 * more efficient to place all the constants used in this
	 * function in an array and load the values out of the array
	 * than to manually load the constants. this is because
	 * setting a register to a 32-bit value takes two ops in most
	 * cases: a `sethi' and an `or', but loading a 32-bit value
	 * from memory only takes one `ld' (or `lduw' on v9). while
	 * this increases memory usage, the compiler can find enough
	 * other things to do while waiting so that the pipeline does
	 * not stall. additionally, it is likely that many of these
	 * constants are cached so that later accesses do not even go
	 * out to the bus.
	 *
	 * this array is declared `static' to keep the compiler from
	 * having to bcopy() this array onto the stack frame of
	 * SHA1Transform() each time it is called -- which is
	 * unacceptably expensive.
	 *
	 * the `const' is to ensure that callers are good citizens and
	 * do not try to munge the array. since these routines are
	 * going to be called from inside multithreaded kernelland,
	 * this is a good safety check. -- `sha1_consts' will end up in
	 * .rodata.
	 *
	 * unfortunately, loading from an array in this manner hurts
	 * performance under Intel. So, there is a macro,
	 * SHA1_CONST(), used in SHA1Transform(), that either expands to
	 * a reference to this array, or to the actual constant,
	 * depending on what platform this code is compiled for.
	 */

	static const uint32_t sha1_consts[] = {
		SHA1_CONST_0, SHA1_CONST_1, SHA1_CONST_2, SHA1_CONST_3
	};

	/*
	 * general optimization:
	 *
	 * use individual integers instead of using an array. this is a
	 * win, although the amount it wins by seems to vary quite a bit.
	 */

	uint32_t w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7;
	uint32_t w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;

	/*
	 * sparc optimization:
	 *
	 * if `block' is already aligned on a 4-byte boundary, use
	 * LOAD_BIG_32() directly. otherwise, bcopy() into a
	 * buffer that *is* aligned on a 4-byte boundary and then do
	 * the LOAD_BIG_32() on that buffer. benchmarks have shown
	 * that using the bcopy() is better than loading the bytes
	 * individually and doing the endian-swap by hand.
	 *
	 * even though it's quite tempting to just do:
	 *
	 *	blk = bcopy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32));
	 *
	 * and only have one set of LOAD_BIG_32()'s, the compiler
	 * *does not* like that, so please resist the urge.
	 */

	if ((uintptr_t)blk & 0x3) {		/* not 4-byte aligned? */
		bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
		w_15 = LOAD_BIG_32(ctx->buf_un.buf32 + 15);
		w_14 = LOAD_BIG_32(ctx->buf_un.buf32 + 14);
		w_13 = LOAD_BIG_32(ctx->buf_un.buf32 + 13);
		w_12 = LOAD_BIG_32(ctx->buf_un.buf32 + 12);
		w_11 = LOAD_BIG_32(ctx->buf_un.buf32 + 11);
		w_10 = LOAD_BIG_32(ctx->buf_un.buf32 + 10);
		w_9 = LOAD_BIG_32(ctx->buf_un.buf32 + 9);
		w_8 = LOAD_BIG_32(ctx->buf_un.buf32 + 8);
		w_7 = LOAD_BIG_32(ctx->buf_un.buf32 + 7);
		w_6 = LOAD_BIG_32(ctx->buf_un.buf32 + 6);
		w_5 = LOAD_BIG_32(ctx->buf_un.buf32 + 5);
		w_4 = LOAD_BIG_32(ctx->buf_un.buf32 + 4);
		w_3 = LOAD_BIG_32(ctx->buf_un.buf32 + 3);
		w_2 = LOAD_BIG_32(ctx->buf_un.buf32 + 2);
		w_1 = LOAD_BIG_32(ctx->buf_un.buf32 + 1);
		w_0 = LOAD_BIG_32(ctx->buf_un.buf32 + 0);
	} else {
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_15 = LOAD_BIG_32(blk + 60);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_14 = LOAD_BIG_32(blk + 56);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_13 = LOAD_BIG_32(blk + 52);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_12 = LOAD_BIG_32(blk + 48);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_11 = LOAD_BIG_32(blk + 44);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_10 = LOAD_BIG_32(blk + 40);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_9 = LOAD_BIG_32(blk + 36);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_8 = LOAD_BIG_32(blk + 32);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_7 = LOAD_BIG_32(blk + 28);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_6 = LOAD_BIG_32(blk + 24);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_5 = LOAD_BIG_32(blk + 20);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_4 = LOAD_BIG_32(blk + 16);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_3 = LOAD_BIG_32(blk + 12);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_2 = LOAD_BIG_32(blk + 8);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_1 = LOAD_BIG_32(blk + 4);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_0 = LOAD_BIG_32(blk + 0);
	}
#else /* !defined(__sparc) */

void /* CSTYLED */
SHA1Transform(SHA1_CTX *ctx, const uint8_t blk[64])
{
	/* CSTYLED */
	sha1word a = ctx->state[0];
	sha1word b = ctx->state[1];
	sha1word c = ctx->state[2];
	sha1word d = ctx->state[3];
	sha1word e = ctx->state[4];

#if defined(W_ARRAY)
	sha1word w[16];
#else /* !defined(W_ARRAY) */
	sha1word w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7;
	sha1word w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;
#endif /* !defined(W_ARRAY) */

	W(0) = LOAD_BIG_32((void *)(blk + 0));
	W(1) = LOAD_BIG_32((void *)(blk + 4));
	W(2) = LOAD_BIG_32((void *)(blk + 8));
	W(3) = LOAD_BIG_32((void *)(blk + 12));
	W(4) = LOAD_BIG_32((void *)(blk + 16));
	W(5) = LOAD_BIG_32((void *)(blk + 20));
	W(6) = LOAD_BIG_32((void *)(blk + 24));
	W(7) = LOAD_BIG_32((void *)(blk + 28));
	W(8) = LOAD_BIG_32((void *)(blk + 32));
	W(9) = LOAD_BIG_32((void *)(blk + 36));
	W(10) = LOAD_BIG_32((void *)(blk + 40));
	W(11) = LOAD_BIG_32((void *)(blk + 44));
	W(12) = LOAD_BIG_32((void *)(blk + 48));
	W(13) = LOAD_BIG_32((void *)(blk + 52));
	W(14) = LOAD_BIG_32((void *)(blk + 56));
	W(15) = LOAD_BIG_32((void *)(blk + 60));

#endif /* !defined(__sparc) */

	/*
	 * general optimization:
	 *
	 * even though this approach is described in the standard as
	 * being slower algorithmically, it is 30-40% faster than the
	 * "faster" version under SPARC, because this version has more
	 * of the constraints specified at compile-time and uses fewer
	 * variables (and therefore has better register utilization)
	 * than its "speedier" brother. (i've tried both, trust me)
	 *
	 * for either method given in the spec, there is an "assignment"
	 * phase where the following takes place:
	 *
	 *	tmp = (main_computation);
	 *	e = d; d = c; c = rotate_left(b, 30); b = a; a = tmp;
	 *
	 * we can make the algorithm go faster by not doing this work,
	 * but just pretending that `d' is now `e', etc. this works
	 * really well and obviates the need for a temporary variable.
	 * however, we still explicitly perform the rotate action,
	 * since it is cheaper on SPARC to do it once than to have to
	 * do it over and over again.
	 */
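
	/*
	 * For example, step 0 below is written as
	 *
	 *	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(0) + SHA1_CONST(0);
	 *	b = ROTATE_LEFT(b, 30);
	 *
	 * which is equivalent to the standard
	 *
	 *	tmp = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(0) + SHA1_CONST(0);
	 *	e = d; d = c; c = ROTATE_LEFT(b, 30); b = a; a = tmp;
	 *
	 * with the renaming done implicitly: the new working value is
	 * accumulated into `e', the rotate is applied to `b' in place, and
	 * the roles of a/b/c/d/e shift by one from each step to the next,
	 * which is why step 1 computes `d' from (e, a, b, c).
	 */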

	/* round 1 */
	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(0) + SHA1_CONST(0); /* 0 */
	b = ROTATE_LEFT(b, 30);

	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(1) + SHA1_CONST(0); /* 1 */
	a = ROTATE_LEFT(a, 30);

	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(2) + SHA1_CONST(0); /* 2 */
	e = ROTATE_LEFT(e, 30);

	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(3) + SHA1_CONST(0); /* 3 */
	d = ROTATE_LEFT(d, 30);

	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(4) + SHA1_CONST(0); /* 4 */
	c = ROTATE_LEFT(c, 30);

	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(5) + SHA1_CONST(0); /* 5 */
	b = ROTATE_LEFT(b, 30);

	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(6) + SHA1_CONST(0); /* 6 */
	a = ROTATE_LEFT(a, 30);

	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(7) + SHA1_CONST(0); /* 7 */
	e = ROTATE_LEFT(e, 30);

	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(8) + SHA1_CONST(0); /* 8 */
	d = ROTATE_LEFT(d, 30);

	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(9) + SHA1_CONST(0); /* 9 */
	c = ROTATE_LEFT(c, 30);

	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(10) + SHA1_CONST(0); /* 10 */
	b = ROTATE_LEFT(b, 30);

	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(11) + SHA1_CONST(0); /* 11 */
	a = ROTATE_LEFT(a, 30);

	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(12) + SHA1_CONST(0); /* 12 */
	e = ROTATE_LEFT(e, 30);

	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(13) + SHA1_CONST(0); /* 13 */
	d = ROTATE_LEFT(d, 30);

	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(14) + SHA1_CONST(0); /* 14 */
	c = ROTATE_LEFT(c, 30);

	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(15) + SHA1_CONST(0); /* 15 */
	b = ROTATE_LEFT(b, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 16 */
	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(0) + SHA1_CONST(0);
	a = ROTATE_LEFT(a, 30);

	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 17 */
	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(1) + SHA1_CONST(0);
	e = ROTATE_LEFT(e, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 18 */
	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(2) + SHA1_CONST(0);
	d = ROTATE_LEFT(d, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 19 */
	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(3) + SHA1_CONST(0);
	c = ROTATE_LEFT(c, 30);

	/* round 2 */
	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 20 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(4) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 21 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(5) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 22 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(6) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);

	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 23 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(7) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 24 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(8) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 25 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(9) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 26 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(10) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 27 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(11) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);

	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 28 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(12) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 29 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(13) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 30 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(14) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 31 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(15) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 32 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(0) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);

	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 33 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(1) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 34 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(2) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 35 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(3) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 36 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(4) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 37 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(5) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 38 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(6) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 39 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(7) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	/* round 3 */
	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 40 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(8) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 41 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(9) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 42 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(10) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 43 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(11) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);

	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 44 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(12) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 45 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(13) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 46 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(14) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 47 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(15) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 48 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(0) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);

	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 49 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(1) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 50 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(2) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 51 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(3) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 52 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(4) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 53 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(5) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 54 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(6) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 55 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(7) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 56 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(8) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 57 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(9) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 58 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(10) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 59 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(11) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	/* round 4 */
	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 60 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(12) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 61 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(13) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 62 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(14) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 63 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(15) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 64 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(0) + SHA1_CONST(3);
	c = ROTATE_LEFT(c, 30);

	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 65 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(1) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 66 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(2) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 67 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(3) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 68 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(4) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 69 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(5) + SHA1_CONST(3);
	c = ROTATE_LEFT(c, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 70 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(6) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 71 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(7) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 72 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(8) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 73 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(9) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 74 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(10) + SHA1_CONST(3);
	c = ROTATE_LEFT(c, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 75 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(11) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 76 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(12) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 77 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(13) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 78 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(14) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 79 */

	ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(15) +
	    SHA1_CONST(3);
	ctx->state[1] += b;
	ctx->state[2] += ROTATE_LEFT(c, 30);
	ctx->state[3] += d;
	ctx->state[4] += e;

	/* zeroize sensitive information */
	W(0) = W(1) = W(2) = W(3) = W(4) = W(5) = W(6) = W(7) = W(8) = 0;
	W(9) = W(10) = W(11) = W(12) = W(13) = W(14) = W(15) = 0;
}
#endif /* !__amd64 */


/*
 * Encode()
 *
 * purpose: to convert a list of numbers from little endian to big endian
 * input: uint8_t * : place to store the converted big endian numbers
 *	  uint32_t * : place to get numbers to convert from
 *	  size_t : the length of the input in bytes
 * output: void
 */
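
/*
 * For example, an input word of 0x01020304 is stored in the output as the
 * byte sequence 0x01 0x02 0x03 0x04, whatever the host's native byte order.
 */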

static void
Encode(uint8_t *_RESTRICT_KYWD output, const uint32_t *_RESTRICT_KYWD input,
    size_t len)
{
	size_t i, j;

#if defined(__sparc)
	if (IS_P2ALIGNED(output, sizeof (uint32_t))) {
		for (i = 0, j = 0; j < len; i++, j += 4) {
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			*((uint32_t *)(output + j)) = input[i];
		}
	} else {
#endif /* little endian -- will work on big endian, but slowly */
		for (i = 0, j = 0; j < len; i++, j += 4) {
			output[j] = (input[i] >> 24) & 0xff;
			output[j + 1] = (input[i] >> 16) & 0xff;
			output[j + 2] = (input[i] >> 8) & 0xff;
			output[j + 3] = input[i] & 0xff;
		}
#if defined(__sparc)
	}
#endif
}