/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Cleaned-up and optimized version of MD5, based on the reference
 * implementation provided in RFC 1321. See RSA Copyright information
 * below.
 */

/*
 * MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm
 */

/*
 * Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
 * rights reserved.
 *
 * License to copy and use this software is granted provided that it
 * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
 * Algorithm" in all material mentioning or referencing this software
 * or this function.
 *
 * License is also granted to make and use derivative works provided
 * that such works are identified as "derived from the RSA Data
 * Security, Inc. MD5 Message-Digest Algorithm" in all material
 * mentioning or referencing the derived work.
 *
 * RSA Data Security, Inc. makes no representations concerning either
 * the merchantability of this software or the suitability of this
 * software for any particular purpose. It is provided "as is"
 * without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this
 * documentation and/or software.
 */

#ifndef _KERNEL
#include <stdint.h>
#endif /* _KERNEL */

#include <sys/types.h>
#include <sys/md5.h>
#include <sys/md5_consts.h>	/* MD5_CONST() optimization */
#include "md5_byteswap.h"
#if !defined(_KERNEL) || defined(_BOOT)
#include <strings.h>
#endif /* !_KERNEL || _BOOT */

#ifdef _KERNEL
#include <sys/systm.h>
#endif /* _KERNEL */

static void Encode(uint8_t *, const uint32_t *, size_t);

#if !defined(__amd64)
static void MD5Transform(uint32_t, uint32_t, uint32_t, uint32_t, MD5_CTX *,
    const uint8_t [64]);
#else
void md5_block_asm_host_order(MD5_CTX *ctx, const void *inpp,
    unsigned int input_length_in_blocks);
#endif /* !defined(__amd64) */

static uint8_t PADDING[64] = { 0x80, /* all zeros */ };

/*
 * F, G, H and I are the basic MD5 functions.
 */
#define F(b, c, d)	(((b) & (c)) | ((~b) & (d)))
#define G(b, c, d)	(((b) & (d)) | ((c) & (~d)))
#define H(b, c, d)	((b) ^ (c) ^ (d))
#define I(b, c, d)	((c) ^ ((b) | (~d)))

/*
 * ROTATE_LEFT rotates x left n bits.
 */
#define ROTATE_LEFT(x, n) \
	(((x) << (n)) | ((x) >> ((sizeof (x) << 3) - (n))))
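
/*
 * Illustrative example: for a uint32_t x, ROTATE_LEFT(0x80000001, 4)
 * evaluates to 0x00000018 -- bit 0 moves up to bit 4 and bit 31 wraps
 * around to bit 3.
 */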

/*
 * FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
 * Rotation is separate from addition to prevent recomputation.
 */

#define FF(a, b, c, d, x, s, ac) { \
	(a) += F((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
	(a) = ROTATE_LEFT((a), (s)); \
	(a) += (b); \
}

#define GG(a, b, c, d, x, s, ac) { \
	(a) += G((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
	(a) = ROTATE_LEFT((a), (s)); \
	(a) += (b); \
}

#define HH(a, b, c, d, x, s, ac) { \
	(a) += H((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
	(a) = ROTATE_LEFT((a), (s)); \
	(a) += (b); \
}

#define II(a, b, c, d, x, s, ac) { \
	(a) += I((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
	(a) = ROTATE_LEFT((a), (s)); \
	(a) += (b); \
}

/*
 * Loading 32-bit constants on a RISC is expensive since it involves both a
 * `sethi' and an `or'. thus, we instead have the compiler generate `ld's to
 * load the constants from an array called `md5_consts'. however, on intel
 * (and other CISC processors), it is cheaper to load the constant
 * directly. thus, the c code in MD5Transform() uses the macro MD5_CONST()
 * which either expands to a constant or an array reference, depending on the
 * architecture the code is being compiled for.
 *
 * Right now, i386 and amd64 are the CISC exceptions.
 * If we get another CISC ISA, we'll have to change the ifdef.
 */
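
/*
 * Concretely (a sketch, given the definitions that follow): on i386/amd64,
 * MD5_CONST_e(0) expands to the immediate MD5_CONST_0, while on other
 * platforms it expands to the memory reference md5_consts[0] (on sun4v,
 * to the upper half of the 64-bit load md5_consts64[0]).
 */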

#if defined(__i386) || defined(__amd64)

#define MD5_CONST(x)	(MD5_CONST_ ## x)
#define MD5_CONST_e(x)	MD5_CONST(x)
#define MD5_CONST_o(x)	MD5_CONST(x)

#else
/*
 * sparc/RISC optimization:
 *
 * while it is somewhat counter-intuitive, on sparc (and presumably other RISC
 * machines), it is more efficient to place all the constants used in this
 * function in an array and load the values out of the array than to manually
 * load the constants. this is because setting a register to a 32-bit value
 * takes two ops in most cases: a `sethi' and an `or', but loading a 32-bit
 * value from memory only takes one `ld' (or `lduw' on v9). while this
 * increases memory usage, the compiler can find enough other things to do
 * while waiting to keep the pipeline from stalling. additionally, it is
 * likely that many of these constants are cached so that later accesses do
 * not even go out to the bus.
 *
 * this array is declared `static' to keep the compiler from having to
 * bcopy() this array onto the stack frame of MD5Transform() each time it is
 * called -- which is unacceptably expensive.
 *
 * the `const' is to ensure that callers are good citizens and do not try to
 * munge the array. since these routines are going to be called from inside
 * multithreaded kernelland, this is a good safety check. -- `constants' will
 * end up in .rodata.
 *
 * unfortunately, loading from an array in this manner hurts performance under
 * intel (and presumably other CISC machines). so, there is a macro,
 * MD5_CONST(), used in MD5Transform(), that either expands to a reference to
 * this array, or to the actual constant, depending on what platform this code
 * is compiled for.
 */

#ifdef sun4v

/*
 * Going to load these consts in 8B chunks, so need to enforce 8B alignment
 */

/* CSTYLED */
#pragma align 64 (md5_consts)
#define _MD5_CHECK_ALIGNMENT

#endif /* sun4v */

static const uint32_t md5_consts[] = {
	MD5_CONST_0, MD5_CONST_1, MD5_CONST_2, MD5_CONST_3,
	MD5_CONST_4, MD5_CONST_5, MD5_CONST_6, MD5_CONST_7,
	MD5_CONST_8, MD5_CONST_9, MD5_CONST_10, MD5_CONST_11,
	MD5_CONST_12, MD5_CONST_13, MD5_CONST_14, MD5_CONST_15,
	MD5_CONST_16, MD5_CONST_17, MD5_CONST_18, MD5_CONST_19,
	MD5_CONST_20, MD5_CONST_21, MD5_CONST_22, MD5_CONST_23,
	MD5_CONST_24, MD5_CONST_25, MD5_CONST_26, MD5_CONST_27,
	MD5_CONST_28, MD5_CONST_29, MD5_CONST_30, MD5_CONST_31,
	MD5_CONST_32, MD5_CONST_33, MD5_CONST_34, MD5_CONST_35,
	MD5_CONST_36, MD5_CONST_37, MD5_CONST_38, MD5_CONST_39,
	MD5_CONST_40, MD5_CONST_41, MD5_CONST_42, MD5_CONST_43,
	MD5_CONST_44, MD5_CONST_45, MD5_CONST_46, MD5_CONST_47,
	MD5_CONST_48, MD5_CONST_49, MD5_CONST_50, MD5_CONST_51,
	MD5_CONST_52, MD5_CONST_53, MD5_CONST_54, MD5_CONST_55,
	MD5_CONST_56, MD5_CONST_57, MD5_CONST_58, MD5_CONST_59,
	MD5_CONST_60, MD5_CONST_61, MD5_CONST_62, MD5_CONST_63
};


#ifdef sun4v
/*
 * To reduce the number of loads, load consts in 64-bit
 * chunks and then split.
 *
 * No need to mask upper 32-bits, as just interested in
 * low 32-bits (saves an & operation and means that this
 * optimization doesn't increase the icount).
 */
#define MD5_CONST_e(x)	(md5_consts64[x/2] >> 32)
#define MD5_CONST_o(x)	(md5_consts64[x/2])

#else

#define MD5_CONST_e(x)	(md5_consts[x])
#define MD5_CONST_o(x)	(md5_consts[x])

#endif /* sun4v */

#endif

/*
 * MD5Init()
 *
 * purpose: initializes the md5 context and begins an md5 digest operation
 * input: MD5_CTX * : the context to initialize.
 * output: void
 */

void
MD5Init(MD5_CTX *ctx)
{
	ctx->count[0] = ctx->count[1] = 0;

	/* load magic initialization constants */
	ctx->state[0] = MD5_INIT_CONST_1;
	ctx->state[1] = MD5_INIT_CONST_2;
	ctx->state[2] = MD5_INIT_CONST_3;
	ctx->state[3] = MD5_INIT_CONST_4;
}

/*
 * MD5Update()
 *
 * purpose: continues an md5 digest operation, using the message block
 *          to update the context.
 * input: MD5_CTX * : the context to update
 *        uint8_t * : the message block
 *        uint32_t : the length of the message block in bytes
 * output: void
 *
 * MD5 crunches in 64-byte blocks. All numeric constants here are related to
 * that property of MD5.
 */

void
MD5Update(MD5_CTX *ctx, const void *inpp, unsigned int input_len)
{
	uint32_t i, buf_index, buf_len;
#ifdef sun4v
	uint32_t old_asi;
#endif /* sun4v */
#if defined(__amd64)
	uint32_t block_count;
#endif /* defined(__amd64) */
	const unsigned char *input = (const unsigned char *)inpp;

	/* compute (number of bytes computed so far) mod 64 */
	buf_index = (ctx->count[0] >> 3) & 0x3F;

	/* update number of bits hashed into this MD5 computation so far */
	if ((ctx->count[0] += (input_len << 3)) < (input_len << 3))
		ctx->count[1]++;
	ctx->count[1] += (input_len >> 29);
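	/*
	 * ctx->count[] is the running bit count kept as a 64-bit value in
	 * two 32-bit words: the `<< 3' above converts bytes to bits, the
	 * comparison detects a carry out of the low word, and
	 * `input_len >> 29' adds the bits that belong in the high word.
	 */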

	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {

		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and MD5Transform() if
		 * buf_index != 0. if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * MD5Update().
		 */

#ifdef sun4v
		/*
		 * For N1 use %asi register. However, costly to repeatedly set
		 * in MD5Transform. Therefore, set once here.
		 * The old value is restored after the transforms below.
		 */
		old_asi = get_little();
		set_little(0x88);
#endif /* sun4v */

		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);

#if !defined(__amd64)
			MD5Transform(ctx->state[0], ctx->state[1],
			    ctx->state[2], ctx->state[3], ctx,
			    ctx->buf_un.buf8);
#else
			md5_block_asm_host_order(ctx, ctx->buf_un.buf8, 1);
#endif /* !defined(__amd64) */

			i = buf_len;
		}

#if !defined(__amd64)
		for (; i + 63 < input_len; i += 64)
			MD5Transform(ctx->state[0], ctx->state[1],
			    ctx->state[2], ctx->state[3], ctx, &input[i]);

#else
		block_count = (input_len - i) >> 6;
		if (block_count > 0) {
			md5_block_asm_host_order(ctx, &input[i], block_count);
			i += block_count << 6;
		}
#endif /* !defined(__amd64) */


#ifdef sun4v
		/*
		 * Restore old %ASI value
		 */
		set_little(old_asi);
#endif /* sun4v */

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this
		 * case will be an expensive nop.
		 */

		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}

/*
 * MD5Final()
 *
 * purpose: ends an md5 digest operation, finalizing the message digest and
 *          zeroing the context.
 * input: uchar_t * : a buffer to store the digest in
 *        : The function actually uses void* because many
 *        : callers pass things other than uchar_t here.
 *        MD5_CTX * : the context to finalize, save, and zero
 * output: void
 */

void
MD5Final(void *digest, MD5_CTX *ctx)
{
	uint8_t bitcount_le[sizeof (ctx->count)];
	uint32_t index = (ctx->count[0] >> 3) & 0x3f;

	/* store bit count, little endian */
	Encode(bitcount_le, ctx->count, sizeof (bitcount_le));

	/* pad out to 56 mod 64 */
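	/*
	 * If fewer than 56 bytes of the current 64-byte block are in use,
	 * pad to offset 56 of this block; otherwise there is no room left
	 * for both the mandatory 0x80 byte and the 8-byte length, so pad
	 * through offset 56 of the next block (120 - index bytes). E.g.
	 * index == 60 pads 60 bytes, leaving 8 bytes for the length.
	 */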
	MD5Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);

	/* append length (before padding) */
	MD5Update(ctx, bitcount_le, sizeof (bitcount_le));

	/* store state in digest */
	Encode(digest, ctx->state, sizeof (ctx->state));

	/* zeroize sensitive information */
	bzero(ctx, sizeof (*ctx));
}

#ifndef _KERNEL

void
md5_calc(unsigned char *output, unsigned char *input, unsigned int inlen)
{
	MD5_CTX context;

	MD5Init(&context);
	MD5Update(&context, input, inlen);
	MD5Final(output, &context);
}

#endif /* !_KERNEL */
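
/*
 * Typical use of the streaming interface above (an illustrative sketch;
 * the sample strings and the 16-byte digest buffer are assumptions of
 * this example, not something defined in this file):
 *
 *	MD5_CTX ctx;
 *	uint8_t digest[16];
 *
 *	MD5Init(&ctx);
 *	MD5Update(&ctx, "abc", 3);
 *	MD5Update(&ctx, "def", 3);
 *	MD5Final(digest, &ctx);
 *
 * This yields the same digest as a single
 * md5_calc(digest, (unsigned char *)"abcdef", 6) call in non-kernel code.
 */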

#if !defined(__amd64)
/*
 * sparc register window optimization:
 *
 * `a', `b', `c', and `d' are passed into MD5Transform explicitly
 * since it increases the number of registers available to the
 * compiler. under this scheme, these variables can be held in
 * %i0 - %i3, which leaves more local and out registers available.
 */

/*
 * MD5Transform()
 *
 * purpose: md5 transformation -- updates the digest based on `block'
 * input: uint32_t : bytes 1 - 4 of the digest
 *        uint32_t : bytes 5 - 8 of the digest
 *        uint32_t : bytes 9 - 12 of the digest
 *        uint32_t : bytes 13 - 16 of the digest
 *        MD5_CTX * : the context to update
 *        uint8_t [64]: the block to use to update the digest
 * output: void
 */

static void
MD5Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
    MD5_CTX *ctx, const uint8_t block[64])
{
	/*
	 * general optimization:
	 *
	 * use individual integers instead of using an array. this is a
	 * win, although the amount it wins by seems to vary quite a bit.
	 */

	register uint32_t x_0, x_1, x_2, x_3, x_4, x_5, x_6, x_7;
	register uint32_t x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15;
#ifdef sun4v
	unsigned long long *md5_consts64;

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	md5_consts64 = (unsigned long long *) md5_consts;
#endif /* sun4v */

	/*
	 * general optimization:
	 *
	 * the compiler (at least SC4.2/5.x) generates better code if
	 * variable use is localized. in this case, swapping the integers in
	 * this order allows `x_0' to be swapped nearest to its first use in
	 * FF(), and likewise for `x_1' and up. note that the compiler
	 * prefers this to doing each swap right before the FF() that
	 * uses it.
	 */

	/*
	 * sparc v9/v8plus optimization:
	 *
	 * if `block' is already aligned on a 4-byte boundary, use the
	 * optimized load_little_32() directly. otherwise, bcopy()
	 * into a buffer that *is* aligned on a 4-byte boundary and
	 * then do the load_little_32() on that buffer. benchmarks
	 * have shown that using the bcopy() is better than loading
	 * the bytes individually and doing the endian-swap by hand.
	 *
	 * even though it's quite tempting to simply do:
	 *
	 * blk = bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
	 *
	 * and only have one set of LOAD_LITTLE_32()'s, the compiler (at least
	 * SC4.2/5.x) *does not* like that, so please resist the urge.
	 */

#ifdef _MD5_CHECK_ALIGNMENT
	if ((uintptr_t)block & 0x3) {		/* not 4-byte aligned? */
		bcopy(block, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));

#ifdef sun4v
		x_15 = LOAD_LITTLE_32_f(ctx->buf_un.buf32);
		x_14 = LOAD_LITTLE_32_e(ctx->buf_un.buf32);
		x_13 = LOAD_LITTLE_32_d(ctx->buf_un.buf32);
		x_12 = LOAD_LITTLE_32_c(ctx->buf_un.buf32);
		x_11 = LOAD_LITTLE_32_b(ctx->buf_un.buf32);
		x_10 = LOAD_LITTLE_32_a(ctx->buf_un.buf32);
		x_9 = LOAD_LITTLE_32_9(ctx->buf_un.buf32);
		x_8 = LOAD_LITTLE_32_8(ctx->buf_un.buf32);
		x_7 = LOAD_LITTLE_32_7(ctx->buf_un.buf32);
		x_6 = LOAD_LITTLE_32_6(ctx->buf_un.buf32);
		x_5 = LOAD_LITTLE_32_5(ctx->buf_un.buf32);
		x_4 = LOAD_LITTLE_32_4(ctx->buf_un.buf32);
		x_3 = LOAD_LITTLE_32_3(ctx->buf_un.buf32);
		x_2 = LOAD_LITTLE_32_2(ctx->buf_un.buf32);
		x_1 = LOAD_LITTLE_32_1(ctx->buf_un.buf32);
		x_0 = LOAD_LITTLE_32_0(ctx->buf_un.buf32);
#else
		x_15 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 15);
		x_14 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 14);
		x_13 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 13);
		x_12 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 12);
		x_11 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 11);
		x_10 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 10);
		x_9 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 9);
		x_8 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 8);
		x_7 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 7);
		x_6 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 6);
		x_5 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 5);
		x_4 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 4);
		x_3 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 3);
		x_2 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 2);
		x_1 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 1);
		x_0 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 0);
#endif /* sun4v */
	} else
#endif
	{

#ifdef sun4v
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_15 = LOAD_LITTLE_32_f(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_14 = LOAD_LITTLE_32_e(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_13 = LOAD_LITTLE_32_d(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_12 = LOAD_LITTLE_32_c(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_11 = LOAD_LITTLE_32_b(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_10 = LOAD_LITTLE_32_a(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_9 = LOAD_LITTLE_32_9(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_8 = LOAD_LITTLE_32_8(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_7 = LOAD_LITTLE_32_7(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_6 = LOAD_LITTLE_32_6(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_5 = LOAD_LITTLE_32_5(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_4 = LOAD_LITTLE_32_4(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_3 = LOAD_LITTLE_32_3(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_2 = LOAD_LITTLE_32_2(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_1 = LOAD_LITTLE_32_1(block);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		x_0 = LOAD_LITTLE_32_0(block);
#else
		x_15 = LOAD_LITTLE_32(block + 60);
		x_14 = LOAD_LITTLE_32(block + 56);
		x_13 = LOAD_LITTLE_32(block + 52);
		x_12 = LOAD_LITTLE_32(block + 48);
		x_11 = LOAD_LITTLE_32(block + 44);
		x_10 = LOAD_LITTLE_32(block + 40);
		x_9 = LOAD_LITTLE_32(block + 36);
		x_8 = LOAD_LITTLE_32(block + 32);
		x_7 = LOAD_LITTLE_32(block + 28);
		x_6 = LOAD_LITTLE_32(block + 24);
		x_5 = LOAD_LITTLE_32(block + 20);
		x_4 = LOAD_LITTLE_32(block + 16);
		x_3 = LOAD_LITTLE_32(block + 12);
		x_2 = LOAD_LITTLE_32(block + 8);
		x_1 = LOAD_LITTLE_32(block + 4);
		x_0 = LOAD_LITTLE_32(block + 0);
#endif /* sun4v */
	}

	/* round 1 */
	FF(a, b, c, d, x_0, MD5_SHIFT_11, MD5_CONST_e(0)); /* 1 */
	FF(d, a, b, c, x_1, MD5_SHIFT_12, MD5_CONST_o(1)); /* 2 */
	FF(c, d, a, b, x_2, MD5_SHIFT_13, MD5_CONST_e(2)); /* 3 */
	FF(b, c, d, a, x_3, MD5_SHIFT_14, MD5_CONST_o(3)); /* 4 */
	FF(a, b, c, d, x_4, MD5_SHIFT_11, MD5_CONST_e(4)); /* 5 */
	FF(d, a, b, c, x_5, MD5_SHIFT_12, MD5_CONST_o(5)); /* 6 */
	FF(c, d, a, b, x_6, MD5_SHIFT_13, MD5_CONST_e(6)); /* 7 */
	FF(b, c, d, a, x_7, MD5_SHIFT_14, MD5_CONST_o(7)); /* 8 */
	FF(a, b, c, d, x_8, MD5_SHIFT_11, MD5_CONST_e(8)); /* 9 */
	FF(d, a, b, c, x_9, MD5_SHIFT_12, MD5_CONST_o(9)); /* 10 */
	FF(c, d, a, b, x_10, MD5_SHIFT_13, MD5_CONST_e(10)); /* 11 */
	FF(b, c, d, a, x_11, MD5_SHIFT_14, MD5_CONST_o(11)); /* 12 */
	FF(a, b, c, d, x_12, MD5_SHIFT_11, MD5_CONST_e(12)); /* 13 */
	FF(d, a, b, c, x_13, MD5_SHIFT_12, MD5_CONST_o(13)); /* 14 */
	FF(c, d, a, b, x_14, MD5_SHIFT_13, MD5_CONST_e(14)); /* 15 */
	FF(b, c, d, a, x_15, MD5_SHIFT_14, MD5_CONST_o(15)); /* 16 */

	/* round 2 */
	GG(a, b, c, d, x_1, MD5_SHIFT_21, MD5_CONST_e(16)); /* 17 */
	GG(d, a, b, c, x_6, MD5_SHIFT_22, MD5_CONST_o(17)); /* 18 */
	GG(c, d, a, b, x_11, MD5_SHIFT_23, MD5_CONST_e(18)); /* 19 */
	GG(b, c, d, a, x_0, MD5_SHIFT_24, MD5_CONST_o(19)); /* 20 */
	GG(a, b, c, d, x_5, MD5_SHIFT_21, MD5_CONST_e(20)); /* 21 */
	GG(d, a, b, c, x_10, MD5_SHIFT_22, MD5_CONST_o(21)); /* 22 */
	GG(c, d, a, b, x_15, MD5_SHIFT_23, MD5_CONST_e(22)); /* 23 */
	GG(b, c, d, a, x_4, MD5_SHIFT_24, MD5_CONST_o(23)); /* 24 */
	GG(a, b, c, d, x_9, MD5_SHIFT_21, MD5_CONST_e(24)); /* 25 */
	GG(d, a, b, c, x_14, MD5_SHIFT_22, MD5_CONST_o(25)); /* 26 */
	GG(c, d, a, b, x_3, MD5_SHIFT_23, MD5_CONST_e(26)); /* 27 */
	GG(b, c, d, a, x_8, MD5_SHIFT_24, MD5_CONST_o(27)); /* 28 */
	GG(a, b, c, d, x_13, MD5_SHIFT_21, MD5_CONST_e(28)); /* 29 */
	GG(d, a, b, c, x_2, MD5_SHIFT_22, MD5_CONST_o(29)); /* 30 */
	GG(c, d, a, b, x_7, MD5_SHIFT_23, MD5_CONST_e(30)); /* 31 */
	GG(b, c, d, a, x_12, MD5_SHIFT_24, MD5_CONST_o(31)); /* 32 */

	/* round 3 */
	HH(a, b, c, d, x_5, MD5_SHIFT_31, MD5_CONST_e(32)); /* 33 */
	HH(d, a, b, c, x_8, MD5_SHIFT_32, MD5_CONST_o(33)); /* 34 */
	HH(c, d, a, b, x_11, MD5_SHIFT_33, MD5_CONST_e(34)); /* 35 */
	HH(b, c, d, a, x_14, MD5_SHIFT_34, MD5_CONST_o(35)); /* 36 */
	HH(a, b, c, d, x_1, MD5_SHIFT_31, MD5_CONST_e(36)); /* 37 */
	HH(d, a, b, c, x_4, MD5_SHIFT_32, MD5_CONST_o(37)); /* 38 */
	HH(c, d, a, b, x_7, MD5_SHIFT_33, MD5_CONST_e(38)); /* 39 */
	HH(b, c, d, a, x_10, MD5_SHIFT_34, MD5_CONST_o(39)); /* 40 */
	HH(a, b, c, d, x_13, MD5_SHIFT_31, MD5_CONST_e(40)); /* 41 */
	HH(d, a, b, c, x_0, MD5_SHIFT_32, MD5_CONST_o(41)); /* 42 */
	HH(c, d, a, b, x_3, MD5_SHIFT_33, MD5_CONST_e(42)); /* 43 */
	HH(b, c, d, a, x_6, MD5_SHIFT_34, MD5_CONST_o(43)); /* 44 */
	HH(a, b, c, d, x_9, MD5_SHIFT_31, MD5_CONST_e(44)); /* 45 */
	HH(d, a, b, c, x_12, MD5_SHIFT_32, MD5_CONST_o(45)); /* 46 */
	HH(c, d, a, b, x_15, MD5_SHIFT_33, MD5_CONST_e(46)); /* 47 */
	HH(b, c, d, a, x_2, MD5_SHIFT_34, MD5_CONST_o(47)); /* 48 */

	/* round 4 */
	II(a, b, c, d, x_0, MD5_SHIFT_41, MD5_CONST_e(48)); /* 49 */
	II(d, a, b, c, x_7, MD5_SHIFT_42, MD5_CONST_o(49)); /* 50 */
	II(c, d, a, b, x_14, MD5_SHIFT_43, MD5_CONST_e(50)); /* 51 */
	II(b, c, d, a, x_5, MD5_SHIFT_44, MD5_CONST_o(51)); /* 52 */
	II(a, b, c, d, x_12, MD5_SHIFT_41, MD5_CONST_e(52)); /* 53 */
	II(d, a, b, c, x_3, MD5_SHIFT_42, MD5_CONST_o(53)); /* 54 */
	II(c, d, a, b, x_10, MD5_SHIFT_43, MD5_CONST_e(54)); /* 55 */
	II(b, c, d, a, x_1, MD5_SHIFT_44, MD5_CONST_o(55)); /* 56 */
	II(a, b, c, d, x_8, MD5_SHIFT_41, MD5_CONST_e(56)); /* 57 */
	II(d, a, b, c, x_15, MD5_SHIFT_42, MD5_CONST_o(57)); /* 58 */
	II(c, d, a, b, x_6, MD5_SHIFT_43, MD5_CONST_e(58)); /* 59 */
	II(b, c, d, a, x_13, MD5_SHIFT_44, MD5_CONST_o(59)); /* 60 */
	II(a, b, c, d, x_4, MD5_SHIFT_41, MD5_CONST_e(60)); /* 61 */
	II(d, a, b, c, x_11, MD5_SHIFT_42, MD5_CONST_o(61)); /* 62 */
	II(c, d, a, b, x_2, MD5_SHIFT_43, MD5_CONST_e(62)); /* 63 */
	II(b, c, d, a, x_9, MD5_SHIFT_44, MD5_CONST_o(63)); /* 64 */

	ctx->state[0] += a;
	ctx->state[1] += b;
	ctx->state[2] += c;
	ctx->state[3] += d;

	/*
	 * zeroize sensitive information -- compiler will optimize
	 * this out if everything is kept in registers
	 */

	x_0 = x_1 = x_2 = x_3 = x_4 = x_5 = x_6 = x_7 = x_8 = 0;
	x_9 = x_10 = x_11 = x_12 = x_13 = x_14 = x_15 = 0;
}
#endif /* !defined(__amd64) */

/*
 * Encode()
 *
 * purpose: to store a list of 32-bit words in little-endian byte order
 * input: uint8_t * : place to store the converted little endian numbers
 *        uint32_t * : place to get numbers to convert from
 *        size_t : the length of the input in bytes
 * output: void
 */

static void
Encode(uint8_t *_RESTRICT_KYWD output, const uint32_t *_RESTRICT_KYWD input,
    size_t input_len)
{
	size_t i, j;

	for (i = 0, j = 0; j < input_len; i++, j += sizeof (uint32_t)) {

#ifdef _LITTLE_ENDIAN

#ifdef _MD5_CHECK_ALIGNMENT
		if ((uintptr_t)output & 0x3)	/* Not 4-byte aligned */
			bcopy(input + i, output + j, 4);
		else *(uint32_t *)(output + j) = input[i];
#else
		/*LINTED E_BAD_PTR_CAST_ALIGN*/
		*(uint32_t *)(output + j) = input[i];
#endif /* _MD5_CHECK_ALIGNMENT */

#else	/* big endian -- will work on little endian, but slowly */

		output[j] = input[i] & 0xff;
		output[j + 1] = (input[i] >> 8) & 0xff;
		output[j + 2] = (input[i] >> 16) & 0xff;
		output[j + 3] = (input[i] >> 24) & 0xff;
#endif
	}
}