xref: /onnv-gate/usr/src/uts/common/crypto/io/md4_mod.c (revision 4002:d12f36b7c388)
1*4002Sdarrenm /*
2*4002Sdarrenm  * CDDL HEADER START
3*4002Sdarrenm  *
4*4002Sdarrenm  * The contents of this file are subject to the terms of the
5*4002Sdarrenm  * Common Development and Distribution License (the "License").
6*4002Sdarrenm  * You may not use this file except in compliance with the License.
7*4002Sdarrenm  *
8*4002Sdarrenm  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*4002Sdarrenm  * or http://www.opensolaris.org/os/licensing.
10*4002Sdarrenm  * See the License for the specific language governing permissions
11*4002Sdarrenm  * and limitations under the License.
12*4002Sdarrenm  *
13*4002Sdarrenm  * When distributing Covered Code, include this CDDL HEADER in each
14*4002Sdarrenm  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*4002Sdarrenm  * If applicable, add the following below this CDDL HEADER, with the
16*4002Sdarrenm  * fields enclosed by brackets "[]" replaced with your own identifying
17*4002Sdarrenm  * information: Portions Copyright [yyyy] [name of copyright owner]
18*4002Sdarrenm  *
19*4002Sdarrenm  * CDDL HEADER END
20*4002Sdarrenm  */
21*4002Sdarrenm 
22*4002Sdarrenm /*
23*4002Sdarrenm  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24*4002Sdarrenm  * Use is subject to license terms.
25*4002Sdarrenm  */
26*4002Sdarrenm #pragma ident	"%Z%%M%	%I%	%E% SMI"
27*4002Sdarrenm 
/*
 * The kernel md4 module is built with a single modlinkage. This differs
 * from the md5 and sha1 modules, which also provide a legacy misc
 * variant for direct calls to the Init/Update/Final routines.
 *
 * - a modlcrypto that allows the module to register with the Kernel
 *   Cryptographic Framework (KCF) as a software provider for the MD4
 *   mechanisms.
 */
37*4002Sdarrenm 
38*4002Sdarrenm #include <sys/types.h>
39*4002Sdarrenm #include <sys/systm.h>
40*4002Sdarrenm #include <sys/modctl.h>
41*4002Sdarrenm #include <sys/cmn_err.h>
42*4002Sdarrenm #include <sys/ddi.h>
43*4002Sdarrenm #include <sys/crypto/common.h>
44*4002Sdarrenm #include <sys/crypto/spi.h>
45*4002Sdarrenm #include <sys/sysmacros.h>
46*4002Sdarrenm #include <sys/strsun.h>
47*4002Sdarrenm #include <sys/note.h>
48*4002Sdarrenm #include <sys/md4.h>
49*4002Sdarrenm 
/* Module-operations vectors supplied by the kernel module framework. */
extern struct mod_ops mod_miscops;	/* NOTE(review): appears unused in this file -- confirm */
extern struct mod_ops mod_cryptoops;
52*4002Sdarrenm 
53*4002Sdarrenm /*
54*4002Sdarrenm  * Module linkage information for the kernel.
55*4002Sdarrenm  */
56*4002Sdarrenm 
/*
 * Crypto-module linkage; the "%I%" is an SCCS keyword expanded to the
 * file revision at build time.
 */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"MD4 Kernel SW Provider %I%"
};
61*4002Sdarrenm 
/* Single linkage structure passed to mod_install()/mod_remove(). */
static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcrypto,
	NULL			/* terminates the linkage list */
};
67*4002Sdarrenm 
68*4002Sdarrenm /*
69*4002Sdarrenm  * CSPI information (entry points, provider info, etc.)
70*4002Sdarrenm  */
71*4002Sdarrenm 
/* Mechanism types supported by this provider (indexes md4_mech_info_tab). */
typedef enum md4_mech_type {
	MD4_MECH_INFO_TYPE,		/* SUN_CKM_MD4 */
} md4_mech_type_t;

#define	MD4_DIGEST_LENGTH	16	/* MD4 digest length in bytes */
77*4002Sdarrenm 
78*4002Sdarrenm /*
79*4002Sdarrenm  * Context for MD4 mechanism.
80*4002Sdarrenm  */
/*
 * Context for MD4 mechanism.  Allocated per digest operation in
 * md4_digest_init() and freed when the operation completes or fails.
 */
typedef struct md4_ctx {
	md4_mech_type_t		mc_mech_type;	/* type of context */
	MD4_CTX			mc_md4_ctx;	/* MD4 context */
} md4_ctx_t;

/*
 * Macros to access the MD4 contexts from a context passed
 * by KCF to one of the entry points.
 */

#define	PROV_MD4_CTX(ctx)	((md4_ctx_t *)(ctx)->cc_provider_private)
92*4002Sdarrenm 
/*
 * Mechanism info structure passed to KCF during registration.
 */
static crypto_mech_info_t md4_mech_info_tab[] = {
	/* MD4: digest-only mechanism, so min/max key sizes are zero */
	{SUN_CKM_MD4, MD4_MECH_INFO_TYPE,
	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
};
102*4002Sdarrenm 
static void md4_provider_status(crypto_provider_handle_t, uint_t *);

/* Control entry points vector handed to KCF via md4_crypto_ops. */
static crypto_control_ops_t md4_control_ops = {
	md4_provider_status
};
108*4002Sdarrenm 
static int md4_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_req_handle_t);
static int md4_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);

/* Digest entry points vector registered with KCF. */
static crypto_digest_ops_t md4_digest_ops = {
	md4_digest_init,
	md4_digest,
	md4_digest_update,
	NULL,		/* unimplemented slot -- presumably digest_key; verify against spi.h */
	md4_digest_final,
	md4_digest_atomic
};
129*4002Sdarrenm 
/*
 * Full operations vector: only the control and digest op families are
 * implemented by this provider; all other families are NULL.
 */
static crypto_ops_t md4_crypto_ops = {
	&md4_control_ops,
	&md4_digest_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
};
146*4002Sdarrenm 
/* Provider descriptor registered with KCF in _init(). */
static crypto_provider_info_t md4_prov_info = {
	CRYPTO_SPI_VERSION_1,
	"MD4 Software Provider",
	CRYPTO_SW_PROVIDER,
	{&modlinkage},
	NULL,
	&md4_crypto_ops,
	sizeof (md4_mech_info_tab)/sizeof (crypto_mech_info_t),
	md4_mech_info_tab
};

/* KCF provider handle; non-NULL iff registration in _init() succeeded. */
static crypto_kcf_provider_handle_t md4_prov_handle = NULL;
159*4002Sdarrenm 
160*4002Sdarrenm int
161*4002Sdarrenm _init(void)
162*4002Sdarrenm {
163*4002Sdarrenm 	int ret;
164*4002Sdarrenm 
165*4002Sdarrenm 	if ((ret = mod_install(&modlinkage)) != 0)
166*4002Sdarrenm 		return (ret);
167*4002Sdarrenm 
168*4002Sdarrenm 	/*
169*4002Sdarrenm 	 * Register with KCF. If the registration fails, log an
170*4002Sdarrenm 	 * error and uninstall the module.
171*4002Sdarrenm 	 */
172*4002Sdarrenm 	if ((ret = crypto_register_provider(&md4_prov_info,
173*4002Sdarrenm 	    &md4_prov_handle)) != CRYPTO_SUCCESS) {
174*4002Sdarrenm 		cmn_err(CE_WARN, "md4 _init: "
175*4002Sdarrenm 		    "crypto_register_provider() failed (0x%x)", ret);
176*4002Sdarrenm 		(void) mod_remove(&modlinkage);
177*4002Sdarrenm 		return (ret);
178*4002Sdarrenm 	}
179*4002Sdarrenm 
180*4002Sdarrenm 	return (0);
181*4002Sdarrenm }
182*4002Sdarrenm 
183*4002Sdarrenm int
184*4002Sdarrenm _fini(void)
185*4002Sdarrenm {
186*4002Sdarrenm 	int ret;
187*4002Sdarrenm 
188*4002Sdarrenm 	/*
189*4002Sdarrenm 	 * Unregister from KCF if previous registration succeeded.
190*4002Sdarrenm 	 */
191*4002Sdarrenm 	if (md4_prov_handle != NULL) {
192*4002Sdarrenm 		if ((ret = crypto_unregister_provider(md4_prov_handle)) !=
193*4002Sdarrenm 		    CRYPTO_SUCCESS) {
194*4002Sdarrenm 			cmn_err(CE_WARN, "md4 _fini: "
195*4002Sdarrenm 			    "crypto_unregister_provider() failed (0x%x)", ret);
196*4002Sdarrenm 			return (EBUSY);
197*4002Sdarrenm 		}
198*4002Sdarrenm 		md4_prov_handle = NULL;
199*4002Sdarrenm 	}
200*4002Sdarrenm 
201*4002Sdarrenm 	return (mod_remove(&modlinkage));
202*4002Sdarrenm }
203*4002Sdarrenm 
/* Report module information via the standard modinfo mechanism. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
209*4002Sdarrenm 
/*
 * KCF software provider control entry points.
 */
/* ARGSUSED */
static void
md4_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	/* A software provider has no hardware to fail; always ready. */
	*status = CRYPTO_PROVIDER_READY;
}
219*4002Sdarrenm 
220*4002Sdarrenm /*
221*4002Sdarrenm  * KCF software provider digest entry points.
222*4002Sdarrenm  */
223*4002Sdarrenm 
224*4002Sdarrenm static int
225*4002Sdarrenm md4_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
226*4002Sdarrenm     crypto_req_handle_t req)
227*4002Sdarrenm {
228*4002Sdarrenm 	if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
229*4002Sdarrenm 		return (CRYPTO_MECHANISM_INVALID);
230*4002Sdarrenm 
231*4002Sdarrenm 	/*
232*4002Sdarrenm 	 * Allocate and initialize MD4 context.
233*4002Sdarrenm 	 */
234*4002Sdarrenm 	ctx->cc_provider_private = kmem_alloc(sizeof (md4_ctx_t),
235*4002Sdarrenm 	    crypto_kmflag(req));
236*4002Sdarrenm 	if (ctx->cc_provider_private == NULL)
237*4002Sdarrenm 		return (CRYPTO_HOST_MEMORY);
238*4002Sdarrenm 
239*4002Sdarrenm 	PROV_MD4_CTX(ctx)->mc_mech_type = MD4_MECH_INFO_TYPE;
240*4002Sdarrenm 	MD4Init(&PROV_MD4_CTX(ctx)->mc_md4_ctx);
241*4002Sdarrenm 
242*4002Sdarrenm 	return (CRYPTO_SUCCESS);
243*4002Sdarrenm }
244*4002Sdarrenm 
245*4002Sdarrenm /*
246*4002Sdarrenm  * Helper MD4 digest update function for uio data.
247*4002Sdarrenm  */
248*4002Sdarrenm static int
249*4002Sdarrenm md4_digest_update_uio(MD4_CTX *md4_ctx, crypto_data_t *data)
250*4002Sdarrenm {
251*4002Sdarrenm 	off_t offset = data->cd_offset;
252*4002Sdarrenm 	size_t length = data->cd_length;
253*4002Sdarrenm 	uint_t vec_idx;
254*4002Sdarrenm 	size_t cur_len;
255*4002Sdarrenm 
256*4002Sdarrenm 	/* we support only kernel buffer */
257*4002Sdarrenm 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
258*4002Sdarrenm 		return (CRYPTO_ARGUMENTS_BAD);
259*4002Sdarrenm 
260*4002Sdarrenm 	/*
261*4002Sdarrenm 	 * Jump to the first iovec containing data to be
262*4002Sdarrenm 	 * digested.
263*4002Sdarrenm 	 */
264*4002Sdarrenm 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
265*4002Sdarrenm 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
266*4002Sdarrenm 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len);
267*4002Sdarrenm 	if (vec_idx == data->cd_uio->uio_iovcnt) {
268*4002Sdarrenm 		/*
269*4002Sdarrenm 		 * The caller specified an offset that is larger than the
270*4002Sdarrenm 		 * total size of the buffers it provided.
271*4002Sdarrenm 		 */
272*4002Sdarrenm 		return (CRYPTO_DATA_LEN_RANGE);
273*4002Sdarrenm 	}
274*4002Sdarrenm 
275*4002Sdarrenm 	/*
276*4002Sdarrenm 	 * Now do the digesting on the iovecs.
277*4002Sdarrenm 	 */
278*4002Sdarrenm 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
279*4002Sdarrenm 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
280*4002Sdarrenm 		    offset, length);
281*4002Sdarrenm 
282*4002Sdarrenm 		MD4Update(md4_ctx, data->cd_uio->uio_iov[vec_idx].iov_base +
283*4002Sdarrenm 		    offset, cur_len);
284*4002Sdarrenm 
285*4002Sdarrenm 		length -= cur_len;
286*4002Sdarrenm 		vec_idx++;
287*4002Sdarrenm 		offset = 0;
288*4002Sdarrenm 	}
289*4002Sdarrenm 
290*4002Sdarrenm 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
291*4002Sdarrenm 		/*
292*4002Sdarrenm 		 * The end of the specified iovec's was reached but
293*4002Sdarrenm 		 * the length requested could not be processed, i.e.
294*4002Sdarrenm 		 * The caller requested to digest more data than it provided.
295*4002Sdarrenm 		 */
296*4002Sdarrenm 		return (CRYPTO_DATA_LEN_RANGE);
297*4002Sdarrenm 	}
298*4002Sdarrenm 
299*4002Sdarrenm 	return (CRYPTO_SUCCESS);
300*4002Sdarrenm }
301*4002Sdarrenm 
302*4002Sdarrenm /*
303*4002Sdarrenm  * Helper MD4 digest final function for uio data.
304*4002Sdarrenm  * digest_len is the length of the desired digest. If digest_len
305*4002Sdarrenm  * is smaller than the default MD4 digest length, the caller
306*4002Sdarrenm  * must pass a scratch buffer, digest_scratch, which must
307*4002Sdarrenm  * be at least MD4_DIGEST_LENGTH bytes.
308*4002Sdarrenm  */
309*4002Sdarrenm static int
310*4002Sdarrenm md4_digest_final_uio(MD4_CTX *md4_ctx, crypto_data_t *digest,
311*4002Sdarrenm     ulong_t digest_len, uchar_t *digest_scratch)
312*4002Sdarrenm {
313*4002Sdarrenm 	off_t offset = digest->cd_offset;
314*4002Sdarrenm 	uint_t vec_idx;
315*4002Sdarrenm 
316*4002Sdarrenm 	/* we support only kernel buffer */
317*4002Sdarrenm 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
318*4002Sdarrenm 		return (CRYPTO_ARGUMENTS_BAD);
319*4002Sdarrenm 
320*4002Sdarrenm 	/*
321*4002Sdarrenm 	 * Jump to the first iovec containing ptr to the digest to
322*4002Sdarrenm 	 * be returned.
323*4002Sdarrenm 	 */
324*4002Sdarrenm 	for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
325*4002Sdarrenm 	    vec_idx < digest->cd_uio->uio_iovcnt;
326*4002Sdarrenm 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len);
327*4002Sdarrenm 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
328*4002Sdarrenm 		/*
329*4002Sdarrenm 		 * The caller specified an offset that is
330*4002Sdarrenm 		 * larger than the total size of the buffers
331*4002Sdarrenm 		 * it provided.
332*4002Sdarrenm 		 */
333*4002Sdarrenm 		return (CRYPTO_DATA_LEN_RANGE);
334*4002Sdarrenm 	}
335*4002Sdarrenm 
336*4002Sdarrenm 	if (offset + digest_len <=
337*4002Sdarrenm 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
338*4002Sdarrenm 		/*
339*4002Sdarrenm 		 * The computed MD4 digest will fit in the current
340*4002Sdarrenm 		 * iovec.
341*4002Sdarrenm 		 */
342*4002Sdarrenm 		if (digest_len != MD4_DIGEST_LENGTH) {
343*4002Sdarrenm 			/*
344*4002Sdarrenm 			 * The caller requested a short digest. Digest
345*4002Sdarrenm 			 * into a scratch buffer and return to
346*4002Sdarrenm 			 * the user only what was requested.
347*4002Sdarrenm 			 */
348*4002Sdarrenm 			MD4Final(digest_scratch, md4_ctx);
349*4002Sdarrenm 			bcopy(digest_scratch, (uchar_t *)digest->
350*4002Sdarrenm 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
351*4002Sdarrenm 			    digest_len);
352*4002Sdarrenm 		} else {
353*4002Sdarrenm 			MD4Final((uchar_t *)digest->
354*4002Sdarrenm 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
355*4002Sdarrenm 			    md4_ctx);
356*4002Sdarrenm 		}
357*4002Sdarrenm 	} else {
358*4002Sdarrenm 		/*
359*4002Sdarrenm 		 * The computed digest will be crossing one or more iovec's.
360*4002Sdarrenm 		 * This is bad performance-wise but we need to support it.
361*4002Sdarrenm 		 * Allocate a small scratch buffer on the stack and
362*4002Sdarrenm 		 * copy it piece meal to the specified digest iovec's.
363*4002Sdarrenm 		 */
364*4002Sdarrenm 		uchar_t digest_tmp[MD4_DIGEST_LENGTH];
365*4002Sdarrenm 		off_t scratch_offset = 0;
366*4002Sdarrenm 		size_t length = digest_len;
367*4002Sdarrenm 		size_t cur_len;
368*4002Sdarrenm 
369*4002Sdarrenm 		MD4Final(digest_tmp, md4_ctx);
370*4002Sdarrenm 
371*4002Sdarrenm 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
372*4002Sdarrenm 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
373*4002Sdarrenm 			    offset, length);
374*4002Sdarrenm 			bcopy(digest_tmp + scratch_offset,
375*4002Sdarrenm 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
376*4002Sdarrenm 			    cur_len);
377*4002Sdarrenm 
378*4002Sdarrenm 			length -= cur_len;
379*4002Sdarrenm 			vec_idx++;
380*4002Sdarrenm 			scratch_offset += cur_len;
381*4002Sdarrenm 			offset = 0;
382*4002Sdarrenm 		}
383*4002Sdarrenm 
384*4002Sdarrenm 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
385*4002Sdarrenm 			/*
386*4002Sdarrenm 			 * The end of the specified iovec's was reached but
387*4002Sdarrenm 			 * the length requested could not be processed, i.e.
388*4002Sdarrenm 			 * The caller requested to digest more data than it
389*4002Sdarrenm 			 * provided.
390*4002Sdarrenm 			 */
391*4002Sdarrenm 			return (CRYPTO_DATA_LEN_RANGE);
392*4002Sdarrenm 		}
393*4002Sdarrenm 	}
394*4002Sdarrenm 
395*4002Sdarrenm 	return (CRYPTO_SUCCESS);
396*4002Sdarrenm }
397*4002Sdarrenm 
398*4002Sdarrenm /*
399*4002Sdarrenm  * Helper MD4 digest update for mblk's.
400*4002Sdarrenm  */
401*4002Sdarrenm static int
402*4002Sdarrenm md4_digest_update_mblk(MD4_CTX *md4_ctx, crypto_data_t *data)
403*4002Sdarrenm {
404*4002Sdarrenm 	off_t offset = data->cd_offset;
405*4002Sdarrenm 	size_t length = data->cd_length;
406*4002Sdarrenm 	mblk_t *mp;
407*4002Sdarrenm 	size_t cur_len;
408*4002Sdarrenm 
409*4002Sdarrenm 	/*
410*4002Sdarrenm 	 * Jump to the first mblk_t containing data to be digested.
411*4002Sdarrenm 	 */
412*4002Sdarrenm 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
413*4002Sdarrenm 	    offset -= MBLKL(mp), mp = mp->b_cont);
414*4002Sdarrenm 	if (mp == NULL) {
415*4002Sdarrenm 		/*
416*4002Sdarrenm 		 * The caller specified an offset that is larger than the
417*4002Sdarrenm 		 * total size of the buffers it provided.
418*4002Sdarrenm 		 */
419*4002Sdarrenm 		return (CRYPTO_DATA_LEN_RANGE);
420*4002Sdarrenm 	}
421*4002Sdarrenm 
422*4002Sdarrenm 	/*
423*4002Sdarrenm 	 * Now do the digesting on the mblk chain.
424*4002Sdarrenm 	 */
425*4002Sdarrenm 	while (mp != NULL && length > 0) {
426*4002Sdarrenm 		cur_len = MIN(MBLKL(mp) - offset, length);
427*4002Sdarrenm 		MD4Update(md4_ctx, mp->b_rptr + offset, cur_len);
428*4002Sdarrenm 		length -= cur_len;
429*4002Sdarrenm 		offset = 0;
430*4002Sdarrenm 		mp = mp->b_cont;
431*4002Sdarrenm 	}
432*4002Sdarrenm 
433*4002Sdarrenm 	if (mp == NULL && length > 0) {
434*4002Sdarrenm 		/*
435*4002Sdarrenm 		 * The end of the mblk was reached but the length requested
436*4002Sdarrenm 		 * could not be processed, i.e. The caller requested
437*4002Sdarrenm 		 * to digest more data than it provided.
438*4002Sdarrenm 		 */
439*4002Sdarrenm 		return (CRYPTO_DATA_LEN_RANGE);
440*4002Sdarrenm 	}
441*4002Sdarrenm 
442*4002Sdarrenm 	return (CRYPTO_SUCCESS);
443*4002Sdarrenm }
444*4002Sdarrenm 
445*4002Sdarrenm /*
446*4002Sdarrenm  * Helper MD4 digest final for mblk's.
447*4002Sdarrenm  * digest_len is the length of the desired digest. If digest_len
448*4002Sdarrenm  * is smaller than the default MD4 digest length, the caller
449*4002Sdarrenm  * must pass a scratch buffer, digest_scratch, which must
450*4002Sdarrenm  * be at least MD4_DIGEST_LENGTH bytes.
451*4002Sdarrenm  */
452*4002Sdarrenm static int
453*4002Sdarrenm md4_digest_final_mblk(MD4_CTX *md4_ctx, crypto_data_t *digest,
454*4002Sdarrenm     ulong_t digest_len, uchar_t *digest_scratch)
455*4002Sdarrenm {
456*4002Sdarrenm 	off_t offset = digest->cd_offset;
457*4002Sdarrenm 	mblk_t *mp;
458*4002Sdarrenm 
459*4002Sdarrenm 	/*
460*4002Sdarrenm 	 * Jump to the first mblk_t that will be used to store the digest.
461*4002Sdarrenm 	 */
462*4002Sdarrenm 	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
463*4002Sdarrenm 	    offset -= MBLKL(mp), mp = mp->b_cont);
464*4002Sdarrenm 	if (mp == NULL) {
465*4002Sdarrenm 		/*
466*4002Sdarrenm 		 * The caller specified an offset that is larger than the
467*4002Sdarrenm 		 * total size of the buffers it provided.
468*4002Sdarrenm 		 */
469*4002Sdarrenm 		return (CRYPTO_DATA_LEN_RANGE);
470*4002Sdarrenm 	}
471*4002Sdarrenm 
472*4002Sdarrenm 	if (offset + digest_len <= MBLKL(mp)) {
473*4002Sdarrenm 		/*
474*4002Sdarrenm 		 * The computed MD4 digest will fit in the current mblk.
475*4002Sdarrenm 		 * Do the MD4Final() in-place.
476*4002Sdarrenm 		 */
477*4002Sdarrenm 		if (digest_len != MD4_DIGEST_LENGTH) {
478*4002Sdarrenm 			/*
479*4002Sdarrenm 			 * The caller requested a short digest. Digest
480*4002Sdarrenm 			 * into a scratch buffer and return to
481*4002Sdarrenm 			 * the user only what was requested.
482*4002Sdarrenm 			 */
483*4002Sdarrenm 			MD4Final(digest_scratch, md4_ctx);
484*4002Sdarrenm 			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
485*4002Sdarrenm 		} else {
486*4002Sdarrenm 			MD4Final(mp->b_rptr + offset, md4_ctx);
487*4002Sdarrenm 		}
488*4002Sdarrenm 	} else {
489*4002Sdarrenm 		/*
490*4002Sdarrenm 		 * The computed digest will be crossing one or more mblk's.
491*4002Sdarrenm 		 * This is bad performance-wise but we need to support it.
492*4002Sdarrenm 		 * Allocate a small scratch buffer on the stack and
493*4002Sdarrenm 		 * copy it piece meal to the specified digest iovec's.
494*4002Sdarrenm 		 */
495*4002Sdarrenm 		uchar_t digest_tmp[MD4_DIGEST_LENGTH];
496*4002Sdarrenm 		off_t scratch_offset = 0;
497*4002Sdarrenm 		size_t length = digest_len;
498*4002Sdarrenm 		size_t cur_len;
499*4002Sdarrenm 
500*4002Sdarrenm 		MD4Final(digest_tmp, md4_ctx);
501*4002Sdarrenm 
502*4002Sdarrenm 		while (mp != NULL && length > 0) {
503*4002Sdarrenm 			cur_len = MIN(MBLKL(mp) - offset, length);
504*4002Sdarrenm 			bcopy(digest_tmp + scratch_offset,
505*4002Sdarrenm 			    mp->b_rptr + offset, cur_len);
506*4002Sdarrenm 
507*4002Sdarrenm 			length -= cur_len;
508*4002Sdarrenm 			mp = mp->b_cont;
509*4002Sdarrenm 			scratch_offset += cur_len;
510*4002Sdarrenm 			offset = 0;
511*4002Sdarrenm 		}
512*4002Sdarrenm 
513*4002Sdarrenm 		if (mp == NULL && length > 0) {
514*4002Sdarrenm 			/*
515*4002Sdarrenm 			 * The end of the specified mblk was reached but
516*4002Sdarrenm 			 * the length requested could not be processed, i.e.
517*4002Sdarrenm 			 * The caller requested to digest more data than it
518*4002Sdarrenm 			 * provided.
519*4002Sdarrenm 			 */
520*4002Sdarrenm 			return (CRYPTO_DATA_LEN_RANGE);
521*4002Sdarrenm 		}
522*4002Sdarrenm 	}
523*4002Sdarrenm 
524*4002Sdarrenm 	return (CRYPTO_SUCCESS);
525*4002Sdarrenm }
526*4002Sdarrenm 
527*4002Sdarrenm /* ARGSUSED */
528*4002Sdarrenm static int
529*4002Sdarrenm md4_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
530*4002Sdarrenm     crypto_req_handle_t req)
531*4002Sdarrenm {
532*4002Sdarrenm 	int ret = CRYPTO_SUCCESS;
533*4002Sdarrenm 
534*4002Sdarrenm 	ASSERT(ctx->cc_provider_private != NULL);
535*4002Sdarrenm 
536*4002Sdarrenm 	/*
537*4002Sdarrenm 	 * We need to just return the length needed to store the output.
538*4002Sdarrenm 	 * We should not destroy the context for the following cases.
539*4002Sdarrenm 	 */
540*4002Sdarrenm 	if ((digest->cd_length == 0) ||
541*4002Sdarrenm 	    (digest->cd_length < MD4_DIGEST_LENGTH)) {
542*4002Sdarrenm 		digest->cd_length = MD4_DIGEST_LENGTH;
543*4002Sdarrenm 		return (CRYPTO_BUFFER_TOO_SMALL);
544*4002Sdarrenm 	}
545*4002Sdarrenm 
546*4002Sdarrenm 	/*
547*4002Sdarrenm 	 * Do the MD4 update on the specified input data.
548*4002Sdarrenm 	 */
549*4002Sdarrenm 	switch (data->cd_format) {
550*4002Sdarrenm 	case CRYPTO_DATA_RAW:
551*4002Sdarrenm 		MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
552*4002Sdarrenm 		    data->cd_raw.iov_base + data->cd_offset,
553*4002Sdarrenm 		    data->cd_length);
554*4002Sdarrenm 		break;
555*4002Sdarrenm 	case CRYPTO_DATA_UIO:
556*4002Sdarrenm 		ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
557*4002Sdarrenm 		    data);
558*4002Sdarrenm 		break;
559*4002Sdarrenm 	case CRYPTO_DATA_MBLK:
560*4002Sdarrenm 		ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
561*4002Sdarrenm 		    data);
562*4002Sdarrenm 		break;
563*4002Sdarrenm 	default:
564*4002Sdarrenm 		ret = CRYPTO_ARGUMENTS_BAD;
565*4002Sdarrenm 	}
566*4002Sdarrenm 
567*4002Sdarrenm 	if (ret != CRYPTO_SUCCESS) {
568*4002Sdarrenm 		/* the update failed, free context and bail */
569*4002Sdarrenm 		kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
570*4002Sdarrenm 		ctx->cc_provider_private = NULL;
571*4002Sdarrenm 		digest->cd_length = 0;
572*4002Sdarrenm 		return (ret);
573*4002Sdarrenm 	}
574*4002Sdarrenm 
575*4002Sdarrenm 	/*
576*4002Sdarrenm 	 * Do an MD4 final, must be done separately since the digest
577*4002Sdarrenm 	 * type can be different than the input data type.
578*4002Sdarrenm 	 */
579*4002Sdarrenm 	switch (digest->cd_format) {
580*4002Sdarrenm 	case CRYPTO_DATA_RAW:
581*4002Sdarrenm 		MD4Final((unsigned char *)digest->cd_raw.iov_base +
582*4002Sdarrenm 		    digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
583*4002Sdarrenm 		break;
584*4002Sdarrenm 	case CRYPTO_DATA_UIO:
585*4002Sdarrenm 		ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
586*4002Sdarrenm 		    digest, MD4_DIGEST_LENGTH, NULL);
587*4002Sdarrenm 		break;
588*4002Sdarrenm 	case CRYPTO_DATA_MBLK:
589*4002Sdarrenm 		ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
590*4002Sdarrenm 		    digest, MD4_DIGEST_LENGTH, NULL);
591*4002Sdarrenm 		break;
592*4002Sdarrenm 	default:
593*4002Sdarrenm 		ret = CRYPTO_ARGUMENTS_BAD;
594*4002Sdarrenm 	}
595*4002Sdarrenm 
596*4002Sdarrenm 	/* all done, free context and return */
597*4002Sdarrenm 
598*4002Sdarrenm 	if (ret == CRYPTO_SUCCESS) {
599*4002Sdarrenm 		digest->cd_length = MD4_DIGEST_LENGTH;
600*4002Sdarrenm 	} else {
601*4002Sdarrenm 		digest->cd_length = 0;
602*4002Sdarrenm 	}
603*4002Sdarrenm 
604*4002Sdarrenm 	kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
605*4002Sdarrenm 	ctx->cc_provider_private = NULL;
606*4002Sdarrenm 	return (ret);
607*4002Sdarrenm }
608*4002Sdarrenm 
609*4002Sdarrenm /* ARGSUSED */
610*4002Sdarrenm static int
611*4002Sdarrenm md4_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
612*4002Sdarrenm     crypto_req_handle_t req)
613*4002Sdarrenm {
614*4002Sdarrenm 	int ret = CRYPTO_SUCCESS;
615*4002Sdarrenm 
616*4002Sdarrenm 	ASSERT(ctx->cc_provider_private != NULL);
617*4002Sdarrenm 
618*4002Sdarrenm 	/*
619*4002Sdarrenm 	 * Do the MD4 update on the specified input data.
620*4002Sdarrenm 	 */
621*4002Sdarrenm 	switch (data->cd_format) {
622*4002Sdarrenm 	case CRYPTO_DATA_RAW:
623*4002Sdarrenm 		MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
624*4002Sdarrenm 		    data->cd_raw.iov_base + data->cd_offset,
625*4002Sdarrenm 		    data->cd_length);
626*4002Sdarrenm 		break;
627*4002Sdarrenm 	case CRYPTO_DATA_UIO:
628*4002Sdarrenm 		ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
629*4002Sdarrenm 		    data);
630*4002Sdarrenm 		break;
631*4002Sdarrenm 	case CRYPTO_DATA_MBLK:
632*4002Sdarrenm 		ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
633*4002Sdarrenm 		    data);
634*4002Sdarrenm 		break;
635*4002Sdarrenm 	default:
636*4002Sdarrenm 		ret = CRYPTO_ARGUMENTS_BAD;
637*4002Sdarrenm 	}
638*4002Sdarrenm 
639*4002Sdarrenm 	return (ret);
640*4002Sdarrenm }
641*4002Sdarrenm 
642*4002Sdarrenm /* ARGSUSED */
643*4002Sdarrenm static int
644*4002Sdarrenm md4_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
645*4002Sdarrenm     crypto_req_handle_t req)
646*4002Sdarrenm {
647*4002Sdarrenm 	int ret = CRYPTO_SUCCESS;
648*4002Sdarrenm 
649*4002Sdarrenm 	ASSERT(ctx->cc_provider_private != NULL);
650*4002Sdarrenm 
651*4002Sdarrenm 	/*
652*4002Sdarrenm 	 * We need to just return the length needed to store the output.
653*4002Sdarrenm 	 * We should not destroy the context for the following cases.
654*4002Sdarrenm 	 */
655*4002Sdarrenm 	if ((digest->cd_length == 0) ||
656*4002Sdarrenm 	    (digest->cd_length < MD4_DIGEST_LENGTH)) {
657*4002Sdarrenm 		digest->cd_length = MD4_DIGEST_LENGTH;
658*4002Sdarrenm 		return (CRYPTO_BUFFER_TOO_SMALL);
659*4002Sdarrenm 	}
660*4002Sdarrenm 
661*4002Sdarrenm 	/*
662*4002Sdarrenm 	 * Do an MD4 final.
663*4002Sdarrenm 	 */
664*4002Sdarrenm 	switch (digest->cd_format) {
665*4002Sdarrenm 	case CRYPTO_DATA_RAW:
666*4002Sdarrenm 		MD4Final((unsigned char *)digest->cd_raw.iov_base +
667*4002Sdarrenm 		    digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
668*4002Sdarrenm 		break;
669*4002Sdarrenm 	case CRYPTO_DATA_UIO:
670*4002Sdarrenm 		ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
671*4002Sdarrenm 		    digest, MD4_DIGEST_LENGTH, NULL);
672*4002Sdarrenm 		break;
673*4002Sdarrenm 	case CRYPTO_DATA_MBLK:
674*4002Sdarrenm 		ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
675*4002Sdarrenm 		    digest, MD4_DIGEST_LENGTH, NULL);
676*4002Sdarrenm 		break;
677*4002Sdarrenm 	default:
678*4002Sdarrenm 		ret = CRYPTO_ARGUMENTS_BAD;
679*4002Sdarrenm 	}
680*4002Sdarrenm 
681*4002Sdarrenm 	/* all done, free context and return */
682*4002Sdarrenm 
683*4002Sdarrenm 	if (ret == CRYPTO_SUCCESS) {
684*4002Sdarrenm 		digest->cd_length = MD4_DIGEST_LENGTH;
685*4002Sdarrenm 	} else {
686*4002Sdarrenm 		digest->cd_length = 0;
687*4002Sdarrenm 	}
688*4002Sdarrenm 
689*4002Sdarrenm 	kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
690*4002Sdarrenm 	ctx->cc_provider_private = NULL;
691*4002Sdarrenm 
692*4002Sdarrenm 	return (ret);
693*4002Sdarrenm }
694*4002Sdarrenm 
695*4002Sdarrenm /* ARGSUSED */
696*4002Sdarrenm static int
697*4002Sdarrenm md4_digest_atomic(crypto_provider_handle_t provider,
698*4002Sdarrenm     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
699*4002Sdarrenm     crypto_data_t *data, crypto_data_t *digest,
700*4002Sdarrenm     crypto_req_handle_t req)
701*4002Sdarrenm {
702*4002Sdarrenm 	int ret = CRYPTO_SUCCESS;
703*4002Sdarrenm 	MD4_CTX md4_ctx;
704*4002Sdarrenm 
705*4002Sdarrenm 	if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
706*4002Sdarrenm 		return (CRYPTO_MECHANISM_INVALID);
707*4002Sdarrenm 
708*4002Sdarrenm 	/*
709*4002Sdarrenm 	 * Do the MD4 init.
710*4002Sdarrenm 	 */
711*4002Sdarrenm 	MD4Init(&md4_ctx);
712*4002Sdarrenm 
713*4002Sdarrenm 	/*
714*4002Sdarrenm 	 * Do the MD4 update on the specified input data.
715*4002Sdarrenm 	 */
716*4002Sdarrenm 	switch (data->cd_format) {
717*4002Sdarrenm 	case CRYPTO_DATA_RAW:
718*4002Sdarrenm 		MD4Update(&md4_ctx, data->cd_raw.iov_base + data->cd_offset,
719*4002Sdarrenm 		    data->cd_length);
720*4002Sdarrenm 		break;
721*4002Sdarrenm 	case CRYPTO_DATA_UIO:
722*4002Sdarrenm 		ret = md4_digest_update_uio(&md4_ctx, data);
723*4002Sdarrenm 		break;
724*4002Sdarrenm 	case CRYPTO_DATA_MBLK:
725*4002Sdarrenm 		ret = md4_digest_update_mblk(&md4_ctx, data);
726*4002Sdarrenm 		break;
727*4002Sdarrenm 	default:
728*4002Sdarrenm 		ret = CRYPTO_ARGUMENTS_BAD;
729*4002Sdarrenm 	}
730*4002Sdarrenm 
731*4002Sdarrenm 	if (ret != CRYPTO_SUCCESS) {
732*4002Sdarrenm 		/* the update failed, bail */
733*4002Sdarrenm 		digest->cd_length = 0;
734*4002Sdarrenm 		return (ret);
735*4002Sdarrenm 	}
736*4002Sdarrenm 
737*4002Sdarrenm 	/*
738*4002Sdarrenm 	 * Do an MD4 final, must be done separately since the digest
739*4002Sdarrenm 	 * type can be different than the input data type.
740*4002Sdarrenm 	 */
741*4002Sdarrenm 	switch (digest->cd_format) {
742*4002Sdarrenm 	case CRYPTO_DATA_RAW:
743*4002Sdarrenm 		MD4Final((unsigned char *)digest->cd_raw.iov_base +
744*4002Sdarrenm 		    digest->cd_offset, &md4_ctx);
745*4002Sdarrenm 		break;
746*4002Sdarrenm 	case CRYPTO_DATA_UIO:
747*4002Sdarrenm 		ret = md4_digest_final_uio(&md4_ctx, digest,
748*4002Sdarrenm 		    MD4_DIGEST_LENGTH, NULL);
749*4002Sdarrenm 		break;
750*4002Sdarrenm 	case CRYPTO_DATA_MBLK:
751*4002Sdarrenm 		ret = md4_digest_final_mblk(&md4_ctx, digest,
752*4002Sdarrenm 		    MD4_DIGEST_LENGTH, NULL);
753*4002Sdarrenm 		break;
754*4002Sdarrenm 	default:
755*4002Sdarrenm 		ret = CRYPTO_ARGUMENTS_BAD;
756*4002Sdarrenm 	}
757*4002Sdarrenm 
758*4002Sdarrenm 	if (ret == CRYPTO_SUCCESS) {
759*4002Sdarrenm 		digest->cd_length = MD4_DIGEST_LENGTH;
760*4002Sdarrenm 	} else {
761*4002Sdarrenm 		digest->cd_length = 0;
762*4002Sdarrenm 	}
763*4002Sdarrenm 
764*4002Sdarrenm 	return (ret);
765*4002Sdarrenm }
766