1 /* $NetBSD: cgd.c,v 1.73 2011/06/12 03:35:51 rmind Exp $ */
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Roland C. Dowdeswell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.73 2011/06/12 03:35:51 rmind Exp $");
34 
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/buf.h>
41 #include <sys/bufq.h>
42 #include <sys/malloc.h>
43 #include <sys/pool.h>
44 #include <sys/ioctl.h>
45 #include <sys/device.h>
46 #include <sys/disk.h>
47 #include <sys/disklabel.h>
48 #include <sys/fcntl.h>
49 #include <sys/namei.h> /* for pathbuf */
50 #include <sys/vnode.h>
51 #include <sys/conf.h>
52 #include <sys/syslog.h>
53 
54 #include <dev/dkvar.h>
55 #include <dev/cgdvar.h>
56 
57 /* Entry Point Functions */
58 
59 void	cgdattach(int);
60 
61 static dev_type_open(cgdopen);
62 static dev_type_close(cgdclose);
63 static dev_type_read(cgdread);
64 static dev_type_write(cgdwrite);
65 static dev_type_ioctl(cgdioctl);
66 static dev_type_strategy(cgdstrategy);
67 static dev_type_dump(cgddump);
68 static dev_type_size(cgdsize);
69 
70 const struct bdevsw cgd_bdevsw = {
71 	cgdopen, cgdclose, cgdstrategy, cgdioctl,
72 	cgddump, cgdsize, D_DISK
73 };
74 
75 const struct cdevsw cgd_cdevsw = {
76 	cgdopen, cgdclose, cgdread, cgdwrite, cgdioctl,
77 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
78 };
79 
80 static int cgd_match(device_t, cfdata_t, void *);
81 static void cgd_attach(device_t, device_t, void *);
82 static int cgd_detach(device_t, int);
83 static struct cgd_softc	*cgd_spawn(int);
84 static int cgd_destroy(device_t);
85 
86 /* Internal Functions */
87 
88 static int	cgdstart(struct dk_softc *, struct buf *);
89 static void	cgdiodone(struct buf *);
90 
91 static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
92 static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
93 static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
94 			struct lwp *);
95 static void	cgd_cipher(struct cgd_softc *, void *, void *,
96 			   size_t, daddr_t, size_t, int);
97 
98 /* Pseudo-disk Interface */
99 
100 static struct dk_intf the_dkintf = {
101 	DTYPE_CGD,
102 	"cgd",
103 	cgdopen,
104 	cgdclose,
105 	cgdstrategy,
106 	cgdstart,
107 };
108 static struct dk_intf *di = &the_dkintf;
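/*
 * Most of the generic pseudo-disk bookkeeping (open/close accounting,
 * buffer queueing, disklabel and dump handling) is delegated to the
 * dk_softc helper routines in dev/dksubr.c through the dk_intf vector
 * above; cgdstart() is the callback invoked from there to move, and
 * encrypt or decrypt, the actual data.
 */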
109 
110 static struct dkdriver cgddkdriver = {
111 	.d_strategy = cgdstrategy,
112 	.d_minphys = minphys,
113 };
114 
115 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
116     cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
117 extern struct cfdriver cgd_cd;
118 
119 /* DIAGNOSTIC and DEBUG definitions */
120 
121 #if defined(CGDDEBUG) && !defined(DEBUG)
122 #define DEBUG
123 #endif
124 
125 #ifdef DEBUG
126 int cgddebug = 0;
127 
128 #define CGDB_FOLLOW	0x1
129 #define CGDB_IO	0x2
130 #define CGDB_CRYPTO	0x4
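/*
 * cgddebug is a bitmask of the CGDB_* values above; it can be changed
 * at run time (for example from the kernel debugger) to select which
 * classes of DPRINTF tracing are emitted.  None of this tracing is
 * compiled in unless CGDDEBUG or DEBUG is defined.
 */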
131 
132 #define IFDEBUG(x,y)		if (cgddebug & (x)) y
133 #define DPRINTF(x,y)		IFDEBUG(x, printf y)
134 #define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)
135 
136 static void	hexprint(const char *, void *, int);
137 
138 #else
139 #define IFDEBUG(x,y)
140 #define DPRINTF(x,y)
141 #define DPRINTF_FOLLOW(y)
142 #endif
143 
144 #ifdef DIAGNOSTIC
145 #define DIAGPANIC(x)		panic x
146 #define DIAGCONDPANIC(x,y)	if (x) panic y
147 #else
148 #define DIAGPANIC(x)
149 #define DIAGCONDPANIC(x,y)
150 #endif
151 
152 /* Global variables */
153 
154 /* Utility Functions */
155 
156 #define CGDUNIT(x)		DISKUNIT(x)
157 #define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO
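/*
 * GETCGD_SOFTC() makes the calling function return ENXIO if no softc
 * can be found or created for the unit; getcgd_softc() below will
 * auto-configure ("spawn") a new cgd instance on first use, so units
 * do not have to be configured statically into the kernel.
 */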
158 
159 /* The code */
160 
161 static struct cgd_softc *
162 getcgd_softc(dev_t dev)
163 {
164 	int	unit = CGDUNIT(dev);
165 	struct cgd_softc *sc;
166 
167 	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
168 
169 	sc = device_lookup_private(&cgd_cd, unit);
170 	if (sc == NULL)
171 		sc = cgd_spawn(unit);
172 	return sc;
173 }
174 
175 static int
176 cgd_match(device_t self, cfdata_t cfdata, void *aux)
177 {
178 
179 	return 1;
180 }
181 
182 static void
183 cgd_attach(device_t parent, device_t self, void *aux)
184 {
185 	struct cgd_softc *sc = device_private(self);
186 
187 	sc->sc_dev = self;
188 	simple_lock_init(&sc->sc_slock);
189 	dk_sc_init(&sc->sc_dksc, sc, device_xname(sc->sc_dev));
190 	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
191 
192 	if (!pmf_device_register(self, NULL, NULL))
193 		aprint_error_dev(self, "unable to register power management hooks\n");
194 }
195 
196 
197 static int
198 cgd_detach(device_t self, int flags)
199 {
200 	int ret;
201 	const int pmask = 1 << RAW_PART;
202 	struct cgd_softc *sc = device_private(self);
203 	struct dk_softc *dksc = &sc->sc_dksc;
204 
205 	if (DK_BUSY(dksc, pmask))
206 		return EBUSY;
207 
208 	if ((dksc->sc_flags & DKF_INITED) != 0 &&
209 	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
210 		return ret;
211 
212 	disk_destroy(&dksc->sc_dkdev);
213 
214 	return 0;
215 }
216 
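/*
 * cgdattach() is called at boot for the "pseudo-device cgd" entry in
 * the kernel configuration; the count argument is ignored because
 * instances are created on demand by cgd_spawn() the first time a
 * unit is opened.
 */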
217 void
218 cgdattach(int num)
219 {
220 	int error;
221 
222 	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
223 	if (error != 0)
224 		aprint_error("%s: unable to register cfattach\n",
225 		    cgd_cd.cd_name);
226 }
227 
228 static struct cgd_softc *
229 cgd_spawn(int unit)
230 {
231 	cfdata_t cf;
232 
233 	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
234 	cf->cf_name = cgd_cd.cd_name;
235 	cf->cf_atname = cgd_cd.cd_name;
236 	cf->cf_unit = unit;
237 	cf->cf_fstate = FSTATE_STAR;
238 
239 	return device_private(config_attach_pseudo(cf));
240 }
241 
242 static int
243 cgd_destroy(device_t dev)
244 {
245 	int error;
246 	cfdata_t cf;
247 
248 	cf = device_cfdata(dev);
249 	error = config_detach(dev, DETACH_QUIET);
250 	if (error)
251 		return error;
252 	free(cf, M_DEVBUF);
253 	return 0;
254 }
255 
256 static int
257 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
258 {
259 	struct	cgd_softc *cs;
260 
261 	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
262 	GETCGD_SOFTC(cs, dev);
263 	return dk_open(di, &cs->sc_dksc, dev, flags, fmt, l);
264 }
265 
266 static int
267 cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
268 {
269 	int error;
270 	struct	cgd_softc *cs;
271 	struct	dk_softc *dksc;
272 
273 	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
274 	GETCGD_SOFTC(cs, dev);
275 	dksc = &cs->sc_dksc;
276 	if ((error = dk_close(di, dksc, dev, flags, fmt, l)) != 0)
277 		return error;
278 
279 	if ((dksc->sc_flags & DKF_INITED) == 0) {
280 		if ((error = cgd_destroy(cs->sc_dev)) != 0) {
281 			aprint_error_dev(cs->sc_dev,
282 			    "unable to detach instance\n");
283 			return error;
284 		}
285 	}
286 	return 0;
287 }
288 
289 static void
290 cgdstrategy(struct buf *bp)
291 {
292 	struct	cgd_softc *cs = getcgd_softc(bp->b_dev);
293 
294 	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
295 	    (long)bp->b_bcount));
296 
297 	/*
298 	 * Reject unaligned requests.  We can encrypt and decrypt only
299 	 * complete disk sectors, and we let the ciphers require their
300 	 * buffers to be aligned to 32-bit boundaries.
301 	 */
302 	if (bp->b_blkno < 0 ||
303 	    (bp->b_bcount % DEV_BSIZE) != 0 ||
304 	    ((uintptr_t)bp->b_data & 3) != 0) {
305 		bp->b_error = EINVAL;
306 		bp->b_resid = bp->b_bcount;
307 		biodone(bp);
308 		return;
309 	}
310 
311 	/* XXXrcd: Should we test for (cs != NULL)? */
312 	dk_strategy(di, &cs->sc_dksc, bp);
313 	return;
314 }
315 
316 static int
317 cgdsize(dev_t dev)
318 {
319 	struct cgd_softc *cs = getcgd_softc(dev);
320 
321 	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
322 	if (!cs)
323 		return -1;
324 	return dk_size(di, &cs->sc_dksc, dev);
325 }
326 
327 /*
328  * cgd_{get,put}data get and release the buffer used for the newly
329  * encrypted data.  We keep one such buffer per device so that at
330  * least one transaction can always be in flight.  We use this
331  * buffer first so that we have one less piece of malloc'ed data
332  * at any given point.
333  */
334 
335 static void *
336 cgd_getdata(struct dk_softc *dksc, unsigned long size)
337 {
338 	struct	cgd_softc *cs = dksc->sc_osc;
339 	void *	data = NULL;
340 
341 	simple_lock(&cs->sc_slock);
342 	if (cs->sc_data_used == 0) {
343 		cs->sc_data_used = 1;
344 		data = cs->sc_data;
345 	}
346 	simple_unlock(&cs->sc_slock);
347 
348 	if (data)
349 		return data;
350 
351 	return malloc(size, M_DEVBUF, M_NOWAIT);
352 }
353 
354 static void
355 cgd_putdata(struct dk_softc *dksc, void *data)
356 {
357 	struct	cgd_softc *cs = dksc->sc_osc;
358 
359 	if (data == cs->sc_data) {
360 		simple_lock(&cs->sc_slock);
361 		cs->sc_data_used = 0;
362 		simple_unlock(&cs->sc_slock);
363 	} else {
364 		free(data, M_DEVBUF);
365 	}
366 }
367 
368 static int
369 cgdstart(struct dk_softc *dksc, struct buf *bp)
370 {
371 	struct	cgd_softc *cs = dksc->sc_osc;
372 	struct	buf *nbp;
373 	void *	addr;
374 	void *	newaddr;
375 	daddr_t	bn;
376 	struct	vnode *vp;
377 
378 	DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
379 	disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */
380 
381 	bn = bp->b_rawblkno;
382 
383 	/*
384 	 * We attempt to allocate all of our resources up front, so that
385 	 * we can fail quickly if they are unavailable.
386 	 */
387 
388 	nbp = getiobuf(cs->sc_tvn, false);
389 	if (nbp == NULL) {
390 		disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
391 		return -1;
392 	}
393 
394 	/*
395 	 * If we are writing, then we need to encrypt the outgoing
396 	 * block into a new block of memory.  If we fail, then we
397 	 * return an error and let the dksubr framework deal with it.
398 	 */
399 	newaddr = addr = bp->b_data;
400 	if ((bp->b_flags & B_READ) == 0) {
401 		newaddr = cgd_getdata(dksc, bp->b_bcount);
402 		if (!newaddr) {
403 			putiobuf(nbp);
404 			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
405 			return -1;
406 		}
407 		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
408 		    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
409 	}
410 
411 	nbp->b_data = newaddr;
412 	nbp->b_flags = bp->b_flags;
413 	nbp->b_oflags = bp->b_oflags;
414 	nbp->b_cflags = bp->b_cflags;
415 	nbp->b_iodone = cgdiodone;
416 	nbp->b_proc = bp->b_proc;
417 	nbp->b_blkno = bn;
418 	nbp->b_bcount = bp->b_bcount;
419 	nbp->b_private = bp;
420 
421 	BIO_COPYPRIO(nbp, bp);
422 
423 	if ((nbp->b_flags & B_READ) == 0) {
424 		vp = nbp->b_vp;
425 		mutex_enter(vp->v_interlock);
426 		vp->v_numoutput++;
427 		mutex_exit(vp->v_interlock);
428 	}
429 	VOP_STRATEGY(cs->sc_tvn, nbp);
430 	return 0;
431 }
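/*
 * cgdiodone() is the biodone handler for the nested buffer issued by
 * cgdstart() above.  For a write, cgdstart() has already encrypted the
 * caller's data into a scratch buffer obtained from cgd_getdata(); for
 * a read, the data comes back encrypted and is decrypted in place
 * here.  In both cases the scratch buffer (if any) is released, the
 * original buffer is completed with biodone(), and dk_iodone() lets
 * the dksubr queue issue the next transfer.
 */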
432 
433 static void
434 cgdiodone(struct buf *nbp)
435 {
436 	struct	buf *obp = nbp->b_private;
437 	struct	cgd_softc *cs = getcgd_softc(obp->b_dev);
438 	struct	dk_softc *dksc = &cs->sc_dksc;
439 	int s;
440 
441 	KDASSERT(cs);
442 
443 	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
444 	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
445 	    obp, obp->b_bcount, obp->b_resid));
446 	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64 " addr %p bcnt %d\n",
447 	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
448 	    nbp->b_bcount));
449 	if (nbp->b_error != 0) {
450 		obp->b_error = nbp->b_error;
451 		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
452 		    obp->b_error));
453 	}
454 
455 	/* Perform the decryption if we are reading.
456 	 *
457 	 * Note: use the blocknumber from nbp, since it is what
458 	 *       we used to encrypt the blocks.
459 	 */
460 
461 	if (nbp->b_flags & B_READ)
462 		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
463 		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);
464 
465 	/* If we allocated memory, free it now... */
466 	if (nbp->b_data != obp->b_data)
467 		cgd_putdata(dksc, nbp->b_data);
468 
469 	putiobuf(nbp);
470 
471 	/* Request is complete for whatever reason */
472 	obp->b_resid = 0;
473 	if (obp->b_error != 0)
474 		obp->b_resid = obp->b_bcount;
475 	s = splbio();
476 	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
477 	    (obp->b_flags & B_READ));
478 	biodone(obp);
479 	dk_iodone(di, dksc);
480 	splx(s);
481 }
482 
483 /* XXX: we should probably put these into dksubr.c, mostly */
484 static int
485 cgdread(dev_t dev, struct uio *uio, int flags)
486 {
487 	struct	cgd_softc *cs;
488 	struct	dk_softc *dksc;
489 
490 	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
491 	    (unsigned long long)dev, uio, flags));
492 	GETCGD_SOFTC(cs, dev);
493 	dksc = &cs->sc_dksc;
494 	if ((dksc->sc_flags & DKF_INITED) == 0)
495 		return ENXIO;
496 	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
497 }
498 
499 /* XXX: we should probably put these into dksubr.c, mostly */
500 static int
501 cgdwrite(dev_t dev, struct uio *uio, int flags)
502 {
503 	struct	cgd_softc *cs;
504 	struct	dk_softc *dksc;
505 
506 	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
507 	GETCGD_SOFTC(cs, dev);
508 	dksc = &cs->sc_dksc;
509 	if ((dksc->sc_flags & DKF_INITED) == 0)
510 		return ENXIO;
511 	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
512 }
513 
514 static int
515 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
516 {
517 	struct	cgd_softc *cs;
518 	struct	dk_softc *dksc;
519 	struct	disk *dk;
520 	int	part = DISKPART(dev);
521 	int	pmask = 1 << part;
522 
523 	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
524 	    dev, cmd, data, flag, l));
525 	GETCGD_SOFTC(cs, dev);
526 	dksc = &cs->sc_dksc;
527 	dk = &dksc->sc_dkdev;
528 	switch (cmd) {
529 	case CGDIOCSET:
530 	case CGDIOCCLR:
531 		if ((flag & FWRITE) == 0)
532 			return EBADF;
533 	}
534 
535 	switch (cmd) {
536 	case CGDIOCSET:
537 		if (dksc->sc_flags & DKF_INITED)
538 			return EBUSY;
539 		return cgd_ioctl_set(cs, data, l);
540 	case CGDIOCCLR:
541 		if (DK_BUSY(&cs->sc_dksc, pmask))
542 			return EBUSY;
543 		return cgd_ioctl_clr(cs, l);
544 	case DIOCCACHESYNC:
545 		/*
546 		 * XXX Do we really need to care about having a writable
547 		 * file descriptor here?
548 		 */
549 		if ((flag & FWRITE) == 0)
550 			return (EBADF);
551 
552 		/*
553 		 * We pass this call down to the underlying disk.
554 		 */
555 		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
556 	default:
557 		return dk_ioctl(di, dksc, dev, cmd, data, flag, l);
558 	}
559 }
560 
561 static int
562 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
563 {
564 	struct	cgd_softc *cs;
565 
566 	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
567 	    dev, blkno, va, (unsigned long)size));
568 	GETCGD_SOFTC(cs, dev);
569 	return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
570 }
571 
572 /*
573  * XXXrcd:
574  *  for now we hardcode the maximum key length.
575  */
576 #define MAX_KEYSIZE	1024
577 
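/*
 * Table of the IV-generation method names accepted from userland.
 * "n" is the string matched against ci_ivmethod, "v" is the cipher
 * mode handed to the cipher via cf_mode, and "d" is the divisor later
 * applied to cf_blocksize: the block size was historically specified
 * in bits, so the plain "encblkno"/"encblkno8" entries keep it as is
 * while "encblkno1" divides it by 8 (see cgd_ioctl_set() below).
 */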
578 static const struct {
579 	const char *n;
580 	int v;
581 	int d;
582 } encblkno[] = {
583 	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
584 	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
585 	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
586 };
587 
588 /* ARGSUSED */
589 static int
590 cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
591 {
592 	struct	 cgd_ioctl *ci = data;
593 	struct	 vnode *vp;
594 	int	 ret;
595 	size_t	 i;
596 	size_t	 keybytes;			/* key length in bytes */
597 	const char *cp;
598 	struct pathbuf *pb;
599 	char	 *inbuf;
600 
601 	cp = ci->ci_disk;
602 
603 	ret = pathbuf_copyin(ci->ci_disk, &pb);
604 	if (ret != 0) {
605 		return ret;
606 	}
607 	ret = dk_lookup(pb, l, &vp);
608 	pathbuf_destroy(pb);
609 	if (ret != 0) {
610 		return ret;
611 	}
612 
613 	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);
614 
615 	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
616 		goto bail;
617 
618 	(void)memset(inbuf, 0, MAX_KEYSIZE);
619 	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
620 	if (ret)
621 		goto bail;
622 	cs->sc_cfuncs = cryptfuncs_find(inbuf);
623 	if (!cs->sc_cfuncs) {
624 		ret = EINVAL;
625 		goto bail;
626 	}
627 
628 	(void)memset(inbuf, 0, MAX_KEYSIZE);
629 	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
630 	if (ret)
631 		goto bail;
632 
633 	for (i = 0; i < __arraycount(encblkno); i++)
634 		if (strcmp(encblkno[i].n, inbuf) == 0)
635 			break;
636 
637 	if (i == __arraycount(encblkno)) {
638 		ret = EINVAL;
639 		goto bail;
640 	}
641 
642 	keybytes = ci->ci_keylen / 8 + 1;
643 	if (keybytes > MAX_KEYSIZE) {
644 		ret = EINVAL;
645 		goto bail;
646 	}
647 
648 	(void)memset(inbuf, 0, MAX_KEYSIZE);
649 	ret = copyin(ci->ci_key, inbuf, keybytes);
650 	if (ret)
651 		goto bail;
652 
653 	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
654 	cs->sc_cdata.cf_mode = encblkno[i].v;
655 	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
656 	    &cs->sc_cdata.cf_blocksize);
657 	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
658 	    log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
659 		cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
660 	    cs->sc_cdata.cf_priv = NULL;
661 	}
662 
663 	/*
664 	 * The blocksize is supposed to be in bytes. Unfortunately originally
665 	 * it was expressed in bits. For compatibility we maintain encblkno
666 	 * and encblkno8.
667 	 */
668 	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
669 	(void)memset(inbuf, 0, MAX_KEYSIZE);
670 	if (!cs->sc_cdata.cf_priv) {
671 		ret = EINVAL;		/* XXX is this the right error? */
672 		goto bail;
673 	}
674 	free(inbuf, M_TEMP);
675 
676 	bufq_alloc(&cs->sc_dksc.sc_bufq, "fcfs", 0);
677 
678 	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
679 	cs->sc_data_used = 0;
680 
681 	cs->sc_dksc.sc_flags |= DKF_INITED;
682 
683 	/* Attach the disk. */
684 	disk_attach(&cs->sc_dksc.sc_dkdev);
685 
686 	/* Try and read the disklabel. */
687 	dk_getdisklabel(di, &cs->sc_dksc, 0 /* XXX ? (cause of PR 41704) */);
688 
689 	/* Discover wedges on this disk. */
690 	dkwedge_discover(&cs->sc_dksc.sc_dkdev);
691 
692 	return 0;
693 
694 bail:
695 	free(inbuf, M_TEMP);
696 	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
697 	return ret;
698 }
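/*
 * For reference, a userland configuration tool (normally cgdconfig(8))
 * drives the ioctl above roughly as follows.  This is an illustrative
 * sketch only; the device names and values are made up, and the real
 * parameters come from cgdconfig's parameters file:
 *
 *	struct cgd_ioctl ci;
 *
 *	memset(&ci, 0, sizeof(ci));
 *	ci.ci_disk = "/dev/rwd0e";	underlying device to encrypt
 *	ci.ci_alg = "aes-cbc";		name looked up by cryptfuncs_find()
 *	ci.ci_ivmethod = "encblkno1";	one of the encblkno[] names above
 *	ci.ci_keylen = 256;		key length in bits
 *	ci.ci_key = key;		pointer to the key material
 *	ci.ci_blocksize = 128;		cipher block size, historically in bits
 *	ioctl(fd, CGDIOCSET, &ci);	fd: the unit's raw node, e.g. /dev/rcgd0d
 *
 * and tears the device down again with ioctl(fd, CGDIOCCLR, ...).
 */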
699 
700 /* ARGSUSED */
701 static int
702 cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
703 {
704 	int	s;
705 	struct	dk_softc *dksc;
706 
707 	dksc = &cs->sc_dksc;
708 
709 	if ((dksc->sc_flags & DKF_INITED) == 0)
710 		return ENXIO;
711 
712 	/* Delete all of our wedges. */
713 	dkwedge_delall(&cs->sc_dksc.sc_dkdev);
714 
715 	/* Kill off any queued buffers. */
716 	s = splbio();
717 	bufq_drain(cs->sc_dksc.sc_bufq);
718 	splx(s);
719 	bufq_free(cs->sc_dksc.sc_bufq);
720 
721 	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
722 	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
723 	free(cs->sc_tpath, M_DEVBUF);
724 	free(cs->sc_data, M_DEVBUF);
725 	cs->sc_data_used = 0;
726 	cs->sc_dksc.sc_flags &= ~DKF_INITED;
727 	disk_detach(&cs->sc_dksc.sc_dkdev);
728 
729 	return 0;
730 }
731 
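/*
 * Ask the backing device for its size, preferring wedge (dk(4))
 * information and falling back to the traditional disklabel partition
 * information for older-style devices.
 */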
732 static int
733 getsize(struct lwp *l, struct vnode *vp, size_t *size)
734 {
735 	struct partinfo dpart;
736 	struct dkwedge_info dkw;
737 	int ret;
738 
739 	if ((ret = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
740 	    l->l_cred)) == 0) {
741 		*size = dkw.dkw_size;
742 		return 0;
743 	}
744 
745 	if ((ret = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred)) == 0) {
746 		*size = dpart.part->p_size;
747 		return 0;
748 	}
749 
750 	return ret;
751 }
752 
753 
754 static int
755 cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
756 	struct lwp *l)
757 {
758 	struct	dk_geom *pdg;
759 	struct	vattr va;
760 	size_t	size;
761 	int	ret;
762 	char	*tmppath;
763 
764 	cs->sc_dksc.sc_size = 0;
765 	cs->sc_tvn = vp;
766 	cs->sc_tpath = NULL;
767 
768 	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
769 	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
770 	if (ret)
771 		goto bail;
772 	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
773 	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);
774 
775 	if ((ret = VOP_GETATTR(vp, &va, l->l_cred)) != 0)
776 		goto bail;
777 
778 	cs->sc_tdev = va.va_rdev;
779 
780 	if ((ret = getsize(l, vp, &size)) != 0)
781 		goto bail;
782 
783 	if (!size) {
784 		ret = ENODEV;
785 		goto bail;
786 	}
787 
788 	cs->sc_dksc.sc_size = size;
789 
790 	/*
791 	 * XXX here we should probe the underlying device.  If we
792 	 *     are accessing a partition of type RAW_PART, then
793 	 *     we should populate our initial geometry with the
794 	 *     geometry that we discover from the device.
795 	 */
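	/*
	 * With the DEV_BSIZE (512 byte) sectors used here this yields
	 * 1024 * (1024 / 512) = 2048 sectors on a single track, i.e. a
	 * fabricated geometry of one megabyte per cylinder, with
	 * ncylinders following from the total size.
	 */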
796 	pdg = &cs->sc_dksc.sc_geom;
797 	pdg->pdg_secsize = DEV_BSIZE;
798 	pdg->pdg_ntracks = 1;
799 	pdg->pdg_nsectors = 1024 * (1024 / pdg->pdg_secsize);
800 	pdg->pdg_ncylinders = cs->sc_dksc.sc_size / pdg->pdg_nsectors;
801 
802 bail:
803 	free(tmppath, M_TEMP);
804 	if (ret && cs->sc_tpath)
805 		free(cs->sc_tpath, M_DEVBUF);
806 	return ret;
807 }
808 
809 /*
810  * Our generic cipher entry point.  This takes care of the
811  * IV mode and passes off the work to the specific cipher.
812  * We implement here the IV method ``encrypted block
813  * number''.
814  *
815  * For the encryption case, we accomplish this by setting
816  * up a struct uio where the first iovec of the source is
817  * the blocknumber and the first iovec of the dest is a
818  * sink.  We then call the cipher with an IV of zero, and
819  * the right thing happens.
820  *
821  * For the decryption case, we use the same basic mechanism
822  * for symmetry, but we encrypt the block number in the
823  * first iovec.
824  *
825  * We mainly do this to avoid requiring the definition of
826  * an ECB mode.
827  *
828  * XXXrcd: for now we rely on our own crypto framework defined
829  *         in dev/cgd_crypto.c.  This will change when we
830  *         get a generic kernel crypto framework.
831  */
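/*
 * Concretely, cgd_cipher() below builds a two-element uio for every
 * sector: iovec 0 of the source is blkno_buf (the zero-padded, encoded
 * block number) and iovec 0 of the destination is a scratch "sink";
 * iovec 1 carries the sector data itself.  Run through a CBC cipher
 * with an all-zero IV, the ciphertext of blkno_buf chains into the
 * data block, so the sector is effectively encrypted with an IV of
 * E_k(blkno).  For decryption, blkno_buf is first encrypted on its own
 * (step 0 below) so that the same chaining value precedes the stored
 * ciphertext.
 */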
832 
833 static void
834 blkno2blkno_buf(char *sbuf, daddr_t blkno)
835 {
836 	int	i;
837 
838 	/* Set up the blkno in blkno_buf.  Here we do not care much
839 	 * about the final layout of the information as long as we
840 	 * can guarantee that each sector will have a different IV
841 	 * and that the endianness of the machine will not affect
842 	 * the representation that we have chosen.
843 	 *
844 	 * We choose this representation because it does not rely
845 	 * on the size of buf (which is the blocksize of the cipher),
846 	 * but allows daddr_t to grow without breaking existing
847 	 * disks.
848 	 *
849 	 * Note that blkno2blkno_buf does not take a size as input,
850 	 * and hence must be called on a pre-zeroed buffer of length
851 	 * greater than or equal to sizeof(daddr_t).
852 	 */
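	/*
	 * For example, blkno 0x0102 becomes the bytes 02 01 00 00 ...
	 * (least significant byte first), regardless of host byte
	 * order; the remainder of the pre-zeroed buffer is the padding.
	 */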
853 	for (i=0; i < sizeof(daddr_t); i++) {
854 		*sbuf++ = blkno & 0xff;
855 		blkno >>= 8;
856 	}
857 }
858 
859 static void
860 cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
861     size_t len, daddr_t blkno, size_t secsize, int dir)
862 {
863 	char		*dst = dstv;
864 	char 		*src = srcv;
865 	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
866 	struct uio	dstuio;
867 	struct uio	srcuio;
868 	struct iovec	dstiov[2];
869 	struct iovec	srciov[2];
870 	size_t		blocksize = cs->sc_cdata.cf_blocksize;
871 	char		sink[CGD_MAXBLOCKSIZE];
872 	char		zero_iv[CGD_MAXBLOCKSIZE];
873 	char		blkno_buf[CGD_MAXBLOCKSIZE];
874 
875 	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));
876 
877 	DIAGCONDPANIC(len % blocksize != 0,
878 	    ("cgd_cipher: len %% blocksize != 0"));
879 
880 	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
881 	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
882 	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));
883 
884 	memset(zero_iv, 0x0, blocksize);
885 
886 	dstuio.uio_iov = dstiov;
887 	dstuio.uio_iovcnt = 2;
888 
889 	srcuio.uio_iov = srciov;
890 	srcuio.uio_iovcnt = 2;
891 
892 	dstiov[0].iov_base = sink;
893 	dstiov[0].iov_len  = blocksize;
894 	srciov[0].iov_base = blkno_buf;
895 	srciov[0].iov_len  = blocksize;
896 	dstiov[1].iov_len  = secsize;
897 	srciov[1].iov_len  = secsize;
898 
899 	for (; len > 0; len -= secsize) {
900 		dstiov[1].iov_base = dst;
901 		srciov[1].iov_base = src;
902 
903 		memset(blkno_buf, 0x0, blocksize);
904 		blkno2blkno_buf(blkno_buf, blkno);
905 		if (dir == CGD_CIPHER_DECRYPT) {
906 			dstuio.uio_iovcnt = 1;
907 			srcuio.uio_iovcnt = 1;
908 			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
909 			    blkno_buf, blocksize));
910 			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
911 			    zero_iv, CGD_CIPHER_ENCRYPT);
912 			memcpy(blkno_buf, sink, blocksize);
913 			dstuio.uio_iovcnt = 2;
914 			srcuio.uio_iovcnt = 2;
915 		}
916 
917 		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
918 		    blkno_buf, blocksize));
919 		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
920 		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
921 		    sink, blocksize));
922 
923 		dst += secsize;
924 		src += secsize;
925 		blkno++;
926 	}
927 }
928 
929 #ifdef DEBUG
930 static void
931 hexprint(const char *start, void *buf, int len)
932 {
933 	char	*c = buf;
934 
935 	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
936 	printf("%s: len=%06d 0x", start, len);
937 	while (len--)
938 		printf("%02x", (unsigned char) *c++);
939 }
940 #endif
941 
942 #ifdef _MODULE
943 
944 #include <sys/module.h>
945 
946 MODULE(MODULE_CLASS_DRIVER, cgd, NULL);
947 CFDRIVER_DECL(cgd, DV_DISK, NULL);
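/*
 * When cgd is built as a kernel module (typically loaded with
 * modload(8)), cgd_modcmd() below performs the registration that is
 * otherwise done at boot: it attaches the cfdriver and the cfattach
 * and registers the block and character device switch entries, and
 * undoes all of that again on MODULE_CMD_FINI.
 */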
948 
949 static int
950 cgd_modcmd(modcmd_t cmd, void *arg)
951 {
952 	int bmajor = -1, cmajor = -1, error = 0;
953 
954 	switch (cmd) {
955 	case MODULE_CMD_INIT:
956 		error = config_cfdriver_attach(&cgd_cd);
957 		if (error)
958 			break;
959 
960 		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
961 		if (error) {
962 			config_cfdriver_detach(&cgd_cd);
963 			aprint_error("%s: unable to register cfattach\n",
964 			    cgd_cd.cd_name);
965 			break;
966 		}
967 
968 		error = devsw_attach("cgd", &cgd_bdevsw, &bmajor,
969 		    &cgd_cdevsw, &cmajor);
970 		if (error) {
971 			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
972 			config_cfdriver_detach(&cgd_cd);
973 			break;
974 		}
975 
976 		break;
977 
978 	case MODULE_CMD_FINI:
979 		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
980 		if (error)
981 			break;
982 		config_cfdriver_detach(&cgd_cd);
983 		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
984 		break;
985 
986 	case MODULE_CMD_STAT:
987 		return ENOTTY;
988 
989 	default:
990 		return ENOTTY;
991 	}
992 
993 	return error;
994 }
995 
996 #endif
997