1 /* $NetBSD: cgd.c,v 1.98 2015/05/02 08:00:08 mlelstv Exp $ */
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Roland C. Dowdeswell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.98 2015/05/02 08:00:08 mlelstv Exp $");
34 
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/buf.h>
41 #include <sys/bufq.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/pool.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/disk.h>
48 #include <sys/disklabel.h>
49 #include <sys/fcntl.h>
50 #include <sys/namei.h> /* for pathbuf */
51 #include <sys/vnode.h>
52 #include <sys/conf.h>
53 #include <sys/syslog.h>
54 
55 #include <dev/dkvar.h>
56 #include <dev/cgdvar.h>
57 
58 #include <miscfs/specfs/specdev.h> /* for v_rdev */
59 
60 /* Entry Point Functions */
61 
62 void	cgdattach(int);
63 
64 static dev_type_open(cgdopen);
65 static dev_type_close(cgdclose);
66 static dev_type_read(cgdread);
67 static dev_type_write(cgdwrite);
68 static dev_type_ioctl(cgdioctl);
69 static dev_type_strategy(cgdstrategy);
70 static dev_type_dump(cgddump);
71 static dev_type_size(cgdsize);
72 
73 const struct bdevsw cgd_bdevsw = {
74 	.d_open = cgdopen,
75 	.d_close = cgdclose,
76 	.d_strategy = cgdstrategy,
77 	.d_ioctl = cgdioctl,
78 	.d_dump = cgddump,
79 	.d_psize = cgdsize,
80 	.d_discard = nodiscard,
81 	.d_flag = D_DISK
82 };
83 
84 const struct cdevsw cgd_cdevsw = {
85 	.d_open = cgdopen,
86 	.d_close = cgdclose,
87 	.d_read = cgdread,
88 	.d_write = cgdwrite,
89 	.d_ioctl = cgdioctl,
90 	.d_stop = nostop,
91 	.d_tty = notty,
92 	.d_poll = nopoll,
93 	.d_mmap = nommap,
94 	.d_kqfilter = nokqfilter,
95 	.d_discard = nodiscard,
96 	.d_flag = D_DISK
97 };
98 
99 static int cgd_match(device_t, cfdata_t, void *);
100 static void cgd_attach(device_t, device_t, void *);
101 static int cgd_detach(device_t, int);
102 static struct cgd_softc	*cgd_spawn(int);
103 static int cgd_destroy(device_t);
104 
105 /* Internal Functions */
106 
107 static void	cgd_start(device_t);
108 static void	cgdiodone(struct buf *);
109 
110 static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
111 static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
112 static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
113 static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
114 			struct lwp *);
115 static void	cgd_cipher(struct cgd_softc *, void *, void *,
116 			   size_t, daddr_t, size_t, int);
117 
118 static struct dkdriver cgddkdriver = {
119 	.d_minphys = minphys,
120 	.d_open = cgdopen,
121 	.d_close = cgdclose,
122 	.d_strategy = cgdstrategy,
123 	.d_iosize = NULL,
124 	.d_diskstart = cgd_start,
125 	.d_dumpblocks = NULL,
126 	.d_lastclose = NULL
127 };
128 
129 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
130     cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
131 extern struct cfdriver cgd_cd;
132 
133 /* DIAGNOSTIC and DEBUG definitions */
134 
135 #if defined(CGDDEBUG) && !defined(DEBUG)
136 #define DEBUG
137 #endif
138 
139 #ifdef DEBUG
140 int cgddebug = 0;
141 
142 #define CGDB_FOLLOW	0x1
143 #define CGDB_IO	0x2
144 #define CGDB_CRYPTO	0x4
145 
146 #define IFDEBUG(x,y)		if (cgddebug & (x)) y
147 #define DPRINTF(x,y)		IFDEBUG(x, printf y)
148 #define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)
149 
150 static void	hexprint(const char *, void *, int);
151 
152 #else
153 #define IFDEBUG(x,y)
154 #define DPRINTF(x,y)
155 #define DPRINTF_FOLLOW(y)
156 #endif
157 
158 #ifdef DIAGNOSTIC
159 #define DIAGPANIC(x)		panic x
160 #define DIAGCONDPANIC(x,y)	if (x) panic y
161 #else
162 #define DIAGPANIC(x)
163 #define DIAGCONDPANIC(x,y)
164 #endif
165 
166 /* Global variables */
167 
168 /* Utility Functions */
169 
170 #define CGDUNIT(x)		DISKUNIT(x)
171 #define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO
172 
173 /* The code */
174 
175 static struct cgd_softc *
176 getcgd_softc(dev_t dev)
177 {
178 	int	unit = CGDUNIT(dev);
179 	struct cgd_softc *sc;
180 
181 	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
182 
183 	sc = device_lookup_private(&cgd_cd, unit);
184 	if (sc == NULL)
185 		sc = cgd_spawn(unit);
186 	return sc;
187 }
188 
189 static int
190 cgd_match(device_t self, cfdata_t cfdata, void *aux)
191 {
192 
193 	return 1;
194 }
195 
196 static void
197 cgd_attach(device_t parent, device_t self, void *aux)
198 {
199 	struct cgd_softc *sc = device_private(self);
200 
201 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
202 	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
203 	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
204 
205 	if (!pmf_device_register(self, NULL, NULL))
206 		aprint_error_dev(self, "unable to register power management hooks\n");
207 }
208 
209 
210 static int
211 cgd_detach(device_t self, int flags)
212 {
213 	int ret;
214 	const int pmask = 1 << RAW_PART;
215 	struct cgd_softc *sc = device_private(self);
216 	struct dk_softc *dksc = &sc->sc_dksc;
217 
218 	if (DK_BUSY(dksc, pmask))
219 		return EBUSY;
220 
221 	if (DK_ATTACHED(dksc) &&
222 	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
223 		return ret;
224 
225 	disk_destroy(&dksc->sc_dkdev);
226 	mutex_destroy(&sc->sc_lock);
227 
228 	return 0;
229 }
230 
231 void
232 cgdattach(int num)
233 {
234 	int error;
235 
236 	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
237 	if (error != 0)
238 		aprint_error("%s: unable to register cfattach\n",
239 		    cgd_cd.cd_name);
240 }
241 
242 static struct cgd_softc *
243 cgd_spawn(int unit)
244 {
245 	cfdata_t cf;
246 
247 	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
248 	cf->cf_name = cgd_cd.cd_name;
249 	cf->cf_atname = cgd_cd.cd_name;
250 	cf->cf_unit = unit;
251 	cf->cf_fstate = FSTATE_STAR;
252 
253 	return device_private(config_attach_pseudo(cf));
254 }
255 
256 static int
257 cgd_destroy(device_t dev)
258 {
259 	int error;
260 	cfdata_t cf;
261 
262 	cf = device_cfdata(dev);
263 	error = config_detach(dev, DETACH_QUIET);
264 	if (error)
265 		return error;
266 	free(cf, M_DEVBUF);
267 	return 0;
268 }
269 
270 static int
271 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
272 {
273 	struct	cgd_softc *cs;
274 
275 	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
276 	GETCGD_SOFTC(cs, dev);
277 	return dk_open(&cs->sc_dksc, dev, flags, fmt, l);
278 }
279 
280 static int
281 cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
282 {
283 	int error;
284 	struct	cgd_softc *cs;
285 	struct	dk_softc *dksc;
286 
287 	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
288 	GETCGD_SOFTC(cs, dev);
289 	dksc = &cs->sc_dksc;
290 	if ((error =  dk_close(dksc, dev, flags, fmt, l)) != 0)
291 	if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
292 
293 	if (!DK_ATTACHED(dksc)) {
294 		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
295 			aprint_error_dev(dksc->sc_dev,
296 			    "unable to detach instance\n");
297 			return error;
298 		}
299 	}
300 	return 0;
301 }
302 
303 static void
304 cgdstrategy(struct buf *bp)
305 {
306 	struct	cgd_softc *cs = getcgd_softc(bp->b_dev);
307 
308 	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
309 	    (long)bp->b_bcount));
310 
311 	/*
312 	 * Reject unaligned I/O requests.  We can encrypt and decrypt only
313 	 * complete disk sectors, and we let the ciphers require their
314 	 * buffers to be aligned to 32-bit boundaries.
315 	 */
316 	if (bp->b_blkno < 0 ||
317 	    (bp->b_bcount % DEV_BSIZE) != 0 ||
318 	    ((uintptr_t)bp->b_data & 3) != 0) {
319 		bp->b_error = EINVAL;
320 		bp->b_resid = bp->b_bcount;
321 		biodone(bp);
322 		return;
323 	}
324 
325 	/*
	 * getcgd_softc() may fail to attach the unit and return NULL;
	 * fail the request cleanly rather than dereference it.
	 */
	if (cs == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}
326 	dk_strategy(&cs->sc_dksc, bp);
327 	return;
328 }
329 
330 static int
331 cgdsize(dev_t dev)
332 {
333 	struct cgd_softc *cs = getcgd_softc(dev);
334 
335 	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
336 	if (!cs)
337 		return -1;
338 	return dk_size(&cs->sc_dksc, dev);
339 }
340 
341 /*
342  * cgd_{get,put}data manage the buffer that receives the newly
343  * encrypted data.  Each device owns one preallocated buffer so that
344  * at least one transaction can always be in flight.  We prefer that
345  * buffer over malloc(9) so that there is one less piece of malloc'ed
346  * data around at any given point.
347  */
348 
349 static void *
350 cgd_getdata(struct dk_softc *dksc, unsigned long size)
351 {
352 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
353 	void *	data = NULL;
354 
355 	mutex_enter(&cs->sc_lock);
356 	if (cs->sc_data_used == 0) {
357 		cs->sc_data_used = 1;
358 		data = cs->sc_data;
359 	}
360 	mutex_exit(&cs->sc_lock);
361 
362 	if (data)
363 		return data;
364 
365 	return malloc(size, M_DEVBUF, M_NOWAIT);
366 }
367 
368 static void
369 cgd_putdata(struct dk_softc *dksc, void *data)
370 {
371 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
372 
373 	if (data == cs->sc_data) {
374 		mutex_enter(&cs->sc_lock);
375 		cs->sc_data_used = 0;
376 		mutex_exit(&cs->sc_lock);
377 	} else {
378 		free(data, M_DEVBUF);
379 	}
380 }
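
/*
 * A note on the scheme above: cgd_getdata() is called from cgd_start(),
 * which must not sleep, so the fallback allocation uses M_NOWAIT.  If
 * neither the preallocated buffer nor the fallback is available,
 * cgd_start() leaves the buf on the queue (it only bufq_get()s once all
 * resources are in hand) and the request is retried when cgdiodone()
 * calls cgd_start() again.
 */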
381 
382 static void
383 cgd_start(device_t dev)
384 {
385 	struct	cgd_softc *cs = device_private(dev);
386 	struct	dk_softc *dksc = &cs->sc_dksc;
387 	struct	buf *bp, *nbp;
388 #ifdef DIAGNOSTIC
389 	struct	buf *qbp;
390 #endif
391 	void *	addr;
392 	void *	newaddr;
393 	daddr_t	bn;
394 	struct	vnode *vp;
395 
396 	while ((bp = bufq_peek(dksc->sc_bufq)) != NULL) {
397 
398 		DPRINTF_FOLLOW(("cgd_start(%p, %p)\n", dksc, bp));
399 		disk_busy(&dksc->sc_dkdev);
400 
401 		bn = bp->b_rawblkno;
402 
403 		/*
404 		 * We attempt to allocate all of our resources up front, so that
405 		 * we can fail quickly if they are unavailable.
406 		 */
407 		nbp = getiobuf(cs->sc_tvn, false);
408 		if (nbp == NULL) {
409 			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
410 			break;
411 		}
412 
413 		/*
414 		 * If we are writing, then we need to encrypt the outgoing
415 		 * block into a new block of memory.
416 		 */
417 		newaddr = addr = bp->b_data;
418 		if ((bp->b_flags & B_READ) == 0) {
419 			newaddr = cgd_getdata(dksc, bp->b_bcount);
420 			if (!newaddr) {
421 				putiobuf(nbp);
422 				disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
423 				break;
424 			}
425 			cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
426 			    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
427 		}
428 		/* we now have all needed resources to process this buf */
429 #ifdef DIAGNOSTIC
430 		qbp = bufq_get(dksc->sc_bufq);
431 		KASSERT(bp == qbp);
432 #else
433 		(void)bufq_get(dksc->sc_bufq);
434 #endif
435 		nbp->b_data = newaddr;
436 		nbp->b_flags = bp->b_flags;
437 		nbp->b_oflags = bp->b_oflags;
438 		nbp->b_cflags = bp->b_cflags;
439 		nbp->b_iodone = cgdiodone;
440 		nbp->b_proc = bp->b_proc;
441 		nbp->b_blkno = bn;
442 		nbp->b_bcount = bp->b_bcount;
443 		nbp->b_private = bp;
444 
445 		BIO_COPYPRIO(nbp, bp);
446 
447 		if ((nbp->b_flags & B_READ) == 0) {
448 			vp = nbp->b_vp;
449 			mutex_enter(vp->v_interlock);
450 			vp->v_numoutput++;
451 			mutex_exit(vp->v_interlock);
452 		}
453 		VOP_STRATEGY(cs->sc_tvn, nbp);
454 	}
455 }
456 
457 static void
458 cgdiodone(struct buf *nbp)
459 {
460 	struct	buf *obp = nbp->b_private;
461 	struct	cgd_softc *cs = getcgd_softc(obp->b_dev);
462 	struct	dk_softc *dksc = &cs->sc_dksc;
463 	int s;
464 
465 	KDASSERT(cs);
466 
467 	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
468 	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
469 	    obp, obp->b_bcount, obp->b_resid));
470 	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64 " addr %p bcnt %d\n",
471 	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
472 	    nbp->b_bcount));
473 	if (nbp->b_error != 0) {
474 		obp->b_error = nbp->b_error;
475 		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
476 		    obp->b_error));
477 	}
478 
479 	/*
480 	 * Perform the decryption if we are reading.  Note: use the
481 	 * block number from nbp, since it is what we used to encrypt
482 	 * the blocks.
483 	 */
484 
485 	if (nbp->b_flags & B_READ)
486 		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
487 		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);
488 
489 	/* If we allocated memory, free it now... */
490 	if (nbp->b_data != obp->b_data)
491 		cgd_putdata(dksc, nbp->b_data);
492 
493 	putiobuf(nbp);
494 
495 	/* Request is complete for whatever reason */
496 	obp->b_resid = 0;
497 	if (obp->b_error != 0)
498 		obp->b_resid = obp->b_bcount;
499 	s = splbio();
500 	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
501 	    (obp->b_flags & B_READ));
502 	biodone(obp);
503 	cgd_start(dksc->sc_dev);
504 	splx(s);
505 }
506 
507 /* XXX: we should probably put these into dksubr.c, mostly */
508 static int
509 cgdread(dev_t dev, struct uio *uio, int flags)
510 {
511 	struct	cgd_softc *cs;
512 	struct	dk_softc *dksc;
513 
514 	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
515 	    (unsigned long long)dev, uio, flags));
516 	GETCGD_SOFTC(cs, dev);
517 	dksc = &cs->sc_dksc;
518 	if (!DK_ATTACHED(dksc))
519 		return ENXIO;
520 	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
521 }
522 
523 /* XXX: we should probably put these into dksubr.c, mostly */
524 static int
525 cgdwrite(dev_t dev, struct uio *uio, int flags)
526 {
527 	struct	cgd_softc *cs;
528 	struct	dk_softc *dksc;
529 
530 	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
531 	GETCGD_SOFTC(cs, dev);
532 	dksc = &cs->sc_dksc;
533 	if (!DK_ATTACHED(dksc))
534 		return ENXIO;
535 	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
536 }
537 
538 static int
539 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
540 {
541 	struct	cgd_softc *cs;
542 	struct	dk_softc *dksc;
543 	int	part = DISKPART(dev);
544 	int	pmask = 1 << part;
545 
546 	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
547 	    dev, cmd, data, flag, l));
548 
549 	switch (cmd) {
550 	case CGDIOCGET:
551 		return cgd_ioctl_get(dev, data, l);
552 	case CGDIOCSET:
553 	case CGDIOCCLR:
554 		if ((flag & FWRITE) == 0)
555 			return EBADF;
556 		/* FALLTHROUGH */
557 	default:
558 		GETCGD_SOFTC(cs, dev);
559 		dksc = &cs->sc_dksc;
560 		break;
561 	}
562 
563 	switch (cmd) {
564 	case CGDIOCSET:
565 		if (DK_ATTACHED(dksc))
566 			return EBUSY;
567 		return cgd_ioctl_set(cs, data, l);
568 	case CGDIOCCLR:
569 		if (DK_BUSY(&cs->sc_dksc, pmask))
570 			return EBUSY;
571 		return cgd_ioctl_clr(cs, l);
572 	case DIOCCACHESYNC:
573 		/*
574 		 * XXX Do we really need to care about having a writable
575 		 * file descriptor here?
576 		 */
577 		if ((flag & FWRITE) == 0)
578 			return (EBADF);
579 
580 		/*
581 		 * We pass this call down to the underlying disk.
582 		 */
583 		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
584 	default:
585 		return dk_ioctl(dksc, dev, cmd, data, flag, l);
586 	case CGDIOCGET:
587 		KASSERT(0);
588 		return EINVAL;
589 	}
590 }
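
/*
 * Editorial sketch, not part of the driver: roughly how a userland tool
 * might query a configured unit with CGDIOCGET.  The device node name,
 * the open mode and the printf formats are assumptions; only the struct
 * cgd_user fields that cgd_ioctl_get() fills in below are relied upon.
 *
 *	#include <sys/ioctl.h>
 *	#include <dev/cgdvar.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void
 *	query_cgd(const char *devnode)	// e.g. "/dev/rcgd0d" (assumed name)
 *	{
 *		struct cgd_user cgu;
 *		int fd;
 *
 *		if ((fd = open(devnode, O_RDONLY)) == -1)
 *			err(1, "open %s", devnode);
 *		cgu.cgu_unit = -1;	// -1: report on devnode's own unit
 *		if (ioctl(fd, CGDIOCGET, &cgu) == -1)
 *			err(1, "CGDIOCGET");
 *		printf("alg %s blocksize %d mode %d keylen %d\n",
 *		    cgu.cgu_alg, (int)cgu.cgu_blocksize,
 *		    (int)cgu.cgu_mode, (int)cgu.cgu_keylen);
 *		close(fd);
 *	}
 */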
591 
592 static int
593 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
594 {
595 	struct	cgd_softc *cs;
596 
597 	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
598 	    dev, blkno, va, (unsigned long)size));
599 	GETCGD_SOFTC(cs, dev);
600 	return dk_dump(&cs->sc_dksc, dev, blkno, va, size);
601 }
602 
603 /*
604  * XXXrcd:
605  *  for now we hardcode the maximum key length.
606  */
607 #define MAX_KEYSIZE	1024
608 
609 static const struct {
610 	const char *n;
611 	int v;
612 	int d;
613 } encblkno[] = {
614 	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
615 	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
616 	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
617 };
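
/*
 * The table above maps the user-supplied ci_ivmethod string onto a cipher
 * mode and a divisor that cgd_ioctl_set() later applies to the cipher's
 * block size; "encblkno" is retained as a synonym for the historical
 * "encblkno8" behaviour.
 */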
618 
619 /* ARGSUSED */
620 static int
621 cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
622 {
623 	struct	 cgd_ioctl *ci = data;
624 	struct	 vnode *vp;
625 	int	 ret;
626 	size_t	 i;
627 	size_t	 keybytes;			/* key length in bytes */
628 	const char *cp;
629 	struct pathbuf *pb;
630 	char	 *inbuf;
631 	struct dk_softc *dksc = &cs->sc_dksc;
632 
633 	cp = ci->ci_disk;
634 
635 	ret = pathbuf_copyin(ci->ci_disk, &pb);
636 	if (ret != 0) {
637 		return ret;
638 	}
639 	ret = dk_lookup(pb, l, &vp);
640 	pathbuf_destroy(pb);
641 	if (ret != 0) {
642 		return ret;
643 	}
644 
645 	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);
646 
647 	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
648 		goto bail;
649 
650 	(void)memset(inbuf, 0, MAX_KEYSIZE);
651 	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
652 	if (ret)
653 		goto bail;
654 	cs->sc_cfuncs = cryptfuncs_find(inbuf);
655 	if (!cs->sc_cfuncs) {
656 		ret = EINVAL;
657 		goto bail;
658 	}
659 
660 	(void)memset(inbuf, 0, MAX_KEYSIZE);
661 	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
662 	if (ret)
663 		goto bail;
664 
665 	for (i = 0; i < __arraycount(encblkno); i++)
666 		if (strcmp(encblkno[i].n, inbuf) == 0)
667 			break;
668 
669 	if (i == __arraycount(encblkno)) {
670 		ret = EINVAL;
671 		goto bail;
672 	}
673 
674 	keybytes = ci->ci_keylen / 8 + 1;
675 	if (keybytes > MAX_KEYSIZE) {
676 		ret = EINVAL;
677 		goto bail;
678 	}
679 
680 	(void)memset(inbuf, 0, MAX_KEYSIZE);
681 	ret = copyin(ci->ci_key, inbuf, keybytes);
682 	if (ret)
683 		goto bail;
684 
685 	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
686 	cs->sc_cdata.cf_mode = encblkno[i].v;
687 	cs->sc_cdata.cf_keylen = ci->ci_keylen;
688 	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
689 	    &cs->sc_cdata.cf_blocksize);
690 	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
691 		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
692 		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
693 		cs->sc_cdata.cf_priv = NULL;
694 	}
695 
696 	/*
697 	 * The blocksize is supposed to be in bytes.  Unfortunately, it was
698 	 * originally expressed in bits.  For compatibility we maintain encblkno
699 	 * and encblkno8.
700 	 */
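	/*
	 * Worked example, assuming a cipher that reports the historical
	 * 128-bit value from cf_init(): with "encblkno1" (d == 8) the
	 * blocksize becomes 16 bytes, so the block number is encrypted as
	 * a single cipher block; with the legacy "encblkno"/"encblkno8"
	 * (d == 1) it stays at 128 and the block number buffer spans
	 * eight chained cipher blocks.
	 */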
701 	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
702 	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
703 	if (!cs->sc_cdata.cf_priv) {
704 		ret = EINVAL;		/* XXX is this the right error? */
705 		goto bail;
706 	}
707 	free(inbuf, M_TEMP);
708 
709 	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
710 
711 	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
712 	cs->sc_data_used = 0;
713 
714 	/* Attach the disk. */
715 	dk_attach(dksc);
716 	disk_attach(&dksc->sc_dkdev);
717 
718 	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
719 
720 	/* Try and read the disklabel. */
721 	dk_getdisklabel(dksc, 0 /* XXX ? (cause of PR 41704) */);
722 
723 	/* Discover wedges on this disk. */
724 	dkwedge_discover(&dksc->sc_dkdev);
725 
726 	return 0;
727 
728 bail:
729 	free(inbuf, M_TEMP);
730 	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
731 	return ret;
732 }
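
/*
 * Editorial sketch, not part of the driver: roughly how a configurator
 * might fill in struct cgd_ioctl before issuing CGDIOCSET on an open,
 * writable cgd device node.  The device path, algorithm parameters and
 * key handling here are illustrative assumptions; only the fields that
 * cgd_ioctl_set() consumes above are shown.
 *
 *	struct cgd_ioctl ci;
 *	char key[256 / 8 + 1];		// driver copies ci_keylen / 8 + 1 bytes
 *
 *	memset(&ci, 0, sizeof(ci));
 *	ci.ci_disk = "/dev/rwd0e";	// underlying device (example)
 *	ci.ci_alg = "aes-cbc";		// resolved via cryptfuncs_find()
 *	ci.ci_ivmethod = "encblkno1";	// must match the encblkno[] table
 *	ci.ci_keylen = 256;		// key length in bits
 *	ci.ci_key = key;		// key material
 *	ci.ci_blocksize = 128;		// handed to the cipher's cf_init()
 *	if (ioctl(fd, CGDIOCSET, &ci) == -1)
 *		err(1, "CGDIOCSET");
 */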
733 
734 /* ARGSUSED */
735 static int
736 cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
737 {
738 	int	s;
739 	struct	dk_softc *dksc = &cs->sc_dksc;
740 
741 	if (!DK_ATTACHED(dksc))
742 		return ENXIO;
743 
744 	/* Delete all of our wedges. */
745 	dkwedge_delall(&dksc->sc_dkdev);
746 
747 	/* Kill off any queued buffers. */
748 	s = splbio();
749 	bufq_drain(dksc->sc_bufq);
750 	splx(s);
751 	bufq_free(dksc->sc_bufq);
752 
753 	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
754 	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
755 	free(cs->sc_tpath, M_DEVBUF);
756 	free(cs->sc_data, M_DEVBUF);
757 	cs->sc_data_used = 0;
758 	dk_detach(dksc);
759 	disk_detach(&dksc->sc_dkdev);
760 
761 	return 0;
762 }
763 
764 static int
765 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
766 {
767 	struct cgd_softc *cs = getcgd_softc(dev);
768 	struct cgd_user *cgu;
769 	int unit;
771 
772 	unit = CGDUNIT(dev);
773 	cgu = (struct cgd_user *)data;
774 
775 	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
776 			   dev, unit, data, l));
777 
778 	if (cgu->cgu_unit == -1)
779 		cgu->cgu_unit = unit;
780 
781 	if (cgu->cgu_unit < 0)
782 		return EINVAL;	/* XXX: should this be ENXIO? */
783 
784 	cs = device_lookup_private(&cgd_cd, unit);
785 	if (cs == NULL || !DK_ATTACHED(&cs->sc_dksc)) {
786 		cgu->cgu_dev = 0;
787 		cgu->cgu_alg[0] = '\0';
788 		cgu->cgu_blocksize = 0;
789 		cgu->cgu_mode = 0;
790 		cgu->cgu_keylen = 0;
791 	}
792 	else {
793 		cgu->cgu_dev = cs->sc_tdev;
794 		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
795 		    sizeof(cgu->cgu_alg));
796 		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
797 		cgu->cgu_mode = cs->sc_cdata.cf_mode;
798 		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
799 	}
800 	return 0;
801 }
802 
803 static int
804 cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
805 	struct lwp *l)
806 {
807 	struct	disk_geom *dg;
808 	int	ret;
809 	char	*tmppath;
810 	uint64_t psize;
811 	unsigned secsize;
812 	struct dk_softc *dksc = &cs->sc_dksc;
813 
814 	cs->sc_tvn = vp;
815 	cs->sc_tpath = NULL;
816 
817 	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
818 	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
819 	if (ret)
820 		goto bail;
821 	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
822 	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);
823 
824 	cs->sc_tdev = vp->v_rdev;
825 
826 	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
827 		goto bail;
828 
829 	if (psize == 0) {
830 		ret = ENODEV;
831 		goto bail;
832 	}
833 
834 	/*
835 	 * XXX here we should probe the underlying device.  If we
836 	 *     are accessing a partition of type RAW_PART, then
837 	 *     we should populate our initial geometry with the
838 	 *     geometry that we discover from the device.
839 	 */
840 	dg = &dksc->sc_dkdev.dk_geom;
841 	memset(dg, 0, sizeof(*dg));
842 	dg->dg_secperunit = psize;
843 	// XXX: Inherit?
844 	dg->dg_secsize = DEV_BSIZE;
845 	dg->dg_ntracks = 1;
846 	dg->dg_nsectors = 1024 * (1024 / dg->dg_secsize);
847 	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
848 
849 bail:
850 	free(tmppath, M_TEMP);
851 	if (ret && cs->sc_tpath)
852 		free(cs->sc_tpath, M_DEVBUF);
853 	return ret;
854 }
855 
856 /*
857  * Our generic cipher entry point.  This takes care of the
858  * IV mode and passes off the work to the specific cipher.
859  * We implement here the IV method ``encrypted block
860  * number''.
861  *
862  * For the encryption case, we accomplish this by setting
863  * up a struct uio where the first iovec of the source is
864  * the blocknumber and the first iovec of the dest is a
865  * sink.  We then call the cipher with an IV of zero, and
866  * the right thing happens.
867  *
868  * For the decryption case, we use the same basic mechanism
869  * for symmetry, but we encrypt the block number in the
870  * first iovec.
871  *
872  * We mainly do this to avoid requiring the definition of
873  * an ECB mode.
874  *
875  * XXXrcd: for now we rely on our own crypto framework defined
876  *         in dev/cgd_crypto.c.  This will change when we
877  *         get a generic kernel crypto framework.
878  */
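
/*
 * Equivalently: a sector with block number b is CBC processed with an IV
 * equal to the final cipher block of CBC-encrypting blkno_buf(b) under a
 * zero IV.  Feeding blkno_buf through the same uio as the payload, and
 * discarding the first blocksize bytes of output into `sink', achieves
 * this without a separate IV-generation (or ECB) entry point.
 */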
879 
880 static void
881 blkno2blkno_buf(char *sbuf, daddr_t blkno)
882 {
883 	int	i;
884 
885 	/* Set up the blkno in blkno_buf.  Here we do not care much
886 	 * about the final layout of the information as long as we
887 	 * can guarantee that each sector will have a different IV
888 	 * and that the endianness of the machine will not affect
889 	 * the representation that we have chosen.
890 	 *
891 	 * We choose this representation, because it does not rely
892 	 * on the size of buf (which is the blocksize of the cipher),
893 	 * but allows daddr_t to grow without breaking existing
894 	 * disks.
895 	 *
896 	 * Note that blkno2blkno_buf does not take a size as input,
897 	 * and hence must be called on a pre-zeroed buffer of length
898 	 * greater than or equal to sizeof(daddr_t).
899 	 */
900 	for (i=0; i < sizeof(daddr_t); i++) {
901 		*sbuf++ = blkno & 0xff;
902 		blkno >>= 8;
903 	}
904 }
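
/*
 * For example, with a 64-bit daddr_t, block number 0x0102 serializes into
 * the (pre-zeroed) buffer as the bytes 02 01 00 00 00 00 00 00: least
 * significant byte first, regardless of host endianness.
 */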
905 
906 static void
907 cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
908     size_t len, daddr_t blkno, size_t secsize, int dir)
909 {
910 	char		*dst = dstv;
911 	char 		*src = srcv;
912 	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
913 	struct uio	dstuio;
914 	struct uio	srcuio;
915 	struct iovec	dstiov[2];
916 	struct iovec	srciov[2];
917 	size_t		blocksize = cs->sc_cdata.cf_blocksize;
918 	char		sink[CGD_MAXBLOCKSIZE];
919 	char		zero_iv[CGD_MAXBLOCKSIZE];
920 	char		blkno_buf[CGD_MAXBLOCKSIZE];
921 
922 	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));
923 
924 	DIAGCONDPANIC(len % blocksize != 0,
925 	    ("cgd_cipher: len %% blocksize != 0"));
926 
927 	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
928 	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
929 	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));
930 
931 	memset(zero_iv, 0x0, blocksize);
932 
933 	dstuio.uio_iov = dstiov;
934 	dstuio.uio_iovcnt = 2;
935 
936 	srcuio.uio_iov = srciov;
937 	srcuio.uio_iovcnt = 2;
938 
939 	dstiov[0].iov_base = sink;
940 	dstiov[0].iov_len  = blocksize;
941 	srciov[0].iov_base = blkno_buf;
942 	srciov[0].iov_len  = blocksize;
943 	dstiov[1].iov_len  = secsize;
944 	srciov[1].iov_len  = secsize;
945 
946 	for (; len > 0; len -= secsize) {
947 		dstiov[1].iov_base = dst;
948 		srciov[1].iov_base = src;
949 
950 		memset(blkno_buf, 0x0, blocksize);
951 		blkno2blkno_buf(blkno_buf, blkno);
952 		if (dir == CGD_CIPHER_DECRYPT) {
953 			dstuio.uio_iovcnt = 1;
954 			srcuio.uio_iovcnt = 1;
955 			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
956 			    blkno_buf, blocksize));
957 			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
958 			    zero_iv, CGD_CIPHER_ENCRYPT);
959 			memcpy(blkno_buf, sink, blocksize);
960 			dstuio.uio_iovcnt = 2;
961 			srcuio.uio_iovcnt = 2;
962 		}
963 
964 		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
965 		    blkno_buf, blocksize));
966 		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
967 		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
968 		    sink, blocksize));
969 
970 		dst += secsize;
971 		src += secsize;
972 		blkno++;
973 	}
974 }
975 
976 #ifdef DEBUG
977 static void
978 hexprint(const char *start, void *buf, int len)
979 {
980 	char	*c = buf;
981 
982 	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
983 	printf("%s: len=%06d 0x", start, len);
984 	while (len--)
985 		printf("%02x", (unsigned char) *c++);
986 }
987 #endif
988 
989 MODULE(MODULE_CLASS_DRIVER, cgd, "dk_subr");
990 
991 #ifdef _MODULE
992 CFDRIVER_DECL(cgd, DV_DISK, NULL);
993 #endif
994 
995 static int
996 cgd_modcmd(modcmd_t cmd, void *arg)
997 {
998 	int error = 0;
999 
1000 #ifdef _MODULE
1001 	devmajor_t bmajor = -1, cmajor = -1;
1002 #endif
1003 
1004 	switch (cmd) {
1005 	case MODULE_CMD_INIT:
1006 #ifdef _MODULE
1007 		error = config_cfdriver_attach(&cgd_cd);
1008 		if (error)
1009 			break;
1010 
1011 		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1012 		if (error) {
1013 			config_cfdriver_detach(&cgd_cd);
1014 			aprint_error("%s: unable to register cfattach\n",
1015 			    cgd_cd.cd_name);
1016 			break;
1017 		}
1018 
1019 		error = devsw_attach("cgd", &cgd_bdevsw, &bmajor,
1020 		    &cgd_cdevsw, &cmajor);
1021 		if (error) {
1022 			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1023 			config_cfdriver_detach(&cgd_cd);
1024 			break;
1025 		}
1026 #endif
1027 		break;
1028 
1029 	case MODULE_CMD_FINI:
1030 #ifdef _MODULE
1031 		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1032 		if (error)
1033 			break;
1034 		config_cfdriver_detach(&cgd_cd);
1035 		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1036 #endif
1037 		break;
1038 
1039 	case MODULE_CMD_STAT:
1040 		return ENOTTY;
1041 
1042 	default:
1043 		return ENOTTY;
1044 	}
1045 
1046 	return error;
1047 }
1048