xref: /netbsd-src/sys/dev/cgd.c (revision 2c6fc41c810f5088457889d00eba558e8bc74d9e)
1 /* $NetBSD: cgd.c,v 1.87 2014/05/25 19:23:49 bouyer Exp $ */
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Roland C. Dowdeswell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.87 2014/05/25 19:23:49 bouyer Exp $");
34 
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/buf.h>
41 #include <sys/bufq.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/pool.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/disk.h>
48 #include <sys/disklabel.h>
49 #include <sys/fcntl.h>
50 #include <sys/namei.h> /* for pathbuf */
51 #include <sys/vnode.h>
52 #include <sys/conf.h>
53 #include <sys/syslog.h>
54 
55 #include <dev/dkvar.h>
56 #include <dev/cgdvar.h>
57 
58 /* Entry Point Functions */
59 
60 void	cgdattach(int);
61 
62 static dev_type_open(cgdopen);
63 static dev_type_close(cgdclose);
64 static dev_type_read(cgdread);
65 static dev_type_write(cgdwrite);
66 static dev_type_ioctl(cgdioctl);
67 static dev_type_strategy(cgdstrategy);
68 static dev_type_dump(cgddump);
69 static dev_type_size(cgdsize);
70 
71 const struct bdevsw cgd_bdevsw = {
72 	.d_open = cgdopen,
73 	.d_close = cgdclose,
74 	.d_strategy = cgdstrategy,
75 	.d_ioctl = cgdioctl,
76 	.d_dump = cgddump,
77 	.d_psize = cgdsize,
78 	.d_flag = D_DISK
79 };
80 
81 const struct cdevsw cgd_cdevsw = {
82 	.d_open = cgdopen,
83 	.d_close = cgdclose,
84 	.d_read = cgdread,
85 	.d_write = cgdwrite,
86 	.d_ioctl = cgdioctl,
87 	.d_stop = nostop,
88 	.d_tty = notty,
89 	.d_poll = nopoll,
90 	.d_mmap = nommap,
91 	.d_kqfilter = nokqfilter,
92 	.d_flag = D_DISK
93 };
94 
95 static int cgd_match(device_t, cfdata_t, void *);
96 static void cgd_attach(device_t, device_t, void *);
97 static int cgd_detach(device_t, int);
98 static struct cgd_softc	*cgd_spawn(int);
99 static int cgd_destroy(device_t);
100 
101 /* Internal Functions */
102 
103 static void	cgdstart(struct dk_softc *);
104 static void	cgdiodone(struct buf *);
105 
106 static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
107 static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
108 static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
109 static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
110 			struct lwp *);
111 static void	cgd_cipher(struct cgd_softc *, void *, void *,
112 			   size_t, daddr_t, size_t, int);
113 
114 /* Pseudo-disk Interface */
115 
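/*
 * The dk_intf below plugs cgd's entry points into the generic
 * pseudo-disk helpers in dev/dksubr.c (dk_open, dk_close, dk_strategy,
 * dk_size, dk_ioctl, dk_dump).  Roughly, those helpers do the common
 * bookkeeping; dk_strategy() in particular queues the buffer on
 * sc_bufq and calls back into cgdstart() to run the queue.
 */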
116 static struct dk_intf the_dkintf = {
117 	DTYPE_CGD,
118 	"cgd",
119 	cgdopen,
120 	cgdclose,
121 	cgdstrategy,
122 	cgdstart,
123 };
124 static struct dk_intf *di = &the_dkintf;
125 
126 static struct dkdriver cgddkdriver = {
127 	.d_strategy = cgdstrategy,
128 	.d_minphys = minphys,
129 };
130 
131 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
132     cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
133 extern struct cfdriver cgd_cd;
134 
135 /* DIAGNOSTIC and DEBUG definitions */
136 
137 #if defined(CGDDEBUG) && !defined(DEBUG)
138 #define DEBUG
139 #endif
140 
141 #ifdef DEBUG
142 int cgddebug = 0;
143 
144 #define CGDB_FOLLOW	0x1
145 #define CGDB_IO	0x2
146 #define CGDB_CRYPTO	0x4
147 
148 #define IFDEBUG(x,y)		if (cgddebug & (x)) y
149 #define DPRINTF(x,y)		IFDEBUG(x, printf y)
150 #define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)
151 
152 static void	hexprint(const char *, void *, int);
153 
154 #else
155 #define IFDEBUG(x,y)
156 #define DPRINTF(x,y)
157 #define DPRINTF_FOLLOW(y)
158 #endif
159 
160 #ifdef DIAGNOSTIC
161 #define DIAGPANIC(x)		panic x
162 #define DIAGCONDPANIC(x,y)	if (x) panic y
163 #else
164 #define DIAGPANIC(x)
165 #define DIAGCONDPANIC(x,y)
166 #endif
167 
168 /* Global variables */
169 
170 /* Utility Functions */
171 
172 #define CGDUNIT(x)		DISKUNIT(x)
173 #define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO
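/*
 * Note that GETCGD_SOFTC() hides a `return ENXIO', so it may only be
 * used in functions returning int; getcgd_softc() may in turn spawn
 * the unit on first use.
 */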
174 
175 /* The code */
176 
177 static struct cgd_softc *
178 getcgd_softc(dev_t dev)
179 {
180 	int	unit = CGDUNIT(dev);
181 	struct cgd_softc *sc;
182 
183 	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
184 
185 	sc = device_lookup_private(&cgd_cd, unit);
186 	if (sc == NULL)
187 		sc = cgd_spawn(unit);
188 	return sc;
189 }
190 
191 static int
192 cgd_match(device_t self, cfdata_t cfdata, void *aux)
193 {
194 
195 	return 1;
196 }
197 
198 static void
199 cgd_attach(device_t parent, device_t self, void *aux)
200 {
201 	struct cgd_softc *sc = device_private(self);
202 
203 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
204 	dk_sc_init(&sc->sc_dksc, device_xname(self));
205 	sc->sc_dksc.sc_dev = self;
206 	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
207 
208 	if (!pmf_device_register(self, NULL, NULL))
209 		aprint_error_dev(self, "unable to register power management hooks\n");
210 }
211 
212 
213 static int
214 cgd_detach(device_t self, int flags)
215 {
216 	int ret;
217 	const int pmask = 1 << RAW_PART;
218 	struct cgd_softc *sc = device_private(self);
219 	struct dk_softc *dksc = &sc->sc_dksc;
220 
221 	if (DK_BUSY(dksc, pmask))
222 		return EBUSY;
223 
224 	if ((dksc->sc_flags & DKF_INITED) != 0 &&
225 	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
226 		return ret;
227 
228 	disk_destroy(&dksc->sc_dkdev);
229 	mutex_destroy(&sc->sc_lock);
230 
231 	return 0;
232 }
233 
234 void
235 cgdattach(int num)
236 {
237 	int error;
238 
239 	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
240 	if (error != 0)
241 		aprint_error("%s: unable to register cfattach\n",
242 		    cgd_cd.cd_name);
243 }
244 
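/*
 * cgd_spawn() and cgd_destroy() create and tear down cgd units on
 * demand: getcgd_softc() spawns a unit the first time it is looked up,
 * and cgdclose() destroys it again when it is closed without having
 * been configured (DKF_INITED clear).
 */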
245 static struct cgd_softc *
246 cgd_spawn(int unit)
247 {
248 	cfdata_t cf;
249 
250 	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
251 	cf->cf_name = cgd_cd.cd_name;
252 	cf->cf_atname = cgd_cd.cd_name;
253 	cf->cf_unit = unit;
254 	cf->cf_fstate = FSTATE_STAR;
255 
256 	return device_private(config_attach_pseudo(cf));
257 }
258 
259 static int
260 cgd_destroy(device_t dev)
261 {
262 	int error;
263 	cfdata_t cf;
264 
265 	cf = device_cfdata(dev);
266 	error = config_detach(dev, DETACH_QUIET);
267 	if (error)
268 		return error;
269 	free(cf, M_DEVBUF);
270 	return 0;
271 }
272 
273 static int
274 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
275 {
276 	struct	cgd_softc *cs;
277 
278 	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
279 	GETCGD_SOFTC(cs, dev);
280 	return dk_open(di, &cs->sc_dksc, dev, flags, fmt, l);
281 }
282 
283 static int
284 cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
285 {
286 	int error;
287 	struct	cgd_softc *cs;
288 	struct	dk_softc *dksc;
289 
290 	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
291 	GETCGD_SOFTC(cs, dev);
292 	dksc = &cs->sc_dksc;
293 	if ((error = dk_close(di, dksc, dev, flags, fmt, l)) != 0)
294 		return error;
295 
296 	if ((dksc->sc_flags & DKF_INITED) == 0) {
297 		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
298 			aprint_error_dev(dksc->sc_dev,
299 			    "unable to detach instance\n");
300 			return error;
301 		}
302 	}
303 	return 0;
304 }
305 
306 static void
307 cgdstrategy(struct buf *bp)
308 {
309 	struct	cgd_softc *cs = getcgd_softc(bp->b_dev);
310 
311 	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
312 	    (long)bp->b_bcount));
313 
314 	/*
315 	 * Reject misaligned requests.  We can encrypt and decrypt only
316 	 * complete disk sectors, and we allow the ciphers to require
317 	 * their buffers to be aligned to 32-bit boundaries.
318 	 */
319 	if (bp->b_blkno < 0 ||
320 	    (bp->b_bcount % DEV_BSIZE) != 0 ||
321 	    ((uintptr_t)bp->b_data & 3) != 0) {
322 		bp->b_error = EINVAL;
323 		bp->b_resid = bp->b_bcount;
324 		biodone(bp);
325 		return;
326 	}
327 
328 	/* XXXrcd: Should we test for (cs != NULL)? */
329 	dk_strategy(di, &cs->sc_dksc, bp);
330 	return;
331 }
332 
333 static int
334 cgdsize(dev_t dev)
335 {
336 	struct cgd_softc *cs = getcgd_softc(dev);
337 
338 	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
339 	if (!cs)
340 		return -1;
341 	return dk_size(di, &cs->sc_dksc, dev);
342 }
343 
344 /*
345  * cgd_{get,put}data deal with obtaining a buffer for the newly
346  * encrypted data.  We keep one buffer per device so that we can
347  * always have at least one transaction in flight.
348  * We use this buffer first so that we have one less piece of
349  * malloc'ed data at any given point.
350  */
351 
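/*
 * cgd_getdata() hands out the per-device buffer when it is free and
 * otherwise falls back to malloc(9) with M_NOWAIT, so it may return
 * NULL; the caller (cgdstart() below) must be prepared to defer the
 * request in that case.
 */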
352 static void *
353 cgd_getdata(struct dk_softc *dksc, unsigned long size)
354 {
355 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
356 	void *	data = NULL;
357 
358 	mutex_enter(&cs->sc_lock);
359 	if (cs->sc_data_used == 0) {
360 		cs->sc_data_used = 1;
361 		data = cs->sc_data;
362 	}
363 	mutex_exit(&cs->sc_lock);
364 
365 	if (data)
366 		return data;
367 
368 	return malloc(size, M_DEVBUF, M_NOWAIT);
369 }
370 
371 static void
372 cgd_putdata(struct dk_softc *dksc, void *data)
373 {
374 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
375 
376 	if (data == cs->sc_data) {
377 		mutex_enter(&cs->sc_lock);
378 		cs->sc_data_used = 0;
379 		mutex_exit(&cs->sc_lock);
380 	} else {
381 		free(data, M_DEVBUF);
382 	}
383 }
384 
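/*
 * cgdstart() pulls buffers off the bufq and hands them to the
 * underlying vnode.  Note the peek/get dance: a buffer is only
 * dequeued once the iobuf (and, for a write, the bounce buffer holding
 * the encrypted data) has been obtained, so that on allocation failure
 * the request stays queued and is retried later, e.g. when cgdiodone()
 * calls cgdstart() again.
 */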
385 static void
386 cgdstart(struct dk_softc *dksc)
387 {
388 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
389 	struct	buf *bp, *nbp;
390 #ifdef DIAGNOSTIC
391 	struct	buf *qbp;
392 #endif
393 	void *	addr;
394 	void *	newaddr;
395 	daddr_t	bn;
396 	struct	vnode *vp;
397 
398 	while ((bp = bufq_peek(dksc->sc_bufq)) != NULL) {
399 
400 		DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
401 		disk_busy(&dksc->sc_dkdev);
402 
403 		bn = bp->b_rawblkno;
404 
405 		/*
406 		 * We attempt to allocate all of our resources up front, so that
407 		 * we can fail quickly if they are unavailable.
408 		 */
409 		nbp = getiobuf(cs->sc_tvn, false);
410 		if (nbp == NULL) {
411 			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
412 			break;
413 		}
414 
415 		/*
416 		 * If we are writing, then we need to encrypt the outgoing
417 		 * block into a new block of memory.
418 		 */
419 		newaddr = addr = bp->b_data;
420 		if ((bp->b_flags & B_READ) == 0) {
421 			newaddr = cgd_getdata(dksc, bp->b_bcount);
422 			if (!newaddr) {
423 				putiobuf(nbp);
424 				disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
425 				break;
426 			}
427 			cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
428 			    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
429 		}
430 		/* we now have all needed resources to process this buf */
431 #ifdef DIAGNOSTIC
432 		qbp = bufq_get(dksc->sc_bufq);
433 		KASSERT(bp == qbp);
434 #else
435 		(void)bufq_get(dksc->sc_bufq);
436 #endif
437 		nbp->b_data = newaddr;
438 		nbp->b_flags = bp->b_flags;
439 		nbp->b_oflags = bp->b_oflags;
440 		nbp->b_cflags = bp->b_cflags;
441 		nbp->b_iodone = cgdiodone;
442 		nbp->b_proc = bp->b_proc;
443 		nbp->b_blkno = bn;
444 		nbp->b_bcount = bp->b_bcount;
445 		nbp->b_private = bp;
446 
447 		BIO_COPYPRIO(nbp, bp);
448 
449 		if ((nbp->b_flags & B_READ) == 0) {
450 			vp = nbp->b_vp;
451 			mutex_enter(vp->v_interlock);
452 			vp->v_numoutput++;
453 			mutex_exit(vp->v_interlock);
454 		}
455 		VOP_STRATEGY(cs->sc_tvn, nbp);
456 	}
457 }
458 
459 static void
460 cgdiodone(struct buf *nbp)
461 {
462 	struct	buf *obp = nbp->b_private;
463 	struct	cgd_softc *cs = getcgd_softc(obp->b_dev);
464 	struct	dk_softc *dksc = &cs->sc_dksc;
465 	int s;
466 
467 	KDASSERT(cs);
468 
469 	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
470 	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
471 	    obp, obp->b_bcount, obp->b_resid));
472 	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64 " addr %p bcnt %d\n",
473 	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
474 	    nbp->b_bcount));
475 	if (nbp->b_error != 0) {
476 		obp->b_error = nbp->b_error;
477 		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
478 		    obp->b_error));
479 	}
480 
481 	/* Perform the decryption if we are reading.
482 	 *
483 	 * Note: use the blocknumber from nbp, since it is what
484 	 *       we used to encrypt the blocks.
485 	 */
486 
487 	if (nbp->b_flags & B_READ)
488 		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
489 		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);
490 
491 	/* If we allocated memory, free it now... */
492 	if (nbp->b_data != obp->b_data)
493 		cgd_putdata(dksc, nbp->b_data);
494 
495 	putiobuf(nbp);
496 
497 	/* Request is complete for whatever reason */
498 	obp->b_resid = 0;
499 	if (obp->b_error != 0)
500 		obp->b_resid = obp->b_bcount;
501 	s = splbio();
502 	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
503 	    (obp->b_flags & B_READ));
504 	biodone(obp);
505 	cgdstart(dksc);
506 	splx(s);
507 }
508 
509 /* XXX: we should probably put these into dksubr.c, mostly */
510 static int
511 cgdread(dev_t dev, struct uio *uio, int flags)
512 {
513 	struct	cgd_softc *cs;
514 	struct	dk_softc *dksc;
515 
516 	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
517 	    (unsigned long long)dev, uio, flags));
518 	GETCGD_SOFTC(cs, dev);
519 	dksc = &cs->sc_dksc;
520 	if ((dksc->sc_flags & DKF_INITED) == 0)
521 		return ENXIO;
522 	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
523 }
524 
525 /* XXX: we should probably put these into dksubr.c, mostly */
526 static int
527 cgdwrite(dev_t dev, struct uio *uio, int flags)
528 {
529 	struct	cgd_softc *cs;
530 	struct	dk_softc *dksc;
531 
532 	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
533 	GETCGD_SOFTC(cs, dev);
534 	dksc = &cs->sc_dksc;
535 	if ((dksc->sc_flags & DKF_INITED) == 0)
536 		return ENXIO;
537 	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
538 }
539 
540 static int
541 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
542 {
543 	struct	cgd_softc *cs;
544 	struct	dk_softc *dksc;
545 	int	part = DISKPART(dev);
546 	int	pmask = 1 << part;
547 
548 	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
549 	    dev, cmd, data, flag, l));
550 
551 	switch (cmd) {
552 	case CGDIOCGET: /* don't call cgd_spawn() if the device isn't there */
553 		cs = NULL;
554 		dksc = NULL;
555 		break;
556 	case CGDIOCSET:
557 	case CGDIOCCLR:
558 		if ((flag & FWRITE) == 0)
559 			return EBADF;
560 		/* FALLTHROUGH */
561 	default:
562 		GETCGD_SOFTC(cs, dev);
563 		dksc = &cs->sc_dksc;
564 		break;
565 	}
566 
567 	switch (cmd) {
568 	case CGDIOCSET:
569 		if (dksc->sc_flags & DKF_INITED)
570 			return EBUSY;
571 		return cgd_ioctl_set(cs, data, l);
572 	case CGDIOCCLR:
573 		if (DK_BUSY(&cs->sc_dksc, pmask))
574 			return EBUSY;
575 		return cgd_ioctl_clr(cs, l);
576 	case CGDIOCGET:
577 		return cgd_ioctl_get(dev, data, l);
578 	case DIOCCACHESYNC:
579 		/*
580 		 * XXX Do we really need to care about having a writable
581 		 * file descriptor here?
582 		 */
583 		if ((flag & FWRITE) == 0)
584 			return EBADF;
585 
586 		/*
587 		 * We pass this call down to the underlying disk.
588 		 */
589 		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
590 	default:
591 		return dk_ioctl(di, dksc, dev, cmd, data, flag, l);
592 	}
593 }
594 
595 static int
596 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
597 {
598 	struct	cgd_softc *cs;
599 
600 	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
601 	    dev, blkno, va, (unsigned long)size));
602 	GETCGD_SOFTC(cs, dev);
603 	return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
604 }
605 
606 /*
607  * XXXrcd:
608  *  for now we hardcode the maximum key length.
609  */
610 #define MAX_KEYSIZE	1024
611 
612 static const struct {
613 	const char *n;
614 	int v;
615 	int d;
616 } encblkno[] = {
617 	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
618 	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
619 	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
620 };
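/*
 * "v" is the cipher mode recorded in cf_mode and "d" is the divisor
 * later applied to cf_blocksize in cgd_ioctl_set().  Historically the
 * blocksize was expressed in bits; "encblkno"/"encblkno8" keep that
 * value unchanged (d == 1), which for a cipher with a 128-bit block in
 * effect CBC-encrypts the block number eight times, while "encblkno1"
 * divides by 8 (d == 8) so the block number is encrypted exactly once.
 */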
621 
622 /* ARGSUSED */
623 static int
624 cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
625 {
626 	struct	 cgd_ioctl *ci = data;
627 	struct	 vnode *vp;
628 	int	 ret;
629 	size_t	 i;
630 	size_t	 keybytes;			/* key length in bytes */
631 	const char *cp;
632 	struct pathbuf *pb;
633 	char	 *inbuf;
634 	struct dk_softc *dksc = &cs->sc_dksc;
635 
636 	cp = ci->ci_disk;
637 
638 	ret = pathbuf_copyin(ci->ci_disk, &pb);
639 	if (ret != 0) {
640 		return ret;
641 	}
642 	ret = dk_lookup(pb, l, &vp);
643 	pathbuf_destroy(pb);
644 	if (ret != 0) {
645 		return ret;
646 	}
647 
648 	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);
649 
650 	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
651 		goto bail;
652 
653 	(void)memset(inbuf, 0, MAX_KEYSIZE);
654 	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
655 	if (ret)
656 		goto bail;
657 	cs->sc_cfuncs = cryptfuncs_find(inbuf);
658 	if (!cs->sc_cfuncs) {
659 		ret = EINVAL;
660 		goto bail;
661 	}
662 
663 	(void)memset(inbuf, 0, MAX_KEYSIZE);
664 	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
665 	if (ret)
666 		goto bail;
667 
668 	for (i = 0; i < __arraycount(encblkno); i++)
669 		if (strcmp(encblkno[i].n, inbuf) == 0)
670 			break;
671 
672 	if (i == __arraycount(encblkno)) {
673 		ret = EINVAL;
674 		goto bail;
675 	}
676 
677 	keybytes = ci->ci_keylen / 8 + 1;
678 	if (keybytes > MAX_KEYSIZE) {
679 		ret = EINVAL;
680 		goto bail;
681 	}
682 
683 	(void)memset(inbuf, 0, MAX_KEYSIZE);
684 	ret = copyin(ci->ci_key, inbuf, keybytes);
685 	if (ret)
686 		goto bail;
687 
688 	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
689 	cs->sc_cdata.cf_mode = encblkno[i].v;
690 	cs->sc_cdata.cf_keylen = ci->ci_keylen;
691 	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
692 	    &cs->sc_cdata.cf_blocksize);
693 	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
694 		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
695 		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
696 		cs->sc_cdata.cf_priv = NULL;
697 	}
698 
699 	/*
700 	 * The blocksize is supposed to be in bytes.  Unfortunately, it was
701 	 * originally expressed in bits.  For compatibility we maintain encblkno
702 	 * and encblkno8.
703 	 */
704 	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
705 	(void)memset(inbuf, 0, MAX_KEYSIZE);
706 	if (!cs->sc_cdata.cf_priv) {
707 		ret = EINVAL;		/* XXX is this the right error? */
708 		goto bail;
709 	}
710 	free(inbuf, M_TEMP);
711 
712 	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
713 
714 	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
715 	cs->sc_data_used = 0;
716 
717 	dksc->sc_flags |= DKF_INITED;
718 
719 	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
720 
721 	/* Attach the disk. */
722 	disk_attach(&dksc->sc_dkdev);
723 
724 	/* Try and read the disklabel. */
725 	dk_getdisklabel(di, dksc, 0 /* XXX ? (cause of PR 41704) */);
726 
727 	/* Discover wedges on this disk. */
728 	dkwedge_discover(&dksc->sc_dkdev);
729 
730 	return 0;
731 
732 bail:
733 	free(inbuf, M_TEMP);
734 	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
735 	return ret;
736 }
737 
738 /* ARGSUSED */
739 static int
740 cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
741 {
742 	int	s;
743 	struct	dk_softc *dksc = &cs->sc_dksc;
744 
745 	if ((dksc->sc_flags & DKF_INITED) == 0)
746 		return ENXIO;
747 
748 	/* Delete all of our wedges. */
749 	dkwedge_delall(&dksc->sc_dkdev);
750 
751 	/* Kill off any queued buffers. */
752 	s = splbio();
753 	bufq_drain(dksc->sc_bufq);
754 	splx(s);
755 	bufq_free(dksc->sc_bufq);
756 
757 	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
758 	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
759 	free(cs->sc_tpath, M_DEVBUF);
760 	free(cs->sc_data, M_DEVBUF);
761 	cs->sc_data_used = 0;
762 	dksc->sc_flags &= ~DKF_INITED;
763 	disk_detach(&dksc->sc_dkdev);
764 
765 	return 0;
766 }
767 
768 static int
769 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
770 {
771 	struct cgd_softc *cs;
772 	struct cgd_user *cgu;
773 	int unit;
775 
776 	unit = CGDUNIT(dev);
777 	cgu = (struct cgd_user *)data;
778 
779 	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
780 			   dev, unit, data, l));
781 
782 	if (cgu->cgu_unit == -1)
783 		cgu->cgu_unit = unit;
784 
785 	if (cgu->cgu_unit < 0)
786 		return EINVAL;	/* XXX: should this be ENXIO? */
787 
788 	cs = device_lookup_private(&cgd_cd, unit);
789 	if (cs == NULL || (cs->sc_dksc.sc_flags & DKF_INITED) == 0) {
790 		cgu->cgu_dev = 0;
791 		cgu->cgu_alg[0] = '\0';
792 		cgu->cgu_blocksize = 0;
793 		cgu->cgu_mode = 0;
794 		cgu->cgu_keylen = 0;
795 	}
796 	else {
797 		cgu->cgu_dev = cs->sc_tdev;
798 		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
799 		    sizeof(cgu->cgu_alg));
800 		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
801 		cgu->cgu_mode = cs->sc_cdata.cf_mode;
802 		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
803 	}
804 	return 0;
805 }
806 
807 static int
808 cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
809 	struct lwp *l)
810 {
811 	struct	disk_geom *dg;
812 	struct	vattr va;
813 	int	ret;
814 	char	*tmppath;
815 	uint64_t psize;
816 	unsigned secsize;
817 	struct dk_softc *dksc = &cs->sc_dksc;
818 
819 	cs->sc_tvn = vp;
820 	cs->sc_tpath = NULL;
821 
822 	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
823 	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
824 	if (ret)
825 		goto bail;
826 	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
827 	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);
828 
829 	vn_lock(vp, LK_SHARED | LK_RETRY);
830 	ret = VOP_GETATTR(vp, &va, l->l_cred);
831 	VOP_UNLOCK(vp);
832 	if (ret != 0)
833 		goto bail;
834 
835 	cs->sc_tdev = va.va_rdev;
836 
837 	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
838 		goto bail;
839 
840 	if (psize == 0) {
841 		ret = ENODEV;
842 		goto bail;
843 	}
844 
845 	/*
846 	 * XXX here we should probe the underlying device.  If we
847 	 *     are accessing a partition of type RAW_PART, then
848 	 *     we should populate our initial geometry with the
849 	 *     geometry that we discover from the device.
850 	 */
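	/*
	 * The synthetic geometry below works out to one track per
	 * cylinder and, with 512-byte (DEV_BSIZE) sectors, 2048 sectors
	 * per track, i.e. one megabyte per cylinder, so dg_ncylinders
	 * is roughly the size of the device in megabytes.
	 */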
851 	dg = &dksc->sc_dkdev.dk_geom;
852 	memset(dg, 0, sizeof(*dg));
853 	dg->dg_secperunit = psize;
854 	// XXX: Inherit?
855 	dg->dg_secsize = DEV_BSIZE;
856 	dg->dg_ntracks = 1;
857 	dg->dg_nsectors = 1024 * (1024 / dg->dg_secsize);
858 	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
859 
860 bail:
861 	free(tmppath, M_TEMP);
862 	if (ret && cs->sc_tpath)
863 		free(cs->sc_tpath, M_DEVBUF);
864 	return ret;
865 }
866 
867 /*
868  * Our generic cipher entry point.  This takes care of the
869  * IV mode and passes off the work to the specific cipher.
870  * We implement here the IV method ``encrypted block
871  * number''.
872  *
873  * For the encryption case, we accomplish this by setting
874  * up a struct uio where the first iovec of the source is
875  * the blocknumber and the first iovec of the dest is a
876  * sink.  We then call the cipher with an IV of zero, and
877  * the right thing happens.
878  *
879  * For the decryption case, we use the same basic mechanism
880  * for symmetry, but we encrypt the block number in the
881  * first iovec.
882  *
883  * We mainly do this to avoid requiring the definition of
884  * an ECB mode.
885  *
886  * XXXrcd: for now we rely on our own crypto framework defined
887  *         in dev/cgd_crypto.c.  This will change when we
888  *         get a generic kernel crypto framework.
889  */
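/*
 * Concretely, the uio set up in cgd_cipher() below looks roughly like
 * this for each sector:
 *
 *	srciov[0] = blkno_buf  (the zero-padded block number)
 *	srciov[1] = the sector's source data
 *	dstiov[0] = sink       (the encrypted block number, discarded)
 *	dstiov[1] = the sector's destination data
 *
 * With a zero IV and CBC chaining, the encrypted block number then
 * serves as the effective IV for the sector data that follows it.
 */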
890 
891 static void
892 blkno2blkno_buf(char *sbuf, daddr_t blkno)
893 {
894 	int	i;
895 
896 	/* Set up the blkno in blkno_buf; here we do not care much
897 	 * about the final layout of the information as long as we
898 	 * can guarantee that each sector will have a different IV
899 	 * and that the endianness of the machine will not affect
900 	 * the representation that we have chosen.
901 	 *
902 	 * We choose this representation, because it does not rely
903 	 * on the size of buf (which is the blocksize of the cipher),
904 	 * but allows daddr_t to grow without breaking existing
905 	 * disks.
906 	 *
907 	 * Note that blkno2blkno_buf does not take a size as input,
908 	 * and hence must be called on a pre-zeroed buffer of length
909 	 * greater than or equal to sizeof(daddr_t).
910 	 */
911 	for (i=0; i < sizeof(daddr_t); i++) {
912 		*sbuf++ = blkno & 0xff;
913 		blkno >>= 8;
914 	}
915 }
916 
917 static void
918 cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
919     size_t len, daddr_t blkno, size_t secsize, int dir)
920 {
921 	char		*dst = dstv;
922 	char 		*src = srcv;
923 	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
924 	struct uio	dstuio;
925 	struct uio	srcuio;
926 	struct iovec	dstiov[2];
927 	struct iovec	srciov[2];
928 	size_t		blocksize = cs->sc_cdata.cf_blocksize;
929 	char		sink[CGD_MAXBLOCKSIZE];
930 	char		zero_iv[CGD_MAXBLOCKSIZE];
931 	char		blkno_buf[CGD_MAXBLOCKSIZE];
932 
933 	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));
934 
935 	DIAGCONDPANIC(len % blocksize != 0,
936 	    ("cgd_cipher: len %% blocksize != 0"));
937 
938 	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
939 	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
940 	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));
941 
942 	memset(zero_iv, 0x0, blocksize);
943 
944 	dstuio.uio_iov = dstiov;
945 	dstuio.uio_iovcnt = 2;
946 
947 	srcuio.uio_iov = srciov;
948 	srcuio.uio_iovcnt = 2;
949 
950 	dstiov[0].iov_base = sink;
951 	dstiov[0].iov_len  = blocksize;
952 	srciov[0].iov_base = blkno_buf;
953 	srciov[0].iov_len  = blocksize;
954 	dstiov[1].iov_len  = secsize;
955 	srciov[1].iov_len  = secsize;
956 
957 	for (; len > 0; len -= secsize) {
958 		dstiov[1].iov_base = dst;
959 		srciov[1].iov_base = src;
960 
961 		memset(blkno_buf, 0x0, blocksize);
962 		blkno2blkno_buf(blkno_buf, blkno);
963 		if (dir == CGD_CIPHER_DECRYPT) {
964 			dstuio.uio_iovcnt = 1;
965 			srcuio.uio_iovcnt = 1;
966 			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
967 			    blkno_buf, blocksize));
968 			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
969 			    zero_iv, CGD_CIPHER_ENCRYPT);
970 			memcpy(blkno_buf, sink, blocksize);
971 			dstuio.uio_iovcnt = 2;
972 			srcuio.uio_iovcnt = 2;
973 		}
974 
975 		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
976 		    blkno_buf, blocksize));
977 		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
978 		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
979 		    sink, blocksize));
980 
981 		dst += secsize;
982 		src += secsize;
983 		blkno++;
984 	}
985 }
986 
987 #ifdef DEBUG
988 static void
989 hexprint(const char *start, void *buf, int len)
990 {
991 	char	*c = buf;
992 
993 	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
994 	printf("%s: len=%06d 0x", start, len);
995 	while (len--)
996 		printf("%02x", (unsigned char) *c++);
997 }
998 #endif
999 
1000 MODULE(MODULE_CLASS_DRIVER, cgd, "dk_subr");
1001 
1002 #ifdef _MODULE
1003 CFDRIVER_DECL(cgd, DV_DISK, NULL);
1004 #endif
1005 
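/*
 * Module plumbing: MODULE_CMD_INIT registers the cfdriver, the
 * cfattach and the block/character devsw entries (bmajor/cmajor of -1
 * ask devsw_attach() to allocate majors dynamically); MODULE_CMD_FINI
 * undoes those registrations.
 */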
1006 static int
1007 cgd_modcmd(modcmd_t cmd, void *arg)
1008 {
1009 	int error = 0;
1010 
1011 #ifdef _MODULE
1012 	int bmajor = -1, cmajor = -1;
1013 #endif
1014 
1015 	switch (cmd) {
1016 	case MODULE_CMD_INIT:
1017 #ifdef _MODULE
1018 		error = config_cfdriver_attach(&cgd_cd);
1019 		if (error)
1020 			break;
1021 
1022 		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1023 		if (error) {
1024 			config_cfdriver_detach(&cgd_cd);
1025 			aprint_error("%s: unable to register cfattach\n",
1026 			    cgd_cd.cd_name);
1027 			break;
1028 		}
1029 
1030 		error = devsw_attach("cgd", &cgd_bdevsw, &bmajor,
1031 		    &cgd_cdevsw, &cmajor);
1032 		if (error) {
1033 			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1034 			config_cfdriver_detach(&cgd_cd);
1035 			break;
1036 		}
1037 #endif
1038 		break;
1039 
1040 	case MODULE_CMD_FINI:
1041 #ifdef _MODULE
1042 		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1043 		if (error)
1044 			break;
1045 		config_cfdriver_detach(&cgd_cd);
1046 		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1047 #endif
1048 		break;
1049 
1050 	case MODULE_CMD_STAT:
1051 		return ENOTTY;
1052 
1053 	default:
1054 		return ENOTTY;
1055 	}
1056 
1057 	return error;
1058 }
1059