xref: /netbsd-src/sys/dev/cgd.c (revision 19ef5b5b0bcb90f63509df6e78769de1b57c2758)
1 /* $NetBSD: cgd.c,v 1.85 2014/03/18 15:44:37 skrll Exp $ */
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Roland C. Dowdeswell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.85 2014/03/18 15:44:37 skrll Exp $");
34 
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/buf.h>
41 #include <sys/bufq.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/pool.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/disk.h>
48 #include <sys/disklabel.h>
49 #include <sys/fcntl.h>
50 #include <sys/namei.h> /* for pathbuf */
51 #include <sys/vnode.h>
52 #include <sys/conf.h>
53 #include <sys/syslog.h>
54 
55 #include <dev/dkvar.h>
56 #include <dev/cgdvar.h>
57 
/* Entry Point Functions */

/* Pseudo-device attach hook, called from autoconf at boot. */
void	cgdattach(int);

/* Block/character device entry points (wired into the devsw tables below). */
static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);
70 
/* Block device switch: routes bdev(9) operations to the cgd entry points. */
const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_flag = D_DISK
};
80 
/* Character device switch: raw I/O goes through cgdread/cgdwrite (physio). */
const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_flag = D_DISK
};
94 
/* Autoconfiguration glue. */
static int cgd_match(device_t, cfdata_t, void *);
static void cgd_attach(device_t, device_t, void *);
static int cgd_detach(device_t, int);
/* On-demand creation/destruction of pseudo-device instances. */
static struct cgd_softc	*cgd_spawn(int);
static int cgd_destroy(device_t);

/* Internal Functions */

/* dksubr start hook and the iodone callback for nested buffers. */
static int	cgdstart(struct dk_softc *, struct buf *);
static void	cgdiodone(struct buf *);

/* ioctl helpers: configure, unconfigure and query an instance. */
static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
			struct lwp *);
/* Encrypt/decrypt a run of sectors with per-sector IVs. */
static void	cgd_cipher(struct cgd_softc *, void *, void *,
			   size_t, daddr_t, size_t, int);
113 
/* Pseudo-disk Interface */

/*
 * dksubr(9) glue: identifies this driver to the common disk framework
 * and supplies the open/close/strategy/start hooks it calls back into.
 */
static struct dk_intf the_dkintf = {
	DTYPE_CGD,
	"cgd",
	cgdopen,
	cgdclose,
	cgdstrategy,
	cgdstart,
};
static struct dk_intf *di = &the_dkintf;

/* disk(9) driver hooks for the attached struct disk. */
static struct dkdriver cgddkdriver = {
	.d_strategy = cgdstrategy,
	.d_minphys = minphys,
};
130 
/* Register the autoconf attachment; instances may detach at shutdown. */
CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
extern struct cfdriver cgd_cd;
134 
/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;		/* bitmask of CGDB_* tracing categories */

#define CGDB_FOLLOW	0x1	/* trace function entry */
#define CGDB_IO	0x2	/* trace I/O requests */
#define CGDB_CRYPTO	0x4	/* trace cipher steps */

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif

/* Global variables */

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
/* Resolve dev -> softc (spawning if needed); bail with ENXIO on failure. */
#define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO
174 
175 /* The code */
176 
177 static struct cgd_softc *
178 getcgd_softc(dev_t dev)
179 {
180 	int	unit = CGDUNIT(dev);
181 	struct cgd_softc *sc;
182 
183 	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
184 
185 	sc = device_lookup_private(&cgd_cd, unit);
186 	if (sc == NULL)
187 		sc = cgd_spawn(unit);
188 	return sc;
189 }
190 
191 static int
192 cgd_match(device_t self, cfdata_t cfdata, void *aux)
193 {
194 
195 	return 1;
196 }
197 
198 static void
199 cgd_attach(device_t parent, device_t self, void *aux)
200 {
201 	struct cgd_softc *sc = device_private(self);
202 
203 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
204 	dk_sc_init(&sc->sc_dksc, device_xname(self));
205 	sc->sc_dksc.sc_dev = self;
206 	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
207 
208 	 if (!pmf_device_register(self, NULL, NULL))
209 		aprint_error_dev(self, "unable to register power management hooks\n");
210 }
211 
212 
213 static int
214 cgd_detach(device_t self, int flags)
215 {
216 	int ret;
217 	const int pmask = 1 << RAW_PART;
218 	struct cgd_softc *sc = device_private(self);
219 	struct dk_softc *dksc = &sc->sc_dksc;
220 
221 	if (DK_BUSY(dksc, pmask))
222 		return EBUSY;
223 
224 	if ((dksc->sc_flags & DKF_INITED) != 0 &&
225 	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
226 		return ret;
227 
228 	disk_destroy(&dksc->sc_dkdev);
229 
230 	return 0;
231 }
232 
233 void
234 cgdattach(int num)
235 {
236 	int error;
237 
238 	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
239 	if (error != 0)
240 		aprint_error("%s: unable to register cfattach\n",
241 		    cgd_cd.cd_name);
242 }
243 
244 static struct cgd_softc *
245 cgd_spawn(int unit)
246 {
247 	cfdata_t cf;
248 
249 	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
250 	cf->cf_name = cgd_cd.cd_name;
251 	cf->cf_atname = cgd_cd.cd_name;
252 	cf->cf_unit = unit;
253 	cf->cf_fstate = FSTATE_STAR;
254 
255 	return device_private(config_attach_pseudo(cf));
256 }
257 
258 static int
259 cgd_destroy(device_t dev)
260 {
261 	int error;
262 	cfdata_t cf;
263 
264 	cf = device_cfdata(dev);
265 	error = config_detach(dev, DETACH_QUIET);
266 	if (error)
267 		return error;
268 	free(cf, M_DEVBUF);
269 	return 0;
270 }
271 
272 static int
273 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
274 {
275 	struct	cgd_softc *cs;
276 
277 	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
278 	GETCGD_SOFTC(cs, dev);
279 	return dk_open(di, &cs->sc_dksc, dev, flags, fmt, l);
280 }
281 
282 static int
283 cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
284 {
285 	int error;
286 	struct	cgd_softc *cs;
287 	struct	dk_softc *dksc;
288 
289 	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
290 	GETCGD_SOFTC(cs, dev);
291 	dksc = &cs->sc_dksc;
292 	if ((error =  dk_close(di, dksc, dev, flags, fmt, l)) != 0)
293 		return error;
294 
295 	if ((dksc->sc_flags & DKF_INITED) == 0) {
296 		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
297 			aprint_error_dev(dksc->sc_dev,
298 			    "unable to detach instance\n");
299 			return error;
300 		}
301 	}
302 	return 0;
303 }
304 
305 static void
306 cgdstrategy(struct buf *bp)
307 {
308 	struct	cgd_softc *cs = getcgd_softc(bp->b_dev);
309 
310 	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
311 	    (long)bp->b_bcount));
312 
313 	/*
314 	 * Reject unaligned writes.  We can encrypt and decrypt only
315 	 * complete disk sectors, and we let the ciphers require their
316 	 * buffers to be aligned to 32-bit boundaries.
317 	 */
318 	if (bp->b_blkno < 0 ||
319 	    (bp->b_bcount % DEV_BSIZE) != 0 ||
320 	    ((uintptr_t)bp->b_data & 3) != 0) {
321 		bp->b_error = EINVAL;
322 		bp->b_resid = bp->b_bcount;
323 		biodone(bp);
324 		return;
325 	}
326 
327 	/* XXXrcd: Should we test for (cs != NULL)? */
328 	dk_strategy(di, &cs->sc_dksc, bp);
329 	return;
330 }
331 
332 static int
333 cgdsize(dev_t dev)
334 {
335 	struct cgd_softc *cs = getcgd_softc(dev);
336 
337 	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
338 	if (!cs)
339 		return -1;
340 	return dk_size(di, &cs->sc_dksc, dev);
341 }
342 
343 /*
344  * cgd_{get,put}data are functions that deal with getting a buffer
345  * for the new encrypted data.  We have a buffer per device so that
346  * we can ensure that we can always have a transaction in flight.
347  * We use this buffer first so that we have one less piece of
348  * malloc'ed data at any given point.
349  */
350 
351 static void *
352 cgd_getdata(struct dk_softc *dksc, unsigned long size)
353 {
354 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
355 	void *	data = NULL;
356 
357 	mutex_enter(&cs->sc_lock);
358 	if (cs->sc_data_used == 0) {
359 		cs->sc_data_used = 1;
360 		data = cs->sc_data;
361 	}
362 	mutex_exit(&cs->sc_lock);
363 
364 	if (data)
365 		return data;
366 
367 	return malloc(size, M_DEVBUF, M_NOWAIT);
368 }
369 
370 static void
371 cgd_putdata(struct dk_softc *dksc, void *data)
372 {
373 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
374 
375 	if (data == cs->sc_data) {
376 		mutex_enter(&cs->sc_lock);
377 		cs->sc_data_used = 0;
378 		mutex_exit(&cs->sc_lock);
379 	} else {
380 		free(data, M_DEVBUF);
381 	}
382 }
383 
/*
 * dksubr start hook: issue one transfer.  Allocates a nested buffer,
 * encrypts outgoing data into a bounce buffer for writes, and forwards
 * the I/O to the underlying vnode.  Returns 0 on success and -1 when a
 * resource (iobuf or bounce buffer) is temporarily unavailable, letting
 * the dksubr framework requeue the request.
 */
static int
cgdstart(struct dk_softc *dksc, struct buf *bp)
{
	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
	struct	buf *nbp;
	void *	addr;
	void *	newaddr;
	daddr_t	bn;
	struct	vnode *vp;

	DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
	disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */

	nbp = getiobuf(cs->sc_tvn, false);
	if (nbp == NULL) {
		/* Undo the disk_busy() above before asking for a retry. */
		disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
		return -1;
	}

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.  If we fail, then we
	 * return an error and let the dksubr framework deal with it.
	 */
	newaddr = addr = bp->b_data;
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(dksc, bp->b_bcount);
		if (!newaddr) {
			putiobuf(nbp);
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			return -1;
		}
		/* Encrypt into the bounce buffer, IV'd by raw block number. */
		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
		    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
	}

	/* Clone the request into nbp, aimed at the underlying device. */
	nbp->b_data = newaddr;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_blkno = bn;
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = bp;	/* so cgdiodone() can find the original bp */

	BIO_COPYPRIO(nbp, bp);

	/* Writes must be accounted in the target vnode's output count. */
	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(cs->sc_tvn, nbp);
	return 0;
}
448 
/*
 * Completion callback for the nested buffer issued by cgdstart():
 * propagate errors, decrypt in place for reads, release the bounce
 * buffer and iobuf, then finish the original request and kick the
 * dksubr queue for the next one.
 */
static void
cgdiodone(struct buf *nbp)
{
	struct	buf *obp = nbp->b_private;	/* original request */
	struct	cgd_softc *cs = getcgd_softc(obp->b_dev);
	struct	dk_softc *dksc = &cs->sc_dksc;
	int s;

	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64 " addr %p bcnt %d\n",
	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 *       we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ)
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;
	/* Accounting and completion run at splbio, as biodone expects. */
	s = splbio();
	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
	    (obp->b_flags & B_READ));
	biodone(obp);
	dk_iodone(di, dksc);	/* start the next queued request, if any */
	splx(s);
}
498 
499 /* XXX: we should probably put these into dksubr.c, mostly */
500 static int
501 cgdread(dev_t dev, struct uio *uio, int flags)
502 {
503 	struct	cgd_softc *cs;
504 	struct	dk_softc *dksc;
505 
506 	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
507 	    (unsigned long long)dev, uio, flags));
508 	GETCGD_SOFTC(cs, dev);
509 	dksc = &cs->sc_dksc;
510 	if ((dksc->sc_flags & DKF_INITED) == 0)
511 		return ENXIO;
512 	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
513 }
514 
515 /* XXX: we should probably put these into dksubr.c, mostly */
516 static int
517 cgdwrite(dev_t dev, struct uio *uio, int flags)
518 {
519 	struct	cgd_softc *cs;
520 	struct	dk_softc *dksc;
521 
522 	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
523 	GETCGD_SOFTC(cs, dev);
524 	dksc = &cs->sc_dksc;
525 	if ((dksc->sc_flags & DKF_INITED) == 0)
526 		return ENXIO;
527 	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
528 }
529 
/*
 * ioctl entry point.  The first switch decides whether a softc is
 * required (CGDIOCGET must not spawn a device as a side effect); the
 * second dispatches the command.  cgd-specific commands are handled
 * here, everything else falls through to the dksubr framework.
 */
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET: /* don't call cgd_spawn() if the device isn't there */
		cs = NULL;
		dksc = NULL;
		break;
	case CGDIOCSET:
	case CGDIOCCLR:
		/* Configuration changes need a writable descriptor. */
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		GETCGD_SOFTC(cs, dev);
		dksc = &cs->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		if (dksc->sc_flags & DKF_INITED)
			return EBUSY;	/* already configured */
		return cgd_ioctl_set(cs, data, l);
	case CGDIOCCLR:
		if (DK_BUSY(&cs->sc_dksc, pmask))
			return EBUSY;	/* partition still open */
		return cgd_ioctl_clr(cs, l);
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			return (EBADF);

		/*
		 * We pass this call down to the underlying disk.
		 */
		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
	default:
		return dk_ioctl(di, dksc, dev, cmd, data, flag, l);
	}
}
584 
585 static int
586 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
587 {
588 	struct	cgd_softc *cs;
589 
590 	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
591 	    dev, blkno, va, (unsigned long)size));
592 	GETCGD_SOFTC(cs, dev);
593 	return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
594 }
595 
/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

/*
 * Recognized IV methods.  "v" is the cipher mode constant; "d" is a
 * blocksize divisor for compatibility (the blocksize used to be given
 * in bits, see the comment in cgd_ioctl_set()).
 */
static const struct {
	const char *n;
	int v;
	int d;
} encblkno[] = {
	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};
611 
/* ARGSUSED */
/*
 * CGDIOCSET: configure a cgd instance over an underlying disk.
 * Copies in the target path, algorithm name, IV method and key from
 * userland, initializes the cipher, and attaches the disk.  On any
 * failure the target vnode is closed and the key buffer zeroed.
 */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct	 cgd_ioctl *ci = data;
	struct	 vnode *vp;
	int	 ret;
	size_t	 i;
	size_t	 keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char	 *inbuf;			/* scratch for copyin'd strings/key */
	struct dk_softc *dksc = &cs->sc_dksc;

	cp = ci->ci_disk;

	/* Open the underlying device by path. */
	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = dk_lookup(pb, l, &vp);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	/* Look up the cipher by name. */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	/* Look up the IV method (must be one of the encblkno variants). */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	/*
	 * ci_keylen is in bits; the extra byte covers a trailing
	 * partial byte when the bit length is not a multiple of 8.
	 */
	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	/* Copy in the key material and initialize the cipher. */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_keylen = ci->ci_keylen;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
	    log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
	    /*
	     * NOTE(review): cf_priv is dropped without calling
	     * cf_destroy() here — looks like a leak of the cipher
	     * context; confirm against cf_init's contract.
	     */
	    cs->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)memset(inbuf, 0, MAX_KEYSIZE);	/* scrub the key copy */
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	/* Static bounce buffer guaranteeing one in-flight transaction. */
	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	dksc->sc_flags |= DKF_INITED;

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Attach the disk. */
	disk_attach(&dksc->sc_dkdev);

	/* Try and read the disklabel. */
	dk_getdisklabel(di, dksc, 0 /* XXX ? (cause of PR 41704) */);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}
727 
/* ARGSUSED */
/*
 * CGDIOCCLR: unconfigure an instance.  Drains pending I/O, closes the
 * underlying vnode, destroys the cipher context and detaches the disk.
 */
static int
cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
{
	int	s;
	struct	dk_softc *dksc = &cs->sc_dksc;

	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	s = splbio();
	bufq_drain(dksc->sc_bufq);
	splx(s);
	bufq_free(dksc->sc_bufq);

	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
	free(cs->sc_tpath, M_DEVBUF);	/* allocated by cgdinit() */
	free(cs->sc_data, M_DEVBUF);	/* static bounce buffer */
	cs->sc_data_used = 0;
	dksc->sc_flags &= ~DKF_INITED;
	disk_detach(&dksc->sc_dkdev);

	return 0;
}
757 
758 static int
759 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
760 {
761 	struct cgd_softc *cs = getcgd_softc(dev);
762 	struct cgd_user *cgu;
763 	int unit;
764 	struct	dk_softc *dksc = &cs->sc_dksc;
765 
766 	unit = CGDUNIT(dev);
767 	cgu = (struct cgd_user *)data;
768 
769 	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
770 			   dev, unit, data, l));
771 
772 	if (cgu->cgu_unit == -1)
773 		cgu->cgu_unit = unit;
774 
775 	if (cgu->cgu_unit < 0)
776 		return EINVAL;	/* XXX: should this be ENXIO? */
777 
778 	cs = device_lookup_private(&cgd_cd, unit);
779 	if (cs == NULL || (dksc->sc_flags & DKF_INITED) == 0) {
780 		cgu->cgu_dev = 0;
781 		cgu->cgu_alg[0] = '\0';
782 		cgu->cgu_blocksize = 0;
783 		cgu->cgu_mode = 0;
784 		cgu->cgu_keylen = 0;
785 	}
786 	else {
787 		cgu->cgu_dev = cs->sc_tdev;
788 		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
789 		    sizeof(cgu->cgu_alg));
790 		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
791 		cgu->cgu_mode = cs->sc_cdata.cf_mode;
792 		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
793 	}
794 	return 0;
795 }
796 
/*
 * Bind the softc to the underlying vnode: record the target path and
 * rdev, size the device, and seed an initial (fake) geometry.
 * On failure, frees sc_tpath; the caller closes the vnode.
 */
static int
cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct	disk_geom *dg;
	struct	vattr va;
	int	ret;
	char	*tmppath;
	uint64_t psize;		/* underlying device size in sectors */
	unsigned secsize;
	struct dk_softc *dksc = &cs->sc_dksc;

	cs->sc_tvn = vp;
	cs->sc_tpath = NULL;

	/* Keep a copy of the target path for later reporting. */
	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
	if (ret)
		goto bail;
	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

	vn_lock(vp, LK_SHARED | LK_RETRY);
	ret = VOP_GETATTR(vp, &va, l->l_cred);
	VOP_UNLOCK(vp);
	if (ret != 0)
		goto bail;

	cs->sc_tdev = va.va_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	// XXX: Inherit?
	dg->dg_secsize = DEV_BSIZE;
	dg->dg_ntracks = 1;
	dg->dg_nsectors = 1024 * (1024 / dg->dg_secsize);
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	free(tmppath, M_TEMP);
	if (ret && cs->sc_tpath)
		free(cs->sc_tpath, M_DEVBUF);
	return ret;
}
856 
857 /*
858  * Our generic cipher entry point.  This takes care of the
859  * IV mode and passes off the work to the specific cipher.
860  * We implement here the IV method ``encrypted block
861  * number''.
862  *
863  * For the encryption case, we accomplish this by setting
864  * up a struct uio where the first iovec of the source is
865  * the blocknumber and the first iovec of the dest is a
866  * sink.  We then call the cipher with an IV of zero, and
867  * the right thing happens.
868  *
869  * For the decryption case, we use the same basic mechanism
870  * for symmetry, but we encrypt the block number in the
871  * first iovec.
872  *
873  * We mainly do this to avoid requiring the definition of
874  * an ECB mode.
875  *
876  * XXXrcd: for now we rely on our own crypto framework defined
877  *         in dev/cgd_crypto.c.  This will change when we
878  *         get a generic kernel crypto framework.
879  */
880 
881 static void
882 blkno2blkno_buf(char *sbuf, daddr_t blkno)
883 {
884 	int	i;
885 
886 	/* Set up the blkno in blkno_buf, here we do not care much
887 	 * about the final layout of the information as long as we
888 	 * can guarantee that each sector will have a different IV
889 	 * and that the endianness of the machine will not affect
890 	 * the representation that we have chosen.
891 	 *
892 	 * We choose this representation, because it does not rely
893 	 * on the size of buf (which is the blocksize of the cipher),
894 	 * but allows daddr_t to grow without breaking existing
895 	 * disks.
896 	 *
897 	 * Note that blkno2blkno_buf does not take a size as input,
898 	 * and hence must be called on a pre-zeroed buffer of length
899 	 * greater than or equal to sizeof(daddr_t).
900 	 */
901 	for (i=0; i < sizeof(daddr_t); i++) {
902 		*sbuf++ = blkno & 0xff;
903 		blkno >>= 8;
904 	}
905 }
906 
/*
 * Generic cipher entry point implementing the ``encrypted block
 * number'' IV method on top of the specific cipher: per sector, the
 * cipher processes a two-iovec uio whose first element carries the
 * serialized block number (output discarded into "sink"), so the
 * data in the second element is effectively chained off an encrypted
 * block number, with a zero IV fed to the cipher itself.
 *
 * For decryption the block number must be encrypted separately first
 * (one-iovec pass with CGD_CIPHER_ENCRYPT), since running it through
 * the decrypt direction would not reproduce the encrypt-side chain.
 */
static void
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char		*dst = dstv;
	char 		*src = srcv;
	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
	struct uio	dstuio;
	struct uio	srcuio;
	struct iovec	dstiov[2];
	struct iovec	srciov[2];
	size_t		blocksize = cs->sc_cdata.cf_blocksize;
	char		sink[CGD_MAXBLOCKSIZE];		/* discard bucket */
	char		zero_iv[CGD_MAXBLOCKSIZE];
	char		blkno_buf[CGD_MAXBLOCKSIZE];	/* serialized blkno */

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	memset(zero_iv, 0x0, blocksize);

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 2;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 2;

	/* iovec 0: block number (src) -> sink (dst); iovec 1: sector data. */
	dstiov[0].iov_base = sink;
	dstiov[0].iov_len  = blocksize;
	srciov[0].iov_base = blkno_buf;
	srciov[0].iov_len  = blocksize;
	dstiov[1].iov_len  = secsize;
	srciov[1].iov_len  = secsize;

	/* Process one sector at a time, each with its own IV. */
	for (; len > 0; len -= secsize) {
		dstiov[1].iov_base = dst;
		srciov[1].iov_base = src;

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		if (dir == CGD_CIPHER_DECRYPT) {
			/*
			 * Pre-encrypt the block number alone so the
			 * decrypt pass sees the same chained prefix
			 * that the encrypt pass produced.
			 */
			dstuio.uio_iovcnt = 1;
			srcuio.uio_iovcnt = 1;
			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
			    blkno_buf, blocksize));
			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
			    zero_iv, CGD_CIPHER_ENCRYPT);
			memcpy(blkno_buf, sink, blocksize);
			dstuio.uio_iovcnt = 2;
			srcuio.uio_iovcnt = 2;
		}

		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));
		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
		    sink, blocksize));

		dst += secsize;
		src += secsize;
		blkno++;
	}
}
976 
#ifdef DEBUG
/* Dump len bytes of buf as hex, prefixed by the given label. */
static void
hexprint(const char *start, void *buf, int len)
{
	unsigned char *p = buf;

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	for (; len > 0; len--)
		printf("%02x", *p++);
}
#endif
989 
/* Loadable-module declaration; depends on the dk_subr module. */
MODULE(MODULE_CLASS_DRIVER, cgd, "dk_subr");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);
#endif
995 
/*
 * Module control: register/unregister the cfdriver, cfattach and
 * devsw entries when built as a loadable module.  Each INIT step
 * rolls back the preceding ones on failure.
 */
static int
cgd_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

#ifdef _MODULE
	int bmajor = -1, cmajor = -1;	/* -1: let devsw_attach pick majors */
#endif

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_cfdriver_attach(&cgd_cd);
		if (error)
			break;

		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	        if (error) {
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to register cfattach\n",
			    cgd_cd.cd_name);
			break;
		}

		error = devsw_attach("cgd", &cgd_bdevsw, &bmajor,
		    &cgd_cdevsw, &cmajor);
		if (error) {
			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
			config_cfdriver_detach(&cgd_cd);
			break;
		}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		/* Unwind in reverse order of MODULE_CMD_INIT. */
		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
		if (error)
			break;
		config_cfdriver_detach(&cgd_cd);
		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
#endif
		break;

	case MODULE_CMD_STAT:
		return ENOTTY;

	default:
		return ENOTTY;
	}

	return error;
}
1049