xref: /netbsd-src/sys/dev/cgd.c (revision 6a493d6bc668897c91594964a732d38505b70cbb)
1 /* $NetBSD: cgd.c,v 1.82 2013/09/12 12:28:49 martin Exp $ */
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Roland C. Dowdeswell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.82 2013/09/12 12:28:49 martin Exp $");
34 
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/buf.h>
41 #include <sys/bufq.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/pool.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/disk.h>
48 #include <sys/disklabel.h>
49 #include <sys/fcntl.h>
50 #include <sys/namei.h> /* for pathbuf */
51 #include <sys/vnode.h>
52 #include <sys/conf.h>
53 #include <sys/syslog.h>
54 
55 #include <dev/dkvar.h>
56 #include <dev/cgdvar.h>
57 
58 /* Entry Point Functions */
59 
60 void	cgdattach(int);
61 
62 static dev_type_open(cgdopen);
63 static dev_type_close(cgdclose);
64 static dev_type_read(cgdread);
65 static dev_type_write(cgdwrite);
66 static dev_type_ioctl(cgdioctl);
67 static dev_type_strategy(cgdstrategy);
68 static dev_type_dump(cgddump);
69 static dev_type_size(cgdsize);
70 
71 const struct bdevsw cgd_bdevsw = {
72 	cgdopen, cgdclose, cgdstrategy, cgdioctl,
73 	cgddump, cgdsize, D_DISK
74 };
75 
76 const struct cdevsw cgd_cdevsw = {
77 	cgdopen, cgdclose, cgdread, cgdwrite, cgdioctl,
78 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
79 };
80 
81 static int cgd_match(device_t, cfdata_t, void *);
82 static void cgd_attach(device_t, device_t, void *);
83 static int cgd_detach(device_t, int);
84 static struct cgd_softc	*cgd_spawn(int);
85 static int cgd_destroy(device_t);
86 
87 /* Internal Functions */
88 
89 static int	cgdstart(struct dk_softc *, struct buf *);
90 static void	cgdiodone(struct buf *);
91 
92 static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
93 static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
94 static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
95 static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
96 			struct lwp *);
97 static void	cgd_cipher(struct cgd_softc *, void *, void *,
98 			   size_t, daddr_t, size_t, int);
99 
100 /* Pseudo-disk Interface */
101 
102 static struct dk_intf the_dkintf = {
103 	DTYPE_CGD,
104 	"cgd",
105 	cgdopen,
106 	cgdclose,
107 	cgdstrategy,
108 	cgdstart,
109 };
110 static struct dk_intf *di = &the_dkintf;
111 
112 static struct dkdriver cgddkdriver = {
113 	.d_strategy = cgdstrategy,
114 	.d_minphys = minphys,
115 };
116 
117 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
118     cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
119 extern struct cfdriver cgd_cd;
120 
121 /* DIAGNOSTIC and DEBUG definitions */
122 
123 #if defined(CGDDEBUG) && !defined(DEBUG)
124 #define DEBUG
125 #endif
126 
127 #ifdef DEBUG
128 int cgddebug = 0;
129 
130 #define CGDB_FOLLOW	0x1
131 #define CGDB_IO	0x2
132 #define CGDB_CRYPTO	0x4
133 
134 #define IFDEBUG(x,y)		if (cgddebug & (x)) y
135 #define DPRINTF(x,y)		IFDEBUG(x, printf y)
136 #define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)
137 
138 static void	hexprint(const char *, void *, int);
139 
140 #else
141 #define IFDEBUG(x,y)
142 #define DPRINTF(x,y)
143 #define DPRINTF_FOLLOW(y)
144 #endif
145 
146 #ifdef DIAGNOSTIC
147 #define DIAGPANIC(x)		panic x
148 #define DIAGCONDPANIC(x,y)	if (x) panic y
149 #else
150 #define DIAGPANIC(x)
151 #define DIAGCONDPANIC(x,y)
152 #endif
153 
154 /* Global variables */
155 
156 /* Utility Functions */
157 
158 #define CGDUNIT(x)		DISKUNIT(x)
159 #define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO
160 
161 /* The code */
162 
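/*
 * Map a dev_t to its softc, attaching the pseudo-device on first
 * reference via cgd_spawn().
 */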
163 static struct cgd_softc *
164 getcgd_softc(dev_t dev)
165 {
166 	int	unit = CGDUNIT(dev);
167 	struct cgd_softc *sc;
168 
169 	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
170 
171 	sc = device_lookup_private(&cgd_cd, unit);
172 	if (sc == NULL)
173 		sc = cgd_spawn(unit);
174 	return sc;
175 }
176 
177 static int
178 cgd_match(device_t self, cfdata_t cfdata, void *aux)
179 {
180 
181 	return 1;
182 }
183 
184 static void
185 cgd_attach(device_t parent, device_t self, void *aux)
186 {
187 	struct cgd_softc *sc = device_private(self);
188 
189 	simple_lock_init(&sc->sc_slock);
190 	dk_sc_init(&sc->sc_dksc, device_xname(self));
191 	sc->sc_dksc.sc_dev = self;
192 	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
193 
194 	if (!pmf_device_register(self, NULL, NULL))
195 		aprint_error_dev(self, "unable to register power management hooks\n");
196 }
197 
198 
199 static int
200 cgd_detach(device_t self, int flags)
201 {
202 	int ret;
203 	const int pmask = 1 << RAW_PART;
204 	struct cgd_softc *sc = device_private(self);
205 	struct dk_softc *dksc = &sc->sc_dksc;
206 
207 	if (DK_BUSY(dksc, pmask))
208 		return EBUSY;
209 
210 	if ((dksc->sc_flags & DKF_INITED) != 0 &&
211 	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
212 		return ret;
213 
214 	disk_destroy(&dksc->sc_dkdev);
215 
216 	return 0;
217 }
218 
219 void
220 cgdattach(int num)
221 {
222 	int error;
223 
224 	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
225 	if (error != 0)
226 		aprint_error("%s: unable to register cfattach\n",
227 		    cgd_cd.cd_name);
228 }
229 
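/*
 * Create a cfdata record for the requested unit and attach a new
 * pseudo-device instance for it.
 */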
230 static struct cgd_softc *
231 cgd_spawn(int unit)
232 {
233 	cfdata_t cf;
234 
235 	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
236 	cf->cf_name = cgd_cd.cd_name;
237 	cf->cf_atname = cgd_cd.cd_name;
238 	cf->cf_unit = unit;
239 	cf->cf_fstate = FSTATE_STAR;
240 
241 	return device_private(config_attach_pseudo(cf));
242 }
243 
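/*
 * Detach a pseudo-device instance and release the cfdata that
 * cgd_spawn() allocated for it.
 */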
244 static int
245 cgd_destroy(device_t dev)
246 {
247 	int error;
248 	cfdata_t cf;
249 
250 	cf = device_cfdata(dev);
251 	error = config_detach(dev, DETACH_QUIET);
252 	if (error)
253 		return error;
254 	free(cf, M_DEVBUF);
255 	return 0;
256 }
257 
258 static int
259 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
260 {
261 	struct	cgd_softc *cs;
262 
263 	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
264 	GETCGD_SOFTC(cs, dev);
265 	return dk_open(di, &cs->sc_dksc, dev, flags, fmt, l);
266 }
267 
268 static int
269 cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
270 {
271 	int error;
272 	struct	cgd_softc *cs;
273 	struct	dk_softc *dksc;
274 
275 	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
276 	GETCGD_SOFTC(cs, dev);
277 	dksc = &cs->sc_dksc;
278 	if ((error = dk_close(di, dksc, dev, flags, fmt, l)) != 0)
279 		return error;
280 
281 	if ((dksc->sc_flags & DKF_INITED) == 0) {
282 		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
283 			aprint_error_dev(dksc->sc_dev,
284 			    "unable to detach instance\n");
285 			return error;
286 		}
287 	}
288 	return 0;
289 }
290 
291 static void
292 cgdstrategy(struct buf *bp)
293 {
294 	struct	cgd_softc *cs = getcgd_softc(bp->b_dev);
295 
296 	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
297 	    (long)bp->b_bcount));
298 
299 	/*
300 	 * Reject negative block numbers and misaligned transfers.  We can
301 	 * encrypt and decrypt only complete disk sectors, and we let the
302 	 * ciphers require their buffers to be aligned to 32-bit boundaries.
303 	 */
304 	if (bp->b_blkno < 0 ||
305 	    (bp->b_bcount % DEV_BSIZE) != 0 ||
306 	    ((uintptr_t)bp->b_data & 3) != 0) {
307 		bp->b_error = EINVAL;
308 		bp->b_resid = bp->b_bcount;
309 		biodone(bp);
310 		return;
311 	}
312 
313 	/* XXXrcd: Should we test for (cs != NULL)? */
314 	dk_strategy(di, &cs->sc_dksc, bp);
315 	return;
316 }
317 
318 static int
319 cgdsize(dev_t dev)
320 {
321 	struct cgd_softc *cs = getcgd_softc(dev);
322 
323 	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
324 	if (!cs)
325 		return -1;
326 	return dk_size(di, &cs->sc_dksc, dev);
327 }
328 
329 /*
330  * cgd_{get,put}data provide the buffer that holds the newly
331  * encrypted data.  We keep one preallocated buffer per device so
332  * that at least one transaction can always be in flight.  We use
333  * this buffer first so that we have one less piece of malloc'ed
334  * data at any given point.
335  */
336 
337 static void *
338 cgd_getdata(struct dk_softc *dksc, unsigned long size)
339 {
340 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
341 	void *	data = NULL;
342 
343 	simple_lock(&cs->sc_slock);
344 	if (cs->sc_data_used == 0) {
345 		cs->sc_data_used = 1;
346 		data = cs->sc_data;
347 	}
348 	simple_unlock(&cs->sc_slock);
349 
350 	if (data)
351 		return data;
352 
353 	return malloc(size, M_DEVBUF, M_NOWAIT);
354 }
355 
356 static void
357 cgd_putdata(struct dk_softc *dksc, void *data)
358 {
359 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
360 
361 	if (data == cs->sc_data) {
362 		simple_lock(&cs->sc_slock);
363 		cs->sc_data_used = 0;
364 		simple_unlock(&cs->sc_slock);
365 	} else {
366 		free(data, M_DEVBUF);
367 	}
368 }
369 
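/*
 * Start one transfer: set up a nested buffer aimed at the underlying
 * vnode, encrypting the data into a private buffer first if this is
 * a write.  Completion is handled by cgdiodone().
 */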
370 static int
371 cgdstart(struct dk_softc *dksc, struct buf *bp)
372 {
373 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
374 	struct	buf *nbp;
375 	void *	addr;
376 	void *	newaddr;
377 	daddr_t	bn;
378 	struct	vnode *vp;
379 
380 	DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
381 	disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */
382 
383 	bn = bp->b_rawblkno;
384 
385 	/*
386 	 * We attempt to allocate all of our resources up front, so that
387 	 * we can fail quickly if they are unavailable.
388 	 */
389 
390 	nbp = getiobuf(cs->sc_tvn, false);
391 	if (nbp == NULL) {
392 		disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
393 		return -1;
394 	}
395 
396 	/*
397 	 * If we are writing, then we need to encrypt the outgoing
398 	 * block into a new block of memory.  If we fail, then we
399 	 * return an error and let the dksubr framework deal with it.
400 	 */
401 	newaddr = addr = bp->b_data;
402 	if ((bp->b_flags & B_READ) == 0) {
403 		newaddr = cgd_getdata(dksc, bp->b_bcount);
404 		if (!newaddr) {
405 			putiobuf(nbp);
406 			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
407 			return -1;
408 		}
409 		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
410 		    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
411 	}
412 
413 	nbp->b_data = newaddr;
414 	nbp->b_flags = bp->b_flags;
415 	nbp->b_oflags = bp->b_oflags;
416 	nbp->b_cflags = bp->b_cflags;
417 	nbp->b_iodone = cgdiodone;
418 	nbp->b_proc = bp->b_proc;
419 	nbp->b_blkno = bn;
420 	nbp->b_bcount = bp->b_bcount;
421 	nbp->b_private = bp;
422 
423 	BIO_COPYPRIO(nbp, bp);
424 
425 	if ((nbp->b_flags & B_READ) == 0) {
426 		vp = nbp->b_vp;
427 		mutex_enter(vp->v_interlock);
428 		vp->v_numoutput++;
429 		mutex_exit(vp->v_interlock);
430 	}
431 	VOP_STRATEGY(cs->sc_tvn, nbp);
432 	return 0;
433 }
434 
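/*
 * Complete a nested transfer: propagate any error, decrypt in place
 * if this was a read, release the bounce buffer if one was used and
 * finish off the original request.
 */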
435 static void
436 cgdiodone(struct buf *nbp)
437 {
438 	struct	buf *obp = nbp->b_private;
439 	struct	cgd_softc *cs = getcgd_softc(obp->b_dev);
440 	struct	dk_softc *dksc = &cs->sc_dksc;
441 	int s;
442 
443 	KDASSERT(cs);
444 
445 	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
446 	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
447 	    obp, obp->b_bcount, obp->b_resid));
448 	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64 " addr %p bcnt %d\n",
449 	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
450 	    nbp->b_bcount));
451 	if (nbp->b_error != 0) {
452 		obp->b_error = nbp->b_error;
453 		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
454 		    obp->b_error));
455 	}
456 
457 	/* Perform the decryption if we are reading.
458 	 *
459 	 * Note: use the blocknumber from nbp, since it is what
460 	 *       we used to encrypt the blocks.
461 	 */
462 
463 	if (nbp->b_flags & B_READ)
464 		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
465 		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);
466 
467 	/* If we allocated memory, free it now... */
468 	if (nbp->b_data != obp->b_data)
469 		cgd_putdata(dksc, nbp->b_data);
470 
471 	putiobuf(nbp);
472 
473 	/* Request is complete for whatever reason */
474 	obp->b_resid = 0;
475 	if (obp->b_error != 0)
476 		obp->b_resid = obp->b_bcount;
477 	s = splbio();
478 	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
479 	    (obp->b_flags & B_READ));
480 	biodone(obp);
481 	dk_iodone(di, dksc);
482 	splx(s);
483 }
484 
485 /* XXX: we should probably put these into dksubr.c, mostly */
486 static int
487 cgdread(dev_t dev, struct uio *uio, int flags)
488 {
489 	struct	cgd_softc *cs;
490 	struct	dk_softc *dksc;
491 
492 	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
493 	    (unsigned long long)dev, uio, flags));
494 	GETCGD_SOFTC(cs, dev);
495 	dksc = &cs->sc_dksc;
496 	if ((dksc->sc_flags & DKF_INITED) == 0)
497 		return ENXIO;
498 	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
499 }
500 
501 /* XXX: we should probably put these into dksubr.c, mostly */
502 static int
503 cgdwrite(dev_t dev, struct uio *uio, int flags)
504 {
505 	struct	cgd_softc *cs;
506 	struct	dk_softc *dksc;
507 
508 	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
509 	GETCGD_SOFTC(cs, dev);
510 	dksc = &cs->sc_dksc;
511 	if ((dksc->sc_flags & DKF_INITED) == 0)
512 		return ENXIO;
513 	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
514 }
515 
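/*
 * Ioctl dispatch: CGDIOCSET, CGDIOCCLR and CGDIOCGET are handled
 * here, DIOCCACHESYNC is passed to the underlying device, and
 * everything else goes to dk_ioctl().
 */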
516 static int
517 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
518 {
519 	struct	cgd_softc *cs;
520 	struct	dk_softc *dksc;
521 	int	part = DISKPART(dev);
522 	int	pmask = 1 << part;
523 
524 	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
525 	    dev, cmd, data, flag, l));
526 
527 	switch (cmd) {
528 	case CGDIOCGET: /* don't call cgd_spawn() if the device isn't there */
529 		cs = NULL;
530 		dksc = NULL;
531 		break;
532 	case CGDIOCSET:
533 	case CGDIOCCLR:
534 		if ((flag & FWRITE) == 0)
535 			return EBADF;
536 		/* FALLTHROUGH */
537 	default:
538 		GETCGD_SOFTC(cs, dev);
539 		dksc = &cs->sc_dksc;
540 		break;
541 	}
542 
543 	switch (cmd) {
544 	case CGDIOCSET:
545 		if (dksc->sc_flags & DKF_INITED)
546 			return EBUSY;
547 		return cgd_ioctl_set(cs, data, l);
548 	case CGDIOCCLR:
549 		if (DK_BUSY(&cs->sc_dksc, pmask))
550 			return EBUSY;
551 		return cgd_ioctl_clr(cs, l);
552 	case CGDIOCGET:
553 		return cgd_ioctl_get(dev, data, l);
554 	case DIOCCACHESYNC:
555 		/*
556 		 * XXX Do we really need to care about having a writable
557 		 * file descriptor here?
558 		 */
559 		if ((flag & FWRITE) == 0)
560 			return (EBADF);
561 
562 		/*
563 		 * We pass this call down to the underlying disk.
564 		 */
565 		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
566 	default:
567 		return dk_ioctl(di, dksc, dev, cmd, data, flag, l);
568 	}
569 }
570 
571 static int
572 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
573 {
574 	struct	cgd_softc *cs;
575 
576 	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
577 	    dev, blkno, va, (unsigned long)size));
578 	GETCGD_SOFTC(cs, dev);
579 	return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
580 }
581 
582 /*
583  * XXXrcd:
584  *  for now we hardcode the maximum key length.
585  */
586 #define MAX_KEYSIZE	1024
587 
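/*
 * Recognized IV methods.  "n" is the name passed in from userland,
 * "v" the corresponding cipher mode, and "d" the divisor applied to
 * the block size reported by the cipher (see the compatibility note
 * in cgd_ioctl_set()).
 */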
588 static const struct {
589 	const char *n;
590 	int v;
591 	int d;
592 } encblkno[] = {
593 	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
594 	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
595 	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
596 };
597 
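/*
 * Handle CGDIOCSET: look up and open the underlying device, copy in
 * the algorithm name, IV method and key from userland, initialize
 * the cipher and attach the resulting pseudo-disk.
 */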
598 /* ARGSUSED */
599 static int
600 cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
601 {
602 	struct	 cgd_ioctl *ci = data;
603 	struct	 vnode *vp;
604 	int	 ret;
605 	size_t	 i;
606 	size_t	 keybytes;			/* key length in bytes */
607 	const char *cp;
608 	struct pathbuf *pb;
609 	char	 *inbuf;
610 	struct dk_softc *dksc = &cs->sc_dksc;
611 
612 	cp = ci->ci_disk;
613 
614 	ret = pathbuf_copyin(ci->ci_disk, &pb);
615 	if (ret != 0) {
616 		return ret;
617 	}
618 	ret = dk_lookup(pb, l, &vp);
619 	pathbuf_destroy(pb);
620 	if (ret != 0) {
621 		return ret;
622 	}
623 
624 	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);
625 
626 	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
627 		goto bail;
628 
629 	(void)memset(inbuf, 0, MAX_KEYSIZE);
630 	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
631 	if (ret)
632 		goto bail;
633 	cs->sc_cfuncs = cryptfuncs_find(inbuf);
634 	if (!cs->sc_cfuncs) {
635 		ret = EINVAL;
636 		goto bail;
637 	}
638 
639 	(void)memset(inbuf, 0, MAX_KEYSIZE);
640 	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
641 	if (ret)
642 		goto bail;
643 
644 	for (i = 0; i < __arraycount(encblkno); i++)
645 		if (strcmp(encblkno[i].n, inbuf) == 0)
646 			break;
647 
648 	if (i == __arraycount(encblkno)) {
649 		ret = EINVAL;
650 		goto bail;
651 	}
652 
653 	keybytes = ci->ci_keylen / 8 + 1;
654 	if (keybytes > MAX_KEYSIZE) {
655 		ret = EINVAL;
656 		goto bail;
657 	}
658 
659 	(void)memset(inbuf, 0, MAX_KEYSIZE);
660 	ret = copyin(ci->ci_key, inbuf, keybytes);
661 	if (ret)
662 		goto bail;
663 
664 	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
665 	cs->sc_cdata.cf_mode = encblkno[i].v;
666 	cs->sc_cdata.cf_keylen = ci->ci_keylen;
667 	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
668 	    &cs->sc_cdata.cf_blocksize);
669 	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
670 		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
671 		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
672 		cs->sc_cdata.cf_priv = NULL;
673 	}
674 
675 	/*
676 	 * The blocksize is supposed to be in bytes. Unfortunately originally
677 	 * it was expressed in bits. For compatibility we maintain encblkno
678 	 * and encblkno8.
679 	 */
680 	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
681 	(void)memset(inbuf, 0, MAX_KEYSIZE);
682 	if (!cs->sc_cdata.cf_priv) {
683 		ret = EINVAL;		/* XXX is this the right error? */
684 		goto bail;
685 	}
686 	free(inbuf, M_TEMP);
687 
688 	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
689 
690 	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
691 	cs->sc_data_used = 0;
692 
693 	dksc->sc_flags |= DKF_INITED;
694 
695 	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
696 
697 	/* Attach the disk. */
698 	disk_attach(&dksc->sc_dkdev);
699 
700 	/* Try and read the disklabel. */
701 	dk_getdisklabel(di, dksc, 0 /* XXX ? (cause of PR 41704) */);
702 
703 	/* Discover wedges on this disk. */
704 	dkwedge_discover(&dksc->sc_dkdev);
705 
706 	return 0;
707 
708 bail:
709 	free(inbuf, M_TEMP);
710 	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
711 	return ret;
712 }
713 
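/*
 * Handle CGDIOCCLR: drain and free the buffer queue, close the
 * underlying vnode, destroy the cipher state and detach the disk.
 */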
714 /* ARGSUSED */
715 static int
716 cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
717 {
718 	int	s;
719 	struct	dk_softc *dksc = &cs->sc_dksc;
720 
721 	if ((dksc->sc_flags & DKF_INITED) == 0)
722 		return ENXIO;
723 
724 	/* Delete all of our wedges. */
725 	dkwedge_delall(&dksc->sc_dkdev);
726 
727 	/* Kill off any queued buffers. */
728 	s = splbio();
729 	bufq_drain(dksc->sc_bufq);
730 	splx(s);
731 	bufq_free(dksc->sc_bufq);
732 
733 	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
734 	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
735 	free(cs->sc_tpath, M_DEVBUF);
736 	free(cs->sc_data, M_DEVBUF);
737 	cs->sc_data_used = 0;
738 	dksc->sc_flags &= ~DKF_INITED;
739 	disk_detach(&dksc->sc_dkdev);
740 
741 	return 0;
742 }
743 
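/*
 * Handle CGDIOCGET: report the underlying device and the cipher
 * parameters of a configured unit, or zeroes if it is not configured.
 */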
744 static int
745 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
746 {
747 	struct cgd_softc *cs = getcgd_softc(dev);
748 	struct cgd_user *cgu;
749 	int unit;
750 	struct	dk_softc *dksc = &cs->sc_dksc;
751 
752 	unit = CGDUNIT(dev);
753 	cgu = (struct cgd_user *)data;
754 
755 	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
756 			   dev, unit, data, l));
757 
758 	if (cgu->cgu_unit == -1)
759 		cgu->cgu_unit = unit;
760 
761 	if (cgu->cgu_unit < 0)
762 		return EINVAL;	/* XXX: should this be ENXIO? */
763 
764 	cs = device_lookup_private(&cgd_cd, unit);
765 	if (cs == NULL || (dksc->sc_flags & DKF_INITED) == 0) {
766 		cgu->cgu_dev = 0;
767 		cgu->cgu_alg[0] = '\0';
768 		cgu->cgu_blocksize = 0;
769 		cgu->cgu_mode = 0;
770 		cgu->cgu_keylen = 0;
771 	}
772 	else {
773 		cgu->cgu_dev = cs->sc_tdev;
774 		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
775 		    sizeof(cgu->cgu_alg));
776 		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
777 		cgu->cgu_mode = cs->sc_cdata.cf_mode;
778 		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
779 	}
780 	return 0;
781 }
782 
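/*
 * Record the path and vnode of the underlying device and synthesize
 * an initial geometry from its size.
 */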
783 static int
784 cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
785 	struct lwp *l)
786 {
787 	struct	disk_geom *dg;
788 	struct	vattr va;
789 	int	ret;
790 	char	*tmppath;
791 	uint64_t psize;
792 	unsigned secsize;
793 	struct dk_softc *dksc = &cs->sc_dksc;
794 
795 	cs->sc_tvn = vp;
796 	cs->sc_tpath = NULL;
797 
798 	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
799 	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
800 	if (ret)
801 		goto bail;
802 	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
803 	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);
804 
805 	vn_lock(vp, LK_SHARED | LK_RETRY);
806 	ret = VOP_GETATTR(vp, &va, l->l_cred);
807 	VOP_UNLOCK(vp);
808 	if (ret != 0)
809 		goto bail;
810 
811 	cs->sc_tdev = va.va_rdev;
812 
813 	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
814 		goto bail;
815 
816 	if (psize == 0) {
817 		ret = ENODEV;
818 		goto bail;
819 	}
820 
821 	/*
822 	 * XXX here we should probe the underlying device.  If we
823 	 *     are accessing a partition of type RAW_PART, then
824 	 *     we should populate our initial geometry with the
825 	 *     geometry that we discover from the device.
826 	 */
827 	dg = &dksc->sc_dkdev.dk_geom;
828 	memset(dg, 0, sizeof(*dg));
829 	dg->dg_secperunit = psize;
830 	/* XXX: Inherit? */
831 	dg->dg_secsize = DEV_BSIZE;
832 	dg->dg_ntracks = 1;
833 	dg->dg_nsectors = 1024 * (1024 / dg->dg_secsize);
834 	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
835 
836 bail:
837 	free(tmppath, M_TEMP);
838 	if (ret && cs->sc_tpath)
839 		free(cs->sc_tpath, M_DEVBUF);
840 	return ret;
841 }
842 
843 /*
844  * Our generic cipher entry point.  This takes care of the
845  * IV mode and passes off the work to the specific cipher.
846  * We implement here the IV method ``encrypted block
847  * number''.
848  *
849  * For the encryption case, we accomplish this by setting
850  * up a struct uio where the first iovec of the source is
851  * the blocknumber and the first iovec of the dest is a
852  * sink.  We then call the cipher with an IV of zero, and
853  * the right thing happens.
854  *
855  * For the decryption case, we use the same basic mechanism
856  * for symmetry, but we encrypt the block number in the
857  * first iovec.
858  *
859  * We mainly do this to avoid requiring the definition of
860  * an ECB mode.
861  *
862  * XXXrcd: for now we rely on our own crypto framework defined
863  *         in dev/cgd_crypto.c.  This will change when we
864  *         get a generic kernel crypto framework.
865  */
866 
867 static void
868 blkno2blkno_buf(char *sbuf, daddr_t blkno)
869 {
870 	int	i;
871 
872 	/* Set up the blkno in blkno_buf.  Here we do not care much
873 	 * about the final layout of the information as long as we
874 	 * can guarantee that each sector will have a different IV
875 	 * and that the endianness of the machine will not affect
876 	 * the representation that we have chosen.
877 	 *
878 	 * We choose this representation, because it does not rely
879 	 * on the size of buf (which is the blocksize of the cipher),
880 	 * but allows daddr_t to grow without breaking existing
881 	 * disks.
882 	 *
883 	 * Note that blkno2blkno_buf does not take a size as input,
884 	 * and hence must be called on a pre-zeroed buffer of length
885 	 * greater than or equal to sizeof(daddr_t).
886 	 */
887 	for (i = 0; i < sizeof(daddr_t); i++) {
888 		*sbuf++ = blkno & 0xff;
889 		blkno >>= 8;
890 	}
891 }
892 
893 static void
894 cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
895     size_t len, daddr_t blkno, size_t secsize, int dir)
896 {
897 	char		*dst = dstv;
898 	char 		*src = srcv;
899 	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
900 	struct uio	dstuio;
901 	struct uio	srcuio;
902 	struct iovec	dstiov[2];
903 	struct iovec	srciov[2];
904 	size_t		blocksize = cs->sc_cdata.cf_blocksize;
905 	char		sink[CGD_MAXBLOCKSIZE];
906 	char		zero_iv[CGD_MAXBLOCKSIZE];
907 	char		blkno_buf[CGD_MAXBLOCKSIZE];
908 
909 	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));
910 
911 	DIAGCONDPANIC(len % blocksize != 0,
912 	    ("cgd_cipher: len %% blocksize != 0"));
913 
914 	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
915 	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
916 	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));
917 
918 	memset(zero_iv, 0x0, blocksize);
919 
920 	dstuio.uio_iov = dstiov;
921 	dstuio.uio_iovcnt = 2;
922 
923 	srcuio.uio_iov = srciov;
924 	srcuio.uio_iovcnt = 2;
925 
926 	dstiov[0].iov_base = sink;
927 	dstiov[0].iov_len  = blocksize;
928 	srciov[0].iov_base = blkno_buf;
929 	srciov[0].iov_len  = blocksize;
930 	dstiov[1].iov_len  = secsize;
931 	srciov[1].iov_len  = secsize;
932 
933 	for (; len > 0; len -= secsize) {
934 		dstiov[1].iov_base = dst;
935 		srciov[1].iov_base = src;
936 
937 		memset(blkno_buf, 0x0, blocksize);
938 		blkno2blkno_buf(blkno_buf, blkno);
939 		if (dir == CGD_CIPHER_DECRYPT) {
940 			dstuio.uio_iovcnt = 1;
941 			srcuio.uio_iovcnt = 1;
942 			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
943 			    blkno_buf, blocksize));
944 			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
945 			    zero_iv, CGD_CIPHER_ENCRYPT);
946 			memcpy(blkno_buf, sink, blocksize);
947 			dstuio.uio_iovcnt = 2;
948 			srcuio.uio_iovcnt = 2;
949 		}
950 
951 		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
952 		    blkno_buf, blocksize));
953 		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
954 		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
955 		    sink, blocksize));
956 
957 		dst += secsize;
958 		src += secsize;
959 		blkno++;
960 	}
961 }
962 
963 #ifdef DEBUG
964 static void
965 hexprint(const char *start, void *buf, int len)
966 {
967 	char	*c = buf;
968 
969 	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
970 	printf("%s: len=%06d 0x", start, len);
971 	while (len--)
972 		printf("%02x", (unsigned char) *c++);
973 }
974 #endif
975 
976 MODULE(MODULE_CLASS_DRIVER, cgd, NULL);
977 
978 #ifdef _MODULE
979 CFDRIVER_DECL(cgd, DV_DISK, NULL);
980 #endif
981 
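/*
 * Module control: register the cfdriver, cfattach and devsw entries
 * on load and unregister them again on unload.
 */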
982 static int
983 cgd_modcmd(modcmd_t cmd, void *arg)
984 {
985 	int error = 0;
986 
987 #ifdef _MODULE
988 	int bmajor = -1, cmajor = -1;
989 #endif
990 
991 	switch (cmd) {
992 	case MODULE_CMD_INIT:
993 #ifdef _MODULE
994 		error = config_cfdriver_attach(&cgd_cd);
995 		if (error)
996 			break;
997 
998 		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
999 		if (error) {
1000 			config_cfdriver_detach(&cgd_cd);
1001 			aprint_error("%s: unable to register cfattach\n",
1002 			    cgd_cd.cd_name);
1003 			break;
1004 		}
1005 
1006 		error = devsw_attach("cgd", &cgd_bdevsw, &bmajor,
1007 		    &cgd_cdevsw, &cmajor);
1008 		if (error) {
1009 			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1010 			config_cfdriver_detach(&cgd_cd);
1011 			break;
1012 		}
1013 #endif
1014 		break;
1015 
1016 	case MODULE_CMD_FINI:
1017 #ifdef _MODULE
1018 		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1019 		if (error)
1020 			break;
1021 		config_cfdriver_detach(&cgd_cd);
1022 		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1023 #endif
1024 		break;
1025 
1026 	case MODULE_CMD_STAT:
1027 		return ENOTTY;
1028 
1029 	default:
1030 		return ENOTTY;
1031 	}
1032 
1033 	return error;
1034 }
1035