/* $NetBSD: cgd.c,v 1.113 2016/12/22 20:57:33 kamil Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.113 2016/12/22 20:57:33 kamil Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/namei.h> /* for pathbuf */
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/syslog.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>

#include <miscfs/specfs/specdev.h> /* for v_rdev */

#include "ioconf.h"

struct selftest_params {
	const char *alg;
	int blocksize;	/* number of bytes */
	int secsize;
	daddr_t blkno;
	int keylen;	/* number of bits */
	int txtlen;	/* number of bytes */
	const uint8_t *key;
	const uint8_t *ptxt;
	const uint8_t *ctxt;
};

/* Entry Point Functions */

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

/*
 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
 */
static const uint8_t selftest_aes_xts_256_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_aes_xts_256_ctxt[64] = {
	0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
	0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
	0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
	0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
	0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
	0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
	0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
	0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
};

static const uint8_t selftest_aes_xts_256_key[33] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0
};

/*
 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
 */
static const uint8_t selftest_aes_xts_512_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

static const uint8_t selftest_aes_xts_512_ctxt[64] = {
	0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
	0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
	0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
	0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
	0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
	0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
	0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
	0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
};

static const uint8_t selftest_aes_xts_512_key[65] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
	0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
	0
};

const struct selftest_params selftests[] = {
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
		.key  = selftest_aes_xts_256_key,
		.ptxt = selftest_aes_xts_256_ptxt,
		.ctxt = selftest_aes_xts_256_ctxt
	},
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 512,
		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
		.key  = selftest_aes_xts_512_key,
		.ptxt = selftest_aes_xts_512_ptxt,
		.ctxt = selftest_aes_xts_512_ctxt
	}
};

static int cgd_match(device_t, cfdata_t, void *);
static void cgd_attach(device_t, device_t, void *);
static int cgd_detach(device_t, int);
static struct cgd_softc	*cgd_spawn(int);
static int cgd_destroy(device_t);

/* Internal Functions */

static int	cgd_diskstart(device_t, struct buf *);
static void	cgdiodone(struct buf *);
static int	cgd_dumpblocks(device_t, void *, daddr_t, int);

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
			struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, void *,
			   size_t, daddr_t, size_t, int);

static struct dkdriver cgddkdriver = {
	.d_minphys  = minphys,
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_iosize = NULL,
	.d_diskstart = cgd_diskstart,
	.d_dumpblocks = cgd_dumpblocks,
	.d_lastclose = NULL
};

CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
extern struct cfdriver cgd_cd;

/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;

#define CGDB_FOLLOW	0x1
#define CGDB_IO	0x2
#define CGDB_CRYPTO	0x4

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif

/* Global variables */

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
#define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO

/* The code */

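/*
 * Return the softc for the unit encoded in the given device number.
 * If no instance has been attached yet, try to spawn one on the fly.
 */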
static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	int	unit = CGDUNIT(dev);
	struct cgd_softc *sc;

	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL)
		sc = cgd_spawn(unit);
	return sc;
}

static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}

static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self,
		    "unable to register power management hooks\n");
}


static int
cgd_detach(device_t self, int flags)
{
	int ret;
	const int pmask = 1 << RAW_PART;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, pmask))
		return EBUSY;

	if (DK_ATTACHED(dksc) &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);
	mutex_destroy(&sc->sc_lock);

	return 0;
}

void
cgdattach(int num)
{
	int error;

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
}

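/*
 * Construct a cfdata entry for the requested unit and attach a
 * cgd pseudo-device for it on demand.
 */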
static struct cgd_softc *
cgd_spawn(int unit)
{
	cfdata_t cf;

	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
	cf->cf_name = cgd_cd.cd_name;
	cf->cf_atname = cgd_cd.cd_name;
	cf->cf_unit = unit;
	cf->cf_fstate = FSTATE_STAR;

	return device_private(config_attach_pseudo(cf));
}

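/*
 * Detach the device and release the cfdata allocated by cgd_spawn().
 */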
static int
cgd_destroy(device_t dev)
{
	int error;
	cfdata_t cf;

	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;
	free(cf, M_DEVBUF);
	return 0;
}

static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_open(&cs->sc_dksc, dev, flags, fmt, l);
}

static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	int error;
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
		return error;

	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
			aprint_error_dev(dksc->sc_dev,
			    "unable to detach instance\n");
			return error;
		}
	}
	return 0;
}

static void
cgdstrategy(struct buf *bp)
{
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	cs = getcgd_softc(bp->b_dev);
	if (!cs) {
		bp->b_error = ENXIO;
		goto bail;
	}

	/*
	 * Reject buffers that are not 32-bit aligned.
	 */
	if (((uintptr_t)bp->b_data & 3) != 0) {
		bp->b_error = EINVAL;
		goto bail;
	}

	dk_strategy(&cs->sc_dksc, bp);
	return;

bail:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	return;
}

static int
cgdsize(dev_t dev)
{
	struct cgd_softc *cs = getcgd_softc(dev);

	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
	if (!cs)
		return -1;
	return dk_size(&cs->sc_dksc, dev);
}

/*
 * cgd_{get,put}data manage the buffer that holds the encrypted data.
 * Each device owns one preallocated buffer so that at least one
 * transaction can always be in flight.  We hand out that buffer first
 * and fall back to malloc(9) only when it is already in use, which
 * keeps the amount of dynamically allocated data to a minimum.
 */

static void *
cgd_getdata(struct dk_softc *dksc, unsigned long size)
{
	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
	void *	data = NULL;

	mutex_enter(&cs->sc_lock);
	if (cs->sc_data_used == 0) {
		cs->sc_data_used = 1;
		data = cs->sc_data;
	}
	mutex_exit(&cs->sc_lock);

	if (data)
		return data;

	return malloc(size, M_DEVBUF, M_NOWAIT);
}

static void
cgd_putdata(struct dk_softc *dksc, void *data)
{
	struct	cgd_softc *cs = (struct cgd_softc *)dksc;

	if (data == cs->sc_data) {
		mutex_enter(&cs->sc_lock);
		cs->sc_data_used = 0;
		mutex_exit(&cs->sc_lock);
	} else {
		free(data, M_DEVBUF);
	}
}

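/*
 * Start one transfer: for writes, encrypt the data into a private
 * buffer first, then hand the (possibly new) buffer to the
 * underlying device via VOP_STRATEGY().
 */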
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct	cgd_softc *cs = device_private(dev);
	struct	dk_softc *dksc = &cs->sc_dksc;
	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct	buf *nbp;
	void *	addr;
	void *	newaddr;
	daddr_t	bn;
	struct	vnode *vp;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(cs->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	newaddr = addr = bp->b_data;
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(dksc, bp->b_bcount);
		if (!newaddr) {
			putiobuf(nbp);
			return EAGAIN;
		}
		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
		    dg->dg_secsize, CGD_CIPHER_ENCRYPT);
	}

	nbp->b_data = newaddr;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_blkno = btodb(bn * dg->dg_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = bp;

	BIO_COPYPRIO(nbp, bp);

	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(cs->sc_tvn, nbp);

	return 0;
}

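/*
 * Completion handler for the buffers issued by cgd_diskstart():
 * decrypt the data in place for reads, release the temporary write
 * buffer, and finish off the original request.
 */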
static void
cgdiodone(struct buf *nbp)
{
	struct	buf *obp = nbp->b_private;
	struct	cgd_softc *cs = getcgd_softc(obp->b_dev);
	struct	dk_softc *dksc = &cs->sc_dksc;
	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t	bn;

	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
		nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 *       we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    bn, dg->dg_secsize, CGD_CIPHER_DECRYPT);
	}

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}

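/*
 * Crash-dump support: encrypt the pages handed to us by dk_dump()
 * and pass the ciphertext on to the underlying disk's dump routine.
 */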
static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors.  Everything
	 * else in cgd uses units of diskgeom sectors.  These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext.  */
	buf = cgd_getdata(dksc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up.  */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer.  */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device.  */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer.  */
	cgd_putdata(dksc, buf);

	/* Return any error from the underlying disk device.  */
	return error;
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
	    (unsigned long long)dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if (!DK_ATTACHED(dksc))
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if (!DK_ATTACHED(dksc))
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}

static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		GETCGD_SOFTC(cs, dev);
		dksc = &cs->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		if (DK_ATTACHED(dksc))
			return EBUSY;
		return cgd_ioctl_set(cs, data, l);
	case CGDIOCCLR:
		if (DK_BUSY(&cs->sc_dksc, pmask))
			return EBUSY;
		return cgd_ioctl_clr(cs, l);
	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			return (EBADF);

		/*
		 * We pass this call down to the underlying disk.
		 */
		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc))
			return ENOENT;
		/*FALLTHROUGH*/
	default:
		return dk_ioctl(dksc, dev, cmd, data, flag, l);
	case CGDIOCGET:
		KASSERT(0);
		return EINVAL;
	}
}

static int
cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
	    dev, blkno, va, (unsigned long)size));
	GETCGD_SOFTC(cs, dev);
	return dk_dump(&cs->sc_dksc, dev, blkno, va, size);
}

/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

static const struct {
	const char *n;
	int v;
	int d;
} encblkno[] = {
	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};

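/*
 * Configure a cgd: look up the underlying device, copy in the
 * algorithm, IV method and key, initialize the cipher, and attach
 * the disk.
 */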
/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct	 cgd_ioctl *ci = data;
	struct	 vnode *vp;
	int	 ret;
	size_t	 i;
	size_t	 keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char	 *inbuf;
	struct dk_softc *dksc = &cs->sc_dksc;

	cp = ci->ci_disk;

	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = dk_lookup(pb, l, &vp);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_keylen = ci->ci_keylen;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
		cs->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	/* Attach the disk. */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}

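/*
 * Unconfigure a cgd: drain outstanding I/O, destroy the cipher
 * state, close the underlying vnode and detach the disk.
 */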
/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
{
	struct	dk_softc *dksc = &cs->sc_dksc;

	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
	free(cs->sc_tpath, M_DEVBUF);
	free(cs->sc_data, M_DEVBUF);
	cs->sc_data_used = 0;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}

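/*
 * Report the current configuration of a unit back to userland.
 */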
static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	struct cgd_softc *cs;
	struct cgd_user *cgu;
	int unit;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
			   dev, unit, data, l));

	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0)
		return EINVAL;	/* XXX: should this be ENXIO? */

	cs = device_lookup_private(&cgd_cd, unit);
	if (cs == NULL || !DK_ATTACHED(&cs->sc_dksc)) {
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	} else {
		cgu->cgu_dev = cs->sc_tdev;
		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
		cgu->cgu_mode = cs->sc_cdata.cf_mode;
		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
	}
	return 0;
}

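/*
 * Record the underlying device and its path, and build an initial
 * geometry from the size reported by the device.
 */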
static int
cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct	disk_geom *dg;
	int	ret;
	char	*tmppath;
	uint64_t psize;
	unsigned secsize;
	struct dk_softc *dksc = &cs->sc_dksc;

	cs->sc_tvn = vp;
	cs->sc_tpath = NULL;

	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
	if (ret)
		goto bail;
	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

	cs->sc_tdev = vp->v_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	dg->dg_secsize = secsize;
	dg->dg_ntracks = 1;
	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	free(tmppath, M_TEMP);
	if (ret && cs->sc_tpath)
		free(cs->sc_tpath, M_DEVBUF);
	return ret;
}

/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */

static void
blkno2blkno_buf(char *sbuf, daddr_t blkno)
{
	int	i;

	/* Set up the blkno in blkno_buf; here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation because it does not rely
	 * on the size of buf (which is the blocksize of the cipher),
	 * but allows daddr_t to grow without breaking existing
	 * disks.
	 *
	 * Note that blkno2blkno_buf does not take a size as input,
	 * and hence must be called on a pre-zeroed buffer of length
	 * greater than or equal to sizeof(daddr_t).
	 */
	for (i=0; i < sizeof(daddr_t); i++) {
		*sbuf++ = blkno & 0xff;
		blkno >>= 8;
	}
}

static void
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char		*dst = dstv;
	char		*src = srcv;
	cfunc_cipher_prep	*ciprep = cs->sc_cfuncs->cf_cipher_prep;
	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
	struct uio	dstuio;
	struct uio	srcuio;
	struct iovec	dstiov[2];
	struct iovec	srciov[2];
	size_t		blocksize = cs->sc_cdata.cf_blocksize;
	size_t		todo;
	char		blkno_buf[CGD_MAXBLOCKSIZE], *iv;

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	DIAGCONDPANIC(blocksize > CGD_MAXBLOCKSIZE,
	    ("cgd_cipher: blocksize > CGD_MAXBLOCKSIZE"));

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 1;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 1;

	for (; len > 0; len -= todo) {
		todo = MIN(len, secsize);

		dstiov[0].iov_base = dst;
		srciov[0].iov_base = src;
		dstiov[0].iov_len  = todo;
		srciov[0].iov_len  = todo;

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));

		/*
		 * Compute an initial IV. All ciphers
		 * can convert blkno_buf in-place.
		 */
		iv = blkno_buf;
		ciprep(cs->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize));

		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir);

		dst += todo;
		src += todo;
		blkno++;
	}
}

#ifdef DEBUG
static void
hexprint(const char *start, void *buf, int len)
{
	char	*c = buf;

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	while (len--)
		printf("%02x", (unsigned char) *c++);
}
#endif

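/*
 * Run the compiled-in cipher test vectors and panic on any mismatch,
 * so that a broken cipher is caught when the module is initialized.
 */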
static void
selftest(void)
{
	struct cgd_softc cs;
	void *buf;

	printf("running cgd selftest ");

	for (size_t i = 0; i < __arraycount(selftests); i++) {
		const char *alg = selftests[i].alg;
		const uint8_t *key = selftests[i].key;
		int keylen = selftests[i].keylen;
		int txtlen = selftests[i].txtlen;

		printf("%s-%d ", alg, keylen);

		memset(&cs, 0, sizeof(cs));

		cs.sc_cfuncs = cryptfuncs_find(alg);
		if (cs.sc_cfuncs == NULL)
			panic("%s not implemented", alg);

		cs.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
		cs.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
		cs.sc_cdata.cf_keylen = keylen;

		cs.sc_cdata.cf_priv = cs.sc_cfuncs->cf_init(keylen,
		    key, &cs.sc_cdata.cf_blocksize);
		if (cs.sc_cdata.cf_priv == NULL)
			panic("cf_priv is NULL");
		if (cs.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
			panic("bad block size %zu", cs.sc_cdata.cf_blocksize);

		cs.sc_cdata.cf_blocksize /= 8;

		buf = malloc(txtlen, M_DEVBUF, M_WAITOK);
		memcpy(buf, selftests[i].ptxt, txtlen);

		cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno,
				selftests[i].secsize, CGD_CIPHER_ENCRYPT);
		if (memcmp(buf, selftests[i].ctxt, txtlen) != 0)
			panic("encryption is broken");

		cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno,
				selftests[i].secsize, CGD_CIPHER_DECRYPT);
		if (memcmp(buf, selftests[i].ptxt, txtlen) != 0)
			panic("decryption is broken");

		free(buf, M_DEVBUF);
		cs.sc_cfuncs->cf_destroy(cs.sc_cdata.cf_priv);
	}

	printf("done\n");
}

MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);

devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
#endif

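/*
 * Module control: run the selftest and, when built as a module,
 * register (or unregister) the cfdriver, cfattach and devsws.
 */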
static int
cgd_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
		selftest();
#ifdef _MODULE
		error = config_cfdriver_attach(&cgd_cd);
		if (error)
			break;

		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to register cfattach for "
			    "%s, error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
		/*
		 * Attach the {b,c}devsw's
		 */
		error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
		    &cgd_cdevsw, &cgd_cmajor);

		/*
		 * If devsw_attach fails, remove from autoconf database
		 */
		if (error) {
			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to attach %s devsw, "
			    "error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		/*
		 * Remove {b,c}devsw's
		 */
		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);

		/*
		 * Now remove device from autoconf database
		 */
		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
			    &cgd_cdevsw, &cgd_cmajor);
			aprint_error("%s: failed to detach %s cfattach, "
			    "error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
		error = config_cfdriver_detach(&cgd_cd);
		if (error) {
			(void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
			    &cgd_cdevsw, &cgd_cmajor);
			aprint_error("%s: failed to detach %s cfdriver, "
			    "error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
#endif
		break;

	case MODULE_CMD_STAT:
		error = ENOTTY;
		break;
	default:
		error = ENOTTY;
		break;
	}

	return error;
}