1 /* $NetBSD: cgd.c,v 1.116 2018/01/23 22:42:29 pgoyette Exp $ */ 2 3 /*- 4 * Copyright (c) 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Roland C. Dowdeswell. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.116 2018/01/23 22:42:29 pgoyette Exp $"); 34 35 #include <sys/types.h> 36 #include <sys/param.h> 37 #include <sys/systm.h> 38 #include <sys/proc.h> 39 #include <sys/errno.h> 40 #include <sys/buf.h> 41 #include <sys/bufq.h> 42 #include <sys/malloc.h> 43 #include <sys/module.h> 44 #include <sys/pool.h> 45 #include <sys/ioctl.h> 46 #include <sys/device.h> 47 #include <sys/disk.h> 48 #include <sys/disklabel.h> 49 #include <sys/fcntl.h> 50 #include <sys/namei.h> /* for pathbuf */ 51 #include <sys/vnode.h> 52 #include <sys/conf.h> 53 #include <sys/syslog.h> 54 55 #include <dev/dkvar.h> 56 #include <dev/cgdvar.h> 57 58 #include <miscfs/specfs/specdev.h> /* for v_rdev */ 59 60 #include "ioconf.h" 61 62 struct selftest_params { 63 const char *alg; 64 int blocksize; /* number of bytes */ 65 int secsize; 66 daddr_t blkno; 67 int keylen; /* number of bits */ 68 int txtlen; /* number of bytes */ 69 const uint8_t *key; 70 const uint8_t *ptxt; 71 const uint8_t *ctxt; 72 }; 73 74 /* Entry Point Functions */ 75 76 static dev_type_open(cgdopen); 77 static dev_type_close(cgdclose); 78 static dev_type_read(cgdread); 79 static dev_type_write(cgdwrite); 80 static dev_type_ioctl(cgdioctl); 81 static dev_type_strategy(cgdstrategy); 82 static dev_type_dump(cgddump); 83 static dev_type_size(cgdsize); 84 85 const struct bdevsw cgd_bdevsw = { 86 .d_open = cgdopen, 87 .d_close = cgdclose, 88 .d_strategy = cgdstrategy, 89 .d_ioctl = cgdioctl, 90 .d_dump = cgddump, 91 .d_psize = cgdsize, 92 .d_discard = nodiscard, 93 .d_flag = D_DISK 94 }; 95 96 const struct cdevsw cgd_cdevsw = { 97 .d_open = cgdopen, 98 .d_close = cgdclose, 99 .d_read = cgdread, 100 .d_write = cgdwrite, 101 .d_ioctl = cgdioctl, 102 .d_stop = nostop, 103 .d_tty = notty, 104 .d_poll = nopoll, 105 .d_mmap = nommap, 106 .d_kqfilter = nokqfilter, 107 .d_discard = nodiscard, 108 .d_flag = D_DISK 109 }; 110 111 /* 112 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1. 113 */ 114 static const uint8_t selftest_aes_xts_256_ptxt[64] = { 115 0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76, 116 0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2, 117 0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25, 118 0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c, 119 0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f, 120 0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00, 121 0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad, 122 0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12, 123 }; 124 125 static const uint8_t selftest_aes_xts_256_ctxt[512] = { 126 0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe, 127 0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f, 128 0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60, 129 0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5, 130 0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d, 131 0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce, 132 0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b, 133 0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb, 134 }; 135 136 static const uint8_t selftest_aes_xts_256_key[33] = { 137 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45, 138 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26, 139 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93, 140 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95, 141 0 142 }; 143 144 /* 145 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff. 146 */ 147 static const uint8_t selftest_aes_xts_512_ptxt[64] = { 148 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 149 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 150 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 151 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 152 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 153 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 154 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 155 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 156 }; 157 158 static const uint8_t selftest_aes_xts_512_ctxt[64] = { 159 0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6, 160 0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50, 161 0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02, 162 0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11, 163 0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24, 164 0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4, 165 0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2, 166 0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2, 167 }; 168 169 static const uint8_t selftest_aes_xts_512_key[65] = { 170 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45, 171 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26, 172 0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69, 173 0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27, 174 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93, 175 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95, 176 0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37, 177 0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92, 178 0 179 }; 180 181 const struct selftest_params selftests[] = { 182 { 183 .alg = "aes-xts", 184 .blocksize = 16, 185 .secsize = 512, 186 .blkno = 1, 187 .keylen = 256, 188 .txtlen = sizeof(selftest_aes_xts_256_ptxt), 189 .key = selftest_aes_xts_256_key, 190 .ptxt = selftest_aes_xts_256_ptxt, 191 .ctxt = selftest_aes_xts_256_ctxt 192 }, 193 { 194 .alg = "aes-xts", 195 .blocksize = 16, 196 .secsize = 512, 197 .blkno = 0xffff, 198 .keylen = 512, 199 .txtlen = sizeof(selftest_aes_xts_512_ptxt), 200 .key = selftest_aes_xts_512_key, 201 .ptxt = selftest_aes_xts_512_ptxt, 202 .ctxt = selftest_aes_xts_512_ctxt 203 } 204 }; 205 206 static int cgd_match(device_t, cfdata_t, void *); 207 static void cgd_attach(device_t, device_t, void *); 208 static int cgd_detach(device_t, int); 209 static struct cgd_softc *cgd_spawn(int); 210 static int cgd_destroy(device_t); 211 212 /* Internal Functions */ 213 214 static int cgd_diskstart(device_t, struct buf *); 215 static void cgdiodone(struct buf *); 216 static int cgd_dumpblocks(device_t, void *, daddr_t, int); 217 218 static int cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *); 219 static int cgd_ioctl_clr(struct cgd_softc *, struct lwp *); 220 static int cgd_ioctl_get(dev_t, void *, struct lwp *); 221 static int cgdinit(struct cgd_softc *, const char *, struct vnode *, 222 struct lwp *); 223 static void cgd_cipher(struct cgd_softc *, void *, void *, 224 size_t, daddr_t, size_t, int); 225 226 static struct dkdriver cgddkdriver = { 227 .d_minphys = minphys, 228 .d_open = cgdopen, 229 .d_close = cgdclose, 230 .d_strategy = cgdstrategy, 231 .d_iosize = NULL, 232 .d_diskstart = cgd_diskstart, 233 .d_dumpblocks = cgd_dumpblocks, 234 .d_lastclose = NULL 235 }; 236 237 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc), 238 cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 239 240 /* DIAGNOSTIC and DEBUG definitions */ 241 242 #if defined(CGDDEBUG) && !defined(DEBUG) 243 #define DEBUG 244 #endif 245 246 #ifdef DEBUG 247 int cgddebug = 0; 248 249 #define CGDB_FOLLOW 0x1 250 #define CGDB_IO 0x2 251 #define CGDB_CRYPTO 0x4 252 253 #define IFDEBUG(x,y) if (cgddebug & (x)) y 254 #define DPRINTF(x,y) IFDEBUG(x, printf y) 255 #define DPRINTF_FOLLOW(y) DPRINTF(CGDB_FOLLOW, y) 256 257 static void hexprint(const char *, void *, int); 258 259 #else 260 #define IFDEBUG(x,y) 261 #define DPRINTF(x,y) 262 #define DPRINTF_FOLLOW(y) 263 #endif 264 265 #ifdef DIAGNOSTIC 266 #define DIAGPANIC(x) panic x 267 #define DIAGCONDPANIC(x,y) if (x) panic y 268 #else 269 #define DIAGPANIC(x) 270 #define DIAGCONDPANIC(x,y) 271 #endif 272 273 /* Global variables */ 274 275 /* Utility Functions */ 276 277 #define CGDUNIT(x) DISKUNIT(x) 278 #define GETCGD_SOFTC(_cs, x) if (!((_cs) = getcgd_softc(x))) return ENXIO 279 280 /* The code */ 281 282 static struct cgd_softc * 283 getcgd_softc(dev_t dev) 284 { 285 int unit = CGDUNIT(dev); 286 struct cgd_softc *sc; 287 288 DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit)); 289 290 sc = device_lookup_private(&cgd_cd, unit); 291 if (sc == NULL) 292 sc = cgd_spawn(unit); 293 return sc; 294 } 295 296 static int 297 cgd_match(device_t self, cfdata_t cfdata, void *aux) 298 { 299 300 return 1; 301 } 302 303 static void 304 cgd_attach(device_t parent, device_t self, void *aux) 305 { 306 struct cgd_softc *sc = device_private(self); 307 308 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO); 309 dk_init(&sc->sc_dksc, self, DKTYPE_CGD); 310 disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver); 311 312 if (!pmf_device_register(self, NULL, NULL)) 313 aprint_error_dev(self, 314 "unable to register power management hooks\n"); 315 } 316 317 318 static int 319 cgd_detach(device_t self, int flags) 320 { 321 int ret; 322 const int pmask = 1 << RAW_PART; 323 struct cgd_softc *sc = device_private(self); 324 struct dk_softc *dksc = &sc->sc_dksc; 325 326 if (DK_BUSY(dksc, pmask)) 327 return EBUSY; 328 329 if (DK_ATTACHED(dksc) && 330 (ret = cgd_ioctl_clr(sc, curlwp)) != 0) 331 return ret; 332 333 disk_destroy(&dksc->sc_dkdev); 334 mutex_destroy(&sc->sc_lock); 335 336 return 0; 337 } 338 339 void 340 cgdattach(int num) 341 { 342 int error; 343 344 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca); 345 if (error != 0) 346 aprint_error("%s: unable to register cfattach\n", 347 cgd_cd.cd_name); 348 } 349 350 static struct cgd_softc * 351 cgd_spawn(int unit) 352 { 353 cfdata_t cf; 354 355 cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK); 356 cf->cf_name = cgd_cd.cd_name; 357 cf->cf_atname = cgd_cd.cd_name; 358 cf->cf_unit = unit; 359 cf->cf_fstate = FSTATE_STAR; 360 361 return device_private(config_attach_pseudo(cf)); 362 } 363 364 static int 365 cgd_destroy(device_t dev) 366 { 367 int error; 368 cfdata_t cf; 369 370 cf = device_cfdata(dev); 371 error = config_detach(dev, DETACH_QUIET); 372 if (error) 373 return error; 374 free(cf, M_DEVBUF); 375 return 0; 376 } 377 378 static int 379 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l) 380 { 381 struct cgd_softc *cs; 382 383 DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags)); 384 GETCGD_SOFTC(cs, dev); 385 return dk_open(&cs->sc_dksc, dev, flags, fmt, l); 386 } 387 388 static int 389 cgdclose(dev_t dev, int flags, int fmt, struct lwp *l) 390 { 391 int error; 392 struct cgd_softc *cs; 393 struct dk_softc *dksc; 394 395 DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags)); 396 GETCGD_SOFTC(cs, dev); 397 dksc = &cs->sc_dksc; 398 if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0) 399 return error; 400 401 if (!DK_ATTACHED(dksc)) { 402 if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) { 403 aprint_error_dev(dksc->sc_dev, 404 "unable to detach instance\n"); 405 return error; 406 } 407 } 408 return 0; 409 } 410 411 static void 412 cgdstrategy(struct buf *bp) 413 { 414 struct cgd_softc *cs; 415 416 DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp, 417 (long)bp->b_bcount)); 418 419 cs = getcgd_softc(bp->b_dev); 420 if (!cs) { 421 bp->b_error = ENXIO; 422 goto bail; 423 } 424 425 /* 426 * Reject unaligned writes. 427 */ 428 if (((uintptr_t)bp->b_data & 3) != 0) { 429 bp->b_error = EINVAL; 430 goto bail; 431 } 432 433 dk_strategy(&cs->sc_dksc, bp); 434 return; 435 436 bail: 437 bp->b_resid = bp->b_bcount; 438 biodone(bp); 439 return; 440 } 441 442 static int 443 cgdsize(dev_t dev) 444 { 445 struct cgd_softc *cs = getcgd_softc(dev); 446 447 DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev)); 448 if (!cs) 449 return -1; 450 return dk_size(&cs->sc_dksc, dev); 451 } 452 453 /* 454 * cgd_{get,put}data are functions that deal with getting a buffer 455 * for the new encrypted data. We have a buffer per device so that 456 * we can ensure that we can always have a transaction in flight. 457 * We use this buffer first so that we have one less piece of 458 * malloc'ed data at any given point. 459 */ 460 461 static void * 462 cgd_getdata(struct dk_softc *dksc, unsigned long size) 463 { 464 struct cgd_softc *cs = (struct cgd_softc *)dksc; 465 void * data = NULL; 466 467 mutex_enter(&cs->sc_lock); 468 if (cs->sc_data_used == 0) { 469 cs->sc_data_used = 1; 470 data = cs->sc_data; 471 } 472 mutex_exit(&cs->sc_lock); 473 474 if (data) 475 return data; 476 477 return malloc(size, M_DEVBUF, M_NOWAIT); 478 } 479 480 static void 481 cgd_putdata(struct dk_softc *dksc, void *data) 482 { 483 struct cgd_softc *cs = (struct cgd_softc *)dksc; 484 485 if (data == cs->sc_data) { 486 mutex_enter(&cs->sc_lock); 487 cs->sc_data_used = 0; 488 mutex_exit(&cs->sc_lock); 489 } else { 490 free(data, M_DEVBUF); 491 } 492 } 493 494 static int 495 cgd_diskstart(device_t dev, struct buf *bp) 496 { 497 struct cgd_softc *cs = device_private(dev); 498 struct dk_softc *dksc = &cs->sc_dksc; 499 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom; 500 struct buf *nbp; 501 void * addr; 502 void * newaddr; 503 daddr_t bn; 504 struct vnode *vp; 505 506 DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp)); 507 508 bn = bp->b_rawblkno; 509 510 /* 511 * We attempt to allocate all of our resources up front, so that 512 * we can fail quickly if they are unavailable. 513 */ 514 nbp = getiobuf(cs->sc_tvn, false); 515 if (nbp == NULL) 516 return EAGAIN; 517 518 /* 519 * If we are writing, then we need to encrypt the outgoing 520 * block into a new block of memory. 521 */ 522 newaddr = addr = bp->b_data; 523 if ((bp->b_flags & B_READ) == 0) { 524 newaddr = cgd_getdata(dksc, bp->b_bcount); 525 if (!newaddr) { 526 putiobuf(nbp); 527 return EAGAIN; 528 } 529 cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn, 530 dg->dg_secsize, CGD_CIPHER_ENCRYPT); 531 } 532 533 nbp->b_data = newaddr; 534 nbp->b_flags = bp->b_flags; 535 nbp->b_oflags = bp->b_oflags; 536 nbp->b_cflags = bp->b_cflags; 537 nbp->b_iodone = cgdiodone; 538 nbp->b_proc = bp->b_proc; 539 nbp->b_blkno = btodb(bn * dg->dg_secsize); 540 nbp->b_bcount = bp->b_bcount; 541 nbp->b_private = bp; 542 543 BIO_COPYPRIO(nbp, bp); 544 545 if ((nbp->b_flags & B_READ) == 0) { 546 vp = nbp->b_vp; 547 mutex_enter(vp->v_interlock); 548 vp->v_numoutput++; 549 mutex_exit(vp->v_interlock); 550 } 551 VOP_STRATEGY(cs->sc_tvn, nbp); 552 553 return 0; 554 } 555 556 static void 557 cgdiodone(struct buf *nbp) 558 { 559 struct buf *obp = nbp->b_private; 560 struct cgd_softc *cs = getcgd_softc(obp->b_dev); 561 struct dk_softc *dksc = &cs->sc_dksc; 562 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom; 563 daddr_t bn; 564 565 KDASSERT(cs); 566 567 DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp)); 568 DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n", 569 obp, obp->b_bcount, obp->b_resid)); 570 DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64 571 " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data, 572 nbp->b_bcount)); 573 if (nbp->b_error != 0) { 574 obp->b_error = nbp->b_error; 575 DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname, 576 obp->b_error)); 577 } 578 579 /* Perform the decryption if we are reading. 580 * 581 * Note: use the blocknumber from nbp, since it is what 582 * we used to encrypt the blocks. 583 */ 584 585 if (nbp->b_flags & B_READ) { 586 bn = dbtob(nbp->b_blkno) / dg->dg_secsize; 587 cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount, 588 bn, dg->dg_secsize, CGD_CIPHER_DECRYPT); 589 } 590 591 /* If we allocated memory, free it now... */ 592 if (nbp->b_data != obp->b_data) 593 cgd_putdata(dksc, nbp->b_data); 594 595 putiobuf(nbp); 596 597 /* Request is complete for whatever reason */ 598 obp->b_resid = 0; 599 if (obp->b_error != 0) 600 obp->b_resid = obp->b_bcount; 601 602 dk_done(dksc, obp); 603 dk_start(dksc, NULL); 604 } 605 606 static int 607 cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk) 608 { 609 struct cgd_softc *sc = device_private(dev); 610 struct dk_softc *dksc = &sc->sc_dksc; 611 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom; 612 size_t nbytes, blksize; 613 void *buf; 614 int error; 615 616 /* 617 * dk_dump gives us units of disklabel sectors. Everything 618 * else in cgd uses units of diskgeom sectors. These had 619 * better agree; otherwise we need to figure out how to convert 620 * between them. 621 */ 622 KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize), 623 "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32, 624 dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize); 625 blksize = dg->dg_secsize; 626 627 /* 628 * Compute the number of bytes in this request, which dk_dump 629 * has `helpfully' converted to a number of blocks for us. 630 */ 631 nbytes = nblk*blksize; 632 633 /* Try to acquire a buffer to store the ciphertext. */ 634 buf = cgd_getdata(dksc, nbytes); 635 if (buf == NULL) 636 /* Out of memory: give up. */ 637 return ENOMEM; 638 639 /* Encrypt the caller's data into the temporary buffer. */ 640 cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT); 641 642 /* Pass it on to the underlying disk device. */ 643 error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes); 644 645 /* Release the buffer. */ 646 cgd_putdata(dksc, buf); 647 648 /* Return any error from the underlying disk device. */ 649 return error; 650 } 651 652 /* XXX: we should probably put these into dksubr.c, mostly */ 653 static int 654 cgdread(dev_t dev, struct uio *uio, int flags) 655 { 656 struct cgd_softc *cs; 657 struct dk_softc *dksc; 658 659 DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n", 660 (unsigned long long)dev, uio, flags)); 661 GETCGD_SOFTC(cs, dev); 662 dksc = &cs->sc_dksc; 663 if (!DK_ATTACHED(dksc)) 664 return ENXIO; 665 return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio); 666 } 667 668 /* XXX: we should probably put these into dksubr.c, mostly */ 669 static int 670 cgdwrite(dev_t dev, struct uio *uio, int flags) 671 { 672 struct cgd_softc *cs; 673 struct dk_softc *dksc; 674 675 DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags)); 676 GETCGD_SOFTC(cs, dev); 677 dksc = &cs->sc_dksc; 678 if (!DK_ATTACHED(dksc)) 679 return ENXIO; 680 return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio); 681 } 682 683 static int 684 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l) 685 { 686 struct cgd_softc *cs; 687 struct dk_softc *dksc; 688 int part = DISKPART(dev); 689 int pmask = 1 << part; 690 691 DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n", 692 dev, cmd, data, flag, l)); 693 694 switch (cmd) { 695 case CGDIOCGET: 696 return cgd_ioctl_get(dev, data, l); 697 case CGDIOCSET: 698 case CGDIOCCLR: 699 if ((flag & FWRITE) == 0) 700 return EBADF; 701 /* FALLTHROUGH */ 702 default: 703 GETCGD_SOFTC(cs, dev); 704 dksc = &cs->sc_dksc; 705 break; 706 } 707 708 switch (cmd) { 709 case CGDIOCSET: 710 if (DK_ATTACHED(dksc)) 711 return EBUSY; 712 return cgd_ioctl_set(cs, data, l); 713 case CGDIOCCLR: 714 if (DK_BUSY(&cs->sc_dksc, pmask)) 715 return EBUSY; 716 return cgd_ioctl_clr(cs, l); 717 case DIOCGCACHE: 718 case DIOCCACHESYNC: 719 if (!DK_ATTACHED(dksc)) 720 return ENOENT; 721 /* 722 * We pass this call down to the underlying disk. 723 */ 724 return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred); 725 case DIOCGSTRATEGY: 726 case DIOCSSTRATEGY: 727 if (!DK_ATTACHED(dksc)) 728 return ENOENT; 729 /*FALLTHROUGH*/ 730 default: 731 return dk_ioctl(dksc, dev, cmd, data, flag, l); 732 case CGDIOCGET: 733 KASSERT(0); 734 return EINVAL; 735 } 736 } 737 738 static int 739 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size) 740 { 741 struct cgd_softc *cs; 742 743 DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n", 744 dev, blkno, va, (unsigned long)size)); 745 GETCGD_SOFTC(cs, dev); 746 return dk_dump(&cs->sc_dksc, dev, blkno, va, size); 747 } 748 749 /* 750 * XXXrcd: 751 * for now we hardcode the maximum key length. 752 */ 753 #define MAX_KEYSIZE 1024 754 755 static const struct { 756 const char *n; 757 int v; 758 int d; 759 } encblkno[] = { 760 { "encblkno", CGD_CIPHER_CBC_ENCBLKNO8, 1 }, 761 { "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 }, 762 { "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 }, 763 }; 764 765 /* ARGSUSED */ 766 static int 767 cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l) 768 { 769 struct cgd_ioctl *ci = data; 770 struct vnode *vp; 771 int ret; 772 size_t i; 773 size_t keybytes; /* key length in bytes */ 774 const char *cp; 775 struct pathbuf *pb; 776 char *inbuf; 777 struct dk_softc *dksc = &cs->sc_dksc; 778 779 cp = ci->ci_disk; 780 781 ret = pathbuf_copyin(ci->ci_disk, &pb); 782 if (ret != 0) { 783 return ret; 784 } 785 ret = dk_lookup(pb, l, &vp); 786 pathbuf_destroy(pb); 787 if (ret != 0) { 788 return ret; 789 } 790 791 inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK); 792 793 if ((ret = cgdinit(cs, cp, vp, l)) != 0) 794 goto bail; 795 796 (void)memset(inbuf, 0, MAX_KEYSIZE); 797 ret = copyinstr(ci->ci_alg, inbuf, 256, NULL); 798 if (ret) 799 goto bail; 800 cs->sc_cfuncs = cryptfuncs_find(inbuf); 801 if (!cs->sc_cfuncs) { 802 ret = EINVAL; 803 goto bail; 804 } 805 806 (void)memset(inbuf, 0, MAX_KEYSIZE); 807 ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL); 808 if (ret) 809 goto bail; 810 811 for (i = 0; i < __arraycount(encblkno); i++) 812 if (strcmp(encblkno[i].n, inbuf) == 0) 813 break; 814 815 if (i == __arraycount(encblkno)) { 816 ret = EINVAL; 817 goto bail; 818 } 819 820 keybytes = ci->ci_keylen / 8 + 1; 821 if (keybytes > MAX_KEYSIZE) { 822 ret = EINVAL; 823 goto bail; 824 } 825 826 (void)memset(inbuf, 0, MAX_KEYSIZE); 827 ret = copyin(ci->ci_key, inbuf, keybytes); 828 if (ret) 829 goto bail; 830 831 cs->sc_cdata.cf_blocksize = ci->ci_blocksize; 832 cs->sc_cdata.cf_mode = encblkno[i].v; 833 cs->sc_cdata.cf_keylen = ci->ci_keylen; 834 cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf, 835 &cs->sc_cdata.cf_blocksize); 836 if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) { 837 log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n", 838 cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE); 839 cs->sc_cdata.cf_priv = NULL; 840 } 841 842 /* 843 * The blocksize is supposed to be in bytes. Unfortunately originally 844 * it was expressed in bits. For compatibility we maintain encblkno 845 * and encblkno8. 846 */ 847 cs->sc_cdata.cf_blocksize /= encblkno[i].d; 848 (void)explicit_memset(inbuf, 0, MAX_KEYSIZE); 849 if (!cs->sc_cdata.cf_priv) { 850 ret = EINVAL; /* XXX is this the right error? */ 851 goto bail; 852 } 853 free(inbuf, M_TEMP); 854 855 bufq_alloc(&dksc->sc_bufq, "fcfs", 0); 856 857 cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK); 858 cs->sc_data_used = 0; 859 860 /* Attach the disk. */ 861 dk_attach(dksc); 862 disk_attach(&dksc->sc_dkdev); 863 864 disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL); 865 866 /* Discover wedges on this disk. */ 867 dkwedge_discover(&dksc->sc_dkdev); 868 869 return 0; 870 871 bail: 872 free(inbuf, M_TEMP); 873 (void)vn_close(vp, FREAD|FWRITE, l->l_cred); 874 return ret; 875 } 876 877 /* ARGSUSED */ 878 static int 879 cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l) 880 { 881 struct dk_softc *dksc = &cs->sc_dksc; 882 883 if (!DK_ATTACHED(dksc)) 884 return ENXIO; 885 886 /* Delete all of our wedges. */ 887 dkwedge_delall(&dksc->sc_dkdev); 888 889 /* Kill off any queued buffers. */ 890 dk_drain(dksc); 891 bufq_free(dksc->sc_bufq); 892 893 (void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred); 894 cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv); 895 free(cs->sc_tpath, M_DEVBUF); 896 free(cs->sc_data, M_DEVBUF); 897 cs->sc_data_used = 0; 898 dk_detach(dksc); 899 disk_detach(&dksc->sc_dkdev); 900 901 return 0; 902 } 903 904 static int 905 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l) 906 { 907 struct cgd_softc *cs = getcgd_softc(dev); 908 struct cgd_user *cgu; 909 int unit; 910 struct dk_softc *dksc = &cs->sc_dksc; 911 912 unit = CGDUNIT(dev); 913 cgu = (struct cgd_user *)data; 914 915 DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n", 916 dev, unit, data, l)); 917 918 if (cgu->cgu_unit == -1) 919 cgu->cgu_unit = unit; 920 921 if (cgu->cgu_unit < 0) 922 return EINVAL; /* XXX: should this be ENXIO? */ 923 924 cs = device_lookup_private(&cgd_cd, unit); 925 if (cs == NULL || !DK_ATTACHED(dksc)) { 926 cgu->cgu_dev = 0; 927 cgu->cgu_alg[0] = '\0'; 928 cgu->cgu_blocksize = 0; 929 cgu->cgu_mode = 0; 930 cgu->cgu_keylen = 0; 931 } 932 else { 933 cgu->cgu_dev = cs->sc_tdev; 934 strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name, 935 sizeof(cgu->cgu_alg)); 936 cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize; 937 cgu->cgu_mode = cs->sc_cdata.cf_mode; 938 cgu->cgu_keylen = cs->sc_cdata.cf_keylen; 939 } 940 return 0; 941 } 942 943 static int 944 cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp, 945 struct lwp *l) 946 { 947 struct disk_geom *dg; 948 int ret; 949 char *tmppath; 950 uint64_t psize; 951 unsigned secsize; 952 struct dk_softc *dksc = &cs->sc_dksc; 953 954 cs->sc_tvn = vp; 955 cs->sc_tpath = NULL; 956 957 tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 958 ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen); 959 if (ret) 960 goto bail; 961 cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK); 962 memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen); 963 964 cs->sc_tdev = vp->v_rdev; 965 966 if ((ret = getdisksize(vp, &psize, &secsize)) != 0) 967 goto bail; 968 969 if (psize == 0) { 970 ret = ENODEV; 971 goto bail; 972 } 973 974 /* 975 * XXX here we should probe the underlying device. If we 976 * are accessing a partition of type RAW_PART, then 977 * we should populate our initial geometry with the 978 * geometry that we discover from the device. 979 */ 980 dg = &dksc->sc_dkdev.dk_geom; 981 memset(dg, 0, sizeof(*dg)); 982 dg->dg_secperunit = psize; 983 dg->dg_secsize = secsize; 984 dg->dg_ntracks = 1; 985 dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize; 986 dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors; 987 988 bail: 989 free(tmppath, M_TEMP); 990 if (ret && cs->sc_tpath) 991 free(cs->sc_tpath, M_DEVBUF); 992 return ret; 993 } 994 995 /* 996 * Our generic cipher entry point. This takes care of the 997 * IV mode and passes off the work to the specific cipher. 998 * We implement here the IV method ``encrypted block 999 * number''. 1000 * 1001 * XXXrcd: for now we rely on our own crypto framework defined 1002 * in dev/cgd_crypto.c. This will change when we 1003 * get a generic kernel crypto framework. 1004 */ 1005 1006 static void 1007 blkno2blkno_buf(char *sbuf, daddr_t blkno) 1008 { 1009 int i; 1010 1011 /* Set up the blkno in blkno_buf, here we do not care much 1012 * about the final layout of the information as long as we 1013 * can guarantee that each sector will have a different IV 1014 * and that the endianness of the machine will not affect 1015 * the representation that we have chosen. 1016 * 1017 * We choose this representation, because it does not rely 1018 * on the size of buf (which is the blocksize of the cipher), 1019 * but allows daddr_t to grow without breaking existing 1020 * disks. 1021 * 1022 * Note that blkno2blkno_buf does not take a size as input, 1023 * and hence must be called on a pre-zeroed buffer of length 1024 * greater than or equal to sizeof(daddr_t). 1025 */ 1026 for (i=0; i < sizeof(daddr_t); i++) { 1027 *sbuf++ = blkno & 0xff; 1028 blkno >>= 8; 1029 } 1030 } 1031 1032 static void 1033 cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv, 1034 size_t len, daddr_t blkno, size_t secsize, int dir) 1035 { 1036 char *dst = dstv; 1037 char *src = srcv; 1038 cfunc_cipher_prep *ciprep = cs->sc_cfuncs->cf_cipher_prep; 1039 cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher; 1040 struct uio dstuio; 1041 struct uio srcuio; 1042 struct iovec dstiov[2]; 1043 struct iovec srciov[2]; 1044 size_t blocksize = cs->sc_cdata.cf_blocksize; 1045 size_t todo; 1046 char blkno_buf[CGD_MAXBLOCKSIZE], *iv; 1047 1048 DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir)); 1049 1050 DIAGCONDPANIC(len % blocksize != 0, 1051 ("cgd_cipher: len %% blocksize != 0")); 1052 1053 /* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */ 1054 DIAGCONDPANIC(sizeof(daddr_t) > blocksize, 1055 ("cgd_cipher: sizeof(daddr_t) > blocksize")); 1056 1057 DIAGCONDPANIC(blocksize > CGD_MAXBLOCKSIZE, 1058 ("cgd_cipher: blocksize > CGD_MAXBLOCKSIZE")); 1059 1060 dstuio.uio_iov = dstiov; 1061 dstuio.uio_iovcnt = 1; 1062 1063 srcuio.uio_iov = srciov; 1064 srcuio.uio_iovcnt = 1; 1065 1066 for (; len > 0; len -= todo) { 1067 todo = MIN(len, secsize); 1068 1069 dstiov[0].iov_base = dst; 1070 srciov[0].iov_base = src; 1071 dstiov[0].iov_len = todo; 1072 srciov[0].iov_len = todo; 1073 1074 memset(blkno_buf, 0x0, blocksize); 1075 blkno2blkno_buf(blkno_buf, blkno); 1076 IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf", 1077 blkno_buf, blocksize)); 1078 1079 /* 1080 * Compute an initial IV. All ciphers 1081 * can convert blkno_buf in-place. 1082 */ 1083 iv = blkno_buf; 1084 ciprep(cs->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir); 1085 IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize)); 1086 1087 cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir); 1088 1089 dst += todo; 1090 src += todo; 1091 blkno++; 1092 } 1093 } 1094 1095 #ifdef DEBUG 1096 static void 1097 hexprint(const char *start, void *buf, int len) 1098 { 1099 char *c = buf; 1100 1101 DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0")); 1102 printf("%s: len=%06d 0x", start, len); 1103 while (len--) 1104 printf("%02x", (unsigned char) *c++); 1105 } 1106 #endif 1107 1108 static void 1109 selftest(void) 1110 { 1111 struct cgd_softc cs; 1112 void *buf; 1113 1114 printf("running cgd selftest "); 1115 1116 for (size_t i = 0; i < __arraycount(selftests); i++) { 1117 const char *alg = selftests[i].alg; 1118 const uint8_t *key = selftests[i].key; 1119 int keylen = selftests[i].keylen; 1120 int txtlen = selftests[i].txtlen; 1121 1122 printf("%s-%d ", alg, keylen); 1123 1124 memset(&cs, 0, sizeof(cs)); 1125 1126 cs.sc_cfuncs = cryptfuncs_find(alg); 1127 if (cs.sc_cfuncs == NULL) 1128 panic("%s not implemented", alg); 1129 1130 cs.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize; 1131 cs.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1; 1132 cs.sc_cdata.cf_keylen = keylen; 1133 1134 cs.sc_cdata.cf_priv = cs.sc_cfuncs->cf_init(keylen, 1135 key, &cs.sc_cdata.cf_blocksize); 1136 if (cs.sc_cdata.cf_priv == NULL) 1137 panic("cf_priv is NULL"); 1138 if (cs.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) 1139 panic("bad block size %zu", cs.sc_cdata.cf_blocksize); 1140 1141 cs.sc_cdata.cf_blocksize /= 8; 1142 1143 buf = malloc(txtlen, M_DEVBUF, M_WAITOK); 1144 memcpy(buf, selftests[i].ptxt, txtlen); 1145 1146 cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno, 1147 selftests[i].secsize, CGD_CIPHER_ENCRYPT); 1148 if (memcmp(buf, selftests[i].ctxt, txtlen) != 0) 1149 panic("encryption is broken"); 1150 1151 cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno, 1152 selftests[i].secsize, CGD_CIPHER_DECRYPT); 1153 if (memcmp(buf, selftests[i].ptxt, txtlen) != 0) 1154 panic("decryption is broken"); 1155 1156 free(buf, M_DEVBUF); 1157 cs.sc_cfuncs->cf_destroy(cs.sc_cdata.cf_priv); 1158 } 1159 1160 printf("done\n"); 1161 } 1162 1163 MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs"); 1164 1165 #ifdef _MODULE 1166 CFDRIVER_DECL(cgd, DV_DISK, NULL); 1167 1168 devmajor_t cgd_bmajor = -1, cgd_cmajor = -1; 1169 #endif 1170 1171 static int 1172 cgd_modcmd(modcmd_t cmd, void *arg) 1173 { 1174 int error = 0; 1175 1176 switch (cmd) { 1177 case MODULE_CMD_INIT: 1178 selftest(); 1179 #ifdef _MODULE 1180 error = config_cfdriver_attach(&cgd_cd); 1181 if (error) 1182 break; 1183 1184 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca); 1185 if (error) { 1186 config_cfdriver_detach(&cgd_cd); 1187 aprint_error("%s: unable to register cfattach for" 1188 "%s, error %d\n", __func__, cgd_cd.cd_name, error); 1189 break; 1190 } 1191 /* 1192 * Attach the {b,c}devsw's 1193 */ 1194 error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor, 1195 &cgd_cdevsw, &cgd_cmajor); 1196 1197 /* 1198 * If devsw_attach fails, remove from autoconf database 1199 */ 1200 if (error) { 1201 config_cfattach_detach(cgd_cd.cd_name, &cgd_ca); 1202 config_cfdriver_detach(&cgd_cd); 1203 aprint_error("%s: unable to attach %s devsw, " 1204 "error %d", __func__, cgd_cd.cd_name, error); 1205 break; 1206 } 1207 #endif 1208 break; 1209 1210 case MODULE_CMD_FINI: 1211 #ifdef _MODULE 1212 /* 1213 * Remove {b,c}devsw's 1214 */ 1215 devsw_detach(&cgd_bdevsw, &cgd_cdevsw); 1216 1217 /* 1218 * Now remove device from autoconf database 1219 */ 1220 error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca); 1221 if (error) { 1222 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor, 1223 &cgd_cdevsw, &cgd_cmajor); 1224 aprint_error("%s: failed to detach %s cfattach, " 1225 "error %d\n", __func__, cgd_cd.cd_name, error); 1226 break; 1227 } 1228 error = config_cfdriver_detach(&cgd_cd); 1229 if (error) { 1230 (void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca); 1231 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor, 1232 &cgd_cdevsw, &cgd_cmajor); 1233 aprint_error("%s: failed to detach %s cfdriver, " 1234 "error %d\n", __func__, cgd_cd.cd_name, error); 1235 break; 1236 } 1237 #endif 1238 break; 1239 1240 case MODULE_CMD_STAT: 1241 error = ENOTTY; 1242 break; 1243 default: 1244 error = ENOTTY; 1245 break; 1246 } 1247 1248 return error; 1249 } 1250