1 /* $NetBSD: cgd.c,v 1.74 2011/06/21 06:23:38 jruoho Exp $ */ 2 3 /*- 4 * Copyright (c) 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Roland C. Dowdeswell. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.74 2011/06/21 06:23:38 jruoho Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/namei.h> /* for pathbuf */
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/syslog.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>

/* Entry Point Functions */

void	cgdattach(int);

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

/* Block-device switch: cgd presents itself as a disk (D_DISK). */
const struct bdevsw cgd_bdevsw = {
	cgdopen, cgdclose, cgdstrategy, cgdioctl,
	cgddump, cgdsize, D_DISK
};

/* Character-device switch: raw reads/writes go through physio. */
const struct cdevsw cgd_cdevsw = {
	cgdopen, cgdclose, cgdread, cgdwrite, cgdioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

/* Autoconf glue for the pseudo-device. */
static int	cgd_match(device_t, cfdata_t, void *);
static void	cgd_attach(device_t, device_t, void *);
static int	cgd_detach(device_t, int);
static struct cgd_softc	*cgd_spawn(int);
static int	cgd_destroy(device_t);

/* Internal Functions */

static int	cgdstart(struct dk_softc *, struct buf *);
static void	cgdiodone(struct buf *);

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
			struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, void *,
			   size_t, daddr_t, size_t, int);

/* Pseudo-disk Interface */

/*
 * Glue table handed to the dksubr framework: identifies this driver
 * as DTYPE_CGD and routes open/close/strategy/start through our
 * entry points.
 */
static struct dk_intf the_dkintf = {
	DTYPE_CGD,
	"cgd",
	cgdopen,
	cgdclose,
	cgdstrategy,
	cgdstart,
};
static struct dk_intf *di = &the_dkintf;

static struct dkdriver cgddkdriver = {
	.d_strategy = cgdstrategy,
	.d_minphys = minphys,
};

CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
extern struct cfdriver cgd_cd;

/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;		/* bitmask of the CGDB_* tracing flags below */

#define CGDB_FOLLOW	0x1	/* trace function entry */
#define CGDB_IO		0x2	/* trace I/O requests */
#define CGDB_CRYPTO	0x4	/* dump crypto intermediates */

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif

/* Global variables */

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
/* Look up the softc for dev x; make the CALLER return ENXIO on failure. */
#define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO

/* The code */

/*
 * Map a dev_t to its softc.  If the unit has not been attached yet,
 * spawn a pseudo-device instance on the fly.  May return NULL.
 */
static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	int unit = CGDUNIT(dev);
	struct cgd_softc *sc;

	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL)
		sc = cgd_spawn(unit);
	return sc;
}

/* Autoconf match: a cgd pseudo-device always matches. */
static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}

/*
 * Autoconf attach: initialize the softc lock, the dksubr state and
 * the disk(9) structure.  No crypto state is set up until CGDIOCSET.
 */
static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	sc->sc_dev = self;
	simple_lock_init(&sc->sc_slock);
	dk_sc_init(&sc->sc_dksc, sc, device_xname(sc->sc_dev));
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "unable to register power management hooks\n");
}


/*
 * Autoconf detach: refuse while the raw partition is open, and tear
 * down any configured crypto state via cgd_ioctl_clr() first.
 */
static int
cgd_detach(device_t self, int flags)
{
	int ret;
	const int pmask = 1 << RAW_PART;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, pmask))
		return EBUSY;

	if ((dksc->sc_flags & DKF_INITED) != 0 &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);

	return 0;
}

/*
 * Pseudo-device attach entry point: register the cfattach so units
 * can then be created on demand by cgd_spawn().
 */
void
cgdattach(int num)
{
	int error;

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
}

/*
 * Create a new cgd unit on first use of an unconfigured dev_t.  The
 * cfdata is malloc'ed here and freed again in cgd_destroy().
 * NOTE(review): if config_attach_pseudo() fails this returns
 * device_private(NULL); callers treat a NULL result as ENXIO, but the
 * malloc'ed cfdata would not be released — confirm against newer
 * revisions of this driver.
 */
static struct cgd_softc *
cgd_spawn(int unit)
{
	cfdata_t cf;

	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
	cf->cf_name = cgd_cd.cd_name;
	cf->cf_atname = cgd_cd.cd_name;
	cf->cf_unit = unit;
	cf->cf_fstate = FSTATE_STAR;

	return device_private(config_attach_pseudo(cf));
}

/* Undo cgd_spawn(): detach the device and release its cfdata. */
static int
cgd_destroy(device_t dev)
{
	int error;
	cfdata_t cf;

	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;
	free(cf, M_DEVBUF);
	return 0;
}

/* Open entry point: defer the real work to the dksubr framework. */
static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_open(di, &cs->sc_dksc, dev, flags, fmt, l);
}

/*
 * Close entry point.  When the last close leaves the unit
 * unconfigured, destroy the spawned pseudo-device again.
 */
static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	int error;
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((error = dk_close(di, dksc, dev, flags, fmt, l)) != 0)
		return error;

	if ((dksc->sc_flags & DKF_INITED) == 0) {
		if ((error = cgd_destroy(cs->sc_dev)) != 0) {
			aprint_error_dev(cs->sc_dev,
			    "unable to detach instance\n");
			return error;
		}
	}
	return 0;
}

/*
 * Strategy entry point: validate the request, then hand it to the
 * dksubr queue (dk_strategy), which will call cgdstart() to issue it.
 */
static void
cgdstrategy(struct buf *bp)
{
	struct	cgd_softc *cs = getcgd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	/*
	 * Reject unaligned writes.  We can encrypt and decrypt only
	 * complete disk sectors, and we let the ciphers require their
	 * buffers to be aligned to 32-bit boundaries.
	 */
	if (bp->b_blkno < 0 ||
	    (bp->b_bcount % DEV_BSIZE) != 0 ||
	    ((uintptr_t)bp->b_data & 3) != 0) {
		bp->b_error = EINVAL;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}

	/* XXXrcd: Should we test for (cs != NULL)? */
	dk_strategy(di, &cs->sc_dksc, bp);
	return;
}

/* Size entry point: -1 signals an unconfigured unit. */
static int
cgdsize(dev_t dev)
{
	struct cgd_softc *cs = getcgd_softc(dev);

	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
	if (!cs)
		return -1;
	return dk_size(di, &cs->sc_dksc, dev);
}

/*
 * cgd_{get,put}data are functions that deal with getting a buffer
 * for the new encrypted data.  We have a buffer per device so that
 * we can ensure that we can always have a transaction in flight.
 * We use this buffer first so that we have one less piece of
 * malloc'ed data at any given point.
 */

/*
 * Grab the per-device bounce buffer if it is free (under sc_slock);
 * otherwise fall back to malloc.  May return NULL (M_NOWAIT), which
 * cgdstart() treats as a transient failure.
 */
static void *
cgd_getdata(struct dk_softc *dksc, unsigned long size)
{
	struct	cgd_softc *cs = dksc->sc_osc;
	void *	data = NULL;

	simple_lock(&cs->sc_slock);
	if (cs->sc_data_used == 0) {
		cs->sc_data_used = 1;
		data = cs->sc_data;
	}
	simple_unlock(&cs->sc_slock);

	if (data)
		return data;

	return malloc(size, M_DEVBUF, M_NOWAIT);
}

/* Release a buffer obtained from cgd_getdata(). */
static void
cgd_putdata(struct dk_softc *dksc, void *data)
{
	struct	cgd_softc *cs = dksc->sc_osc;

	if (data == cs->sc_data) {
		simple_lock(&cs->sc_slock);
		cs->sc_data_used = 0;
		simple_unlock(&cs->sc_slock);
	} else {
		free(data, M_DEVBUF);
	}
}

/*
 * Issue one transfer from the dksubr queue.  For writes the data is
 * encrypted into a bounce buffer first; the nested buf is then sent
 * down to the backing vnode.  Returns -1 (without biodone) to ask
 * dksubr to retry later when resources are exhausted.
 */
static int
cgdstart(struct dk_softc *dksc, struct buf *bp)
{
	struct	cgd_softc *cs = dksc->sc_osc;
	struct	buf *nbp;
	void *	addr;
	void *	newaddr;
	daddr_t	bn;
	struct	vnode *vp;

	DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
	disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */

	nbp = getiobuf(cs->sc_tvn, false);
	if (nbp == NULL) {
		disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
		return -1;
	}

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.  If we fail, then we
	 * return an error and let the dksubr framework deal with it.
	 */
	newaddr = addr = bp->b_data;
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(dksc, bp->b_bcount);
		if (!newaddr) {
			putiobuf(nbp);
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			return -1;
		}
		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
		    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
	}

	nbp->b_data = newaddr;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_blkno = bn;
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = bp;

	BIO_COPYPRIO(nbp, bp);

	/* Writes must account for the pending output on the vnode. */
	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(cs->sc_tvn, nbp);
	return 0;
}

/*
 * Completion handler for the nested buf: decrypt in place for reads,
 * release the bounce buffer, complete the original buf and kick the
 * dksubr queue for the next transfer.
 */
static void
cgdiodone(struct buf *nbp)
{
	struct	buf *obp = nbp->b_private;
	struct	cgd_softc *cs = getcgd_softc(obp->b_dev);
	struct	dk_softc *dksc = &cs->sc_dksc;
	int s;

	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64 " addr %p bcnt %d\n",
	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ)
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;
	s = splbio();
	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
	    (obp->b_flags & B_READ));
	biodone(obp);
	dk_iodone(di, dksc);
	splx(s);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
	    (unsigned long long)dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}

/*
 * Ioctl entry point.  CGDIOCSET/CGDIOCCLR configure and unconfigure
 * the unit; DIOCCACHESYNC is forwarded to the backing device, and
 * everything else is handled by the dksubr framework.
 */
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;
	struct	disk *dk;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	dk = &dksc->sc_dkdev;
	switch (cmd) {
	case CGDIOCSET:
	case CGDIOCCLR:
		/* Configuration changes require a writable descriptor. */
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	switch (cmd) {
	case CGDIOCSET:
		if (dksc->sc_flags & DKF_INITED)
			return EBUSY;
		return cgd_ioctl_set(cs, data, l);
	case CGDIOCCLR:
		if (DK_BUSY(&cs->sc_dksc, pmask))
			return EBUSY;
		return cgd_ioctl_clr(cs, l);
	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			return (EBADF);

		/*
		 * We pass this call down to the underlying disk.
		 */
		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
	default:
		return dk_ioctl(di, dksc, dev, cmd, data, flag, l);
	}
}

/* Dump entry point: forwarded to the dksubr framework. */
static int
cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
	    dev, blkno, va, (unsigned long)size));
	GETCGD_SOFTC(cs, dev);
	return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
}

/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

/*
 * Supported IV methods.  "encblkno" is the historical alias for
 * "encblkno8"; the divisor d compensates for the blocksize having
 * originally been expressed in bits (see cgd_ioctl_set below).
 */
static const struct {
	const char *n;
	int v;
	int d;
} encblkno[] = {
	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};

/*
 * CGDIOCSET handler: open the backing device, copy in the algorithm
 * name, IV method and key from userland, initialize the cipher and
 * attach the resulting disk.  The key staging buffer is zeroed before
 * every reuse and before it is freed.
 */
/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct	 cgd_ioctl *ci = data;
	struct	 vnode *vp;
	int	 ret;
	size_t	 i;
	size_t	 keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char	 *inbuf;

	cp = ci->ci_disk;

	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = dk_lookup(pb, l, &vp);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	/* ci_keylen is in bits; the extra byte absorbs any remainder. */
	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
		/*
		 * NOTE(review): cf_priv is dropped here without calling
		 * cf_destroy(), which looks like it leaks the cipher
		 * context returned by cf_init() — confirm.
		 */
		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
		cs->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&cs->sc_dksc.sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	cs->sc_dksc.sc_flags |= DKF_INITED;

	/* Attach the disk. */
	disk_attach(&cs->sc_dksc.sc_dkdev);

	/* Try and read the disklabel. */
	dk_getdisklabel(di, &cs->sc_dksc, 0 /* XXX ? (cause of PR 41704) */);

	/* Discover wedges on this disk. */
	dkwedge_discover(&cs->sc_dksc.sc_dkdev);

	return 0;

bail:
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}

/*
 * CGDIOCCLR handler: tear down in roughly the reverse order of
 * cgd_ioctl_set — wedges, queued buffers, backing vnode, cipher
 * state, path and bounce buffer — then detach the disk.
 */
/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
{
	int	s;
	struct	dk_softc *dksc;

	dksc = &cs->sc_dksc;

	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&cs->sc_dksc.sc_dkdev);

	/* Kill off any queued buffers. */
	s = splbio();
	bufq_drain(cs->sc_dksc.sc_bufq);
	splx(s);
	bufq_free(cs->sc_dksc.sc_bufq);

	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
	free(cs->sc_tpath, M_DEVBUF);
	free(cs->sc_data, M_DEVBUF);
	cs->sc_data_used = 0;
	cs->sc_dksc.sc_flags &= ~DKF_INITED;
	disk_detach(&cs->sc_dksc.sc_dkdev);

	return 0;
}

/*
 * Determine the size in sectors of the backing device, trying wedge
 * info first and falling back to the partition table.
 */
static int
getsize(struct lwp *l, struct vnode *vp, size_t *size)
{
	struct partinfo dpart;
	struct dkwedge_info dkw;
	int ret;

	if ((ret = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
	    l->l_cred)) == 0) {
		*size = dkw.dkw_size;
		return 0;
	}

	if ((ret = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred)) == 0) {
		*size = dpart.part->p_size;
		return 0;
	}

	return ret;
}


/*
 * Bind the softc to its backing vnode: record the user-supplied path
 * and the device number and size, then seed a synthetic geometry.
 * On failure, frees sc_tpath (the vnode is closed by the caller).
 */
static int
cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct	dk_geom *pdg;
	struct	vattr va;
	size_t	size;
	int	ret;
	char	*tmppath;

	cs->sc_dksc.sc_size = 0;
	cs->sc_tvn = vp;
	cs->sc_tpath = NULL;

	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
	if (ret)
		goto bail;
	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

	if ((ret = VOP_GETATTR(vp, &va, l->l_cred)) != 0)
		goto bail;

	cs->sc_tdev = va.va_rdev;

	if ((ret = getsize(l, vp, &size)) != 0)
		goto bail;

	if (!size) {
		ret = ENODEV;
		goto bail;
	}

	cs->sc_dksc.sc_size = size;

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	pdg = &cs->sc_dksc.sc_geom;
	pdg->pdg_secsize = DEV_BSIZE;
	pdg->pdg_ntracks = 1;
	pdg->pdg_nsectors = 1024 * (1024 / pdg->pdg_secsize);
	pdg->pdg_ncylinders = cs->sc_dksc.sc_size / pdg->pdg_nsectors;

bail:
	free(tmppath, M_TEMP);
	if (ret && cs->sc_tpath)
		free(cs->sc_tpath, M_DEVBUF);
	return ret;
}

/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * For the encryption case, we accomplish this by setting
 * up a struct uio where the first iovec of the source is
 * the blocknumber and the first iovec of the dest is a
 * sink.  We then call the cipher with an IV of zero, and
 * the right thing happens.
 *
 * For the decryption case, we use the same basic mechanism
 * for symmetry, but we encrypt the block number in the
 * first iovec.
 *
 * We mainly do this to avoid requiring the definition of
 * an ECB mode.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */

/*
 * Serialize blkno into sbuf as a fixed little-endian byte string, one
 * byte per shift, for use as the per-sector IV seed.
 */
static void
blkno2blkno_buf(char *sbuf, daddr_t blkno)
{
	int	i;

	/* Set up the blkno in blkno_buf, here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation, because it does not rely
	 * on the size of buf (which is the blocksize of the cipher),
	 * but allows daddr_t to grow without breaking existing
	 * disks.
	 *
	 * Note that blkno2blkno_buf does not take a size as input,
	 * and hence must be called on a pre-zeroed buffer of length
	 * greater than or equal to sizeof(daddr_t).
	 */
	for (i=0; i < sizeof(daddr_t); i++) {
		*sbuf++ = blkno & 0xff;
		blkno >>= 8;
	}
}

/*
 * Encrypt or decrypt len bytes (a whole number of secsize sectors)
 * between srcv and dstv, starting at sector blkno.  See the block
 * comment above for how the two-iovec uio implements encblkno IVs.
 */
static void
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char		*dst = dstv;
	char		*src = srcv;
	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
	struct uio	dstuio;
	struct uio	srcuio;
	struct iovec	dstiov[2];
	struct iovec	srciov[2];
	size_t		blocksize = cs->sc_cdata.cf_blocksize;
	char		sink[CGD_MAXBLOCKSIZE];
	char		zero_iv[CGD_MAXBLOCKSIZE];
	char		blkno_buf[CGD_MAXBLOCKSIZE];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	memset(zero_iv, 0x0, blocksize);

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 2;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 2;

	/* iovec 0 carries the block number (IV material); iovec 1 the data. */
	dstiov[0].iov_base = sink;
	dstiov[0].iov_len  = blocksize;
	srciov[0].iov_base = blkno_buf;
	srciov[0].iov_len  = blocksize;
	dstiov[1].iov_len  = secsize;
	srciov[1].iov_len  = secsize;

	for (; len > 0; len -= secsize) {
		dstiov[1].iov_base = dst;
		srciov[1].iov_base = src;

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		if (dir == CGD_CIPHER_DECRYPT) {
			/*
			 * For decryption, first encrypt just the block
			 * number (iovec 0 only) to recover the chaining
			 * value that was used when the sector was written.
			 */
			dstuio.uio_iovcnt = 1;
			srcuio.uio_iovcnt = 1;
			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
			    blkno_buf, blocksize));
			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
			    zero_iv, CGD_CIPHER_ENCRYPT);
			memcpy(blkno_buf, sink, blocksize);
			dstuio.uio_iovcnt = 2;
			srcuio.uio_iovcnt = 2;
		}

		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));
		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
		    sink, blocksize));

		dst += secsize;
		src += secsize;
		blkno++;
	}
}

#ifdef DEBUG
/* Dump len bytes of buf as hex, prefixed with start (debug only). */
static void
hexprint(const char *start, void *buf, int len)
{
	char	*c = buf;

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	while (len--)
		printf("%02x", (unsigned char) *c++);
}
#endif

MODULE(MODULE_CLASS_DRIVER, cgd, NULL);

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);
#endif

/*
 * Module control: when built as a loadable module, register (INIT)
 * or unregister (FINI) the cfdriver, cfattach and devsw entries,
 * unwinding partial registration on failure.
 */
static int
cgd_modcmd(modcmd_t cmd, void *arg)
{
	int bmajor, cmajor, error = 0;

	bmajor = cmajor = -1;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_cfdriver_attach(&cgd_cd);
		if (error)
			break;

		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to register cfattach\n",
			    cgd_cd.cd_name);
			break;
		}

		error = devsw_attach("cgd", &cgd_bdevsw, &bmajor,
		    &cgd_cdevsw, &cmajor);
		if (error) {
			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
			config_cfdriver_detach(&cgd_cd);
			break;
		}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
		if (error)
			break;
		config_cfdriver_detach(&cgd_cd);
		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
#endif
		break;

	case MODULE_CMD_STAT:
		return ENOTTY;

	default:
		return ENOTTY;
	}

	return error;
}