/*	$NetBSD: cgd.c,v 1.110 2016/08/05 08:24:46 pgoyette Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.110 2016/08/05 08:24:46 pgoyette Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/namei.h> /* for pathbuf */
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/syslog.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>

#include <miscfs/specfs/specdev.h> /* for v_rdev */

#include "ioconf.h"

/* Entry Point Functions */

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};
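
/*
 * Added commentary (not functional code): the same cgd entry points
 * back both the block and character device switch tables above.
 * open/close/strategy/size and most ioctls are thin wrappers that
 * delegate to the shared dk_softc helpers (dk_open, dk_strategy,
 * dk_ioctl, ...) embedded in each unit's softc, while the actual
 * cipher work happens in cgd_diskstart()/cgdiodone() below.
 */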

static int	cgd_match(device_t, cfdata_t, void *);
static void	cgd_attach(device_t, device_t, void *);
static int	cgd_detach(device_t, int);
static struct cgd_softc	*cgd_spawn(int);
static int	cgd_destroy(device_t);

/* Internal Functions */

static int	cgd_diskstart(device_t, struct buf *);
static void	cgdiodone(struct buf *);
static int	cgd_dumpblocks(device_t, void *, daddr_t, int);

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
			struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, void *,
			size_t, daddr_t, size_t, int);

static struct dkdriver cgddkdriver = {
	.d_minphys = minphys,
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_iosize = NULL,
	.d_diskstart = cgd_diskstart,
	.d_dumpblocks = cgd_dumpblocks,
	.d_lastclose = NULL
};

CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
extern struct cfdriver cgd_cd;

/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;

#define CGDB_FOLLOW	0x1
#define CGDB_IO		0x2
#define CGDB_CRYPTO	0x4

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif

/* Global variables */

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
#define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO

/* The code */

static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	int unit = CGDUNIT(dev);
	struct cgd_softc *sc;

	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL)
		sc = cgd_spawn(unit);
	return sc;
}

static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}

static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self,
		    "unable to register power management hooks\n");
}


static int
cgd_detach(device_t self, int flags)
{
	int ret;
	const int pmask = 1 << RAW_PART;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, pmask))
		return EBUSY;

	if (DK_ATTACHED(dksc) &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);
	mutex_destroy(&sc->sc_lock);

	return 0;
}

void
cgdattach(int num)
{
	int error;

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
}
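
/*
 * Added note: cgd units are pseudo-devices created on demand.  When
 * getcgd_softc() finds no attached unit, cgd_spawn() below fabricates a
 * cfdata entry and calls config_attach_pseudo(); cgdclose() tears the
 * instance back down through cgd_destroy() once it is no longer
 * configured (!DK_ATTACHED).
 */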

static struct cgd_softc *
cgd_spawn(int unit)
{
	cfdata_t cf;

	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
	cf->cf_name = cgd_cd.cd_name;
	cf->cf_atname = cgd_cd.cd_name;
	cf->cf_unit = unit;
	cf->cf_fstate = FSTATE_STAR;

	return device_private(config_attach_pseudo(cf));
}

static int
cgd_destroy(device_t dev)
{
	int error;
	cfdata_t cf;

	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;
	free(cf, M_DEVBUF);
	return 0;
}

static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_open(&cs->sc_dksc, dev, flags, fmt, l);
}

static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	int error;
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
		return error;

	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
			aprint_error_dev(dksc->sc_dev,
			    "unable to detach instance\n");
			return error;
		}
	}
	return 0;
}

static void
cgdstrategy(struct buf *bp)
{
	struct cgd_softc *cs = getcgd_softc(bp->b_dev);
	struct dk_softc *dksc = &cs->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	/*
	 * Reject unaligned writes.  We can encrypt and decrypt only
	 * complete disk sectors, and we let the ciphers require their
	 * buffers to be aligned to 32-bit boundaries.
	 */
	if (bp->b_blkno < 0 ||
	    (bp->b_bcount % dg->dg_secsize) != 0 ||
	    ((uintptr_t)bp->b_data & 3) != 0) {
		bp->b_error = EINVAL;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}

	/* XXXrcd: Should we test for (cs != NULL)? */
	dk_strategy(&cs->sc_dksc, bp);
	return;
}

static int
cgdsize(dev_t dev)
{
	struct cgd_softc *cs = getcgd_softc(dev);

	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
	if (!cs)
		return -1;
	return dk_size(&cs->sc_dksc, dev);
}

/*
 * cgd_{get,put}data are functions that deal with getting a buffer
 * for the new encrypted data.  We have a buffer per device so that
 * we can ensure that we can always have a transaction in flight.
 * We use this buffer first so that we have one less piece of
 * malloc'ed data at any given point.
 */

static void *
cgd_getdata(struct dk_softc *dksc, unsigned long size)
{
	struct cgd_softc *cs = (struct cgd_softc *)dksc;
	void *data = NULL;

	mutex_enter(&cs->sc_lock);
	if (cs->sc_data_used == 0) {
		cs->sc_data_used = 1;
		data = cs->sc_data;
	}
	mutex_exit(&cs->sc_lock);

	if (data)
		return data;

	return malloc(size, M_DEVBUF, M_NOWAIT);
}

static void
cgd_putdata(struct dk_softc *dksc, void *data)
{
	struct cgd_softc *cs = (struct cgd_softc *)dksc;

	if (data == cs->sc_data) {
		mutex_enter(&cs->sc_lock);
		cs->sc_data_used = 0;
		mutex_exit(&cs->sc_lock);
	} else {
		free(data, M_DEVBUF);
	}
}

static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct cgd_softc *cs = device_private(dev);
	struct dk_softc *dksc = &cs->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct buf *nbp;
	void *addr;
	void *newaddr;
	daddr_t bn;
	struct vnode *vp;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(cs->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	newaddr = addr = bp->b_data;
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(dksc, bp->b_bcount);
		if (!newaddr) {
			putiobuf(nbp);
			return EAGAIN;
		}
		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
		    dg->dg_secsize, CGD_CIPHER_ENCRYPT);
	}

	nbp->b_data = newaddr;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_blkno = btodb(bn * dg->dg_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = bp;

	BIO_COPYPRIO(nbp, bp);

	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(cs->sc_tvn, nbp);

	return 0;
}

static void
cgdiodone(struct buf *nbp)
{
	struct buf *obp = nbp->b_private;
	struct cgd_softc *cs = getcgd_softc(obp->b_dev);
	struct dk_softc *dksc = &cs->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t bn;

	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    bn, dg->dg_secsize, CGD_CIPHER_DECRYPT);
	}

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}

static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors.  Everything
	 * else in cgd uses units of diskgeom sectors.  These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext. */
	buf = cgd_getdata(dksc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up. */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer. */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device. */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer. */
	cgd_putdata(dksc, buf);

	/* Return any error from the underlying disk device. */
	return error;
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
	    (unsigned long long)dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if (!DK_ATTACHED(dksc))
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if (!DK_ATTACHED(dksc))
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}
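
/*
 * Added note: raw character-device reads and writes above are routed
 * through physio(9) into cgdstrategy(), so they take the same
 * encryption path as block I/O.  cgdioctl() below handles the
 * cgd-specific configuration ioctls (CGDIOCSET/CGDIOCCLR/CGDIOCGET)
 * itself and hands generic disk ioctls to dk_ioctl().
 */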

static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;
	int part = DISKPART(dev);
	int pmask = 1 << part;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		GETCGD_SOFTC(cs, dev);
		dksc = &cs->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		if (DK_ATTACHED(dksc))
			return EBUSY;
		return cgd_ioctl_set(cs, data, l);
	case CGDIOCCLR:
		if (DK_BUSY(&cs->sc_dksc, pmask))
			return EBUSY;
		return cgd_ioctl_clr(cs, l);
	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			return (EBADF);

		/*
		 * We pass this call down to the underlying disk.
		 */
		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc))
			return ENOENT;
		/*FALLTHROUGH*/
	default:
		return dk_ioctl(dksc, dev, cmd, data, flag, l);
	case CGDIOCGET:
		KASSERT(0);
		return EINVAL;
	}
}

static int
cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct cgd_softc *cs;

	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
	    dev, blkno, va, (unsigned long)size));
	GETCGD_SOFTC(cs, dev);
	return dk_dump(&cs->sc_dksc, dev, blkno, va, size);
}

/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

static const struct {
	const char *n;
	int v;
	int d;
} encblkno[] = {
	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};

/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct cgd_ioctl *ci = data;
	struct vnode *vp;
	int ret;
	size_t i;
	size_t keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char *inbuf;
	struct dk_softc *dksc = &cs->sc_dksc;

	cp = ci->ci_disk;

	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = dk_lookup(pb, l, &vp);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_keylen = ci->ci_keylen;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
		cs->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	/* Attach the disk. */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}
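
/*
 * Illustrative sketch (not part of the driver): a userland consumer
 * such as cgdconfig(8) drives cgd_ioctl_set() above roughly as below.
 * The device path, algorithm, key and parameter values shown here are
 * purely hypothetical examples; only the struct cgd_ioctl fields and
 * the CGDIOCSET request come from this driver.
 *
 *	struct cgd_ioctl ci;
 *
 *	memset(&ci, 0, sizeof(ci));
 *	ci.ci_disk = "/dev/wd0e";	// underlying device to encrypt
 *	ci.ci_alg = "aes-cbc";		// must match a cryptfuncs entry
 *	ci.ci_ivmethod = "encblkno1";	// one of the encblkno[] names above
 *	ci.ci_key = key;		// raw key material
 *	ci.ci_keylen = 256;		// key length in bits (see keybytes)
 *	ci.ci_blocksize = 128;		// cipher blocksize, historical units
 *	if (ioctl(fd, CGDIOCSET, &ci) == -1)	// fd: the open cgd unit
 *		err(1, "CGDIOCSET");
 */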

/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
{
	struct dk_softc *dksc = &cs->sc_dksc;

	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
	free(cs->sc_tpath, M_DEVBUF);
	free(cs->sc_data, M_DEVBUF);
	cs->sc_data_used = 0;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}

static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	struct cgd_softc *cs = getcgd_softc(dev);
	struct cgd_user *cgu;
	int unit;
	struct dk_softc *dksc = &cs->sc_dksc;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
	    dev, unit, data, l));

	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0)
		return EINVAL;	/* XXX: should this be ENXIO? */

	cs = device_lookup_private(&cgd_cd, unit);
	if (cs == NULL || !DK_ATTACHED(dksc)) {
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	}
	else {
		cgu->cgu_dev = cs->sc_tdev;
		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
		cgu->cgu_mode = cs->sc_cdata.cf_mode;
		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
	}
	return 0;
}

static int
cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct disk_geom *dg;
	int ret;
	char *tmppath;
	uint64_t psize;
	unsigned secsize;
	struct dk_softc *dksc = &cs->sc_dksc;

	cs->sc_tvn = vp;
	cs->sc_tpath = NULL;

	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
	if (ret)
		goto bail;
	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

	cs->sc_tdev = vp->v_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	dg->dg_secsize = secsize;
	dg->dg_ntracks = 1;
	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	free(tmppath, M_TEMP);
	if (ret && cs->sc_tpath)
		free(cs->sc_tpath, M_DEVBUF);
	return ret;
}

/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * For the encryption case, we accomplish this by setting
 * up a struct uio where the first iovec of the source is
 * the block number and the first iovec of the dest is a
 * sink.  We then call the cipher with an IV of zero, and
 * the right thing happens.
 *
 * For the decryption case, we use the same basic mechanism
 * for symmetry, but we encrypt the block number in the
 * first iovec.
 *
 * We mainly do this to avoid requiring the definition of
 * an ECB mode.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */

static void
blkno2blkno_buf(char *sbuf, daddr_t blkno)
{
	int i;

	/* Set up the blkno in blkno_buf, here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation, because it does not rely
	 * on the size of buf (which is the blocksize of the cipher),
	 * but allows daddr_t to grow without breaking existing
	 * disks.
	 *
	 * Note that blkno2blkno_buf does not take a size as input,
	 * and hence must be called on a pre-zeroed buffer of length
	 * greater than or equal to sizeof(daddr_t).
	 */
	for (i=0; i < sizeof(daddr_t); i++) {
		*sbuf++ = blkno & 0xff;
		blkno >>= 8;
	}
}

static void
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char *dst = dstv;
	char *src = srcv;
	cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher;
	struct uio dstuio;
	struct uio srcuio;
	struct iovec dstiov[2];
	struct iovec srciov[2];
	size_t blocksize = cs->sc_cdata.cf_blocksize;
	size_t todo;
	char sink[CGD_MAXBLOCKSIZE];
	char zero_iv[CGD_MAXBLOCKSIZE];
	char blkno_buf[CGD_MAXBLOCKSIZE];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	memset(zero_iv, 0x0, blocksize);

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 2;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 2;

	dstiov[0].iov_base = sink;
	dstiov[0].iov_len = blocksize;
	srciov[0].iov_base = blkno_buf;
	srciov[0].iov_len = blocksize;

	for (; len > 0; len -= todo) {
		todo = MIN(len, secsize);

		dstiov[1].iov_base = dst;
		srciov[1].iov_base = src;
		dstiov[1].iov_len = todo;
		srciov[1].iov_len = todo;

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		if (dir == CGD_CIPHER_DECRYPT) {
			dstuio.uio_iovcnt = 1;
			srcuio.uio_iovcnt = 1;
			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
			    blkno_buf, blocksize));
			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
			    zero_iv, CGD_CIPHER_ENCRYPT);
			memcpy(blkno_buf, sink, blocksize);
			dstuio.uio_iovcnt = 2;
			srcuio.uio_iovcnt = 2;
		}

		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));
		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
		    sink, blocksize));

		dst += todo;
		src += todo;
		blkno++;
	}
}

#ifdef DEBUG
static void
hexprint(const char *start, void *buf, int len)
{
	char *c = buf;

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	while (len--)
		printf("%02x", (unsigned char) *c++);
}
#endif

MODULE(MODULE_CLASS_DRIVER, cgd, "dk_subr");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);

devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
#endif

static int
cgd_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_cfdriver_attach(&cgd_cd);
		if (error)
			break;

		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to register cfattach for "
			    "%s, error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
		/*
		 * Attach the {b,c}devsw's
		 */
		error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
		    &cgd_cdevsw, &cgd_cmajor);

		/*
		 * If devsw_attach fails, remove from autoconf database
		 */
		if (error) {
			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
			config_cfdriver_detach(&cgd_cd);
"error %d", __func__, cgd_cd.cd_name, error); 1068 break; 1069 } 1070 #endif 1071 break; 1072 1073 case MODULE_CMD_FINI: 1074 #ifdef _MODULE 1075 /* 1076 * Remove {b,c}devsw's 1077 */ 1078 devsw_detach(&cgd_bdevsw, &cgd_cdevsw); 1079 1080 /* 1081 * Now remove device from autoconf database 1082 */ 1083 error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca); 1084 if (error) { 1085 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor, 1086 &cgd_cdevsw, &cgd_cmajor); 1087 aprint_error("%s: failed to detach %s cfattach, " 1088 "error %d\n", __func__, cgd_cd.cd_name, error); 1089 break; 1090 } 1091 error = config_cfdriver_detach(&cgd_cd); 1092 if (error) { 1093 (void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca); 1094 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor, 1095 &cgd_cdevsw, &cgd_cmajor); 1096 aprint_error("%s: failed to detach %s cfdriver, " 1097 "error %d\n", __func__, cgd_cd.cd_name, error); 1098 break; 1099 } 1100 #endif 1101 break; 1102 1103 case MODULE_CMD_STAT: 1104 error = ENOTTY; 1105 break; 1106 default: 1107 error = ENOTTY; 1108 break; 1109 } 1110 1111 return error; 1112 } 1113