/* $NetBSD: cgd.c,v 1.21 2004/10/28 07:07:39 yamt Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.21 2004/10/28 07:07:39 yamt Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/conf.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>

/* Entry Point Functions */

void	cgdattach(int);

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

const struct bdevsw cgd_bdevsw = {
	cgdopen, cgdclose, cgdstrategy, cgdioctl,
	cgddump, cgdsize, D_DISK
};

const struct cdevsw cgd_cdevsw = {
	cgdopen, cgdclose, cgdread, cgdwrite, cgdioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

/* Internal Functions */

static int	cgdstart(struct dk_softc *, struct buf *);
static void	cgdiodone(struct buf *);

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct proc *);
static int	cgd_ioctl_clr(struct cgd_softc *, void *, struct proc *);
static int	cgdinit(struct cgd_softc *, char *, struct vnode *,
		    struct proc *);
static void	cgd_cipher(struct cgd_softc *, caddr_t, caddr_t,
		    size_t, daddr_t, size_t, int);

/* Pseudo-disk Interface */

static struct dk_intf the_dkintf = {
	DTYPE_CGD,
	"cgd",
	cgdopen,
	cgdclose,
	cgdstrategy,
	cgdstart,
};
static struct dk_intf *di = &the_dkintf;

/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;

#define CGDB_FOLLOW	0x1
#define CGDB_IO		0x2
#define CGDB_CRYPTO	0x4

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif

/* Global variables */

struct cgd_softc	*cgd_softc;
int			 numcgd = 0;

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
#define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO

static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	int	unit = CGDUNIT(dev);

	DPRINTF_FOLLOW(("getcgd_softc(0x%x): unit = %d\n", dev, unit));
	if (unit >= numcgd)
		return NULL;
	return &cgd_softc[unit];
}

/* The code */

static void
cgdsoftc_init(struct cgd_softc *cs, int num)
{
	char	buf[DK_XNAME_SIZE];

	memset(cs, 0x0, sizeof(*cs));
	snprintf(buf, DK_XNAME_SIZE, "cgd%d", num);
	simple_lock_init(&cs->sc_slock);
	dk_sc_init(&cs->sc_dksc, cs, buf);
}

void
cgdattach(int num)
{
	int	i;

	DPRINTF_FOLLOW(("cgdattach(%d)\n", num));
	if (num <= 0) {
		DIAGPANIC(("cgdattach: count <= 0"));
		return;
	}

	cgd_softc = (void *)malloc(num * sizeof(*cgd_softc), M_DEVBUF, M_NOWAIT);
	if (!cgd_softc) {
		printf("WARNING: unable to malloc(9) memory for crypt disks\n");
		DIAGPANIC(("cgdattach: cannot malloc(9) enough memory"));
		return;
	}

	numcgd = num;
	for (i=0; i<num; i++)
		cgdsoftc_init(&cgd_softc[i], i);
}

static int
cgdopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct cgd_softc	*cs;

	DPRINTF_FOLLOW(("cgdopen(%d, %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_open(di, &cs->sc_dksc, dev, flags, fmt, p);
}

static int
cgdclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct cgd_softc	*cs;

	DPRINTF_FOLLOW(("cgdclose(%d, %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_close(di, &cs->sc_dksc, dev, flags, fmt, p);
}

static void
cgdstrategy(struct buf *bp)
{
	struct cgd_softc	*cs = getcgd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));
	/* XXXrcd: Should we test for (cs != NULL)? */
	dk_strategy(di, &cs->sc_dksc, bp);
	return;
}

static int
cgdsize(dev_t dev)
{
	struct cgd_softc	*cs = getcgd_softc(dev);

	DPRINTF_FOLLOW(("cgdsize(%d)\n", dev));
	if (!cs)
		return -1;
	return dk_size(di, &cs->sc_dksc, dev);
}

/*
 * cgd_{get,put}data are functions that deal with getting a buffer
 * for the new encrypted data.  We have a buffer per device so that
 * we can ensure that we always have a transaction in flight.
 * We use this buffer first so that we have one less piece of
 * malloc'ed data at any given point.
 */

static void *
cgd_getdata(struct dk_softc *dksc, unsigned long size)
{
	struct cgd_softc	*cs = dksc->sc_osc;
	caddr_t			 data = NULL;

	simple_lock(&cs->sc_slock);
	if (cs->sc_data_used == 0) {
		cs->sc_data_used = 1;
		data = cs->sc_data;
	}
	simple_unlock(&cs->sc_slock);

	if (data)
		return data;

	return malloc(size, M_DEVBUF, M_NOWAIT);
}

static void
cgd_putdata(struct dk_softc *dksc, caddr_t data)
{
	struct cgd_softc	*cs = dksc->sc_osc;

	if (data == cs->sc_data) {
		simple_lock(&cs->sc_slock);
		cs->sc_data_used = 0;
		simple_unlock(&cs->sc_slock);
	} else {
		free(data, M_DEVBUF);
	}
}

static int
cgdstart(struct dk_softc *dksc, struct buf *bp)
{
	struct cgd_softc	*cs = dksc->sc_osc;
	struct buf		*nbp;
	struct partition	*pp;
	caddr_t			 addr;
	caddr_t			 newaddr;
	daddr_t			 bn;
	int			 s;

	DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
	disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */

	/* XXXrcd:
	 * Translate partition relative blocks to absolute blocks;
	 * this probably belongs (somehow) in dksubr.c, since it
	 * is independent of the underlying code...  This will require
	 * that the interface be expanded slightly, though.
	 */
	bn = bp->b_blkno;
	if (DISKPART(bp->b_dev) != RAW_PART) {
		pp = &cs->sc_dksc.sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
		bn += pp->p_offset;
	}

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */

	s = splbio();
	nbp = pool_get(&bufpool, PR_NOWAIT);
	splx(s);
	if (nbp == NULL) {
		disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
		return -1;
	}

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.  If we fail, then we
	 * return an error and let the dksubr framework deal with it.
	 */
	newaddr = addr = bp->b_data;
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(dksc, bp->b_bcount);
		if (!newaddr) {
			s = splbio();
			pool_put(&bufpool, nbp);
			splx(s);
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			return -1;
		}
		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
		    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
	}

	BUF_INIT(nbp);
	nbp->b_data = newaddr;
	nbp->b_flags = bp->b_flags | B_CALL;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_blkno = bn;
	nbp->b_vp = cs->sc_tvn;
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = bp;

	BIO_COPYPRIO(nbp, bp);

	if ((nbp->b_flags & B_READ) == 0) {
		V_INCR_NUMOUTPUT(nbp->b_vp);
	}
	VOP_STRATEGY(cs->sc_tvn, nbp);
	return 0;
}
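
/*
 * Summary of the I/O path, as implemented by the routines above and
 * below: cgdstrategy() hands each buf to dk_strategy(), which queues
 * it and (through the dk_intf registered above) calls cgdstart().
 * cgdstart() encrypts writes into a per-device buffer obtained from
 * cgd_getdata(), points the new buf at the backing vnode and issues
 * VOP_STRATEGY() with cgdiodone() as the completion handler.
 * cgdiodone() decrypts reads in place, releases the temporary buffer
 * via cgd_putdata(), finishes the original buf with biodone() and
 * lets the dksubr framework start the next transfer via dk_iodone().
 */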

/* expected to be called at splbio() */
static void
cgdiodone(struct buf *nbp)
{
	struct buf		*obp = nbp->b_private;
	struct cgd_softc	*cs = getcgd_softc(obp->b_dev);
	struct dk_softc		*dksc = &cs->sc_dksc;

	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%x, nbp %p bn %" PRId64 " addr %p bcnt %d\n",
	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_flags & B_ERROR) {
		obp->b_flags |= B_ERROR;
		obp->b_error = nbp->b_error ? nbp->b_error : EIO;

		printf("%s: error %d\n", dksc->sc_xname, obp->b_error);
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ)
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);

	/* If we allocated memory, free it now. */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	pool_put(&bufpool, nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_flags & B_ERROR)
		obp->b_resid = obp->b_bcount;
	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
	    (obp->b_flags & B_READ));
	biodone(obp);
	dk_iodone(di, dksc);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc	*cs;
	struct dk_softc		*dksc;

	DPRINTF_FOLLOW(("cgdread(%d, %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc	*cs;
	struct dk_softc		*dksc;

	DPRINTF_FOLLOW(("cgdwrite(%d, %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}

static int
cgdioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct cgd_softc	*cs;
	struct dk_softc		*dksc;
	int			 ret;
	int			 part = DISKPART(dev);
	int			 pmask = 1 << part;

	DPRINTF_FOLLOW(("cgdioctl(%d, %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, p));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	switch (cmd) {
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	if ((ret = lockmgr(&dksc->sc_lock, LK_EXCLUSIVE, NULL)) != 0)
		return ret;

	switch (cmd) {
	case CGDIOCSET:
		if (dksc->sc_flags & DKF_INITED)
			ret = EBUSY;
		else
			ret = cgd_ioctl_set(cs, data, p);
		break;
	case CGDIOCCLR:
		if (!(dksc->sc_flags & DKF_INITED)) {
			ret = ENXIO;
			break;
		}
		if (DK_BUSY(&cs->sc_dksc, pmask)) {
			ret = EBUSY;
			break;
		}
		ret = cgd_ioctl_clr(cs, data, p);
		break;
	default:
		ret = dk_ioctl(di, dksc, dev, cmd, data, flag, p);
		break;
	}

	lockmgr(&dksc->sc_lock, LK_RELEASE, NULL);
	return ret;
}

static int
cgddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
	struct cgd_softc	*cs;

	DPRINTF_FOLLOW(("cgddump(%d, %" PRId64 ", %p, %lu)\n", dev, blkno, va,
	    (unsigned long)size));
	GETCGD_SOFTC(cs, dev);
	return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
}

/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct proc *p)
{
	struct cgd_ioctl	*ci = data;
	struct vnode		*vp;
	int			 ret;
	int			 keybytes;	/* key length in bytes */
	char			*cp;
	char			 inbuf[MAX_KEYSIZE];

	cp = ci->ci_disk;
	if ((ret = dk_lookup(cp, p, &vp)) != 0)
		return ret;

	if ((ret = cgdinit(cs, cp, vp, p)) != 0)
		goto bail;

	memset(inbuf, 0x0, sizeof(inbuf));
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	/* right now we only support encblkno, so hard-code it */
	memset(inbuf, 0x0, sizeof(inbuf));
	ret = copyinstr(ci->ci_ivmethod, inbuf, sizeof(inbuf), NULL);
	if (ret)
		goto bail;
	if (strcmp("encblkno", inbuf)) {
		ret = EINVAL;
		goto bail;
	}

	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}
	memset(inbuf, 0x0, sizeof(inbuf));
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	memset(inbuf, 0x0, sizeof(inbuf));
	if (!cs->sc_cdata.cf_priv) {
		printf("cgd: unable to initialize cipher\n");
		ret = EINVAL;	/* XXX is this the right error? */
		goto bail;
	}

	bufq_alloc(&cs->sc_dksc.sc_bufq, BUFQ_FCFS);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	cs->sc_dksc.sc_flags |= DKF_INITED;

	/* Attach the disk. */
	disk_attach(&cs->sc_dksc.sc_dkdev);

	/* Try to read the disklabel. */
	dk_getdisklabel(di, &cs->sc_dksc, 0 /* XXX ? */);

	return 0;

bail:
	(void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
	return ret;
}
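
/*
 * Illustrative sketch, not part of the driver: a userland
 * configuration tool (cgdconfig(8) in practice) would fill in a
 * struct cgd_ioctl roughly as below before issuing CGDIOCSET on the
 * raw cgd device node.  The field names are the ones consumed by
 * cgd_ioctl_set() above; the device path, algorithm name, key length
 * and the variables key, blocksize and fd are made-up examples that
 * the caller would supply.
 *
 *	struct cgd_ioctl ci;
 *
 *	memset(&ci, 0, sizeof(ci));
 *	ci.ci_disk = "/dev/wd0e";	// backing device to encrypt
 *	ci.ci_alg = "aes-cbc";		// looked up via cryptfuncs_find()
 *	ci.ci_ivmethod = "encblkno";	// the only method accepted above
 *	ci.ci_key = key;		// ci_keylen bits of key material
 *	ci.ci_keylen = 256;
 *	ci.ci_blocksize = blocksize;	// cipher block size, handed to cf_init()
 *	error = ioctl(fd, CGDIOCSET, &ci);
 */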

/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *cs, void *data, struct proc *p)
{
	struct buf	*bp;
	int		 s;

	/* Kill off any queued buffers. */
	s = splbio();
	while ((bp = BUFQ_GET(&cs->sc_dksc.sc_bufq)) != NULL) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	splx(s);
	bufq_free(&cs->sc_dksc.sc_bufq);

	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, p->p_ucred, p);
	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
	free(cs->sc_tpath, M_DEVBUF);
	free(cs->sc_data, M_DEVBUF);
	cs->sc_data_used = 0;
	cs->sc_dksc.sc_flags &= ~DKF_INITED;
	disk_detach(&cs->sc_dksc.sc_dkdev);

	return 0;
}

static int
cgdinit(struct cgd_softc *cs, char *cpath, struct vnode *vp,
	struct proc *p)
{
	struct dk_geom		*pdg;
	struct partinfo		 dpart;
	struct vattr		 va;
	size_t			 size;
	int			 maxsecsize = 0;
	int			 ret;
	char			 tmppath[MAXPATHLEN];

	cs->sc_dksc.sc_size = 0;
	cs->sc_tvn = vp;

	memset(tmppath, 0x0, sizeof(tmppath));
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
	if (ret)
		goto bail;
	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

	if ((ret = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0)
		goto bail;

	cs->sc_tdev = va.va_rdev;

	ret = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, p->p_ucred, p);
	if (ret)
		goto bail;

	maxsecsize =
	    ((dpart.disklab->d_secsize > maxsecsize) ?
	    dpart.disklab->d_secsize : maxsecsize);
	size = dpart.part->p_size;

	if (!size) {
		ret = ENODEV;
		goto bail;
	}

	cs->sc_dksc.sc_size = size;

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	pdg = &cs->sc_dksc.sc_geom;
	pdg->pdg_secsize = DEV_BSIZE;
	pdg->pdg_ntracks = 1;
	pdg->pdg_nsectors = 1024 * (1024 / pdg->pdg_secsize);
	pdg->pdg_ncylinders = cs->sc_dksc.sc_size / pdg->pdg_nsectors;

bail:
	if (ret && cs->sc_tpath)
		free(cs->sc_tpath, M_DEVBUF);
	return ret;
}

/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * For the encryption case, we accomplish this by setting
 * up a struct uio where the first iovec of the source is
 * the blocknumber and the first iovec of the dest is a
 * sink.  We then call the cipher with an IV of zero, and
 * the right thing happens.
 *
 * For the decryption case, we use the same basic mechanism
 * for symmetry, but we encrypt the block number in the
 * first iovec.
 *
 * We mainly do this to avoid requiring the definition of
 * an ECB mode.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */

static void
blkno2blkno_buf(char *buf, daddr_t blkno)
{
	int	i;

	/* Set up the blkno in blkno_buf; here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation because it does not rely
	 * on the size of buf (which is the blocksize of the cipher),
	 * but allows daddr_t to grow without breaking existing
	 * disks.
	 *
	 * Note that blkno2blkno_buf does not take a size as input,
	 * and hence must be called on a pre-zeroed buffer of length
	 * greater than or equal to sizeof(daddr_t).
	 */
	for (i=0; i < sizeof(daddr_t); i++) {
		*buf++ = blkno & 0xff;
		blkno >>= 8;
	}
}
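
/*
 * Worked example (illustrative only, assuming an 8-byte daddr_t and a
 * 16-byte cipher blocksize): for block number 0x0102, blkno2blkno_buf()
 * fills the pre-zeroed blkno_buf with the little-endian bytes
 *
 *	02 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 *
 * cgd_cipher() below then chains this block in front of the sector
 * data: srciov[0] is blkno_buf, dstiov[0] is the discarded sink, and
 * srciov[1]/dstiov[1] carry the sector itself.  Running the cipher in
 * CBC mode over that pair with a zero IV makes the encrypted block
 * number act as the IV for the sector, which is the ``encrypted block
 * number'' method described above.
 */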
710 * 711 * We choose this representation, because it does not rely 712 * on the size of buf (which is the blocksize of the cipher), 713 * but allows daddr_t to grow without breaking existing 714 * disks. 715 * 716 * Note that blkno2blkno_buf does not take a size as input, 717 * and hence must be called on a pre-zeroed buffer of length 718 * greater than or equal to sizeof(daddr_t). 719 */ 720 for (i=0; i < sizeof(daddr_t); i++) { 721 *buf++ = blkno & 0xff; 722 blkno >>= 8; 723 } 724 } 725 726 static void 727 cgd_cipher(struct cgd_softc *cs, caddr_t dst, caddr_t src, 728 size_t len, daddr_t blkno, size_t secsize, int dir) 729 { 730 cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher; 731 struct uio dstuio; 732 struct uio srcuio; 733 struct iovec dstiov[2]; 734 struct iovec srciov[2]; 735 int blocksize = cs->sc_cdata.cf_blocksize; 736 char sink[blocksize]; 737 char zero_iv[blocksize]; 738 char blkno_buf[blocksize]; 739 740 DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir)); 741 742 DIAGCONDPANIC(len % blocksize != 0, 743 ("cgd_cipher: len %% blocksize != 0")); 744 745 /* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */ 746 DIAGCONDPANIC(sizeof(daddr_t) > blocksize, 747 ("cgd_cipher: sizeof(daddr_t) > blocksize")); 748 749 memset(zero_iv, 0x0, sizeof(zero_iv)); 750 751 dstuio.uio_iov = dstiov; 752 dstuio.uio_iovcnt = 2; 753 754 srcuio.uio_iov = srciov; 755 srcuio.uio_iovcnt = 2; 756 757 dstiov[0].iov_base = sink; 758 dstiov[0].iov_len = blocksize; 759 srciov[0].iov_base = blkno_buf; 760 srciov[0].iov_len = blocksize; 761 dstiov[1].iov_len = secsize; 762 srciov[1].iov_len = secsize; 763 764 for (; len > 0; len -= secsize) { 765 dstiov[1].iov_base = dst; 766 srciov[1].iov_base = src; 767 768 memset(blkno_buf, 0x0, sizeof(blkno_buf)); 769 blkno2blkno_buf(blkno_buf, blkno); 770 if (dir == CGD_CIPHER_DECRYPT) { 771 dstuio.uio_iovcnt = 1; 772 srcuio.uio_iovcnt = 1; 773 IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf", 774 blkno_buf, sizeof(blkno_buf))); 775 cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, 776 zero_iv, CGD_CIPHER_ENCRYPT); 777 memcpy(blkno_buf, sink, blocksize); 778 dstuio.uio_iovcnt = 2; 779 srcuio.uio_iovcnt = 2; 780 } 781 782 IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf", 783 blkno_buf, sizeof(blkno_buf))); 784 cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir); 785 IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink", 786 sink, sizeof(sink))); 787 788 dst += secsize; 789 src += secsize; 790 blkno++; 791 } 792 } 793 794 #ifdef DEBUG 795 static void 796 hexprint(char *start, void *buf, int len) 797 { 798 char *c = buf; 799 800 DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0")); 801 printf("%s: len=%06d 0x", start, len); 802 while (len--) 803 printf("%02x", (unsigned) *c++); 804 } 805 #endif 806