/*	$NetBSD: subr_devsw.c,v 1.15 2008/02/13 18:43:16 matt Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
 *	allocation, we allocate a fixed block of memory to hold the new,
 *	expanded index.  This "fork" of the table is only ever performed
 *	once in order to guarantee that other threads may safely access
 *	the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that once
 *	the device has been referenced by a vnode (opened), calling the
 *	other methods should be valid until that reference is dropped.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.15 2008/02/13 18:43:16 matt Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/tty.h>
#include <sys/cpu.h>
#include <sys/buf.h>

#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum number of device majors */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, int *);
static int cdevsw_attach(const struct cdevsw *, int *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

kmutex_t devsw_lock;

void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);

	mutex_init(&devsw_lock, MUTEX_DEFAULT, IPL_NONE);
}
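
/*
 * Illustrative sketch (not part of the original file): how a
 * hypothetical loadable driver "xmpl" might register itself using
 * devsw_attach() below.  All xmpl_* names are invented for this
 * example; the dev_type_*() prototype macros and the no-op methods
 * (noread, nowrite, nostop, ...) come from <sys/conf.h>.
 */
#if 0
static dev_type_open(xmpl_open);
static dev_type_close(xmpl_close);
static dev_type_ioctl(xmpl_ioctl);

static const struct cdevsw xmpl_cdevsw = {
	xmpl_open, xmpl_close, noread, nowrite, xmpl_ioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER
};

static int
xmpl_register(void)
{
	int bmajor = -1, cmajor = -1;

	/* Passing -1 asks devsw_attach() to allocate majors for us. */
	return devsw_attach("xmpl", NULL, &bmajor, &xmpl_cdevsw, &cmajor);
}
#endif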

int
devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
    const struct cdevsw *cdev, int *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	size_t len;
	int error, i;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&devsw_lock);

	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		if (bdev != NULL)
			bdevsw[*bmajor] = bdev;
		cdevsw[*cmajor] = cdev;

		mutex_exit(&devsw_lock);
		return (0);
	}

	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old, new;

		old = max_devsw_convs;
		new = old + 1;

		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		newptr[old].d_name = NULL;
		newptr[old].d_bmajor = -1;
		newptr[old].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new;
	}

	/*
	 * Use a separate length variable: 'i' still indexes the free
	 * conversion slot found above and must not be clobbered.
	 */
	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&devsw_lock);
	return (0);
 fail:
	mutex_exit(&devsw_lock);
	return (error);
}

static int
bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
{
	const struct bdevsw **newptr;
	int bmajor, i;

	KASSERT(mutex_owned(&devsw_lock));

	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted\n");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}

static int
cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
{
	const struct cdevsw **newptr;
	int cmajor, i;

	KASSERT(mutex_owned(&devsw_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted\n");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}

static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i;

	KASSERT(mutex_owned(&devsw_lock));

	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;
			bdevsw[i] = NULL;
			break;
		}
	}
	if (cdev != NULL) {
		for (i = 0 ; i < max_cdevsws ; i++) {
			if (cdevsw[i] != cdev)
				continue;
			cdevsw[i] = NULL;
			break;
		}
	}
}

void
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&devsw_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&devsw_lock);
}
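
/*
 * Illustrative sketch (an assumption, mirroring the hypothetical
 * xmpl_register() example earlier): a driver unloading itself passes
 * the same switch structures it registered, and devsw_detach() clears
 * the matching table slots.
 */
#if 0
static void
xmpl_unregister(void)
{

	devsw_detach(NULL, &xmpl_cdevsw);
}
#endif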

/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	int bmajor;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	return (bdevsw[bmajor]);
}

/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	int cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	return (cdevsw[cmajor]);
}

/*
 * Look up a block device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
int
bdevsw_lookup_major(const struct bdevsw *bdev)
{
	int bmajor;

	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
		if (bdevsw[bmajor] == bdev)
			return (bmajor);
	}

	return (-1);
}

/*
 * Look up a character device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
int
cdevsw_lookup_major(const struct cdevsw *cdev)
{
	int cmajor;

	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
		if (cdevsw[cmajor] == cdev)
			return (cmajor);
	}

	return (-1);
}
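
/*
 * Illustrative sketch (assumption, reusing the hypothetical xmpl
 * example): a driver that was assigned its majors dynamically can
 * recover them from its own switch structure, e.g. to build a dev_t
 * for minor 0.
 */
#if 0
static dev_t
xmpl_dev0(void)
{
	int cmajor;

	cmajor = cdevsw_lookup_major(&xmpl_cdevsw);
	if (cmajor < 0)
		return NODEV;
	return makedev(cmajor, 0);
}
#endif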

/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(int bmajor)
{
	const char *name;
	int cmajor, i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&devsw_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&devsw_lock);
		return (NULL);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&devsw_lock);

	return (name);
}

/*
 * Convert from device name to block major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
int
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	int bmajor, i;

	if (name == NULL)
		return (-1);

	mutex_enter(&devsw_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		/* Allow only an optional trailing unit number. */
		if (*(name + len) != '\0' &&
		    !isdigit((unsigned char)*(name + len)))
			continue;
		bmajor = conv->d_bmajor;
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: buffer too small\n");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&devsw_lock);
		return (bmajor);
	}

	mutex_exit(&devsw_lock);
	return (-1);
}
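
/*
 * Illustrative sketch (assumption): because devsw_name2blk() tolerates
 * a trailing unit number after the driver name, both calls below would
 * return the same block major for a registered "xmpl" driver.
 */
#if 0
static void
xmpl_name2blk_demo(void)
{
	char buf[16];

	(void)devsw_name2blk("xmpl", buf, sizeof(buf));
	(void)devsw_name2blk("xmpl0", buf, sizeof(buf));	/* same major */
}
#endif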

/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	int bmajor, cmajor, i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = -1;
	rv = NODEV;

	mutex_enter(&devsw_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&devsw_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&devsw_lock);

	return (rv);
}

/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	int bmajor, cmajor, i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = -1;
	rv = NODEV;

	mutex_enter(&devsw_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&devsw_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&devsw_lock);

	return (rv);
}

/*
 * Device access methods.
 */

#define	DEV_LOCK(d)						\
	if ((d->d_flag & D_MPSAFE) == 0) {			\
		KERNEL_LOCK(1, curlwp);				\
	}

#define	DEV_UNLOCK(d)						\
	if ((d->d_flag & D_MPSAFE) == 0) {			\
		KERNEL_UNLOCK_ONE(curlwp);			\
	}
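
/*
 * Illustrative sketch (assumption, reusing the hypothetical xmpl
 * methods): a driver declares that its methods are safe to run without
 * the big kernel lock by setting D_MPSAFE in d_flag, which makes
 * DEV_LOCK()/DEV_UNLOCK() above no-ops for it.
 */
#if 0
static const struct cdevsw xmpl_mpsafe_cdevsw = {
	xmpl_open, xmpl_close, noread, nowrite, xmpl_ioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER | D_MPSAFE
};
#endif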

int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&devsw_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&devsw_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
		panic("bdev_strategy");

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}

int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}

int
bdev_type(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}
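
/*
 * Illustrative sketch (assumption): callers can branch on the device
 * class recorded in d_flag via bdev_type(), e.g. to treat disks
 * specially; D_DISK comes from <sys/conf.h>.
 */
#if 0
static bool
xmpl_is_disk(dev_t dev)
{

	return bdev_type(dev) == D_DISK;
}
#endif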

int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&devsw_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&devsw_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}

struct tty *
cdev_tty(dev_t dev)
{
	const struct cdevsw *d;
	struct tty *rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return NULL;

	/* XXX Check if necessary. */
	if (d->d_tty == NULL)
		return NULL;

	DEV_LOCK(d);
	rv = (*d->d_tty)(dev);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_type(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}
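
/*
 * Illustrative sketch (assumption): the device vnode layer (specfs) is
 * the usual consumer of these wrappers, driving a device by number
 * alone.  FREAD and S_IFCHR are from <sys/fcntl.h> and <sys/stat.h>;
 * the probe helper itself is invented for this example.
 */
#if 0
static int
xmpl_probe(dev_t dev, struct lwp *l)
{
	int error;

	error = cdev_open(dev, FREAD, S_IFCHR, l);
	if (error != 0)
		return error;
	return cdev_close(dev, FREAD, S_IFCHR, l);
}
#endif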