/*	$NetBSD: subr_devsw.c,v 1.45 2022/03/28 12:41:17 riastradh Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
 *	allocation, we allocate a fixed block of memory to hold the new,
 *	expanded index.  This "fork" of the table is only ever performed
 *	once in order to guarantee that other threads may safely access
 *	the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods should be valid until that reference
 *	is dropped.
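 *
 * Example
 *
 *	The following is a non-authoritative sketch of how a dynamically
 *	loaded driver might register and unregister itself through this
 *	interface; the "foo" driver and its switch structures are
 *	hypothetical, and error handling is elided:
 *
 *		static devmajor_t foo_bmajor = -1, foo_cmajor = -1;
 *
 *		error = devsw_attach("foo", &foo_bdevsw, &foo_bmajor,
 *		    &foo_cdevsw, &foo_cmajor);
 *		if (error)
 *			...
 *		...
 *		devsw_detach(&foo_bdevsw, &foo_cdevsw);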
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.45 2022/03/28 12:41:17 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_dtrace.h"
#endif

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/tty.h>
#include <sys/cpu.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
#include <sys/localcount.h>
#include <sys/pserialize.h>
#include <sys/xcall.h>
#include <sys/device.h>

#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum major device number */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

struct devswref {
	struct localcount	*dr_lc;
};

/* XXX bdevsw, cdevsw, max_bdevsws, and max_cdevsws should be volatile */
extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static struct devswref *cdevswref;
static struct devswref *bdevswref;
static kcondvar_t devsw_cv;

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

kmutex_t device_lock;

void (*biodone_vfs)(buf_t *) = (void *)nullop;

void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);

	cv_init(&devsw_cv, "devsw");
}

int
devsw_attach(const char *devname,
	     const struct bdevsw *bdev, devmajor_t *bmajor,
	     const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;

	if (devname == NULL || cdev == NULL)
		return EINVAL;

	mutex_enter(&device_lock);

	for (i = 0; i < max_devsw_convs; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto out;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto out;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto out;
		}
		break;
	}

	/*
	 * XXX This should allocate what it needs up front so we never
	 * need to flail around trying to unwind.
	 */
	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto out;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto out;
	}

	/*
	 * If we already found a conv, we're done.  Otherwise, find an
	 * empty slot or extend the table.
	 */
	if (i < max_devsw_convs) {
		error = 0;
		goto out;
	}

	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto out;
		}
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	name = kmem_strdupsize(devname, NULL, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto out;
	}

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;
	error = 0;
out:
	mutex_exit(&device_lock);
	return error;
}

static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newbdevsw = NULL;
	struct devswref *newbdevswref = NULL;
	struct localcount *lc;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (devsw == NULL)
		return 0;

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws; bmajor < max_bdevsws; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0; i < max_devsw_convs; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: block majors exhausted\n", __func__);
		return ENOMEM;
	}

	if (bdevswref == NULL) {
		newbdevswref = kmem_zalloc(MAXDEVSW * sizeof(newbdevswref[0]),
		    KM_NOSLEEP);
		if (newbdevswref == NULL)
			return ENOMEM;
		atomic_store_release(&bdevswref, newbdevswref);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newbdevsw = kmem_zalloc(MAXDEVSW * sizeof(newbdevsw[0]),
		    KM_NOSLEEP);
		if (newbdevsw == NULL)
			return ENOMEM;
		memcpy(newbdevsw, bdevsw, max_bdevsws * sizeof(bdevsw[0]));
		atomic_store_release(&bdevsw, newbdevsw);
		atomic_store_release(&max_bdevsws, MAXDEVSW);
	}

	if (bdevsw[*devmajor] != NULL)
		return EEXIST;

	KASSERT(bdevswref[*devmajor].dr_lc == NULL);
	lc = kmem_zalloc(sizeof(*lc), KM_SLEEP);
	localcount_init(lc);
	bdevswref[*devmajor].dr_lc = lc;

	atomic_store_release(&bdevsw[*devmajor], devsw);

	return 0;
}

static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newcdevsw = NULL;
	struct devswref *newcdevswref = NULL;
	struct localcount *lc;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws; cmajor < max_cdevsws; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0; i < max_devsw_convs; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: character majors exhausted\n", __func__);
		return ENOMEM;
	}

	if (cdevswref == NULL) {
		newcdevswref = kmem_zalloc(MAXDEVSW * sizeof(newcdevswref[0]),
		    KM_NOSLEEP);
		if (newcdevswref == NULL)
			return ENOMEM;
		atomic_store_release(&cdevswref, newcdevswref);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newcdevsw = kmem_zalloc(MAXDEVSW * sizeof(newcdevsw[0]),
		    KM_NOSLEEP);
		if (newcdevsw == NULL)
			return ENOMEM;
		memcpy(newcdevsw, cdevsw, max_cdevsws * sizeof(cdevsw[0]));
		atomic_store_release(&cdevsw, newcdevsw);
		atomic_store_release(&max_cdevsws, MAXDEVSW);
	}

	if (cdevsw[*devmajor] != NULL)
		return EEXIST;

	KASSERT(cdevswref[*devmajor].dr_lc == NULL);
	lc = kmem_zalloc(sizeof(*lc), KM_SLEEP);
	localcount_init(lc);
	cdevswref[*devmajor].dr_lc = lc;

	atomic_store_release(&cdevsw[*devmajor], devsw);

	return 0;
}

static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int bi, ci = -1/*XXXGCC*/;

	KASSERT(mutex_owned(&device_lock));

	/* Prevent new references. */
	if (bdev != NULL) {
		for (bi = 0; bi < max_bdevsws; bi++) {
			if (bdevsw[bi] != bdev)
				continue;
			atomic_store_relaxed(&bdevsw[bi], NULL);
			break;
		}
		KASSERT(bi < max_bdevsws);
	}
	if (cdev != NULL) {
		for (ci = 0; ci < max_cdevsws; ci++) {
			if (cdevsw[ci] != cdev)
				continue;
			atomic_store_relaxed(&cdevsw[ci], NULL);
			break;
		}
		KASSERT(ci < max_cdevsws);
	}

	if (bdev == NULL && cdev == NULL) /* XXX possible? */
		return;

	/*
	 * Wait for all bdevsw_lookup_acquire, cdevsw_lookup_acquire
	 * calls to notice that the devsw is gone.
	 *
	 * XXX Despite the use of the pserialize_read_enter/exit API
	 * elsewhere in this file, we use xc_barrier here instead of
	 * pserialize_perform -- because devsw_init is too early for
	 * pserialize_create.  Either pserialize_create should be made
	 * to work earlier, or it should be nixed altogether.  Until
	 * that is fixed, xc_barrier will serve the same purpose.
	 */
	xc_barrier(0);

	/*
	 * Wait for all references to drain.  It is the caller's
	 * responsibility to ensure that at this point, there are no
	 * extant open instances and all new d_open calls will fail.
	 *
	 * Note that localcount_drain may release and reacquire
	 * device_lock.
	 */
	if (bdev != NULL) {
		localcount_drain(bdevswref[bi].dr_lc,
		    &devsw_cv, &device_lock);
		localcount_fini(bdevswref[bi].dr_lc);
		kmem_free(bdevswref[bi].dr_lc, sizeof(*bdevswref[bi].dr_lc));
		bdevswref[bi].dr_lc = NULL;
	}
	if (cdev != NULL) {
		localcount_drain(cdevswref[ci].dr_lc,
		    &devsw_cv, &device_lock);
		localcount_fini(cdevswref[ci].dr_lc);
		kmem_free(cdevswref[ci].dr_lc, sizeof(*cdevswref[ci].dr_lc));
		cdevswref[ci].dr_lc = NULL;
	}
}

void
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
}

/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	devmajor_t bmajor;

	if (dev == NODEV)
		return NULL;
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= atomic_load_relaxed(&max_bdevsws))
		return NULL;

	return atomic_load_consume(&bdevsw)[bmajor];
}

static const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev, struct localcount **lcp)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL, *const *curbdevsw;
	struct devswref *curbdevswref;
	int s;

	if (dev == NODEV)
		return NULL;
	bmajor = major(dev);
	if (bmajor < 0)
		return NULL;

	s = pserialize_read_enter();

	/*
	 * max_bdevsws never goes down, so it is safe to rely on this
	 * condition without any locking for the array access below.
	 * Test sys_bdevsws first so we can avoid the memory barrier in
	 * that case.
	 */
	if (bmajor >= sys_bdevsws &&
	    bmajor >= atomic_load_acquire(&max_bdevsws))
		goto out;
	curbdevsw = atomic_load_consume(&bdevsw);
	if ((bdev = atomic_load_consume(&curbdevsw[bmajor])) == NULL)
		goto out;

	curbdevswref = atomic_load_consume(&bdevswref);
	if (curbdevswref == NULL) {
		*lcp = NULL;
	} else if ((*lcp = curbdevswref[bmajor].dr_lc) != NULL) {
		localcount_acquire(*lcp);
	}
out:
	pserialize_read_exit(s);
	return bdev;
}

static void
bdevsw_release(const struct bdevsw *bdev, struct localcount *lc)
{

	if (lc == NULL)
		return;
	localcount_release(lc, &devsw_cv, &device_lock);
}

/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	devmajor_t cmajor;

	if (dev == NODEV)
		return NULL;
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= atomic_load_relaxed(&max_cdevsws))
		return NULL;

	return atomic_load_consume(&cdevsw)[cmajor];
}

static const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev, struct localcount **lcp)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL, *const *curcdevsw;
	struct devswref *curcdevswref;
	int s;

	if (dev == NODEV)
		return NULL;
	cmajor = major(dev);
	if (cmajor < 0)
		return NULL;

	s = pserialize_read_enter();

	/*
	 * max_cdevsws never goes down, so it is safe to rely on this
	 * condition without any locking for the array access below.
	 * Test sys_cdevsws first so we can avoid the memory barrier in
	 * that case.
	 */
	if (cmajor >= sys_cdevsws &&
	    cmajor >= atomic_load_acquire(&max_cdevsws))
		goto out;
	curcdevsw = atomic_load_consume(&cdevsw);
	if ((cdev = atomic_load_consume(&curcdevsw[cmajor])) == NULL)
		goto out;

	curcdevswref = atomic_load_consume(&cdevswref);
	if (curcdevswref == NULL) {
		*lcp = NULL;
	} else if ((*lcp = curcdevswref[cmajor].dr_lc) != NULL) {
		localcount_acquire(*lcp);
	}
out:
	pserialize_read_exit(s);
	return cdev;
}

static void
cdevsw_release(const struct cdevsw *cdev, struct localcount *lc)
{

	if (lc == NULL)
		return;
	localcount_release(lc, &devsw_cv, &device_lock);
}

/*
 * Look up a block device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
bdevsw_lookup_major(const struct bdevsw *bdev)
{
	const struct bdevsw *const *curbdevsw;
	devmajor_t bmajor, bmax;

	bmax = atomic_load_acquire(&max_bdevsws);
	curbdevsw = atomic_load_consume(&bdevsw);
	for (bmajor = 0; bmajor < bmax; bmajor++) {
		if (atomic_load_relaxed(&curbdevsw[bmajor]) == bdev)
			return bmajor;
	}

	return NODEVMAJOR;
}

/*
 * Look up a character device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
cdevsw_lookup_major(const struct cdevsw *cdev)
{
	const struct cdevsw *const *curcdevsw;
	devmajor_t cmajor, cmax;

	cmax = atomic_load_acquire(&max_cdevsws);
	curcdevsw = atomic_load_consume(&cdevsw);
	for (cmajor = 0; cmajor < cmax; cmajor++) {
		if (atomic_load_relaxed(&curcdevsw[cmajor]) == cdev)
			return cmajor;
	}

	return NODEVMAJOR;
}

/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(devmajor_t bmajor)
{
	const char *name;
	devmajor_t cmajor;
	int i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return NULL;
	}
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&device_lock);

	return name;
}

/*
 * Convert char major number to device driver name.
 */
const char *
cdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return NULL;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_cmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return name;
}

/*
 * Convert block major number to device driver name.
 */
const char *
bdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return NULL;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return name;
}

/*
 * Convert from device name to block major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
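 *
 * => As an illustrative sketch (assuming a driver registered under the
 *    hypothetical name "wd"), devsw_name2blk("wd0", buf, sizeof(buf))
 *    would return the block major recorded for "wd" and copy "wd" into
 *    buf, or return NODEVMAJOR if no such block device is attached.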
 */
devmajor_t
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t bmajor;
	int i;

	if (name == NULL)
		return NODEVMAJOR;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (name[len] != '\0' && !isdigit((unsigned char)name[len]))
			continue;
		bmajor = conv->d_bmajor;
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("%s: buffer too short\n", __func__);
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return bmajor;
	}

	mutex_exit(&device_lock);
	return NODEVMAJOR;
}

/*
 * Convert from device name to char major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2chr(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t cmajor;
	int i;

	if (name == NULL)
		return NODEVMAJOR;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (name[len] != '\0' && !isdigit((unsigned char)name[len]))
			continue;
		cmajor = conv->d_cmajor;
		if (cmajor < 0 || cmajor >= max_cdevsws ||
		    cdevsw[cmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("%s: buffer too short\n", __func__);
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return cmajor;
	}

	mutex_exit(&device_lock);
	return NODEVMAJOR;
}

/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&device_lock);
		return NODEV;
	}
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&device_lock);

	return rv;
}

/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return NODEV;
	}
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&device_lock);

	return rv;
}

/*
 * Device access methods.
 */

#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}

int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	struct localcount *lc;
	device_t dv = NULL/*XXXGCC*/;
	int unit, rv, mpflag;

	d = bdevsw_lookup_acquire(dev, &lc);
	if (d == NULL)
		return ENXIO;

	if (d->d_devtounit) {
		/*
		 * If the device node corresponds to an autoconf device
		 * instance, acquire a reference to it so that during
		 * d_open, device_lookup is stable.
		 *
		 * XXX This should also arrange to instantiate cloning
		 * pseudo-devices if appropriate, but that requires
		 * reviewing them all to find and verify a common
		 * pattern.
		 */
		if ((unit = (*d->d_devtounit)(dev)) == -1)
			return ENXIO;
		if ((dv = device_lookup_acquire(d->d_cfdriver, unit)) == NULL)
			return ENXIO;
	}

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	if (d->d_devtounit) {
		device_release(dv);
	}

	bdevsw_release(d, lc);

	return rv;
}

int
bdev_cancel(dev_t dev, int flag, int devtype, struct lwp *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;
	if (d->d_cancel == NULL)
		return ENODEV;

	DEV_LOCK(d);
	rv = (*d->d_cancel)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

SDT_PROVIDER_DECLARE(io);
SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);

void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	SDT_PROBE1(io, kernel, , start, bp);

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone_vfs(bp);	/* biodone() iff vfs present */
		return;
	}

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}

int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}

int
bdev_flags(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return 0;
	return d->d_flag & ~D_TYPEMASK;
}

int
bdev_type(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't try to lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}

int
bdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}

void
bdev_detached(dev_t dev)
{
	const struct bdevsw *d;
	device_t dv;
	int unit;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return;
	if (d->d_devtounit == NULL)
		return;
	if ((unit = (*d->d_devtounit)(dev)) == -1)
		return;
	if ((dv = device_lookup(d->d_cfdriver, unit)) == NULL)
		return;
	config_detach_commit(dv);
}

int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	struct localcount *lc;
	device_t dv = NULL/*XXXGCC*/;
	int unit, rv, mpflag;

	d = cdevsw_lookup_acquire(dev, &lc);
	if (d == NULL)
		return ENXIO;

	if (d->d_devtounit) {
		/*
		 * If the device node corresponds to an autoconf device
		 * instance, acquire a reference to it so that during
		 * d_open, device_lookup is stable.
		 *
		 * XXX This should also arrange to instantiate cloning
		 * pseudo-devices if appropriate, but that requires
		 * reviewing them all to find and verify a common
		 * pattern.
		 */
		if ((unit = (*d->d_devtounit)(dev)) == -1)
			return ENXIO;
		if ((dv = device_lookup_acquire(d->d_cfdriver, unit)) == NULL)
			return ENXIO;
	}

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	if (d->d_devtounit) {
		device_release(dv);
	}

	cdevsw_release(d, lc);

	return rv;
}

int
cdev_cancel(dev_t dev, int flag, int devtype, struct lwp *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;
	if (d->d_cancel == NULL)
		return ENODEV;

	DEV_LOCK(d);
	rv = (*d->d_cancel)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}

struct tty *
cdev_tty(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return NULL;
	/* XXX Check if necessary. */
	if (d->d_tty == NULL)
		return NULL;

	return (*d->d_tty)(dev);
}

int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_discard)(dev, pos, len);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_flags(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return 0;
	return d->d_flag & ~D_TYPEMASK;
}

int
cdev_type(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

void
cdev_detached(dev_t dev)
{
	const struct cdevsw *d;
	device_t dv;
	int unit;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return;
	if (d->d_devtounit == NULL)
		return;
	if ((unit = (*d->d_devtounit)(dev)) == -1)
		return;
	if ((dv = device_lookup(d->d_cfdriver, unit)) == NULL)
		return;
	config_detach_commit(dv);
}

/*
 * nommap(dev, off, prot)
 *
 *	mmap routine that always fails, for non-mmappable devices.
 */
paddr_t
nommap(dev_t dev, off_t off, int prot)
{

	return (paddr_t)-1;
}

/*
 * dev_minor_unit(dev)
 *
 *	Returns minor(dev) as an int.  Intended for use with struct
 *	bdevsw, cdevsw::d_devtounit for drivers whose /dev nodes are
 *	implemented by reference to an autoconf instance with the minor
 *	number.
 */
int
dev_minor_unit(dev_t dev)
{

	return minor(dev);
}
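
/*
 * Example (a non-authoritative sketch): a driver whose /dev minor
 * numbers map 1:1 onto autoconf unit numbers might point d_devtounit
 * at dev_minor_unit in its switch definition; the "foo" driver and
 * foo_cd cfdriver below are hypothetical:
 *
 *	const struct cdevsw foo_cdevsw = {
 *		.d_open = foo_open,
 *		...
 *		.d_devtounit = dev_minor_unit,
 *		.d_cfdriver = &foo_cd,
 *		.d_flag = D_OTHER | D_MPSAFE,
 *	};
 */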