/*	$NetBSD: subr_devsw.c,v 1.51 2023/02/15 13:12:45 riastradh Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots, only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow when
 *	majors are allocated dynamically, we allocate a fixed block of
 *	memory to hold the new, expanded index.  This "fork" of the
 *	table is only ever performed once in order to guarantee that
 *	other threads may safely access the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods remains valid until that reference
 *	is dropped.
 */
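
/*
 * Illustrative sketch (the mydev_* names are hypothetical): a
 * character-only pseudo-driver registers itself with devsw_attach()
 * below, passing a NULL bdevsw and major numbers of -1 so that free
 * majors are chosen automatically:
 *
 *	static const struct cdevsw mydev_cdevsw = {
 *		.d_open = mydev_open,
 *		.d_close = mydev_close,
 *		.d_read = mydev_read,
 *		.d_write = mydev_write,
 *		.d_ioctl = mydev_ioctl,
 *		.d_stop = nostop,
 *		.d_tty = notty,
 *		.d_poll = nopoll,
 *		.d_mmap = nommap,
 *		.d_kqfilter = nokqfilter,
 *		.d_discard = nodiscard,
 *		.d_flag = D_OTHER | D_MPSAFE,
 *	};
 *
 *	devmajor_t bmajor = -1, cmajor = -1;
 *	int error = devsw_attach("mydev", NULL, &bmajor,
 *	    &mydev_cdevsw, &cmajor);
 */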

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.51 2023/02/15 13:12:45 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_dtrace.h"
#endif

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/tty.h>
#include <sys/cpu.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
#include <sys/localcount.h>
#include <sys/pserialize.h>
#include <sys/xcall.h>
#include <sys/device.h>

#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum number of device majors */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

struct devswref {
	struct localcount *dr_lc;
};

/* XXX bdevsw, cdevsw, max_bdevsws, and max_cdevsws should be volatile */
extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static struct devswref *cdevswref;
static struct devswref *bdevswref;
static kcondvar_t devsw_cv;

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

kmutex_t device_lock;

void (*biodone_vfs)(buf_t *) = (void *)nullop;

/*
 * bdev probes
 */
SDT_PROBE_DEFINE6(sdt, bdev, open, acquire,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/,
    "int"/*unit*/,
    "device_t"/*dv*/);
SDT_PROBE_DEFINE4(sdt, bdev, open, entry,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/);
SDT_PROBE_DEFINE5(sdt, bdev, open, return,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/,
    "int"/*error*/);
SDT_PROBE_DEFINE6(sdt, bdev, open, release,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/,
    "int"/*unit*/,
    "device_t"/*dv*/);

SDT_PROBE_DEFINE4(sdt, bdev, cancel, entry,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/);
SDT_PROBE_DEFINE5(sdt, bdev, cancel, return,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/,
    "int"/*error*/);

SDT_PROBE_DEFINE4(sdt, bdev, close, entry,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/);
SDT_PROBE_DEFINE5(sdt, bdev, close, return,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/,
    "int"/*error*/);

SDT_PROBE_DEFINE3(sdt, bdev, strategy, entry,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "struct buf *"/*bp*/);
SDT_PROBE_DEFINE3(sdt, bdev, strategy, return,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "struct buf *"/*bp*/);

SDT_PROBE_DEFINE5(sdt, bdev, ioctl, entry,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "unsigned long"/*cmd*/,
    "void *"/*data*/,
    "int"/*flag*/);
SDT_PROBE_DEFINE6(sdt, bdev, ioctl, return,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "unsigned long"/*cmd*/,
    "void *"/*data*/,
    "int"/*flag*/,
    "int"/*error*/);

SDT_PROBE_DEFINE2(sdt, bdev, psize, entry,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/);
SDT_PROBE_DEFINE3(sdt, bdev, psize, return,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "int"/*psize*/);

SDT_PROBE_DEFINE4(sdt, bdev, discard, entry,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "off_t"/*pos*/,
    "off_t"/*len*/);
SDT_PROBE_DEFINE5(sdt, bdev, discard, return,
    "struct bdevsw *"/*bdevsw*/,
    "dev_t"/*dev*/,
    "off_t"/*pos*/,
    "off_t"/*len*/,
    "int"/*error*/);

/*
 * cdev probes
 */
SDT_PROBE_DEFINE6(sdt, cdev, open, acquire,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/,
    "int"/*unit*/,
    "device_t"/*dv*/);
SDT_PROBE_DEFINE4(sdt, cdev, open, entry,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/);
SDT_PROBE_DEFINE5(sdt, cdev, open, return,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/,
    "int"/*error*/);
SDT_PROBE_DEFINE6(sdt, cdev, open, release,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/,
    "int"/*unit*/,
    "device_t"/*dv*/);

SDT_PROBE_DEFINE4(sdt, cdev, cancel, entry,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/);
SDT_PROBE_DEFINE5(sdt, cdev, cancel, return,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/,
    "int"/*error*/);

SDT_PROBE_DEFINE4(sdt, cdev, close, entry,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/);
SDT_PROBE_DEFINE5(sdt, cdev, close, return,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "int"/*flag*/,
    "int"/*devtype*/,
    "int"/*error*/);

SDT_PROBE_DEFINE4(sdt, cdev, read, entry,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "struct uio *"/*uio*/,
    "int"/*flag*/);
SDT_PROBE_DEFINE5(sdt, cdev, read, return,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "struct uio *"/*uio*/,
    "int"/*flag*/,
    "int"/*error*/);

SDT_PROBE_DEFINE4(sdt, cdev, write, entry,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "struct uio *"/*uio*/,
    "int"/*flag*/);
SDT_PROBE_DEFINE5(sdt, cdev, write, return,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "struct uio *"/*uio*/,
    "int"/*flag*/,
    "int"/*error*/);

SDT_PROBE_DEFINE5(sdt, cdev, ioctl, entry,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "unsigned long"/*cmd*/,
    "void *"/*data*/,
    "int"/*flag*/);
SDT_PROBE_DEFINE6(sdt, cdev, ioctl, return,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "unsigned long"/*cmd*/,
    "void *"/*data*/,
    "int"/*flag*/,
    "int"/*error*/);

SDT_PROBE_DEFINE4(sdt, cdev, stop, entry,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "struct tty *"/*tp*/,
    "int"/*flag*/);
SDT_PROBE_DEFINE4(sdt, cdev, stop, return,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "struct tty *"/*tp*/,
    "int"/*flag*/);

SDT_PROBE_DEFINE3(sdt, cdev, poll, entry,
    "struct cdevsw *"/*cdevsw*/,
    "dev_t"/*dev*/,
    "int"/*events*/);
SDT_PROBE_DEFINE4(sdt, cdev, poll, return,
    "struct cdevsw *"/*cdevsw*/,
332 "dev_t"/*dev*/, 333 "int"/*events*/, 334 "int"/*revents*/); 335 336 SDT_PROBE_DEFINE4(sdt, cdev, mmap, entry, 337 "struct cdevsw *"/*cdevsw*/, 338 "dev_t"/*dev*/, 339 "off_t"/*off*/, 340 "int"/*flag*/); 341 SDT_PROBE_DEFINE5(sdt, cdev, mmap, return, 342 "struct cdevsw *"/*cdevsw*/, 343 "dev_t"/*dev*/, 344 "off_t"/*off*/, 345 "int"/*flag*/, 346 "paddr_t"/*mmapcookie*/); 347 348 SDT_PROBE_DEFINE3(sdt, cdev, kqfilter, entry, 349 "struct cdevsw *"/*cdevsw*/, 350 "dev_t"/*dev*/, 351 "struct knote *"/*kn*/); 352 SDT_PROBE_DEFINE4(sdt, cdev, kqfilter, return, 353 "struct cdevsw *"/*cdevsw*/, 354 "dev_t"/*dev*/, 355 "struct knote *"/*kn*/, 356 "int"/*error*/); 357 358 SDT_PROBE_DEFINE4(sdt, cdev, discard, entry, 359 "struct cdevsw *"/*cdevsw*/, 360 "dev_t"/*dev*/, 361 "off_t"/*pos*/, 362 "off_t"/*len*/); 363 SDT_PROBE_DEFINE5(sdt, cdev, discard, return, 364 "struct cdevsw *"/*cdevsw*/, 365 "dev_t"/*dev*/, 366 "off_t"/*pos*/, 367 "off_t"/*len*/, 368 "int"/*error*/); 369 370 void 371 devsw_init(void) 372 { 373 374 KASSERT(sys_bdevsws < MAXDEVSW - 1); 375 KASSERT(sys_cdevsws < MAXDEVSW - 1); 376 mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE); 377 378 cv_init(&devsw_cv, "devsw"); 379 } 380 381 int 382 devsw_attach(const char *devname, 383 const struct bdevsw *bdev, devmajor_t *bmajor, 384 const struct cdevsw *cdev, devmajor_t *cmajor) 385 { 386 struct devsw_conv *conv; 387 char *name; 388 int error, i; 389 390 if (devname == NULL || cdev == NULL) 391 return EINVAL; 392 393 mutex_enter(&device_lock); 394 395 for (i = 0; i < max_devsw_convs; i++) { 396 conv = &devsw_conv[i]; 397 if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0) 398 continue; 399 400 if ((bdev != NULL) && (*bmajor < 0)) 401 *bmajor = conv->d_bmajor; 402 if (*cmajor < 0) 403 *cmajor = conv->d_cmajor; 404 405 if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) { 406 error = EINVAL; 407 goto out; 408 } 409 if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) { 410 error = EINVAL; 411 goto out; 412 } 413 414 if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) || 415 cdevsw[*cmajor] != NULL) { 416 error = EEXIST; 417 goto out; 418 } 419 break; 420 } 421 422 /* 423 * XXX This should allocate what it needs up front so we never 424 * need to flail around trying to unwind. 425 */ 426 error = bdevsw_attach(bdev, bmajor); 427 if (error != 0) 428 goto out; 429 error = cdevsw_attach(cdev, cmajor); 430 if (error != 0) { 431 devsw_detach_locked(bdev, NULL); 432 goto out; 433 } 434 435 /* 436 * If we already found a conv, we're done. Otherwise, find an 437 * empty slot or extend the table. 
	 */
	if (i < max_devsw_convs) {
		error = 0;
		goto out;
	}

	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto out;
		}
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	name = kmem_strdupsize(devname, NULL, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto out;
	}

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;
	error = 0;
out:
	mutex_exit(&device_lock);
	return error;
}

static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newbdevsw = NULL;
	struct devswref *newbdevswref = NULL;
	struct localcount *lc;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (devsw == NULL)
		return 0;

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws; bmajor < max_bdevsws; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0; i < max_devsw_convs; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: block majors exhausted\n", __func__);
		return ENOMEM;
	}

	if (bdevswref == NULL) {
		newbdevswref = kmem_zalloc(MAXDEVSW * sizeof(newbdevswref[0]),
		    KM_NOSLEEP);
		if (newbdevswref == NULL)
			return ENOMEM;
		atomic_store_release(&bdevswref, newbdevswref);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newbdevsw = kmem_zalloc(MAXDEVSW * sizeof(newbdevsw[0]),
		    KM_NOSLEEP);
		if (newbdevsw == NULL)
			return ENOMEM;
		memcpy(newbdevsw, bdevsw, max_bdevsws * sizeof(bdevsw[0]));
		atomic_store_release(&bdevsw, newbdevsw);
		atomic_store_release(&max_bdevsws, MAXDEVSW);
	}

	if (bdevsw[*devmajor] != NULL)
		return EEXIST;

	KASSERT(bdevswref[*devmajor].dr_lc == NULL);
	lc = kmem_zalloc(sizeof(*lc), KM_SLEEP);
	localcount_init(lc);
	bdevswref[*devmajor].dr_lc = lc;

	atomic_store_release(&bdevsw[*devmajor], devsw);

	return 0;
}
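
/*
 * Note on ordering: bdevsw_attach() above and cdevsw_attach() below
 * initialize a new table or entry first and only then publish the
 * pointer with atomic_store_release().  Readers such as
 * bdevsw_lookup() load the pointer with atomic_load_consume() or
 * atomic_load_acquire(), so they are guaranteed to observe fully
 * initialized contents without taking device_lock.  A minimal sketch
 * of the pairing (tab stands in for bdevsw or cdevsw):
 *
 *	writer (attach, under device_lock):
 *		newtab[major] = devsw;			initialize first,
 *		atomic_store_release(&tab, newtab);	then publish
 *
 *	reader (lookup, lockless):
 *		curtab = atomic_load_consume(&tab);	published table
 *		d = curtab[major];			contents visible
 */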

static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newcdevsw = NULL;
	struct devswref *newcdevswref = NULL;
	struct localcount *lc;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws; cmajor < max_cdevsws; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0; i < max_devsw_convs; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: character majors exhausted\n", __func__);
		return ENOMEM;
	}

	if (cdevswref == NULL) {
		newcdevswref = kmem_zalloc(MAXDEVSW * sizeof(newcdevswref[0]),
		    KM_NOSLEEP);
		if (newcdevswref == NULL)
			return ENOMEM;
		atomic_store_release(&cdevswref, newcdevswref);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newcdevsw = kmem_zalloc(MAXDEVSW * sizeof(newcdevsw[0]),
		    KM_NOSLEEP);
		if (newcdevsw == NULL)
			return ENOMEM;
		memcpy(newcdevsw, cdevsw, max_cdevsws * sizeof(cdevsw[0]));
		atomic_store_release(&cdevsw, newcdevsw);
		atomic_store_release(&max_cdevsws, MAXDEVSW);
	}

	if (cdevsw[*devmajor] != NULL)
		return EEXIST;

	KASSERT(cdevswref[*devmajor].dr_lc == NULL);
	lc = kmem_zalloc(sizeof(*lc), KM_SLEEP);
	localcount_init(lc);
	cdevswref[*devmajor].dr_lc = lc;

	atomic_store_release(&cdevsw[*devmajor], devsw);

	return 0;
}

static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int bi, ci = -1/*XXXGCC*/, di;
	struct cfdriver *cd;
	device_t dv;

	KASSERT(mutex_owned(&device_lock));

	/*
	 * If this is wired to an autoconf device, make sure the device
	 * has no more instances.  No locking here because under
	 * correct use of devsw_detach, none of this state can change
	 * at this point.
	 */
	if (cdev != NULL && (cd = cdev->d_cfdriver) != NULL) {
		for (di = 0; di < cd->cd_ndevs; di++) {
			KASSERTMSG((dv = cd->cd_devs[di]) == NULL,
			    "detaching character device driver %s"
			    " still has attached unit %s",
			    cd->cd_name, device_xname(dv));
		}
	}
	if (bdev != NULL && (cd = bdev->d_cfdriver) != NULL) {
		for (di = 0; di < cd->cd_ndevs; di++) {
			KASSERTMSG((dv = cd->cd_devs[di]) == NULL,
			    "detaching block device driver %s"
			    " still has attached unit %s",
			    cd->cd_name, device_xname(dv));
		}
	}

	/* Prevent new references. */
	if (bdev != NULL) {
		for (bi = 0; bi < max_bdevsws; bi++) {
			if (bdevsw[bi] != bdev)
				continue;
			atomic_store_relaxed(&bdevsw[bi], NULL);
			break;
		}
		KASSERT(bi < max_bdevsws);
	}
	if (cdev != NULL) {
		for (ci = 0; ci < max_cdevsws; ci++) {
			if (cdevsw[ci] != cdev)
				continue;
			atomic_store_relaxed(&cdevsw[ci], NULL);
			break;
		}
		KASSERT(ci < max_cdevsws);
	}

	if (bdev == NULL && cdev == NULL) /* XXX possible? */
		return;

	/*
	 * Wait for all bdevsw_lookup_acquire, cdevsw_lookup_acquire
	 * calls to notice that the devsw is gone.
	 *
	 * XXX Despite the use of the pserialize_read_enter/exit API
	 * elsewhere in this file, we use xc_barrier here instead of
	 * pserialize_perform -- because devsw_init is too early for
	 * pserialize_create.  Either pserialize_create should be made
	 * to work earlier, or it should be nixed altogether.  Until
	 * that is fixed, xc_barrier will serve the same purpose.
	 */
	xc_barrier(0);

	/*
	 * Wait for all references to drain.  It is the caller's
	 * responsibility to ensure that at this point, there are no
	 * extant open instances and all new d_open calls will fail.
	 *
	 * Note that localcount_drain may release and reacquire
	 * device_lock.
	 */
	if (bdev != NULL) {
		localcount_drain(bdevswref[bi].dr_lc,
		    &devsw_cv, &device_lock);
		localcount_fini(bdevswref[bi].dr_lc);
		kmem_free(bdevswref[bi].dr_lc, sizeof(*bdevswref[bi].dr_lc));
		bdevswref[bi].dr_lc = NULL;
	}
	if (cdev != NULL) {
		localcount_drain(cdevswref[ci].dr_lc,
		    &devsw_cv, &device_lock);
		localcount_fini(cdevswref[ci].dr_lc);
		kmem_free(cdevswref[ci].dr_lc, sizeof(*cdevswref[ci].dr_lc));
		cdevswref[ci].dr_lc = NULL;
	}
}

void
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
}
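
/*
 * Illustrative sketch (the mydev_* names are hypothetical): a modular
 * driver would typically call devsw_detach() from its module fini
 * path, refusing to detach while units are still open so that the
 * reference drain in devsw_detach_locked() cannot block forever:
 *
 *	case MODULE_CMD_FINI:
 *		if (mydev_in_use())
 *			return EBUSY;
 *		devsw_detach(NULL, &mydev_cdevsw);
 *		return 0;
 */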

/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	devmajor_t bmajor;

	if (dev == NODEV)
		return NULL;
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= atomic_load_relaxed(&max_bdevsws))
		return NULL;

	return atomic_load_consume(&bdevsw)[bmajor];
}

static const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev, struct localcount **lcp)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL, *const *curbdevsw;
	struct devswref *curbdevswref;
	int s;

	if (dev == NODEV)
		return NULL;
	bmajor = major(dev);
	if (bmajor < 0)
		return NULL;

	s = pserialize_read_enter();

	/*
	 * max_bdevsws never goes down, so it is safe to rely on this
	 * condition without any locking for the array access below.
	 * Test sys_bdevsws first so we can avoid the memory barrier in
	 * that case.
	 */
	if (bmajor >= sys_bdevsws &&
	    bmajor >= atomic_load_acquire(&max_bdevsws))
		goto out;
	curbdevsw = atomic_load_consume(&bdevsw);
	if ((bdev = atomic_load_consume(&curbdevsw[bmajor])) == NULL)
		goto out;

	curbdevswref = atomic_load_consume(&bdevswref);
	if (curbdevswref == NULL) {
		*lcp = NULL;
	} else if ((*lcp = curbdevswref[bmajor].dr_lc) != NULL) {
		localcount_acquire(*lcp);
	}
out:
	pserialize_read_exit(s);
	return bdev;
}

static void
bdevsw_release(const struct bdevsw *bdev, struct localcount *lc)
{

	if (lc == NULL)
		return;
	localcount_release(lc, &devsw_cv, &device_lock);
}

/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	devmajor_t cmajor;

	if (dev == NODEV)
		return NULL;
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= atomic_load_relaxed(&max_cdevsws))
		return NULL;

	return atomic_load_consume(&cdevsw)[cmajor];
}

static const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev, struct localcount **lcp)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL, *const *curcdevsw;
	struct devswref *curcdevswref;
	int s;

	if (dev == NODEV)
		return NULL;
	cmajor = major(dev);
	if (cmajor < 0)
		return NULL;

	s = pserialize_read_enter();

	/*
	 * max_cdevsws never goes down, so it is safe to rely on this
	 * condition without any locking for the array access below.
	 * Test sys_cdevsws first so we can avoid the memory barrier in
	 * that case.
	 */
	if (cmajor >= sys_cdevsws &&
	    cmajor >= atomic_load_acquire(&max_cdevsws))
		goto out;
	curcdevsw = atomic_load_consume(&cdevsw);
	if ((cdev = atomic_load_consume(&curcdevsw[cmajor])) == NULL)
		goto out;

	curcdevswref = atomic_load_consume(&cdevswref);
	if (curcdevswref == NULL) {
		*lcp = NULL;
	} else if ((*lcp = curcdevswref[cmajor].dr_lc) != NULL) {
		localcount_acquire(*lcp);
	}
out:
	pserialize_read_exit(s);
	return cdev;
}

static void
cdevsw_release(const struct cdevsw *cdev, struct localcount *lc)
{

	if (lc == NULL)
		return;
	localcount_release(lc, &devsw_cv, &device_lock);
}

/*
 * Look up a block device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
bdevsw_lookup_major(const struct bdevsw *bdev)
{
	const struct bdevsw *const *curbdevsw;
	devmajor_t bmajor, bmax;

	bmax = atomic_load_acquire(&max_bdevsws);
	curbdevsw = atomic_load_consume(&bdevsw);
	for (bmajor = 0; bmajor < bmax; bmajor++) {
		if (atomic_load_relaxed(&curbdevsw[bmajor]) == bdev)
			return bmajor;
	}

	return NODEVMAJOR;
}

/*
 * Look up a character device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
cdevsw_lookup_major(const struct cdevsw *cdev)
{
	const struct cdevsw *const *curcdevsw;
	devmajor_t cmajor, cmax;

	cmax = atomic_load_acquire(&max_cdevsws);
	curcdevsw = atomic_load_consume(&cdevsw);
	for (cmajor = 0; cmajor < cmax; cmajor++) {
		if (atomic_load_relaxed(&curcdevsw[cmajor]) == cdev)
			return cmajor;
	}

	return NODEVMAJOR;
}

/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(devmajor_t bmajor)
{
	const char *name;
	devmajor_t cmajor;
	int i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return NULL;
	}
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&device_lock);

	return name;
}

/*
 * Convert char major number to device driver name.
 */
const char *
cdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return NULL;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_cmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return name;
}

/*
 * Convert block major number to device driver name.
 */
const char *
bdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return NULL;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return name;
}

/*
 * Convert from device name to block major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t bmajor;
	int i;

	if (name == NULL)
		return NODEVMAJOR;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (name[len] != '\0' && !isdigit((unsigned char)name[len]))
			continue;
		bmajor = conv->d_bmajor;
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("%s: buffer too short\n", __func__);
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return bmajor;
	}

	mutex_exit(&device_lock);
	return NODEVMAJOR;
}

/*
 * Convert from device name to char major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2chr(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t cmajor;
	int i;

	if (name == NULL)
		return NODEVMAJOR;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (name[len] != '\0' && !isdigit((unsigned char)name[len]))
			continue;
		cmajor = conv->d_cmajor;
		if (cmajor < 0 || cmajor >= max_cdevsws ||
		    cdevsw[cmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("%s: buffer too short\n", __func__);
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return cmajor;
	}

	mutex_exit(&device_lock);
	return NODEVMAJOR;
}

/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&device_lock);
		return NODEV;
	}
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&device_lock);

	return rv;
}

/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return NODEV;
	}
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&device_lock);

	return rv;
}

/*
 * Device access methods.
 */

#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}
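
/*
 * DEV_LOCK() takes the kernel lock around a driver method only when
 * the driver has not set D_MPSAFE in d_flag; the result of that test
 * is cached in the caller's mpflag variable so that the matching
 * DEV_UNLOCK() always undoes exactly what DEV_LOCK() did.
 */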

int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	struct localcount *lc;
	device_t dv = NULL/*XXXGCC*/;
	int unit = -1/*XXXGCC*/, rv, mpflag;

	d = bdevsw_lookup_acquire(dev, &lc);
	if (d == NULL)
		return ENXIO;

	if (d->d_devtounit) {
		/*
		 * If the device node corresponds to an autoconf device
		 * instance, acquire a reference to it so that during
		 * d_open, device_lookup is stable.
		 *
		 * XXX This should also arrange to instantiate cloning
		 * pseudo-devices if appropriate, but that requires
		 * reviewing them all to find and verify a common
		 * pattern.
		 */
		if ((unit = (*d->d_devtounit)(dev)) == -1) {
			rv = ENXIO;
			goto out;
		}
		if ((dv = device_lookup_acquire(d->d_cfdriver, unit)) ==
		    NULL) {
			rv = ENXIO;
			goto out;
		}
		SDT_PROBE6(sdt, bdev, open, acquire,
		    d, dev, flag, devtype, unit, dv);
	}

	DEV_LOCK(d);
	SDT_PROBE4(sdt, bdev, open, entry, d, dev, flag, devtype);
	rv = (*d->d_open)(dev, flag, devtype, l);
	SDT_PROBE5(sdt, bdev, open, return, d, dev, flag, devtype, rv);
	DEV_UNLOCK(d);

	if (d->d_devtounit) {
		SDT_PROBE6(sdt, bdev, open, release,
		    d, dev, flag, devtype, unit, dv);
		device_release(dv);
	}

out:
	/* Drop the reference taken by bdevsw_lookup_acquire. */
	bdevsw_release(d, lc);

	return rv;
}

int
bdev_cancel(dev_t dev, int flag, int devtype, struct lwp *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;
	if (d->d_cancel == NULL)
		return ENODEV;

	DEV_LOCK(d);
	SDT_PROBE4(sdt, bdev, cancel, entry, d, dev, flag, devtype);
	rv = (*d->d_cancel)(dev, flag, devtype, l);
	SDT_PROBE5(sdt, bdev, cancel, return, d, dev, flag, devtype, rv);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	SDT_PROBE4(sdt, bdev, close, entry, d, dev, flag, devtype);
	rv = (*d->d_close)(dev, flag, devtype, l);
	SDT_PROBE5(sdt, bdev, close, return, d, dev, flag, devtype, rv);
	DEV_UNLOCK(d);

	return rv;
}

SDT_PROVIDER_DECLARE(io);
SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);

void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	SDT_PROBE1(io, kernel, , start, bp);

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone_vfs(bp);	/* biodone() iff vfs present */
		return;
	}

	DEV_LOCK(d);
	SDT_PROBE3(sdt, bdev, strategy, entry, d, bp->b_dev, bp);
	(*d->d_strategy)(bp);
	SDT_PROBE3(sdt, bdev, strategy, return, d, bp->b_dev, bp);
	DEV_UNLOCK(d);
}

int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	SDT_PROBE5(sdt, bdev, ioctl, entry, d, dev, cmd, data, flag);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	SDT_PROBE6(sdt, bdev, ioctl, return, d, dev, cmd, data, flag, rv);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}

int
bdev_flags(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return 0;
	return d->d_flag & ~D_TYPEMASK;
}

int
bdev_type(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't try to lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	SDT_PROBE2(sdt, bdev, psize, entry, d, dev);
	rv = (*d->d_psize)(dev);
	SDT_PROBE3(sdt, bdev, psize, return, d, dev, rv);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}

int
bdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	SDT_PROBE4(sdt, bdev, discard, entry, d, dev, pos, len);
	rv = (*d->d_discard)(dev, pos, len);
	SDT_PROBE5(sdt, bdev, discard, return, d, dev, pos, len, rv);
	DEV_UNLOCK(d);

	return rv;
}

void
bdev_detached(dev_t dev)
{
	const struct bdevsw *d;
	device_t dv;
	int unit;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return;
	if (d->d_devtounit == NULL)
		return;
	if ((unit = (*d->d_devtounit)(dev)) == -1)
		return;
	if ((dv = device_lookup(d->d_cfdriver, unit)) == NULL)
		return;
	config_detach_commit(dv);
}

int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	struct localcount *lc;
	device_t dv = NULL/*XXXGCC*/;
	int unit = -1/*XXXGCC*/, rv, mpflag;

	d = cdevsw_lookup_acquire(dev, &lc);
	if (d == NULL)
		return ENXIO;

	if (d->d_devtounit) {
		/*
		 * If the device node corresponds to an autoconf device
		 * instance, acquire a reference to it so that during
		 * d_open, device_lookup is stable.
		 *
		 * XXX This should also arrange to instantiate cloning
		 * pseudo-devices if appropriate, but that requires
		 * reviewing them all to find and verify a common
		 * pattern.
		 */
		if ((unit = (*d->d_devtounit)(dev)) == -1) {
			rv = ENXIO;
			goto out;
		}
		if ((dv = device_lookup_acquire(d->d_cfdriver, unit)) ==
		    NULL) {
			rv = ENXIO;
			goto out;
		}
		SDT_PROBE6(sdt, cdev, open, acquire,
		    d, dev, flag, devtype, unit, dv);
	}

	DEV_LOCK(d);
	SDT_PROBE4(sdt, cdev, open, entry, d, dev, flag, devtype);
	rv = (*d->d_open)(dev, flag, devtype, l);
	SDT_PROBE5(sdt, cdev, open, return, d, dev, flag, devtype, rv);
	DEV_UNLOCK(d);

	if (d->d_devtounit) {
		SDT_PROBE6(sdt, cdev, open, release,
		    d, dev, flag, devtype, unit, dv);
		device_release(dv);
	}

out:
	/* Drop the reference taken by cdevsw_lookup_acquire. */
	cdevsw_release(d, lc);

	return rv;
}

int
cdev_cancel(dev_t dev, int flag, int devtype, struct lwp *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;
	if (d->d_cancel == NULL)
		return ENODEV;

	DEV_LOCK(d);
	SDT_PROBE4(sdt, cdev, cancel, entry, d, dev, flag, devtype);
	rv = (*d->d_cancel)(dev, flag, devtype, l);
	SDT_PROBE5(sdt, cdev, cancel, return, d, dev, flag, devtype, rv);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	SDT_PROBE4(sdt, cdev, close, entry, d, dev, flag, devtype);
	rv = (*d->d_close)(dev, flag, devtype, l);
	SDT_PROBE5(sdt, cdev, close, return, d, dev, flag, devtype, rv);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	SDT_PROBE4(sdt, cdev, read, entry, d, dev, uio, flag);
	rv = (*d->d_read)(dev, uio, flag);
	SDT_PROBE5(sdt, cdev, read, return, d, dev, uio, flag, rv);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	SDT_PROBE4(sdt, cdev, write, entry, d, dev, uio, flag);
	rv = (*d->d_write)(dev, uio, flag);
	SDT_PROBE5(sdt, cdev, write, return, d, dev, uio, flag, rv);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	SDT_PROBE5(sdt, cdev, ioctl, entry, d, dev, cmd, data, flag);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	SDT_PROBE6(sdt, cdev, ioctl, return, d, dev, cmd, data, flag, rv);
	DEV_UNLOCK(d);

	return rv;
}

void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	SDT_PROBE4(sdt, cdev, stop, entry, d, tp->t_dev, tp, flag);
	(*d->d_stop)(tp, flag);
	SDT_PROBE4(sdt, cdev, stop, return, d, tp->t_dev, tp, flag);
	DEV_UNLOCK(d);
}

struct tty *
cdev_tty(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return NULL;

	/* XXX Check if necessary. */
	if (d->d_tty == NULL)
		return NULL;

	return (*d->d_tty)(dev);
}

int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	SDT_PROBE3(sdt, cdev, poll, entry, d, dev, flag);
	rv = (*d->d_poll)(dev, flag, l);
	SDT_PROBE4(sdt, cdev, poll, return, d, dev, flag, rv);
	DEV_UNLOCK(d);

	return rv;
}

paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	SDT_PROBE4(sdt, cdev, mmap, entry, d, dev, off, flag);
	rv = (*d->d_mmap)(dev, off, flag);
	SDT_PROBE5(sdt, cdev, mmap, return, d, dev, off, flag, rv);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	SDT_PROBE3(sdt, cdev, kqfilter, entry, d, dev, kn);
	rv = (*d->d_kqfilter)(dev, kn);
	SDT_PROBE4(sdt, cdev, kqfilter, return, d, dev, kn, rv);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_discard(dev_t dev, off_t pos, off_t len)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	SDT_PROBE4(sdt, cdev, discard, entry, d, dev, pos, len);
	rv = (*d->d_discard)(dev, pos, len);
	SDT_PROBE5(sdt, cdev, discard, return, d, dev, pos, len, rv);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_flags(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return 0;
	return d->d_flag & ~D_TYPEMASK;
}

int
cdev_type(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

void
cdev_detached(dev_t dev)
{
	const struct cdevsw *d;
	device_t dv;
	int unit;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return;
	if (d->d_devtounit == NULL)
		return;
	if ((unit = (*d->d_devtounit)(dev)) == -1)
		return;
	if ((dv = device_lookup(d->d_cfdriver, unit)) == NULL)
		return;
	config_detach_commit(dv);
}

/*
 * nommap(dev, off, prot)
 *
 *	mmap routine that always fails, for non-mmappable devices.
 */
paddr_t
nommap(dev_t dev, off_t off, int prot)
{

	return (paddr_t)-1;
}

/*
 * dev_minor_unit(dev)
 *
 *	Returns minor(dev) as an int.  Intended for use with struct
 *	bdevsw, cdevsw::d_devtounit for drivers whose /dev nodes are
 *	implemented by reference to an autoconf instance with the minor
 *	number.
 */
int
dev_minor_unit(dev_t dev)
{

	return minor(dev);
}
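
/*
 * Illustrative sketch (the mydev_* names are hypothetical): a driver
 * whose /dev nodes use the minor number as the autoconf unit number
 * wires up d_devtounit and d_cfdriver so that bdev_open()/cdev_open()
 * can hold the unit stable across d_open:
 *
 *	extern struct cfdriver mydev_cd;
 *
 *	static const struct cdevsw mydev_cdevsw = {
 *		...
 *		.d_devtounit = dev_minor_unit,
 *		.d_cfdriver = &mydev_cd,
 *		...
 *	};
 */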