/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>  All rights reserved.
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Terrence R. Lambert
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Julian R. Elischer,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_device.c,v 1.10 2004/05/13 23:49:23 dillon Exp $
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/msgport.h>
#include <sys/device.h>
#include <machine/stdarg.h>
#include <sys/proc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>

static struct cdevsw	*cdevsw[NUMCDEVSW];
static struct lwkt_port	*cdevport[NUMCDEVSW];

static int cdevsw_putport(lwkt_port_t port, lwkt_msg_t msg);

/*
 * Initialize a message port to serve as the default message-handling port
 * for device operations.  This message port provides compatibility with
 * traditional cdevsw dispatch functions by running them synchronously.
 *
 * YYY NOTE: ms_cmd can now hold a function pointer, should this code be
 * converted from an integer op to a function pointer with a flag to
 * indicate legacy operation?
 */
static void
init_default_cdevsw_port(lwkt_port_t port)
{
	lwkt_initport(port, NULL);
	port->mp_putport = cdevsw_putport;
}

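/*
 * Illustrative sketch of the resulting call flow (comments only, nothing
 * here is compiled).  For a legacy driver whose cdevsw has been run through
 * compile_devsw() below, and whose port has not been overridden, a call
 * such as dev_dopen() resolves as:
 *
 *	dev_dopen(dev, oflags, devtype, td)
 *	  -> lwkt_domsg(csw->d_port, &msg)	d_port is the compat port
 *	    -> cdevsw_putport(port, &msg)	via port->mp_putport
 *	      -> csw->old_open(dev, oflags, devtype, td)
 *
 * and the error code propagates back synchronously through lwkt_domsg().
 */
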
static
int
cdevsw_putport(lwkt_port_t port, lwkt_msg_t lmsg)
{
	cdevallmsg_t msg = (cdevallmsg_t)lmsg;
	struct cdevsw *csw = msg->am_msg.csw;
	int error;

	/*
	 * Run the device switch function synchronously in the context of the
	 * caller and return a synchronous error code (anything not EASYNC).
	 */
	switch(msg->am_lmsg.ms_cmd.cm_op) {
	case CDEV_CMD_OPEN:
		error = csw->old_open(
			    msg->am_open.msg.dev,
			    msg->am_open.oflags,
			    msg->am_open.devtype,
			    msg->am_open.td);
		break;
	case CDEV_CMD_CLOSE:
		error = csw->old_close(
			    msg->am_close.msg.dev,
			    msg->am_close.fflag,
			    msg->am_close.devtype,
			    msg->am_close.td);
		break;
	case CDEV_CMD_STRATEGY:
		csw->old_strategy(msg->am_strategy.bp);
		error = 0;
		break;
	case CDEV_CMD_IOCTL:
		error = csw->old_ioctl(
			    msg->am_ioctl.msg.dev,
			    msg->am_ioctl.cmd,
			    msg->am_ioctl.data,
			    msg->am_ioctl.fflag,
			    msg->am_ioctl.td);
		break;
	case CDEV_CMD_DUMP:
		error = csw->old_dump(msg->am_ioctl.msg.dev);
		break;
	case CDEV_CMD_PSIZE:
		msg->am_psize.result = csw->old_psize(msg->am_psize.msg.dev);
		error = 0;	/* XXX */
		break;
	case CDEV_CMD_READ:
		error = csw->old_read(
			    msg->am_read.msg.dev,
			    msg->am_read.uio,
			    msg->am_read.ioflag);
		break;
	case CDEV_CMD_WRITE:
		error = csw->old_write(
			    msg->am_read.msg.dev,
			    msg->am_read.uio,
			    msg->am_read.ioflag);
		break;
	case CDEV_CMD_POLL:
		msg->am_poll.events = csw->old_poll(
			    msg->am_poll.msg.dev,
			    msg->am_poll.events,
			    msg->am_poll.td);
		error = 0;
		break;
	case CDEV_CMD_KQFILTER:
		msg->am_kqfilter.result = csw->old_kqfilter(
			    msg->am_kqfilter.msg.dev,
			    msg->am_kqfilter.kn);
		error = 0;
		break;
	case CDEV_CMD_MMAP:
		msg->am_mmap.result = csw->old_mmap(
			    msg->am_mmap.msg.dev,
			    msg->am_mmap.offset,
			    msg->am_mmap.nprot);
		error = 0;	/* XXX */
		break;
	default:
		error = ENOSYS;
		break;
	}
	KKASSERT(error != EASYNC);
	return(error);
}

/*
 * These device dispatch functions provide convenient entry points for
 * any code wishing to make a dev call.
 *
 * YYY we ought to be able to optimize the port lookup by caching it in
 * the dev_t structure itself.
 */

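/*
 * Illustrative sketch of a caller using these entry points.  The block is
 * not compiled; the device handle, uio, and flag values are hypothetical,
 * and the headers providing FREAD/S_IFCHR are not necessarily pulled in by
 * this file.
 */
#if 0
static int
example_dev_read(dev_t dev, struct uio *uio, thread_t td)
{
	int error;

	if ((error = dev_dopen(dev, FREAD, S_IFCHR, td)) != 0)
		return (error);
	error = dev_dread(dev, uio, 0);	/* no special ioflag bits */
	dev_dclose(dev, FREAD, S_IFCHR, td);
	return (error);
}
#endif
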
static __inline
struct cdevsw *
_devsw(dev_t dev)
{
	if (dev == NULL)
		return(NULL);
	if (dev->si_devsw)
		return (dev->si_devsw);
	return(cdevsw[major(dev)]);
}

static __inline
lwkt_port_t
_init_cdevmsg(dev_t dev, cdevmsg_t msg, int cmd)
{
	struct cdevsw *csw;

	lwkt_initmsg_simple(&msg->msg, cmd);
	msg->dev = dev;
	msg->csw = csw = _devsw(dev);
	if (csw != NULL) {			/* YYY too hackish */
		KKASSERT(csw->d_port);		/* YYY too hackish */
		if (cdevport[major(dev)])	/* YYY too hackish */
			return(cdevport[major(dev)]);
		return(csw->d_port);
	}
	return(NULL);
}

int
dev_dopen(dev_t dev, int oflags, int devtype, thread_t td)
{
	struct cdevmsg_open msg;
	lwkt_port_t port;

	port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_OPEN);
	if (port == NULL)
		return(ENXIO);
	msg.oflags = oflags;
	msg.devtype = devtype;
	msg.td = td;
	return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_dclose(dev_t dev, int fflag, int devtype, thread_t td)
{
	struct cdevmsg_close msg;
	lwkt_port_t port;

	port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_CLOSE);
	if (port == NULL)
		return(ENXIO);
	msg.fflag = fflag;
	msg.devtype = devtype;
	msg.td = td;
	return(lwkt_domsg(port, &msg.msg.msg));
}

void
dev_dstrategy(dev_t dev, struct buf *bp)
{
	struct cdevmsg_strategy msg;
	lwkt_port_t port;

	port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_STRATEGY);
	KKASSERT(port);	/* 'nostrategy' function is NULL YYY */
	msg.bp = bp;
	lwkt_domsg(port, &msg.msg.msg);
}

int
dev_dioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, thread_t td)
{
	struct cdevmsg_ioctl msg;
	lwkt_port_t port;

	port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_IOCTL);
	if (port == NULL)
		return(ENXIO);
	msg.cmd = cmd;
	msg.data = data;
	msg.fflag = fflag;
	msg.td = td;
	return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_ddump(dev_t dev)
{
	struct cdevmsg_dump msg;
	lwkt_port_t port;

	port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_DUMP);
	if (port == NULL)
		return(ENXIO);
	return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_dpsize(dev_t dev)
{
	struct cdevmsg_psize msg;
	lwkt_port_t port;
	int error;

	port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_PSIZE);
	if (port == NULL)
		return(-1);
	error = lwkt_domsg(port, &msg.msg.msg);
	if (error == 0)
		return(msg.result);
	return(-1);
}

int
dev_dread(dev_t dev, struct uio *uio, int ioflag)
{
	struct cdevmsg_read msg;
	lwkt_port_t port;

	port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_READ);
	if (port == NULL)
		return(ENXIO);
	msg.uio = uio;
	msg.ioflag = ioflag;
	return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_dwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct cdevmsg_write msg;
	lwkt_port_t port;

	port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_WRITE);
	if (port == NULL)
		return(ENXIO);
	msg.uio = uio;
	msg.ioflag = ioflag;
	return(lwkt_domsg(port, &msg.msg.msg));
}

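/*
 * Note: like dev_dpsize() above, the wrappers below (poll, kqfilter, mmap)
 * return their result through the message rather than as the lwkt_domsg()
 * error code, and fall back to a safe default when the message fails.
 */
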
int
dev_dpoll(dev_t dev, int events, thread_t td)
{
	struct cdevmsg_poll msg;
	lwkt_port_t port;
	int error;

	port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_POLL);
	if (port == NULL)
		return(ENXIO);
	msg.events = events;
	msg.td = td;
	error = lwkt_domsg(port, &msg.msg.msg);
	if (error == 0)
		return(msg.events);
	return(seltrue(dev, msg.events, td));
}

int
dev_dkqfilter(dev_t dev, struct knote *kn)
{
	struct cdevmsg_kqfilter msg;
	lwkt_port_t port;
	int error;

	port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_KQFILTER);
	if (port == NULL)
		return(ENXIO);
	msg.kn = kn;
	error = lwkt_domsg(port, &msg.msg.msg);
	if (error == 0)
		return(msg.result);
	return(ENODEV);
}

int
dev_dmmap(dev_t dev, vm_offset_t offset, int nprot)
{
	struct cdevmsg_mmap msg;
	lwkt_port_t port;
	int error;

	port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_MMAP);
	if (port == NULL)
		return(-1);
	msg.offset = offset;
	msg.nprot = nprot;
	error = lwkt_domsg(port, &msg.msg.msg);
	if (error == 0)
		return(msg.result);
	return(-1);
}

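/*
 * The dev_port_*() variants below mirror the wrappers above, but the caller
 * supplies the target message port explicitly (e.g. one obtained earlier
 * via dev_dport() or cdevsw_add_override()).  The message is still set up
 * with _init_cdevmsg(); the port that routine would have selected is simply
 * ignored in favor of the caller's.
 */
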
int
dev_port_dopen(lwkt_port_t port, dev_t dev, int oflags, int devtype, thread_t td)
{
	struct cdevmsg_open msg;

	_init_cdevmsg(dev, &msg.msg, CDEV_CMD_OPEN);
	if (port == NULL)
		return(ENXIO);
	msg.oflags = oflags;
	msg.devtype = devtype;
	msg.td = td;
	return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_port_dclose(lwkt_port_t port, dev_t dev, int fflag, int devtype, thread_t td)
{
	struct cdevmsg_close msg;

	_init_cdevmsg(dev, &msg.msg, CDEV_CMD_CLOSE);
	if (port == NULL)
		return(ENXIO);
	msg.fflag = fflag;
	msg.devtype = devtype;
	msg.td = td;
	return(lwkt_domsg(port, &msg.msg.msg));
}

void
dev_port_dstrategy(lwkt_port_t port, dev_t dev, struct buf *bp)
{
	struct cdevmsg_strategy msg;

	_init_cdevmsg(dev, &msg.msg, CDEV_CMD_STRATEGY);
	KKASSERT(port);	/* 'nostrategy' function is NULL YYY */
	msg.bp = bp;
	lwkt_domsg(port, &msg.msg.msg);
}

int
dev_port_dioctl(lwkt_port_t port, dev_t dev, u_long cmd, caddr_t data, int fflag, thread_t td)
{
	struct cdevmsg_ioctl msg;

	_init_cdevmsg(dev, &msg.msg, CDEV_CMD_IOCTL);
	if (port == NULL)
		return(ENXIO);
	msg.cmd = cmd;
	msg.data = data;
	msg.fflag = fflag;
	msg.td = td;
	return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_port_ddump(lwkt_port_t port, dev_t dev)
{
	struct cdevmsg_dump msg;

	_init_cdevmsg(dev, &msg.msg, CDEV_CMD_DUMP);
	if (port == NULL)
		return(ENXIO);
	return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_port_dpsize(lwkt_port_t port, dev_t dev)
{
	struct cdevmsg_psize msg;
	int error;

	_init_cdevmsg(dev, &msg.msg, CDEV_CMD_PSIZE);
	if (port == NULL)
		return(-1);
	error = lwkt_domsg(port, &msg.msg.msg);
	if (error == 0)
		return(msg.result);
	return(-1);
}

int
dev_port_dread(lwkt_port_t port, dev_t dev, struct uio *uio, int ioflag)
{
	struct cdevmsg_read msg;

	_init_cdevmsg(dev, &msg.msg, CDEV_CMD_READ);
	if (port == NULL)
		return(ENXIO);
	msg.uio = uio;
	msg.ioflag = ioflag;
	return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_port_dwrite(lwkt_port_t port, dev_t dev, struct uio *uio, int ioflag)
{
	struct cdevmsg_write msg;

	_init_cdevmsg(dev, &msg.msg, CDEV_CMD_WRITE);
	if (port == NULL)
		return(ENXIO);
	msg.uio = uio;
	msg.ioflag = ioflag;
	return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_port_dpoll(lwkt_port_t port, dev_t dev, int events, thread_t td)
{
	struct cdevmsg_poll msg;
	int error;

	_init_cdevmsg(dev, &msg.msg, CDEV_CMD_POLL);
	if (port == NULL)
		return(ENXIO);
	msg.events = events;
	msg.td = td;
	error = lwkt_domsg(port, &msg.msg.msg);
	if (error == 0)
		return(msg.events);
	return(seltrue(dev, msg.events, td));
}

int
dev_port_dkqfilter(lwkt_port_t port, dev_t dev, struct knote *kn)
{
	struct cdevmsg_kqfilter msg;
	int error;

	_init_cdevmsg(dev, &msg.msg, CDEV_CMD_KQFILTER);
	if (port == NULL)
		return(ENXIO);
	msg.kn = kn;
	error = lwkt_domsg(port, &msg.msg.msg);
	if (error == 0)
		return(msg.result);
	return(ENODEV);
}

int
dev_port_dmmap(lwkt_port_t port, dev_t dev, vm_offset_t offset, int nprot)
{
	struct cdevmsg_mmap msg;
	int error;

	_init_cdevmsg(dev, &msg.msg, CDEV_CMD_MMAP);
	if (port == NULL)
		return(-1);
	msg.offset = offset;
	msg.nprot = nprot;
	error = lwkt_domsg(port, &msg.msg.msg);
	if (error == 0)
		return(msg.result);
	return(-1);
}

const char *
dev_dname(dev_t dev)
{
	struct cdevsw *csw;

	if ((csw = _devsw(dev)) != NULL)
		return(csw->d_name);
	return(NULL);
}

int
dev_dflags(dev_t dev)
{
	struct cdevsw *csw;

	if ((csw = _devsw(dev)) != NULL)
		return(csw->d_flags);
	return(0);
}

int
dev_dmaj(dev_t dev)
{
	struct cdevsw *csw;

	if ((csw = _devsw(dev)) != NULL)
		return(csw->d_maj);
	return(0);
}

lwkt_port_t
dev_dport(dev_t dev)
{
	struct cdevsw *csw;

	if ((csw = _devsw(dev)) != NULL) {
		if (cdevport[major(dev)])	/* YYY too hackish */
			return(cdevport[major(dev)]);
		return(csw->d_port);
	}
	return(NULL);
}

#if 0
/*
 * cdevsw[] array functions, moved from kern/kern_conf.c
 */
struct cdevsw *
devsw(dev_t dev)
{
	return(_devsw(dev));
}
#endif

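/*
 * Illustrative sketch (not compiled) of the registration path implemented
 * below: a driver fills in a cdevsw template and hands it to cdevsw_add(),
 * which runs it through compile_devsw() to default any entry points left
 * NULL.  Every name and value in this block is hypothetical, and hooking
 * example_drvinit() into module/SYSINIT initialization is omitted.
 */
#if 0
#define EXAMPLE_CDEV_MAJOR	200	/* hypothetical; must be < NUMCDEVSW */

static int	example_open(dev_t dev, int oflags, int devtype, thread_t td);
static int	example_read(dev_t dev, struct uio *uio, int ioflag);

static struct cdevsw example_cdevsw = {
	.d_name = "example",
	.d_maj = EXAMPLE_CDEV_MAJOR,
	.d_flags = 0,
	.d_port = NULL,		/* compile_devsw() supplies the compat port */
	.old_open = example_open,
	.old_read = example_read,
	/* entry points left NULL are defaulted by compile_devsw() */
};

static void
example_drvinit(void *unused)
{
	cdevsw_add(&example_cdevsw);
}
#endif
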
/*
 * Convert a cdevsw template into the real thing, filling in fields the
 * device left empty with appropriate defaults.
 */
void
compile_devsw(struct cdevsw *devsw)
{
	static lwkt_port devsw_compat_port;

	if (devsw_compat_port.mp_putport == NULL)
		init_default_cdevsw_port(&devsw_compat_port);

	if (devsw->old_open == NULL)
		devsw->old_open = noopen;
	if (devsw->old_close == NULL)
		devsw->old_close = noclose;
	if (devsw->old_read == NULL)
		devsw->old_read = noread;
	if (devsw->old_write == NULL)
		devsw->old_write = nowrite;
	if (devsw->old_ioctl == NULL)
		devsw->old_ioctl = noioctl;
	if (devsw->old_poll == NULL)
		devsw->old_poll = nopoll;
	if (devsw->old_mmap == NULL)
		devsw->old_mmap = nommap;
	if (devsw->old_strategy == NULL)
		devsw->old_strategy = nostrategy;
	if (devsw->old_dump == NULL)
		devsw->old_dump = nodump;
	if (devsw->old_psize == NULL)
		devsw->old_psize = nopsize;
	if (devsw->old_kqfilter == NULL)
		devsw->old_kqfilter = nokqfilter;

	if (devsw->d_port == NULL)
		devsw->d_port = &devsw_compat_port;
}

/*
 * Add a cdevsw entry
 */
int
cdevsw_add(struct cdevsw *newentry)
{
	compile_devsw(newentry);
	if (newentry->d_maj < 0 || newentry->d_maj >= NUMCDEVSW) {
		printf("%s: ERROR: driver has bogus cdevsw->d_maj = %d\n",
		    newentry->d_name, newentry->d_maj);
		return (EINVAL);
	}
	if (cdevsw[newentry->d_maj]) {
		printf("WARNING: \"%s\" is usurping \"%s\"'s cdevsw[]\n",
		    newentry->d_name, cdevsw[newentry->d_maj]->d_name);
	}
	cdevsw[newentry->d_maj] = newentry;
	return (0);
}

/*
 * Add a cdevsw entry and override the port.
 */
lwkt_port_t
cdevsw_add_override(struct cdevsw *newentry, lwkt_port_t port)
{
	int error;

	if ((error = cdevsw_add(newentry)) == 0)
		cdevport[newentry->d_maj] = port;
	return(newentry->d_port);
}

lwkt_port_t
cdevsw_dev_override(dev_t dev, lwkt_port_t port)
{
	struct cdevsw *csw;

	KKASSERT(major(dev) >= 0 && major(dev) < NUMCDEVSW);
	if ((csw = _devsw(dev)) != NULL) {
		cdevport[major(dev)] = port;
		return(csw->d_port);
	}
	return(NULL);
}

/*
 * Remove a cdevsw entry
 */
int
cdevsw_remove(struct cdevsw *oldentry)
{
	if (oldentry->d_maj < 0 || oldentry->d_maj >= NUMCDEVSW) {
		printf("%s: ERROR: driver has bogus cdevsw->d_maj = %d\n",
		    oldentry->d_name, oldentry->d_maj);
		return EINVAL;
	}
	cdevsw[oldentry->d_maj] = NULL;
	cdevport[oldentry->d_maj] = NULL;
	return 0;
}