/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Terrence R. Lambert
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Julian R. Elischer,
 *							All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_device.c,v 1.9 2004/04/20 01:52:22 dillon Exp $
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/msgport.h>
#include <sys/device.h>
#include <machine/stdarg.h>
#include <sys/proc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>

static struct cdevsw 	*cdevsw[NUMCDEVSW];
static struct lwkt_port	*cdevport[NUMCDEVSW];

static int cdevsw_putport(lwkt_port_t port, lwkt_msg_t msg);

/*
 * Initialize a message port to serve as the default message-handling port
 * for device operations.  This message port provides compatibility with
 * traditional cdevsw dispatch functions.  There are two primary modes:
 *
 * mp_td is NULL:  The d_autoq mask is ignored and all messages are translated
 * 		   directly into synchronous cdevsw calls.
 *
 * mp_td not NULL: The d_autoq mask is used to determine which messages should
 *		   be queued and which should be handled synchronously.
 *
 * An illustrative (non-compiled) sketch of a driver template that sets
 * d_autoq follows init_default_cdevsw_port() below.
 *
 * Don't worry too much about optimizing this code; the critical devices
 * will implement their own port messaging functions directly.
 *
 * YYY NOTE: ms_cmd can now hold a function pointer; should this code be
 * converted from an integer op to a function pointer with a flag to
 * indicate legacy operation?
 */
static void
init_default_cdevsw_port(lwkt_port_t port)
{
    lwkt_initport(port, NULL);
    port->mp_putport = cdevsw_putport;
}
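
#if 0
/*
 * Illustrative sketch only (not part of the build): a hypothetical driver
 * template for the dispatch scheme above.  "foo", FOO_CDEV_MAJOR and the
 * foo_*() handlers are made-up names.  d_autoq marks the commands that a
 * port with a service thread would queue (here only strategy); everything
 * else is dispatched synchronously by cdevsw_putport().  Handlers left
 * NULL and d_port are filled in later by compile_devsw().
 */
static struct cdevsw foo_cdevsw = {
	.d_name		= "foo",
	.d_maj		= FOO_CDEV_MAJOR,
	.d_flags	= 0,
	.d_autoq	= 1 << (CDEV_CMD_STRATEGY & MSG_SUBCMD_MASK),
	.old_open	= foo_open,
	.old_close	= foo_close,
	.old_read	= foo_read,
	.old_write	= foo_write,
	.old_strategy	= foo_strategy
};
#endif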

static
int
cdevsw_putport(lwkt_port_t port, lwkt_msg_t lmsg)
{
    cdevallmsg_t msg = (cdevallmsg_t)lmsg;
    struct cdevsw *csw = msg->am_msg.csw;
    int error;

    /*
     * If queueable then officially queue the message
     */
    if (port->mp_td) {
	int mask = (1 << (msg->am_lmsg.ms_cmd.cm_op & MSG_SUBCMD_MASK));
	if (csw->d_autoq & mask)
	    return(lwkt_beginmsg(port, &msg->am_lmsg));
    }

    /*
     * Run the device switch function synchronously in the context of the
     * caller and return a synchronous error code (anything not EASYNC).
     */
    switch(msg->am_lmsg.ms_cmd.cm_op) {
    case CDEV_CMD_OPEN:
	error = csw->old_open(
		    msg->am_open.msg.dev,
		    msg->am_open.oflags,
		    msg->am_open.devtype,
		    msg->am_open.td);
	break;
    case CDEV_CMD_CLOSE:
	error = csw->old_close(
		    msg->am_close.msg.dev,
		    msg->am_close.fflag,
		    msg->am_close.devtype,
		    msg->am_close.td);
	break;
    case CDEV_CMD_STRATEGY:
	csw->old_strategy(msg->am_strategy.bp);
	error = 0;
	break;
    case CDEV_CMD_IOCTL:
	error = csw->old_ioctl(
		    msg->am_ioctl.msg.dev,
		    msg->am_ioctl.cmd,
		    msg->am_ioctl.data,
		    msg->am_ioctl.fflag,
		    msg->am_ioctl.td);
	break;
    case CDEV_CMD_DUMP:
	/* dev lives in the common cdevmsg header shared by all variants */
	error = csw->old_dump(msg->am_ioctl.msg.dev);
	break;
    case CDEV_CMD_PSIZE:
	msg->am_psize.result = csw->old_psize(msg->am_psize.msg.dev);
	error = 0;	/* XXX */
	break;
    case CDEV_CMD_READ:
	error = csw->old_read(
		    msg->am_read.msg.dev,
		    msg->am_read.uio,
		    msg->am_read.ioflag);
	break;
    case CDEV_CMD_WRITE:
	/* the write message shares the read message's uio/ioflag layout */
	error = csw->old_write(
		    msg->am_read.msg.dev,
		    msg->am_read.uio,
		    msg->am_read.ioflag);
	break;
    case CDEV_CMD_POLL:
	msg->am_poll.events = csw->old_poll(
				msg->am_poll.msg.dev,
				msg->am_poll.events,
				msg->am_poll.td);
	error = 0;
	break;
    case CDEV_CMD_KQFILTER:
	msg->am_kqfilter.result = csw->old_kqfilter(
				msg->am_kqfilter.msg.dev,
				msg->am_kqfilter.kn);
	error = 0;
	break;
    case CDEV_CMD_MMAP:
	msg->am_mmap.result = csw->old_mmap(
		    msg->am_mmap.msg.dev,
		    msg->am_mmap.offset,
		    msg->am_mmap.nprot);
	error = 0;	/* XXX */
	break;
    default:
	error = ENOSYS;
	break;
    }
    KKASSERT(error != EASYNC);
    return(error);
}

/*
 * These device dispatch functions provide convenient entry points for
 * any code wishing to make a dev call.  An illustrative (non-compiled)
 * usage sketch follows the dev_d*() wrappers below.
 *
 * YYY we ought to be able to optimize the port lookup by caching it in
 * the dev_t structure itself.
 */
static __inline
struct cdevsw *
_devsw(dev_t dev)
{
    if (dev == NULL)
	return(NULL);
    if (dev->si_devsw)
	return (dev->si_devsw);
    return(cdevsw[major(dev)]);
}

static __inline
lwkt_port_t
_init_cdevmsg(dev_t dev, cdevmsg_t msg, int cmd)
{
    struct cdevsw *csw;

    lwkt_initmsg_simple(&msg->msg, cmd);
    msg->dev = dev;
    msg->csw = csw = _devsw(dev);
    if (csw != NULL) {			/* YYY too hackish */
	KKASSERT(csw->d_port);		/* YYY too hackish */
	if (cdevport[major(dev)])	/* YYY too hackish */
	    return(cdevport[major(dev)]);
	return(csw->d_port);
    }
    return(NULL);
}

int
dev_dopen(dev_t dev, int oflags, int devtype, thread_t td)
{
    struct cdevmsg_open	msg;
    lwkt_port_t port;

    port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_OPEN);
    if (port == NULL)
	return(ENXIO);
    msg.oflags = oflags;
    msg.devtype = devtype;
    msg.td = td;
    return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_dclose(dev_t dev, int fflag, int devtype, thread_t td)
{
    struct cdevmsg_close msg;
    lwkt_port_t port;

    port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_CLOSE);
    if (port == NULL)
	return(ENXIO);
    msg.fflag = fflag;
    msg.devtype = devtype;
    msg.td = td;
    return(lwkt_domsg(port, &msg.msg.msg));
}

void
dev_dstrategy(dev_t dev, struct buf *bp)
{
    struct cdevmsg_strategy msg;
    lwkt_port_t port;

    port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_STRATEGY);
    KKASSERT(port);	/* 'nostrategy' function is NULL YYY */
    msg.bp = bp;
    lwkt_domsg(port, &msg.msg.msg);
}

int
dev_dioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, thread_t td)
{
    struct cdevmsg_ioctl msg;
    lwkt_port_t port;

    port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_IOCTL);
    if (port == NULL)
	return(ENXIO);
    msg.cmd = cmd;
    msg.data = data;
    msg.fflag = fflag;
    msg.td = td;
    return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_ddump(dev_t dev)
{
    struct cdevmsg_dump	msg;
    lwkt_port_t port;

    port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_DUMP);
    if (port == NULL)
	return(ENXIO);
    return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_dpsize(dev_t dev)
{
    struct cdevmsg_psize msg;
    lwkt_port_t port;
    int error;

    port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_PSIZE);
    if (port == NULL)
	return(-1);
    error = lwkt_domsg(port, &msg.msg.msg);
    if (error == 0)
	return(msg.result);
    return(-1);
}

int
dev_dread(dev_t dev, struct uio *uio, int ioflag)
{
    struct cdevmsg_read msg;
    lwkt_port_t port;

    port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_READ);
    if (port == NULL)
	return(ENXIO);
    msg.uio = uio;
    msg.ioflag = ioflag;
    return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_dwrite(dev_t dev, struct uio *uio, int ioflag)
{
    struct cdevmsg_write msg;
    lwkt_port_t port;

    port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_WRITE);
    if (port == NULL)
	return(ENXIO);
    msg.uio = uio;
    msg.ioflag = ioflag;
    return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_dpoll(dev_t dev, int events, thread_t td)
{
    struct cdevmsg_poll msg;
    lwkt_port_t port;
    int error;

    port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_POLL);
    if (port == NULL)
	return(ENXIO);
    msg.events = events;
    msg.td = td;
    error = lwkt_domsg(port, &msg.msg.msg);
    if (error == 0)
	return(msg.events);
    return(seltrue(dev, msg.events, td));
}

int
dev_dkqfilter(dev_t dev, struct knote *kn)
{
    struct cdevmsg_kqfilter msg;
    lwkt_port_t port;
    int error;

    port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_KQFILTER);
    if (port == NULL)
	return(ENXIO);
    msg.kn = kn;
    error = lwkt_domsg(port, &msg.msg.msg);
    if (error == 0)
	return(msg.result);
    return(ENODEV);
}

int
dev_dmmap(dev_t dev, vm_offset_t offset, int nprot)
{
    struct cdevmsg_mmap msg;
    lwkt_port_t port;
    int error;

    port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_MMAP);
    if (port == NULL)
	return(-1);
    msg.offset = offset;
    msg.nprot = nprot;
    error = lwkt_domsg(port, &msg.msg.msg);
    if (error == 0)
	return(msg.result);
    return(-1);
}

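#if 0
/*
 * Illustrative sketch only (not part of the build): how in-kernel code
 * might drive a character device through the wrappers above.  "somedev"
 * and this helper are hypothetical; each wrapper builds the matching
 * cdevmsg, resolves the target port via _init_cdevmsg() and issues a
 * synchronous lwkt_domsg().
 */
static int
dev_dispatch_example(dev_t somedev, struct uio *uio, thread_t td)
{
    int error;

    error = dev_dopen(somedev, FREAD, S_IFCHR, td);
    if (error == 0) {
	error = dev_dread(somedev, uio, 0);
	dev_dclose(somedev, FREAD, S_IFCHR, td);
    }
    return (error);
}
#endif
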
int
dev_port_dopen(lwkt_port_t port, dev_t dev, int oflags, int devtype, thread_t td)
{
    struct cdevmsg_open	msg;

    _init_cdevmsg(dev, &msg.msg, CDEV_CMD_OPEN);
    if (port == NULL)
	return(ENXIO);
    msg.oflags = oflags;
    msg.devtype = devtype;
    msg.td = td;
    return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_port_dclose(lwkt_port_t port, dev_t dev, int fflag, int devtype, thread_t td)
{
    struct cdevmsg_close msg;

    _init_cdevmsg(dev, &msg.msg, CDEV_CMD_CLOSE);
    if (port == NULL)
	return(ENXIO);
    msg.fflag = fflag;
    msg.devtype = devtype;
    msg.td = td;
    return(lwkt_domsg(port, &msg.msg.msg));
}

void
dev_port_dstrategy(lwkt_port_t port, dev_t dev, struct buf *bp)
{
    struct cdevmsg_strategy msg;

    _init_cdevmsg(dev, &msg.msg, CDEV_CMD_STRATEGY);
    KKASSERT(port);	/* 'nostrategy' function is NULL YYY */
    msg.bp = bp;
    lwkt_domsg(port, &msg.msg.msg);
}

int
dev_port_dioctl(lwkt_port_t port, dev_t dev, u_long cmd, caddr_t data, int fflag, thread_t td)
{
    struct cdevmsg_ioctl msg;

    _init_cdevmsg(dev, &msg.msg, CDEV_CMD_IOCTL);
    if (port == NULL)
	return(ENXIO);
    msg.cmd = cmd;
    msg.data = data;
    msg.fflag = fflag;
    msg.td = td;
    return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_port_ddump(lwkt_port_t port, dev_t dev)
{
    struct cdevmsg_dump	msg;

    _init_cdevmsg(dev, &msg.msg, CDEV_CMD_DUMP);
    if (port == NULL)
	return(ENXIO);
    return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_port_dpsize(lwkt_port_t port, dev_t dev)
{
    struct cdevmsg_psize msg;
    int error;

    _init_cdevmsg(dev, &msg.msg, CDEV_CMD_PSIZE);
    if (port == NULL)
	return(-1);
    error = lwkt_domsg(port, &msg.msg.msg);
    if (error == 0)
	return(msg.result);
    return(-1);
}

int
dev_port_dread(lwkt_port_t port, dev_t dev, struct uio *uio, int ioflag)
{
    struct cdevmsg_read msg;

    _init_cdevmsg(dev, &msg.msg, CDEV_CMD_READ);
    if (port == NULL)
	return(ENXIO);
    msg.uio = uio;
    msg.ioflag = ioflag;
    return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_port_dwrite(lwkt_port_t port, dev_t dev, struct uio *uio, int ioflag)
{
    struct cdevmsg_write msg;

    _init_cdevmsg(dev, &msg.msg, CDEV_CMD_WRITE);
    if (port == NULL)
	return(ENXIO);
    msg.uio = uio;
    msg.ioflag = ioflag;
    return(lwkt_domsg(port, &msg.msg.msg));
}

int
dev_port_dpoll(lwkt_port_t port, dev_t dev, int events, thread_t td)
{
    struct cdevmsg_poll msg;
    int error;

    _init_cdevmsg(dev, &msg.msg, CDEV_CMD_POLL);
    if (port == NULL)
	return(ENXIO);
    msg.events = events;
    msg.td = td;
    error = lwkt_domsg(port, &msg.msg.msg);
    if (error == 0)
	return(msg.events);
    return(seltrue(dev, msg.events, td));
}

int
dev_port_dkqfilter(lwkt_port_t port, dev_t dev, struct knote *kn)
{
    struct cdevmsg_kqfilter msg;
    int error;

    _init_cdevmsg(dev, &msg.msg, CDEV_CMD_KQFILTER);
    if (port == NULL)
	return(ENXIO);
    msg.kn = kn;
    error = lwkt_domsg(port, &msg.msg.msg);
    if (error == 0)
	return(msg.result);
    return(ENODEV);
}

int
dev_port_dmmap(lwkt_port_t port, dev_t dev, vm_offset_t offset, int nprot)
{
    struct cdevmsg_mmap msg;
    int error;

    _init_cdevmsg(dev, &msg.msg, CDEV_CMD_MMAP);
    if (port == NULL)
	return(-1);
    msg.offset = offset;
    msg.nprot = nprot;
    error = lwkt_domsg(port, &msg.msg.msg);
    if (error == 0)
	return(msg.result);
    return(-1);
}

const char *
dev_dname(dev_t dev)
{
    struct cdevsw *csw;

    if ((csw = _devsw(dev)) != NULL)
	return(csw->d_name);
    return(NULL);
}

int
dev_dflags(dev_t dev)
{
    struct cdevsw *csw;

    if ((csw = _devsw(dev)) != NULL)
	return(csw->d_flags);
    return(0);
}

int
dev_dmaj(dev_t dev)
{
    struct cdevsw *csw;

    if ((csw = _devsw(dev)) != NULL)
	return(csw->d_maj);
    return(0);
}

lwkt_port_t
dev_dport(dev_t dev)
{
    struct cdevsw *csw;

    if ((csw = _devsw(dev)) != NULL) {
	if (cdevport[major(dev)])	/* YYY too hackish */
	    return(cdevport[major(dev)]);
	return(csw->d_port);
    }
    return(NULL);
}

#if 0
/*
 * cdevsw[] array functions, moved from kern/kern_conf.c
 */
struct cdevsw *
devsw(dev_t dev)
{
    return(_devsw(dev));
}
#endif

/*
 * Convert a cdevsw template into the real thing, filling in fields the
 * device left empty with appropriate defaults.
 */
void
compile_devsw(struct cdevsw *devsw)
{
    static lwkt_port devsw_compat_port;

    if (devsw_compat_port.mp_putport == NULL)
	init_default_cdevsw_port(&devsw_compat_port);

    if (devsw->old_open == NULL)
	devsw->old_open = noopen;
    if (devsw->old_close == NULL)
	devsw->old_close = noclose;
    if (devsw->old_read == NULL)
	devsw->old_read = noread;
    if (devsw->old_write == NULL)
	devsw->old_write = nowrite;
    if (devsw->old_ioctl == NULL)
	devsw->old_ioctl = noioctl;
    if (devsw->old_poll == NULL)
	devsw->old_poll = nopoll;
    if (devsw->old_mmap == NULL)
	devsw->old_mmap = nommap;
    if (devsw->old_strategy == NULL)
	devsw->old_strategy = nostrategy;
    if (devsw->old_dump == NULL)
	devsw->old_dump = nodump;
    if (devsw->old_psize == NULL)
	devsw->old_psize = nopsize;
    if (devsw->old_kqfilter == NULL)
	devsw->old_kqfilter = nokqfilter;

    if (devsw->d_port == NULL)
	devsw->d_port = &devsw_compat_port;
}

/*
 * Add a cdevsw entry
 */
int
cdevsw_add(struct cdevsw *newentry)
{
    compile_devsw(newentry);
    if (newentry->d_maj < 0 || newentry->d_maj >= NUMCDEVSW) {
	printf("%s: ERROR: driver has bogus cdevsw->d_maj = %d\n",
	    newentry->d_name, newentry->d_maj);
	return (EINVAL);
    }
    if (cdevsw[newentry->d_maj]) {
	printf("WARNING: \"%s\" is usurping \"%s\"'s cdevsw[]\n",
	    newentry->d_name, cdevsw[newentry->d_maj]->d_name);
    }
    cdevsw[newentry->d_maj] = newentry;
    return (0);
}

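#if 0
/*
 * Illustrative sketch only (not part of the build): registering the
 * hypothetical foo_cdevsw sketched earlier.  cdevsw_add() runs
 * compile_devsw() first, so handlers the driver left NULL pick up the
 * no*() defaults and d_port falls back to the compat port; the entry is
 * then hung off cdevsw[] at its major number.
 */
static int
foo_register_example(void)
{
    int error;

    error = cdevsw_add(&foo_cdevsw);
    if (error)
	return (error);
    /* ... make_dev() calls and other driver setup would follow ... */
    return (0);
}
#endif
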
/*
 * Add a cdevsw entry and override the port.
 */
lwkt_port_t
cdevsw_add_override(struct cdevsw *newentry, lwkt_port_t port)
{
    int error;

    if ((error = cdevsw_add(newentry)) == 0)
	cdevport[newentry->d_maj] = port;
    return(newentry->d_port);
}

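#if 0
/*
 * Illustrative sketch only (not part of the build): a driver that wants
 * its messages delivered to a private port rather than the default compat
 * port.  foo_port, and the thread assumed to service it, are hypothetical.
 * cdevsw_add_override() records the port in cdevport[], so dev_dport()
 * and _init_cdevmsg() route subsequent messages for this major number to
 * it instead of the entry's own d_port.
 */
static lwkt_port foo_port;	/* assumed initialized and serviced elsewhere */

static void
foo_register_override_example(void)
{
    cdevsw_add_override(&foo_cdevsw, &foo_port);
}
#endif
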
lwkt_port_t
cdevsw_dev_override(dev_t dev, lwkt_port_t port)
{
    struct cdevsw *csw;

    KKASSERT(major(dev) >= 0 && major(dev) < NUMCDEVSW);
    if ((csw = _devsw(dev)) != NULL) {
	cdevport[major(dev)] = port;
	return(csw->d_port);
    }
    return(NULL);
}

/*
 * Remove a cdevsw entry
 */
int
cdevsw_remove(struct cdevsw *oldentry)
{
    if (oldentry->d_maj < 0 || oldentry->d_maj >= NUMCDEVSW) {
	printf("%s: ERROR: driver has bogus cdevsw->d_maj = %d\n",
	    oldentry->d_name, oldentry->d_maj);
	return EINVAL;
    }
    cdevsw[oldentry->d_maj] = NULL;
    cdevport[oldentry->d_maj] = NULL;
    return 0;
}
685