/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Terrence R. Lambert
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Julian R. Elischer,
 *							All rights reserved.
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_device.c,v 1.24 2007/05/09 00:53:34 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <sys/tree.h>
#include <sys/syslink_rpc.h>
#include <sys/proc.h>
#include <machine/stdarg.h>
#include <sys/thread2.h>

/*
 * System link descriptors identify the command in the
 * arguments structure.
 */
#define DDESCNAME(name) __CONCAT(__CONCAT(dev_,name),_desc)

#define DEVOP_DESC_INIT(name)						\
	    struct syslink_desc DDESCNAME(name) = {			\
		__offsetof(struct dev_ops, __CONCAT(d_, name)),	\
	    #name }

DEVOP_DESC_INIT(default);
DEVOP_DESC_INIT(open);
DEVOP_DESC_INIT(close);
DEVOP_DESC_INIT(read);
DEVOP_DESC_INIT(write);
DEVOP_DESC_INIT(ioctl);
DEVOP_DESC_INIT(dump);
DEVOP_DESC_INIT(psize);
DEVOP_DESC_INIT(poll);
DEVOP_DESC_INIT(mmap);
DEVOP_DESC_INIT(strategy);
DEVOP_DESC_INIT(kqfilter);
DEVOP_DESC_INIT(clone);

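/*
 * For reference, DEVOP_DESC_INIT(open) above expands to (roughly):
 *
 *	struct syslink_desc dev_open_desc = {
 *		__offsetof(struct dev_ops, d_open),
 *		"open"
 *	};
 *
 * Each descriptor thus records the byte offset of its function pointer
 * within struct dev_ops plus a name; dev_doperate() below uses the offset
 * to dispatch generically.
 */
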
/*
 * Misc default ops
 */
struct dev_ops dead_dev_ops;

struct dev_ops default_dev_ops = {
	{ "null" },
	.d_default = NULL,	/* must be NULL */
	.d_open = noopen,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_strategy = nostrategy,
	.d_dump = nodump,
	.d_psize = nopsize,
	.d_kqfilter = nokqfilter,
	.d_clone = noclone
};

/************************************************************************
 *			GENERAL DEVICE API FUNCTIONS			*
 ************************************************************************/

int
dev_dopen(cdev_t dev, int oflags, int devtype, struct ucred *cred)
{
	struct dev_open_args ap;

	ap.a_head.a_desc = &dev_open_desc;
	ap.a_head.a_dev = dev;
	ap.a_oflags = oflags;
	ap.a_devtype = devtype;
	ap.a_cred = cred;
	return(dev->si_ops->d_open(&ap));
}

int
dev_dclose(cdev_t dev, int fflag, int devtype)
{
	struct dev_close_args ap;

	ap.a_head.a_desc = &dev_close_desc;
	ap.a_head.a_dev = dev;
	ap.a_fflag = fflag;
	ap.a_devtype = devtype;
	return(dev->si_ops->d_close(&ap));
}

int
dev_dread(cdev_t dev, struct uio *uio, int ioflag)
{
	struct dev_read_args ap;
	int error;

	ap.a_head.a_desc = &dev_read_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	error = dev->si_ops->d_read(&ap);
	if (error == 0)
		dev->si_lastread = time_second;
	return (error);
}

int
dev_dwrite(cdev_t dev, struct uio *uio, int ioflag)
{
	struct dev_write_args ap;
	int error;

	dev->si_lastwrite = time_second;
	ap.a_head.a_desc = &dev_write_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	error = dev->si_ops->d_write(&ap);
	return (error);
}

int
dev_dioctl(cdev_t dev, u_long cmd, caddr_t data, int fflag, struct ucred *cred)
{
	struct dev_ioctl_args ap;

	ap.a_head.a_desc = &dev_ioctl_desc;
	ap.a_head.a_dev = dev;
	ap.a_cmd = cmd;
	ap.a_data = data;
	ap.a_fflag = fflag;
	ap.a_cred = cred;
	return(dev->si_ops->d_ioctl(&ap));
}

int
dev_dpoll(cdev_t dev, int events)
{
	struct dev_poll_args ap;
	int error;

	ap.a_head.a_desc = &dev_poll_desc;
	ap.a_head.a_dev = dev;
	ap.a_events = events;
	error = dev->si_ops->d_poll(&ap);
	if (error == 0)
		return(ap.a_events);
	return (seltrue(dev, events));
}

int
dev_dmmap(cdev_t dev, vm_offset_t offset, int nprot)
{
	struct dev_mmap_args ap;
	int error;

	ap.a_head.a_desc = &dev_mmap_desc;
	ap.a_head.a_dev = dev;
	ap.a_offset = offset;
	ap.a_nprot = nprot;
	error = dev->si_ops->d_mmap(&ap);
	if (error == 0)
		return(ap.a_result);
	return(-1);
}

int
dev_dclone(cdev_t dev)
{
	struct dev_clone_args ap;

	ap.a_head.a_desc = &dev_clone_desc;
	ap.a_head.a_dev = dev;
	return (dev->si_ops->d_clone(&ap));
}

/*
 * Core device strategy call, used to issue I/O on a device.  There are
 * two versions, a non-chained version and a chained version.  The chained
 * version reuses a BIO set up by vn_strategy().  The only difference is
 * that, for now, we do not push a new tracking structure when chaining
 * from vn_strategy.  XXX this will ultimately have to change.
 */
void
dev_dstrategy(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;
	struct bio_track *track;

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;

	KKASSERT(bio->bio_track == NULL);
	KKASSERT(bio->bio_buf->b_cmd != BUF_CMD_DONE);
	if (bio->bio_buf->b_cmd == BUF_CMD_READ)
	    track = &dev->si_track_read;
	else
	    track = &dev->si_track_write;
	atomic_add_int(&track->bk_active, 1);
	bio->bio_track = track;
	(void)dev->si_ops->d_strategy(&ap);
}

void
dev_dstrategy_chain(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;

	KKASSERT(bio->bio_track != NULL);
	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;
	(void)dev->si_ops->d_strategy(&ap);
}

/*
 * note: the disk layer is expected to set count, blkno, and secsize before
 * forwarding the message.
 */
int
dev_ddump(cdev_t dev)
{
	struct dev_dump_args ap;

	ap.a_head.a_desc = &dev_dump_desc;
	ap.a_head.a_dev = dev;
	ap.a_count = 0;
	ap.a_blkno = 0;
	ap.a_secsize = 0;
	return(dev->si_ops->d_dump(&ap));
}

int
dev_dpsize(cdev_t dev)
{
	struct dev_psize_args ap;
	int error;

	ap.a_head.a_desc = &dev_psize_desc;
	ap.a_head.a_dev = dev;
	error = dev->si_ops->d_psize(&ap);
	if (error == 0)
		return (ap.a_result);
	return(-1);
}

int
dev_dkqfilter(cdev_t dev, struct knote *kn)
{
	struct dev_kqfilter_args ap;
	int error;

	ap.a_head.a_desc = &dev_kqfilter_desc;
	ap.a_head.a_dev = dev;
	ap.a_kn = kn;
	error = dev->si_ops->d_kqfilter(&ap);
	if (error == 0)
		return(ap.a_result);
	return(ENODEV);
}

/************************************************************************
 *			DEVICE HELPER FUNCTIONS				*
 ************************************************************************/

const char *
dev_dname(cdev_t dev)
{
    return(dev->si_ops->head.name);
}

int
dev_dflags(cdev_t dev)
{
    return(dev->si_ops->head.flags);
}

int
dev_dmaj(cdev_t dev)
{
    return(dev->si_ops->head.maj);
}

/*
 * Used when forwarding a request through layers.  The caller adjusts
 * ap->a_head.a_dev and then calls this function.
 */
int
dev_doperate(struct dev_generic_args *ap)
{
    int (*func)(struct dev_generic_args *);

    func = *(void **)((char *)ap->a_dev->si_ops + ap->a_desc->sd_offset);
    return (func(ap));
}

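/*
 * A minimal sketch of the forwarding pattern described above, assuming a
 * hypothetical layering driver that holds its backing device in
 * "lower_dev" (neither name exists in this file):
 *
 *	static int
 *	mylayer_ioctl(struct dev_ioctl_args *ap)
 *	{
 *		ap->a_head.a_dev = lower_dev;	retarget at backing device
 *		return (dev_doperate(&ap->a_head));
 *	}
 */
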
/*
 * Used by the console intercept code only.  Issue an operation through
 * a foreign ops structure allowing the ops structure associated
 * with the device to remain intact.
 */
int
dev_doperate_ops(struct dev_ops *ops, struct dev_generic_args *ap)
{
    int (*func)(struct dev_generic_args *);

    func = *(void **)((char *)ops + ap->a_desc->sd_offset);
    return (func(ap));
}

/*
 * Convert a template dev_ops into the real thing by filling in
 * uninitialized fields.
 */
void
compile_dev_ops(struct dev_ops *ops)
{
	int offset;

	for (offset = offsetof(struct dev_ops, dev_ops_first_field);
	     offset <= offsetof(struct dev_ops, dev_ops_last_field);
	     offset += sizeof(void *)
	) {
		void **func_p = (void **)((char *)ops + offset);
		void **def_p = (void **)((char *)&default_dev_ops + offset);
		if (*func_p == NULL) {
			if (ops->d_default)
				*func_p = ops->d_default;
			else
				*func_p = *def_p;
		}
	}
}

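/*
 * Example (hypothetical driver, assumed names): a template that fills in
 * only d_open and d_read is completed so every other entry points at the
 * matching no*() stub from default_dev_ops, or at d_default if one was
 * supplied:
 *
 *	static struct dev_ops mydev_ops = {
 *		{ "mydev" },
 *		.d_open = mydev_open,
 *		.d_read = mydev_read
 *	};
 *	compile_dev_ops(&mydev_ops);
 *	now mydev_ops.d_write == nowrite, mydev_ops.d_ioctl == noioctl, ...
 *
 * dev_ops_add() below runs compile_dev_ops() itself, so drivers normally
 * do not call it directly.
 */
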
/************************************************************************
 *			MAJOR/MINOR SPACE FUNCTIONS			*
 ************************************************************************/

/*
 * This makes a dev_ops entry visible to userland (e.g. /dev/<blah>).
 *
 * The kernel can overload a data space by making multiple dev_ops_add()
 * calls, but only the most recent one in the list matching the mask/match
 * will be visible to userland.
 *
 * make_dev() does not automatically call dev_ops_add() (nor do we want it
 * to, since partition-managed disk devices are overloaded on top of the
 * raw device).
 *
 * Disk devices typically register their major, e.g. 'ad0', and then call
 * into the disk label management code which overloads its own onto e.g. 'ad0'
 * to support all the various slice and partition combinations.
 *
 * The mask/match supplied in this call are a full 32 bits and the same
 * mask and match must be specified in a later dev_ops_remove() call to
 * match this add.  However, the match value for the minor number should never
 * have any bits set in the major number's bit range (8-15).  The mask value
 * may be conveniently specified as -1 without creating any major number
 * interference.
 */

static
int
rb_dev_ops_compare(struct dev_ops_maj *a, struct dev_ops_maj *b)
{
    if (a->maj < b->maj)
	return(-1);
    else if (a->maj > b->maj)
	return(1);
    return(0);
}

RB_GENERATE2(dev_ops_rb_tree, dev_ops_maj, rbnode, rb_dev_ops_compare, int, maj);

struct dev_ops_rb_tree dev_ops_rbhead = RB_INITIALIZER(dev_ops_rbhead);

int
dev_ops_add(struct dev_ops *ops, u_int mask, u_int match)
{
    static int next_maj = 256;		/* first dynamic major number */
    struct dev_ops_maj *rbmaj;
    struct dev_ops_link *link;

    compile_dev_ops(ops);
    if (ops->head.maj < 0) {
	while (dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, next_maj) != NULL) {
		if (++next_maj <= 0)
			next_maj = 256;
	}
	ops->head.maj = next_maj;
    }
    rbmaj = dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, ops->head.maj);
    if (rbmaj == NULL) {
	rbmaj = kmalloc(sizeof(*rbmaj), M_DEVBUF, M_INTWAIT | M_ZERO);
	rbmaj->maj = ops->head.maj;
	dev_ops_rb_tree_RB_INSERT(&dev_ops_rbhead, rbmaj);
    }
    for (link = rbmaj->link; link; link = link->next) {
	    /*
	     * If we get an exact match we usurp the target, but we only print
	     * a warning message if a different device switch is installed.
	     */
	    if (link->mask == mask && link->match == match) {
		    if (link->ops != ops) {
			    kprintf("WARNING: \"%s\" (%p) is usurping \"%s\"'s"
				" (%p)\n",
				ops->head.name, ops,
				link->ops->head.name, link->ops);
			    link->ops = ops;
			    ++ops->head.refs;
		    }
		    return(0);
	    }
	    /*
	     * XXX add additional warnings for overlaps
	     */
    }

    link = kmalloc(sizeof(struct dev_ops_link), M_DEVBUF, M_INTWAIT|M_ZERO);
    link->mask = mask;
    link->match = match;
    link->ops = ops;
    link->next = rbmaj->link;
    rbmaj->link = link;
    ++ops->head.refs;
    return(0);
}

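/*
 * Usage sketch (hypothetical driver, not part of this file).  With a mask
 * of -1 the match must equal the minor exactly, so this claims minor 0
 * only; the identical mask/match pair must be passed to dev_ops_remove()
 * at teardown:
 *
 *	dev_ops_add(&mydev_ops, -1, 0);
 *	...
 *	dev_ops_remove(&mydev_ops, -1, 0);
 */
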
/*
 * Should only be used by udev2dev().
 *
 * If the minor number is -1, we match the first ops we find for this
 * major.  If the mask is not -1 then multiple minor numbers can match
 * the same ops.
 *
 * Note that this function will return NULL if the minor number is not within
 * the bounds of the installed mask(s).
 *
 * The specified minor number should NOT include any major bits.
 */
struct dev_ops *
dev_ops_get(int x, int y)
{
	struct dev_ops_maj *rbmaj;
	struct dev_ops_link *link;

	rbmaj = dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, x);
	if (rbmaj == NULL)
		return(NULL);
	for (link = rbmaj->link; link; link = link->next) {
		if (y == -1 || (link->mask & y) == link->match)
			return(link->ops);
	}
	return(NULL);
}

/*
 * Take a cookie cutter to the major/minor device space for the passed
 * device and generate a new dev_ops visible to userland which the caller
 * can then modify.  The original device is not modified but portions of
 * its major/minor space will no longer be visible to userland.
 */
struct dev_ops *
dev_ops_add_override(cdev_t backing_dev, struct dev_ops *template,
		     u_int mask, u_int match)
{
	struct dev_ops *ops;
	struct dev_ops *backing_ops = backing_dev->si_ops;

	ops = kmalloc(sizeof(struct dev_ops), M_DEVBUF, M_INTWAIT);
	*ops = *template;
	ops->head.name = backing_ops->head.name;
	ops->head.maj = backing_ops->head.maj;
	ops->head.flags = backing_ops->head.flags;
	compile_dev_ops(ops);
	dev_ops_add(ops, mask, match);

	return(ops);
}

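/*
 * Sketch of the overlay pattern with assumed names: a disk label layer
 * carves slice/partition minors out of the raw device's space and may
 * then customize the copy it gets back, leaving the raw ops untouched:
 *
 *	struct dev_ops *slice_ops;
 *
 *	slice_ops = dev_ops_add_override(raw_dev, &slice_template,
 *					 mask, match);
 *	slice_ops->d_open = sliceopen;
 */
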
/*
 * Remove all matching dev_ops entries from the per-major RB tree so no
 * new user opens can be performed, and destroy all devices installed in
 * the hash table that are associated with this dev_ops (see
 * destroy_all_devs()).
 *
 * The mask and match should match a previous call to dev_ops_add*().
 */
int
dev_ops_remove(struct dev_ops *ops, u_int mask, u_int match)
{
	struct dev_ops_maj *rbmaj;
	struct dev_ops_link *link;
	struct dev_ops_link **plink;

	if (ops != &dead_dev_ops)
		destroy_all_devs(ops, mask, match);

	rbmaj = dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, ops->head.maj);
	if (rbmaj == NULL) {
		kprintf("double-remove of dev_ops %p for %s(%d)\n",
			ops, ops->head.name, ops->head.maj);
		return(0);
	}
	for (plink = &rbmaj->link; (link = *plink) != NULL;
	     plink = &link->next) {
		if (link->mask == mask && link->match == match) {
			if (link->ops == ops)
				break;
			kprintf("%s: ERROR: cannot remove dev_ops, "
			       "its major number %d was stolen by %s\n",
				ops->head.name, ops->head.maj,
				link->ops->head.name
			);
		}
	}
	if (link == NULL) {
		kprintf("%s(%d)[%08x/%08x]: WARNING: ops removed "
		       "multiple times!\n",
		       ops->head.name, ops->head.maj, mask, match);
	} else {
		*plink = link->next;
		--ops->head.refs; /* XXX ops_release() / record refs */
		kfree(link, M_DEVBUF);
	}

	/*
	 * Scrap the RB tree node for the major number if no ops are
	 * installed any longer.
	 */
	if (rbmaj->link == NULL) {
		dev_ops_rb_tree_RB_REMOVE(&dev_ops_rbhead, rbmaj);
		kfree(rbmaj, M_DEVBUF);
	}

	if (ops->head.refs != 0) {
		kprintf("%s(%d)[%08x/%08x]: Warning: dev_ops_remove() called "
			"while %d device refs still exist!\n",
			ops->head.name, ops->head.maj, mask, match,
			ops->head.refs);
	} else {
		if (bootverbose)
			kprintf("%s: ops removed\n", ops->head.name);
	}
	return 0;
}

/*
 * dev_ops_scan() - Issue a callback for all installed dev_ops structures.
 *
 * The scan will terminate if a callback returns a negative number.
 */
struct dev_ops_scan_info {
	int	(*callback)(struct dev_ops *, void *);
	void	*arg;
};

static
int
dev_ops_scan_callback(struct dev_ops_maj *rbmaj, void *arg)
{
	struct dev_ops_scan_info *info = arg;
	struct dev_ops_link *link;
	int count = 0;
	int r;

	for (link = rbmaj->link; link; link = link->next) {
		r = info->callback(link->ops, info->arg);
		if (r < 0)
			return(r);
		count += r;
	}
	return(count);
}

int
dev_ops_scan(int (*callback)(struct dev_ops *, void *), void *arg)
{
	struct dev_ops_scan_info info = { callback, arg };

	return (dev_ops_rb_tree_RB_SCAN(&dev_ops_rbhead, NULL,
					dev_ops_scan_callback, &info));
}

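/*
 * Example callback (hypothetical): count every installed dev_ops.  Each
 * non-negative return value is accumulated by the scan; returning a
 * negative number would abort it instead:
 *
 *	static int
 *	count_ops_callback(struct dev_ops *ops, void *arg)
 *	{
 *		++*(int *)arg;
 *		return(1);
 *	}
 *
 *	int count = 0;
 *	dev_ops_scan(count_ops_callback, &count);
 */
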
/*
 * Release an ops entry.  When the ref count reaches zero, recurse
 * through the stack.
 */
void
dev_ops_release(struct dev_ops *ops)
{
	--ops->head.refs;
	if (ops->head.refs == 0) {
		/* XXX */
	}
}

struct dev_ops *
dev_ops_intercept(cdev_t dev, struct dev_ops *iops)
{
	struct dev_ops *oops = dev->si_ops;

	compile_dev_ops(iops);
	iops->head.maj = oops->head.maj;
	iops->head.data = oops->head.data;
	iops->head.flags = oops->head.flags;
	dev->si_ops = iops;
	dev->si_flags |= SI_INTERCEPTED;

	return (oops);
}

void
dev_ops_restore(cdev_t dev, struct dev_ops *oops)
{
	struct dev_ops *iops = dev->si_ops;

	dev->si_ops = oops;
	dev->si_flags &= ~SI_INTERCEPTED;
	iops->head.maj = 0;
	iops->head.data = NULL;
	iops->head.flags = 0;
}

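/*
 * The intercept/restore pair brackets a temporary takeover of a device,
 * e.g. by the console code (sketch, assumed names):
 *
 *	struct dev_ops *saved_ops;
 *
 *	saved_ops = dev_ops_intercept(dev, &my_intercept_ops);
 *	...			all ops on dev now route through the copy
 *	dev_ops_restore(dev, saved_ops);
 *
 * dev_ops_intercept() returns the original ops pointer so the caller can
 * hand it back to dev_ops_restore() when done.
 */
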
/************************************************************************
 *			DEFAULT DEV OPS FUNCTIONS			*
 ************************************************************************/

/*
 * Unsupported devswitch functions (e.g. for writing to a read-only device).
 * XXX may belong elsewhere.
 */

int
noclone(struct dev_clone_args *ap)
{
	/* take no action */
	return (0);	/* allow the clone */
}

int
noopen(struct dev_open_args *ap)
{
	return (ENODEV);
}

int
noclose(struct dev_close_args *ap)
{
	return (ENODEV);
}

int
noread(struct dev_read_args *ap)
{
	return (ENODEV);
}

int
nowrite(struct dev_write_args *ap)
{
	return (ENODEV);
}

int
noioctl(struct dev_ioctl_args *ap)
{
	return (ENODEV);
}

int
nokqfilter(struct dev_kqfilter_args *ap)
{
	return (ENODEV);
}

int
nommap(struct dev_mmap_args *ap)
{
	return (ENODEV);
}

int
nopoll(struct dev_poll_args *ap)
{
	ap->a_events = 0;
	return(0);
}

int
nostrategy(struct dev_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;

	bio->bio_buf->b_flags |= B_ERROR;
	bio->bio_buf->b_error = EOPNOTSUPP;
	biodone(bio);
	return(0);
}

int
nopsize(struct dev_psize_args *ap)
{
	ap->a_result = 0;
	return(0);
}

int
nodump(struct dev_dump_args *ap)
{
	return (ENODEV);
}

/*
 * XXX this is probably bogus.  Any device that uses it isn't checking the
 * minor number.
 */
int
nullopen(struct dev_open_args *ap)
{
	return (0);
}

int
nullclose(struct dev_close_args *ap)
{
	return (0);
}