/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Terrence R. Lambert
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Julian R. Elischer,
 *							All rights reserved.
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_device.c,v 1.27 2007/07/23 18:59:50 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <sys/tree.h>
#include <sys/syslink_rpc.h>
#include <sys/proc.h>
#include <machine/stdarg.h>
#include <sys/thread2.h>

/*
 * System link descriptors identify the command in the
 * arguments structure.
 */
#define DDESCNAME(name) __CONCAT(__CONCAT(dev_,name),_desc)

#define DEVOP_DESC_INIT(name)						\
	    struct syslink_desc DDESCNAME(name) = {			\
		__offsetof(struct dev_ops, __CONCAT(d_, name)),	\
	    #name }
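
/*
 * For example, DEVOP_DESC_INIT(open) expands to:
 *
 *	struct syslink_desc dev_open_desc = {
 *		__offsetof(struct dev_ops, d_open),
 *		"open"
 *	};
 */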

DEVOP_DESC_INIT(default);
DEVOP_DESC_INIT(open);
DEVOP_DESC_INIT(close);
DEVOP_DESC_INIT(read);
DEVOP_DESC_INIT(write);
DEVOP_DESC_INIT(ioctl);
DEVOP_DESC_INIT(dump);
DEVOP_DESC_INIT(psize);
DEVOP_DESC_INIT(poll);
DEVOP_DESC_INIT(mmap);
DEVOP_DESC_INIT(strategy);
DEVOP_DESC_INIT(kqfilter);
DEVOP_DESC_INIT(revoke);
DEVOP_DESC_INIT(clone);

/*
 * Misc default ops
 */
struct dev_ops dead_dev_ops;

struct dev_ops default_dev_ops = {
	{ "null" },
	.d_default = NULL,	/* must be NULL */
	.d_open = noopen,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_strategy = nostrategy,
	.d_dump = nodump,
	.d_psize = nopsize,
	.d_kqfilter = nokqfilter,
	.d_revoke = norevoke,
	.d_clone = noclone
};

/************************************************************************
 *			GENERAL DEVICE API FUNCTIONS			*
 ************************************************************************/

int
dev_dopen(cdev_t dev, int oflags, int devtype, struct ucred *cred)
{
	struct dev_open_args ap;

	ap.a_head.a_desc = &dev_open_desc;
	ap.a_head.a_dev = dev;
	ap.a_oflags = oflags;
	ap.a_devtype = devtype;
	ap.a_cred = cred;
	return(dev->si_ops->d_open(&ap));
}

int
dev_dclose(cdev_t dev, int fflag, int devtype)
{
	struct dev_close_args ap;

	ap.a_head.a_desc = &dev_close_desc;
	ap.a_head.a_dev = dev;
	ap.a_fflag = fflag;
	ap.a_devtype = devtype;
	return(dev->si_ops->d_close(&ap));
}

int
dev_dread(cdev_t dev, struct uio *uio, int ioflag)
{
	struct dev_read_args ap;
	int error;

	ap.a_head.a_desc = &dev_read_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	error = dev->si_ops->d_read(&ap);
	if (error == 0)
		dev->si_lastread = time_second;
	return (error);
}

int
dev_dwrite(cdev_t dev, struct uio *uio, int ioflag)
{
	struct dev_write_args ap;
	int error;

	dev->si_lastwrite = time_second;
	ap.a_head.a_desc = &dev_write_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	error = dev->si_ops->d_write(&ap);
	return (error);
}

int
dev_dioctl(cdev_t dev, u_long cmd, caddr_t data, int fflag, struct ucred *cred)
{
	struct dev_ioctl_args ap;

	ap.a_head.a_desc = &dev_ioctl_desc;
	ap.a_head.a_dev = dev;
	ap.a_cmd = cmd;
	ap.a_data = data;
	ap.a_fflag = fflag;
	ap.a_cred = cred;
	return(dev->si_ops->d_ioctl(&ap));
}

int
dev_dpoll(cdev_t dev, int events)
{
	struct dev_poll_args ap;
	int error;

	ap.a_head.a_desc = &dev_poll_desc;
	ap.a_head.a_dev = dev;
	ap.a_events = events;
	error = dev->si_ops->d_poll(&ap);
	if (error == 0)
		return(ap.a_events);
	return (seltrue(dev, events));
}

int
dev_dmmap(cdev_t dev, vm_offset_t offset, int nprot)
{
	struct dev_mmap_args ap;
	int error;

	ap.a_head.a_desc = &dev_mmap_desc;
	ap.a_head.a_dev = dev;
	ap.a_offset = offset;
	ap.a_nprot = nprot;
	error = dev->si_ops->d_mmap(&ap);
	if (error == 0)
		return(ap.a_result);
	return(-1);
}

int
dev_dclone(cdev_t dev)
{
	struct dev_clone_args ap;

	ap.a_head.a_desc = &dev_clone_desc;
	ap.a_head.a_dev = dev;
	return (dev->si_ops->d_clone(&ap));
}

int
dev_drevoke(cdev_t dev)
{
	struct dev_revoke_args ap;

	ap.a_head.a_desc = &dev_revoke_desc;
	ap.a_head.a_dev = dev;
	return (dev->si_ops->d_revoke(&ap));
}

/*
 * Core device strategy call, used to issue I/O on a device.  There are
 * two versions, a non-chained version and a chained version.  The chained
 * version reuses a BIO set up by vn_strategy().  The only difference is
 * that, for now, we do not push a new tracking structure when chaining
 * from vn_strategy.  XXX this will ultimately have to change.
 */
void
dev_dstrategy(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;
	struct bio_track *track;

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;

	KKASSERT(bio->bio_track == NULL);
	KKASSERT(bio->bio_buf->b_cmd != BUF_CMD_DONE);
	if (bio->bio_buf->b_cmd == BUF_CMD_READ)
	    track = &dev->si_track_read;
	else
	    track = &dev->si_track_write;
	bio_track_ref(track);
	bio->bio_track = track;
	(void)dev->si_ops->d_strategy(&ap);
}

void
dev_dstrategy_chain(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;

	KKASSERT(bio->bio_track != NULL);
	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;
	(void)dev->si_ops->d_strategy(&ap);
}

/*
 * note: the disk layer is expected to set count, blkno, and secsize before
 * forwarding the message.
 */
int
dev_ddump(cdev_t dev)
{
	struct dev_dump_args ap;

	ap.a_head.a_desc = &dev_dump_desc;
	ap.a_head.a_dev = dev;
	ap.a_count = 0;
	ap.a_blkno = 0;
	ap.a_secsize = 0;
	return(dev->si_ops->d_dump(&ap));
}

int64_t
dev_dpsize(cdev_t dev)
{
	struct dev_psize_args ap;
	int error;

	ap.a_head.a_desc = &dev_psize_desc;
	ap.a_head.a_dev = dev;
	error = dev->si_ops->d_psize(&ap);
	if (error == 0)
		return (ap.a_result);
	return(-1);
}

int
dev_dkqfilter(cdev_t dev, struct knote *kn)
{
	struct dev_kqfilter_args ap;
	int error;

	ap.a_head.a_desc = &dev_kqfilter_desc;
	ap.a_head.a_dev = dev;
	ap.a_kn = kn;
	error = dev->si_ops->d_kqfilter(&ap);
	if (error == 0)
		return(ap.a_result);
	return(ENODEV);
}

/************************************************************************
 *			DEVICE HELPER FUNCTIONS				*
 ************************************************************************/

/*
 * MPSAFE
 */
int
dev_drefs(cdev_t dev)
{
    return(dev->si_sysref.refcnt);
}

/*
 * MPSAFE
 */
const char *
dev_dname(cdev_t dev)
{
    return(dev->si_ops->head.name);
}

/*
 * MPSAFE
 */
int
dev_dflags(cdev_t dev)
{
    return(dev->si_ops->head.flags);
}

/*
 * MPSAFE
 */
int
dev_dmaj(cdev_t dev)
{
    return(dev->si_ops->head.maj);
}

/*
 * Used when forwarding a request through layers.  The caller adjusts
 * ap->a_head.a_dev and then calls this function.
 */
int
dev_doperate(struct dev_generic_args *ap)
{
    int (*func)(struct dev_generic_args *);

    func = *(void **)((char *)ap->a_dev->si_ops + ap->a_desc->sd_offset);
    return (func(ap));
}
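
/*
 * Illustrative sketch only (the layer and device names are hypothetical,
 * not part of this file): a layered driver forwarding an open to the
 * device below it would adjust a_dev and reissue the operation:
 *
 *	static int
 *	mylayer_open(struct dev_open_args *ap)
 *	{
 *		ap->a_head.a_dev = mylayer_lower_dev;
 *		return (dev_doperate(&ap->a_head));
 *	}
 */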

/*
 * Used by the console intercept code only.  Issue an operation through
 * a foreign ops structure allowing the ops structure associated
 * with the device to remain intact.
 */
int
dev_doperate_ops(struct dev_ops *ops, struct dev_generic_args *ap)
{
    int (*func)(struct dev_generic_args *);

    func = *(void **)((char *)ops + ap->a_desc->sd_offset);
    return (func(ap));
}

/*
 * Convert a template dev_ops into the real thing by filling in
 * uninitialized fields.
 */
void
compile_dev_ops(struct dev_ops *ops)
{
	int offset;

	for (offset = offsetof(struct dev_ops, dev_ops_first_field);
	     offset <= offsetof(struct dev_ops, dev_ops_last_field);
	     offset += sizeof(void *)
	) {
		void **func_p = (void **)((char *)ops + offset);
		void **def_p = (void **)((char *)&default_dev_ops + offset);
		if (*func_p == NULL) {
			if (ops->d_default)
				*func_p = ops->d_default;
			else
				*func_p = *def_p;
		}
	}
}
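
/*
 * For example, a template that supplies only .d_open and leaves
 * .d_default NULL comes out of compile_dev_ops() with every other
 * vector pointing at the corresponding default_dev_ops entry
 * (noread, nowrite, noioctl, and so on).
 */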

/************************************************************************
 *			MAJOR/MINOR SPACE FUNCTIONS			*
 ************************************************************************/

/*
 * This makes a dev_ops entry visible to userland (e.g. /dev/<blah>).
 *
 * The kernel can overload a data space by making multiple dev_ops_add()
 * calls, but only the most recent one in the list matching the mask/match
 * will be visible to userland.
 *
 * make_dev() does not automatically call dev_ops_add() (nor do we want it
 * to, since partition-managed disk devices are overloaded on top of the
 * raw device).
 *
 * Disk devices typically register their major, e.g. 'ad0', and then call
 * into the disk label management code which overloads its own onto e.g. 'ad0'
 * to support all the various slice and partition combinations.
 *
 * The mask/match supplied in this call are a full 32 bits and the same
 * mask and match must be specified in a later dev_ops_remove() call to
 * match this add.  However, the match value for the minor number should never
 * have any bits set in the major number's bit range (8-15).  The mask value
 * may be conveniently specified as -1 without creating any major number
 * interference.
 */
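
/*
 * Illustrative sketch only (the driver names are hypothetical, not part
 * of this file): a driver claiming its entire minor space under a
 * dynamically assigned major (head.maj < 0) might do:
 *
 *	static struct dev_ops foo_ops = {
 *		{ "foo" },
 *		.d_open = foo_open,
 *		.d_close = foo_close
 *	};
 *
 *	foo_ops.head.maj = -1;		(request a dynamic major)
 *	dev_ops_add(&foo_ops, 0, 0);	(mask 0/match 0 matches all minors)
 *	...
 *	dev_ops_remove(&foo_ops, 0, 0);	(must repeat the same mask/match)
 */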

static
int
rb_dev_ops_compare(struct dev_ops_maj *a, struct dev_ops_maj *b)
{
    if (a->maj < b->maj)
	return(-1);
    else if (a->maj > b->maj)
	return(1);
    return(0);
}

RB_GENERATE2(dev_ops_rb_tree, dev_ops_maj, rbnode, rb_dev_ops_compare, int, maj);

struct dev_ops_rb_tree dev_ops_rbhead = RB_INITIALIZER(dev_ops_rbhead);

int
dev_ops_add(struct dev_ops *ops, u_int mask, u_int match)
{
    static int next_maj = 256;		/* first dynamic major number */
    struct dev_ops_maj *rbmaj;
    struct dev_ops_link *link;

    compile_dev_ops(ops);
    if (ops->head.maj < 0) {
	while (dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, next_maj) != NULL) {
		if (++next_maj <= 0)
			next_maj = 256;
	}
	ops->head.maj = next_maj;
    }
    rbmaj = dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, ops->head.maj);
    if (rbmaj == NULL) {
	rbmaj = kmalloc(sizeof(*rbmaj), M_DEVBUF, M_INTWAIT | M_ZERO);
	rbmaj->maj = ops->head.maj;
	dev_ops_rb_tree_RB_INSERT(&dev_ops_rbhead, rbmaj);
    }
    for (link = rbmaj->link; link; link = link->next) {
	    /*
	     * If we get an exact match we usurp the target, but we only print
	     * a warning message if a different device switch is installed.
	     */
	    if (link->mask == mask && link->match == match) {
		    if (link->ops != ops) {
			    kprintf("WARNING: \"%s\" (%p) is usurping \"%s\"'s"
				" (%p)\n",
				ops->head.name, ops,
				link->ops->head.name, link->ops);
			    link->ops = ops;
			    ++ops->head.refs;
		    }
		    return(0);
	    }
	    /*
	     * XXX add additional warnings for overlaps
	     */
    }

    link = kmalloc(sizeof(struct dev_ops_link), M_DEVBUF, M_INTWAIT|M_ZERO);
    link->mask = mask;
    link->match = match;
    link->ops = ops;
    link->next = rbmaj->link;
    rbmaj->link = link;
    ++ops->head.refs;
    return(0);
}

/*
 * Should only be used by udev2dev().
 *
 * If the minor number is -1, we match the first ops we find for this
 * major.  If the mask is not -1 then multiple minor numbers can match
 * the same ops.
 *
 * Note that this function will return NULL if the minor number is not within
 * the bounds of the installed mask(s).
 *
 * The specified minor number should NOT include any major bits.
 */
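
/*
 * For illustration, with hypothetical values: an entry installed with
 * mask 0x0007 and match 0x0001 is returned for minors 1, 9, 17, and so
 * on, since (minor & 0x0007) == 0x0001 holds for each of them.
 */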
struct dev_ops *
dev_ops_get(int x, int y)
{
	struct dev_ops_maj *rbmaj;
	struct dev_ops_link *link;

	rbmaj = dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, x);
	if (rbmaj == NULL)
		return(NULL);
	for (link = rbmaj->link; link; link = link->next) {
		if (y == -1 || (link->mask & y) == link->match)
			return(link->ops);
	}
	return(NULL);
}

/*
 * Take a cookie cutter to the major/minor device space for the passed
 * device and generate a new dev_ops visible to userland which the caller
 * can then modify.  The original device is not modified but portions of
 * its major/minor space will no longer be visible to userland.
 */
struct dev_ops *
dev_ops_add_override(cdev_t backing_dev, struct dev_ops *template,
		     u_int mask, u_int match)
{
	struct dev_ops *ops;
	struct dev_ops *backing_ops = backing_dev->si_ops;

	ops = kmalloc(sizeof(struct dev_ops), M_DEVBUF, M_INTWAIT);
	*ops = *template;
	ops->head.name = backing_ops->head.name;
	ops->head.maj = backing_ops->head.maj;
	ops->head.flags |= backing_ops->head.flags & ~D_TRACKCLOSE;
	compile_dev_ops(ops);
	dev_ops_add(ops, mask, match);

	return(ops);
}

void
dev_ops_remove_override(struct dev_ops *ops, u_int mask, u_int match)
{
	dev_ops_remove(ops, mask, match);
	if (ops->head.refs) {
		kprintf("dev_ops_remove_override: %s still has %d refs!\n",
			ops->head.name, ops->head.refs);
	} else {
		bzero(ops, sizeof(*ops));
		kfree(ops, M_DEVBUF);
	}
}

/*
 * Remove all matching dev_ops entries from the major-number RB tree
 * so no new user opens can be performed, and destroy all devices
 * installed in the hash table that are associated with this dev_ops
 * (see destroy_all_devs()).
 *
 * The mask and match should match a previous call to dev_ops_add*().
 */
int
dev_ops_remove(struct dev_ops *ops, u_int mask, u_int match)
{
	struct dev_ops_maj *rbmaj;
	struct dev_ops_link *link;
	struct dev_ops_link **plink;

	if (ops != &dead_dev_ops)
		destroy_all_devs(ops, mask, match);

	rbmaj = dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, ops->head.maj);
	if (rbmaj == NULL) {
		kprintf("double-remove of dev_ops %p for %s(%d)\n",
			ops, ops->head.name, ops->head.maj);
		return(0);
	}
	for (plink = &rbmaj->link; (link = *plink) != NULL;
	     plink = &link->next) {
		if (link->mask == mask && link->match == match) {
			if (link->ops == ops)
				break;
			kprintf("%s: ERROR: cannot remove dev_ops, "
			       "its major number %d was stolen by %s\n",
				ops->head.name, ops->head.maj,
				link->ops->head.name
			);
		}
	}
	if (link == NULL) {
		kprintf("%s(%d)[%08x/%08x]: WARNING: ops removed "
		       "multiple times!\n",
		       ops->head.name, ops->head.maj, mask, match);
	} else {
		*plink = link->next;
		--ops->head.refs; /* XXX ops_release() / record refs */
		kfree(link, M_DEVBUF);
	}

	/*
	 * Scrap the RB tree node for the major number if no ops are
	 * installed any longer.
	 */
	if (rbmaj->link == NULL) {
		dev_ops_rb_tree_RB_REMOVE(&dev_ops_rbhead, rbmaj);
		kfree(rbmaj, M_DEVBUF);
	}

#if 0
	/*
	 * The same ops might be used with multiple devices, so don't
	 * complain if the ref count is non-zero.
	 */
	if (ops->head.refs != 0) {
		kprintf("%s(%d)[%08x/%08x]: Warning: dev_ops_remove() called "
			"while %d device refs still exist!\n",
			ops->head.name, ops->head.maj, mask, match,
			ops->head.refs);
	} else {
		if (bootverbose)
			kprintf("%s: ops removed\n", ops->head.name);
	}
#endif
	return 0;
}

/*
 * dev_ops_scan() - Issue a callback for all installed dev_ops structures.
 *
 * The scan will terminate if a callback returns a negative number.
 */
struct dev_ops_scan_info {
	int	(*callback)(struct dev_ops *, void *);
	void	*arg;
};

static
int
dev_ops_scan_callback(struct dev_ops_maj *rbmaj, void *arg)
{
	struct dev_ops_scan_info *info = arg;
	struct dev_ops_link *link;
	int count = 0;
	int r;

	for (link = rbmaj->link; link; link = link->next) {
		r = info->callback(link->ops, info->arg);
		if (r < 0)
			return(r);
		count += r;
	}
	return(count);
}

int
dev_ops_scan(int (*callback)(struct dev_ops *, void *), void *arg)
{
	struct dev_ops_scan_info info = { callback, arg };

	return (dev_ops_rb_tree_RB_SCAN(&dev_ops_rbhead, NULL,
					dev_ops_scan_callback, &info));
}

/*
 * Release an ops entry.  When the ref count reaches zero, recurse
 * through the stack.
 */
void
dev_ops_release(struct dev_ops *ops)
{
	--ops->head.refs;
	if (ops->head.refs == 0) {
		/* XXX */
	}
}

struct dev_ops *
dev_ops_intercept(cdev_t dev, struct dev_ops *iops)
{
	struct dev_ops *oops = dev->si_ops;

	compile_dev_ops(iops);
	iops->head.maj = oops->head.maj;
	iops->head.data = oops->head.data;
	iops->head.flags = oops->head.flags;
	dev->si_ops = iops;
	dev->si_flags |= SI_INTERCEPTED;

	return (oops);
}

void
dev_ops_restore(cdev_t dev, struct dev_ops *oops)
{
	struct dev_ops *iops = dev->si_ops;

	dev->si_ops = oops;
	dev->si_flags &= ~SI_INTERCEPTED;
	iops->head.maj = 0;
	iops->head.data = NULL;
	iops->head.flags = 0;
}
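
/*
 * Illustrative pairing only (names are hypothetical, not part of this
 * file): an intercept caller saves the ops returned and restores them
 * when finished:
 *
 *	struct dev_ops *saved_ops;
 *
 *	saved_ops = dev_ops_intercept(dev, &intercept_ops);
 *	...		(calls on dev now route through intercept_ops)
 *	dev_ops_restore(dev, saved_ops);
 */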

/************************************************************************
 *			DEFAULT DEV OPS FUNCTIONS			*
 ************************************************************************/

/*
 * Unsupported devswitch functions (e.g. for writing to a read-only device).
 * XXX may belong elsewhere.
 */
int
norevoke(struct dev_revoke_args *ap)
{
	/* take no action */
	return(0);
}

int
noclone(struct dev_clone_args *ap)
{
	/* take no action */
	return (0);	/* allow the clone */
}

int
noopen(struct dev_open_args *ap)
{
	return (ENODEV);
}

int
noclose(struct dev_close_args *ap)
{
	return (ENODEV);
}

int
noread(struct dev_read_args *ap)
{
	return (ENODEV);
}

int
nowrite(struct dev_write_args *ap)
{
	return (ENODEV);
}

int
noioctl(struct dev_ioctl_args *ap)
{
	return (ENODEV);
}

int
nokqfilter(struct dev_kqfilter_args *ap)
{
	return (ENODEV);
}

int
nommap(struct dev_mmap_args *ap)
{
	return (ENODEV);
}

int
nopoll(struct dev_poll_args *ap)
{
	ap->a_events = 0;
	return(0);
}

int
nostrategy(struct dev_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;

	bio->bio_buf->b_flags |= B_ERROR;
	bio->bio_buf->b_error = EOPNOTSUPP;
	biodone(bio);
	return(0);
}

int
nopsize(struct dev_psize_args *ap)
{
	ap->a_result = 0;
	return(0);
}

int
nodump(struct dev_dump_args *ap)
{
	return (ENODEV);
}

/*
 * XXX this is probably bogus.  Any device that uses it isn't checking the
 * minor number.
 */
int
nullopen(struct dev_open_args *ap)
{
	return (0);
}

int
nullclose(struct dev_close_args *ap)
{
	return (0);
}
824