xref: /netbsd-src/sys/kern/subr_devsw.c (revision 2de962bd804263c16657f586aa00f1704045df8e)
1 /*	$NetBSD: subr_devsw.c,v 1.19 2008/05/19 17:15:00 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2002, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Overview
34  *
35  *	subr_devsw.c: registers device drivers by name and by major
36  *	number, and provides wrapper methods for performing I/O and
37  *	other tasks on device drivers, keying on the device number
38  *	(dev_t).
39  *
40  *	When the system is built, the config(8) command generates
41  *	static tables of device drivers built into the kernel image
42  *	along with their associated methods.  These are recorded in
43  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
44  *	and removed from the system dynamically.
45  *
46  * Allocation
47  *
48  *	When the system initially boots only the statically allocated
49  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
50  *	allocation, we allocate a fixed block of memory to hold the new,
51  *	expanded index.  This "fork" of the table is only ever performed
52  *	once in order to guarantee that other threads may safely access
53  *	the device tables:
54  *
55  *	o Once a thread has a "reference" to the table via an earlier
56  *	  open() call, we know that the entry in the table must exist
57  *	  and so it is safe to access it.
58  *
59  *	o Regardless of whether other threads see the old or new
60  *	  pointers, they will point to a correct device switch
61  *	  structure for the operation being performed.
62  *
63  *	XXX Currently, the wrapper methods such as cdev_read() verify
64  *	that a device driver does in fact exist before calling the
65  *	associated driver method.  This should be changed so that
 66  *	once the device has been referenced by a vnode (opened),
67  *	calling	the other methods should be valid until that reference
68  *	is dropped.
69  */
70 
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.19 2008/05/19 17:15:00 ad Exp $");
73 
74 #include <sys/param.h>
75 #include <sys/conf.h>
76 #include <sys/kmem.h>
77 #include <sys/systm.h>
78 #include <sys/poll.h>
79 #include <sys/tty.h>
80 #include <sys/cpu.h>
81 #include <sys/buf.h>
82 
83 #ifdef DEVSW_DEBUG
84 #define	DPRINTF(x)	printf x
85 #else /* DEVSW_DEBUG */
86 #define	DPRINTF(x)
87 #endif /* DEVSW_DEBUG */
88 
89 #define	MAXDEVSW	512	/* the maximum of major device number */
90 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
91 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
92 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
93 
94 extern const struct bdevsw **bdevsw, *bdevsw0[];
95 extern const struct cdevsw **cdevsw, *cdevsw0[];
96 extern struct devsw_conv *devsw_conv, devsw_conv0[];
97 extern const int sys_bdevsws, sys_cdevsws;
98 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
99 
100 static int bdevsw_attach(const struct bdevsw *, int *);
101 static int cdevsw_attach(const struct cdevsw *, int *);
102 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
103 
104 kmutex_t devsw_lock;
105 
106 void
107 devsw_init(void)
108 {
109 
110 	KASSERT(sys_bdevsws < MAXDEVSW - 1);
111 	KASSERT(sys_cdevsws < MAXDEVSW - 1);
112 
113 	mutex_init(&devsw_lock, MUTEX_DEFAULT, IPL_NONE);
114 }
115 
116 int
117 devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
118 	     const struct cdevsw *cdev, int *cmajor)
119 {
120 	struct devsw_conv *conv;
121 	char *name;
122 	int error, i;
123 
124 	if (devname == NULL || cdev == NULL)
125 		return (EINVAL);
126 
127 	mutex_enter(&devsw_lock);
128 
129 	for (i = 0 ; i < max_devsw_convs ; i++) {
130 		conv = &devsw_conv[i];
131 		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
132 			continue;
133 
134 		if (*bmajor < 0)
135 			*bmajor = conv->d_bmajor;
136 		if (*cmajor < 0)
137 			*cmajor = conv->d_cmajor;
138 
139 		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
140 			error = EINVAL;
141 			goto fail;
142 		}
143 		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
144 			error = EINVAL;
145 			goto fail;
146 		}
147 
148 		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
149 		    cdevsw[*cmajor] != NULL) {
150 			error = EEXIST;
151 			goto fail;
152 		}
153 
154 		if (bdev != NULL)
155 			bdevsw[*bmajor] = bdev;
156 		cdevsw[*cmajor] = cdev;
157 
158 		mutex_exit(&devsw_lock);
159 		return (0);
160 	}
161 
162 	error = bdevsw_attach(bdev, bmajor);
163 	if (error != 0)
164 		goto fail;
165 	error = cdevsw_attach(cdev, cmajor);
166 	if (error != 0) {
167 		devsw_detach_locked(bdev, NULL);
168 		goto fail;
169 	}
170 
171 	for (i = 0 ; i < max_devsw_convs ; i++) {
172 		if (devsw_conv[i].d_name == NULL)
173 			break;
174 	}
175 	if (i == max_devsw_convs) {
176 		struct devsw_conv *newptr;
177 		int old, new;
178 
179 		old = max_devsw_convs;
180 		new = old + 1;
181 
182 		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
183 		if (newptr == NULL) {
184 			devsw_detach_locked(bdev, cdev);
185 			error = ENOMEM;
186 			goto fail;
187 		}
188 		newptr[old].d_name = NULL;
189 		newptr[old].d_bmajor = -1;
190 		newptr[old].d_cmajor = -1;
191 		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
192 		if (devsw_conv != devsw_conv0)
193 			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
194 		devsw_conv = newptr;
195 		max_devsw_convs = new;
196 	}
197 
198 	i = strlen(devname) + 1;
199 	name = kmem_alloc(i, KM_NOSLEEP);
200 	if (name == NULL) {
201 		devsw_detach_locked(bdev, cdev);
202 		goto fail;
203 	}
204 	strlcpy(name, devname, i);
205 
206 	devsw_conv[i].d_name = name;
207 	devsw_conv[i].d_bmajor = *bmajor;
208 	devsw_conv[i].d_cmajor = *cmajor;
209 
210 	mutex_exit(&devsw_lock);
211 	return (0);
212  fail:
213 	mutex_exit(&devsw_lock);
214 	return (error);
215 }
216 
217 static int
218 bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
219 {
220 	const struct bdevsw **newptr;
221 	int bmajor, i;
222 
223 	KASSERT(mutex_owned(&devsw_lock));
224 
225 	if (devsw == NULL)
226 		return (0);
227 
228 	if (*devmajor < 0) {
229 		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
230 			if (bdevsw[bmajor] != NULL)
231 				continue;
232 			for (i = 0 ; i < max_devsw_convs ; i++) {
233 				if (devsw_conv[i].d_bmajor == bmajor)
234 					break;
235 			}
236 			if (i != max_devsw_convs)
237 				continue;
238 			break;
239 		}
240 		*devmajor = bmajor;
241 	}
242 
243 	if (*devmajor >= MAXDEVSW) {
244 		printf("bdevsw_attach: block majors exhausted");
245 		return (ENOMEM);
246 	}
247 
248 	if (*devmajor >= max_bdevsws) {
249 		KASSERT(bdevsw == bdevsw0);
250 		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
251 		if (newptr == NULL)
252 			return (ENOMEM);
253 		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
254 		bdevsw = newptr;
255 		max_bdevsws = MAXDEVSW;
256 	}
257 
258 	if (bdevsw[*devmajor] != NULL)
259 		return (EEXIST);
260 
261 	bdevsw[*devmajor] = devsw;
262 
263 	return (0);
264 }
265 
266 static int
267 cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
268 {
269 	const struct cdevsw **newptr;
270 	int cmajor, i;
271 
272 	KASSERT(mutex_owned(&devsw_lock));
273 
274 	if (*devmajor < 0) {
275 		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
276 			if (cdevsw[cmajor] != NULL)
277 				continue;
278 			for (i = 0 ; i < max_devsw_convs ; i++) {
279 				if (devsw_conv[i].d_cmajor == cmajor)
280 					break;
281 			}
282 			if (i != max_devsw_convs)
283 				continue;
284 			break;
285 		}
286 		*devmajor = cmajor;
287 	}
288 
289 	if (*devmajor >= MAXDEVSW) {
290 		printf("cdevsw_attach: character majors exhausted");
291 		return (ENOMEM);
292 	}
293 
294 	if (*devmajor >= max_cdevsws) {
295 		KASSERT(cdevsw == cdevsw0);
296 		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
297 		if (newptr == NULL)
298 			return (ENOMEM);
299 		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
300 		cdevsw = newptr;
301 		max_cdevsws = MAXDEVSW;
302 	}
303 
304 	if (cdevsw[*devmajor] != NULL)
305 		return (EEXIST);
306 
307 	cdevsw[*devmajor] = devsw;
308 
309 	return (0);
310 }
311 
312 static void
313 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
314 {
315 	int i;
316 
317 	KASSERT(mutex_owned(&devsw_lock));
318 
319 	if (bdev != NULL) {
320 		for (i = 0 ; i < max_bdevsws ; i++) {
321 			if (bdevsw[i] != bdev)
322 				continue;
323 			bdevsw[i] = NULL;
324 			break;
325 		}
326 	}
327 	if (cdev != NULL) {
328 		for (i = 0 ; i < max_cdevsws ; i++) {
329 			if (cdevsw[i] != cdev)
330 				continue;
331 			cdevsw[i] = NULL;
332 			break;
333 		}
334 	}
335 }
336 
337 int
338 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
339 {
340 
341 	mutex_enter(&devsw_lock);
342 	devsw_detach_locked(bdev, cdev);
343 	mutex_exit(&devsw_lock);
344 	return 0;
345 }
346 
347 /*
348  * Look up a block device by number.
349  *
350  * => Caller must ensure that the device is attached.
351  */
352 const struct bdevsw *
353 bdevsw_lookup(dev_t dev)
354 {
355 	int bmajor;
356 
357 	if (dev == NODEV)
358 		return (NULL);
359 	bmajor = major(dev);
360 	if (bmajor < 0 || bmajor >= max_bdevsws)
361 		return (NULL);
362 
363 	return (bdevsw[bmajor]);
364 }
365 
366 /*
367  * Look up a character device by number.
368  *
369  * => Caller must ensure that the device is attached.
370  */
371 const struct cdevsw *
372 cdevsw_lookup(dev_t dev)
373 {
374 	int cmajor;
375 
376 	if (dev == NODEV)
377 		return (NULL);
378 	cmajor = major(dev);
379 	if (cmajor < 0 || cmajor >= max_cdevsws)
380 		return (NULL);
381 
382 	return (cdevsw[cmajor]);
383 }
384 
385 /*
386  * Look up a block device by reference to its operations set.
387  *
388  * => Caller must ensure that the device is not detached, and therefore
389  *    that the returned major is still valid when dereferenced.
390  */
391 int
392 bdevsw_lookup_major(const struct bdevsw *bdev)
393 {
394 	int bmajor;
395 
396 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
397 		if (bdevsw[bmajor] == bdev)
398 			return (bmajor);
399 	}
400 
401 	return (-1);
402 }
403 
404 /*
405  * Look up a character device by reference to its operations set.
406  *
407  * => Caller must ensure that the device is not detached, and therefore
408  *    that the returned major is still valid when dereferenced.
409  */
410 int
411 cdevsw_lookup_major(const struct cdevsw *cdev)
412 {
413 	int cmajor;
414 
415 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
416 		if (cdevsw[cmajor] == cdev)
417 			return (cmajor);
418 	}
419 
420 	return (-1);
421 }
422 
423 /*
424  * Convert from block major number to name.
425  *
426  * => Caller must ensure that the device is not detached, and therefore
427  *    that the name pointer is still valid when dereferenced.
428  */
429 const char *
430 devsw_blk2name(int bmajor)
431 {
432 	const char *name;
433 	int cmajor, i;
434 
435 	name = NULL;
436 	cmajor = -1;
437 
438 	mutex_enter(&devsw_lock);
439 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
440 		mutex_exit(&devsw_lock);
441 		return (NULL);
442 	}
443 	for (i = 0 ; i < max_devsw_convs; i++) {
444 		if (devsw_conv[i].d_bmajor == bmajor) {
445 			cmajor = devsw_conv[i].d_cmajor;
446 			break;
447 		}
448 	}
449 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
450 		name = devsw_conv[i].d_name;
451 	mutex_exit(&devsw_lock);
452 
453 	return (name);
454 }
455 
456 /*
457  * Convert from device name to block major number.
458  *
459  * => Caller must ensure that the device is not detached, and therefore
460  *    that the major number is still valid when dereferenced.
461  */
462 int
463 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
464 {
465 	struct devsw_conv *conv;
466 	int bmajor, i;
467 
468 	if (name == NULL)
469 		return (-1);
470 
471 	mutex_enter(&devsw_lock);
472 	for (i = 0 ; i < max_devsw_convs ; i++) {
473 		size_t len;
474 
475 		conv = &devsw_conv[i];
476 		if (conv->d_name == NULL)
477 			continue;
478 		len = strlen(conv->d_name);
479 		if (strncmp(conv->d_name, name, len) != 0)
480 			continue;
481 		if (*(name +len) && !isdigit(*(name + len)))
482 			continue;
483 		bmajor = conv->d_bmajor;
484 		if (bmajor < 0 || bmajor >= max_bdevsws ||
485 		    bdevsw[bmajor] == NULL)
486 			break;
487 		if (devname != NULL) {
488 #ifdef DEVSW_DEBUG
489 			if (strlen(conv->d_name) >= devnamelen)
490 				printf("devsw_name2blk: too short buffer");
491 #endif /* DEVSW_DEBUG */
492 			strncpy(devname, conv->d_name, devnamelen);
493 			devname[devnamelen - 1] = '\0';
494 		}
495 		mutex_exit(&devsw_lock);
496 		return (bmajor);
497 	}
498 
499 	mutex_exit(&devsw_lock);
500 	return (-1);
501 }
502 
503 /*
504  * Convert from device name to char major number.
505  *
506  * => Caller must ensure that the device is not detached, and therefore
507  *    that the major number is still valid when dereferenced.
508  */
509 int
510 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
511 {
512 	struct devsw_conv *conv;
513 	int cmajor, i;
514 
515 	if (name == NULL)
516 		return (-1);
517 
518 	mutex_enter(&devsw_lock);
519 	for (i = 0 ; i < max_devsw_convs ; i++) {
520 		size_t len;
521 
522 		conv = &devsw_conv[i];
523 		if (conv->d_name == NULL)
524 			continue;
525 		len = strlen(conv->d_name);
526 		if (strncmp(conv->d_name, name, len) != 0)
527 			continue;
528 		if (*(name +len) && !isdigit(*(name + len)))
529 			continue;
530 		cmajor = conv->d_cmajor;
531 		if (cmajor < 0 || cmajor >= max_cdevsws ||
532 		    cdevsw[cmajor] == NULL)
533 			break;
534 		if (devname != NULL) {
535 #ifdef DEVSW_DEBUG
536 			if (strlen(conv->d_name) >= devnamelen)
537 				printf("devsw_name2chr: too short buffer");
538 #endif /* DEVSW_DEBUG */
539 			strncpy(devname, conv->d_name, devnamelen);
540 			devname[devnamelen - 1] = '\0';
541 		}
542 		mutex_exit(&devsw_lock);
543 		return (cmajor);
544 	}
545 
546 	mutex_exit(&devsw_lock);
547 	return (-1);
548 }
549 
550 /*
551  * Convert from character dev_t to block dev_t.
552  *
553  * => Caller must ensure that the device is not detached, and therefore
554  *    that the major number is still valid when dereferenced.
555  */
556 dev_t
557 devsw_chr2blk(dev_t cdev)
558 {
559 	int bmajor, cmajor, i;
560 	dev_t rv;
561 
562 	cmajor = major(cdev);
563 	bmajor = -1;
564 	rv = NODEV;
565 
566 	mutex_enter(&devsw_lock);
567 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
568 		mutex_exit(&devsw_lock);
569 		return (NODEV);
570 	}
571 	for (i = 0 ; i < max_devsw_convs ; i++) {
572 		if (devsw_conv[i].d_cmajor == cmajor) {
573 			bmajor = devsw_conv[i].d_bmajor;
574 			break;
575 		}
576 	}
577 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
578 		rv = makedev(bmajor, minor(cdev));
579 	mutex_exit(&devsw_lock);
580 
581 	return (rv);
582 }
583 
584 /*
585  * Convert from block dev_t to character dev_t.
586  *
587  * => Caller must ensure that the device is not detached, and therefore
588  *    that the major number is still valid when dereferenced.
589  */
590 dev_t
591 devsw_blk2chr(dev_t bdev)
592 {
593 	int bmajor, cmajor, i;
594 	dev_t rv;
595 
596 	bmajor = major(bdev);
597 	cmajor = -1;
598 	rv = NODEV;
599 
600 	mutex_enter(&devsw_lock);
601 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
602 		mutex_exit(&devsw_lock);
603 		return (NODEV);
604 	}
605 	for (i = 0 ; i < max_devsw_convs ; i++) {
606 		if (devsw_conv[i].d_bmajor == bmajor) {
607 			cmajor = devsw_conv[i].d_cmajor;
608 			break;
609 		}
610 	}
611 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
612 		rv = makedev(cmajor, minor(bdev));
613 	mutex_exit(&devsw_lock);
614 
615 	return (rv);
616 }
617 
618 /*
619  * Device access methods.
620  */
621 
622 #define	DEV_LOCK(d)						\
623 	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
624 		KERNEL_LOCK(1, NULL);				\
625 	}
626 
627 #define	DEV_UNLOCK(d)						\
628 	if (mpflag == 0) {					\
629 		KERNEL_UNLOCK_ONE(NULL);			\
630 	}
631 
632 int
633 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
634 {
635 	const struct bdevsw *d;
636 	int rv, mpflag;
637 
638 	/*
639 	 * For open we need to lock, in order to synchronize
640 	 * with attach/detach.
641 	 */
642 	mutex_enter(&devsw_lock);
643 	d = bdevsw_lookup(dev);
644 	mutex_exit(&devsw_lock);
645 	if (d == NULL)
646 		return ENXIO;
647 
648 	DEV_LOCK(d);
649 	rv = (*d->d_open)(dev, flag, devtype, l);
650 	DEV_UNLOCK(d);
651 
652 	return rv;
653 }
654 
655 int
656 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
657 {
658 	const struct bdevsw *d;
659 	int rv, mpflag;
660 
661 	if ((d = bdevsw_lookup(dev)) == NULL)
662 		return ENXIO;
663 
664 	DEV_LOCK(d);
665 	rv = (*d->d_close)(dev, flag, devtype, l);
666 	DEV_UNLOCK(d);
667 
668 	return rv;
669 }
670 
671 void
672 bdev_strategy(struct buf *bp)
673 {
674 	const struct bdevsw *d;
675 	int mpflag;
676 
677 	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
678 		panic("bdev_strategy");
679 
680 	DEV_LOCK(d);
681 	(*d->d_strategy)(bp);
682 	DEV_UNLOCK(d);
683 }
684 
685 int
686 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
687 {
688 	const struct bdevsw *d;
689 	int rv, mpflag;
690 
691 	if ((d = bdevsw_lookup(dev)) == NULL)
692 		return ENXIO;
693 
694 	DEV_LOCK(d);
695 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
696 	DEV_UNLOCK(d);
697 
698 	return rv;
699 }
700 
701 int
702 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
703 {
704 	const struct bdevsw *d;
705 	int rv;
706 
707 	/*
708 	 * Dump can be called without the device open.  Since it can
709 	 * currently only be called with the system paused (and in a
710 	 * potentially unstable state), we don't perform any locking.
711 	 */
712 	if ((d = bdevsw_lookup(dev)) == NULL)
713 		return ENXIO;
714 
715 	/* DEV_LOCK(d); */
716 	rv = (*d->d_dump)(dev, addr, data, sz);
717 	/* DEV_UNLOCK(d); */
718 
719 	return rv;
720 }
721 
722 int
723 bdev_type(dev_t dev)
724 {
725 	const struct bdevsw *d;
726 
727 	if ((d = bdevsw_lookup(dev)) == NULL)
728 		return D_OTHER;
729 	return d->d_flag & D_TYPEMASK;
730 }
731 
732 int
733 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
734 {
735 	const struct cdevsw *d;
736 	int rv, mpflag;
737 
738 	/*
739 	 * For open we need to lock, in order to synchronize
740 	 * with attach/detach.
741 	 */
742 	mutex_enter(&devsw_lock);
743 	d = cdevsw_lookup(dev);
744 	mutex_exit(&devsw_lock);
745 	if (d == NULL)
746 		return ENXIO;
747 
748 	DEV_LOCK(d);
749 	rv = (*d->d_open)(dev, flag, devtype, l);
750 	DEV_UNLOCK(d);
751 
752 	return rv;
753 }
754 
755 int
756 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
757 {
758 	const struct cdevsw *d;
759 	int rv, mpflag;
760 
761 	if ((d = cdevsw_lookup(dev)) == NULL)
762 		return ENXIO;
763 
764 	DEV_LOCK(d);
765 	rv = (*d->d_close)(dev, flag, devtype, l);
766 	DEV_UNLOCK(d);
767 
768 	return rv;
769 }
770 
771 int
772 cdev_read(dev_t dev, struct uio *uio, int flag)
773 {
774 	const struct cdevsw *d;
775 	int rv, mpflag;
776 
777 	if ((d = cdevsw_lookup(dev)) == NULL)
778 		return ENXIO;
779 
780 	DEV_LOCK(d);
781 	rv = (*d->d_read)(dev, uio, flag);
782 	DEV_UNLOCK(d);
783 
784 	return rv;
785 }
786 
787 int
788 cdev_write(dev_t dev, struct uio *uio, int flag)
789 {
790 	const struct cdevsw *d;
791 	int rv, mpflag;
792 
793 	if ((d = cdevsw_lookup(dev)) == NULL)
794 		return ENXIO;
795 
796 	DEV_LOCK(d);
797 	rv = (*d->d_write)(dev, uio, flag);
798 	DEV_UNLOCK(d);
799 
800 	return rv;
801 }
802 
803 int
804 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
805 {
806 	const struct cdevsw *d;
807 	int rv, mpflag;
808 
809 	if ((d = cdevsw_lookup(dev)) == NULL)
810 		return ENXIO;
811 
812 	DEV_LOCK(d);
813 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
814 	DEV_UNLOCK(d);
815 
816 	return rv;
817 }
818 
819 void
820 cdev_stop(struct tty *tp, int flag)
821 {
822 	const struct cdevsw *d;
823 	int mpflag;
824 
825 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
826 		return;
827 
828 	DEV_LOCK(d);
829 	(*d->d_stop)(tp, flag);
830 	DEV_UNLOCK(d);
831 }
832 
833 struct tty *
834 cdev_tty(dev_t dev)
835 {
836 	const struct cdevsw *d;
837 	struct tty * rv;
838 	int mpflag;
839 
840 	if ((d = cdevsw_lookup(dev)) == NULL)
841 		return NULL;
842 
843 	/* XXX Check if necessary. */
844 	if (d->d_tty == NULL)
845 		return NULL;
846 
847 	DEV_LOCK(d);
848 	rv = (*d->d_tty)(dev);
849 	DEV_UNLOCK(d);
850 
851 	return rv;
852 }
853 
854 int
855 cdev_poll(dev_t dev, int flag, lwp_t *l)
856 {
857 	const struct cdevsw *d;
858 	int rv, mpflag;
859 
860 	if ((d = cdevsw_lookup(dev)) == NULL)
861 		return POLLERR;
862 
863 	DEV_LOCK(d);
864 	rv = (*d->d_poll)(dev, flag, l);
865 	DEV_UNLOCK(d);
866 
867 	return rv;
868 }
869 
870 paddr_t
871 cdev_mmap(dev_t dev, off_t off, int flag)
872 {
873 	const struct cdevsw *d;
874 	paddr_t rv;
875 	int mpflag;
876 
877 	if ((d = cdevsw_lookup(dev)) == NULL)
878 		return (paddr_t)-1LL;
879 
880 	DEV_LOCK(d);
881 	rv = (*d->d_mmap)(dev, off, flag);
882 	DEV_UNLOCK(d);
883 
884 	return rv;
885 }
886 
887 int
888 cdev_kqfilter(dev_t dev, struct knote *kn)
889 {
890 	const struct cdevsw *d;
891 	int rv, mpflag;
892 
893 	if ((d = cdevsw_lookup(dev)) == NULL)
894 		return ENXIO;
895 
896 	DEV_LOCK(d);
897 	rv = (*d->d_kqfilter)(dev, kn);
898 	DEV_UNLOCK(d);
899 
900 	return rv;
901 }
902 
903 int
904 cdev_type(dev_t dev)
905 {
906 	const struct cdevsw *d;
907 
908 	if ((d = cdevsw_lookup(dev)) == NULL)
909 		return D_OTHER;
910 	return d->d_flag & D_TYPEMASK;
911 }
912