xref: /netbsd-src/sys/kern/subr_devsw.c (revision 404fbe5fb94ca1e054339640cabb2801ce52dd30)
1 /*	$NetBSD: subr_devsw.c,v 1.23 2008/12/29 17:41:18 pooka Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Overview
34  *
35  *	subr_devsw.c: registers device drivers by name and by major
36  *	number, and provides wrapper methods for performing I/O and
37  *	other tasks on device drivers, keying on the device number
38  *	(dev_t).
39  *
40  *	When the system is built, the config(8) command generates
41  *	static tables of device drivers built into the kernel image
42  *	along with their associated methods.  These are recorded in
43  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
44  *	and removed from the system dynamically.
45  *
46  * Allocation
47  *
48  *	When the system initially boots only the statically allocated
49  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
50  *	allocation, we allocate a fixed block of memory to hold the new,
51  *	expanded index.  This "fork" of the table is only ever performed
52  *	once in order to guarantee that other threads may safely access
53  *	the device tables:
54  *
55  *	o Once a thread has a "reference" to the table via an earlier
56  *	  open() call, we know that the entry in the table must exist
57  *	  and so it is safe to access it.
58  *
59  *	o Regardless of whether other threads see the old or new
60  *	  pointers, they will point to a correct device switch
61  *	  structure for the operation being performed.
62  *
63  *	XXX Currently, the wrapper methods such as cdev_read() verify
64  *	that a device driver does in fact exist before calling the
65  *	associated driver method.  This should be changed so that
66  *	once the device has been referenced by a vnode (opened),
67  *	calling the other methods should be valid until that reference
68  *	is dropped.
69  */
70 
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.23 2008/12/29 17:41:18 pooka Exp $");
73 
74 #include <sys/param.h>
75 #include <sys/conf.h>
76 #include <sys/kmem.h>
77 #include <sys/systm.h>
78 #include <sys/poll.h>
79 #include <sys/tty.h>
80 #include <sys/cpu.h>
81 #include <sys/buf.h>
82 
83 #ifdef DEVSW_DEBUG
84 #define	DPRINTF(x)	printf x
85 #else /* DEVSW_DEBUG */
86 #define	DPRINTF(x)
87 #endif /* DEVSW_DEBUG */
88 
89 #define	MAXDEVSW	512	/* the maximum number of device majors */
90 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
91 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
92 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
93 
94 extern const struct bdevsw **bdevsw, *bdevsw0[];
95 extern const struct cdevsw **cdevsw, *cdevsw0[];
96 extern struct devsw_conv *devsw_conv, devsw_conv0[];
97 extern const int sys_bdevsws, sys_cdevsws;
98 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
99 
100 static int bdevsw_attach(const struct bdevsw *, int *);
101 static int cdevsw_attach(const struct cdevsw *, int *);
102 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
103 
104 kmutex_t device_lock;
105 
106 void
107 devsw_init(void)
108 {
109 
110 	KASSERT(sys_bdevsws < MAXDEVSW - 1);
111 	KASSERT(sys_cdevsws < MAXDEVSW - 1);
112 	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
113 }
114 
115 int
116 devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
117 	     const struct cdevsw *cdev, int *cmajor)
118 {
119 	struct devsw_conv *conv;
120 	char *name;
121 	int error, i;
122 
123 	if (devname == NULL || cdev == NULL)
124 		return (EINVAL);
125 
126 	mutex_enter(&device_lock);
127 
128 	for (i = 0 ; i < max_devsw_convs ; i++) {
129 		conv = &devsw_conv[i];
130 		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
131 			continue;
132 
133 		if (*bmajor < 0)
134 			*bmajor = conv->d_bmajor;
135 		if (*cmajor < 0)
136 			*cmajor = conv->d_cmajor;
137 
138 		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
139 			error = EINVAL;
140 			goto fail;
141 		}
142 		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
143 			error = EINVAL;
144 			goto fail;
145 		}
146 
147 		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
148 		    cdevsw[*cmajor] != NULL) {
149 			error = EEXIST;
150 			goto fail;
151 		}
152 
153 		if (bdev != NULL)
154 			bdevsw[*bmajor] = bdev;
155 		cdevsw[*cmajor] = cdev;
156 
157 		mutex_exit(&device_lock);
158 		return (0);
159 	}
160 
161 	error = bdevsw_attach(bdev, bmajor);
162 	if (error != 0)
163 		goto fail;
164 	error = cdevsw_attach(cdev, cmajor);
165 	if (error != 0) {
166 		devsw_detach_locked(bdev, NULL);
167 		goto fail;
168 	}
169 
170 	for (i = 0 ; i < max_devsw_convs ; i++) {
171 		if (devsw_conv[i].d_name == NULL)
172 			break;
173 	}
174 	if (i == max_devsw_convs) {
175 		struct devsw_conv *newptr;
176 		int old, new;
177 
178 		old = max_devsw_convs;
179 		new = old + 1;
180 
181 		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
182 		if (newptr == NULL) {
183 			devsw_detach_locked(bdev, cdev);
184 			error = ENOMEM;
185 			goto fail;
186 		}
187 		newptr[old].d_name = NULL;
188 		newptr[old].d_bmajor = -1;
189 		newptr[old].d_cmajor = -1;
190 		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
191 		if (devsw_conv != devsw_conv0)
192 			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
193 		devsw_conv = newptr;
194 		max_devsw_convs = new;
195 	}
196 
197 	i = strlen(devname) + 1;
198 	name = kmem_alloc(i, KM_NOSLEEP);
199 	if (name == NULL) {
200 		devsw_detach_locked(bdev, cdev);
201 		goto fail;
202 	}
203 	strlcpy(name, devname, i);
204 
205 	devsw_conv[i].d_name = name;
206 	devsw_conv[i].d_bmajor = *bmajor;
207 	devsw_conv[i].d_cmajor = *cmajor;
208 
209 	mutex_exit(&device_lock);
210 	return (0);
211  fail:
212 	mutex_exit(&device_lock);
213 	return (error);
214 }
215 
/*
 * Attach a block device switch at *devmajor, choosing a free dynamic
 * major when *devmajor is negative.
 *
 * => Returns 0 on success, ENOMEM when majors or memory are exhausted,
 *    EEXIST when the requested major is already occupied.
 * => Must be called with device_lock held.
 */
static int
bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
{
	const struct bdevsw **newptr;
	int bmajor, i;

	KASSERT(mutex_owned(&device_lock));

	/* A driver with no block side is not an error. */
	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		/*
		 * Pick the first major that is both unused in the switch
		 * table and not reserved by any name->major record.
		 */
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			/* i == max_devsw_convs means no record claims it. */
			if (i != max_devsw_convs)
				continue;
			break;
		}
		/* NOTE: may be max_bdevsws; the growth path below copes. */
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		/*
		 * Grow the table once, from the static bdevsw0 to its
		 * final MAXDEVSW size.  Lock-free readers may still hold
		 * the old pointer, so the old table is never freed and
		 * the new pointer is published only after the copy (see
		 * the "Allocation" notes at the top of this file).
		 */
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}
264 
/*
 * Attach a character device switch at *devmajor, choosing a free
 * dynamic major when *devmajor is negative.
 *
 * => Unlike bdevsw_attach(), devsw is never NULL here: the caller
 *    rejects a NULL cdev before calling.
 * => Returns 0 on success, ENOMEM when majors or memory are exhausted,
 *    EEXIST when the requested major is already occupied.
 * => Must be called with device_lock held.
 */
static int
cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
{
	const struct cdevsw **newptr;
	int cmajor, i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		/*
		 * Pick the first major that is both unused in the switch
		 * table and not reserved by any name->major record.
		 */
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			/* i == max_devsw_convs means no record claims it. */
			if (i != max_devsw_convs)
				continue;
			break;
		}
		/* NOTE: may be max_cdevsws; the growth path below copes. */
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		/*
		 * Grow the table once, from the static cdevsw0 to its
		 * final MAXDEVSW size.  Lock-free readers may still hold
		 * the old pointer, so the old table is never freed and
		 * the new pointer is published only after the copy.
		 */
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}
310 
311 static void
312 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
313 {
314 	int i;
315 
316 	KASSERT(mutex_owned(&device_lock));
317 
318 	if (bdev != NULL) {
319 		for (i = 0 ; i < max_bdevsws ; i++) {
320 			if (bdevsw[i] != bdev)
321 				continue;
322 			bdevsw[i] = NULL;
323 			break;
324 		}
325 	}
326 	if (cdev != NULL) {
327 		for (i = 0 ; i < max_cdevsws ; i++) {
328 			if (cdevsw[i] != cdev)
329 				continue;
330 			cdevsw[i] = NULL;
331 			break;
332 		}
333 	}
334 }
335 
/*
 * Remove a driver's block and/or character switch entries.
 *
 * => Either pointer may be NULL.
 * => Always succeeds; returns 0 so callers (e.g. module unload paths)
 *    can treat it like other devsw operations.
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}
345 
346 /*
347  * Look up a block device by number.
348  *
349  * => Caller must ensure that the device is attached.
350  */
351 const struct bdevsw *
352 bdevsw_lookup(dev_t dev)
353 {
354 	int bmajor;
355 
356 	if (dev == NODEV)
357 		return (NULL);
358 	bmajor = major(dev);
359 	if (bmajor < 0 || bmajor >= max_bdevsws)
360 		return (NULL);
361 
362 	return (bdevsw[bmajor]);
363 }
364 
365 /*
366  * Look up a character device by number.
367  *
368  * => Caller must ensure that the device is attached.
369  */
370 const struct cdevsw *
371 cdevsw_lookup(dev_t dev)
372 {
373 	int cmajor;
374 
375 	if (dev == NODEV)
376 		return (NULL);
377 	cmajor = major(dev);
378 	if (cmajor < 0 || cmajor >= max_cdevsws)
379 		return (NULL);
380 
381 	return (cdevsw[cmajor]);
382 }
383 
384 /*
385  * Look up a block device by reference to its operations set.
386  *
387  * => Caller must ensure that the device is not detached, and therefore
388  *    that the returned major is still valid when dereferenced.
389  */
390 int
391 bdevsw_lookup_major(const struct bdevsw *bdev)
392 {
393 	int bmajor;
394 
395 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
396 		if (bdevsw[bmajor] == bdev)
397 			return (bmajor);
398 	}
399 
400 	return (-1);
401 }
402 
403 /*
404  * Look up a character device by reference to its operations set.
405  *
406  * => Caller must ensure that the device is not detached, and therefore
407  *    that the returned major is still valid when dereferenced.
408  */
409 int
410 cdevsw_lookup_major(const struct cdevsw *cdev)
411 {
412 	int cmajor;
413 
414 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
415 		if (cdevsw[cmajor] == cdev)
416 			return (cmajor);
417 	}
418 
419 	return (-1);
420 }
421 
422 /*
423  * Convert from block major number to name.
424  *
425  * => Caller must ensure that the device is not detached, and therefore
426  *    that the name pointer is still valid when dereferenced.
427  */
428 const char *
429 devsw_blk2name(int bmajor)
430 {
431 	const char *name;
432 	int cmajor, i;
433 
434 	name = NULL;
435 	cmajor = -1;
436 
437 	mutex_enter(&device_lock);
438 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
439 		mutex_exit(&device_lock);
440 		return (NULL);
441 	}
442 	for (i = 0 ; i < max_devsw_convs; i++) {
443 		if (devsw_conv[i].d_bmajor == bmajor) {
444 			cmajor = devsw_conv[i].d_cmajor;
445 			break;
446 		}
447 	}
448 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
449 		name = devsw_conv[i].d_name;
450 	mutex_exit(&device_lock);
451 
452 	return (name);
453 }
454 
455 /*
456  * Convert from device name to block major number.
457  *
458  * => Caller must ensure that the device is not detached, and therefore
459  *    that the major number is still valid when dereferenced.
460  */
461 int
462 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
463 {
464 	struct devsw_conv *conv;
465 	int bmajor, i;
466 
467 	if (name == NULL)
468 		return (-1);
469 
470 	mutex_enter(&device_lock);
471 	for (i = 0 ; i < max_devsw_convs ; i++) {
472 		size_t len;
473 
474 		conv = &devsw_conv[i];
475 		if (conv->d_name == NULL)
476 			continue;
477 		len = strlen(conv->d_name);
478 		if (strncmp(conv->d_name, name, len) != 0)
479 			continue;
480 		if (*(name +len) && !isdigit(*(name + len)))
481 			continue;
482 		bmajor = conv->d_bmajor;
483 		if (bmajor < 0 || bmajor >= max_bdevsws ||
484 		    bdevsw[bmajor] == NULL)
485 			break;
486 		if (devname != NULL) {
487 #ifdef DEVSW_DEBUG
488 			if (strlen(conv->d_name) >= devnamelen)
489 				printf("devsw_name2blk: too short buffer");
490 #endif /* DEVSW_DEBUG */
491 			strncpy(devname, conv->d_name, devnamelen);
492 			devname[devnamelen - 1] = '\0';
493 		}
494 		mutex_exit(&device_lock);
495 		return (bmajor);
496 	}
497 
498 	mutex_exit(&device_lock);
499 	return (-1);
500 }
501 
502 /*
503  * Convert from device name to char major number.
504  *
505  * => Caller must ensure that the device is not detached, and therefore
506  *    that the major number is still valid when dereferenced.
507  */
508 int
509 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
510 {
511 	struct devsw_conv *conv;
512 	int cmajor, i;
513 
514 	if (name == NULL)
515 		return (-1);
516 
517 	mutex_enter(&device_lock);
518 	for (i = 0 ; i < max_devsw_convs ; i++) {
519 		size_t len;
520 
521 		conv = &devsw_conv[i];
522 		if (conv->d_name == NULL)
523 			continue;
524 		len = strlen(conv->d_name);
525 		if (strncmp(conv->d_name, name, len) != 0)
526 			continue;
527 		if (*(name +len) && !isdigit(*(name + len)))
528 			continue;
529 		cmajor = conv->d_cmajor;
530 		if (cmajor < 0 || cmajor >= max_cdevsws ||
531 		    cdevsw[cmajor] == NULL)
532 			break;
533 		if (devname != NULL) {
534 #ifdef DEVSW_DEBUG
535 			if (strlen(conv->d_name) >= devnamelen)
536 				printf("devsw_name2chr: too short buffer");
537 #endif /* DEVSW_DEBUG */
538 			strncpy(devname, conv->d_name, devnamelen);
539 			devname[devnamelen - 1] = '\0';
540 		}
541 		mutex_exit(&device_lock);
542 		return (cmajor);
543 	}
544 
545 	mutex_exit(&device_lock);
546 	return (-1);
547 }
548 
549 /*
550  * Convert from character dev_t to block dev_t.
551  *
552  * => Caller must ensure that the device is not detached, and therefore
553  *    that the major number is still valid when dereferenced.
554  */
555 dev_t
556 devsw_chr2blk(dev_t cdev)
557 {
558 	int bmajor, cmajor, i;
559 	dev_t rv;
560 
561 	cmajor = major(cdev);
562 	bmajor = -1;
563 	rv = NODEV;
564 
565 	mutex_enter(&device_lock);
566 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
567 		mutex_exit(&device_lock);
568 		return (NODEV);
569 	}
570 	for (i = 0 ; i < max_devsw_convs ; i++) {
571 		if (devsw_conv[i].d_cmajor == cmajor) {
572 			bmajor = devsw_conv[i].d_bmajor;
573 			break;
574 		}
575 	}
576 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
577 		rv = makedev(bmajor, minor(cdev));
578 	mutex_exit(&device_lock);
579 
580 	return (rv);
581 }
582 
583 /*
584  * Convert from block dev_t to character dev_t.
585  *
586  * => Caller must ensure that the device is not detached, and therefore
587  *    that the major number is still valid when dereferenced.
588  */
589 dev_t
590 devsw_blk2chr(dev_t bdev)
591 {
592 	int bmajor, cmajor, i;
593 	dev_t rv;
594 
595 	bmajor = major(bdev);
596 	cmajor = -1;
597 	rv = NODEV;
598 
599 	mutex_enter(&device_lock);
600 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
601 		mutex_exit(&device_lock);
602 		return (NODEV);
603 	}
604 	for (i = 0 ; i < max_devsw_convs ; i++) {
605 		if (devsw_conv[i].d_bmajor == bmajor) {
606 			cmajor = devsw_conv[i].d_cmajor;
607 			break;
608 		}
609 	}
610 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
611 		rv = makedev(cmajor, minor(bdev));
612 	mutex_exit(&device_lock);
613 
614 	return (rv);
615 }
616 
617 /*
618  * Device access methods.
619  */
620 
/*
 * Take/drop the big kernel lock around a driver method unless the
 * driver has declared itself MP-safe (D_MPSAFE in d_flag).  Both
 * macros expand in-line and rely on a local `mpflag' variable in
 * the calling function: DEV_LOCK() records the D_MPSAFE state there
 * and DEV_UNLOCK() consults it.
 */
#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}
630 
631 int
632 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
633 {
634 	const struct bdevsw *d;
635 	int rv, mpflag;
636 
637 	/*
638 	 * For open we need to lock, in order to synchronize
639 	 * with attach/detach.
640 	 */
641 	mutex_enter(&device_lock);
642 	d = bdevsw_lookup(dev);
643 	mutex_exit(&device_lock);
644 	if (d == NULL)
645 		return ENXIO;
646 
647 	DEV_LOCK(d);
648 	rv = (*d->d_open)(dev, flag, devtype, l);
649 	DEV_UNLOCK(d);
650 
651 	return rv;
652 }
653 
654 int
655 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
656 {
657 	const struct bdevsw *d;
658 	int rv, mpflag;
659 
660 	if ((d = bdevsw_lookup(dev)) == NULL)
661 		return ENXIO;
662 
663 	DEV_LOCK(d);
664 	rv = (*d->d_close)(dev, flag, devtype, l);
665 	DEV_UNLOCK(d);
666 
667 	return rv;
668 }
669 
670 void
671 bdev_strategy(struct buf *bp)
672 {
673 	const struct bdevsw *d;
674 	int mpflag;
675 
676 	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
677 		panic("bdev_strategy");
678 
679 	DEV_LOCK(d);
680 	(*d->d_strategy)(bp);
681 	DEV_UNLOCK(d);
682 }
683 
684 int
685 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
686 {
687 	const struct bdevsw *d;
688 	int rv, mpflag;
689 
690 	if ((d = bdevsw_lookup(dev)) == NULL)
691 		return ENXIO;
692 
693 	DEV_LOCK(d);
694 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
695 	DEV_UNLOCK(d);
696 
697 	return rv;
698 }
699 
700 int
701 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
702 {
703 	const struct bdevsw *d;
704 	int rv;
705 
706 	/*
707 	 * Dump can be called without the device open.  Since it can
708 	 * currently only be called with the system paused (and in a
709 	 * potentially unstable state), we don't perform any locking.
710 	 */
711 	if ((d = bdevsw_lookup(dev)) == NULL)
712 		return ENXIO;
713 
714 	/* DEV_LOCK(d); */
715 	rv = (*d->d_dump)(dev, addr, data, sz);
716 	/* DEV_UNLOCK(d); */
717 
718 	return rv;
719 }
720 
721 int
722 bdev_type(dev_t dev)
723 {
724 	const struct bdevsw *d;
725 
726 	if ((d = bdevsw_lookup(dev)) == NULL)
727 		return D_OTHER;
728 	return d->d_flag & D_TYPEMASK;
729 }
730 
731 int
732 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
733 {
734 	const struct cdevsw *d;
735 	int rv, mpflag;
736 
737 	/*
738 	 * For open we need to lock, in order to synchronize
739 	 * with attach/detach.
740 	 */
741 	mutex_enter(&device_lock);
742 	d = cdevsw_lookup(dev);
743 	mutex_exit(&device_lock);
744 	if (d == NULL)
745 		return ENXIO;
746 
747 	DEV_LOCK(d);
748 	rv = (*d->d_open)(dev, flag, devtype, l);
749 	DEV_UNLOCK(d);
750 
751 	return rv;
752 }
753 
754 int
755 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
756 {
757 	const struct cdevsw *d;
758 	int rv, mpflag;
759 
760 	if ((d = cdevsw_lookup(dev)) == NULL)
761 		return ENXIO;
762 
763 	DEV_LOCK(d);
764 	rv = (*d->d_close)(dev, flag, devtype, l);
765 	DEV_UNLOCK(d);
766 
767 	return rv;
768 }
769 
770 int
771 cdev_read(dev_t dev, struct uio *uio, int flag)
772 {
773 	const struct cdevsw *d;
774 	int rv, mpflag;
775 
776 	if ((d = cdevsw_lookup(dev)) == NULL)
777 		return ENXIO;
778 
779 	DEV_LOCK(d);
780 	rv = (*d->d_read)(dev, uio, flag);
781 	DEV_UNLOCK(d);
782 
783 	return rv;
784 }
785 
786 int
787 cdev_write(dev_t dev, struct uio *uio, int flag)
788 {
789 	const struct cdevsw *d;
790 	int rv, mpflag;
791 
792 	if ((d = cdevsw_lookup(dev)) == NULL)
793 		return ENXIO;
794 
795 	DEV_LOCK(d);
796 	rv = (*d->d_write)(dev, uio, flag);
797 	DEV_UNLOCK(d);
798 
799 	return rv;
800 }
801 
802 int
803 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
804 {
805 	const struct cdevsw *d;
806 	int rv, mpflag;
807 
808 	if ((d = cdevsw_lookup(dev)) == NULL)
809 		return ENXIO;
810 
811 	DEV_LOCK(d);
812 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
813 	DEV_UNLOCK(d);
814 
815 	return rv;
816 }
817 
818 void
819 cdev_stop(struct tty *tp, int flag)
820 {
821 	const struct cdevsw *d;
822 	int mpflag;
823 
824 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
825 		return;
826 
827 	DEV_LOCK(d);
828 	(*d->d_stop)(tp, flag);
829 	DEV_UNLOCK(d);
830 }
831 
832 struct tty *
833 cdev_tty(dev_t dev)
834 {
835 	const struct cdevsw *d;
836 
837 	if ((d = cdevsw_lookup(dev)) == NULL)
838 		return NULL;
839 
840 	/* XXX Check if necessary. */
841 	if (d->d_tty == NULL)
842 		return NULL;
843 
844 	return (*d->d_tty)(dev);
845 }
846 
847 int
848 cdev_poll(dev_t dev, int flag, lwp_t *l)
849 {
850 	const struct cdevsw *d;
851 	int rv, mpflag;
852 
853 	if ((d = cdevsw_lookup(dev)) == NULL)
854 		return POLLERR;
855 
856 	DEV_LOCK(d);
857 	rv = (*d->d_poll)(dev, flag, l);
858 	DEV_UNLOCK(d);
859 
860 	return rv;
861 }
862 
863 paddr_t
864 cdev_mmap(dev_t dev, off_t off, int flag)
865 {
866 	const struct cdevsw *d;
867 	paddr_t rv;
868 	int mpflag;
869 
870 	if ((d = cdevsw_lookup(dev)) == NULL)
871 		return (paddr_t)-1LL;
872 
873 	DEV_LOCK(d);
874 	rv = (*d->d_mmap)(dev, off, flag);
875 	DEV_UNLOCK(d);
876 
877 	return rv;
878 }
879 
880 int
881 cdev_kqfilter(dev_t dev, struct knote *kn)
882 {
883 	const struct cdevsw *d;
884 	int rv, mpflag;
885 
886 	if ((d = cdevsw_lookup(dev)) == NULL)
887 		return ENXIO;
888 
889 	DEV_LOCK(d);
890 	rv = (*d->d_kqfilter)(dev, kn);
891 	DEV_UNLOCK(d);
892 
893 	return rv;
894 }
895 
896 int
897 cdev_type(dev_t dev)
898 {
899 	const struct cdevsw *d;
900 
901 	if ((d = cdevsw_lookup(dev)) == NULL)
902 		return D_OTHER;
903 	return d->d_flag & D_TYPEMASK;
904 }
905