/*	$NetBSD: subr_devsw.c,v 1.14 2007/11/20 14:47:14 pooka Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
 *	allocation, we allocate a fixed block of memory to hold the new,
 *	expanded index.  This "fork" of the table is only ever performed
 *	once in order to guarantee that other threads may safely access
 *	the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that once
 *	the device has been referenced by a vnode (opened), calling
 *	the other methods remains valid until that reference is
 *	dropped.
 */
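
/*
 * Example (an illustrative sketch only, not part of this file): a
 * dynamically loaded driver providing just a character interface might
 * register and unregister itself roughly as follows.  The "mydev"
 * names are hypothetical, and only the methods this module actually
 * dispatches on are shown:
 *
 *	static const struct cdevsw mydev_cdevsw = {
 *		.d_open = mydev_open,
 *		.d_close = mydev_close,
 *		.d_read = mydev_read,
 *		.d_write = mydev_write,
 *		.d_ioctl = mydev_ioctl,
 *		.d_flag = D_OTHER,
 *	};
 *
 *	int bmajor = -1, cmajor = -1;
 *	int error;
 *
 *	error = devsw_attach("mydev", NULL, &bmajor,
 *	    &mydev_cdevsw, &cmajor);
 *	if (error != 0)
 *		return error;
 *	...
 *	devsw_detach(NULL, &mydev_cdevsw);
 */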

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.14 2007/11/20 14:47:14 pooka Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/tty.h>
#include <sys/buf.h>

#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum major device number */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, int *);
static int cdevsw_attach(const struct cdevsw *, int *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

kmutex_t devsw_lock;

void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);

	mutex_init(&devsw_lock, MUTEX_DEFAULT, IPL_NONE);
}

int
devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
	     const struct cdevsw *cdev, int *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&devsw_lock);

	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		if (bdev != NULL)
			bdevsw[*bmajor] = bdev;
		cdevsw[*cmajor] = cdev;

		mutex_exit(&devsw_lock);
		return (0);
	}

	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old, new;

		old = max_devsw_convs;
		new = old + 1;

		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		newptr[old].d_name = NULL;
		newptr[old].d_bmajor = -1;
		newptr[old].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new;
	}

	/*
	 * Record the driver name so that the same majors can be handed
	 * back if the driver attaches again later.  Note that i still
	 * indexes the free conversion slot found above.
	 */
	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&devsw_lock);
	return (0);
 fail:
	mutex_exit(&devsw_lock);
	return (error);
}
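
/*
 * Note on major number assignment (illustrative, again using the
 * hypothetical "mydev"): passing -1 through bmajor/cmajor asks
 * devsw_attach() to allocate free majors, while a non-negative value
 * requests that specific major.  Because the name-to-major mapping is
 * recorded in devsw_conv, a driver that detaches and later re-attaches
 * under the same name receives its old majors back, so existing device
 * nodes stay valid:
 *
 *	int bmajor = -1, cmajor = -1;
 *
 *	error = devsw_attach("mydev", &mydev_bdevsw, &bmajor,
 *	    &mydev_cdevsw, &cmajor);	(majors allocated)
 *	devsw_detach(&mydev_bdevsw, &mydev_cdevsw);
 *	error = devsw_attach("mydev", &mydev_bdevsw, &bmajor,
 *	    &mydev_cdevsw, &cmajor);	(the same majors are reused)
 */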

static int
bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
{
	const struct bdevsw **newptr;
	int bmajor, i;

	KASSERT(mutex_owned(&devsw_lock));

	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted\n");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}

static int
cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
{
	const struct cdevsw **newptr;
	int cmajor, i;

	KASSERT(mutex_owned(&devsw_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted\n");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}

static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i;

	KASSERT(mutex_owned(&devsw_lock));

	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;
			bdevsw[i] = NULL;
			break;
		}
	}
	if (cdev != NULL) {
		for (i = 0 ; i < max_cdevsws ; i++) {
			if (cdevsw[i] != cdev)
				continue;
			cdevsw[i] = NULL;
			break;
		}
	}
}

void
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&devsw_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&devsw_lock);
}

/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	int bmajor;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	return (bdevsw[bmajor]);
}

/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	int cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	return (cdevsw[cmajor]);
}
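
/*
 * For illustration: a dev_t packs a major and a minor number, so after
 * the NODEV and range checks a lookup is a plain array index on the
 * major.  A sketch:
 *
 *	dev_t dev = makedev(cmajor, 0);
 *	const struct cdevsw *d = cdevsw_lookup(dev);
 *
 * is equivalent to reading cdevsw[cmajor] when cmajor is in range, and
 * yields NULL for NODEV, out-of-range, or never-attached majors.
 */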

/*
 * Look up a block device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
int
bdevsw_lookup_major(const struct bdevsw *bdev)
{
	int bmajor;

	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
		if (bdevsw[bmajor] == bdev)
			return (bmajor);
	}

	return (-1);
}

/*
 * Look up a character device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
int
cdevsw_lookup_major(const struct cdevsw *cdev)
{
	int cmajor;

	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
		if (cdevsw[cmajor] == cdev)
			return (cmajor);
	}

	return (-1);
}
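
/*
 * Illustrative use of the reverse lookups (hypothetical "mydev"): a
 * driver that only holds a pointer to its own switch table can recover
 * its major in order to manufacture a dev_t:
 *
 *	int cmajor = cdevsw_lookup_major(&mydev_cdevsw);
 *	if (cmajor != -1)
 *		dev = makedev(cmajor, unit);
 */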

/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(int bmajor)
{
	const char *name;
	int cmajor, i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&devsw_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&devsw_lock);
		return (NULL);
	}
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&devsw_lock);

	return (name);
}

/*
 * Convert from device name to block major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
int
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	int bmajor, i;

	if (name == NULL)
		return (-1);

	mutex_enter(&devsw_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		/* Any suffix after the driver name must be a unit number. */
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		bmajor = conv->d_bmajor;
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: buffer too small\n");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&devsw_lock);
		return (bmajor);
	}

	mutex_exit(&devsw_lock);
	return (-1);
}
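
/*
 * For illustration: the name may carry a trailing unit number, which
 * is accepted but does not take part in the match, and the buffer (if
 * supplied) receives the bare driver name.  Assuming a driver named
 * "sd" is attached with a block major:
 *
 *	char buf[16];
 *	int bmajor;
 *
 *	bmajor = devsw_name2blk("sd0", buf, sizeof(buf));
 *		(returns sd's block major; buf now holds "sd")
 *	bmajor = devsw_name2blk("sdcow", buf, sizeof(buf));
 *		(returns -1: the suffix is not a unit number)
 */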

/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	int bmajor, cmajor, i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = -1;
	rv = NODEV;

	mutex_enter(&devsw_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&devsw_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&devsw_lock);

	return (rv);
}

/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	int bmajor, cmajor, i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = -1;
	rv = NODEV;

	mutex_enter(&devsw_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&devsw_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&devsw_lock);

	return (rv);
}
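
/*
 * Illustrative use of the conversions: given the dev_t of a block
 * device (for example, a block vnode's v_rdev), derive the matching
 * character ("raw") dev_t with the same minor, or NODEV if no
 * character counterpart is registered:
 *
 *	dev_t rawdev = devsw_blk2chr(vp->v_rdev);
 *	if (rawdev == NODEV)
 *		(no character device shares this driver name)
 */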

/*
 * Device access methods.
 */

#define	DEV_LOCK(d)						\
	if ((d->d_flag & D_MPSAFE) == 0) {			\
		KERNEL_LOCK(1, curlwp);				\
	}

#define	DEV_UNLOCK(d)						\
	if ((d->d_flag & D_MPSAFE) == 0) {			\
		KERNEL_UNLOCK_ONE(curlwp);			\
	}
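
/*
 * A driver that does its own locking can set D_MPSAFE in d_flag so
 * that the wrappers below call into it without taking the kernel lock
 * (hypothetical example):
 *
 *	static const struct cdevsw mydev_cdevsw = {
 *		...
 *		.d_flag = D_OTHER | D_MPSAFE,
 *	};
 *
 * Without the flag, every wrapped call is bracketed by KERNEL_LOCK and
 * KERNEL_UNLOCK_ONE, serializing entry into the driver.
 */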

int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&devsw_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&devsw_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
		panic("bdev_strategy");

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}

int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}

int
bdev_type(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}
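
/*
 * Illustrative use of the type queries: d_flag carries a device class
 * in its D_TYPEMASK bits (e.g. D_DISK, D_TAPE, D_TTY, D_OTHER), letting
 * callers test the class without reaching into the switch structure:
 *
 *	if (bdev_type(dev) == D_DISK)
 *		(apply disk-specific handling, e.g. partition checks)
 */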

int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&devsw_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&devsw_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}

struct tty *
cdev_tty(dev_t dev)
{
	const struct cdevsw *d;
	struct tty *rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return NULL;

	/* XXX Check if necessary. */
	if (d->d_tty == NULL)
		return NULL;

	DEV_LOCK(d);
	rv = (*d->d_tty)(dev);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_type(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}