/*	$NetBSD: subr_devsw.c,v 1.30 2012/02/18 06:29:10 mrg Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots, only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow while a
 *	new major number is being allocated, we allocate a fixed block
 *	of memory to hold the new, expanded index.  This "fork" of the
 *	table is only ever performed once, in order to guarantee that
 *	other threads may safely access the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods should remain valid until that
 *	reference is dropped.
 */
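
/*
 * Example
 *
 *	A minimal sketch of how a dynamically loaded driver might
 *	register and later deregister a character device.  The "mydev"
 *	name and the mydev_* entry points are purely illustrative and
 *	do not exist in the tree; unused entry points are omitted for
 *	brevity.
 *
 *		static const struct cdevsw mydev_cdevsw = {
 *			.d_open = mydev_open,
 *			.d_close = mydev_close,
 *			.d_read = mydev_read,
 *			.d_write = mydev_write,
 *			.d_ioctl = mydev_ioctl,
 *			.d_flag = D_OTHER | D_MPSAFE,
 *		};
 *
 *		devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;
 *		int error;
 *
 *		error = devsw_attach("mydev", NULL, &bmajor,
 *		    &mydev_cdevsw, &cmajor);
 *		if (error != 0)
 *			return error;
 *		...
 *		devsw_detach(NULL, &mydev_cdevsw);
 */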

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.30 2012/02/18 06:29:10 mrg Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/tty.h>
#include <sys/cpu.h>
#include <sys/buf.h>
#include <sys/reboot.h>

#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* maximum number of device majors */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

kmutex_t device_lock;

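/*
 * One-time initialization of the device switch framework: sanity-check
 * the size of the statically configured tables and initialize
 * device_lock.
 */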
void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
}

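/*
 * Attach block and/or character device drivers under the given name.
 *
 * => If the name is already known, the previously assigned majors must
 *    be reused; otherwise, passing a negative major (NODEVMAJOR)
 *    requests that one be allocated dynamically.  The majors in use
 *    are returned through bmajor and cmajor.
 */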
int
devsw_attach(const char *devname,
	     const struct bdevsw *bdev, devmajor_t *bmajor,
	     const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;
	size_t len;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		if (bdev != NULL)
			bdevsw[*bmajor] = bdev;
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old, new;

		old = max_devsw_convs;
		new = old + 1;

		newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		newptr[old].d_name = NULL;
		newptr[old].d_bmajor = -1;
		newptr[old].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new;
	}

	len = strlen(devname) + 1;
	name = kmem_alloc(len, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}
	strlcpy(name, devname, len);

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}

static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newptr;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}

static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newptr;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}

static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (bdev != NULL) {
		for (i = 0 ; i < max_bdevsws ; i++) {
			if (bdevsw[i] != bdev)
				continue;
			bdevsw[i] = NULL;
			break;
		}
	}
	if (cdev != NULL) {
		for (i = 0 ; i < max_cdevsws ; i++) {
			if (cdevsw[i] != cdev)
				continue;
			cdevsw[i] = NULL;
			break;
		}
	}
}

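/*
 * Remove block and/or character device drivers from the device tables.
 *
 * => The name-to-major mapping in devsw_conv is left in place, so the
 *    same majors are reused if the driver is attached again.
 */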
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}

/*
 * Look up a block device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct bdevsw *
bdevsw_lookup(dev_t dev)
{
	devmajor_t bmajor;

	if (dev == NODEV)
		return (NULL);
	bmajor = major(dev);
	if (bmajor < 0 || bmajor >= max_bdevsws)
		return (NULL);

	return (bdevsw[bmajor]);
}

/*
 * Look up a character device by number.
 *
 * => Caller must ensure that the device is attached.
 */
const struct cdevsw *
cdevsw_lookup(dev_t dev)
{
	devmajor_t cmajor;

	if (dev == NODEV)
		return (NULL);
	cmajor = major(dev);
	if (cmajor < 0 || cmajor >= max_cdevsws)
		return (NULL);

	return (cdevsw[cmajor]);
}

/*
 * Look up a block device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
bdevsw_lookup_major(const struct bdevsw *bdev)
{
	devmajor_t bmajor;

	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
		if (bdevsw[bmajor] == bdev)
			return (bmajor);
	}

	return (NODEVMAJOR);
}

/*
 * Look up a character device by reference to its operations set.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the returned major is still valid when dereferenced.
 */
devmajor_t
cdevsw_lookup_major(const struct cdevsw *cdev)
{
	devmajor_t cmajor;

	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
		if (cdevsw[cmajor] == cdev)
			return (cmajor);
	}

	return (NODEVMAJOR);
}

/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(devmajor_t bmajor)
{
	const char *name;
	devmajor_t cmajor;
	int i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NULL);
	}
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&device_lock);

	return (name);
}

/*
 * Convert char major number to device driver name.
 */
const char *
cdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return (NULL);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_cmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return (name);
}

/*
 * Convert block major number to device driver name.
 */
const char *
bdevsw_getname(devmajor_t major)
{
	const char *name;
	int i;

	name = NULL;

	if (major < 0)
		return (NULL);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == major) {
			name = devsw_conv[i].d_name;
			break;
		}
	}
	mutex_exit(&device_lock);
	return (name);
}

/*
 * Convert from device name to block major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t bmajor;
	int i;

	if (name == NULL)
		return (NODEVMAJOR);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		bmajor = conv->d_bmajor;
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2blk: buffer too short");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return (bmajor);
	}

	mutex_exit(&device_lock);
	return (NODEVMAJOR);
}

/*
 * Convert from device name to char major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2chr(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t cmajor;
	int i;

	if (name == NULL)
		return (NODEVMAJOR);

	mutex_enter(&device_lock);
	for (i = 0 ; i < max_devsw_convs ; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		if (*(name + len) && !isdigit(*(name + len)))
			continue;
		cmajor = conv->d_cmajor;
		if (cmajor < 0 || cmajor >= max_cdevsws ||
		    cdevsw[cmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("devsw_name2chr: buffer too short");
#endif /* DEVSW_DEBUG */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return (cmajor);
	}

	mutex_exit(&device_lock);
	return (NODEVMAJOR);
}
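
/*
 * Example (illustrative): assuming a "wd" driver is registered,
 * devsw_name2blk("wd0", buf, sizeof(buf)) returns the block major
 * assigned to "wd" and copies the bare driver name "wd" into buf;
 * devsw_name2chr() does the same for the character major.
 */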

/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&device_lock);

	return (rv);
}

/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&device_lock);

	return (rv);
}
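
/*
 * Example (illustrative): if a driver has both block and character
 * majors registered, devsw_blk2chr() maps its block dev_t to the
 * character dev_t with the same minor, and devsw_chr2blk() performs
 * the reverse mapping; NODEV is returned when no counterpart exists.
 */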

/*
 * Device access methods.
 */

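/*
 * DEV_LOCK/DEV_UNLOCK: drivers that are not marked D_MPSAFE in d_flag
 * are serialized by taking the kernel lock around each method call;
 * D_MPSAFE drivers are called directly and are expected to do their
 * own locking.
 */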
#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}

int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}

int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}

int
bdev_type(dev_t dev)
{
	const struct bdevsw *d;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}

int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't try to lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}

int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&device_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&device_lock);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}

struct tty *
cdev_tty(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return NULL;

	/* XXX Check if necessary. */
	if (d->d_tty == NULL)
		return NULL;

	return (*d->d_tty)(dev);
}

int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}

paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}

int
cdev_type(dev_t dev)
{
	const struct cdevsw *d;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return D_OTHER;
	return d->d_flag & D_TYPEMASK;
}
993