xref: /netbsd-src/sys/kern/subr_devsw.c (revision 76c7fc5f6b13ed0b1508e6b313e88e59977ed78e)
1 /*	$NetBSD: subr_devsw.c,v 1.38 2017/11/07 18:35:57 christos Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Overview
34  *
35  *	subr_devsw.c: registers device drivers by name and by major
36  *	number, and provides wrapper methods for performing I/O and
37  *	other tasks on device drivers, keying on the device number
38  *	(dev_t).
39  *
40  *	When the system is built, the config(8) command generates
41  *	static tables of device drivers built into the kernel image
42  *	along with their associated methods.  These are recorded in
43  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
44  *	and removed from the system dynamically.
45  *
46  * Allocation
47  *
48  *	When the system initially boots only the statically allocated
49  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
50  *	allocation, we allocate a fixed block of memory to hold the new,
51  *	expanded index.  This "fork" of the table is only ever performed
52  *	once in order to guarantee that other threads may safely access
53  *	the device tables:
54  *
55  *	o Once a thread has a "reference" to the table via an earlier
56  *	  open() call, we know that the entry in the table must exist
57  *	  and so it is safe to access it.
58  *
59  *	o Regardless of whether other threads see the old or new
60  *	  pointers, they will point to a correct device switch
61  *	  structure for the operation being performed.
62  *
63  *	XXX Currently, the wrapper methods such as cdev_read() verify
64  *	that a device driver does in fact exist before calling the
65  *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods should be valid until that reference
68  *	is dropped.
69  */
70 
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.38 2017/11/07 18:35:57 christos Exp $");
73 
74 #ifdef _KERNEL_OPT
75 #include "opt_dtrace.h"
76 #endif
77 
78 #include <sys/param.h>
79 #include <sys/conf.h>
80 #include <sys/kmem.h>
81 #include <sys/systm.h>
82 #include <sys/poll.h>
83 #include <sys/tty.h>
84 #include <sys/cpu.h>
85 #include <sys/buf.h>
86 #include <sys/reboot.h>
87 #include <sys/sdt.h>
88 
89 #ifdef DEVSW_DEBUG
90 #define	DPRINTF(x)	printf x
91 #else /* DEVSW_DEBUG */
92 #define	DPRINTF(x)
93 #endif /* DEVSW_DEBUG */
94 
95 #define	MAXDEVSW	512	/* the maximum of major device number */
96 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
97 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
98 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
99 
100 extern const struct bdevsw **bdevsw, *bdevsw0[];
101 extern const struct cdevsw **cdevsw, *cdevsw0[];
102 extern struct devsw_conv *devsw_conv, devsw_conv0[];
103 extern const int sys_bdevsws, sys_cdevsws;
104 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
105 
106 static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
107 static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
108 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
109 
110 kmutex_t device_lock;
111 
112 void (*biodone_vfs)(buf_t *) = (void *)nullop;
113 
/*
 * devsw_init:
 *
 *	One-time initialization: sanity-check that the statically
 *	configured switch tables fit within MAXDEVSW, and create the
 *	lock that protects the device switch tables.
 */
void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
}
122 
/*
 * devsw_attach:
 *
 *	Attach a block and/or character device driver under the given
 *	name, assigning major numbers.
 *
 *	=> devname: driver name to register; required.
 *	=> bdev/bmajor: block device switch and in/out major number;
 *	   bdev may be NULL when the driver has no block interface.
 *	=> cdev/cmajor: character device switch and in/out major
 *	   number; cdev is required.
 *
 *	On input *bmajor/*cmajor may be -1 to request allocation of a
 *	free major; on success they hold the majors actually used.
 *	Returns 0 or an errno value.
 */
int
devsw_attach(const char *devname,
	     const struct bdevsw *bdev, devmajor_t *bmajor,
	     const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;

	if (devname == NULL || cdev == NULL)
		return (EINVAL);

	mutex_enter(&device_lock);

	/*
	 * If the name is already recorded, re-use its majors (e.g. a
	 * module being reloaded gets the same numbers back).
	 */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		/* A caller passing -1 accepts the recorded majors. */
		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		/* Explicitly requested majors must match the record. */
		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto fail;
		}
		/* A block major without a bdevsw, or no char major, is bogus. */
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto fail;
		}

		/* The table slots must currently be empty. */
		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto fail;
		}

		if (bdev != NULL)
			bdevsw[*bmajor] = bdev;
		cdevsw[*cmajor] = cdev;

		mutex_exit(&device_lock);
		return (0);
	}

	/* Unknown name: allocate fresh majors and switch-table slots. */
	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto fail;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		devsw_detach_locked(bdev, NULL);
		goto fail;
	}

	/* Find a free name<->major conversion slot... */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	/* ...growing the conversion table by one entry if it is full. */
	if (i == max_devsw_convs) {
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto fail;
		}
		/* Initialize the new tail slot, then copy old entries. */
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		/* The static bootstrap table (devsw_conv0) is never freed. */
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	name = kmem_strdupsize(devname, NULL, KM_NOSLEEP);
	if (name == NULL) {
		/*
		 * NOTE(review): a conversion table grown above is not
		 * shrunk back on this failure path; that looks harmless
		 * since the new slot simply stays free.
		 */
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto fail;
	}

	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;

	mutex_exit(&device_lock);
	return (0);
 fail:
	mutex_exit(&device_lock);
	return (error);
}
223 
224 static int
225 bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
226 {
227 	const struct bdevsw **newptr;
228 	devmajor_t bmajor;
229 	int i;
230 
231 	KASSERT(mutex_owned(&device_lock));
232 
233 	if (devsw == NULL)
234 		return (0);
235 
236 	if (*devmajor < 0) {
237 		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
238 			if (bdevsw[bmajor] != NULL)
239 				continue;
240 			for (i = 0 ; i < max_devsw_convs ; i++) {
241 				if (devsw_conv[i].d_bmajor == bmajor)
242 					break;
243 			}
244 			if (i != max_devsw_convs)
245 				continue;
246 			break;
247 		}
248 		*devmajor = bmajor;
249 	}
250 
251 	if (*devmajor >= MAXDEVSW) {
252 		printf("%s: block majors exhausted", __func__);
253 		return (ENOMEM);
254 	}
255 
256 	if (*devmajor >= max_bdevsws) {
257 		KASSERT(bdevsw == bdevsw0);
258 		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
259 		if (newptr == NULL)
260 			return (ENOMEM);
261 		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
262 		bdevsw = newptr;
263 		max_bdevsws = MAXDEVSW;
264 	}
265 
266 	if (bdevsw[*devmajor] != NULL)
267 		return (EEXIST);
268 
269 	bdevsw[*devmajor] = devsw;
270 
271 	return (0);
272 }
273 
274 static int
275 cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
276 {
277 	const struct cdevsw **newptr;
278 	devmajor_t cmajor;
279 	int i;
280 
281 	KASSERT(mutex_owned(&device_lock));
282 
283 	if (*devmajor < 0) {
284 		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
285 			if (cdevsw[cmajor] != NULL)
286 				continue;
287 			for (i = 0 ; i < max_devsw_convs ; i++) {
288 				if (devsw_conv[i].d_cmajor == cmajor)
289 					break;
290 			}
291 			if (i != max_devsw_convs)
292 				continue;
293 			break;
294 		}
295 		*devmajor = cmajor;
296 	}
297 
298 	if (*devmajor >= MAXDEVSW) {
299 		printf("%s: character majors exhausted", __func__);
300 		return (ENOMEM);
301 	}
302 
303 	if (*devmajor >= max_cdevsws) {
304 		KASSERT(cdevsw == cdevsw0);
305 		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
306 		if (newptr == NULL)
307 			return (ENOMEM);
308 		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
309 		cdevsw = newptr;
310 		max_cdevsws = MAXDEVSW;
311 	}
312 
313 	if (cdevsw[*devmajor] != NULL)
314 		return (EEXIST);
315 
316 	cdevsw[*devmajor] = devsw;
317 
318 	return (0);
319 }
320 
321 static void
322 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
323 {
324 	int i;
325 
326 	KASSERT(mutex_owned(&device_lock));
327 
328 	if (bdev != NULL) {
329 		for (i = 0 ; i < max_bdevsws ; i++) {
330 			if (bdevsw[i] != bdev)
331 				continue;
332 			bdevsw[i] = NULL;
333 			break;
334 		}
335 	}
336 	if (cdev != NULL) {
337 		for (i = 0 ; i < max_cdevsws ; i++) {
338 			if (cdevsw[i] != cdev)
339 				continue;
340 			cdevsw[i] = NULL;
341 			break;
342 		}
343 	}
344 }
345 
/*
 * devsw_detach:
 *
 *	Public entry point to detach a driver: takes device_lock and
 *	removes the given switch pointers from the tables.
 *
 *	Always returns 0.
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}
355 
356 /*
357  * Look up a block device by number.
358  *
359  * => Caller must ensure that the device is attached.
360  */
361 const struct bdevsw *
362 bdevsw_lookup(dev_t dev)
363 {
364 	devmajor_t bmajor;
365 
366 	if (dev == NODEV)
367 		return (NULL);
368 	bmajor = major(dev);
369 	if (bmajor < 0 || bmajor >= max_bdevsws)
370 		return (NULL);
371 
372 	return (bdevsw[bmajor]);
373 }
374 
375 /*
376  * Look up a character device by number.
377  *
378  * => Caller must ensure that the device is attached.
379  */
380 const struct cdevsw *
381 cdevsw_lookup(dev_t dev)
382 {
383 	devmajor_t cmajor;
384 
385 	if (dev == NODEV)
386 		return (NULL);
387 	cmajor = major(dev);
388 	if (cmajor < 0 || cmajor >= max_cdevsws)
389 		return (NULL);
390 
391 	return (cdevsw[cmajor]);
392 }
393 
394 /*
395  * Look up a block device by reference to its operations set.
396  *
397  * => Caller must ensure that the device is not detached, and therefore
398  *    that the returned major is still valid when dereferenced.
399  */
400 devmajor_t
401 bdevsw_lookup_major(const struct bdevsw *bdev)
402 {
403 	devmajor_t bmajor;
404 
405 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
406 		if (bdevsw[bmajor] == bdev)
407 			return (bmajor);
408 	}
409 
410 	return (NODEVMAJOR);
411 }
412 
413 /*
414  * Look up a character device by reference to its operations set.
415  *
416  * => Caller must ensure that the device is not detached, and therefore
417  *    that the returned major is still valid when dereferenced.
418  */
419 devmajor_t
420 cdevsw_lookup_major(const struct cdevsw *cdev)
421 {
422 	devmajor_t cmajor;
423 
424 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
425 		if (cdevsw[cmajor] == cdev)
426 			return (cmajor);
427 	}
428 
429 	return (NODEVMAJOR);
430 }
431 
/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 * => Returns NULL unless both the block device and its paired
 *    character device are attached.
 */
const char *
devsw_blk2name(devmajor_t bmajor)
{
	const char *name;
	devmajor_t cmajor;
	int i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&device_lock);
	/* No driver attached at this block major -> no name. */
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NULL);
	}
	/* Find the conversion entry recording this block major. */
	for (i = 0 ; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	/*
	 * devsw_conv[i] is valid here: cmajor >= 0 implies the loop
	 * above broke out at entry i.  Only report a name when the
	 * paired character device is also attached.
	 */
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&device_lock);

	return (name);
}
465 
466 /*
467  * Convert char major number to device driver name.
468  */
469 const char *
470 cdevsw_getname(devmajor_t major)
471 {
472 	const char *name;
473 	int i;
474 
475 	name = NULL;
476 
477 	if (major < 0)
478 		return (NULL);
479 
480 	mutex_enter(&device_lock);
481 	for (i = 0 ; i < max_devsw_convs; i++) {
482 		if (devsw_conv[i].d_cmajor == major) {
483 			name = devsw_conv[i].d_name;
484 			break;
485 		}
486 	}
487 	mutex_exit(&device_lock);
488 	return (name);
489 }
490 
491 /*
492  * Convert block major number to device driver name.
493  */
494 const char *
495 bdevsw_getname(devmajor_t major)
496 {
497 	const char *name;
498 	int i;
499 
500 	name = NULL;
501 
502 	if (major < 0)
503 		return (NULL);
504 
505 	mutex_enter(&device_lock);
506 	for (i = 0 ; i < max_devsw_convs; i++) {
507 		if (devsw_conv[i].d_bmajor == major) {
508 			name = devsw_conv[i].d_name;
509 			break;
510 		}
511 	}
512 	mutex_exit(&device_lock);
513 	return (name);
514 }
515 
516 /*
517  * Convert from device name to block major number.
518  *
519  * => Caller must ensure that the device is not detached, and therefore
520  *    that the major number is still valid when dereferenced.
521  */
522 devmajor_t
523 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
524 {
525 	struct devsw_conv *conv;
526 	devmajor_t bmajor;
527 	int i;
528 
529 	if (name == NULL)
530 		return (NODEVMAJOR);
531 
532 	mutex_enter(&device_lock);
533 	for (i = 0 ; i < max_devsw_convs ; i++) {
534 		size_t len;
535 
536 		conv = &devsw_conv[i];
537 		if (conv->d_name == NULL)
538 			continue;
539 		len = strlen(conv->d_name);
540 		if (strncmp(conv->d_name, name, len) != 0)
541 			continue;
542 		if (*(name +len) && !isdigit(*(name + len)))
543 			continue;
544 		bmajor = conv->d_bmajor;
545 		if (bmajor < 0 || bmajor >= max_bdevsws ||
546 		    bdevsw[bmajor] == NULL)
547 			break;
548 		if (devname != NULL) {
549 #ifdef DEVSW_DEBUG
550 			if (strlen(conv->d_name) >= devnamelen)
551 				printf("%s: too short buffer", __func__);
552 #endif /* DEVSW_DEBUG */
553 			strncpy(devname, conv->d_name, devnamelen);
554 			devname[devnamelen - 1] = '\0';
555 		}
556 		mutex_exit(&device_lock);
557 		return (bmajor);
558 	}
559 
560 	mutex_exit(&device_lock);
561 	return (NODEVMAJOR);
562 }
563 
564 /*
565  * Convert from device name to char major number.
566  *
567  * => Caller must ensure that the device is not detached, and therefore
568  *    that the major number is still valid when dereferenced.
569  */
570 devmajor_t
571 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
572 {
573 	struct devsw_conv *conv;
574 	devmajor_t cmajor;
575 	int i;
576 
577 	if (name == NULL)
578 		return (NODEVMAJOR);
579 
580 	mutex_enter(&device_lock);
581 	for (i = 0 ; i < max_devsw_convs ; i++) {
582 		size_t len;
583 
584 		conv = &devsw_conv[i];
585 		if (conv->d_name == NULL)
586 			continue;
587 		len = strlen(conv->d_name);
588 		if (strncmp(conv->d_name, name, len) != 0)
589 			continue;
590 		if (*(name +len) && !isdigit(*(name + len)))
591 			continue;
592 		cmajor = conv->d_cmajor;
593 		if (cmajor < 0 || cmajor >= max_cdevsws ||
594 		    cdevsw[cmajor] == NULL)
595 			break;
596 		if (devname != NULL) {
597 #ifdef DEVSW_DEBUG
598 			if (strlen(conv->d_name) >= devnamelen)
599 				printf("%s: too short buffer", __func__);
600 #endif /* DEVSW_DEBUG */
601 			strncpy(devname, conv->d_name, devnamelen);
602 			devname[devnamelen - 1] = '\0';
603 		}
604 		mutex_exit(&device_lock);
605 		return (cmajor);
606 	}
607 
608 	mutex_exit(&device_lock);
609 	return (NODEVMAJOR);
610 }
611 
/*
 * Convert from character dev_t to block dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 * => Returns NODEV unless both majors have drivers attached.
 */
dev_t
devsw_chr2blk(dev_t cdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	cmajor = major(cdev);
	bmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	/* The character device itself must be attached. */
	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	/* Find the block major paired with this character major. */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_cmajor == cmajor) {
			bmajor = devsw_conv[i].d_bmajor;
			break;
		}
	}
	/* Only translate when a block driver is attached at that major;
	 * the minor number is carried over unchanged. */
	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
		rv = makedev(bmajor, minor(cdev));
	mutex_exit(&device_lock);

	return (rv);
}
646 
/*
 * Convert from block dev_t to character dev_t.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 * => Returns NODEV unless both majors have drivers attached.
 */
dev_t
devsw_blk2chr(dev_t bdev)
{
	devmajor_t bmajor, cmajor;
	int i;
	dev_t rv;

	bmajor = major(bdev);
	cmajor = NODEVMAJOR;
	rv = NODEV;

	mutex_enter(&device_lock);
	/* The block device itself must be attached. */
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return (NODEV);
	}
	/* Find the character major paired with this block major. */
	for (i = 0 ; i < max_devsw_convs ; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	/* Only translate when a character driver is attached at that
	 * major; the minor number is carried over unchanged. */
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		rv = makedev(cmajor, minor(bdev));
	mutex_exit(&device_lock);

	return (rv);
}
681 
682 /*
683  * Device access methods.
684  */
685 
/*
 * Take/release the big kernel lock around a driver method unless the
 * driver has declared itself MP-safe (D_MPSAFE in d_flag).
 *
 * Both macros reference an `mpflag' variable in the calling scope:
 * DEV_LOCK sets it, DEV_UNLOCK tests it.  The do/while(0) wrapper
 * makes them behave as single statements (safe after a bare `if'),
 * and the macro argument is parenthesized against operator-precedence
 * surprises at the call site.
 */
#define	DEV_LOCK(d)						\
do {								\
	if ((mpflag = ((d)->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}							\
} while (/*CONSTCOND*/ 0)

#define	DEV_UNLOCK(d)						\
do {								\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}							\
} while (/*CONSTCOND*/ 0)
695 
696 int
697 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
698 {
699 	const struct bdevsw *d;
700 	int rv, mpflag;
701 
702 	/*
703 	 * For open we need to lock, in order to synchronize
704 	 * with attach/detach.
705 	 */
706 	mutex_enter(&device_lock);
707 	d = bdevsw_lookup(dev);
708 	mutex_exit(&device_lock);
709 	if (d == NULL)
710 		return ENXIO;
711 
712 	DEV_LOCK(d);
713 	rv = (*d->d_open)(dev, flag, devtype, l);
714 	DEV_UNLOCK(d);
715 
716 	return rv;
717 }
718 
719 int
720 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
721 {
722 	const struct bdevsw *d;
723 	int rv, mpflag;
724 
725 	if ((d = bdevsw_lookup(dev)) == NULL)
726 		return ENXIO;
727 
728 	DEV_LOCK(d);
729 	rv = (*d->d_close)(dev, flag, devtype, l);
730 	DEV_UNLOCK(d);
731 
732 	return rv;
733 }
734 
735 SDT_PROVIDER_DECLARE(io);
736 SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);
737 
/*
 * Submit a buffer to the d_strategy method of the block driver that
 * owns bp->b_dev.  If no driver is attached, fail the buffer with
 * ENXIO, mark the entire transfer as residual, and complete it.
 */
void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	/* Fire the io:::start dtrace probe before the lookup. */
	SDT_PROBE1(io, kernel, , start, bp);

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		/* Nothing was transferred. */
		bp->b_resid = bp->b_bcount;
		biodone_vfs(bp); /* biodone() iff vfs present */
		return;
	}

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}
757 
758 int
759 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
760 {
761 	const struct bdevsw *d;
762 	int rv, mpflag;
763 
764 	if ((d = bdevsw_lookup(dev)) == NULL)
765 		return ENXIO;
766 
767 	DEV_LOCK(d);
768 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
769 	DEV_UNLOCK(d);
770 
771 	return rv;
772 }
773 
/*
 * Call the d_dump method of the block device driver for `dev',
 * writing `sz' bytes from `data' at block address `addr'.
 * Returns ENXIO when no driver is attached.
 */
int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}
794 
795 int
796 bdev_flags(dev_t dev)
797 {
798 	const struct bdevsw *d;
799 
800 	if ((d = bdevsw_lookup(dev)) == NULL)
801 		return 0;
802 	return d->d_flag & ~D_TYPEMASK;
803 }
804 
805 int
806 bdev_type(dev_t dev)
807 {
808 	const struct bdevsw *d;
809 
810 	if ((d = bdevsw_lookup(dev)) == NULL)
811 		return D_OTHER;
812 	return d->d_flag & D_TYPEMASK;
813 }
814 
/*
 * Query the size of a block device via the driver's d_psize method.
 * Returns -1 when no driver is attached or the driver provides no
 * d_psize.  (Units are whatever the driver's d_psize reports —
 * presumably device blocks; confirm against the driver contract.)
 */
int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	int rv, mpflag = 0;	/* pre-zeroed: DEV_LOCK may be skipped below */

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't try to lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}
837 
838 int
839 bdev_discard(dev_t dev, off_t pos, off_t len)
840 {
841 	const struct bdevsw *d;
842 	int rv, mpflag;
843 
844 	if ((d = bdevsw_lookup(dev)) == NULL)
845 		return ENXIO;
846 
847 	DEV_LOCK(d);
848 	rv = (*d->d_discard)(dev, pos, len);
849 	DEV_UNLOCK(d);
850 
851 	return rv;
852 }
853 
854 int
855 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
856 {
857 	const struct cdevsw *d;
858 	int rv, mpflag;
859 
860 	/*
861 	 * For open we need to lock, in order to synchronize
862 	 * with attach/detach.
863 	 */
864 	mutex_enter(&device_lock);
865 	d = cdevsw_lookup(dev);
866 	mutex_exit(&device_lock);
867 	if (d == NULL)
868 		return ENXIO;
869 
870 	DEV_LOCK(d);
871 	rv = (*d->d_open)(dev, flag, devtype, l);
872 	DEV_UNLOCK(d);
873 
874 	return rv;
875 }
876 
877 int
878 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
879 {
880 	const struct cdevsw *d;
881 	int rv, mpflag;
882 
883 	if ((d = cdevsw_lookup(dev)) == NULL)
884 		return ENXIO;
885 
886 	DEV_LOCK(d);
887 	rv = (*d->d_close)(dev, flag, devtype, l);
888 	DEV_UNLOCK(d);
889 
890 	return rv;
891 }
892 
893 int
894 cdev_read(dev_t dev, struct uio *uio, int flag)
895 {
896 	const struct cdevsw *d;
897 	int rv, mpflag;
898 
899 	if ((d = cdevsw_lookup(dev)) == NULL)
900 		return ENXIO;
901 
902 	DEV_LOCK(d);
903 	rv = (*d->d_read)(dev, uio, flag);
904 	DEV_UNLOCK(d);
905 
906 	return rv;
907 }
908 
909 int
910 cdev_write(dev_t dev, struct uio *uio, int flag)
911 {
912 	const struct cdevsw *d;
913 	int rv, mpflag;
914 
915 	if ((d = cdevsw_lookup(dev)) == NULL)
916 		return ENXIO;
917 
918 	DEV_LOCK(d);
919 	rv = (*d->d_write)(dev, uio, flag);
920 	DEV_UNLOCK(d);
921 
922 	return rv;
923 }
924 
925 int
926 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
927 {
928 	const struct cdevsw *d;
929 	int rv, mpflag;
930 
931 	if ((d = cdevsw_lookup(dev)) == NULL)
932 		return ENXIO;
933 
934 	DEV_LOCK(d);
935 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
936 	DEV_UNLOCK(d);
937 
938 	return rv;
939 }
940 
941 void
942 cdev_stop(struct tty *tp, int flag)
943 {
944 	const struct cdevsw *d;
945 	int mpflag;
946 
947 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
948 		return;
949 
950 	DEV_LOCK(d);
951 	(*d->d_stop)(tp, flag);
952 	DEV_UNLOCK(d);
953 }
954 
955 struct tty *
956 cdev_tty(dev_t dev)
957 {
958 	const struct cdevsw *d;
959 
960 	if ((d = cdevsw_lookup(dev)) == NULL)
961 		return NULL;
962 
963 	/* XXX Check if necessary. */
964 	if (d->d_tty == NULL)
965 		return NULL;
966 
967 	return (*d->d_tty)(dev);
968 }
969 
970 int
971 cdev_poll(dev_t dev, int flag, lwp_t *l)
972 {
973 	const struct cdevsw *d;
974 	int rv, mpflag;
975 
976 	if ((d = cdevsw_lookup(dev)) == NULL)
977 		return POLLERR;
978 
979 	DEV_LOCK(d);
980 	rv = (*d->d_poll)(dev, flag, l);
981 	DEV_UNLOCK(d);
982 
983 	return rv;
984 }
985 
986 paddr_t
987 cdev_mmap(dev_t dev, off_t off, int flag)
988 {
989 	const struct cdevsw *d;
990 	paddr_t rv;
991 	int mpflag;
992 
993 	if ((d = cdevsw_lookup(dev)) == NULL)
994 		return (paddr_t)-1LL;
995 
996 	DEV_LOCK(d);
997 	rv = (*d->d_mmap)(dev, off, flag);
998 	DEV_UNLOCK(d);
999 
1000 	return rv;
1001 }
1002 
1003 int
1004 cdev_kqfilter(dev_t dev, struct knote *kn)
1005 {
1006 	const struct cdevsw *d;
1007 	int rv, mpflag;
1008 
1009 	if ((d = cdevsw_lookup(dev)) == NULL)
1010 		return ENXIO;
1011 
1012 	DEV_LOCK(d);
1013 	rv = (*d->d_kqfilter)(dev, kn);
1014 	DEV_UNLOCK(d);
1015 
1016 	return rv;
1017 }
1018 
1019 int
1020 cdev_discard(dev_t dev, off_t pos, off_t len)
1021 {
1022 	const struct cdevsw *d;
1023 	int rv, mpflag;
1024 
1025 	if ((d = cdevsw_lookup(dev)) == NULL)
1026 		return ENXIO;
1027 
1028 	DEV_LOCK(d);
1029 	rv = (*d->d_discard)(dev, pos, len);
1030 	DEV_UNLOCK(d);
1031 
1032 	return rv;
1033 }
1034 
1035 int
1036 cdev_flags(dev_t dev)
1037 {
1038 	const struct cdevsw *d;
1039 
1040 	if ((d = cdevsw_lookup(dev)) == NULL)
1041 		return 0;
1042 	return d->d_flag & ~D_TYPEMASK;
1043 }
1044 
1045 int
1046 cdev_type(dev_t dev)
1047 {
1048 	const struct cdevsw *d;
1049 
1050 	if ((d = cdevsw_lookup(dev)) == NULL)
1051 		return D_OTHER;
1052 	return d->d_flag & D_TYPEMASK;
1053 }
1054 
/*
 * nommap(dev, off, prot)
 *
 *	mmap routine that always fails, for non-mmappable devices.
 *	Returns (paddr_t)-1, the conventional d_mmap failure value
 *	(see cdev_mmap above).
 */
paddr_t
nommap(dev_t dev, off_t off, int prot)
{

	return (paddr_t)-1;
}
1066