1 /*
2  * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * ----------------------------------------------------------------------------
35  * "THE BEER-WARE LICENSE" (Revision 42):
36  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
37  * can do whatever you want with this stuff. If we meet some day, and you think
38  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
39  * ----------------------------------------------------------------------------
40  *
41  * Copyright (c) 1982, 1986, 1988, 1993
42  *	The Regents of the University of California.  All rights reserved.
43  * (c) UNIX System Laboratories, Inc.
44  * All or some portions of this file are derived from material licensed
45  * to the University of California by American Telephone and Telegraph
46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47  * the permission of UNIX System Laboratories, Inc.
48  *
49  * Redistribution and use in source and binary forms, with or without
50  * modification, are permitted provided that the following conditions
51  * are met:
52  * 1. Redistributions of source code must retain the above copyright
53  *    notice, this list of conditions and the following disclaimer.
54  * 2. Redistributions in binary form must reproduce the above copyright
55  *    notice, this list of conditions and the following disclaimer in the
56  *    documentation and/or other materials provided with the distribution.
57  * 3. All advertising materials mentioning features or use of this software
58  *    must display the following acknowledgement:
59  *	This product includes software developed by the University of
60  *	California, Berkeley and its contributors.
61  * 4. Neither the name of the University nor the names of its contributors
62  *    may be used to endorse or promote products derived from this software
63  *    without specific prior written permission.
64  *
65  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
66  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
69  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
70  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
71  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
72  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
73  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
74  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
75  * SUCH DAMAGE.
76  *
77  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
78  * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
79  * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
80  * $DragonFly: src/sys/kern/subr_disk.c,v 1.40 2008/06/05 18:06:32 swildner Exp $
81  */
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/kernel.h>
86 #include <sys/proc.h>
87 #include <sys/sysctl.h>
88 #include <sys/buf.h>
89 #include <sys/conf.h>
90 #include <sys/disklabel.h>
91 #include <sys/disklabel32.h>
92 #include <sys/disklabel64.h>
93 #include <sys/diskslice.h>
94 #include <sys/diskmbr.h>
95 #include <sys/disk.h>
96 #include <sys/kerneldump.h>
97 #include <sys/malloc.h>
99 #include <machine/md_var.h>
100 #include <sys/ctype.h>
101 #include <sys/syslog.h>
102 #include <sys/device.h>
103 #include <sys/msgport.h>
104 #include <sys/devfs.h>
105 #include <sys/thread.h>
106 #include <sys/dsched.h>
107 #include <sys/queue.h>
108 #include <sys/lock.h>
109 #include <sys/udev.h>
110 
111 #include <sys/buf2.h>
112 #include <sys/mplock2.h>
113 #include <sys/msgport2.h>
114 #include <sys/thread2.h>
115 
116 static MALLOC_DEFINE(M_DISK, "disk", "disk data");
117 static int disk_debug_enable = 0;
118 
119 static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
120 static void disk_msg_core(void *);
121 static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
122 static void disk_probe(struct disk *dp, int reprobe);
123 static void _setdiskinfo(struct disk *disk, struct disk_info *info);
124 static void bioqwritereorder(struct bio_queue_head *bioq);
125 static void disk_cleanserial(char *serno);
126 static int disk_debug(int, char *, ...) __printflike(2, 3);
127 
128 static d_open_t diskopen;
129 static d_close_t diskclose;
130 static d_ioctl_t diskioctl;
131 static d_strategy_t diskstrategy;
132 static d_psize_t diskpsize;
133 static d_clone_t diskclone;
134 static d_dump_t diskdump;
135 
136 static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
137 static struct lwkt_token disklist_token;
138 
139 static struct dev_ops disk_ops = {
140 	{ "disk", 0, D_DISK | D_MPSAFE },
141 	.d_open = diskopen,
142 	.d_close = diskclose,
143 	.d_read = physread,
144 	.d_write = physwrite,
145 	.d_ioctl = diskioctl,
146 	.d_strategy = diskstrategy,
147 	.d_dump = diskdump,
148 	.d_psize = diskpsize,
149 	.d_clone = diskclone
150 };
151 
152 static struct objcache 	*disk_msg_cache;
153 
154 struct objcache_malloc_args disk_msg_malloc_args = {
155 	sizeof(struct disk_msg), M_DISK };
156 
157 static struct lwkt_port disk_dispose_port;
158 static struct lwkt_port disk_msg_port;
159 
160 static int
161 disk_debug(int level, char *fmt, ...)
162 {
163 	__va_list ap;
164 
165 	__va_start(ap, fmt);
166 	if (level <= disk_debug_enable)
167 		kvprintf(fmt, ap);
168 	__va_end(ap);
169 
170 	return 0;
171 }
172 
173 static int
174 disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
175 {
176 	struct disk_info *info = &dp->d_info;
177 	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
178 	disklabel_ops_t ops;
179 	struct partinfo part;
180 	const char *msg;
181 	cdev_t ndev;
182 	int sno;
183 	u_int i;
184 
185 	disk_debug(2,
186 		    "disk_probe_slice (begin): %s (%s)\n",
187 			dev->si_name, dp->d_cdev->si_name);
188 
189 	sno = slice ? slice - 1 : 0;
190 
191 	ops = &disklabel32_ops;
192 	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
193 	if (msg && !strcmp(msg, "no disk label")) {
194 		ops = &disklabel64_ops;
195 		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
196 	}
197 	if (msg == NULL) {
198 		if (slice != WHOLE_DISK_SLICE)
199 			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
200 		else
201 			sp->ds_reserved = 0;
202 
203 		sp->ds_ops = ops;
204 		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
205 			ops->op_loadpartinfo(sp->ds_label, i, &part);
206 			if (part.fstype) {
207 				if (reprobe &&
208 				    (ndev = devfs_find_device_by_name("%s%c",
209 						dev->si_name, 'a' + i))
210 				) {
211 					/*
212 					 * Device already exists and
213 					 * is still valid.
214 					 */
215 					ndev->si_flags |= SI_REPROBE_TEST;
216 				} else {
217 					ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
218 						dkmakeminor(dkunit(dp->d_cdev),
219 							    slice, i),
220 						UID_ROOT, GID_OPERATOR, 0640,
221 						"%s%c", dev->si_name, 'a'+ i);
222 					ndev->si_disk = dp;
223 					udev_dict_set_cstr(ndev, "subsystem", "disk");
224 					/* Inherit parent's disk type */
225 					if (dp->d_disktype) {
226 						udev_dict_set_cstr(ndev, "disk-type",
227 						    __DECONST(char *, dp->d_disktype));
228 					}
229 
230 					/* Create serno alias */
231 					if (dp->d_info.d_serialno) {
232 						make_dev_alias(ndev,
233 						    "serno/%s.s%d%c",
234 						    dp->d_info.d_serialno,
235 						    sno, 'a' + i);
236 					}
237 
238 					ndev->si_flags |= SI_REPROBE_TEST;
239 				}
240 			}
241 		}
242 	} else if (info->d_dsflags & DSO_COMPATLABEL) {
243 		msg = NULL;
244 		if (sp->ds_size >= 0x100000000ULL)
245 			ops = &disklabel64_ops;
246 		else
247 			ops = &disklabel32_ops;
248 		sp->ds_label = ops->op_clone_label(info, sp);
249 	} else {
250 		if (sp->ds_type == DOSPTYP_386BSD || /* XXX */
251 		    sp->ds_type == DOSPTYP_NETBSD ||
252 		    sp->ds_type == DOSPTYP_OPENBSD) {
253 			log(LOG_WARNING, "%s: cannot find label (%s)\n",
254 			    dev->si_name, msg);
255 		}
256 	}
257 
258 	if (msg == NULL) {
259 		sp->ds_wlabel = FALSE;
260 	}
261 
262 	return (msg ? EINVAL : 0);
263 }
264 
265 /*
266  * This routine is only called for newly minted drives or to reprobe
267  * a drive with no open slices.  disk_probe_slice() is called directly
268  * when reprobing partition changes within slices.
269  */
270 static void
271 disk_probe(struct disk *dp, int reprobe)
272 {
273 	struct disk_info *info = &dp->d_info;
274 	cdev_t dev = dp->d_cdev;
275 	cdev_t ndev;
276 	int error, i, sno;
277 	struct diskslices *osp;
278 	struct diskslice *sp;
279 
280 	KKASSERT (info->d_media_blksize != 0);
281 
282 	osp = dp->d_slice;
283 	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
284 	disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name);
285 
286 	error = mbrinit(dev, info, &(dp->d_slice));
287 	if (error) {
288 		dsgone(&osp);
289 		return;
290 	}
291 
292 	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
293 		/*
294 		 * Ignore the whole-disk slice, it has already been created.
295 		 */
296 		if (i == WHOLE_DISK_SLICE)
297 			continue;
298 
299 #if 0
300 		/*
301 		 * Ignore the compatibility slice s0 if it's a device mapper
302 		 * volume.
303 		 */
304 		if ((i == COMPATIBILITY_SLICE) &&
305 		    (info->d_dsflags & DSO_DEVICEMAPPER))
306 			continue;
307 #endif
308 
309 		sp = &dp->d_slice->dss_slices[i];
310 
311 		/*
312 		 * Handle s0.  s0 is a compatibility slice if there are no
313 		 * other slices and it has not otherwise been set up, else
314 		 * we ignore it.
315 		 */
316 		if (i == COMPATIBILITY_SLICE) {
317 			sno = 0;
318 			if (sp->ds_type == 0 &&
319 			    dp->d_slice->dss_nslices == BASE_SLICE) {
320 				sp->ds_size = info->d_media_blocks;
321 				sp->ds_reserved = 0;
322 			}
323 		} else {
324 			sno = i - 1;
325 			sp->ds_reserved = 0;
326 		}
327 
328 		/*
329 		 * Ignore 0-length slices
330 		 */
331 		if (sp->ds_size == 0)
332 			continue;
333 
334 		if (reprobe &&
335 		    (ndev = devfs_find_device_by_name("%ss%d",
336 						      dev->si_name, sno))) {
337 			/*
338 			 * Device already exists and is still valid
339 			 */
340 			ndev->si_flags |= SI_REPROBE_TEST;
341 		} else {
342 			/*
343 			 * Else create new device
344 			 */
345 			ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
346 					dkmakewholeslice(dkunit(dev), i),
347 					UID_ROOT, GID_OPERATOR, 0640,
348 					(info->d_dsflags & DSO_DEVICEMAPPER)?
349 					"%s.s%d" : "%ss%d", dev->si_name, sno);
350 			udev_dict_set_cstr(ndev, "subsystem", "disk");
351 			/* Inherit parent's disk type */
352 			if (dp->d_disktype) {
353 				udev_dict_set_cstr(ndev, "disk-type",
354 				    __DECONST(char *, dp->d_disktype));
355 			}
356 
357 			/* Create serno alias */
358 			if (dp->d_info.d_serialno) {
359 				make_dev_alias(ndev, "serno/%s.s%d",
360 					       dp->d_info.d_serialno, sno);
361 			}
362 
363 			ndev->si_disk = dp;
364 			ndev->si_flags |= SI_REPROBE_TEST;
365 		}
366 		sp->ds_dev = ndev;
367 
368 		/*
369 		 * Probe appropriate slices for a disklabel
370 		 *
371 		 * XXX slice type 1 used by our gpt probe code.
372 		 * XXX slice type 0 used by mbr compat slice.
373 		 */
374 		if (sp->ds_type == DOSPTYP_386BSD ||
375 		    sp->ds_type == DOSPTYP_NETBSD ||
376 		    sp->ds_type == DOSPTYP_OPENBSD ||
377 		    sp->ds_type == 0 ||
378 		    sp->ds_type == 1) {
379 			if (dp->d_slice->dss_first_bsd_slice == 0)
380 				dp->d_slice->dss_first_bsd_slice = i;
381 			disk_probe_slice(dp, ndev, i, reprobe);
382 		}
383 	}
384 	dsgone(&osp);
385 	disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name);
386 }
387 
388 
389 static void
390 disk_msg_core(void *arg)
391 {
392 	struct disk	*dp;
393 	struct diskslice *sp;
394 	disk_msg_t msg;
395 	int run;
396 
397 	lwkt_gettoken(&disklist_token);
398 	lwkt_initport_thread(&disk_msg_port, curthread);
399 	wakeup(curthread);	/* synchronous startup */
400 	lwkt_reltoken(&disklist_token);
401 
402 	get_mplock();	/* not mpsafe yet? */
403 	run = 1;
404 
405 	while (run) {
406 		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);
407 
408 		switch (msg->hdr.u.ms_result) {
409 		case DISK_DISK_PROBE:
410 			dp = (struct disk *)msg->load;
411 			disk_debug(1,
412 				    "DISK_DISK_PROBE: %s\n",
413 					dp->d_cdev->si_name);
414 			disk_probe(dp, 0);
415 			break;
416 		case DISK_DISK_DESTROY:
417 			dp = (struct disk *)msg->load;
418 			disk_debug(1,
419 				    "DISK_DISK_DESTROY: %s\n",
420 					dp->d_cdev->si_name);
421 			devfs_destroy_subnames(dp->d_cdev->si_name);
422 			devfs_destroy_dev(dp->d_cdev);
423 			lwkt_gettoken(&disklist_token);
424 			LIST_REMOVE(dp, d_list);
425 			lwkt_reltoken(&disklist_token);
426 			if (dp->d_info.d_serialno) {
427 				kfree(dp->d_info.d_serialno, M_TEMP);
428 				dp->d_info.d_serialno = NULL;
429 			}
430 			break;
431 		case DISK_UNPROBE:
432 			dp = (struct disk *)msg->load;
433 			disk_debug(1,
434 				    "DISK_DISK_UNPROBE: %s\n",
435 					dp->d_cdev->si_name);
436 			devfs_destroy_subnames(dp->d_cdev->si_name);
437 			break;
438 		case DISK_SLICE_REPROBE:
439 			dp = (struct disk *)msg->load;
440 			sp = (struct diskslice *)msg->load2;
441 			devfs_clr_subnames_flag(sp->ds_dev->si_name,
442 						SI_REPROBE_TEST);
443 			disk_debug(1,
444 				    "DISK_SLICE_REPROBE: %s\n",
445 				    sp->ds_dev->si_name);
446 			disk_probe_slice(dp, sp->ds_dev,
447 					 dkslice(sp->ds_dev), 1);
448 			devfs_destroy_subnames_without_flag(
449 					sp->ds_dev->si_name, SI_REPROBE_TEST);
450 			break;
451 		case DISK_DISK_REPROBE:
452 			dp = (struct disk *)msg->load;
453 			devfs_clr_subnames_flag(dp->d_cdev->si_name, SI_REPROBE_TEST);
454 			disk_debug(1,
455 				    "DISK_DISK_REPROBE: %s\n",
456 				    dp->d_cdev->si_name);
457 			disk_probe(dp, 1);
458 			devfs_destroy_subnames_without_flag(
459 					dp->d_cdev->si_name, SI_REPROBE_TEST);
460 			break;
461 		case DISK_SYNC:
462 			disk_debug(1, "DISK_SYNC\n");
463 			break;
464 		default:
465 			devfs_debug(DEVFS_DEBUG_WARNING,
466 				    "disk_msg_core: unknown message "
467 				    "received at core\n");
468 			break;
469 		}
470 		lwkt_replymsg(&msg->hdr, 0);
471 	}
472 	lwkt_exit();
473 }
474 
475 
476 /*
477  * Acts as a message drain. Any message that is replied to here gets
478  * destroyed and the memory freed.
479  */
480 static void
481 disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
482 {
483 	objcache_put(disk_msg_cache, msg);
484 }
485 
486 
487 void
488 disk_msg_send(uint32_t cmd, void *load, void *load2)
489 {
490 	disk_msg_t disk_msg;
491 	lwkt_port_t port = &disk_msg_port;
492 
493 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
494 
495 	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);
496 
497 	disk_msg->hdr.u.ms_result = cmd;
498 	disk_msg->load = load;
499 	disk_msg->load2 = load2;
500 	KKASSERT(port);
501 	lwkt_sendmsg(port, &disk_msg->hdr);
502 }
503 
504 void
505 disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
506 {
507 	struct lwkt_port rep_port;
508 	disk_msg_t disk_msg;
509 	lwkt_port_t port;
510 
511 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
512 	port = &disk_msg_port;
513 
514 	/* XXX could probably use curthread's built-in msgport */
515 	lwkt_initport_thread(&rep_port, curthread);
516 	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
517 
518 	disk_msg->hdr.u.ms_result = cmd;
519 	disk_msg->load = load;
520 	disk_msg->load2 = load2;
521 
522 	lwkt_sendmsg(port, &disk_msg->hdr);
523 	lwkt_waitmsg(&disk_msg->hdr, 0);
524 	objcache_put(disk_msg_cache, disk_msg);
525 }
526 
527 /*
528  * Create a raw device for the dev_ops template (which is returned).  Also
529  * create a slice and unit managed disk and overload the user visible
530  * device space with it.
531  *
532  * NOTE: The returned raw device is NOT a slice and unit managed device.
533  * It is an actual raw device representing the raw disk as specified by
534  * the passed dev_ops.  The disk layer not only returns such a raw device,
535  * it also uses it internally when passing (modified) commands through.
536  */
537 cdev_t
538 disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
539 {
540 	return disk_create_named(NULL, unit, dp, raw_ops);
541 }
542 
543 cdev_t
544 disk_create_named(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops)
545 {
546 	cdev_t rawdev;
547 
548 	disk_debug(1, "disk_create (begin): %s%d\n", name, unit);
549 
550 	if (name) {
551 		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
552 		    UID_ROOT, GID_OPERATOR, 0640, "%s", name);
553 	} else {
554 		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
555 		    UID_ROOT, GID_OPERATOR, 0640,
556 		    "%s%d", raw_ops->head.name, unit);
557 	}
558 
559 	bzero(dp, sizeof(*dp));
560 
561 	dp->d_rawdev = rawdev;
562 	dp->d_raw_ops = raw_ops;
563 	dp->d_dev_ops = &disk_ops;
564 
565 	if (name) {
566 		dp->d_cdev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
567 		    dkmakewholedisk(unit), UID_ROOT, GID_OPERATOR, 0640,
568 		    "%s", name);
569 	} else {
570 		dp->d_cdev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
571 		    dkmakewholedisk(unit),
572 		    UID_ROOT, GID_OPERATOR, 0640,
573 		    "%s%d", raw_ops->head.name, unit);
574 	}
575 
576 	udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk");
577 	dp->d_cdev->si_disk = dp;
578 
579 	if (name)
580 		dsched_disk_create_callback(dp, name, unit);
581 	else
582 		dsched_disk_create_callback(dp, raw_ops->head.name, unit);
583 
584 	lwkt_gettoken(&disklist_token);
585 	LIST_INSERT_HEAD(&disklist, dp, d_list);
586 	lwkt_reltoken(&disklist_token);
587 
588 	disk_debug(1, "disk_create (end): %s%d\n",
589 	    (name != NULL)?(name):(raw_ops->head.name), unit);
590 
591 	return (dp->d_rawdev);
592 }
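
/*
 * Example usage (an illustrative sketch only: the "foo" driver, its softc
 * and its foo_ops dev_ops are hypothetical; only the disk_* functions and
 * the disk_info fields referenced elsewhere in this file are assumed),
 * together with disk_setdiskinfo() and disk_destroy() below:
 *
 *	struct foo_softc {
 *		struct disk		sc_disk;
 *		struct disk_info	sc_info;
 *	};
 *
 *	static int
 *	foo_attach(struct foo_softc *sc, int unit)
 *	{
 *		disk_create(unit, &sc->sc_disk, &foo_ops);
 *		bzero(&sc->sc_info, sizeof(sc->sc_info));
 *		sc->sc_info.d_media_blksize = 512;
 *		sc->sc_info.d_media_blocks = 1024 * 1024;  (size is made up)
 *		disk_setdiskinfo(&sc->sc_disk, &sc->sc_info);
 *		return (0);
 *	}
 *
 * On detach the driver simply calls disk_destroy(&sc->sc_disk).
 */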
593 
594 int
595 disk_setdisktype(struct disk *disk, const char *type)
596 {
597 	KKASSERT(disk != NULL);
598 
599 	disk->d_disktype = type;
600 	return udev_dict_set_cstr(disk->d_cdev, "disk-type", __DECONST(char *, type));
601 }
602 
603 static void
604 _setdiskinfo(struct disk *disk, struct disk_info *info)
605 {
606 	char *oldserialno;
607 
608 	oldserialno = disk->d_info.d_serialno;
609 	bcopy(info, &disk->d_info, sizeof(disk->d_info));
610 	info = &disk->d_info;
611 
612 	disk_debug(1,
613 		    "_setdiskinfo: %s\n",
614 			disk->d_cdev->si_name);
615 
616 	/*
617 	 * The serial number is duplicated so the caller can throw
618 	 * their copy away.
619 	 */
620 	if (info->d_serialno && info->d_serialno[0]) {
621 		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
622 		disk_cleanserial(info->d_serialno);
623 		if (disk->d_cdev) {
624 			make_dev_alias(disk->d_cdev, "serno/%s",
625 					info->d_serialno);
626 		}
627 	} else {
628 		info->d_serialno = NULL;
629 	}
630 	if (oldserialno)
631 		kfree(oldserialno, M_TEMP);
632 
633 	dsched_disk_update_callback(disk, info);
634 
635 	/*
636 	 * The caller may set d_media_size or d_media_blocks and we
637 	 * calculate the other.
638 	 */
639 	KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0);
640 	if (info->d_media_size == 0 && info->d_media_blocks) {
641 		info->d_media_size = (u_int64_t)info->d_media_blocks *
642 				     info->d_media_blksize;
643 	} else if (info->d_media_size && info->d_media_blocks == 0 &&
644 		   info->d_media_blksize) {
645 		info->d_media_blocks = info->d_media_size /
646 				       info->d_media_blksize;
647 	}
648 
649 	/*
650 	 * The si_* fields for rawdev are not set until after the
651 	 * disk_create() call, so someone using the cooked version
652 	 * of the raw device (i.e. da0s0) will not get the right
653 	 * si_iosize_max unless we fix it up here.
654 	 */
655 	if (disk->d_cdev && disk->d_rawdev &&
656 	    disk->d_cdev->si_iosize_max == 0) {
657 		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
658 		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
659 		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
660 	}
661 
662 	/* Add the serial number to the udev_dictionary */
663 	if (info->d_serialno)
664 		udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno);
665 }
666 
667 /*
668  * Disk drivers must call this routine when media parameters are available
669  * or have changed.
670  */
671 void
672 disk_setdiskinfo(struct disk *disk, struct disk_info *info)
673 {
674 	_setdiskinfo(disk, info);
675 	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
676 	disk_debug(1,
677 		    "disk_setdiskinfo: sent probe for %s\n",
678 			disk->d_cdev->si_name);
679 }
680 
681 void
682 disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
683 {
684 	_setdiskinfo(disk, info);
685 	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
686 	disk_debug(1,
687 		    "disk_setdiskinfo_sync: sent probe for %s\n",
688 			disk->d_cdev->si_name);
689 }
690 
691 /*
692  * This routine is called when an adapter detaches.  The higher level
693  * managed disk device is destroyed while the lower level raw device is
694  * released.
695  */
696 void
697 disk_destroy(struct disk *disk)
698 {
699 	dsched_disk_destroy_callback(disk);
700 	disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
701 	return;
702 }
703 
704 int
705 disk_dumpcheck(cdev_t dev, u_int64_t *size, u_int64_t *blkno, u_int32_t *secsize)
706 {
707 	struct partinfo pinfo;
708 	int error;
709 
710 	bzero(&pinfo, sizeof(pinfo));
711 	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
712 			   proc0.p_ucred, NULL);
713 	if (error)
714 		return (error);
715 
716 	if (pinfo.media_blksize == 0)
717 		return (ENXIO);
718 
719 	if (blkno) /* XXX: make sure this reserved stuff is right */
720 		*blkno = pinfo.reserved_blocks +
721 			pinfo.media_offset / pinfo.media_blksize;
722 	if (secsize)
723 		*secsize = pinfo.media_blksize;
724 	if (size)
725 		*size = (pinfo.media_blocks - pinfo.reserved_blocks);
726 
727 	return (0);
728 }
729 
730 int
731 disk_dumpconf(cdev_t dev, u_int onoff)
732 {
733 	struct dumperinfo di;
734 	u_int64_t	size, blkno;
735 	u_int32_t	secsize;
736 	int error;
737 
738 	if (!onoff)
739 		return set_dumper(NULL);
740 
741 	error = disk_dumpcheck(dev, &size, &blkno, &secsize);
742 
743 	if (error)
744 		return ENXIO;
745 
746 	bzero(&di, sizeof(struct dumperinfo));
747 	di.dumper = diskdump;
748 	di.priv = dev;
749 	di.blocksize = secsize;
750 	di.mediaoffset = blkno * DEV_BSIZE;
751 	di.mediasize = size * DEV_BSIZE;
752 
753 	return set_dumper(&di);
754 }
755 
756 void
757 disk_unprobe(struct disk *disk)
758 {
759 	if (disk == NULL)
760 		return;
761 
762 	disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
763 }
764 
765 void
766 disk_invalidate (struct disk *disk)
767 {
768 	dsgone(&disk->d_slice);
769 }
770 
771 struct disk *
772 disk_enumerate(struct disk *disk)
773 {
774 	struct disk *dp;
775 
776 	lwkt_gettoken(&disklist_token);
777 	if (!disk)
778 		dp = (LIST_FIRST(&disklist));
779 	else
780 		dp = (LIST_NEXT(disk, d_list));
781 	lwkt_reltoken(&disklist_token);
782 
783 	return dp;
784 }
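
/*
 * Iteration sketch (this is exactly how sysctl_disks() below walks the
 * list; do_something() is just a placeholder):
 *
 *	struct disk *dp = NULL;
 *
 *	while ((dp = disk_enumerate(dp)) != NULL)
 *		do_something(dp);
 *
 * Note that the disklist token is only held across each individual call,
 * not across the whole loop.
 */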
785 
786 static
787 int
788 sysctl_disks(SYSCTL_HANDLER_ARGS)
789 {
790 	struct disk *disk;
791 	int error, first;
792 
793 	disk = NULL;
794 	first = 1;
795 
796 	while ((disk = disk_enumerate(disk))) {
797 		if (!first) {
798 			error = SYSCTL_OUT(req, " ", 1);
799 			if (error)
800 				return error;
801 		} else {
802 			first = 0;
803 		}
804 		error = SYSCTL_OUT(req, disk->d_rawdev->si_name,
805 				   strlen(disk->d_rawdev->si_name));
806 		if (error)
807 			return error;
808 	}
809 	error = SYSCTL_OUT(req, "", 1);
810 	return error;
811 }
812 
813 SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
814     sysctl_disks, "A", "names of available disks");
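
/*
 * The handler above backs the read-only string sysctl "kern.disks".
 * Reading it (e.g. with "sysctl kern.disks") returns the raw device names
 * separated by single spaces, something like "da0 cd0" (the names here are
 * illustrative only).
 */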
815 
816 /*
817  * Open a disk device or partition.
818  */
819 static
820 int
821 diskopen(struct dev_open_args *ap)
822 {
823 	cdev_t dev = ap->a_head.a_dev;
824 	struct disk *dp;
825 	int error;
826 
827 	/*
828 	 * dp can't be NULL here XXX.
829 	 *
830 	 * d_slice will be NULL if setdiskinfo() has not been called yet.
831 	 * setdiskinfo() is typically called whether the disk is present
832 	 * or not (e.g. CD), but the base disk device is created first
833 	 * and there may be a race.
834 	 */
835 	dp = dev->si_disk;
836 	if (dp == NULL || dp->d_slice == NULL)
837 		return (ENXIO);
838 	error = 0;
839 
840 	/*
841 	 * Deal with open races
842 	 */
843 	get_mplock();
844 	while (dp->d_flags & DISKFLAG_LOCK) {
845 		dp->d_flags |= DISKFLAG_WANTED;
846 		error = tsleep(dp, PCATCH, "diskopen", hz);
847 		if (error) {
848 			rel_mplock();
849 			return (error);
850 		}
851 	}
852 	dp->d_flags |= DISKFLAG_LOCK;
853 
854 	/*
855 	 * Open the underlying raw device.
856 	 */
857 	if (!dsisopen(dp->d_slice)) {
858 #if 0
859 		if (!pdev->si_iosize_max)
860 			pdev->si_iosize_max = dev->si_iosize_max;
861 #endif
862 		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
863 				  ap->a_devtype, ap->a_cred);
864 	}
865 #if 0
866 	/*
867 	 * Inherit properties from the underlying device now that it is
868 	 * open.
869 	 */
870 	dev_dclone(dev);
871 #endif
872 
873 	if (error)
874 		goto out;
875 	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
876 		       &dp->d_slice, &dp->d_info);
877 	if (!dsisopen(dp->d_slice)) {
878 		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype);
879 	}
880 out:
881 	dp->d_flags &= ~DISKFLAG_LOCK;
882 	if (dp->d_flags & DISKFLAG_WANTED) {
883 		dp->d_flags &= ~DISKFLAG_WANTED;
884 		wakeup(dp);
885 	}
886 	rel_mplock();
887 
888 	return(error);
889 }
890 
891 /*
892  * Close a disk device or partition
893  */
894 static
895 int
896 diskclose(struct dev_close_args *ap)
897 {
898 	cdev_t dev = ap->a_head.a_dev;
899 	struct disk *dp;
900 	int error;
901 
902 	error = 0;
903 	dp = dev->si_disk;
904 
905 	get_mplock();
906 	dsclose(dev, ap->a_devtype, dp->d_slice);
907 	if (!dsisopen(dp->d_slice)) {
908 		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype);
909 	}
910 	rel_mplock();
911 	return (error);
912 }
913 
914 /*
915  * First execute the ioctl on the disk device, and if it isn't supported
916  * try running it on the backing device.
917  */
918 static
919 int
920 diskioctl(struct dev_ioctl_args *ap)
921 {
922 	cdev_t dev = ap->a_head.a_dev;
923 	struct disk *dp;
924 	int error;
925 	u_int u;
926 
927 	dp = dev->si_disk;
928 	if (dp == NULL)
929 		return (ENXIO);
930 
931 	devfs_debug(DEVFS_DEBUG_DEBUG,
932 		    "diskioctl: cmd is: %lx (name: %s)\n",
933 		    ap->a_cmd, dev->si_name);
934 	devfs_debug(DEVFS_DEBUG_DEBUG,
935 		    "diskioctl: &dp->d_slice is: %p, %p\n",
936 		    &dp->d_slice, dp->d_slice);
937 
938 	if (ap->a_cmd == DIOCGKERNELDUMP) {
939 		u = *(u_int *)ap->a_data;
940 		return disk_dumpconf(dev, u);
941 	}
942 
943 	if (dp->d_slice == NULL) {
944 		error = ENOIOCTL;
945 	} else {
946 		get_mplock();
947 		error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
948 				&dp->d_slice, &dp->d_info);
949 		rel_mplock();
950 	}
951 
952 	if (error == ENOIOCTL) {
953 		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
954 				   ap->a_fflag, ap->a_cred, NULL);
955 	}
956 	return (error);
957 }
958 
959 /*
960  * Execute strategy routine
961  */
962 static
963 int
964 diskstrategy(struct dev_strategy_args *ap)
965 {
966 	cdev_t dev = ap->a_head.a_dev;
967 	struct bio *bio = ap->a_bio;
968 	struct bio *nbio;
969 	struct disk *dp;
970 
971 	dp = dev->si_disk;
972 
973 	if (dp == NULL) {
974 		bio->bio_buf->b_error = ENXIO;
975 		bio->bio_buf->b_flags |= B_ERROR;
976 		biodone(bio);
977 		return(0);
978 	}
979 	KKASSERT(dev->si_disk == dp);
980 
981 	/*
982 	 * The dscheck() function will also transform the slice-relative
983 	 * byte offset (bio->bio_offset) into an offset that can be passed
984 	 * directly to the underlying raw device.  If dscheck()
985 	 * returns NULL it will have handled the bio for us (e.g. EOF
986 	 * or error due to being beyond the device size).
987 	 */
988 	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
989 		dsched_queue(dp, nbio);
990 	} else {
991 		biodone(bio);
992 	}
993 	return(0);
994 }
995 
996 /*
997  * Return the partition size in blocks (see dssize()).
998  */
999 static
1000 int
1001 diskpsize(struct dev_psize_args *ap)
1002 {
1003 	cdev_t dev = ap->a_head.a_dev;
1004 	struct disk *dp;
1005 
1006 	dp = dev->si_disk;
1007 	if (dp == NULL)
1008 		return(ENODEV);
1009 	ap->a_result = dssize(dev, &dp->d_slice);
1010 	return(0);
1011 }
1012 
1013 /*
1014  * When new device entries are instantiated, make sure they inherit our
1015  * si_disk structure and block and iosize limits from the raw device.
1016  *
1017  * This routine is always called synchronously in the context of the
1018  * client.
1019  *
1020  * XXX The various io and block size constraints are not always initialized
1021  * properly by devices.
1022  */
1023 static
1024 int
1025 diskclone(struct dev_clone_args *ap)
1026 {
1027 	cdev_t dev = ap->a_head.a_dev;
1028 	struct disk *dp;
1029 	dp = dev->si_disk;
1030 
1031 	KKASSERT(dp != NULL);
1032 	dev->si_disk = dp;
1033 	dev->si_iosize_max = dp->d_rawdev->si_iosize_max;
1034 	dev->si_bsize_phys = dp->d_rawdev->si_bsize_phys;
1035 	dev->si_bsize_best = dp->d_rawdev->si_bsize_best;
1036 	return(0);
1037 }
1038 
1039 int
1040 diskdump(struct dev_dump_args *ap)
1041 {
1042 	cdev_t dev = ap->a_head.a_dev;
1043 	struct disk *dp = dev->si_disk;
1044 	u_int64_t size, offset;
1045 	int error;
1046 
1047 	error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize);
1048 	/* XXX: this should probably go in disk_dumpcheck somehow */
1049 	if (error == 0 && ap->a_length != 0) {
1050 		size *= DEV_BSIZE;
1051 		offset = ap->a_blkno * DEV_BSIZE;
1052 		if ((ap->a_offset < offset) ||
1053 		    (ap->a_offset + ap->a_length - offset > size)) {
1054 			kprintf("Attempt to write outside dump device boundaries.\n");
1055 			error = ENOSPC;
1056 		}
1057 	}
1058 
1059 	if (error == 0) {
1060 		ap->a_head.a_dev = dp->d_rawdev;
1061 		error = dev_doperate(&ap->a_head);
1062 	}
1063 
1064 	return(error);
1065 }
1066 
1067 
1068 SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
1069     0, sizeof(struct diskslices), "sizeof(struct diskslices)");
1070 
1071 SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
1072     0, sizeof(struct disk), "sizeof(struct disk)");
1073 
1074 /*
1075  * Reorder interval for burst write allowance and minor write
1076  * allowance.
1077  *
1078  * We always want to trickle some writes in to make use of the
1079  * disk's zone cache.  Bursting occurs on a longer interval and only
1080  * when runningbufspace is well over the hirunningspace limit.
1081  */
1082 int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
1083 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
1084 	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
1085 int bioq_reorder_minor_interval = 5;
1086 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
1087 	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");
1088 
1089 int bioq_reorder_burst_bytes = 3000000;
1090 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
1091 	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
1092 int bioq_reorder_minor_bytes = 262144;
1093 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
1094 	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");
1095 
1096 
1097 /*
1098  * Order I/Os.  Generally speaking this code is designed to make better
1099  * use of drive zone caches.  A drive zone cache can typically track linear
1100  * reads or writes for around 16 zones simultaneously.
1101  *
1102  * Read prioritization issues:  It is possible for hundreds of megabytes worth
1103  * of writes to be queued asynchronously.  This creates a huge bottleneck
1104  * for reads, reducing read bandwidth to a trickle.
1105  *
1106  * To solve this problem we generally reorder reads before writes.
1107  *
1108  * However, a large number of random reads can also starve writes and
1109  * make poor use of the drive zone cache so we allow writes to trickle
1110  * in every N reads.
1111  */
1112 void
1113 bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
1114 {
1115 	/*
1116 	 * The BIO wants to be ordered.  Adding to the tail also
1117 	 * causes transition to be set to NULL, forcing the ordering
1118 	 * of all prior I/O's.
1119 	 */
1120 	if (bio->bio_buf->b_flags & B_ORDERED) {
1121 		bioq_insert_tail(bioq, bio);
1122 		return;
1123 	}
1124 
1125 	switch(bio->bio_buf->b_cmd) {
1126 	case BUF_CMD_READ:
1127 		if (bioq->transition) {
1128 			/*
1129 			 * Insert before the first write.  Bleedover writes
1130 			 * based on reorder intervals to prevent starvation.
1131 			 */
1132 			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
1133 			++bioq->reorder;
1134 			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
1135 				bioqwritereorder(bioq);
1136 				if (bioq->reorder >=
1137 				    bioq_reorder_burst_interval) {
1138 					bioq->reorder = 0;
1139 				}
1140 			}
1141 		} else {
1142 			/*
1143 			 * No writes queued (or ordering was forced),
1144 			 * insert at tail.
1145 			 */
1146 			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1147 		}
1148 		break;
1149 	case BUF_CMD_WRITE:
1150 		/*
1151 		 * Writes are always appended.  If no writes were previously
1152  * queued or an ordered tail insertion occurred, the transition
1153 		 * field will be NULL.
1154 		 */
1155 		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1156 		if (bioq->transition == NULL)
1157 			bioq->transition = bio;
1158 		break;
1159 	default:
1160 		/*
1161 		 * All other request types are forced to be ordered.
1162 		 */
1163 		bioq_insert_tail(bioq, bio);
1164 		break;
1165 	}
1166 }
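
/*
 * Worked example with the default tunables above (minor interval 5, burst
 * interval 60, minor bytes 262144, burst bytes 3000000):  while writes are
 * queued (bioq->transition != NULL), every 5th read inserted ahead of them
 * calls bioqwritereorder(), which lets roughly up to 262144 bytes of
 * offset-contiguous writes slip ahead of the remaining reads.  When the
 * reorder counter reaches 60, that call instead releases roughly up to
 * 3000000 bytes of writes regardless of contiguity (provided
 * runningbufspace is severe), and the counter is reset.
 */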
1167 
1168 /*
1169  * Move the read-write transition point to prevent reads from
1170  * completely starving our writes.  This brings a number of writes into
1171  * the fold every N reads.
1172  *
1173  * We bring a few linear writes into the fold on a minor interval
1174  * and we bring a non-linear burst of writes into the fold on a major
1175  * interval.  Bursting only occurs if runningbufspace is really high
1176  * (typically from syncs, fsyncs, or HAMMER flushes).
1177  */
1178 static
1179 void
1180 bioqwritereorder(struct bio_queue_head *bioq)
1181 {
1182 	struct bio *bio;
1183 	off_t next_offset;
1184 	size_t left;
1185 	size_t n;
1186 	int check_off;
1187 
1188 	if (bioq->reorder < bioq_reorder_burst_interval ||
1189 	    !buf_runningbufspace_severe()) {
1190 		left = (size_t)bioq_reorder_minor_bytes;
1191 		check_off = 1;
1192 	} else {
1193 		left = (size_t)bioq_reorder_burst_bytes;
1194 		check_off = 0;
1195 	}
1196 
1197 	next_offset = bioq->transition->bio_offset;
1198 	while ((bio = bioq->transition) != NULL &&
1199 	       (check_off == 0 || next_offset == bio->bio_offset)
1200 	) {
1201 		n = bio->bio_buf->b_bcount;
1202 		next_offset = bio->bio_offset + n;
1203 		bioq->transition = TAILQ_NEXT(bio, bio_act);
1204 		if (left < n)
1205 			break;
1206 		left -= n;
1207 	}
1208 }
1209 
1210 /*
1211  * Bounds checking against the media size, used for the raw partition.
1212  * secsize, mediasize and the block numbers derived from bio_offset must
1213  * all be in the same units.  Possibly this has to be DEV_BSIZE (512).
1214  */
1215 int
1216 bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
1217 {
1218 	struct buf *bp = bio->bio_buf;
1219 	int64_t sz;
1220 
1221 	sz = howmany(bp->b_bcount, secsize);
1222 
1223 	if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
1224 		sz = mediasize - bio->bio_offset/DEV_BSIZE;
1225 		if (sz == 0) {
1226 			/* If exactly at end of disk, return EOF. */
1227 			bp->b_resid = bp->b_bcount;
1228 			return 0;
1229 		}
1230 		if (sz < 0) {
1231 			/* If past end of disk, return EINVAL. */
1232 			bp->b_error = EINVAL;
1233 			return 0;
1234 		}
1235 		/* Otherwise, truncate request. */
1236 		bp->b_bcount = sz * secsize;
1237 	}
1238 
1239 	return 1;
1240 }
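
/*
 * Example with illustrative numbers:  with secsize = DEV_BSIZE = 512 and
 * mediasize = 1000 blocks, a 16-sector (8192 byte) request starting at
 * block 996 would run 12 sectors past the end; it is truncated to 4
 * sectors (2048 bytes) and 1 is returned.  A request starting exactly at
 * block 1000 returns 0 with b_resid = b_bcount (EOF), and one starting
 * past that returns 0 with b_error set to EINVAL.
 */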
1241 
1242 /*
1243  * Disk error is the preface to plaintive error messages
1244  * about failing disk transfers.  It prints messages of the form
1245 
1246 da0: hard error reading offset 000000001000 for 8192 (512 bytes completed)
1247 
1248  * where "what" supplies the "hard error" portion and the offset and byte
1249  * count are taken from the bio.  donecnt should be 0 if the number of
1250  * bytes completed before the error is unknown; a nonzero value is
1251  * appended in parentheses.  The pri argument is currently unused and the
1252  * message is always printed with kprintf.  The caller should complete
1253  * the message (with at least a newline) with kprintf.  There is no
1254  * trailing space.
1255  */
1256 void
1257 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
1258 {
1259 	struct buf *bp = bio->bio_buf;
1260 	const char *term;
1261 
1262 	switch(bp->b_cmd) {
1263 	case BUF_CMD_READ:
1264 		term = "read";
1265 		break;
1266 	case BUF_CMD_WRITE:
1267 		term = "write";
1268 		break;
1269 	default:
1270 		term = "access";
1271 		break;
1272 	}
1273 	kprintf("%s: %s %sing ", dev->si_name, what, term);
1274 	kprintf("offset %012llx for %d",
1275 		(long long)bio->bio_offset,
1276 		bp->b_bcount);
1277 
1278 	if (donecnt)
1279 		kprintf(" (%d bytes completed)", donecnt);
1280 }
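
/*
 * Typical use (a sketch; the "hard error" wording and LOG_PRINTF follow the
 * example in the comment above, everything else is up to the caller):
 *
 *	diskerr(bio, dev, "hard error", LOG_PRINTF, 0);
 *	kprintf("\n");
 *
 * i.e. the caller finishes the line itself, since diskerr() emits neither
 * a trailing space nor a newline.
 */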
1281 
1282 /*
1283  * Locate a disk device
1284  */
1285 cdev_t
1286 disk_locate(const char *devname)
1287 {
1288 	return devfs_find_device_by_name(devname);
1289 }
1290 
1291 void
1292 disk_config(void *arg)
1293 {
1294 	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
1295 }
1296 
1297 static void
1298 disk_init(void)
1299 {
1300 	struct thread* td_core;
1301 
1302 	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
1303 					 NULL, NULL, NULL,
1304 					 objcache_malloc_alloc,
1305 					 objcache_malloc_free,
1306 					 &disk_msg_malloc_args);
1307 
1308 	lwkt_token_init(&disklist_token, 1, "disks");
1309 
1310 	/*
1311 	 * Initialize the reply-only port which acts as a message drain
1312 	 */
1313 	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);
1314 
1315 	lwkt_gettoken(&disklist_token);
1316 	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
1317 		    0, 0, "disk_msg_core");
1318 	tsleep(td_core, 0, "diskcore", 0);
1319 	lwkt_reltoken(&disklist_token);
1320 }
1321 
1322 static void
1323 disk_uninit(void)
1324 {
1325 	objcache_destroy(disk_msg_cache);
1326 }
1327 
1328 /*
1329  * Clean out illegal characters in serial numbers.
1330  */
1331 static void
1332 disk_cleanserial(char *serno)
1333 {
1334 	char c;
1335 
1336 	while ((c = *serno) != 0) {
1337 		if (c >= 'a' && c <= 'z')
1338 			;
1339 		else if (c >= 'A' && c <= 'Z')
1340 			;
1341 		else if (c >= '0' && c <= '9')
1342 			;
1343 		else if (c == '-' || c == '@' || c == '+' || c == '.')
1344 			;
1345 		else
1346 			c = '_';
1347 		*serno++= c;
1348 	}
1349 }
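
/*
 * For example, a serial number reported by the device as "WD 1234:5678"
 * (made up) ends up stored and exported as "WD_1234_5678": alphanumerics
 * and '-', '@', '+', '.' pass through, everything else becomes '_'.
 */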
1350 
1351 TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
1352 SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
1353 		0, "Enable subr_disk debugging");
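
/*
 * The disk_debug() output above can be enabled by setting the loader
 * tunable kern.disk_debug (or the sysctl of the same name) to 1, or to 2
 * to also get the per-slice probing messages.
 */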
1354 
1355 SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
1356 SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);
1357