xref: /dflybsd-src/sys/kern/subr_disk.c (revision 2a53016d85f1096c9234a62db3a55aebc5227f1c)
1 /*
2  * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * and Alex Hornung <ahornung@gmail.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * ----------------------------------------------------------------------------
36  * "THE BEER-WARE LICENSE" (Revision 42):
37  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
38  * can do whatever you want with this stuff. If we meet some day, and you think
39  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
40  * ----------------------------------------------------------------------------
41  *
42  * Copyright (c) 1982, 1986, 1988, 1993
43  *	The Regents of the University of California.  All rights reserved.
44  * (c) UNIX System Laboratories, Inc.
45  * All or some portions of this file are derived from material licensed
46  * to the University of California by American Telephone and Telegraph
47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
48  * the permission of UNIX System Laboratories, Inc.
49  *
50  * Redistribution and use in source and binary forms, with or without
51  * modification, are permitted provided that the following conditions
52  * are met:
53  * 1. Redistributions of source code must retain the above copyright
54  *    notice, this list of conditions and the following disclaimer.
55  * 2. Redistributions in binary form must reproduce the above copyright
56  *    notice, this list of conditions and the following disclaimer in the
57  *    documentation and/or other materials provided with the distribution.
58  * 3. All advertising materials mentioning features or use of this software
59  *    must display the following acknowledgement:
60  *	This product includes software developed by the University of
61  *	California, Berkeley and its contributors.
62  * 4. Neither the name of the University nor the names of its contributors
63  *    may be used to endorse or promote products derived from this software
64  *    without specific prior written permission.
65  *
66  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
67  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
68  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
69  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
70  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
71  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
72  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
73  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
74  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
75  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
76  * SUCH DAMAGE.
77  *
78  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
79  * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
80  * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
81  */
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/kernel.h>
86 #include <sys/proc.h>
87 #include <sys/sysctl.h>
88 #include <sys/buf.h>
89 #include <sys/conf.h>
90 #include <sys/disklabel.h>
91 #include <sys/disklabel32.h>
92 #include <sys/disklabel64.h>
93 #include <sys/diskslice.h>
94 #include <sys/diskmbr.h>
95 #include <sys/disk.h>
96 #include <sys/kerneldump.h>
97 #include <sys/malloc.h>
98 #include <machine/md_var.h>
99 #include <sys/ctype.h>
100 #include <sys/syslog.h>
101 #include <sys/device.h>
102 #include <sys/msgport.h>
103 #include <sys/devfs.h>
104 #include <sys/thread.h>
105 #include <sys/dsched.h>
106 #include <sys/queue.h>
107 #include <sys/lock.h>
108 #include <sys/udev.h>
109 #include <sys/uuid.h>
110 
111 #include <sys/buf2.h>
112 #include <sys/mplock2.h>
113 #include <sys/msgport2.h>
114 #include <sys/thread2.h>
115 
116 static MALLOC_DEFINE(M_DISK, "disk", "disk data");
117 static int disk_debug_enable = 0;
118 
119 static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
120 static void disk_msg_core(void *);
121 static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
122 static void disk_probe(struct disk *dp, int reprobe);
123 static void _setdiskinfo(struct disk *disk, struct disk_info *info);
124 static void bioqwritereorder(struct bio_queue_head *bioq);
125 static void disk_cleanserial(char *serno);
126 static int disk_debug(int, char *, ...) __printflike(2, 3);
127 static cdev_t _disk_create_named(const char *name, int unit, struct disk *dp,
128     struct dev_ops *raw_ops, int clone);
129 
130 static d_open_t diskopen;
131 static d_close_t diskclose;
132 static d_ioctl_t diskioctl;
133 static d_strategy_t diskstrategy;
134 static d_psize_t diskpsize;
135 static d_dump_t diskdump;
136 
137 static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
138 static struct lwkt_token disklist_token;
139 
140 static struct dev_ops disk_ops = {
141 	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE },
142 	.d_open = diskopen,
143 	.d_close = diskclose,
144 	.d_read = physread,
145 	.d_write = physwrite,
146 	.d_ioctl = diskioctl,
147 	.d_strategy = diskstrategy,
148 	.d_dump = diskdump,
149 	.d_psize = diskpsize,
150 };
151 
152 static struct objcache 	*disk_msg_cache;
153 
154 struct objcache_malloc_args disk_msg_malloc_args = {
155 	sizeof(struct disk_msg), M_DISK };
156 
157 static struct lwkt_port disk_dispose_port;
158 static struct lwkt_port disk_msg_port;
159 
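/*
 * Debugging printf helper: the message is only emitted when the
 * kern.disk_debug tunable/sysctl is set to at least 'level'.
 */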
160 static int
161 disk_debug(int level, char *fmt, ...)
162 {
163 	__va_list ap;
164 
165 	__va_start(ap, fmt);
166 	if (level <= disk_debug_enable)
167 		kvprintf(fmt, ap);
168 	__va_end(ap);
169 
170 	return 0;
171 }
172 
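/*
 * Probe a slice for a 32 or 64 bit disklabel and, for every partition
 * found, create (or revalidate on reprobe) the partition device along
 * with its serial number and UUID aliases.  Returns 0 on success or
 * EINVAL if no label could be read or synthesized.
 */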
173 static int
174 disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
175 {
176 	struct disk_info *info = &dp->d_info;
177 	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
178 	disklabel_ops_t ops;
179 	struct partinfo part;
180 	const char *msg;
181 	char uuid_buf[128];
182 	cdev_t ndev;
183 	int sno;
184 	u_int i;
185 
186 	disk_debug(2,
187 		    "disk_probe_slice (begin): %s (%s)\n",
188 			dev->si_name, dp->d_cdev->si_name);
189 
190 	sno = slice ? slice - 1 : 0;
191 
192 	ops = &disklabel32_ops;
193 	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
194 	if (msg && !strcmp(msg, "no disk label")) {
195 		ops = &disklabel64_ops;
196 		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
197 	}
198 	if (msg == NULL) {
199 		if (slice != WHOLE_DISK_SLICE)
200 			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
201 		else
202 			sp->ds_reserved = 0;
203 
204 		sp->ds_ops = ops;
205 		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
206 			ops->op_loadpartinfo(sp->ds_label, i, &part);
207 			if (part.fstype) {
208 				if (reprobe &&
209 				    (ndev = devfs_find_device_by_name("%s%c",
210 						dev->si_name, 'a' + i))
211 				) {
212 					/*
213 					 * Device already exists and
214 					 * is still valid.
215 					 */
216 					ndev->si_flags |= SI_REPROBE_TEST;
217 
218 					/*
219 					 * Destroy old UUID alias
220 					 */
221 					destroy_dev_alias(ndev, "part-by-uuid/*");
222 
223 					/* Create UUID alias */
224 					if (!kuuid_is_nil(&part.storage_uuid)) {
225 						snprintf_uuid(uuid_buf,
226 						    sizeof(uuid_buf),
227 						    &part.storage_uuid);
228 						make_dev_alias(ndev,
229 						    "part-by-uuid/%s",
230 						    uuid_buf);
231 						udev_dict_set_cstr(ndev, "uuid", uuid_buf);
232 					}
233 				} else {
234 					ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
235 						dkmakeminor(dkunit(dp->d_cdev),
236 							    slice, i),
237 						UID_ROOT, GID_OPERATOR, 0640,
238 						"%s%c", dev->si_name, 'a'+ i);
239 					ndev->si_parent = dev;
240 					ndev->si_disk = dp;
241 					udev_dict_set_cstr(ndev, "subsystem", "disk");
242 					/* Inherit parent's disk type */
243 					if (dp->d_disktype) {
244 						udev_dict_set_cstr(ndev, "disk-type",
245 						    __DECONST(char *, dp->d_disktype));
246 					}
247 
248 					/* Create serno alias */
249 					if (dp->d_info.d_serialno) {
250 						make_dev_alias(ndev,
251 						    "serno/%s.s%d%c",
252 						    dp->d_info.d_serialno,
253 						    sno, 'a' + i);
254 					}
255 
256 					/* Create UUID alias */
257 					if (!kuuid_is_nil(&part.storage_uuid)) {
258 						snprintf_uuid(uuid_buf,
259 						    sizeof(uuid_buf),
260 						    &part.storage_uuid);
261 						make_dev_alias(ndev,
262 						    "part-by-uuid/%s",
263 						    uuid_buf);
264 						udev_dict_set_cstr(ndev, "uuid", uuid_buf);
265 					}
266 					ndev->si_flags |= SI_REPROBE_TEST;
267 				}
268 			}
269 		}
270 	} else if (info->d_dsflags & DSO_COMPATLABEL) {
271 		msg = NULL;
272 		if (sp->ds_size >= 0x100000000ULL)
273 			ops = &disklabel64_ops;
274 		else
275 			ops = &disklabel32_ops;
276 		sp->ds_label = ops->op_clone_label(info, sp);
277 	} else {
278 		if (sp->ds_type == DOSPTYP_386BSD || /* XXX */
279 		    sp->ds_type == DOSPTYP_NETBSD ||
280 		    sp->ds_type == DOSPTYP_OPENBSD) {
281 			log(LOG_WARNING, "%s: cannot find label (%s)\n",
282 			    dev->si_name, msg);
283 		}
284 	}
285 
286 	if (msg == NULL) {
287 		sp->ds_wlabel = FALSE;
288 	}
289 
290 	return (msg ? EINVAL : 0);
291 }
292 
293 /*
294  * This routine is only called for newly minted drives or to reprobe
295  * a drive with no open slices.  disk_probe_slice() is called directly
296  * when reprobing partition changes within slices.
297  */
298 static void
299 disk_probe(struct disk *dp, int reprobe)
300 {
301 	struct disk_info *info = &dp->d_info;
302 	cdev_t dev = dp->d_cdev;
303 	cdev_t ndev;
304 	int error, i, sno;
305 	struct diskslices *osp;
306 	struct diskslice *sp;
307 	char uuid_buf[128];
308 
309 	KKASSERT (info->d_media_blksize != 0);
310 
311 	osp = dp->d_slice;
312 	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
313 	disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name);
314 
315 	error = mbrinit(dev, info, &(dp->d_slice));
316 	if (error) {
317 		dsgone(&osp);
318 		return;
319 	}
320 
321 	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
322 		/*
323 		 * Ignore the whole-disk slice, it has already been created.
324 		 */
325 		if (i == WHOLE_DISK_SLICE)
326 			continue;
327 
328 #if 1
329 		/*
330 		 * Ignore the compatibility slice s0 if it's a device mapper
331 		 * volume.
332 		 */
333 		if ((i == COMPATIBILITY_SLICE) &&
334 		    (info->d_dsflags & DSO_DEVICEMAPPER))
335 			continue;
336 #endif
337 
338 		sp = &dp->d_slice->dss_slices[i];
339 
340 		/*
341 		 * Handle s0.  s0 is a compatibility slice if there are no
342 		 * other slices and it has not otherwise been set up, else
343 		 * we ignore it.
344 		 */
345 		if (i == COMPATIBILITY_SLICE) {
346 			sno = 0;
347 			if (sp->ds_type == 0 &&
348 			    dp->d_slice->dss_nslices == BASE_SLICE) {
349 				sp->ds_size = info->d_media_blocks;
350 				sp->ds_reserved = 0;
351 			}
352 		} else {
353 			sno = i - 1;
354 			sp->ds_reserved = 0;
355 		}
356 
357 		/*
358 		 * Ignore 0-length slices
359 		 */
360 		if (sp->ds_size == 0)
361 			continue;
362 
363 		if (reprobe &&
364 		    (ndev = devfs_find_device_by_name("%ss%d",
365 						      dev->si_name, sno))) {
366 			/*
367 			 * Device already exists and is still valid
368 			 */
369 			ndev->si_flags |= SI_REPROBE_TEST;
370 
371 			/*
372 			 * Destroy old UUID alias
373 			 */
374 			destroy_dev_alias(ndev, "slice-by-uuid/*");
375 
376 			/* Create UUID alias */
377 			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
378 				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
379 				    &sp->ds_stor_uuid);
380 				make_dev_alias(ndev, "slice-by-uuid/%s",
381 				    uuid_buf);
382 			}
383 		} else {
384 			/*
385 			 * Else create new device
386 			 */
387 			ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
388 					dkmakewholeslice(dkunit(dev), i),
389 					UID_ROOT, GID_OPERATOR, 0640,
390 					(info->d_dsflags & DSO_DEVICEMAPPER)?
391 					"%s.s%d" : "%ss%d", dev->si_name, sno);
392 			ndev->si_parent = dev;
393 			udev_dict_set_cstr(ndev, "subsystem", "disk");
394 			/* Inherit parent's disk type */
395 			if (dp->d_disktype) {
396 				udev_dict_set_cstr(ndev, "disk-type",
397 				    __DECONST(char *, dp->d_disktype));
398 			}
399 
400 			/* Create serno alias */
401 			if (dp->d_info.d_serialno) {
402 				make_dev_alias(ndev, "serno/%s.s%d",
403 					       dp->d_info.d_serialno, sno);
404 			}
405 
406 			/* Create UUID alias */
407 			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
408 				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
409 				    &sp->ds_stor_uuid);
410 				make_dev_alias(ndev, "slice-by-uuid/%s",
411 				    uuid_buf);
412 			}
413 
414 			ndev->si_disk = dp;
415 			ndev->si_flags |= SI_REPROBE_TEST;
416 		}
417 		sp->ds_dev = ndev;
418 
419 		/*
420 		 * Probe appropriate slices for a disklabel
421 		 *
422 		 * XXX slice type 1 used by our gpt probe code.
423 		 * XXX slice type 0 used by mbr compat slice.
424 		 */
425 		if (sp->ds_type == DOSPTYP_386BSD ||
426 		    sp->ds_type == DOSPTYP_NETBSD ||
427 		    sp->ds_type == DOSPTYP_OPENBSD ||
428 		    sp->ds_type == 0 ||
429 		    sp->ds_type == 1) {
430 			if (dp->d_slice->dss_first_bsd_slice == 0)
431 				dp->d_slice->dss_first_bsd_slice = i;
432 			disk_probe_slice(dp, ndev, i, reprobe);
433 		}
434 	}
435 	dsgone(&osp);
436 	disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name);
437 }
438 
439 
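/*
 * Core of the disk message handling thread.  Services probe, destroy,
 * unprobe, reprobe and sync requests posted to disk_msg_port by
 * disk_msg_send() and disk_msg_send_sync().
 */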
440 static void
441 disk_msg_core(void *arg)
442 {
443 	struct disk	*dp;
444 	struct diskslice *sp;
445 	disk_msg_t msg;
446 	int run;
447 
448 	lwkt_gettoken(&disklist_token);
449 	lwkt_initport_thread(&disk_msg_port, curthread);
450 	wakeup(curthread);	/* synchronous startup */
451 	lwkt_reltoken(&disklist_token);
452 
453 	get_mplock();	/* not mpsafe yet? */
454 	run = 1;
455 
456 	while (run) {
457 		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);
458 
459 		switch (msg->hdr.u.ms_result) {
460 		case DISK_DISK_PROBE:
461 			dp = (struct disk *)msg->load;
462 			disk_debug(1,
463 				    "DISK_DISK_PROBE: %s\n",
464 					dp->d_cdev->si_name);
465 			disk_probe(dp, 0);
466 			break;
467 		case DISK_DISK_DESTROY:
468 			dp = (struct disk *)msg->load;
469 			disk_debug(1,
470 				    "DISK_DISK_DESTROY: %s\n",
471 					dp->d_cdev->si_name);
472 			devfs_destroy_related(dp->d_cdev);
473 			destroy_dev(dp->d_cdev);
474 			destroy_only_dev(dp->d_rawdev);
475 			lwkt_gettoken(&disklist_token);
476 			LIST_REMOVE(dp, d_list);
477 			lwkt_reltoken(&disklist_token);
478 			if (dp->d_info.d_serialno) {
479 				kfree(dp->d_info.d_serialno, M_TEMP);
480 				dp->d_info.d_serialno = NULL;
481 			}
482 			break;
483 		case DISK_UNPROBE:
484 			dp = (struct disk *)msg->load;
485 			disk_debug(1,
486 				    "DISK_DISK_UNPROBE: %s\n",
487 					dp->d_cdev->si_name);
488 			devfs_destroy_related(dp->d_cdev);
489 			break;
490 		case DISK_SLICE_REPROBE:
491 			dp = (struct disk *)msg->load;
492 			sp = (struct diskslice *)msg->load2;
493 			devfs_clr_related_flag(sp->ds_dev,
494 						SI_REPROBE_TEST);
495 			disk_debug(1,
496 				    "DISK_SLICE_REPROBE: %s\n",
497 				    sp->ds_dev->si_name);
498 			disk_probe_slice(dp, sp->ds_dev,
499 					 dkslice(sp->ds_dev), 1);
500 			devfs_destroy_related_without_flag(
501 					sp->ds_dev, SI_REPROBE_TEST);
502 			break;
503 		case DISK_DISK_REPROBE:
504 			dp = (struct disk *)msg->load;
505 			devfs_clr_related_flag(dp->d_cdev, SI_REPROBE_TEST);
506 			disk_debug(1,
507 				    "DISK_DISK_REPROBE: %s\n",
508 				    dp->d_cdev->si_name);
509 			disk_probe(dp, 1);
510 			devfs_destroy_related_without_flag(
511 					dp->d_cdev, SI_REPROBE_TEST);
512 			break;
513 		case DISK_SYNC:
514 			disk_debug(1, "DISK_SYNC\n");
515 			break;
516 		default:
517 			devfs_debug(DEVFS_DEBUG_WARNING,
518 				    "disk_msg_core: unknown message "
519 				    "received at core\n");
520 			break;
521 		}
522 		lwkt_replymsg(&msg->hdr, 0);
523 	}
524 	lwkt_exit();
525 }
526 
527 
528 /*
529  * Acts as a message drain. Any message that is replied to here gets
530  * destroyed and the memory freed.
531  */
532 static void
533 disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
534 {
535 	objcache_put(disk_msg_cache, msg);
536 }
537 
538 
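/*
 * Post a message to the disk message thread asynchronously.  The reply
 * is routed to disk_dispose_port, which simply frees the message.
 */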
539 void
540 disk_msg_send(uint32_t cmd, void *load, void *load2)
541 {
542 	disk_msg_t disk_msg;
543 	lwkt_port_t port = &disk_msg_port;
544 
545 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
546 
547 	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);
548 
549 	disk_msg->hdr.u.ms_result = cmd;
550 	disk_msg->load = load;
551 	disk_msg->load2 = load2;
552 	KKASSERT(port);
553 	lwkt_sendmsg(port, &disk_msg->hdr);
554 }
555 
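/*
 * Post a message to the disk message thread and wait until it has been
 * processed before returning.
 */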
556 void
557 disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
558 {
559 	struct lwkt_port rep_port;
560 	disk_msg_t disk_msg;
561 	lwkt_port_t port;
562 
563 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
564 	port = &disk_msg_port;
565 
566 	/* XXX could probably use curthread's built-in msgport */
567 	lwkt_initport_thread(&rep_port, curthread);
568 	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
569 
570 	disk_msg->hdr.u.ms_result = cmd;
571 	disk_msg->load = load;
572 	disk_msg->load2 = load2;
573 
574 	lwkt_sendmsg(port, &disk_msg->hdr);
575 	lwkt_waitmsg(&disk_msg->hdr, 0);
576 	objcache_put(disk_msg_cache, disk_msg);
577 }
578 
579 /*
580  * Create a raw device for the dev_ops template (which is returned).  Also
581  * create a slice and unit managed disk and overload the user visible
582  * device space with it.
583  *
584  * NOTE: The returned raw device is NOT a slice and unit managed device.
585  * It is an actual raw device representing the raw disk as specified by
586  * the passed dev_ops.  The disk layer not only returns such a raw device,
587  * it also uses it internally when passing (modified) commands through.
588  */
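/*
 * Typical driver-side usage, as a rough sketch only (the "mydev" names
 * below are hypothetical and not part of this API):
 *
 *	struct disk	mydev_disk;
 *	struct disk_info info;
 *	cdev_t		rawdev;
 *
 *	rawdev = disk_create(unit, &mydev_disk, &mydev_raw_ops);
 *	bzero(&info, sizeof(info));
 *	info.d_media_blksize = 512;
 *	info.d_media_blocks = mydev_sector_count;
 *	disk_setdiskinfo(&mydev_disk, &info);
 *	...
 *	disk_destroy(&mydev_disk);
 */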
589 cdev_t
590 disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
591 {
592 	return _disk_create_named(NULL, unit, dp, raw_ops, 0);
593 }
594 
595 cdev_t
596 disk_create_clone(int unit, struct disk *dp, struct dev_ops *raw_ops)
597 {
598 	return _disk_create_named(NULL, unit, dp, raw_ops, 1);
599 }
600 
601 cdev_t
602 disk_create_named(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops)
603 {
604 	return _disk_create_named(name, unit, dp, raw_ops, 0);
605 }
606 
607 cdev_t
608 disk_create_named_clone(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops)
609 {
610 	return _disk_create_named(name, unit, dp, raw_ops, 1);
611 }
612 
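/*
 * Common backend for the disk_create*() variants above.  Creates the
 * raw device, initializes the disk structure, creates the managed
 * (slice/partition aware) whole-disk device on top of it and registers
 * the disk on the global disk list.
 */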
613 static cdev_t
614 _disk_create_named(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops, int clone)
615 {
616 	cdev_t rawdev;
617 
618 	disk_debug(1, "disk_create (begin): %s%d\n", name, unit);
619 
620 	if (name) {
621 		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
622 		    UID_ROOT, GID_OPERATOR, 0640, "%s", name);
623 	} else {
624 		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
625 		    UID_ROOT, GID_OPERATOR, 0640,
626 		    "%s%d", raw_ops->head.name, unit);
627 	}
628 
629 	bzero(dp, sizeof(*dp));
630 
631 	dp->d_rawdev = rawdev;
632 	dp->d_raw_ops = raw_ops;
633 	dp->d_dev_ops = &disk_ops;
634 
635 	if (name) {
636 		if (clone) {
637 			dp->d_cdev = make_only_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
638 			    dkmakewholedisk(unit), UID_ROOT, GID_OPERATOR, 0640,
639 			    "%s", name);
640 		} else {
641 			dp->d_cdev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
642 			    dkmakewholedisk(unit), UID_ROOT, GID_OPERATOR, 0640,
643 			    "%s", name);
644 		}
645 	} else {
646 		if (clone) {
647 			dp->d_cdev = make_only_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
648 			    dkmakewholedisk(unit),
649 			    UID_ROOT, GID_OPERATOR, 0640,
650 			    "%s%d", raw_ops->head.name, unit);
651 		} else {
652 			dp->d_cdev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
653 			    dkmakewholedisk(unit),
654 			    UID_ROOT, GID_OPERATOR, 0640,
655 			    "%s%d", raw_ops->head.name, unit);
656 		}
657 	}
658 
659 	udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk");
660 	dp->d_cdev->si_disk = dp;
661 
662 	if (name)
663 		dsched_disk_create_callback(dp, name, unit);
664 	else
665 		dsched_disk_create_callback(dp, raw_ops->head.name, unit);
666 
667 	lwkt_gettoken(&disklist_token);
668 	LIST_INSERT_HEAD(&disklist, dp, d_list);
669 	lwkt_reltoken(&disklist_token);
670 
671 	disk_debug(1, "disk_create (end): %s%d\n",
672 	    (name != NULL)?(name):(raw_ops->head.name), unit);
673 
674 	return (dp->d_rawdev);
675 }
676 
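/*
 * Set the disk's type string and export it to the udev dictionary.
 */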
677 int
678 disk_setdisktype(struct disk *disk, const char *type)
679 {
680 	KKASSERT(disk != NULL);
681 
682 	disk->d_disktype = type;
683 	return udev_dict_set_cstr(disk->d_cdev, "disk-type", __DECONST(char *, type));
684 }
685 
686 int
687 disk_getopencount(struct disk *disk)
688 {
689 	return disk->d_opencount;
690 }
691 
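/*
 * Copy the caller-supplied disk_info into the disk structure, duplicate
 * and sanitize the serial number, derive whichever of d_media_size /
 * d_media_blocks was not supplied, and propagate I/O size hints from
 * the raw device to the managed device.
 */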
692 static void
693 _setdiskinfo(struct disk *disk, struct disk_info *info)
694 {
695 	char *oldserialno;
696 
697 	oldserialno = disk->d_info.d_serialno;
698 	bcopy(info, &disk->d_info, sizeof(disk->d_info));
699 	info = &disk->d_info;
700 
701 	disk_debug(1,
702 		    "_setdiskinfo: %s\n",
703 			disk->d_cdev->si_name);
704 
705 	/*
706 	 * The serial number is duplicated so the caller can throw
707 	 * their copy away.
708 	 */
709 	if (info->d_serialno && info->d_serialno[0] &&
710 	    (info->d_serialno[0] != ' ' || strlen(info->d_serialno) > 1)) {
711 		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
712 		disk_cleanserial(info->d_serialno);
713 		if (disk->d_cdev) {
714 			make_dev_alias(disk->d_cdev, "serno/%s",
715 					info->d_serialno);
716 		}
717 	} else {
718 		info->d_serialno = NULL;
719 	}
720 	if (oldserialno)
721 		kfree(oldserialno, M_TEMP);
722 
723 	dsched_disk_update_callback(disk, info);
724 
725 	/*
726 	 * The caller may set d_media_size or d_media_blocks and we
727 	 * calculate the other.
728 	 */
729 	KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0);
730 	if (info->d_media_size == 0 && info->d_media_blocks) {
731 		info->d_media_size = (u_int64_t)info->d_media_blocks *
732 				     info->d_media_blksize;
733 	} else if (info->d_media_size && info->d_media_blocks == 0 &&
734 		   info->d_media_blksize) {
735 		info->d_media_blocks = info->d_media_size /
736 				       info->d_media_blksize;
737 	}
738 
739 	/*
740 	 * The si_* fields for rawdev are not set until after the
741 	 * disk_create() call, so someone using the cooked version
742 	 * of the raw device (i.e. da0s0) will not get the right
743 	 * si_iosize_max unless we fix it up here.
744 	 */
745 	if (disk->d_cdev && disk->d_rawdev &&
746 	    disk->d_cdev->si_iosize_max == 0) {
747 		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
748 		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
749 		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
750 	}
751 
752 	/* Add the serial number to the udev_dictionary */
753 	if (info->d_serialno)
754 		udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno);
755 }
756 
757 /*
758  * Disk drivers must call this routine when media parameters are available
759  * or have changed.
760  */
761 void
762 disk_setdiskinfo(struct disk *disk, struct disk_info *info)
763 {
764 	_setdiskinfo(disk, info);
765 	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
766 	disk_debug(1,
767 		    "disk_setdiskinfo: sent probe for %s\n",
768 			disk->d_cdev->si_name);
769 }
770 
771 void
772 disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
773 {
774 	_setdiskinfo(disk, info);
775 	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
776 	disk_debug(1,
777 		    "disk_setdiskinfo_sync: sent probe for %s\n",
778 			disk->d_cdev->si_name);
779 }
780 
781 /*
782  * This routine is called when an adapter detaches.  The higher level
783  * managed disk device is destroyed while the lower level raw device is
784  * released.
785  */
786 void
787 disk_destroy(struct disk *disk)
788 {
789 	dsched_disk_destroy_callback(disk);
790 	disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
791 	return;
792 }
793 
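/*
 * Validate a device as a crash dump target and return its usable size,
 * starting block number and sector size, all derived from DIOCGPART.
 */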
794 int
795 disk_dumpcheck(cdev_t dev, u_int64_t *size, u_int64_t *blkno, u_int32_t *secsize)
796 {
797 	struct partinfo pinfo;
798 	int error;
799 
800 	bzero(&pinfo, sizeof(pinfo));
801 	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
802 			   proc0.p_ucred, NULL);
803 	if (error)
804 		return (error);
805 
806 	if (pinfo.media_blksize == 0)
807 		return (ENXIO);
808 
809 	if (blkno) /* XXX: make sure this reserved stuff is right */
810 		*blkno = pinfo.reserved_blocks +
811 			pinfo.media_offset / pinfo.media_blksize;
812 	if (secsize)
813 		*secsize = pinfo.media_blksize;
814 	if (size)
815 		*size = (pinfo.media_blocks - pinfo.reserved_blocks);
816 
817 	return (0);
818 }
819 
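/*
 * Install (onoff != 0) or remove (onoff == 0) the device as the kernel
 * crash dump target.
 */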
820 int
821 disk_dumpconf(cdev_t dev, u_int onoff)
822 {
823 	struct dumperinfo di;
824 	u_int64_t	size, blkno;
825 	u_int32_t	secsize;
826 	int error;
827 
828 	if (!onoff)
829 		return set_dumper(NULL);
830 
831 	error = disk_dumpcheck(dev, &size, &blkno, &secsize);
832 
833 	if (error)
834 		return ENXIO;
835 
836 	bzero(&di, sizeof(struct dumperinfo));
837 	di.dumper = diskdump;
838 	di.priv = dev;
839 	di.blocksize = secsize;
840 	di.mediaoffset = blkno * DEV_BSIZE;
841 	di.mediasize = size * DEV_BSIZE;
842 
843 	return set_dumper(&di);
844 }
845 
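/*
 * Synchronously destroy the devfs entries related to the disk (slices
 * and partitions) without destroying the disk itself.
 */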
846 void
847 disk_unprobe(struct disk *disk)
848 {
849 	if (disk == NULL)
850 		return;
851 
852 	disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
853 }
854 
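/*
 * Throw away the disk's slice and partition information.
 */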
855 void
856 disk_invalidate (struct disk *disk)
857 {
858 	dsgone(&disk->d_slice);
859 }
860 
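/*
 * Iterate over the list of registered disks.  Pass NULL to obtain the
 * first disk, or a previously returned disk to obtain the next one.
 * Returns NULL when the end of the list is reached.
 */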
861 struct disk *
862 disk_enumerate(struct disk *disk)
863 {
864 	struct disk *dp;
865 
866 	lwkt_gettoken(&disklist_token);
867 	if (!disk)
868 		dp = (LIST_FIRST(&disklist));
869 	else
870 		dp = (LIST_NEXT(disk, d_list));
871 	lwkt_reltoken(&disklist_token);
872 
873 	return dp;
874 }
875 
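/*
 * Sysctl handler for kern.disks: emit a space-separated list of the
 * raw device names of all registered disks.
 */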
876 static
877 int
878 sysctl_disks(SYSCTL_HANDLER_ARGS)
879 {
880 	struct disk *disk;
881 	int error, first;
882 
883 	disk = NULL;
884 	first = 1;
885 
886 	while ((disk = disk_enumerate(disk))) {
887 		if (!first) {
888 			error = SYSCTL_OUT(req, " ", 1);
889 			if (error)
890 				return error;
891 		} else {
892 			first = 0;
893 		}
894 		error = SYSCTL_OUT(req, disk->d_rawdev->si_name,
895 				   strlen(disk->d_rawdev->si_name));
896 		if (error)
897 			return error;
898 	}
899 	error = SYSCTL_OUT(req, "", 1);
900 	return error;
901 }
902 
903 SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
904     sysctl_disks, "A", "names of available disks");
905 
906 /*
907  * Open a disk device or partition.
908  */
909 static
910 int
911 diskopen(struct dev_open_args *ap)
912 {
913 	cdev_t dev = ap->a_head.a_dev;
914 	struct disk *dp;
915 	int error;
916 
917 	/*
918 	 * dp can't be NULL here XXX.
919 	 *
920 	 * d_slice will be NULL if setdiskinfo() has not been called yet.
921 	 * setdiskinfo() is typically called whether the disk is present
922 	 * or not (e.g. CD), but the base disk device is created first
923 	 * and there may be a race.
924 	 */
925 	dp = dev->si_disk;
926 	if (dp == NULL || dp->d_slice == NULL)
927 		return (ENXIO);
928 	error = 0;
929 
930 	/*
931 	 * Deal with open races
932 	 */
933 	get_mplock();
934 	while (dp->d_flags & DISKFLAG_LOCK) {
935 		dp->d_flags |= DISKFLAG_WANTED;
936 		error = tsleep(dp, PCATCH, "diskopen", hz);
937 		if (error) {
938 			rel_mplock();
939 			return (error);
940 		}
941 	}
942 	dp->d_flags |= DISKFLAG_LOCK;
943 
944 	/*
945 	 * Open the underlying raw device.
946 	 */
947 	if (!dsisopen(dp->d_slice)) {
948 #if 0
949 		if (!pdev->si_iosize_max)
950 			pdev->si_iosize_max = dev->si_iosize_max;
951 #endif
952 		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
953 				  ap->a_devtype, ap->a_cred);
954 	}
955 
956 	if (error)
957 		goto out;
958 	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
959 		       &dp->d_slice, &dp->d_info);
960 	if (!dsisopen(dp->d_slice)) {
961 		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype);
962 	}
963 out:
964 	dp->d_flags &= ~DISKFLAG_LOCK;
965 	if (dp->d_flags & DISKFLAG_WANTED) {
966 		dp->d_flags &= ~DISKFLAG_WANTED;
967 		wakeup(dp);
968 	}
969 	rel_mplock();
970 
971 	KKASSERT(dp->d_opencount >= 0);
972 	/* If the open was successful, bump open count */
973 	if (error == 0)
974 		atomic_add_int(&dp->d_opencount, 1);
975 
976 	return(error);
977 }
978 
979 /*
980  * Close a disk device or partition
981  */
982 static
983 int
984 diskclose(struct dev_close_args *ap)
985 {
986 	cdev_t dev = ap->a_head.a_dev;
987 	struct disk *dp;
988 	int error;
989 	int lcount;
990 
991 	error = 0;
992 	dp = dev->si_disk;
993 
994 	/*
995 	 * The cdev_t represents the disk/slice/part.  The shared
996 	 * dp structure governs all cdevs associated with the disk.
997 	 *
998 	 * As a safety, only close the underlying raw device on the last
999 	 * close of the disk device, and only if our tracking of the
1000 	 * slices/partitions also indicates nothing is open.
1001 	 */
1002 	KKASSERT(dp->d_opencount >= 1);
1003 	lcount = atomic_fetchadd_int(&dp->d_opencount, -1);
1004 
1005 	get_mplock();
1006 	dsclose(dev, ap->a_devtype, dp->d_slice);
1007 	if (lcount <= 1 && !dsisopen(dp->d_slice)) {
1008 		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype);
1009 	}
1010 	rel_mplock();
1011 	return (error);
1012 }
1013 
1014 /*
1015  * First execute the ioctl on the disk device, and if it isn't supported
1016  * try running it on the backing device.
1017  */
1018 static
1019 int
1020 diskioctl(struct dev_ioctl_args *ap)
1021 {
1022 	cdev_t dev = ap->a_head.a_dev;
1023 	struct disk *dp;
1024 	int error;
1025 	u_int u;
1026 
1027 	dp = dev->si_disk;
1028 	if (dp == NULL)
1029 		return (ENXIO);
1030 
1031 	devfs_debug(DEVFS_DEBUG_DEBUG,
1032 		    "diskioctl: cmd is: %lx (name: %s)\n",
1033 		    ap->a_cmd, dev->si_name);
1034 	devfs_debug(DEVFS_DEBUG_DEBUG,
1035 		    "diskioctl: &dp->d_slice is: %p, %p\n",
1036 		    &dp->d_slice, dp->d_slice);
1037 
1038 	if (ap->a_cmd == DIOCGKERNELDUMP) {
1039 		u = *(u_int *)ap->a_data;
1040 		return disk_dumpconf(dev, u);
1041 	}
1042 
1043 	if (dp->d_slice == NULL ||
1044 	    ((dp->d_info.d_dsflags & DSO_DEVICEMAPPER) &&
1045 	     dkslice(dev) == WHOLE_DISK_SLICE)) {
1046 		error = ENOIOCTL;
1047 	} else {
1048 		get_mplock();
1049 		error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
1050 				&dp->d_slice, &dp->d_info);
1051 		rel_mplock();
1052 	}
1053 
1054 	if (error == ENOIOCTL) {
1055 		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
1056 				   ap->a_fflag, ap->a_cred, NULL);
1057 	}
1058 	return (error);
1059 }
1060 
1061 /*
1062  * Execute strategy routine
1063  */
1064 static
1065 int
1066 diskstrategy(struct dev_strategy_args *ap)
1067 {
1068 	cdev_t dev = ap->a_head.a_dev;
1069 	struct bio *bio = ap->a_bio;
1070 	struct bio *nbio;
1071 	struct disk *dp;
1072 
1073 	dp = dev->si_disk;
1074 
1075 	if (dp == NULL) {
1076 		bio->bio_buf->b_error = ENXIO;
1077 		bio->bio_buf->b_flags |= B_ERROR;
1078 		biodone(bio);
1079 		return(0);
1080 	}
1081 	KKASSERT(dev->si_disk == dp);
1082 
1083 	/*
1084 	 * The dscheck() function will also transform the slice relative
1085 	 * block number i.e. bio->bio_offset into a block number that can be
1086 	 * passed directly to the underlying raw device.  If dscheck()
1087 	 * returns NULL it will have handled the bio for us (e.g. EOF
1088 	 * or error due to being beyond the device size).
1089 	 */
1090 	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
1091 		dsched_queue(dp, nbio);
1092 	} else {
1093 		biodone(bio);
1094 	}
1095 	return(0);
1096 }
1097 
1098 /*
1099  * Return the partition size in ?blocks?
1100  */
1101 static
1102 int
1103 diskpsize(struct dev_psize_args *ap)
1104 {
1105 	cdev_t dev = ap->a_head.a_dev;
1106 	struct disk *dp;
1107 
1108 	dp = dev->si_disk;
1109 	if (dp == NULL)
1110 		return(ENODEV);
1111 
1112 	ap->a_result = dssize(dev, &dp->d_slice);
1113 
1114 	if ((ap->a_result == -1) &&
1115 	   (dp->d_info.d_dsflags & DSO_DEVICEMAPPER)) {
1116 		ap->a_head.a_dev = dp->d_rawdev;
1117 		return dev_doperate(&ap->a_head);
1118 	}
1119 	return(0);
1120 }
1121 
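/*
 * Dump entry point for the managed disk device.  Bounds-check the
 * request against the dump partition and forward it to the raw device.
 */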
1122 int
1123 diskdump(struct dev_dump_args *ap)
1124 {
1125 	cdev_t dev = ap->a_head.a_dev;
1126 	struct disk *dp = dev->si_disk;
1127 	u_int64_t size, offset;
1128 	int error;
1129 
1130 	error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize);
1131 	/* XXX: this should probably go in disk_dumpcheck somehow */
1132 	if (ap->a_length != 0) {
1133 		size *= DEV_BSIZE;
1134 		offset = ap->a_blkno * DEV_BSIZE;
1135 		if ((ap->a_offset < offset) ||
1136 		    (ap->a_offset + ap->a_length - offset > size)) {
1137 			kprintf("Attempt to write outside dump device boundaries.\n");
1138 			error = ENOSPC;
1139 		}
1140 	}
1141 
1142 	if (error == 0) {
1143 		ap->a_head.a_dev = dp->d_rawdev;
1144 		error = dev_doperate(&ap->a_head);
1145 	}
1146 
1147 	return(error);
1148 }
1149 
1150 
1151 SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
1152     0, sizeof(struct diskslices), "sizeof(struct diskslices)");
1153 
1154 SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
1155     0, sizeof(struct disk), "sizeof(struct disk)");
1156 
1157 /*
1158  * Reorder interval for burst write allowance and minor write
1159  * allowance.
1160  *
1161  * We always want to trickle some writes in to make use of the
1162  * disk's zone cache.  Bursting occurs on a longer interval and only
1163  * runningbufspace is well over the hirunningspace limit.
1164  * when runningbufspace is well over the hirunningspace limit.
1165 int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
1166 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
1167 	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
1168 int bioq_reorder_minor_interval = 5;
1169 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
1170 	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");
1171 
1172 int bioq_reorder_burst_bytes = 3000000;
1173 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
1174 	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
1175 int bioq_reorder_minor_bytes = 262144;
1176 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
1177 	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");
1178 
1179 
1180 /*
1181  * Order I/Os.  Generally speaking this code is designed to make better
1182  * use of drive zone caches.  A drive zone cache can typically track linear
1183  * reads or writes for around 16 zones simultaneously.
1184  *
1185  * Read prioritization issues:  It is possible for hundreds of megabytes worth
1186  * of writes to be queued asynchronously.  This creates a huge bottleneck
1187  * for reads, reducing read bandwidth to a trickle.
1188  *
1189  * To solve this problem we generally reorder reads before writes.
1190  *
1191  * However, a large number of random reads can also starve writes and
1192  * make poor use of the drive zone cache so we allow writes to trickle
1193  * in every N reads.
1194  */
1195 void
1196 bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
1197 {
1198 	/*
1199 	 * The BIO wants to be ordered.  Adding to the tail also
1200 	 * causes transition to be set to NULL, forcing the ordering
1201 	 * of all prior I/O's.
1202 	 */
1203 	if (bio->bio_buf->b_flags & B_ORDERED) {
1204 		bioq_insert_tail(bioq, bio);
1205 		return;
1206 	}
1207 
1208 	switch(bio->bio_buf->b_cmd) {
1209 	case BUF_CMD_READ:
1210 		if (bioq->transition) {
1211 			/*
1212 			 * Insert before the first write.  Bleedover writes
1213 			 * based on reorder intervals to prevent starvation.
1214 			 */
1215 			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
1216 			++bioq->reorder;
1217 			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
1218 				bioqwritereorder(bioq);
1219 				if (bioq->reorder >=
1220 				    bioq_reorder_burst_interval) {
1221 					bioq->reorder = 0;
1222 				}
1223 			}
1224 		} else {
1225 			/*
1226 			 * No writes queued (or ordering was forced),
1227 			 * insert at tail.
1228 			 */
1229 			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1230 		}
1231 		break;
1232 	case BUF_CMD_WRITE:
1233 		/*
1234 		 * Writes are always appended.  If no writes were previously
1235 		 * queued or an ordered tail insertion occurred, the transition
1236 		 * field will be NULL.
1237 		 */
1238 		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1239 		if (bioq->transition == NULL)
1240 			bioq->transition = bio;
1241 		break;
1242 	default:
1243 		/*
1244 		 * All other request types are forced to be ordered.
1245 		 */
1246 		bioq_insert_tail(bioq, bio);
1247 		break;
1248 	}
1249 }
1250 
1251 /*
1252  * Move the read-write transition point to prevent reads from
1253  * completely starving our writes.  This brings a number of writes into
1254  * the fold every N reads.
1255  *
1256  * We bring a few linear writes into the fold on a minor interval
1257  * and we bring a non-linear burst of writes into the fold on a major
1258  * interval.  Bursting only occurs if runningbufspace is really high
1259  * (typically from syncs, fsyncs, or HAMMER flushes).
1260  */
1261 static
1262 void
1263 bioqwritereorder(struct bio_queue_head *bioq)
1264 {
1265 	struct bio *bio;
1266 	off_t next_offset;
1267 	size_t left;
1268 	size_t n;
1269 	int check_off;
1270 
1271 	if (bioq->reorder < bioq_reorder_burst_interval ||
1272 	    !buf_runningbufspace_severe()) {
1273 		left = (size_t)bioq_reorder_minor_bytes;
1274 		check_off = 1;
1275 	} else {
1276 		left = (size_t)bioq_reorder_burst_bytes;
1277 		check_off = 0;
1278 	}
1279 
1280 	next_offset = bioq->transition->bio_offset;
1281 	while ((bio = bioq->transition) != NULL &&
1282 	       (check_off == 0 || next_offset == bio->bio_offset)
1283 	) {
1284 		n = bio->bio_buf->b_bcount;
1285 		next_offset = bio->bio_offset + n;
1286 		bioq->transition = TAILQ_NEXT(bio, bio_act);
1287 		if (left < n)
1288 			break;
1289 		left -= n;
1290 	}
1291 }
1292 
1293 /*
1294  * Bounds checking against the media size, used for the raw partition.
1295  * secsize, mediasize and the block number derived from bio_offset must
1296  * all be in the same units.  Possibly this has to be DEV_BSIZE (512).
1297  */
1298 int
1299 bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
1300 {
1301 	struct buf *bp = bio->bio_buf;
1302 	int64_t sz;
1303 
1304 	sz = howmany(bp->b_bcount, secsize);
1305 
1306 	if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
1307 		sz = mediasize - bio->bio_offset/DEV_BSIZE;
1308 		if (sz == 0) {
1309 			/* If exactly at end of disk, return EOF. */
1310 			bp->b_resid = bp->b_bcount;
1311 			return 0;
1312 		}
1313 		if (sz < 0) {
1314 			/* If past end of disk, return EINVAL. */
1315 			bp->b_error = EINVAL;
1316 			return 0;
1317 		}
1318 		/* Otherwise, truncate request. */
1319 		bp->b_bcount = sz * secsize;
1320 	}
1321 
1322 	return 1;
1323 }
1324 
1325 /*
1326  * Disk error is the preface to plaintive error messages
1327  * about failing disk transfers.  It prints messages of the form
1328 
1329 da0: hard error reading offset 000000001000 for 8192
1330 
1331  * The byte offset and size of the failing transfer are always reported.
1332  * donecnt should be 0 if the position of the error is unknown; when it
1333  * is non-zero the number of bytes already completed is reported as well.
1334  * The message is printed with kprintf regardless of pri and should be
1335  * completed (with at least a newline) by the caller.  There is no
1336  * trailing space.
1338  */
1339 void
1340 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
1341 {
1342 	struct buf *bp = bio->bio_buf;
1343 	const char *term;
1344 
1345 	switch(bp->b_cmd) {
1346 	case BUF_CMD_READ:
1347 		term = "read";
1348 		break;
1349 	case BUF_CMD_WRITE:
1350 		term = "writ";	/* "%sing" below yields "writing" */
1351 		break;
1352 	default:
1353 		term = "access";
1354 		break;
1355 	}
1356 	kprintf("%s: %s %sing ", dev->si_name, what, term);
1357 	kprintf("offset %012llx for %d",
1358 		(long long)bio->bio_offset,
1359 		bp->b_bcount);
1360 
1361 	if (donecnt)
1362 		kprintf(" (%d bytes completed)", donecnt);
1363 }
1364 
1365 /*
1366  * Locate a disk device
1367  */
1368 cdev_t
1369 disk_locate(const char *devname)
1370 {
1371 	return devfs_find_device_by_name(devname);
1372 }
1373 
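/*
 * Synchronize with the disk message thread, ensuring all previously
 * queued disk messages have been processed.
 */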
1374 void
1375 disk_config(void *arg)
1376 {
1377 	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
1378 }
1379 
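/*
 * Subsystem initialization: create the disk message object cache, the
 * disk list token, the reply-only message drain port and the
 * disk_msg_core service thread.
 */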
1380 static void
1381 disk_init(void)
1382 {
1383 	struct thread* td_core;
1384 
1385 	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
1386 					 NULL, NULL, NULL,
1387 					 objcache_malloc_alloc,
1388 					 objcache_malloc_free,
1389 					 &disk_msg_malloc_args);
1390 
1391 	lwkt_token_init(&disklist_token, "disks");
1392 
1393 	/*
1394 	 * Initialize the reply-only port which acts as a message drain
1395 	 */
1396 	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);
1397 
1398 	lwkt_gettoken(&disklist_token);
1399 	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
1400 		    0, -1, "disk_msg_core");
1401 	tsleep(td_core, 0, "diskcore", 0);
1402 	lwkt_reltoken(&disklist_token);
1403 }
1404 
1405 static void
1406 disk_uninit(void)
1407 {
1408 	objcache_destroy(disk_msg_cache);
1409 }
1410 
1411 /*
1412  * Clean out illegal characters in serial numbers.
1413  */
1414 static void
1415 disk_cleanserial(char *serno)
1416 {
1417 	char c;
1418 
1419 	while ((c = *serno) != 0) {
1420 		if (c >= 'a' && c <= 'z')
1421 			;
1422 		else if (c >= 'A' && c <= 'Z')
1423 			;
1424 		else if (c >= '0' && c <= '9')
1425 			;
1426 		else if (c == '-' || c == '@' || c == '+' || c == '.')
1427 			;
1428 		else
1429 			c = '_';
1430 		*serno++= c;
1431 	}
1432 }
1433 
1434 TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
1435 SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
1436 		0, "Enable subr_disk debugging");
1437 
1438 SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
1439 SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);
1440