xref: /dflybsd-src/sys/kern/subr_disk.c (revision 04b7d74f5ed671fa01e7589d92dc802131ea95af)
1 /*
2  * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * and Alex Hornung <ahornung@gmail.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * ----------------------------------------------------------------------------
36  * "THE BEER-WARE LICENSE" (Revision 42):
37  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
38  * can do whatever you want with this stuff. If we meet some day, and you think
39  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
40  * ----------------------------------------------------------------------------
41  *
42  * Copyright (c) 1982, 1986, 1988, 1993
43  *	The Regents of the University of California.  All rights reserved.
44  * (c) UNIX System Laboratories, Inc.
45  * All or some portions of this file are derived from material licensed
46  * to the University of California by American Telephone and Telegraph
47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
48  * the permission of UNIX System Laboratories, Inc.
49  *
50  * Redistribution and use in source and binary forms, with or without
51  * modification, are permitted provided that the following conditions
52  * are met:
53  * 1. Redistributions of source code must retain the above copyright
54  *    notice, this list of conditions and the following disclaimer.
55  * 2. Redistributions in binary form must reproduce the above copyright
56  *    notice, this list of conditions and the following disclaimer in the
57  *    documentation and/or other materials provided with the distribution.
58  * 3. Neither the name of the University nor the names of its contributors
59  *    may be used to endorse or promote products derived from this software
60  *    without specific prior written permission.
61  *
62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72  * SUCH DAMAGE.
73  *
74  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
75  * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
76  * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
77  */
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc.h>
83 #include <sys/sysctl.h>
84 #include <sys/buf.h>
85 #include <sys/conf.h>
86 #include <sys/disklabel.h>
87 #include <sys/disklabel32.h>
88 #include <sys/disklabel64.h>
89 #include <sys/diskslice.h>
90 #include <sys/diskmbr.h>
91 #include <sys/disk.h>
92 #include <sys/kerneldump.h>
93 #include <sys/malloc.h>
94 #include <machine/md_var.h>
95 #include <sys/ctype.h>
96 #include <sys/syslog.h>
97 #include <sys/device.h>
98 #include <sys/msgport.h>
99 #include <sys/devfs.h>
100 #include <sys/thread.h>
101 #include <sys/dsched.h>
102 #include <sys/queue.h>
103 #include <sys/lock.h>
104 #include <sys/udev.h>
105 #include <sys/uuid.h>
106 
107 #include <sys/buf2.h>
108 #include <sys/msgport2.h>
109 #include <sys/thread2.h>
110 
111 static MALLOC_DEFINE(M_DISK, "disk", "disk data");
112 static int disk_debug_enable = 0;
113 
114 static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
115 static void disk_msg_core(void *);
116 static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
117 static void disk_probe(struct disk *dp, int reprobe);
118 static void _setdiskinfo(struct disk *disk, struct disk_info *info);
119 static void bioqwritereorder(struct bio_queue_head *bioq);
120 static void disk_cleanserial(char *serno);
121 static int disk_debug(int, char *, ...) __printflike(2, 3);
122 static cdev_t _disk_create_named(const char *name, int unit, struct disk *dp,
123     struct dev_ops *raw_ops, int clone);
124 
125 static d_open_t diskopen;
126 static d_close_t diskclose;
127 static d_ioctl_t diskioctl;
128 static d_strategy_t diskstrategy;
129 static d_psize_t diskpsize;
130 static d_dump_t diskdump;
131 
132 static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
133 static struct lwkt_token disklist_token;
134 static struct lwkt_token ds_token;
135 
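/*
 * Two dev_ops templates for the managed (slice/partition) devices that
 * cover a driver's raw device.  They differ only in D_NOEMERGPGR; the
 * variant matching the raw device's flags is selected when the covering
 * devices are created (see _disk_create_named(), disk_probe() and
 * disk_probe_slice()).
 */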
136 static struct dev_ops disk1_ops = {
137 	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE | D_KVABIO },
138 	.d_open = diskopen,
139 	.d_close = diskclose,
140 	.d_read = physread,
141 	.d_write = physwrite,
142 	.d_ioctl = diskioctl,
143 	.d_strategy = diskstrategy,
144 	.d_dump = diskdump,
145 	.d_psize = diskpsize,
146 };
147 
148 static struct dev_ops disk2_ops = {
149 	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE | D_KVABIO |
150 		     D_NOEMERGPGR },
151 	.d_open = diskopen,
152 	.d_close = diskclose,
153 	.d_read = physread,
154 	.d_write = physwrite,
155 	.d_ioctl = diskioctl,
156 	.d_strategy = diskstrategy,
157 	.d_dump = diskdump,
158 	.d_psize = diskpsize,
159 };
160 
161 static struct objcache 	*disk_msg_cache;
162 
163 struct objcache_malloc_args disk_msg_malloc_args = {
164 	sizeof(struct disk_msg), M_DISK };
165 
166 static struct lwkt_port disk_dispose_port;
167 static struct lwkt_port disk_msg_port;
168 
169 static int
170 disk_debug(int level, char *fmt, ...)
171 {
172 	__va_list ap;
173 
174 	__va_start(ap, fmt);
175 	if (level <= disk_debug_enable)
176 		kvprintf(fmt, ap);
177 	__va_end(ap);
178 
179 	return 0;
180 }
181 
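/*
 * Probe a single slice for a disklabel (32 bit format first, then 64 bit)
 * and create or revalidate the partition devices (<slice>a, <slice>b, ...)
 * along with their part-by-label, part-by-uuid and serial number aliases.
 * Returns 0 on success and EINVAL if no label could be read or cloned.
 */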
182 static int
183 disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
184 {
185 	struct disk_info *info = &dp->d_info;
186 	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
187 	disklabel_ops_t ops;
188 	struct dev_ops *dops;
189 	struct partinfo part;
190 	const char *msg;
191 	char uuid_buf[128];
192 	cdev_t ndev;
193 	int sno;
194 	u_int i;
195 
196 	disk_debug(2, "disk_probe_slice (begin): %s (%s)\n",
197 		   dev->si_name, dp->d_cdev->si_name);
198 
199 	sno = slice ? slice - 1 : 0;
200 	dops = (dp->d_rawdev->si_ops->head.flags & D_NOEMERGPGR) ?
201 		&disk2_ops : &disk1_ops;
202 
203 	ops = &disklabel32_ops;
204 	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
205 	if (msg && !strcmp(msg, "no disk label")) {
206 		ops = &disklabel64_ops;
207 		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
208 	}
209 
210 	if (msg == NULL) {
211 		char packname[DISKLABEL_MAXPACKNAME];
212 
213 		if (slice != WHOLE_DISK_SLICE)
214 			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
215 		else
216 			sp->ds_reserved = 0;
217 
218 		ops->op_getpackname(sp->ds_label, packname, sizeof(packname));
219 
220 		destroy_dev_alias(dev, "by-label/*");
221 		if (packname[0])
222 			make_dev_alias(dev, "by-label/%s", packname);
223 
224 		sp->ds_ops = ops;
225 		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
226 			ops->op_loadpartinfo(sp->ds_label, i, &part);
227 
228 			if (part.fstype) {
229 				if (reprobe &&
230 				    (ndev = devfs_find_device_by_name("%s%c",
231 						dev->si_name, 'a' + i))
232 				) {
233 					/*
234 					 * Device already exists and
235 					 * is still valid.
236 					 */
237 					ndev->si_flags |= SI_REPROBE_TEST;
238 
239 					/*
240 					 * Destroy old UUID alias
241 					 */
242 					destroy_dev_alias(ndev,
243 							  "part-by-uuid/*");
244 					destroy_dev_alias(ndev,
245 							  "part-by-label/*");
246 
247 					/* Create UUID alias */
248 					if (!kuuid_is_nil(&part.storage_uuid)) {
249 						snprintf_uuid(uuid_buf,
250 						    sizeof(uuid_buf),
251 						    &part.storage_uuid);
252 						make_dev_alias(ndev,
253 						    "part-by-uuid/%s",
254 						    uuid_buf);
255 						udev_dict_set_cstr(ndev, "uuid", uuid_buf);
256 					}
257 					if (packname[0]) {
258 						make_dev_alias(ndev,
259 						    "part-by-label/%s.%c",
260 						    packname, 'a' + i);
261 					}
262 				} else {
263 					ndev = make_dev_covering(dops,
264 						dp->d_rawdev->si_ops,
265 						dkmakeminor(dkunit(dp->d_cdev),
266 							    slice, i),
267 						UID_ROOT, GID_OPERATOR, 0640,
268 						"%s%c", dev->si_name, 'a'+ i);
269 					ndev->si_parent = dev;
270 					ndev->si_iosize_max = dev->si_iosize_max;
271 					ndev->si_disk = dp;
272 					udev_dict_set_cstr(ndev, "subsystem", "disk");
273 					/* Inherit parent's disk type */
274 					if (dp->d_disktype) {
275 						udev_dict_set_cstr(ndev, "disk-type",
276 						    __DECONST(char *, dp->d_disktype));
277 					}
278 
279 					/* Create serno alias */
280 					if (dp->d_info.d_serialno) {
281 						make_dev_alias(ndev,
282 						    "serno/%s.s%d%c",
283 						    dp->d_info.d_serialno,
284 						    sno, 'a' + i);
285 					}
286 
287 					/* Create UUID alias */
288 					if (!kuuid_is_nil(&part.storage_uuid)) {
289 						snprintf_uuid(uuid_buf,
290 						    sizeof(uuid_buf),
291 						    &part.storage_uuid);
292 						make_dev_alias(ndev,
293 						    "part-by-uuid/%s",
294 						    uuid_buf);
295 						udev_dict_set_cstr(ndev, "uuid", uuid_buf);
296 					}
297 					if (packname[0]) {
298 						make_dev_alias(ndev,
299 						    "part-by-label/%s.%c",
300 						    packname, 'a' + i);
301 					}
302 					ndev->si_flags |= SI_REPROBE_TEST;
303 				}
304 			}
305 		}
306 	} else if (info->d_dsflags & DSO_COMPATLABEL) {
307 		msg = NULL;
308 		if (sp->ds_size >= 0x100000000ULL)
309 			ops = &disklabel64_ops;
310 		else
311 			ops = &disklabel32_ops;
312 		sp->ds_label = ops->op_clone_label(info, sp);
313 	} else {
314 		if (sp->ds_type == DOSPTYP_386BSD || /* XXX */
315 		    sp->ds_type == DOSPTYP_NETBSD ||
316 		    sp->ds_type == DOSPTYP_OPENBSD ||
317 		    sp->ds_type == DOSPTYP_DFLYBSD) {
318 			log(LOG_WARNING, "%s: cannot find label (%s)\n",
319 			    dev->si_name, msg);
320 		}
321 
322 		if (sp->ds_label.opaque != NULL && sp->ds_ops != NULL) {
323 			/* Clear out old label - it's not around anymore */
324 			disk_debug(2,
325 			    "disk_probe_slice: clear out old disklabel on %s\n",
326 			    dev->si_name);
327 
328 			sp->ds_ops->op_freedisklabel(&sp->ds_label);
329 			sp->ds_ops = NULL;
330 		}
331 	}
332 
333 	if (msg == NULL) {
334 		sp->ds_wlabel = FALSE;
335 	}
336 
337 	return (msg ? EINVAL : 0);
338 }
339 
340 /*
341  * This routine is only called for newly minted drives or to reprobe
342  * a drive with no open slices.  disk_probe_slice() is called directly
343  * when reprobing partition changes within slices.
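 *
 * For reference, the usual call path as wired up in this file is:
 *
 *	disk_setdiskinfo() / disk_setdiskinfo_sync()
 *	  -> disk_msg_send(DISK_DISK_PROBE, ...)
 *	    -> disk_msg_core()			(disk managing thread)
 *	      -> disk_probe(dp, 0)
 *	        -> disk_probe_slice()		(per BSD-labelled slice)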
344  */
345 static void
346 disk_probe(struct disk *dp, int reprobe)
347 {
348 	struct disk_info *info = &dp->d_info;
349 	cdev_t dev = dp->d_cdev;
350 	cdev_t ndev;
351 	int error, i, sno;
352 	struct diskslices *osp;
353 	struct diskslice *sp;
354 	struct dev_ops *dops;
355 	char uuid_buf[128];
356 
357 	/*
358 	 * d_media_blksize can be 0 for non-disk storage devices such
359 	 * as audio CDs.
360 	 */
361 	if (info->d_media_blksize == 0)
362 		return;
363 
364 	osp = dp->d_slice;
365 	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
366 	disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name);
367 
368 	error = mbrinit(dev, info, &(dp->d_slice));
369 	if (error) {
370 		dsgone(&osp);
371 		return;
372 	}
373 
374 	dops = (dp->d_rawdev->si_ops->head.flags & D_NOEMERGPGR) ?
375 		&disk2_ops : &disk1_ops;
376 
377 	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
378 		/*
379 		 * Ignore the whole-disk slice, it has already been created.
380 		 */
381 		if (i == WHOLE_DISK_SLICE)
382 			continue;
383 
384 #if 1
385 		/*
386 		 * Ignore the compatibility slice s0 if it's a device mapper
387 		 * volume.
388 		 */
389 		if ((i == COMPATIBILITY_SLICE) &&
390 		    (info->d_dsflags & DSO_DEVICEMAPPER))
391 			continue;
392 #endif
393 
394 		sp = &dp->d_slice->dss_slices[i];
395 
396 		/*
397 		 * Handle s0.  s0 is a compatibility slice if there are no
398 		 * other slices and it has not otherwise been set up, else
399 		 * we ignore it.
400 		 */
401 		if (i == COMPATIBILITY_SLICE) {
402 			sno = 0;
403 			if (sp->ds_type == 0 &&
404 			    dp->d_slice->dss_nslices == BASE_SLICE) {
405 				sp->ds_size = info->d_media_blocks;
406 				sp->ds_reserved = 0;
407 			}
408 		} else {
409 			sno = i - 1;
410 			sp->ds_reserved = 0;
411 		}
412 
413 		/*
414 		 * Ignore 0-length slices
415 		 */
416 		if (sp->ds_size == 0)
417 			continue;
418 
419 		if (reprobe &&
420 		    (ndev = devfs_find_device_by_name("%ss%d",
421 						      dev->si_name, sno))) {
422 			/*
423 			 * Device already exists and is still valid
424 			 */
425 			ndev->si_flags |= SI_REPROBE_TEST;
426 
427 			/*
428 			 * Destroy old UUID alias
429 			 */
430 			destroy_dev_alias(ndev, "slice-by-uuid/*");
431 
432 			/* Create UUID alias */
433 			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
434 				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
435 				    &sp->ds_stor_uuid);
436 				make_dev_alias(ndev, "slice-by-uuid/%s",
437 				    uuid_buf);
438 			}
439 		} else {
440 			/*
441 			 * Else create new device
442 			 */
443 			ndev = make_dev_covering(dops, dp->d_rawdev->si_ops,
444 					dkmakewholeslice(dkunit(dev), i),
445 					UID_ROOT, GID_OPERATOR, 0640,
446 					(info->d_dsflags & DSO_DEVICEMAPPER)?
447 					"%s.s%d" : "%ss%d", dev->si_name, sno);
448 			ndev->si_parent = dev;
449 			ndev->si_iosize_max = dev->si_iosize_max;
450 			udev_dict_set_cstr(ndev, "subsystem", "disk");
451 			/* Inherit parent's disk type */
452 			if (dp->d_disktype) {
453 				udev_dict_set_cstr(ndev, "disk-type",
454 				    __DECONST(char *, dp->d_disktype));
455 			}
456 
457 			/* Create serno alias */
458 			if (dp->d_info.d_serialno) {
459 				make_dev_alias(ndev, "serno/%s.s%d",
460 					       dp->d_info.d_serialno, sno);
461 			}
462 
463 			/* Create UUID alias */
464 			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
465 				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
466 				    &sp->ds_stor_uuid);
467 				make_dev_alias(ndev, "slice-by-uuid/%s",
468 				    uuid_buf);
469 			}
470 
471 			ndev->si_disk = dp;
472 			ndev->si_flags |= SI_REPROBE_TEST;
473 		}
474 		sp->ds_dev = ndev;
475 
476 		/*
477 		 * Probe appropriate slices for a disklabel
478 		 *
479 		 * XXX slice type 1 used by our gpt probe code.
480 		 * XXX slice type 0 used by mbr compat slice.
481 		 */
482 		if (sp->ds_type == DOSPTYP_386BSD ||
483 		    sp->ds_type == DOSPTYP_NETBSD ||
484 		    sp->ds_type == DOSPTYP_OPENBSD ||
485 		    sp->ds_type == DOSPTYP_DFLYBSD ||
486 		    sp->ds_type == 0 ||
487 		    sp->ds_type == 1) {
488 			if (dp->d_slice->dss_first_bsd_slice == 0)
489 				dp->d_slice->dss_first_bsd_slice = i;
490 			disk_probe_slice(dp, ndev, i, reprobe);
491 		}
492 	}
493 	dsgone(&osp);
494 	disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name);
495 }
496 
497 
498 static void
499 disk_msg_core(void *arg)
500 {
501 	struct disk	*dp;
502 	struct diskslice *sp;
503 	disk_msg_t msg;
504 	int run;
505 
506 	lwkt_gettoken(&disklist_token);
507 	lwkt_initport_thread(&disk_msg_port, curthread);
508 	wakeup(curthread);	/* synchronous startup */
509 	lwkt_reltoken(&disklist_token);
510 
511 	lwkt_gettoken(&ds_token);
512 	run = 1;
513 
514 	while (run) {
515 		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);
516 
517 		switch (msg->hdr.u.ms_result) {
518 		case DISK_DISK_PROBE:
519 			dp = (struct disk *)msg->load;
520 			disk_debug(1,
521 				    "DISK_DISK_PROBE: %s\n",
522 					dp->d_cdev->si_name);
523 			disk_iocom_update(dp);
524 			disk_probe(dp, 0);
525 			break;
526 		case DISK_DISK_DESTROY:
527 			dp = (struct disk *)msg->load;
528 			disk_debug(1,
529 				    "DISK_DISK_DESTROY: %s\n",
530 					dp->d_cdev->si_name);
531 			disk_iocom_uninit(dp);
532 
533 			/*
534 			 * Interlock against struct disk enumerations.
535 			 * Wait for enumerations to complete then remove
536 			 * the dp from the list before tearing it down.
537 			 * This avoids numerous races.
538 			 */
539 			lwkt_gettoken(&disklist_token);
540 			while (dp->d_refs)
541 				tsleep(&dp->d_refs, 0, "diskdel", hz / 10);
542 			LIST_REMOVE(dp, d_list);
543 
544 			dsched_disk_destroy(dp);
545 			devfs_destroy_related(dp->d_cdev);
546 			destroy_dev(dp->d_cdev);
547 			destroy_only_dev(dp->d_rawdev);
548 
549 			lwkt_reltoken(&disklist_token);
550 
551 			if (dp->d_info.d_serialno) {
552 				kfree(dp->d_info.d_serialno, M_TEMP);
553 				dp->d_info.d_serialno = NULL;
554 			}
555 			break;
556 		case DISK_UNPROBE:
557 			dp = (struct disk *)msg->load;
558 			disk_debug(1,
559 				    "DISK_DISK_UNPROBE: %s\n",
560 					dp->d_cdev->si_name);
561 			devfs_destroy_related(dp->d_cdev);
562 			break;
563 		case DISK_SLICE_REPROBE:
564 			dp = (struct disk *)msg->load;
565 			sp = (struct diskslice *)msg->load2;
566 			devfs_clr_related_flag(sp->ds_dev,
567 						SI_REPROBE_TEST);
568 			disk_debug(1,
569 				    "DISK_SLICE_REPROBE: %s\n",
570 				    sp->ds_dev->si_name);
571 			disk_probe_slice(dp, sp->ds_dev,
572 					 dkslice(sp->ds_dev), 1);
573 			devfs_destroy_related_without_flag(
574 					sp->ds_dev, SI_REPROBE_TEST);
575 			break;
576 		case DISK_DISK_REPROBE:
577 			dp = (struct disk *)msg->load;
578 			devfs_clr_related_flag(dp->d_cdev, SI_REPROBE_TEST);
579 			disk_debug(1,
580 				    "DISK_DISK_REPROBE: %s\n",
581 				    dp->d_cdev->si_name);
582 			disk_probe(dp, 1);
583 			devfs_destroy_related_without_flag(
584 					dp->d_cdev, SI_REPROBE_TEST);
585 			break;
586 		case DISK_SYNC:
587 			disk_debug(1, "DISK_SYNC\n");
588 			break;
589 		default:
590 			devfs_debug(DEVFS_DEBUG_WARNING,
591 				    "disk_msg_core: unknown message "
592 				    "received at core\n");
593 			break;
594 		}
595 		lwkt_replymsg(&msg->hdr, 0);
596 	}
597 	lwkt_reltoken(&ds_token);
598 	lwkt_exit();
599 }
600 
601 
602 /*
603  * Acts as a message drain. Any message that is replied to here gets
604  * destroyed and the memory freed.
605  */
606 static void
607 disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
608 {
609 	objcache_put(disk_msg_cache, msg);
610 }
611 
612 
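/*
 * Post an asynchronous message to the disk managing thread.  The message
 * is allocated from disk_msg_cache and its reply port is the autofree
 * drain above, so it is returned to the objcache once disk_msg_core()
 * replies to it.
 */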
613 void
614 disk_msg_send(uint32_t cmd, void *load, void *load2)
615 {
616 	disk_msg_t disk_msg;
617 	lwkt_port_t port = &disk_msg_port;
618 
619 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
620 
621 	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);
622 
623 	disk_msg->hdr.u.ms_result = cmd;
624 	disk_msg->load = load;
625 	disk_msg->load2 = load2;
626 	KKASSERT(port);
627 	lwkt_sendmsg(port, &disk_msg->hdr);
628 }
629 
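/*
 * As above, but wait for the disk managing thread to process the message
 * before returning; the reply is received on a temporary thread port and
 * the message is freed here.
 */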
630 void
631 disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
632 {
633 	struct lwkt_port rep_port;
634 	disk_msg_t disk_msg;
635 	lwkt_port_t port;
636 
637 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
638 	port = &disk_msg_port;
639 
640 	/* XXX could probably use curthread's built-in msgport */
641 	lwkt_initport_thread(&rep_port, curthread);
642 	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
643 
644 	disk_msg->hdr.u.ms_result = cmd;
645 	disk_msg->load = load;
646 	disk_msg->load2 = load2;
647 
648 	lwkt_domsg(port, &disk_msg->hdr, 0);
649 	objcache_put(disk_msg_cache, disk_msg);
650 }
651 
652 /*
653  * Create a raw device for the dev_ops template (which is returned).  Also
654  * create a slice and unit managed disk and overload the user visible
655  * device space with it.
656  *
657  * NOTE: The returned raw device is NOT a slice and unit managed device.
658  * It is an actual raw device representing the raw disk as specified by
659  * the passed dev_ops.  The disk layer not only returns such a raw device,
660  * it also uses it internally when passing (modified) commands through.
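 *
 * Minimal usage sketch (the softc fields, ops structure and sizes below
 * are hypothetical and only illustrate the call sequence used by typical
 * drivers):
 *
 *	cdev_t rawdev;
 *	struct disk_info info;
 *
 *	rawdev = disk_create(unit, &sc->disk, &mydriver_ops);
 *	rawdev->si_iosize_max = MAXPHYS;
 *
 *	bzero(&info, sizeof(info));
 *	info.d_media_blksize = 512;
 *	info.d_media_blocks = sc->total_sectors;
 *	disk_setdiskinfo(&sc->disk, &info);	(kicks off the probe)
 *	...
 *	disk_destroy(&sc->disk);		(on detach)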
661  */
662 cdev_t
663 disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
664 {
665 	return _disk_create_named(NULL, unit, dp, raw_ops, 0);
666 }
667 
668 cdev_t
669 disk_create_clone(int unit, struct disk *dp,
670 		  struct dev_ops *raw_ops)
671 {
672 	return _disk_create_named(NULL, unit, dp, raw_ops, 1);
673 }
674 
675 cdev_t
676 disk_create_named(const char *name, int unit, struct disk *dp,
677 		  struct dev_ops *raw_ops)
678 {
679 	return _disk_create_named(name, unit, dp, raw_ops, 0);
680 }
681 
682 cdev_t
683 disk_create_named_clone(const char *name, int unit, struct disk *dp,
684 			struct dev_ops *raw_ops)
685 {
686 	return _disk_create_named(name, unit, dp, raw_ops, 1);
687 }
688 
689 static cdev_t
690 _disk_create_named(const char *name, int unit, struct disk *dp,
691 		   struct dev_ops *raw_ops, int clone)
692 {
693 	cdev_t rawdev;
694 	struct dev_ops *dops;
695 
696 	disk_debug(1, "disk_create (begin): %s%d\n", name, unit);
697 
698 	if (name) {
699 		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
700 		    UID_ROOT, GID_OPERATOR, 0640, "%s", name);
701 	} else {
702 		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
703 		    UID_ROOT, GID_OPERATOR, 0640,
704 		    "%s%d", raw_ops->head.name, unit);
705 	}
706 
707 	bzero(dp, sizeof(*dp));
708 
709 	dops = (raw_ops->head.flags & D_NOEMERGPGR) ? &disk2_ops : &disk1_ops;
710 
711 	dp->d_rawdev = rawdev;
712 	dp->d_raw_ops = raw_ops;
713 	dp->d_dev_ops = dops;
714 
715 	if (name) {
716 		if (clone) {
717 			dp->d_cdev = make_only_dev_covering(
718 					dops, dp->d_rawdev->si_ops,
719 					dkmakewholedisk(unit),
720 					UID_ROOT, GID_OPERATOR, 0640,
721 					"%s", name);
722 		} else {
723 			dp->d_cdev = make_dev_covering(
724 					dops, dp->d_rawdev->si_ops,
725 					dkmakewholedisk(unit),
726 					UID_ROOT, GID_OPERATOR, 0640,
727 					"%s", name);
728 		}
729 	} else {
730 		if (clone) {
731 			dp->d_cdev = make_only_dev_covering(
732 					dops, dp->d_rawdev->si_ops,
733 					dkmakewholedisk(unit),
734 					UID_ROOT, GID_OPERATOR, 0640,
735 					"%s%d", raw_ops->head.name, unit);
736 		} else {
737 			dp->d_cdev = make_dev_covering(
738 					dops, dp->d_rawdev->si_ops,
739 					dkmakewholedisk(unit),
740 					UID_ROOT, GID_OPERATOR, 0640,
741 					"%s%d", raw_ops->head.name, unit);
742 		}
743 	}
744 
745 	udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk");
746 	dp->d_cdev->si_disk = dp;
747 
748 	if (name)
749 		dsched_disk_create(dp, name, unit);
750 	else
751 		dsched_disk_create(dp, raw_ops->head.name, unit);
752 
753 	lwkt_gettoken(&disklist_token);
754 	LIST_INSERT_HEAD(&disklist, dp, d_list);
755 	lwkt_reltoken(&disklist_token);
756 
757 	disk_iocom_init(dp);
758 
759 	disk_debug(1, "disk_create (end): %s%d\n",
760 		   (name != NULL)?(name):(raw_ops->head.name), unit);
761 
762 	return (dp->d_rawdev);
763 }
764 
765 int
766 disk_setdisktype(struct disk *disk, const char *type)
767 {
768 	int error;
769 
770 	KKASSERT(disk != NULL);
771 
772 	disk->d_disktype = type;
773 	error = udev_dict_set_cstr(disk->d_cdev, "disk-type",
774 				   __DECONST(char *, type));
775 	return error;
776 }
777 
778 int
779 disk_getopencount(struct disk *disk)
780 {
781 	return disk->d_opencount;
782 }
783 
784 static void
785 _setdiskinfo(struct disk *disk, struct disk_info *info)
786 {
787 	char *oldserialno;
788 
789 	oldserialno = disk->d_info.d_serialno;
790 	bcopy(info, &disk->d_info, sizeof(disk->d_info));
791 	info = &disk->d_info;
792 
793 	disk_debug(1, "_setdiskinfo: %s\n", disk->d_cdev->si_name);
794 
795 	/*
796 	 * The serial number is duplicated so the caller can throw
797 	 * their copy away.
798 	 */
799 	if (info->d_serialno && info->d_serialno[0] &&
800 	    (info->d_serialno[0] != ' ' || strlen(info->d_serialno) > 1)) {
801 		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
802 		disk_cleanserial(info->d_serialno);
803 		if (disk->d_cdev) {
804 			make_dev_alias(disk->d_cdev, "serno/%s",
805 				       info->d_serialno);
806 		}
807 	} else {
808 		info->d_serialno = NULL;
809 	}
810 	if (oldserialno)
811 		kfree(oldserialno, M_TEMP);
812 
813 	dsched_disk_update(disk, info);
814 
815 	/*
816 	 * The caller may set d_media_size or d_media_blocks and we
817 	 * calculate the other.
818 	 */
819 	KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0);
820 	if (info->d_media_size == 0 && info->d_media_blocks) {
821 		info->d_media_size = (u_int64_t)info->d_media_blocks *
822 				     info->d_media_blksize;
823 	} else if (info->d_media_size && info->d_media_blocks == 0 &&
824 		   info->d_media_blksize) {
825 		info->d_media_blocks = info->d_media_size /
826 				       info->d_media_blksize;
827 	}
828 
829 	/*
830 	 * The si_* fields for rawdev are not set until after the
831 	 * disk_create() call, so someone using the cooked version
832 	 * of the raw device (i.e. da0s0) will not get the right
833 	 * si_iosize_max unless we fix it up here.
834 	 */
835 	if (disk->d_cdev && disk->d_rawdev &&
836 	    disk->d_cdev->si_iosize_max == 0) {
837 		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
838 		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
839 		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
840 	}
841 
842 	/* Add the serial number to the udev_dictionary */
843 	if (info->d_serialno)
844 		udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno);
845 }
846 
847 /*
848  * Disk drivers must call this routine when media parameters are available
849  * or have changed.
850  */
851 void
852 disk_setdiskinfo(struct disk *disk, struct disk_info *info)
853 {
854 	_setdiskinfo(disk, info);
855 	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
856 	disk_debug(1, "disk_setdiskinfo: sent probe for %s\n",
857 		   disk->d_cdev->si_name);
858 }
859 
860 void
861 disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
862 {
863 	_setdiskinfo(disk, info);
864 	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
865 	disk_debug(1, "disk_setdiskinfo_sync: sent probe for %s\n",
866 		   disk->d_cdev->si_name);
867 }
868 
869 /*
870  * This routine is called when an adapter detaches.  The higher level
871  * managed disk device is destroyed while the lower level raw device is
872  * released.
873  */
874 void
875 disk_destroy(struct disk *disk)
876 {
877 	disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
878 	return;
879 }
880 
881 int
882 disk_dumpcheck(cdev_t dev, u_int64_t *size,
883 	       u_int64_t *blkno, u_int32_t *secsize)
884 {
885 	struct partinfo pinfo;
886 	int error;
887 
888 	if (size)
889 		*size = 0;	/* avoid gcc warnings */
890 	if (secsize)
891 		*secsize = 512;	/* avoid gcc warnings */
892 	bzero(&pinfo, sizeof(pinfo));
893 
894 	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
895 			   proc0.p_ucred, NULL, NULL);
896 	if (error)
897 		return (error);
898 
899 	if (pinfo.media_blksize == 0)
900 		return (ENXIO);
901 
902 	if (blkno) /* XXX: make sure this reserved stuff is right */
903 		*blkno = pinfo.reserved_blocks +
904 			pinfo.media_offset / pinfo.media_blksize;
905 	if (secsize)
906 		*secsize = pinfo.media_blksize;
907 	if (size)
908 		*size = (pinfo.media_blocks - pinfo.reserved_blocks);
909 
910 	return (0);
911 }
912 
913 int
914 disk_dumpconf(cdev_t dev, u_int onoff)
915 {
916 	struct dumperinfo di;
917 	u_int64_t	size, blkno;
918 	u_int32_t	secsize;
919 	int error;
920 
921 	if (!onoff)
922 		return set_dumper(NULL);
923 
924 	error = disk_dumpcheck(dev, &size, &blkno, &secsize);
925 
926 	if (error)
927 		return ENXIO;
928 
929 	bzero(&di, sizeof(struct dumperinfo));
930 	di.dumper = diskdump;
931 	di.priv = dev;
932 	di.blocksize = secsize;
933 	di.maxiosize = dev->si_iosize_max;
934 	di.mediaoffset = blkno * DEV_BSIZE;
935 	di.mediasize = size * DEV_BSIZE;
936 
937 	return set_dumper(&di);
938 }
939 
940 void
941 disk_unprobe(struct disk *disk)
942 {
943 	if (disk == NULL)
944 		return;
945 
946 	disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
947 }
948 
949 void
950 disk_invalidate (struct disk *disk)
951 {
952 	dsgone(&disk->d_slice);
953 }
954 
955 /*
956  * Enumerate disks.  Pass a marker and an initial NULL dp to initialize,
957  * then loop with the previously returned dp.
958  *
959  * The returned dp will be referenced, preventing its destruction.  When
960  * you pass the returned dp back into the loop the ref is dropped.
961  *
962  * WARNING: If terminating your loop early you must call
963  *	    disk_enumerate_stop().
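 *
 * Minimal loop sketch (see sysctl_disks() below for a real user; the
 * marker is caller-supplied storage used purely as a list placeholder):
 *
 *	struct disk marker;
 *	struct disk *dp = NULL;
 *
 *	while ((dp = disk_enumerate(&marker, dp)) != NULL) {
 *		if (need_to_stop_early) {
 *			disk_enumerate_stop(&marker, dp);
 *			break;
 *		}
 *	}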
964  */
965 struct disk *
966 disk_enumerate(struct disk *marker, struct disk *dp)
967 {
968 	lwkt_gettoken(&disklist_token);
969 	if (dp) {
970 		--dp->d_refs;
971 		dp = LIST_NEXT(marker, d_list);
972 		LIST_REMOVE(marker, d_list);
973 	} else {
974 		bzero(marker, sizeof(*marker));
975 		marker->d_flags = DISKFLAG_MARKER;
976 		dp = LIST_FIRST(&disklist);
977 	}
978 	while (dp) {
979 		if ((dp->d_flags & DISKFLAG_MARKER) == 0)
980 			break;
981 		dp = LIST_NEXT(dp, d_list);
982 	}
983 	if (dp) {
984 		++dp->d_refs;
985 		LIST_INSERT_AFTER(dp, marker, d_list);
986 	}
987 	lwkt_reltoken(&disklist_token);
988 	return (dp);
989 }
990 
991 /*
992  * Terminate an enumeration early.  Do not call this function if the
993  * enumeration ended normally.  dp can be NULL, indicating that you
994  * wish to retain the ref count on dp.
995  *
996  * This function removes the marker.
997  */
998 void
999 disk_enumerate_stop(struct disk *marker, struct disk *dp)
1000 {
1001 	lwkt_gettoken(&disklist_token);
1002 	LIST_REMOVE(marker, d_list);
1003 	if (dp)
1004 		--dp->d_refs;
1005 	lwkt_reltoken(&disklist_token);
1006 }
1007 
1008 static
1009 int
1010 sysctl_disks(SYSCTL_HANDLER_ARGS)
1011 {
1012 	struct disk marker;
1013 	struct disk *dp;
1014 	int error, first;
1015 
1016 	first = 1;
1017 	error = 0;
1018 	dp = NULL;
1019 
1020 	while ((dp = disk_enumerate(&marker, dp))) {
1021 		if (!first) {
1022 			error = SYSCTL_OUT(req, " ", 1);
1023 			if (error) {
1024 				disk_enumerate_stop(&marker, dp);
1025 				break;
1026 			}
1027 		} else {
1028 			first = 0;
1029 		}
1030 		error = SYSCTL_OUT(req, dp->d_rawdev->si_name,
1031 				   strlen(dp->d_rawdev->si_name));
1032 		if (error) {
1033 			disk_enumerate_stop(&marker, dp);
1034 			break;
1035 		}
1036 	}
1037 	if (error == 0)
1038 		error = SYSCTL_OUT(req, "", 1);
1039 	return error;
1040 }
1041 
1042 SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
1043     sysctl_disks, "A", "names of available disks");
1044 
1045 /*
1046  * Open a disk device or partition.
1047  */
1048 static
1049 int
1050 diskopen(struct dev_open_args *ap)
1051 {
1052 	cdev_t dev = ap->a_head.a_dev;
1053 	struct disk *dp;
1054 	int error;
1055 
1056 	/*
1057 	 * dp can't be NULL here XXX.
1058 	 *
1059 	 * d_slice will be NULL if setdiskinfo() has not been called yet.
1060 	 * setdiskinfo() is typically called whether the disk is present
1061 	 * or not (e.g. CD), but the base disk device is created first
1062 	 * and there may be a race.
1063 	 */
1064 	dp = dev->si_disk;
1065 	if (dp == NULL || dp->d_slice == NULL)
1066 		return (ENXIO);
1067 	error = 0;
1068 
1069 	/*
1070 	 * Deal with open races
1071 	 */
1072 	lwkt_gettoken(&ds_token);
1073 	while (dp->d_flags & DISKFLAG_LOCK) {
1074 		dp->d_flags |= DISKFLAG_WANTED;
1075 		error = tsleep(dp, PCATCH, "diskopen", hz);
1076 		if (error) {
1077 			lwkt_reltoken(&ds_token);
1078 			return (error);
1079 		}
1080 	}
1081 	dp->d_flags |= DISKFLAG_LOCK;
1082 
1083 	/*
1084 	 * Open the underlying raw device.
1085 	 */
1086 	if (!dsisopen(dp->d_slice)) {
1087 #if 0
1088 		if (!pdev->si_iosize_max)
1089 			pdev->si_iosize_max = dev->si_iosize_max;
1090 #endif
1091 		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
1092 				  ap->a_devtype, ap->a_cred, NULL);
1093 	}
1094 
1095 	if (error)
1096 		goto out;
1097 	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
1098 		       &dp->d_slice, &dp->d_info);
1099 	if (!dsisopen(dp->d_slice)) {
1100 		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype, NULL);
1101 	}
1102 out:
1103 	dp->d_flags &= ~DISKFLAG_LOCK;
1104 	if (dp->d_flags & DISKFLAG_WANTED) {
1105 		dp->d_flags &= ~DISKFLAG_WANTED;
1106 		wakeup(dp);
1107 	}
1108 	lwkt_reltoken(&ds_token);
1109 
1110 	KKASSERT(dp->d_opencount >= 0);
1111 	/* If the open was successful, bump open count */
1112 	if (error == 0)
1113 		atomic_add_int(&dp->d_opencount, 1);
1114 
1115 	return(error);
1116 }
1117 
1118 /*
1119  * Close a disk device or partition
1120  */
1121 static
1122 int
1123 diskclose(struct dev_close_args *ap)
1124 {
1125 	cdev_t dev = ap->a_head.a_dev;
1126 	struct disk *dp;
1127 	int error;
1128 	int lcount;
1129 
1130 	error = 0;
1131 	dp = dev->si_disk;
1132 
1133 	/*
1134 	 * The cdev_t represents the disk/slice/part.  The shared
1135 	 * dp structure governs all cdevs associated with the disk.
1136 	 *
1137 	 * As a safety, only close the underlying raw device on the last
1138 	 * close of the disk device, and only if our tracking of the
1139 	 * slices/partitions also indicates nothing is open.
1140 	 */
1141 	KKASSERT(dp->d_opencount >= 1);
1142 	lcount = atomic_fetchadd_int(&dp->d_opencount, -1);
1143 
1144 	lwkt_gettoken(&ds_token);
1145 	dsclose(dev, ap->a_devtype, dp->d_slice);
1146 	if (lcount <= 1 && !dsisopen(dp->d_slice)) {
1147 		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype, NULL);
1148 	}
1149 	lwkt_reltoken(&ds_token);
1150 
1151 	return (error);
1152 }
1153 
1154 /*
1155  * First execute the ioctl on the disk device, and if it isn't supported
1156  * try running it on the backing device.
1157  */
1158 static
1159 int
1160 diskioctl(struct dev_ioctl_args *ap)
1161 {
1162 	cdev_t dev = ap->a_head.a_dev;
1163 	struct disk *dp;
1164 	int error;
1165 	u_int u;
1166 
1167 	dp = dev->si_disk;
1168 	if (dp == NULL)
1169 		return (ENXIO);
1170 
1171 	devfs_debug(DEVFS_DEBUG_DEBUG,
1172 		    "diskioctl: cmd is: %lx (name: %s)\n",
1173 		    ap->a_cmd, dev->si_name);
1174 	devfs_debug(DEVFS_DEBUG_DEBUG,
1175 		    "diskioctl: &dp->d_slice is: %p, %p\n",
1176 		    &dp->d_slice, dp->d_slice);
1177 
1178 	if (ap->a_cmd == DIOCGKERNELDUMP) {
1179 		u = *(u_int *)ap->a_data;
1180 		return disk_dumpconf(dev, u);
1181 	}
1182 
1183 	if (ap->a_cmd == DIOCRECLUSTER && dev == dp->d_cdev) {
1184 		error = disk_iocom_ioctl(dp, ap->a_cmd, ap->a_data);
1185 		return error;
1186 	}
1187 
1188 	if (&dp->d_slice == NULL || dp->d_slice == NULL ||
1189 	    ((dp->d_info.d_dsflags & DSO_DEVICEMAPPER) &&
1190 	     dkslice(dev) == WHOLE_DISK_SLICE)) {
1191 		error = ENOIOCTL;
1192 	} else {
1193 		lwkt_gettoken(&ds_token);
1194 		error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
1195 				&dp->d_slice, &dp->d_info);
1196 		lwkt_reltoken(&ds_token);
1197 	}
1198 
1199 	if (error == ENOIOCTL) {
1200 		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
1201 				   ap->a_fflag, ap->a_cred, NULL, NULL);
1202 	}
1203 	return (error);
1204 }
1205 
1206 /*
1207  * Execute strategy routine
1208  *
1209  * WARNING! We are using the KVABIO API and must not access memory
1210  *         through bp->b_data without first calling bkvasync(bp).
1211  */
1212 static
1213 int
1214 diskstrategy(struct dev_strategy_args *ap)
1215 {
1216 	cdev_t dev = ap->a_head.a_dev;
1217 	struct bio *bio = ap->a_bio;
1218 	struct bio *nbio;
1219 	struct disk *dp;
1220 
1221 	dp = dev->si_disk;
1222 
1223 	if (dp == NULL) {
1224 		bio->bio_buf->b_error = ENXIO;
1225 		bio->bio_buf->b_flags |= B_ERROR;
1226 		biodone(bio);
1227 		return(0);
1228 	}
1229 	KKASSERT(dev->si_disk == dp);
1230 
1231 	/*
1232 	 * The dscheck() function will also transform the slice relative
1233 	 * block number i.e. bio->bio_offset into a block number that can be
1234 	 * passed directly to the underlying raw device.  If dscheck()
1235 	 * returns NULL it will have handled the bio for us (e.g. EOF
1236 	 * or error due to being beyond the device size).
1237 	 */
1238 	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
1239 		dev_dstrategy(dp->d_rawdev, nbio);
1240 	} else {
1241 		biodone(bio);
1242 	}
1243 	return(0);
1244 }
1245 
1246 /*
1247  * Return the partition size in ?blocks?
1248  */
1249 static
1250 int
1251 diskpsize(struct dev_psize_args *ap)
1252 {
1253 	cdev_t dev = ap->a_head.a_dev;
1254 	struct disk *dp;
1255 
1256 	dp = dev->si_disk;
1257 	if (dp == NULL)
1258 		return(ENODEV);
1259 
1260 	ap->a_result = dssize(dev, &dp->d_slice);
1261 
1262 	if ((ap->a_result == -1) &&
1263 	   (dp->d_info.d_dsflags & DSO_RAWPSIZE)) {
1264 		ap->a_head.a_dev = dp->d_rawdev;
1265 		return dev_doperate(&ap->a_head);
1266 	}
1267 	return(0);
1268 }
1269 
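/*
 * Kernel core dump entry point, installed by disk_dumpconf().  Validate
 * the request against the dump partition bounds and pass the operation
 * through to the underlying raw device.
 */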
1270 static int
1271 diskdump(struct dev_dump_args *ap)
1272 {
1273 	cdev_t dev = ap->a_head.a_dev;
1274 	struct disk *dp = dev->si_disk;
1275 	u_int64_t size, offset;
1276 	int error;
1277 
1278 	error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize);
1279 	/* XXX: this should probably go in disk_dumpcheck somehow */
1280 	if (ap->a_length != 0) {
1281 		size *= DEV_BSIZE;
1282 		offset = ap->a_blkno * DEV_BSIZE;
1283 		if ((ap->a_offset < offset) ||
1284 		    (ap->a_offset + ap->a_length - offset > size)) {
1285 			kprintf("Attempt to write outside dump "
1286 				"device boundaries.\n");
1287 			error = ENOSPC;
1288 		}
1289 	}
1290 
1291 	if (error == 0) {
1292 		ap->a_head.a_dev = dp->d_rawdev;
1293 		error = dev_doperate(&ap->a_head);
1294 	}
1295 
1296 	return(error);
1297 }
1298 
1299 
1300 SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
1301 	   0, sizeof(struct diskslices), "sizeof(struct diskslices)");
1302 
1303 SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
1304 	   0, sizeof(struct disk), "sizeof(struct disk)");
1305 
1306 /*
1307  * Reorder interval for burst write allowance and minor write
1308  * allowance.
1309  *
1310  * We always want to trickle some writes in to make use of the
1311  * disk's zone cache.  Bursting occurs on a longer interval and only
1312  * when runningbufspace is well over the hirunningspace limit.
1313  */
1314 int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
1315 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
1316 	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
1317 int bioq_reorder_minor_interval = 5;
1318 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
1319 	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");
1320 
1321 int bioq_reorder_burst_bytes = 3000000;
1322 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
1323 	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
1324 int bioq_reorder_minor_bytes = 262144;
1325 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
1326 	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");
1327 
1328 
1329 /*
1330  * Order I/Os.  Generally speaking this code is designed to make better
1331  * use of drive zone caches.  A drive zone cache can typically track linear
1332  * reads or writes for around 16 zones simultaneously.
1333  *
1334  * Read prioritization issues:  It is possible for hundreds of megabytes worth
1335  * of writes to be queued asynchronously.  This creates a huge bottleneck
1336  * for reads, reducing read bandwidth to a trickle.
1337  *
1338  * To solve this problem we generally reorder reads before writes.
1339  *
1340  * However, a large number of random reads can also starve writes and
1341  * make poor use of the drive zone cache, so we allow writes to trickle
1342  * in every N reads.
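 *
 * Worked example with the default tunables above (minor interval 5,
 * burst interval 60, 262144 minor bytes, 3000000 burst bytes): while
 * writes are queued, every 5th read moves the read/write transition
 * point past up to 256KB of linearly contiguous writes; when the read
 * count reaches 60 and runningbufspace is severe, up to ~3MB of writes
 * are released without the contiguity requirement and the count resets.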
1343  */
1344 void
1345 bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
1346 {
1347 #if 0
1348 	/*
1349 	 * The BIO wants to be ordered.  Adding to the tail also
1350 	 * causes transition to be set to NULL, forcing the ordering
1351 	 * of all prior I/O's.
1352 	 */
1353 	if (bio->bio_buf->b_flags & B_ORDERED) {
1354 		bioq_insert_tail(bioq, bio);
1355 		return;
1356 	}
1357 #endif
1358 
1359 	switch(bio->bio_buf->b_cmd) {
1360 	case BUF_CMD_READ:
1361 		if (bioq->transition) {
1362 			/*
1363 			 * Insert before the first write.  Bleedover writes
1364 			 * based on reorder intervals to prevent starvation.
1365 			 */
1366 			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
1367 			++bioq->reorder;
1368 			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
1369 				bioqwritereorder(bioq);
1370 				if (bioq->reorder >=
1371 				    bioq_reorder_burst_interval) {
1372 					bioq->reorder = 0;
1373 				}
1374 			}
1375 		} else {
1376 			/*
1377 			 * No writes queued (or ordering was forced),
1378 			 * insert at tail.
1379 			 */
1380 			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1381 		}
1382 		break;
1383 	case BUF_CMD_WRITE:
1384 		/*
1385 		 * Writes are always appended.  If no writes were previously
1386 		 * queued or an ordered tail insertion occurred, the transition
1387 		 * field will be NULL.
1388 		 */
1389 		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1390 		if (bioq->transition == NULL)
1391 			bioq->transition = bio;
1392 		break;
1393 	default:
1394 		/*
1395 		 * All other request types are forced to be ordered.
1396 		 */
1397 		bioq_insert_tail(bioq, bio);
1398 		break;
1399 	}
1400 }
1401 
1402 /*
1403  * Move the read-write transition point to prevent reads from
1404  * completely starving our writes.  This brings a number of writes into
1405  * the fold every N reads.
1406  *
1407  * We bring a few linear writes into the fold on a minor interval
1408  * and we bring a non-linear burst of writes into the fold on a major
1409  * interval.  Bursting only occurs if runningbufspace is really high
1410  * (typically from syncs, fsyncs, or HAMMER flushes).
1411  */
1412 static
1413 void
1414 bioqwritereorder(struct bio_queue_head *bioq)
1415 {
1416 	struct bio *bio;
1417 	off_t next_offset;
1418 	size_t left;
1419 	size_t n;
1420 	int check_off;
1421 
1422 	if (bioq->reorder < bioq_reorder_burst_interval ||
1423 	    !buf_runningbufspace_severe()) {
1424 		left = (size_t)bioq_reorder_minor_bytes;
1425 		check_off = 1;
1426 	} else {
1427 		left = (size_t)bioq_reorder_burst_bytes;
1428 		check_off = 0;
1429 	}
1430 
1431 	next_offset = bioq->transition->bio_offset;
1432 	while ((bio = bioq->transition) != NULL &&
1433 	       (check_off == 0 || next_offset == bio->bio_offset)
1434 	) {
1435 		n = bio->bio_buf->b_bcount;
1436 		next_offset = bio->bio_offset + n;
1437 		bioq->transition = TAILQ_NEXT(bio, bio_act);
1438 		if (left < n)
1439 			break;
1440 		left -= n;
1441 	}
1442 }
1443 
1444 /*
1445  * Bounds checking against the media size, used for the raw partition.
1446  * secsize, mediasize and b_blkno must all be in the same units.
1447  * Possibly this has to be DEV_BSIZE (512).
1448  */
1449 int
1450 bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
1451 {
1452 	struct buf *bp = bio->bio_buf;
1453 	int64_t sz;
1454 
1455 	sz = howmany(bp->b_bcount, secsize);
1456 
1457 	if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
1458 		sz = mediasize - bio->bio_offset/DEV_BSIZE;
1459 		if (sz == 0) {
1460 			/* If exactly at end of disk, return EOF. */
1461 			bp->b_resid = bp->b_bcount;
1462 			return 0;
1463 		}
1464 		if (sz < 0) {
1465 			/* If past end of disk, return EINVAL. */
1466 			bp->b_error = EINVAL;
1467 			return 0;
1468 		}
1469 		/* Otherwise, truncate request. */
1470 		bp->b_bcount = sz * secsize;
1471 	}
1472 
1473 	return 1;
1474 }
1475 
1476 /*
1477  * Disk error is the preface to plaintive error messages
1478  * about failing disk transfers.  It prints messages of the form
1479 
1480 da0s1a: hard error reading offset 000000001000 for 8192
1481 
1482  * identifying the device, the operation and the byte offset and size
1483  * of the failed transfer.  donecnt, when non-zero, is appended as the
1484  * number of bytes that did complete; pass 0 if the position of the
1485  * error is unknown.  The message is currently always printed with
1486  * kprintf (the pri argument is not consulted) and should be completed
1487  * (with at least a newline) by the caller.  There is no trailing
1488  * space.
1489  */
1490 void
1491 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
1492 {
1493 	struct buf *bp = bio->bio_buf;
1494 	const char *term;
1495 
1496 	switch(bp->b_cmd) {
1497 	case BUF_CMD_READ:
1498 		term = "read";
1499 		break;
1500 	case BUF_CMD_WRITE:
1501 		term = "write";
1502 		break;
1503 	default:
1504 		term = "access";
1505 		break;
1506 	}
1507 	kprintf("%s: %s %sing ", dev->si_name, what, term);
1508 	kprintf("offset %012llx for %d",
1509 		(long long)bio->bio_offset,
1510 		bp->b_bcount);
1511 
1512 	if (donecnt)
1513 		kprintf(" (%d bytes completed)", donecnt);
1514 }
1515 
1516 /*
1517  * Locate a disk device
1518  */
1519 cdev_t
1520 disk_locate(const char *devname)
1521 {
1522 	return devfs_find_device_by_name("%s", devname);
1523 }
1524 
1525 void
1526 disk_config(void *arg)
1527 {
1528 	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
1529 }
1530 
1531 static void
1532 disk_init(void)
1533 {
1534 	struct thread* td_core;
1535 
1536 	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
1537 					 NULL, NULL, NULL,
1538 					 objcache_malloc_alloc,
1539 					 objcache_malloc_free,
1540 					 &disk_msg_malloc_args);
1541 
1542 	lwkt_token_init(&disklist_token, "disks");
1543 	lwkt_token_init(&ds_token, "ds");
1544 
1545 	/*
1546 	 * Initialize the reply-only port which acts as a message drain
1547 	 */
1548 	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);
1549 
1550 	lwkt_gettoken(&disklist_token);
1551 	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
1552 		    0, -1, "disk_msg_core");
1553 	tsleep(td_core, 0, "diskcore", 0);
1554 	lwkt_reltoken(&disklist_token);
1555 }
1556 
1557 static void
1558 disk_uninit(void)
1559 {
1560 	objcache_destroy(disk_msg_cache);
1561 }
1562 
1563 /*
1564  * Clean out illegal characters in serial numbers.
1565  */
1566 static void
1567 disk_cleanserial(char *serno)
1568 {
1569 	char c;
1570 
1571 	while ((c = *serno) != 0) {
1572 		if (c >= 'a' && c <= 'z')
1573 			;
1574 		else if (c >= 'A' && c <= 'Z')
1575 			;
1576 		else if (c >= '0' && c <= '9')
1577 			;
1578 		else if (c == '-' || c == '@' || c == '+' || c == '.')
1579 			;
1580 		else
1581 			c = '_';
1582 		*serno++= c;
1583 	}
1584 }
1585 
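/*
 * kern.disk_debug controls the verbosity of the disk_debug() messages in
 * this file: 0 (the default) is silent, level 1 reports probe/create and
 * destroy events, and level 2 adds per-slice probing detail.  A message
 * is printed when its level argument is <= disk_debug_enable.
 */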
1586 TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
1587 SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
1588 	   0, "Enable subr_disk debugging");
1589 
1590 SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
1591 SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);
1592