xref: /dflybsd-src/sys/kern/subr_disk.c (revision a69e510a03efa04b5051de1d2c204f0d059845e1)
1 /*
2  * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * ----------------------------------------------------------------------------
35  * "THE BEER-WARE LICENSE" (Revision 42):
36  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
37  * can do whatever you want with this stuff. If we meet some day, and you think
38  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
39  * ----------------------------------------------------------------------------
40  *
41  * Copyright (c) 1982, 1986, 1988, 1993
42  *	The Regents of the University of California.  All rights reserved.
43  * (c) UNIX System Laboratories, Inc.
44  * All or some portions of this file are derived from material licensed
45  * to the University of California by American Telephone and Telegraph
46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47  * the permission of UNIX System Laboratories, Inc.
48  *
49  * Redistribution and use in source and binary forms, with or without
50  * modification, are permitted provided that the following conditions
51  * are met:
52  * 1. Redistributions of source code must retain the above copyright
53  *    notice, this list of conditions and the following disclaimer.
54  * 2. Redistributions in binary form must reproduce the above copyright
55  *    notice, this list of conditions and the following disclaimer in the
56  *    documentation and/or other materials provided with the distribution.
57  * 3. All advertising materials mentioning features or use of this software
58  *    must display the following acknowledgement:
59  *	This product includes software developed by the University of
60  *	California, Berkeley and its contributors.
61  * 4. Neither the name of the University nor the names of its contributors
62  *    may be used to endorse or promote products derived from this software
63  *    without specific prior written permission.
64  *
65  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
66  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
69  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
70  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
71  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
72  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
73  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
74  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
75  * SUCH DAMAGE.
76  *
77  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
78  * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
79  * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
80  * $DragonFly: src/sys/kern/subr_disk.c,v 1.40 2008/06/05 18:06:32 swildner Exp $
81  */
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/kernel.h>
86 #include <sys/proc.h>
87 #include <sys/sysctl.h>
88 #include <sys/buf.h>
89 #include <sys/conf.h>
90 #include <sys/disklabel.h>
91 #include <sys/disklabel32.h>
92 #include <sys/disklabel64.h>
93 #include <sys/diskslice.h>
94 #include <sys/diskmbr.h>
95 #include <sys/disk.h>
96 #include <sys/malloc.h>
97 #include <sys/sysctl.h>
98 #include <machine/md_var.h>
99 #include <sys/ctype.h>
100 #include <sys/syslog.h>
101 #include <sys/device.h>
102 #include <sys/msgport.h>
103 #include <sys/msgport2.h>
104 #include <sys/buf2.h>
105 #include <sys/devfs.h>
106 #include <sys/thread.h>
107 #include <sys/thread2.h>
108 
109 #include <sys/queue.h>
110 #include <sys/lock.h>
111 
static MALLOC_DEFINE(M_DISK, "disk", "disk data");
/* Verbosity threshold for disk_debug(); messages with level <= this print. */
static int disk_debug_enable = 0;

static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void disk_msg_core(void *);
static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
static void disk_probe(struct disk *dp, int reprobe);
static void _setdiskinfo(struct disk *disk, struct disk_info *info);
static void bioqwritereorder(struct bio_queue_head *bioq);
static void disk_cleanserial(char *serno);

static d_open_t diskopen;
static d_close_t diskclose;
static d_ioctl_t diskioctl;
static d_strategy_t diskstrategy;
static d_psize_t diskpsize;
static d_clone_t diskclone;
static d_dump_t diskdump;

/* List of all managed disks; protected by disklist_token. */
static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
static struct lwkt_token disklist_token;

/*
 * dev_ops template installed on the managed (slice/partition) devices.
 * Reads and writes go through the generic physread/physwrite helpers.
 */
static struct dev_ops disk_ops = {
	{ "disk", 0, D_DISK },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
	.d_clone = diskclone
};

/* Objcache backing disk_msg allocations for the disk messaging thread. */
static struct objcache 	*disk_msg_cache;

struct objcache_malloc_args disk_msg_malloc_args = {
	sizeof(struct disk_msg), M_DISK };

/*
 * disk_msg_port: request queue serviced by disk_msg_core().
 * disk_dispose_port: reply port used by async senders (disk_msg_send);
 * presumably wired to disk_msg_autofree_reply() — setup not in this chunk.
 */
static struct lwkt_port disk_dispose_port;
static struct lwkt_port disk_msg_port;
154 
155 static int
156 disk_debug(int level, char *fmt, ...)
157 {
158 	__va_list ap;
159 
160 	__va_start(ap, fmt);
161 	if (level <= disk_debug_enable)
162 		kvprintf(fmt, ap);
163 	__va_end(ap);
164 
165 	return 0;
166 }
167 
/*
 * Read a disklabel from slice `slice` of disk `dp` (trying the 32 bit
 * label format first, falling back to the 64 bit format) and create a
 * devfs node for every partition with a filesystem type set.  When
 * `reprobe` is set, existing valid partition devices are kept and
 * flagged with SI_REPROBE_TEST instead of being re-created.
 *
 * Returns 0 on success, EINVAL if no label could be found or
 * synthesized.
 */
static int
disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
	disklabel_ops_t ops;
	struct partinfo part;
	const char *msg;
	cdev_t ndev;
	int sno;
	u_int i;

	disk_debug(2,
		    "disk_probe_slice (begin): %s (%s)\n",
			dev->si_name, dp->d_cdev->si_name);

	/* Slice number used for the serno/ alias; slice 0 maps to 0 */
	sno = slice ? slice - 1 : 0;

	/* Try the 32 bit label, then retry with the 64 bit label */
	ops = &disklabel32_ops;
	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	if (msg && !strcmp(msg, "no disk label")) {
		ops = &disklabel64_ops;
		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	}
	if (msg == NULL) {
		/* Label found: fix up reserved areas, create partitions */
		if (slice != WHOLE_DISK_SLICE)
			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
		else
			sp->ds_reserved = 0;

		sp->ds_ops = ops;
		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
			ops->op_loadpartinfo(sp->ds_label, i, &part);
			if (part.fstype) {
				if (reprobe &&
				    (ndev = devfs_find_device_by_name("%s%c",
						dev->si_name, 'a' + i))
				) {
					/*
					 * Device already exists and
					 * is still valid.
					 */
					ndev->si_flags |= SI_REPROBE_TEST;
				} else {
					/*
					 * Create the partition device
					 * ("%s%c" -> e.g. da0s1a), plus a
					 * serno/ alias when a serial number
					 * is known.
					 */
					ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
						dkmakeminor(dkunit(dp->d_cdev),
							    slice, i),
						UID_ROOT, GID_OPERATOR, 0640,
						"%s%c", dev->si_name, 'a'+ i);
					ndev->si_disk = dp;
					if (dp->d_info.d_serialno) {
						make_dev_alias(ndev,
						    "serno/%s.s%d%c",
						    dp->d_info.d_serialno,
						    sno, 'a' + i);
					}
					ndev->si_flags |= SI_REPROBE_TEST;
				}
			}
		}
	} else if (info->d_dsflags & DSO_COMPATLABEL) {
		/* No on-disk label; driver asked for a synthesized one */
		msg = NULL;
		if (sp->ds_size >= 0x100000000ULL)
			ops = &disklabel64_ops;
		else
			ops = &disklabel32_ops;
		sp->ds_label = ops->op_clone_label(info, sp);
	} else {
		/* Warn only for slice types expected to carry a BSD label */
		if (sp->ds_type == DOSPTYP_386BSD || /* XXX */
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD) {
			log(LOG_WARNING, "%s: cannot find label (%s)\n",
			    dev->si_name, msg);
		}
	}

	/* Label read/synthesis succeeded: writes to the label area disabled */
	if (msg == NULL) {
		sp->ds_wlabel = FALSE;
	}

	return (msg ? EINVAL : 0);
}
250 
251 
/*
 * Build the slice table for the disk from its MBR and create a devfs
 * node for every non-empty slice, then probe likely slices for a
 * disklabel via disk_probe_slice().  With `reprobe` set, existing
 * valid device nodes are kept and flagged rather than re-created.
 */
static void
disk_probe(struct disk *dp, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	cdev_t dev = dp->d_cdev;
	cdev_t ndev;
	int error, i, sno;
	struct diskslice *sp;

	KKASSERT (info->d_media_blksize != 0);

	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
	disk_debug(1,
		    "disk_probe (begin): %s\n",
			dp->d_cdev->si_name);

	error = mbrinit(dev, info, &(dp->d_slice));
	if (error)
		return;

	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
		/*
		 * Ignore the whole-disk slice, it has already been created.
		 */
		if (i == WHOLE_DISK_SLICE)
			continue;
		sp = &dp->d_slice->dss_slices[i];

		/*
		 * Handle s0.  s0 is a compatibility slice if there are no
		 * other slices and it has not otherwise been set up, else
		 * we ignore it.
		 */
		if (i == COMPATIBILITY_SLICE) {
			sno = 0;
			if (sp->ds_type == 0 &&
			    dp->d_slice->dss_nslices == BASE_SLICE) {
				sp->ds_size = info->d_media_blocks;
				sp->ds_reserved = 0;
			}
		} else {
			sno = i - 1;
			sp->ds_reserved = 0;
		}

		/*
		 * Ignore 0-length slices
		 */
		if (sp->ds_size == 0)
			continue;

		if (reprobe &&
		    (ndev = devfs_find_device_by_name("%ss%d",
						      dev->si_name, sno))) {
			/*
			 * Device already exists and is still valid
			 */
			ndev->si_flags |= SI_REPROBE_TEST;
		} else {
			/*
			 * Else create new device ("%ss%d" -> e.g. da0s1),
			 * with a serno/ alias when a serial number is known.
			 */
			ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
					dkmakewholeslice(dkunit(dev), i),
					UID_ROOT, GID_OPERATOR, 0640,
					"%ss%d", dev->si_name, sno);
			if (dp->d_info.d_serialno) {
				make_dev_alias(ndev, "serno/%s.s%d",
					       dp->d_info.d_serialno, sno);
			}
			ndev->si_disk = dp;
			ndev->si_flags |= SI_REPROBE_TEST;
		}
		sp->ds_dev = ndev;

		/*
		 * Probe appropriate slices for a disklabel
		 *
		 * XXX slice type 1 used by our gpt probe code.
		 * XXX slice type 0 used by mbr compat slice.
		 */
		if (sp->ds_type == DOSPTYP_386BSD ||
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD ||
		    sp->ds_type == 0 ||
		    sp->ds_type == 1) {
			if (dp->d_slice->dss_first_bsd_slice == 0)
				dp->d_slice->dss_first_bsd_slice = i;
			disk_probe_slice(dp, ndev, i, reprobe);
		}
	}
	disk_debug(1,
		    "disk_probe (end): %s\n",
			dp->d_cdev->si_name);
}
347 
348 
/*
 * Main loop of the disk messaging thread.  Takes messages off
 * disk_msg_port, dispatches on the command in hdr.u.ms_result (payload
 * pointers in load/load2), and replies to each message when done so
 * the sender's reply port can free or wake as appropriate.
 */
static void
disk_msg_core(void *arg)
{
	struct disk	*dp;
	struct diskslice *sp;
	lwkt_tokref ilock;
	disk_msg_t msg;
	int run;

	/* Bind the request port to this thread and signal the starter */
	lwkt_initport_thread(&disk_msg_port, curthread);
	wakeup(curthread);
	run = 1;

	while (run) {
		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);

		switch (msg->hdr.u.ms_result) {
		case DISK_DISK_PROBE:
			/* Initial probe of a newly attached disk */
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_PROBE: %s\n",
					dp->d_cdev->si_name);
			disk_probe(dp, 0);
			break;
		case DISK_DISK_DESTROY:
			/*
			 * Tear down the devfs nodes, unlink the disk from
			 * disklist and release its serial number string.
			 */
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_DESTROY: %s\n",
					dp->d_cdev->si_name);
			devfs_destroy_subnames(dp->d_cdev->si_name);
			devfs_destroy_dev(dp->d_cdev);
			lwkt_gettoken(&ilock, &disklist_token);
			LIST_REMOVE(dp, d_list);
			lwkt_reltoken(&ilock);
			if (dp->d_info.d_serialno) {
				kfree(dp->d_info.d_serialno, M_TEMP);
				dp->d_info.d_serialno = NULL;
			}
			break;
		case DISK_UNPROBE:
			/* Remove sub-devices but keep the disk itself */
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_UNPROBE: %s\n",
					dp->d_cdev->si_name);
			devfs_destroy_subnames(dp->d_cdev->si_name);
			break;
		case DISK_SLICE_REPROBE:
			/*
			 * Re-probe one slice: clear SI_REPROBE_TEST on its
			 * sub-devices, re-run the probe (which re-flags
			 * still-valid devices), then destroy the stragglers.
			 */
			dp = (struct disk *)msg->load;
			sp = (struct diskslice *)msg->load2;
			devfs_clr_subnames_flag(sp->ds_dev->si_name,
						SI_REPROBE_TEST);
			disk_debug(1,
				    "DISK_SLICE_REPROBE: %s\n",
				    sp->ds_dev->si_name);
			disk_probe_slice(dp, sp->ds_dev,
					 dkslice(sp->ds_dev), 1);
			devfs_destroy_subnames_without_flag(
					sp->ds_dev->si_name, SI_REPROBE_TEST);
			break;
		case DISK_DISK_REPROBE:
			/* Same flag dance as above, for the whole disk */
			dp = (struct disk *)msg->load;
			devfs_clr_subnames_flag(dp->d_cdev->si_name, SI_REPROBE_TEST);
			disk_debug(1,
				    "DISK_DISK_REPROBE: %s\n",
				    dp->d_cdev->si_name);
			disk_probe(dp, 1);
			devfs_destroy_subnames_without_flag(
					dp->d_cdev->si_name, SI_REPROBE_TEST);
			break;
		case DISK_SYNC:
			/* No work; the reply alone serves as a barrier */
			disk_debug(1, "DISK_SYNC\n");
			break;
		default:
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "disk_msg_core: unknown message "
				    "received at core\n");
			break;
		}
		lwkt_replymsg((lwkt_msg_t)msg, 0);
	}
	lwkt_exit();
}
431 
432 
/*
 * Acts as a message drain. Any message that is replied to here gets
 * destroyed and the memory freed (returned to disk_msg_cache).
 */
static void
disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	objcache_put(disk_msg_cache, msg);
}
442 
443 
444 void
445 disk_msg_send(uint32_t cmd, void *load, void *load2)
446 {
447 	disk_msg_t disk_msg;
448 	lwkt_port_t port = &disk_msg_port;
449 
450 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
451 
452 	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);
453 
454 	disk_msg->hdr.u.ms_result = cmd;
455 	disk_msg->load = load;
456 	disk_msg->load2 = load2;
457 	KKASSERT(port);
458 	lwkt_sendmsg(port, (lwkt_msg_t)disk_msg);
459 }
460 
461 void
462 disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
463 {
464 	struct lwkt_port rep_port;
465 	disk_msg_t disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
466 	disk_msg_t	msg_incoming;
467 	lwkt_port_t port = &disk_msg_port;
468 
469 	lwkt_initport_thread(&rep_port, curthread);
470 	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
471 
472 	disk_msg->hdr.u.ms_result = cmd;
473 	disk_msg->load = load;
474 	disk_msg->load2 = load2;
475 
476 	KKASSERT(port);
477 	lwkt_sendmsg(port, (lwkt_msg_t)disk_msg);
478 	msg_incoming = lwkt_waitport(&rep_port, 0);
479 }
480 
/*
 * Create a raw device for the dev_ops template (which is returned).  Also
 * create a slice and unit managed disk and overload the user visible
 * device space with it.
 *
 * NOTE: The returned raw device is NOT a slice and unit managed device.
 * It is an actual raw device representing the raw disk as specified by
 * the passed dev_ops.  The disk layer not only returns such a raw device,
 * it also uses it internally when passing (modified) commands through.
 */
cdev_t
disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	lwkt_tokref ilock;
	cdev_t rawdev;

	disk_debug(1,
		    "disk_create (begin): %s%d\n",
			raw_ops->head.name, unit);

	/*
	 * The raw backing device (make_only_dev — presumably not linked
	 * into the visible devfs namespace; confirm against devfs docs).
	 */
	rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%s%d", raw_ops->head.name, unit);

	/* Caller provides the struct disk storage; start from a clean slate */
	bzero(dp, sizeof(*dp));

	dp->d_rawdev = rawdev;
	dp->d_raw_ops = raw_ops;
	dp->d_dev_ops = &disk_ops;
	/* The user visible whole-disk device, layered over the raw device */
	dp->d_cdev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
			    dkmakewholedisk(unit),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%s%d", raw_ops->head.name, unit);

	dp->d_cdev->si_disk = dp;

	/* Register on the global disk list */
	lwkt_gettoken(&ilock, &disklist_token);
	LIST_INSERT_HEAD(&disklist, dp, d_list);
	lwkt_reltoken(&ilock);

	disk_debug(1,
		    "disk_create (end): %s%d\n",
			raw_ops->head.name, unit);

	return (dp->d_rawdev);
}
527 
528 
/*
 * Copy the driver supplied disk_info into the disk structure and
 * normalize it:
 *  - duplicate and sanitize the serial number, creating a serno/ alias
 *  - derive d_media_size from d_media_blocks or vice versa
 *  - propagate iosize/bsize hints from the raw device to the cooked one
 */
static void
_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	char *oldserialno;

	/* Remember the previous serial so it can be freed after the copy */
	oldserialno = disk->d_info.d_serialno;
	bcopy(info, &disk->d_info, sizeof(disk->d_info));
	info = &disk->d_info;	/* operate on our embedded copy from here */

	disk_debug(1,
		    "_setdiskinfo: %s\n",
			disk->d_cdev->si_name);

	/*
	 * The serial number is duplicated so the caller can throw
	 * their copy away.
	 */
	if (info->d_serialno && info->d_serialno[0]) {
		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
		disk_cleanserial(info->d_serialno);
		if (disk->d_cdev) {
			make_dev_alias(disk->d_cdev, "serno/%s",
					info->d_serialno);
		}
	} else {
		info->d_serialno = NULL;
	}
	if (oldserialno)
		kfree(oldserialno, M_TEMP);

	/*
	 * The caller may set d_media_size or d_media_blocks and we
	 * calculate the other.
	 */
	KKASSERT(info->d_media_size == 0 || info->d_media_blksize == 0);
	if (info->d_media_size == 0 && info->d_media_blocks) {
		info->d_media_size = (u_int64_t)info->d_media_blocks *
				     info->d_media_blksize;
	} else if (info->d_media_size && info->d_media_blocks == 0 &&
		   info->d_media_blksize) {
		info->d_media_blocks = info->d_media_size /
				       info->d_media_blksize;
	}

	/*
	 * The si_* fields for rawdev are not set until after the
	 * disk_create() call, so someone using the cooked version
	 * of the raw device (i.e. da0s0) will not get the right
	 * si_iosize_max unless we fix it up here.
	 */
	if (disk->d_cdev && disk->d_rawdev &&
	    disk->d_cdev->si_iosize_max == 0) {
		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
	}
}
586 
587 /*
588  * Disk drivers must call this routine when media parameters are available
589  * or have changed.
590  */
591 void
592 disk_setdiskinfo(struct disk *disk, struct disk_info *info)
593 {
594 	_setdiskinfo(disk, info);
595 	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
596 	disk_debug(1,
597 		    "disk_setdiskinfo: sent probe for %s\n",
598 			disk->d_cdev->si_name);
599 }
600 
601 void
602 disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
603 {
604 	_setdiskinfo(disk, info);
605 	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
606 	disk_debug(1,
607 		    "disk_setdiskinfo_sync: sent probe for %s\n",
608 			disk->d_cdev->si_name);
609 }
610 
611 /*
612  * This routine is called when an adapter detaches.  The higher level
613  * managed disk device is destroyed while the lower level raw device is
614  * released.
615  */
616 void
617 disk_destroy(struct disk *disk)
618 {
619 	disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
620 	return;
621 }
622 
/*
 * Validate crash-dump parameters against the partition backing `dev`
 * and compute:
 *   *count   - sectors required to dump Maxmem pages of physical memory
 *   *blkno   - absolute starting sector (dumplo64 plus partition offset)
 *   *secsize - media sector size
 * Returns 0 on success, the DIOCGPART ioctl error, ENXIO if the media
 * has no block size, or ENOSPC if the dump does not fit.
 */
int
disk_dumpcheck(cdev_t dev, u_int64_t *count, u_int64_t *blkno, u_int *secsize)
{
	struct partinfo pinfo;
	int error;

	bzero(&pinfo, sizeof(pinfo));
	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
			   proc0.p_ucred, NULL);
	if (error)
		return (error);
	if (pinfo.media_blksize == 0)
		return (ENXIO);
	*count = (u_int64_t)Maxmem * PAGE_SIZE / pinfo.media_blksize;
	/* Dump must start past the reserved area and end within the media */
	if (dumplo64 < pinfo.reserved_blocks ||
	    dumplo64 + *count > pinfo.media_blocks) {
		return (ENOSPC);
	}
	*blkno = dumplo64 + pinfo.media_offset / pinfo.media_blksize;
	*secsize = pinfo.media_blksize;
	return (0);
}
645 
646 void
647 disk_unprobe(struct disk *disk)
648 {
649 	if (disk == NULL)
650 		return;
651 
652 	disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
653 }
654 
655 void
656 disk_invalidate (struct disk *disk)
657 {
658 	if (disk->d_slice)
659 		dsgone(&disk->d_slice);
660 }
661 
662 struct disk *
663 disk_enumerate(struct disk *disk)
664 {
665 	struct disk *dp;
666 	lwkt_tokref ilock;
667 
668 	lwkt_gettoken(&ilock, &disklist_token);
669 	if (!disk)
670 		dp = (LIST_FIRST(&disklist));
671 	else
672 		dp = (LIST_NEXT(disk, d_list));
673 	lwkt_reltoken(&ilock);
674 
675 	return dp;
676 }
677 
678 static
679 int
680 sysctl_disks(SYSCTL_HANDLER_ARGS)
681 {
682 	struct disk *disk;
683 	int error, first;
684 
685 	disk = NULL;
686 	first = 1;
687 
688 	while ((disk = disk_enumerate(disk))) {
689 		if (!first) {
690 			error = SYSCTL_OUT(req, " ", 1);
691 			if (error)
692 				return error;
693 		} else {
694 			first = 0;
695 		}
696 		error = SYSCTL_OUT(req, disk->d_rawdev->si_name,
697 				   strlen(disk->d_rawdev->si_name));
698 		if (error)
699 			return error;
700 	}
701 	error = SYSCTL_OUT(req, "", 1);
702 	return error;
703 }
704 
/* kern.disks: read-only list of raw disk names, built by sysctl_disks() */
SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_disks, "A", "names of available disks");
707 
/*
 * Open a disk device or partition.  Opens are single-threaded via the
 * DISKFLAG_LOCK/DISKFLAG_WANTED interlock; the raw device is opened on
 * the first open and closed again if the slice open leaves nothing
 * open.
 */
static
int
diskopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;

	/*
	 * dp can't be NULL here XXX.
	 *
	 * d_slice will be NULL if setdiskinfo() has not been called yet.
	 * setdiskinfo() is typically called whether the disk is present
	 * or not (e.g. CD), but the base disk device is created first
	 * and there may be a race.
	 */
	dp = dev->si_disk;
	if (dp == NULL || dp->d_slice == NULL)
		return (ENXIO);
	error = 0;

	/*
	 * Deal with open races: sleep (interruptibly, PCATCH) until no
	 * other open holds the interlock, then take it ourselves.
	 */
	while (dp->d_flags & DISKFLAG_LOCK) {
		dp->d_flags |= DISKFLAG_WANTED;
		error = tsleep(dp, PCATCH, "diskopen", hz);
		if (error)
			return (error);
	}
	dp->d_flags |= DISKFLAG_LOCK;

	/*
	 * Open the underlying raw device.  Only done when no slice is
	 * currently open (i.e. first opener).
	 */
	if (!dsisopen(dp->d_slice)) {
#if 0
		if (!pdev->si_iosize_max)
			pdev->si_iosize_max = dev->si_iosize_max;
#endif
		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
				  ap->a_devtype, ap->a_cred);
	}
#if 0
	/*
	 * Inherit properties from the underlying device now that it is
	 * open.
	 */
	dev_dclone(dev);
#endif

	if (error)
		goto out;
	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
		       &dp->d_slice, &dp->d_info);
	/* If no slices remain open, undo the raw device open */
	if (!dsisopen(dp->d_slice)) {
		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype);
	}
out:
	/* Release the open interlock and wake any waiters */
	dp->d_flags &= ~DISKFLAG_LOCK;
	if (dp->d_flags & DISKFLAG_WANTED) {
		dp->d_flags &= ~DISKFLAG_WANTED;
		wakeup(dp);
	}

	return(error);
}
778 
779 /*
780  * Close a disk device or partition
781  */
782 static
783 int
784 diskclose(struct dev_close_args *ap)
785 {
786 	cdev_t dev = ap->a_head.a_dev;
787 	struct disk *dp;
788 	int error;
789 
790 	error = 0;
791 	dp = dev->si_disk;
792 
793 	dsclose(dev, ap->a_devtype, dp->d_slice);
794 	if (!dsisopen(dp->d_slice)) {
795 		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype);
796 	}
797 	return (error);
798 }
799 
800 /*
801  * First execute the ioctl on the disk device, and if it isn't supported
802  * try running it on the backing device.
803  */
804 static
805 int
806 diskioctl(struct dev_ioctl_args *ap)
807 {
808 	cdev_t dev = ap->a_head.a_dev;
809 	struct disk *dp;
810 	int error;
811 
812 	dp = dev->si_disk;
813 	if (dp == NULL)
814 		return (ENXIO);
815 
816 	devfs_debug(DEVFS_DEBUG_DEBUG,
817 		    "diskioctl: cmd is: %x (name: %s)\n",
818 		    ap->a_cmd, dev->si_name);
819 	devfs_debug(DEVFS_DEBUG_DEBUG,
820 		    "diskioctl: &dp->d_slice is: %x, %x\n",
821 		    &dp->d_slice, dp->d_slice);
822 
823 	error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
824 			&dp->d_slice, &dp->d_info);
825 
826 	if (error == ENOIOCTL) {
827 		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
828 				   ap->a_fflag, ap->a_cred, NULL);
829 	}
830 	return (error);
831 }
832 
/*
 * Execute strategy routine: route a bio submitted on a managed
 * (slice/partition) device to the underlying raw device.
 */
static
int
diskstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct disk *dp;

	dp = dev->si_disk;

	/* No disk association: fail the bio with ENXIO */
	if (dp == NULL) {
		bio->bio_buf->b_error = ENXIO;
		bio->bio_buf->b_flags |= B_ERROR;
		biodone(bio);
		return(0);
	}
	KKASSERT(dev->si_disk == dp);

	/*
	 * The dscheck() function will also transform the slice relative
	 * block number i.e. bio->bio_offset into a block number that can be
	 * passed directly to the underlying raw device.  If dscheck()
	 * returns NULL it will have handled the bio for us (e.g. EOF
	 * or error due to being beyond the device size).
	 */
	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
		dev_dstrategy(dp->d_rawdev, nbio);
	} else {
		biodone(bio);
	}
	return(0);
}
869 
870 /*
871  * Return the partition size in ?blocks?
872  */
873 static
874 int
875 diskpsize(struct dev_psize_args *ap)
876 {
877 	cdev_t dev = ap->a_head.a_dev;
878 	struct disk *dp;
879 
880 	dp = dev->si_disk;
881 	if (dp == NULL)
882 		return(ENODEV);
883 	ap->a_result = dssize(dev, &dp->d_slice);
884 	return(0);
885 }
886 
887 /*
888  * When new device entries are instantiated, make sure they inherit our
889  * si_disk structure and block and iosize limits from the raw device.
890  *
891  * This routine is always called synchronously in the context of the
892  * client.
893  *
894  * XXX The various io and block size constraints are not always initialized
895  * properly by devices.
896  */
897 static
898 int
899 diskclone(struct dev_clone_args *ap)
900 {
901 	cdev_t dev = ap->a_head.a_dev;
902 	struct disk *dp;
903 	dp = dev->si_disk;
904 
905 	KKASSERT(dp != NULL);
906 	dev->si_disk = dp;
907 	dev->si_iosize_max = dp->d_rawdev->si_iosize_max;
908 	dev->si_bsize_phys = dp->d_rawdev->si_bsize_phys;
909 	dev->si_bsize_best = dp->d_rawdev->si_bsize_best;
910 	return(0);
911 }
912 
913 int
914 diskdump(struct dev_dump_args *ap)
915 {
916 	cdev_t dev = ap->a_head.a_dev;
917 	struct disk *dp = dev->si_disk;
918 	int error;
919 
920 	error = disk_dumpcheck(dev, &ap->a_count, &ap->a_blkno, &ap->a_secsize);
921 	if (error == 0) {
922 		ap->a_head.a_dev = dp->d_rawdev;
923 		error = dev_doperate(&ap->a_head);
924 	}
925 
926 	return(error);
927 }
928 
929 
/* Expose structure sizes under debug.sizeof.* for debugging tools. */
SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
    0, sizeof(struct diskslices), "sizeof(struct diskslices)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
    0, sizeof(struct disk), "sizeof(struct disk)");
935 
936 /*
937  * Reorder interval for burst write allowance and minor write
938  * allowance.
939  *
940  * We always want to trickle some writes in to make use of the
 * disk's zone cache.  Bursting occurs on a longer interval and only
 * if runningbufspace is well over the hirunningspace limit.
943  */
/* Reads between burst write bleedovers (see bioqwritereorder). */
int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
/* Reads between minor (small, linear) write bleedovers. */
int bioq_reorder_minor_interval = 5;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");

/* Byte budgets released by a burst / minor reorder pass, respectively. */
int bioq_reorder_burst_bytes = 3000000;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
int bioq_reorder_minor_bytes = 262144;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");
957 
958 
959 /*
960  * Order I/Os.  Generally speaking this code is designed to make better
961  * use of drive zone caches.  A drive zone cache can typically track linear
 * reads or writes for around 16 zones simultaneously.
963  *
964  * Read prioritization issues:  It is possible for hundreds of megabytes worth
965  * of writes to be queued asynchronously.  This creates a huge bottleneck
966  * for reads which reduce read bandwidth to a trickle.
967  *
968  * To solve this problem we generally reorder reads before writes.
969  *
970  * However, a large number of random reads can also starve writes and
971  * make poor use of the drive zone cache so we allow writes to trickle
972  * in every N reads.
973  */
/*
 * Insert `bio` into `bioq` according to the policy described above.
 * bioq->transition tracks the first queued write (NULL when no writes
 * are queued or ordering was forced); bioq->reorder counts reads since
 * the last burst reset.
 */
void
bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
{
	/*
	 * The BIO wants to be ordered.  Adding to the tail also
	 * causes transition to be set to NULL, forcing the ordering
	 * of all prior I/O's.
	 */
	if (bio->bio_buf->b_flags & B_ORDERED) {
		bioq_insert_tail(bioq, bio);
		return;
	}

	switch(bio->bio_buf->b_cmd) {
	case BUF_CMD_READ:
		if (bioq->transition) {
			/*
			 * Insert before the first write.  Bleedover writes
			 * based on reorder intervals to prevent starvation.
			 */
			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
			++bioq->reorder;
			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
				bioqwritereorder(bioq);
				if (bioq->reorder >=
				    bioq_reorder_burst_interval) {
					bioq->reorder = 0;
				}
			}
		} else {
			/*
			 * No writes queued (or ordering was forced),
			 * insert at tail.
			 */
			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		}
		break;
	case BUF_CMD_WRITE:
		/*
		 * Writes are always appended.  If no writes were previously
		 * queued or an ordered tail insertion occurred the transition
		 * field will be NULL.
		 */
		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		if (bioq->transition == NULL)
			bioq->transition = bio;
		break;
	default:
		/*
		 * All other request types are forced to be ordered.
		 */
		bioq_insert_tail(bioq, bio);
		break;
	}
}
1029 
/*
 * Move the read-write transition point to prevent reads from
 * completely starving our writes.  This brings a number of writes into
 * the fold every N reads.
 *
 * We bring a few linear writes into the fold on a minor interval
 * and we bring a non-linear burst of writes into the fold on a major
 * interval.  Bursting only occurs if runningbufspace is really high
 * (typically from syncs, fsyncs, or HAMMER flushes).
 *
 * NOTE(review): callers must guarantee bioq->transition != NULL on
 * entry — it is dereferenced unconditionally below.
 */
static
void
bioqwritereorder(struct bio_queue_head *bioq)
{
	struct bio *bio;
	off_t next_offset;
	size_t left;		/* byte budget remaining for this pass */
	size_t n;
	int check_off;		/* 1 = only release contiguous writes */

	/*
	 * Minor pass: small byte budget, contiguity required.
	 * Burst pass (reorder count high AND severe buffer pressure):
	 * large byte budget, contiguity not required.
	 */
	if (bioq->reorder < bioq_reorder_burst_interval ||
	    !buf_runningbufspace_severe()) {
		left = (size_t)bioq_reorder_minor_bytes;
		check_off = 1;
	} else {
		left = (size_t)bioq_reorder_burst_bytes;
		check_off = 0;
	}

	/* Advance transition past qualifying writes until budget runs out */
	next_offset = bioq->transition->bio_offset;
	while ((bio = bioq->transition) != NULL &&
	       (check_off == 0 || next_offset == bio->bio_offset)
	) {
		n = bio->bio_buf->b_bcount;
		next_offset = bio->bio_offset + n;
		bioq->transition = TAILQ_NEXT(bio, bio_act);
		if (left < n)
			break;
		left -= n;
	}
}
1071 
1072 /*
1073  * Disk error is the preface to plaintive error messages
1074  * about failing disk transfers.  It prints messages of the form
1075 
1076 hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
1077 
1078  * if the offset of the error in the transfer and a disk label
1079  * are both available.  blkdone should be -1 if the position of the error
1080  * is unknown; the disklabel pointer may be null from drivers that have not
1081  * been converted to use them.  The message is printed with kprintf
1082  * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
1083  * The message should be completed (with at least a newline) with kprintf
1084  * or log(-1, ...), respectively.  There is no trailing space.
1085  */
1086 void
1087 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
1088 {
1089 	struct buf *bp = bio->bio_buf;
1090 	const char *term;
1091 
1092 	switch(bp->b_cmd) {
1093 	case BUF_CMD_READ:
1094 		term = "read";
1095 		break;
1096 	case BUF_CMD_WRITE:
1097 		term = "write";
1098 		break;
1099 	default:
1100 		term = "access";
1101 		break;
1102 	}
1103 	kprintf("%s: %s %sing ", dev->si_name, what, term);
1104 	kprintf("offset %012llx for %d",
1105 		(long long)bio->bio_offset,
1106 		bp->b_bcount);
1107 
1108 	if (donecnt)
1109 		kprintf(" (%d bytes completed)", donecnt);
1110 }
1111 
1112 /*
1113  * Locate a disk device
1114  */
1115 cdev_t
1116 disk_locate(const char *devname)
1117 {
1118 	return devfs_find_device_by_name(devname);
1119 }
1120 
/*
 * Issue a synchronous DISK_SYNC message to the disk message thread.
 * The arg parameter is unused.
 */
void
disk_config(void *arg)
{
	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
}
1126 
/*
 * One-time initialization of the disk messaging subsystem: create the
 * message objcache, initialize the disklist token and the reply-only
 * drain port, and start the disk_msg_core service thread.
 */
static void
disk_init(void)
{
	struct thread* td_core;

	/* Backing cache for disk message allocations */
	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
					 NULL, NULL, NULL,
					 objcache_malloc_alloc,
					 objcache_malloc_free,
					 &disk_msg_malloc_args);

	lwkt_token_init(&disklist_token);

	/*
	 * Initialize the reply-only port which acts as a message drain
	 */
	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);

	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
		    0, 0, "disk_msg_core");

	/*
	 * Block until woken on td_core — presumably disk_msg_core
	 * signals readiness via wakeup(); confirm against its body.
	 */
	tsleep(td_core, 0, "diskcore", 0);
}
1150 
/*
 * Teardown counterpart to disk_init(): destroy the disk message
 * objcache.
 */
static void
disk_uninit(void)
{
	objcache_destroy(disk_msg_cache);
}
1156 
1157 /*
1158  * Clean out illegal characters in serial numbers.
1159  */
/*
 * Sanitize a serial number string in place: any character that is not
 * alphanumeric or one of '-', '@', '+', '.' is replaced with '_'.
 */
static void
disk_cleanserial(char *serno)
{
	char c;

	for (; (c = *serno) != 0; ++serno) {
		if ((c < 'a' || c > 'z') &&
		    (c < 'A' || c > 'Z') &&
		    (c < '0' || c > '9') &&
		    c != '-' && c != '@' && c != '+' && c != '.') {
			*serno = '_';
		}
	}
}
1179 
/* Loader tunable and sysctl knob controlling subr_disk debug output */
TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
		0, "Enable subr_disk debugging");

/* Hook disk subsystem init/uninit into the SYSINIT ordering framework */
SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);
1186