xref: /dflybsd-src/sys/kern/subr_disk.c (revision a579280ade25ef68ce67cd8432a625d6bce8d3bb)
1 /*
2  * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * ----------------------------------------------------------------------------
35  * "THE BEER-WARE LICENSE" (Revision 42):
36  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
37  * can do whatever you want with this stuff. If we meet some day, and you think
38  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
39  * ----------------------------------------------------------------------------
40  *
41  * Copyright (c) 1982, 1986, 1988, 1993
42  *	The Regents of the University of California.  All rights reserved.
43  * (c) UNIX System Laboratories, Inc.
44  * All or some portions of this file are derived from material licensed
45  * to the University of California by American Telephone and Telegraph
46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47  * the permission of UNIX System Laboratories, Inc.
48  *
49  * Redistribution and use in source and binary forms, with or without
50  * modification, are permitted provided that the following conditions
51  * are met:
52  * 1. Redistributions of source code must retain the above copyright
53  *    notice, this list of conditions and the following disclaimer.
54  * 2. Redistributions in binary form must reproduce the above copyright
55  *    notice, this list of conditions and the following disclaimer in the
56  *    documentation and/or other materials provided with the distribution.
57  * 3. All advertising materials mentioning features or use of this software
58  *    must display the following acknowledgement:
59  *	This product includes software developed by the University of
60  *	California, Berkeley and its contributors.
61  * 4. Neither the name of the University nor the names of its contributors
62  *    may be used to endorse or promote products derived from this software
63  *    without specific prior written permission.
64  *
65  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
66  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
69  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
70  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
71  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
72  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
73  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
74  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
75  * SUCH DAMAGE.
76  *
77  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
78  * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
79  * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
80  * $DragonFly: src/sys/kern/subr_disk.c,v 1.40 2008/06/05 18:06:32 swildner Exp $
81  */
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/kernel.h>
86 #include <sys/proc.h>
87 #include <sys/sysctl.h>
88 #include <sys/buf.h>
89 #include <sys/conf.h>
90 #include <sys/disklabel.h>
91 #include <sys/disklabel32.h>
92 #include <sys/disklabel64.h>
93 #include <sys/diskslice.h>
94 #include <sys/diskmbr.h>
95 #include <sys/disk.h>
96 #include <sys/malloc.h>
97 #include <sys/sysctl.h>
98 #include <machine/md_var.h>
99 #include <sys/ctype.h>
100 #include <sys/syslog.h>
101 #include <sys/device.h>
102 #include <sys/msgport.h>
103 #include <sys/msgport2.h>
104 #include <sys/buf2.h>
105 #include <sys/devfs.h>
106 #include <sys/thread.h>
107 #include <sys/thread2.h>
108 
109 #include <sys/queue.h>
110 #include <sys/lock.h>
111 
112 static MALLOC_DEFINE(M_DISK, "disk", "disk data");
113 static int disk_debug_enable = 0;
114 
115 static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
116 static void disk_msg_core(void *);
117 static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
118 static void disk_probe(struct disk *dp, int reprobe);
119 static void _setdiskinfo(struct disk *disk, struct disk_info *info);
120 static void bioqwritereorder(struct bio_queue_head *bioq);
121 
122 static d_open_t diskopen;
123 static d_close_t diskclose;
124 static d_ioctl_t diskioctl;
125 static d_strategy_t diskstrategy;
126 static d_psize_t diskpsize;
127 static d_clone_t diskclone;
128 static d_dump_t diskdump;
129 
130 static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
131 static struct lwkt_token disklist_token;
132 
133 static struct dev_ops disk_ops = {
134 	{ "disk", 0, D_DISK },
135 	.d_open = diskopen,
136 	.d_close = diskclose,
137 	.d_read = physread,
138 	.d_write = physwrite,
139 	.d_ioctl = diskioctl,
140 	.d_strategy = diskstrategy,
141 	.d_dump = diskdump,
142 	.d_psize = diskpsize,
143 	.d_clone = diskclone
144 };
145 
146 static struct objcache 	*disk_msg_cache;
147 
148 struct objcache_malloc_args disk_msg_malloc_args = {
149 	sizeof(struct disk_msg), M_DISK };
150 
151 static struct lwkt_port disk_dispose_port;
152 static struct lwkt_port disk_msg_port;
153 
154 static int
155 disk_debug(int level, char *fmt, ...)
156 {
157 	__va_list ap;
158 
159 	__va_start(ap, fmt);
160 	if (level <= disk_debug_enable)
161 		kvprintf(fmt, ap);
162 	__va_end(ap);
163 
164 	return 0;
165 }
166 
167 static int
168 disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
169 {
170 	struct disk_info *info = &dp->d_info;
171 	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
172 	disklabel_ops_t ops;
173 	struct partinfo part;
174 	const char *msg;
175 	cdev_t ndev;
176 	int sno;
177 	u_int i;
178 
179 	disk_debug(2,
180 		    "disk_probe_slice (begin): %s (%s)\n",
181 			dev->si_name, dp->d_cdev->si_name);
182 
183 	sno = slice ? slice - 1 : 0;
184 
185 	ops = &disklabel32_ops;
186 	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
187 	if (msg && !strcmp(msg, "no disk label")) {
188 		ops = &disklabel64_ops;
189 		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
190 	}
191 	if (msg == NULL) {
192 		if (slice != WHOLE_DISK_SLICE)
193 			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
194 		else
195 			sp->ds_reserved = 0;
196 
197 		sp->ds_ops = ops;
198 		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
199 			ops->op_loadpartinfo(sp->ds_label, i, &part);
200 			if (part.fstype) {
201 				if (reprobe &&
202 				    (ndev = devfs_find_device_by_name("%s%c",
203 						dev->si_name, 'a' + i))
204 				) {
205 					/*
206 					 * Device already exists and
207 					 * is still valid.
208 					 */
209 					ndev->si_flags |= SI_REPROBE_TEST;
210 				} else {
211 					ndev = make_dev_covering(&disk_ops, dp->d_rawdev,
212 						dkmakeminor(dkunit(dp->d_cdev),
213 							    slice, i),
214 						UID_ROOT, GID_OPERATOR, 0640,
215 						"%s%c", dev->si_name, 'a'+ i);
216 					ndev->si_disk = dp;
217 					if (dp->d_info.d_serialno) {
218 						make_dev_alias(ndev,
219 						    "serno/%s.s%d%c",
220 						    dp->d_info.d_serialno,
221 						    sno, 'a' + i);
222 					}
223 					ndev->si_flags |= SI_REPROBE_TEST;
224 				}
225 			}
226 		}
227 	} else if (info->d_dsflags & DSO_COMPATLABEL) {
228 		msg = NULL;
229 		if (sp->ds_size >= 0x100000000ULL)
230 			ops = &disklabel64_ops;
231 		else
232 			ops = &disklabel32_ops;
233 		sp->ds_label = ops->op_clone_label(info, sp);
234 	} else {
235 		if (sp->ds_type == DOSPTYP_386BSD /* XXX */) {
236 			log(LOG_WARNING, "%s: cannot find label (%s)\n",
237 			    dev->si_name, msg);
238 		}
239 	}
240 
241 	if (msg == NULL) {
242 		sp->ds_wlabel = FALSE;
243 	}
244 
245 	return (msg ? EINVAL : 0);
246 }
247 
248 
249 static void
250 disk_probe(struct disk *dp, int reprobe)
251 {
252 	struct disk_info *info = &dp->d_info;
253 	cdev_t dev = dp->d_cdev;
254 	cdev_t ndev;
255 	int error, i, sno;
256 	struct diskslice *sp;
257 
258 	KKASSERT (info->d_media_blksize != 0);
259 
260 	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
261 	disk_debug(1,
262 		    "disk_probe (begin): %s\n",
263 			dp->d_cdev->si_name);
264 
265 	error = mbrinit(dev, info, &(dp->d_slice));
266 	if (error)
267 		return;
268 
269 	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
270 		/*
271 		 * Ignore the whole-disk slice, it has already been created.
272 		 */
273 		if (i == WHOLE_DISK_SLICE)
274 			continue;
275 		sp = &dp->d_slice->dss_slices[i];
276 
277 		/*
278 		 * Handle s0.  s0 is a compatibility slice if there are no
279 		 * other slices and it has not otherwise been set up, else
280 		 * we ignore it.
281 		 */
282 		if (i == COMPATIBILITY_SLICE) {
283 			sno = 0;
284 			if (sp->ds_type == 0 &&
285 			    dp->d_slice->dss_nslices == BASE_SLICE) {
286 				sp->ds_size = info->d_media_blocks;
287 				sp->ds_reserved = 0;
288 			}
289 		} else {
290 			sno = i - 1;
291 			sp->ds_reserved = 0;
292 		}
293 
294 		/*
295 		 * Ignore 0-length slices
296 		 */
297 		if (sp->ds_size == 0)
298 			continue;
299 
300 		if (reprobe &&
301 		    (ndev = devfs_find_device_by_name("%ss%d",
302 						      dev->si_name, sno))) {
303 			/*
304 			 * Device already exists and is still valid
305 			 */
306 			ndev->si_flags |= SI_REPROBE_TEST;
307 		} else {
308 			/*
309 			 * Else create new device
310 			 */
311 			ndev = make_dev_covering(&disk_ops, dp->d_rawdev,
312 					dkmakewholeslice(dkunit(dev), i),
313 					UID_ROOT, GID_OPERATOR, 0640,
314 					"%ss%d", dev->si_name, sno);
315 			if (dp->d_info.d_serialno) {
316 				make_dev_alias(ndev, "serno/%s.s%d",
317 					       dp->d_info.d_serialno, sno);
318 			}
319 			ndev->si_disk = dp;
320 			ndev->si_flags |= SI_REPROBE_TEST;
321 		}
322 		sp->ds_dev = ndev;
323 
324 		/*
325 		 * Probe appropriate slices for a disklabel
326 		 *
327 		 * XXX slice type 1 used by our gpt probe code.
328 		 * XXX slice type 0 used by mbr compat slice.
329 		 */
330 		if (sp->ds_type == DOSPTYP_386BSD || sp->ds_type == 0 ||
331 			sp->ds_type == 1) {
332 			if (dp->d_slice->dss_first_bsd_slice == 0)
333 				dp->d_slice->dss_first_bsd_slice = i;
334 			disk_probe_slice(dp, ndev, i, reprobe);
335 		}
336 	}
337 	disk_debug(1,
338 		    "disk_probe (end): %s\n",
339 			dp->d_cdev->si_name);
340 }
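
/*
 * As an illustration of the naming produced above (device names are
 * hypothetical): probing a disk "da0" whose MBR carries two slices
 * creates da0s1 and da0s2 via the "%ss%d" format, and a slice holding
 * a disklabel additionally gets per-partition nodes da0s1a, da0s1b, ...
 * from disk_probe_slice()'s "%s%c" format.  When a serial number is
 * known, matching serno/<serial>.s<sno> and serno/<serial>.s<sno><part>
 * aliases are created alongside them.
 */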
341 
342 
343 static void
344 disk_msg_core(void *arg)
345 {
346 	struct disk	*dp;
347 	struct diskslice *sp;
348 	lwkt_tokref ilock;
349 	disk_msg_t msg;
350 	int run;
351 
352 	lwkt_initport_thread(&disk_msg_port, curthread);
353 	wakeup(curthread);
354 	run = 1;
355 
356 	while (run) {
357 		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);
358 
359 		switch (msg->hdr.u.ms_result) {
360 		case DISK_DISK_PROBE:
361 			dp = (struct disk *)msg->load;
362 			disk_debug(1,
363 				    "DISK_DISK_PROBE: %s\n",
364 					dp->d_cdev->si_name);
365 			disk_probe(dp, 0);
366 			break;
367 		case DISK_DISK_DESTROY:
368 			dp = (struct disk *)msg->load;
369 			disk_debug(1,
370 				    "DISK_DISK_DESTROY: %s\n",
371 					dp->d_cdev->si_name);
372 			devfs_destroy_subnames(dp->d_cdev->si_name);
373 			devfs_destroy_dev(dp->d_cdev);
374 			lwkt_gettoken(&ilock, &disklist_token);
375 			LIST_REMOVE(dp, d_list);
376 			lwkt_reltoken(&ilock);
377 			if (dp->d_info.d_serialno) {
378 				kfree(dp->d_info.d_serialno, M_TEMP);
379 				dp->d_info.d_serialno = NULL;
380 			}
381 			break;
382 		case DISK_UNPROBE:
383 			dp = (struct disk *)msg->load;
384 			disk_debug(1,
385 				    "DISK_DISK_UNPROBE: %s\n",
386 					dp->d_cdev->si_name);
387 			devfs_destroy_subnames(dp->d_cdev->si_name);
388 			break;
389 		case DISK_SLICE_REPROBE:
390 			dp = (struct disk *)msg->load;
391 			sp = (struct diskslice *)msg->load2;
392 			devfs_clr_subnames_flag(sp->ds_dev->si_name,
393 						SI_REPROBE_TEST);
394 			disk_debug(1,
395 				    "DISK_SLICE_REPROBE: %s\n",
396 				    sp->ds_dev->si_name);
397 			disk_probe_slice(dp, sp->ds_dev,
398 					 dkslice(sp->ds_dev), 1);
399 			devfs_destroy_subnames_without_flag(
400 					sp->ds_dev->si_name, SI_REPROBE_TEST);
401 			break;
402 		case DISK_DISK_REPROBE:
403 			dp = (struct disk *)msg->load;
404 			devfs_clr_subnames_flag(dp->d_cdev->si_name, SI_REPROBE_TEST);
405 			disk_debug(1,
406 				    "DISK_DISK_REPROBE: %s\n",
407 				    dp->d_cdev->si_name);
408 			disk_probe(dp, 1);
409 			devfs_destroy_subnames_without_flag(
410 					dp->d_cdev->si_name, SI_REPROBE_TEST);
411 			break;
412 		case DISK_SYNC:
413 			disk_debug(1, "DISK_SYNC\n");
414 			break;
415 		default:
416 			devfs_debug(DEVFS_DEBUG_WARNING,
417 				    "disk_msg_core: unknown message "
418 				    "received at core\n");
419 			break;
420 		}
421 		lwkt_replymsg((lwkt_msg_t)msg, 0);
422 	}
423 	lwkt_exit();
424 }
425 
426 
427 /*
428  * Acts as a message drain. Any message that is replied to here gets
429  * destroyed and the memory freed.
430  */
431 static void
432 disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
433 {
434 	objcache_put(disk_msg_cache, msg);
435 }
436 
437 
438 void
439 disk_msg_send(uint32_t cmd, void *load, void *load2)
440 {
441 	disk_msg_t disk_msg;
442 	lwkt_port_t port = &disk_msg_port;
443 
444 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
445 
446 	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);
447 
448 	disk_msg->hdr.u.ms_result = cmd;
449 	disk_msg->load = load;
450 	disk_msg->load2 = load2;
451 	KKASSERT(port);
452 	lwkt_sendmsg(port, (lwkt_msg_t)disk_msg);
453 }
454 
455 void
456 disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
457 {
458 	struct lwkt_port rep_port;
459 	disk_msg_t disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
460 	disk_msg_t	msg_incoming;
461 	lwkt_port_t port = &disk_msg_port;
462 
463 	lwkt_initport_thread(&rep_port, curthread);
464 	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
465 
466 	disk_msg->hdr.u.ms_result = cmd;
467 	disk_msg->load = load;
468 	disk_msg->load2 = load2;
469 
470 	KKASSERT(port);
471 	lwkt_sendmsg(port, (lwkt_msg_t)disk_msg);
472 	msg_incoming = lwkt_waitport(&rep_port, 0);
473 }
474 
475 /*
476  * Create a raw device for the dev_ops template (which is returned).  Also
477  * create a slice and unit managed disk and overload the user visible
478  * device space with it.
479  *
480  * NOTE: The returned raw device is NOT a slice and unit managed device.
481  * It is an actual raw device representing the raw disk as specified by
482  * the passed dev_ops.  The disk layer not only returns such a raw device,
483  * it also uses it internally when passing (modified) commands through.
484  */
485 cdev_t
486 disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
487 {
488 	lwkt_tokref ilock;
489 	cdev_t rawdev;
490 
491 	disk_debug(1,
492 		    "disk_create (begin): %s%d\n",
493 			raw_ops->head.name, unit);
494 
495 	rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
496 			    UID_ROOT, GID_OPERATOR, 0640,
497 			    "%s%d", raw_ops->head.name, unit);
498 
499 	bzero(dp, sizeof(*dp));
500 
501 	dp->d_rawdev = rawdev;
502 	dp->d_raw_ops = raw_ops;
503 	dp->d_dev_ops = &disk_ops;
504 	dp->d_cdev = make_dev_covering(&disk_ops, dp->d_rawdev,
505 			    dkmakewholedisk(unit),
506 			    UID_ROOT, GID_OPERATOR, 0640,
507 			    "%s%d", raw_ops->head.name, unit);
508 
509 	dp->d_cdev->si_disk = dp;
510 
511 	lwkt_gettoken(&ilock, &disklist_token);
512 	LIST_INSERT_HEAD(&disklist, dp, d_list);
513 	lwkt_reltoken(&ilock);
514 
515 	disk_debug(1,
516 		    "disk_create (end): %s%d\n",
517 			raw_ops->head.name, unit);
518 
519 	return (dp->d_rawdev);
520 }
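
/*
 * A minimal attach/detach sketch, illustrative only: the "mydisk"
 * driver, its softc fields and its raw dev_ops are hypothetical.  A
 * disk driver pairs disk_create() with disk_setdiskinfo() when it
 * attaches and calls disk_destroy() when it detaches.
 */
#if 0
static struct dev_ops mydisk_raw_ops;		/* driver-supplied raw ops */

static void
mydisk_attach(struct mydisk_softc *sc, int unit)
{
	struct disk_info info;

	/* create the raw device plus the slice/unit managed overlay */
	sc->sc_rawdev = disk_create(unit, &sc->sc_disk, &mydisk_raw_ops);

	/* report media parameters; d_media_size is derived from blocks */
	bzero(&info, sizeof(info));
	info.d_media_blksize = 512;
	info.d_media_blocks = sc->sc_nsectors;
	disk_setdiskinfo(&sc->sc_disk, &info);	/* queues DISK_DISK_PROBE */
}

static void
mydisk_detach(struct mydisk_softc *sc)
{
	disk_destroy(&sc->sc_disk);	/* synchronous DISK_DISK_DESTROY */
}
#endif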
521 
522 
523 static void
524 _setdiskinfo(struct disk *disk, struct disk_info *info)
525 {
526 	char *oldserialno;
527 
528 	oldserialno = disk->d_info.d_serialno;
529 	bcopy(info, &disk->d_info, sizeof(disk->d_info));
530 	info = &disk->d_info;
531 
532 	disk_debug(1,
533 		    "_setdiskinfo: %s\n",
534 			disk->d_cdev->si_name);
535 
536 	/*
537 	 * The serial number is duplicated so the caller can throw
538 	 * their copy away.
539 	 */
540 	if (info->d_serialno && info->d_serialno[0]) {
541 		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
542 		if (disk->d_cdev) {
543 			make_dev_alias(disk->d_cdev, "serno/%s",
544 					info->d_serialno);
545 		}
546 	} else {
547 		info->d_serialno = NULL;
548 	}
549 	if (oldserialno)
550 		kfree(oldserialno, M_TEMP);
551 
552 	/*
553 	 * The caller may set d_media_size or d_media_blocks and we
554 	 * calculate the other.
555 	 */
556 	KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0);
557 	if (info->d_media_size == 0 && info->d_media_blocks) {
558 		info->d_media_size = (u_int64_t)info->d_media_blocks *
559 				     info->d_media_blksize;
560 	} else if (info->d_media_size && info->d_media_blocks == 0 &&
561 		   info->d_media_blksize) {
562 		info->d_media_blocks = info->d_media_size /
563 				       info->d_media_blksize;
564 	}
565 
566 	/*
567 	 * The si_* fields for rawdev are not set until after the
568 	 * disk_create() call, so someone using the cooked version
569 	 * of the raw device (i.e. da0s0) will not get the right
570 	 * si_iosize_max unless we fix it up here.
571 	 */
572 	if (disk->d_cdev && disk->d_rawdev &&
573 	    disk->d_cdev->si_iosize_max == 0) {
574 		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
575 		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
576 		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
577 	}
578 }
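
/*
 * For example (numbers illustrative): a caller supplying
 * d_media_blksize = 512 and d_media_blocks = 2097152 has d_media_size
 * filled in as 2097152 * 512 = 1073741824 bytes (1 GiB); conversely,
 * supplying d_media_size = 1073741824 with the same block size yields
 * d_media_blocks = 2097152.
 */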
579 
580 /*
581  * Disk drivers must call this routine when media parameters are available
582  * or have changed.
583  */
584 void
585 disk_setdiskinfo(struct disk *disk, struct disk_info *info)
586 {
587 	_setdiskinfo(disk, info);
588 	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
589 	disk_debug(1,
590 		    "disk_setdiskinfo: sent probe for %s\n",
591 			disk->d_cdev->si_name);
592 }
593 
594 void
595 disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
596 {
597 	_setdiskinfo(disk, info);
598 	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
599 	disk_debug(1,
600 		    "disk_setdiskinfo_sync: sent probe for %s\n",
601 			disk->d_cdev->si_name);
602 }
603 
604 /*
605  * This routine is called when an adapter detaches.  The higher level
606  * managed disk device is destroyed while the lower level raw device is
607  * released.
608  */
609 void
610 disk_destroy(struct disk *disk)
611 {
612 	disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
613 	return;
614 }
615 
616 int
617 disk_dumpcheck(cdev_t dev, u_int64_t *count, u_int64_t *blkno, u_int *secsize)
618 {
619 	struct partinfo pinfo;
620 	int error;
621 
622 	bzero(&pinfo, sizeof(pinfo));
623 	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
624 			   proc0.p_ucred, NULL);
625 	if (error)
626 		return (error);
627 	if (pinfo.media_blksize == 0)
628 		return (ENXIO);
629 	*count = (u_int64_t)Maxmem * PAGE_SIZE / pinfo.media_blksize;
630 	if (dumplo64 < pinfo.reserved_blocks ||
631 	    dumplo64 + *count > pinfo.media_blocks) {
632 		return (ENOSPC);
633 	}
634 	*blkno = dumplo64 + pinfo.media_offset / pinfo.media_blksize;
635 	*secsize = pinfo.media_blksize;
636 	return (0);
637 }
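
/*
 * A worked example of the window check above (numbers illustrative):
 * with Maxmem covering 1 GiB of RAM (262144 pages of 4096 bytes) and a
 * 512-byte media block size, *count = 262144 * 4096 / 512 = 2097152
 * blocks.  The dump area is accepted only if dumplo64 lies at or beyond
 * the partition's reserved blocks and dumplo64 + 2097152 does not run
 * past media_blocks.
 */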
638 
639 void
640 disk_unprobe(struct disk *disk)
641 {
642 	if (disk == NULL)
643 		return;
644 
645 	disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
646 }
647 
648 void
649 disk_invalidate (struct disk *disk)
650 {
651 	if (disk->d_slice)
652 		dsgone(&disk->d_slice);
653 }
654 
655 struct disk *
656 disk_enumerate(struct disk *disk)
657 {
658 	struct disk *dp;
659 	lwkt_tokref ilock;
660 
661 	lwkt_gettoken(&ilock, &disklist_token);
662 	if (!disk)
663 		dp = (LIST_FIRST(&disklist));
664 	else
665 		dp = (LIST_NEXT(disk, d_list));
666 	lwkt_reltoken(&ilock);
667 
668 	return dp;
669 }
670 
671 static
672 int
673 sysctl_disks(SYSCTL_HANDLER_ARGS)
674 {
675 	struct disk *disk;
676 	int error, first;
677 
678 	disk = NULL;
679 	first = 1;
680 
681 	while ((disk = disk_enumerate(disk))) {
682 		if (!first) {
683 			error = SYSCTL_OUT(req, " ", 1);
684 			if (error)
685 				return error;
686 		} else {
687 			first = 0;
688 		}
689 		error = SYSCTL_OUT(req, disk->d_rawdev->si_name,
690 				   strlen(disk->d_rawdev->si_name));
691 		if (error)
692 			return error;
693 	}
694 	error = SYSCTL_OUT(req, "", 1);
695 	return error;
696 }
697 
698 SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
699     sysctl_disks, "A", "names of available disks");
700 
701 /*
702  * Open a disk device or partition.
703  */
704 static
705 int
706 diskopen(struct dev_open_args *ap)
707 {
708 	cdev_t dev = ap->a_head.a_dev;
709 	struct disk *dp;
710 	int error;
711 
712 	/*
713 	 * dp can't be NULL here XXX.
714 	 *
715 	 * d_slice will be NULL if setdiskinfo() has not been called yet.
716 	 * setdiskinfo() is typically called whether the disk is present
717 	 * or not (e.g. CD), but the base disk device is created first
718 	 * and there may be a race.
719 	 */
720 	dp = dev->si_disk;
721 	if (dp == NULL || dp->d_slice == NULL)
722 		return (ENXIO);
723 	error = 0;
724 
725 	/*
726 	 * Deal with open races
727 	 */
728 	while (dp->d_flags & DISKFLAG_LOCK) {
729 		dp->d_flags |= DISKFLAG_WANTED;
730 		error = tsleep(dp, PCATCH, "diskopen", hz);
731 		if (error)
732 			return (error);
733 	}
734 	dp->d_flags |= DISKFLAG_LOCK;
735 
736 	/*
737 	 * Open the underlying raw device.
738 	 */
739 	if (!dsisopen(dp->d_slice)) {
740 #if 0
741 		if (!pdev->si_iosize_max)
742 			pdev->si_iosize_max = dev->si_iosize_max;
743 #endif
744 		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
745 				  ap->a_devtype, ap->a_cred);
746 	}
747 #if 0
748 	/*
749 	 * Inherit properties from the underlying device now that it is
750 	 * open.
751 	 */
752 	dev_dclone(dev);
753 #endif
754 
755 	if (error)
756 		goto out;
757 	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
758 		       &dp->d_slice, &dp->d_info);
759 	if (!dsisopen(dp->d_slice)) {
760 		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype);
761 	}
762 out:
763 	dp->d_flags &= ~DISKFLAG_LOCK;
764 	if (dp->d_flags & DISKFLAG_WANTED) {
765 		dp->d_flags &= ~DISKFLAG_WANTED;
766 		wakeup(dp);
767 	}
768 
769 	return(error);
770 }
771 
772 /*
773  * Close a disk device or partition
774  */
775 static
776 int
777 diskclose(struct dev_close_args *ap)
778 {
779 	cdev_t dev = ap->a_head.a_dev;
780 	struct disk *dp;
781 	int error;
782 
783 	error = 0;
784 	dp = dev->si_disk;
785 
786 	dsclose(dev, ap->a_devtype, dp->d_slice);
787 	if (!dsisopen(dp->d_slice)) {
788 		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype);
789 	}
790 	return (error);
791 }
792 
793 /*
794  * First execute the ioctl on the disk device, and if it isn't supported
795  * try running it on the backing device.
796  */
797 static
798 int
799 diskioctl(struct dev_ioctl_args *ap)
800 {
801 	cdev_t dev = ap->a_head.a_dev;
802 	struct disk *dp;
803 	int error;
804 
805 	dp = dev->si_disk;
806 	if (dp == NULL)
807 		return (ENXIO);
808 
809 	devfs_debug(DEVFS_DEBUG_DEBUG,
810 		    "diskioctl: cmd is: %x (name: %s)\n",
811 		    ap->a_cmd, dev->si_name);
812 	devfs_debug(DEVFS_DEBUG_DEBUG,
813 		    "diskioctl: &dp->d_slice is: %x, %x\n",
814 		    &dp->d_slice, dp->d_slice);
815 
816 	error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
817 			&dp->d_slice, &dp->d_info);
818 
819 	if (error == ENOIOCTL) {
820 		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
821 				   ap->a_fflag, ap->a_cred, NULL);
822 	}
823 	return (error);
824 }
825 
826 /*
827  * Execute strategy routine
828  */
829 static
830 int
831 diskstrategy(struct dev_strategy_args *ap)
832 {
833 	cdev_t dev = ap->a_head.a_dev;
834 	struct bio *bio = ap->a_bio;
835 	struct bio *nbio;
836 	struct disk *dp;
837 
838 	dp = dev->si_disk;
839 
840 	if (dp == NULL) {
841 		bio->bio_buf->b_error = ENXIO;
842 		bio->bio_buf->b_flags |= B_ERROR;
843 		biodone(bio);
844 		return(0);
845 	}
846 	KKASSERT(dev->si_disk == dp);
847 
848 	/*
849 	 * The dscheck() function will also transform the slice relative
850 	 * block number i.e. bio->bio_offset into a block number that can be
851 	 * passed directly to the underlying raw device.  If dscheck()
852 	 * returns NULL it will have handled the bio for us (e.g. EOF
853 	 * or error due to being beyond the device size).
854 	 */
855 	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
856 		dev_dstrategy(dp->d_rawdev, nbio);
857 	} else {
858 		biodone(bio);
859 	}
860 	return(0);
861 }
862 
863 /*
864  * Return the partition size in ?blocks?
865  */
866 static
867 int
868 diskpsize(struct dev_psize_args *ap)
869 {
870 	cdev_t dev = ap->a_head.a_dev;
871 	struct disk *dp;
872 
873 	dp = dev->si_disk;
874 	if (dp == NULL)
875 		return(ENODEV);
876 	ap->a_result = dssize(dev, &dp->d_slice);
877 	return(0);
878 }
879 
880 /*
881  * When new device entries are instantiated, make sure they inherit our
882  * si_disk structure and block and iosize limits from the raw device.
883  *
884  * This routine is always called synchronously in the context of the
885  * client.
886  *
887  * XXX The various io and block size constraints are not always initialized
888  * properly by devices.
889  */
890 static
891 int
892 diskclone(struct dev_clone_args *ap)
893 {
894 	cdev_t dev = ap->a_head.a_dev;
895 	struct disk *dp;
896 	dp = dev->si_disk;
897 
898 	KKASSERT(dp != NULL);
899 	dev->si_disk = dp;
900 	dev->si_iosize_max = dp->d_rawdev->si_iosize_max;
901 	dev->si_bsize_phys = dp->d_rawdev->si_bsize_phys;
902 	dev->si_bsize_best = dp->d_rawdev->si_bsize_best;
903 	return(0);
904 }
905 
906 int
907 diskdump(struct dev_dump_args *ap)
908 {
909 	cdev_t dev = ap->a_head.a_dev;
910 	struct disk *dp = dev->si_disk;
911 	int error;
912 
913 	error = disk_dumpcheck(dev, &ap->a_count, &ap->a_blkno, &ap->a_secsize);
914 	if (error == 0) {
915 		ap->a_head.a_dev = dp->d_rawdev;
916 		error = dev_doperate(&ap->a_head);
917 	}
918 
919 	return(error);
920 }
921 
922 
923 SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
924     0, sizeof(struct diskslices), "sizeof(struct diskslices)");
925 
926 SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
927     0, sizeof(struct disk), "sizeof(struct disk)");
928 
929 /*
930  * Reorder interval for burst write allowance and minor write
931  * allowance.
932  *
933  * We always want to trickle some writes in to make use of the
934  * disk's zone cache.  Bursting occurs on a longer interval and only
935  * when runningbufspace is well over the hirunningspace limit.
936  */
937 int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
938 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
939 	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
940 int bioq_reorder_minor_interval = 5;
941 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
942 	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");
943 
944 int bioq_reorder_burst_bytes = 3000000;
945 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
946 	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
947 int bioq_reorder_minor_bytes = 262144;
948 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
949 	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");
950 
951 
952 /*
953  * Order I/Os.  Generally speaking this code is designed to make better
954  * use of drive zone caches.  A drive zone cache can typically track linear
955  * reads or writes for around 16 zones simultaneously.
956  *
957  * Read prioritization issues:  It is possible for hundreds of megabytes worth
958  * of writes to be queued asynchronously.  This creates a huge bottleneck
959  * for reads, which reduces read bandwidth to a trickle.
960  *
961  * To solve this problem we generally reorder reads before writes.
962  *
963  * However, a large number of random reads can also starve writes and
964  * make poor use of the drive zone cache so we allow writes to trickle
965  * in every N reads.
966  */
967 void
968 bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
969 {
970 	/*
971 	 * The BIO wants to be ordered.  Adding to the tail also
972 	 * causes transition to be set to NULL, forcing the ordering
973 	 * of all prior I/O's.
974 	 */
975 	if (bio->bio_buf->b_flags & B_ORDERED) {
976 		bioq_insert_tail(bioq, bio);
977 		return;
978 	}
979 
980 	switch(bio->bio_buf->b_cmd) {
981 	case BUF_CMD_READ:
982 		if (bioq->transition) {
983 			/*
984 			 * Insert before the first write.  Bleedover writes
985 			 * based on reorder intervals to prevent starvation.
986 			 */
987 			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
988 			++bioq->reorder;
989 			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
990 				bioqwritereorder(bioq);
991 				if (bioq->reorder >=
992 				    bioq_reorder_burst_interval) {
993 					bioq->reorder = 0;
994 				}
995 			}
996 		} else {
997 			/*
998 			 * No writes queued (or ordering was forced),
999 			 * insert at tail.
1000 			 */
1001 			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1002 		}
1003 		break;
1004 	case BUF_CMD_WRITE:
1005 		/*
1006 		 * Writes are always appended.  If no writes were previously
1007 		 * queued or an ordered tail insertion occurred, the transition
1008 		 * field will be NULL.
1009 		 */
1010 		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1011 		if (bioq->transition == NULL)
1012 			bioq->transition = bio;
1013 		break;
1014 	default:
1015 		/*
1016 		 * All other request types are forced to be ordered.
1017 		 */
1018 		bioq_insert_tail(bioq, bio);
1019 		break;
1020 	}
1021 }
1022 
1023 /*
1024  * Move the read-write transition point to prevent reads from
1025  * completely starving our writes.  This brings a number of writes into
1026  * the fold every N reads.
1027  *
1028  * We bring a few linear writes into the fold on a minor interval
1029  * and we bring a non-linear burst of writes into the fold on a major
1030  * interval.  Bursting only occurs if runningbufspace is really high
1031  * (typically from syncs, fsyncs, or HAMMER flushes).
1032  */
1033 static
1034 void
1035 bioqwritereorder(struct bio_queue_head *bioq)
1036 {
1037 	struct bio *bio;
1038 	off_t next_offset;
1039 	size_t left;
1040 	size_t n;
1041 	int check_off;
1042 
1043 	if (bioq->reorder < bioq_reorder_burst_interval ||
1044 	    !buf_runningbufspace_severe()) {
1045 		left = (size_t)bioq_reorder_minor_bytes;
1046 		check_off = 1;
1047 	} else {
1048 		left = (size_t)bioq_reorder_burst_bytes;
1049 		check_off = 0;
1050 	}
1051 
1052 	next_offset = bioq->transition->bio_offset;
1053 	while ((bio = bioq->transition) != NULL &&
1054 	       (check_off == 0 || next_offset == bio->bio_offset)
1055 	) {
1056 		n = bio->bio_buf->b_bcount;
1057 		next_offset = bio->bio_offset + n;
1058 		bioq->transition = TAILQ_NEXT(bio, bio_act);
1059 		if (left < n)
1060 			break;
1061 		left -= n;
1062 	}
1063 }
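
/*
 * Putting the two routines together with the default tunables above
 * (behaviour sketched, not prescriptive): every 5th read queued ahead
 * of pending writes (bioq_reorder_minor_interval) lets
 * bioqwritereorder() advance the transition point past roughly 262144
 * bytes of linearly contiguous writes (bioq_reorder_minor_bytes).
 * Once 60 such reads have accumulated (bioq_reorder_burst_interval)
 * the counter is reset, and if runningbufspace is also severe the
 * reorder releases up to about 3000000 bytes of writes regardless of
 * contiguity (bioq_reorder_burst_bytes).
 */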
1064 
1065 /*
1066  * Disk error is the preface to plaintive error messages
1067  * about failing disk transfers.  It prints messages of the form
1068 
1069 hp0g: hard error reading offset 000000003000 for 8192
1070 
1071  * The byte offset and size of the failed transfer are taken from the
1072  * supplied bio.  donecnt gives the number of bytes that completed
1073  * before the error and is reported when non-zero; pass 0 if the
1074  * position of the error is unknown.  The pri argument is currently
1075  * unused; the message is always printed with kprintf.  The message
1076  * should be completed (with at least a newline) by the caller, e.g.
1077  * with kprintf or log(-1, ...).  There is no trailing space.
1078  */
1079 void
1080 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
1081 {
1082 	struct buf *bp = bio->bio_buf;
1083 	const char *term;
1084 
1085 	switch(bp->b_cmd) {
1086 	case BUF_CMD_READ:
1087 		term = "read";
1088 		break;
1089 	case BUF_CMD_WRITE:
1090 		term = "write";
1091 		break;
1092 	default:
1093 		term = "access";
1094 		break;
1095 	}
1096 	kprintf("%s: %s %sing ", dev->si_name, what, term);
1097 	kprintf("offset %012llx for %d",
1098 		(long long)bio->bio_offset,
1099 		bp->b_bcount);
1100 
1101 	if (donecnt)
1102 		kprintf(" (%d bytes completed)", donecnt);
1103 }
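
/*
 * A usage sketch, illustrative only: the caller finishes the line
 * itself, so a hypothetical driver completion routine reporting a
 * failed transfer might do the following.
 */
#if 0
	if (bp->b_flags & B_ERROR) {
		diskerr(bio, dev, "hard error", LOG_PRINTF, 0);
		kprintf("\n");
	}
#endif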
1104 
1105 /*
1106  * Locate a disk device
1107  */
1108 cdev_t
1109 disk_locate(const char *devname)
1110 {
1111 	return devfs_find_device_by_name(devname);
1112 }
1113 
1114 void
1115 disk_config(void *arg)
1116 {
1117 	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
1118 }
1119 
1120 static void
1121 disk_init(void)
1122 {
1123 	struct thread* td_core;
1124 
1125 	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
1126 					 NULL, NULL, NULL,
1127 					 objcache_malloc_alloc,
1128 					 objcache_malloc_free,
1129 					 &disk_msg_malloc_args);
1130 
1131 	lwkt_token_init(&disklist_token);
1132 
1133 	/*
1134 	 * Initialize the reply-only port which acts as a message drain
1135 	 */
1136 	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);
1137 
1138 	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
1139 		    0, 0, "disk_msg_core");
1140 
1141 	tsleep(td_core, 0, "diskcore", 0);
1142 }
1143 
1144 static void
1145 disk_uninit(void)
1146 {
1147 	objcache_destroy(disk_msg_cache);
1148 }
1149 
1150 TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
1151 SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
1152 		0, "Enable subr_disk debugging");
1153 
1154 SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
1155 SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);
1156