xref: /dflybsd-src/sys/kern/subr_disk.c (revision 65c62024e97be0964ff6de261081aec59a904f78)
1 /*
2  * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * ----------------------------------------------------------------------------
35  * "THE BEER-WARE LICENSE" (Revision 42):
36  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
37  * can do whatever you want with this stuff. If we meet some day, and you think
38  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
39  * ----------------------------------------------------------------------------
40  *
41  * Copyright (c) 1982, 1986, 1988, 1993
42  *	The Regents of the University of California.  All rights reserved.
43  * (c) UNIX System Laboratories, Inc.
44  * All or some portions of this file are derived from material licensed
45  * to the University of California by American Telephone and Telegraph
46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47  * the permission of UNIX System Laboratories, Inc.
48  *
49  * Redistribution and use in source and binary forms, with or without
50  * modification, are permitted provided that the following conditions
51  * are met:
52  * 1. Redistributions of source code must retain the above copyright
53  *    notice, this list of conditions and the following disclaimer.
54  * 2. Redistributions in binary form must reproduce the above copyright
55  *    notice, this list of conditions and the following disclaimer in the
56  *    documentation and/or other materials provided with the distribution.
57  * 3. All advertising materials mentioning features or use of this software
58  *    must display the following acknowledgement:
59  *	This product includes software developed by the University of
60  *	California, Berkeley and its contributors.
61  * 4. Neither the name of the University nor the names of its contributors
62  *    may be used to endorse or promote products derived from this software
63  *    without specific prior written permission.
64  *
65  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
66  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
69  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
70  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
71  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
72  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
73  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
74  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
75  * SUCH DAMAGE.
76  *
77  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
78  * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
79  * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
80  * $DragonFly: src/sys/kern/subr_disk.c,v 1.40 2008/06/05 18:06:32 swildner Exp $
81  */
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/kernel.h>
86 #include <sys/proc.h>
87 #include <sys/sysctl.h>
88 #include <sys/buf.h>
89 #include <sys/conf.h>
90 #include <sys/disklabel.h>
91 #include <sys/disklabel32.h>
92 #include <sys/disklabel64.h>
93 #include <sys/diskslice.h>
94 #include <sys/diskmbr.h>
95 #include <sys/disk.h>
96 #include <sys/malloc.h>
97 #include <sys/sysctl.h>
98 #include <machine/md_var.h>
99 #include <sys/ctype.h>
100 #include <sys/syslog.h>
101 #include <sys/device.h>
102 #include <sys/msgport.h>
103 #include <sys/msgport2.h>
104 #include <sys/buf2.h>
105 #include <sys/devfs.h>
106 #include <sys/thread.h>
107 #include <sys/thread2.h>
108 
109 #include <sys/queue.h>
110 #include <sys/lock.h>
111 
/* Allocation type for disk-layer structures and messages. */
static MALLOC_DEFINE(M_DISK, "disk", "disk data");

/* Forward declarations for the file-local helpers defined below. */
static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void disk_msg_core(void *);
static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
static void disk_probe(struct disk *dp, int reprobe);
static void _setdiskinfo(struct disk *disk, struct disk_info *info);
static void bioqwritereorder(struct bio_queue_head *bioq);

/* cdev entry points implemented by the managed disk layer. */
static d_open_t diskopen;
static d_close_t diskclose;
static d_ioctl_t diskioctl;
static d_strategy_t diskstrategy;
static d_psize_t diskpsize;
static d_clone_t diskclone;
static d_dump_t diskdump;

/* Global list of all registered disks, protected by disklist_token. */
static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
static struct lwkt_token disklist_token;

/*
 * dev_ops template for the slice/partition-managed disk devices.
 * Reads and writes go through the generic physio path.
 */
static struct dev_ops disk_ops = {
	{ "disk", 0, D_DISK },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
	.d_clone = diskclone
};

/* Objcache backing the disk_msg allocations used by the message thread. */
static struct objcache 	*disk_msg_cache;

struct objcache_malloc_args disk_msg_malloc_args = {
	sizeof(struct disk_msg), M_DISK };

/* disk_msg_port feeds disk_msg_core; disk_dispose_port auto-frees replies. */
static struct lwkt_port disk_dispose_port;
static struct lwkt_port disk_msg_port;
152 
153 
/*
 * Probe one slice for a disklabel and create the per-partition devices
 * (e.g. da0s1a...) for any partitions found.  Tries the 32-bit disklabel
 * format first and falls back to the 64-bit format if no label is found.
 *
 * If reprobe is non-zero, existing partition devices are revalidated
 * (flagged SI_REPROBE_TEST) instead of being recreated.
 *
 * Returns 0 on success, EINVAL if no usable label could be obtained.
 */
static int
disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
	disklabel_ops_t ops;
	struct partinfo part;
	const char *msg;
	cdev_t ndev;
	int sno;
	u_int i;

	/* User-visible slice number: sN devices number from slice-1. */
	sno = slice ? slice - 1 : 0;

	/* Try 32-bit disklabel first, then the 64-bit format. */
	ops = &disklabel32_ops;
	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	if (msg && !strcmp(msg, "no disk label")) {
		ops = &disklabel64_ops;
		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	}
	if (msg == NULL) {
		/* Label read ok: account for label-reserved blocks. */
		if (slice != WHOLE_DISK_SLICE)
			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
		else
			sp->ds_reserved = 0;

		sp->ds_ops = ops;
		/* Instantiate a device for every in-use partition. */
		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
			ops->op_loadpartinfo(sp->ds_label, i, &part);
			if (part.fstype) {
				if (reprobe &&
				    (ndev = devfs_find_device_by_name("%s%c",
						dev->si_name, 'a' + i))
				) {
					/*
					 * Device already exists and
					 * is still valid.
					 */
					ndev->si_flags |= SI_REPROBE_TEST;
				} else {
					ndev = make_dev(&disk_ops,
						dkmakeminor(dkunit(dp->d_cdev),
							    slice, i),
						UID_ROOT, GID_OPERATOR, 0640,
						"%s%c", dev->si_name, 'a'+ i);
					ndev->si_disk = dp;
					if (dp->d_info.d_serialno) {
						make_dev_alias(ndev,
						    "serno/%s.s%d%c",
						    dp->d_info.d_serialno,
						    sno, 'a' + i);
					}
					ndev->si_flags |= SI_REPROBE_TEST;
				}
			}
		}
	} else if (info->d_dsflags & DSO_COMPATLABEL) {
		/*
		 * No on-disk label but the driver asked for a synthesized
		 * compatibility label; pick the format by slice size.
		 */
		msg = NULL;
		if (sp->ds_size >= 0x100000000ULL)
			ops = &disklabel64_ops;
		else
			ops = &disklabel32_ops;
		sp->ds_label = ops->op_clone_label(info, sp);
	} else {
		/* Only complain for slices expected to carry a BSD label. */
		if (sp->ds_type == DOSPTYP_386BSD /* XXX */) {
			log(LOG_WARNING, "%s: cannot find label (%s)\n",
			    dev->si_name, msg);
		}
	}

	/* A freshly read/cloned label starts out write-protected. */
	if (msg == NULL) {
		sp->ds_wlabel = FALSE;
	}

	return (msg ? EINVAL : 0);
}
230 
231 
/*
 * Probe a disk's MBR and create slice devices (e.g. da0s1), then probe
 * appropriate slices for disklabels via disk_probe_slice().
 *
 * If reprobe is non-zero, existing devices are revalidated (flagged with
 * SI_REPROBE_TEST) rather than recreated.  Runs in the disk message
 * thread context (see disk_msg_core).
 */
static void
disk_probe(struct disk *dp, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	cdev_t dev = dp->d_cdev;
	cdev_t ndev;
	int error, i, sno;
	struct diskslice *sp;

	/* setdiskinfo must have supplied the media block size first. */
	KKASSERT (info->d_media_blksize != 0);

	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);

	/* Parse the MBR; on failure no slice devices are created. */
	error = mbrinit(dev, info, &(dp->d_slice));
	if (error)
		return;

	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
		/*
		 * Ignore the whole-disk slice, it has already been created.
		 */
		if (i == WHOLE_DISK_SLICE)
			continue;
		sp = &dp->d_slice->dss_slices[i];

		/*
		 * Handle s0.  s0 is a compatibility slice if there are no
		 * other slices and it has not otherwise been set up, else
		 * we ignore it.
		 */
		if (i == COMPATIBILITY_SLICE) {
			sno = 0;
			if (sp->ds_type == 0 &&
			    dp->d_slice->dss_nslices == BASE_SLICE) {
				sp->ds_size = info->d_media_blocks;
				sp->ds_reserved = 0;
			}
		} else {
			sno = i - 1;
			sp->ds_reserved = 0;
		}

		/*
		 * Ignore 0-length slices
		 */
		if (sp->ds_size == 0)
			continue;

		if (reprobe &&
		    (ndev = devfs_find_device_by_name("%ss%d",
						      dev->si_name, sno))) {
			/*
			 * Device already exists and is still valid
			 */
			ndev->si_flags |= SI_REPROBE_TEST;
		} else {
			/*
			 * Else create new device
			 */
			ndev = make_dev(&disk_ops,
					dkmakewholeslice(dkunit(dev), i),
					UID_ROOT, GID_OPERATOR, 0640,
					"%ss%d", dev->si_name, sno);
			if (dp->d_info.d_serialno) {
				make_dev_alias(ndev, "serno/%s.s%d",
					       dp->d_info.d_serialno, sno);
			}
			ndev->si_disk = dp;
			ndev->si_flags |= SI_REPROBE_TEST;
		}
		sp->ds_dev = ndev;

		/*
		 * Probe appropriate slices for a disklabel
		 *
		 * XXX slice type 1 used by our gpt probe code.
		 * XXX slice type 0 used by mbr compat slice.
		 */
		if (sp->ds_type == DOSPTYP_386BSD || sp->ds_type == 0 ||
			sp->ds_type == 1) {
			if (dp->d_slice->dss_first_bsd_slice == 0)
				dp->d_slice->dss_first_bsd_slice = i;
			disk_probe_slice(dp, ndev, i, reprobe);
		}
	}
}
318 
319 
/*
 * Kernel thread servicing the disk message port.  Serializes all disk
 * probe/reprobe/destroy operations so devfs manipulation never races.
 * The thread wakes its creator (disk_init) once the port is initialized,
 * then loops forever replying to each message after processing it.
 */
static void
disk_msg_core(void *arg)
{
	struct disk	*dp;
	struct diskslice *sp;
	lwkt_tokref ilock;
	disk_msg_t msg;
	int run;

	lwkt_initport_thread(&disk_msg_port, curthread);
	/* Signal disk_init() that the port is ready for senders. */
	wakeup(curthread);
	run = 1;

	while (run) {
		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);

		switch (msg->hdr.u.ms_result) {
		case DISK_DISK_PROBE:
			/* Initial probe of a newly registered disk. */
			dp = (struct disk *)msg->load;
			disk_probe(dp, 0);
			break;
		case DISK_DISK_DESTROY:
			/* Tear down devfs nodes and unlink from disklist. */
			dp = (struct disk *)msg->load;
			devfs_destroy_subnames(dp->d_cdev->si_name);
			devfs_destroy_dev(dp->d_cdev);
			lwkt_gettoken(&ilock, &disklist_token);
			LIST_REMOVE(dp, d_list);
			lwkt_reltoken(&ilock);
			if (dp->d_info.d_serialno) {
				kfree(dp->d_info.d_serialno, M_TEMP);
				dp->d_info.d_serialno = NULL;
			}
			break;
		case DISK_UNPROBE:
			/* Remove slice/partition devices, keep the disk. */
			dp = (struct disk *)msg->load;
			devfs_destroy_subnames(dp->d_cdev->si_name);
			break;
		case DISK_SLICE_REPROBE:
			/*
			 * Revalidate one slice: clear the reprobe flag on
			 * its sub-devices, reprobe, then destroy devices
			 * that were not revalidated.
			 */
			dp = (struct disk *)msg->load;
			sp = (struct diskslice *)msg->load2;
			devfs_clr_subnames_flag(sp->ds_dev->si_name,
						SI_REPROBE_TEST);
			devfs_debug(DEVFS_DEBUG_DEBUG,
				    "DISK_SLICE_REPROBE: %s\n",
				    sp->ds_dev->si_name);
			disk_probe_slice(dp, sp->ds_dev,
					 dkslice(sp->ds_dev), 1);
			devfs_destroy_subnames_without_flag(
					sp->ds_dev->si_name, SI_REPROBE_TEST);
			break;
		case DISK_DISK_REPROBE:
			/* Same revalidation dance for the whole disk. */
			dp = (struct disk *)msg->load;
			devfs_clr_subnames_flag(dp->d_cdev->si_name, SI_REPROBE_TEST);
			devfs_debug(DEVFS_DEBUG_DEBUG,
				    "DISK_DISK_REPROBE: %s\n",
				    dp->d_cdev->si_name);
			disk_probe(dp, 1);
			devfs_destroy_subnames_without_flag(
					dp->d_cdev->si_name, SI_REPROBE_TEST);
			break;
		case DISK_SYNC:
			/* No-op used to drain the queue (see disk_config). */
			break;
		default:
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "disk_msg_core: unknown message "
				    "received at core\n");
			break;
		}
		/* Reply after processing; senders may be blocked on this. */
		lwkt_replymsg((lwkt_msg_t)msg, 0);
	}
	lwkt_exit();
}
392 
393 
/*
 * Acts as a message drain.  Any message that is replied to here gets
 * returned to the disk_msg objcache, releasing its memory.  Installed
 * on disk_dispose_port for asynchronous (fire-and-forget) messages.
 */
static void
disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	objcache_put(disk_msg_cache, msg);
}
403 
404 
/*
 * Send a command to the disk message thread asynchronously.  The reply
 * is routed to disk_dispose_port, which frees the message automatically
 * (see disk_msg_autofree_reply), so the caller does not wait or clean up.
 */
void
disk_msg_send(uint32_t cmd, void *load, void *load2)
{
	disk_msg_t disk_msg;
	lwkt_port_t port = &disk_msg_port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);

	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;
	KKASSERT(port);
	lwkt_sendmsg(port, (lwkt_msg_t)disk_msg);
}
421 
422 void
423 disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
424 {
425 	struct lwkt_port rep_port;
426 	disk_msg_t disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
427 	disk_msg_t	msg_incoming;
428 	lwkt_port_t port = &disk_msg_port;
429 
430 	lwkt_initport_thread(&rep_port, curthread);
431 	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
432 
433 	disk_msg->hdr.u.ms_result = cmd;
434 	disk_msg->load = load;
435 	disk_msg->load2 = load2;
436 
437 	KKASSERT(port);
438 	lwkt_sendmsg(port, (lwkt_msg_t)disk_msg);
439 	msg_incoming = lwkt_waitport(&rep_port, 0);
440 }
441 
442 /*
443  * Create a raw device for the dev_ops template (which is returned).  Also
444  * create a slice and unit managed disk and overload the user visible
445  * device space with it.
446  *
447  * NOTE: The returned raw device is NOT a slice and unit managed device.
448  * It is an actual raw device representing the raw disk as specified by
449  * the passed dev_ops.  The disk layer not only returns such a raw device,
450  * it also uses it internally when passing (modified) commands through.
451  */
cdev_t
disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	lwkt_tokref ilock;
	cdev_t rawdev;

	/* Raw whole-disk device, owned by the driver's raw_ops. */
	rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%s%d", raw_ops->head.name, unit);

	/* Caller supplies uninitialized storage; start from a clean state. */
	bzero(dp, sizeof(*dp));

	dp->d_rawdev = rawdev;
	dp->d_raw_ops = raw_ops;
	dp->d_dev_ops = &disk_ops;
	/* Managed (slice/partition aware) device under the same name. */
	dp->d_cdev = make_dev(&disk_ops,
			    dkmakewholedisk(unit),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%s%d", raw_ops->head.name, unit);

	dp->d_cdev->si_disk = dp;

	/* Register on the global disk list for disk_enumerate() et al. */
	lwkt_gettoken(&ilock, &disklist_token);
	LIST_INSERT_HEAD(&disklist, dp, d_list);
	lwkt_reltoken(&ilock);
	return (dp->d_rawdev);
}
479 
480 
481 static void
482 _setdiskinfo(struct disk *disk, struct disk_info *info)
483 {
484 	char *oldserialno;
485 
486 	oldserialno = disk->d_info.d_serialno;
487 	bcopy(info, &disk->d_info, sizeof(disk->d_info));
488 	info = &disk->d_info;
489 
490 	/*
491 	 * The serial number is duplicated so the caller can throw
492 	 * their copy away.
493 	 */
494 	if (info->d_serialno && info->d_serialno[0]) {
495 		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
496 		if (disk->d_cdev) {
497 			make_dev_alias(disk->d_cdev, "serno/%s",
498 					info->d_serialno);
499 		}
500 	} else {
501 		info->d_serialno = NULL;
502 	}
503 	if (oldserialno)
504 		kfree(oldserialno, M_TEMP);
505 
506 	/*
507 	 * The caller may set d_media_size or d_media_blocks and we
508 	 * calculate the other.
509 	 */
510 	KKASSERT(info->d_media_size == 0 || info->d_media_blksize == 0);
511 	if (info->d_media_size == 0 && info->d_media_blocks) {
512 		info->d_media_size = (u_int64_t)info->d_media_blocks *
513 				     info->d_media_blksize;
514 	} else if (info->d_media_size && info->d_media_blocks == 0 &&
515 		   info->d_media_blksize) {
516 		info->d_media_blocks = info->d_media_size /
517 				       info->d_media_blksize;
518 	}
519 
520 	/*
521 	 * The si_* fields for rawdev are not set until after the
522 	 * disk_create() call, so someone using the cooked version
523 	 * of the raw device (i.e. da0s0) will not get the right
524 	 * si_iosize_max unless we fix it up here.
525 	 */
526 	if (disk->d_cdev && disk->d_rawdev &&
527 	    disk->d_cdev->si_iosize_max == 0) {
528 		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
529 		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
530 		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
531 	}
532 }
533 
534 /*
535  * Disk drivers must call this routine when media parameters are available
536  * or have changed.
537  */
void
disk_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	/* Copy info in, then kick off an asynchronous probe. */
	_setdiskinfo(disk, info);
	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
}
544 
/*
 * Same as disk_setdiskinfo() but blocks until the probe completes.
 */
void
disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
}
551 
552 /*
553  * This routine is called when an adapter detaches.  The higher level
554  * managed disk device is destroyed while the lower level raw device is
555  * released.
556  */
557 void
558 disk_destroy(struct disk *disk)
559 {
560 	disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
561 	return;
562 }
563 
/*
 * Validate and compute the parameters for a kernel crash dump on dev.
 * On success *count is the number of sectors needed to dump physical
 * memory, *blkno the starting media block, and *secsize the sector size.
 *
 * Returns 0, or an errno: ENXIO if the partition has no block size,
 * ENOSPC if the dump area (dumplo64..dumplo64+count) does not fit
 * within the partition, or the DIOCGPART ioctl error.
 */
int
disk_dumpcheck(cdev_t dev, u_int64_t *count, u_int64_t *blkno, u_int *secsize)
{
	struct partinfo pinfo;
	int error;

	bzero(&pinfo, sizeof(pinfo));
	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
			   proc0.p_ucred, NULL);
	if (error)
		return (error);
	if (pinfo.media_blksize == 0)
		return (ENXIO);
	/* Sectors required to dump all of physical memory. */
	*count = (u_int64_t)Maxmem * PAGE_SIZE / pinfo.media_blksize;
	/* Dump must stay clear of reserved blocks and fit the partition. */
	if (dumplo64 < pinfo.reserved_blocks ||
	    dumplo64 + *count > pinfo.media_blocks) {
		return (ENOSPC);
	}
	*blkno = dumplo64 + pinfo.media_offset / pinfo.media_blksize;
	*secsize = pinfo.media_blksize;
	return (0);
}
586 
587 void
588 disk_unprobe(struct disk *disk)
589 {
590 	if (disk == NULL)
591 		return;
592 
593 	disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
594 }
595 
/*
 * Throw away the slice structure (and thus all cached label state)
 * for a disk, e.g. when the media goes away.
 */
void
disk_invalidate (struct disk *disk)
{
	if (disk->d_slice)
		dsgone(&disk->d_slice);
}
602 
603 struct disk *
604 disk_enumerate(struct disk *disk)
605 {
606 	struct disk *dp;
607 	lwkt_tokref ilock;
608 
609 	lwkt_gettoken(&ilock, &disklist_token);
610 	if (!disk)
611 		dp = (LIST_FIRST(&disklist));
612 	else
613 		dp = (LIST_NEXT(disk, d_list));
614 	lwkt_reltoken(&ilock);
615 
616 	return dp;
617 }
618 
619 static
620 int
621 sysctl_disks(SYSCTL_HANDLER_ARGS)
622 {
623 	struct disk *disk;
624 	int error, first;
625 
626 	disk = NULL;
627 	first = 1;
628 
629 	while ((disk = disk_enumerate(disk))) {
630 		if (!first) {
631 			error = SYSCTL_OUT(req, " ", 1);
632 			if (error)
633 				return error;
634 		} else {
635 			first = 0;
636 		}
637 		error = SYSCTL_OUT(req, disk->d_rawdev->si_name,
638 				   strlen(disk->d_rawdev->si_name));
639 		if (error)
640 			return error;
641 	}
642 	error = SYSCTL_OUT(req, "", 1);
643 	return error;
644 }
645 
646 SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
647     sysctl_disks, "A", "names of available disks");
648 
649 /*
650  * Open a disk device or partition.
651  */
static
int
diskopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;

	/*
	 * dp can't be NULL here XXX.
	 *
	 * d_slice will be NULL if setdiskinfo() has not been called yet.
	 * setdiskinfo() is typically called whether the disk is present
	 * or not (e.g. CD), but the base disk device is created first
	 * and there may be a race.
	 */
	dp = dev->si_disk;
	if (dp == NULL || dp->d_slice == NULL)
		return (ENXIO);
	error = 0;

	/*
	 * Deal with open races
	 */
	while (dp->d_flags & DISKFLAG_LOCK) {
		dp->d_flags |= DISKFLAG_WANTED;
		error = tsleep(dp, PCATCH, "diskopen", hz);
		if (error)
			return (error);
	}
	dp->d_flags |= DISKFLAG_LOCK;

	/*
	 * Open the underlying raw device.
	 */
	if (!dsisopen(dp->d_slice)) {
#if 0
		if (!pdev->si_iosize_max)
			pdev->si_iosize_max = dev->si_iosize_max;
#endif
		/* First opener: open the raw backing device as well. */
		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
				  ap->a_devtype, ap->a_cred);
	}
#if 0
	/*
	 * Inherit properties from the underlying device now that it is
	 * open.
	 */
	dev_dclone(dev);
#endif

	if (error)
		goto out;
	/* Open the slice/partition layer on top of the raw device. */
	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
		       &dp->d_slice, &dp->d_info);
	/* If dsopen failed and nothing remains open, close the raw dev. */
	if (!dsisopen(dp->d_slice)) {
		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype);
	}
out:
	/* Release the open-race lock and wake any waiters. */
	dp->d_flags &= ~DISKFLAG_LOCK;
	if (dp->d_flags & DISKFLAG_WANTED) {
		dp->d_flags &= ~DISKFLAG_WANTED;
		wakeup(dp);
	}

	return(error);
}
719 
720 /*
721  * Close a disk device or partition
722  */
723 static
724 int
725 diskclose(struct dev_close_args *ap)
726 {
727 	cdev_t dev = ap->a_head.a_dev;
728 	struct disk *dp;
729 	int error;
730 
731 	error = 0;
732 	dp = dev->si_disk;
733 
734 	dsclose(dev, ap->a_devtype, dp->d_slice);
735 	if (!dsisopen(dp->d_slice)) {
736 		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype);
737 	}
738 	return (error);
739 }
740 
741 /*
742  * First execute the ioctl on the disk device, and if it isn't supported
743  * try running it on the backing device.
744  */
745 static
746 int
747 diskioctl(struct dev_ioctl_args *ap)
748 {
749 	cdev_t dev = ap->a_head.a_dev;
750 	struct disk *dp;
751 	int error;
752 
753 	dp = dev->si_disk;
754 	if (dp == NULL)
755 		return (ENXIO);
756 
757 	devfs_debug(DEVFS_DEBUG_DEBUG,
758 		    "diskioctl: cmd is: %x (name: %s)\n",
759 		    ap->a_cmd, dev->si_name);
760 	devfs_debug(DEVFS_DEBUG_DEBUG,
761 		    "diskioctl: &dp->d_slice is: %x, %x\n",
762 		    &dp->d_slice, dp->d_slice);
763 
764 	error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
765 			&dp->d_slice, &dp->d_info);
766 
767 	if (error == ENOIOCTL) {
768 		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
769 				   ap->a_fflag, ap->a_cred, NULL);
770 	}
771 	return (error);
772 }
773 
774 /*
775  * Execute strategy routine
776  */
static
int
diskstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct disk *dp;

	dp = dev->si_disk;

	/* No backing disk: fail the bio instead of crashing. */
	if (dp == NULL) {
		bio->bio_buf->b_error = ENXIO;
		bio->bio_buf->b_flags |= B_ERROR;
		biodone(bio);
		return(0);
	}
	KKASSERT(dev->si_disk == dp);

	/*
	 * The dscheck() function will also transform the slice relative
	 * block number i.e. bio->bio_offset into a block number that can be
	 * passed directly to the underlying raw device.  If dscheck()
	 * returns NULL it will have handled the bio for us (e.g. EOF
	 * or error due to being beyond the device size).
	 */
	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
		dev_dstrategy(dp->d_rawdev, nbio);
	} else {
		biodone(bio);
	}
	return(0);
}
810 
811 /*
812  * Return the partition size in ?blocks?
813  */
814 static
815 int
816 diskpsize(struct dev_psize_args *ap)
817 {
818 	cdev_t dev = ap->a_head.a_dev;
819 	struct disk *dp;
820 
821 	dp = dev->si_disk;
822 	if (dp == NULL)
823 		return(ENODEV);
824 	ap->a_result = dssize(dev, &dp->d_slice);
825 	return(0);
826 }
827 
828 /*
829  * When new device entries are instantiated, make sure they inherit our
830  * si_disk structure and block and iosize limits from the raw device.
831  *
832  * This routine is always called synchronously in the context of the
833  * client.
834  *
835  * XXX The various io and block size constraints are not always initialized
836  * properly by devices.
837  */
static
int
diskclone(struct dev_clone_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	dp = dev->si_disk;

	KKASSERT(dp != NULL);
	/* Inherit the disk backref and the raw device's size limits. */
	dev->si_disk = dp;
	dev->si_iosize_max = dp->d_rawdev->si_iosize_max;
	dev->si_bsize_phys = dp->d_rawdev->si_bsize_phys;
	dev->si_bsize_best = dp->d_rawdev->si_bsize_best;
	return(0);
}
853 
854 int
855 diskdump(struct dev_dump_args *ap)
856 {
857 	cdev_t dev = ap->a_head.a_dev;
858 	struct disk *dp = dev->si_disk;
859 	int error;
860 
861 	error = disk_dumpcheck(dev, &ap->a_count, &ap->a_blkno, &ap->a_secsize);
862 	if (error == 0) {
863 		ap->a_head.a_dev = dp->d_rawdev;
864 		error = dev_doperate(&ap->a_head);
865 	}
866 
867 	return(error);
868 }
869 
870 
/* Structure-size introspection sysctls (debug.sizeof.*). */
SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
    0, sizeof(struct diskslices), "sizeof(struct diskslices)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
    0, sizeof(struct disk), "sizeof(struct disk)");

/*
 * Reorder interval for burst write allowance and minor write
 * allowance.
 *
 * We always want to trickle some writes in to make use of the
 * disk's zone cache.  Bursting occurs on a longer interval and only
 * runningbufspace is well over the hirunningspace limit.
 */
int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
int bioq_reorder_minor_interval = 5;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");

/* Byte budgets for how much write traffic each reorder pass admits. */
int bioq_reorder_burst_bytes = 3000000;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
int bioq_reorder_minor_bytes = 262144;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");
898 
899 
900 /*
901  * Order I/Os.  Generally speaking this code is designed to make better
902  * use of drive zone caches.  A drive zone cache can typically track linear
903  * reads or writes for around 16 zones simultaniously.
904  *
905  * Read prioritization issues:  It is possible for hundreds of megabytes worth
906  * of writes to be queued asynchronously.  This creates a huge bottleneck
907  * for reads which reduce read bandwidth to a trickle.
908  *
909  * To solve this problem we generally reorder reads before writes.
910  *
911  * However, a large number of random reads can also starve writes and
912  * make poor use of the drive zone cache so we allow writes to trickle
913  * in every N reads.
914  */
void
bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
{
	/*
	 * The BIO wants to be ordered.  Adding to the tail also
	 * causes transition to be set to NULL, forcing the ordering
	 * of all prior I/O's.
	 */
	if (bio->bio_buf->b_flags & B_ORDERED) {
		bioq_insert_tail(bioq, bio);
		return;
	}

	switch(bio->bio_buf->b_cmd) {
	case BUF_CMD_READ:
		if (bioq->transition) {
			/*
			 * Insert before the first write.  Bleedover writes
			 * based on reorder intervals to prevent starvation.
			 */
			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
			++bioq->reorder;
			/*
			 * Every minor-interval reads, let some writes
			 * through; reset the counter once a burst
			 * interval's worth has accumulated.
			 */
			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
				bioqwritereorder(bioq);
				if (bioq->reorder >=
				    bioq_reorder_burst_interval) {
					bioq->reorder = 0;
				}
			}
		} else {
			/*
			 * No writes queued (or ordering was forced),
			 * insert at tail.
			 */
			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		}
		break;
	case BUF_CMD_WRITE:
		/*
		 * Writes are always appended.  If no writes were previously
		 * queued or an ordered tail insertion occured the transition
		 * field will be NULL.
		 */
		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		if (bioq->transition == NULL)
			bioq->transition = bio;
		break;
	default:
		/*
		 * All other request types are forced to be ordered.
		 */
		bioq_insert_tail(bioq, bio);
		break;
	}
}
970 
971 /*
972  * Move the read-write transition point to prevent reads from
973  * completely starving our writes.  This brings a number of writes into
974  * the fold every N reads.
975  *
976  * We bring a few linear writes into the fold on a minor interval
977  * and we bring a non-linear burst of writes into the fold on a major
978  * interval.  Bursting only occurs if runningbufspace is really high
979  * (typically from syncs, fsyncs, or HAMMER flushes).
980  */
static
void
bioqwritereorder(struct bio_queue_head *bioq)
{
	struct bio *bio;
	off_t next_offset;
	size_t left;
	size_t n;
	int check_off;

	/*
	 * Pick the budget: a small linear allowance normally, a large
	 * unconditional burst when we've hit the burst interval AND
	 * runningbufspace is severe.
	 */
	if (bioq->reorder < bioq_reorder_burst_interval ||
	    !buf_runningbufspace_severe()) {
		left = (size_t)bioq_reorder_minor_bytes;
		check_off = 1;
	} else {
		left = (size_t)bioq_reorder_burst_bytes;
		check_off = 0;
	}

	/*
	 * Advance the transition point past writes until the byte budget
	 * is exhausted.  In minor mode (check_off) only contiguous writes
	 * are admitted.  NOTE: callers only invoke this with a non-NULL
	 * bioq->transition (see bioqdisksort).
	 */
	next_offset = bioq->transition->bio_offset;
	while ((bio = bioq->transition) != NULL &&
	       (check_off == 0 || next_offset == bio->bio_offset)
	) {
		n = bio->bio_buf->b_bcount;
		next_offset = bio->bio_offset + n;
		bioq->transition = TAILQ_NEXT(bio, bio_act);
		if (left < n)
			break;
		left -= n;
	}
}
1012 
1013 /*
1014  * Disk error is the preface to plaintive error messages
1015  * about failing disk transfers.  It prints messages of the form
1016 
1017 hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
1018 
1019  * if the offset of the error in the transfer and a disk label
1020  * are both available.  blkdone should be -1 if the position of the error
1021  * is unknown; the disklabel pointer may be null from drivers that have not
1022  * been converted to use them.  The message is printed with kprintf
1023  * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
1024  * The message should be completed (with at least a newline) with kprintf
1025  * or log(-1, ...), respectively.  There is no trailing space.
1026  */
void
diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
{
	struct buf *bp = bio->bio_buf;
	const char *term;

	/* Describe the failed transfer direction. */
	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		term = "read";
		break;
	case BUF_CMD_WRITE:
		term = "write";
		break;
	default:
		term = "access";
		break;
	}
	/*
	 * NOTE: 'pri' is currently unused here; output always goes
	 * through kprintf.  The caller is expected to complete the
	 * message (at least a newline) -- no trailing newline is
	 * emitted here.
	 */
	kprintf("%s: %s %sing ", dev->si_name, what, term);
	kprintf("offset %012llx for %d",
		(long long)bio->bio_offset,
		bp->b_bcount);

	if (donecnt)
		kprintf(" (%d bytes completed)", donecnt);
}
1052 
1053 /*
1054  * Locate a disk device
1055  */
/*
 * Locate a disk device by its devfs name; returns NULL if not found.
 */
cdev_t
disk_locate(const char *devname)
{
	return devfs_find_device_by_name(devname);
}
1061 
/*
 * Synchronize with the disk message thread: sending a DISK_SYNC message
 * and waiting for its reply drains all previously queued probe work.
 */
void
disk_config(void *arg)
{
	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
}
1067 
/*
 * Subsystem initialization (SI_SUB_PRE_DRIVERS): set up the message
 * objcache, the disklist token, the auto-freeing reply port, and spawn
 * the disk message thread, waiting until its port is ready.
 */
static void
disk_init(void)
{
	struct thread* td_core;

	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
					 NULL, NULL, NULL,
					 objcache_malloc_alloc,
					 objcache_malloc_free,
					 &disk_msg_malloc_args);

	lwkt_token_init(&disklist_token);

	/*
	 * Initialize the reply-only port which acts as a message drain
	 */
	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);

	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
		    0, 0, "disk_msg_core");

	/* disk_msg_core() wakes us once disk_msg_port is initialized. */
	tsleep(td_core, 0, "diskcore", 0);
}
1091 
/*
 * Subsystem teardown: release the message objcache.
 */
static void
disk_uninit(void)
{
	objcache_destroy(disk_msg_cache);
}
1097 
1098 SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
1099 SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);
1100