xref: /netbsd-src/sys/dev/dksubr.c (revision 9616dacfef448e70e3fbbd865bddf60d54b656c5)
1 /* $NetBSD: dksubr.c,v 1.94 2016/12/22 13:42:14 mlelstv Exp $ */
2 
3 /*-
4  * Copyright (c) 1996, 1997, 1998, 1999, 2002, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe and Roland C. Dowdeswell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: dksubr.c,v 1.94 2016/12/22 13:42:14 mlelstv Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/stat.h>
38 #include <sys/proc.h>
39 #include <sys/ioctl.h>
40 #include <sys/device.h>
41 #include <sys/disk.h>
42 #include <sys/disklabel.h>
43 #include <sys/buf.h>
44 #include <sys/bufq.h>
45 #include <sys/vnode.h>
46 #include <sys/fcntl.h>
47 #include <sys/namei.h>
48 #include <sys/module.h>
49 #include <sys/syslog.h>
50 
51 #include <dev/dkvar.h>
52 #include <miscfs/specfs/specdev.h> /* for v_rdev */
53 
/*
 * Debug knob: bitmask of the DKDB_* flags below; only honoured in
 * DEBUG kernels, where it gates the DPRINTF output.
 */
int	dkdebug = 0;

#ifdef DEBUG
#define DKDB_FOLLOW	0x1	/* trace entry of the public routines */
#define DKDB_INIT	0x2	/* initialization / lookup */
#define DKDB_VNODE	0x4	/* vnode details in dk_lookup() */
#define DKDB_DUMP	0x8	/* crash-dump path (dk_dump()) */

#define IFDEBUG(x,y)		if (dkdebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(DKDB_FOLLOW, y)
#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

/* Both flags must be set before dk_dump() will write to the device. */
#define DKF_READYFORDUMP	(DKF_INITED|DKF_TAKEDUMP)

static int dk_subr_modcmd(modcmd_t, void *);

/* Device number of the raw partition on the same unit as "dev". */
#define DKLABELDEV(dev)	\
	(MAKEDISKDEV(major((dev)), DISKUNIT((dev)), RAW_PART))

static void	dk_makedisklabel(struct dk_softc *);
static int	dk_translate(struct dk_softc *, struct buf *);
static void	dk_done1(struct dk_softc *, struct buf *, bool);
81 
82 void
83 dk_init(struct dk_softc *dksc, device_t dev, int dtype)
84 {
85 
86 	memset(dksc, 0x0, sizeof(*dksc));
87 	dksc->sc_dtype = dtype;
88 	dksc->sc_dev = dev;
89 
90 	strlcpy(dksc->sc_xname, device_xname(dev), DK_XNAME_SIZE);
91 	dksc->sc_dkdev.dk_name = dksc->sc_xname;
92 }
93 
/*
 * Second-stage setup: initialize the I/O lock, mark the unit as
 * eligible for crash dumps and hook it into the entropy pool.
 */
void
dk_attach(struct dk_softc *dksc)
{
	KASSERT(dksc->sc_dev != NULL);

	mutex_init(&dksc->sc_iolock, MUTEX_DEFAULT, IPL_VM);
	dksc->sc_flags |= DKF_READYFORDUMP;
#ifdef DIAGNOSTIC
	/* DIAGNOSTIC kernels also warn about bad/oversized disklabels. */
	dksc->sc_flags |= DKF_WARNLABEL | DKF_LABELSANITY;
#endif

	/* Attach the device into the rnd source list. */
	rnd_attach_source(&dksc->sc_rnd_source, dksc->sc_xname,
	    RND_TYPE_DISK, RND_FLAG_DEFAULT);
}
109 
/*
 * Undo dk_attach(): detach the entropy source, withdraw dump
 * permission and destroy the I/O lock.
 */
void
dk_detach(struct dk_softc *dksc)
{
	/* Unhook the entropy source. */
	rnd_detach_source(&dksc->sc_rnd_source);

	dksc->sc_flags &= ~DKF_READYFORDUMP;
	mutex_destroy(&dksc->sc_iolock);
}
119 
/* ARGSUSED */
/*
 * Generic open routine: run the driver's first-open hook, make sure an
 * in-core disklabel exists, validate the requested partition against it
 * and record the open in the char/block open masks.  Serialized by
 * dk_openlock.  Returns 0 or an errno.
 */
int
dk_open(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct	disklabel *lp = dksc->sc_dkdev.dk_label;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;
	int	ret = 0;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (dk->dk_nwedges != 0 && part != RAW_PART) {
		ret = EBUSY;
		goto done;
	}

	/*
	 * initialize driver for the first opener
	 */
	if (dk->dk_openmask == 0 && dkd->d_firstopen != NULL) {
		ret = (*dkd->d_firstopen)(dksc->sc_dev, dev, flags, fmt);
		if (ret)
			goto done;
	}

	/*
	 * If we're init'ed and there are no other open partitions then
	 * update the in-core disklabel.
	 */
	if ((dksc->sc_flags & DKF_INITED)) {
		/* DKF_VLABEL is cleared on last close unless DKF_KLABEL. */
		if ((dksc->sc_flags & DKF_VLABEL) == 0) {
			dksc->sc_flags |= DKF_VLABEL;
			dk_getdisklabel(dksc, dev);
		}
	}

	/* Fail if we can't find the partition. */
	if (part != RAW_PART &&
	    ((dksc->sc_flags & DKF_VLABEL) == 0 ||
	     part >= lp->d_npartitions ||
	     lp->d_partitions[part].p_fstype == FS_UNUSED)) {
		ret = ENXIO;
		goto done;
	}

	/* Mark our unit as open. */
	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask |= pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask |= pmask;
		break;
	}

	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

done:
	mutex_exit(&dk->dk_openlock);
	return ret;
}
191 
/* ARGSUSED */
/*
 * Generic close routine: clear this partition from the open masks.
 * On the last close the driver's d_lastclose hook runs and the
 * in-core label is invalidated unless DIOCKLABEL asked to keep it
 * (DKF_KLABEL).  Always returns 0.
 */
int
dk_close(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	if (dk->dk_openmask == 0) {
		if (dkd->d_lastclose != NULL)
			(*dkd->d_lastclose)(dksc->sc_dev);
		if ((dksc->sc_flags & DKF_KLABEL) == 0)
			dksc->sc_flags &= ~DKF_VLABEL;
	}

	mutex_exit(&dk->dk_openlock);
	return 0;
}
227 
/*
 * Validate bp against the unit's geometry/label and convert its block
 * number into an absolute device block number, stored in b_rawblkno.
 *
 * Returns -1 when the buffer should be handed to the driver, or a
 * value >= 0 (the buffer's final b_error; 0 is possible for an empty
 * or bounds-trimmed transfer) when the request has been completed
 * here and the caller should just biodone() it.
 */
static int
dk_translate(struct dk_softc *dksc, struct buf *bp)
{
	int	part;
	int	wlabel;
	daddr_t	blkno;
	struct disklabel *lp;
	struct disk *dk;
	uint64_t numsecs;
	unsigned secsize;

	lp = dksc->sc_dkdev.dk_label;
	dk = &dksc->sc_dkdev;

	part = DISKPART(bp->b_dev);
	numsecs = dk->dk_geom.dg_secperunit;
	secsize = dk->dk_geom.dg_secsize;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % secsize) != 0 || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If there is nothing to do, then we are done */
	if (bp->b_bcount == 0)
		goto done;

	wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
	if (part == RAW_PART) {
		/* Raw partition: bound by the size of the whole unit. */
		uint64_t numblocks = btodb(numsecs * secsize);
		if (bounds_check_with_mediasize(bp, DEV_BSIZE, numblocks) <= 0)
			goto done;
	} else {
		/* Other partitions: bound by the label; "wlabel" permits
		 * writing over the label area while (re)labelling. */
		if (bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0)
			goto done;
	}

	/*
	 * Convert the block number to absolute and put it in terms
	 * of the device's logical block size.
	 */
	if (secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / secsize);

	if (part != RAW_PART)
		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
	bp->b_rawblkno = blkno;

	return -1;

done:
	bp->b_resid = bp->b_bcount;
	return bp->b_error;
}
288 
289 static int
290 dk_strategy1(struct dk_softc *dksc, struct buf *bp)
291 {
292 	int error;
293 
294 	DPRINTF_FOLLOW(("%s(%s, %p, %p)\n", __func__,
295 	    dksc->sc_xname, dksc, bp));
296 
297 	if (!(dksc->sc_flags & DKF_INITED)) {
298 		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
299 		bp->b_error = ENXIO;
300 		bp->b_resid = bp->b_bcount;
301 		biodone(bp);
302 		return 1;
303 	}
304 
305 	error = dk_translate(dksc, bp);
306 	if (error >= 0) {
307 		biodone(bp);
308 		return 1;
309 	}
310 
311 	return 0;
312 }
313 
/*
 * Standard strategy entry point: validate and translate the buffer,
 * then queue it and kick the driver's start routine.
 */
void
dk_strategy(struct dk_softc *dksc, struct buf *bp)
{
	if (dk_strategy1(dksc, bp) != 0)
		return;		/* buffer already completed */

	/* Queue buffer and start unit. */
	dk_start(dksc, bp);
}
328 
329 int
330 dk_strategy_defer(struct dk_softc *dksc, struct buf *bp)
331 {
332 	int error;
333 
334 	error = dk_strategy1(dksc, bp);
335 	if (error)
336 		return error;
337 
338 	/*
339 	 * Queue buffer only
340 	 */
341 	mutex_enter(&dksc->sc_iolock);
342 	bufq_put(dksc->sc_bufq, bp);
343 	mutex_exit(&dksc->sc_iolock);
344 
345 	return 0;
346 }
347 
348 int
349 dk_strategy_pending(struct dk_softc *dksc)
350 {
351 	struct buf *bp;
352 
353 	if (!(dksc->sc_flags & DKF_INITED)) {
354 		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
355 		return 0;
356 	}
357 
358 	mutex_enter(&dksc->sc_iolock);
359 	bp = bufq_peek(dksc->sc_bufq);
360 	mutex_exit(&dksc->sc_iolock);
361 
362 	return bp != NULL;
363 }
364 
/*
 * Queue an additional buffer (bp may be NULL) and feed pending buffers
 * to the driver's d_diskstart().
 *
 * sc_busy implements a "run once more" protocol: it counts up to 2, so
 * that when a second thread arrives while the queue is being drained,
 * the draining thread makes one more pass (the driver may now accept
 * additional requests) instead of the second thread blocking.  A buffer
 * the driver rejected with EAGAIN is parked in sc_deferred and retried
 * before anything else, preserving I/O order.
 */
void
dk_start(struct dk_softc *dksc, struct buf *bp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int error;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return;
	}

	mutex_enter(&dksc->sc_iolock);

	if (bp != NULL)
		bufq_put(dksc->sc_bufq, bp);

	/*
	 * If another thread is running the queue, increment
	 * busy counter to 2 so that the queue is retried,
	 * because the driver may now accept additional
	 * requests.
	 */
	if (dksc->sc_busy < 2)
		dksc->sc_busy++;
	if (dksc->sc_busy > 1)
		goto done;

	/*
	 * Peeking at the buffer queue and committing the operation
	 * only after success isn't atomic.
	 *
	 * So when a diskstart fails, the buffer is saved
	 * and tried again before the next buffer is fetched.
	 * dk_drain() handles flushing of a saved buffer.
	 *
	 * This keeps order of I/O operations, unlike bufq_put.
	 */

	while (dksc->sc_busy > 0) {

		/* A previously deferred buffer goes first. */
		bp = dksc->sc_deferred;
		dksc->sc_deferred = NULL;

		if (bp == NULL)
			bp = bufq_get(dksc->sc_bufq);

		while (bp != NULL) {

			/* The I/O lock is dropped around d_diskstart(). */
			disk_busy(&dksc->sc_dkdev);
			mutex_exit(&dksc->sc_iolock);
			error = dkd->d_diskstart(dksc->sc_dev, bp);
			mutex_enter(&dksc->sc_iolock);
			if (error == EAGAIN) {
				/* Driver is full: park bp for the next run. */
				dksc->sc_deferred = bp;
				disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
				break;
			}

			if (error != 0) {
				/* Hard failure: complete the buffer now. */
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				dk_done1(dksc, bp, false);
			}

			bp = bufq_get(dksc->sc_bufq);
		}

		dksc->sc_busy--;
	}
done:
	mutex_exit(&dksc->sc_iolock);
}
437 
/*
 * Common completion path for a finished buffer: report an error via
 * diskerr(), update the disk accounting — taking sc_iolock only when
 * "lock" is true; dk_start() already holds it and passes false — then
 * feed the entropy pool and biodone the buffer.
 */
static void
dk_done1(struct dk_softc *dksc, struct buf *bp, bool lock)
{
	struct disk *dk = &dksc->sc_dkdev;

	if (bp->b_error != 0) {
		struct cfdriver *cd = device_cfdriver(dksc->sc_dev);

		diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
			dk->dk_label);
		printf("\n");
	}

	if (lock)
		mutex_enter(&dksc->sc_iolock);
	disk_unbusy(dk, bp->b_bcount - bp->b_resid, (bp->b_flags & B_READ));
	if (lock)
		mutex_exit(&dksc->sc_iolock);

	/* The transfer's block number seeds the random pool. */
	rnd_add_uint32(&dksc->sc_rnd_source, bp->b_rawblkno);

	biodone(bp);
}
461 
/*
 * Completion entry point for drivers: finish the buffer, with the
 * I/O lock taken internally around the disk accounting.
 */
void
dk_done(struct dk_softc *dksc, struct buf *bp)
{
	dk_done1(dksc, bp, true);
}
467 
468 void
469 dk_drain(struct dk_softc *dksc)
470 {
471 	struct buf *bp;
472 
473 	mutex_enter(&dksc->sc_iolock);
474 	bp = dksc->sc_deferred;
475 	dksc->sc_deferred = NULL;
476 	if (bp != NULL) {
477 		bp->b_error = EIO;
478 		bp->b_resid = bp->b_bcount;
479 		biodone(bp);
480 	}
481 	bufq_drain(dksc->sc_bufq);
482 	mutex_exit(&dksc->sc_iolock);
483 }
484 
485 int
486 dk_discard(struct dk_softc *dksc, dev_t dev, off_t pos, off_t len)
487 {
488 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
489 	unsigned secsize = dksc->sc_dkdev.dk_geom.dg_secsize;
490 	struct buf tmp, *bp = &tmp;
491 	int error;
492 
493 	DPRINTF_FOLLOW(("%s(%s, %p, 0x"PRIx64", %jd, %jd)\n", __func__,
494 	    dksc->sc_xname, dksc, (intmax_t)pos, (intmax_t)len));
495 
496 	if (!(dksc->sc_flags & DKF_INITED)) {
497 		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
498 		return ENXIO;
499 	}
500 
501 	if (secsize == 0 || (pos % secsize) != 0)
502 		return EINVAL;
503 
504 	/* enough data to please the bounds checking code */
505 	bp->b_dev = dev;
506 	bp->b_blkno = (daddr_t)(pos / secsize);
507 	bp->b_bcount = len;
508 	bp->b_flags = B_WRITE;
509 
510 	error = dk_translate(dksc, bp);
511 	if (error >= 0)
512 		return error;
513 
514 	error = dkd->d_discard(dksc->sc_dev,
515 		(off_t)bp->b_rawblkno * secsize,
516 		(off_t)bp->b_bcount);
517 
518 	return error;
519 }
520 
521 int
522 dk_size(struct dk_softc *dksc, dev_t dev)
523 {
524 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
525 	struct	disklabel *lp;
526 	int	is_open;
527 	int	part;
528 	int	size;
529 
530 	if ((dksc->sc_flags & DKF_INITED) == 0)
531 		return -1;
532 
533 	part = DISKPART(dev);
534 	is_open = dksc->sc_dkdev.dk_openmask & (1 << part);
535 
536 	if (!is_open && dkd->d_open(dev, 0, S_IFBLK, curlwp))
537 		return -1;
538 
539 	lp = dksc->sc_dkdev.dk_label;
540 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
541 		size = -1;
542 	else
543 		size = lp->d_partitions[part].p_size *
544 		    (lp->d_secsize / DEV_BSIZE);
545 
546 	if (!is_open && dkd->d_close(dev, 0, S_IFBLK, curlwp))
547 		return -1;
548 
549 	return size;
550 }
551 
/*
 * Generic ioctl handler.  Write-requiring and initialization-requiring
 * commands are vetted first, the common disk_ioctl() handlers then get
 * a shot, and finally the label- and strategy-related commands are
 * handled here.  Returns 0, an errno, or whatever disk_ioctl() gave
 * back (other than EPASSTHROUGH).
 */
int
dk_ioctl(struct dk_softc *dksc, dev_t dev,
	    u_long cmd, void *data, int flag, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct	disklabel *lp;
	struct	disk *dk = &dksc->sc_dkdev;
#ifdef __HAVE_OLD_DISKLABEL
	struct	disklabel newlabel;
#endif
	int	error;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%lx)\n", __func__,
	    dksc->sc_xname, dksc, dev, cmd));

	/* ensure that the pseudo disk is open for writes for these commands */
	switch (cmd) {
	case DIOCSDINFO:
	case DIOCWDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCSDINFO:
	case ODIOCWDINFO:
#endif
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCSSTRATEGY:
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	/* ensure that the pseudo-disk is initialized for these */
	switch (cmd) {
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCGPARTINFO:
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCLWEDGES:
	case DIOCMWEDGES:
	case DIOCCACHESYNC:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
	case ODIOCSDINFO:
	case ODIOCWDINFO:
	case ODIOCGDEFLABEL:
#endif
		if ((dksc->sc_flags & DKF_INITED) == 0)
			return ENXIO;
	}

	/* Give the common disk ioctl handlers the first shot. */
	error = disk_ioctl(dk, dev, cmd, data, flag, l);
	if (error != EPASSTHROUGH)
		return error;
	else
		error = 0;

	switch (cmd) {
	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
#ifdef __HAVE_OLD_DISKLABEL
		/* Convert an old-style label into native format first. */
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, data, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)data;

		/* DKF_LABELLING lets I/O through the label area while set. */
		mutex_enter(&dk->dk_openlock);
		dksc->sc_flags |= DKF_LABELLING;

		error = setdisklabel(dksc->sc_dkdev.dk_label,
		    lp, 0, dksc->sc_dkdev.dk_cpulabel);
		if (error == 0) {
			/* "W" variants also write the label to the media. */
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			   )
				error = writedisklabel(DKLABELDEV(dev),
				    dkd->d_strategy, dksc->sc_dkdev.dk_label,
				    dksc->sc_dkdev.dk_cpulabel);
		}

		dksc->sc_flags &= ~DKF_LABELLING;
		mutex_exit(&dk->dk_openlock);
		break;

	case DIOCKLABEL:
		/* Keep (or drop) the in-core label across last close. */
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_KLABEL;
		else
			dksc->sc_flags &= ~DKF_KLABEL;
		break;

	case DIOCWLABEL:
		/* Allow (or forbid) writes to the label area. */
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_WLABEL;
		else
			dksc->sc_flags &= ~DKF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, (struct disklabel *)data);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(data, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCGSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;

		mutex_enter(&dksc->sc_iolock);
		if (dksc->sc_bufq != NULL)
			strlcpy(dks->dks_name,
			    bufq_getstrategyname(dksc->sc_bufq),
			    sizeof(dks->dks_name));
		else
			error = EINVAL;
		mutex_exit(&dksc->sc_iolock);
		dks->dks_paramlen = 0;
		break;
	    }

	case DIOCSSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;
		struct bufq_state *new;
		struct bufq_state *old;

		if (dks->dks_param != NULL) {
			return EINVAL;
		}
		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
		error = bufq_alloc(&new, dks->dks_name,
		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
		if (error) {
			return error;
		}
		/* Swap in the new queue, migrating any pending buffers. */
		mutex_enter(&dksc->sc_iolock);
		old = dksc->sc_bufq;
		if (old)
			bufq_move(new, old);
		dksc->sc_bufq = new;
		mutex_exit(&dksc->sc_iolock);
		if (old)
			bufq_free(old);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return error;
}
725 
726 /*
727  * dk_dump dumps all of physical memory into the partition specified.
728  * This requires substantially more framework than {s,w}ddump, and hence
729  * is probably much more fragile.
730  *
731  */
732 
733 #define DKFF_READYFORDUMP(x)	(((x) & DKF_READYFORDUMP) == DKF_READYFORDUMP)
734 static volatile int	dk_dumping = 0;
735 
736 /* ARGSUSED */
737 int
738 dk_dump(struct dk_softc *dksc, dev_t dev,
739     daddr_t blkno, void *vav, size_t size)
740 {
741 	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
742 	char *va = vav;
743 	struct disklabel *lp;
744 	struct partition *p;
745 	int part, towrt, nsects, sectoff, maxblkcnt, nblk;
746 	int maxxfer, rv = 0;
747 
748 	/*
749 	 * ensure that we consider this device to be safe for dumping,
750 	 * and that the device is configured.
751 	 */
752 	if (!DKFF_READYFORDUMP(dksc->sc_flags)) {
753 		DPRINTF(DKDB_DUMP, ("%s: bad dump flags 0x%x\n", __func__,
754 		    dksc->sc_flags));
755 		return ENXIO;
756 	}
757 
758 	/* ensure that we are not already dumping */
759 	if (dk_dumping)
760 		return EFAULT;
761 	dk_dumping = 1;
762 
763 	if (dkd->d_dumpblocks == NULL) {
764 		DPRINTF(DKDB_DUMP, ("%s: no dumpblocks\n", __func__));
765 		return ENXIO;
766 	}
767 
768 	/* device specific max transfer size */
769 	maxxfer = MAXPHYS;
770 	if (dkd->d_iosize != NULL)
771 		(*dkd->d_iosize)(dksc->sc_dev, &maxxfer);
772 
773 	/* Convert to disk sectors.  Request must be a multiple of size. */
774 	part = DISKPART(dev);
775 	lp = dksc->sc_dkdev.dk_label;
776 	if ((size % lp->d_secsize) != 0) {
777 		DPRINTF(DKDB_DUMP, ("%s: odd size %zu\n", __func__, size));
778 		return EFAULT;
779 	}
780 	towrt = size / lp->d_secsize;
781 	blkno = dbtob(blkno) / lp->d_secsize;   /* blkno in secsize units */
782 
783 	p = &lp->d_partitions[part];
784 	if (p->p_fstype != FS_SWAP) {
785 		DPRINTF(DKDB_DUMP, ("%s: bad fstype %d\n", __func__,
786 		    p->p_fstype));
787 		return ENXIO;
788 	}
789 	nsects = p->p_size;
790 	sectoff = p->p_offset;
791 
792 	/* Check transfer bounds against partition size. */
793 	if ((blkno < 0) || ((blkno + towrt) > nsects)) {
794 		DPRINTF(DKDB_DUMP, ("%s: out of bounds blkno=%jd, towrt=%d, "
795 		    "nsects=%d\n", __func__, (intmax_t)blkno, towrt, nsects));
796 		return EINVAL;
797 	}
798 
799 	/* Offset block number to start of partition. */
800 	blkno += sectoff;
801 
802 	/* Start dumping and return when done. */
803 	maxblkcnt = howmany(maxxfer, lp->d_secsize);
804 	while (towrt > 0) {
805 		nblk = min(maxblkcnt, towrt);
806 
807 		if ((rv = (*dkd->d_dumpblocks)(dksc->sc_dev, va, blkno, nblk))
808 		    != 0) {
809 			DPRINTF(DKDB_DUMP, ("%s: dumpblocks %d\n", __func__,
810 			    rv));
811 			return rv;
812 		}
813 
814 		towrt -= nblk;
815 		blkno += nblk;
816 		va += nblk * lp->d_secsize;
817 	}
818 
819 	dk_dumping = 0;
820 
821 	return 0;
822 }
823 
/* ARGSUSED */
/*
 * Synthesize a default disklabel into *lp from the probed geometry:
 * a single raw partition covering the whole unit.  The driver gets a
 * chance to post-process the result through its d_label hook; the
 * checksum is computed last so it covers those changes too.
 */
void
dk_getdefaultlabel(struct dk_softc *dksc, struct disklabel *lp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;

	memset(lp, 0, sizeof(*lp));

	/* d_secperunit is only 32 bits wide; clamp larger units. */
	if (dg->dg_secperunit > UINT32_MAX)
		lp->d_secperunit = UINT32_MAX;
	else
		lp->d_secperunit = dg->dg_secperunit;
	lp->d_secsize = dg->dg_secsize;
	lp->d_nsectors = dg->dg_nsectors;
	lp->d_ntracks = dg->dg_ntracks;
	lp->d_ncylinders = dg->dg_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	strlcpy(lp->d_typename, dksc->sc_xname, sizeof(lp->d_typename));
	lp->d_type = dksc->sc_dtype;
	strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;

	/* Let the driver refine the label before checksumming. */
	if (dkd->d_label)
		dkd->d_label(dksc->sc_dev, lp);

	lp->d_checksum = dkcksum(lp);
}
863 
/* ARGSUSED */
/*
 * Load the in-core disklabel for this unit: start from the default
 * label, try to read the on-disk one, and fall back to a minimal
 * fabricated label on failure.  With DKF_LABELSANITY set, the result
 * is cross-checked against the probed geometry and mismatches are
 * logged to the console.
 */
void
dk_getdisklabel(struct dk_softc *dksc, dev_t dev)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct	 disklabel *lp = dksc->sc_dkdev.dk_label;
	struct	 cpu_disklabel *clp = dksc->sc_dkdev.dk_cpulabel;
	struct   disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct	 partition *pp;
	int	 i;
	const char	*errstring;

	memset(clp, 0x0, sizeof(*clp));
	dk_getdefaultlabel(dksc, lp);
	errstring = readdisklabel(DKLABELDEV(dev), dkd->d_strategy,
	    dksc->sc_dkdev.dk_label, dksc->sc_dkdev.dk_cpulabel);
	if (errstring) {
		/* No usable on-disk label: fabricate a minimal one. */
		dk_makedisklabel(dksc);
		if (dksc->sc_flags & DKF_WARNLABEL)
			printf("%s: %s\n", dksc->sc_xname, errstring);
		return;
	}

	if ((dksc->sc_flags & DKF_LABELSANITY) == 0)
		return;

	/* Sanity check */
	/*
	 * d_secperunit saturates at UINT32_MAX (see
	 * dk_getdefaultlabel()), so for larger disks only require
	 * that the label not exceed the real size.
	 */
	if (lp->d_secperunit < UINT32_MAX ?
		lp->d_secperunit != dg->dg_secperunit :
		lp->d_secperunit > dg->dg_secperunit)
		printf("WARNING: %s: total sector size in disklabel (%ju) "
		    "!= the size of %s (%ju)\n", dksc->sc_xname,
		    (uintmax_t)lp->d_secperunit, dksc->sc_xname,
		    (uintmax_t)dg->dg_secperunit);

	for (i=0; i < lp->d_npartitions; i++) {
		pp = &lp->d_partitions[i];
		if (pp->p_offset + pp->p_size > dg->dg_secperunit)
			printf("WARNING: %s: end of partition `%c' exceeds "
			    "the size of %s (%ju)\n", dksc->sc_xname,
			    'a' + i, dksc->sc_xname,
			    (uintmax_t)dg->dg_secperunit);
	}
}
908 
909 /* ARGSUSED */
910 static void
911 dk_makedisklabel(struct dk_softc *dksc)
912 {
913 	struct	disklabel *lp = dksc->sc_dkdev.dk_label;
914 
915 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
916 	strlcpy(lp->d_packname, "default label", sizeof(lp->d_packname));
917 	lp->d_checksum = dkcksum(lp);
918 }
919 
920 /* This function is taken from ccd.c:1.76  --rcd */
921 
922 /*
923  * XXX this function looks too generic for dksubr.c, shouldn't we
924  *     put it somewhere better?
925  */
926 
927 /*
928  * Lookup the provided name in the filesystem.  If the file exists,
929  * is a valid block device, and isn't being used by anyone else,
930  * set *vpp to the file's vnode.
931  */
/*
 * See the block comment above: resolve "pb" to a vnode, require a
 * block device, and hand back an anonymous device vnode in *vpp that
 * is open FREAD|FWRITE.  Returns 0 or an errno.
 */
int
dk_lookup(struct pathbuf *pb, struct lwp *l, struct vnode **vpp)
{
	struct nameidata nd;
	struct vnode *vp;
	int     error;

	if (l == NULL)
		return ESRCH;	/* Is ESRCH the best choice? */

	NDINIT(&nd, LOOKUP, FOLLOW, pb);
	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
		DPRINTF((DKDB_FOLLOW|DKDB_INIT),
		    ("%s: vn_open error = %d\n", __func__, error));
		return error;
	}

	/* vn_open() returned the vnode locked; "out" must unlock it. */
	vp = nd.ni_vp;
	if (vp->v_type != VBLK) {
		error = ENOTBLK;
		goto out;
	}

	/* Reopen as anonymous vnode to protect against forced unmount. */
	if ((error = bdevvp(vp->v_rdev, vpp)) != 0)
		goto out;
	VOP_UNLOCK(vp);
	if ((error = vn_close(vp, FREAD | FWRITE, l->l_cred)) != 0) {
		vrele(*vpp);
		return error;
	}
	if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
		vrele(*vpp);
		return error;
	}
	/* Record the write open on the new vnode. */
	mutex_enter((*vpp)->v_interlock);
	(*vpp)->v_writecount++;
	mutex_exit((*vpp)->v_interlock);

	IFDEBUG(DKDB_VNODE, vprint("dk_lookup: vnode info", *vpp));

	return 0;
out:
	/* Error path: unlock and close the originally opened vnode. */
	VOP_UNLOCK(vp);
	(void) vn_close(vp, FREAD | FWRITE, l->l_cred);
	return error;
}
979 
980 MODULE(MODULE_CLASS_MISC, dk_subr, NULL);
981 
982 static int
983 dk_subr_modcmd(modcmd_t cmd, void *arg)
984 {
985 	switch (cmd) {
986 	case MODULE_CMD_INIT:
987 	case MODULE_CMD_FINI:
988 		return 0;
989 	case MODULE_CMD_STAT:
990 	case MODULE_CMD_AUTOUNLOAD:
991 	default:
992 		return ENOTTY;
993 	}
994 }
995