1 /*	$NetBSD: ld.c,v 1.7 2001/02/04 17:15:37 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran and Charles M. Hannum.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Disk driver for use by RAID controllers.
41  */
42 
43 #include "rnd.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/device.h>
49 #include <sys/queue.h>
50 #include <sys/proc.h>
51 #include <sys/buf.h>
52 #include <sys/endian.h>
53 #include <sys/disklabel.h>
54 #include <sys/disk.h>
55 #include <sys/dkio.h>
56 #include <sys/stat.h>
57 #include <sys/lock.h>
58 #include <sys/conf.h>
59 #include <sys/fcntl.h>
60 #include <sys/vnode.h>
61 #include <sys/syslog.h>
62 #if NRND > 0
63 #include <sys/rnd.h>
64 #endif
65 
66 #include <dev/ldvar.h>
67 
68 static void	ldgetdefaultlabel(struct ld_softc *, struct disklabel *);
69 static void	ldgetdisklabel(struct ld_softc *);
70 static int	ldlock(struct ld_softc *);
71 static void	ldminphys(struct buf *bp);
72 static void	ldshutdown(void *);
73 static int	ldstart(struct ld_softc *, struct buf *);
74 static void	ldunlock(struct ld_softc *);
75 
76 extern struct	cfdriver ld_cd;
77 
78 static struct	dkdriver lddkdriver = { ldstrategy };
79 static void	*ld_sdh;
80 
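/*
 * Finish attaching a logical disk.  The controller-specific front-end is
 * expected to have filled in the geometry, the transfer limits and the
 * sc_start handler (and the optional sc_dump/sc_flush handlers) in the
 * softc before calling this.
 */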
81 void
82 ldattach(struct ld_softc *sc)
83 {
84 	char buf[9];
85 
86 	if ((sc->sc_flags & LDF_ENABLED) == 0) {
87 		printf("%s: disabled\n", sc->sc_dv.dv_xname);
88 		return;
89 	}
90 
91 	/* Initialise and attach the disk structure. */
92 	sc->sc_dk.dk_driver = &lddkdriver;
93 	sc->sc_dk.dk_name = sc->sc_dv.dv_xname;
94 	disk_attach(&sc->sc_dk);
95 
96 	if (sc->sc_maxxfer > MAXPHYS)
97 		sc->sc_maxxfer = MAXPHYS;
98 
99 	format_bytes(buf, sizeof(buf), (u_int64_t)sc->sc_secperunit *
100 	    sc->sc_secsize);
101 	printf("%s: %s, %d cyl, %d head, %d sec, %d bytes/sect x %d sectors\n",
102 	    sc->sc_dv.dv_xname, buf, sc->sc_ncylinders, sc->sc_nheads,
103 	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);
104 
105 #if NRND > 0
106 	/* Attach the device into the rnd source list. */
107 	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dv.dv_xname,
108 	    RND_TYPE_DISK, 0);
109 #endif
110 
111 	/* Set the `shutdownhook'. */
112 	if (ld_sdh == NULL)
113 		ld_sdh = shutdownhook_establish(ldshutdown, NULL);
114 	BUFQ_INIT(&sc->sc_bufq);
115 }
116 
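/*
 * Adjust the maximum number of outstanding commands.  If more commands
 * than the new limit are currently queued with the hardware, wait (for up
 * to 30 seconds) for the excess to drain.
 */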
117 int
118 ldadjqparam(struct ld_softc *sc, int max)
119 {
120 	int s, rv;
121 
122 	s = splbio();
123 	sc->sc_maxqueuecnt = max;
124 	if (sc->sc_queuecnt > max) {
125 		sc->sc_flags |= LDF_DRAIN;
126 		rv = tsleep(&sc->sc_queuecnt, PRIBIO, "lddrn", 30 * hz);
127 		sc->sc_flags &= ~LDF_DRAIN;
128 	} else
129 		rv = 0;
130 	splx(s);
131 
132 	return (rv);
133 }
134 
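/*
 * Begin detaching the unit: refuse if any partition is open and the detach
 * is not forced; otherwise mark the unit as detaching and drain the
 * hardware queue by dropping the queue limit to zero.
 */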
135 int
136 ldbegindetach(struct ld_softc *sc, int flags)
137 {
138 	int s, rv;
139 
140 	if ((sc->sc_flags & LDF_ENABLED) == 0)
141 		return (0);
142 
143 	if ((flags & DETACH_FORCE) == 0 && sc->sc_dk.dk_openmask != 0)
144 		return (EBUSY);
145 
146 	s = splbio();
147 	sc->sc_flags |= LDF_DETACH;
148 	rv = ldadjqparam(sc, 0);
149 	splx(s);
150 
151 	return (rv);
152 }
153 
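/*
 * Complete the detach: wait for outstanding commands, abort any buffers
 * still queued, revoke vnodes for open instances, detach the disk
 * structure and flush the controller's cache.
 */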
154 void
155 ldenddetach(struct ld_softc *sc)
156 {
157 	struct buf *bp;
158 	int s, bmaj, cmaj, mn;
159 
160 	if ((sc->sc_flags & LDF_ENABLED) == 0)
161 		return;
162 
163 	/* Wait for commands queued with the hardware to complete. */
164 	if (sc->sc_queuecnt != 0)
165 		if (tsleep(&sc->sc_queuecnt, PRIBIO, "lddtch", 30 * hz))
166 			printf("%s: not drained\n", sc->sc_dv.dv_xname);
167 
168 	/* Locate the major numbers. */
169 	for (bmaj = 0; bmaj < nblkdev; bmaj++)
170 		if (bdevsw[bmaj].d_open == ldopen)
171 			break;
172 	for (cmaj = 0; cmaj < nchrdev; cmaj++)
173 		if (cdevsw[cmaj].d_open == ldopen)
174 			break;
175 
176 	/* Kill off any queued buffers. */
177 	s = splbio();
178 	while ((bp = BUFQ_FIRST(&sc->sc_bufq)) != NULL) {
179 		BUFQ_REMOVE(&sc->sc_bufq, bp);
180 		bp->b_error = EIO;
181 		bp->b_flags |= B_ERROR;
182 		bp->b_resid = bp->b_bcount;
183 		biodone(bp);
184 	}
185 	splx(s);
186 
187 	/* Nuke the vnodes for any open instances. */
188 	mn = sc->sc_dv.dv_unit * MAXPARTITIONS;
189 	vdevgone(bmaj, mn, mn + (MAXPARTITIONS - 1), VBLK);
190 	vdevgone(cmaj, mn, mn + (MAXPARTITIONS - 1), VCHR);
191 
192 	/* Detach from the disk list. */
193 	disk_detach(&sc->sc_dk);
194 
195 #if NRND > 0
196 	/* Unhook the entropy source. */
197 	rnd_detach_source(&sc->sc_rnd_source);
198 #endif
199 
200 	/* Flush the device's cache. */
201 	if (sc->sc_flush != NULL)
202 		if ((*sc->sc_flush)(sc) != 0)
203 			printf("%s: unable to flush cache\n",
204 			    sc->sc_dv.dv_xname);
205 }
206 
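/*
 * Shutdown hook: flush the cache on every attached unit that provides a
 * flush handler before the system goes down.
 */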
207 static void
208 ldshutdown(void *cookie)
209 {
210 	struct ld_softc *sc;
211 	int i;
212 
213 	for (i = 0; i < ld_cd.cd_ndevs; i++) {
214 		if ((sc = device_lookup(&ld_cd, i)) == NULL)
215 			continue;
216 		if (sc->sc_flush != NULL && (*sc->sc_flush)(sc) != 0)
217 			printf("%s: unable to flush cache\n",
218 			    sc->sc_dv.dv_xname);
219 	}
220 }
221 
222 int
223 ldopen(dev_t dev, int flags, int fmt, struct proc *p)
224 {
225 	struct ld_softc *sc;
226 	int error, unit, part;
227 
228 	unit = DISKUNIT(dev);
229 	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
230 		return (ENXIO);
231 	if ((sc->sc_flags & LDF_ENABLED) == 0)
232 		return (ENODEV);
233 	part = DISKPART(dev);
234 	if ((error = ldlock(sc)) != 0)
		return (error);
235 
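	/* On the first open of the unit, (re)load the disklabel. */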
236 	if (sc->sc_dk.dk_openmask == 0)
237 		ldgetdisklabel(sc);
238 
239 	/* Check that the partition exists. */
240 	if (part != RAW_PART && (part >= sc->sc_dk.dk_label->d_npartitions ||
241 	    sc->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
242 		ldunlock(sc);
243 		return (ENXIO);
244 	}
245 
246 	/* Record this open in the per-format open masks. */
247 	switch (fmt) {
248 	case S_IFCHR:
249 		sc->sc_dk.dk_copenmask |= (1 << part);
250 		break;
251 	case S_IFBLK:
252 		sc->sc_dk.dk_bopenmask |= (1 << part);
253 		break;
254 	}
255 	sc->sc_dk.dk_openmask =
256 	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;
257 
258 	ldunlock(sc);
259 	return (0);
260 }
261 
262 int
263 ldclose(dev_t dev, int flags, int fmt, struct proc *p)
264 {
265 	struct ld_softc *sc;
266 	int error, part, unit;
267 
268 	unit = DISKUNIT(dev);
269 	part = DISKPART(dev);
270 	sc = device_lookup(&ld_cd, unit);
271 	if ((error = ldlock(sc)) != 0)
		return (error);
272 
273 	switch (fmt) {
274 	case S_IFCHR:
275 		sc->sc_dk.dk_copenmask &= ~(1 << part);
276 		break;
277 	case S_IFBLK:
278 		sc->sc_dk.dk_bopenmask &= ~(1 << part);
279 		break;
280 	}
281 	sc->sc_dk.dk_openmask =
282 	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;
283 
284 	if (sc->sc_dk.dk_openmask == 0 && sc->sc_flush != NULL)
285 		if ((*sc->sc_flush)(sc) != 0)
286 			printf("%s: unable to flush cache\n",
287 			    sc->sc_dv.dv_xname);
288 
289 	ldunlock(sc);
290 	return (0);
291 }
292 
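/*
 * Raw character device I/O: hand the request to physio(), which breaks it
 * into chunks bounded by ldminphys().
 */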
293 int
294 ldread(dev_t dev, struct uio *uio, int ioflag)
295 {
296 
297 	return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
298 }
299 
300 int
301 ldwrite(dev_t dev, struct uio *uio, int ioflag)
302 {
303 
304 	return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
305 }
306 
307 int
308 ldioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, struct proc *p)
309 {
310 	struct ld_softc *sc;
311 	int part, unit, error;
312 #ifdef __HAVE_OLD_DISKLABEL
313 	struct disklabel newlabel;
314 #endif
315 	struct disklabel *lp;
316 
317 	unit = DISKUNIT(dev);
318 	part = DISKPART(dev);
319 	sc = device_lookup(&ld_cd, unit);
320 	error = 0;
321 
322 	switch (cmd) {
323 	case DIOCGDINFO:
324 		memcpy(addr, sc->sc_dk.dk_label, sizeof(struct disklabel));
325 		return (0);
326 
327 #ifdef __HAVE_OLD_DISKLABEL
328 	case ODIOCGDINFO:
329 		newlabel = *(sc->sc_dk.dk_label);
330 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
331 			return ENOTTY;
332 		memcpy(addr, &newlabel, sizeof(struct olddisklabel));
333 		return (0);
334 #endif
335 
336 	case DIOCGPART:
337 		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
338 		((struct partinfo *)addr)->part =
339 		    &sc->sc_dk.dk_label->d_partitions[part];
340 		break;
341 
342 	case DIOCWDINFO:
343 	case DIOCSDINFO:
344 #ifdef __HAVE_OLD_DISKLABEL
345 	case ODIOCWDINFO:
346 	case ODIOCSDINFO:
347 
348 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
349 			memset(&newlabel, 0, sizeof newlabel);
350 			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
351 			lp = &newlabel;
352 		} else
353 #endif
354 		lp = (struct disklabel *)addr;
355 
356 		if ((flag & FWRITE) == 0)
357 			return (EBADF);
358 
359 		if ((error = ldlock(sc)) != 0)
360 			return (error);
361 		sc->sc_flags |= LDF_LABELLING;
362 
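		/*
		 * Validate and install the new in-core label; for
		 * DIOCWDINFO, also write it back to the raw partition.
		 */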
363 		error = setdisklabel(sc->sc_dk.dk_label,
364 		    lp, /*sc->sc_dk.dk_openmask : */0,
365 		    sc->sc_dk.dk_cpulabel);
366 		if (error == 0 && (cmd == DIOCWDINFO
367 #ifdef __HAVE_OLD_DISKLABEL
368 		    || cmd == ODIOCWDINFO
369 #endif
370 		    ))
371 			error = writedisklabel(
372 			    MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
373 			    ldstrategy, sc->sc_dk.dk_label,
374 			    sc->sc_dk.dk_cpulabel);
375 
376 		sc->sc_flags &= ~LDF_LABELLING;
377 		ldunlock(sc);
378 		break;
379 
380 	case DIOCWLABEL:
381 		if ((flag & FWRITE) == 0)
382 			return (EBADF);
383 		if (*(int *)addr)
384 			sc->sc_flags |= LDF_WLABEL;
385 		else
386 			sc->sc_flags &= ~LDF_WLABEL;
387 		break;
388 
389 	case DIOCGDEFLABEL:
390 		ldgetdefaultlabel(sc, (struct disklabel *)addr);
391 		break;
392 
393 #ifdef __HAVE_OLD_DISKLABEL
394 	case ODIOCGDEFLABEL:
395 		ldgetdefaultlabel(sc, &newlabel);
396 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
397 			return ENOTTY;
398 		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
399 		break;
400 #endif
401 
402 	default:
403 		error = ENOTTY;
404 		break;
405 	}
406 
407 	return (error);
408 }
409 
410 void
411 ldstrategy(struct buf *bp)
412 {
413 	struct ld_softc *sc;
414 	int s;
415 
416 	sc = device_lookup(&ld_cd, DISKUNIT(bp->b_dev));
417 
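	/*
	 * If the controller's queue is full, defer the transfer; it will
	 * be restarted from lddone() when a command completes.
	 */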
418 	s = splbio();
419 	if (sc->sc_queuecnt >= sc->sc_maxqueuecnt) {
420 		BUFQ_INSERT_TAIL(&sc->sc_bufq, bp);
421 		splx(s);
422 		return;
423 	}
424 	splx(s);
425 	ldstart(sc, bp);
426 }
427 
428 static int
429 ldstart(struct ld_softc *sc, struct buf *bp)
430 {
431 	struct disklabel *lp;
432 	int part, s, rv;
433 
434 	if ((sc->sc_flags & LDF_DETACH) != 0) {
435 		bp->b_error = EIO;
436 		bp->b_flags |= B_ERROR;
437 		bp->b_resid = bp->b_bcount;
438 		biodone(bp);
439 		return (-1);
440 	}
441 
442 	part = DISKPART(bp->b_dev);
443 	lp = sc->sc_dk.dk_label;
444 
445 	/*
446 	 * The transfer must be a whole number of blocks and the offset must
447 	 * not be negative.
448 	 */
449 	if ((bp->b_bcount % lp->d_secsize) != 0 || bp->b_blkno < 0) {
450 		bp->b_flags |= B_ERROR;
451 		biodone(bp);
452 		return (-1);
453 	}
454 
455 	/*
456 	 * If it's a null transfer, return.
457 	 */
458 	if (bp->b_bcount == 0) {
459 		bp->b_resid = bp->b_bcount;
460 		biodone(bp);
461 		return (-1);
462 	}
463 
464 	/*
465 	 * Do bounds checking and adjust the transfer.  If error, process.
466 	 * If past the end of partition, just return.
467 	 */
468 	if (part != RAW_PART &&
469 	    bounds_check_with_label(bp, lp,
470 	    (sc->sc_flags & (LDF_WLABEL | LDF_LABELLING)) != 0) <= 0) {
471 		bp->b_resid = bp->b_bcount;
472 		biodone(bp);
473 		return (-1);
474 	}
475 
476 	/*
477 	 * Convert the logical block number to a physical one and put it in
478 	 * terms of the device's logical block size.
479 	 */
480 	if (lp->d_secsize >= DEV_BSIZE)
481 		bp->b_rawblkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
482 	else
483 		bp->b_rawblkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
484 
485 	if (part != RAW_PART)
486 		bp->b_rawblkno += lp->d_partitions[part].p_offset;
487 
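	/* Mark the unit busy and account for the new outstanding command. */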
488 	s = splbio();
489 	disk_busy(&sc->sc_dk);
490 	sc->sc_queuecnt++;
491 	splx(s);
492 
493 	if ((rv = (*sc->sc_start)(sc, bp)) != 0) {
494 		bp->b_error = rv;
495 		bp->b_flags |= B_ERROR;
496 		bp->b_resid = bp->b_bcount;
497 		s = splbio();
498 		lddone(sc, bp);
499 		splx(s);
500 	}
501 
502 	return (0);
503 }
504 
505 void
506 lddone(struct ld_softc *sc, struct buf *bp)
507 {
508 
509 	if ((bp->b_flags & B_ERROR) != 0) {
510 		diskerr(bp, "ld", "error", LOG_PRINTF, 0, sc->sc_dk.dk_label);
511 		printf("\n");
512 	}
513 
514 	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid);
515 #if NRND > 0
516 	rnd_add_uint32(&sc->sc_rnd_source, bp->b_rawblkno);
517 #endif
518 	biodone(bp);
519 
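	/*
	 * A command slot has been freed: wake anybody waiting for the
	 * queue to drain and try to restart deferred transfers.
	 */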
520 	if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
521 		if ((sc->sc_flags & LDF_DRAIN) != 0)
522 			wakeup(&sc->sc_queuecnt);
523 		while ((bp = BUFQ_FIRST(&sc->sc_bufq)) != NULL) {
524 			BUFQ_REMOVE(&sc->sc_bufq, bp);
525 			if (!ldstart(sc, bp))
526 				break;
527 		}
528 	}
529 }
530 
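/*
 * Return the size of the given partition in DEV_BSIZE units, used when
 * configuring swap and dump devices.  Only FS_SWAP partitions are reported.
 */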
531 int
532 ldsize(dev_t dev)
533 {
534 	struct ld_softc *sc;
535 	int part, unit, omask, size;
536 
537 	unit = DISKUNIT(dev);
538 	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
539 		return (ENODEV);
540 	if ((sc->sc_flags & LDF_ENABLED) == 0)
541 		return (ENODEV);
542 	part = DISKPART(dev);
543 
544 	omask = sc->sc_dk.dk_openmask & (1 << part);
545 
546 	if (omask == 0 && ldopen(dev, 0, S_IFBLK, NULL) != 0)
547 		return (-1);
548 	else if (sc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
549 		size = -1;
550 	else
551 		size = sc->sc_dk.dk_label->d_partitions[part].p_size *
552 		    (sc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
553 	if (omask == 0 && ldclose(dev, 0, S_IFBLK, NULL) != 0)
554 		return (-1);
555 
556 	return (size);
557 }
558 
559 /*
560  * Load the label information from the specified device.
561  */
562 static void
563 ldgetdisklabel(struct ld_softc *sc)
564 {
565 	const char *errstring;
566 
567 	ldgetdefaultlabel(sc, sc->sc_dk.dk_label);
568 
569 	/* Call the generic disklabel extraction routine. */
570 	errstring = readdisklabel(MAKEDISKDEV(0, sc->sc_dv.dv_unit, RAW_PART),
571 	    ldstrategy, sc->sc_dk.dk_label, sc->sc_dk.dk_cpulabel);
572 	if (errstring != NULL)
573 		printf("%s: %s\n", sc->sc_dv.dv_xname, errstring);
574 }
575 
576 /*
577  * Construct a fictitious label.
578  */
579 static void
580 ldgetdefaultlabel(struct ld_softc *sc, struct disklabel *lp)
581 {
582 
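	/*
	 * Build a label from the geometry handed to us by the controller,
	 * with a single raw partition covering the whole unit.
	 */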
583 	memset(lp, 0, sizeof(struct disklabel));
584 
585 	lp->d_secsize = sc->sc_secsize;
586 	lp->d_ntracks = sc->sc_nheads;
587 	lp->d_nsectors = sc->sc_nsectors;
588 	lp->d_ncylinders = sc->sc_ncylinders;
589 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
590 	lp->d_type = DTYPE_LD;
591 	strcpy(lp->d_typename, "unknown");
592 	strcpy(lp->d_packname, "fictitious");
593 	lp->d_secperunit = sc->sc_secperunit;
594 	lp->d_rpm = 7200;
595 	lp->d_interleave = 1;
596 	lp->d_flags = 0;
597 
598 	lp->d_partitions[RAW_PART].p_offset = 0;
599 	lp->d_partitions[RAW_PART].p_size =
600 	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
601 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
602 	lp->d_npartitions = RAW_PART + 1;
603 
604 	lp->d_magic = DISKMAGIC;
605 	lp->d_magic2 = DISKMAGIC;
606 	lp->d_checksum = dkcksum(lp);
607 }
608 
609 /*
610  * Wait interruptibly for an exclusive lock.
611  *
612  * XXX Several drivers do this; it should be abstracted and made MP-safe.
613  */
614 static int
615 ldlock(struct ld_softc *sc)
616 {
617 	int error;
618 
619 	while ((sc->sc_flags & LDF_LKHELD) != 0) {
620 		sc->sc_flags |= LDF_LKWANTED;
621 		if ((error = tsleep(sc, PRIBIO | PCATCH, "ldlck", 0)) != 0)
622 			return (error);
623 	}
624 	sc->sc_flags |= LDF_LKHELD;
625 	return (0);
626 }
627 
628 /*
629  * Unlock and wake up any waiters.
630  */
631 static void
632 ldunlock(struct ld_softc *sc)
633 {
634 
635 	sc->sc_flags &= ~LDF_LKHELD;
636 	if ((sc->sc_flags & LDF_LKWANTED) != 0) {
637 		sc->sc_flags &= ~LDF_LKWANTED;
638 		wakeup(sc);
639 	}
640 }
641 
642 /*
643  * Take a dump.
644  */
645 int
646 lddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
647 {
648 	struct ld_softc *sc;
649 	struct disklabel *lp;
650 	int unit, part, nsects, sectoff, towrt, nblk, maxblkcnt, rv;
651 	static int dumping;
652 
653 	unit = DISKUNIT(dev);
654 	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
655 		return (ENXIO);
656 	if ((sc->sc_flags & LDF_ENABLED) == 0)
657 		return (ENODEV);
658 	if (sc->sc_dump == NULL)
659 		return (ENXIO);
660 
661 	/* Check if recursive dump; if so, punt. */
662 	if (dumping)
663 		return (EFAULT);
664 	dumping = 1;
665 
666 	/* Convert to disk sectors.  Request must be a multiple of the sector size. */
667 	part = DISKPART(dev);
668 	lp = sc->sc_dk.dk_label;
669 	if ((size % lp->d_secsize) != 0) {
		dumping = 0;
670 		return (EFAULT);
	}
671 	towrt = size / lp->d_secsize;
672 	blkno = dbtob(blkno) / lp->d_secsize;	/* blkno in DEV_BSIZE units */
673 
674 	nsects = lp->d_partitions[part].p_size;
675 	sectoff = lp->d_partitions[part].p_offset;
676 
677 	/* Check transfer bounds against partition size. */
678 	if ((blkno < 0) || ((blkno + towrt) > nsects)) {
		dumping = 0;
679 		return (EINVAL);
	}
680 
681 	/* Offset block number to start of partition. */
682 	blkno += sectoff;
683 
684 	/* Start dumping and return when done. */
685 	maxblkcnt = sc->sc_maxxfer / sc->sc_secsize - 1;
686 	while (towrt > 0) {
687 		nblk = min(maxblkcnt, towrt);
688 
689 		if ((rv = (*sc->sc_dump)(sc, va, blkno, nblk)) != 0) {
			dumping = 0;
690 			return (rv);
		}
691 
692 		towrt -= nblk;
693 		blkno += nblk;
694 		va += nblk * sc->sc_secsize;
695 	}
696 
697 	dumping = 0;
698 	return (0);
699 }
700 
701 /*
702  * Adjust the size of a transfer.
703  */
704 static void
705 ldminphys(struct buf *bp)
706 {
707 	struct ld_softc *sc;
708 
709 	sc = device_lookup(&ld_cd, DISKUNIT(bp->b_dev));
710 
711 	if (bp->b_bcount > sc->sc_maxxfer)
712 		bp->b_bcount = sc->sc_maxxfer;
713 	minphys(bp);
714 }
715