1 /*	$NetBSD: vnd.c,v 1.220 2012/03/26 16:28:08 hannken Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright (c) 1988 University of Utah.
34  * Copyright (c) 1990, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * This code is derived from software contributed to Berkeley by
38  * the Systems Programming Group of the University of Utah Computer
39  * Science Department.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  * from: Utah $Hdr: vn.c 1.13 94/04/02$
66  *
67  *	@(#)vn.c	8.9 (Berkeley) 5/14/95
68  */
69 
70 /*
71  * Vnode disk driver.
72  *
73  * Block/character interface to a vnode.  Allows one to treat a file
74  * as a disk (e.g. build a filesystem in it, mount it, etc.).
75  *
76  * NOTE 1: If the vnode supports the VOP_BMAP and VOP_STRATEGY operations,
77  * this uses them to avoid distorting the local buffer cache.  If those
78  * block-level operations are not available, this falls back to the regular
79  * read and write calls.  Using these may distort the cache in some cases
80  * but better have the driver working than preventing it to work on file
81  * systems where the block-level operations are not implemented for
82  * whatever reason.
83  *
84  * NOTE 2: There is a security issue involved with this driver.
85  * Once mounted, all access to the contents of the "mapped" file via
86  * the special file is controlled by the permissions on the special
87  * file; the protection of the mapped file is ignored (effectively,
88  * by using root credentials in all transactions).
89  *
90  * NOTE 3: Doesn't interact with leases, should it?
91  */
92 
93 #include <sys/cdefs.h>
94 __KERNEL_RCSID(0, "$NetBSD: vnd.c,v 1.220 2012/03/26 16:28:08 hannken Exp $");
95 
96 #if defined(_KERNEL_OPT)
97 #include "opt_vnd.h"
98 #include "opt_compat_netbsd.h"
99 #endif
100 
101 #include <sys/param.h>
102 #include <sys/systm.h>
103 #include <sys/namei.h>
104 #include <sys/proc.h>
105 #include <sys/kthread.h>
106 #include <sys/errno.h>
107 #include <sys/buf.h>
108 #include <sys/bufq.h>
109 #include <sys/malloc.h>
110 #include <sys/ioctl.h>
111 #include <sys/disklabel.h>
112 #include <sys/device.h>
113 #include <sys/disk.h>
114 #include <sys/stat.h>
115 #include <sys/mount.h>
116 #include <sys/vnode.h>
117 #include <sys/file.h>
118 #include <sys/uio.h>
119 #include <sys/conf.h>
120 #include <sys/kauth.h>
121 
122 #include <net/zlib.h>
123 
124 #include <miscfs/genfs/genfs.h>
125 #include <miscfs/specfs/specdev.h>
126 
127 #include <dev/dkvar.h>
128 #include <dev/vndvar.h>
129 
130 #include <prop/proplib.h>
131 
132 #if defined(VNDDEBUG) && !defined(DEBUG)
133 #define DEBUG
134 #endif
135 
136 #ifdef DEBUG
137 int dovndcluster = 1;
138 #define VDB_FOLLOW	0x01
139 #define VDB_INIT	0x02
140 #define VDB_IO		0x04
141 #define VDB_LABEL	0x08
142 int vnddebug = 0x00;
143 #endif
144 
145 #define vndunit(x)	DISKUNIT(x)
146 
147 struct vndxfer {
148 	struct buf vx_buf;
149 	struct vnd_softc *vx_vnd;
150 };
151 #define	VND_BUFTOXFER(bp)	((struct vndxfer *)(void *)bp)
152 
153 #define VND_GETXFER(vnd)	pool_get(&(vnd)->sc_vxpool, PR_WAITOK)
154 #define VND_PUTXFER(vnd, vx)	pool_put(&(vnd)->sc_vxpool, (vx))
155 
156 #define VNDLABELDEV(dev) \
157     (MAKEDISKDEV(major((dev)), vndunit((dev)), RAW_PART))
158 
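/*
 * Limit on the number of requests that may be queued per unit when the
 * vn_rdwr() path is in use; see vndstrategy() and vndthread().
 */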
159 #define	VND_MAXPENDING(vnd)	((vnd)->sc_maxactive * 4)
160 
161 /* called by main() at boot time */
162 void	vndattach(int);
163 
164 static void	vndclear(struct vnd_softc *, int);
165 static int	vnddoclear(struct vnd_softc *, int, int, bool);
166 static int	vndsetcred(struct vnd_softc *, kauth_cred_t);
167 static void	vndthrottle(struct vnd_softc *, struct vnode *);
168 static void	vndiodone(struct buf *);
169 #if 0
170 static void	vndshutdown(void);
171 #endif
172 
173 static void	vndgetdefaultlabel(struct vnd_softc *, struct disklabel *);
174 static void	vndgetdisklabel(dev_t, struct vnd_softc *);
175 
176 static int	vndlock(struct vnd_softc *);
177 static void	vndunlock(struct vnd_softc *);
178 #ifdef VND_COMPRESSION
179 static void	compstrategy(struct buf *, off_t);
180 static void	*vnd_alloc(void *, u_int, u_int);
181 static void	vnd_free(void *, void *);
182 #endif /* VND_COMPRESSION */
183 
184 static void	vndthread(void *);
185 static bool	vnode_has_op(const struct vnode *, int);
186 static void	handle_with_rdwr(struct vnd_softc *, const struct buf *,
187 		    struct buf *);
188 static void	handle_with_strategy(struct vnd_softc *, const struct buf *,
189 		    struct buf *);
190 static void	vnd_set_properties(struct vnd_softc *);
191 
192 static dev_type_open(vndopen);
193 static dev_type_close(vndclose);
194 static dev_type_read(vndread);
195 static dev_type_write(vndwrite);
196 static dev_type_ioctl(vndioctl);
197 static dev_type_strategy(vndstrategy);
198 static dev_type_dump(vnddump);
199 static dev_type_size(vndsize);
200 
201 const struct bdevsw vnd_bdevsw = {
202 	vndopen, vndclose, vndstrategy, vndioctl, vnddump, vndsize, D_DISK
203 };
204 
205 const struct cdevsw vnd_cdevsw = {
206 	vndopen, vndclose, vndread, vndwrite, vndioctl,
207 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
208 };
209 
210 static int	vnd_match(device_t, cfdata_t, void *);
211 static void	vnd_attach(device_t, device_t, void *);
212 static int	vnd_detach(device_t, int);
213 
214 CFATTACH_DECL3_NEW(vnd, sizeof(struct vnd_softc),
215     vnd_match, vnd_attach, vnd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
216 extern struct cfdriver vnd_cd;
217 
218 static struct vnd_softc	*vnd_spawn(int);
219 int	vnd_destroy(device_t);
220 
221 static struct	dkdriver vnddkdriver = { vndstrategy, minphys };
222 
223 void
224 vndattach(int num)
225 {
226 	int error;
227 
228 	error = config_cfattach_attach(vnd_cd.cd_name, &vnd_ca);
229 	if (error)
230 		aprint_error("%s: unable to register cfattach\n",
231 		    vnd_cd.cd_name);
232 }
233 
234 static int
235 vnd_match(device_t self, cfdata_t cfdata, void *aux)
236 {
237 
238 	return 1;
239 }
240 
241 static void
242 vnd_attach(device_t parent, device_t self, void *aux)
243 {
244 	struct vnd_softc *sc = device_private(self);
245 
246 	sc->sc_dev = self;
247 	sc->sc_comp_offsets = NULL;
248 	sc->sc_comp_buff = NULL;
249 	sc->sc_comp_decombuf = NULL;
250 	bufq_alloc(&sc->sc_tab, "disksort", BUFQ_SORT_RAWBLOCK);
251 	disk_init(&sc->sc_dkdev, device_xname(self), &vnddkdriver);
252 	if (!pmf_device_register(self, NULL, NULL))
253 		aprint_error_dev(self, "couldn't establish power handler\n");
254 }
255 
256 static int
257 vnd_detach(device_t self, int flags)
258 {
259 	int error;
260 	struct vnd_softc *sc = device_private(self);
261 
262 	if (sc->sc_flags & VNF_INITED) {
263 		error = vnddoclear(sc, 0, -1, (flags & DETACH_FORCE) != 0);
264 		if (error != 0)
265 			return error;
266 	}
267 
268 	pmf_device_deregister(self);
269 	bufq_free(sc->sc_tab);
270 	disk_destroy(&sc->sc_dkdev);
271 
272 	return 0;
273 }
274 
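/*
 * Attach a pseudo-device instance for the given unit; the cfdata
 * allocated here is freed again by vnd_destroy().
 */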
275 static struct vnd_softc *
276 vnd_spawn(int unit)
277 {
278 	cfdata_t cf;
279 
280 	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
281 	cf->cf_name = vnd_cd.cd_name;
282 	cf->cf_atname = vnd_cd.cd_name;
283 	cf->cf_unit = unit;
284 	cf->cf_fstate = FSTATE_STAR;
285 
286 	return device_private(config_attach_pseudo(cf));
287 }
288 
289 int
290 vnd_destroy(device_t dev)
291 {
292 	int error;
293 	cfdata_t cf;
294 
295 	cf = device_cfdata(dev);
296 	error = config_detach(dev, DETACH_QUIET);
297 	if (error)
298 		return error;
299 	free(cf, M_DEVBUF);
300 	return 0;
301 }
302 
303 static int
304 vndopen(dev_t dev, int flags, int mode, struct lwp *l)
305 {
306 	int unit = vndunit(dev);
307 	struct vnd_softc *sc;
308 	int error = 0, part, pmask;
309 	struct disklabel *lp;
310 
311 #ifdef DEBUG
312 	if (vnddebug & VDB_FOLLOW)
313 		printf("vndopen(0x%"PRIx64", 0x%x, 0x%x, %p)\n", dev, flags, mode, l);
314 #endif
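	/*
	 * vnd units are created on demand: the first open of an
	 * unconfigured unit attaches a new pseudo-device instance,
	 * and vndclose() destroys it again if it was never configured.
	 */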
315 	sc = device_lookup_private(&vnd_cd, unit);
316 	if (sc == NULL) {
317 		sc = vnd_spawn(unit);
318 		if (sc == NULL)
319 			return ENOMEM;
320 	}
321 
322 	if ((error = vndlock(sc)) != 0)
323 		return error;
324 
325 	if ((sc->sc_flags & VNF_CLEARING) != 0) {
326 		error = ENXIO;
327 		goto done;
328 	}
329 
330 	lp = sc->sc_dkdev.dk_label;
331 
332 	part = DISKPART(dev);
333 	pmask = (1 << part);
334 
335 	/*
336 	 * If we're initialized, check to see if there are any other
337 	 * open partitions.  If not, then it's safe to update the
338 	 * in-core disklabel.  Only read the disklabel if it is
339 	 * not already valid.
340 	 */
341 	if ((sc->sc_flags & (VNF_INITED|VNF_VLABEL)) == VNF_INITED &&
342 	    sc->sc_dkdev.dk_openmask == 0)
343 		vndgetdisklabel(dev, sc);
344 
345 	/* Check that the partition exists. */
346 	if (part != RAW_PART) {
347 		if (((sc->sc_flags & VNF_INITED) == 0) ||
348 		    ((part >= lp->d_npartitions) ||
349 		     (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
350 			error = ENXIO;
351 			goto done;
352 		}
353 	}
354 
355 	/* Prevent our unit from being unconfigured while open. */
356 	switch (mode) {
357 	case S_IFCHR:
358 		sc->sc_dkdev.dk_copenmask |= pmask;
359 		break;
360 
361 	case S_IFBLK:
362 		sc->sc_dkdev.dk_bopenmask |= pmask;
363 		break;
364 	}
365 	sc->sc_dkdev.dk_openmask =
366 	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
367 
368  done:
369 	vndunlock(sc);
370 	return error;
371 }
372 
373 static int
374 vndclose(dev_t dev, int flags, int mode, struct lwp *l)
375 {
376 	int unit = vndunit(dev);
377 	struct vnd_softc *sc;
378 	int error = 0, part;
379 
380 #ifdef DEBUG
381 	if (vnddebug & VDB_FOLLOW)
382 		printf("vndclose(0x%"PRIx64", 0x%x, 0x%x, %p)\n", dev, flags, mode, l);
383 #endif
384 	sc = device_lookup_private(&vnd_cd, unit);
385 	if (sc == NULL)
386 		return ENXIO;
387 
388 	if ((error = vndlock(sc)) != 0)
389 		return error;
390 
391 	part = DISKPART(dev);
392 
393 	/* ...that much closer to allowing unconfiguration... */
394 	switch (mode) {
395 	case S_IFCHR:
396 		sc->sc_dkdev.dk_copenmask &= ~(1 << part);
397 		break;
398 
399 	case S_IFBLK:
400 		sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
401 		break;
402 	}
403 	sc->sc_dkdev.dk_openmask =
404 	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
405 
406 	vndunlock(sc);
407 
408 	if ((sc->sc_flags & VNF_INITED) == 0) {
409 		if ((error = vnd_destroy(sc->sc_dev)) != 0) {
410 			aprint_error_dev(sc->sc_dev,
411 			    "unable to detach instance\n");
412 			return error;
413 		}
414 	}
415 
416 	return 0;
417 }
418 
419 /*
420  * Queue the request, and wakeup the kernel thread to handle it.
421  */
422 static void
423 vndstrategy(struct buf *bp)
424 {
425 	int unit = vndunit(bp->b_dev);
426 	struct vnd_softc *vnd =
427 	    device_lookup_private(&vnd_cd, unit);
428 	struct disklabel *lp;
429 	daddr_t blkno;
430 	int s = splbio();
431 
432 	if (vnd == NULL) {
433 		bp->b_error = ENXIO;
434 		goto done;
435 	}
436 	lp = vnd->sc_dkdev.dk_label;
437 
438 	if ((vnd->sc_flags & VNF_INITED) == 0) {
439 		bp->b_error = ENXIO;
440 		goto done;
441 	}
442 
443 	/*
444 	 * The transfer must be a whole number of blocks.
445 	 */
446 	if ((bp->b_bcount % lp->d_secsize) != 0) {
447 		bp->b_error = EINVAL;
448 		goto done;
449 	}
450 
451 	/*
452 	 * check if we're read-only.
453 	 */
454 	if ((vnd->sc_flags & VNF_READONLY) && !(bp->b_flags & B_READ)) {
455 		bp->b_error = EACCES;
456 		goto done;
457 	}
458 
459 	/* If it's a nil transfer, wake up the top half now. */
460 	if (bp->b_bcount == 0) {
461 		goto done;
462 	}
463 
464 	/*
465 	 * Do bounds checking and adjust transfer.  If there's an error,
466 	 * the bounds check will flag that for us.
467 	 */
468 	if (DISKPART(bp->b_dev) == RAW_PART) {
469 		if (bounds_check_with_mediasize(bp, DEV_BSIZE,
470 		    vnd->sc_size) <= 0)
471 			goto done;
472 	} else {
473 		if (bounds_check_with_label(&vnd->sc_dkdev,
474 		    bp, vnd->sc_flags & (VNF_WLABEL|VNF_LABELLING)) <= 0)
475 			goto done;
476 	}
477 
478 	/*
479 	 * Put the block number in terms of the logical blocksize
480 	 * of the "device".
481 	 */
482 
483 	blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
484 
485 	/*
486 	 * Translate the partition-relative block number to an absolute.
487 	 */
488 	if (DISKPART(bp->b_dev) != RAW_PART) {
489 		struct partition *pp;
490 
491 		pp = &vnd->sc_dkdev.dk_label->d_partitions[
492 		    DISKPART(bp->b_dev)];
493 		blkno += pp->p_offset;
494 	}
495 	bp->b_rawblkno = blkno;
496 
497 #ifdef DEBUG
498 	if (vnddebug & VDB_FOLLOW)
499 		printf("vndstrategy(%p): unit %d\n", bp, unit);
500 #endif
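	/*
	 * When the vn_rdwr() path is in use, throttle request producers:
	 * sleep until the worker thread has drained the queue below
	 * VND_MAXPENDING before queueing another buffer.
	 */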
501 	if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
502 		KASSERT(vnd->sc_pending >= 0 &&
503 		    vnd->sc_pending <= VND_MAXPENDING(vnd));
504 		while (vnd->sc_pending == VND_MAXPENDING(vnd))
505 			tsleep(&vnd->sc_pending, PRIBIO, "vndpc", 0);
506 		vnd->sc_pending++;
507 	}
508 	bufq_put(vnd->sc_tab, bp);
509 	wakeup(&vnd->sc_tab);
510 	splx(s);
511 	return;
512 
513 done:
514 	bp->b_resid = bp->b_bcount;
515 	biodone(bp);
516 	splx(s);
517 }
518 
519 static bool
520 vnode_has_strategy(struct vnd_softc *vnd)
521 {
522 	return vnode_has_op(vnd->sc_vp, VOFFSET(vop_bmap)) &&
523 	    vnode_has_op(vnd->sc_vp, VOFFSET(vop_strategy));
524 }
525 
526 /* XXX this function needs a reliable check to detect
527  * sparse files. Otherwise, bmap/strategy may be used
528  * and fail on non-allocated blocks. VOP_READ/VOP_WRITE
529  * works on sparse files.
530  */
531 #if notyet
532 static bool
533 vnode_strategy_probe(struct vnd_softc *vnd)
534 {
535 	int error;
536 	daddr_t nbn;
537 
538 	if (!vnode_has_strategy(vnd))
539 		return false;
540 
541 	/* Convert the first logical block number to its
542 	 * physical block number.
543 	 */
544 	error = 0;
545 	vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
546 	error = VOP_BMAP(vnd->sc_vp, 0, NULL, &nbn, NULL);
547 	VOP_UNLOCK(vnd->sc_vp);
548 
549 	/* Test if that worked. */
550 	if (error == 0 && (long)nbn == -1)
551 		return false;
552 
553 	return true;
554 }
555 #endif
556 
557 static void
558 vndthread(void *arg)
559 {
560 	struct vnd_softc *vnd = arg;
561 	int s;
562 
563 	/* Determine whether we can *use* VOP_BMAP and VOP_STRATEGY to
564 	 * directly access the backing vnode.  If we can, use these two
565 	 * operations to avoid messing with the local buffer cache.
566 	 * Otherwise fall back to regular VOP_READ/VOP_WRITE operations
567 	 * which are guaranteed to work with any file system. */
568 	if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0 &&
569 	    ! vnode_has_strategy(vnd))
570 		vnd->sc_flags |= VNF_USE_VN_RDWR;
571 
572 #ifdef DEBUG
573 	if (vnddebug & VDB_INIT)
574 		printf("vndthread: vp %p, %s\n", vnd->sc_vp,
575 		    (vnd->sc_flags & VNF_USE_VN_RDWR) == 0 ?
576 		    "using bmap/strategy operations" :
577 		    "using read/write operations");
578 #endif
579 
580 	s = splbio();
581 	vnd->sc_flags |= VNF_KTHREAD;
582 	wakeup(&vnd->sc_kthread);
583 
584 	/*
585 	 * Dequeue requests and serve them depending on the available
586 	 * vnode operations.
587 	 */
588 	while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
589 		struct vndxfer *vnx;
590 		int flags;
591 		struct buf *obp;
592 		struct buf *bp;
593 
594 		obp = bufq_get(vnd->sc_tab);
595 		if (obp == NULL) {
596 			tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
597 			continue;
598 		}
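		/*
		 * Release one slot of the producer throttle and wake
		 * vndstrategy() if it was sleeping at the limit.
		 */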
599 		if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
600 			KASSERT(vnd->sc_pending > 0 &&
601 			    vnd->sc_pending <= VND_MAXPENDING(vnd));
602 			if (vnd->sc_pending-- == VND_MAXPENDING(vnd))
603 				wakeup(&vnd->sc_pending);
604 		}
605 		splx(s);
606 		flags = obp->b_flags;
607 #ifdef DEBUG
608 		if (vnddebug & VDB_FOLLOW)
609 			printf("vndthread(%p)\n", obp);
610 #endif
611 
612 		if (vnd->sc_vp->v_mount == NULL) {
613 			obp->b_error = ENXIO;
614 			goto done;
615 		}
616 #ifdef VND_COMPRESSION
617 		/* handle a compressed read */
618 		if ((flags & B_READ) != 0 && (vnd->sc_flags & VNF_COMP)) {
619 			off_t bn;
620 
621 			/* Convert to a byte offset within the file. */
622 			bn = obp->b_rawblkno *
623 			    vnd->sc_dkdev.dk_label->d_secsize;
624 
625 			compstrategy(obp, bn);
626 			goto done;
627 		}
628 #endif /* VND_COMPRESSION */
629 
630 		/*
631 		 * Allocate a header for this transfer and link it to the
632 		 * buffer
633 		 */
634 		s = splbio();
635 		vnx = VND_GETXFER(vnd);
636 		splx(s);
637 		vnx->vx_vnd = vnd;
638 
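		/*
		 * Limit the number of in-flight transfers to sc_maxactive;
		 * vndiodone() wakes us up once the active transfers have
		 * drained.
		 */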
639 		s = splbio();
640 		while (vnd->sc_active >= vnd->sc_maxactive) {
641 			tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
642 		}
643 		vnd->sc_active++;
644 		splx(s);
645 
646 		/* Instrumentation. */
647 		disk_busy(&vnd->sc_dkdev);
648 
649 		bp = &vnx->vx_buf;
650 		buf_init(bp);
651 		bp->b_flags = (obp->b_flags & B_READ);
652 		bp->b_oflags = obp->b_oflags;
653 		bp->b_cflags = obp->b_cflags;
654 		bp->b_iodone = vndiodone;
655 		bp->b_private = obp;
656 		bp->b_vp = vnd->sc_vp;
657 		bp->b_objlock = bp->b_vp->v_interlock;
658 		bp->b_data = obp->b_data;
659 		bp->b_bcount = obp->b_bcount;
660 		BIO_COPYPRIO(bp, obp);
661 
662 		/* Handle the request using the appropriate operations. */
663 		if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0)
664 			handle_with_strategy(vnd, obp, bp);
665 		else
666 			handle_with_rdwr(vnd, obp, bp);
667 
668 		s = splbio();
669 		continue;
670 
671 done:
672 		biodone(obp);
673 		s = splbio();
674 	}
675 
676 	vnd->sc_flags &= (~VNF_KTHREAD | VNF_VUNCONF);
677 	wakeup(&vnd->sc_kthread);
678 	splx(s);
679 	kthread_exit(0);
680 }
681 
682 /*
683  * Checks if the given vnode supports the requested operation.
684  * The operation is specified by the offset returned by VOFFSET.
685  *
686  * XXX The test used below to determine this is quite fragile
687  * because it relies on the file system to use genfs to specify
688  * unimplemented operations.  There might be another way to do
689  * it more cleanly.
690  */
691 static bool
692 vnode_has_op(const struct vnode *vp, int opoffset)
693 {
694 	int (*defaultp)(void *);
695 	int (*opp)(void *);
696 
697 	defaultp = vp->v_op[VOFFSET(vop_default)];
698 	opp = vp->v_op[opoffset];
699 
700 	return opp != defaultp && opp != genfs_eopnotsupp &&
701 	    opp != genfs_badop && opp != genfs_nullop;
702 }
703 
704 /*
705  * Handles the read/write request given in 'bp' using the vnode's VOP_READ
706  * and VOP_WRITE operations.
707  *
708  * 'obp' is a pointer to the original request fed to the vnd device.
709  */
710 static void
711 handle_with_rdwr(struct vnd_softc *vnd, const struct buf *obp, struct buf *bp)
712 {
713 	bool doread;
714 	off_t offset;
715 	size_t len, resid;
716 	struct vnode *vp;
717 
718 	doread = bp->b_flags & B_READ;
719 	offset = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;
720 	len = bp->b_bcount;
721 	vp = vnd->sc_vp;
722 
723 #if defined(DEBUG)
724 	if (vnddebug & VDB_IO)
725 		printf("vnd (rdwr): vp %p, %s, rawblkno 0x%" PRIx64
726 		    ", secsize %d, offset %" PRIu64
727 		    ", bcount %d\n",
728 		    vp, doread ? "read" : "write", obp->b_rawblkno,
729 		    vnd->sc_dkdev.dk_label->d_secsize, offset,
730 		    bp->b_bcount);
731 #endif
732 
733 	/* Issue the read or write operation. */
734 	bp->b_error =
735 	    vn_rdwr(doread ? UIO_READ : UIO_WRITE,
736 	    vp, bp->b_data, len, offset, UIO_SYSSPACE,
737 	    IO_ADV_ENCODE(POSIX_FADV_NOREUSE), vnd->sc_cred, &resid, NULL);
738 	bp->b_resid = resid;
739 
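	/*
	 * Flush the vnode's cached pages so the transferred data does not
	 * also linger in the page cache; VOP_PUTPAGES() releases the
	 * interlock taken below.
	 */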
740 	mutex_enter(vp->v_interlock);
741 	(void) VOP_PUTPAGES(vp, 0, 0,
742 	    PGO_ALLPAGES | PGO_CLEANIT | PGO_FREE | PGO_SYNCIO);
743 
744 	/* We need to increase the number of outputs on the vnode if
745 	 * there was any write to it. */
746 	if (!doread) {
747 		mutex_enter(vp->v_interlock);
748 		vp->v_numoutput++;
749 		mutex_exit(vp->v_interlock);
750 	}
751 
752 	biodone(bp);
753 }
754 
755 /*
756  * Handles the read/write request given in 'bp' using the vnode's VOP_BMAP
757  * and VOP_STRATEGY operations.
758  *
759  * 'obp' is a pointer to the original request fed to the vnd device.
760  */
761 static void
762 handle_with_strategy(struct vnd_softc *vnd, const struct buf *obp,
763     struct buf *bp)
764 {
765 	int bsize, error, flags, skipped;
766 	size_t resid, sz;
767 	off_t bn, offset;
768 	struct vnode *vp;
769 
770 	flags = obp->b_flags;
771 
772 	if (!(flags & B_READ)) {
773 		vp = bp->b_vp;
774 		mutex_enter(vp->v_interlock);
775 		vp->v_numoutput++;
776 		mutex_exit(vp->v_interlock);
777 	}
778 
779 	/* convert to a byte offset within the file. */
780 	bn = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;
781 
782 	bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
783 	skipped = 0;
784 
785 	/*
786 	 * Break the request into bsize pieces and feed them
787 	 * sequentially using VOP_BMAP/VOP_STRATEGY.
788 	 * We do it this way to keep from flooding NFS servers if we
789 	 * are connected to an NFS file.  This places the burden on
790 	 * the client rather than the server.
791 	 */
792 	error = 0;
793 	bp->b_resid = bp->b_bcount;
794 	for (offset = 0, resid = bp->b_resid; resid;
795 	    resid -= sz, offset += sz) {
796 		struct buf *nbp;
797 		daddr_t nbn;
798 		int off, nra;
799 
800 		nra = 0;
801 		vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
802 		error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
803 		VOP_UNLOCK(vnd->sc_vp);
804 
805 		if (error == 0 && (long)nbn == -1)
806 			error = EIO;
807 
808 		/*
809 		 * If there was an error or a hole in the file...punt.
810 		 * Note that we may have to wait for any operations
811 		 * that we have already fired off before releasing
812 		 * the buffer.
813 		 *
814 		 * XXX we could deal with holes here but it would be
815 		 * a hassle (in the write case).
816 		 */
817 		if (error) {
818 			skipped += resid;
819 			break;
820 		}
821 
822 #ifdef DEBUG
823 		if (!dovndcluster)
824 			nra = 0;
825 #endif
826 
827 		off = bn % bsize;
828 		sz = MIN(((off_t)1 + nra) * bsize - off, resid);
829 #ifdef	DEBUG
830 		if (vnddebug & VDB_IO)
831 			printf("vndstrategy: vp %p/%p bn 0x%qx/0x%" PRIx64
832 			    " sz 0x%zx\n", vnd->sc_vp, vp, (long long)bn,
833 			    nbn, sz);
834 #endif
835 
836 		nbp = getiobuf(vp, true);
837 		nestiobuf_setup(bp, nbp, offset, sz);
838 		nbp->b_blkno = nbn + btodb(off);
839 
840 #if 0 /* XXX #ifdef DEBUG */
841 		if (vnddebug & VDB_IO)
842 			printf("vndstart(%ld): bp %p vp %p blkno "
843 			    "0x%" PRIx64 " flags %x addr %p cnt 0x%x\n",
844 			    (long) (vnd-vnd_softc), &nbp->vb_buf,
845 			    nbp->vb_buf.b_vp, nbp->vb_buf.b_blkno,
846 			    nbp->vb_buf.b_flags, nbp->vb_buf.b_data,
847 			    nbp->vb_buf.b_bcount);
848 #endif
849 		VOP_STRATEGY(vp, nbp);
850 		bn += sz;
851 	}
852 	nestiobuf_done(bp, skipped, error);
853 }
854 
855 static void
856 vndiodone(struct buf *bp)
857 {
858 	struct vndxfer *vnx = VND_BUFTOXFER(bp);
859 	struct vnd_softc *vnd = vnx->vx_vnd;
860 	struct buf *obp = bp->b_private;
861 	int s = splbio();
862 
863 	KASSERT(&vnx->vx_buf == bp);
864 	KASSERT(vnd->sc_active > 0);
865 #ifdef DEBUG
866 	if (vnddebug & VDB_IO) {
867 		printf("vndiodone1: bp %p iodone: error %d\n",
868 		    bp, bp->b_error);
869 	}
870 #endif
871 	disk_unbusy(&vnd->sc_dkdev, bp->b_bcount - bp->b_resid,
872 	    (bp->b_flags & B_READ));
873 	vnd->sc_active--;
874 	if (vnd->sc_active == 0) {
875 		wakeup(&vnd->sc_tab);
876 	}
877 	splx(s);
878 	obp->b_error = bp->b_error;
879 	obp->b_resid = bp->b_resid;
880 	buf_destroy(bp);
881 	VND_PUTXFER(vnd, vnx);
882 	biodone(obp);
883 }
884 
885 /* ARGSUSED */
886 static int
887 vndread(dev_t dev, struct uio *uio, int flags)
888 {
889 	int unit = vndunit(dev);
890 	struct vnd_softc *sc;
891 
892 #ifdef DEBUG
893 	if (vnddebug & VDB_FOLLOW)
894 		printf("vndread(0x%"PRIx64", %p)\n", dev, uio);
895 #endif
896 
897 	sc = device_lookup_private(&vnd_cd, unit);
898 	if (sc == NULL)
899 		return ENXIO;
900 
901 	if ((sc->sc_flags & VNF_INITED) == 0)
902 		return ENXIO;
903 
904 	return physio(vndstrategy, NULL, dev, B_READ, minphys, uio);
905 }
906 
907 /* ARGSUSED */
908 static int
909 vndwrite(dev_t dev, struct uio *uio, int flags)
910 {
911 	int unit = vndunit(dev);
912 	struct vnd_softc *sc;
913 
914 #ifdef DEBUG
915 	if (vnddebug & VDB_FOLLOW)
916 		printf("vndwrite(0x%"PRIx64", %p)\n", dev, uio);
917 #endif
918 
919 	sc = device_lookup_private(&vnd_cd, unit);
920 	if (sc == NULL)
921 		return ENXIO;
922 
923 	if ((sc->sc_flags & VNF_INITED) == 0)
924 		return ENXIO;
925 
926 	return physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio);
927 }
928 
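/*
 * Common helper for the VNDIOCGET family of ioctls: look up the unit and
 * fetch the attributes of its backing vnode.  Returns 0 on success, -1 if
 * the unit is valid but unconfigured (callers treat that as "unused", not
 * an error), ENXIO if the unit is out of range, or an error from
 * VOP_GETATTR().
 */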
929 static int
930 vnd_cget(struct lwp *l, int unit, int *un, struct vattr *va)
931 {
932 	int error;
933 	struct vnd_softc *vnd;
934 
935 	if (*un == -1)
936 		*un = unit;
937 	if (*un < 0)
938 		return EINVAL;
939 
940 	vnd = device_lookup_private(&vnd_cd, *un);
941 	if (vnd == NULL)
942 		return (*un >= vnd_cd.cd_ndevs) ? ENXIO : -1;
943 
944 	if ((vnd->sc_flags & VNF_INITED) == 0)
945 		return -1;
946 
947 	vn_lock(vnd->sc_vp, LK_SHARED | LK_RETRY);
948 	error = VOP_GETATTR(vnd->sc_vp, va, l->l_cred);
949 	VOP_UNLOCK(vnd->sc_vp);
950 	return error;
951 }
952 
953 static int
954 vnddoclear(struct vnd_softc *vnd, int pmask, int minor, bool force)
955 {
956 	int error;
957 
958 	if ((error = vndlock(vnd)) != 0)
959 		return error;
960 
961 	/*
962 	 * Don't unconfigure if any other partitions are open
963 	 * or if both the character and block flavors of this
964 	 * partition are open.
965 	 */
966 	if (DK_BUSY(vnd, pmask) && !force) {
967 		vndunlock(vnd);
968 		return EBUSY;
969 	}
970 
971 	/*
972 	 * XXX vndclear() might call vndclose() implicitly;
973 	 * release lock to avoid recursion
974 	 *
975 	 * Set VNF_CLEARING to prevent vndopen() from
976 	 * sneaking in after we vndunlock().
977 	 */
978 	vnd->sc_flags |= VNF_CLEARING;
979 	vndunlock(vnd);
980 	vndclear(vnd, minor);
981 #ifdef DEBUG
982 	if (vnddebug & VDB_INIT)
983 		printf("vndioctl: CLRed\n");
984 #endif
985 
986 	/* Destroy the xfer pool. */
987 	pool_destroy(&vnd->sc_vxpool);
988 
989 	/* Detach the disk. */
990 	disk_detach(&vnd->sc_dkdev);
991 
992 	return 0;
993 }
994 
995 /* ARGSUSED */
996 static int
997 vndioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
998 {
999 	bool force;
1000 	int unit = vndunit(dev);
1001 	struct vnd_softc *vnd;
1002 	struct vnd_ioctl *vio;
1003 	struct vattr vattr;
1004 	struct pathbuf *pb;
1005 	struct nameidata nd;
1006 	int error, part, pmask;
1007 	size_t geomsize;
1008 	int fflags;
1009 #ifdef __HAVE_OLD_DISKLABEL
1010 	struct disklabel newlabel;
1011 #endif
1012 	struct dkwedge_info *dkw;
1013 	struct dkwedge_list *dkwl;
1014 
1015 #ifdef DEBUG
1016 	if (vnddebug & VDB_FOLLOW)
1017 		printf("vndioctl(0x%"PRIx64", 0x%lx, %p, 0x%x, %p): unit %d\n",
1018 		    dev, cmd, data, flag, l->l_proc, unit);
1019 #endif
1020 	vnd = device_lookup_private(&vnd_cd, unit);
1021 	if (vnd == NULL &&
1022 #ifdef COMPAT_30
1023 	    cmd != VNDIOCGET30 &&
1024 #endif
1025 #ifdef COMPAT_50
1026 	    cmd != VNDIOCGET50 &&
1027 #endif
1028 	    cmd != VNDIOCGET)
1029 		return ENXIO;
1030 	vio = (struct vnd_ioctl *)data;
1031 
1032 	/* Must be open for writes for these commands... */
1033 	switch (cmd) {
1034 	case VNDIOCSET:
1035 	case VNDIOCCLR:
1036 #ifdef COMPAT_50
1037 	case VNDIOCSET50:
1038 	case VNDIOCCLR50:
1039 #endif
1040 	case DIOCSDINFO:
1041 	case DIOCWDINFO:
1042 #ifdef __HAVE_OLD_DISKLABEL
1043 	case ODIOCSDINFO:
1044 	case ODIOCWDINFO:
1045 #endif
1046 	case DIOCKLABEL:
1047 	case DIOCWLABEL:
1048 		if ((flag & FWRITE) == 0)
1049 			return EBADF;
1050 	}
1051 
1052 	/* Must be initialized for these... */
1053 	switch (cmd) {
1054 	case VNDIOCCLR:
1055 #ifdef VNDIOCCLR50
1056 	case VNDIOCCLR50:
1057 #endif
1058 	case DIOCGDINFO:
1059 	case DIOCSDINFO:
1060 	case DIOCWDINFO:
1061 	case DIOCGPART:
1062 	case DIOCKLABEL:
1063 	case DIOCWLABEL:
1064 	case DIOCGDEFLABEL:
1065 	case DIOCCACHESYNC:
1066 #ifdef __HAVE_OLD_DISKLABEL
1067 	case ODIOCGDINFO:
1068 	case ODIOCSDINFO:
1069 	case ODIOCWDINFO:
1070 	case ODIOCGDEFLABEL:
1071 #endif
1072 		if ((vnd->sc_flags & VNF_INITED) == 0)
1073 			return ENXIO;
1074 	}
1075 
1076 	switch (cmd) {
1077 #ifdef VNDIOCSET50
1078 	case VNDIOCSET50:
1079 #endif
1080 	case VNDIOCSET:
1081 		if (vnd->sc_flags & VNF_INITED)
1082 			return EBUSY;
1083 
1084 		if ((error = vndlock(vnd)) != 0)
1085 			return error;
1086 
1087 		fflags = FREAD;
1088 		if ((vio->vnd_flags & VNDIOF_READONLY) == 0)
1089 			fflags |= FWRITE;
1090 		error = pathbuf_copyin(vio->vnd_file, &pb);
1091 		if (error) {
1092 			goto unlock_and_exit;
1093 		}
1094 		NDINIT(&nd, LOOKUP, FOLLOW, pb);
1095 		if ((error = vn_open(&nd, fflags, 0)) != 0) {
1096 			pathbuf_destroy(pb);
1097 			goto unlock_and_exit;
1098 		}
1099 		KASSERT(l);
1100 		error = VOP_GETATTR(nd.ni_vp, &vattr, l->l_cred);
1101 		if (!error && nd.ni_vp->v_type != VREG)
1102 			error = EOPNOTSUPP;
1103 		if (!error && vattr.va_bytes < vattr.va_size)
1104 			/* File is definitely sparse, use vn_rdwr() */
1105 			vnd->sc_flags |= VNF_USE_VN_RDWR;
1106 		if (error) {
1107 			VOP_UNLOCK(nd.ni_vp);
1108 			goto close_and_exit;
1109 		}
1110 
1111 		/* If using a compressed file, initialize its info */
1112 		/* (or abort with an error if the kernel has no compression) */
1113 		if (vio->vnd_flags & VNF_COMP) {
1114 #ifdef VND_COMPRESSION
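			/*
			 * A compressed image starts with a vnd_comp_header,
			 * followed by a table of big-endian 64-bit block
			 * offsets (with one extra entry giving the file byte
			 * size), followed by the zlib-compressed blocks
			 * themselves.
			 */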
1115 			struct vnd_comp_header *ch;
1116 			int i;
1117 			u_int32_t comp_size;
1118 			u_int32_t comp_maxsize;
1119 
1120 			/* allocate space for the compressed file header */
1121 			ch = malloc(sizeof(struct vnd_comp_header),
1122 			M_TEMP, M_WAITOK);
1123 
1124 			/* read compressed file header */
1125 			error = vn_rdwr(UIO_READ, nd.ni_vp, (void *)ch,
1126 			  sizeof(struct vnd_comp_header), 0, UIO_SYSSPACE,
1127 			  IO_UNIT|IO_NODELOCKED, l->l_cred, NULL, NULL);
1128 			if (error) {
1129 				free(ch, M_TEMP);
1130 				VOP_UNLOCK(nd.ni_vp);
1131 				goto close_and_exit;
1132 			}
1133 
1134 			/* save some header info */
1135 			vnd->sc_comp_blksz = ntohl(ch->block_size);
1136 			/* note last offset is the file byte size */
1137 			vnd->sc_comp_numoffs = ntohl(ch->num_blocks)+1;
1138 			free(ch, M_TEMP);
1139 			if (vnd->sc_comp_blksz == 0 ||
1140 			    vnd->sc_comp_blksz % DEV_BSIZE != 0) {
1141 				VOP_UNLOCK(nd.ni_vp);
1142 				error = EINVAL;
1143 				goto close_and_exit;
1144 			}
1145 			if (sizeof(struct vnd_comp_header) +
1146 			  sizeof(u_int64_t) * vnd->sc_comp_numoffs >
1147 			  vattr.va_size) {
1148 				VOP_UNLOCK(nd.ni_vp);
1149 				error = EINVAL;
1150 				goto close_and_exit;
1151 			}
1152 
1153 			/* set decompressed file size */
1154 			vattr.va_size =
1155 			    ((u_quad_t)vnd->sc_comp_numoffs - 1) *
1156 			     (u_quad_t)vnd->sc_comp_blksz;
1157 
1158 			/* allocate space for all the compressed offsets */
1159 			vnd->sc_comp_offsets =
1160 			malloc(sizeof(u_int64_t) * vnd->sc_comp_numoffs,
1161 			M_DEVBUF, M_WAITOK);
1162 
1163 			/* read in the offsets */
1164 			error = vn_rdwr(UIO_READ, nd.ni_vp,
1165 			  (void *)vnd->sc_comp_offsets,
1166 			  sizeof(u_int64_t) * vnd->sc_comp_numoffs,
1167 			  sizeof(struct vnd_comp_header), UIO_SYSSPACE,
1168 			  IO_UNIT|IO_NODELOCKED, l->l_cred, NULL, NULL);
1169 			if (error) {
1170 				VOP_UNLOCK(nd.ni_vp);
1171 				goto close_and_exit;
1172 			}
1173 			/*
1174 			 * Find the largest block size (used for allocation limit).
1175 			 * Also convert the offsets to native byte order.
1176 			 */
1177 			comp_maxsize = 0;
1178 			for (i = 0; i < vnd->sc_comp_numoffs - 1; i++) {
1179 				vnd->sc_comp_offsets[i] =
1180 				  be64toh(vnd->sc_comp_offsets[i]);
1181 				comp_size = be64toh(vnd->sc_comp_offsets[i + 1])
1182 				  - vnd->sc_comp_offsets[i];
1183 				if (comp_size > comp_maxsize)
1184 					comp_maxsize = comp_size;
1185 			}
1186 			vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1] =
1187 			  be64toh(vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1]);
1188 
1189 			/* create compressed data buffer */
1190 			vnd->sc_comp_buff = malloc(comp_maxsize,
1191 			  M_DEVBUF, M_WAITOK);
1192 
1193 			/* create decompressed buffer */
1194 			vnd->sc_comp_decombuf = malloc(vnd->sc_comp_blksz,
1195 			  M_DEVBUF, M_WAITOK);
1196 			vnd->sc_comp_buffblk = -1;
1197 
1198 			/* Initialize decompress stream */
1199 			memset(&vnd->sc_comp_stream, 0, sizeof(z_stream));
1200 			vnd->sc_comp_stream.zalloc = vnd_alloc;
1201 			vnd->sc_comp_stream.zfree = vnd_free;
1202 			error = inflateInit2(&vnd->sc_comp_stream, MAX_WBITS);
1203 			if (error) {
1204 				if (vnd->sc_comp_stream.msg)
1205 					printf("vnd%d: compressed file, %s\n",
1206 					  unit, vnd->sc_comp_stream.msg);
1207 				VOP_UNLOCK(nd.ni_vp);
1208 				error = EINVAL;
1209 				goto close_and_exit;
1210 			}
1211 
1212 			vnd->sc_flags |= VNF_COMP | VNF_READONLY;
1213 #else /* !VND_COMPRESSION */
1214 			VOP_UNLOCK(nd.ni_vp);
1215 			error = EOPNOTSUPP;
1216 			goto close_and_exit;
1217 #endif /* VND_COMPRESSION */
1218 		}
1219 
1220 		VOP_UNLOCK(nd.ni_vp);
1221 		vnd->sc_vp = nd.ni_vp;
1222 		vnd->sc_size = btodb(vattr.va_size);	/* note truncation */
1223 
1224 		/*
1225 		 * Use the pseudo-geometry specified.  If none was provided,
1226 		 * use "standard" Adaptec fictitious geometry.
1227 		 */
1228 		if (vio->vnd_flags & VNDIOF_HASGEOM) {
1229 
1230 			memcpy(&vnd->sc_geom, &vio->vnd_geom,
1231 			    sizeof(vio->vnd_geom));
1232 
1233 			/*
1234 			 * Sanity-check the sector size.
1235 			 * XXX Don't allow secsize < DEV_BSIZE.	 Should
1236 			 * XXX we?
1237 			 */
1238 			if (vnd->sc_geom.vng_secsize < DEV_BSIZE ||
1239 			    (vnd->sc_geom.vng_secsize % DEV_BSIZE) != 0 ||
1240 			    vnd->sc_geom.vng_ncylinders == 0 ||
1241 			    (vnd->sc_geom.vng_ntracks *
1242 			     vnd->sc_geom.vng_nsectors) == 0) {
1243 				error = EINVAL;
1244 				goto close_and_exit;
1245 			}
1246 
1247 			/*
1248 			 * Compute the size (in DEV_BSIZE blocks) specified
1249 			 * by the geometry.
1250 			 */
1251 			geomsize = (vnd->sc_geom.vng_nsectors *
1252 			    vnd->sc_geom.vng_ntracks *
1253 			    vnd->sc_geom.vng_ncylinders) *
1254 			    (vnd->sc_geom.vng_secsize / DEV_BSIZE);
1255 
1256 			/*
1257 			 * Sanity-check the size against the specified
1258 			 * geometry.
1259 			 */
1260 			if (vnd->sc_size < geomsize) {
1261 				error = EINVAL;
1262 				goto close_and_exit;
1263 			}
1264 		} else if (vnd->sc_size >= (32 * 64)) {
1265 			/*
1266 			 * Size must be at least 2048 DEV_BSIZE blocks
1267 			 * (1M) in order to use this geometry.
1268 			 */
1269 			vnd->sc_geom.vng_secsize = DEV_BSIZE;
1270 			vnd->sc_geom.vng_nsectors = 32;
1271 			vnd->sc_geom.vng_ntracks = 64;
1272 			vnd->sc_geom.vng_ncylinders = vnd->sc_size / (64 * 32);
1273 		} else {
1274 			vnd->sc_geom.vng_secsize = DEV_BSIZE;
1275 			vnd->sc_geom.vng_nsectors = 1;
1276 			vnd->sc_geom.vng_ntracks = 1;
1277 			vnd->sc_geom.vng_ncylinders = vnd->sc_size;
1278 		}
1279 
1280 		vnd_set_properties(vnd);
1281 
1282 		if (vio->vnd_flags & VNDIOF_READONLY) {
1283 			vnd->sc_flags |= VNF_READONLY;
1284 		}
1285 
1286 		if ((error = vndsetcred(vnd, l->l_cred)) != 0)
1287 			goto close_and_exit;
1288 
1289 		vndthrottle(vnd, vnd->sc_vp);
1290 		vio->vnd_osize = dbtob(vnd->sc_size);
1291 #ifdef VNDIOCSET50
1292 		if (cmd != VNDIOCSET50)
1293 #endif
1294 			vio->vnd_size = dbtob(vnd->sc_size);
1295 		vnd->sc_flags |= VNF_INITED;
1296 
1297 		/* create the kernel thread, wait for it to be up */
1298 		error = kthread_create(PRI_NONE, 0, NULL, vndthread, vnd,
1299 		    &vnd->sc_kthread, "%s", device_xname(vnd->sc_dev));
1300 		if (error)
1301 			goto close_and_exit;
1302 		while ((vnd->sc_flags & VNF_KTHREAD) == 0) {
1303 			tsleep(&vnd->sc_kthread, PRIBIO, "vndthr", 0);
1304 		}
1305 #ifdef DEBUG
1306 		if (vnddebug & VDB_INIT)
1307 			printf("vndioctl: SET vp %p size 0x%lx %d/%d/%d/%d\n",
1308 			    vnd->sc_vp, (unsigned long) vnd->sc_size,
1309 			    vnd->sc_geom.vng_secsize,
1310 			    vnd->sc_geom.vng_nsectors,
1311 			    vnd->sc_geom.vng_ntracks,
1312 			    vnd->sc_geom.vng_ncylinders);
1313 #endif
1314 
1315 		/* Attach the disk. */
1316 		disk_attach(&vnd->sc_dkdev);
1317 		disk_blocksize(&vnd->sc_dkdev, vnd->sc_geom.vng_secsize);
1318 
1319 		/* Initialize the xfer pool. */
1320 		pool_init(&vnd->sc_vxpool, sizeof(struct vndxfer), 0,
1321 		    0, 0, "vndxpl", NULL, IPL_BIO);
1322 
1323 		/* Try and read the disklabel. */
1324 		vndgetdisklabel(dev, vnd);
1325 
1326 		vndunlock(vnd);
1327 
1328 		pathbuf_destroy(pb);
1329 		break;
1330 
1331 close_and_exit:
1332 		(void) vn_close(nd.ni_vp, fflags, l->l_cred);
1333 		pathbuf_destroy(pb);
1334 unlock_and_exit:
1335 #ifdef VND_COMPRESSION
1336 		/* free any allocated memory (for compressed file) */
1337 		if (vnd->sc_comp_offsets) {
1338 			free(vnd->sc_comp_offsets, M_DEVBUF);
1339 			vnd->sc_comp_offsets = NULL;
1340 		}
1341 		if (vnd->sc_comp_buff) {
1342 			free(vnd->sc_comp_buff, M_DEVBUF);
1343 			vnd->sc_comp_buff = NULL;
1344 		}
1345 		if (vnd->sc_comp_decombuf) {
1346 			free(vnd->sc_comp_decombuf, M_DEVBUF);
1347 			vnd->sc_comp_decombuf = NULL;
1348 		}
1349 #endif /* VND_COMPRESSION */
1350 		vndunlock(vnd);
1351 		return error;
1352 
1353 #ifdef VNDIOCCLR50
1354 	case VNDIOCCLR50:
1355 #endif
1356 	case VNDIOCCLR:
1357 		part = DISKPART(dev);
1358 		pmask = (1 << part);
1359 		force = (vio->vnd_flags & VNDIOF_FORCE) != 0;
1360 
1361 		if ((error = vnddoclear(vnd, pmask, minor(dev), force)) != 0)
1362 			return error;
1363 
1364 		break;
1365 
1366 #ifdef COMPAT_30
1367 	case VNDIOCGET30: {
1368 		struct vnd_user30 *vnu;
1369 		struct vattr va;
1370 		vnu = (struct vnd_user30 *)data;
1371 		KASSERT(l);
1372 		switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) {
1373 		case 0:
1374 			vnu->vnu_dev = va.va_fsid;
1375 			vnu->vnu_ino = va.va_fileid;
1376 			break;
1377 		case -1:
1378 			/* unused is not an error */
1379 			vnu->vnu_dev = 0;
1380 			vnu->vnu_ino = 0;
1381 			break;
1382 		default:
1383 			return error;
1384 		}
1385 		break;
1386 	}
1387 #endif
1388 
1389 #ifdef COMPAT_50
1390 	case VNDIOCGET50: {
1391 		struct vnd_user50 *vnu;
1392 		struct vattr va;
1393 		vnu = (struct vnd_user50 *)data;
1394 		KASSERT(l);
1395 		switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) {
1396 		case 0:
1397 			vnu->vnu_dev = va.va_fsid;
1398 			vnu->vnu_ino = va.va_fileid;
1399 			break;
1400 		case -1:
1401 			/* unused is not an error */
1402 			vnu->vnu_dev = 0;
1403 			vnu->vnu_ino = 0;
1404 			break;
1405 		default:
1406 			return error;
1407 		}
1408 		break;
1409 	}
1410 #endif
1411 
1412 	case VNDIOCGET: {
1413 		struct vnd_user *vnu;
1414 		struct vattr va;
1415 		vnu = (struct vnd_user *)data;
1416 		KASSERT(l);
1417 		switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) {
1418 		case 0:
1419 			vnu->vnu_dev = va.va_fsid;
1420 			vnu->vnu_ino = va.va_fileid;
1421 			break;
1422 		case -1:
1423 			/* unused is not an error */
1424 			vnu->vnu_dev = 0;
1425 			vnu->vnu_ino = 0;
1426 			break;
1427 		default:
1428 			return error;
1429 		}
1430 		break;
1431 	}
1432 
1433 	case DIOCGDINFO:
1434 		*(struct disklabel *)data = *(vnd->sc_dkdev.dk_label);
1435 		break;
1436 
1437 #ifdef __HAVE_OLD_DISKLABEL
1438 	case ODIOCGDINFO:
1439 		newlabel = *(vnd->sc_dkdev.dk_label);
1440 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1441 			return ENOTTY;
1442 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
1443 		break;
1444 #endif
1445 
1446 	case DIOCGPART:
1447 		((struct partinfo *)data)->disklab = vnd->sc_dkdev.dk_label;
1448 		((struct partinfo *)data)->part =
1449 		    &vnd->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1450 		break;
1451 
1452 	case DIOCWDINFO:
1453 	case DIOCSDINFO:
1454 #ifdef __HAVE_OLD_DISKLABEL
1455 	case ODIOCWDINFO:
1456 	case ODIOCSDINFO:
1457 #endif
1458 	{
1459 		struct disklabel *lp;
1460 
1461 		if ((error = vndlock(vnd)) != 0)
1462 			return error;
1463 
1464 		vnd->sc_flags |= VNF_LABELLING;
1465 
1466 #ifdef __HAVE_OLD_DISKLABEL
1467 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1468 			memset(&newlabel, 0, sizeof newlabel);
1469 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
1470 			lp = &newlabel;
1471 		} else
1472 #endif
1473 		lp = (struct disklabel *)data;
1474 
1475 		error = setdisklabel(vnd->sc_dkdev.dk_label,
1476 		    lp, 0, vnd->sc_dkdev.dk_cpulabel);
1477 		if (error == 0) {
1478 			if (cmd == DIOCWDINFO
1479 #ifdef __HAVE_OLD_DISKLABEL
1480 			    || cmd == ODIOCWDINFO
1481 #endif
1482 			   )
1483 				error = writedisklabel(VNDLABELDEV(dev),
1484 				    vndstrategy, vnd->sc_dkdev.dk_label,
1485 				    vnd->sc_dkdev.dk_cpulabel);
1486 		}
1487 
1488 		vnd->sc_flags &= ~VNF_LABELLING;
1489 
1490 		vndunlock(vnd);
1491 
1492 		if (error)
1493 			return error;
1494 		break;
1495 	}
1496 
1497 	case DIOCKLABEL:
1498 		if (*(int *)data != 0)
1499 			vnd->sc_flags |= VNF_KLABEL;
1500 		else
1501 			vnd->sc_flags &= ~VNF_KLABEL;
1502 		break;
1503 
1504 	case DIOCWLABEL:
1505 		if (*(int *)data != 0)
1506 			vnd->sc_flags |= VNF_WLABEL;
1507 		else
1508 			vnd->sc_flags &= ~VNF_WLABEL;
1509 		break;
1510 
1511 	case DIOCGDEFLABEL:
1512 		vndgetdefaultlabel(vnd, (struct disklabel *)data);
1513 		break;
1514 
1515 #ifdef __HAVE_OLD_DISKLABEL
1516 	case ODIOCGDEFLABEL:
1517 		vndgetdefaultlabel(vnd, &newlabel);
1518 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1519 			return ENOTTY;
1520 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
1521 		break;
1522 #endif
1523 
1524 	case DIOCCACHESYNC:
1525 		vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1526 		error = VOP_FSYNC(vnd->sc_vp, vnd->sc_cred,
1527 		    FSYNC_WAIT | FSYNC_DATAONLY | FSYNC_CACHE, 0, 0);
1528 		VOP_UNLOCK(vnd->sc_vp);
1529 		return error;
1530 
1531 	case DIOCAWEDGE:
1532 		dkw = (void *) data;
1533 
1534 		if ((flag & FWRITE) == 0)
1535 			return EBADF;
1536 
1537 		/* If the ioctl happens here, the parent is us. */
1538 		strlcpy(dkw->dkw_parent, device_xname(vnd->sc_dev),
1539 		    sizeof(dkw->dkw_parent));
1540 		return dkwedge_add(dkw);
1541 
1542 	case DIOCDWEDGE:
1543 		dkw = (void *) data;
1544 
1545 		if ((flag & FWRITE) == 0)
1546 			return EBADF;
1547 
1548 		/* If the ioctl happens here, the parent is us. */
1549 		strlcpy(dkw->dkw_parent, device_xname(vnd->sc_dev),
1550 		    sizeof(dkw->dkw_parent));
1551 		return dkwedge_del(dkw);
1552 
1553 	case DIOCLWEDGES:
1554 		dkwl = (void *) data;
1555 
1556 		return dkwedge_list(&vnd->sc_dkdev, dkwl, l);
1557 
1558 	default:
1559 		return ENOTTY;
1560 	}
1561 
1562 	return 0;
1563 }
1564 
1565 /*
1566  * Duplicate the current process's credentials.  Since we are called only
1567  * as the result of a SET ioctl and only root can do that, any future access
1568  * to this "disk" is essentially as root.  Note that credentials may change
1569  * if some other uid can write directly to the mapped file (NFS).
1570  */
1571 static int
1572 vndsetcred(struct vnd_softc *vnd, kauth_cred_t cred)
1573 {
1574 	struct uio auio;
1575 	struct iovec aiov;
1576 	char *tmpbuf;
1577 	int error;
1578 
1579 	vnd->sc_cred = kauth_cred_dup(cred);
1580 	tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);
1581 
1582 	/* XXX: Horrible kludge to establish credentials for NFS */
1583 	aiov.iov_base = tmpbuf;
1584 	aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
1585 	auio.uio_iov = &aiov;
1586 	auio.uio_iovcnt = 1;
1587 	auio.uio_offset = 0;
1588 	auio.uio_rw = UIO_READ;
1589 	auio.uio_resid = aiov.iov_len;
1590 	UIO_SETUP_SYSSPACE(&auio);
1591 	vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1592 	error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
1593 	if (error == 0) {
1594 		/*
1595 		 * Because vnd does all IO directly through the vnode
1596 		 * we need to flush (at least) the buffer from the above
1597 		 * VOP_READ from the buffer cache to prevent cache
1598 		 * incoherencies.  Also, be careful to write dirty
1599 		 * buffers back to stable storage.
1600 		 */
1601 		error = vinvalbuf(vnd->sc_vp, V_SAVE, vnd->sc_cred,
1602 			    curlwp, 0, 0);
1603 	}
1604 	VOP_UNLOCK(vnd->sc_vp);
1605 
1606 	free(tmpbuf, M_TEMP);
1607 	return error;
1608 }
1609 
1610 /*
1611  * Set maxactive based on FS type
1612  */
1613 static void
1614 vndthrottle(struct vnd_softc *vnd, struct vnode *vp)
1615 {
1616 
1617 	if (vp->v_tag == VT_NFS)
1618 		vnd->sc_maxactive = 2;
1619 	else
1620 		vnd->sc_maxactive = 8;
1621 
1622 	if (vnd->sc_maxactive < 1)
1623 		vnd->sc_maxactive = 1;
1624 }
1625 
1626 #if 0
1627 static void
1628 vndshutdown(void)
1629 {
1630 	struct vnd_softc *vnd;
1631 
1632 	for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
1633 		if (vnd->sc_flags & VNF_INITED)
1634 			vndclear(vnd);
1635 }
1636 #endif
1637 
1638 static void
1639 vndclear(struct vnd_softc *vnd, int myminor)
1640 {
1641 	struct vnode *vp = vnd->sc_vp;
1642 	int fflags = FREAD;
1643 	int bmaj, cmaj, i, mn;
1644 	int s;
1645 
1646 #ifdef DEBUG
1647 	if (vnddebug & VDB_FOLLOW)
1648 		printf("vndclear(%p): vp %p\n", vnd, vp);
1649 #endif
1650 	/* locate the major number */
1651 	bmaj = bdevsw_lookup_major(&vnd_bdevsw);
1652 	cmaj = cdevsw_lookup_major(&vnd_cdevsw);
1653 
1654 	/* Nuke the vnodes for any open instances */
1655 	for (i = 0; i < MAXPARTITIONS; i++) {
1656 		mn = DISKMINOR(device_unit(vnd->sc_dev), i);
1657 		vdevgone(bmaj, mn, mn, VBLK);
1658 		if (mn != myminor) /* XXX avoid killing our own vnode */
1659 			vdevgone(cmaj, mn, mn, VCHR);
1660 	}
1661 
1662 	if ((vnd->sc_flags & VNF_READONLY) == 0)
1663 		fflags |= FWRITE;
1664 
1665 	s = splbio();
1666 	bufq_drain(vnd->sc_tab);
1667 	splx(s);
1668 
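	/* Ask the worker thread to exit and wait until it has done so. */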
1669 	vnd->sc_flags |= VNF_VUNCONF;
1670 	wakeup(&vnd->sc_tab);
1671 	while (vnd->sc_flags & VNF_KTHREAD)
1672 		tsleep(&vnd->sc_kthread, PRIBIO, "vnthr", 0);
1673 
1674 #ifdef VND_COMPRESSION
1675 	/* free the compressed file buffers */
1676 	if (vnd->sc_flags & VNF_COMP) {
1677 		if (vnd->sc_comp_offsets) {
1678 			free(vnd->sc_comp_offsets, M_DEVBUF);
1679 			vnd->sc_comp_offsets = NULL;
1680 		}
1681 		if (vnd->sc_comp_buff) {
1682 			free(vnd->sc_comp_buff, M_DEVBUF);
1683 			vnd->sc_comp_buff = NULL;
1684 		}
1685 		if (vnd->sc_comp_decombuf) {
1686 			free(vnd->sc_comp_decombuf, M_DEVBUF);
1687 			vnd->sc_comp_decombuf = NULL;
1688 		}
1689 	}
1690 #endif /* VND_COMPRESSION */
1691 	vnd->sc_flags &=
1692 	    ~(VNF_INITED | VNF_READONLY | VNF_VLABEL
1693 	      | VNF_VUNCONF | VNF_COMP | VNF_CLEARING);
1694 	if (vp == NULL)
1695 		panic("vndclear: null vp");
1696 	(void) vn_close(vp, fflags, vnd->sc_cred);
1697 	kauth_cred_free(vnd->sc_cred);
1698 	vnd->sc_vp = NULL;
1699 	vnd->sc_cred = NULL;
1700 	vnd->sc_size = 0;
1701 }
1702 
1703 static int
1704 vndsize(dev_t dev)
1705 {
1706 	struct vnd_softc *sc;
1707 	struct disklabel *lp;
1708 	int part, unit, omask;
1709 	int size;
1710 
1711 	unit = vndunit(dev);
1712 	sc = device_lookup_private(&vnd_cd, unit);
1713 	if (sc == NULL)
1714 		return -1;
1715 
1716 	if ((sc->sc_flags & VNF_INITED) == 0)
1717 		return -1;
1718 
1719 	part = DISKPART(dev);
1720 	omask = sc->sc_dkdev.dk_openmask & (1 << part);
1721 	lp = sc->sc_dkdev.dk_label;
1722 
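	/*
	 * If this partition is not already open, open it temporarily so
	 * that the in-core label is valid, and close it again below.
	 */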
1723 	if (omask == 0 && vndopen(dev, 0, S_IFBLK, curlwp))	/* XXX */
1724 		return -1;
1725 
1726 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
1727 		size = -1;
1728 	else
1729 		size = lp->d_partitions[part].p_size *
1730 		    (lp->d_secsize / DEV_BSIZE);
1731 
1732 	if (omask == 0 && vndclose(dev, 0, S_IFBLK, curlwp))	/* XXX */
1733 		return -1;
1734 
1735 	return size;
1736 }
1737 
1738 static int
1739 vnddump(dev_t dev, daddr_t blkno, void *va,
1740     size_t size)
1741 {
1742 
1743 	/* Not implemented. */
1744 	return ENXIO;
1745 }
1746 
1747 static void
1748 vndgetdefaultlabel(struct vnd_softc *sc, struct disklabel *lp)
1749 {
1750 	struct vndgeom *vng = &sc->sc_geom;
1751 	struct partition *pp;
1752 
1753 	memset(lp, 0, sizeof(*lp));
1754 
1755 	lp->d_secperunit = sc->sc_size / (vng->vng_secsize / DEV_BSIZE);
1756 	lp->d_secsize = vng->vng_secsize;
1757 	lp->d_nsectors = vng->vng_nsectors;
1758 	lp->d_ntracks = vng->vng_ntracks;
1759 	lp->d_ncylinders = vng->vng_ncylinders;
1760 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1761 
1762 	strncpy(lp->d_typename, "vnd", sizeof(lp->d_typename));
1763 	lp->d_type = DTYPE_VND;
1764 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1765 	lp->d_rpm = 3600;
1766 	lp->d_interleave = 1;
1767 	lp->d_flags = 0;
1768 
1769 	pp = &lp->d_partitions[RAW_PART];
1770 	pp->p_offset = 0;
1771 	pp->p_size = lp->d_secperunit;
1772 	pp->p_fstype = FS_UNUSED;
1773 	lp->d_npartitions = RAW_PART + 1;
1774 
1775 	lp->d_magic = DISKMAGIC;
1776 	lp->d_magic2 = DISKMAGIC;
1777 	lp->d_checksum = dkcksum(lp);
1778 }
1779 
1780 /*
1781  * Read the disklabel from a vnd.  If one is not present, create a fake one.
1782  */
1783 static void
1784 vndgetdisklabel(dev_t dev, struct vnd_softc *sc)
1785 {
1786 	const char *errstring;
1787 	struct disklabel *lp = sc->sc_dkdev.dk_label;
1788 	struct cpu_disklabel *clp = sc->sc_dkdev.dk_cpulabel;
1789 	int i;
1790 
1791 	memset(clp, 0, sizeof(*clp));
1792 
1793 	vndgetdefaultlabel(sc, lp);
1794 
1795 	/*
1796 	 * Call the generic disklabel extraction routine.
1797 	 */
1798 	errstring = readdisklabel(VNDLABELDEV(dev), vndstrategy, lp, clp);
1799 	if (errstring) {
1800 		/*
1801 		 * Lack of disklabel is common, but we print the warning
1802 		 * anyway, since it might contain other useful information.
1803 		 */
1804 		aprint_normal_dev(sc->sc_dev, "%s\n", errstring);
1805 
1806 		/*
1807 		 * For historical reasons, if there's no disklabel
1808 		 * present, all partitions must be FS_BSDFFS and
1809 		 * occupy the entire disk.
1810 		 */
1811 		for (i = 0; i < MAXPARTITIONS; i++) {
1812 			/*
1813 			 * Don't wipe out port-specific hacks (such as
1814 			 * the DOS partition hack of the i386 port).
1815 			 */
1816 			if (lp->d_partitions[i].p_size != 0)
1817 				continue;
1818 
1819 			lp->d_partitions[i].p_size = lp->d_secperunit;
1820 			lp->d_partitions[i].p_offset = 0;
1821 			lp->d_partitions[i].p_fstype = FS_BSDFFS;
1822 		}
1823 
1824 		strncpy(lp->d_packname, "default label",
1825 		    sizeof(lp->d_packname));
1826 
1827 		lp->d_npartitions = MAXPARTITIONS;
1828 		lp->d_checksum = dkcksum(lp);
1829 	}
1830 
1831 	/* In-core label now valid. */
1832 	sc->sc_flags |= VNF_VLABEL;
1833 }
1834 
1835 /*
1836  * Wait interruptibly for an exclusive lock.
1837  *
1838  * XXX
1839  * Several drivers do this; it should be abstracted and made MP-safe.
1840  */
1841 static int
1842 vndlock(struct vnd_softc *sc)
1843 {
1844 	int error;
1845 
1846 	while ((sc->sc_flags & VNF_LOCKED) != 0) {
1847 		sc->sc_flags |= VNF_WANTED;
1848 		if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
1849 			return error;
1850 	}
1851 	sc->sc_flags |= VNF_LOCKED;
1852 	return 0;
1853 }
1854 
1855 /*
1856  * Unlock and wake up any waiters.
1857  */
1858 static void
1859 vndunlock(struct vnd_softc *sc)
1860 {
1861 
1862 	sc->sc_flags &= ~VNF_LOCKED;
1863 	if ((sc->sc_flags & VNF_WANTED) != 0) {
1864 		sc->sc_flags &= ~VNF_WANTED;
1865 		wakeup(sc);
1866 	}
1867 }
1868 
1869 #ifdef VND_COMPRESSION
1870 /* compressed file read */
1871 static void
1872 compstrategy(struct buf *bp, off_t bn)
1873 {
1874 	int error;
1875 	int unit = vndunit(bp->b_dev);
1876 	struct vnd_softc *vnd =
1877 	    device_lookup_private(&vnd_cd, unit);
1878 	u_int32_t comp_block;
1879 	struct uio auio;
1880 	char *addr;
1881 	int s;
1882 
1883 	/* set up constants for data move */
1884 	auio.uio_rw = UIO_READ;
1885 	UIO_SETUP_SYSSPACE(&auio);
1886 
1887 	/* read, and transfer the data */
1888 	addr = bp->b_data;
1889 	bp->b_resid = bp->b_bcount;
1890 	s = splbio();
1891 	while (bp->b_resid > 0) {
1892 		unsigned length;
1893 		size_t length_in_buffer;
1894 		u_int32_t offset_in_buffer;
1895 		struct iovec aiov;
1896 
1897 		/* calculate the compressed block number */
1898 		comp_block = bn / (off_t)vnd->sc_comp_blksz;
1899 
1900 		/* check for good block number */
1901 		if (comp_block >= vnd->sc_comp_numoffs) {
1902 			bp->b_error = EINVAL;
1903 			splx(s);
1904 			return;
1905 		}
1906 
1907 		/* read in the compressed block, if not in buffer */
1908 		if (comp_block != vnd->sc_comp_buffblk) {
1909 			length = vnd->sc_comp_offsets[comp_block + 1] -
1910 			    vnd->sc_comp_offsets[comp_block];
1911 			vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1912 			error = vn_rdwr(UIO_READ, vnd->sc_vp, vnd->sc_comp_buff,
1913 			    length, vnd->sc_comp_offsets[comp_block],
1914 			    UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vnd->sc_cred,
1915 			    NULL, NULL);
1916 			if (error) {
1917 				bp->b_error = error;
1918 				VOP_UNLOCK(vnd->sc_vp);
1919 				splx(s);
1920 				return;
1921 			}
1922 			/* uncompress the buffer */
1923 			vnd->sc_comp_stream.next_in = vnd->sc_comp_buff;
1924 			vnd->sc_comp_stream.avail_in = length;
1925 			vnd->sc_comp_stream.next_out = vnd->sc_comp_decombuf;
1926 			vnd->sc_comp_stream.avail_out = vnd->sc_comp_blksz;
1927 			inflateReset(&vnd->sc_comp_stream);
1928 			error = inflate(&vnd->sc_comp_stream, Z_FINISH);
1929 			if (error != Z_STREAM_END) {
1930 				if (vnd->sc_comp_stream.msg)
1931 					aprint_normal_dev(vnd->sc_dev,
1932 					    "compressed file, %s\n",
1933 					    vnd->sc_comp_stream.msg);
1934 				bp->b_error = EBADMSG;
1935 				VOP_UNLOCK(vnd->sc_vp);
1936 				splx(s);
1937 				return;
1938 			}
1939 			vnd->sc_comp_buffblk = comp_block;
1940 			VOP_UNLOCK(vnd->sc_vp);
1941 		}
1942 
1943 		/* transfer the usable uncompressed data */
1944 		offset_in_buffer = bn % (off_t)vnd->sc_comp_blksz;
1945 		length_in_buffer = vnd->sc_comp_blksz - offset_in_buffer;
1946 		if (length_in_buffer > bp->b_resid)
1947 			length_in_buffer = bp->b_resid;
1948 		auio.uio_iov = &aiov;
1949 		auio.uio_iovcnt = 1;
1950 		aiov.iov_base = addr;
1951 		aiov.iov_len = length_in_buffer;
1952 		auio.uio_resid = aiov.iov_len;
1953 		auio.uio_offset = 0;
1954 		error = uiomove(vnd->sc_comp_decombuf + offset_in_buffer,
1955 		    length_in_buffer, &auio);
1956 		if (error) {
1957 			bp->b_error = error;
1958 			splx(s);
1959 			return;
1960 		}
1961 
1962 		bn += length_in_buffer;
1963 		addr += length_in_buffer;
1964 		bp->b_resid -= length_in_buffer;
1965 	}
1966 	splx(s);
1967 }
1968 
1969 /* compression memory allocation routines */
1970 static void *
1971 vnd_alloc(void *aux, u_int items, u_int siz)
1972 {
1973 	return malloc(items * siz, M_TEMP, M_NOWAIT);
1974 }
1975 
1976 static void
1977 vnd_free(void *aux, void *ptr)
1978 {
1979 	free(ptr, M_TEMP);
1980 }
1981 #endif /* VND_COMPRESSION */
1982 
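/*
 * Publish the unit's fictitious geometry in its "disk-info" device
 * property dictionary.
 */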
1983 static void
1984 vnd_set_properties(struct vnd_softc *vnd)
1985 {
1986 	prop_dictionary_t disk_info, odisk_info, geom;
1987 
1988 	disk_info = prop_dictionary_create();
1989 
1990 	geom = prop_dictionary_create();
1991 
1992 	prop_dictionary_set_uint64(geom, "sectors-per-unit",
1993 	    vnd->sc_geom.vng_nsectors * vnd->sc_geom.vng_ntracks *
1994 	    vnd->sc_geom.vng_ncylinders);
1995 
1996 	prop_dictionary_set_uint32(geom, "sector-size",
1997 	    vnd->sc_geom.vng_secsize);
1998 
1999 	prop_dictionary_set_uint16(geom, "sectors-per-track",
2000 	    vnd->sc_geom.vng_nsectors);
2001 
2002 	prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
2003 	    vnd->sc_geom.vng_ntracks);
2004 
2005 	prop_dictionary_set_uint64(geom, "cylinders-per-unit",
2006 	    vnd->sc_geom.vng_ncylinders);
2007 
2008 	prop_dictionary_set(disk_info, "geometry", geom);
2009 	prop_object_release(geom);
2010 
2011 	prop_dictionary_set(device_properties(vnd->sc_dev),
2012 	    "disk-info", disk_info);
2013 
2014 	/*
2015 	 * Don't release disk_info here; we keep a reference to it.
2016 	 * disk_detach() will release it when we go away.
2017 	 */
2018 
2019 	odisk_info = vnd->sc_dkdev.dk_info;
2020 	vnd->sc_dkdev.dk_info = disk_info;
2021 	if (odisk_info)
2022 		prop_object_release(odisk_info);
2023 }
2024 
2025 #ifdef _MODULE
2026 
2027 #include <sys/module.h>
2028 
2029 MODULE(MODULE_CLASS_DRIVER, vnd, NULL);
2030 CFDRIVER_DECL(vnd, DV_DISK, NULL);
2031 
2032 static int
2033 vnd_modcmd(modcmd_t cmd, void *arg)
2034 {
2035 	int bmajor = -1, cmajor = -1,  error = 0;
2036 
2037 	switch (cmd) {
2038 	case MODULE_CMD_INIT:
2039 		error = config_cfdriver_attach(&vnd_cd);
2040 		if (error)
2041 			break;
2042 
2043 		error = config_cfattach_attach(vnd_cd.cd_name, &vnd_ca);
2044 		if (error) {
2045 			config_cfdriver_detach(&vnd_cd);
2046 			aprint_error("%s: unable to register cfattach\n",
2047 			    vnd_cd.cd_name);
2048 			break;
2049 		}
2050 
2051 		error = devsw_attach("vnd", &vnd_bdevsw, &bmajor,
2052 		    &vnd_cdevsw, &cmajor);
2053 		if (error) {
2054 			config_cfattach_detach(vnd_cd.cd_name, &vnd_ca);
2055 			config_cfdriver_detach(&vnd_cd);
2056 			break;
2057 		}
2058 
2059 		break;
2060 
2061 	case MODULE_CMD_FINI:
2062 		error = config_cfattach_detach(vnd_cd.cd_name, &vnd_ca);
2063 		if (error)
2064 			break;
2065 		config_cfdriver_detach(&vnd_cd);
2066 		devsw_detach(&vnd_bdevsw, &vnd_cdevsw);
2067 		break;
2068 
2069 	case MODULE_CMD_STAT:
2070 		return ENOTTY;
2071 
2072 	default:
2073 		return ENOTTY;
2074 	}
2075 
2076 	return error;
2077 }
2078 
2079 #endif
2080