xref: /netbsd-src/sys/dev/vnd.c (revision 1ffa7b76c40339c17a0fb2a09fac93f287cfc046)
1 /*	$NetBSD: vnd.c,v 1.97 2003/05/02 08:45:26 dsl Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the NetBSD
21  *	Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Copyright (c) 1988 University of Utah.
41  * Copyright (c) 1990, 1993
42  *	The Regents of the University of California.  All rights reserved.
43  *
44  * This code is derived from software contributed to Berkeley by
45  * the Systems Programming Group of the University of Utah Computer
46  * Science Department.
47  *
48  * Redistribution and use in source and binary forms, with or without
49  * modification, are permitted provided that the following conditions
50  * are met:
51  * 1. Redistributions of source code must retain the above copyright
52  *    notice, this list of conditions and the following disclaimer.
53  * 2. Redistributions in binary form must reproduce the above copyright
54  *    notice, this list of conditions and the following disclaimer in the
55  *    documentation and/or other materials provided with the distribution.
56  * 3. All advertising materials mentioning features or use of this software
57  *    must display the following acknowledgement:
58  *	This product includes software developed by the University of
59  *	California, Berkeley and its contributors.
60  * 4. Neither the name of the University nor the names of its contributors
61  *    may be used to endorse or promote products derived from this software
62  *    without specific prior written permission.
63  *
64  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
65  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
66  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
67  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
68  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
69  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
70  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
71  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
72  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
73  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
74  * SUCH DAMAGE.
75  *
76  * from: Utah $Hdr: vn.c 1.13 94/04/02$
77  *
78  *	@(#)vn.c	8.9 (Berkeley) 5/14/95
79  */
80 
81 /*
82  * Vnode disk driver.
83  *
84  * Block/character interface to a vnode.  Allows one to treat a file
85  * as a disk (e.g. build a filesystem in it, mount it, etc.).
86  *
87  * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
88  * instead of a simple VOP_RDWR.  We do this to avoid distorting the
89  * local buffer cache.
90  *
91  * NOTE 2: There is a security issue involved with this driver.
92  * Once mounted all access to the contents of the "mapped" file via
93  * the special file is controlled by the permissions on the special
94  * file, the protection of the mapped file is ignored (effectively,
95  * by using root credentials in all transactions).
96  *
97  * NOTE 3: Doesn't interact with leases, should it?
98  */
99 
100 #include <sys/cdefs.h>
101 __KERNEL_RCSID(0, "$NetBSD: vnd.c,v 1.97 2003/05/02 08:45:26 dsl Exp $");
102 
103 #if defined(_KERNEL_OPT)
104 #include "fs_nfs.h"
105 #endif
106 
107 #include <sys/param.h>
108 #include <sys/systm.h>
109 #include <sys/namei.h>
110 #include <sys/proc.h>
111 #include <sys/errno.h>
112 #include <sys/buf.h>
113 #include <sys/malloc.h>
114 #include <sys/ioctl.h>
115 #include <sys/disklabel.h>
116 #include <sys/device.h>
117 #include <sys/disk.h>
118 #include <sys/stat.h>
119 #include <sys/mount.h>
120 #include <sys/vnode.h>
121 #include <sys/file.h>
122 #include <sys/uio.h>
123 #include <sys/conf.h>
124 
125 #include <miscfs/specfs/specdev.h>
126 
127 #include <dev/vndvar.h>
128 
129 #if defined(VNDDEBUG) && !defined(DEBUG)
130 #define	DEBUG
131 #endif
132 
133 #ifdef DEBUG
134 int dovndcluster = 1;
135 #define	VDB_FOLLOW	0x01
136 #define	VDB_INIT	0x02
137 #define	VDB_IO		0x04
138 #define	VDB_LABEL	0x08
139 int vnddebug = 0x00;
140 #endif
141 
142 #define	vndunit(x)	DISKUNIT(x)
143 
/*
 * Per-transfer bookkeeping: one vndxfer tracks an original buffer that
 * vndstrategy() has split into component I/Os; one vndbuf is allocated
 * per component and points back at its vndxfer.
 */
struct vndxfer {
	struct buf	*vx_bp;		/* Pointer to parent buffer */
	int		vx_error;	/* error to propagate to parent buffer */
	int		vx_pending;	/* # of pending aux buffers */
	int		vx_flags;	/* VX_* flags below */
#define VX_BUSY		1		/* vndstrategy() still splitting */
};

struct vndbuf {
	struct buf	vb_buf;		/* component I/O buffer (must be first) */
	struct vndxfer	*vb_xfer;	/* back-pointer to owning transfer */
};

/*
 * Transfer headers and component buffers come from per-unit pools.
 * NOTE: PR_NOWAIT means pool_get() may return NULL under memory pressure.
 */
#define	VND_GETXFER(vnd)	pool_get(&(vnd)->sc_vxpool, PR_NOWAIT)
#define	VND_PUTXFER(vnd, vx)	pool_put(&(vnd)->sc_vxpool, (vx))

#define	VND_GETBUF(vnd)		pool_get(&(vnd)->sc_vbpool, PR_NOWAIT)
#define	VND_PUTBUF(vnd, vb)	pool_put(&(vnd)->sc_vbpool, (vb))
162 
163 struct vnd_softc *vnd_softc;
164 int numvnd = 0;
165 
166 #define	VNDLABELDEV(dev) \
167 	(MAKEDISKDEV(major((dev)), vndunit((dev)), RAW_PART))
168 
169 /* called by main() at boot time (XXX: and the LKM driver) */
170 void	vndattach __P((int));
171 int	vnddetach __P((void));
172 
173 void	vndclear __P((struct vnd_softc *, int));
174 void	vndstart __P((struct vnd_softc *));
175 int	vndsetcred __P((struct vnd_softc *, struct ucred *));
176 void	vndthrottle __P((struct vnd_softc *, struct vnode *));
177 void	vndiodone __P((struct buf *));
178 #if 0
179 void	vndshutdown __P((void));
180 #endif
181 
182 void	vndgetdefaultlabel __P((struct vnd_softc *, struct disklabel *));
183 void	vndgetdisklabel __P((dev_t));
184 
185 static	int vndlock __P((struct vnd_softc *));
186 static	void vndunlock __P((struct vnd_softc *));
187 
188 dev_type_open(vndopen);
189 dev_type_close(vndclose);
190 dev_type_read(vndread);
191 dev_type_write(vndwrite);
192 dev_type_ioctl(vndioctl);
193 dev_type_strategy(vndstrategy);
194 dev_type_dump(vnddump);
195 dev_type_size(vndsize);
196 
/* Block and character device switch entries; D_DISK marks a disk device. */
const struct bdevsw vnd_bdevsw = {
	vndopen, vndclose, vndstrategy, vndioctl, vnddump, vndsize, D_DISK
};

const struct cdevsw vnd_cdevsw = {
	vndopen, vndclose, vndread, vndwrite, vndioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};
205 
206 int vndattached = 0;
207 
208 void
209 vndattach(num)
210 	int num;
211 {
212 	int i;
213 	char *mem;
214 
215 	if (vndattached)
216 		return;
217 	vndattached = 1;
218 	if (num <= 0)
219 		return;
220 	i = num * sizeof(struct vnd_softc);
221 	mem = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
222 	if (mem == NULL) {
223 		printf("WARNING: no memory for vnode disks\n");
224 		return;
225 	}
226 	vnd_softc = (struct vnd_softc *)mem;
227 	numvnd = num;
228 
229 	for (i = 0; i < numvnd; i++) {
230 		vnd_softc[i].sc_unit = i;
231 		bufq_alloc(&vnd_softc[i].sc_tab,
232 		    BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);
233 	}
234 }
235 
236 int
237 vnddetach()
238 {
239 	int i;
240 
241 	/* First check we aren't in use. */
242 	for (i = 0; i < numvnd; i++)
243 		if (vnd_softc[i].sc_flags & VNF_INITED)
244 			return (EBUSY);
245 
246 	for (i = 0; i < numvnd; i++)
247 		bufq_free(&vnd_softc[i].sc_tab);
248 
249 	free(vnd_softc, M_DEVBUF);
250 	vndattached = 0;
251 
252 	return (0);
253 }
254 
/*
 * Open a partition of a vnd unit.  Re-reads the in-core disklabel on
 * the first open, validates that the requested partition exists, and
 * records the open in the per-mode open masks so the unit cannot be
 * unconfigured while in use.
 */
int
vndopen(dev, flags, mode, p)
	dev_t dev;
	int flags, mode;
	struct proc *p;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;
	int error = 0, part, pmask;
	struct disklabel *lp;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndopen(0x%x, 0x%x, 0x%x, %p)\n", dev, flags, mode, p);
#endif
	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	/* Serialize against concurrent open/close/configure. */
	if ((error = vndlock(sc)) != 0)
		return (error);

	lp = sc->sc_dkdev.dk_label;

	part = DISKPART(dev);
	pmask = (1 << part);

	/*
	 * If we're initialized, check to see if there are any other
	 * open partitions.  If not, then it's safe to update the
	 * in-core disklabel.
	 */
	if ((sc->sc_flags & VNF_INITED) && (sc->sc_dkdev.dk_openmask == 0))
		vndgetdisklabel(dev);

	/*
	 * Check that the partition exists.  The raw partition is always
	 * allowed (even unconfigured) so the unit can be set up via it.
	 */
	if (part != RAW_PART) {
		if (((sc->sc_flags & VNF_INITED) == 0) ||
		    ((part >= lp->d_npartitions) ||
		     (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
			error = ENXIO;
			goto done;
		}
	}

	/* Prevent our unit from being unconfigured while open. */
	switch (mode) {
	case S_IFCHR:
		sc->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		sc->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}
	sc->sc_dkdev.dk_openmask =
	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;

 done:
	vndunlock(sc);
	return (error);
}
317 
318 int
319 vndclose(dev, flags, mode, p)
320 	dev_t dev;
321 	int flags, mode;
322 	struct proc *p;
323 {
324 	int unit = vndunit(dev);
325 	struct vnd_softc *sc;
326 	int error = 0, part;
327 
328 #ifdef DEBUG
329 	if (vnddebug & VDB_FOLLOW)
330 		printf("vndclose(0x%x, 0x%x, 0x%x, %p)\n", dev, flags, mode, p);
331 #endif
332 
333 	if (unit >= numvnd)
334 		return (ENXIO);
335 	sc = &vnd_softc[unit];
336 
337 	if ((error = vndlock(sc)) != 0)
338 		return (error);
339 
340 	part = DISKPART(dev);
341 
342 	/* ...that much closer to allowing unconfiguration... */
343 	switch (mode) {
344 	case S_IFCHR:
345 		sc->sc_dkdev.dk_copenmask &= ~(1 << part);
346 		break;
347 
348 	case S_IFBLK:
349 		sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
350 		break;
351 	}
352 	sc->sc_dkdev.dk_openmask =
353 	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
354 
355 	vndunlock(sc);
356 	return (0);
357 }
358 
359 /*
360  * Break the request into bsize pieces and submit using VOP_BMAP/VOP_STRATEGY.
361  */
362 void
363 vndstrategy(bp)
364 	struct buf *bp;
365 {
366 	int unit = vndunit(bp->b_dev);
367 	struct vnd_softc *vnd = &vnd_softc[unit];
368 	struct vndxfer *vnx;
369 	int s, bsize, resid;
370 	off_t bn;
371 	caddr_t addr;
372 	int sz, flags, error, wlabel;
373 	struct disklabel *lp;
374 	struct partition *pp;
375 
376 #ifdef DEBUG
377 	if (vnddebug & VDB_FOLLOW)
378 		printf("vndstrategy(%p): unit %d\n", bp, unit);
379 #endif
380 	if ((vnd->sc_flags & VNF_INITED) == 0) {
381 		bp->b_error = ENXIO;
382 		bp->b_flags |= B_ERROR;
383 		goto done;
384 	}
385 
386 	/* If it's a nil transfer, wake up the top half now. */
387 	if (bp->b_bcount == 0)
388 		goto done;
389 
390 	lp = vnd->sc_dkdev.dk_label;
391 
392 	/*
393 	 * The transfer must be a whole number of blocks.
394 	 */
395 	if ((bp->b_bcount % lp->d_secsize) != 0) {
396 		bp->b_error = EINVAL;
397 		bp->b_flags |= B_ERROR;
398 		goto done;
399 	}
400 
401 	/*
402 	 * Do bounds checking and adjust transfer.  If there's an error,
403 	 * the bounds check will flag that for us.
404 	 */
405 	wlabel = vnd->sc_flags & (VNF_WLABEL|VNF_LABELLING);
406 	if (DISKPART(bp->b_dev) != RAW_PART)
407 		if (bounds_check_with_label(bp, lp, wlabel) <= 0)
408 			goto done;
409 
410 	/*
411 	 * check if we're read-only.
412 	 */
413 	if ((vnd->sc_flags & VNF_READONLY) && !(bp->b_flags & B_READ)) {
414 		bp->b_error = EACCES;
415 		bp->b_flags |= B_ERROR;
416 		goto done;
417 	}
418 
419 	bp->b_resid = bp->b_bcount;
420 
421 	/*
422 	 * Put the block number in terms of the logical blocksize
423 	 * of the "device".
424 	 */
425 	bn = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
426 
427 	/*
428 	 * Translate the partition-relative block number to an absolute.
429 	 */
430 	if (DISKPART(bp->b_dev) != RAW_PART) {
431 		pp = &vnd->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
432 		bn += pp->p_offset;
433 	}
434 
435 	/* ...and convert to a byte offset within the file. */
436 	bn *= lp->d_secsize;
437 
438 	if (vnd->sc_vp->v_mount == NULL) {
439 		bp->b_error = ENXIO;
440 		bp->b_flags |= B_ERROR;
441 		goto done;
442 	}
443  	bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
444 	addr = bp->b_data;
445 	flags = (bp->b_flags & (B_READ|B_ASYNC)) | B_CALL;
446 
447 	/* Allocate a header for this transfer and link it to the buffer */
448 	s = splbio();
449 	vnx = VND_GETXFER(vnd);
450 	splx(s);
451 	vnx->vx_flags = VX_BUSY;
452 	vnx->vx_error = 0;
453 	vnx->vx_pending = 0;
454 	vnx->vx_bp = bp;
455 
456 	for (resid = bp->b_resid; resid; resid -= sz) {
457 		struct vndbuf *nbp;
458 		struct vnode *vp;
459 		daddr_t nbn;
460 		int off, nra;
461 
462 		nra = 0;
463 		vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
464 		error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
465 		VOP_UNLOCK(vnd->sc_vp, 0);
466 
467 		if (error == 0 && (long)nbn == -1)
468 			error = EIO;
469 
470 		/*
471 		 * If there was an error or a hole in the file...punt.
472 		 * Note that we may have to wait for any operations
473 		 * that we have already fired off before releasing
474 		 * the buffer.
475 		 *
476 		 * XXX we could deal with holes here but it would be
477 		 * a hassle (in the write case).
478 		 */
479 		if (error) {
480 			s = splbio();
481 			vnx->vx_error = error;
482 			goto out;
483 		}
484 
485 #ifdef DEBUG
486 		if (!dovndcluster)
487 			nra = 0;
488 #endif
489 
490 		if ((off = bn % bsize) != 0)
491 			sz = bsize - off;
492 		else
493 			sz = (1 + nra) * bsize;
494 		if (resid < sz)
495 			sz = resid;
496 #ifdef DEBUG
497 		if (vnddebug & VDB_IO)
498 			printf("vndstrategy: vp %p/%p bn 0x%qx/0x%" PRIx64
499 			       " sz 0x%x\n",
500 			    vnd->sc_vp, vp, (long long)bn, nbn, sz);
501 #endif
502 
503 		s = splbio();
504 		nbp = VND_GETBUF(vnd);
505 		splx(s);
506 		BUF_INIT(&nbp->vb_buf);
507 		nbp->vb_buf.b_flags = flags;
508 		nbp->vb_buf.b_bcount = sz;
509 		nbp->vb_buf.b_bufsize = round_page((ulong)addr + sz)
510 		    - trunc_page((ulong) addr);
511 		nbp->vb_buf.b_error = 0;
512 		nbp->vb_buf.b_data = addr;
513 		nbp->vb_buf.b_blkno = nbp->vb_buf.b_rawblkno = nbn + btodb(off);
514 		nbp->vb_buf.b_proc = bp->b_proc;
515 		nbp->vb_buf.b_iodone = vndiodone;
516 		nbp->vb_buf.b_vp = NULLVP;
517 
518 		nbp->vb_xfer = vnx;
519 
520 		/*
521 		 * Just sort by block number
522 		 */
523 		s = splbio();
524 		if (vnx->vx_error != 0) {
525 			VND_PUTBUF(vnd, nbp);
526 			goto out;
527 		}
528 		vnx->vx_pending++;
529 		bgetvp(vp, &nbp->vb_buf);
530 		BUFQ_PUT(&vnd->sc_tab, &nbp->vb_buf);
531 		vndstart(vnd);
532 		splx(s);
533 		bn += sz;
534 		addr += sz;
535 	}
536 
537 	s = splbio();
538 
539 out: /* Arrive here at splbio */
540 	vnx->vx_flags &= ~VX_BUSY;
541 	if (vnx->vx_pending == 0) {
542 		if (vnx->vx_error != 0) {
543 			bp->b_error = vnx->vx_error;
544 			bp->b_flags |= B_ERROR;
545 		}
546 		VND_PUTXFER(vnd, vnx);
547 		biodone(bp);
548 	}
549 	splx(s);
550 	return;
551 
552  done:
553 	biodone(bp);
554 }
555 
/*
 * Feed requests sequentially.
 * We do it this way to keep from flooding NFS servers if we are connected
 * to an NFS file.  This places the burden on the client rather than the
 * server.
 *
 * Called at splbio (from vndstrategy() and vndiodone()).  VNF_BUSY
 * guards against recursion, since VOP_STRATEGY() may complete
 * synchronously and re-enter via vndiodone().
 */
void
vndstart(vnd)
	struct vnd_softc *vnd;
{
	struct buf	*bp;

	/*
	 * Dequeue now since lower level strategy routine might
	 * queue using same links
	 */

	if ((vnd->sc_flags & VNF_BUSY) != 0)
		return;

	vnd->sc_flags |= VNF_BUSY;

	/* Issue queued component I/Os up to the per-unit concurrency cap. */
	while (vnd->sc_active < vnd->sc_maxactive) {
		bp = BUFQ_GET(&vnd->sc_tab);
		if (bp == NULL)
			break;
		vnd->sc_active++;
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndstart(%ld): bp %p vp %p blkno 0x%" PRIx64
				" flags %lx addr %p cnt 0x%lx\n",
			    (long) (vnd-vnd_softc), bp, bp->b_vp, bp->b_blkno,
			    bp->b_flags, bp->b_data, bp->b_bcount);
#endif

		/* Instrumentation. */
		disk_busy(&vnd->sc_dkdev);

		/* Writes must bump the vnode's output counter. */
		if ((bp->b_flags & B_READ) == 0)
			bp->b_vp->v_numoutput++;
		VOP_STRATEGY(bp);
	}
	vnd->sc_flags &= ~VNF_BUSY;
}
600 
/*
 * Completion callback for a component buffer created by vndstrategy().
 * Accounts the completed I/O against the parent buffer, records any
 * error in the transfer header, and completes the parent once all
 * components have returned and vndstrategy() has finished splitting
 * (VX_BUSY clear).  Finally kicks vndstart() to issue more queued work.
 */
void
vndiodone(bp)
	struct buf *bp;
{
	/* vb_buf is the first member of struct vndbuf, so this cast is safe. */
	struct vndbuf *vbp = (struct vndbuf *) bp;
	struct vndxfer *vnx = (struct vndxfer *)vbp->vb_xfer;
	struct buf *pbp = vnx->vx_bp;
	struct vnd_softc *vnd = &vnd_softc[vndunit(pbp->b_dev)];
	int s, resid;

	s = splbio();
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndiodone(%ld): vbp %p vp %p blkno 0x%" PRIx64
		       " addr %p cnt 0x%lx\n",
		    (long) (vnd-vnd_softc), vbp, vbp->vb_buf.b_vp,
		    vbp->vb_buf.b_blkno, vbp->vb_buf.b_data,
		    vbp->vb_buf.b_bcount);
#endif

	/* Credit the transferred bytes to the parent buffer. */
	resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
	pbp->b_resid -= resid;
	disk_unbusy(&vnd->sc_dkdev, resid, (pbp->b_flags & B_READ));
	vnx->vx_pending--;

	if (vbp->vb_buf.b_error) {
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: vbp %p error %d\n", vbp,
			    vbp->vb_buf.b_error);
#endif
		vnx->vx_error = vbp->vb_buf.b_error;
	}

	if (vbp->vb_buf.b_vp != NULLVP)
		brelvp(&vbp->vb_buf);

	VND_PUTBUF(vnd, vbp);

	/*
	 * Wrap up this transaction if it has run to completion or, in
	 * case of an error, when all auxiliary buffers have returned.
	 */
	if (vnx->vx_error != 0) {
		pbp->b_flags |= B_ERROR;
		pbp->b_error = vnx->vx_error;
		if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {

#ifdef DEBUG
			if (vnddebug & VDB_IO)
				printf("vndiodone: pbp %p iodone: error %d\n",
					pbp, vnx->vx_error);
#endif
			VND_PUTXFER(vnd, vnx);
			biodone(pbp);
		}
	} else if (pbp->b_resid == 0) {

#ifdef DIAGNOSTIC
		if (vnx->vx_pending != 0)
			panic("vndiodone: vnx pending: %d", vnx->vx_pending);
#endif

		if ((vnx->vx_flags & VX_BUSY) == 0) {
#ifdef DEBUG
			if (vnddebug & VDB_IO)
				printf("vndiodone: pbp %p iodone\n", pbp);
#endif
			VND_PUTXFER(vnd, vnx);
			biodone(pbp);
		}
	}

	/* One component retired; try to issue the next queued one. */
	vnd->sc_active--;
	vndstart(vnd);
	splx(s);
}
678 
679 /* ARGSUSED */
680 int
681 vndread(dev, uio, flags)
682 	dev_t dev;
683 	struct uio *uio;
684 	int flags;
685 {
686 	int unit = vndunit(dev);
687 	struct vnd_softc *sc;
688 
689 #ifdef DEBUG
690 	if (vnddebug & VDB_FOLLOW)
691 		printf("vndread(0x%x, %p)\n", dev, uio);
692 #endif
693 
694 	if (unit >= numvnd)
695 		return (ENXIO);
696 	sc = &vnd_softc[unit];
697 
698 	if ((sc->sc_flags & VNF_INITED) == 0)
699 		return (ENXIO);
700 
701 	return (physio(vndstrategy, NULL, dev, B_READ, minphys, uio));
702 }
703 
704 /* ARGSUSED */
705 int
706 vndwrite(dev, uio, flags)
707 	dev_t dev;
708 	struct uio *uio;
709 	int flags;
710 {
711 	int unit = vndunit(dev);
712 	struct vnd_softc *sc;
713 
714 #ifdef DEBUG
715 	if (vnddebug & VDB_FOLLOW)
716 		printf("vndwrite(0x%x, %p)\n", dev, uio);
717 #endif
718 
719 	if (unit >= numvnd)
720 		return (ENXIO);
721 	sc = &vnd_softc[unit];
722 
723 	if ((sc->sc_flags & VNF_INITED) == 0)
724 		return (ENXIO);
725 
726 	return (physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio));
727 }
728 
/*
 * Ioctl entry point: configure (VNDIOCSET) / unconfigure (VNDIOCCLR)
 * a unit, report its backing file (VNDIOCGET), and implement the
 * standard disklabel ioctls.
 */
/* ARGSUSED */
int
vndioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int unit = vndunit(dev);
	struct vnd_softc *vnd;
	struct vnd_ioctl *vio;
	struct vattr vattr;
	struct nameidata nd;
	int error, part, pmask;
	size_t geomsize;
	int fflags;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndioctl(0x%x, 0x%lx, %p, 0x%x, %p): unit %d\n",
		    dev, cmd, data, flag, p, unit);
#endif
	if (unit >= numvnd)
		return (ENXIO);

	vnd = &vnd_softc[unit];
	vio = (struct vnd_ioctl *)data;

	/* Must be open for writes for these commands... */
	switch (cmd) {
	case VNDIOCSET:
	case VNDIOCCLR:
	case DIOCSDINFO:
	case DIOCWDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCSDINFO:
	case ODIOCWDINFO:
#endif
	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
	}

	/* Must be initialized for these... */
	switch (cmd) {
	case VNDIOCCLR:
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCGPART:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
	case ODIOCSDINFO:
	case ODIOCWDINFO:
	case ODIOCGDEFLABEL:
#endif
		if ((vnd->sc_flags & VNF_INITED) == 0)
			return (ENXIO);
	}

	switch (cmd) {
	/*
	 * Configure the unit: open the backing file, establish the
	 * (pseudo-)geometry, grab credentials, and attach the disk.
	 */
	case VNDIOCSET:
		if (vnd->sc_flags & VNF_INITED)
			return (EBUSY);

		if ((error = vndlock(vnd)) != 0)
			return (error);

		fflags = FREAD;
		if ((vio->vnd_flags & VNDIOF_READONLY) == 0)
			fflags |= FWRITE;
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file, p);
		if ((error = vn_open(&nd, fflags, 0)) != 0)
			goto unlock_and_exit;
		error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p);
		VOP_UNLOCK(nd.ni_vp, 0);
		/* Only regular files may back a vnd. */
		if (!error && nd.ni_vp->v_type != VREG)
			error = EOPNOTSUPP;
		if (error)
			goto close_and_exit;
		vnd->sc_vp = nd.ni_vp;
		vnd->sc_size = btodb(vattr.va_size);	/* note truncation */

		/*
		 * Use pseudo-geometry specified.  If none was provided,
		 * use "standard" Adaptec fictitious geometry.
		 */
		if (vio->vnd_flags & VNDIOF_HASGEOM) {

			memcpy(&vnd->sc_geom, &vio->vnd_geom,
			    sizeof(vio->vnd_geom));

			/*
			 * Sanity-check the sector size.
			 * XXX Don't allow secsize < DEV_BSIZE.  Should
			 * XXX we?
			 */
			if (vnd->sc_geom.vng_secsize < DEV_BSIZE ||
			    (vnd->sc_geom.vng_secsize % DEV_BSIZE) != 0) {
				error = EINVAL;
				goto close_and_exit;
			}

			/*
			 * Compute the size (in DEV_BSIZE blocks) specified
			 * by the geometry.
			 */
			geomsize = (vnd->sc_geom.vng_nsectors *
			    vnd->sc_geom.vng_ntracks *
			    vnd->sc_geom.vng_ncylinders) *
			    (vnd->sc_geom.vng_secsize / DEV_BSIZE);

			/*
			 * Sanity-check the size against the specified
			 * geometry.
			 */
			if (vnd->sc_size < geomsize) {
				error = EINVAL;
				goto close_and_exit;
			}
		} else {
			/*
			 * Size must be at least 2048 DEV_BSIZE blocks
			 * (1M) in order to use this geometry.
			 */
			if (vnd->sc_size < (32 * 64)) {
				error = EINVAL;
				goto close_and_exit;
			}

			vnd->sc_geom.vng_secsize = DEV_BSIZE;
			vnd->sc_geom.vng_nsectors = 32;
			vnd->sc_geom.vng_ntracks = 64;
			vnd->sc_geom.vng_ncylinders = vnd->sc_size / (64 * 32);
		}

		if (vio->vnd_flags & VNDIOF_READONLY) {
			vnd->sc_flags |= VNF_READONLY;
		}

		if ((error = vndsetcred(vnd, p->p_ucred)) != 0)
			goto close_and_exit;
		vndthrottle(vnd, vnd->sc_vp);
		vio->vnd_size = dbtob(vnd->sc_size);
		vnd->sc_flags |= VNF_INITED;
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: SET vp %p size 0x%lx %d/%d/%d/%d\n",
			    vnd->sc_vp, (unsigned long) vnd->sc_size,
			    vnd->sc_geom.vng_secsize,
			    vnd->sc_geom.vng_nsectors,
			    vnd->sc_geom.vng_ntracks,
			    vnd->sc_geom.vng_ncylinders);
#endif

		/* Attach the disk. */
		memset(vnd->sc_xname, 0, sizeof(vnd->sc_xname)); /* XXX */
		sprintf(vnd->sc_xname, "vnd%d", unit);		/* XXX */
		vnd->sc_dkdev.dk_name = vnd->sc_xname;
		disk_attach(&vnd->sc_dkdev);

		/* Initialize the xfer and buffer pools. */
		pool_init(&vnd->sc_vxpool, sizeof(struct vndxfer), 0,
		    0, 0, "vndxpl", NULL);
		pool_init(&vnd->sc_vbpool, sizeof(struct vndbuf), 0,
		    0, 0, "vndbpl", NULL);

		/* Try and read the disklabel. */
		vndgetdisklabel(dev);

		vndunlock(vnd);

		break;

close_and_exit:
		(void) vn_close(nd.ni_vp, fflags, p->p_ucred, p);
unlock_and_exit:
		vndunlock(vnd);
		return (error);

	/* Unconfigure the unit unless other partitions keep it busy. */
	case VNDIOCCLR:
		if ((error = vndlock(vnd)) != 0)
			return (error);

		/*
		 * Don't unconfigure if any other partitions are open
		 * or if both the character and block flavors of this
		 * partition are open.
		 */
		part = DISKPART(dev);
		pmask = (1 << part);
		if (((vnd->sc_dkdev.dk_openmask & ~pmask) ||
		    ((vnd->sc_dkdev.dk_bopenmask & pmask) &&
		    (vnd->sc_dkdev.dk_copenmask & pmask))) &&
			!(vio->vnd_flags & VNDIOF_FORCE)) {
			vndunlock(vnd);
			return (EBUSY);
		}

		/*
		 * XXX vndclear() might call vndclose() implicitely;
		 * release lock to avoid recursion
		 */
		vndunlock(vnd);
		vndclear(vnd, minor(dev));
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: CLRed\n");
#endif

		/* Destroy the xfer and buffer pools. */
		pool_destroy(&vnd->sc_vxpool);
		pool_destroy(&vnd->sc_vbpool);

		/* Detatch the disk. */
		disk_detach(&vnd->sc_dkdev);

		break;

	/* Report the backing file's device and inode for a unit. */
	case VNDIOCGET: {
		struct vnd_user *vnu;
		struct vattr va;

		vnu = (struct vnd_user *)data;

		/* -1 means "the unit this ioctl was issued on". */
		if (vnu->vnu_unit == -1)
			vnu->vnu_unit = unit;
		if (vnu->vnu_unit >= numvnd)
			return (ENXIO);
		if (vnu->vnu_unit < 0)
			return (EINVAL);

		vnd = &vnd_softc[vnu->vnu_unit];

		if (vnd->sc_flags & VNF_INITED) {
			error = VOP_GETATTR(vnd->sc_vp, &va, p->p_ucred, p);
			if (error)
				return (error);
			vnu->vnu_dev = va.va_fsid;
			vnu->vnu_ino = va.va_fileid;
		}
		else {
			/* unused is not an error */
			vnu->vnu_dev = 0;
			vnu->vnu_ino = 0;
		}

		break;
	}

	case DIOCGDINFO:
		*(struct disklabel *)data = *(vnd->sc_dkdev.dk_label);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(vnd->sc_dkdev.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(data, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCGPART:
		((struct partinfo *)data)->disklab = vnd->sc_dkdev.dk_label;
		((struct partinfo *)data)->part =
		    &vnd->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
		break;

	/* Set (and optionally write back) a new disklabel. */
	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
	{
		struct disklabel *lp;

		if ((error = vndlock(vnd)) != 0)
			return (error);

		vnd->sc_flags |= VNF_LABELLING;

#ifdef __HAVE_OLD_DISKLABEL
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, data, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)data;

		error = setdisklabel(vnd->sc_dkdev.dk_label,
		    lp, 0, vnd->sc_dkdev.dk_cpulabel);
		if (error == 0) {
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			   )
				error = writedisklabel(VNDLABELDEV(dev),
				    vndstrategy, vnd->sc_dkdev.dk_label,
				    vnd->sc_dkdev.dk_cpulabel);
		}

		vnd->sc_flags &= ~VNF_LABELLING;

		vndunlock(vnd);

		if (error)
			return (error);
		break;
	}

	case DIOCWLABEL:
		if (*(int *)data != 0)
			vnd->sc_flags |= VNF_WLABEL;
		else
			vnd->sc_flags &= ~VNF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		vndgetdefaultlabel(vnd, (struct disklabel *)data);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		vndgetdefaultlabel(vnd, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(data, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	default:
		return (ENOTTY);
	}

	return (0);
}
1075 
/*
 * Duplicate the current processes' credentials.  Since we are called only
 * as the result of a SET ioctl and only root can do that, any future access
 * to this "disk" is essentially as root.  Note that credentials may change
 * if some other uid can write directly to the mapped file (NFS).
 *
 * Returns 0 on success or an errno from the probe read / cache flush.
 * On success vnd->sc_cred holds a reference the caller must eventually
 * release (see vndclear()).
 */
int
vndsetcred(vnd, cred)
	struct vnd_softc *vnd;
	struct ucred *cred;
{
	struct uio auio;
	struct iovec aiov;
	char *tmpbuf;
	int error;

	vnd->sc_cred = crdup(cred);
	tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);

	/* XXX: Horrible kludge to establish credentials for NFS */
	aiov.iov_base = tmpbuf;
	aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_resid = aiov.iov_len;
	vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
	if (error == 0) {
		/*
		 * Because vnd does all IO directly through the vnode
		 * we need to flush (at least) the buffer from the above
		 * VOP_READ from the buffer cache to prevent cache
		 * incoherencies.  Also, be careful to write dirty
		 * buffers back to stable storage.
		 */
		error = vinvalbuf(vnd->sc_vp, V_SAVE, vnd->sc_cred,
			    curproc, 0, 0);
	}
	VOP_UNLOCK(vnd->sc_vp, 0);

	free(tmpbuf, M_TEMP);
	return (error);
}
1122 
/*
 * Set maxactive based on FS type: limit concurrent component I/Os to 2
 * when the backing vnode is NFS (to avoid flooding the server), 8
 * otherwise.
 */
void
vndthrottle(vnd, vp)
	struct vnd_softc *vnd;
	struct vnode *vp;
{
#ifdef NFS
	extern int (**nfsv2_vnodeop_p) __P((void *));

	if (vp->v_op == nfsv2_vnodeop_p)
		vnd->sc_maxactive = 2;
	else
#endif
		vnd->sc_maxactive = 8;

	/* Defensive clamp; unreachable with the constants above. */
	if (vnd->sc_maxactive < 1)
		vnd->sc_maxactive = 1;
}
1143 
#if 0
/*
 * Disabled: tear down every configured vnd unit.
 * NOTE(review): this dead code calls vndclear() with one argument, but
 * the live vndclear() below takes (vnd, myminor) — it would need updating
 * before being re-enabled.
 */
void
vndshutdown()
{
	struct vnd_softc *vnd;

	for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
		if (vnd->sc_flags & VNF_INITED)
			vndclear(vnd);
}
#endif
1155 
1156 void
1157 vndclear(vnd, myminor)
1158 	struct vnd_softc *vnd;
1159 	int myminor;
1160 {
1161 	struct vnode *vp = vnd->sc_vp;
1162 	struct proc *p = curproc;		/* XXX */
1163 	int fflags = FREAD;
1164 	int bmaj, cmaj, i, mn;
1165 
1166 #ifdef DEBUG
1167 	if (vnddebug & VDB_FOLLOW)
1168 		printf("vndclear(%p): vp %p\n", vnd, vp);
1169 #endif
1170 	/* locate the major number */
1171 	bmaj = bdevsw_lookup_major(&vnd_bdevsw);
1172 	cmaj = cdevsw_lookup_major(&vnd_cdevsw);
1173 
1174 	/* Nuke the vnodes for any open instances */
1175 	for (i = 0; i < MAXPARTITIONS; i++) {
1176 		mn = DISKMINOR(vnd->sc_unit, i);
1177 		vdevgone(bmaj, mn, mn, VBLK);
1178 		if (mn != myminor) /* XXX avoid to kill own vnode */
1179 			vdevgone(cmaj, mn, mn, VCHR);
1180 	}
1181 
1182 	if ((vnd->sc_flags & VNF_READONLY) == 0)
1183 		fflags |= FWRITE;
1184 	vnd->sc_flags &= ~(VNF_INITED | VNF_READONLY);
1185 	if (vp == (struct vnode *)0)
1186 		panic("vndioctl: null vp");
1187 	(void) vn_close(vp, fflags, vnd->sc_cred, p);
1188 	crfree(vnd->sc_cred);
1189 	vnd->sc_vp = (struct vnode *)0;
1190 	vnd->sc_cred = (struct ucred *)0;
1191 	vnd->sc_size = 0;
1192 }
1193 
1194 int
1195 vndsize(dev)
1196 	dev_t dev;
1197 {
1198 	struct vnd_softc *sc;
1199 	struct disklabel *lp;
1200 	int part, unit, omask;
1201 	int size;
1202 
1203 	unit = vndunit(dev);
1204 	if (unit >= numvnd)
1205 		return (-1);
1206 	sc = &vnd_softc[unit];
1207 
1208 	if ((sc->sc_flags & VNF_INITED) == 0)
1209 		return (-1);
1210 
1211 	part = DISKPART(dev);
1212 	omask = sc->sc_dkdev.dk_openmask & (1 << part);
1213 	lp = sc->sc_dkdev.dk_label;
1214 
1215 	if (omask == 0 && vndopen(dev, 0, S_IFBLK, curproc))
1216 		return (-1);
1217 
1218 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
1219 		size = -1;
1220 	else
1221 		size = lp->d_partitions[part].p_size *
1222 		    (lp->d_secsize / DEV_BSIZE);
1223 
1224 	if (omask == 0 && vndclose(dev, 0, S_IFBLK, curproc))
1225 		return (-1);
1226 
1227 	return (size);
1228 }
1229 
1230 int
1231 vnddump(dev, blkno, va, size)
1232 	dev_t dev;
1233 	daddr_t blkno;
1234 	caddr_t va;
1235 	size_t size;
1236 {
1237 
1238 	/* Not implemented. */
1239 	return ENXIO;
1240 }
1241 
1242 void
1243 vndgetdefaultlabel(sc, lp)
1244 	struct vnd_softc *sc;
1245 	struct disklabel *lp;
1246 {
1247 	struct vndgeom *vng = &sc->sc_geom;
1248 	struct partition *pp;
1249 
1250 	memset(lp, 0, sizeof(*lp));
1251 
1252 	lp->d_secperunit = sc->sc_size / (vng->vng_secsize / DEV_BSIZE);
1253 	lp->d_secsize = vng->vng_secsize;
1254 	lp->d_nsectors = vng->vng_nsectors;
1255 	lp->d_ntracks = vng->vng_ntracks;
1256 	lp->d_ncylinders = vng->vng_ncylinders;
1257 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1258 
1259 	strncpy(lp->d_typename, "vnd", sizeof(lp->d_typename));
1260 	lp->d_type = DTYPE_VND;
1261 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1262 	lp->d_rpm = 3600;
1263 	lp->d_interleave = 1;
1264 	lp->d_flags = 0;
1265 
1266 	pp = &lp->d_partitions[RAW_PART];
1267 	pp->p_offset = 0;
1268 	pp->p_size = lp->d_secperunit;
1269 	pp->p_fstype = FS_UNUSED;
1270 	lp->d_npartitions = RAW_PART + 1;
1271 
1272 	lp->d_magic = DISKMAGIC;
1273 	lp->d_magic2 = DISKMAGIC;
1274 	lp->d_checksum = dkcksum(lp);
1275 }
1276 
1277 /*
1278  * Read the disklabel from a vnd.  If one is not present, create a fake one.
1279  */
1280 void
1281 vndgetdisklabel(dev)
1282 	dev_t dev;
1283 {
1284 	struct vnd_softc *sc = &vnd_softc[vndunit(dev)];
1285 	const char *errstring;
1286 	struct disklabel *lp = sc->sc_dkdev.dk_label;
1287 	struct cpu_disklabel *clp = sc->sc_dkdev.dk_cpulabel;
1288 	int i;
1289 
1290 	memset(clp, 0, sizeof(*clp));
1291 
1292 	vndgetdefaultlabel(sc, lp);
1293 
1294 	/*
1295 	 * Call the generic disklabel extraction routine.
1296 	 */
1297 	errstring = readdisklabel(VNDLABELDEV(dev), vndstrategy, lp, clp);
1298 	if (errstring) {
1299 		/*
1300 		 * Lack of disklabel is common, but we print the warning
1301 		 * anyway, since it might contain other useful information.
1302 		 */
1303 		printf("%s: %s\n", sc->sc_xname, errstring);
1304 
1305 		/*
1306 		 * For historical reasons, if there's no disklabel
1307 		 * present, all partitions must be FS_BSDFFS and
1308 		 * occupy the entire disk.
1309 		 */
1310 		for (i = 0; i < MAXPARTITIONS; i++) {
1311 			/*
1312 			 * Don't wipe out port specific hack (such as
1313 			 * dos partition hack of i386 port).
1314 			 */
1315 			if (lp->d_partitions[i].p_size != 0)
1316 				continue;
1317 
1318 			lp->d_partitions[i].p_size = lp->d_secperunit;
1319 			lp->d_partitions[i].p_offset = 0;
1320 			lp->d_partitions[i].p_fstype = FS_BSDFFS;
1321 		}
1322 
1323 		strncpy(lp->d_packname, "default label",
1324 		    sizeof(lp->d_packname));
1325 
1326 		lp->d_npartitions = MAXPARTITIONS;
1327 		lp->d_checksum = dkcksum(lp);
1328 	}
1329 }
1330 
1331 /*
1332  * Wait interruptibly for an exclusive lock.
1333  *
1334  * XXX
1335  * Several drivers do this; it should be abstracted and made MP-safe.
1336  */
1337 static int
1338 vndlock(sc)
1339 	struct vnd_softc *sc;
1340 {
1341 	int error;
1342 
1343 	while ((sc->sc_flags & VNF_LOCKED) != 0) {
1344 		sc->sc_flags |= VNF_WANTED;
1345 		if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
1346 			return (error);
1347 	}
1348 	sc->sc_flags |= VNF_LOCKED;
1349 	return (0);
1350 }
1351 
1352 /*
1353  * Unlock and wake up any waiters.
1354  */
1355 static void
1356 vndunlock(sc)
1357 	struct vnd_softc *sc;
1358 {
1359 
1360 	sc->sc_flags &= ~VNF_LOCKED;
1361 	if ((sc->sc_flags & VNF_WANTED) != 0) {
1362 		sc->sc_flags &= ~VNF_WANTED;
1363 		wakeup(sc);
1364 	}
1365 }
1366