xref: /netbsd-src/sys/dev/vnd.c (revision 81b108b45f75f89f1e3ffad9fb6f074e771c0935)
1 /*	$NetBSD: vnd.c,v 1.27 1996/07/10 18:15:22 cgd Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1990, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  * from: Utah $Hdr: vn.c 1.13 94/04/02$
41  *
42  *	@(#)vn.c	8.6 (Berkeley) 4/1/94
43  */
44 
45 /*
46  * Vnode disk driver.
47  *
48  * Block/character interface to a vnode.  Allows one to treat a file
49  * as a disk (e.g. build a filesystem in it, mount it, etc.).
50  *
51  * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
52  * instead of a simple VOP_RDWR.  We do this to avoid distorting the
53  * local buffer cache.
54  *
55  * NOTE 2: There is a security issue involved with this driver.
56  * Once mounted, all access to the contents of the "mapped" file via
57  * the special file is controlled by the permissions on the special
58  * file; the protection of the mapped file is ignored (effectively,
59  * by using root credentials in all transactions).
60  *
61  * NOTE 3: Doesn't interact with leases; should it?
62  */
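/*
 * Illustrative userland configuration sketch (a hedged example, roughly
 * what a tool such as vnconfig(8) does; not part of this driver).  Only
 * VNDIOCSET and the vnd_file/vnd_size members of struct vnd_ioctl are
 * taken from this file and <dev/vndioctl.h>; the device node, image path
 * and error handling below are assumptions for illustration only.
 *
 *	struct vnd_ioctl vio;
 *	int fd;
 *
 *	bzero(&vio, sizeof(vio));
 *	vio.vnd_file = "/var/tmp/fsimage";	(hypothetical image file)
 *	if ((fd = open("/dev/rvnd0c", O_RDWR)) == -1 ||	(hypothetical node)
 *	    ioctl(fd, VNDIOCSET, &vio) == -1)
 *		err(1, "vnd configuration failed");
 *	(on success, vio.vnd_size holds the usable size in bytes)
 */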
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/namei.h>
67 #include <sys/proc.h>
68 #include <sys/errno.h>
69 #include <sys/buf.h>
70 #include <sys/malloc.h>
71 #include <sys/ioctl.h>
72 #include <sys/disklabel.h>
73 #include <sys/device.h>
74 #include <sys/disk.h>
75 #include <sys/stat.h>
76 #include <sys/mount.h>
77 #include <sys/vnode.h>
78 #include <sys/file.h>
79 #include <sys/uio.h>
80 #include <sys/conf.h>
81 
82 #include <miscfs/specfs/specdev.h>
83 
84 #include <dev/vndioctl.h>
85 
86 #ifdef DEBUG
87 int dovndcluster = 1;
88 int vnddebug = 0x00;
89 #define VDB_FOLLOW	0x01
90 #define VDB_INIT	0x02
91 #define VDB_IO		0x04
92 #endif
93 
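/*
 * disksort() orders pending buffers by b_cylin; map it onto b_resid so
 * the component buffers queued in vndstrategy() are simply sorted by
 * block number.
 */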
94 #define b_cylin	b_resid
95 
96 #define	vndunit(x)	DISKUNIT(x)
97 
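/*
 * Each incoming request is split into one or more component transfers.
 * A vndbuf wraps the component buffer together with a pointer back to
 * the original buffer so vndiodone() can account for and complete it.
 */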
98 struct vndbuf {
99 	struct buf	vb_buf;
100 	struct buf	*vb_obp;
101 };
102 
103 #define	getvndbuf()	\
104 	((struct vndbuf *)malloc(sizeof(struct vndbuf), M_DEVBUF, M_WAITOK))
105 #define putvndbuf(vbp)	\
106 	free((caddr_t)(vbp), M_DEVBUF)
107 
108 struct vnd_softc {
109 	int		 sc_flags;	/* flags */
110 	size_t		 sc_size;	/* size of vnd */
111 	struct vnode	*sc_vp;		/* vnode */
112 	struct ucred	*sc_cred;	/* credentials */
113 	int		 sc_maxactive;	/* max # of active requests */
114 	struct buf	 sc_tab;	/* transfer queue */
115 	char		 sc_xname[8];	/* XXX external name */
116 	struct disk	 sc_dkdev;	/* generic disk device info */
117 };
118 
119 /* sc_flags */
120 #define	VNF_ALIVE	0x01
121 #define VNF_INITED	0x02
122 #define VNF_WANTED	0x40
123 #define VNF_LOCKED	0x80
124 
125 struct vnd_softc *vnd_softc;
126 int numvnd = 0;
127 
128 /* called by main() at boot time */
129 void	vndattach __P((int));
130 
131 void	vndclear __P((struct vnd_softc *));
132 void	vndstart __P((struct vnd_softc *));
133 int	vndsetcred __P((struct vnd_softc *, struct ucred *));
134 void	vndthrottle __P((struct vnd_softc *, struct vnode *));
135 void	vndiodone __P((struct buf *));
136 void	vndshutdown __P((void));
137 
138 static	int vndlock __P((struct vnd_softc *));
139 static	void vndunlock __P((struct vnd_softc *));
140 
141 void
142 vndattach(num)
143 	int num;
144 {
145 	char *mem;
146 	register u_long size;
147 
148 	if (num <= 0)
149 		return;
150 	size = num * sizeof(struct vnd_softc);
151 	mem = malloc(size, M_DEVBUF, M_NOWAIT);
152 	if (mem == NULL) {
153 		printf("WARNING: no memory for vnode disks\n");
154 		return;
155 	}
156 	bzero(mem, size);
157 	vnd_softc = (struct vnd_softc *)mem;
158 	numvnd = num;
159 }
160 
161 int
162 vndopen(dev, flags, mode, p)
163 	dev_t dev;
164 	int flags, mode;
165 	struct proc *p;
166 {
167 	int unit = vndunit(dev);
168 	struct vnd_softc *sc;
169 	int error = 0, part, pmask;
170 
171 	/*
172 	 * XXX Should support disklabels.
173 	 */
174 
175 #ifdef DEBUG
176 	if (vnddebug & VDB_FOLLOW)
177 		printf("vndopen(%x, %x, %x, %p)\n", dev, flags, mode, p);
178 #endif
179 	if (unit >= numvnd)
180 		return (ENXIO);
181 	sc = &vnd_softc[unit];
182 
183 	if ((error = vndlock(sc)) != 0)
184 		return (error);
185 
186 	part = DISKPART(dev);
187 	pmask = (1 << part);
188 
189 	/* Prevent our unit from being unconfigured while open. */
190 	switch (mode) {
191 	case S_IFCHR:
192 		sc->sc_dkdev.dk_copenmask |= pmask;
193 		break;
194 
195 	case S_IFBLK:
196 		sc->sc_dkdev.dk_bopenmask |= pmask;
197 		break;
198 	}
199 	sc->sc_dkdev.dk_openmask =
200 	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
201 
202 	vndunlock(sc);
203 	return (0);
204 }
205 
206 int
207 vndclose(dev, flags, mode, p)
208 	dev_t dev;
209 	int flags, mode;
210 	struct proc *p;
211 {
212 	int unit = vndunit(dev);
213 	struct vnd_softc *sc;
214 	int error = 0, part;
215 
216 #ifdef DEBUG
217 	if (vnddebug & VDB_FOLLOW)
218 		printf("vndclose(%x, %x, %x, %p)\n", dev, flags, mode, p);
219 #endif
220 
221 	if (unit >= numvnd)
222 		return (ENXIO);
223 	sc = &vnd_softc[unit];
224 
225 	if ((error = vndlock(sc)) != 0)
226 		return (error);
227 
228 	part = DISKPART(dev);
229 
230 	/* ...that much closer to allowing unconfiguration... */
231 	switch (mode) {
232 	case S_IFCHR:
233 		sc->sc_dkdev.dk_copenmask &= ~(1 << part);
234 		break;
235 
236 	case S_IFBLK:
237 		sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
238 		break;
239 	}
240 	sc->sc_dkdev.dk_openmask =
241 	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
242 
243 	vndunlock(sc);
244 	return (0);
245 }
246 
247 /*
248  * Break the request into bsize pieces and submit using VOP_BMAP/VOP_STRATEGY.
249  * Note that this driver can only be used for swapping over NFS on the hp
250  * since nfs_strategy on the vax cannot handle u-areas and page tables.
251  */
252 void
253 vndstrategy(bp)
254 	register struct buf *bp;
255 {
256 	int unit = vndunit(bp->b_dev);
257 	register struct vnd_softc *vnd = &vnd_softc[unit];
258 	register struct vndbuf *nbp;
259 	register int bn, bsize, resid;
260 	register caddr_t addr;
261 	int sz, flags, error;
262 
263 #ifdef DEBUG
264 	if (vnddebug & VDB_FOLLOW)
265 		printf("vndstrategy(%p): unit %d\n", bp, unit);
266 #endif
267 	if ((vnd->sc_flags & VNF_INITED) == 0) {
268 		bp->b_error = ENXIO;
269 		bp->b_flags |= B_ERROR;
270 		biodone(bp);
271 		return;
272 	}
273 	bn = bp->b_blkno;
274 	sz = howmany(bp->b_bcount, DEV_BSIZE);
275 	bp->b_resid = bp->b_bcount;
276 	if (bn < 0 || bn + sz > vnd->sc_size) {
277 		if (bn != vnd->sc_size) {
278 			bp->b_error = EINVAL;
279 			bp->b_flags |= B_ERROR;
280 		}
281 		biodone(bp);
282 		return;
283 	}
284 	bn = dbtob(bn);
285 	bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
286 	addr = bp->b_data;
287 	flags = bp->b_flags | B_CALL;
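	/*
	 * Carve the request into component transfers, one per filesystem
	 * block run.  Illustrative numbers (not taken from the source):
	 * with an 8192-byte f_iosize and a transfer starting 4096 bytes
	 * into a block, the first piece is 4096 bytes (up to the block
	 * boundary) and later pieces are bsize bytes, or (1 + nra) * bsize
	 * when VOP_BMAP reports contiguous blocks, always clipped to the
	 * remaining resid.
	 */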
288 	for (resid = bp->b_resid; resid; resid -= sz) {
289 		struct vnode *vp;
290 		daddr_t nbn;
291 		int off, s, nra;
292 
293 		nra = 0;
294 		VOP_LOCK(vnd->sc_vp);
295 		error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
296 		VOP_UNLOCK(vnd->sc_vp);
297 		if (error == 0 && (long)nbn == -1)
298 			error = EIO;
299 #ifdef DEBUG
300 		if (!dovndcluster)
301 			nra = 0;
302 #endif
303 
304 		if ((off = bn % bsize) != 0)
305 			sz = bsize - off;
306 		else
307 			sz = (1 + nra) * bsize;
308 		if (resid < sz)
309 			sz = resid;
310 #ifdef DEBUG
311 		if (vnddebug & VDB_IO)
312 			printf("vndstrategy: vp %p/%p bn %x/%x sz %x\n",
313 			       vnd->sc_vp, vp, bn, nbn, sz);
314 #endif
315 
316 		nbp = getvndbuf();
317 		nbp->vb_buf.b_flags = flags;
318 		nbp->vb_buf.b_bcount = sz;
319 		nbp->vb_buf.b_bufsize = bp->b_bufsize;
320 		nbp->vb_buf.b_error = 0;
321 		if (vp->v_type == VBLK || vp->v_type == VCHR)
322 			nbp->vb_buf.b_dev = vp->v_rdev;
323 		else
324 			nbp->vb_buf.b_dev = NODEV;
325 		nbp->vb_buf.b_data = addr;
326 		nbp->vb_buf.b_blkno = nbn + btodb(off);
327 		nbp->vb_buf.b_proc = bp->b_proc;
328 		nbp->vb_buf.b_iodone = vndiodone;
329 		nbp->vb_buf.b_vp = vp;
330 		nbp->vb_buf.b_rcred = vnd->sc_cred;	/* XXX crdup? */
331 		nbp->vb_buf.b_wcred = vnd->sc_cred;	/* XXX crdup? */
332 		nbp->vb_buf.b_dirtyoff = bp->b_dirtyoff;
333 		nbp->vb_buf.b_dirtyend = bp->b_dirtyend;
334 		nbp->vb_buf.b_validoff = bp->b_validoff;
335 		nbp->vb_buf.b_validend = bp->b_validend;
336 
337 		/* save a reference to the old buffer */
338 		nbp->vb_obp = bp;
339 
340 		/*
341 		 * If there was an error or a hole in the file...punt.
342 		 * Note that we deal with this after the nbp allocation.
343 		 * This ensures that we properly clean up any operations
344 		 * that we have already fired off.
345 		 *
346 		 * XXX we could deal with holes here but it would be
347 		 * a hassle (in the write case).
348 		 */
349 		if (error) {
350 			nbp->vb_buf.b_error = error;
351 			nbp->vb_buf.b_flags |= B_ERROR;
352 			bp->b_resid -= (resid - sz);
353 			biodone(&nbp->vb_buf);
354 			return;
355 		}
356 		/*
357 		 * Just sort by block number
358 		 */
359 		nbp->vb_buf.b_cylin = nbp->vb_buf.b_blkno;
360 		s = splbio();
361 		disksort(&vnd->sc_tab, &nbp->vb_buf);
362 		if (vnd->sc_tab.b_active < vnd->sc_maxactive) {
363 			vnd->sc_tab.b_active++;
364 			vndstart(vnd);
365 		}
366 		splx(s);
367 		bn += sz;
368 		addr += sz;
369 	}
370 }
371 
372 /*
373  * Feed requests sequentially.
374  * We do it this way to keep from flooding NFS servers if we are connected
375  * to an NFS file.  This places the burden on the client rather than the
376  * server.
377  */
378 void
379 vndstart(vnd)
380 	register struct vnd_softc *vnd;
381 {
382 	register struct buf *bp;
383 
384 	/*
385 	 * Dequeue now since lower level strategy routine might
386 	 * queue using same links
387 	 */
388 	bp = vnd->sc_tab.b_actf;
389 	vnd->sc_tab.b_actf = bp->b_actf;
390 #ifdef DEBUG
391 	if (vnddebug & VDB_IO)
392 		printf("vndstart(%ld): bp %p vp %p blkno %x addr %p cnt %lx\n",
393 		    vnd-vnd_softc, bp, bp->b_vp, bp->b_blkno, bp->b_data,
394 		    bp->b_bcount);
395 #endif
396 
397 	/* Instrumentation. */
398 	disk_busy(&vnd->sc_dkdev);
399 
400 	if ((bp->b_flags & B_READ) == 0)
401 		bp->b_vp->v_numoutput++;
402 	VOP_STRATEGY(bp);
403 }
404 
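/*
 * Component-transfer completion: propagate any error to the original
 * buffer, free the vndbuf, and biodone() the original buffer once all
 * of its pieces have completed; then keep the transfer queue draining,
 * or drop the active count when the queue is empty.
 */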
405 void
406 vndiodone(bp)
407 	struct buf *bp;
408 {
409 	register struct vndbuf *vbp = (struct vndbuf *) bp;
410 	register struct buf *pbp = vbp->vb_obp;
411 	register struct vnd_softc *vnd = &vnd_softc[vndunit(pbp->b_dev)];
412 	int s;
413 
414 	s = splbio();
415 #ifdef DEBUG
416 	if (vnddebug & VDB_IO)
417 		printf("vndiodone(%ld): vbp %p vp %p blkno %x addr %p cnt %lx\n",
418 		    vnd-vnd_softc, vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno,
419 		    vbp->vb_buf.b_data, vbp->vb_buf.b_bcount);
420 #endif
421 
422 	if (vbp->vb_buf.b_error) {
423 #ifdef DEBUG
424 		if (vnddebug & VDB_IO)
425 			printf("vndiodone: vbp %p error %d\n", vbp,
426 			    vbp->vb_buf.b_error);
427 #endif
428 		pbp->b_flags |= B_ERROR;
429 		pbp->b_error = biowait(&vbp->vb_buf);
430 	}
431 	pbp->b_resid -= vbp->vb_buf.b_bcount;
432 	putvndbuf(vbp);
433 	disk_unbusy(&vnd->sc_dkdev, (pbp->b_bcount - pbp->b_resid));
434 	if (pbp->b_resid == 0) {
435 #ifdef DEBUG
436 		if (vnddebug & VDB_IO)
437 			printf("vndiodone: pbp %p iodone\n", pbp);
438 #endif
439 		biodone(pbp);
440 	}
441 	if (vnd->sc_tab.b_actf)
442 		vndstart(vnd);
443 	else
444 		vnd->sc_tab.b_active--;
445 	splx(s);
446 }
447 
448 /* ARGSUSED */
449 int
450 vndread(dev, uio, flags)
451 	dev_t dev;
452 	struct uio *uio;
453 	int flags;
454 {
455 	int unit = vndunit(dev);
456 	struct vnd_softc *sc;
457 
458 #ifdef DEBUG
459 	if (vnddebug & VDB_FOLLOW)
460 		printf("vndread(%x, %p)\n", dev, uio);
461 #endif
462 
463 	if (unit >= numvnd)
464 		return (ENXIO);
465 	sc = &vnd_softc[unit];
466 
467 	if ((sc->sc_flags & VNF_INITED) == 0)
468 		return (ENXIO);
469 
470 	return (physio(vndstrategy, NULL, dev, B_READ, minphys, uio));
471 }
472 
473 /* ARGSUSED */
474 int
475 vndwrite(dev, uio, flags)
476 	dev_t dev;
477 	struct uio *uio;
478 	int flags;
479 {
480 	int unit = vndunit(dev);
481 	struct vnd_softc *sc;
482 
483 #ifdef DEBUG
484 	if (vnddebug & VDB_FOLLOW)
485 		printf("vndwrite(%x, %p)\n", dev, uio);
486 #endif
487 
488 	if (unit >= numvnd)
489 		return (ENXIO);
490 	sc = &vnd_softc[unit];
491 
492 	if ((sc->sc_flags & VNF_INITED) == 0)
493 		return (ENXIO);
494 
495 	return (physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio));
496 }
497 
498 /* ARGSUSED */
499 int
500 vndioctl(dev, cmd, data, flag, p)
501 	dev_t dev;
502 	u_long cmd;
503 	caddr_t data;
504 	int flag;
505 	struct proc *p;
506 {
507 	int unit = vndunit(dev);
508 	register struct vnd_softc *vnd;
509 	struct vnd_ioctl *vio;
510 	struct vattr vattr;
511 	struct nameidata nd;
512 	int error, part, pmask, s;
513 
514 #ifdef DEBUG
515 	if (vnddebug & VDB_FOLLOW)
516 		printf("vndioctl(%x, %lx, %p, %x, %p): unit %d\n",
517 		    dev, cmd, data, flag, p, unit);
518 #endif
519 	error = suser(p->p_ucred, &p->p_acflag);
520 	if (error)
521 		return (error);
522 	if (unit >= numvnd)
523 		return (ENXIO);
524 
525 	vnd = &vnd_softc[unit];
526 	vio = (struct vnd_ioctl *)data;
527 	switch (cmd) {
528 
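	/*
	 * VNDIOCSET: open the backing file, size the unit from its
	 * attributes, duplicate the caller's (root) credentials for all
	 * later I/O, choose a request-depth limit, and attach the disk.
	 * VNDIOCCLR undoes this once no other partition is open.
	 */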
529 	case VNDIOCSET:
530 		if (vnd->sc_flags & VNF_INITED)
531 			return (EBUSY);
532 
533 		if ((error = vndlock(vnd)) != 0)
534 			return (error);
535 
536 		/*
537 		 * Always open for read and write.
538 		 * This is probably bogus, but it lets vn_open()
539 		 * weed out directories, sockets, etc. so we don't
540 		 * have to worry about them.
541 		 */
542 		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file, p);
543 		if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
544 			vndunlock(vnd);
545 			return(error);
546 		}
547 		error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p);
548 		if (error) {
549 			VOP_UNLOCK(nd.ni_vp);
550 			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
551 			vndunlock(vnd);
552 			return(error);
553 		}
554 		VOP_UNLOCK(nd.ni_vp);
555 		vnd->sc_vp = nd.ni_vp;
556 		vnd->sc_size = btodb(vattr.va_size);	/* note truncation */
557 		if ((error = vndsetcred(vnd, p->p_ucred)) != 0) {
558 			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
559 			vndunlock(vnd);
560 			return(error);
561 		}
562 		vndthrottle(vnd, vnd->sc_vp);
563 		vio->vnd_size = dbtob(vnd->sc_size);
564 		vnd->sc_flags |= VNF_INITED;
565 #ifdef DEBUG
566 		if (vnddebug & VDB_INIT)
567 			printf("vndioctl: SET vp %p size %lx\n",
568 			    vnd->sc_vp, vnd->sc_size);
569 #endif
570 
571 		/* Attach the disk. */
572 		bzero(vnd->sc_xname, sizeof(vnd->sc_xname));	/* XXX */
573 		sprintf(vnd->sc_xname, "vnd%d", unit);		/* XXX */
574 		vnd->sc_dkdev.dk_name = vnd->sc_xname;
575 		disk_attach(&vnd->sc_dkdev);
576 
577 		vndunlock(vnd);
578 
579 		break;
580 
581 	case VNDIOCCLR:
582 		if ((vnd->sc_flags & VNF_INITED) == 0)
583 			return (ENXIO);
584 
585 		if ((error = vndlock(vnd)) != 0)
586 			return (error);
587 
588 		/*
589 		 * Don't unconfigure if any other partitions are open
590 		 * or if both the character and block flavors of this
591 		 * partition are open.
592 		 */
593 		part = DISKPART(dev);
594 		pmask = (1 << part);
595 		if ((vnd->sc_dkdev.dk_openmask & ~pmask) ||
596 		    ((vnd->sc_dkdev.dk_bopenmask & pmask) &&
597 		    (vnd->sc_dkdev.dk_copenmask & pmask))) {
598 			vndunlock(vnd);
599 			return (EBUSY);
600 		}
601 
602 		vndclear(vnd);
603 #ifdef DEBUG
604 		if (vnddebug & VDB_INIT)
605 			printf("vndioctl: CLRed\n");
606 #endif
607 
608 		/* Detach the disk. */
609 		disk_detach(&vnd->sc_dkdev);
610 
611 		/* This must be atomic. */
612 		s = splhigh();
613 		vndunlock(vnd);
614 		bzero(vnd, sizeof(struct vnd_softc));
615 		splx(s);
616 
617 		break;
618 
619 	/*
620 	 * XXX Should support disklabels.
621 	 */
622 
623 	default:
624 		return(ENOTTY);
625 	}
626 
627 	return (0);
628 }
629 
630 /*
631  * Duplicate the current process's credentials.  Since we are called only
632  * as the result of a SET ioctl and only root can do that, any future access
633  * to this "disk" is essentially as root.  Note that credentials may change
634  * if some other uid can write directly to the mapped file (NFS).
635  */
636 int
637 vndsetcred(vnd, cred)
638 	register struct vnd_softc *vnd;
639 	struct ucred *cred;
640 {
641 	struct uio auio;
642 	struct iovec aiov;
643 	char *tmpbuf;
644 	int error;
645 
646 	vnd->sc_cred = crdup(cred);
647 	tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);
648 
649 	/* XXX: Horrible kludge to establish credentials for NFS */
650 	aiov.iov_base = tmpbuf;
651 	aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
652 	auio.uio_iov = &aiov;
653 	auio.uio_iovcnt = 1;
654 	auio.uio_offset = 0;
655 	auio.uio_rw = UIO_READ;
656 	auio.uio_segflg = UIO_SYSSPACE;
657 	auio.uio_resid = aiov.iov_len;
658 	VOP_LOCK(vnd->sc_vp);
659 	error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
660 	VOP_UNLOCK(vnd->sc_vp);
661 
662 	free(tmpbuf, M_TEMP);
663 	return (error);
664 }
665 
666 /*
667  * Set maxactive based on FS type
668  */
669 void
670 vndthrottle(vnd, vp)
671 	register struct vnd_softc *vnd;
672 	struct vnode *vp;
673 {
674 #ifdef NFSCLIENT
675 	extern int (**nfsv2_vnodeop_p) __P((void *));
676 
677 	if (vp->v_op == nfsv2_vnodeop_p)
678 		vnd->sc_maxactive = 2;
679 	else
680 #endif
681 		vnd->sc_maxactive = 8;
682 
683 	if (vnd->sc_maxactive < 1)
684 		vnd->sc_maxactive = 1;
685 }
686 
687 void
688 vndshutdown()
689 {
690 	register struct vnd_softc *vnd;
691 
692 	for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
693 		if (vnd->sc_flags & VNF_INITED)
694 			vndclear(vnd);
695 }
696 
697 void
698 vndclear(vnd)
699 	register struct vnd_softc *vnd;
700 {
701 	register struct vnode *vp = vnd->sc_vp;
702 	struct proc *p = curproc;		/* XXX */
703 
704 #ifdef DEBUG
705 	if (vnddebug & VDB_FOLLOW)
706 		printf("vndclear(%p): vp %p\n", vnd, vp);
707 #endif
708 	vnd->sc_flags &= ~VNF_INITED;
709 	if (vp == (struct vnode *)0)
710 		panic("vndioctl: null vp");
711 	(void) vn_close(vp, FREAD|FWRITE, vnd->sc_cred, p);
712 	crfree(vnd->sc_cred);
713 	vnd->sc_vp = (struct vnode *)0;
714 	vnd->sc_cred = (struct ucred *)0;
715 	vnd->sc_size = 0;
716 }
717 
718 int
719 vndsize(dev)
720 	dev_t dev;
721 {
722 	int unit = vndunit(dev);
723 	register struct vnd_softc *vnd = &vnd_softc[unit];
724 
725 	if (unit >= numvnd || (vnd->sc_flags & VNF_INITED) == 0)
726 		return(-1);
727 	return(vnd->sc_size);
728 }
729 
730 int
731 vnddump(dev, blkno, va, size)
732 	dev_t dev;
733 	daddr_t blkno;
734 	caddr_t va;
735 	size_t size;
736 {
737 
738 	/* Not implemented. */
739 	return ENXIO;
740 }
741 
742 /*
743  * Wait interruptibly for an exclusive lock.
744  *
745  * XXX
746  * Several drivers do this; it should be abstracted and made MP-safe.
747  */
748 static int
749 vndlock(sc)
750 	struct vnd_softc *sc;
751 {
752 	int error;
753 
754 	while ((sc->sc_flags & VNF_LOCKED) != 0) {
755 		sc->sc_flags |= VNF_WANTED;
756 		if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
757 			return (error);
758 	}
759 	sc->sc_flags |= VNF_LOCKED;
760 	return (0);
761 }
762 
763 /*
764  * Unlock and wake up any waiters.
765  */
766 static void
767 vndunlock(sc)
768 	struct vnd_softc *sc;
769 {
770 
771 	sc->sc_flags &= ~VNF_LOCKED;
772 	if ((sc->sc_flags & VNF_WANTED) != 0) {
773 		sc->sc_flags &= ~VNF_WANTED;
774 		wakeup(sc);
775 	}
776 }
777