xref: /csrg-svn/sys/kern/kern_physio.c (revision 65771)
/*-
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * %sccs.include.proprietary.c%
 *
 *	@(#)kern_physio.c	8.4 (Berkeley) 01/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/vnode.h>

static void freeswbuf __P((struct buf *));
static struct buf *getswbuf __P((int));

/*
 * This routine does raw device I/O for a user process, directly to or
 * from the process's own address space.
 *
 * Each iovec is first checked for the proper access with useracc().
 * The pages involved in the I/O are then faulted in, locked (vslock),
 * and mapped into kernel space (vmapbuf) for the duration of the
 * transfer; after the device strategy routine signals completion, the
 * pages are unmapped and unlocked again.  If the caller supplies no
 * buffer header, one is borrowed from the swap buffer header free
 * list.  (An illustrative driver-level caller is sketched after this
 * function.)
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	u_int (*mincnt)();
	struct uio *uio;
{
	register struct iovec *iov;
	register int requested = 0, done = 0;
	register struct proc *p = curproc;
	char *a;
	int s, allocbuf = 0, error = 0;

	if (bp == NULL) {
		allocbuf = 1;
		bp = getswbuf(PRIBIO+1);
	}
	for (; uio->uio_iovcnt; uio->uio_iov++, uio->uio_iovcnt--) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0)
			continue;
		if (!useracc(iov->iov_base, (u_int)iov->iov_len,
		    rw == B_READ ? B_WRITE : B_READ)) {
			error = EFAULT;
			break;
		}
		if (!allocbuf) {	/* only if sharing caller's buffer */
			s = splbio();
			while (bp->b_flags&B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
			}
			splx(s);
		}
		bp->b_error = 0;
		bp->b_proc = p;
#ifdef HPUXCOMPAT
		if (ISHPMMADDR(iov->iov_base))
			bp->b_data = (caddr_t)HPMMBASEADDR(iov->iov_base);
		else
#endif
		bp->b_data = iov->iov_base;
		while (iov->iov_len > 0) {
			bp->b_flags = B_BUSY | B_PHYS | B_RAW | rw;
			bp->b_dev = dev;
			bp->b_blkno = btodb(uio->uio_offset);
			bp->b_bcount = iov->iov_len;
			(*mincnt)(bp);		/* clamp to per-transfer limit */
			requested = bp->b_bcount;
			p->p_flag |= P_PHYSIO;
			vslock(a = bp->b_data, requested);	/* wire user pages */
			vmapbuf(bp);		/* and map them into kernel space */
			(*strat)(bp);
			s = splbio();
			while ((bp->b_flags & B_DONE) == 0)	/* await completion */
				sleep((caddr_t)bp, PRIBIO);
			vunmapbuf(bp);
			vsunlock(a, requested, rw);
			p->p_flag &= ~P_PHYSIO;
			if (bp->b_flags&B_WANTED)	/* rare */
				wakeup((caddr_t)bp);
			splx(s);
			done = bp->b_bcount - bp->b_resid;
			(char *)bp->b_data += done;
			iov->iov_len -= done;
			uio->uio_resid -= done;
			uio->uio_offset += done;
			/* temp kludge for disk drives */
			if (done < requested || bp->b_flags & B_ERROR)
				break;
		}
		bp->b_flags &= ~(B_BUSY | B_WANTED | B_PHYS | B_RAW);
		error = biowait(bp);
		/* temp kludge for disk drives */
		if (done < requested || bp->b_flags & B_ERROR)
			break;
	}
	if (allocbuf)
		freeswbuf(bp);
	return (error);
}

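/*
 * Illustrative sketch only (not part of the original file): a typical
 * character-device caller of physio().  The driver prefix "xx", its
 * strategy routine xxstrategy(), and its private buffer header xxbuf
 * are hypothetical.  Supplying a private buffer header (rather than
 * passing NULL as rawread() below does) makes physio() serialize raw
 * transfers on that header via the B_BUSY/B_WANTED handshake above.
 */
#ifdef notdef
int	xxstrategy();		/* hypothetical driver strategy routine */
struct	buf xxbuf;		/* hypothetical private raw-I/O buffer header */

xxread(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	return (physio(xxstrategy, &xxbuf, dev, B_READ, minphys, uio));
}
#endif /* notdef */
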
/*
 * Clamp the size of an I/O request to MAXPHYS, the largest transfer
 * handled in a single operation.  This limit is necessary to prevent
 * a single process from being able to lock more than a fixed amount
 * of memory in the kernel.  minphys() is the default "mincnt" routine
 * handed to physio(); a driver-specific variant is sketched after the
 * function below.
 */
u_int
minphys(bp)
	struct buf *bp;
{
	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
}

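/*
 * Illustrative sketch only (not part of the original file): a driver
 * whose hardware cannot handle MAXPHYS-sized transfers would pass its
 * own clamping routine as the "mincnt" argument to physio() instead
 * of minphys().  The name xxminphys and the 64Kb controller limit are
 * hypothetical; chaining to minphys() preserves the global bound too.
 */
#ifdef notdef
u_int
xxminphys(bp)
	struct buf *bp;
{
	if (bp->b_bcount > 64 * 1024)
		bp->b_bcount = 64 * 1024;
	minphys(bp);
}
#endif /* notdef */
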
/*
 * Allocate a buffer header from the swap buffer header free list
 * (bswlist), sleeping at the given priority until one is available.
 */
static struct buf *
getswbuf(prio)
	int prio;
{
	int s;
	struct buf *bp;

	s = splbio();
	while (bswlist.b_actf == NULL) {
		bswlist.b_flags |= B_WANTED;
		sleep((caddr_t)&bswlist, prio);
	}
	bp = bswlist.b_actf;
	bswlist.b_actf = bp->b_actf;
	splx(s);
	return (bp);
}

/*
 * Return a buffer header to the swap buffer header free list,
 * releasing any associated vnode, and wake up anyone waiting for one.
 */
static void
freeswbuf(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	bp->b_actf = bswlist.b_actf;
	bswlist.b_actf = bp;
	if (bp->b_vp)
		brelvp(bp);
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		wakeup((caddr_t)&bswlist);
		wakeup((caddr_t)pageproc);
	}
	splx(s);
}

/*
 * Do a read on a device for a user process.
 */
rawread(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
	    dev, B_READ, minphys, uio));
}

/*
 * Do a write on a device for a user process.
 */
rawwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
	    dev, B_WRITE, minphys, uio));
}
188