xref: /csrg-svn/sys/kern/kern_physio.c (revision 51454)
149589Sbostic /*-
249589Sbostic  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
349589Sbostic  * All rights reserved.
423461Smckusick  *
549589Sbostic  * %sccs.include.proprietary.c%
649589Sbostic  *
7*51454Sbostic  *	@(#)kern_physio.c	7.22 (Berkeley) 11/01/91
823461Smckusick  */
98Sbill 
10*51454Sbostic #include <sys/param.h>
11*51454Sbostic #include <sys/systm.h>
12*51454Sbostic #include <sys/buf.h>
13*51454Sbostic #include <sys/conf.h>
14*51454Sbostic #include <sys/proc.h>
15*51454Sbostic #include <sys/seg.h>
16*51454Sbostic #include <sys/trace.h>
17*51454Sbostic #include <sys/map.h>
18*51454Sbostic #include <sys/vnode.h>
19*51454Sbostic #include <sys/specdev.h>
208Sbill 
2148414Skarels #ifdef HPUXCOMPAT
22*51454Sbostic #include <sys/user.h>
2348414Skarels #endif
2448414Skarels 
25*51454Sbostic static void freeswbuf __P((struct buf *));
26*51454Sbostic static struct buf *getswbuf __P((int));
2748414Skarels 
2891Sbill /*
2949672Smckusick  * This routine does device I/O for a user process.
3049672Smckusick  *
318Sbill  * If the user has the proper access privileges, the process is
328Sbill  * marked 'delayed unlock' and the pages involved in the I/O are
3349672Smckusick  * faulted and locked. After the completion of the I/O, the pages
348Sbill  * are unlocked.
358Sbill  */
367724Swnj physio(strat, bp, dev, rw, mincnt, uio)
377724Swnj 	int (*strat)();
387724Swnj 	register struct buf *bp;
397724Swnj 	dev_t dev;
407724Swnj 	int rw;
4134215Sbostic 	u_int (*mincnt)();
427724Swnj 	struct uio *uio;
438Sbill {
4417313Skarels 	register struct iovec *iov;
4538794Skarels 	register int requested, done;
4647540Skarels 	register struct proc *p = curproc;
478Sbill 	char *a;
4834215Sbostic 	int s, allocbuf = 0, error = 0;
498Sbill 
	/* No caller-supplied buffer header: borrow one from the swap pool. */
5034215Sbostic 	if (bp == NULL) {
5134215Sbostic 		allocbuf = 1;
5234215Sbostic 		bp = getswbuf(PRIBIO+1);
5334215Sbostic 	}
	/* One pass of the outer loop per iovec in the user's request. */
5434215Sbostic 	for (; uio->uio_iovcnt; uio->uio_iov++, uio->uio_iovcnt--) {
5530750Skarels 		iov = uio->uio_iov;
		/*
		 * Verify user access to the range.  Note the inversion: a
		 * device read (B_READ) stores into user memory, so the user
		 * pages must be writable, and vice versa.
		 */
5634215Sbostic 		if (!useracc(iov->iov_base, (u_int)iov->iov_len,
5734215Sbostic 		    rw == B_READ ? B_WRITE : B_READ)) {
5834215Sbostic 			error = EFAULT;
5934215Sbostic 			break;
6030750Skarels 		}
		/*
		 * A caller-supplied buffer may be shared with other users of
		 * the device; serialize on B_BUSY via the standard
		 * B_WANTED/sleep handshake at splbio.
		 */
6134215Sbostic 		if (!allocbuf) {	/* only if sharing caller's buffer */
6234215Sbostic 			s = splbio();
6334215Sbostic 			while (bp->b_flags&B_BUSY) {
6434215Sbostic 				bp->b_flags |= B_WANTED;
6534215Sbostic 				sleep((caddr_t)bp, PRIBIO+1);
6634215Sbostic 			}
6734215Sbostic 			splx(s);
6834215Sbostic 		}
6930750Skarels 		bp->b_error = 0;
7047540Skarels 		bp->b_proc = p;
		/* HP-UX compat: remap shared-memory addresses to their base. */
7142001Smckusick #ifdef HPUXCOMPAT
7242001Smckusick 		if (ISHPMMADDR(iov->iov_base))
7342001Smckusick 			bp->b_un.b_addr = (caddr_t)HPMMBASEADDR(iov->iov_base);
7442001Smckusick 		else
7542001Smckusick #endif
7630750Skarels 		bp->b_un.b_addr = iov->iov_base;
		/*
		 * Inner loop: issue the iovec as one or more transfers, each
		 * clamped by the driver's mincnt routine (e.g. minphys).
		 * NOTE(review): if iov_len is 0 on entry, this loop body never
		 * runs and `done'/`requested' are used uninitialized at the
		 * post-loop break test below -- confirm callers never pass a
		 * zero-length iovec.
		 */
7730750Skarels 		while (iov->iov_len > 0) {
7834215Sbostic 			bp->b_flags = B_BUSY | B_PHYS | B_RAW | rw;
7930750Skarels 			bp->b_dev = dev;
8030750Skarels 			bp->b_blkno = btodb(uio->uio_offset);
8130750Skarels 			bp->b_bcount = iov->iov_len;
			/* Driver trims b_bcount to its per-transfer maximum. */
8230750Skarels 			(*mincnt)(bp);
8338794Skarels 			requested = bp->b_bcount;
			/*
			 * Mark the process as doing physical I/O, wire the
			 * user pages, and map them for the driver before
			 * starting the transfer.
			 */
8447540Skarels 			p->p_flag |= SPHYSIO;
8538794Skarels 			vslock(a = bp->b_un.b_addr, requested);
8642001Smckusick 			vmapbuf(bp);
8734215Sbostic 			(*strat)(bp);
			/* Wait for completion; biodone() sets B_DONE. */
8834215Sbostic 			s = splbio();
8934215Sbostic 			while ((bp->b_flags & B_DONE) == 0)
9034215Sbostic 				sleep((caddr_t)bp, PRIBIO);
9142001Smckusick 			vunmapbuf(bp);
9238794Skarels 			vsunlock(a, requested, rw);
9347540Skarels 			p->p_flag &= ~SPHYSIO;
9434215Sbostic 			if (bp->b_flags&B_WANTED)	/* rare */
9530750Skarels 				wakeup((caddr_t)bp);
9630750Skarels 			splx(s);
			/* Advance the uio by the number of bytes moved. */
9738794Skarels 			done = bp->b_bcount - bp->b_resid;
9838794Skarels 			bp->b_un.b_addr += done;
9938794Skarels 			iov->iov_len -= done;
10038794Skarels 			uio->uio_resid -= done;
10138794Skarels 			uio->uio_offset += done;
10238794Skarels 			/* temp kludge for disk drives */
			/* A short or errored transfer ends this iovec. */
10338794Skarels 			if (done < requested || bp->b_flags & B_ERROR)
10430750Skarels 				break;
10530750Skarels 		}
10634215Sbostic 		bp->b_flags &= ~(B_BUSY | B_WANTED | B_PHYS | B_RAW);
		/* Collect the per-buffer error status for this iovec. */
10737729Smckusick 		error = biowait(bp);
10838794Skarels 		/* temp kludge for disk drives */
10938794Skarels 		if (done < requested || bp->b_flags & B_ERROR)
11034215Sbostic 			break;
1118Sbill 	}
	/* hp300: flush the user-side cache after physical I/O. */
11242001Smckusick #if defined(hp300)
11342001Smckusick 	DCIU();
11442001Smckusick #endif
11534215Sbostic 	if (allocbuf)
11634215Sbostic 		freeswbuf(bp);
11734215Sbostic 	return (error);
1188Sbill }
1198Sbill 
12049672Smckusick /*
12149672Smckusick  * Calculate the maximum size of I/O request that can be requested
12249672Smckusick  * in a single operation. This limit is necessary to prevent a single
12349672Smckusick  * process from being able to lock more than a fixed amount of memory
12449672Smckusick  * in the kernel.
12549672Smckusick  */
12634215Sbostic u_int
1278Sbill minphys(bp)
1287724Swnj 	struct buf *bp;
1298Sbill {
13010400Ssam 	if (bp->b_bcount > MAXPHYS)
13110400Ssam 		bp->b_bcount = MAXPHYS;
1328Sbill }
13334215Sbostic 
134*51454Sbostic static struct buf *
13534215Sbostic getswbuf(prio)
13634215Sbostic 	int prio;
13734215Sbostic {
13834215Sbostic 	int s;
13934215Sbostic 	struct buf *bp;
14034215Sbostic 
14134215Sbostic 	s = splbio();
14234215Sbostic 	while (bswlist.av_forw == NULL) {
14334215Sbostic 		bswlist.b_flags |= B_WANTED;
14434215Sbostic 		sleep((caddr_t)&bswlist, prio);
14534215Sbostic 	}
14634215Sbostic 	bp = bswlist.av_forw;
14734215Sbostic 	bswlist.av_forw = bp->av_forw;
14834215Sbostic 	splx(s);
14934215Sbostic 	return (bp);
15034215Sbostic }
15134215Sbostic 
152*51454Sbostic static void
15334215Sbostic freeswbuf(bp)
15434215Sbostic 	struct buf *bp;
15534215Sbostic {
15634215Sbostic 	int s;
15734215Sbostic 
15834215Sbostic 	s = splbio();
15934215Sbostic 	bp->av_forw = bswlist.av_forw;
16034215Sbostic 	bswlist.av_forw = bp;
16139148Smckusick 	if (bp->b_vp)
16239148Smckusick 		brelvp(bp);
16334215Sbostic 	if (bswlist.b_flags & B_WANTED) {
16434215Sbostic 		bswlist.b_flags &= ~B_WANTED;
16534215Sbostic 		wakeup((caddr_t)&bswlist);
16647540Skarels 		wakeup((caddr_t)pageproc);
16734215Sbostic 	}
16834215Sbostic 	splx(s);
16934215Sbostic }
17034215Sbostic 
17149672Smckusick /*
17249672Smckusick  * Do a read on a device for a user process.
17349672Smckusick  */
17434215Sbostic rawread(dev, uio)
17534215Sbostic 	dev_t dev;
17634215Sbostic 	struct uio *uio;
17734215Sbostic {
17834215Sbostic 	return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
17934215Sbostic 	    dev, B_READ, minphys, uio));
18034215Sbostic }
18134215Sbostic 
18249672Smckusick /*
18349672Smckusick  * Do a write on a device for a user process.
18449672Smckusick  */
18534215Sbostic rawwrite(dev, uio)
18634215Sbostic 	dev_t dev;
18734215Sbostic 	struct uio *uio;
18834215Sbostic {
18934215Sbostic 	return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
19034215Sbostic 	    dev, B_WRITE, minphys, uio));
19134215Sbostic }
192