/*	kern_physio.c	4.34	82/10/21	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/buf.h"
#include "../h/conf.h"
#include "../h/proc.h"
#include "../h/seg.h"
#include "../h/pte.h"
#include "../h/vm.h"
#include "../h/trace.h"
#include "../h/uio.h"

/*
 * Swap IO headers -
 * They contain the necessary information for the swap I/O.
 * At any given time, a swap header can be in three
 * different lists. When free it is in the free list,
 * when allocated and the I/O queued, it is on the swap
 * device list, and finally, if the operation was a dirty
 * page push, when the I/O completes, it is inserted
 * in a list of cleaned pages to be processed by the pageout daemon.
 */
struct	buf *swbuf;
short	*swsize;		/* CAN WE JUST USE B_BCOUNT? */
int	*swpf;

/*
 * swap I/O -
 *
 * If the flag indicates a dirty page push initiated
 * by the pageout daemon, we map the page into the i'th
 * virtual page of process 2 (the daemon itself) where i is
 * the index of the swap header that has been allocated.
 * We simply initialize the header and queue the I/O but
 * do not wait for completion. When the I/O completes,
 * iodone() will link the header to a list of cleaned
 * pages to be processed by the pageout daemon.
 */
swap(p, dblkno, addr, nbytes, rdflg, flag, dev, pfcent)
	struct proc *p;
	swblk_t dblkno;
	caddr_t addr;
	int nbytes, rdflg, flag;
	dev_t dev;
	u_int pfcent;
{
	register struct buf *bp;
	register int c;
	int p2dp;
	register struct pte *dpte, *vpte;
	int s;

	s = spl6();
	while (bswlist.av_forw == NULL) {
		bswlist.b_flags |= B_WANTED;
		sleep((caddr_t)&bswlist, PSWP+1);
	}
	bp = bswlist.av_forw;
	bswlist.av_forw = bp->av_forw;
	splx(s);

	bp->b_flags = B_BUSY | B_PHYS | rdflg | flag;
	if ((bp->b_flags & (B_DIRTY|B_PGIN)) == 0)
		if (rdflg == B_READ)
			sum.v_pswpin += btoc(nbytes);
		else
			sum.v_pswpout += btoc(nbytes);
	bp->b_proc = p;
	if (flag & B_DIRTY) {
		p2dp = ((bp - swbuf) * CLSIZE) * KLMAX;
		dpte = dptopte(&proc[2], p2dp);
		vpte = vtopte(p, btop(addr));
		for (c = 0; c < nbytes; c += NBPG) {
			if (vpte->pg_pfnum == 0 || vpte->pg_fod)
				panic("swap bad pte");
			*dpte++ = *vpte++;
		}
		bp->b_un.b_addr = (caddr_t)ctob(p2dp);
	} else
		bp->b_un.b_addr = addr;
	while (nbytes > 0) {
		c = imin(ctob(120), nbytes);
		bp->b_bcount = c;
		bp->b_blkno = dblkno;
		bp->b_dev = dev;
		if (flag & B_DIRTY) {
			swpf[bp - swbuf] = pfcent;
			swsize[bp - swbuf] = nbytes;
		}
#ifdef TRACE
		trace(TR_SWAPIO, dev, bp->b_blkno);
#endif
		(*bdevsw[major(dev)].d_strategy)(bp);
		if (flag & B_DIRTY) {
			if (c < nbytes)
				panic("big push");
			return;
		}
		s = spl6();
		while ((bp->b_flags&B_DONE) == 0)
			sleep((caddr_t)bp, PSWP);
		splx(s);
		bp->b_un.b_addr += c;
		bp->b_flags &= ~B_DONE;
		if (bp->b_flags & B_ERROR) {
			if ((flag & (B_UAREA|B_PAGET)) || rdflg == B_WRITE)
				panic("hard IO err in swap");
			swkill(p, (char *)0);
		}
		nbytes -= c;
		dblkno += btoc(c);
	}
	s = spl6();
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
	bp->av_forw = bswlist.av_forw;
	bswlist.av_forw = bp;
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		wakeup((caddr_t)&bswlist);
		wakeup((caddr_t)&proc[2]);
	}
	splx(s);
}
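
/*
 * An illustrative sketch of how swap() is invoked; the names rp,
 * swapblk, vaddr and pfnum are hypothetical stand-ins for the process,
 * the swap block number, the virtual address and the page-frame handle:
 *
 *	swap(rp, swapblk, vaddr, ctob(CLSIZE), B_WRITE, B_DIRTY,
 *	    swapdev, pfnum);
 *
 * would queue a dirty page push and return without waiting, leaving
 * iodone() to hand the header to the pageout daemon, while a call with
 * rdflg == B_READ and flag == 0 reads an image back in and sleeps
 * until each transfer completes.
 */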

/*
 * If rout == 0 then killed on swap error, else
 * rout is the name of the routine where we ran out of
 * swap space.
 */
swkill(p, rout)
	struct proc *p;
	char *rout;
{
	char *mesg;

	printf("pid %d: ", p->p_pid);
	if (rout)
		printf(mesg = "killed due to no swap space\n");
	else
		printf(mesg = "killed on swap error\n");
	uprintf("sorry, pid %d was %s", p->p_pid, mesg);
	/*
	 * To be sure no looping (e.g. in vmsched trying to
	 * swap out) mark process locked in core (as though
	 * done by user) after killing it so no one will try
	 * to swap it out.
	 */
	psignal(p, SIGKILL);
	p->p_flag |= SULOCK;
}
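
/*
 * Sketch of the two kinds of swkill() call described by the comment
 * above (the routine name in the string is only hypothetical):
 *
 *	swkill(p, (char *)0);		on a hard swap I/O error
 *	swkill(p, "swapout");		when a routine runs out of swap
 */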

/*
 * Raw I/O. The arguments are
 *	The strategy routine for the device
 *	A buffer, which will always be a special buffer
 *	  header owned exclusively by the device for this purpose
 *	The device number
 *	Read/write flag
 * Essentially all the work is computing physical addresses and
 * validating them.
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked. After the completion of the I/O, the above pages
 * are unlocked.
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	unsigned (*mincnt)();
	struct uio *uio;
{
	register struct iovec *iov = uio->uio_iov;
	register int c;
	char *a;
	int s, error = 0;

nextiov:
	if (uio->uio_iovcnt == 0)
		return (0);
	if (useracc(iov->iov_base, (u_int)iov->iov_len,
	    rw == B_READ ? B_WRITE : B_READ) == NULL)
		return (EFAULT);
	s = spl6();
	while (bp->b_flags&B_BUSY) {
		bp->b_flags |= B_WANTED;
		sleep((caddr_t)bp, PRIBIO+1);
	}
	splx(s);
	bp->b_error = 0;
	bp->b_proc = u.u_procp;
	bp->b_un.b_addr = iov->iov_base;
	while (iov->iov_len > 0) {
		bp->b_flags = B_BUSY | B_PHYS | rw;
		bp->b_dev = dev;
		bp->b_blkno = uio->uio_offset >> PGSHIFT;
		bp->b_bcount = iov->iov_len;
		(*mincnt)(bp);
		c = bp->b_bcount;
		u.u_procp->p_flag |= SPHYSIO;
		vslock(a = bp->b_un.b_addr, c);
		(*strat)(bp);
		(void) spl6();
		while ((bp->b_flags&B_DONE) == 0)
			sleep((caddr_t)bp, PRIBIO);
		vsunlock(a, c, rw);
		u.u_procp->p_flag &= ~SPHYSIO;
		if (bp->b_flags&B_WANTED)
			wakeup((caddr_t)bp);
		splx(s);
		c -= bp->b_resid;
		bp->b_un.b_addr += c;
		iov->iov_len -= c;
		uio->uio_resid -= c;
		uio->uio_offset += c;
		if (bp->b_flags&B_ERROR)
			break;
	}
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS);
	error = geterror(bp);
	if (error)
		return (error);
	uio->uio_iov++;
	uio->uio_iovcnt--;
	goto nextiov;
}

/*ARGSUSED*/
unsigned
minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > 63 * 1024)
		bp->b_bcount = 63 * 1024;
}
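
/*
 * An illustrative sketch with hypothetical driver names xxread,
 * xxstrategy and xxrbuf: a character device read routine typically
 * hands its strategy routine, a private buffer header and the default
 * minphys() bound to physio(), which then performs the access check,
 * page locking and transfer loop above on the driver's behalf.
 */
#ifdef notdef
xxread(dev, uio)
	dev_t dev;
	struct uio *uio;
{

	return (physio(xxstrategy, &xxrbuf, dev, B_READ, minphys, uio));
}
#endif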