/*
 * Copyright (c) 1982 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)kern_physio.c	6.6 (Berkeley) 06/08/85
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "buf.h"
#include "conf.h"
#include "proc.h"
#include "seg.h"
#include "vm.h"
#include "trace.h"
#include "map.h"
#include "uio.h"

/*
 * Swap I/O headers -
 * They contain the necessary information for the swap I/O.
 * At any given time, a swap header can be in three
 * different lists.  When free it is in the free list,
 * when allocated and the I/O queued, it is on the swap
 * device list, and finally, if the operation was a dirty
 * page push, when the I/O completes, it is inserted
 * in a list of cleaned pages to be processed by the pageout daemon.
 */
struct	buf *swbuf;

/*
 * swap I/O -
 *
 * If the flag indicates a dirty page push initiated
 * by the pageout daemon, we map the page into the i-th
 * virtual page of process 2 (the daemon itself) where i is
 * the index of the swap header that has been allocated.
 * We simply initialize the header and queue the I/O but
 * do not wait for completion.  When the I/O completes,
 * biodone() calls swdone() to link the header onto a list of
 * cleaned pages to be processed by the pageout daemon.
 */
swap(p, dblkno, addr, nbytes, rdflg, flag, dev, pfcent)
	struct proc *p;
	swblk_t dblkno;
	caddr_t addr;
	int nbytes, rdflg, flag;
	dev_t dev;
	u_int pfcent;
{
	register struct buf *bp;
	register u_int c;
	int p2dp;
	register struct pte *dpte, *vpte;
	int s;
	extern swdone();

	/* grab a free swap header, sleeping if none is available */
	s = spl6();
	while (bswlist.av_forw == NULL) {
		bswlist.b_flags |= B_WANTED;
		sleep((caddr_t)&bswlist, PSWP+1);
	}
	bp = bswlist.av_forw;
	bswlist.av_forw = bp->av_forw;
	splx(s);

	bp->b_flags = B_BUSY | B_PHYS | rdflg | flag;
	if ((bp->b_flags & (B_DIRTY|B_PGIN)) == 0)
		if (rdflg == B_READ)
			sum.v_pswpin += btoc(nbytes);
		else
			sum.v_pswpout += btoc(nbytes);
	bp->b_proc = p;
	if (flag & B_DIRTY) {
		/*
		 * Dirty page push: double map the pages into the
		 * pageout daemon's (process 2's) address space and
		 * arrange for swdone() to be called when the write
		 * completes.
		 */
		p2dp = ((bp - swbuf) * CLSIZE) * KLMAX;
		dpte = dptopte(&proc[2], p2dp);
		vpte = vtopte(p, btop(addr));
		for (c = 0; c < nbytes; c += NBPG) {
			if (vpte->pg_pfnum == 0 || vpte->pg_fod)
				panic("swap bad pte");
			*dpte++ = *vpte++;
		}
		bp->b_un.b_addr = (caddr_t)ctob(dptov(&proc[2], p2dp));
		bp->b_flags |= B_CALL;
		bp->b_iodone = swdone;
		bp->b_pfcent = pfcent;
	} else
		bp->b_un.b_addr = addr;
	while (nbytes > 0) {
		bp->b_bcount = nbytes;
		minphys(bp);
		c = bp->b_bcount;
		bp->b_blkno = dblkno;
		bp->b_dev = dev;
#ifdef TRACE
		trace(TR_SWAPIO, dev, bp->b_blkno);
#endif
		physstrat(bp, bdevsw[major(dev)].d_strategy, PSWP);
		if (flag & B_DIRTY) {
			if (c < nbytes)
				panic("big push");
			return;
		}
		bp->b_un.b_addr += c;
		bp->b_flags &= ~B_DONE;
		if (bp->b_flags & B_ERROR) {
			if ((flag & (B_UAREA|B_PAGET)) || rdflg == B_WRITE)
				panic("hard IO err in swap");
			swkill(p, "swap: read error from swap device");
		}
		nbytes -= c;
		dblkno += btodb(c);
	}
	/* return the swap header to the free list */
	s = spl6();
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
	bp->av_forw = bswlist.av_forw;
	bswlist.av_forw = bp;
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		wakeup((caddr_t)&bswlist);
		wakeup((caddr_t)&proc[2]);
	}
	splx(s);
}
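
/*
 * Illustrative sketch (not part of the original source): how a caller
 * might invoke swap() for an asynchronous dirty-page push.  The names
 * p, daddr, v and pf are hypothetical stand-ins for the pageout
 * daemon's own variables; swapdev, B_WRITE, B_DIRTY, CLSIZE, ptob()
 * and ctob() are the usual kernel definitions.  Because B_DIRTY is
 * set, the write is only queued here; when it finishes, swdone()
 * places the header on bclnlist for the daemon to process.
 *
 *	swap(p, daddr, ptob(v), ctob(CLSIZE), B_WRITE, B_DIRTY,
 *	    swapdev, pf);
 *
 * A synchronous transfer (for example, swapping a u. area or page
 * tables in or out) would instead pass B_READ or B_WRITE with flag
 * bits such as B_UAREA or B_PAGET and no B_DIRTY; swap() then moves
 * the region in minphys()-sized pieces, checking each for errors
 * before returning.
 */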

/*
 * Put a buffer on the clean list after I/O is done.
 * Called from biodone.
 */
swdone(bp)
	register struct buf *bp;
{
	register int s;

	if (bp->b_flags & B_ERROR)
		panic("IO err in push");
	s = spl6();
	bp->av_forw = bclnlist;
	cnt.v_pgout++;
	cnt.v_pgpgout += bp->b_bcount / NBPG;
	bclnlist = bp;
	if (bswlist.b_flags & B_WANTED)
		wakeup((caddr_t)&proc[2]);
	splx(s);
}

/*
 * Kill a process because of an unrecoverable swap error or because
 * we ran out of swap space.  rout names the routine that detected
 * the condition and is included in the messages.
 */
swkill(p, rout)
	struct proc *p;
	char *rout;
{

	printf("pid %d: %s\n", p->p_pid, rout);
	uprintf("sorry, pid %d was killed in %s\n", p->p_pid, rout);
	/*
	 * To be sure no looping (e.g. in vmsched trying to
	 * swap out) mark process locked in core (as though
	 * done by user) after killing it so no one will try
	 * to swap it out.
	 */
	psignal(p, SIGKILL);
	p->p_flag |= SULOCK;
}
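
/*
 * Illustrative sketch (not part of the original source): besides the
 * swap read error case in swap() above, swkill() is meant for routines
 * that run out of swap space.  A hypothetical caller, here a routine
 * named swapout() using a made-up allocator swapalloc(), might look
 * like:
 *
 *	if (swapalloc(size) == 0) {
 *		swkill(p, "swapout");
 *		return (0);
 *	}
 *
 * Passing the calling routine's name as rout lets the console and
 * user messages report where the space ran out.
 */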

/*
 * Raw I/O.  The arguments are
 *	The strategy routine for the device
 *	A buffer, which will always be a special buffer
 *	  header owned exclusively by the device for this purpose
 *	The device number
 *	Read/write flag
 * Essentially all the work is computing physical addresses and
 * validating them.
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked.  After the completion of the I/O, the above
 * pages are unlocked.
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	unsigned (*mincnt)();
	struct uio *uio;
{
	register struct iovec *iov;
	register int c;
	char *a;
	int s, error = 0;

nextiov:
	if (uio->uio_iovcnt == 0)
		return (0);
	iov = uio->uio_iov;
	/* verify that the user may access the buffer for the transfer */
	if (useracc(iov->iov_base, (u_int)iov->iov_len,
	    rw == B_READ ? B_WRITE : B_READ) == NULL)
		return (EFAULT);
	/* wait for exclusive use of the device's raw buffer header */
	s = spl6();
	while (bp->b_flags&B_BUSY) {
		bp->b_flags |= B_WANTED;
		sleep((caddr_t)bp, PRIBIO+1);
	}
	splx(s);
	bp->b_error = 0;
	bp->b_proc = u.u_procp;
	bp->b_un.b_addr = iov->iov_base;
	while (iov->iov_len > 0) {
		bp->b_flags = B_BUSY | B_PHYS | rw;
		bp->b_dev = dev;
		bp->b_blkno = btodb(uio->uio_offset);
		bp->b_bcount = iov->iov_len;
		(*mincnt)(bp);
		c = bp->b_bcount;
		/* fault and lock the user's pages for the duration of the I/O */
		u.u_procp->p_flag |= SPHYSIO;
		vslock(a = bp->b_un.b_addr, c);
		physstrat(bp, strat, PRIBIO);
		(void) spl6();
		vsunlock(a, c, rw);
		u.u_procp->p_flag &= ~SPHYSIO;
		if (bp->b_flags&B_WANTED)
			wakeup((caddr_t)bp);
		splx(s);
		c -= bp->b_resid;
		bp->b_un.b_addr += c;
		iov->iov_len -= c;
		uio->uio_resid -= c;
		uio->uio_offset += c;
		/* temp kludge for tape drives */
		if (bp->b_resid || (bp->b_flags&B_ERROR))
			break;
	}
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS);
	error = geterror(bp);
	/* temp kludge for tape drives */
	if (bp->b_resid || error)
		return (error);
	uio->uio_iov++;
	uio->uio_iovcnt--;
	goto nextiov;
}

#define	MAXPHYS	(63 * 1024)

/*
 * Trim b_bcount to the largest transfer we are willing to hand
 * to a driver in one piece.
 */
unsigned
minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
}
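
/*
 * Illustrative sketch (not part of the original source): character
 * device drivers typically provide raw read/write entry points that
 * are little more than calls to physio().  The driver, buffer and
 * strategy names below (xx, rxxbuf, xxstrategy) are hypothetical:
 *
 *	xxread(dev, uio)
 *		dev_t dev;
 *		struct uio *uio;
 *	{
 *
 *		return (physio(xxstrategy, &rxxbuf, dev, B_READ,
 *		    minphys, uio));
 *	}
 *
 * A driver whose controller cannot handle MAXPHYS-sized transfers
 * passes its own mincnt routine in place of minphys to trim
 * b_bcount further.
 */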