/*	kern_physio.c	4.31	82/08/13	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/buf.h"
#include "../h/conf.h"
#include "../h/proc.h"
#include "../h/seg.h"
#include "../h/pte.h"
#include "../h/vm.h"
#include "../h/trace.h"
#include "../h/uio.h"

/*
 * Swap IO headers -
 * They contain the necessary information for the swap I/O.
 * At any given time, a swap header can be in three
 * different lists. When free it is in the free list,
 * when allocated and the I/O queued, it is on the swap
 * device list, and finally, if the operation was a dirty
 * page push, when the I/O completes, it is inserted
 * in a list of cleaned pages to be processed by the pageout daemon.
 */
struct	buf *swbuf;		/* base of the swap-header array; (bp - swbuf) indexes the parallel arrays below */
short	*swsize;	/* CAN WE JUST USE B_BCOUNT? */
int	*swpf;			/* per-header page-frame value (pfcent) recorded for dirty pushes */

/*
 * swap I/O -
 *
 * If the flag indicates a dirty page push initiated
 * by the pageout daemon, we map the page into the i th
 * virtual page of process 2 (the daemon itself) where i is
 * the index of the swap header that has been allocated.
 * We simply initialize the header and queue the I/O but
 * do not wait for completion. When the I/O completes,
 * iodone() will link the header to a list of cleaned
 * pages to be processed by the pageout daemon.
 *
 * Parameters:
 *	p	process whose pages are being transferred
 *	dblkno	starting swap-device block number
 *	addr	virtual address in p's space (ignored for dirty push,
 *		where the page is remapped into process 2 instead)
 *	nbytes	byte count to transfer
 *	rdflg	B_READ or B_WRITE
 *	flag	extra buf flags (B_DIRTY, B_PGIN, B_UAREA, B_PAGET, ...)
 *	dev	swap device
 *	pfcent	page-frame info recorded in swpf[] for a dirty push
 */
swap(p, dblkno, addr, nbytes, rdflg, flag, dev, pfcent)
	struct proc *p;
	swblk_t dblkno;
	caddr_t addr;
	int flag, nbytes;
	dev_t dev;
	unsigned pfcent;
{
	register struct buf *bp;
	register int c;
	int p2dp;
	register struct pte *dpte, *vpte;
	int s;

	/*
	 * Allocate a swap header from the free list headed by bswlist,
	 * sleeping until one is available.  Interrupts are blocked
	 * (spl6) while the list is examined and unlinked because
	 * iodone() returns headers to this list at interrupt level.
	 */
	s = spl6();
	while (bswlist.av_forw == NULL) {
		bswlist.b_flags |= B_WANTED;
		sleep((caddr_t)&bswlist, PSWP+1);
	}
	bp = bswlist.av_forw;
	bswlist.av_forw = bp->av_forw;
	splx(s);

	bp->b_flags = B_BUSY | B_PHYS | rdflg | flag;
	/* Account plain swap traffic only; dirty pushes and page-ins are counted elsewhere. */
	if ((bp->b_flags & (B_DIRTY|B_PGIN)) == 0)
		if (rdflg == B_READ)
			sum.v_pswpin += btoc(nbytes);
		else
			sum.v_pswpout += btoc(nbytes);
	bp->b_proc = p;
	if (flag & B_DIRTY) {
		/*
		 * Dirty page push: double-map the page(s) being pushed
		 * into process 2's address space at a slot determined by
		 * this header's index, so the I/O can proceed without
		 * holding the source process in core.
		 */
		p2dp = ((bp - swbuf) * CLSIZE) * KLMAX;
		dpte = dptopte(&proc[2], p2dp);
		vpte = vtopte(p, btop(addr));
		for (c = 0; c < nbytes; c += NBPG) {
			/* A valid, resident page is required; anything else is a kernel bug. */
			if (vpte->pg_pfnum == 0 || vpte->pg_fod)
				panic("swap bad pte");
			*dpte++ = *vpte++;
		}
		bp->b_un.b_addr = (caddr_t)ctob(p2dp);
	} else
		bp->b_un.b_addr = addr;
	/*
	 * Transfer in chunks of at most ctob(120) bytes
	 * (120 clicks appears to be the device transfer limit here —
	 * NOTE(review): confirm against the swap-device drivers).
	 */
	while (nbytes > 0) {
		c = imin(ctob(120), nbytes);
		bp->b_bcount = c;
		bp->b_blkno = dblkno;
		bp->b_dev = dev;
		if (flag & B_DIRTY) {
			/* Record bookkeeping iodone()/the pageout daemon will need. */
			swpf[bp - swbuf] = pfcent;
			swsize[bp - swbuf] = nbytes;
		}
#ifdef TRACE
		trace(TR_SWAPIO, dev, bp->b_blkno);
#endif
		(*bdevsw[major(dev)].d_strategy)(bp);
		if (flag & B_DIRTY) {
			/*
			 * Dirty push is asynchronous: do not wait, and do not
			 * free the header here — iodone() queues it for the
			 * pageout daemon.  A push must fit in one chunk.
			 */
			if (c < nbytes)
				panic("big push");
			return;
		}
		/* Synchronous path: wait for this chunk to complete. */
		s = spl6();
		while((bp->b_flags&B_DONE)==0)
			sleep((caddr_t)bp, PSWP);
		splx(s);
		bp->b_un.b_addr += c;
		bp->b_flags &= ~B_DONE;
		if (bp->b_flags & B_ERROR) {
			/*
			 * Errors on u-area/page-table transfers or on swap-out
			 * are unrecoverable; a failed swap-in of ordinary pages
			 * just kills the victim process.
			 */
			if ((flag & (B_UAREA|B_PAGET)) || rdflg == B_WRITE)
				panic("hard IO err in swap");
			swkill(p, (char *)0);
		}
		nbytes -= c;
		dblkno += btoc(c);
	}
	/*
	 * Return the header to the free list and wake anyone waiting
	 * for one.  Process 2 (the pageout daemon) is also awakened —
	 * NOTE(review): presumably it sleeps awaiting header availability;
	 * confirm against vm_sched.
	 */
	s = spl6();
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
	bp->av_forw = bswlist.av_forw;
	bswlist.av_forw = bp;
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		wakeup((caddr_t)&bswlist);
		wakeup((caddr_t)&proc[2]);
	}
	splx(s);
}

/*
 * Kill process p with SIGKILL after a swap failure, printing the
 * reason on the console and on the user's terminal.
 * If rout == 0 then killed on swap error, else
 * rout is the name of the routine where we ran out of
 * swap space.  (Note: rout's text is not printed, only used
 * as a flag to select the message.)
 */
swkill(p, rout)
	struct proc *p;
	char *rout;
{
	char *mesg;

	printf("pid %d: ", p->p_pid);
	if (rout)
		printf(mesg = "killed due to no swap space\n");
	else
		printf(mesg = "killed on swap error\n");
	uprintf("sorry, pid %d was %s", p->p_pid, mesg);
	/*
	 * To be sure no looping (e.g. in vmsched trying to
	 * swap out) mark process locked in core (as though
	 * done by user) after killing it so no one will try
	 * to swap it out.
	 */
	psignal(p, SIGKILL);
	p->p_flag |= SULOCK;
}

/*
 * Raw I/O. The arguments are
 *	The strategy routine for the device
 *	A buffer, which will always be a special buffer
 *	  header owned exclusively by the device for this purpose
 *	The device number
 *	Read/write flag
 * Essentially all the work is computing physical addresses and
 * validating them.
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked. After the completion of the I/O, the above pages
 * are unlocked.
 */
/*
 * Perform raw (unbuffered) I/O directly between the user's address
 * space and the device, iovec by iovec.
 *
 * Parameters:
 *	strat	device strategy routine
 *	bp	dedicated buffer header owned by the device for raw I/O
 *	dev	device number
 *	rw	B_READ or B_WRITE
 *	mincnt	routine that trims bp->b_bcount to the device's maximum
 *		transfer size (e.g. minphys below)
 *	uio	scatter/gather description; if 0, one is synthesized
 *		from the u_base/u_count/u_offset fields of the u area
 *
 * Returns 0 on success or an errno (also stored in u.u_error).
 * u.u_count is left holding the untransferred residual.
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	unsigned (*mincnt)();
	struct uio *uio;
{
	register int c;
	struct uio auio;
	register struct iovec *iov;
	struct iovec aiov;
	char *a;
	int s, error = 0;

	/* No uio supplied: build a single-iovec one from the u area. */
	if (uio == 0) {
		uio = &auio;
		uio->uio_iov = &aiov;
		uio->uio_iovcnt = 1;
		uio->uio_offset = u.u_offset;
		uio->uio_segflg = u.u_segflg;
		iov = &aiov;
		iov->iov_base = u.u_base;
		iov->iov_len = u.u_count;
		uio->uio_resid = iov->iov_len;
	} else
		iov = uio->uio_iov;
nextiov:
	/* All iovecs done: report the residual and succeed. */
	if (uio->uio_iovcnt == 0) {
		u.u_count = uio->uio_resid;
		return (0);
	}
	/*
	 * Verify the user may access the region in the needed direction
	 * (a device read writes user memory, and vice versa).
	 */
	if (useracc(iov->iov_base,iov->iov_len,rw==B_READ?B_WRITE:B_READ) == NULL) {
		u.u_count = uio->uio_resid;
		u.u_error = EFAULT;
		return (EFAULT);
	}
	/*
	 * Claim exclusive use of the device's raw buffer header,
	 * sleeping while another raw transfer holds it.
	 */
	s = spl6();
	while (bp->b_flags&B_BUSY) {
		bp->b_flags |= B_WANTED;
		sleep((caddr_t)bp, PRIBIO+1);
	}
	splx(s);
	bp->b_error = 0;
	bp->b_proc = u.u_procp;
	bp->b_un.b_addr = iov->iov_base;
	/* Transfer this iovec in device-sized chunks. */
	while (iov->iov_len > 0) {
		bp->b_flags = B_BUSY | B_PHYS | rw;
		bp->b_dev = dev;
		bp->b_blkno = uio->uio_offset >> PGSHIFT;
		bp->b_bcount = iov->iov_len;
		(*mincnt)(bp);		/* clamp b_bcount to the device max */
		c = bp->b_bcount;
		/*
		 * SPHYSIO marks the process as doing physical I/O so it
		 * will not be swapped while its pages are wired; vslock
		 * faults in and locks the user pages for the transfer.
		 */
		u.u_procp->p_flag |= SPHYSIO;
		vslock(a = bp->b_un.b_addr, c);
		(*strat)(bp);
		/* Block interrupts while testing B_DONE to avoid a lost wakeup. */
		(void) spl6();
		while ((bp->b_flags&B_DONE) == 0)
			sleep((caddr_t)bp, PRIBIO);
		vsunlock(a, c, rw);
		u.u_procp->p_flag &= ~SPHYSIO;
		if (bp->b_flags&B_WANTED)
			wakeup((caddr_t)bp);
		splx(s);	/* NOTE(review): restores the priority saved before the loop, not the spl6 above — verify intended */
		c -= bp->b_resid;	/* actual bytes moved this pass */
		bp->b_un.b_addr += c;
		iov->iov_len -= c;
		uio->uio_resid -= c;
		uio->uio_offset += c;
		/* TEMP */
		if (bp->b_flags&B_ERROR)
			break;
	}
	/* Release the buffer header and report any device error. */
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS);
	error = geterror(bp);
	if (error) {
		u.u_error = error;
		u.u_count = uio->uio_resid;
		return (error);
	}
	uio->uio_iov++;
	uio->uio_iovcnt--;
	goto nextiov;
}

/*
 * Default mincnt routine: clamp a raw transfer to 63K bytes.
 * NOTE(review): declared unsigned but falls off the end without a
 * return value; callers invoke it via (*mincnt)(bp) for the side
 * effect on b_bcount only — confirm no caller uses the return value.
 */
/*ARGSUSED*/
unsigned
minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > 63 * 1024)
		bp->b_bcount = 63 * 1024;
}