1 #include "u.h" 2 #include "../port/lib.h" 3 #include "mem.h" 4 #include "dat.h" 5 #include "fns.h" 6 #include "../port/error.h" 7 8 int 9 fault(ulong addr, int read) 10 { 11 Segment *s; 12 char *sps; 13 14 if(up->nlocks.ref) print("fault nlocks %ld\n", up->nlocks.ref); 15 16 sps = up->psstate; 17 up->psstate = "Fault"; 18 spllo(); 19 20 m->pfault++; 21 for(;;) { 22 s = seg(up, addr, 1); /* leaves s->lk qlocked if seg != nil */ 23 if(s == 0) { 24 up->psstate = sps; 25 return -1; 26 } 27 28 if(!read && (s->type&SG_RONLY)) { 29 qunlock(&s->lk); 30 up->psstate = sps; 31 return -1; 32 } 33 34 if(fixfault(s, addr, read, 1) == 0) 35 break; 36 } 37 38 up->psstate = sps; 39 return 0; 40 } 41 42 static void 43 faulterror(char *s, Chan *c, int freemem) 44 { 45 char buf[ERRMAX]; 46 47 if(c && c->path){ 48 snprint(buf, sizeof buf, "%s accessing %s: %s", s, c->path->s, up->errstr); 49 s = buf; 50 } 51 if(up->nerrlab) { 52 postnote(up, 1, s, NDebug); 53 error(s); 54 } 55 pexit(s, freemem); 56 } 57 58 int 59 fixfault(Segment *s, ulong addr, int read, int doputmmu) 60 { 61 int type; 62 int ref; 63 Pte **p, *etp; 64 ulong mmuphys=0, soff; 65 Page **pg, *lkp, *new; 66 Page *(*fn)(Segment*, ulong); 67 68 addr &= ~(BY2PG-1); 69 soff = addr-s->base; 70 p = &s->map[soff/PTEMAPMEM]; 71 if(*p == 0) 72 *p = ptealloc(); 73 74 etp = *p; 75 pg = &etp->pages[(soff&(PTEMAPMEM-1))/BY2PG]; 76 type = s->type&SG_TYPE; 77 78 if(pg < etp->first) 79 etp->first = pg; 80 if(pg > etp->last) 81 etp->last = pg; 82 83 switch(type) { 84 default: 85 panic("fault"); 86 break; 87 88 case SG_TEXT: /* Demand load */ 89 if(pagedout(*pg)) 90 pio(s, addr, soff, pg); 91 92 mmuphys = PPN((*pg)->pa) | PTERONLY|PTEVALID; 93 (*pg)->modref = PG_REF; 94 break; 95 96 case SG_BSS: 97 case SG_SHARED: /* Zero fill on demand */ 98 case SG_STACK: 99 if(*pg == 0) { 100 new = newpage(1, &s, addr); 101 if(s == 0) 102 return -1; 103 104 *pg = new; 105 } 106 goto common; 107 108 case SG_DATA: 109 common: /* Demand load/pagein/copy on write */ 110 if(pagedout(*pg)) 111 pio(s, addr, soff, pg); 112 113 /* 114 * It's only possible to copy on write if 115 * we're the only user of the segment. 
		 */
		if(read && conf.copymode == 0 && s->ref == 1) {
			mmuphys = PPN((*pg)->pa)|PTERONLY|PTEVALID;
			(*pg)->modref |= PG_REF;
			break;
		}

		lkp = *pg;
		lock(lkp);

		if(lkp->image == &swapimage)
			ref = lkp->ref + swapcount(lkp->daddr);
		else
			ref = lkp->ref;
		if(ref > 1) {
			unlock(lkp);

			if(swapfull()){
				qunlock(&s->lk);
				pprint("swap space full\n");
				faulterror(Enoswap, nil, 1);
			}

			new = newpage(0, &s, addr);
			if(s == 0)
				return -1;
			*pg = new;
			copypage(lkp, *pg);
			putpage(lkp);
		}
		else {
			/* save a copy of the original for the image cache */
			if(lkp->image && !swapfull())
				duppage(lkp);

			unlock(lkp);
		}
		mmuphys = PPN((*pg)->pa) | PTEWRITE | PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;

	case SG_PHYSICAL:
		if(*pg == 0) {
			fn = s->pseg->pgalloc;
			if(fn)
				*pg = (*fn)(s, addr);
			else {
				new = smalloc(sizeof(Page));
				new->va = addr;
				new->pa = s->pseg->pa+(addr-s->base);
				new->ref = 1;
				*pg = new;
			}
		}

		mmuphys = PPN((*pg)->pa) |PTEWRITE|PTEUNCACHED|PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;
	}
	qunlock(&s->lk);

	if(doputmmu)
		putmmu(addr, mmuphys, *pg);

	return 0;
}

void
pio(Segment *s, ulong addr, ulong soff, Page **p)
{
	Page *new;
	KMap *k;
	Chan *c;
	int n, ask;
	char *kaddr;
	ulong daddr;
	Page *loadrec;

retry:
	loadrec = *p;
	if(loadrec == 0) {	/* from a text/data image */
		daddr = s->fstart+soff;
		new = lookpage(s->image, daddr);
		if(new != nil) {
			*p = new;
			return;
		}
	}
	else {			/* from a swap image */
		daddr = swapaddr(loadrec);
		new = lookpage(&swapimage, daddr);
		if(new != nil) {
			putswap(loadrec);
			*p = new;
			return;
		}
	}

	qunlock(&s->lk);

	new = newpage(0, 0, addr);
	k = kmap(new);
	kaddr = (char*)VA(k);

	if(loadrec == 0) {	/* This is demand load */
		c = s->image->c;
		while(waserror()) {
			if(strcmp(up->errstr, Eintr) == 0)
				continue;
			kunmap(k);
			putpage(new);
			faulterror("sys: demand load I/O error", c, 0);
		}

		ask = s->flen-soff;
		if(ask > BY2PG)
			ask = BY2PG;

		n = devtab[c->type]->read(c, kaddr, ask, daddr);
		if(n != ask)
			faulterror(Eioload, c, 0);
		if(ask < BY2PG)
			memset(kaddr+ask, 0, BY2PG-ask);

		poperror();
		kunmap(k);
		qlock(&s->lk);

		/*
		 *  race, another proc may have gotten here first while
		 *  s->lk was unlocked
		 */
		if(*p == 0) {
			new->daddr = daddr;
			cachepage(new, s->image);
			*p = new;
		}
		else
			putpage(new);
	}
	else {			/* This is paged out */
		c = swapimage.c;
		if(waserror()) {
			kunmap(k);
			putpage(new);
			qlock(&s->lk);
			qunlock(&s->lk);
			faulterror("sys: page in I/O error", c, 0);
		}

		n = devtab[c->type]->read(c, kaddr, BY2PG, daddr);
		if(n != BY2PG)
			faulterror(Eioload, c, 0);

		poperror();
		kunmap(k);
		qlock(&s->lk);

		/*
		 *  race, another proc may have gotten here first
		 *  (and the pager may have run on that page) while
		 *  s->lk was unlocked
		 */
		if(*p != loadrec){
			if(!pagedout(*p)){
				/* another process did it for me */
				putpage(new);
				goto done;
			} else {
				/* another process and the pager got in */
				putpage(new);
				goto retry;
			}
		}

		new->daddr = daddr;
		cachepage(new, &swapimage);
		*p = new;
		putswap(loadrec);
	}

done:
	if(s->flushme)
		memset((*p)->cachectl, PG_TXTFLUSH, sizeof((*p)->cachectl));
}

/*
 * Called only in a system call
 */
int
okaddr(ulong addr, ulong len, int write)
{
	Segment *s;

	if((long)len >= 0) {
		for(;;) {
			s = seg(up, addr, 0);
			if(s == 0 || (write && (s->type&SG_RONLY)))
				break;

			if(addr+len > s->top) {
				len -= s->top - addr;
				addr = s->top;
				continue;
			}
			return 1;
		}
	}
	pprint("suicide: invalid address 0x%lux in sys call pc=0x%lux\n", addr, userpc());
	return 0;
}

void
validaddr(ulong addr, ulong len, int write)
{
	if(!okaddr(addr, len, write))
		pexit("Suicide", 0);
}

/*
 * &s[0] is known to be a valid address.
 */
void*
vmemchr(void *s, int c, int n)
{
	int m;
	ulong a;
	void *t;

	a = (ulong)s;
	while(PGROUND(a) != PGROUND(a+n-1)){
		/* spans pages; handle this page */
		m = BY2PG - (a & (BY2PG-1));
		t = memchr((void*)a, c, m);
		if(t)
			return t;
		a += m;
		n -= m;
		if(a < KZERO)
			validaddr(a, 1, 0);
	}

	/* fits in one page */
	return memchr((void*)a, c, n);
}

Segment*
seg(Proc *p, ulong addr, int dolock)
{
	Segment **s, **et, *n;

	et = &p->seg[NSEG];
	for(s = p->seg; s < et; s++) {
		n = *s;
		if(n == 0)
			continue;
		if(addr >= n->base && addr < n->top) {
			if(dolock == 0)
				return n;

			qlock(&n->lk);
			if(addr >= n->base && addr < n->top)
				return n;
			qunlock(&n->lk);
		}
	}

	return 0;
}

extern void checkmmu(ulong, ulong);
void
checkpages(void)
{
	int checked;
	ulong addr, off;
	Pte *p;
	Page *pg;
	Segment **sp, **ep, *s;

	if(up == nil)
		return;

	checked = 0;
	for(sp=up->seg, ep=&up->seg[NSEG]; sp<ep; sp++){
		s = *sp;
		if(s == nil)
			continue;
		qlock(&s->lk);
		for(addr=s->base; addr<s->top; addr+=BY2PG){
			off = addr - s->base;
			p = s->map[off/PTEMAPMEM];
			if(p == 0)
				continue;
			pg = p->pages[(off&(PTEMAPMEM-1))/BY2PG];
			if(pg == 0 || pagedout(pg))
				continue;
			checkmmu(addr, pg->pa);
			checked++;
		}
		qunlock(&s->lk);
	}
	print("%ld %s: checked %d page table entries\n", up->pid, up->text, checked);
}