1 #include "u.h" 2 #include "../port/lib.h" 3 #include "mem.h" 4 #include "dat.h" 5 #include "fns.h" 6 #include "../port/error.h" 7 8 Page *lkpage(Segment*, ulong); 9 void lkpgfree(Page*); 10 void imagereclaim(void); 11 12 /* System specific segattach devices */ 13 #include "io.h" 14 #include "segment.h" 15 16 #define IHASHSIZE 64 17 #define ihash(s) imagealloc.hash[s%IHASHSIZE] 18 struct 19 { 20 Lock; 21 Image *free; 22 Image *hash[IHASHSIZE]; 23 QLock ireclaim; 24 }imagealloc; 25 26 void 27 initseg(void) 28 { 29 Image *i, *ie; 30 31 imagealloc.free = xalloc(conf.nimage*sizeof(Image)); 32 ie = &imagealloc.free[conf.nimage-1]; 33 for(i = imagealloc.free; i < ie; i++) 34 i->next = i+1; 35 i->next = 0; 36 } 37 38 Segment * 39 newseg(int type, ulong base, ulong size) 40 { 41 Segment *s; 42 43 if(size > (SEGMAPSIZE*PTEPERTAB)) 44 error(Enovmem); 45 46 s = smalloc(sizeof(Segment)); 47 s->ref = 1; 48 s->type = type; 49 s->base = base; 50 s->top = base+(size*BY2PG); 51 s->size = size; 52 return s; 53 } 54 55 void 56 putseg(Segment *s) 57 { 58 Pte **pp, **emap; 59 Image *i; 60 61 if(s == 0) 62 return; 63 64 i = s->image; 65 if(i != 0) { 66 lock(i); 67 lock(s); 68 if(i->s == s && s->ref == 1) 69 i->s = 0; 70 unlock(i); 71 } 72 else 73 lock(s); 74 75 s->ref--; 76 if(s->ref != 0) { 77 unlock(s); 78 return; 79 } 80 81 qlock(&s->lk); 82 if(i) 83 putimage(i); 84 85 emap = &s->map[SEGMAPSIZE]; 86 for(pp = s->map; pp < emap; pp++) 87 if(*pp) 88 freepte(s, *pp); 89 90 qunlock(&s->lk); 91 free(s); 92 } 93 94 void 95 relocateseg(Segment *s, ulong offset) 96 { 97 Pte **p, **endpte; 98 Page **pg, **endpages; 99 100 endpte = &s->map[SEGMAPSIZE]; 101 for(p = s->map; p < endpte; p++) { 102 if(*p) { 103 endpages = &((*p)->pages[PTEPERTAB]); 104 for(pg = (*p)->pages; pg < endpages; pg++) 105 if(*pg) 106 (*pg)->va += offset; 107 } 108 } 109 } 110 111 Segment* 112 dupseg(Segment **seg, int segno, int share) 113 { 114 int i; 115 Pte *pte; 116 Segment *n, *s; 117 118 SET(n); 119 s = seg[segno]; 120 121 switch(s->type&SG_TYPE) { 122 case SG_TEXT: /* New segment shares pte set */ 123 case SG_SHARED: 124 case SG_PHYSICAL: 125 case SG_SHDATA: 126 incref(s); 127 return s; 128 129 case SG_STACK: 130 qlock(&s->lk); 131 n = newseg(s->type, s->base, s->size); 132 break; 133 134 case SG_BSS: /* Just copy on write */ 135 qlock(&s->lk); 136 if(share && s->ref == 1) { 137 s->type = (s->type&~SG_TYPE)|SG_SHARED; 138 incref(s); 139 qunlock(&s->lk); 140 return s; 141 } 142 n = newseg(s->type, s->base, s->size); 143 break; 144 145 case SG_DATA: /* Copy on write plus demand load info */ 146 if(segno == TSEG) 147 return data2txt(s); 148 149 qlock(&s->lk); 150 if(share && s->ref == 1) { 151 s->type = (s->type&~SG_TYPE)|SG_SHDATA; 152 incref(s); 153 qunlock(&s->lk); 154 return s; 155 } 156 n = newseg(s->type, s->base, s->size); 157 158 incref(s->image); 159 n->image = s->image; 160 n->fstart = s->fstart; 161 n->flen = s->flen; 162 break; 163 } 164 for(i = 0; i < SEGMAPSIZE; i++) 165 if(pte = s->map[i]) 166 n->map[i] = ptecpy(pte); 167 168 n->flushme = s->flushme; 169 qunlock(&s->lk); 170 return n; 171 } 172 173 void 174 segpage(Segment *s, Page *p) 175 { 176 Pte **pte; 177 ulong off; 178 Page **pg; 179 180 if(p->va < s->base || p->va >= s->top) 181 panic("segpage"); 182 183 off = p->va - s->base; 184 pte = &s->map[off/PTEMAPMEM]; 185 if(*pte == 0) 186 *pte = ptealloc(); 187 188 pg = &(*pte)->pages[(off&(PTEMAPMEM-1))/BY2PG]; 189 *pg = p; 190 if(pg < (*pte)->first) 191 (*pte)->first = pg; 192 if(pg > (*pte)->last) 193 (*pte)->last = pg; 194 
}

/*
 * Find the cached image for the file on channel c, or allocate
 * and hash a new one, and make sure it has a segment.
 */
Image*
attachimage(int type, Chan *c, ulong base, ulong len)
{
	Image *i, **l;

	lock(&imagealloc);

	/*
	 * Search the image cache for remains of the text from a previous
	 * or currently running incarnation
	 */
	for(i = ihash(c->qid.path); i; i = i->hash) {
		if(c->qid.path == i->qid.path) {
			lock(i);
			if(eqqid(c->qid, i->qid) &&
			   eqqid(c->mqid, i->mqid) &&
			   c->mchan == i->mchan &&
			   c->type == i->type) {
				i->ref++;
				goto found;
			}
			unlock(i);
		}
	}

	/*
	 * imagereclaim dumps pages from the free list which are cached by image
	 * structures. This should free some image structures.
	 */
	while(!(i = imagealloc.free)) {
		unlock(&imagealloc);
		imagereclaim();
		resrcwait(0);
		lock(&imagealloc);
	}

	imagealloc.free = i->next;

	lock(i);
	incref(c);
	i->c = c;
	i->type = c->type;
	i->qid = c->qid;
	i->mqid = c->mqid;
	i->mchan = c->mchan;
	i->ref = 1;
	l = &ihash(c->qid.path);
	i->hash = *l;
	*l = i;
found:
	unlock(&imagealloc);

	if(i->s == 0) {
		/* Disaster after commit in exec */
		if(waserror()) {
			unlock(i);
			pexit(Enovmem, 1);
		}
		i->s = newseg(type, base, len);
		i->s->image = i;
		poperror();
	}
	else
		incref(i->s);
	unlock(i);

	return i;
}

/*
 * Uncache free pages held by images so Image structures can be recycled.
 */
void
imagereclaim(void)
{
	Page *p;

	/* Somebody is already cleaning the page cache */
	if(!canqlock(&imagealloc.ireclaim))
		return;

	lock(&palloc);
	for(p = palloc.head; p; p = p->next) {
		if(p->image && p->ref == 0 && p->image != &swapimage && canlock(p)) {
			if(p->ref == 0)
				uncachepage(p);
			unlock(p);
		}
	}
	unlock(&palloc);
	qunlock(&imagealloc.ireclaim);
}

/*
 * Release a reference to image i; when the last reference goes,
 * unhash it, put it back on the free list and close its channel.
 */
void
putimage(Image *i)
{
	Chan *c;
	Image *f, **l;

	if(i == &swapimage)
		return;

	lock(i);
	if(--i->ref == 0) {
		l = &ihash(i->qid.path);
		i->qid = (Qid){~0, ~0};
		unlock(i);
		c = i->c;

		lock(&imagealloc);
		for(f = *l; f; f = f->hash) {
			if(f == i) {
				*l = i->hash;
				break;
			}
			l = &f->hash;
		}

		i->next = imagealloc.free;
		imagealloc.free = i;
		unlock(&imagealloc);

		close(c);
		return;
	}
	unlock(i);
}

/*
 * Move the end of segment seg of the current process to addr;
 * an addr of 0 just returns the segment base.
 */
long
ibrk(ulong addr, int seg)
{
	Segment *s, *ns;
	ulong newtop, newsize;
	int i;

	s = u->p->seg[seg];
	if(s == 0)
		error(Ebadarg);

	if(addr == 0)
		return s->base;

	qlock(&s->lk);

	/* We may start with the bss overlapping the data */
	if(addr < s->base) {
		if(seg != BSEG || u->p->seg[DSEG] == 0 || addr < u->p->seg[DSEG]->base) {
			qunlock(&s->lk);
			error(Enovmem);
		}
		addr = s->base;
	}

	newtop = PGROUND(addr);
	newsize = (newtop-s->base)/BY2PG;
	if(newtop < s->top) {
		mfreeseg(s, newtop, (s->top-newtop)/BY2PG);
		qunlock(&s->lk);
		flushmmu();
		return 0;
	}

	for(i = 0; i < NSEG; i++) {
		ns = u->p->seg[i];
		if(ns == 0 || ns == s)
			continue;
		if(newtop >= ns->base && newtop < ns->top) {
			qunlock(&s->lk);
			error(Esoverlap);
		}
	}

	if(newsize > (PTEMAPMEM*SEGMAPSIZE)/BY2PG) {
		qunlock(&s->lk);
		error(Enovmem);
	}

	s->top = newtop;
	s->size = newsize;
	qunlock(&s->lk);
	return 0;
}

/*
 * Free pages pages of s starting at virtual address start.
 */
void
mfreeseg(Segment *s, ulong start, int pages)
{
	int i, j;
	ulong soff;
	Page *pg;

	soff = start-s->base;
	j = (soff&(PTEMAPMEM-1))/BY2PG;

	for(i = soff/PTEMAPMEM; i < SEGMAPSIZE; i++) {
		if(pages <= 0)
			break;
		if(s->map[i] == 0) {
			pages -= PTEPERTAB-j;
			j = 0;
			continue;
		}
		while(j < PTEPERTAB) {
			pg = s->map[i]->pages[j];
			if(pg) {
				putpage(pg);
				s->map[i]->pages[j] = 0;
			}
			if(--pages == 0)
				return;
			j++;
		}
		j = 0;
	}
}

/*
 * Return non-zero if [va, va+len) intersects an existing segment
 * of the current process.
 */
int
isoverlap(ulong va, int len)
{
	int i;
	Segment *ns;
	ulong newtop;

	newtop = va+len;
	for(i = 0; i < NSEG; i++) {
		ns = u->p->seg[i];
		if(ns == 0)
			continue;
		if((newtop > ns->base && newtop <= ns->top) ||
		   (va >= ns->base && va < ns->top))
			return 1;
	}
	return 0;
}

/*
 * Attach the named physical segment to the process at va,
 * picking an unused hole in the address space if va is 0.
 */
ulong
segattach(Proc *p, ulong attr, char *name, ulong va, ulong len)
{
	int i, sno;
	Segment *s;
	Physseg *ps;

	USED(p);
	if(va != 0 && (va&KZERO) == KZERO)	/* BUG: Only ok for now */
		error(Ebadarg);

	validaddr((ulong)name, 1, 0);
	vmemchr(name, 0, ~0);

	for(sno = 0; sno < NSEG; sno++)
		if(u->p->seg[sno] == 0 && sno != ESEG)
			break;

	if(sno == NSEG)
		error(Enovmem);

	len = PGROUND(len);

	/* Find a hole in the address space */
	if(va == 0) {
		va = p->seg[SSEG]->base - len;
		for(i = 0; i < 20; i++) {
			if(isoverlap(va, len) == 0)
				break;
			va -= len;
		}
	}

	va = va&~(BY2PG-1);
	if(isoverlap(va, len))
		error(Esoverlap);

	for(ps = physseg; ps->name; ps++)
		if(strcmp(name, ps->name) == 0)
			goto found;

	error(Ebadarg);
found:
	if(len > ps->size)
		error(Enovmem);

	attr &= ~SG_TYPE;	/* Turn off what we are not allowed */
	attr |= ps->attr;	/* Copy in defaults */

	s = newseg(attr, va, len/BY2PG);
	s->pseg = ps;
	u->p->seg[sno] = s;

	return va;
}

/*
 * Mark the resident pages in pte->pages[s..e) so the mmu code
 * flushes them from the instruction cache.
 */
void
pteflush(Pte *pte, int s, int e)
{
	int i;
	Page *p;

	for(i = s; i < e; i++) {
		p = pte->pages[i];
		if(pagedout(p) == 0)
			memset(p->cachectl, PG_TXTFLUSH, sizeof(p->cachectl));
	}
}

/*
 * Flush cached instructions for the address range [addr, addr+len);
 * arg[0] is the address, arg[1] the length.
 */
long
syssegflush(ulong *arg)
{
	Segment *s;
	ulong addr, l;
	Pte *pte;
	int chunk, ps, pe, len;

	addr = arg[0];
	len = arg[1];

	while(len > 0) {
		s = seg(u->p, addr, 1);
		if(s == 0)
			error(Ebadarg);

		s->flushme = 1;
	more:
		l = len;
		if(addr+l > s->top)
			l = s->top - addr;

		ps = addr-s->base;
		pte = s->map[ps/PTEMAPMEM];
		ps &= PTEMAPMEM-1;
		pe = PTEMAPMEM;
		if(pe-ps > l){
			pe = ps + l;
			pe = (pe+BY2PG-1)&~(BY2PG-1);
		}
		if(pe == ps) {
			qunlock(&s->lk);
			error(Ebadarg);
		}

		if(pte)
			pteflush(pte, ps/BY2PG, pe/BY2PG);

		chunk = pe-ps;
		len -= chunk;
		addr += chunk;

		if(len > 0 && addr < s->top)
			goto more;

		qunlock(&s->lk);
	}
	flushmmu();
	return 0;
}