#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "ureg.h"

/*
 * tlb entry 0 is used only by mmuswitch() to set the current tlb pid.
 *
 * It is apparently assumed that user tlb entries are not
 * overwritten during start-up, so ...
 * During system start-up (before up first becomes non-nil),
 * kmap entries start at tlb index 1 and work their way up until
 * kmapinval() removes them. They then restart at 1. As long as there
 * are few kmap entries they will not pass tlbroff (the WIRED tlb entry
 * limit) and interfere with user tlb entries.
 * Once start-up is over, we combine the kernel and user tlb pools into one,
 * in the hope of making better use of the tlb on systems with small ones.
 *
 * All invalidations of the tlb are via indexed entries. The virtual
 * address used is always 'KZERO | (x<<(PGSHIFT+1)) | currentpid' where
 * 'x' is the index into the tlb. This ensures that the current pid doesn't
 * change and that no two invalidated entries have matching virtual
 * addresses, just in case SGI/MIPS ever makes a chip that cares (as
 * they keep threatening). These entries should never be used in
 * lookups since accesses to KZERO addresses don't go through the tlb
 * (actually only true of KSEG0 and KSEG1; KSEG2 and KSEG3 do go
 * through the tlb).
 */

#define TLBINVAL(x, pid) puttlbx(x, KZERO|((x)<<(PGSHIFT+1))|(pid), 0, 0, PGSZ)

enum {
	Debugswitch	= 0,
	Debughash	= 0,
};

static ulong ktime[8];		/* only for first 8 cpus */

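/*
 * invalidate the entire tlb at start-up, under tlb pid 0.
 */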
void
tlbinit(void)
{
	int i;

	for(i=0; i<NTLB; i++)
		TLBINVAL(i, 0);
}

Lock	kmaplock;
KMap	kpte[KPTESIZE];
KMap*	kmapfree;

static int minfree = KPTESIZE;
static int lastfree;
static int tlbroff = TLBROFF;

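/*
 * count the KMaps on the free list and record a new low-water mark.
 * debugging aid; see the commented-out call in kunmap().
 */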
static void
nfree(void)
{
	int i;
	KMap *k;

	i = 0;
	for(k=kmapfree; k; k=k->next)
		i++;
	if(i<minfree){
		iprint("%d free\n", i);
		minfree = i;
	}
	lastfree = i;
}

void
kmapinit(void)
{
	KMap *k, *klast;

	lock(&kmaplock);
	kmapfree = kpte;
	klast = &kpte[KPTESIZE-1];
	for(k=kpte; k<klast; k++)
		k->next = k+1;
	k->next = 0;
	unlock(&kmaplock);

	m->ktlbnext = TLBOFF;
}

void
kmapdump(void)
{
	int i;

	for(i=0; i<KPTESIZE; i++)
		iprint("%d: %lud pc=%#lux\n", i, kpte[i].ref, kpte[i].pc);
}

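/*
 * install k's pte pair in the hardware tlb under the current tlb pid.
 * if an entry already maps k->virt, reuse its index; during start-up,
 * allocate entries round-robin between TLBOFF and tlbroff; after
 * start-up, fall back to overwriting a random entry (see puttlb).
 */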
static int
putktlb(KMap *k)
{
	int x;
	ulong virt;
	ulong tlbent[3];

	virt = (k->virt & ~BY2PG) | TLBPID(tlbvirt());
	x = gettlbp(virt, tlbent);
	if (!m->paststartup) {
		if (up) {			/* startup just ended? */
			tlbroff = 1;
			setwired(tlbroff);	/* share all-but-one entries */
			m->paststartup = 1;
		} else if (x < 0) {		/* no such entry? use next */
			x = m->ktlbnext++;
			if(m->ktlbnext >= tlbroff)
				m->ktlbnext = TLBOFF;
		}
	}
	if (x < 0)		/* no entry for va? overwrite random one */
		x = puttlb(virt, k->phys0, k->phys1);
	else
		puttlbx(x, virt, k->phys0, k->phys1, PGSZ);
	m->ktlbx[x] = 1;
	return x;
}

/*
 * Arrange that the KMap'd virtual address will hit the same
 * primary cache line as pg->va by making bits 14...12 of the
 * tag the same as the virtual address. These bits are the index
 * into the primary cache and are checked whenever accessing
 * the secondary cache through the primary. Violation causes
 * a VCE trap.
 */
KMap *
kmap(Page *pg)
{
	int s, printed = 0;
	ulong pte, virt;
	KMap *k;

	s = splhi();
	lock(&kmaplock);

	if(kmapfree == 0) {
retry:
		unlock(&kmaplock);
		kmapinval();		/* try and free some */
		lock(&kmaplock);
		if(kmapfree == 0){
			unlock(&kmaplock);
			splx(s);
			if(printed++ == 0){
				/* using iprint here we get mixed up with other prints */
				print("%d KMAP RETRY %#lux ktime %ld %ld %ld %ld %ld %ld %ld %ld\n",
					m->machno, getcallerpc(&pg),
					ktime[0], ktime[1], ktime[2], ktime[3],
					ktime[4], ktime[5], ktime[6], ktime[7]);
				delay(200);
			}
			splhi();
			lock(&kmaplock);
			goto retry;
		}
	}

	k = kmapfree;
	kmapfree = k->next;

	k->pg = pg;
	/*
	 * one for the allocation,
	 * one for kactive.
	 */
	k->pc = getcallerpc(&pg);
	k->ref = 2;
	k->konmach[m->machno] = m->kactive;
	m->kactive = k;

	virt = pg->va;
	/* bits 14..12 are the primary-cache virtual index; see above */
	virt &= PIDX;
	virt |= KMAPADDR | ((k-kpte)<<KMAPSHIFT);

	k->virt = virt;
	pte = PPN(pg->pa)|PTECACHABILITY|PTEGLOBL|PTEWRITE|PTEVALID;
	if(virt & BY2PG) {
		k->phys0 = PTEGLOBL | PTECACHABILITY;
		k->phys1 = pte;
	}
	else {
		k->phys0 = pte;
		k->phys1 = PTEGLOBL | PTECACHABILITY;
	}

	putktlb(k);
	unlock(&kmaplock);

	splx(s);
	return k;
}

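/*
 * drop a reference to k. when the last reference goes (normally
 * from kmapinval()), clear the slot and return it to the free list.
 */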
void
kunmap(KMap *k)
{
	int s;

	s = splhi();
	if(decref(k) == 0) {
		k->virt = 0;
		k->phys0 = 0;
		k->phys1 = 0;
		k->pg = 0;

		lock(&kmaplock);
		k->next = kmapfree;
		kmapfree = k;
//		nfree();
		unlock(&kmaplock);
	}
	splx(s);
}

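/*
 * a kmap'd address missed in the tlb, presumably because its entry
 * was evicted or invalidated: revalidate it and make sure it is on
 * this cpu's active list.
 */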
void
kfault(Ureg *ur)			/* called from trap() */
{
	ulong index, addr;
	KMap *k, *f;

	addr = ur->badvaddr;
	index = (addr & ~KSEGM) >> KMAPSHIFT;
	if(index >= KPTESIZE)
		panic("kmapfault: %#lux", addr);

	k = &kpte[index];
	if(k->virt == 0)
		panic("kmapfault: unmapped %#lux", addr);

	for(f = m->kactive; f; f = f->konmach[m->machno])
		if(f == k)
			break;
	if(f == 0) {
		incref(k);
		k->konmach[m->machno] = m->kactive;
		m->kactive = k;
	}
	putktlb(k);
}

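/*
 * invalidate this cpu's kmap tlb entries and release its active
 * kmaps; kmap() calls this when the free list is empty.
 */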
void
kmapinval(void)
{
	int mno, i, curpid;
	KMap *k, *next;
	uchar *ktlbx;

	if(m->machno < nelem(ktime))
		ktime[m->machno] = MACHP(0)->ticks;
	if(m->kactive == 0)
		return;

	curpid = PTEPID(TLBPID(tlbvirt()));
	ktlbx = m->ktlbx;
	for(i = 0; i < NTLB; i++, ktlbx++){
		if(*ktlbx == 0)
			continue;
		TLBINVAL(i, curpid);
		*ktlbx = 0;
	}

	mno = m->machno;
	for(k = m->kactive; k; k = next) {
		next = k->konmach[mno];
		kunmap(k);
	}

	m->kactive = 0;
	m->ktlbnext = TLBOFF;
}

struct
{
	ulong	va;
	ulong	pl;
	ulong	ph;
} wired[NWTLB+1];		/* +1 to avoid zero size if NWTLB==0 */
//	= {
//	PCIMEM,
//	(PCIMEM>>6) | PTEUNCACHED|PTEGLOBL|PTEWRITE|PTEVALID,
//	((PCIMEM>>6) | PTEUNCACHED|PTEGLOBL|PTEWRITE|PTEVALID)+(1<<(PGSHIFT-6)),
//};

/*
 * allocate a virtual address corresponding to physical address addr
 * and map one to the other. runs on cpu 0 only.
 */
ulong
wiredpte(vlong addr)
{
	int i;
	ulong va;

	for(i = 0; i < NWTLB; i++)
		if(wired[i].va == 0)
			break;
	if(i >= NWTLB)
		panic("wiredpte: not enough wired TLB entries");

	va = WIREDADDR + i*256*MB;
	wired[i].va = va;
	wired[i].pl = (addr >> 6) | PTEUNCACHED|PTEGLOBL|PTEWRITE|PTEVALID;
	wired[i].ph = wired[i].pl + (1<<(PGSHIFT-6));

	puttlbx(i+WTLBOFF, va, wired[i].pl, wired[i].ph, PGSZ256M);
	return va;
}

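/*
 * replay cpu0's wired mappings into this (non-0) cpu's tlb.
 */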
void
machwire(void)
{
	int i;

	if(m->machno == 0)
		return;
	for(i = 0; i < NWTLB; i++)
		if(wired[i].va)
			puttlbx(i+WTLBOFF, wired[i].va, wired[i].pl,
				wired[i].ph, PGSZ256M);
}

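/*
 * make p's tlb pid current by rewriting wired tlb entry 0,
 * allocating a fresh pid for this cpu if p has none.
 */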
void
mmuswitch(Proc *p)
{
	int tp;
	static char lasttext[32];

	if(Debugswitch && !p->kp){
		if(strncmp(lasttext, p->text, sizeof lasttext) != 0)
			iprint("[%s]", p->text);
		strncpy(lasttext, p->text, sizeof lasttext);
	}

	if(p->newtlb) {
		memset(p->pidonmach, 0, sizeof p->pidonmach);
		p->newtlb = 0;
	}
	tp = p->pidonmach[m->machno];
	if(tp == 0)
		tp = newtlbpid(p);
	puttlbx(0, KZERO|PTEPID(tp), 0, 0, PGSZ);
}

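/*
 * forget p's tlb pids on all cpus; stale tlb entries are
 * cleaned out later by purgetlb().
 */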
void
mmurelease(Proc *p)
{
	memset(p->pidonmach, 0, sizeof p->pidonmach);
}

/*
 * Caller must be at splhi.
 */
int
newtlbpid(Proc *p)
{
	int i, s;
	Proc **h;

	i = m->lastpid;
	h = m->pidproc;
	for(s = 0; s < NTLBPID; s++) {
		i++;
		if(i >= NTLBPID)
			i = 1;
		if(h[i] == 0)
			break;
	}

	if(h[i])
		purgetlb(i);
	if(h[i] != 0)
		panic("newtlb");

	m->pidproc[i] = p;
	p->pidonmach[m->machno] = i;
	m->lastpid = i;

	return i;
}

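/*
 * enter a user mapping: record it in the soft tlb, load it into the
 * hardware tlb, and do any cache maintenance pending for the page.
 */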
void
putmmu(ulong tlbvirt, ulong tlbphys, Page *pg)
{
	short tp;
	char *ctl;
	Softtlb *entry;
	int s;

	s = splhi();
	tp = up->pidonmach[m->machno];
	if(tp == 0)
		tp = newtlbpid(up);

	tlbvirt |= PTEPID(tp);
	if((tlbphys & PTEALGMASK) != PTEUNCACHED) {
		tlbphys &= ~PTEALGMASK;
		tlbphys |= PTECACHABILITY;
	}

	entry = putstlb(tlbvirt, tlbphys);
	puttlb(entry->virt, entry->phys0, entry->phys1);

	ctl = &pg->cachectl[m->machno];
	switch(*ctl) {
	case PG_TXTFLUSH:
		icflush((void*)pg->va, BY2PG);
		*ctl = PG_NOFLUSH;
		break;
	case PG_DATFLUSH:
		dcflush((void*)pg->va, BY2PG);
		*ctl = PG_NOFLUSH;
		break;
	case PG_NEWCOL:
		cleancache();		/* too expensive */
		*ctl = PG_NOFLUSH;
		break;
	}
	splx(s);
}

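/*
 * reclaim tlb pid `pid': drop dead pids from m->pidproc, then remove
 * every soft-tlb and hardware-tlb entry tagged with a pid that no
 * longer belongs to a process.
 */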
void
purgetlb(int pid)
{
	int i, mno;
	Proc *sp, **pidproc;
	Softtlb *entry, *etab;

	m->tlbpurge++;

	/*
	 * find all pid entries that are no longer used by processes
	 */
	mno = m->machno;
	pidproc = m->pidproc;
	for(i=1; i<NTLBPID; i++) {
		sp = pidproc[i];
		if(sp && sp->pidonmach[mno] != i)
			pidproc[i] = 0;
	}

	/*
	 * shoot down the one we want
	 */
	sp = pidproc[pid];
	if(sp != 0)
		sp->pidonmach[mno] = 0;
	pidproc[pid] = 0;

	/*
	 * clean out all dead pids from the stlb
	 */
	entry = m->stb;
	for(etab = &entry[STLBSIZE]; entry < etab; entry++)
		if(pidproc[TLBPID(entry->virt)] == 0)
			entry->virt = 0;

	/*
	 * clean up the hardware
	 */
	for(i=tlbroff; i<NTLB; i++)
		if(pidproc[TLBPID(gettlbvirt(i))] == 0)
			TLBINVAL(i, pid);
}

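/*
 * discard up's stale mappings by switching it to a fresh tlb pid.
 */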
void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);
}

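/*
 * the soft tlb caches pte pairs, mirroring the hardware tlb:
 * phys0 maps the even page of an aligned pair, phys1 the odd one.
 */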
/* tlbvirt also has TLBPID() in its low byte as the asid */
Softtlb*
putstlb(ulong tlbvirt, ulong tlbphys)
{
	int odd;
	Softtlb *entry;

	/* identical calculation in l.s/utlbmiss */
	entry = &m->stb[stlbhash(tlbvirt)];
	odd = tlbvirt & BY2PG;		/* even/odd bit */
	tlbvirt &= ~BY2PG;		/* zero even/odd bit */
	if(entry->virt != tlbvirt) {	/* not my entry? overwrite it */
		if(entry->virt != 0) {
			m->hashcoll++;
			if (Debughash)
				iprint("putstlb: hash collision: %#lx old virt "
					"%#lux new virt %#lux page %#lux\n",
					entry - m->stb, entry->virt, tlbvirt,
					tlbvirt >> (PGSHIFT+1));
		}
		entry->virt = tlbvirt;
		entry->phys0 = 0;
		entry->phys1 = 0;
	}

	if(odd)
		entry->phys1 = tlbphys;
	else
		entry->phys0 = tlbphys;

	if(entry->phys0 == 0 && entry->phys1 == 0)
		entry->virt = 0;

	return entry;
}

void
checkmmu(ulong, ulong)
{
}

void
countpagerefs(ulong*, int)
{
}

/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
ulong
cankaddr(ulong pa)
{
	if(pa >= KZERO || pa >= MEMSIZE)
		return 0;
	return MEMSIZE - pa;
}