#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"

#include "arm.h"

static KMap* kmapp(ulong pa);

#define L1X(va)		FEXT((va), 20, 12)
#define L2X(va)		FEXT((va), 12, 8)
#define L2AP(ap)	l2ap(ap)
#define L1ptedramattrs	soc.l1ptedramattrs
#define L2ptedramattrs	soc.l2ptedramattrs

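/*
 * a 32-bit va splits into an L1 index (bits 31-20: 4096 1MB sections),
 * an L2 index (bits 19-12: 256 small pages per coarse table) and a
 * page offset (bits 11-0); e.g. for va 0x80123456, L1X = 0x801,
 * L2X = 0x23 and the offset is 0x456.
 */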
enum {
	L1lo		= UZERO/MiB,		/* L1X(UZERO)? */
	L1hi		= (USTKTOP+MiB-1)/MiB,	/* L1X(USTKTOP+MiB-1)? */
	L2size		= 256*sizeof(PTE),
	KMAPADDR	= 0xFFF00000,
};

/*
 * Set up initial PTEs for cpu0 (called with mmu off)
 */
void
mmuinit(void *a)
{
	PTE *l1, *l2;
	uintptr pa, pe, va;

	l1 = (PTE*)a;
	l2 = (PTE*)PADDR(L2);

	/*
	 * map ram between KZERO and VIRTPCI
	 */
	va = KZERO;
	pe = VIRTPCI - KZERO;
	if(pe > soc.dramsize)
		pe = soc.dramsize;
	for(pa = PHYSDRAM; pa < PHYSDRAM+pe; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section|L1ptedramattrs;
		va += MiB;
	}

	/*
	 * identity map first MB of ram so mmu can be enabled
	 */
	l1[L1X(PHYSDRAM)] = PHYSDRAM|Dom0|L1AP(Krw)|Section|L1ptedramattrs;

	/*
	 * map i/o registers
	 */
	va = VIRTIO;
	for(pa = soc.physio; pa < soc.physio+IOSIZE; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section|L1noexec;
		va += MiB;
	}
	pa = soc.armlocal;
	if(pa)
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section|L1noexec;
	/*
	 * pi4 hack: ether and pcie are in segment 0xFD5xxxxx not 0xFE5xxxxx,
	 * gisb is in segment 0xFC4xxxxx not 0xFE4xxxxx
	 */
	va = VIRTIO + 0x500000;
	pa = soc.physio - 0x1000000 + 0x500000;
	l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section|L1noexec;
	va = VIRTIO + 0x400000;
	pa = soc.physio - 0x2000000 + 0x400000;
	l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section|L1noexec;

	/*
	 * double map exception vectors near top of virtual memory
	 */
	va = HVECTORS;
	l1[L1X(va)] = (uintptr)l2|Dom0|Coarse;
	l2[L2X(va)] = PHYSDRAM|L2AP(Krw)|Small|L2ptedramattrs;
}

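/*
 * Second-stage initialisation, called on each cpu with the mmu on
 */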
void
mmuinit1(void)
{
	PTE *l1, *l2;
	uintptr va;

	l1 = m->mmul1;

	/*
	 * undo identity map of first MB of ram
	 */
	l1[L1X(PHYSDRAM)] = 0;
	cachedwbtlb(&l1[L1X(PHYSDRAM)], sizeof(PTE));
	mmuinvalidateaddr(PHYSDRAM);

	/*
	 * make a local mapping for highest MB of virtual space
	 * containing kmap area and exception vectors
	 */
	if(m->machno == 0)
		m->kmapl2 = (PTE*)L2;
	else{
		va = HVECTORS;
		m->kmapl2 = l2 = mallocalign(L2size, L2size, 0, 0);
		l1[L1X(va)] = PADDR(l2)|Dom0|Coarse;
		l2[L2X(va)] = PHYSDRAM|L2AP(Krw)|Small|L2ptedramattrs;
		cachedwbtlb(&l1[L1X(va)], sizeof(PTE));
		mmuinvalidateaddr(va);
	}
}

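/*
 * Detach proc's L2 page tables from the L1 table and move them to
 * proc->mmul2cache for reuse, zeroing them first if clear is set
 */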
static void
mmul2empty(Proc* proc, int clear)
{
	PTE *l1;
	Page **l2, *page;
	KMap *k;

	l1 = m->mmul1;
	l2 = &proc->mmul2;
	for(page = *l2; page != nil; page = page->next){
		if(clear){
			k = kmap(page);
			memset((void*)VA(k), 0, L2size);
			kunmap(k);
		}
		l1[page->daddr] = Fault;
		l2 = &page->next;
	}
	coherence();
	*l2 = proc->mmul2cache;
	proc->mmul2cache = proc->mmul2;
	proc->mmul2 = nil;
}

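/*
 * Remove all user mappings from this cpu's L1 table and kmap L2 table
 */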
static void
mmul1empty(void)
{
	PTE *l1;

	/* clean out any user mappings still in l1 */
	if(m->mmul1lo > 0){
		if(m->mmul1lo == 1)
			m->mmul1[L1lo] = Fault;
		else
			memset(&m->mmul1[L1lo], 0, m->mmul1lo*sizeof(PTE));
		m->mmul1lo = 0;
	}
	if(m->mmul1hi > 0){
		l1 = &m->mmul1[L1hi - m->mmul1hi];
		if(m->mmul1hi == 1)
			*l1 = Fault;
		else
			memset(l1, 0, m->mmul1hi*sizeof(PTE));
		m->mmul1hi = 0;
	}
	if(m->kmapl2 != nil)
		memset(m->kmapl2, 0, NKMAPS*sizeof(PTE));
}

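/*
 * Install proc's page tables on this cpu,
 * replacing any previous user mappings
 */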
void
mmuswitch(Proc* proc)
{
	int x;
	PTE *l1;
	Page *page;

	if(proc != nil && proc->newtlb){
		mmul2empty(proc, 1);
		proc->newtlb = 0;
	}

	mmul1empty();

	/* move in new map */
	l1 = m->mmul1;
	if(proc != nil){
		for(page = proc->mmul2; page != nil; page = page->next){
			x = page->daddr;
			l1[x] = PPN(page->pa)|Dom0|Coarse;
			if(x >= L1lo + m->mmul1lo && x < L1hi - m->mmul1hi){
				if(x+1 - L1lo < L1hi - x)
					m->mmul1lo = x+1 - L1lo;
				else
					m->mmul1hi = L1hi - x;
			}
		}
		if(proc->nkmap)
			memmove(m->kmapl2, proc->kmaptab, sizeof(proc->kmaptab));
	}

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbtlb(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
	if(proc != nil && proc->nkmap)
		cachedwbtlb(m->kmapl2, sizeof(proc->kmaptab));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}

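/*
 * Rebuild the current process's mappings from scratch,
 * e.g. after its segments have changed
 */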
void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);
}

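/*
 * Called at process exit: give back proc's page table
 * pages and clear its mappings from this cpu
 */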
void
mmurelease(Proc* proc)
{
	Page *page, *next;

	mmul2empty(proc, 0);
	for(page = proc->mmul2cache; page != nil; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %d", page->ref);
		pagechainhead(page);
	}
	if(proc->mmul2cache && palloc.r.p)
		wakeup(&palloc.r);
	proc->mmul2cache = nil;

	mmul1empty();

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbtlb(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}

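/*
 * Map va to pa in the current process's address space,
 * allocating an L2 table if needed; pa carries the PTE*
 * attribute bits (see the protection bits comment below)
 */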
void
putmmu(uintptr va, uintptr pa, Page* page)
{
	int x, s;
	Page *pg;
	PTE *l1, *pte;
	KMap *k;

	/*
	 * disable interrupts to prevent flushmmu (called from hzclock)
	 * from clearing page tables while we are setting them
	 */
	s = splhi();
	x = L1X(va);
	l1 = &m->mmul1[x];
	if(*l1 == Fault){
		/* l2 pages only have 256 entries - wastes 3K per 1M of address space */
		if(up->mmul2cache == nil){
			spllo();
			pg = newpage(0, 0, 0);
			splhi();
			/* if newpage slept, we might be on a different cpu */
			l1 = &m->mmul1[x];
		}else{
			pg = up->mmul2cache;
			up->mmul2cache = pg->next;
		}
		pg->daddr = x;
		pg->next = up->mmul2;
		up->mmul2 = pg;

		/* force l2 page to memory */
		k = kmap(pg);
		memset((void*)VA(k), 0, L2size);
		cachedwbtlb((void*)VA(k), L2size);
		kunmap(k);

		*l1 = PPN(pg->pa)|Dom0|Coarse;
		cachedwbtlb(l1, sizeof *l1);

		if(x >= L1lo + m->mmul1lo && x < L1hi - m->mmul1hi){
			if(x+1 - L1lo < L1hi - x)
				m->mmul1lo = x+1 - L1lo;
			else
				m->mmul1hi = L1hi - x;
		}
	}
	k = kmapp(PPN(*l1));
	pte = (PTE*)VA(k);

	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= L2ptedramattrs;
	if(pa & PTEWRITE)
		x |= L2AP(Urw);
	else
		x |= L2AP(Uro);
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbtlb(&pte[L2X(va)], sizeof(PTE));
	kunmap(k);

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	if(page->cachectl[m->machno] == PG_TXTFLUSH){
		/* pio() sets PG_TXTFLUSH whenever a text pg has been written */
		if(cankaddr(page->pa))
			cachedwbse((void*)(page->pa|KZERO), BY2PG);
		cacheiinvse((void*)page->va, BY2PG);
		page->cachectl[m->machno] = PG_NOFLUSH;
	}
	//checkmmu(va, PPN(pa));
	splx(s);
}

void*
mmuuncache(void* v, usize size)
{
	int x;
	PTE *pte;
	uintptr va;

	/*
	 * Simple helper for ucalloc().
	 * Uncache a Section, must already be
	 * valid in the MMU.
	 */
	va = PTR2UINT(v);
	assert(!(va & (1*MiB-1)) && size == 1*MiB);

	x = L1X(va);
	pte = &m->mmul1[x];
	if((*pte & (Fine|Section|Coarse)) != Section)
		return nil;
	*pte &= ~L1ptedramattrs;
	mmuinvalidateaddr(va);
	cachedwbinvse(pte, 4);

	return v;
}

/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
uintptr
cankaddr(uintptr pa)
{
	if((pa - PHYSDRAM) < VIRTPCI-KZERO)
		return PHYSDRAM + VIRTPCI-KZERO - pa;
	return 0;
}

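/*
 * Map physical region [pa, pa+size) at va with uncached,
 * non-executable 1MB kernel sections; returns the virtual
 * address of pa, or 0 if the range is already mapped
 */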
uintptr
mmukmap(uintptr va, uintptr pa, usize size)
{
	int o;
	usize n;
	PTE *pte, *pte0;

	assert((va & (MiB-1)) == 0);
	o = pa & (MiB-1);
	pa -= o;
	size += o;
	pte = pte0 = &m->mmul1[L1X(va)];
	for(n = 0; n < size; n += MiB)
		if(*pte++ != Fault)
			return 0;
	pte = pte0;
	for(n = 0; n < size; n += MiB){
		*pte++ = (pa+n)|Dom0|L1AP(Krw)|Section|L1noexec;
		mmuinvalidateaddr(va+n);
	}
	cachedwbtlb(pte0, (uintptr)pte - (uintptr)pte0);
	return va + o;
}

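/*
 * Like mmukmap, but uses a 16MB supersection so that
 * physical addresses beyond 4GB can be mapped
 */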
uintptr
mmukmapx(uintptr va, uvlong pa, usize size)
{
	int o;
	usize n;
	PTE ptex, *pte, *pte0;

	assert((va & (16*MiB-1)) == 0);
	assert(size <= 16*MiB);
	o = (int)pa & (16*MiB-1);
	pa -= o;
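	/*
	 * supersection descriptor: bits 31-24 hold pa[31:24],
	 * bits 23-20 hold pa[35:32], bits 8-5 hold pa[39:36]
	 */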
	ptex = FEXT(pa,24,8)<<24 | FEXT(pa,32,4)<<20 | FEXT(pa,36,4)<<5;
	pte = pte0 = &m->mmul1[L1X(va)];
	for(n = 0; n < 16*MiB; n += MiB)
		if(*pte++ != Fault)
			return 0;
	pte = pte0;
	for(n = 0; n < 16*MiB; n += MiB)
		*pte++ = ptex|L1AP(Krw)|Super|Section|L1noexec;
	mmuinvalidateaddr(va);
	cachedwbtlb(pte0, (uintptr)pte - (uintptr)pte0);
	return va + o;
}

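/*
 * Diagnostic: print the translation for va when it does not
 * match pa (pa == ~0 always prints; pa == 0 skips the compare)
 */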
void
checkmmu(uintptr va, uintptr pa)
{
	int x;
	PTE *l1, *pte;
	KMap *k;

	x = L1X(va);
	l1 = &m->mmul1[x];
	if(*l1 == Fault){
		iprint("checkmmu cpu%d va=%lux l1 %p=%ux\n", m->machno, va, l1, *l1);
		return;
	}
	k = kmapp(PPN(*l1));
	pte = (PTE*)VA(k);
	pte += L2X(va);
	if(pa == ~0 || (pa != 0 && PPN(*pte) != pa))
		iprint("checkmmu va=%lux pa=%lux l1 %p=%ux pte %p=%ux\n", va, pa, l1, *l1, pte, *pte);
	kunmap(k);
}

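/*
 * Return a kernel virtual address for physical address pa:
 * directly via KADDR when possible, otherwise through a
 * per-process slot in the kmap window at KMAPADDR
 */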
static KMap*
kmapp(ulong pa)
{
	int s, i;
	uintptr va;

	if(cankaddr(pa))
		return KADDR(pa);
	if(up == nil)
		panic("kmap without up %#p", getcallerpc(&pa));
	s = splhi();
	if(up->nkmap == NKMAPS)
		panic("kmap overflow %#p", getcallerpc(&pa));
	for(i = 0; i < NKMAPS; i++)
		if(up->kmaptab[i] == 0)
			break;
	if(i == NKMAPS)
		panic("can't happen");
	up->nkmap++;
	va = KMAPADDR + i*BY2PG;
	up->kmaptab[i] = pa | L2AP(Krw)|Small|L2ptedramattrs;
	m->kmapl2[i] = up->kmaptab[i];
	cachedwbtlb(&m->kmapl2[i], sizeof(PTE));
	mmuinvalidateaddr(va);
	splx(s);
	return (KMap*)va;
}

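/*
 * Map a page for temporary kernel access
 */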
KMap*
kmap(Page *p)
{
	return kmapp(p->pa);
}

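/*
 * Undo a kmap, writing the page back from the data cache
 * in case it aliases a text page
 */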
void
kunmap(KMap *k)
{
	int i;
	uintptr va;

	coherence();
	va = (uintptr)k;
	if(L1X(va) != L1X(KMAPADDR))
		return;
	/* wasteful: only needed for text pages aliased within data cache */
	cachedwbse((void*)PPN(va), BY2PG);
	i = L2X(va);
	up->kmaptab[i] = 0;
	m->kmapl2[i] = 0;
	up->nkmap--;
	cachedwbtlb(&m->kmapl2[i], sizeof(PTE));
	mmuinvalidateaddr(va);
}