xref: /plan9/sys/src/9/teg2/mmu.c (revision 3de6a9c0b3d5cf34fc4090d0bf1930d83799a7fd)
1 /*
2  * arm arch v7 mmu
3  *
4  * we initially thought that we needn't flush the l2 cache since external
5  * devices needn't see page tables.  sadly, reality does not agree with
6  * the manuals.
7  *
8  * we use l1 and l2 cache ops here because they are empirically needed.
9  */
10 #include "u.h"
11 #include "../port/lib.h"
12 #include "mem.h"
13 #include "dat.h"
14 #include "fns.h"
15 
16 #include "arm.h"
17 
18 #define L1X(va)		FEXT((va), 20, 12)
19 #define L2X(va)		FEXT((va), 12, 8)
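/*
 * illustrative: FEXT(v, o, w) (defined elsewhere) extracts w bits of v
 * starting at bit o, so L1X yields the 1MB-section index (va bits 31-20,
 * 0..4095) and L2X the 4KB-page index within that MB (va bits 19-12,
 * 0..255); e.g. va 0x4012f000 -> L1X 0x401, L2X 0x2f.
 */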
20 
21 enum {
22 	Debug		= 0,
23 
24 	L1lo		= UZERO/MiB,		/* L1X(UZERO)? */
25 #ifdef SMALL_ARM				/* well under 1GB of RAM? */
26 	L1hi		= (USTKTOP+MiB-1)/MiB,	/* L1X(USTKTOP+MiB-1)? */
27 #else
28 	/*
29 	 * on trimslice, the top of the 1GB of ram can't be addressed, as high
30 	 * virtual memory (0xfff.....) contains high vectors.  We
31 	 * moved USTKTOP down another MB to utterly avoid KADDR(stack_base)
32 	 * mapping to high exception vectors.  USTKTOP is thus
33 	 * (0x40000000 - 64*KiB - MiB), which in kernel virtual space is
34 	 * (0x100000000ull - 64*KiB - MiB), but we need the whole user
35 	 * virtual address space to be unmapped in a new process.
36 	 */
37 	L1hi		= DRAMSIZE/MiB,
38 #endif
39 };
40 
41 #define ISHOLE(type)	((type) == 0)
42 
43 typedef struct Range Range;
44 struct Range {
45 	uintptr	startva;
46 	uvlong	endva;
47 	uintptr	startpa;
48 	uvlong	endpa;
49 	ulong	attrs;
50 	int	type;			/* L1 Section or Coarse? */
51 };
52 
53 static void mmul1empty(void);
54 
55 static char *
56 typename(int type)
57 {
58 	static char numb[20];
59 
60 	switch(type) {
61 	case Coarse:
62 		return "4KB-page table(s)";
63 	case Section:
64 		return "1MB section(s)";
65 	default:
66 		snprint(numb, sizeof numb, "type %d", type);
67 		return numb;
68 	}
69 }
70 
71 static void
72 prl1range(Range *rp)
73 {
74 	int attrs;
75 
76 	iprint("l1 maps va (%#8.8lux-%#llux) -> ", rp->startva, rp->endva-1);
77 	if (rp->startva == rp->startpa)
78 		iprint("identity-mapped");
79 	else
80 		iprint("pa %#8.8lux", rp->startpa);
81 	iprint(" attrs ");
82 	attrs = rp->attrs;
83 	if (attrs) {
84 		if (attrs & Cached)
85 			iprint("C");
86 		if (attrs & Buffered)
87 			iprint("B");
88 		if (attrs & L1sharable)
89 			iprint("S1");
90 		if (attrs & L1wralloc)
91 			iprint("A1");
92 	} else
93 		iprint("\"\"");
94 	iprint(" %s\n", typename(rp->type));
95 	delay(100);
96 	rp->endva = 0;
97 }
98 
99 static void
100 l2dump(Range *rp, PTE pte)
101 {
102 	USED(rp, pte);
103 }
104 
105 /* dump level 1 page table at virtual addr l1 */
106 void
107 mmudump(PTE *l1)
108 {
109 	int i, type, attrs;
110 	uintptr pa;
111 	uvlong va;
112 	PTE pte;
113 	Range rng;
114 
115 	/* dump first level of ptes */
116 	iprint("cpu%d l1 pt @ %#p:\n", m->machno, PADDR(l1));
117 	memset(&rng, 0, sizeof rng);
118 	for (va = i = 0; i < 4096; i++, va += MB) {
119 		pte = l1[i];
120 		type = pte & (Section|Coarse);
121 		if (type == Section)
122 			pa = pte & ~(MB - 1);
123 		else
124 			pa = pte & ~(KiB - 1);
125 		attrs = 0;
126 		if (!ISHOLE(type) && type == Section)
127 			attrs = pte & L1ptedramattrs;
128 
129 		/* if a range is open but this pte isn't part, close & open */
130 		if (!ISHOLE(type) &&
131 		    (pa != rng.endpa || type != rng.type || attrs != rng.attrs))
132 			if (rng.endva != 0) {	/* range is open? close it */
133 				prl1range(&rng);
134 				rng.type = 0;
135 				rng.attrs = 0;
136 			}
137 
138 		if (ISHOLE(type)) {		/* end of any open range? */
139 			if (rng.endva != 0)	/* range is open? close it */
140 				prl1range(&rng);
141 		} else {			/* continuation or new range */
142 			if (rng.endva == 0) {	/* no open range? start one */
143 				rng.startva = va;
144 				rng.startpa = pa;
145 				rng.type = type;
146 				rng.attrs = attrs;
147 			}
148 			rng.endva = va + MB;	/* continue the open range */
149 			rng.endpa = pa + MB;
150 		}
151 		if (type == Coarse)
152 			l2dump(&rng, pte);
153 	}
154 	if (rng.endva != 0)			/* close any open range */
155 		prl1range(&rng);
156 	iprint("\n");
157 }
158 
159 /*
160  * map `mbs' megabytes from virt to phys, uncached.
161  * device registers are sharable, except the private memory region:
162  * two 4KB pages, at 0x50040000 on the tegra2.
163  */
164 void
165 mmumap(uintptr virt, uintptr phys, int mbs)
166 {
167 	uint off;
168 	PTE *l1;
169 
170 	phys &= ~(MB-1);
171 	virt &= ~(MB-1);
172 	l1 = KADDR(ttbget());
173 	for (off = 0; mbs-- > 0; off += MB)
174 		l1[L1X(virt + off)] = (phys + off) | Dom0 | L1AP(Krw) |
175 			Section | L1sharable;
176 	allcache->wbse(l1, L1SIZE);
177 	mmuinvalidate();
178 }
179 
180 /* identity map `mbs' megabytes from phys */
181 void
182 mmuidmap(uintptr phys, int mbs)
183 {
184 	mmumap(phys, phys, mbs);
185 }
186 
187 PTE *
188 newl2page(void)
189 {
190 	PTE *p;
191 
192 	if ((uintptr)l2pages >= HVECTORS - BY2PG)
193 		panic("l2pages");
194 	p = (PTE *)l2pages;
195 	l2pages += BY2PG;
196 	return p;
197 }
198 
199 /*
200  * replace an L1 section pte with an L2 page table and an L1 coarse pte,
201  * with the same attributes as the original pte and covering the same
202  * region of memory.
203  */
204 static void
205 expand(uintptr va)
206 {
207 	int x;
208 	uintptr tva, pa;
209 	PTE oldpte;
210 	PTE *l1, *l2;
211 
212 	va &= ~(MB-1);
213 	x = L1X(va);
214 	l1 = &m->mmul1[x];
215 	oldpte = *l1;
216 	if (oldpte == Fault || (oldpte & (Coarse|Section)) != Section)
217 		return;			/* make idempotent */
218 
219 	/* wasteful - l2 pages only have 256 entries - fix */
220 	/*
221 	 * it may be very early, before any memory allocators are
222 	 * configured, so do a crude allocation from the top of memory.
223 	 */
224 	l2 = newl2page();
225 	memset(l2, 0, BY2PG);
226 
227 	/* write the new coarse L1 entry (pointing at l2) into the L1 table */
228 	*l1 = PPN(PADDR(l2))|Dom0|Coarse;
229 
230 	/* fill l2 page with l2 ptes with equiv attrs; copy AP bits */
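	/*
	 * the >>6 below moves the section AP bits (AP[1:0] at pte bits
	 * 11:10, APX at bit 15) into the small-page positions
	 * (AP[1:0] at bits 5:4, APX at bit 9).
	 */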
231 	x = Small | oldpte & (Cached|Buffered) | (oldpte & (1<<15 | 3<<10)) >> 6;
232 	if (oldpte & L1sharable)
233 		x |= L2sharable;
234 	if (oldpte & L1wralloc)
235 		x |= L2wralloc;
236 	pa = oldpte & ~(MiB - 1);
237 	for(tva = va; tva < va + MiB; tva += BY2PG, pa += BY2PG)
238 		l2[L2X(tva)] = PPN(pa) | x;
239 
240 	/* force l2 page to memory */
241 	allcache->wbse(l2, BY2PG);
242 
243 	/* flush any stale TLB entry for this address */
244 	mmuinvalidateaddr(PPN(va));
245 
246 	allcache->wbinvse(l1, sizeof *l1);
247 	if ((*l1 & (Coarse|Section)) != Coarse)
248 		panic("explode %#p", va);
249 }
250 
251 /*
252  * cpu0's l1 page table has likely changed since we copied it in
253  * launchinit, notably to allocate uncached sections for ucalloc.
254  * so copy it again from cpu0's.
255  */
256 void
257 mmuninit(void)
258 {
259 	int s;
260 	PTE *l1, *newl1;
261 
262 	s = splhi();
263 	l1 = m->mmul1;
264 	newl1 = mallocalign(L1SIZE, L1SIZE, 0, 0);
265 	assert(newl1);
266 
267 	allcache->wbinvse((PTE *)L1, L1SIZE);	/* get cpu0's up-to-date copy */
268 	memmove(newl1, (PTE *)L1, L1SIZE);
269 	allcache->wbse(newl1, L1SIZE);
270 
271 	mmuinvalidate();
272 	coherence();
273 
274 	ttbput(PADDR(newl1));		/* switch */
275 	coherence();
276 	mmuinvalidate();
277 	coherence();
278 	m->mmul1 = newl1;
279 	coherence();
280 
281 	mmul1empty();
282 	coherence();
283 	mmuinvalidate();
284 	coherence();
285 
286 //	mmudump(m->mmul1);		/* DEBUG */
287 	splx(s);
288 	free(l1);
289 }
290 
291 /* l1 is base of my l1 descriptor table */
292 static PTE *
293 l2pteaddr(PTE *l1, uintptr va)
294 {
295 	uintptr l2pa;
296 	PTE pte;
297 	PTE *l2;
298 
299 	expand(va);
300 	pte = l1[L1X(va)];
301 	if ((pte & (Coarse|Section)) != Coarse)
302 		panic("l2pteaddr l1 pte %#8.8ux @ %#p not Coarse",
303 			pte, &l1[L1X(va)]);
304 	l2pa = pte & ~(KiB - 1);
305 	l2 = (PTE *)KADDR(l2pa);
306 	return &l2[L2X(va)];
307 }
308 
309 void
310 mmuinit(void)
311 {
312 	ulong va;
313 	uintptr pa;
314 	PTE *l1, *l2;
315 
316 	if (m->machno != 0) {
317 		mmuninit();
318 		return;
319 	}
320 
321 	pa = ttbget();
322 	l1 = KADDR(pa);
323 
324 	/* identity map most of the io space */
325 	mmuidmap(PHYSIO, (PHYSIOEND - PHYSIO + MB - 1) / MB);
326 	/* move the rest to more convenient addresses */
327 	mmumap(VIRTNOR, PHYSNOR, 256);	/* 0x40000000 v -> 0xd0000000 p */
328 	mmumap(VIRTAHB, PHYSAHB, 256);	/* 0xb0000000 v -> 0xc0000000 p */
329 
330 	/* map high vectors to start of dram, but only 4K, not 1MB */
331 	pa -= MACHSIZE+BY2PG;		/* page tables must be page aligned */
332 	l2 = KADDR(pa);
333 	memset(l2, 0, 1024);
334 
335 	m->mmul1 = l1;		/* used by expand() in l2pteaddr */
336 
337 	/* map private mem region (8K at soc.scu) without sharable bits */
338 	va = soc.scu;
339 	*l2pteaddr(l1, va) &= ~L2sharable;
340 	va += BY2PG;
341 	*l2pteaddr(l1, va) &= ~L2sharable;
342 
343 	/*
344 	 * below (and above!) the vectors in virtual space may be dram.
345 	 * populate the rest of l2 for the last MB.
346 	 */
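	/* va is unsigned, so -MiB is 0xfff00000 and the loop stops when va wraps to 0 */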
347 	for (va = -MiB; va != 0; va += BY2PG)
348 		l2[L2X(va)] = PADDR(va) | L2AP(Krw) | Small | L2ptedramattrs;
349 	/* map high vectors page to 0; must match attributes of KZERO->0 map */
350 	l2[L2X(HVECTORS)] = PHYSDRAM | L2AP(Krw) | Small | L2ptedramattrs;
351 	coherence();
352 	l1[L1X(HVECTORS)] = pa | Dom0 | Coarse;	/* l1 -> ttb-machsize-4k */
353 
354 	/* make kernel text unwritable */
355 	for(va = KTZERO; va < (ulong)etext; va += BY2PG)
356 		*l2pteaddr(l1, va) |= L2apro;
357 
358 	allcache->wbinv();
359 	mmuinvalidate();
360 
361 	m->mmul1 = l1;
362 	coherence();
363 	mmul1empty();
364 	coherence();
365 //	mmudump(l1);			/* DEBUG */
366 }
367 
368 static void
369 mmul2empty(Proc* proc, int clear)
370 {
371 	PTE *l1;
372 	Page **l2, *page;
373 
374 	l1 = m->mmul1;
375 	l2 = &proc->mmul2;
376 	for(page = *l2; page != nil; page = page->next){
377 		if(clear)
378 			memset(UINT2PTR(page->va), 0, BY2PG);
379 		l1[page->daddr] = Fault;
380 		allcache->wbse(l1, sizeof *l1);
381 		l2 = &page->next;
382 	}
383 	*l2 = proc->mmul2cache;
384 	proc->mmul2cache = proc->mmul2;
385 	proc->mmul2 = nil;
386 }
387 
388 static void
389 mmul1empty(void)
390 {
391 #ifdef notdef
392 /* there's a bug in here */
393 	PTE *l1;
394 
395 	/* clean out any user mappings still in l1 */
396 	if(m->mmul1lo > L1lo){
397 		if(m->mmul1lo == 1)
398 			m->mmul1[L1lo] = Fault;
399 		else
400 			memset(&m->mmul1[L1lo], 0, m->mmul1lo*sizeof(PTE));
401 		m->mmul1lo = L1lo;
402 	}
403 	if(m->mmul1hi < L1hi){
404 		l1 = &m->mmul1[m->mmul1hi];
405 		if((L1hi - m->mmul1hi) == 1)
406 			*l1 = Fault;
407 		else
408 			memset(l1, 0, (L1hi - m->mmul1hi)*sizeof(PTE));
409 		m->mmul1hi = L1hi;
410 	}
411 #else
412 	memset(&m->mmul1[L1lo], 0, (L1hi - L1lo)*sizeof(PTE));
413 #endif /* notdef */
414 	allcache->wbse(&m->mmul1[L1lo], (L1hi - L1lo)*sizeof(PTE));
415 }
416 
417 void
418 mmuswitch(Proc* proc)
419 {
420 	int x;
421 	PTE *l1;
422 	Page *page;
423 
424 	/* do kprocs get here and if so, do they need to? */
425 	if(m->mmupid == proc->pid && !proc->newtlb)
426 		return;
427 	m->mmupid = proc->pid;
428 
429 	/* write back dirty and invalidate caches */
430 	l1cache->wbinv();
431 
432 	if(proc->newtlb){
433 		mmul2empty(proc, 1);
434 		proc->newtlb = 0;
435 	}
436 
437 	mmul1empty();
438 
439 	/* move in new map */
440 	l1 = m->mmul1;
441 	for(page = proc->mmul2; page != nil; page = page->next){
442 		x = page->daddr;
443 		l1[x] = PPN(page->pa)|Dom0|Coarse;
444 		/* know here that L1lo < x < L1hi */
445 		if(x+1 - m->mmul1lo < m->mmul1hi - x)
446 			m->mmul1lo = x+1;
447 		else
448 			m->mmul1hi = x;
449 	}
450 
451 	/* make sure map is in memory */
452 	/* could be smarter about how much? */
453 	allcache->wbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
454 
455 	/* lose any possible stale tlb entries */
456 	mmuinvalidate();
457 
458 	//print("mmuswitch l1lo %d l1hi %d %d\n",
459 	//	m->mmul1lo, m->mmul1hi, proc->kp);
460 
461 	wakewfi();		/* in case there's another runnable proc */
462 }
463 
464 void
465 flushmmu(void)
466 {
467 	int s;
468 
469 	s = splhi();
470 	up->newtlb = 1;
471 	mmuswitch(up);
472 	splx(s);
473 }
474 
475 void
476 mmurelease(Proc* proc)
477 {
478 	Page *page, *next;
479 
480 	/* write back dirty and invalidate caches */
481 	l1cache->wbinv();
482 
483 	mmul2empty(proc, 0);
484 	for(page = proc->mmul2cache; page != nil; page = next){
485 		next = page->next;
486 		if(--page->ref)
487 			panic("mmurelease: page->ref %d", page->ref);
488 		pagechainhead(page);
489 	}
490 	if(proc->mmul2cache && palloc.r.p)
491 		wakeup(&palloc.r);
492 	proc->mmul2cache = nil;
493 
494 	mmul1empty();
495 
496 	/* make sure map is in memory */
497 	/* could be smarter about how much? */
498 	allcache->wbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
499 
500 	/* lose any possible stale tlb entries */
501 	mmuinvalidate();
502 }
503 
504 void
505 putmmu(uintptr va, uintptr pa, Page* page)
506 {
507 	int x;
508 	Page *pg;
509 	PTE *l1, *pte;
510 
511 	x = L1X(va);
512 	l1 = &m->mmul1[x];
513 	if (Debug) {
514 		iprint("putmmu(%#p, %#p, %#p) ", va, pa, page->pa);
515 		iprint("mmul1 %#p l1 %#p *l1 %#ux x %d pid %ld\n",
516 			m->mmul1, l1, *l1, x, up->pid);
517 		if (*l1)
518 			panic("putmmu: old l1 pte non-zero; stuck?");
519 	}
520 	if(*l1 == Fault){
521 		/* wasteful - l2 pages only have 256 entries - fix */
522 		if(up->mmul2cache == nil){
523 			/* auxpg since we don't need much? memset if so */
524 			pg = newpage(1, 0, 0);
525 			pg->va = VA(kmap(pg));
526 		}
527 		else{
528 			pg = up->mmul2cache;
529 			up->mmul2cache = pg->next;
530 			memset(UINT2PTR(pg->va), 0, BY2PG);
531 		}
532 		pg->daddr = x;
533 		pg->next = up->mmul2;
534 		up->mmul2 = pg;
535 
536 		/* force l2 page to memory */
537 		allcache->wbse((void *)pg->va, BY2PG);
538 
539 		*l1 = PPN(pg->pa)|Dom0|Coarse;
540 		allcache->wbse(l1, sizeof *l1);
541 
542 		if (Debug)
543 			iprint("l1 %#p *l1 %#ux x %d pid %ld\n", l1, *l1, x, up->pid);
544 
545 		if(x >= m->mmul1lo && x < m->mmul1hi){
546 			if(x+1 - m->mmul1lo < m->mmul1hi - x)
547 				m->mmul1lo = x+1;
548 			else
549 				m->mmul1hi = x;
550 		}
551 	}
552 	pte = UINT2PTR(KADDR(PPN(*l1)));
553 	if (Debug) {
554 		iprint("pte %#p index %ld was %#ux\n", pte, L2X(va), *(pte+L2X(va)));
555 		if (*(pte+L2X(va)))
556 			panic("putmmu: old l2 pte non-zero; stuck?");
557 	}
558 
559 	/* protection bits are
560 	 *	PTERONLY|PTEVALID;
561 	 *	PTEWRITE|PTEVALID;
562 	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
563 	 */
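	/*
	 * sketch of the translation done just below:
	 *	PTERONLY|PTEVALID		-> Small|L2ptedramattrs|L2AP(Uro)
	 *	PTEWRITE|PTEVALID		-> Small|L2ptedramattrs|L2AP(Urw)
	 *	PTEWRITE|PTEUNCACHED|PTEVALID	-> Small|L2AP(Urw)
	 */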
564 	x = Small;
565 	if(!(pa & PTEUNCACHED))
566 		x |= L2ptedramattrs;
567 	if(pa & PTEWRITE)
568 		x |= L2AP(Urw);
569 	else
570 		x |= L2AP(Uro);
571 	pte[L2X(va)] = PPN(pa)|x;
572 	allcache->wbse(&pte[L2X(va)], sizeof pte[0]);
573 
574 	/* flush any stale TLB entry for this address */
575 	mmuinvalidateaddr(PPN(va));
576 
577 	/*  write back dirty entries - we need this because the pio() in
578 	 *  fault.c is writing via a different virt addr and won't clean
579 	 *  its changes out of the dcache.  Page coloring doesn't work
580 	 *  on this mmu because the virtual cache is set associative
581 	 *  rather than direct mapped.
582 	 */
583 	l1cache->wb();
584 
585 	if(page->cachectl[0] == PG_TXTFLUSH){
586 		/* pio() sets PG_TXTFLUSH whenever a text pg has been written */
587 		cacheiinv();
588 		page->cachectl[0] = PG_NOFLUSH;
589 	}
590 	if (Debug)
591 		iprint("putmmu %#p %#p %#p\n", va, pa, PPN(pa)|x);
592 }
593 
594 void*
595 mmuuncache(void* v, usize size)
596 {
597 	int x;
598 	PTE *pte;
599 	uintptr va;
600 
601 	/*
602 	 * Simple helper for ucalloc().
603 	 * Uncache a Section, must already be
604 	 * valid in the MMU.
605 	 */
606 	va = PTR2UINT(v);
607 	assert(!(va & (1*MiB-1)) && size == 1*MiB);
608 
609 	x = L1X(va);
610 	pte = &m->mmul1[x];
611 	if((*pte & (Section|Coarse)) != Section)
612 		return nil;
613 	*pte &= ~L1ptedramattrs;
614 	*pte |= L1sharable;
615 	mmuinvalidateaddr(va);
616 	allcache->wbse(pte, 4);
617 
618 	return v;
619 }
620 
621 uintptr
622 mmukmap(uintptr va, uintptr pa, usize size)
623 {
624 	int x;
625 	PTE *pte;
626 
627 	/*
628 	 * Stub.
629 	 */
630 	assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);
631 
632 	x = L1X(va);
633 	pte = &m->mmul1[x];
634 	if(*pte != Fault)
635 		return 0;
636 	*pte = pa|Dom0|L1AP(Krw)|Section;
637 	mmuinvalidateaddr(va);
638 	allcache->wbse(pte, 4);
639 
640 	return va;
641 }
642 
643 uintptr
644 mmukunmap(uintptr va, uintptr pa, usize size)
645 {
646 	int x;
647 	PTE *pte;
648 
649 	/*
650 	 * Stub.
651 	 */
652 	assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);
653 
654 	x = L1X(va);
655 	pte = &m->mmul1[x];
656 	if(*pte != (pa|Dom0|L1AP(Krw)|Section))
657 		return 0;
658 	*pte = Fault;
659 	mmuinvalidateaddr(va);
660 	allcache->wbse(pte, 4);
661 
662 	return va;
663 }
664 
665 /*
666  * Return the number of bytes that can be accessed via KADDR(pa).
667  * If pa is not a valid argument to KADDR, return 0.
668  */
669 uintptr
670 cankaddr(uintptr pa)
671 {
672 	if((PHYSDRAM == 0 || pa >= PHYSDRAM) && pa < PHYSDRAM+memsize)
673 		return PHYSDRAM+memsize - pa;
674 	return 0;
675 }
676 
677 /* from 386 */
678 void*
679 vmap(uintptr pa, usize size)
680 {
681 	uintptr pae, va;
682 	usize o, osize;
683 
684 	/*
685 	 * XXX - replace with new vm stuff.
686 	 * Crock after crock - the first 4MB is mapped with 2MB pages
687 	 * so catch that and return good values because the current mmukmap
688 	 * will fail.
689 	 */
690 	if(pa+size < 4*MiB)
691 		return UINT2PTR(kseg0|pa);
692 
693 	osize = size;
694 	o = pa & (BY2PG-1);
695 	pa -= o;
696 	size += o;
697 	size = ROUNDUP(size, BY2PG);
698 
699 	va = kseg0|pa;
700 	pae = mmukmap(va, pa, size);
701 	if(pae == 0 || pae-size != pa)
702 		panic("vmap(%#p, %ld) called from %#p: mmukmap fails %#p",
703 			pa+o, osize, getcallerpc(&pa), pae);
704 
705 	return UINT2PTR(va+o);
706 }
707 
708 /* from 386 */
709 void
710 vunmap(void* v, usize size)
711 {
712 	/*
713 	 * XXX - replace with new vm stuff.
714 	 * Can't do this until do real vmap for all space that
715 	 * might be used, e.g. stuff below 1MB which is currently
716 	 * mapped automagically at boot but that isn't used (or
717 	 * at least shouldn't be used) by the kernel.
718 	upafree(PADDR(v), size);
719 	 */
720 	USED(v, size);
721 }
722 
723 /*
724  * Notes.
725  * Everything is in domain 0;
726  * domain 0 access bits in the DAC register are set
727  * to Client, which means access is controlled by the
728  * permission values set in the PTE.
729  *
730  * L1 access control for the kernel is set to 1 (RW,
731  * no user mode access);
732  * L2 access control for the kernel is set to 1 (ditto)
733  * for all 4 AP sets;
734  * L1 user mode access is never set;
735  * L2 access control for user mode is set to either
736  * 2 (RO) or 3 (RW) depending on whether text or data,
737  * for all 4 AP sets.
738  * (To get kernel RO set AP to 0 and S bit in control
739  * register c1).
740  * Coarse L1 descriptors are used; each points at an L2 page table
741  * of 256 entries, i.e., 1024 bytes per table.
742  * Small-page L2 descriptors are used; each L2 table is allocated a
743  * whole 4096-byte page (1024 entries), though only 256 are needed.
744  *
745  * 4KiB. That's the size of 1) a page, 2) the
746  * size allocated for an L2 page-table page (note only 1KiB
747  * is needed per L2 page - to be dealt with later) and
748  * 3) the size of the area in L1 needed to hold the PTEs
749  * to map 1GiB of user space (0 -> 0x3fffffff, 1024 entries).
750  */
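/*
 * sketch of the AP encoding assumed by the code above (L1AP/L2AP and the
 * Krw/Uro/Urw values are defined elsewhere, presumably arm.h): a section
 * pte carries AP[1:0] in bits 11:10, a small-page pte in bits 5:4, so
 * for example
 *	L1AP(Krw) ~ 1<<10	kernel RW, no user access
 *	L2AP(Uro) ~ 2<<4	user RO, kernel RW
 *	L2AP(Urw) ~ 3<<4	user RW, kernel RW
 */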
751