/* /plan9/sys/src/9/bcm/mmu.c */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"

#include "arm.h"

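/*
 * L1X gives a virtual address's index into the 4096-entry L1
 * section table (va bits 31:20); L2X gives its index into a
 * 256-entry coarse L2 page table (va bits 19:12).
 */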
#define L1X(va)		FEXT((va), 20, 12)
#define L2X(va)		FEXT((va), 12, 8)

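/*
 * L1lo..L1hi bounds the L1 indices that can hold user mappings
 * (UZERO up to the top of the user stack); only this slice of the
 * L1 table need be cleared and written back when an address space
 * is switched or released.
 */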
enum {
	L1lo		= UZERO/MiB,		/* L1X(UZERO)? */
	L1hi		= (USTKTOP+MiB-1)/MiB,	/* L1X(USTKTOP+MiB-1)? */
};

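/*
 * Build the boot page tables.  This runs before the MMU is turned
 * on, so the tables are reached through their physical addresses
 * (PADDR).  All of DRAM is mapped at KZERO, the first MB is also
 * identity-mapped so the kernel survives the moment the MMU comes
 * on, the i/o registers are mapped uncached at VIRTIO, and the
 * exception vectors are double-mapped at HVECTORS through a coarse
 * L2 table.
 */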
void
mmuinit(void)
{
	PTE *l1, *l2;
	uintptr pa, va;

	l1 = (PTE*)PADDR(L1);
	l2 = (PTE*)PADDR(L2);

	/*
	 * map all of ram at KZERO
	 */
	va = KZERO;
	for(pa = PHYSDRAM; pa < PHYSDRAM+DRAMSIZE; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section|Cached|Buffered;
		va += MiB;
	}

	/*
	 * identity map first MB of ram so mmu can be enabled
	 */
	l1[L1X(PHYSDRAM)] = PHYSDRAM|Dom0|L1AP(Krw)|Section|Cached|Buffered;

	/*
	 * map i/o registers
	 */
	va = VIRTIO;
	for(pa = PHYSIO; pa < PHYSIO+IOSIZE; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section;
		va += MiB;
	}

	/*
	 * double map exception vectors at top of virtual memory
	 */
	va = HVECTORS;
	l1[L1X(va)] = (uintptr)l2|Dom0|Coarse;
	l2[L2X(va)] = PHYSDRAM|L2AP(Krw)|Small;
}

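/*
 * Called once the MMU is on: record the L1 table (now addressed
 * virtually) in the Mach structure and withdraw the temporary
 * identity mapping of the first MB of ram.
 */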
void
mmuinit1(void)
{
	PTE *l1;

	l1 = (PTE*)L1;
	m->mmul1 = l1;

	/*
	 * undo identity map of first MB of ram
	 */
	l1[L1X(PHYSDRAM)] = 0;
	cachedwbse(&l1[L1X(PHYSDRAM)], sizeof(PTE));
	mmuinvalidate();
}

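/*
 * Detach all of proc's L2 page tables: point the corresponding L1
 * entries back at Fault, optionally zero the L2 pages themselves,
 * and move the whole list onto proc->mmul2cache for reuse.
 */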
static void
mmul2empty(Proc* proc, int clear)
{
	PTE *l1;
	Page **l2, *page;

	l1 = m->mmul1;
	l2 = &proc->mmul2;
	for(page = *l2; page != nil; page = page->next){
		if(clear)
			memset(UINT2PTR(page->va), 0, BY2PG);
		l1[page->daddr] = Fault;
		l2 = &page->next;
	}
	*l2 = proc->mmul2cache;
	proc->mmul2cache = proc->mmul2;
	proc->mmul2 = nil;
}

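/*
 * Drop every user mapping from the current L1 table.  The
 * incremental version below, bounded by mmul1lo/mmul1hi, is
 * disabled ("there's a bug in here"); the fallback simply zeroes
 * the whole user range.
 */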
static void
mmul1empty(void)
{
#ifdef notdef
/* there's a bug in here */
	PTE *l1;

	/* clean out any user mappings still in l1 */
	if(m->mmul1lo > L1lo){
		if(m->mmul1lo == 1)
			m->mmul1[L1lo] = Fault;
		else
			memset(&m->mmul1[L1lo], 0, m->mmul1lo*sizeof(PTE));
		m->mmul1lo = L1lo;
	}
	if(m->mmul1hi < L1hi){
		l1 = &m->mmul1[m->mmul1hi];
		if((L1hi - m->mmul1hi) == 1)
			*l1 = Fault;
		else
			memset(l1, 0, (L1hi - m->mmul1hi)*sizeof(PTE));
		m->mmul1hi = L1hi;
	}
#else
	memset(&m->mmul1[L1lo], 0, (L1hi - L1lo)*sizeof(PTE));
#endif /* notdef */
}

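/*
 * Install proc's address space on this cpu: flush the caches,
 * discard the old user mappings (and, on newtlb, the stale L2
 * contents), point the L1 at proc's L2 tables, write the updated
 * entries back to memory and invalidate the TLB.
 */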
void
mmuswitch(Proc* proc)
{
	int x;
	PTE *l1;
	Page *page;

	/* do kprocs get here and if so, do they need to? */
	if(m->mmupid == proc->pid && !proc->newtlb)
		return;
	m->mmupid = proc->pid;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	if(proc->newtlb){
		mmul2empty(proc, 1);
		proc->newtlb = 0;
	}

	mmul1empty();

	/* move in new map */
	l1 = m->mmul1;
	for(page = proc->mmul2; page != nil; page = page->next){
		x = page->daddr;
		l1[x] = PPN(page->pa)|Dom0|Coarse;
		/* know here that L1lo < x < L1hi */
		if(x+1 - m->mmul1lo < m->mmul1hi - x)
			m->mmul1lo = x+1;
		else
			m->mmul1hi = x;
	}

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}

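/*
 * Throw away the current process's user mappings so they will be
 * re-established by subsequent page faults: mark newtlb and re-run
 * mmuswitch with interrupts off.
 */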
void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);
}

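/*
 * Final teardown of an exiting process's address space: return its
 * cached L2 pages to the free list, clear its user mappings from
 * the L1 and invalidate the TLB.
 */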
void
mmurelease(Proc* proc)
{
	Page *page, *next;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	mmul2empty(proc, 0);
	for(page = proc->mmul2cache; page != nil; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %d", page->ref);
		pagechainhead(page);
	}
	if(proc->mmul2cache && palloc.r.p)
		wakeup(&palloc.r);
	proc->mmul2cache = nil;

	mmul1empty();

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}

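/*
 * Enter a single user page mapping for va.  If the L1 slot is
 * empty, take an L2 page from the per-process cache (or allocate a
 * fresh one) and plug it in; then fill in the small-page descriptor,
 * deriving access and cacheability from the PTEWRITE/PTEUNCACHED
 * soft bits carried in pa, and do the cache and TLB maintenance the
 * virtually indexed caches require.
 */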
void
putmmu(uintptr va, uintptr pa, Page* page)
{
	int x;
	Page *pg;
	PTE *l1, *pte;

	x = L1X(va);
	l1 = &m->mmul1[x];
	if(*l1 == Fault){
		/* wasteful - l2 pages only have 256 entries - fix */
		if(up->mmul2cache == nil){
			/* auxpg since we don't need much? memset if so */
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
		}
		else{
			pg = up->mmul2cache;
			up->mmul2cache = pg->next;
			memset(UINT2PTR(pg->va), 0, BY2PG);
		}
		pg->daddr = x;
		pg->next = up->mmul2;
		up->mmul2 = pg;

		/* force l2 page to memory */
		cachedwbse((void *)pg->va, BY2PG);

		*l1 = PPN(pg->pa)|Dom0|Coarse;
		cachedwbse(l1, sizeof *l1);

		if(x >= m->mmul1lo && x < m->mmul1hi){
			if(x+1 - m->mmul1lo < m->mmul1hi - x)
				m->mmul1lo = x+1;
			else
				m->mmul1hi = x;
		}
	}
	pte = UINT2PTR(KADDR(PPN(*l1)));

	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= Cached|Buffered;
	if(pa & PTEWRITE)
		x |= L2AP(Urw);
	else
		x |= L2AP(Uro);
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbse(&pte[L2X(va)], sizeof pte[0]);

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	/*  write back dirty entries - we need this because the pio() in
	 *  fault.c is writing via a different virt addr and won't clean
	 *  its changes out of the dcache.  Page coloring doesn't work
	 *  on this mmu because the virtual cache is set associative
	 *  rather than direct mapped.
	 */
	cachedwbinv();
	if(page->cachectl[0] == PG_TXTFLUSH){
		/* pio() sets PG_TXTFLUSH whenever a text pg has been written */
		cacheiinv();
		page->cachectl[0] = PG_NOFLUSH;
	}
	checkmmu(va, PPN(pa));
}

/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
uintptr
cankaddr(uintptr pa)
{
	if(pa < PHYSDRAM + memsize)		/* assumes PHYSDRAM is 0 */
		return PHYSDRAM + memsize - pa;
	return 0;
}

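/*
 * Map size bytes of physical address space at pa into the kernel at
 * va, using uncached 1MB sections.  va must be MB-aligned; pa need
 * not be, so the returned address is va plus pa's offset within its
 * MB.  Returns 0 if any of the needed L1 slots is already in use.
 */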
uintptr
mmukmap(uintptr va, uintptr pa, usize size)
{
	int o;
	usize n;
	PTE *pte, *pte0;

	assert((va & (MiB-1)) == 0);
	o = pa & (MiB-1);
	pa -= o;
	size += o;
	pte = pte0 = &m->mmul1[L1X(va)];
	for(n = 0; n < size; n += MiB)
		if(*pte++ != Fault)
			return 0;
	pte = pte0;
	for(n = 0; n < size; n += MiB){
		*pte++ = (pa+n)|Dom0|L1AP(Krw)|Section;
		mmuinvalidateaddr(va+n);
	}
	cachedwbse(pte0, pte - pte0);
	return va + o;
}

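/*
 * Consistency check hook (called from putmmu above); a no-op in
 * this port.
 */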
void
checkmmu(uintptr va, uintptr pa)
{
	USED(va);
	USED(pa);
}
319