/*	$NetBSD: pmap_bootstrap.c,v 1.35 2021/07/24 21:31:32 andvar Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.35 2021/07/24 21:31:32 andvar Exp $");

#include <sys/param.h>
#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#define	RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa - KERNBASE))
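/*
 * RELOC() lets us read or write a kernel global while the MMU is still
 * off: &(v) is the variable's link-time (virtual) address, so adding
 * (firstpa - KERNBASE) yields the physical address it currently
 * occupies.
 */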

extern char *etext;
extern paddr_t avail_start, avail_end;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void	*CADDR1, *CADDR2;
char	*vmmap;
void	*msgbufaddr;

void	pmap_bootstrap(paddr_t, paddr_t);

/*
 * Bootstrap the VM system.
 *
 * This is called with the MMU off, so we must relocate all global
 * references by `firstpa' (don't call any functions here!).  `nextpa'
 * is the first available physical memory address; the memory we
 * allocate here is reflected in avail_start once we are done.  The MMU
 * is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t lwp0upa, kstpa, kptmpa, kptpa;
	paddr_t lkptpa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, and IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	lwp0upa = nextpa;
	nextpa += USPACE;
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) + howmany(RELOC(physmem, int), NPTEPG);
	nextpa += nptpages * PAGE_SIZE;
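
	/*
	 * Illustrative sizing example (assuming 4KB pages and 4-byte
	 * PTEs, i.e. NPTEPG == 1024 and SG4_LEV2SIZE == 128; the actual
	 * values depend on the port's configuration): on the 68040 a
	 * segment table page holds NPTEPG / SG4_LEV2SIZE == 8 level 2
	 * blocks, so kstsize is MAXKL2SIZE / 8 pages.  Each kernel PT
	 * page maps NPTEPG pages (4MB) of KVA, so we allocate one PT
	 * page per NPTEPG pages of physical RAM on top of the fixed
	 * Sysptsize pages.
	 */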

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32MB of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256KB.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last two segments of KVA space (0xFF800000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.
	 * The first segment (0xFF800000 - 0xFFBFFFFF) is mapped
	 * for the kernel page tables.
	 *
	 * XXX: It looks like this was copied from hp300 and it is not
	 * XXX: clear whether the last physical page mapping is really
	 * XXX: needed on this port.
	 * The very last page (0xFFFFF000) in the second segment is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224MB of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
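	/*
	 * Worked numbers for the 68040 layout described above
	 * (illustrative, assuming 4KB pages): a level 3 block of
	 * SG4_LEV3SIZE (64) PTEs maps 256KB, a level 2 block of
	 * SG4_LEV2SIZE (128) descriptors maps 32MB, and the
	 * SG4_LEV1SIZE (128) level 1 descriptors cover the full 4GB.
	 * A single segment table page thus holds the level 1 table plus
	 * (NPTEPG - SG4_LEV1SIZE) / SG4_LEV2SIZE == 7 level 2 blocks,
	 * which is where the 224MB figure comes from.
	 */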
	if (RELOC(mmutype, int) == MMU_68040) {
		int nl1desc, nl2desc, i;

		/*
		 * First invalidate all of the "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = (st_entry_t *)kstpa;
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the next
		 * block of level 2 descriptors for Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE - 1];
		*ste = protoste;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map Sysmap and the "last PT page".
		 */
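		/*
		 * Here `i' indexes the first level 2 descriptor beyond
		 * those reserved for the kernel PT pages, i.e. the start
		 * of the level 2 block that the final level 1 descriptor
		 * (set just above) points at.  Within that block we fill
		 * in the last 2 * (NPTEPG / SG4_LEV3SIZE) descriptors:
		 * the first group maps Sysptmap (kptmpa), the second
		 * maps the "last PT page" (lkptpa).
		 */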
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE) * 2];
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa and last-page page table
		 */
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa and lkptpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);
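
		/*
		 * Illustrative example of the resulting mask (assuming
		 * l2tobm(n) is simply "1 << n"): with nl1desc == 1, bits
		 * 0-2 end up cleared (the level 1 table, one block of
		 * PT-page maps, and the kptmpa/lkptpa block), bits at or
		 * above MAXKL2SIZE are cleared as unavailable, and the
		 * remaining set bits mark level 2 blocks still free for
		 * the pmap module to allocate later.
		 */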

		/*
		 * Initialize Sysptmap
		 */
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries.
		 */
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last ones to point to Sysptmap and the page
		 * table page allocated earlier.
		 */
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte++;		/* XXX should use [MAXADDR >> SEGSHIFT] */
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = (st_entry_t *)kstpa;
		este = &este[TIA_SIZE];
		while (ste < este)
			*ste++ = SG_NV;
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last ones to point to Sysptmap and the page
		 * table page allocated earlier.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SYSMAP_VA >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste++;		/* XXX should use [MAXADDR >> SEGSHIFT] */
		pte++;		/* XXX should use [MAXADDR >> SEGSHIFT] */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}

	/*
	 * Invalidate all but the final entry in the last kernel PT page.
	 * The final entry maps the last page of physical memory to
	 * prepare a page that is PA == VA to turn on the MMU.
	 *
	 * XXX: This looks copied from hp300 where PA != VA, but it is
	 * XXX: not clear whether this is really required on this port.
	 */
	pte = (pt_entry_t *)lkptpa;
	epte = &pte[NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' pages that we have allocated.
	 */
	pte = (pt_entry_t *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO).
	 */
	pte = (pt_entry_t *)kptpa;
	pte = &pte[m68k_btop(KERNBASE)];
	epte = &((u_int *)kptpa)[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (kstpa - firstpa bytes), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(KERNBASE + kstpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Map the kernel segment table cache-inhibited for 68040/68060.
	 * (for the 68040 not strictly necessary, but recommended by Motorola;
	 *  for the 68060 mandatory)
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(KERNBASE + nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	if (RELOC(mmutype, int) == MMU_68040) {
		protopte &= ~PG_CCB;
		protopte |= PG_CIN;
	}
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Calculate important exported kernel addresses and related values.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
	    (st_entry_t *)(kstpa - firstpa + KERNBASE);
	RELOC(Sysseg_pa, paddr_t) = kstpa;
	if (RELOC(mmutype, int) == MMU_68040)
		RELOC(protostfree, u_int) = stfree;
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
	    (pt_entry_t *)(kptmpa - firstpa + KERNBASE);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) = (pt_entry_t *)SYSMAP_VA;

	/*
	 * Remember the u-area address so it can be loaded into lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	RELOC(lwp0uarea, vaddr_t) = lwp0upa - firstpa + KERNBASE;

	/*
	 * VM data structures are now initialized; set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: the msgbuf is initialized just after
	 * avail_end in machdep.c.
	 * Since the last page is used for rebooting the system
	 * (code is copied there and execution continues from the copied
	 * code before the MMU is disabled), the msgbuf would get trounced
	 * between reboots if it were placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = firstpa
	    + m68k_ptob(RELOC(physmem, int))
	    - m68k_round_page(MSGBUFSIZE)
	    - PAGE_SIZE;	/* skip the last physical page (see note above) */
	RELOC(virtual_avail, vaddr_t) =
	    KERNBASE + (nextpa - firstpa);
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}