/*	$NetBSD: pmap_bootstrap.c,v 1.60 2021/07/24 21:31:33 andvar Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.60 2021/07/24 21:31:33 andvar Exp $");

#include <sys/param.h>
#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/hp300spu.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#include <hp300/hp300/clockreg.h>

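/*
 * The kernel is linked at KERNBASE but runs here with the MMU still off,
 * so every global variable must be touched at its physical (load) address.
 * RELOC(v, t) does that by adding the physical offset `firstpa' to the
 * link-time address of `v', e.g. RELOC(mmutype, int) reads the physical
 * copy of `mmutype'.
 */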
#define	RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa))

extern char *etext;
extern vaddr_t CLKbase, MMUbase;

extern int maxmem;
extern paddr_t avail_start, avail_end;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void	*CADDR1, *CADDR2;
char	*vmmap;
void	*msgbufaddr;

void	pmap_bootstrap(paddr_t, paddr_t);

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!).  `nextpa' is the first available physical
 * memory address.  The first PA past the memory we allocate here is exported
 * as avail_start; the MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t lwp0upa, kstpa, kptmpa, kptpa;
	paddr_t lkptpa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, and IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	lwp0upa = nextpa;
	nextpa += USPACE;
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
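	/*
	 * (A segment table page holds NPTEPG descriptors, given the
	 * sizeof assumption noted above, i.e. NPTEPG / SG4_LEV2SIZE
	 * level 2 blocks, so the MAXKL2SIZE blocks of a 68040 segment
	 * table need the number of pages computed above; the two-level
	 * MMUs need only a one-page segment table.)
	 */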
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) + howmany(RELOC(physmem, int), NPTEPG) +
	    (IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;
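	/*
	 * `nextpa' now points just past everything allocated statically
	 * above (u-area, segment table, PT map, last PT page and the
	 * kernel PT pages); it is exported as avail_start at the bottom
	 * of this function.
	 */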

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last two segments of KVA space (0xFF800000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.
	 * The first segment (0xFF800000 - 0xFFBFFFFF) is mapped
	 * for the kernel page tables.
	 * The very last page (0xFFFFF000) in the second segment is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		int nl1desc, nl2desc, i;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = (st_entry_t *)kstpa;
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the next
		 * block of level 2 descriptors for Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE - 1];
		*ste = protoste;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map Sysmap and the "last PT page".
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE) * 2];
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa and last-page page table
		 */
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa and lkptpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);
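		/*
		 * Each bit in `stfree' stands for one SG4_LEV2SIZE block
		 * of descriptors in the kernel segment table; a set bit
		 * means the block is still free.  The result is handed to
		 * the pmap module as `protostfree' near the end of this
		 * function.
		 */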

		/*
		 * Initialize Sysptmap
		 */
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries.
		 */
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last ones to point to Sysptmap and the page
		 * table page allocated earlier.
		 */
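		/*
		 * Within the Sysmap window established at SYSMAP_VA, the
		 * slot for SYSMAP_VA's own segment thus maps Sysptmap
		 * itself and the slot for the last segment maps the
		 * "last PT page", so both stay addressable once the MMU
		 * is turned on.
		 */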
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[MAXADDR >> SEGSHIFT];
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = (st_entry_t *)kstpa;
		este = &este[TIA_SIZE];
		while (ste < este)
			*ste++ = SG_NV;
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last ones to point to Sysptmap and the page
		 * table page allocated earlier.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SYSMAP_VA >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste = (st_entry_t *)kstpa;
		ste = &ste[MAXADDR >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[MAXADDR >> SEGSHIFT];
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}

	/*
	 * Invalidate all but the final entry in the last kernel PT page.
	 * The final entry maps the last page of physical memory to
	 * prepare a page that is PA == VA to turn on the MMU.
	 */
	pte = (pt_entry_t *)lkptpa;
	epte = &pte[NPTEPG - 1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (pt_entry_t *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO).
	 * The first page of kernel text remains invalid; see locore.s
	 */
	pte = (pt_entry_t *)kptpa;
	pte = &pte[m68k_btop(KERNBASE + PAGE_SIZE)];
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (firstpa + PAGE_SIZE) | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (kstpa - firstpa bytes), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(kstpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Map the kernel segment table cache-inhibited for 68040/68060.
	 * (for the 68040 not strictly necessary, but recommended by Motorola;
	 *  for the 68060 mandatory)
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	if (RELOC(mmutype, int) == MMU_68040) {
		protopte &= ~PG_CCB;
		protopte |= PG_CIN;
	}
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */

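/*
 * PTE2VA(pte) converts a pointer into the statically allocated kernel
 * page table at `kptpa' into the virtual address mapped by that PTE:
 * the table maps the kernel's address space from KERNBASE (0 on hp300)
 * up, so the VA is simply the PTE's index converted back to a byte
 * address.  It is used below to assign KVA to the IO maps and to
 * compute virtual_avail.
 */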
#define	PTE2VA(pte)	m68k_ptob(pte - ((pt_entry_t *)kptpa))

	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	epte = &pte[IIOMAPSIZE];
	RELOC(intiobase, uint8_t *) = (uint8_t *)PTE2VA(pte);
	RELOC(intiolimit, uint8_t *) = (uint8_t *)PTE2VA(epte);
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	RELOC(extiobase, uint8_t *) = (uint8_t *)PTE2VA(pte);
	pte += EIOMAPSIZE;
	RELOC(virtual_avail, vaddr_t) = PTE2VA(pte);
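	/*
	 * virtual_avail now points at the first KVA available for general
	 * use; a few fixed special purpose pages (CADDR1, CADDR2, vmmap,
	 * msgbufaddr) are carved out of it at the bottom of this function.
	 */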

	/*
	 * Calculate important exported kernel addresses and related values.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa);
	RELOC(Sysseg_pa, paddr_t) = kstpa;
#if defined(M68040)
	if (RELOC(mmutype, int) == MMU_68040)
		RELOC(protostfree, u_int) = stfree;
#endif
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) = (pt_entry_t *)SYSMAP_VA;
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + MMUBASE;

	/*
	 * Remember the u-area address so it can be loaded into lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	RELOC(lwp0uarea, vaddr_t) = lwp0upa - firstpa;

	/*
	 * VM data structures are now initialized; set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.
	 * Since the last page is used for rebooting the system
	 * (code is copied there and execution continues from copied code
	 * before the MMU is disabled), the msgbuf will get trounced
	 * between reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = m68k_ptob(RELOC(maxmem, int)) -
	    (m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));

	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}