/*	$NetBSD: pmap_bootstrap.c,v 1.4 2024/01/02 07:46:49 thorpej Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.4 2024/01/02 07:46:49 thorpej Exp $");

#include "opt_m68k_arch.h"

#include <sys/param.h>
#include <sys/kcore.h>
#include <uvm/uvm_extern.h>

#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#define	RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa))
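
/*
 * For example, RELOC(mmutype, int) expands to
 *
 *	*((int *)((uintptr_t)&mmutype + firstpa))
 *
 * i.e. the variable is accessed through its physical address: its
 * link-time (virtual) address offset by `firstpa'.  This is what lets
 * the code below read and write globals with the MMU still off.
 */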

extern char *kernel_text;
extern char *etext;

extern paddr_t avail_start, avail_end;
extern paddr_t msgbufpa;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void	*CADDR1, *CADDR2;
char	*vmmap;
void	*msgbufaddr;

void	pmap_bootstrap(paddr_t, paddr_t);

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  Returns an updated first PA reflecting the memory we
 * have allocated.  MMU is still off when we return.
 *
 * XXX On virt68k, the kernel is mapped VA==PA, so we don't actually have
 * XXX to worry about any of this RELOC() nonsense.
 *
 * XXX On virt68k, firstpa == 0, and firstpa != start of kernel text.
 * XXX Same goes for KERNBASE.  We intentionally leave the area between
 * XXX KERNBASE and the start of kernel text unmapped.
 *
 * XXX On virt68k, we use TT registers to map I/O space.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t lwp0upa, kstpa, kptmpa, kptpa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
	int i;
#if defined(M68040) || defined(M68060)
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */
#endif

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	lwp0upa = nextpa;
	nextpa += USPACE;

#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;

	kptmpa = nextpa;
	nextpa += PAGE_SIZE;

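	/*
	 * A sketch of the arithmetic below: we need the statically
	 * configured Sysptsize PT pages, plus enough additional PT
	 * pages to hold one PTE for every page of physical memory
	 * (NPTEPG PTEs fit in one PT page).
	 */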
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) + howmany(RELOC(physmem, int), NPTEPG);
	nextpa += nptpages * PAGE_SIZE;

	/*
	 * Clear all PTEs to zero
	 */
	for (pte = (pt_entry_t *)kstpa; pte < (pt_entry_t *)nextpa; pte++)
		*pte = 0;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (see vmparam.h)
	 * are mapped for the kernel page tables.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
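	/*
	 * To spell out the 68040 arithmetic above: each level 2
	 * descriptor maps 256kb, so a block of 128 of them maps
	 * 128 * 256kb = 32mb, and the 128 level 1 descriptors
	 * together span 128 * 32mb = 4gb of virtual address space.
	 */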
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		int nl1desc, nl2desc;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = (st_entry_t *)kstpa;
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
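		/*
		 * Each trip through the loop below advances `protoste'
		 * by SG4_LEV3SIZE * sizeof(st_entry_t) bytes, i.e. by
		 * the size of one level 3 table, so consecutive level 2
		 * descriptors point at consecutive level 3 tables within
		 * the kernel PT pages starting at kptpa.
		 */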
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the next
		 * block of level 2 descriptors for Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE - 1];
		*ste = protoste;
		/*
		 * protoste contains the L2 table address that will
		 * map Sysmap.
		 */
		ste = (st_entry_t *)(vaddr_t)(protoste & UTE40_PTA);
		/*
		 * Get the offset into this table that will map
		 * Sysmap.
		 */
		ste = &ste[LA40_PI(SYSMAP_VA)];
		/*
		 * Now initialize this region.
		 */
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa and last-page page table
		 */
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa and lkptpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);
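		/*
		 * At this point `stfree' has a 1 bit for each level 2
		 * descriptor block still available for later allocation
		 * and a 0 bit for each block consumed above or beyond
		 * MAXKL2SIZE.  (l2tobm(n) is assumed to yield the bitmap
		 * mask for block `n'.)
		 */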

		/*
		 * Initialize Sysptmap
		 */
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_U | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries.
		 */
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	} else
#endif /* M68040 || M68060 */
	{
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = (st_entry_t *)kstpa;
		este = &este[TIA_SIZE];
		while (ste < este)
			*ste++ = SG_NV;
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SYSMAP_VA >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	}

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (pt_entry_t *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO).
	 */
	pte = (pt_entry_t *)kptpa;
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	pte = &pte[m68k_btop(m68k_trunc_page(&kernel_text))];
	protopte = m68k_trunc_page(&kernel_text) | PG_RO | PG_U | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (right up to kstpa), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(kstpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Map the kernel segment table cache invalidated for 68040/68060.
	 * (for the 68040 not strictly necessary, but recommended by Motorola;
	 * for the 68060 mandatory)
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	if (RELOC(mmutype, int) == MMU_68040) {
		protopte &= ~PG_CMASK;
		protopte |= PG_CI;
	}
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

#define	PTE2VA(pte)	m68k_ptob(pte - ((pt_entry_t *)kptpa))
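
/*
 * PTE2VA() converts a pointer into the statically allocated kernel
 * page table back to the virtual address mapped by that PTE slot:
 * the slot's index within the table, converted from pages to bytes.
 * This works because the kernel PT pages map KVA starting at 0
 * (page 0 itself is left invalid, as noted below).
 */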

	RELOC(virtual_avail, vaddr_t) = PTE2VA(pte);

	/*
	 * Calculate important exported kernel addresses and related values.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa);
	RELOC(Sysseg_pa, paddr_t) = kstpa;
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040)
		RELOC(protostfree, u_int) = stfree;
#endif
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) = (pt_entry_t *)SYSMAP_VA;

	/*
	 * Remember the u-area address so it can be loaded into lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	RELOC(lwp0uarea, vaddr_t) = lwp0upa - firstpa;

	/*
	 * Scoot the start of available memory forward to account for:
	 *
	 *	(1) The kernel text, data, and bss.
	 *
	 *	(2) The pages we stole above for pmap data
	 *	    structures.
	 */
	RELOC(bootinfo_mem_segments_avail[0].mem_size, uint32_t) -=
	    nextpa - RELOC(bootinfo_mem_segments_avail[0].mem_addr, uint32_t);
	RELOC(bootinfo_mem_segments_avail[0].mem_addr, uint32_t) = nextpa;

	/*
	 * The kernel is linked at 8K so that we can leave VA==0
	 * unmapped.  Use that space for the kernel message buffer.
	 */
	RELOC(msgbufpa, paddr_t) = firstpa;

	/*
	 * Initialize avail_start and avail_end.
	 */
	i = RELOC(bootinfo_mem_nsegments, int) - 1;
	RELOC(avail_start, paddr_t) =
	    RELOC(bootinfo_mem_segments_avail[0].mem_addr, uint32_t);
	RELOC(avail_end, paddr_t) =
	    RELOC(bootinfo_mem_segments_avail[i].mem_addr, uint32_t) +
	    RELOC(bootinfo_mem_segments_avail[i].mem_size, uint32_t);

	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));

	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}