/*	$NetBSD: pmap_bootstrap.c,v 1.62 2023/02/06 13:30:02 tsutsui Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.62 2023/02/06 13:30:02 tsutsui Exp $");

#include "opt_m68k_arch.h"

#include <sys/param.h>
#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <arch/x68k/x68k/iodevice.h>

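/*
 * With the MMU off, globals must be accessed at their physical addresses;
 * RELOC() rebases a symbol's link-time address by `firstpa' for that
 * purpose (see the comment above pmap_bootstrap() below).
 */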
#define	RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa))

extern char *etext;

extern int maxmem;
extern psize_t physmem;
extern paddr_t avail_start, avail_end;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void	*CADDR1, *CADDR2;
char	*vmmap;
void	*msgbufaddr;

void	pmap_bootstrap(paddr_t, paddr_t);

/*
 * Bootstrap the VM system.
 *
 * Called with the MMU off, so we must relocate all global references by
 * `firstpa' (and may not call any functions here).  `nextpa' is the first
 * available physical memory address.  The first PA past the memory we
 * allocate here is recorded in avail_start.  The MMU is still off when we
 * return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t lwp0upa, kstpa, kptmpa, kptpa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
#if defined(M68040) || defined(M68060)
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */
#endif

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, and IIOMAPSIZE
	 *   is the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	lwp0upa = nextpa;
	nextpa += USPACE;
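	/*
	 * On the 68040/060 the segment table must have room for MAXKL2SIZE
	 * level 2 descriptor blocks, and each page holds NPTEPG / SG4_LEV2SIZE
	 * of them; earlier MMUs need only a single page.
	 */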
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	kptpa = nextpa;
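	/*
	 * Kernel PT pages: Sysptsize pages, plus enough PT pages to map all
	 * of physical memory, plus enough pages to hold the IIOMAPSIZE
	 * internal I/O space PTEs (placed at the end, validated below).
	 */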
	nptpages = RELOC(Sysptsize, int) + howmany(RELOC(physmem, int), NPTEPG) +
	    (IIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;

	/*
	 * Clear all PTEs to zero
	 */
	for (pte = (pt_entry_t *)kstpa; pte < (pt_entry_t *)nextpa; pte++)
		*pte = 0;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFC00000 -
	 * 0xFFFFFFFF) are mapped for the kernel page tables.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		int nl1desc, nl2desc, i;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = (st_entry_t *)kstpa;
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the next
		 * block of level 2 descriptors for Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE - 1];
		*ste = protoste;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map Sysmap.
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)];
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa and last-page page table
		 */
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);
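		/*
		 * stfree now has a bit set for each SG4_LEV2SIZE block of
		 * level 2 descriptors that is still free; it is exported
		 * below as protostfree.
		 */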

		/*
		 * Initialize Sysptmap
		 */
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries.
		 */
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	} else
#endif /* M68040 || M68060 */
	{
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = (st_entry_t *)kstpa;
		este = &este[TIA_SIZE];
		while (ste < este)
			*ste++ = SG_NV;
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SYSMAP_VA >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	}

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (pt_entry_t *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO).
	 */
	pte = (pt_entry_t *)kptpa;
	pte = &pte[m68k_btop(KERNBASE)];
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (kstpa - firstpa bytes), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(kstpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Map the kernel segment table cache inhibited on the 68040/68060
	 * (not strictly necessary on the 68040, but recommended by Motorola;
	 * mandatory on the 68060).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	if (RELOC(mmutype, int) == MMU_68040) {
		protopte &= ~PG_CCB;
		protopte |= PG_CIN;
	}
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 */

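/*
 * PTE2VA() converts a slot in the statically allocated kernel PT into
 * the virtual address that slot maps (slot 0 maps VA 0).
 */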
#define	PTE2VA(pte)	m68k_ptob(pte - ((pt_entry_t *)kptpa))

	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	epte = &pte[IIOMAPSIZE];
	RELOC(intiobase, uint8_t *) = (uint8_t *)PTE2VA(pte);
	RELOC(intiolimit, uint8_t *) = (uint8_t *)PTE2VA(epte);
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	RELOC(virtual_avail, vaddr_t) = PTE2VA(pte);

	/*
	 * Calculate important exported kernel addresses and related values.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa);
	RELOC(Sysseg_pa, paddr_t) = kstpa;
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040)
		RELOC(protostfree, u_int) = stfree;
#endif
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) = (pt_entry_t *)SYSMAP_VA;

	/*
	 * Remember the u-area address so it can be loaded in the lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	RELOC(lwp0uarea, vaddr_t) = lwp0upa - firstpa;

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized at the end of
	 * main memory region (not after avail_end) in machdep.c.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = m68k_ptob(RELOC(maxmem, int));
	RELOC(mem_size, psize_t) = m68k_ptob(RELOC(physmem, int));
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}