/*	$NetBSD: pmap_bootstrap.c,v 1.38 2023/01/15 05:08:33 tsutsui Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.38 2023/01/15 05:08:33 tsutsui Exp $");

#include "opt_m68k_arch.h"

#include <sys/param.h>
#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

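/*
 * Access a kernel global through its physical address: the MMU is off
 * while pmap_bootstrap() runs, so every reference to a global symbol
 * must be relocated by `firstpa' (see the comment above pmap_bootstrap()).
 */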
#define	RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa))

extern char *etext;

extern int maxmem;
extern psize_t physmem;
extern paddr_t avail_start, avail_end;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void	*CADDR1, *CADDR2;
char	*vmmap;
void	*msgbufaddr;

void	pmap_bootstrap(paddr_t, paddr_t);

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  The first PA past the memory we allocate here is recorded
 * in avail_start below.  MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t lwp0upa, kstpa, kptmpa, kptpa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
	u_int iiomapsize;
#if defined(M68040)
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */
#endif

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, and iiomapsize
	 *   is the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	iiomapsize = m68k_btop(RELOC(intiotop_phys, u_int) -
	    RELOC(intiobase_phys, u_int));

	lwp0upa = nextpa;
	nextpa += USPACE;
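	/*
	 * The 040 "segment table" holds the level 1 table plus blocks of
	 * level 2 descriptors (see the big comment below); each page fits
	 * NPTEPG / SG4_LEV2SIZE blocks of SG4_LEV2SIZE descriptors, so
	 * MAXKL2SIZE blocks need the number of pages computed here.
	 */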
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) + howmany(RELOC(physmem, int), NPTEPG) +
	    (iiomapsize + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0x3FC00000 -
	 * 0x3FFFFFFF) are mapped for the kernel page tables.
	 *
	 * The rest of KVA space (0x40000000 - 0xFFFFFFFF) is mapped
	 * by tt0/tt1 registers for device I/O in locore.s.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
#if defined(M68040)
	if (RELOC(mmutype, int) == MMU_68040) {
		int nl1desc, nl2desc, i;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = (st_entry_t *)kstpa;
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the level 1 descriptor corresponding to
		 * SYSMAP_VA to map the last block of level 2 descriptors.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SYSMAP_VA >> SG4_SHIFT1];
		*ste = protoste;
		/*
		 * Now initialize the portion of that block of
		 * descriptors to map Sysptmap.
		 */
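		/*
		 * That block begins right after the level 1 table
		 * (SG4_LEV1SIZE slots) and the `nl1desc' blocks of
		 * level 2 descriptors allocated above, hence the
		 * index computed below.
		 */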
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[i + ((SYSMAP_VA & SG4_MASK2) >> SG4_SHIFT2)];
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa (Sysptmap)
		 */
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);

		/*
		 * Initialize Sysptmap
		 */
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries.
		 */
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the one corresponding to SYSMAP_VA
		 * to point to Sysptmap.
		 */
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	} else
#endif
	{
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = (st_entry_t *)kstpa;
		este = &este[TIA_SIZE];
		while (ste < este)
			*ste++ = SG_NV;
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the one corresponding to SYSMAP_VA
		 * to point to Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SYSMAP_VA >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	}

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (pt_entry_t *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO).
	 */
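	/*
	 * (KERNBASE is 0 here, so m68k_btop(KERNBASE) is 0 and the page
	 * number of &etext, the link-time end of kernel text, indexes
	 * directly into the kernel PT.)
	 */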
	pte = (pt_entry_t *)kptpa;
	pte = &pte[m68k_btop(KERNBASE)];
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (kstpa - firstpa bytes), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(kstpa - firstpa)];
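	/* `pte' and `protopte' carry on from the kernel text loop above. */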
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Map the kernel segment table cache-inhibited on the 68040/68060
	 * (not strictly necessary on the 68040, but recommended by Motorola;
	 * mandatory on the 68060).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
#if defined(M68040)
	if (RELOC(mmutype, int) == MMU_68040) {
		protopte &= ~PG_CCB;
		protopte |= PG_CIN;
	}
#endif
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 */

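/*
 * Convert a pointer into the kernel PT into the KVA mapped by the
 * corresponding PTE: one PTE maps one page, counting from the base
 * of the table (which maps KVA 0).
 */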
#define	PTE2VA(pte)	m68k_ptob(pte - ((pt_entry_t *)kptpa))

	protopte = RELOC(intiobase_phys, u_int) | PG_RW | PG_CI | PG_V;
	epte = &pte[iiomapsize];
	RELOC(intiobase, uint8_t *) = (uint8_t *)PTE2VA(pte);
	RELOC(intiolimit, uint8_t *) = (uint8_t *)PTE2VA(epte);
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	RELOC(virtual_avail, vaddr_t) = PTE2VA(pte);

	/*
	 * Calculate important exported kernel addresses and related values.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa);
	RELOC(Sysseg_pa, paddr_t) = kstpa;
#if defined(M68040)
	if (RELOC(mmutype, int) == MMU_68040)
		RELOC(protostfree, u_int) = stfree;
#endif
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) = (pt_entry_t *)SYSMAP_VA;

	/*
	 * Remember the u-area address so it can be loaded into lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	RELOC(lwp0uarea, vaddr_t) = lwp0upa - firstpa;

	/*
	 * VM data structures are now initialized; set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = m68k_ptob(RELOC(maxmem, int)) -
	    m68k_round_page(MSGBUFSIZE);
	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));

	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}