/*	$NetBSD: pmap_bootstrap.c,v 1.47 2023/02/11 02:31:34 tsutsui Exp $	*/

/*
 * This file was taken from mvme68k/mvme68k/pmap_bootstrap.c and
 * should probably be re-synced when needed.
 * CVS IDs of the source at the most recent sync:
 *	NetBSD: pmap_bootstrap.c,v 1.15 2000/11/20 19:35:30 scw Exp
 *	NetBSD: pmap_bootstrap.c,v 1.17 2001/11/08 21:53:44 scw Exp
 */


/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.47 2023/02/11 02:31:34 tsutsui Exp $");

#include "opt_m68k_arch.h"

#include <sys/param.h>
#include <sys/kcore.h>
#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#include <next68k/next68k/seglist.h>

#include <next68k/dev/intiovar.h>

#define RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa))
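
/*
 * Illustrative note (not from the original source): with the MMU off
 * the kernel still runs at its load address, so e.g.
 *	RELOC(physmem, int) += 1;
 * expands to an access through the address of `physmem' relocated by
 * `firstpa':
 *	*(int *)((uintptr_t)&physmem + firstpa) += 1;
 * which is why every global reference in pmap_bootstrap() below goes
 * through RELOC().
 */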

extern char *etext;

extern int maxmem;
extern paddr_t avail_start, avail_end;
extern phys_ram_seg_t mem_clusters[];
extern int mem_cluster_cnt;
extern paddr_t msgbufpa;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
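/*
 * (For illustration: the usual m68k arrangement is for the pmap's
 * zero/copy operations to temporarily map their target pages at
 * CADDR1/CADDR2 and work through those windows; see the pmap proper
 * for the actual users.)
 */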
void *CADDR1, *CADDR2;
char *vmmap;
void *msgbufaddr;

void pmap_bootstrap(paddr_t, paddr_t);

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  The first PA past the memory we allocate here is
 * recorded in phys_seg_list[]/avail_start rather than returned.  MMU is
 * still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t lwp0upa, kstpa, kptmpa, kptpa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
	psize_t size;
	int i;
#if defined(M68040) || defined(M68060)
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */
#endif
	u_int fbmapsize;

	/*
	 * Initialize the mem_clusters[] array for the crash dump
	 * code.  While we're at it, compute the total amount of
	 * physical memory in the system.
	 */
	for (i = 0; i < VM_PHYSSEG_MAX; i++) {
		if (RELOC(phys_seg_list[i].ps_start, paddr_t) ==
		    RELOC(phys_seg_list[i].ps_end, paddr_t)) {
			/*
			 * No more memory.
			 */
			break;
		}

		/*
		 * Make sure these are properly rounded.
		 */
		RELOC(phys_seg_list[i].ps_start, paddr_t) =
		    m68k_round_page(RELOC(phys_seg_list[i].ps_start,
					  paddr_t));
		RELOC(phys_seg_list[i].ps_end, paddr_t) =
		    m68k_trunc_page(RELOC(phys_seg_list[i].ps_end,
					  paddr_t));

		size = RELOC(phys_seg_list[i].ps_end, paddr_t) -
		    RELOC(phys_seg_list[i].ps_start, paddr_t);

		RELOC(mem_clusters[i].start, u_quad_t) =
		    RELOC(phys_seg_list[i].ps_start, paddr_t);
		RELOC(mem_clusters[i].size, u_quad_t) = size;

		RELOC(physmem, int) += size >> PGSHIFT;

		RELOC(mem_cluster_cnt, int) += 1;
	}

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, and IIOMAPSIZE
	 *   is the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	lwp0upa = nextpa;
	nextpa += USPACE;
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	kptpa = nextpa;
	fbmapsize = btoc(RELOC(fblimitpa, paddr_t) - RELOC(fbbasepa, paddr_t));
	nptpages = RELOC(Sysptsize, int) + howmany(RELOC(physmem, int), NPTEPG)
	    + (IIOMAPSIZE + fbmapsize + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;
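
	/*
	 * Sketch of the carve-out just made (illustrative summary, not
	 * from the original source):
	 *
	 *	nextpa (on entry)	first free PA after the kernel image
	 *	lwp0upa			USPACE bytes for the lwp0 u-area
	 *	kstpa			kstsize pages of segment table
	 *	kptmpa			1 page of kernel PT map
	 *	kptpa			nptpages pages of kernel PTs
	 *	nextpa (from here on)	first PA left for the VM system
	 */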

	/*
	 * Clear all PTEs to zero
	 */
	for (pte = (pt_entry_t *)kstpa; pte < (pt_entry_t *)nextpa; pte++)
		*pte = 0;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFC00000 -
	 * 0xFFFFFFFF) are mapped for the kernel page tables.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
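	/*
	 * (Arithmetic check, added for illustration: 128 level 1
	 * descriptors * 32mb = 4gb, i.e. the whole address space, and
	 * each block of 128 level 2 descriptors * 256kb covers the 32mb
	 * of one level 1 slot.  The authoritative values are the SG4_*
	 * constants used below.)
	 */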
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		int nl1desc, nl2desc;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = (st_entry_t *)kstpa;
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the next
		 * block of level 2 descriptors for Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE - 1];
		*ste = protoste;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map Sysmap.
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)];
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa and last-page page table
		 */
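		/*
		 * (Worked example, assuming l2tobm(n) yields a one-bit
		 * mask for level 2 block `n': with nl1desc == 1 the code
		 * below clears bits 0-2 and every bit at or above
		 * MAXKL2SIZE, leaving blocks 3 .. MAXKL2SIZE-1 free for
		 * the pmap module to hand out later.)
		 */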
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);

		/*
		 * Initialize Sysptmap
		 */
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_U | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries.
		 */
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	} else
#endif /* M68040 || M68060 */
	{
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = (st_entry_t *)kstpa;
		este = &este[TIA_SIZE];
		while (ste < este)
			*ste++ = SG_NV;
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SYSMAP_VA >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	}
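
	/*
	 * (Note added for clarity: in both branches above, kptmpa ends up
	 * installed both in the segment-table path for SYSMAP_VA and in
	 * Sysptmap's own SYSMAP_VA slot, so once the MMU is turned on the
	 * kernel page tables appear at SYSMAP_VA as Sysmap, with Sysptmap
	 * itself visible inside that window.)
	 */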

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (pt_entry_t *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO).
	 * The first page of kernel text remains invalid; see locore.s
	 */
	pte = (pt_entry_t *)kptpa;
	pte = &pte[m68k_btop(KERNBASE + PAGE_SIZE)];
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (firstpa + PAGE_SIZE) | PG_RO | PG_U | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (kstpa - firstpa bytes), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(kstpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Map the kernel segment table cache-inhibited on the 68040/68060
	 * (not strictly necessary on the 68040, but recommended by Motorola;
	 * mandatory on the 68060).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	if (RELOC(mmutype, int) == MMU_68040) {
		protopte &= ~PG_CMASK;
		protopte |= PG_CI;
	}
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 */

#define	PTE2VA(pte)	m68k_ptob(pte - ((pt_entry_t *)kptpa))
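
	/*
	 * (PTE2VA() converts a slot in the statically allocated kernel
	 * page tables to the KVA that slot maps, i.e. the PTE index
	 * scaled to bytes; it is used below to publish the
	 * intiobase/intiolimit and fbbase/fblimit windows and the new
	 * virtual_avail.)
	 */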

	protopte = INTIOBASE | PG_RW | PG_CI | PG_U | PG_M | PG_V;
	epte = &pte[IIOMAPSIZE];
	RELOC(intiobase, vaddr_t) = PTE2VA(pte);
	RELOC(intiolimit, vaddr_t) = PTE2VA(epte);
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/* validate the framebuffer space PTEs */

	protopte = RELOC(fbbasepa, paddr_t) |
	    PG_RW | PG_CWT | PG_U | PG_M | PG_V;
	epte = &pte[fbmapsize];
	RELOC(fbbase, vaddr_t) = PTE2VA(pte);
	RELOC(fblimit, vaddr_t) = PTE2VA(epte);
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	RELOC(virtual_avail, vaddr_t) = PTE2VA(pte);

	/*
	 * Calculate important exported kernel addresses and related values.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa);
	RELOC(Sysseg_pa, paddr_t) = kstpa;
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040)
		RELOC(protostfree, u_int) = stfree;
#endif
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) = (pt_entry_t *)SYSMAP_VA;
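	/*
	 * (With Sysmap fixed at SYSMAP_VA, a kernel VA can be turned into
	 * a pointer to its PTE by plain indexing, along the lines of
	 *	pt_entry_t *kpte = &Sysmap[m68k_btop(va)];
	 * which is roughly what the m68k kvtopte() macro does.)
	 */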

	/*
	 * Remember the u-area address so it can be loaded into lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	RELOC(lwp0uarea, vaddr_t) = lwp0upa - firstpa;

	/*
	 * Scoot the start of available on-board RAM forward to
	 * account for:
	 *
	 *	(1) The bootstrap programs in low memory (so
	 *	    that we can jump back to them without
	 *	    reloading).
	 *
	 *	(2) The kernel text, data, and bss.
	 *
	 *	(3) The pages we stole above for pmap data
	 *	    structures.
	 */
	RELOC(phys_seg_list[0].ps_start, paddr_t) = nextpa;

	/*
	 * Reserve space at the end of on-board RAM for the message
	 * buffer.  We force it into on-board RAM because VME RAM
	 * gets cleared very early on in locore.s (to initialise
	 * parity on boards that need it).  This would clobber the
	 * messages from a previously running NetBSD system.
	 */
	RELOC(phys_seg_list[0].ps_end, paddr_t) -=
	    m68k_round_page(MSGBUFSIZE);
	RELOC(msgbufpa, paddr_t) =
	    RELOC(phys_seg_list[0].ps_end, paddr_t);

	/*
	 * Initialize avail_start and avail_end.
	 */
	i = RELOC(mem_cluster_cnt, int) - 1;
	RELOC(avail_start, paddr_t) =
	    RELOC(phys_seg_list[0].ps_start, paddr_t);
	RELOC(avail_end, paddr_t) =
	    RELOC(phys_seg_list[i].ps_end, paddr_t);

	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));

	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}