xref: /netbsd-src/sys/arch/powerpc/oea/pmap.c (revision 5bbca87a4d078aa04e84c5ee952f4509cd935846)
1 /*	$NetBSD: pmap.c,v 1.121 2023/12/15 09:42:33 rin Exp $	*/
2 /*-
3  * Copyright (c) 2001 The NetBSD Foundation, Inc.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to The NetBSD Foundation
7  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
8  *
9  * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
10  * of Kyma Systems LLC.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
36  * Copyright (C) 1995, 1996 TooLs GmbH.
37  * All rights reserved.
38  *
39  * Redistribution and use in source and binary forms, with or without
40  * modification, are permitted provided that the following conditions
41  * are met:
42  * 1. Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  * 2. Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in the
46  *    documentation and/or other materials provided with the distribution.
47  * 3. All advertising materials mentioning features or use of this software
48  *    must display the following acknowledgement:
49  *	This product includes software developed by TooLs GmbH.
50  * 4. The name of TooLs GmbH may not be used to endorse or promote products
51  *    derived from this software without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
54  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
55  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
56  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
58  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
59  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
60  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
61  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
62  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63  */
64 
65 #include <sys/cdefs.h>
66 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.121 2023/12/15 09:42:33 rin Exp $");
67 
68 #define	PMAP_NOOPNAMES
69 
70 #ifdef _KERNEL_OPT
71 #include "opt_altivec.h"
72 #include "opt_multiprocessor.h"
73 #include "opt_pmap.h"
74 #include "opt_ppcarch.h"
75 #endif
76 
77 #include <sys/param.h>
78 #include <sys/proc.h>
79 #include <sys/pool.h>
80 #include <sys/queue.h>
81 #include <sys/device.h>		/* for evcnt */
82 #include <sys/systm.h>
83 #include <sys/atomic.h>
84 
85 #include <uvm/uvm.h>
86 #include <uvm/uvm_physseg.h>
87 
88 #include <machine/powerpc.h>
89 #include <powerpc/bat.h>
90 #include <powerpc/pcb.h>
91 #include <powerpc/psl.h>
92 #include <powerpc/spr.h>
93 #include <powerpc/oea/spr.h>
94 #include <powerpc/oea/sr_601.h>
95 
96 #ifdef ALTIVEC
97 extern int pmap_use_altivec;
98 #endif
99 
100 #ifdef PMAP_MEMLIMIT
101 static paddr_t pmap_memlimit = PMAP_MEMLIMIT;
102 #else
103 static paddr_t pmap_memlimit = -PAGE_SIZE;		/* there is no limit */
104 #endif
105 
106 extern struct pmap kernel_pmap_;
107 static unsigned int pmap_pages_stolen;
108 static u_long pmap_pte_valid;
109 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
110 static u_long pmap_pvo_enter_depth;
111 static u_long pmap_pvo_remove_depth;
112 #endif
113 
114 #ifndef MSGBUFADDR
115 extern paddr_t msgbuf_paddr;
116 #endif
117 
118 static struct mem_region *mem, *avail;
119 static u_int mem_cnt, avail_cnt;
120 
121 #if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
122 # define	PMAP_OEA 1
123 #endif
124 
125 #if defined(PMAP_OEA)
126 #define	_PRIxpte	"lx"
127 #else
128 #define	_PRIxpte	PRIx64
129 #endif
130 #define	_PRIxpa		"lx"
131 #define	_PRIxva		"lx"
132 #define	_PRIsr  	"lx"
133 
134 #ifdef PMAP_NEEDS_FIXUP
135 #if defined(PMAP_OEA)
136 #define	PMAPNAME(name)	pmap32_##name
137 #elif defined(PMAP_OEA64)
138 #define	PMAPNAME(name)	pmap64_##name
139 #elif defined(PMAP_OEA64_BRIDGE)
140 #define	PMAPNAME(name)	pmap64bridge_##name
141 #else
142 #error unknown variant for pmap
143 #endif
144 #endif /* PMAP_NEEDS_FIXUP */
145 
146 #ifdef PMAPNAME
147 #define	STATIC			static
148 #define pmap_pte_spill		PMAPNAME(pte_spill)
149 #define pmap_real_memory	PMAPNAME(real_memory)
150 #define pmap_init		PMAPNAME(init)
151 #define pmap_virtual_space	PMAPNAME(virtual_space)
152 #define pmap_create		PMAPNAME(create)
153 #define pmap_reference		PMAPNAME(reference)
154 #define pmap_destroy		PMAPNAME(destroy)
155 #define pmap_copy		PMAPNAME(copy)
156 #define pmap_update		PMAPNAME(update)
157 #define pmap_enter		PMAPNAME(enter)
158 #define pmap_remove		PMAPNAME(remove)
159 #define pmap_kenter_pa		PMAPNAME(kenter_pa)
160 #define pmap_kremove		PMAPNAME(kremove)
161 #define pmap_extract		PMAPNAME(extract)
162 #define pmap_protect		PMAPNAME(protect)
163 #define pmap_unwire		PMAPNAME(unwire)
164 #define pmap_page_protect	PMAPNAME(page_protect)
165 #define	pmap_pv_protect		PMAPNAME(pv_protect)
166 #define pmap_query_bit		PMAPNAME(query_bit)
167 #define pmap_clear_bit		PMAPNAME(clear_bit)
168 
169 #define pmap_activate		PMAPNAME(activate)
170 #define pmap_deactivate		PMAPNAME(deactivate)
171 
172 #define pmap_pinit		PMAPNAME(pinit)
173 #define pmap_procwr		PMAPNAME(procwr)
174 
175 #define pmap_pool		PMAPNAME(pool)
176 #define pmap_pvo_pool		PMAPNAME(pvo_pool)
177 #define pmap_pvo_table		PMAPNAME(pvo_table)
178 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
179 #define pmap_pte_print		PMAPNAME(pte_print)
180 #define pmap_pteg_check		PMAPNAME(pteg_check)
181 #define pmap_print_mmuregs	PMAPNAME(print_mmuregs)
182 #define pmap_print_pte		PMAPNAME(print_pte)
183 #define pmap_pteg_dist		PMAPNAME(pteg_dist)
184 #endif
185 #if defined(DEBUG) || defined(PMAPCHECK)
186 #define	pmap_pvo_verify		PMAPNAME(pvo_verify)
187 #define pmapcheck		PMAPNAME(check)
188 #endif
189 #if defined(DEBUG) || defined(PMAPDEBUG)
190 #define pmapdebug		PMAPNAME(debug)
191 #endif
192 #define pmap_steal_memory	PMAPNAME(steal_memory)
193 #define pmap_bootstrap		PMAPNAME(bootstrap)
194 #define pmap_bootstrap1		PMAPNAME(bootstrap1)
195 #define pmap_bootstrap2		PMAPNAME(bootstrap2)
196 #else
197 #define	STATIC			/* nothing */
198 #endif /* PMAPNAME */
199 
200 STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool);
201 STATIC void pmap_real_memory(paddr_t *, psize_t *);
202 STATIC void pmap_init(void);
203 STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *);
204 STATIC pmap_t pmap_create(void);
205 STATIC void pmap_reference(pmap_t);
206 STATIC void pmap_destroy(pmap_t);
207 STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
208 STATIC void pmap_update(pmap_t);
209 STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
210 STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
211 STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int);
212 STATIC void pmap_kremove(vaddr_t, vsize_t);
213 STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
214 
215 STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
216 STATIC void pmap_unwire(pmap_t, vaddr_t);
217 STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
218 STATIC void pmap_pv_protect(paddr_t, vm_prot_t);
219 STATIC bool pmap_query_bit(struct vm_page *, int);
220 STATIC bool pmap_clear_bit(struct vm_page *, int);
221 
222 STATIC void pmap_activate(struct lwp *);
223 STATIC void pmap_deactivate(struct lwp *);
224 
225 STATIC void pmap_pinit(pmap_t pm);
226 STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);
227 
228 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
229 STATIC void pmap_pte_print(volatile struct pte *);
230 STATIC void pmap_pteg_check(void);
231 STATIC void pmap_print_mmuregs(void);
232 STATIC void pmap_print_pte(pmap_t, vaddr_t);
233 STATIC void pmap_pteg_dist(void);
234 #endif
235 #if defined(DEBUG) || defined(PMAPCHECK)
236 STATIC void pmap_pvo_verify(void);
237 #endif
238 STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
239 STATIC void pmap_bootstrap(paddr_t, paddr_t);
240 STATIC void pmap_bootstrap1(paddr_t, paddr_t);
241 STATIC void pmap_bootstrap2(void);
242 
243 #ifdef PMAPNAME
244 const struct pmap_ops PMAPNAME(ops) = {
245 	.pmapop_pte_spill = pmap_pte_spill,
246 	.pmapop_real_memory = pmap_real_memory,
247 	.pmapop_init = pmap_init,
248 	.pmapop_virtual_space = pmap_virtual_space,
249 	.pmapop_create = pmap_create,
250 	.pmapop_reference = pmap_reference,
251 	.pmapop_destroy = pmap_destroy,
252 	.pmapop_copy = pmap_copy,
253 	.pmapop_update = pmap_update,
254 	.pmapop_enter = pmap_enter,
255 	.pmapop_remove = pmap_remove,
256 	.pmapop_kenter_pa = pmap_kenter_pa,
257 	.pmapop_kremove = pmap_kremove,
258 	.pmapop_extract = pmap_extract,
259 	.pmapop_protect = pmap_protect,
260 	.pmapop_unwire = pmap_unwire,
261 	.pmapop_page_protect = pmap_page_protect,
262 	.pmapop_pv_protect = pmap_pv_protect,
263 	.pmapop_query_bit = pmap_query_bit,
264 	.pmapop_clear_bit = pmap_clear_bit,
265 	.pmapop_activate = pmap_activate,
266 	.pmapop_deactivate = pmap_deactivate,
267 	.pmapop_pinit = pmap_pinit,
268 	.pmapop_procwr = pmap_procwr,
269 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
270 	.pmapop_pte_print = pmap_pte_print,
271 	.pmapop_pteg_check = pmap_pteg_check,
272 	.pmapop_print_mmuregs = pmap_print_mmuregs,
273 	.pmapop_print_pte = pmap_print_pte,
274 	.pmapop_pteg_dist = pmap_pteg_dist,
275 #else
276 	.pmapop_pte_print = NULL,
277 	.pmapop_pteg_check = NULL,
278 	.pmapop_print_mmuregs = NULL,
279 	.pmapop_print_pte = NULL,
280 	.pmapop_pteg_dist = NULL,
281 #endif
282 #if defined(DEBUG) || defined(PMAPCHECK)
283 	.pmapop_pvo_verify = pmap_pvo_verify,
284 #else
285 	.pmapop_pvo_verify = NULL,
286 #endif
287 	.pmapop_steal_memory = pmap_steal_memory,
288 	.pmapop_bootstrap = pmap_bootstrap,
289 	.pmapop_bootstrap1 = pmap_bootstrap1,
290 	.pmapop_bootstrap2 = pmap_bootstrap2,
291 };
292 #endif /* !PMAPNAME */
293 
294 /*
295  * The following structure is aligned to 32 bytes, if reasonably possible.
296  */
297 struct pvo_entry {
298 	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
299 	TAILQ_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
300 	struct pte pvo_pte;			/* Prebuilt PTE */
301 	pmap_t pvo_pmap;			/* ptr to owning pmap */
302 	vaddr_t pvo_vaddr;			/* VA of entry */
303 #define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
304 #define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
305 #define	PVO_WIRED		0x0010		/* PVO entry is wired */
306 #define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
307 #define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
308 #define	PVO_WIRED_P(pvo)	((pvo)->pvo_vaddr & PVO_WIRED)
309 #define	PVO_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_MANAGED)
310 #define	PVO_EXECUTABLE_P(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
311 #define	PVO_ENTER_INSERT	0		/* PVO has been inserted */
312 #define	PVO_SPILL_UNSET		1		/* PVO has been evicted */
313 #define	PVO_SPILL_SET		2		/* PVO has been spilled */
314 #define	PVO_SPILL_INSERT	3		/* PVO has been inserted */
315 #define	PVO_PMAP_PAGE_PROTECT	4		/* PVO has changed */
316 #define	PVO_PMAP_PROTECT	5		/* PVO has changed */
317 #define	PVO_REMOVE		6		/* PVO has been removed */
318 #define	PVO_WHERE_MASK		15
319 #define	PVO_WHERE_SHFT		8
320 };
321 
322 #if defined(PMAP_OEA) && !defined(DIAGNOSTIC)
323 #define	PMAP_PVO_ENTRY_ALIGN	32
324 #else
325 #define	PMAP_PVO_ENTRY_ALIGN	__alignof(struct pvo_entry)
326 #endif
327 
328 #define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
329 #define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
330 #define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
331 #define	PVO_PTEGIDX_CLR(pvo)	\
332 	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
333 #define	PVO_PTEGIDX_SET(pvo,i)	\
334 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
335 #define	PVO_WHERE(pvo,w)	\
336 	((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
337 	 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))
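/*
 * Example of the flag encoding above: with 4KB pages the VAs handed to
 * the pmap are page-aligned, so the low bits of pvo_vaddr are free for
 * bookkeeping.  A PVO mapped at VA 0xdead1000 that sits in PTE slot 5
 * of its group stores 0xdead1000 | PVO_PTEGIDX_VALID | 5 == 0xdead100d,
 * and PVO_VADDR() masks the flags back off with ~ADDR_POFF.
 */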
338 
339 TAILQ_HEAD(pvo_tqhead, pvo_entry);
340 struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
341 
342 struct pool pmap_pool;		/* pool for pmap structures */
343 struct pool pmap_pvo_pool;	/* pool for pvo entries */
344 
345 static void *pmap_pool_alloc(struct pool *, int);
346 static void pmap_pool_free(struct pool *, void *);
347 
348 static struct pool_allocator pmap_pool_allocator = {
349 	.pa_alloc = pmap_pool_alloc,
350 	.pa_free = pmap_pool_free,
351 	.pa_pagesz = 0,
352 };
353 
354 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
355 void pmap_pte_print(volatile struct pte *);
356 void pmap_pteg_check(void);
357 void pmap_pteg_dist(void);
358 void pmap_print_pte(pmap_t, vaddr_t);
359 void pmap_print_mmuregs(void);
360 #endif
361 
362 #if defined(DEBUG) || defined(PMAPCHECK)
363 #ifdef PMAPCHECK
364 int pmapcheck = 1;
365 #else
366 int pmapcheck = 0;
367 #endif
368 void pmap_pvo_verify(void);
369 static void pmap_pvo_check(const struct pvo_entry *);
370 #define	PMAP_PVO_CHECK(pvo)	 		\
371 	do {					\
372 		if (pmapcheck)			\
373 			pmap_pvo_check(pvo);	\
374 	} while (0)
375 #else
376 #define	PMAP_PVO_CHECK(pvo)	do { } while (/*CONSTCOND*/0)
377 #endif
378 static int pmap_pte_insert(int, struct pte *);
379 static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
380 	vaddr_t, paddr_t, register_t, int);
381 static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
382 static void pmap_pvo_free(struct pvo_entry *);
383 static void pmap_pvo_free_list(struct pvo_head *);
384 static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
385 static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
386 static struct pvo_entry *pmap_pvo_reclaim(void);
387 static void pvo_set_exec(struct pvo_entry *);
388 static void pvo_clear_exec(struct pvo_entry *);
389 
390 static void tlbia(void);
391 
392 static void pmap_release(pmap_t);
393 static paddr_t pmap_boot_find_memory(psize_t, psize_t, int);
394 
395 static uint32_t pmap_pvo_reclaim_nextidx;
396 #ifdef DEBUG
397 static int pmap_pvo_reclaim_debugctr;
398 #endif
399 
400 #define	VSID_NBPW	(sizeof(uint32_t) * 8)
401 static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
402 
403 static int pmap_initialized;
404 
405 #if defined(DEBUG) || defined(PMAPDEBUG)
406 #define	PMAPDEBUG_BOOT		0x0001
407 #define	PMAPDEBUG_PTE		0x0002
408 #define	PMAPDEBUG_EXEC		0x0008
409 #define	PMAPDEBUG_PVOENTER	0x0010
410 #define	PMAPDEBUG_PVOREMOVE	0x0020
411 #define	PMAPDEBUG_ACTIVATE	0x0100
412 #define	PMAPDEBUG_CREATE	0x0200
413 #define	PMAPDEBUG_ENTER		0x1000
414 #define	PMAPDEBUG_KENTER	0x2000
415 #define	PMAPDEBUG_KREMOVE	0x4000
416 #define	PMAPDEBUG_REMOVE	0x8000
417 
418 unsigned int pmapdebug = 0;
419 
420 # define DPRINTF(x, ...)	printf(x, __VA_ARGS__)
421 # define DPRINTFN(n, x, ...)	do if (pmapdebug & PMAPDEBUG_ ## n) printf(x, __VA_ARGS__); while (0)
422 #else
423 # define DPRINTF(x, ...)	do { } while (0)
424 # define DPRINTFN(n, x, ...)	do { } while (0)
425 #endif
426 
427 
428 #ifdef PMAPCOUNTERS
429 /*
430  * From pmap_subr.c
431  */
432 extern struct evcnt pmap_evcnt_mappings;
433 extern struct evcnt pmap_evcnt_unmappings;
434 
435 extern struct evcnt pmap_evcnt_kernel_mappings;
436 extern struct evcnt pmap_evcnt_kernel_unmappings;
437 
438 extern struct evcnt pmap_evcnt_mappings_replaced;
439 
440 extern struct evcnt pmap_evcnt_exec_mappings;
441 extern struct evcnt pmap_evcnt_exec_cached;
442 
443 extern struct evcnt pmap_evcnt_exec_synced;
444 extern struct evcnt pmap_evcnt_exec_synced_clear_modify;
445 extern struct evcnt pmap_evcnt_exec_synced_pvo_remove;
446 
447 extern struct evcnt pmap_evcnt_exec_uncached_page_protect;
448 extern struct evcnt pmap_evcnt_exec_uncached_clear_modify;
449 extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
450 extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
451 extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove;
452 
453 extern struct evcnt pmap_evcnt_updates;
454 extern struct evcnt pmap_evcnt_collects;
455 extern struct evcnt pmap_evcnt_copies;
456 
457 extern struct evcnt pmap_evcnt_ptes_spilled;
458 extern struct evcnt pmap_evcnt_ptes_unspilled;
459 extern struct evcnt pmap_evcnt_ptes_evicted;
460 
461 extern struct evcnt pmap_evcnt_ptes_primary[8];
462 extern struct evcnt pmap_evcnt_ptes_secondary[8];
463 extern struct evcnt pmap_evcnt_ptes_removed;
464 extern struct evcnt pmap_evcnt_ptes_changed;
465 extern struct evcnt pmap_evcnt_pvos_reclaimed;
466 extern struct evcnt pmap_evcnt_pvos_failed;
467 
468 extern struct evcnt pmap_evcnt_zeroed_pages;
469 extern struct evcnt pmap_evcnt_copied_pages;
470 extern struct evcnt pmap_evcnt_idlezeroed_pages;
471 
472 #define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
473 #define	PMAPCOUNT2(ev)	((ev).ev_count++)
474 #else
475 #define	PMAPCOUNT(ev)	((void) 0)
476 #define	PMAPCOUNT2(ev)	((void) 0)
477 #endif
478 
479 #define	TLBIE(va)	__asm volatile("tlbie %0" :: "r"(va) : "memory")
480 
481 /* XXXSL: this needs to be moved to assembler */
482 #define	TLBIEL(va)	__asm volatile("tlbie %0" :: "r"(va) : "memory")
483 
484 #ifdef MD_TLBSYNC
485 #define TLBSYNC()	MD_TLBSYNC()
486 #else
487 #define	TLBSYNC()	__asm volatile("tlbsync" ::: "memory")
488 #endif
489 #define	SYNC()		__asm volatile("sync" ::: "memory")
490 #define	EIEIO()		__asm volatile("eieio" ::: "memory")
491 #define	DCBST(va)	__asm volatile("dcbst 0,%0" :: "r"(va) : "memory")
492 #define	MFMSR()		mfmsr()
493 #define	MTMSR(psl)	mtmsr(psl)
494 #define	MFPVR()		mfpvr()
495 #define	MFSRIN(va)	mfsrin(va)
496 #define	MFTB()		mfrtcltbl()
497 
498 #if defined(DDB) && !defined(PMAP_OEA64)
499 static inline register_t
500 mfsrin(vaddr_t va)
501 {
502 	register_t sr;
503 	__asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
504 	return sr;
505 }
506 #endif	/* DDB && !PMAP_OEA64 */
507 
508 #if defined (PMAP_OEA64_BRIDGE)
509 extern void mfmsr64 (register64_t *result);
510 #endif /* PMAP_OEA64_BRIDGE */
511 
512 #define	PMAP_LOCK()		KERNEL_LOCK(1, NULL)
513 #define	PMAP_UNLOCK()		KERNEL_UNLOCK_ONE(NULL)
514 
515 static inline register_t
516 pmap_interrupts_off(void)
517 {
518 	register_t msr = MFMSR();
519 	if (msr & PSL_EE)
520 		MTMSR(msr & ~PSL_EE);
521 	return msr;
522 }
523 
524 static void
525 pmap_interrupts_restore(register_t msr)
526 {
527 	if (msr & PSL_EE)
528 		MTMSR(msr);
529 }
530 
531 static inline u_int32_t
532 mfrtcltbl(void)
533 {
534 #ifdef PPC_OEA601
535 	if ((MFPVR() >> 16) == MPC601)
536 		return (mfrtcl() >> 7);
537 	else
538 #endif
539 		return (mftbl());
540 }
541 
542 /*
543  * These small routines may have to be replaced,
544  * if/when we support processors other than the 604.
545  */
546 
547 void
548 tlbia(void)
549 {
550 	char *i;
551 
552 	SYNC();
553 #if defined(PMAP_OEA)
554 	/*
555 	 * Why not use "tlbia"?  Because not all processors implement it.
556 	 *
557 	 * This needs to be a per-CPU callback to do the appropriate thing
558 	 * for the CPU. XXX
559 	 */
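	/*
	 * Note: classic OEA TLBs are indexed by low-order EA page bits,
	 * so issuing a tlbie for one page in each of the 64 congruence
	 * classes below (a 256KB window in 4KB steps) flushes every TLB
	 * set on parts like the 603/604.
	 */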
560 	for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
561 		TLBIE(i);
562 		EIEIO();
563 		SYNC();
564 	}
565 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
566 	/* This is specifically for the 970, 970UM v1.6 pp. 140. */
567 	for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
568 		TLBIEL(i);
569 		EIEIO();
570 		SYNC();
571 	}
572 #endif
573 	TLBSYNC();
574 	SYNC();
575 }
576 
577 static inline register_t
578 va_to_vsid(const struct pmap *pm, vaddr_t addr)
579 {
580 	/*
581 	 * Rather than searching the STE groups for the VSID or extracting
582 	 * it from the SR, we know how we generate that from the ESID and
583 	 * so do that.
584 	 *
585 	 * This makes the code the same for OEA and OEA64, and also allows
586 	 * us to generate a correct-for-that-address-space VSID even if the
587 	 * pmap contains a different SR value at any given moment (e.g.
588 	 * kernel pmap on a 601 that is using I/O segments).
589 	 */
590 	return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
591 }
592 
593 static inline register_t
594 va_to_pteg(const struct pmap *pm, vaddr_t addr)
595 {
596 	register_t hash;
597 
598 	hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
599 	return hash & pmap_pteg_mask;
600 }
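/*
 * For example, if va_to_vsid() yields 0x12345 and the page index within
 * the segment is 0x0abcd, the primary group is (0x12345 ^ 0x0abcd) &
 * pmap_pteg_mask == 0x18888 & pmap_pteg_mask; the secondary group is
 * always that index XORed with pmap_pteg_mask (see pmap_pte_insert()).
 */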
601 
602 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
603 /*
604  * Given a PTE in the page table, calculate the VADDR that hashes to it.
605  * The only bit of magic is that the top 4 bits of the address don't
606  * technically exist in the PTE.  But we know we reserved 4 bits of the
607  * VSID for it so that's how we get it.
608  */
609 static vaddr_t
610 pmap_pte_to_va(volatile const struct pte *pt)
611 {
612 	vaddr_t va;
613 	uintptr_t ptaddr = (uintptr_t) pt;
614 
615 	if (pt->pte_hi & PTE_HID)
616 		ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));
617 
618 	/* PPC Bits 10-19  PPC64 Bits 42-51 */
619 #if defined(PMAP_OEA)
620 	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
621 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
622 	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
623 #endif
624 	va <<= ADDR_PIDX_SHFT;
625 
626 	/* PPC Bits 4-9  PPC64 Bits 36-41 */
627 	va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;
628 
629 #if defined(PMAP_OEA64)
630 	/* PPC64 Bits 0-35 */
631 	/* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
632 #elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
633 	/* PPC Bits 0-3 */
634 	va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
635 #endif
636 
637 	return va;
638 }
639 #endif
640 
641 static inline struct pvo_head *
642 pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
643 {
644 	struct vm_page *pg;
645 	struct vm_page_md *md;
646 	struct pmap_page *pp;
647 
648 	pg = PHYS_TO_VM_PAGE(pa);
649 	if (pg_p != NULL)
650 		*pg_p = pg;
651 	if (pg == NULL) {
652 		if ((pp = pmap_pv_tracked(pa)) != NULL)
653 			return &pp->pp_pvoh;
654 		return NULL;
655 	}
656 	md = VM_PAGE_TO_MD(pg);
657 	return &md->mdpg_pvoh;
658 }
659 
660 static inline struct pvo_head *
661 vm_page_to_pvoh(struct vm_page *pg)
662 {
663 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
664 
665 	return &md->mdpg_pvoh;
666 }
667 
668 static inline void
669 pmap_pp_attr_clear(struct pmap_page *pp, int ptebit)
670 {
671 
672 	pp->pp_attrs &= ~ptebit;
673 }
674 
675 static inline void
676 pmap_attr_clear(struct vm_page *pg, int ptebit)
677 {
678 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
679 
680 	pmap_pp_attr_clear(&md->mdpg_pp, ptebit);
681 }
682 
683 static inline int
684 pmap_pp_attr_fetch(struct pmap_page *pp)
685 {
686 
687 	return pp->pp_attrs;
688 }
689 
690 static inline int
691 pmap_attr_fetch(struct vm_page *pg)
692 {
693 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
694 
695 	return pmap_pp_attr_fetch(&md->mdpg_pp);
696 }
697 
698 static inline void
699 pmap_attr_save(struct vm_page *pg, int ptebit)
700 {
701 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
702 
703 	md->mdpg_attrs |= ptebit;
704 }
705 
706 static inline int
707 pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
708 {
709 	if (pt->pte_hi == pvo_pt->pte_hi
710 #if 0
711 	    && ((pt->pte_lo ^ pvo_pt->pte_lo) &
712 	        ~(PTE_REF|PTE_CHG)) == 0
713 #endif
714 	    )
715 		return 1;
716 	return 0;
717 }
718 
719 static inline void
720 pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
721 {
722 	/*
723 	 * Construct the PTE.  Default to IMB initially.  Valid bit
724 	 * only gets set when the real pte is set in memory.
725 	 *
726 	 * Note: Don't set the valid bit for correct operation of tlb update.
727 	 */
728 #if defined(PMAP_OEA)
729 	pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
730 	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
731 	pt->pte_lo = pte_lo;
732 #elif defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA64)
733 	pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
734 	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
735 	pt->pte_lo = (u_int64_t) pte_lo;
736 #endif /* PMAP_OEA */
737 }
738 
739 static inline void
740 pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
741 {
742 	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
743 }
744 
745 static inline void
746 pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
747 {
748 	/*
749 	 * As shown in Section 7.6.3.2.3
750 	 */
751 	pt->pte_lo &= ~ptebit;
752 	TLBIE(va);
753 	SYNC();
754 	EIEIO();
755 	TLBSYNC();
756 	SYNC();
757 #ifdef MULTIPROCESSOR
758 	DCBST(pt);
759 #endif
760 }
761 
762 static inline void
763 pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
764 {
765 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
766 	if (pvo_pt->pte_hi & PTE_VALID)
767 		panic("pte_set: setting an already valid pte %p", pvo_pt);
768 #endif
769 	pvo_pt->pte_hi |= PTE_VALID;
770 
771 	/*
772 	 * Update the PTE as defined in section 7.6.3.1
773 	 * Note that the REF/CHG bits are from pvo_pt and thus should
774 	 * have been saved so this routine can restore them (if desired).
775 	 */
776 	pt->pte_lo = pvo_pt->pte_lo;
777 	EIEIO();
778 	pt->pte_hi = pvo_pt->pte_hi;
779 	TLBSYNC();
780 	SYNC();
781 #ifdef MULTIPROCESSOR
782 	DCBST(pt);
783 #endif
784 	pmap_pte_valid++;
785 }
786 
787 static inline void
788 pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
789 {
790 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
791 	if ((pvo_pt->pte_hi & PTE_VALID) == 0)
792 		panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
793 	if ((pt->pte_hi & PTE_VALID) == 0)
794 		panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
795 #endif
796 
797 	pvo_pt->pte_hi &= ~PTE_VALID;
798 	/*
799 	 * Force the ref & chg bits back into the PTEs.
800 	 */
801 	SYNC();
802 	/*
803 	 * Invalidate the pte ... (Section 7.6.3.3)
804 	 */
805 	pt->pte_hi &= ~PTE_VALID;
806 	SYNC();
807 	TLBIE(va);
808 	SYNC();
809 	EIEIO();
810 	TLBSYNC();
811 	SYNC();
812 	/*
813 	 * Save the ref & chg bits ...
814 	 */
815 	pmap_pte_synch(pt, pvo_pt);
816 	pmap_pte_valid--;
817 }
818 
819 static inline void
820 pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
821 {
822 	/*
823 	 * Invalidate the PTE
824 	 */
825 	pmap_pte_unset(pt, pvo_pt, va);
826 	pmap_pte_set(pt, pvo_pt);
827 }
828 
829 /*
830  * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
831  * (either primary or secondary location).
832  *
833  * Note: both the destination and source PTEs must not have PTE_VALID set.
834  */
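/*
 * For example, if every slot of the primary group at ptegidx is valid,
 * the same PTE is retried in the secondary group (ptegidx ^
 * pmap_pteg_mask) with PTE_HID set; a return of -1 means both groups
 * were full and the caller must treat the PVO as evicted.
 */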
835 
836 static int
837 pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
838 {
839 	volatile struct pte *pt;
840 	int i;
841 
842 #if defined(DEBUG)
843 	DPRINTFN(PTE, "pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n",
844 		ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo);
845 #endif
846 	/*
847 	 * First try primary hash.
848 	 */
849 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
850 		if ((pt->pte_hi & PTE_VALID) == 0) {
851 			pvo_pt->pte_hi &= ~PTE_HID;
852 			pmap_pte_set(pt, pvo_pt);
853 			return i;
854 		}
855 	}
856 
857 	/*
858 	 * Now try secondary hash.
859 	 */
860 	ptegidx ^= pmap_pteg_mask;
861 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
862 		if ((pt->pte_hi & PTE_VALID) == 0) {
863 			pvo_pt->pte_hi |= PTE_HID;
864 			pmap_pte_set(pt, pvo_pt);
865 			return i;
866 		}
867 	}
868 	return -1;
869 }
870 
871 /*
872  * Spill handler.
873  *
874  * Tries to spill a page table entry from the overflow area.
875  * This runs in either real mode (if dealing with an exception spill)
876  * or virtual mode when dealing with manually spilling one of the
877  * kernel's pte entries.
878  */
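/*
 * In outline: find the evicted PVO for (pm, addr) on its overflow list,
 * try pmap_pte_insert() first, and if both hash locations are full pick
 * a pseudo-random victim slot (low bits of the timebase), unset the
 * victim's PTE, and install the source PVO in its place, updating the
 * eviction counts and list ordering of both PVOs.
 */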
879 
880 int
881 pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool isi_p)
882 {
883 	struct pvo_tqhead *spvoh, *vpvoh;
884 	struct pvo_entry *pvo, *source_pvo, *victim_pvo;
885 	volatile struct pteg *pteg;
886 	volatile struct pte *pt;
887 	register_t msr, vsid, hash;
888 	int ptegidx, hid, i, j;
889 	int done = 0;
890 
891 	PMAP_LOCK();
892 	msr = pmap_interrupts_off();
893 
894 	/* XXXRO paranoid? */
895 	if (pm->pm_evictions == 0)
896 		goto out;
897 
898 	ptegidx = va_to_pteg(pm, addr);
899 
900 	/*
901 	 * Find source pvo.
902 	 */
903 	spvoh = &pmap_pvo_table[ptegidx];
904 	source_pvo = NULL;
905 	TAILQ_FOREACH(pvo, spvoh, pvo_olink) {
906 		/*
907 		 * We need to find the pvo entry for this address...
908 		 */
909 		PMAP_PVO_CHECK(pvo);		/* sanity check */
910 
911 		/*
912 		 * If we haven't found the source and we come to a PVO with
913 		 * a valid PTE, then we know we can't find it because all
914 		 * evicted PVOs always come first in the list.
915 		 */
916 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) != 0)
917 			break;
918 
919 		if (pm == pvo->pvo_pmap && addr == PVO_VADDR(pvo)) {
920 			if (isi_p) {
921 				if (!PVO_EXECUTABLE_P(pvo))
922 					goto out;
923 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
924 				int sr __diagused =
925 				    PVO_VADDR(pvo) >> ADDR_SR_SHFT;
926 				KASSERT((pm->pm_sr[sr] & SR_NOEXEC) == 0);
927 #endif
928 			}
929 			KASSERT(!PVO_PTEGIDX_ISSET(pvo));
930 			/* XXXRO where check */
931 			source_pvo = pvo;
932 			break;
933 		}
934 	}
935 	if (source_pvo == NULL) {
936 		PMAPCOUNT(ptes_unspilled);
937 		goto out;
938 	}
939 
940 	/*
941 	 * Now we have found the entry to be spilled into the
942 	 * pteg.  Attempt to insert it into the page table.
943 	 */
944 	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
945 	if (i >= 0) {
946 		PVO_PTEGIDX_SET(pvo, i);
947 		PMAP_PVO_CHECK(pvo);	/* sanity check */
948 		PVO_WHERE(pvo, SPILL_INSERT);
949 		pvo->pvo_pmap->pm_evictions--;
950 		PMAPCOUNT(ptes_spilled);
951 		PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID) != 0
952 		    ? pmap_evcnt_ptes_secondary
953 		    : pmap_evcnt_ptes_primary)[i]);
954 
955 		TAILQ_REMOVE(spvoh, pvo, pvo_olink);
956 		TAILQ_INSERT_TAIL(spvoh, pvo, pvo_olink);
957 
958 		done = 1;
959 		goto out;
960 	}
961 
962 	/*
963 	 * Have to substitute some entry. Use the primary hash for this.
964 	 * Use the low bits of the timebase as a random generator.
965 	 *
966 	 * XXX:
967 	 * Make sure we are not picking a kernel pte for replacement.
968 	 */
969 	hid = 0;
970 	i = MFTB() & 7;
971 	pteg = &pmap_pteg_table[ptegidx];
972  retry:
973 	for (j = 0; j < 8; j++, i = (i + 1) & 7) {
974 		pt = &pteg->pt[i];
975 
976 		if ((pt->pte_hi & PTE_VALID) == 0)
977 			break;
978 
979 		vsid = (pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT;
980 		hash = VSID_TO_HASH(vsid);
981 		if (hash < PHYSMAP_VSIDBITS)
982 			break;
983 	}
984 	if (j == 8) {
985 		if (hid != 0)
986 			panic("%s: no victim\n", __func__);
987 		hid = PTE_HID;
988 		pteg = &pmap_pteg_table[ptegidx ^ pmap_pteg_mask];
989 		goto retry;
990 	}
991 
992 	/*
993 	 * We also need the pvo entry of the victim we are replacing
994 	 * so save the R & C bits of the PTE.
995 	 */
996 	if ((pt->pte_hi & PTE_HID) == hid)
997 		vpvoh = spvoh;
998 	else
999 		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask];
1000 	victim_pvo = NULL;
1001 	TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
1002 		PMAP_PVO_CHECK(pvo);		/* sanity check */
1003 
1004 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
1005 			continue;
1006 
1007 		if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
1008 			victim_pvo = pvo;
1009 			break;
1010 		}
1011 	}
1012 	if (victim_pvo == NULL) {
1013 		panic("%s: victim p-pte (%p) has no pvo entry!",
1014 		    __func__, pt);
1015 	}
1016 
1017 	/*
1018 	 * The victim should not be a kernel PVO/PTE entry.
1019 	 */
1020 	KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
1021 	KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
1022 	KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);
1023 
1024 	/*
1025 	 * We are invalidating the TLB entry for the EA we are
1026 	 * replacing even though it is still valid; if we don't,
1027 	 * we lose any ref/chg bit changes contained in the TLB
1028 	 * entry.
1029 	 */
1030 	if (hid == 0)
1031 		source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
1032 	else
1033 		source_pvo->pvo_pte.pte_hi |= PTE_HID;
1034 
1035 	/*
1036 	 * To enforce the PVO list ordering constraint that all
1037 	 * evicted entries should come before all valid entries,
1038 	 * move the source PVO to the tail of its list and the
1039 	 * victim PVO to the head of its list (which might not be
1040 	 * the same list, if the victim was using the secondary hash).
1041 	 */
1042 	TAILQ_REMOVE(spvoh, source_pvo, pvo_olink);
1043 	TAILQ_INSERT_TAIL(spvoh, source_pvo, pvo_olink);
1044 	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
1045 	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
1046 	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
1047 	pmap_pte_set(pt, &source_pvo->pvo_pte);
1048 	victim_pvo->pvo_pmap->pm_evictions++;
1049 	source_pvo->pvo_pmap->pm_evictions--;
1050 	PVO_WHERE(victim_pvo, SPILL_UNSET);
1051 	PVO_WHERE(source_pvo, SPILL_SET);
1052 
1053 	PVO_PTEGIDX_CLR(victim_pvo);
1054 	PVO_PTEGIDX_SET(source_pvo, i);
1055 	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
1056 	PMAPCOUNT(ptes_spilled);
1057 	PMAPCOUNT(ptes_evicted);
1058 	PMAPCOUNT(ptes_removed);
1059 
1060 	PMAP_PVO_CHECK(victim_pvo);
1061 	PMAP_PVO_CHECK(source_pvo);
1062 
1063 	done = 1;
1064 
1065  out:
1066 	pmap_interrupts_restore(msr);
1067 	PMAP_UNLOCK();
1068 	return done;
1069 }
1070 
1071 /*
1072  * Restrict given range to physical memory
1073  */
1074 void
1075 pmap_real_memory(paddr_t *start, psize_t *size)
1076 {
1077 	struct mem_region *mp;
1078 
1079 	for (mp = mem; mp->size; mp++) {
1080 		if (*start + *size > mp->start
1081 		    && *start < mp->start + mp->size) {
1082 			if (*start < mp->start) {
1083 				*size -= mp->start - *start;
1084 				*start = mp->start;
1085 			}
1086 			if (*start + *size > mp->start + mp->size)
1087 				*size = mp->start + mp->size - *start;
1088 			return;
1089 		}
1090 	}
1091 	*size = 0;
1092 }
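/*
 * Example: with a single region covering [0, 0x8000000), a request for
 * *start = 0x7f00000, *size = 0x200000 is clamped to *size = 0x100000
 * so that it no longer runs past the end of physical memory.
 */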
1093 
1094 /*
1095  * Initialize anything else for pmap handling.
1096  * Called during vm_init().
1097  */
1098 void
1099 pmap_init(void)
1100 {
1101 
1102 	pmap_initialized = 1;
1103 }
1104 
1105 /*
1106  * How much virtual space does the kernel get?
1107  */
1108 void
1109 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1110 {
1111 	/*
1112 	 * For now, reserve one segment (minus some overhead) for kernel
1113 	 * virtual memory
1114 	 */
1115 	*start = VM_MIN_KERNEL_ADDRESS;
1116 	*end = VM_MAX_KERNEL_ADDRESS;
1117 }
1118 
1119 /*
1120  * Allocate, initialize, and return a new physical map.
1121  */
1122 pmap_t
1123 pmap_create(void)
1124 {
1125 	pmap_t pm;
1126 
1127 	pm = pool_get(&pmap_pool, PR_WAITOK | PR_ZERO);
1128 	KASSERT((vaddr_t)pm < PMAP_DIRECT_MAPPED_LEN);
1129 	pmap_pinit(pm);
1130 
1131 	DPRINTFN(CREATE, "pmap_create: pm %p:\n"
1132 	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
1133 	    "    %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n"
1134 	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
1135 	    "    %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n",
1136 	    pm,
1137 	    pm->pm_sr[0], pm->pm_sr[1],
1138 	    pm->pm_sr[2], pm->pm_sr[3],
1139 	    pm->pm_sr[4], pm->pm_sr[5],
1140 	    pm->pm_sr[6], pm->pm_sr[7],
1141 	    pm->pm_sr[8], pm->pm_sr[9],
1142 	    pm->pm_sr[10], pm->pm_sr[11],
1143 	    pm->pm_sr[12], pm->pm_sr[13],
1144 	    pm->pm_sr[14], pm->pm_sr[15]);
1145 	return pm;
1146 }
1147 
1148 /*
1149  * Initialize a preallocated and zeroed pmap structure.
1150  */
1151 void
1152 pmap_pinit(pmap_t pm)
1153 {
1154 	register_t entropy = MFTB();
1155 	register_t mask;
1156 	int i;
1157 
1158 	/*
1159 	 * Allocate some segment registers for this pmap.
1160 	 */
1161 	pm->pm_refs = 1;
1162 	PMAP_LOCK();
1163 	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1164 		static register_t pmap_vsidcontext;
1165 		register_t hash;
1166 		unsigned int n;
1167 
1168 		/* Create a new value by multiplying by a prime and adding in
1169 		 * entropy from the timebase register.  This is to make the
1170 		 * VSID more random so that the PT Hash function collides
1171 		 * less often. (note that the prime causes gcc to do shifts
1172 		 * instead of a multiply)
1173 		 */
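		/*
		 * (0x1105 is prime and equals (1 << 12) + (1 << 8) +
		 * (1 << 2) + 1, which is why the multiply lowers to a
		 * few shifts and adds.)
		 */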
1174 		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
1175 		hash = pmap_vsidcontext & (NPMAPS - 1);
1176 		if (hash == 0) {		/* 0 is special, avoid it */
1177 			entropy += 0xbadf00d;
1178 			continue;
1179 		}
1180 		n = hash >> 5;
1181 		mask = 1L << (hash & (VSID_NBPW-1));
1182 		hash = pmap_vsidcontext;
1183 		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
1184 			/* anything free in this bucket? */
1185 			if (~pmap_vsid_bitmap[n] == 0) {
1186 				entropy = hash ^ (hash >> 16);
1187 				continue;
1188 			}
1189 			i = ffs(~pmap_vsid_bitmap[n]) - 1;
1190 			mask = 1L << i;
1191 			hash &= ~(VSID_NBPW-1);
1192 			hash |= i;
1193 		}
1194 		hash &= PTE_VSID >> PTE_VSID_SHFT;
1195 		pmap_vsid_bitmap[n] |= mask;
1196 		pm->pm_vsid = hash;
1197 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1198 		for (i = 0; i < 16; i++)
1199 			pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
1200 			    SR_NOEXEC;
1201 #endif
1202 		PMAP_UNLOCK();
1203 		return;
1204 	}
1205 	PMAP_UNLOCK();
1206 	panic("pmap_pinit: out of segments");
1207 }
1208 
1209 /*
1210  * Add a reference to the given pmap.
1211  */
1212 void
1213 pmap_reference(pmap_t pm)
1214 {
1215 	atomic_inc_uint(&pm->pm_refs);
1216 }
1217 
1218 /*
1219  * Retire the given pmap from service.
1220  * Should only be called if the map contains no valid mappings.
1221  */
1222 void
1223 pmap_destroy(pmap_t pm)
1224 {
1225 	membar_release();
1226 	if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
1227 		membar_acquire();
1228 		pmap_release(pm);
1229 		pool_put(&pmap_pool, pm);
1230 	}
1231 }
1232 
1233 /*
1234  * Release any resources held by the given physical map.
1235  * Called when a pmap initialized by pmap_pinit is being released.
1236  */
1237 void
1238 pmap_release(pmap_t pm)
1239 {
1240 	int idx, mask;
1241 
1242 	KASSERT(pm->pm_stats.resident_count == 0);
1243 	KASSERT(pm->pm_stats.wired_count == 0);
1244 
1245 	PMAP_LOCK();
1246 	if (pm->pm_sr[0] == 0)
1247 		panic("pmap_release");
1248 	idx = pm->pm_vsid & (NPMAPS-1);
1249 	mask = 1 << (idx % VSID_NBPW);
1250 	idx /= VSID_NBPW;
1251 
1252 	KASSERT(pmap_vsid_bitmap[idx] & mask);
1253 	pmap_vsid_bitmap[idx] &= ~mask;
1254 	PMAP_UNLOCK();
1255 }
1256 
1257 /*
1258  * Copy the range specified by src_addr/len
1259  * from the source map to the range dst_addr/len
1260  * in the destination map.
1261  *
1262  * This routine is only advisory and need not do anything.
1263  */
1264 void
1265 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
1266 	vsize_t len, vaddr_t src_addr)
1267 {
1268 	PMAPCOUNT(copies);
1269 }
1270 
1271 /*
1272  * Require that all active physical maps contain no
1273  * incorrect entries NOW.
1274  */
1275 void
1276 pmap_update(struct pmap *pmap)
1277 {
1278 	PMAPCOUNT(updates);
1279 	TLBSYNC();
1280 }
1281 
1282 static inline int
1283 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1284 {
1285 	int pteidx;
1286 	/*
1287 	 * We can find the actual pte entry without searching by
1288 	 * grabbing the PTE slot index kept in the low bits of pvo_vaddr
1289 	 * and by noticing the HID bit.
1290 	 */
1291 	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1292 	if (pvo->pvo_pte.pte_hi & PTE_HID)
1293 		pteidx ^= pmap_pteg_mask * 8;
1294 	return pteidx;
1295 }
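/*
 * For example, slot 3 of PTEG 0x41 yields pteidx 0x41 * 8 + 3 == 0x20b;
 * if the PVO's PTE has PTE_HID set, the entry actually lives in the
 * mirrored group, hence the XOR with pmap_pteg_mask * 8 above.
 */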
1296 
1297 volatile struct pte *
1298 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1299 {
1300 	volatile struct pte *pt;
1301 
1302 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1303 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
1304 		return NULL;
1305 #endif
1306 
1307 	/*
1308 	 * If we haven't been supplied the pteidx, calculate it.
1309 	 */
1310 	if (pteidx == -1) {
1311 		int ptegidx;
1312 		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1313 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1314 	}
1315 
1316 	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
1317 
1318 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1319 	return pt;
1320 #else
1321 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
1322 		panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1323 		    "pvo but no valid pte index", pvo);
1324 	}
1325 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
1326 		panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
1327 		    "pvo but no valid pte", pvo);
1328 	}
1329 
1330 	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
1331 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
1332 #if defined(DEBUG) || defined(PMAPCHECK)
1333 			pmap_pte_print(pt);
1334 #endif
1335 			panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1336 			    "pmap_pteg_table %p but invalid in pvo",
1337 			    pvo, pt);
1338 		}
1339 		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
1340 #if defined(DEBUG) || defined(PMAPCHECK)
1341 			pmap_pte_print(pt);
1342 #endif
1343 			panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
1344 			    "not match pte %p in pmap_pteg_table",
1345 			    pvo, pt);
1346 		}
1347 		return pt;
1348 	}
1349 
1350 	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1351 #if defined(DEBUG) || defined(PMAPCHECK)
1352 		pmap_pte_print(pt);
1353 #endif
1354 		panic("pmap_pvo_to_pte: pvo %p: has nomatching pte %p in "
1355 		    "pmap_pteg_table but valid in pvo", pvo, pt);
1356 	}
1357 	return NULL;
1358 #endif	/* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
1359 }
1360 
1361 struct pvo_entry *
1362 pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
1363 {
1364 	struct pvo_entry *pvo;
1365 	int ptegidx;
1366 
1367 	va &= ~ADDR_POFF;
1368 	ptegidx = va_to_pteg(pm, va);
1369 
1370 	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1371 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1372 		if ((uintptr_t) pvo >= PMAP_DIRECT_MAPPED_LEN)
1373 			panic("pmap_pvo_find_va: invalid pvo %p on "
1374 			    "list %#x (%p)", pvo, ptegidx,
1375 			     &pmap_pvo_table[ptegidx]);
1376 #endif
1377 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1378 			if (pteidx_p)
1379 				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1380 			return pvo;
1381 		}
1382 	}
1383 	if ((pm == pmap_kernel()) && (va < PMAP_DIRECT_MAPPED_LEN))
1384 		panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n",
1385 		    __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
1386 	return NULL;
1387 }
1388 
1389 #if defined(DEBUG) || defined(PMAPCHECK)
1390 void
1391 pmap_pvo_check(const struct pvo_entry *pvo)
1392 {
1393 	struct pvo_head *pvo_head;
1394 	struct pvo_entry *pvo0;
1395 	volatile struct pte *pt;
1396 	int failed = 0;
1397 
1398 	PMAP_LOCK();
1399 
1400 	if ((uintptr_t)(pvo+1) >= PMAP_DIRECT_MAPPED_LEN)
1401 		panic("pmap_pvo_check: pvo %p: invalid address", pvo);
1402 
1403 	if ((uintptr_t)(pvo->pvo_pmap+1) >= PMAP_DIRECT_MAPPED_LEN) {
1404 		printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
1405 		    pvo, pvo->pvo_pmap);
1406 		failed = 1;
1407 	}
1408 
1409 	if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= PMAP_DIRECT_MAPPED_LEN ||
1410 	    (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
1411 		printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1412 		    pvo, TAILQ_NEXT(pvo, pvo_olink));
1413 		failed = 1;
1414 	}
1415 
1416 	if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= PMAP_DIRECT_MAPPED_LEN ||
1417 	    (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
1418 		printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1419 		    pvo, LIST_NEXT(pvo, pvo_vlink));
1420 		failed = 1;
1421 	}
1422 
1423 	if (PVO_MANAGED_P(pvo)) {
1424 		pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
1425 		LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
1426 			if (pvo0 == pvo)
1427 				break;
1428 		}
1429 		if (pvo0 == NULL) {
1430 			printf("pmap_pvo_check: pvo %p: not present "
1431 			       "on its vlist head %p\n", pvo, pvo_head);
1432 			failed = 1;
1433 		}
1434 	} else {
1435 		KASSERT(pvo->pvo_vaddr >= VM_MIN_KERNEL_ADDRESS);
1436 		if (__predict_false(pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS))
1437 			failed = 1;
1438 	}
1439 	if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
1440 		printf("pmap_pvo_check: pvo %p: not present "
1441 		    "on its olist head\n", pvo);
1442 		failed = 1;
1443 	}
1444 	pt = pmap_pvo_to_pte(pvo, -1);
1445 	if (pt == NULL) {
1446 		if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1447 			printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1448 			    "no PTE\n", pvo);
1449 			failed = 1;
1450 		}
1451 	} else {
1452 		if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
1453 		    (uintptr_t) pt >=
1454 		    (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
1455 			printf("pmap_pvo_check: pvo %p: pte %p not in "
1456 			    "pteg table\n", pvo, pt);
1457 			failed = 1;
1458 		}
1459 		if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
1460 			printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1461 			    "no PTE\n", pvo);
1462 			failed = 1;
1463 		}
1464 		if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
1465 			printf("pmap_pvo_check: pvo %p: pte_hi differ: "
1466 			    "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
1467 			    pvo->pvo_pte.pte_hi,
1468 			    pt->pte_hi);
1469 			failed = 1;
1470 		}
1471 		if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
1472 		    (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
1473 			printf("pmap_pvo_check: pvo %p: pte_lo differ: "
1474 			    "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
1475 			    (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)),
1476 			    (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)));
1477 			failed = 1;
1478 		}
1479 		if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
1480 			printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva ""
1481 			    " doesn't not match PVO's VA %#" _PRIxva "\n",
1482 			    pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
1483 			failed = 1;
1484 		}
1485 		if (failed)
1486 			pmap_pte_print(pt);
1487 	}
1488 	if (failed)
1489 		panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
1490 		    pvo->pvo_pmap);
1491 
1492 	PMAP_UNLOCK();
1493 }
1494 #endif /* DEBUG || PMAPCHECK */
1495 
1496 /*
1497  * Search the PVO table looking for a non-wired entry.
1498  * If we find one, remove it and return it.
1499  */
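/*
 * The scan resumes one PTEG past the point where the previous reclaim
 * stopped (pmap_pvo_reclaim_nextidx), a simple round-robin policy that
 * keeps any one group from being victimized repeatedly.
 */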
1500 
1501 struct pvo_entry *
1502 pmap_pvo_reclaim(void)
1503 {
1504 	struct pvo_tqhead *pvoh;
1505 	struct pvo_entry *pvo;
1506 	uint32_t idx, endidx;
1507 
1508 	endidx = pmap_pvo_reclaim_nextidx;
1509 	for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
1510 	     idx = (idx + 1) & pmap_pteg_mask) {
1511 		pvoh = &pmap_pvo_table[idx];
1512 		TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
1513 			if (!PVO_WIRED_P(pvo)) {
1514 				pmap_pvo_remove(pvo, -1, NULL);
1515 				pmap_pvo_reclaim_nextidx = idx;
1516 				PMAPCOUNT(pvos_reclaimed);
1517 				return pvo;
1518 			}
1519 		}
1520 	}
1521 	return NULL;
1522 }
1523 
1524 /*
1525  * Enter a mapping; returns 0 on success or ENOMEM if no PVO is available.
1526  */
1527 int
1528 pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
1529 	vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
1530 {
1531 	struct pvo_entry *pvo;
1532 	struct pvo_tqhead *pvoh;
1533 	register_t msr;
1534 	int ptegidx;
1535 	int i;
1536 	int poolflags = PR_NOWAIT;
1537 
1538 	/*
1539 	 * Compute the PTE Group index.
1540 	 */
1541 	va &= ~ADDR_POFF;
1542 	ptegidx = va_to_pteg(pm, va);
1543 
1544 	msr = pmap_interrupts_off();
1545 
1546 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1547 	if (pmap_pvo_remove_depth > 0)
1548 		panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
1549 	if (++pmap_pvo_enter_depth > 1)
1550 		panic("pmap_pvo_enter: called recursively!");
1551 #endif
1552 
1553 	/*
1554 	 * Remove any existing mapping for this page.  Reuse the
1555 	 * pvo entry if there is a mapping.
1556 	 */
1557 	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1558 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1559 #ifdef DEBUG
1560 			if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
1561 			    ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
1562 			    ~(PTE_REF|PTE_CHG)) == 0 &&
1563 			   va < VM_MIN_KERNEL_ADDRESS) {
1564 				printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
1565 				    pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
1566 				printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
1567 				    pvo->pvo_pte.pte_hi,
1568 				    pm->pm_sr[va >> ADDR_SR_SHFT]);
1569 				pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
1570 #ifdef DDBX
1571 				Debugger();
1572 #endif
1573 			}
1574 #endif
1575 			PMAPCOUNT(mappings_replaced);
1576 			pmap_pvo_remove(pvo, -1, NULL);
1577 			break;
1578 		}
1579 	}
1580 
1581 	/*
1582 	 * If we aren't overwriting a mapping, try to allocate one.
1583 	 */
1584 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1585 	--pmap_pvo_enter_depth;
1586 #endif
1587 	pmap_interrupts_restore(msr);
1588 	if (pvo == NULL) {
1589 		pvo = pool_get(pl, poolflags);
1590 	}
1591 	KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS);
1592 
1593 #ifdef DEBUG
1594 	/*
1595 	 * Exercise pmap_pvo_reclaim() a little.
1596 	 */
1597 	if (pvo && (flags & PMAP_CANFAIL) != 0 &&
1598 	    pmap_pvo_reclaim_debugctr++ > 0x1000 &&
1599 	    (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
1600 		pool_put(pl, pvo);
1601 		pvo = NULL;
1602 	}
1603 #endif
1604 
1605 	msr = pmap_interrupts_off();
1606 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1607 	++pmap_pvo_enter_depth;
1608 #endif
1609 	if (pvo == NULL) {
1610 		pvo = pmap_pvo_reclaim();
1611 		if (pvo == NULL) {
1612 			if ((flags & PMAP_CANFAIL) == 0)
1613 				panic("pmap_pvo_enter: failed");
1614 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1615 			pmap_pvo_enter_depth--;
1616 #endif
1617 			PMAPCOUNT(pvos_failed);
1618 			pmap_interrupts_restore(msr);
1619 			return ENOMEM;
1620 		}
1621 	}
1622 
1623 	pvo->pvo_vaddr = va;
1624 	pvo->pvo_pmap = pm;
1625 	pvo->pvo_vaddr &= ~ADDR_POFF;
1626 	if (flags & VM_PROT_EXECUTE) {
1627 		PMAPCOUNT(exec_mappings);
1628 		pvo_set_exec(pvo);
1629 	}
1630 	if (flags & PMAP_WIRED)
1631 		pvo->pvo_vaddr |= PVO_WIRED;
1632 	if (pvo_head != NULL) {
1633 		pvo->pvo_vaddr |= PVO_MANAGED;
1634 		PMAPCOUNT(mappings);
1635 	} else {
1636 		PMAPCOUNT(kernel_mappings);
1637 	}
1638 	pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
1639 
1640 	if (pvo_head != NULL)
1641 		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1642 	if (PVO_WIRED_P(pvo))
1643 		pvo->pvo_pmap->pm_stats.wired_count++;
1644 	pvo->pvo_pmap->pm_stats.resident_count++;
1645 #if defined(DEBUG)
1646 /*	if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
1647 		DPRINTFN(PVOENTER,
1648 		    "pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n",
1649 		    pvo, pm, va, pa);
1650 #endif
1651 
1652 	/*
1653 	 * We hope this succeeds but it isn't required.
1654 	 */
1655 	pvoh = &pmap_pvo_table[ptegidx];
1656 	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1657 	if (i >= 0) {
1658 		PVO_PTEGIDX_SET(pvo, i);
1659 		PVO_WHERE(pvo, ENTER_INSERT);
1660 		PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
1661 		    ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
1662 		TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
1663 
1664 	} else {
1665 		/*
1666 		 * Since we didn't have room for this entry (which makes it
1667 		 * an evicted entry), place it at the head of the list.
1668 		 */
1669 		TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
1670 		PMAPCOUNT(ptes_evicted);
1671 		pm->pm_evictions++;
1672 		/*
1673 		 * If this is a kernel page, make sure it's active.
1674 		 */
1675 		if (pm == pmap_kernel()) {
1676 			i = pmap_pte_spill(pm, va, false);
1677 			KASSERT(i);
1678 		}
1679 	}
1680 	PMAP_PVO_CHECK(pvo);		/* sanity check */
1681 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1682 	pmap_pvo_enter_depth--;
1683 #endif
1684 	pmap_interrupts_restore(msr);
1685 	return 0;
1686 }
1687 
1688 static void
1689 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
1690 {
1691 	volatile struct pte *pt;
1692 	int ptegidx;
1693 
1694 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1695 	if (++pmap_pvo_remove_depth > 1)
1696 		panic("pmap_pvo_remove: called recursively!");
1697 #endif
1698 
1699 	/*
1700 	 * If we haven't been supplied the pteidx, calculate it.
1701 	 */
1702 	if (pteidx == -1) {
1703 		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1704 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1705 	} else {
1706 		ptegidx = pteidx >> 3;
1707 		if (pvo->pvo_pte.pte_hi & PTE_HID)
1708 			ptegidx ^= pmap_pteg_mask;
1709 	}
1710 	PMAP_PVO_CHECK(pvo);		/* sanity check */
1711 
1712 	/*
1713 	 * If there is an active pte entry, we need to deactivate it
1714 	 * (and save the ref & chg bits).
1715 	 */
1716 	pt = pmap_pvo_to_pte(pvo, pteidx);
1717 	if (pt != NULL) {
1718 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1719 		PVO_WHERE(pvo, REMOVE);
1720 		PVO_PTEGIDX_CLR(pvo);
1721 		PMAPCOUNT(ptes_removed);
1722 	} else {
1723 		KASSERT(pvo->pvo_pmap->pm_evictions > 0);
1724 		pvo->pvo_pmap->pm_evictions--;
1725 	}
1726 
1727 	/*
1728 	 * Account for executable mappings.
1729 	 */
1730 	if (PVO_EXECUTABLE_P(pvo))
1731 		pvo_clear_exec(pvo);
1732 
1733 	/*
1734 	 * Update our statistics.
1735 	 */
1736 	pvo->pvo_pmap->pm_stats.resident_count--;
1737 	if (PVO_WIRED_P(pvo))
1738 		pvo->pvo_pmap->pm_stats.wired_count--;
1739 
1740 	/*
1741 	 * If the page is managed:
1742 	 * Save the REF/CHG bits into their cache.
1743 	 * Remove the PVO from the P/V list.
1744 	 */
1745 	if (PVO_MANAGED_P(pvo)) {
1746 		register_t ptelo = pvo->pvo_pte.pte_lo;
1747 		struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
1748 
1749 		if (pg != NULL) {
1750 			/*
1751 			 * If this page was changed and it is mapped exec,
1752 			 * invalidate it.
1753 			 */
1754 			if ((ptelo & PTE_CHG) &&
1755 			    (pmap_attr_fetch(pg) & PTE_EXEC)) {
1756 				struct pvo_head *pvoh = vm_page_to_pvoh(pg);
1757 				if (LIST_EMPTY(pvoh)) {
1758 					DPRINTFN(EXEC, "[pmap_pvo_remove: "
1759 					    "%#" _PRIxpa ": clear-exec]\n",
1760 					    VM_PAGE_TO_PHYS(pg));
1761 					pmap_attr_clear(pg, PTE_EXEC);
1762 					PMAPCOUNT(exec_uncached_pvo_remove);
1763 				} else {
1764 					DPRINTFN(EXEC, "[pmap_pvo_remove: "
1765 					    "%#" _PRIxpa ": syncicache]\n",
1766 					    VM_PAGE_TO_PHYS(pg));
1767 					pmap_syncicache(VM_PAGE_TO_PHYS(pg),
1768 					    PAGE_SIZE);
1769 					PMAPCOUNT(exec_synced_pvo_remove);
1770 				}
1771 			}
1772 
1773 			pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
1774 		}
1775 		LIST_REMOVE(pvo, pvo_vlink);
1776 		PMAPCOUNT(unmappings);
1777 	} else {
1778 		PMAPCOUNT(kernel_unmappings);
1779 	}
1780 
1781 	/*
1782 	 * Remove the PVO from its list and return it to the pool.
1783 	 */
1784 	TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1785 	if (pvol) {
1786 		LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
1787 	}
1788 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1789 	pmap_pvo_remove_depth--;
1790 #endif
1791 }
1792 
1793 void
1794 pmap_pvo_free(struct pvo_entry *pvo)
1795 {
1796 
1797 	pool_put(&pmap_pvo_pool, pvo);
1798 }
1799 
1800 void
1801 pmap_pvo_free_list(struct pvo_head *pvol)
1802 {
1803 	struct pvo_entry *pvo, *npvo;
1804 
1805 	for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
1806 		npvo = LIST_NEXT(pvo, pvo_vlink);
1807 		LIST_REMOVE(pvo, pvo_vlink);
1808 		pmap_pvo_free(pvo);
1809 	}
1810 }
1811 
1812 /*
1813  * Mark a mapping as executable.
1814  * If this is the first executable mapping in the segment,
1815  * clear the noexec flag.
1816  */
1817 static void
1818 pvo_set_exec(struct pvo_entry *pvo)
1819 {
1820 	struct pmap *pm = pvo->pvo_pmap;
1821 
1822 	if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
1823 		return;
1824 	}
1825 	pvo->pvo_vaddr |= PVO_EXECUTABLE;
1826 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1827 	{
1828 		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1829 		if (pm->pm_exec[sr]++ == 0) {
1830 			pm->pm_sr[sr] &= ~SR_NOEXEC;
1831 		}
1832 	}
1833 #endif
1834 }
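
/*
 * (The shift by ADDR_SR_SHFT selects a segment register: on 32-bit
 * OEA the top four bits of the effective address pick one of the 16
 * 256MB segments, so pm_exec[] is a per-segment count of executable
 * mappings, maintained here and in pvo_clear_exec() below.)
 */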
1835 
1836 /*
1837  * Mark a mapping as non-executable.
1838  * If this was the last executable mapping in the segment,
1839  * set the noexec flag.
1840  */
1841 static void
1842 pvo_clear_exec(struct pvo_entry *pvo)
1843 {
1844 	struct pmap *pm = pvo->pvo_pmap;
1845 
1846 	if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
1847 		return;
1848 	}
1849 	pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1850 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1851 	{
1852 		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1853 		if (--pm->pm_exec[sr] == 0) {
1854 			pm->pm_sr[sr] |= SR_NOEXEC;
1855 		}
1856 	}
1857 #endif
1858 }
1859 
1860 /*
1861  * Insert physical page at pa into the given pmap at virtual address va.
1862  */
1863 int
1864 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1865 {
1866 	struct mem_region *mp;
1867 	struct pvo_head *pvo_head;
1868 	struct vm_page *pg;
1869 	register_t pte_lo;
1870 	int error;
1871 	u_int was_exec = 0;
1872 
1873 	PMAP_LOCK();
1874 
1875 	if (__predict_false(!pmap_initialized)) {
1876 		pvo_head = NULL;
1877 		pg = NULL;
1878 		was_exec = PTE_EXEC;
1879 
1880 	} else {
1881 		pvo_head = pa_to_pvoh(pa, &pg);
1882 	}
1883 
1884 	DPRINTFN(ENTER,
1885 	    "pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):",
1886 	    pm, va, pa, prot, flags);
1887 
1888 	/*
1889 	 * If this is a managed page, and it's the first reference to the
1890 	 * page, clear the execness of the page.  Otherwise fetch the execness.
1891 	 */
1892 	if (pg != NULL)
1893 		was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
1894 
1895 	DPRINTFN(ENTER, " was_exec=%d", was_exec);
1896 
1897 	/*
1898 	 * Assume the page is cache inhibited and access is guarded unless
1899 	 * it's in our available memory array.  If it is in the memory array,
1900 	 * assume it's coherent memory.
1901 	 */
1902 	if (flags & PMAP_MD_PREFETCHABLE) {
1903 		pte_lo = 0;
1904 	} else
1905 		pte_lo = PTE_G;
1906 
1907 	if ((flags & PMAP_NOCACHE) == 0) {
1908 		for (mp = mem; mp->size; mp++) {
1909 			if (pa >= mp->start && pa < mp->start + mp->size) {
1910 				pte_lo = PTE_M;
1911 				break;
1912 			}
1913 		}
1914 #ifdef MULTIPROCESSOR
1915 		if (((mfpvr() >> 16) & 0xffff) == MPC603e)
1916 			pte_lo = PTE_M;
1917 #endif
1918 	} else {
1919 		pte_lo |= PTE_I;
1920 	}
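
	/*
	 * pte_lo now holds the WIMG bits: PTE_M for pages backed by
	 * system RAM, PTE_I|PTE_G for PMAP_NOCACHE device space (PTE_I
	 * alone if marked prefetchable), and PTE_G alone (or no bits,
	 * if prefetchable) for anything else outside the memory array.
	 */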
1921 
1922 	if (prot & VM_PROT_WRITE)
1923 		pte_lo |= PTE_BW;
1924 	else
1925 		pte_lo |= PTE_BR;
1926 
1927 	/*
1928 	 * If this was in response to a fault, "pre-fault" the PTE's
1929 	 * changed/referenced bit appropriately.
1930 	 */
1931 	if (flags & VM_PROT_WRITE)
1932 		pte_lo |= PTE_CHG;
1933 	if (flags & VM_PROT_ALL)
1934 		pte_lo |= PTE_REF;
1935 
1936 	/*
1937 	 * We need to know if this page can be executable
1938 	 */
1939 	flags |= (prot & VM_PROT_EXECUTE);
1940 
1941 	/*
1942 	 * Record mapping for later back-translation and pte spilling.
1943 	 * This will overwrite any existing mapping.
1944 	 */
1945 	error = pmap_pvo_enter(pm, &pmap_pvo_pool, pvo_head, va, pa, pte_lo, flags);
1946 
1947 	/*
1948 	 * Flush the real page from the instruction cache if this page is
1949 	 * mapped executable and cacheable and has not been flushed since
1950 	 * the last time it was modified.
1951 	 */
1952 	if (error == 0 &&
1953             (flags & VM_PROT_EXECUTE) &&
1954             (pte_lo & PTE_I) == 0 &&
1955 	    was_exec == 0) {
1956 		DPRINTFN(ENTER, " %s", "syncicache");
1957 		PMAPCOUNT(exec_synced);
1958 		pmap_syncicache(pa, PAGE_SIZE);
1959 		if (pg != NULL) {
1960 			pmap_attr_save(pg, PTE_EXEC);
1961 			PMAPCOUNT(exec_cached);
1962 #if defined(DEBUG) || defined(PMAPDEBUG)
1963 			if (pmapdebug & PMAPDEBUG_ENTER)
1964 				printf(" marked-as-exec");
1965 			else if (pmapdebug & PMAPDEBUG_EXEC)
1966 				printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n",
1967 				    VM_PAGE_TO_PHYS(pg));
1968 #endif
1969 		}
1970 	}
1971 
1972 	DPRINTFN(ENTER, ": error=%d\n", error);
1973 
1974 	PMAP_UNLOCK();
1975 
1976 	return error;
1977 }
1978 
1979 void
1980 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1981 {
1982 	struct mem_region *mp;
1983 	register_t pte_lo;
1984 	int error;
1985 
1986 #if defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA)
1987 	if (va < VM_MIN_KERNEL_ADDRESS)
1988 		panic("pmap_kenter_pa: attempt to enter "
1989 		    "non-kernel address %#" _PRIxva "!", va);
1990 #endif
1991 
1992 	DPRINTFN(KENTER,
1993 	    "pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot);
1994 
1995 	PMAP_LOCK();
1996 
1997 	/*
1998 	 * Assume the page is cache inhibited and access is guarded unless
1999 	 * it's in our available memory array.  If it is in the memory array,
2000 	 * assume it's coherent memory.
2001 	 */
2002 	pte_lo = PTE_IG;
2003 	if ((flags & PMAP_NOCACHE) == 0) {
2004 		for (mp = mem; mp->size; mp++) {
2005 			if (pa >= mp->start && pa < mp->start + mp->size) {
2006 				pte_lo = PTE_M;
2007 				break;
2008 			}
2009 		}
2010 #ifdef MULTIPROCESSOR
2011 		if (((mfpvr() >> 16) & 0xffff) == MPC603e)
2012 			pte_lo = PTE_M;
2013 #endif
2014 	}
2015 
2016 	if (prot & VM_PROT_WRITE)
2017 		pte_lo |= PTE_BW;
2018 	else
2019 		pte_lo |= PTE_BR;
2020 
2021 	/*
2022 	 * We don't care about REF/CHG on PVOs on the unmanaged list.
2023 	 */
2024 	error = pmap_pvo_enter(pmap_kernel(), &pmap_pvo_pool,
2025 	    NULL, va, pa, pte_lo, prot|PMAP_WIRED);
2026 
2027 	if (error != 0)
2028 		panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d",
2029 		      va, pa, error);
2030 
2031 	PMAP_UNLOCK();
2032 }
2033 
2034 void
2035 pmap_kremove(vaddr_t va, vsize_t len)
2036 {
2037 	if (va < VM_MIN_KERNEL_ADDRESS)
2038 		panic("pmap_kremove: attempt to remove "
2039 		    "non-kernel address %#" _PRIxva "!", va);
2040 
2041 	DPRINTFN(KREMOVE, "pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len);
2042 	pmap_remove(pmap_kernel(), va, va + len);
2043 }
2044 
2045 /*
2046  * Remove the given range of mapping entries.
2047  */
2048 void
2049 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
2050 {
2051 	struct pvo_head pvol;
2052 	struct pvo_entry *pvo;
2053 	register_t msr;
2054 	int pteidx;
2055 
2056 	PMAP_LOCK();
2057 	LIST_INIT(&pvol);
2058 	msr = pmap_interrupts_off();
2059 	for (; va < endva; va += PAGE_SIZE) {
2060 		pvo = pmap_pvo_find_va(pm, va, &pteidx);
2061 		if (pvo != NULL) {
2062 			pmap_pvo_remove(pvo, pteidx, &pvol);
2063 		}
2064 	}
2065 	pmap_interrupts_restore(msr);
2066 	pmap_pvo_free_list(&pvol);
2067 	PMAP_UNLOCK();
2068 }
2069 
2070 #if defined(PMAP_OEA)
2071 #ifdef PPC_OEA601
2072 bool
2073 pmap_extract_ioseg601(vaddr_t va, paddr_t *pap)
2074 {
2075 	if ((MFPVR() >> 16) != MPC601)
2076 		return false;
2077 
2078 	const register_t sr = iosrtable[va >> ADDR_SR_SHFT];
2079 
2080 	if (SR601_VALID_P(sr) && SR601_PA_MATCH_P(sr, va)) {
2081 		if (pap)
2082 			*pap = va;
2083 		return true;
2084 	}
2085 	return false;
2086 }
2087 
2088 static bool
2089 pmap_extract_battable601(vaddr_t va, paddr_t *pap)
2090 {
2091 	const register_t batu = battable[va >> 23].batu;
2092 	const register_t batl = battable[va >> 23].batl;
2093 
2094 	if (BAT601_VALID_P(batl) && BAT601_VA_MATCH_P(batu, batl, va)) {
2095 		const register_t mask =
2096 		    (~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
2097 		if (pap)
2098 			*pap = (batl & mask) | (va & ~mask);
2099 		return true;
2100 	}
2101 	return false;
2102 }
2103 #endif /* PPC_OEA601 */
2104 
2105 bool
2106 pmap_extract_battable(vaddr_t va, paddr_t *pap)
2107 {
2108 #ifdef PPC_OEA601
2109 	if ((MFPVR() >> 16) == MPC601)
2110 		return pmap_extract_battable601(va, pap);
2111 #endif /* PPC_OEA601 */
2112 
2113 	if (oeacpufeat & OEACPU_NOBAT)
2114 		return false;
2115 
2116 	const register_t batu = battable[BAT_VA2IDX(va)].batu;
2117 
2118 	if (BAT_VALID_P(batu, 0) && BAT_VA_MATCH_P(batu, va)) {
2119 		const register_t batl = battable[BAT_VA2IDX(va)].batl;
2120 		const register_t mask =
2121 		    (~(batu & (BAT_XBL|BAT_BL)) << 15) & ~0x1ffffL;
2122 		if (pap)
2123 			*pap = (batl & mask) | (va & ~mask);
2124 		return true;
2125 	}
2126 	return false;
2127 }
2128 #endif /* PMAP_OEA */
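
/*
 * A worked example of the BAT mask arithmetic above (a sketch that
 * assumes the usual 32-bit layout, with BAT_BL an 11-bit field at
 * bits 2..12 of batu): a 256MB block has BL = 0x7ff, so
 * batu & BAT_BL == 0x1ffc and (~0x1ffc << 15) & ~0x1ffffL ==
 * 0xf0000000.  The translation then keeps the top four bits from
 * batl and the low 28 bits from va:
 *
 *	*pap = (batl & 0xf0000000) | (va & 0x0fffffff);
 */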
2129 
2130 /*
2131  * Get the physical page address for the given pmap/virtual address.
2132  */
2133 bool
2134 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
2135 {
2136 	struct pvo_entry *pvo;
2137 	register_t msr;
2138 
2139 	PMAP_LOCK();
2140 
2141 	/*
2142 	 * If this is the kernel pmap, check the battable and I/O
2143 	 * segments for a hit.  This is done only for regions outside
2144 	 * VM_MIN_KERNEL_ADDRESS-VM_MAX_KERNEL_ADDRESS.
2145 	 *
2146 	 * Be careful when checking VM_MAX_KERNEL_ADDRESS; you don't
2147 	 * want to wrap around to 0.
2148 	 */
2149 	if (pm == pmap_kernel() &&
2150 	    (va < VM_MIN_KERNEL_ADDRESS ||
2151 	     (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
2152 		KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
2153 #if defined(PMAP_OEA)
2154 #ifdef PPC_OEA601
2155 		if (pmap_extract_ioseg601(va, pap)) {
2156 			PMAP_UNLOCK();
2157 			return true;
2158 		}
2159 #endif /* PPC_OEA601 */
2160 		if (pmap_extract_battable(va, pap)) {
2161 			PMAP_UNLOCK();
2162 			return true;
2163 		}
2164 		/*
2165 		 * We still check the HTAB...
2166 		 */
2167 #elif defined(PMAP_OEA64_BRIDGE)
2168 		if (va < PMAP_DIRECT_MAPPED_LEN) {
2169 			if (pap)
2170 				*pap = va;
2171 			PMAP_UNLOCK();
2172 			return true;
2173 		}
2174 		/*
2175 		 * We still check the HTAB...
2176 		 */
2177 #elif defined(PMAP_OEA64)
2178 #error PPC_OEA64 not supported
2179 #endif /* PMAP_OEA */
2180 	}
2181 
2182 	msr = pmap_interrupts_off();
2183 	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
2184 	if (pvo != NULL) {
2185 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2186 		if (pap)
2187 			*pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
2188 			    | (va & ADDR_POFF);
2189 	}
2190 	pmap_interrupts_restore(msr);
2191 	PMAP_UNLOCK();
2192 	return pvo != NULL;
2193 }
2194 
2195 /*
2196  * Lower the protection on the specified range of this pmap.
2197  */
2198 void
2199 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
2200 {
2201 	struct pvo_entry *pvo;
2202 	volatile struct pte *pt;
2203 	register_t msr;
2204 	int pteidx;
2205 
2206 	/*
2207 	 * Since this routine only downgrades protection, we should
2208 	 * always be called with at least one bit not set.
2209 	 */
2210 	KASSERT(prot != VM_PROT_ALL);
2211 
2212 	/*
2213 	 * If there is no protection, this is equivalent to
2214 	 * removing the range from the pmap.
2215 	 */
2216 	if ((prot & VM_PROT_READ) == 0) {
2217 		pmap_remove(pm, va, endva);
2218 		return;
2219 	}
2220 
2221 	PMAP_LOCK();
2222 
2223 	msr = pmap_interrupts_off();
2224 	for (; va < endva; va += PAGE_SIZE) {
2225 		pvo = pmap_pvo_find_va(pm, va, &pteidx);
2226 		if (pvo == NULL)
2227 			continue;
2228 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2229 
2230 		/*
2231 		 * Revoke executable if asked to do so.
2232 		 */
2233 		if ((prot & VM_PROT_EXECUTE) == 0)
2234 			pvo_clear_exec(pvo);
2235 
2236 #if 0
2237 		/*
2238 		 * If the page is already read-only, no change
2239 		 * needs to be made.
2240 		 */
2241 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
2242 			continue;
2243 #endif
2244 		/*
2245 		 * Grab the PTE pointer before we diddle with
2246 		 * the cached PTE copy.
2247 		 */
2248 		pt = pmap_pvo_to_pte(pvo, pteidx);
2249 		/*
2250 		 * Change the protection of the page.
2251 		 */
2252 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
2253 		pvo->pvo_pte.pte_lo |= PTE_BR;
2254 
2255 		/*
2256 		 * If the PVO is in the page table, update
2257 		 * that pte as well.
2258 		 */
2259 		if (pt != NULL) {
2260 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2261 			PVO_WHERE(pvo, PMAP_PROTECT);
2262 			PMAPCOUNT(ptes_changed);
2263 		}
2264 
2265 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2266 	}
2267 	pmap_interrupts_restore(msr);
2268 	PMAP_UNLOCK();
2269 }
2270 
2271 void
2272 pmap_unwire(pmap_t pm, vaddr_t va)
2273 {
2274 	struct pvo_entry *pvo;
2275 	register_t msr;
2276 
2277 	PMAP_LOCK();
2278 	msr = pmap_interrupts_off();
2279 	pvo = pmap_pvo_find_va(pm, va, NULL);
2280 	if (pvo != NULL) {
2281 		if (PVO_WIRED_P(pvo)) {
2282 			pvo->pvo_vaddr &= ~PVO_WIRED;
2283 			pm->pm_stats.wired_count--;
2284 		}
2285 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2286 	}
2287 	pmap_interrupts_restore(msr);
2288 	PMAP_UNLOCK();
2289 }
2290 
2291 static void
2292 pmap_pp_protect(struct pmap_page *pp, paddr_t pa, vm_prot_t prot)
2293 {
2294 	struct pvo_head *pvo_head, pvol;
2295 	struct pvo_entry *pvo, *next_pvo;
2296 	volatile struct pte *pt;
2297 	register_t msr;
2298 
2299 	PMAP_LOCK();
2300 
2301 	KASSERT(prot != VM_PROT_ALL);
2302 	LIST_INIT(&pvol);
2303 	msr = pmap_interrupts_off();
2304 
2305 	/*
2306 	 * When UVM reuses a page, it does a pmap_page_protect with
2307 	 * VM_PROT_NONE.  At that point, we can clear the exec flag
2308 	 * since we know the page will have different contents.
2309 	 */
2310 	if ((prot & VM_PROT_READ) == 0) {
2311 		DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n",
2312 		    pa);
2313 		if (pmap_pp_attr_fetch(pp) & PTE_EXEC) {
2314 			PMAPCOUNT(exec_uncached_page_protect);
2315 			pmap_pp_attr_clear(pp, PTE_EXEC);
2316 		}
2317 	}
2318 
2319 	pvo_head = &pp->pp_pvoh;
2320 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
2321 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
2322 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2323 
2324 		/*
2325 		 * Downgrading to no mapping at all, we just remove the entry.
2326 		 */
2327 		if ((prot & VM_PROT_READ) == 0) {
2328 			pmap_pvo_remove(pvo, -1, &pvol);
2329 			continue;
2330 		}
2331 
2332 		/*
2333 		 * If EXEC permission is being revoked, just clear the
2334 		 * flag in the PVO.
2335 		 */
2336 		if ((prot & VM_PROT_EXECUTE) == 0)
2337 			pvo_clear_exec(pvo);
2338 
2339 		/*
2340 		 * If this entry is already RO, don't diddle with the
2341 		 * page table.
2342 		 */
2343 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
2344 			PMAP_PVO_CHECK(pvo);
2345 			continue;
2346 		}
2347 
2348 		/*
2349 		 * Grab the PTE before we diddle the bits so
2350 		 * pvo_to_pte can verify the pte contents are as
2351 		 * expected.
2352 		 */
2353 		pt = pmap_pvo_to_pte(pvo, -1);
2354 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
2355 		pvo->pvo_pte.pte_lo |= PTE_BR;
2356 		if (pt != NULL) {
2357 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2358 			PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
2359 			PMAPCOUNT(ptes_changed);
2360 		}
2361 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2362 	}
2363 	pmap_interrupts_restore(msr);
2364 	pmap_pvo_free_list(&pvol);
2365 
2366 	PMAP_UNLOCK();
2367 }
2368 
2369 /*
2370  * Lower the protection on the specified physical page.
2371  */
2372 void
2373 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2374 {
2375 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
2376 
2377 	pmap_pp_protect(&md->mdpg_pp, VM_PAGE_TO_PHYS(pg), prot);
2378 }
2379 
2380 /*
2381  * Lower the protection on the physical page at the specified physical
2382  * address, which may not be managed and so may not have a struct
2383  * vm_page.
2384  */
2385 void
2386 pmap_pv_protect(paddr_t pa, vm_prot_t prot)
2387 {
2388 	struct pmap_page *pp;
2389 
2390 	if ((pp = pmap_pv_tracked(pa)) == NULL)
2391 		return;
2392 	pmap_pp_protect(pp, pa, prot);
2393 }
2394 
2395 /*
2396  * Activate the address space for the specified process.  If the process
2397  * is the current process, load the new MMU context.
2398  */
2399 void
2400 pmap_activate(struct lwp *l)
2401 {
2402 	struct pcb *pcb = lwp_getpcb(l);
2403 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
2404 
2405 	DPRINTFN(ACTIVATE,
2406 	    "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp);
2407 
2408 	/*
2409 	 * XXX Normally performed in cpu_lwp_fork().
2410 	 */
2411 	pcb->pcb_pm = pmap;
2412 
2413 	/*
2414 	 * In theory, the SR registers need only be valid on return
2415 	 * to user space, so wait to set them there.
2416 	 */
2417 	if (l == curlwp) {
2418 		/* Store pointer to new current pmap. */
2419 		curpm = pmap;
2420 	}
2421 }
2422 
2423 /*
2424  * Deactivate the specified process's address space.
2425  */
2426 void
2427 pmap_deactivate(struct lwp *l)
2428 {
2429 }
2430 
2431 bool
2432 pmap_query_bit(struct vm_page *pg, int ptebit)
2433 {
2434 	struct pvo_entry *pvo;
2435 	volatile struct pte *pt;
2436 	register_t msr;
2437 
2438 	PMAP_LOCK();
2439 
2440 	if (pmap_attr_fetch(pg) & ptebit) {
2441 		PMAP_UNLOCK();
2442 		return true;
2443 	}
2444 
2445 	msr = pmap_interrupts_off();
2446 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2447 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2448 		/*
2449 		 * See if we saved the bit off.  If so cache, it and return
2450 		 * See if we saved the bit off.  If so, cache it and return
2451 		 */
2452 		if (pvo->pvo_pte.pte_lo & ptebit) {
2453 			pmap_attr_save(pg, ptebit);
2454 			PMAP_PVO_CHECK(pvo);		/* sanity check */
2455 			pmap_interrupts_restore(msr);
2456 			PMAP_UNLOCK();
2457 			return true;
2458 		}
2459 	}
2460 	/*
2461 	 * No luck, now go thru the hard part of looking at the ptes
2462 	 * themselves.  Sync so any pending REF/CHG bits are flushed
2463 	 * to the PTEs.
2464 	 */
2465 	SYNC();
2466 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2467 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2468 		/*
2469 		 * See if this pvo has a valid PTE.  If so, fetch the
2470 		 * REF/CHG bits from the valid PTE.  If the appropriate
2471 		 * ptebit is set, cache it and return success.
2472 		 */
2473 		pt = pmap_pvo_to_pte(pvo, -1);
2474 		if (pt != NULL) {
2475 			pmap_pte_synch(pt, &pvo->pvo_pte);
2476 			if (pvo->pvo_pte.pte_lo & ptebit) {
2477 				pmap_attr_save(pg, ptebit);
2478 				PMAP_PVO_CHECK(pvo);		/* sanity check */
2479 				pmap_interrupts_restore(msr);
2480 				PMAP_UNLOCK();
2481 				return true;
2482 			}
2483 		}
2484 	}
2485 	pmap_interrupts_restore(msr);
2486 	PMAP_UNLOCK();
2487 	return false;
2488 }
2489 
2490 bool
2491 pmap_clear_bit(struct vm_page *pg, int ptebit)
2492 {
2493 	struct pvo_head *pvoh = vm_page_to_pvoh(pg);
2494 	struct pvo_entry *pvo;
2495 	volatile struct pte *pt;
2496 	register_t msr;
2497 	int rv = 0;
2498 
2499 	PMAP_LOCK();
2500 	msr = pmap_interrupts_off();
2501 
2502 	/*
2503 	 * Fetch the cached value
2504 	 */
2505 	rv |= pmap_attr_fetch(pg);
2506 
2507 	/*
2508 	 * Clear the cached value.
2509 	 */
2510 	pmap_attr_clear(pg, ptebit);
2511 
2512 	/*
2513 	 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
2514 	 * can reset the right ones).  Note that since the pvo entries and
2515 	 * list heads are accessed via BAT0 and are never placed in the
2516 	 * page table, we don't have to worry about further accesses setting
2517 	 * the REF/CHG bits.
2518 	 */
2519 	SYNC();
2520 
2521 	/*
2522 	 * For each pvo entry, clear the ptebit in its cached copy.  If
2523 	 * the pvo has a valid PTE, clear the ptebit from that PTE as well.
2524 	 */
2525 	LIST_FOREACH(pvo, pvoh, pvo_vlink) {
2526 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2527 		pt = pmap_pvo_to_pte(pvo, -1);
2528 		if (pt != NULL) {
2529 			/*
2530 			 * Only sync the PTE if the bit we are looking
2531 			 * for is not already set.
2532 			 */
2533 			if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
2534 				pmap_pte_synch(pt, &pvo->pvo_pte);
2535 			/*
2536 			 * If the bit we are looking for was already set,
2537 			 * clear that bit in the pte.
2538 			 */
2539 			if (pvo->pvo_pte.pte_lo & ptebit)
2540 				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2541 		}
2542 		rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
2543 		pvo->pvo_pte.pte_lo &= ~ptebit;
2544 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2545 	}
2546 	pmap_interrupts_restore(msr);
2547 
2548 	/*
2549 	 * If we are clearing the modify bit and this page was marked EXEC
2550 	 * and the user of the page thinks the page was modified, then we
2551 	 * need to clean it from the icache if it's mapped or clear the EXEC
2552 	 * bit if it's not mapped.  The page itself might not have the CHG
2553 	 * bit set if the modification was done via DMA to the page.
2554 	 */
2555 	if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
2556 		if (LIST_EMPTY(pvoh)) {
2557 			DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n",
2558 			    VM_PAGE_TO_PHYS(pg));
2559 			pmap_attr_clear(pg, PTE_EXEC);
2560 			PMAPCOUNT(exec_uncached_clear_modify);
2561 		} else {
2562 			DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n",
2563 			    VM_PAGE_TO_PHYS(pg));
2564 			pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
2565 			PMAPCOUNT(exec_synced_clear_modify);
2566 		}
2567 	}
2568 	PMAP_UNLOCK();
2569 	return (rv & ptebit) != 0;
2570 }
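
/*
 * pmap_query_bit() and pmap_clear_bit() back the MI reference/modify
 * hooks; the machine-dependent pmap.h typically wraps them along
 * these lines (a sketch, not the verbatim header):
 *
 *	#define	pmap_is_modified(pg)	 (pmap_query_bit((pg), PTE_CHG))
 *	#define	pmap_clear_modify(pg)	 (pmap_clear_bit((pg), PTE_CHG))
 *	#define	pmap_is_referenced(pg)	 (pmap_query_bit((pg), PTE_REF))
 *	#define	pmap_clear_reference(pg) (pmap_clear_bit((pg), PTE_REF))
 */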
2571 
2572 void
2573 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
2574 {
2575 	struct pvo_entry *pvo;
2576 	size_t offset = va & ADDR_POFF;
2577 	int s;
2578 
2579 	PMAP_LOCK();
2580 	s = splvm();
2581 	while (len > 0) {
2582 		size_t seglen = PAGE_SIZE - offset;
2583 		if (seglen > len)
2584 			seglen = len;
2585 		pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
2586 		if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) {
2587 			pmap_syncicache(
2588 			    (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
2589 			PMAP_PVO_CHECK(pvo);
2590 		}
2591 		va += seglen;
2592 		len -= seglen;
2593 		offset = 0;
2594 	}
2595 	splx(s);
2596 	PMAP_UNLOCK();
2597 }
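
/*
 * (pmap_procwr() is the MI hook called after another process's text
 * has been written, e.g. by ptrace(2) planting a breakpoint; syncing
 * the icache here keeps the CPU from executing the stale copy.)
 */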
2598 
2599 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
2600 void
2601 pmap_pte_print(volatile struct pte *pt)
2602 {
2603 	printf("PTE %p: ", pt);
2604 
2606 	/* High word: */
2607 	printf("%#" _PRIxpte ": [", pt->pte_hi);
2611 
2612 	printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
2613 	printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
2614 
2615 	printf("%#" _PRIxpte " %#" _PRIxpte "",
2616 	    (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
2617 	    pt->pte_hi & PTE_API);
2619 	printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
2623 
2624 	/* Low word: */
2626 	printf(" %#" _PRIxpte ": [", pt->pte_lo);
2627 	printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
2632 	printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
2633 	printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
2634 	printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
2635 	printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
2636 	printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
2637 	printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
2638 	switch (pt->pte_lo & PTE_PP) {
2639 	case PTE_BR: printf("br]\n"); break;
2640 	case PTE_BW: printf("bw]\n"); break;
2641 	case PTE_SO: printf("so]\n"); break;
2642 	case PTE_SW: printf("sw]\n"); break;
2643 	}
2644 }
2645 #endif
2646 
2647 #if defined(DDB)
2648 void
2649 pmap_pteg_check(void)
2650 {
2651 	volatile struct pte *pt;
2652 	int i;
2653 	int ptegidx;
2654 	u_int p_valid = 0;
2655 	u_int s_valid = 0;
2656 	u_int invalid = 0;
2657 
2658 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2659 		for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
2660 			if (pt->pte_hi & PTE_VALID) {
2661 				if (pt->pte_hi & PTE_HID)
2662 					s_valid++;
2663 				else
2664 					p_valid++;
2667 			} else
2668 				invalid++;
2669 		}
2670 	}
2671 	printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
2672 		p_valid, p_valid, s_valid, s_valid,
2673 		invalid, invalid);
2674 }
2675 
2676 void
2677 pmap_print_mmuregs(void)
2678 {
2679 	int i;
2680 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
2681 	u_int cpuvers;
2682 #endif
2683 #ifndef PMAP_OEA64
2684 	vaddr_t addr;
2685 	register_t soft_sr[16];
2686 #endif
2687 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
2688 	struct bat soft_ibat[4];
2689 	struct bat soft_dbat[4];
2690 #endif
2691 	paddr_t sdr1;
2692 
2693 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
2694 	cpuvers = MFPVR() >> 16;
2695 #endif
2696 	__asm volatile ("mfsdr1 %0" : "=r"(sdr1));
2697 #ifndef PMAP_OEA64
2698 	addr = 0;
2699 	for (i = 0; i < 16; i++) {
2700 		soft_sr[i] = MFSRIN(addr);
2701 		addr += (1 << ADDR_SR_SHFT);
2702 	}
2703 #endif
2704 
2705 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
2706 	/* read iBAT (601: uBAT) registers */
2707 	__asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
2708 	__asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
2709 	__asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
2710 	__asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
2711 	__asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
2712 	__asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
2713 	__asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
2714 	__asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
2715 
2716 
2717 	if (cpuvers != MPC601) {
2718 		/* read dBAT registers */
2719 		__asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
2720 		__asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
2721 		__asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
2722 		__asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
2723 		__asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
2724 		__asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
2725 		__asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
2726 		__asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
2727 	}
2728 #endif
2729 
2730 	printf("SDR1:\t%#" _PRIxpa "\n", sdr1);
2731 #ifndef PMAP_OEA64
2732 	printf("SR[]:\t");
2733 	for (i = 0; i < 4; i++)
2734 		printf("0x%08lx,   ", soft_sr[i]);
2735 	printf("\n\t");
2736 	for ( ; i < 8; i++)
2737 		printf("0x%08lx,   ", soft_sr[i]);
2738 	printf("\n\t");
2739 	for ( ; i < 12; i++)
2740 		printf("0x%08lx,   ", soft_sr[i]);
2741 	printf("\n\t");
2742 	for ( ; i < 16; i++)
2743 		printf("0x%08lx,   ", soft_sr[i]);
2744 	printf("\n");
2745 #endif
2746 
2747 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
2748 	printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
2749 	for (i = 0; i < 4; i++) {
2750 		printf("0x%08lx 0x%08lx, ",
2751 			soft_ibat[i].batu, soft_ibat[i].batl);
2752 		if (i == 1)
2753 			printf("\n\t");
2754 	}
2755 	if (cpuvers != MPC601) {
2756 		printf("\ndBAT[]:\t");
2757 		for (i = 0; i < 4; i++) {
2758 			printf("0x%08lx 0x%08lx, ",
2759 				soft_dbat[i].batu, soft_dbat[i].batl);
2760 			if (i == 1)
2761 				printf("\n\t");
2762 		}
2763 	}
2764 	printf("\n");
2765 #endif /* PMAP_OEA... */
2766 }
2767 
2768 void
2769 pmap_print_pte(pmap_t pm, vaddr_t va)
2770 {
2771 	struct pvo_entry *pvo;
2772 	volatile struct pte *pt;
2773 	int pteidx;
2774 
2775 	pvo = pmap_pvo_find_va(pm, va, &pteidx);
2776 	if (pvo != NULL) {
2777 		pt = pmap_pvo_to_pte(pvo, pteidx);
2778 		if (pt != NULL) {
2779 			printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n",
2780 				va, pt,
2781 				pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
2782 				pt->pte_hi, pt->pte_lo);
2783 		} else {
2784 			printf("No valid PTE found\n");
2785 		}
2786 	} else {
2787 		printf("Address not in pmap\n");
2788 	}
2789 }
2790 
2791 void
2792 pmap_pteg_dist(void)
2793 {
2794 	struct pvo_entry *pvo;
2795 	int ptegidx;
2796 	int depth;
2797 	int max_depth = 0;
2798 	unsigned int depths[64];
2799 
2800 	memset(depths, 0, sizeof(depths));
2801 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2802 		depth = 0;
2803 		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2804 			depth++;
2805 		}
2806 		if (depth > max_depth)
2807 			max_depth = depth;
2808 		if (depth > 63)
2809 			depth = 63;
2810 		depths[depth]++;
2811 	}
2812 
2813 	for (depth = 0; depth < 64; depth++) {
2814 		printf("  [%2d]: %8u", depth, depths[depth]);
2815 		if ((depth & 3) == 3)
2816 			printf("\n");
2817 		if (depth == max_depth)
2818 			break;
2819 	}
2820 	if ((depth & 3) != 3)
2821 		printf("\n");
2822 	printf("Max depth found was %d\n", max_depth);
2823 }
2824 #endif /* DDB */
2825 
2826 #if defined(PMAPCHECK) || defined(DEBUG)
2827 void
2828 pmap_pvo_verify(void)
2829 {
2830 	int ptegidx;
2831 	int s;
2832 
2833 	s = splvm();
2834 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2835 		struct pvo_entry *pvo;
2836 		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2837 			if ((uintptr_t) pvo >= PMAP_DIRECT_MAPPED_LEN)
2838 				panic("pmap_pvo_verify: invalid pvo %p "
2839 				    "on list %#x", pvo, ptegidx);
2840 			pmap_pvo_check(pvo);
2841 		}
2842 	}
2843 	splx(s);
2844 }
2845 #endif /* PMAPCHECK || DEBUG */
2846 
2847 /*
2848  * Queue for unmanaged pages, used before uvm.page_init_done.
2849  * Reused on pool shortage; see pmap_pool_alloc() below.
2850  */
2851 struct pup {
2852 	SIMPLEQ_ENTRY(pup) pup_link;
2853 };
2854 SIMPLEQ_HEAD(pup_head, pup);
2855 static struct pup_head pup_head = SIMPLEQ_HEAD_INITIALIZER(pup_head);
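
/*
 * (A struct pup is overlaid on the free page itself: these pages are
 * direct mapped, so the page's address doubles as the list node and
 * no separate bookkeeping storage is needed.)
 */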
2856 
2857 static struct pup *
2858 pmap_alloc_unmanaged(void)
2859 {
2860 	struct pup *pup;
2861 	register_t msr;
2862 
2863 	PMAP_LOCK();
2864 	msr = pmap_interrupts_off();
2865 	pup = SIMPLEQ_FIRST(&pup_head);
2866 	if (pup != NULL)
2867 		SIMPLEQ_REMOVE_HEAD(&pup_head, pup_link);
2868 	pmap_interrupts_restore(msr);
2869 	PMAP_UNLOCK();
2870 	return pup;
2871 }
2872 
2873 static void
2874 pmap_free_unmanaged(struct pup *pup)
2875 {
2876 	register_t msr;
2877 
2878 	PMAP_LOCK();
2879 	msr = pmap_interrupts_off();
2880 	SIMPLEQ_INSERT_HEAD(&pup_head, pup, pup_link);
2881 	pmap_interrupts_restore(msr);
2882 	PMAP_UNLOCK();
2883 }
2884 
2885 void *
2886 pmap_pool_alloc(struct pool *pp, int flags)
2887 {
2888 	struct vm_page *pg;
2889 	paddr_t pa;
2890 
2891 	if (__predict_false(!uvm.page_init_done))
2892 		return (void *)uvm_pageboot_alloc(PAGE_SIZE);
2893 
2894  retry:
2895 	pg = uvm_pagealloc_strat(NULL /*obj*/, 0 /*off*/, NULL /*anon*/,
2896 	    UVM_PGA_USERESERVE /*flags*/, UVM_PGA_STRAT_ONLY /*strat*/,
2897 	    VM_FREELIST_DIRECT_MAPPED /*free_list*/);
2898 	if (__predict_false(pg == NULL)) {
2899 		void *va = pmap_alloc_unmanaged();
2900 		if (va != NULL)
2901 			return va;
2902 
2903 		if ((flags & PR_WAITOK) == 0)
2904 			return NULL;
2905 		uvm_wait("plpg");
2906 		goto retry;
2907 	}
2908 	KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg));
2909 	pa = VM_PAGE_TO_PHYS(pg);
2910 	return (void *)(uintptr_t)pa;
2911 }
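
/*
 * (Note that the backend hands out the page's physical address cast
 * to a pointer: VM_FREELIST_DIRECT_MAPPED pages are mapped 1:1, so
 * PA == VA for them and no KVA allocation is needed.)
 */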
2912 
2913 void
2914 pmap_pool_free(struct pool *pp, void *va)
2915 {
2916 	struct vm_page *pg;
2917 
2918 	pg = PHYS_TO_VM_PAGE((paddr_t)va);
2919 	if (__predict_false(pg == NULL)) {
2920 		pmap_free_unmanaged(va);
2921 		return;
2922 	}
2923 	uvm_pagefree(pg);
2924 }
2925 
2926 /*
2927  * This routine is used during bootstrapping to steal to-be-managed
2928  * memory (which will then be unmanaged).  We grab from the first
2929  * PMAP_DIRECT_MAPPED_LEN bytes for our pmap needs and above it for other stuff.
2930  */
2931 vaddr_t
2932 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
2933 {
2934 	vsize_t size;
2935 	vaddr_t va;
2936 	paddr_t start, end, pa = 0;
2937 	int npgs, freelist;
2938 	uvm_physseg_t bank;
2939 
2940 	if (uvm.page_init_done == true)
2941 		panic("pmap_steal_memory: called _after_ bootstrap");
2942 
2943 	*vstartp = VM_MIN_KERNEL_ADDRESS;
2944 	*vendp = VM_MAX_KERNEL_ADDRESS;
2945 
2946 	size = round_page(vsize);
2947 	npgs = atop(size);
2948 
2949 	/*
2950 	 * PA 0 will never be among those given to UVM so we can use it
2951 	 * to indicate we couldn't steal any memory.
2952 	 */
2953 
2954 	for (bank = uvm_physseg_get_first();
2955 	     uvm_physseg_valid_p(bank);
2956 	     bank = uvm_physseg_get_next(bank)) {
2957 
2958 		freelist = uvm_physseg_get_free_list(bank);
2959 		start = uvm_physseg_get_start(bank);
2960 		end = uvm_physseg_get_end(bank);
2961 
2962 		if (freelist == VM_FREELIST_DIRECT_MAPPED &&
2963 		    (end - start) >= npgs) {
2964 			pa = ptoa(start);
2965 			break;
2966 		}
2967 	}
2968 
2969 	if (pa == 0)
2970 		panic("pmap_steal_memory: no appropriate memory to steal!");
2971 
2972 	uvm_physseg_unplug(start, npgs);
2973 
2974 	va = (vaddr_t) pa;
2975 	memset((void *) va, 0, size);
2976 	pmap_pages_stolen += npgs;
2977 #ifdef DEBUG
2978 	if (pmapdebug && npgs > 1) {
2979 		u_int cnt = 0;
2980 		for (bank = uvm_physseg_get_first();
2981 		     uvm_physseg_valid_p(bank);
2982 		     bank = uvm_physseg_get_next(bank)) {
2983 			cnt += uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank);
2984 		}
2985 		printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
2986 		    npgs, pmap_pages_stolen, cnt);
2987 	}
2988 #endif
2989 
2990 	return va;
2991 }
2992 
2993 /*
2994  * Find a chunk of memory with the right size and alignment.
2995  */
2996 paddr_t
pmap_boot_find_memory(psize_t size,psize_t alignment,int at_end)2997 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
2998 {
2999 	struct mem_region *mp;
3000 	paddr_t s, e;
3001 	int i, j;
3002 
3003 	size = round_page(size);
3004 
3005 	DPRINTFN(BOOT,
3006 	    "pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d",
3007 	    size, alignment, at_end);
3008 
3009 	if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
3010 		panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa,
3011 		    alignment);
3012 
3013 	if (at_end) {
3014 		if (alignment != PAGE_SIZE)
3015 			panic("pmap_boot_find_memory: invalid ending "
3016 			    "alignment %#" _PRIxpa, alignment);
3017 
3018 		for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
3019 			s = mp->start + mp->size - size;
3020 			if (s >= mp->start && mp->size >= size) {
3021 				DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
3022 				DPRINTFN(BOOT,
3023 				    "pmap_boot_find_memory: b-avail[%d] start "
3024 				    "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
3025 				     mp->start, mp->size);
3026 				mp->size -= size;
3027 				DPRINTFN(BOOT,
3028 				    "pmap_boot_find_memory: a-avail[%d] start "
3029 				    "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
3030 				     mp->start, mp->size);
3031 				return s;
3032 			}
3033 		}
3034 		panic("pmap_boot_find_memory: no available memory");
3035 	}
3036 
3037 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3038 		s = (mp->start + alignment - 1) & ~(alignment-1);
3039 		e = s + size;
3040 
3041 		/*
3042 		 * Is the calculated region entirely within the region?
3043 		 * Is the calculated region entirely within this avail region?
3044 		if (s < mp->start || e > mp->start + mp->size)
3045 			continue;
3046 
3047 		DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
3048 		if (s == mp->start) {
3049 			/*
3050 			 * If the block starts at the beginning of the region,
3051 			 * adjust the size & start. (the region may now be
3052 			 * zero in length)
3053 			 */
3054 			DPRINTFN(BOOT,
3055 			    "pmap_boot_find_memory: b-avail[%d] start "
3056 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3057 			mp->start += size;
3058 			mp->size -= size;
3059 			DPRINTFN(BOOT,
3060 			    "pmap_boot_find_memory: a-avail[%d] start "
3061 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3062 		} else if (e == mp->start + mp->size) {
3063 			/*
3064 			 * If the block ends at the end of the region,
3065 			 * adjust only the size.
3066 			 */
3067 			DPRINTFN(BOOT,
3068 			    "pmap_boot_find_memory: b-avail[%d] start "
3069 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3070 			mp->size -= size;
3071 			DPRINTFN(BOOT,
3072 			    "pmap_boot_find_memory: a-avail[%d] start "
3073 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3074 		} else {
3075 			/*
3076 			 * Block is in the middle of the region, so we
3077 			 * have to split it in two.
3078 			 */
3079 			for (j = avail_cnt; j > i + 1; j--) {
3080 				avail[j] = avail[j-1];
3081 			}
3082 			DPRINTFN(BOOT,
3083 			    "pmap_boot_find_memory: b-avail[%d] start "
3084 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3085 			mp[1].start = e;
3086 			mp[1].size = mp[0].start + mp[0].size - e;
3087 			mp[0].size = s - mp[0].start;
3088 			avail_cnt++;
3089 			for (; i < avail_cnt; i++) {
3090 				DPRINTFN(BOOT,
3091 				    "pmap_boot_find_memory: a-avail[%d] "
3092 				    "start %#" _PRIxpa " size %#" _PRIxpa "\n", i,
3093 				     avail[i].start, avail[i].size);
3094 			}
3095 		}
3096 		KASSERT(s == (uintptr_t) s);
3097 		return s;
3098 	}
3099 	panic("pmap_boot_find_memory: not enough memory for "
3100 	    "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment);
3101 }
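
/*
 * A worked example of the middle-split case above (hypothetical
 * numbers): stealing size 0x10000 with alignment 0x10000 from an
 * avail region [0x3000, 0x100000) rounds s up to 0x10000, returns
 * it, and leaves two regions behind:
 *
 *	avail[i]   = { .start = 0x3000,  .size = 0xd000  }
 *	avail[i+1] = { .start = 0x20000, .size = 0xe0000 }
 */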
3102 
3103 /* XXXSL: we don't have any BATs to do this, map in Segment 0 1:1 using page tables */
3104 #if defined (PMAP_OEA64_BRIDGE)
3105 int
3106 pmap_setup_segment0_map(int use_large_pages, ...)
3107 {
3108     vaddr_t va, va_end;
3109 
3110     register_t pte_lo = 0x0;
3111     int ptegidx = 0;
3112     struct pte pte;
3113     va_list ap;
3114 
3115     /* Coherent + Supervisor RW, no user access */
3116     pte_lo = PTE_M;
3117 
3118     /* XXXSL
3119      * Map the 1st segment 1:1; we'll be careful not to spill kernel entries
3120      * later, since those have to take priority.
3121      */
3122     for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
3123         ptegidx = va_to_pteg(pmap_kernel(), va);
3124         pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
3125         (void)pmap_pte_insert(ptegidx, &pte);
3126     }
3127 
3128     va_start(ap, use_large_pages);
3129     while (1) {
3130         paddr_t pa;
3131         size_t size;
3132 
3133         va = va_arg(ap, vaddr_t);
3134 
3135         if (va == 0)
3136             break;
3137 
3138         pa = va_arg(ap, paddr_t);
3139         size = va_arg(ap, size_t);
3140 
3141         for (va_end = va + size; va < va_end; va += 0x1000, pa += 0x1000) {
3142 #if 0
3143 	    printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__,  va, pa);
3144 #endif
3145             ptegidx = va_to_pteg(pmap_kernel(), va);
3146             pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
3147             (void)pmap_pte_insert(ptegidx, &pte);
3148         }
3149     }
3150     va_end(ap);
3151 
3152     TLBSYNC();
3153     SYNC();
3154     return (0);
3155 }
3156 #endif /* PMAP_OEA64_BRIDGE */
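
/*
 * The variadic tail of pmap_setup_segment0_map() is consumed as
 * (va, pa, size) triples terminated by a zero va, so a caller maps
 * extra ranges with something like (a sketch with hypothetical
 * values; actual callers live in the MD startup code):
 *
 *	pmap_setup_segment0_map(0, (vaddr_t)msgbuf_paddr,
 *	    (paddr_t)msgbuf_paddr, (size_t)round_page(MSGBUFSIZE), 0x0);
 */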
3157 
3158 /*
3159  * Set up the bottom level of the data structures necessary for the kernel
3160  * to manage memory.  MMU hardware is programmed in pmap_bootstrap2().
3161  */
3162 void
3163 pmap_bootstrap1(paddr_t kernelstart, paddr_t kernelend)
3164 {
3165 	struct mem_region *mp, tmp;
3166 	paddr_t s, e;
3167 	psize_t size;
3168 	int i, j;
3169 
3170 	/*
3171 	 * Get memory.
3172 	 */
3173 	mem_regions(&mem, &avail);
3174 #if defined(DEBUG)
3175 	if (pmapdebug & PMAPDEBUG_BOOT) {
3176 		printf("pmap_bootstrap: memory configuration:\n");
3177 		for (mp = mem; mp->size; mp++) {
3178 			printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n",
3179 				mp->start, mp->size);
3180 		}
3181 		for (mp = avail; mp->size; mp++) {
3182 			printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n",
3183 				mp->start, mp->size);
3184 		}
3185 	}
3186 #endif
3187 
3188 	/*
3189 	 * Find out how much physical memory we have and in how many chunks.
3190 	 */
3191 	for (mem_cnt = 0, mp = mem; mp->size; mp++) {
3192 		if (mp->start >= pmap_memlimit)
3193 			continue;
3194 		if (mp->start + mp->size > pmap_memlimit) {
3195 			size = pmap_memlimit - mp->start;
3196 			physmem += btoc(size);
3197 		} else {
3198 			physmem += btoc(mp->size);
3199 		}
3200 		mem_cnt++;
3201 	}
3202 
3203 	/*
3204 	 * Count the number of available entries.
3205 	 */
3206 	for (avail_cnt = 0, mp = avail; mp->size; mp++)
3207 		avail_cnt++;
3208 
3209 	/*
3210 	 * Page align all regions.
3211 	 */
3212 	kernelstart = trunc_page(kernelstart);
3213 	kernelend = round_page(kernelend);
3214 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3215 		s = round_page(mp->start);
3216 		mp->size -= (s - mp->start);
3217 		mp->size = trunc_page(mp->size);
3218 		mp->start = s;
3219 		e = mp->start + mp->size;
3220 
3221 		DPRINTFN(BOOT,
3222 		    "pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3223 		    i, mp->start, mp->size);
3224 
3225 		/*
3226 		 * Don't allow the end to run beyond our artificial limit
3227 		 */
3228 		if (e > pmap_memlimit)
3229 			e = pmap_memlimit;
3230 
3231 		/*
3232 		 * Is this region empty or strange?  If so, skip it.
3233 		 */
3234 		if (e <= s) {
3235 			mp->start = 0;
3236 			mp->size = 0;
3237 			continue;
3238 		}
3239 
3240 		/*
3241 		 * Does this overlap the beginning of the kernel?
3242 		 *   Does it extend past the end of the kernel?
3243 		 */
3244 		else if (s < kernelstart && e > kernelstart) {
3245 			if (e > kernelend) {
3246 				avail[avail_cnt].start = kernelend;
3247 				avail[avail_cnt].size = e - kernelend;
3248 				avail_cnt++;
3249 			}
3250 			mp->size = kernelstart - s;
3251 		}
3252 		/*
3253 		 * Check whether this region overlaps the end of the kernel.
3254 		 */
3255 		else if (s < kernelend && e > kernelend) {
3256 			mp->start = kernelend;
3257 			mp->size = e - kernelend;
3258 		}
3259 		/*
3260 		 * Check whether this region is completely inside the kernel.
3261 		 * Nuke it if so.
3262 		 */
3263 		else if (s >= kernelstart && e <= kernelend) {
3264 			mp->start = 0;
3265 			mp->size = 0;
3266 		}
3267 		/*
3268 		 * If the user imposed a memory limit, enforce it.
3269 		 */
3270 		else if (s >= pmap_memlimit) {
3271 			mp->start = -PAGE_SIZE;	/* so we can tell why it was dropped */
3272 			mp->size = 0;
3273 		}
3274 		else {
3275 			mp->start = s;
3276 			mp->size = e - s;
3277 		}
3278 		DPRINTFN(BOOT,
3279 		    "pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3280 		    i, mp->start, mp->size);
3281 	}
3282 
3283 	/*
3284 	 * Move (and uncount) all the null regions to the end.
3285 	 */
3286 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3287 		if (mp->size == 0) {
3288 			tmp = avail[i];
3289 			avail[i] = avail[--avail_cnt];
3290 			avail[avail_cnt] = tmp;	/* complete the swap */
3291 		}
3292 	}
3293 
3294 	/*
3295 	 * (Bubble)sort them into ascending order.
3296 	 */
3297 	for (i = 0; i < avail_cnt; i++) {
3298 		for (j = i + 1; j < avail_cnt; j++) {
3299 			if (avail[i].start > avail[j].start) {
3300 				tmp = avail[i];
3301 				avail[i] = avail[j];
3302 				avail[j] = tmp;
3303 			}
3304 		}
3305 	}
3306 
3307 	/*
3308 	 * Make sure they don't overlap.
3309 	 */
3310 	for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
3311 		if (mp[0].start + mp[0].size > mp[1].start) {
3312 			mp[0].size = mp[1].start - mp[0].start;
3313 		}
3314 		DPRINTFN(BOOT,
3315 		    "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3316 		    i, mp->start, mp->size);
3317 	}
3318 	DPRINTFN(BOOT,
3319 	    "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3320 	    i, mp->start, mp->size);
3321 
3322 #ifdef	PTEGCOUNT
3323 	pmap_pteg_cnt = PTEGCOUNT;
3324 #else /* PTEGCOUNT */
3325 
3326 	pmap_pteg_cnt = 0x1000;
3327 
3328 	while (pmap_pteg_cnt < physmem)
3329 		pmap_pteg_cnt <<= 1;
3330 
3331 	pmap_pteg_cnt >>= 1;
3332 #endif /* PTEGCOUNT */
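
/*
 * Example of the sizing loop (hypothetical physmem): with physmem =
 * 0x18000 pages (384MB of 4KB pages) the doubling stops at 0x20000
 * and the final shift leaves 0x10000 PTEGs, the largest power of two
 * below physmem.  On 32-bit OEA a PTEG is 64 bytes (eight 8-byte
 * PTEs), so that works out to a 4MB hash table.
 */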
3333 
3334 #ifdef DEBUG
3335 	DPRINTFN(BOOT, "pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt);
3336 #endif
3337 
3338 	/*
3339 	 * Find suitably aligned memory for PTEG hash table.
3340 	 */
3341 	size = pmap_pteg_cnt * sizeof(struct pteg);
3342 	pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0);
3343 
3344 #ifdef DEBUG
3345 	DPRINTFN(BOOT,
3346 		"PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table);
3347 #endif
3348 
3349 
3350 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
3351 	if ( (uintptr_t) pmap_pteg_table + size > PMAP_DIRECT_MAPPED_LEN)
3352 		panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > PMAP_DIRECT_MAPPED_LEN",
3353 		    pmap_pteg_table, size);
3354 #endif
3355 
3356 	memset(__UNVOLATILE(pmap_pteg_table), 0,
3357 		pmap_pteg_cnt * sizeof(struct pteg));
3358 	pmap_pteg_mask = pmap_pteg_cnt - 1;
3359 
3360 	/*
3361 	 * We cannot do pmap_steal_memory here since UVM hasn't been loaded
3362 	 * with pages.  So we just steal them before giving them to UVM.
3363 	 */
3364 	size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
3365 	pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0);
3366 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
3367 	if ( (uintptr_t) pmap_pvo_table + size > PMAP_DIRECT_MAPPED_LEN)
3368 		panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > PMAP_DIRECT_MAPPED_LEN",
3369 		    pmap_pvo_table, size);
3370 #endif
3371 
3372 	for (i = 0; i < pmap_pteg_cnt; i++)
3373 		TAILQ_INIT(&pmap_pvo_table[i]);
3374 
3375 #ifndef MSGBUFADDR
3376 	/*
3377 	 * Allocate msgbuf in high memory.
3378 	 */
3379 	msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
3380 #endif
3381 
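	/*
	 * Hand the remaining avail regions to UVM, splitting any region
	 * that straddles PMAP_DIRECT_MAPPED_LEN so directly mapped
	 * pages get their own freelist (pmap_pool_alloc() above relies
	 * on allocating from VM_FREELIST_DIRECT_MAPPED).
	 */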
3382 	for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
3383 		paddr_t pfstart = atop(mp->start);
3384 		paddr_t pfend = atop(mp->start + mp->size);
3385 		if (mp->size == 0)
3386 			continue;
3387 		if (mp->start + mp->size <= PMAP_DIRECT_MAPPED_LEN) {
3388 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
3389 				VM_FREELIST_DIRECT_MAPPED);
3390 		} else if (mp->start >= PMAP_DIRECT_MAPPED_LEN) {
3391 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
3392 				VM_FREELIST_DEFAULT);
3393 		} else {
3394 			pfend = atop(PMAP_DIRECT_MAPPED_LEN);
3395 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
3396 				VM_FREELIST_DIRECT_MAPPED);
3397 			pfstart = atop(PMAP_DIRECT_MAPPED_LEN);
3398 			pfend = atop(mp->start + mp->size);
3399 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
3400 				VM_FREELIST_DEFAULT);
3401 		}
3402 	}
3403 
3404 	/*
3405 	 * Make sure kernel vsid is allocated as well as VSID 0.
3406 	 */
3407 	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
3408 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
3409 	pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
3410 		|= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW);
3411 	pmap_vsid_bitmap[0] |= 1;
3412 
3413 	/*
3414 	 * Initialize kernel pmap.
3415 	 */
3416 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
3417 	for (i = 0; i < 16; i++) {
3418  		pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY;
3419 	}
3420 	pmap_kernel()->pm_vsid = KERNEL_VSIDBITS;
3421 
3422 	pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
3423 #ifdef KERNEL2_SR
3424 	pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
3425 #endif
3426 #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
3427 
3428 #if defined(PMAP_OEA) && defined(PPC_OEA601)
3429 	if ((MFPVR() >> 16) == MPC601) {
3430 		for (i = 0; i < 16; i++) {
3431 			if (iosrtable[i] & SR601_T) {
3432 				pmap_kernel()->pm_sr[i] = iosrtable[i];
3433 			}
3434 		}
3435 	}
3436 #endif /* PMAP_OEA && PPC_OEA601 */
3437 
3438 #ifdef ALTIVEC
3439 	pmap_use_altivec = cpu_altivec;
3440 #endif
3441 
3442 #ifdef DEBUG
3443 	if (pmapdebug & PMAPDEBUG_BOOT) {
3444 		u_int cnt;
3445 		uvm_physseg_t bank;
3446 		char pbuf[9];
3447 		for (cnt = 0, bank = uvm_physseg_get_first();
3448 		     uvm_physseg_valid_p(bank);
3449 		     bank = uvm_physseg_get_next(bank)) {
3450 			cnt += uvm_physseg_get_avail_end(bank) -
3451 			    uvm_physseg_get_avail_start(bank);
3452 			printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
3453 			    bank,
3454 			    ptoa(uvm_physseg_get_avail_start(bank)),
3455 			    ptoa(uvm_physseg_get_avail_end(bank)),
3456 			    ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)));
3457 		}
3458 		format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
3459 		printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
3460 		    pbuf, cnt);
3461 	}
3462 #endif
3463 
3464 	pool_init(&pmap_pvo_pool, sizeof(struct pvo_entry),
3465 	    PMAP_PVO_ENTRY_ALIGN, 0, 0, "pmap_pvopl",
3466 	    &pmap_pool_allocator, IPL_VM);
3467 
3468 	pool_setlowat(&pmap_pvo_pool, 1008);
3469 
3470 	pool_init(&pmap_pool, sizeof(struct pmap),
3471 	    __alignof(struct pmap), 0, 0, "pmap_pl",
3472 	    &pmap_pool_allocator, IPL_NONE);
3473 
3474 #if defined(PMAP_OEA64_BRIDGE)
3475 	{
3476 		struct pmap *pm = pmap_kernel();
3477 		uvm_physseg_t bank;
3478 		paddr_t pa;
3479 		struct pte pt;
3480 		unsigned int ptegidx;
3481 
3482 		for (bank = uvm_physseg_get_first();
3483 		     uvm_physseg_valid_p(bank);
3484 		     bank = uvm_physseg_get_next(bank)) {
3485 			if (uvm_physseg_get_free_list(bank) !=
3486 			    VM_FREELIST_DIRECT_MAPPED)
3487 				continue;
3488 			for (pa = uimax(ptoa(uvm_physseg_get_avail_start(bank)),
3489 					SEGMENT_LENGTH);
3490 			     pa < ptoa(uvm_physseg_get_avail_end(bank));
3491 			     pa += PAGE_SIZE) {
3492 				ptegidx = va_to_pteg(pm, pa);
3493 				pmap_pte_create(&pt, pm, pa, pa | PTE_M);
3494 				pmap_pte_insert(ptegidx, &pt);
3495 			}
3496 		}
3497 	}
3498 #endif
3499 
3500 #if defined(PMAP_NEED_MAPKERNEL)
3501 	{
3502 		struct pmap *pm = pmap_kernel();
3503 #if defined(PMAP_NEED_FULL_MAPKERNEL)
3504 		extern int etext[], kernel_text[];
3505 		vaddr_t va, va_etext = (paddr_t) etext;
3506 #endif
3507 		paddr_t pa, pa_end;
3508 		register_t sr;
3509 		struct pte pt;
3510 		unsigned int ptegidx;
3511 		int bank;
3512 
3513 		sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY;
3514 		pm->pm_sr[0] = sr;
3515 
3516 		for (bank = 0; bank < vm_nphysseg; bank++) {
3517 			pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end);
3518 			pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
3519 			for (; pa < pa_end; pa += PAGE_SIZE) {
3520 				ptegidx = va_to_pteg(pm, pa);
3521 				pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW);
3522 				pmap_pte_insert(ptegidx, &pt);
3523 			}
3524 		}
3525 
3526 #if defined(PMAP_NEED_FULL_MAPKERNEL)
3527 		va = (vaddr_t) kernel_text;
3528 
3529 		for (pa = kernelstart; va < va_etext;
3530 		     pa += PAGE_SIZE, va += PAGE_SIZE) {
3531 			ptegidx = va_to_pteg(pm, va);
3532 			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
3533 			pmap_pte_insert(ptegidx, &pt);
3534 		}
3535 
3536 		for (; pa < kernelend;
3537 		     pa += PAGE_SIZE, va += PAGE_SIZE) {
3538 			ptegidx = va_to_pteg(pm, va);
3539 			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3540 			pmap_pte_insert(ptegidx, &pt);
3541 		}
3542 
3543 		for (va = 0, pa = 0; va < kernelstart;
3544 		     pa += PAGE_SIZE, va += PAGE_SIZE) {
3545 			ptegidx = va_to_pteg(pm, va);
3546 			if (va < 0x3000)
3547 				pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
3548 			else
3549 				pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3550 			pmap_pte_insert(ptegidx, &pt);
3551 		}
3552 		for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH;
3553 		    pa += PAGE_SIZE, va += PAGE_SIZE) {
3554 			ptegidx = va_to_pteg(pm, va);
3555 			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3556 			pmap_pte_insert(ptegidx, &pt);
3557 		}
3558 #endif /* PMAP_NEED_FULL_MAPKERNEL */
3559 	}
3560 #endif /* PMAP_NEED_MAPKERNEL */
3561 }
3562 
3563 /*
3564  * Using the data structures prepared in pmap_bootstrap1(), program
3565  * the MMU hardware.
3566  */
3567 void
3568 pmap_bootstrap2(void)
3569 {
3570 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
3571 	for (int i = 0; i < 16; i++) {
3572 		__asm volatile("mtsrin %0,%1"
3573 			:: "r"(pmap_kernel()->pm_sr[i]),
3574 			   "r"(i << ADDR_SR_SHFT));
3575 	}
3576 #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
3577 
3578 #if defined(PMAP_OEA)
3579 	__asm volatile("sync; mtsdr1 %0; isync"
3580 	    :
3581 	    : "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10))
3582 	    : "memory");
3583 #elif defined(PMAP_OEA64) || defined(PMAP_OEA64_BRIDGE)
3584 	__asm volatile("sync; mtsdr1 %0; isync"
3585 	    :
3586 	    : "r"((uintptr_t)pmap_pteg_table |
3587 		(32 - __builtin_clz(pmap_pteg_mask >> 11)))
3588 	    : "memory");
3589 #endif
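
	/*
	 * (The mtsdr1 above installs the PTEG table: on 32-bit OEA,
	 * SDR1 is the physical base of the size-aligned table ORed
	 * with HTABMASK, the mask applied to the upper hash bits;
	 * pmap_pteg_mask >> 10 yields exactly that, e.g. 0x10000 PTEGs
	 * give pmap_pteg_mask 0xffff and HTABMASK 0x3f, a 4MB table.)
	 */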
3590 	tlbia();
3591 
3592 #if defined(PMAPDEBUG)
3593 	if (pmapdebug)
3594 	    pmap_print_mmuregs();
3595 #endif
3596 }
3597 
3598 /*
3599  * This is not part of the defined PMAP interface and is specific to the
3600  * PowerPC architecture.  This is called during initppc, before the system
3601  * is really initialized.
3602  */
3603 void
3604 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
3605 {
3606 	pmap_bootstrap1(kernelstart, kernelend);
3607 	pmap_bootstrap2();
3608 }
3609