1 /*	$NetBSD: pmap.c,v 1.122 2023/08/02 09:18:14 macallan Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2002, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matthew Fredette.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*	$OpenBSD: pmap.c,v 1.132 2008/04/18 06:42:21 djm Exp $	*/
33 
34 /*
35  * Copyright (c) 1998-2004 Michael Shalayeff
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
51  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
52  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
53  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
55  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
56  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
57  * THE POSSIBILITY OF SUCH DAMAGE.
58  */
59 /*
60  * References:
61  * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0
62  * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0
63  * 3. PA-RISC 1.1 Architecture and Instruction Set Reference Manual,
64  *    Hewlett-Packard, February 1994, Third Edition
65  */
66 
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.122 2023/08/02 09:18:14 macallan Exp $");
69 
70 #include "opt_cputype.h"
71 
72 #include <sys/param.h>
73 #include <sys/atomic.h>
74 #include <sys/mutex.h>
75 #include <sys/pool.h>
76 #include <sys/proc.h>
77 #include <sys/rwlock.h>
78 #include <sys/systm.h>
79 
80 #include <uvm/uvm.h>
81 #include <uvm/uvm_page_array.h>
82 
83 #include <machine/cpu.h>
84 #include <machine/cpufunc.h>
85 #include <machine/iomod.h>
86 #include <machine/pcb.h>
87 #include <machine/pmap.h>
88 #include <machine/psl.h>
89 #include <machine/pte.h>
90 #include <machine/reg.h>
91 
92 #include <hppa/hppa/hpt.h>
93 #include <hppa/hppa/machdep.h>
94 
95 #if defined(DDB)
96 #include <ddb/db_output.h>
97 #endif
98 
99 int		pmap_hptsize = 16 * PAGE_SIZE;	/* patchable */
100 vaddr_t		pmap_hpt;
101 
102 static struct pmap	kernel_pmap_store;
103 struct pmap		*const kernel_pmap_ptr = &kernel_pmap_store;
104 
105 int		hppa_sid_max = HPPA_SID_MAX;
106 struct pool	pmap_pool;
107 struct pool	pmap_pv_pool;
108 int		pmap_pvlowat = 252;
109 bool		pmap_initialized = false;
110 
111 static kmutex_t	pmaps_lock;
112 
113 static union pmap_pv_locks {
114 	kmutex_t	lock;
115 	char		padding[COHERENCY_UNIT];
116 } pmap_pv_locks[64] __aligned(COHERENCY_UNIT);
117 
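/*
 * Hash a page's vm_page_md address into one of the padded PV locks
 * above; the >> 7 strides past the low-order bits so neighbouring
 * pages spread across different lock buckets.
 */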
118 #define	PMAP_PV_LOCK(md) \
119     ((uintptr_t)(md) >> 7 & (__arraycount(pmap_pv_locks) - 1))
120 
121 u_int	hppa_prot[8];
122 u_int	sid_counter;
123 
124 static const struct uvm_pagerops pmap_pager = {
125 	/* nothing */
126 };
127 
128 /*
129  * Page 3-6 of the "PA-RISC 1.1 Architecture and Instruction Set
130  * Reference Manual" (HP part number 09740-90039) defines equivalent
131  * and non-equivalent virtual addresses in the cache.
132  *
133  * This macro evaluates to true iff the two space/virtual address
134  * combinations are non-equivalent aliases, and therefore will find
135  * two different locations in the cache.
136  *
137  * NB: currently, the CPU-specific desidhash() functions disable the
138  * use of the space in all cache hashing functions.  This means that
139  * this macro definition is stricter than it has to be (because it
140  * takes space into account), but one day cache space hashing should
141  * be re-enabled.  Cache space hashing should yield better performance
142  * through better utilization of the cache, assuming that most aliasing
143  * is the read-only kind, which we do allow in the cache.
144  */
145 #define NON_EQUIVALENT_ALIAS(sp1, va1, sp2, va2) \
146   (((((va1) ^ (va2)) & ~HPPA_PGAMASK) != 0) || \
147    ((((sp1) ^ (sp2)) & ~HPPA_SPAMASK) != 0))
148 
149 /* Prototypes. */
150 struct vm_page *pmap_pagealloc(struct uvm_object *, voff_t);
151 void pmap_pagefree(struct vm_page *);
152 
153 static inline void pmap_lock(struct pmap *);
154 static inline void pmap_unlock(struct pmap *);
155 static inline bool pmap_trylock(struct pmap *);
156 
157 static inline void pmap_sdir_set(pa_space_t, volatile uint32_t *);
158 static inline uint32_t *pmap_sdir_get(pa_space_t);
159 
160 static inline volatile pt_entry_t *pmap_pde_get(volatile uint32_t *, vaddr_t);
161 static inline void pmap_pde_set(pmap_t, vaddr_t, paddr_t);
162 static inline pt_entry_t *pmap_pde_alloc(pmap_t, vaddr_t, struct vm_page **);
163 static inline struct vm_page *pmap_pde_ptp(pmap_t, volatile pt_entry_t *);
164 static inline void pmap_pde_release(pmap_t, vaddr_t, struct vm_page *);
165 
166 static inline volatile pt_entry_t *pmap_pde_get(volatile uint32_t *, vaddr_t);
167 static inline void pmap_pde_set(pmap_t, vaddr_t, paddr_t);
168 
169 void pmap_pte_flush(pmap_t, vaddr_t, pt_entry_t);
170 
171 static inline pt_entry_t pmap_pte_get(volatile pt_entry_t *, vaddr_t);
172 static inline void pmap_pte_set(volatile pt_entry_t *, vaddr_t, pt_entry_t);
173 
174 static inline pt_entry_t pmap_vp_find(pmap_t, vaddr_t);
175 
176 static inline struct pv_entry *pmap_pv_alloc(void);
177 static inline void pmap_pv_free(struct pv_entry *);
178 static inline void pmap_pv_enter(struct vm_page *, struct pv_entry *, pmap_t,
179     vaddr_t, struct vm_page *, u_int);
180 static inline struct pv_entry *pmap_pv_remove(struct vm_page *, pmap_t,
181     vaddr_t);
182 static inline void pmap_pv_lock(const struct vm_page_md *md);
183 static inline void pmap_pv_unlock(const struct vm_page_md *md);
184 static inline bool pmap_pv_locked(const struct vm_page_md *md);
185 
186 static inline void pmap_flush_page(struct vm_page *, bool);
187 static void pmap_resolve_alias(struct vm_page *, struct pmap *, vaddr_t,
188     pt_entry_t);
189 static void pmap_syncicache_page(struct vm_page *, pmap_t, vaddr_t);
190 
191 static void pmap_page_physload(paddr_t, paddr_t);
192 
193 void pmap_copy_page(paddr_t, paddr_t);
194 
195 #ifdef USE_HPT
196 static inline struct hpt_entry *pmap_hash(pmap_t, vaddr_t);
197 static inline uint32_t pmap_vtag(pmap_t, vaddr_t);
198 
199 #ifdef DDB
200 void pmap_hptdump(void);
201 #endif
202 #endif
203 
204 #ifdef DDB
205 void pmap_dump_table(pa_space_t, vaddr_t);
206 void pmap_dump_pv(paddr_t);
207 #endif
208 
209 #define	IS_IOPAGE_P(pa)		((pa) >= HPPA_IOBEGIN)
210 #define	IS_PVFEXEC_P(f)		(((f) & PVF_EXEC) != 0)
211 
212 /* un-invert PVF_REF */
213 #define pmap_pvh_attrs(a) \
214 	(((a) & (PVF_MOD|PVF_REF)) ^ PVF_REF)
215 
216 static inline void
217 pmap_lock(struct pmap *pm)
218 {
219 
220 	rw_enter(pm->pm_lock, RW_WRITER);
221 }
222 
223 static inline void
224 pmap_unlock(struct pmap *pm)
225 {
226 
227 	rw_exit(pm->pm_lock);
228 }
229 
230 static inline bool
231 pmap_trylock(struct pmap *pm)
232 {
233 
234 	return rw_tryenter(pm->pm_lock, RW_WRITER);
235 }
236 
237 static inline void
238 pmap_pv_lock(const struct vm_page_md *md)
239 {
240 
241 	mutex_enter(&pmap_pv_locks[PMAP_PV_LOCK(md)].lock);
242 }
243 
244 static inline void
245 pmap_pv_unlock(const struct vm_page_md *md)
246 {
247 
248 	mutex_exit(&pmap_pv_locks[PMAP_PV_LOCK(md)].lock);
249 }
250 
251 static inline bool
252 pmap_pv_locked(const struct vm_page_md *md)
253 {
254 
255 	return mutex_owned(&pmap_pv_locks[PMAP_PV_LOCK(md)].lock);
256 }
257 
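/*
 * Allocate a zeroed page (dipping into the UVM reserve if necessary)
 * for pmap metadata such as page table pages.
 */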
258 struct vm_page *
259 pmap_pagealloc(struct uvm_object *obj, voff_t off)
260 {
261 	struct vm_page *pg;
262 
263 	if ((pg = uvm_pagealloc(obj, off, NULL,
264 	    UVM_PGA_USERESERVE | UVM_PGA_ZERO)) == NULL)
265 		printf("pmap_pagealloc fail\n");
266 
267 	return (pg);
268 }
269 
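/*
 * Flush a metadata page from the data cache (and, on PA8x00 CPUs,
 * purge its TLB entries) before handing it back to UVM.
 */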
270 void
271 pmap_pagefree(struct vm_page *pg)
272 {
273 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
274 	pdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
275 
276 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
277     defined(HP8500_CPU) || defined(HP8600_CPU)
278 	pdtlb(HPPA_SID_KERNEL, pa);
279 	pitlb(HPPA_SID_KERNEL, pa);
280 #endif
281 	uvm_pagefree(pg);
282 }
283 
284 #ifdef USE_HPT
285 /*
286  * This hash function is the one used by the hardware TLB walker on the 7100LC.
287  */
288 static inline struct hpt_entry *
289 pmap_hash(pmap_t pmap, vaddr_t va)
290 {
291 
292 	return (struct hpt_entry *)(pmap_hpt +
293 	    (((va >> 8) ^ (pmap->pm_space << 9)) & (pmap_hptsize - 1)));
294 }
295 
296 static inline uint32_t
297 pmap_vtag(pmap_t pmap, vaddr_t va)
298 {
299 
300 	return (0x80000000 | (pmap->pm_space & 0xffff) |
301 	    ((va >> 1) & 0x7fff0000));
302 }
303 #endif
304 
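/*
 * The space directory, addressed through CR_VTOP and indexed by space
 * ID, records the page directory for each space; these helpers store
 * and fetch those entries.
 */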
305 static inline void
306 pmap_sdir_set(pa_space_t space, volatile uint32_t *pd)
307 {
308 	volatile uint32_t *vtop;
309 
310 	mfctl(CR_VTOP, vtop);
311 
312 	KASSERT(vtop != NULL);
313 
314 	vtop[space] = (uint32_t)pd;
315 }
316 
317 static inline uint32_t *
318 pmap_sdir_get(pa_space_t space)
319 {
320 	uint32_t *vtop;
321 
322 	mfctl(CR_VTOP, vtop);
323 	return ((uint32_t *)vtop[space]);
324 }
325 
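/*
 * The top 10 bits of a virtual address index the page directory; each
 * PDE holds the physical address of a page table page covering
 * PDE_SIZE (4MB) of address space.
 */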
326 static inline volatile pt_entry_t *
327 pmap_pde_get(volatile uint32_t *pd, vaddr_t va)
328 {
329 
330 	return ((pt_entry_t *)pd[va >> 22]);
331 }
332 
333 static inline void
334 pmap_pde_set(pmap_t pm, vaddr_t va, paddr_t ptp)
335 {
336 	UVMHIST_FUNC(__func__);
337 	UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx ptp %#jx", (uintptr_t)pm,
338 	    va, ptp, 0);
339 
340 	KASSERT((ptp & PGOFSET) == 0);
341 
342 	pm->pm_pdir[va >> 22] = ptp;
343 }
344 
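/*
 * Allocate a page table page for va in a user pmap, hook it into the
 * page directory and account for it in the resident count.
 */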
345 static inline pt_entry_t *
346 pmap_pde_alloc(pmap_t pm, vaddr_t va, struct vm_page **pdep)
347 {
348 	struct vm_page *pg;
349 	paddr_t pa;
350 
351 	UVMHIST_FUNC(__func__);
352 	UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pdep %#jx", (uintptr_t)pm,
353 	    va, (uintptr_t)pdep, 0);
354 
355 
356 	KASSERT(pm != pmap_kernel());
357 	KASSERT(rw_write_held(pm->pm_lock));
358 
359 	pg = pmap_pagealloc(&pm->pm_obj, va);
360 
361 	if (pg == NULL)
362 		return NULL;
363 
364 	pa = VM_PAGE_TO_PHYS(pg);
365 
366 	UVMHIST_LOG(maphist, "pde %#jx", pa, 0, 0, 0);
367 
368 	pg->flags &= ~PG_BUSY;		/* never busy */
369 	pg->wire_count = 1;		/* no mappings yet */
370 	pmap_pde_set(pm, va, pa);
371 	pm->pm_stats.resident_count++;	/* count PTP as resident */
372 	pm->pm_ptphint = pg;
373 	if (pdep)
374 		*pdep = pg;
375 	return ((pt_entry_t *)pa);
376 }
377 
378 static inline struct vm_page *
379 pmap_pde_ptp(pmap_t pm, volatile pt_entry_t *pde)
380 {
381 	paddr_t pa = (paddr_t)pde;
382 
383 	UVMHIST_FUNC(__func__);
384 	UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pdep %#jx", (uintptr_t)pm,
385 	    (uintptr_t)pde, 0, 0);
386 
387 	if (pm->pm_ptphint && VM_PAGE_TO_PHYS(pm->pm_ptphint) == pa)
388 		return (pm->pm_ptphint);
389 
390 	UVMHIST_LOG(maphist, "<--- done (%#jx)",
391 	    (uintptr_t)PHYS_TO_VM_PAGE(pa), 0, 0, 0);
392 
393 	return (PHYS_TO_VM_PAGE(pa));
394 }
395 
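/*
 * Drop a mapping's reference on a page table page; once only the
 * initial reference remains, unhook the page from the page directory
 * and free it.
 */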
396 static inline void
397 pmap_pde_release(pmap_t pmap, vaddr_t va, struct vm_page *ptp)
398 {
399 	UVMHIST_FUNC(__func__);
400 	UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx ptp %#jx", (uintptr_t)pmap,
401 	    va, (uintptr_t)ptp, 0);
402 
403 	KASSERT(pmap != pmap_kernel());
404 	if (--ptp->wire_count <= 1) {
405 		UVMHIST_LOG(maphist, "disposing ptp %#jx", (uintptr_t)ptp, 0,
406 		    0, 0);
407 		pmap_pde_set(pmap, va, 0);
408 		pmap->pm_stats.resident_count--;
409 		if (pmap->pm_ptphint == ptp)
410 			pmap->pm_ptphint = NULL;
411 		ptp->wire_count = 0;
412 
413 		KASSERT((ptp->flags & PG_BUSY) == 0);
414 
415 		pmap_pagefree(ptp);
416 	}
417 }
418 
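/*
 * Bits 12..21 of the virtual address index the 1024-entry page table
 * page; each PTE maps one 4KB page.
 */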
419 static inline pt_entry_t
420 pmap_pte_get(volatile pt_entry_t *pde, vaddr_t va)
421 {
422 
423 	return (pde[(va >> 12) & 0x3ff]);
424 }
425 
426 static inline void
427 pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte)
428 {
429 
430 	/* too verbose due to hppa_pagezero_{,un}map */
431 #if 0
432 	UVMHIST_FUNC(__func__);
433 	UVMHIST_CALLARGS(maphist, "pdep %#jx va %#jx pte %#jx", (uintptr_t)pde,
434 	    va, pte, 0);
435 #endif
436 
437 	KASSERT(pde != NULL);
438 	KASSERT(((paddr_t)pde & PGOFSET) == 0);
439 
440 	pde[(va >> 12) & 0x3ff] = pte;
441 }
442 
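/*
 * Flush a mapping from the caches and TLBs: always flush the data
 * cache and purge the DTLB; for executable mappings also flush the
 * instruction cache and purge the ITLB; finally invalidate any
 * matching entry in the hardware HPT.
 */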
443 void
444 pmap_pte_flush(pmap_t pmap, vaddr_t va, pt_entry_t pte)
445 {
446 
447 	UVMHIST_FUNC(__func__);
448 	if (pmap != pmap_kernel() && va != 0) {
449 		UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pte %#jx",
450 		    (uintptr_t)pmap, va, (uintptr_t)pte, 0);
451 	}
452 
453 	fdcache(pmap->pm_space, va, PAGE_SIZE);
454 	if (pte & PTE_PROT(TLB_EXECUTE)) {
455 		ficache(pmap->pm_space, va, PAGE_SIZE);
456 		pitlb(pmap->pm_space, va);
457 	}
458 	pdtlb(pmap->pm_space, va);
459 #ifdef USE_HPT
460 	if (pmap_hpt) {
461 		struct hpt_entry *hpt;
462 		hpt = pmap_hash(pmap, va);
463 		if (hpt->hpt_valid &&
464 		    hpt->hpt_space == pmap->pm_space &&
465 		    hpt->hpt_vpn == ((va >> 1) & 0x7fff0000))
466 			hpt->hpt_space = 0xffff;
467 	}
468 #endif
469 }
470 
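/*
 * Walk the two-level page table and return the PTE for va, or 0 if
 * there is no page table page or no mapping.
 */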
471 static inline pt_entry_t
472 pmap_vp_find(pmap_t pm, vaddr_t va)
473 {
474 	volatile pt_entry_t *pde;
475 
476 	if (!(pde = pmap_pde_get(pm->pm_pdir, va)))
477 		return (0);
478 
479 	return (pmap_pte_get(pde, va));
480 }
481 
482 #ifdef DDB
483 void
484 pmap_dump_table(pa_space_t space, vaddr_t sva)
485 {
486 	char buf[64];
487 	volatile pt_entry_t *pde = NULL;
488 	vaddr_t va = sva;
489 	vaddr_t pdemask = 1;
490 	pt_entry_t pte;
491 	uint32_t *pd;
492 
493 	if (space > hppa_sid_max)
494 		return;
495 
496 	pd = pmap_sdir_get(space);
497 	if (!pd)
498 		return;
499 
500 	do {
501 		if (pdemask != (va & PDE_MASK)) {
502 			pdemask = va & PDE_MASK;
503 			pde = pmap_pde_get(pd, va);
504 			if (!pde) {
505 				va = pdemask + PDE_SIZE;
506 				continue;
507 			}
508 			db_printf("%x:%8p:\n", space, pde);
509 		}
510 
511 		pte = pmap_pte_get(pde, va);
512 		if (pte) {
513 			snprintb(buf, sizeof(buf), TLB_BITS,
514 			   TLB_PROT(pte & PAGE_MASK));
515 			db_printf("0x%08lx-0x%08x:%s\n", va, pte & ~PAGE_MASK,
516 			    buf);
517 		}
518 		va += PAGE_SIZE;
519 	} while (va != 0);
520 }
521 
522 void
523 pmap_dump_pv(paddr_t pa)
524 {
525 	struct vm_page *pg;
526 	struct vm_page_md *md;
527 	struct pv_entry *pve;
528 
529 	pg = PHYS_TO_VM_PAGE(pa);
530 	if (pg == NULL)
531 		return;
532 
533 	md = VM_PAGE_TO_MD(pg);
534 	db_printf("pg %p attr 0x%08x\n", pg, md->pvh_attrs);
535 	for (pve = md->pvh_list; pve; pve = pve->pv_next)
536 		db_printf("%x:%lx\n", pve->pv_pmap->pm_space,
537 		    pve->pv_va & PV_VAMASK);
538 }
539 #endif
540 
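/*
 * Resolve non-equivalent cache aliases before a new mapping of pg is
 * entered: every existing mapping that would form a writeable alias
 * at a different cache offset is flushed; managed mappings are torn
 * down completely, while unmanaged (PV_KENTER) mappings are preserved
 * on the PV list.
 */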
541 static void
542 pmap_resolve_alias(struct vm_page *pg, struct pmap *pm, vaddr_t va,
543     pt_entry_t pte)
544 {
545 
546 	UVMHIST_FUNC(__func__);
547 	UVMHIST_CALLARGS(maphist, "pg %#jx pm %#jx va %#jx pte %#jx",
548 	    (uintptr_t)pg, (uintptr_t)pm, va, pte);
549 
550 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
551 	struct pv_entry *pve, *npve, **pvp;
552 
553  restart:
554 	pmap_pv_lock(md);
555 	pvp = &md->pvh_list;
556 	for (pve = md->pvh_list; pve; pve = npve) {
557 		const pmap_t ppm = pve->pv_pmap;
558 		const vaddr_t pva = pve->pv_va & PV_VAMASK;
559 
560 		UVMHIST_LOG(maphist, "... pm %#jx va %#jx", (uintptr_t)ppm,
561 		    pva, 0, 0);
562 
563 		npve = pve->pv_next;
564 
565 		volatile pt_entry_t *pde;
566 		pt_entry_t ppte;
567 		if (pve->pv_va & PV_KENTER) {
568 			/* Get the pte for this mapping */
569 			pde = pmap_pde_get(ppm->pm_pdir, pva);
570 			ppte = pmap_pte_get(pde, pva);
571 		} else {
572 			/*
573 			 * We have to seamlessly get a hold on the pmap's lock
574 			 * while holding the PV head lock, to know that the
575 			 * mapping is still in place and we can operate on it.
576 			 * If that can't be had, drop the PV head lock, wait
577 			 * for the pmap's lock to become available, and then
578 			 * try again.
579 			 */
580 			UVMHIST_LOG(maphist, "... pm %#jx va %#jx... checking",
581 			    (uintptr_t)ppm, pva, 0, 0);
582 
583 			bool locked = true;
584 			if (pm != ppm) {
585 				pmap_reference(ppm);
586 				locked = pmap_trylock(ppm);
587 			}
588 
589 			if (!locked) {
590 				pmap_pv_unlock(md);
591 				pmap_lock(ppm);
592 				/* nothing */
593 				pmap_unlock(ppm);
594 				pmap_destroy(ppm);
595 
596 				UVMHIST_LOG(maphist, "... failed lock", 0, 0, 0,
597 				    0);
598 				goto restart;
599 			}
600 			pde = pmap_pde_get(ppm->pm_pdir, pva);
601 			ppte = pmap_pte_get(pde, pva);
602 
603 			md->pvh_attrs |= pmap_pvh_attrs(ppte);
604 		}
605 
606 		const bool writeable =
607 		    ((pte | ppte) & PTE_PROT(TLB_WRITE)) != 0;
608 
609 		if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) && writeable) {
610 			UVMHIST_LOG(maphist,
611 			    "aliased writeable mapping %#jx:%#jx",
612 			    ppm->pm_space, pva, 0, 0);
613 
614 			pmap_pte_flush(ppm, pva, ppte);
615 			if (ppte & PTE_PROT(TLB_WIRED))
616 				ppm->pm_stats.wired_count--;
617 			ppm->pm_stats.resident_count--;
618 
619 			if (pve->pv_va & PV_KENTER) {
620 				/*
621 				 * This is an unmanaged mapping, it must be
622 				 * preserved.  Move it back on the list and
623 				 * advance the end-of-list pointer.
624 				 */
625 				*pvp = pve;
626 				pvp = &pve->pv_next;
627 			} else {
628 				pmap_pte_set(pde, pva, 0);
629 
630 				/* Remove pve from list */
631 				*pvp = npve;
632 
633 				pmap_pv_unlock(md);
634 				pmap_pv_free(pve);
635 				if (pm != ppm) {
636 					pmap_unlock(ppm);
637 					pmap_destroy(ppm);
638 
639 				}
640 				UVMHIST_LOG(maphist, "... removed", 0,
641 				    0, 0, 0);
642 				goto restart;
643 			}
644 		} else {
645 			UVMHIST_LOG(maphist, "not aliased writeable mapping",
646 			    0,0,0,0);
647 
648 			if (!(pve->pv_va & PV_KENTER) && pm != ppm) {
649 				pmap_unlock(ppm);
650 				pmap_destroy(ppm);
651 			}
652 			*pvp = pve;
653 			pvp = &pve->pv_next;
654 		}
655 	}
656 	md->pvh_attrs &= ~PVF_EXEC;
657 	*pvp = NULL;
658 
659 #ifdef DEBUG
660 	int ret = 0;
661 	/* check for non-equ aliased mappings */
662 	for (pve = md->pvh_list; pve; pve = pve->pv_next) {
663 		vaddr_t pva = pve->pv_va & PV_VAMASK;
664 
665 		UVMHIST_LOG(maphist, "... pm %#jx va %#jx",
666 		    (uintptr_t)pve->pv_pmap, pva, 0, 0);
667 
668 		pte |= pmap_vp_find(pve->pv_pmap, pva);
669 		if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) &&
670 		    (pte & PTE_PROT(TLB_WRITE))) {
671 			UVMHIST_LOG(maphist,
672 			    "aliased writable mapping %#jx:%#jx",
673 			    pve->pv_pmap->pm_space, pve->pv_va, 0, 0);
674 
675 			ret++;
676 		}
677 	}
678 	UVMHIST_LOG(maphist, "check returned %jd", ret, 0, 0, 0);
679 #endif
680 
681 	pmap_pv_unlock(md);
682 
683 	UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
684 
685 	return;
686 }
687 
688 /*
689  * This allocates and returns a new struct pv_entry.
690  */
691 static inline struct pv_entry *
692 pmap_pv_alloc(void)
693 {
694 	struct pv_entry *pv;
695 
696 	pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
697 
698 	return (pv);
699 }
700 
701 static inline void
702 pmap_pv_free(struct pv_entry *pv)
703 {
704 
705 	if (pv->pv_ptp)
706 		pmap_pde_release(pv->pv_pmap, pv->pv_va & PV_VAMASK,
707 		    pv->pv_ptp);
708 
709 	pool_put(&pmap_pv_pool, pv);
710 }
711 
712 static inline void
713 pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
714     vaddr_t va, struct vm_page *pdep, u_int flags)
715 {
716 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
717 
718 	UVMHIST_FUNC(__func__);
719 	UVMHIST_CALLARGS(maphist, "pg %#jx pve %#jx pm %#jx va %#jx",
720 	    (uintptr_t)pg, (uintptr_t)pve, (uintptr_t)pm, va);
721 	UVMHIST_LOG(maphist, "...pdep %#jx flags %#jx",
722 	    (uintptr_t)pdep, flags, 0, 0);
723 
724 	KASSERT(pmap_pv_locked(md));
725 
726 	pve->pv_pmap = pm;
727 	pve->pv_va = va | flags;
728 	pve->pv_ptp = pdep;
729 	pve->pv_next = md->pvh_list;
730 	md->pvh_list = pve;
731 }
732 
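/*
 * Unlink the pv_entry for (pmap, va) from the page's PV list.  If the
 * page was executable, either clear PVF_EXEC when this was the last
 * mapping or resynchronize the instruction cache.
 */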
733 static inline struct pv_entry *
734 pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va)
735 {
736 	UVMHIST_FUNC(__func__);
737 	UVMHIST_CALLARGS(maphist, "pg %#jx pm %#jx va %#jx",
738 	    (uintptr_t)pg, (uintptr_t)pmap, va, 0);
739 
740 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
741 	struct pv_entry **pve, *pv;
742 
743 	KASSERT(pmap_pv_locked(md));
744 
745 	for (pv = *(pve = &md->pvh_list);
746 	    pv; pv = *(pve = &(*pve)->pv_next)) {
747 		if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) {
748 			*pve = pv->pv_next;
749 			break;
750 		}
751 	}
752 
753 	if (IS_PVFEXEC_P(md->pvh_attrs)) {
754 		if (md->pvh_list == NULL) {
755 			md->pvh_attrs &= ~PVF_EXEC;
756 		} else {
757 			pmap_syncicache_page(pg, pmap, va);
758 		}
759 	}
760 
761 	return (pv);
762 }
763 
764 #define	FIRST_16M atop(16 * 1024 * 1024)
765 
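/*
 * Hand a segment of physical memory to UVM, splitting it at 16MB so
 * that pages below that boundary land on the ISA DMA freelist.
 */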
766 static void
767 pmap_page_physload(paddr_t spa, paddr_t epa)
768 {
769 
770 	if (spa == epa)
771 		return;
772 
773 	if (spa < FIRST_16M && epa <= FIRST_16M) {
774 		uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_ISADMA);
775 	} else if (spa < FIRST_16M && epa > FIRST_16M) {
776 		uvm_page_physload(spa, FIRST_16M, spa, FIRST_16M,
777 		    VM_FREELIST_ISADMA);
778 		uvm_page_physload(FIRST_16M, epa, FIRST_16M, epa,
779 		    VM_FREELIST_DEFAULT);
780 	} else {
781 		uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_DEFAULT);
782 	}
783 
784 	availphysmem += epa - spa;
785 }
786 
787 /*
788  * Bootstrap the system enough to run with virtual memory.
789  * Map the kernel's code, data and bss, and allocate the system page table.
790  * Called with mapping OFF.
791  *
792  * Parameters:
793  * vstart	PA of first available physical page
794  */
795 void
796 pmap_bootstrap(vaddr_t vstart)
797 {
798 	UVMHIST_FUNC(__func__);
799 	UVMHIST_CALLED(maphist);
800 
801 	vaddr_t va, addr;
802 	vsize_t size;
803 	extern paddr_t hppa_vtop;
804 	pmap_t kpm;
805 	int npdes, nkpdes;
806 	extern int resvphysmem;
807 	vsize_t btlb_entry_min, btlb_entry_max, btlb_entry_got;
808 	paddr_t ksrx, kerx, ksro, kero, ksrw, kerw;
809 	extern int usebtlb;
810 
811 	/* Provided by the linker script */
812 	extern int kernel_text, etext;
813 	extern int __rodata_start, __rodata_end;
814 	extern int __data_start;
815 
816 	uvm_md_init();
817 
818 	hppa_prot[UVM_PROT_NONE]  = TLB_AR_NA;
819 	hppa_prot[UVM_PROT_READ]  = TLB_AR_R;
820 	hppa_prot[UVM_PROT_WRITE] = TLB_AR_RW;
821 	hppa_prot[UVM_PROT_RW]    = TLB_AR_RW;
822 	hppa_prot[UVM_PROT_EXEC]  = TLB_AR_RX;
823 	hppa_prot[UVM_PROT_RX]    = TLB_AR_RX;
824 	hppa_prot[UVM_PROT_WX]    = TLB_AR_RWX;
825 	hppa_prot[UVM_PROT_RWX]   = TLB_AR_RWX;
826 
827 	/*
828 	 * Initialize kernel pmap
829 	 */
830 	addr = round_page(vstart);
831 	kpm = pmap_kernel();
832 	memset(kpm, 0, sizeof(*kpm));
833 
834 	rw_init(&kpm->pm_obj_lock);
835 	uvm_obj_init(&kpm->pm_obj, &pmap_pager, false, 1);
836 	uvm_obj_setlock(&kpm->pm_obj, &kpm->pm_obj_lock);
837 
838 	kpm->pm_space = HPPA_SID_KERNEL;
839 	kpm->pm_pid = HPPA_PID_KERNEL;
840 	kpm->pm_pdir_pg = NULL;
841 	kpm->pm_pdir = (uint32_t *)addr;
842 
843 	memset((void *)addr, 0, PAGE_SIZE);
844 	fdcache(HPPA_SID_KERNEL, addr, PAGE_SIZE);
845 	addr += PAGE_SIZE;
846 
847 	/*
848 	 * Allocate various tables and structures.
849 	 */
850 	mtctl(addr, CR_VTOP);
851 	hppa_vtop = addr;
852 	size = round_page((hppa_sid_max + 1) * 4);
853 	memset((void *)addr, 0, size);
854 	fdcache(HPPA_SID_KERNEL, addr, size);
855 
856 	addr += size;
857 	pmap_sdir_set(HPPA_SID_KERNEL, kpm->pm_pdir);
858 
859 	/*
860 	 * cpuid() found out how big the HPT should be, so align addr to
861 	 * what will be its beginning.  We don't waste the pages skipped
862 	 * for the alignment.
863 	 */
864 #ifdef USE_HPT
865 	if (pmap_hptsize) {
866 		struct hpt_entry *hptp;
867 		int i, error;
868 
869 		if (addr & (pmap_hptsize - 1))
870 			addr += pmap_hptsize;
871 		addr &= ~(pmap_hptsize - 1);
872 
873 		memset((void *)addr, 0, pmap_hptsize);
874 		hptp = (struct hpt_entry *)addr;
875 		for (i = pmap_hptsize / sizeof(struct hpt_entry); i--; ) {
876 			hptp[i].hpt_valid = 0;
877 			hptp[i].hpt_space = 0xffff;
878 			hptp[i].hpt_vpn = 0;
879 		}
880 		pmap_hpt = addr;
881 		addr += pmap_hptsize;
882 
883 		UVMHIST_LOG(maphist, "hpt_table %#jx @ %#jx",
884 		    pmap_hptsize, addr, 0, 0);
885 
886 		if ((error = (cpu_hpt_init)(pmap_hpt, pmap_hptsize)) < 0) {
887 			printf("WARNING: HPT init error %d -- DISABLED\n",
888 			    error);
889 			pmap_hpt = 0;
890 		} else {
891 			UVMHIST_LOG(maphist,
892 			    "HPT installed for %jd entries @ %#jx",
893 			    pmap_hptsize / sizeof(struct hpt_entry), addr, 0,
894 			    0);
895 		}
896 	}
897 #endif
898 
899 	/* Setup vtop in lwp0 trapframe. */
900 	lwp0.l_md.md_regs->tf_vtop = hppa_vtop;
901 
902 	/* Pre-allocate PDEs for kernel virtual */
903 	nkpdes = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PDE_SIZE;
904 	/* ... and io space too */
905 	nkpdes += HPPA_IOLEN / PDE_SIZE;
906 	/* ... and all physmem (VA == PA) */
907 	npdes = nkpdes + (physmem + atop(PDE_SIZE) - 1) / atop(PDE_SIZE);
908 
909 	UVMHIST_LOG(maphist, "npdes %jd", npdes, 0, 0, 0);
910 
911 	/* map the pdes */
912 	for (va = 0; npdes--; va += PDE_SIZE, addr += PAGE_SIZE) {
913 		/* last nkpdes are for the kernel virtual */
914 		if (npdes == nkpdes - 1)
915 			va = SYSCALLGATE;
916 		if (npdes == HPPA_IOLEN / PDE_SIZE - 1)
917 			va = HPPA_IOBEGIN;
918 		/* now map the pde for the physmem */
919 		memset((void *)addr, 0, PAGE_SIZE);
920 
921 		UVMHIST_LOG(maphist, "pde premap 0x%08jx 0x%08jx", va,
922 		    addr, 0, 0);
923 		pmap_pde_set(kpm, va, addr);
924 		kpm->pm_stats.resident_count++; /* count PTP as resident */
925 	}
926 
927 	/*
928 	 * At this point we've finished reserving memory for the kernel.
929 	 */
930 	/* XXXNH */
931 	resvphysmem = atop(addr);
932 
933 	ksrx = (paddr_t) &kernel_text;
934 	kerx = (paddr_t) &etext;
935 	ksro = (paddr_t) &__rodata_start;
936 	kero = (paddr_t) &__rodata_end;
937 	ksrw = (paddr_t) &__data_start;
938 	kerw = addr;
939 
940 	/*
941 	 * The kernel text, data, and bss must be direct-mapped,
942 	 * because the kernel often runs in physical mode, and
943 	 * anyways the loader loaded the kernel into physical
944 	 * memory exactly where it was linked.
945 	 *
946 	 * All memory already allocated after bss, either by
947 	 * our caller or by this function itself, must also be
948 	 * direct-mapped, because it's completely unmanaged
949 	 * and was allocated in physical mode.
950 	 *
951 	 * BTLB entries are used to do this direct mapping.
952 	 * BTLB entries have a minimum and maximum possible size,
953 	 * and MD code gives us these sizes in units of pages.
954 	 */
955 
956 	btlb_entry_min = (vsize_t) hppa_btlb_size_min * PAGE_SIZE;
957 	btlb_entry_max = (vsize_t) hppa_btlb_size_max * PAGE_SIZE;
958 
959 	/*
960 	 * To try to conserve BTLB entries, take a hint from how
961 	 * the kernel was linked: take the kernel text start as
962 	 * our effective minimum BTLB entry size, assuming that
963 	 * the data segment was also aligned to that size.
964 	 *
965 	 * In practice, linking the kernel at 2MB, and aligning
966 	 * the data segment to a 2MB boundary, should control well
967 	 * how much of the BTLB the pmap uses.  However, this code
968 	 * should not rely on this 2MB magic number, nor should
969 	 * it rely on the data segment being aligned at all.  This
970 	 * is to allow (smaller) kernels (linked lower) to work fine.
971 	 */
972 	btlb_entry_min = (vaddr_t) &kernel_text;
973 
974 	if (usebtlb) {
975 #define BTLB_SET_SIZE 16
976 		vaddr_t btlb_entry_start[BTLB_SET_SIZE];
977 		vsize_t btlb_entry_size[BTLB_SET_SIZE];
978 		int btlb_entry_vm_prot[BTLB_SET_SIZE];
979 		int btlb_i;
980 		int btlb_j;
981 
982 		/*
983 		 * Now make BTLB entries to direct-map the kernel text
984 		 * read- and execute-only as much as possible.  Note that
985 		 * if the data segment isn't nicely aligned, the last
986 		 * BTLB entry for the kernel text may also cover some of
987 		 * the data segment, meaning it will have to allow writing.
988 		 */
989 		addr = ksrx;
990 
991 		UVMHIST_LOG(maphist,
992 		    "BTLB mapping text and rodata @ %#jx - %#jx", addr, kero,
993 		    0, 0);
994 
995 		btlb_j = 0;
996 		while (addr < (vaddr_t) kero) {
997 
998 			/* Set up the next BTLB entry. */
999 			KASSERT(btlb_j < BTLB_SET_SIZE);
1000 			btlb_entry_start[btlb_j] = addr;
1001 			btlb_entry_size[btlb_j] = btlb_entry_min;
1002 			btlb_entry_vm_prot[btlb_j] =
1003 			    VM_PROT_READ | VM_PROT_EXECUTE;
1004 			if (addr + btlb_entry_min > kero)
1005 				btlb_entry_vm_prot[btlb_j] |= VM_PROT_WRITE;
1006 
1007 			/* Coalesce BTLB entries whenever possible. */
1008 			while (btlb_j > 0 &&
1009 			    btlb_entry_vm_prot[btlb_j] ==
1010 				btlb_entry_vm_prot[btlb_j - 1] &&
1011 			    btlb_entry_size[btlb_j] ==
1012 				btlb_entry_size[btlb_j - 1] &&
1013 			    !(btlb_entry_start[btlb_j - 1] &
1014 				((btlb_entry_size[btlb_j - 1] << 1) - 1)) &&
1015 			    (btlb_entry_size[btlb_j - 1] << 1) <=
1016 				btlb_entry_max)
1017 				btlb_entry_size[--btlb_j] <<= 1;
1018 
1019 			/* Move on. */
1020 			addr =
1021 			    btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
1022 			btlb_j++;
1023 		}
1024 
1025 		/*
1026 		 * Now make BTLB entries to direct-map the kernel data,
1027 		 * bss, and all of the preallocated space read-write.
1028 		 *
1029 		 * Note that, unlike above, we're not concerned with
1030 		 * making these BTLB entries such that they finish as
1031 		 * close as possible to the end of the space we need
1032 		 * them to map.  Instead, to minimize the number of BTLB
1033 		 * entries we need, we make them as large as possible.
1034 		 * The only thing this wastes is kernel virtual space,
1035 		 * which is plentiful.
1036 		 */
1037 
1038 		UVMHIST_LOG(maphist, "mapping data, bss, etc @ %#jx - %#jx",
1039 		    addr, kerw, 0, 0);
1040 
1041 		while (addr < kerw) {
1042 
1043 			/* Make the next BTLB entry. */
1044 			KASSERT(btlb_j < BTLB_SET_SIZE);
1045 			size = btlb_entry_min;
1046 			while ((addr + size) < kerw &&
1047 				(size << 1) < btlb_entry_max &&
1048 			    !(addr & ((size << 1) - 1)))
1049 				size <<= 1;
1050 			btlb_entry_start[btlb_j] = addr;
1051 			btlb_entry_size[btlb_j] = size;
1052 			btlb_entry_vm_prot[btlb_j] =
1053 			    VM_PROT_READ | VM_PROT_WRITE;
1054 
1055 			/* Move on. */
1056 			addr =
1057 			    btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
1058 			btlb_j++;
1059 		}
1060 
1061 		/* Now insert all of the BTLB entries. */
1062 		for (btlb_i = 0; btlb_i < btlb_j; btlb_i++) {
1063 			int error;
1064 			int prot;
1065 
1066 			btlb_entry_got = btlb_entry_size[btlb_i];
1067 			prot = btlb_entry_vm_prot[btlb_i];
1068 
1069 			error = hppa_btlb_insert(kpm->pm_space,
1070 			    btlb_entry_start[btlb_i], btlb_entry_start[btlb_i],
1071 			    &btlb_entry_got,
1072 			    kpm->pm_pid | pmap_prot(kpm, prot));
1073 
1074 			if (error)
1075 				panic("%s: cannot insert BTLB entry",
1076 				    __func__);
1077 			if (btlb_entry_got != btlb_entry_size[btlb_i])
1078 				panic("%s: BTLB entry mapped wrong amount",
1079 				    __func__);
1080 		}
1081 
1082 		kerw =
1083 		    btlb_entry_start[btlb_j - 1] + btlb_entry_size[btlb_j - 1];
1084 	}
1085 
1086 	/*
1087 	 * We now know the exact beginning of managed kernel virtual space.
1088 	 *
1089 	 * Finally, load physical pages into UVM.  There are three segments of
1090 	 * pages.
1091 	 */
1092 
1093 	availphysmem = 0;
1094 
1095 	pmap_page_physload(resvmem, atop(ksrx));
1096 	pmap_page_physload(atop(kero), atop(ksrw));
1097 	pmap_page_physload(atop(kerw), physmem);
1098 
1099 	mutex_init(&pmaps_lock, MUTEX_DEFAULT, IPL_NONE);
1100 
1101 	/* TODO optimize/inline the kenter */
1102 	for (va = PAGE_SIZE; va < ptoa(physmem); va += PAGE_SIZE) {
1103 		vm_prot_t prot = UVM_PROT_RW;
1104 
1105 		if (va < resvmem)
1106 			prot = UVM_PROT_RX;
1107 		else if (va >= ksrx && va < kerx)
1108 			prot = UVM_PROT_RX;
1109 		else if (va >= ksro && va < kero)
1110 			prot = UVM_PROT_R;
1111 #ifdef DIAGNOSTIC
1112 		else if (va == uvm_lwp_getuarea(&lwp0) + USPACE - PAGE_SIZE)
1113 			prot = UVM_PROT_NONE;
1114 #endif
1115 		pmap_kenter_pa(va, va, prot, PMAP_DIRECTMAP);
1116 	}
1117 
1118 	/* XXXNH update */
1119 	UVMHIST_LOG(maphist, "mapped %#jx - %#jx", ksro, kero, 0, 0);
1120 	UVMHIST_LOG(maphist, "mapped %#jx - %#jx", ksrw, kerw, 0, 0);
1121 
1122 }
1123 
1124 /*
1125  * Finishes the initialization of the pmap module.
1126  * This procedure is called from uvm_init() in uvm/uvm_init.c
1127  * to initialize any remaining data structures that the pmap module
1128  * needs to map virtual memory (VM is already ON).
1129  */
1130 void
1131 pmap_init(void)
1132 {
1133 	extern void gateway_page(void);
1134 	volatile pt_entry_t *pde;
1135 	int i;
1136 
1137 	UVMHIST_FUNC(__func__)
1138 	UVMHIST_CALLED(maphist);
1139 
1140 	sid_counter = HPPA_SID_KERNEL;
1141 
1142 	pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1143 	    &pool_allocator_nointr, IPL_NONE);
1144 	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pmappv",
1145 	    &pool_allocator_nointr, IPL_NONE);
1146 
1147 	pool_setlowat(&pmap_pv_pool, pmap_pvlowat);
1148 	pool_sethiwat(&pmap_pv_pool, pmap_pvlowat * 32);
1149 
1150 	/*
1151 	 * map SysCall gateway page once for everybody
1152 	 * NB: we'll have to remap the phys memory
1153 	 *     if we have any at SYSCALLGATE address (;
1154 	 *
1155 	 * no spls since no interrupts
1156 	 */
1157 	if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, SYSCALLGATE)) &&
1158 	    !(pde = pmap_pde_alloc(pmap_kernel(), SYSCALLGATE, NULL)))
1159 		panic("pmap_init: cannot allocate pde");
1160 
1161 	pmap_pte_set(pde, SYSCALLGATE, (paddr_t)&gateway_page |
1162 	    PTE_PROT(TLB_GATE_PROT));
1163 
1164 	for (i = 0; i < __arraycount(pmap_pv_locks); i++)
1165 		mutex_init(&pmap_pv_locks[i].lock, MUTEX_DEFAULT, IPL_VM);
1166 
1167 	pmap_initialized = true;
1168 
1169 	UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
1170 }
1171 
1172 /*
1173  * How much virtual space does this kernel have?
1174  */
1175 void
1176 pmap_virtual_space(vaddr_t *startp, vaddr_t *endp)
1177 {
1178 
1179 	*startp = SYSCALLGATE + PAGE_SIZE;
1180 	*endp = VM_MAX_KERNEL_ADDRESS;
1181 }
1182 
1183 /*
1184  * pmap_create()
1185  *
1186  * Create and return a physical map.
1187  * The map is an actual physical map, and may be referenced by the hardware.
1188  */
1189 pmap_t
1190 pmap_create(void)
1191 {
1192 	pmap_t pmap;
1193 	pa_space_t space;
1194 
1195 	UVMHIST_FUNC(__func__)
1196 	UVMHIST_CALLED(maphist);
1197 
1198 	pmap = pool_get(&pmap_pool, PR_WAITOK);
1199 
1200 	UVMHIST_LOG(maphist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);
1201 
1202 	rw_init(&pmap->pm_obj_lock);
1203 	uvm_obj_init(&pmap->pm_obj, &pmap_pager, false, 1);
1204 	uvm_obj_setlock(&pmap->pm_obj, &pmap->pm_obj_lock);
1205 
1206 	mutex_enter(&pmaps_lock);
1207 
1208 	/*
1209 	 * Allocate space IDs for the pmap; we get the protection ID from this.
1210 	 * If all are allocated, there is nothing we can do.
1211 	 */
1212 	/* XXXNH can't this loop forever??? */
1213 	for (space = sid_counter; pmap_sdir_get(space);
1214 	    space = (space + 1) % hppa_sid_max)
1215 		;
1216 
1217 	if ((pmap->pm_pdir_pg = pmap_pagealloc(NULL, 0)) == NULL)
1218 		panic("pmap_create: no pages");
1219 	pmap->pm_ptphint = NULL;
1220 	pmap->pm_pdir = (uint32_t *)VM_PAGE_TO_PHYS(pmap->pm_pdir_pg);
1221 	pmap_sdir_set(space, pmap->pm_pdir);
1222 
1223 	pmap->pm_space = space;
1224 	pmap->pm_pid = (space + 1) << 1;
1225 
1226 	pmap->pm_stats.resident_count = 1;
1227 	pmap->pm_stats.wired_count = 0;
1228 
1229 	mutex_exit(&pmaps_lock);
1230 
1231 	UVMHIST_LOG(maphist, "pm %#jx, space %jd, pid %jd",
1232 	    (uintptr_t)pmap, space, pmap->pm_pid, 0);
1233 
1234 	return (pmap);
1235 }
1236 
1237 /*
1238  * pmap_destroy(pmap)
1239  *	Gives up a reference to the specified pmap.  When the reference count
1240  *	reaches zero the pmap structure is added to the pmap free list.
1241  *	Should only be called if the map contains no valid mappings.
1242  */
1243 void
1244 pmap_destroy(pmap_t pmap)
1245 {
1246 	UVMHIST_FUNC(__func__)
1247 	UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);
1248 
1249 #ifdef DIAGNOSTIC
1250 	struct uvm_page_array a;
1251 	struct vm_page *pg;
1252 	off_t off;
1253 #endif
1254 
1255 	membar_release();
1256 	if (atomic_dec_uint_nv(&pmap->pm_obj.uo_refs) > 0)
1257 		return;
1258 	membar_acquire();
1259 
1260 #ifdef DIAGNOSTIC
1261 	uvm_page_array_init(&a, &pmap->pm_obj, 0);
1262 	off = 0;
1263 	rw_enter(pmap->pm_lock, RW_WRITER);
1264 	while ((pg = uvm_page_array_fill_and_peek(&a, off, 0)) != NULL) {
1265 		pt_entry_t *pde, *epde;
1266 		struct vm_page *spg;
1267 		struct pv_entry *pv, *npv;
1268 		paddr_t pa;
1269 		vaddr_t va;
1270 
1271 		off = pg->offset + PAGE_SIZE;
1272 		uvm_page_array_advance(&a);
1273 		KASSERT(pg != pmap->pm_pdir_pg);
1274 		pa = VM_PAGE_TO_PHYS(pg);
1275 
1276 		UVMHIST_LOG(maphist, "pm %#jx: stray ptp %#jx w/ %jd entries:",
1277 		    (uintptr_t)pmap, pa, pg->wire_count - 1, 0);
1278 
1279 		pde = (pt_entry_t *)pa;
1280 		epde = (pt_entry_t *)(pa + PAGE_SIZE);
1281 		for (; pde < epde; pde++) {
1282 			if (*pde == 0)
1283 				continue;
1284 
1285 			spg = PHYS_TO_VM_PAGE(PTE_PAGE(*pde));
1286 			if (spg == NULL)
1287 				continue;
1288 
1289 			struct vm_page_md * const md = VM_PAGE_TO_MD(spg);
1290 			pmap_pv_lock(md);
1291 			for (pv = md->pvh_list; pv != NULL; pv = npv) {
1292 				npv = pv->pv_next;
1293 				if (pv->pv_pmap != pmap)
1294 					continue;
1295 
1296 				UVMHIST_LOG(maphist, " %#jx", pv->pv_va, 0, 0,
1297 				    0);
1298 
1299 				va = pv->pv_va & PV_VAMASK;
1300 				pmap_pv_unlock(md);
1301 				pmap_remove(pmap, va, va + PAGE_SIZE);
1302 				pmap_pv_lock(md);
1303 				/* List may have changed: restart. */
1304 				npv = md->pvh_list;
1305 			}
1306 			pmap_pv_unlock(md);
1307 		}
1308 	}
1309 	rw_exit(pmap->pm_lock);
1310 	uvm_page_array_fini(&a);
1311 #endif
1312 	pmap_sdir_set(pmap->pm_space, 0);
1313 	rw_enter(pmap->pm_lock, RW_WRITER);
1314 	pmap_pagefree(pmap->pm_pdir_pg);
1315 	rw_exit(pmap->pm_lock);
1316 
1317 	uvm_obj_destroy(&pmap->pm_obj, false);
1318 	rw_destroy(&pmap->pm_obj_lock);
1319 	pool_put(&pmap_pool, pmap);
1320 }
1321 
1322 /*
1323  * Add a reference to the specified pmap.
1324  */
1325 void
1326 pmap_reference(pmap_t pmap)
1327 {
1328 	UVMHIST_FUNC(__func__)
1329 	UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);
1330 
1331 	atomic_inc_uint(&pmap->pm_obj.uo_refs);
1332 }
1333 
1334 
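/*
 * Synchronize the instruction cache with a dirty mapping of pg: find
 * a mapping (other than the one at pm/va being established) whose PTE
 * is dirty and flush its data and instruction cache lines.
 */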
1335 void
1336 pmap_syncicache_page(struct vm_page *pg, pmap_t pm, vaddr_t va)
1337 {
1338 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1339 	struct pv_entry *pve = md->pvh_list;
1340 
1341 	for (; pve; pve = pve->pv_next) {
1342 		pmap_t fpm = pve->pv_pmap;
1343 		vaddr_t fva = pve->pv_va & PV_VAMASK;
1344 		pt_entry_t pte = pmap_vp_find(fpm, fva);
1345 
1346 		if ((pte & PTE_PROT(TLB_DIRTY)) == 0)
1347 			continue;
1348 
1349 		/* Don't attempt to use the mapping we're adding */
1350 		if (pm == fpm && va == fva)
1351 			continue;
1352 
1353 		fdcache(fpm->pm_space, fva, PAGE_SIZE);
1354 		ficache(fpm->pm_space, fva, PAGE_SIZE);
1355 		break;
1356 	}
1357 }
1358 
1359 /*
1360  * pmap_enter(pmap, va, pa, prot, flags)
1361  *	Create a translation for the virtual address (va) to the physical
1362  *	address (pa) in the pmap with the protection requested. If the
1363  *	translation is wired then we can not allow a page fault to occur
1364  *	for this mapping.
1365  */
1366 int
1367 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1368 {
1369 	volatile pt_entry_t *pde;
1370 	pt_entry_t pte;
1371 	struct vm_page *pg = NULL, *ptp = NULL;
1372 	struct pv_entry *pve = NULL;
1373 	bool wired = (flags & PMAP_WIRED) != 0;
1374 
1375 	UVMHIST_FUNC(__func__);
1376 	UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pa %#jx prot %#jx",
1377 	    (uintptr_t)pmap, va, pa, prot);
1378 	UVMHIST_LOG(maphist, "...flags %#jx", flags, 0, 0, 0);
1379 
1380 	pmap_lock(pmap);
1381 
1382 	if (!(pde = pmap_pde_get(pmap->pm_pdir, va)) &&
1383 	    !(pde = pmap_pde_alloc(pmap, va, &ptp))) {
1384 		if (flags & PMAP_CANFAIL) {
1385 			pmap_unlock(pmap);
1386 			return (ENOMEM);
1387 		}
1388 
1389 		panic("pmap_enter: cannot allocate pde");
1390 	}
1391 
1392 	if (!ptp)
1393 		ptp = pmap_pde_ptp(pmap, pde);
1394 
1395 	if ((pte = pmap_pte_get(pde, va))) {
1396 		UVMHIST_LOG(maphist, "remapping %#jx -> %#jx", pte, pa, 0, 0);
1397 
1398 		pmap_pte_flush(pmap, va, pte);
1399 		if (wired && !(pte & PTE_PROT(TLB_WIRED)))
1400 			pmap->pm_stats.wired_count++;
1401 		else if (!wired && (pte & PTE_PROT(TLB_WIRED)))
1402 			pmap->pm_stats.wired_count--;
1403 
1404 		pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
1405 		if (PTE_PAGE(pte) == pa) {
1406 			UVMHIST_LOG(maphist, "same page", 0, 0, 0, 0);
1407 			goto enter;
1408 		}
1409 
1410 		if (pg != NULL) {
1411 			struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1412 
1413 			pmap_pv_lock(md);
1414 			pve = pmap_pv_remove(pg, pmap, va);
1415 			md->pvh_attrs |= pmap_pvh_attrs(pte);
1416 			pmap_pv_unlock(md);
1417 		}
1418 	} else {
1419 		UVMHIST_LOG(maphist, "new mapping %#jx -> %#jx",
1420 		    va, pa, 0, 0);
1421 		pte = PTE_PROT(TLB_REFTRAP);
1422 		pmap->pm_stats.resident_count++;
1423 		if (wired)
1424 			pmap->pm_stats.wired_count++;
1425 		if (ptp)
1426 			ptp->wire_count++;
1427 	}
1428 
1429 	if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
1430 		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1431 
1432 		if (!pve && !(pve = pmap_pv_alloc())) {
1433 			if (flags & PMAP_CANFAIL) {
1434  				pmap_unlock(pmap);
1435 				return (ENOMEM);
1436 			}
1437 			panic("%s: no pv entries available", __func__);
1438 		}
1439 		pte |= PTE_PROT(pmap_prot(pmap, prot));
1440 		pmap_resolve_alias(pg, pmap, va, pte);
1441 
1442 		pmap_pv_lock(md);
1443 		pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
1444 		pmap_pv_unlock(md);
1445 	} else if (pve) {
1446 		pmap_pv_free(pve);
1447 	}
1448 
1449 enter:
1450 	/* preserve old ref & mod */
1451 	pte = pa | PTE_PROT(pmap_prot(pmap, prot)) |
1452 	    (pte & PTE_PROT(TLB_UNCACHEABLE|TLB_DIRTY|TLB_REFTRAP));
1453 
1454 	if (pg != NULL) {
1455 		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1456 
1457 		if ((pte & PTE_PROT(TLB_EXECUTE)) != 0 &&
1458 		    !IS_PVFEXEC_P(md->pvh_attrs)) {
1459 			pmap_syncicache_page(pg, pmap, va);
1460 			md->pvh_attrs |= PVF_EXEC;
1461 		}
1462 	}
1463 
1464 	if (IS_IOPAGE_P(pa))
1465 		pte |= PTE_PROT(TLB_UNCACHEABLE);
1466 	if (wired)
1467 		pte |= PTE_PROT(TLB_WIRED);
1468 	pmap_pte_set(pde, va, pte);
1469 
1470 	pmap_unlock(pmap);
1471 
1472 	UVMHIST_LOG(maphist, "<--- done (0)", 0, 0, 0, 0);
1473 
1474 	return (0);
1475 }
1476 
1477 /*
1478  * pmap_remove(pmap, sva, eva)
1479  *	unmaps all virtual addresses in the virtual address
1480  *	range determined by [sva, eva) and pmap.
1481  *	sva and eva must be on machine independent page boundaries and
1482  *	sva must be less than or equal to eva.
1483  */
1484 void
1485 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1486 {
1487 	UVMHIST_FUNC(__func__);
1488 	UVMHIST_CALLARGS(maphist, "sva %#jx eva %#jx", sva, eva, 0, 0);
1489 
1490 	struct pv_entry *pve;
1491 	volatile pt_entry_t *pde = NULL;
1492 	pt_entry_t pte;
1493 	struct vm_page *pg, *ptp;
1494 	vaddr_t pdemask;
1495 	int batch;
1496 
1497 	pmap_lock(pmap);
1498 
1499 	for (batch = 0; sva < eva; sva += PAGE_SIZE) {
1500 		pdemask = sva & PDE_MASK;
1501 		if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
1502 			sva = pdemask + PDE_SIZE - PAGE_SIZE;
1503 			continue;
1504 		}
1505 		batch = pdemask == sva && sva + PDE_SIZE <= eva;
1506 
1507 		if ((pte = pmap_pte_get(pde, sva))) {
1508 
1509 			/* TODO measure here the speed tradeoff
1510 			 * for flushing whole 4M vs per-page
1511 			 * in case of non-complete pde fill
1512 			 */
1513 			pmap_pte_flush(pmap, sva, pte);
1514 			if (pte & PTE_PROT(TLB_WIRED))
1515 				pmap->pm_stats.wired_count--;
1516 			pmap->pm_stats.resident_count--;
1517 
1518 			/* iff properly accounted pde will be dropped anyway */
1519 			if (!batch)
1520 				pmap_pte_set(pde, sva, 0);
1521 
1522 			if (pmap_initialized &&
1523 			    (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
1524 				struct vm_page_md * const md =
1525 				    VM_PAGE_TO_MD(pg);
1526 
1527 				pmap_pv_lock(md);
1528 				pve = pmap_pv_remove(pg, pmap, sva);
1529 				md->pvh_attrs |= pmap_pvh_attrs(pte);
1530 				pmap_pv_unlock(md);
1531 
1532 				if (pve != NULL)
1533 					pmap_pv_free(pve);
1534 			} else {
1535 				if (IS_IOPAGE_P(PTE_PAGE(pte))) {
1536 					ptp = pmap_pde_ptp(pmap, pde);
1537 					if (ptp != NULL)
1538 						pmap_pde_release(pmap, sva,
1539 						    ptp);
1540 				}
1541 			}
1542 		}
1543 	}
1544 
1545 	pmap_unlock(pmap);
1546 
1547 	UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
1548 }
1549 
1550 void
1551 pmap_write_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1552 {
1553 	UVMHIST_FUNC(__func__);
1554 	UVMHIST_CALLARGS(maphist, "pm %#jx sva %#jx eva %#jx prot %#jx",
1555 	    (uintptr_t)pmap, sva, eva, prot);
1556 
1557 	struct vm_page *pg;
1558 	volatile pt_entry_t *pde = NULL;
1559 	pt_entry_t pte;
1560 	u_int pteprot, pdemask;
1561 
1562 	sva = trunc_page(sva);
1563 	pteprot = PTE_PROT(pmap_prot(pmap, prot));
1564 
1565 	pmap_lock(pmap);
1566 
1567 	for (pdemask = 1; sva < eva; sva += PAGE_SIZE) {
1568 		if (pdemask != (sva & PDE_MASK)) {
1569 			pdemask = sva & PDE_MASK;
1570 			if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
1571 				sva = pdemask + PDE_SIZE - PAGE_SIZE;
1572 				continue;
1573 			}
1574 		}
1575 		if ((pte = pmap_pte_get(pde, sva))) {
1576 			UVMHIST_LOG(maphist, "va %#jx pte %#jx", sva, pte,
1577 			    0, 0);
1578 			/*
1579 			 * Determine if mapping is changing.
1580 			 * If not, nothing to do.
1581 			 */
1582 			if ((pte & PTE_PROT(TLB_AR_MASK)) == pteprot)
1583 				continue;
1584 
1585 			pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
1586 			if (pg != NULL) {
1587 				struct vm_page_md * const md =
1588 				    VM_PAGE_TO_MD(pg);
1589 
1590 				pmap_pv_lock(md);
1591 				md->pvh_attrs |= pmap_pvh_attrs(pte);
1592 				pmap_pv_unlock(md);
1593 			}
1594 
1595 			/* Add TLB_EXECUTE if PVF_EXEC ??? */
1596 			pmap_pte_flush(pmap, sva, pte);
1597 			pte &= ~PTE_PROT(TLB_AR_MASK);
1598 			pte |= pteprot;
1599 			pmap_pte_set(pde, sva, pte);
1600 		}
1601 	}
1602 
1603 	pmap_unlock(pmap);
1604 }
1605 
1606 void
1607 pmap_page_remove(struct vm_page *pg)
1608 {
1609 	UVMHIST_FUNC(__func__)
1610 	UVMHIST_CALLARGS(maphist, "pg %#jx", (uintptr_t)pg, 0, 0, 0);
1611 
1612 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1613 	struct pv_entry *pve, *npve, **pvp;
1614 
1615 	if (md->pvh_list == NULL) {
1616 		KASSERT((md->pvh_attrs & PVF_EXEC) == 0);
1617 		return;
1618 	}
1619 
1620  restart:
1621 	pmap_pv_lock(md);
1622 	pvp = &md->pvh_list;
1623 	for (pve = md->pvh_list; pve; pve = npve) {
1624 		pmap_t pmap = pve->pv_pmap;
1625 		vaddr_t va = pve->pv_va & PV_VAMASK;
1626 		volatile pt_entry_t *pde;
1627 		pt_entry_t pte;
1628 		bool locked;
1629 
1630 		UVMHIST_LOG(maphist, "... pm %#jx va %#jx", (uintptr_t)pmap,
1631 		    va, 0, 0);
1632 
1633 		npve = pve->pv_next;
1634 		if (pve->pv_va & PV_KENTER) {
1635 			/*
1636 			 * This is an unmanaged mapping, it must be preserved.
1637 			 * Move it back on the list and advance the end-of-list
1638 			 * pointer.
1639 			 */
1640 			*pvp = pve;
1641 			pvp = &pve->pv_next;
1642 
1643 			/* Get the pte for this mapping */
1644 			pde = pmap_pde_get(pmap->pm_pdir, va);
1645 			pte = pmap_pte_get(pde, va);
1646 		} else {
1647 			/*
1648 			 * We have to seamlessly get a hold on the pmap's lock
1649 			 * while holding the PV head lock, to know that the
1650 			 * mapping is still in place and we can operate on it.
1651 			 * If that can't be had, drop the PV head lock, wait
1652 			 * for the pmap's lock to become available, and then
1653 			 * try again.
1654 			 */
1655 			UVMHIST_LOG(maphist, "... pm %#jx va %#jx... removing",
1656 			    (uintptr_t)pmap, va, 0, 0);
1657 
1658 			pmap_reference(pmap);
1659 			locked = pmap_trylock(pmap);
1660 			if (!locked) {
1661 				pmap_pv_unlock(md);
1662 				pmap_lock(pmap);
1663 				/* nothing */
1664 				pmap_unlock(pmap);
1665 				pmap_destroy(pmap);
1666 
1667 				UVMHIST_LOG(maphist, "... failed lock", 0, 0, 0,
1668 				    0);
1669 				goto restart;
1670 			}
1671 			pde = pmap_pde_get(pmap->pm_pdir, va);
1672 			pte = pmap_pte_get(pde, va);
1673 
1674 			md->pvh_attrs |= pmap_pvh_attrs(pte);
1675 		}
1676 
1677 		pmap_pte_flush(pmap, va, pte);
1678 		if (pte & PTE_PROT(TLB_WIRED))
1679 			pmap->pm_stats.wired_count--;
1680 		pmap->pm_stats.resident_count--;
1681 
1682 		if (!(pve->pv_va & PV_KENTER)) {
1683 			pmap_pte_set(pde, va, 0);
1684 
1685 			pmap_pv_unlock(md);
1686 			pmap_pv_free(pve);
1687 			pmap_unlock(pmap);
1688 			pmap_destroy(pmap);
1689 			UVMHIST_LOG(maphist, "... removed", 0, 0, 0, 0);
1690 			*pvp = npve;
1691 			goto restart;
1692 		}
1693 	}
1694 	md->pvh_attrs &= ~PVF_EXEC;
1695 	*pvp = NULL;
1696 
1697 	pmap_pv_unlock(md);
1698 
1699 	UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
1700 }
1701 
1702 /*
1703  *	Routine:	pmap_unwire
1704  *	Function:	Change the wiring attribute for a map/virtual-address
1705  *			pair.
1706  *	In/out conditions:
1707  *			The mapping must already exist in the pmap.
1708  *
1709  * Change the wiring for a given virtual page. This routine currently is
1710  * only used to unwire pages and hence the mapping entry will exist.
1711  */
1712 void
1713 pmap_unwire(pmap_t pmap, vaddr_t va)
1714 {
1715 	UVMHIST_FUNC(__func__);
1716 	UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx", (uintptr_t)pmap, va, 0, 0);
1717 
1718 	volatile pt_entry_t *pde;
1719 	pt_entry_t pte = 0;
1720 
1721 	pmap_lock(pmap);
1722 	if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
1723 		pte = pmap_pte_get(pde, va);
1724 
1725 		KASSERT(pte);
1726 
1727 		if (pte & PTE_PROT(TLB_WIRED)) {
1728 			pte &= ~PTE_PROT(TLB_WIRED);
1729 			pmap->pm_stats.wired_count--;
1730 			pmap_pte_set(pde, va, pte);
1731 		}
1732 	}
1733 	pmap_unlock(pmap);
1734 
1735 	UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
1736 }
1737 
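/*
 * Set and/or clear attribute bits in every PTE mapping pg, folding
 * the old referenced/modified state into pvh_attrs.  Returns true if
 * any of the requested bits was previously set.
 */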
1738 bool
1739 pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
1740 {
1741 	UVMHIST_FUNC(__func__);
1742 	UVMHIST_CALLARGS(maphist, "pg %#jx (md %#jx) set %#jx clear %#jx",
1743 	    (uintptr_t)pg, (uintptr_t)VM_PAGE_TO_MD(pg), set, clear);
1744 
1745 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1746 	struct pv_entry *pve;
1747 	int res;
1748 
1749 	KASSERT((set & clear) == 0);
1750 	KASSERT((set & ~(PVF_REF|PVF_UNCACHEABLE)) == 0);
1751 	KASSERT((clear & ~(PVF_MOD|PVF_WRITE|PVF_UNCACHEABLE)) == 0);
1752 
1753 	/* preserve other bits */
1754 	pmap_pv_lock(md);
1755 	res = md->pvh_attrs & (set | clear);
1756 	md->pvh_attrs ^= res;
1757 
1758 	for (pve = md->pvh_list; pve; pve = pve->pv_next) {
1759 		pmap_t pmap = pve->pv_pmap;
1760 		vaddr_t va = pve->pv_va & PV_VAMASK;
1761 		volatile pt_entry_t *pde;
1762 		pt_entry_t opte, pte;
1763 
1764 		if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
1765 			opte = pte = pmap_pte_get(pde, va);
1766 #ifdef DEBUG
1767 			if (!pte) {
1768 				UVMHIST_LOG(maphist, "zero pte for %#jx",
1769 				    va, 0, 0, 0);
1770 				continue;
1771 			}
1772 #endif
1773 			pte &= ~clear;
1774 			pte |= set;
1775 
1776 			if (!(pve->pv_va & PV_KENTER)) {
1777 				md->pvh_attrs |= pmap_pvh_attrs(pte);
1778 				res |= pmap_pvh_attrs(opte);
1779 			}
1780 
1781 			if (opte != pte) {
1782 				pmap_pte_flush(pmap, va, opte);
1783 				pmap_pte_set(pde, va, pte);
1784 			}
1785 		}
1786 	}
1787 	pmap_pv_unlock(md);
1788 
1789 	return ((res & (clear | set)) != 0);
1790 }
1791 
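/*
 * Test whether the given attribute bit is set for pg, gathering the
 * referenced/modified bits of each managed mapping into pvh_attrs
 * until the bit is found or the PV list is exhausted.
 */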
1792 bool
1793 pmap_testbit(struct vm_page *pg, u_int bit)
1794 {
1795 	UVMHIST_FUNC(__func__);
1796 	UVMHIST_CALLARGS(maphist, "pg %#jx (md %#jx) bit %#jx",
1797 	    (uintptr_t)pg, (uintptr_t)VM_PAGE_TO_MD(pg), bit, 0);
1798 
1799 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1800 	struct pv_entry *pve;
1801 	pt_entry_t pte;
1802 	int ret;
1803 
1804 	pmap_pv_lock(md);
1805 	for (pve = md->pvh_list; !(md->pvh_attrs & bit) && pve;
1806 	    pve = pve->pv_next) {
1807 		pmap_t pm = pve->pv_pmap;
1808 
1809 		pte = pmap_vp_find(pm, pve->pv_va & PV_VAMASK);
1810 		if (pve->pv_va & PV_KENTER)
1811 			continue;
1812 
1813 		md->pvh_attrs |= pmap_pvh_attrs(pte);
1814 	}
1815 	ret = ((md->pvh_attrs & bit) != 0);
1816 	pmap_pv_unlock(md);
1817 
1818 	return ret;
1819 }
1820 
1821 /*
1822  * pmap_extract(pmap, va, pap)
1823  *	fills in the physical address corresponding to the
1824  *	virtual address specified by pmap and va into the
1825  *	storage pointed to by pap and returns true if the
1826  *	virtual address is mapped; returns false if not mapped.
1827  */
1828 bool
1829 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1830 {
1831 	UVMHIST_FUNC(__func__);
1832 	UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx", (uintptr_t)pmap, va, 0, 0);
1833 
1834 	pt_entry_t pte;
1835 
1836 
1837 	if (pmap != pmap_kernel()) {
1838 		pmap_lock(pmap);
1839 		pte = pmap_vp_find(pmap, va);
1840 		pmap_unlock(pmap);
1841 	} else {
1842 		pte = pmap_vp_find(pmap, va);
1843 	}
1844 
1845 	if (pte) {
1846 		if (pap)
1847 			*pap = (pte & ~PGOFSET) | (va & PGOFSET);
1848 		return true;
1849 	}
1850 
1851 	return false;
1852 }
1853 
1854 /*
1855  * pmap_activate(lwp)
1856  *
1857  *	Activates the vmspace for the given LWP.
1858  *	This is not necessarily the current LWP.
1859  */
1860 void
1861 pmap_activate(struct lwp *l)
1862 {
1863 	struct proc *p = l->l_proc;
1864 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
1865 	pa_space_t space = pmap->pm_space;
1866 	struct pcb *pcb = lwp_getpcb(l);
1867 
1868 	/* space is cached for the copy{in,out}'s pleasure */
1869 	pcb->pcb_space = space;
1870 	fdcache(HPPA_SID_KERNEL, (vaddr_t)pcb, sizeof(struct pcb));
1871 
1872 	if (p == curproc)
1873 		mtctl(pmap->pm_pid, CR_PIDR2);
1874 }
1875 
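/*
 * pmap_procwr(p, va, len)
 *	Synchronises the caches after instructions have been written
 *	into p's address space (e.g. a debugger planting a breakpoint):
 *	the range is flushed from the data and instruction caches and
 *	purged from both TLBs.
 */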
1876 void
1877 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1878 {
1879 	const pmap_t pmap = p->p_vmspace->vm_map.pmap;
1880 	const pa_space_t space = pmap->pm_space;
1881 
1882 	fdcache(space, va, len);
1883 	ficache(space, va, len);
1884 	pdtlb(space, va);
1885 	pitlb(space, va);
1886 }
1887 
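/*
 * pmap_flush_page(pg, purge)
 *	Flushes (or, when purge is true, purges without write-back)
 *	the data cache for every virtual address at which pg is
 *	currently mapped, so the physical page can safely be touched
 *	through a different mapping afterwards.
 */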
1888 static inline void
1889 pmap_flush_page(struct vm_page *pg, bool purge)
1890 {
1891 	UVMHIST_FUNC(__func__);
1892 	UVMHIST_CALLARGS(maphist, "pg %#jx (md %#jx) purge %jd",
1893 	    (uintptr_t)pg, (uintptr_t)VM_PAGE_TO_MD(pg), purge, 0);
1894 
1895 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1896 	struct pv_entry *pve;
1897 
1898 	/* purge cache for all possible mappings for the pa */
1899 	for (pve = md->pvh_list; pve; pve = pve->pv_next) {
1900 		vaddr_t va = pve->pv_va & PV_VAMASK;
1901 		pa_space_t sp = pve->pv_pmap->pm_space;
1902 
1903 		if (purge)
1904 			pdcache(sp, va, PAGE_SIZE);
1905 		else
1906 			fdcache(sp, va, PAGE_SIZE);
1907 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
1908     defined(HP8500_CPU) || defined(HP8600_CPU)
1909 		ficache(sp, va, PAGE_SIZE);
1910 		pdtlb(sp, va);
1911 		pitlb(sp, va);
1912 #endif
1913 	}
1914 }
1915 
1916 /*
1917  * pmap_zero_page(pa)
1918  *
1919  * Zeros the specified page.
1920  */
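/*
 *	The page is zeroed through its physical address: physical
 *	memory is mapped VA == PA in kernel space, so pa is also a
 *	valid kernel virtual address here.
 */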
1921 void
1922 pmap_zero_page(paddr_t pa)
1923 {
1924 
1925 	UVMHIST_FUNC(__func__);
1926 	UVMHIST_CALLARGS(maphist, "pa %#jx (pg %#jx)", pa,
1927 	    (uintptr_t)PHYS_TO_VM_PAGE(pa), 0, 0);
1928 
1929 	KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))->pvh_list == NULL);
1930 	KASSERT((VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))->pvh_attrs & PVF_EXEC) == 0);
1931 
1932 	memset((void *)pa, 0, PAGE_SIZE);
1933 	fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
1934 
1935 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
1936     defined(HP8500_CPU) || defined(HP8600_CPU)
1937 	ficache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
1938 	pdtlb(HPPA_SID_KERNEL, pa);
1939 	pitlb(HPPA_SID_KERNEL, pa);
1940 #endif
1941 }
1942 
1943 /*
1944  * pmap_copy_page(src, dst)
1945  *
1946  * pmap_copy_page copies the source page to the destination page.
1947  */
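/*
 *	The source page's existing mappings are flushed first so that
 *	the copy, done through the VA == PA kernel mapping, sees any
 *	data that is still dirty in the cache under another address.
 */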
1948 void
1949 pmap_copy_page(paddr_t spa, paddr_t dpa)
1950 {
1951 	UVMHIST_FUNC(__func__);
1952 	UVMHIST_CALLARGS(maphist, "spa %#jx (pg %#jx) dpa %#jx (pg %#jx)",
1953 	    spa, (uintptr_t)PHYS_TO_VM_PAGE(spa),
1954 	    dpa, (uintptr_t)PHYS_TO_VM_PAGE(dpa));
1955 
1956 	struct vm_page *srcpg = PHYS_TO_VM_PAGE(spa);
1957 
1958 	KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dpa))->pvh_list == NULL);
1959 	KASSERT((VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dpa))->pvh_attrs & PVF_EXEC) == 0);
1960 
1961 	pmap_flush_page(srcpg, false);
1962 
1963 	memcpy((void *)dpa, (void *)spa, PAGE_SIZE);
1964 
1965 	pdcache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
1966 	fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
1967 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
1968     defined(HP8500_CPU) || defined(HP8600_CPU)
1969 	ficache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
1970 	ficache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
1971 	pdtlb(HPPA_SID_KERNEL, spa);
1972 	pdtlb(HPPA_SID_KERNEL, dpa);
1973 	pitlb(HPPA_SID_KERNEL, spa);
1974 	pitlb(HPPA_SID_KERNEL, dpa);
1975 #endif
1976 }
1977 
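/*
 * pmap_kenter_pa(va, pa, prot, flags)
 *	Enters a wired kernel-only mapping of pa at va.  Such mappings
 *	are not tracked for reference/modification and are removed
 *	with pmap_kremove(), not pmap_remove().
 */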
1978 void
1979 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1980 {
1981 	UVMHIST_FUNC(__func__);
1982 	if (va != 0) {
1983 		UVMHIST_CALLARGS(maphist, "va %#jx pa %#jx prot %#jx flags %#jx",
1984 		    va, pa, prot, flags);
1985 	}
1986 
1987 	volatile pt_entry_t *pde;
1988 	pt_entry_t pte, opte;
1989 	struct vm_page *pg;
1990 
1991 	if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va)) &&
1992 	    !(pde = pmap_pde_alloc(pmap_kernel(), va, NULL)))
1993 		panic("pmap_kenter_pa: cannot allocate pde for va=0x%lx", va);
1994 	opte = pmap_pte_get(pde, va);
1995 	pte = pa | PTE_PROT(TLB_WIRED | TLB_REFTRAP |
1996 	    pmap_prot(pmap_kernel(), prot & VM_PROT_ALL));
1997 	if (IS_IOPAGE_P(pa) || (flags & PMAP_NOCACHE))
1998 		pte |= PTE_PROT(TLB_UNCACHEABLE);
1999 
2000 	if ((flags & PMAP_DIRECTMAP) == 0) {
2001 		pmap_kernel()->pm_stats.wired_count++;
2002 		pmap_kernel()->pm_stats.resident_count++;
2003 	}
2004 	if (opte)
2005 		pmap_pte_flush(pmap_kernel(), va, opte);
2006 
2007 	pg = pmap_initialized ? PHYS_TO_VM_PAGE(PTE_PAGE(pte)) : NULL;
2008 	if (pg != NULL) {
2009 		KASSERT(pa < HPPA_IOBEGIN);
2010 
2011 		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2012 		struct pv_entry *pve;
2013 
2014 		pve = pmap_pv_alloc();
2015 		if (!pve)
2016 			panic("%s: no pv entries available", __func__);
2017 		UVMHIST_LOG(maphist, "va %#jx pa %#jx pte %#jx TLB_KENTER",
2018 		    va, pa, pte, 0);
2019 
2020 		pmap_resolve_alias(pg, pmap_kernel(), va, pte);
2021 
2022 		pmap_pv_lock(md);
2023 		pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL, PV_KENTER);
2024 		pmap_pv_unlock(md);
2025 	}
2026 	pmap_pte_set(pde, va, pte);
2027 
2028 	if (va != 0) {
2029 		UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
2030 	}
2031 
2032 }
2033 
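/*
 * pmap_kremove(va, size)
 *	Removes the kernel mappings established with pmap_kenter_pa()
 *	in the range [va, va + size), flushing each mapping from the
 *	cache and TLB before its pte is cleared.
 */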
2034 void
2035 pmap_kremove(vaddr_t va, vsize_t size)
2036 {
2037 	UVMHIST_FUNC(__func__);
2038 	bool pzero = false;
2039 	if (va != 0) {
2040 		UVMHIST_CALLARGS(maphist, "va %#jx...%#jx", va, va + size, 0,
2041 		    0);
2042 		pzero = true;
2043 	}
2044 
2045 	struct pv_entry *pve;
2046 	vaddr_t eva, pdemask;
2047 	volatile pt_entry_t *pde = NULL;
2048 	pt_entry_t pte;
2049 	struct vm_page *pg;
2050 	pmap_t pmap = pmap_kernel();
2051 
2052 #ifdef DEBUG
2053 	/*
2054 	 * Don't allow the VA == PA mappings, apart from page zero, to be
2055 	 * removed. Page zero is given special treatment so that we get TLB
2056 	 * faults when the kernel tries to de-reference NULL or anything else
2057 	 * in the first page when it shouldn't.
2058 	 */
2059 	if (va != 0 && va < ptoa(physmem)) {
2060 		UVMHIST_LOG(maphist, "va %#jx size %#jx: unmapping physmem", va,
2061 		    size, 0, 0);
2062 		return;
2063 	}
2064 #endif
2065 
2066 	for (pdemask = 1, eva = va + size; va < eva; va += PAGE_SIZE) {
2067 		if (pdemask != (va & PDE_MASK)) {
2068 			pdemask = va & PDE_MASK;
2069 			if (!(pde = pmap_pde_get(pmap->pm_pdir, va))) {
2070 				va = pdemask + PDE_SIZE - PAGE_SIZE;
2071 				continue;
2072 			}
2073 		}
2074 		if (!(pte = pmap_pte_get(pde, va))) {
2075 			UVMHIST_LOG(maphist, "unmapping unmapped %#jx",
2076 			    va, 0, 0, 0);
2077 			continue;
2078 		}
2079 
2080 		pmap_pte_flush(pmap, va, pte);
2081 		pmap_pte_set(pde, va, 0);
2082 
2083 		pmap->pm_stats.wired_count--;
2084 		pmap->pm_stats.resident_count--;
2085 
2086 		pg = pmap_initialized ? PHYS_TO_VM_PAGE(PTE_PAGE(pte)) : NULL;
2087 		if (pg != NULL) {
2088 			struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2089 
2090 			pmap_pv_lock(md);
2091 			pve = pmap_pv_remove(pg, pmap, va);
2092 			pmap_pv_unlock(md);
2093 
2094 			if (pve != NULL)
2095 				pmap_pv_free(pve);
2096 		}
2097 	}
2098 	if (pzero) {
2099 		UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
2100 	}
2101 }
2102 
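/*
 * For example (illustrative only), one uncached page of device
 * registers could be mapped and later unmapped like this:
 *
 *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, PMAP_NOCACHE);
 *	pmap_update(pmap_kernel());
 *	...
 *	pmap_kremove(va, PAGE_SIZE);
 *	pmap_update(pmap_kernel());
 */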
2103 #if defined(USE_HPT)
2104 #if defined(DDB)
2105 /*
2106  * prints the whole va->pa translation table (aka HPT or HVT)
2107  */
2108 void
2109 pmap_hptdump(void)
2110 {
2111 	struct hpt_entry *hpt, *ehpt;
2112 
2113 	hpt = (struct hpt_entry *)pmap_hpt;
2114 	ehpt = (struct hpt_entry *)((int)hpt + pmap_hptsize);
2115 	db_printf("HPT dump %p-%p:\n", hpt, ehpt);
2116 	for (; hpt < ehpt; hpt++)
2117 		if (hpt->hpt_valid) {
2118 			char buf[128];
2119 
2120 			snprintb(buf, sizeof(buf), TLB_BITS, hpt->hpt_tlbprot);
2121 
2122 			db_printf("hpt@%p: %x{%sv=%x:%x},%s,%x\n",
2123 			    hpt, *(int *)hpt, (hpt->hpt_valid?"ok,":""),
2124 			    hpt->hpt_space, hpt->hpt_vpn << 9,
2125 			    buf, tlbptob(hpt->hpt_tlbpage));
2126 		}
2127 }
2128 #endif
2129 #endif
2130