xref: /netbsd-src/sys/uvm/pmap/pmap_segtab.c (revision a5bd2734b950dcb7a7ec5d31a8303499fd062023)
1 /*	$NetBSD: pmap_segtab.c,v 1.33 2023/07/23 07:25:36 skrll Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center and by Chris G. Demetriou.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1992, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * This code is derived from software contributed to Berkeley by
38  * the Systems Programming Group of the University of Utah Computer
39  * Science Department and Ralph Campbell.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
66  */
67 
68 #include <sys/cdefs.h>
69 
70 __KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.33 2023/07/23 07:25:36 skrll Exp $");
71 
72 /*
73  *	Manages physical address maps.
74  *
75  *	In addition to hardware address maps, this
76  *	module is called upon to provide software-use-only
77  *	maps which may or may not be stored in the same
78  *	form as hardware maps.  These pseudo-maps are
79  *	used to store intermediate results from copy
80  *	operations to and from address spaces.
81  *
82  *	Since the information managed by this module is
83  *	also stored by the logical address mapping module,
84  *	this module may throw away valid virtual-to-physical
85  *	mappings at almost any time.  However, invalidations
86  *	of virtual-to-physical mappings must be done as
87  *	requested.
88  *
89  *	In order to cope with hardware architectures which
90  *	make virtual-to-physical map invalidates expensive,
91  * this module may delay invalidation or protection-reduction
92  *	operations until such time as they are actually
93  *	necessary.  This module is given full information as
94  *	to which processors are currently using which maps,
95  *	and to when physical maps must be made correct.
96  */
97 
98 #define __PMAP_PRIVATE
99 
100 #include "opt_multiprocessor.h"
101 
102 #include <sys/param.h>
103 
104 #include <sys/atomic.h>
105 #include <sys/mutex.h>
106 #include <sys/proc.h>
107 #include <sys/systm.h>
108 
109 #include <uvm/uvm.h>
110 #include <uvm/pmap/pmap.h>
111 
112 #if defined(XSEGSHIFT) && XSEGSHIFT == SEGSHIFT
113 #undef XSEGSHIFT
114 #undef XSEGLENGTH
115 #undef NBXSEG
116 #undef NXSEGPG
117 #endif
118 
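/*
 * Compile-time check that (a) is either smaller than (b) or an exact
 * multiple of it, so that tables of one size always pack evenly into
 * (or are built from whole) objects of the other size.
 */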
119 #define MULT_CTASSERT(a,b)	__CTASSERT((a) < (b) || ((a) % (b) == 0))
120 
121 __CTASSERT(sizeof(pmap_ptpage_t) == NBPG);
122 
123 #if defined(PMAP_HWPAGEWALKER)
124 #ifdef _LP64
125 MULT_CTASSERT(PMAP_PDETABSIZE, NPDEPG);
126 MULT_CTASSERT(NPDEPG, PMAP_PDETABSIZE);
127 #endif /* _LP64 */
128 MULT_CTASSERT(sizeof(pmap_pdetab_t *), sizeof(pd_entry_t));
129 MULT_CTASSERT(sizeof(pd_entry_t), sizeof(pmap_pdetab_t));
130 
131 #if 0
132 #ifdef _LP64
133 static const bool separate_pdetab_root_p = NPDEPG != PMAP_PDETABSIZE;
134 #else
135 static const bool separate_pdetab_root_p = true;
136 #endif /* _LP64 */
137 #endif
138 
139 typedef struct {
140 	pmap_pdetab_t *free_pdetab0;	/* free list kept locally */
141 	pmap_pdetab_t *free_pdetab;	/* free list kept locally */
142 #ifdef DEBUG
143 	uint32_t nget;
144 	uint32_t nput;
145 	uint32_t npage;
146 #define	PDETAB_ADD(n, v)	(pmap_segtab_info.pdealloc.n += (v))
147 #else
148 #define	PDETAB_ADD(n, v)	((void) 0)
149 #endif /* DEBUG */
150 } pmap_pdetab_alloc_t;
151 #endif /* PMAP_HWPAGEWALKER */
152 
153 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
154 #ifdef _LP64
155 __CTASSERT(NSEGPG >= PMAP_SEGTABSIZE);
156 __CTASSERT(NSEGPG % PMAP_SEGTABSIZE == 0);
157 #endif
158 __CTASSERT(NBPG >= sizeof(pmap_segtab_t));
159 
160 typedef struct {
161 	pmap_segtab_t *free_segtab0;	/* free list kept locally */
162 	pmap_segtab_t *free_segtab;	/* free list kept locally */
163 #ifdef DEBUG
164 	uint32_t nget;
165 	uint32_t nput;
166 	uint32_t npage;
167 #define	SEGTAB_ADD(n, v)	(pmap_segtab_info.segalloc.n += (v))
168 #else
169 #define	SEGTAB_ADD(n, v)	((void) 0)
170 #endif
171 } pmap_segtab_alloc_t;
172 #endif /* !PMAP_HWPAGEWALKER || !PMAP_MAP_PDETABPAGE */
173 
174 struct pmap_segtab_info {
175 #if defined(PMAP_HWPAGEWALKER)
176 	pmap_pdetab_alloc_t pdealloc;
177 #endif
178 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
179 	pmap_segtab_alloc_t segalloc;
180 #endif
181 #ifdef PMAP_PPG_CACHE
182 	struct pgflist ptp_pgflist;	/* Keep a list of idle page tables. */
183 #endif
184 } pmap_segtab_info = {
185 #ifdef PMAP_PPG_CACHE
186 	.ptp_pgflist = LIST_HEAD_INITIALIZER(pmap_segtab_info.ptp_pgflist),
187 #endif
188 };
189 
190 kmutex_t pmap_segtab_lock __cacheline_aligned;
191 
192 #ifndef PMAP_HWPAGEWALKER
193 /*
194  * Check that a seg_ppg[] array is empty.
195  *
196  * This is used when allocating or freeing a pmap_segtab_t.  The stb
197  * should be unused -- meaning, all of the seg_ppg[] pointers must be
198  * NULL.  That is expected of a segtab freshly allocated from the pmap
199  * pool, of a segtab entry being released to the freelist, and, in the
200  * SMP case where two CPUs race to allocate the same underlying segtab,
201  * of the freshly allocated but unused segtab that reserve frees after
202  * losing the race.
203  */
204 static void
205 pmap_check_stb(pmap_segtab_t *stb, const char *caller, const char *why)
206 {
207 #ifdef DEBUG
208 	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
209 		if (stb->seg_ppg[i] != NULL) {
210 #define DEBUG_NOISY
211 #ifdef DEBUG_NOISY
212 			UVMHIST_FUNC(__func__);
213 			UVMHIST_CALLARGS(pmapxtabhist, "stb=%#jx",
214 			    (uintptr_t)stb, 0, 0, 0);
215 			for (size_t j = i; j < PMAP_SEGTABSIZE; j++)
216 				if (stb->seg_ppg[j] != NULL)
217 					printf("%s: stb->seg_ppg[%zu] = %p\n",
218 					    caller, j, stb->seg_ppg[j]);
219 #endif
220 			panic("%s: pm_segtab.seg_ppg[%zu] != 0 (%p): %s",
221 			    caller, i, stb->seg_ppg[i], why);
222 		}
223 	}
224 #endif
225 }
226 #endif /* PMAP_HWPAGEWALKER */
227 
228 static inline struct vm_page *
229 pmap_pte_pagealloc(void)
230 {
231 	struct vm_page *pg;
232 
233 	pg = pmap_md_alloc_poolpage(UVM_PGA_ZERO | UVM_PGA_USERESERVE);
234 	if (pg) {
235 #ifdef UVM_PAGE_TRKOWN
236 		pg->owner_tag = NULL;
237 #endif
238 		UVM_PAGE_OWN(pg, "pmap-ptp");
239 	}
240 
241 	return pg;
242 }
243 
244 #if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
245 static vaddr_t
246 pmap_pde_to_va(pd_entry_t pde)
247 {
248 	if (!pte_pde_valid_p(pde))
249 		return 0;
250 
251 	paddr_t pa = pte_pde_to_paddr(pde);
252 	return pmap_md_direct_map_paddr(pa);
253 }
254 
255 #ifdef _LP64
256 static pmap_pdetab_t *
257 pmap_pde_to_pdetab(pd_entry_t pde)
258 {
259 
260 	return (pmap_pdetab_t *)pmap_pde_to_va(pde);
261 }
262 #endif
263 
264 static pmap_ptpage_t *
265 pmap_pde_to_ptpage(pd_entry_t pde)
266 {
267 
268 	return (pmap_ptpage_t *)pmap_pde_to_va(pde);
269 }
270 #endif
271 
272 #ifdef _LP64
273 __CTASSERT((XSEGSHIFT - SEGSHIFT) % (PGSHIFT-3) == 0);
274 #endif
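/*
 * Worked example with illustrative numbers only: with 4 KiB pages
 * (PGSHIFT == 12) and 8-byte PDEs/PTEs, one page holds 4096 / 8 == 512
 * entries, i.e. 1 << (PGSHIFT - 3).  Each page-table level therefore
 * translates PGSHIFT - 3 == 9 bits of the virtual address, which is why
 * the multi-level walks below step segshift down by PGSHIFT - 3 per
 * level and why XSEGSHIFT - SEGSHIFT must be a multiple of it (asserted
 * above).
 */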
275 
276 static inline pmap_ptpage_t *
277 pmap_ptpage(struct pmap *pmap, vaddr_t va)
278 {
279 #if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
280 	vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
281 	pmap_pdetab_t *ptb = pmap->pm_pdetab;
282 
283 //	UVMHIST_LOG(pmaphist, "pm_pdetab %#jx", ptb, 0, 0, 0);
284 
285 	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va),
286 	    "pmap_kernel: %s, va %#" PRIxVADDR,
287 	    pmap == pmap_kernel() ? "true" : "false",
288 	    pmap == pmap_kernel() ? va : 0);
289 
290 #ifdef _LP64
291 	for (size_t segshift = XSEGSHIFT;
292 	    segshift > SEGSHIFT;
293 	    segshift -= PGSHIFT - 3, pdetab_mask = NSEGPG - 1) {
294 		ptb = pmap_pde_to_pdetab(ptb->pde_pde[(va >> segshift) & pdetab_mask]);
295 		if (ptb == NULL)
296 			return NULL;
297 	}
298 #endif
299 	return pmap_pde_to_ptpage(ptb->pde_pde[(va >> SEGSHIFT) & pdetab_mask]);
300 #else
301 	vaddr_t segtab_mask = PMAP_SEGTABSIZE - 1;
302 	pmap_segtab_t *stb = pmap->pm_segtab;
303 
304 	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va),
305 	    "pmap %p va %#" PRIxVADDR, pmap, va);
306 #ifdef _LP64
307 	for (size_t segshift = XSEGSHIFT;
308 	    segshift > SEGSHIFT;
309 	    segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
310 		stb = stb->seg_seg[(va >> segshift) & segtab_mask];
311 		if (stb == NULL)
312 			return NULL;
313 	}
314 #endif
315 	return stb->seg_ppg[(va >> SEGSHIFT) & segtab_mask];
316 #endif
317 }
318 
319 #if defined(PMAP_HWPAGEWALKER)
320 bool
321 pmap_pdetab_fixup(struct pmap *pmap, vaddr_t va)
322 {
323 	struct pmap * const kpm = pmap_kernel();
324 	pmap_pdetab_t * const kptb = kpm->pm_pdetab;
325 	pmap_pdetab_t * const uptb = pmap->pm_pdetab;
326 	size_t idx = PMAP_PDETABSIZE - 1;
327 #if !defined(PMAP_MAP_PDETABPAGE)
328 	__CTASSERT(PMAP_PDETABSIZE == PMAP_SEGTABSIZE);
329 	pmap_segtab_t * const kstb = &pmap_kern_segtab;
330 	pmap_segtab_t * const ustb = pmap->pm_segtab;
331 #endif
332 
333 	// Regardless of how many levels deep this page table is, we only
334 	// need to verify the first level PDEs match up.
335 #ifdef XSEGSHIFT
336 	idx &= va >> XSEGSHIFT;
337 #else
338 	idx &= va >> SEGSHIFT;
339 #endif
340 	if (uptb->pde_pde[idx] != kptb->pde_pde[idx]) {
341 		pte_pde_set(&uptb->pde_pde[idx], kptb->pde_pde[idx]);
342 #if !defined(PMAP_MAP_PDETABPAGE)
343 		ustb->seg_seg[idx] = kstb->seg_seg[idx]; // copy KVA of PTP
344 #endif
345 		return true;
346 	}
347 	return false;
348 }
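/*
 * Illustrative sketch only -- example_kernel_addr_fault() is hypothetical
 * and not part of this file.  A fault on a kernel address taken while a
 * user pmap is active can first try pmap_pdetab_fixup(); if the user
 * pdetab was merely missing the kernel's first-level PDE, the access can
 * simply be retried.
 */
#if 0
static bool
example_kernel_addr_fault(struct pmap *upmap, vaddr_t va)
{
	if (pmap_pdetab_fixup(upmap, va))
		return true;	/* first-level PDE copied; retry the access */
	return false;		/* a genuine fault; handle it normally */
}
#endif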
349 #endif /* PMAP_HWPAGEWALKER */
350 
351 
352 static void
353 pmap_page_attach(pmap_t pmap, vaddr_t kva, struct vm_page *pg,
354     struct pglist *pglist, voff_t off)
355 {
356 	UVMHIST_FUNC(__func__);
357 	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx kva %#jx pg %#jx list %#jx",
358 	    (uintptr_t)pmap, (uintptr_t)kva, (uintptr_t)pg, (uintptr_t)pglist);
359 
360 	struct uvm_object * const uobj = &pmap->pm_uobject;
361 	if (pg == NULL) {
362 		paddr_t pa;
363 
364 		bool ok __diagused = pmap_extract(pmap_kernel(), kva, &pa);
365 		KASSERT(ok);
366 
367 		pg = PHYS_TO_VM_PAGE(pa);
368 		KASSERT(pg != NULL);
369 	}
370 
371 	UVMHIST_LOG(pmapxtabhist, "kva %#jx uobj %#jx pg %#jx list %#jx",
372 	    (uintptr_t)kva, (uintptr_t)uobj, (uintptr_t)pg, (uintptr_t)pglist);
373 
374 	pmap_lock(pmap);
375 	TAILQ_INSERT_TAIL(pglist, pg, pageq.queue);
376 	uobj->uo_npages++;
377 	pmap_unlock(pmap);
378 
379 	/*
380 	 * Now set each vm_page that maps this page to point to the
381 	 * pmap and set the offset to what we want.
382 	 */
383 	KASSERTMSG(pg->uobject == NULL, "pg %p pg->uobject %p", pg, pg->uobject);
384 	pg->uobject = uobj;
385 	pg->offset = off;
386 }
387 
388 static struct vm_page *
389 pmap_page_detach(pmap_t pmap, struct pglist *list, vaddr_t va)
390 {
391 	UVMHIST_FUNC(__func__);
392 	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx kva %#jx list %#jx",
393 	    (uintptr_t)pmap, (uintptr_t)va, (uintptr_t)list, 0);
394 
395 	paddr_t pa;
396 	bool ok __diagused = pmap_extract(pmap_kernel(), va, &pa);
397 	KASSERT(ok);
398 
399 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
400 	struct uvm_object * const uobj = &pmap->pm_uobject;
401 
402 	UVMHIST_LOG(pmapxtabhist, "kva %#jx uobj %#jx pg %#jx list %#jx",
403 	    (uintptr_t)va, (uintptr_t)uobj, (uintptr_t)pg, (uintptr_t)list);
404 
405 	KASSERTMSG(pg->uobject == uobj, "pg->uobject %p vs uobj %p",
406 	    pg->uobject, uobj);
407 
408 	pmap_lock(pmap);
409 	TAILQ_REMOVE(list, pg, pageq.queue);
410 	uobj->uo_npages--;
411 	pmap_unlock(pmap);
412 
413 	pg->uobject = NULL;
414 	pg->offset = 0;
415 
416 	return pg;
417 }
418 
419 #ifndef PMAP_PPG_CACHE
420 static void
421 pmap_segtab_pagefree(pmap_t pmap, struct pglist *list, vaddr_t kva, size_t size)
422 {
423 #ifdef PMAP_MAP_PTEPAGE
424 	UVMHIST_FUNC(__func__);
425 	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx list %#jx kva %#jx size %#jx",
426 	    (uintptr_t)pmap, (uintptr_t)list, kva, size);
427 	KASSERT(size == PAGE_SIZE);
428 	if (size == PAGE_SIZE) {
429 		UVMHIST_LOG(pmapxtabhist, "about to detach (kva %#jx)",
430 		    kva, 0, 0, 0);
431 		uvm_pagefree(pmap_page_detach(pmap, list, kva));
432 		return;
433 	}
434 #endif
435 	for (size_t i = 0; i < size; i += PAGE_SIZE) {
436 		(void)pmap_page_detach(pmap, list, kva + i);
437 	}
438 
439 	uvm_km_free(kernel_map, kva, size, UVM_KMF_WIRED);
440 }
441 #endif
442 
443 pt_entry_t *
444 pmap_pte_lookup(pmap_t pmap, vaddr_t va)
445 {
446 	pmap_ptpage_t * const ppg = pmap_ptpage(pmap, va);
447 	if (ppg == NULL)
448 		return NULL;
449 
450 	const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);
451 
452 	return ppg->ppg_ptes + pte_idx;
453 }
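/*
 * Illustrative sketch only -- example_pte_is_valid() is hypothetical and
 * not part of this file.  pmap_pte_lookup() returns NULL when no page
 * table page exists for the segment, so callers must check the pointer
 * before dereferencing it.
 */
#if 0
static bool
example_pte_is_valid(pmap_t pmap, vaddr_t va)
{
	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);

	if (ptep == NULL)
		return false;		/* segment has no PT page yet */
	return pte_valid_p(*ptep);	/* MD test of the PTE's valid bit */
}
#endif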
454 
455 
456 static pmap_ptpage_t *
457 pmap_ptpage_alloc(pmap_t pmap, int flags, paddr_t *pa_p)
458 {
459 	UVMHIST_FUNC(__func__);
460 	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx flags %#jx pa_p %#jx", (uintptr_t)pmap,
461 	    (uintptr_t)flags, (uintptr_t)pa_p, 0);
462 
463 	pmap_ptpage_t *ppg = NULL;
464 
465 #ifdef PMAP_MAP_PTEPAGE
466 	struct vm_page *pg = NULL;
467 	paddr_t pa;
468 #ifdef PMAP_PPG_CACHE
469 	ppg = pmap_pgcache_alloc(&pmap_segtab_info.ppg_flist);
470 #endif
471 	if (ppg == NULL) {
472 		pg = pmap_pte_pagealloc();
473 		if (pg == NULL) {
474 			if (flags & PMAP_CANFAIL)
475 				return NULL;
476 			panic("%s: cannot allocate page table page ",
477 			    __func__);
478 		}
479 		pa = VM_PAGE_TO_PHYS(pg);
480 		ppg = (pmap_ptpage_t *)PMAP_MAP_PTEPAGE(pa);
481 	} else {
482 		bool ok __diagused = pmap_extract(pmap_kernel(), (vaddr_t)ppg, &pa);
483 		KASSERT(ok);
484 	}
485 
486 	UVMHIST_LOG(pmapxtabhist, "about to attach",  0, 0, 0, 0);
487 	pmap_page_attach(pmap, (vaddr_t)ppg, pg, &pmap->pm_ppg_list, 0);
488 
489 	*pa_p = pa;
490 #else
491 	vaddr_t kva = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
492 	    UVM_KMF_WIRED | UVM_KMF_WAITVA
493 	    | (flags & PMAP_CANFAIL ? UVM_KMF_CANFAIL : 0));
494 	if (kva == 0) {
495 		if (flags & PMAP_CANFAIL)
496 			return NULL;
497 		panic("%s: cannot allocate page table page", __func__);
498 	}
499 	UVMHIST_LOG(pmapxtabhist, "about to attach",  0, 0, 0, 0);
500 	pmap_page_attach(pmap, kva, NULL, &pmap->pm_ppg_list, 0);
501 	ppg = (pmap_ptpage_t *)kva;
502 #endif
503 
504 	UVMHIST_LOG(pmapxtabhist, "... ppg %#jx", (uintptr_t)ppg, 0, 0, 0);
505 
506 	return ppg;
507 }
508 
509 static void
510 pmap_ptpage_free(pmap_t pmap, pmap_ptpage_t *ppg, const char *caller)
511 {
512 	UVMHIST_FUNC(__func__);
513 	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx va %#jx", (uintptr_t)pmap,
514 	    (uintptr_t)ppg, 0, 0);
515 
516 	const vaddr_t kva = (vaddr_t)ppg;
517 	/*
518 	 * All pte arrays should be page aligned.
519 	 */
520 	if ((kva & PAGE_MASK) != 0) {
521 		panic("%s: pte entry at %p not page aligned", caller, ppg);
522 	}
523 
524 #ifdef DEBUG
525 	for (size_t j = 0; j < NPTEPG; j++) {
526 		if (ppg->ppg_ptes[j] != 0) {
527 			UVMHIST_LOG(pmapxtabhist,
528 			    "pte entry %#jx not 0 (%#jx)",
529 			    (uintptr_t)&ppg->ppg_ptes[j],
530 			    (uintptr_t)ppg->ppg_ptes[j], 0, 0);
531 			for (size_t i = j + 1; i < NPTEPG; i++)
532 				if (ppg->ppg_ptes[i] != 0)
533 					UVMHIST_LOG(pmapxtabhist,
534 					    "pte[%zu] = %#"PRIxPTE,
535 					    i, ppg->ppg_ptes[i], 0, 0);
536 
537 			panic("%s: pte entry at %p not 0 (%#" PRIxPTE ")",
538 			    __func__, &ppg->ppg_ptes[j],
539 			    ppg->ppg_ptes[j]);
540 		}
541 	}
542 #endif
543 	//pmap_md_vca_clean(pg, (vaddr_t)ppg, NBPG);
544 #ifdef PMAP_PPG_CACHE
545 	UVMHIST_LOG(pmapxtabhist, "about to detach",  0, 0, 0, 0);
546 	pmap_page_detach(pmap, &pmap->pm_ppg_list, kva);
547 	pmap_segtab_pagecache(&pmap_segtab_info.ppg_flist, ppg);
548 #else
549 	pmap_segtab_pagefree(pmap, &pmap->pm_ppg_list, kva, PAGE_SIZE);
550 #endif /* PMAP_PPG_CACHE */
551 }
552 
553 
554 #if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
555 
556 static pmap_pdetab_t *
557 pmap_pdetab_alloc(struct pmap *pmap)
558 {
559 	UVMHIST_FUNC(__func__);
560 	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);
561 
562 	pmap_pdetab_t *ptb;
563 #ifdef UVMHIST
564 	bool found_on_freelist = false;
565 #endif
566 
567  again:
568 	mutex_spin_enter(&pmap_segtab_lock);
569 	UVMHIST_LOG(pmapxtabhist, "free_pdetab %#jx",
570 	    (uintptr_t)pmap_segtab_info.pdealloc.free_pdetab, 0, 0, 0);
571 	if (__predict_true((ptb = pmap_segtab_info.pdealloc.free_pdetab) != NULL)) {
572 		pmap_segtab_info.pdealloc.free_pdetab = ptb->pde_next;
573 
574 		UVMHIST_LOG(pmapxtabhist, "freelist ptb=%#jx",
575 		    (uintptr_t)ptb, 0, 0, 0);
576 
577 		PDETAB_ADD(nget, 1);
578 		ptb->pde_next = NULL;
579 #ifdef UVMHIST
580 		found_on_freelist = true;
581 #endif
582 	}
583 	mutex_spin_exit(&pmap_segtab_lock);
584 
585 	struct vm_page *ptb_pg = NULL;
586 	if (__predict_false(ptb == NULL)) {
587 		ptb_pg = pmap_pte_pagealloc();
588 
589 		UVMHIST_LOG(pmapxtabhist, "ptb_pg=%#jx",
590 		    (uintptr_t)ptb_pg, 0, 0, 0);
591 		if (__predict_false(ptb_pg == NULL)) {
592 			/*
593 			 * XXX What else can we do?  Could we deadlock here?
594 			 */
595 			uvm_wait("pdetab");
596 			goto again;
597 		}
598 
599 		UVMHIST_LOG(pmapxtabhist, "ptb_pg=%#jx 2",
600 		    (uintptr_t)ptb_pg, 0, 0, 0);
601 		PDETAB_ADD(npage, 1);
602 		const paddr_t ptb_pa = VM_PAGE_TO_PHYS(ptb_pg);
603 		UVMHIST_LOG(pmapxtabhist, "ptb_pa=%#jx",  (uintptr_t)ptb_pa, 0, 0, 0);
604 		ptb = (pmap_pdetab_t *)PMAP_MAP_PDETABPAGE(ptb_pa);
605 		UVMHIST_LOG(pmapxtabhist, "new ptb=%#jx", (uintptr_t)ptb, 0,
606 		    0, 0);
607 
608 		if (pte_invalid_pde() != 0) {
609 			for (size_t i = 0; i < NPDEPG; i++) {
610 				ptb->pde_pde[i] = pte_invalid_pde();
611 			}
612 		}
613 	}
614 
615 	UVMHIST_LOG(pmapxtabhist, "about to attach",  0, 0, 0, 0);
616 	pmap_page_attach(pmap, (vaddr_t)ptb, ptb_pg, &pmap->pm_pdetab_list, 0);
617 
618 	UVMHIST_LOG(pmapxtabhist, "... ptb %#jx found on freelist %d",
619 	    (uintptr_t)ptb, found_on_freelist, 0, 0);
620 
621 	return ptb;
622 }
623 
624 
625 #else
626 /*
627  *	Allocate a segment table for the given pmap.
628  *
629  *	A segtab is taken from the local free list when
630  *	one is available; otherwise a fresh page is
631  *	allocated (sleeping in uvm_wait() until memory
632  *	is available) and mapped via PMAP_MAP_SEGTABPAGE.
633  *
634  *	The segtab page is attached to the pmap's
635  *	pm_segtab_list so it can be found and released
636  *	when the pmap is destroyed.
637  */
638 static pmap_segtab_t *
639 pmap_segtab_alloc(struct pmap *pmap)
640 {
641 	UVMHIST_FUNC(__func__);
642 	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);
643 
644 	pmap_segtab_t *stb;
645 	bool found_on_freelist = false;
646 
647  again:
648 	mutex_spin_enter(&pmap_segtab_lock);
649 	if (__predict_true((stb = pmap_segtab_info.segalloc.free_segtab) != NULL)) {
650 		pmap_segtab_info.segalloc.free_segtab = stb->seg_next;
651 		SEGTAB_ADD(nget, 1);
652 		stb->seg_next = NULL;
653 		found_on_freelist = true;
654 		UVMHIST_LOG(pmapxtabhist, "freelist stb=%#jx",
655 		    (uintptr_t)stb, 0, 0, 0);
656 	}
657 	mutex_spin_exit(&pmap_segtab_lock);
658 
659 	struct vm_page *stb_pg = NULL;
660 	if (__predict_false(stb == NULL)) {
661 		stb_pg = pmap_pte_pagealloc();
662 
663 		if (__predict_false(stb_pg == NULL)) {
664 			/*
665 			 * XXX What else can we do?  Could we deadlock here?
666 			 */
667 			uvm_wait("segtab");
668 			goto again;
669 		}
670 		SEGTAB_ADD(npage, 1);
671 		const paddr_t stb_pa = VM_PAGE_TO_PHYS(stb_pg);
672 
673 		stb = (pmap_segtab_t *)PMAP_MAP_SEGTABPAGE(stb_pa);
674 		UVMHIST_LOG(pmapxtabhist, "new stb=%#jx", (uintptr_t)stb, 0,
675 		    0, 0);
676 #if 0
677 CTASSERT(NBPG / sizeof(*stb) == 1);
678 		const size_t n = NBPG / sizeof(*stb);
679 		if (n > 1) {
680 			/*
681 			 * link all the segtabs in this page together
682 			 */
683 			for (size_t i = 1; i < n - 1; i++) {
684 				stb[i].seg_next = &stb[i + 1];
685 			}
686 			/*
687 			 * Now link the new segtabs into the free segtab list.
688 			 */
689 			mutex_spin_enter(&pmap_segtab_lock);
690 			stb[n - 1].seg_next = pmap_segtab_info.segalloc.free_segtab;
691 			pmap_segtab_info.segalloc.free_segtab = stb + 1;
692 			SEGTAB_ADD(nput, n - 1);
693 			mutex_spin_exit(&pmap_segtab_lock);
694 		}
695 #endif
696 	}
697 
698 	UVMHIST_LOG(pmapxtabhist, "about to attach",  0, 0, 0, 0);
699 	pmap_page_attach(pmap, (vaddr_t)stb, stb_pg, &pmap->pm_segtab_list, 0);
700 
701 	pmap_check_stb(stb, __func__,
702 	    found_on_freelist ? "from free list" : "allocated");
703 
704 	UVMHIST_LOG(pmapxtabhist, "... stb %#jx found on freelist %zu",
705 	    (uintptr_t)stb, found_on_freelist, 0, 0);
706 
707 	return stb;
708 }
709 #endif
710 
711 #if defined(PMAP_HWPAGEWALKER)
712 static void
713 pmap_pdetab_free(pmap_pdetab_t *ptb)
714 {
715 	UVMHIST_FUNC(__func__);
716 	UVMHIST_CALLARGS(pmaphist, "ptb %#jx", (uintptr_t)ptb, 0, 0, 0);
717 	/*
718 	 * Insert the pdetab into the pdetab freelist.
719 	 */
720 	mutex_spin_enter(&pmap_segtab_lock);
721 	ptb->pde_next = pmap_segtab_info.pdealloc.free_pdetab;
722 	pmap_segtab_info.pdealloc.free_pdetab = ptb;
723 	PDETAB_ADD(nput, 1);
724 	mutex_spin_exit(&pmap_segtab_lock);
725 
726 }
727 #endif
728 
729 
730 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
731 /*
732  * Insert the segtab into the segtab freelist.
733  */
734 static void
735 pmap_segtab_free(pmap_segtab_t *stb)
736 {
737 	UVMHIST_FUNC(__func__);
738 	UVMHIST_CALLARGS(pmaphist, "stb %#jx", (uintptr_t)stb, 0, 0, 0);
739 
740 	/*
741 	 * Insert the segtab into the segtab freelist.
742 	 */
743 	mutex_spin_enter(&pmap_segtab_lock);
744 	stb->seg_next = pmap_segtab_info.segalloc.free_segtab;
745 	pmap_segtab_info.segalloc.free_segtab = stb;
746 	SEGTAB_ADD(nput, 1);
747 	mutex_spin_exit(&pmap_segtab_lock);
748 }
749 #endif
750 
751 #if defined(PMAP_HWPAGEWALKER)
752 static void
753 pmap_pdetab_release(pmap_t pmap, pmap_pdetab_t **ptb_p, bool free_ptb,
754     vaddr_t va, vsize_t vinc)
755 {
756 	const vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
757 	pmap_pdetab_t *ptb = *ptb_p;
758 
759 	UVMHIST_FUNC(__func__);
760 	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx ptb_p %#jx ptb %#jx free %jd",
761 	    (uintptr_t)pmap, (uintptr_t)ptb_p, (uintptr_t)ptb, free_ptb);
762 	UVMHIST_LOG(pmapxtabhist, " va=%#jx vinc=%#jx",
763 	    (uintptr_t)va, (uintptr_t)vinc, 0, 0);
764 
765 	for (size_t i = (va / vinc) & pdetab_mask;
766 	    i < PMAP_PDETABSIZE;
767 	    i++, va += vinc) {
768 #ifdef _LP64
769 		if (vinc > NBSEG) {
770 			if (pte_pde_valid_p(ptb->pde_pde[i])) {
771 				pmap_pdetab_t *nptb =
772 				    pmap_pde_to_pdetab(ptb->pde_pde[i]);
773 				UVMHIST_LOG(pmapxtabhist,
774 				    " va %#jx ptp->pde_pde[%jd] (*%#jx) = %#jx "
775 				    "recursing", va, i, &ptb->pde_pde[i],
776 				    ptb->pde_pde[i]);
777 				pmap_pdetab_release(pmap, &nptb, true,
778 				    va, vinc / NPDEPG);
779 				ptb->pde_pde[i] = pte_invalid_pde();
780 				KASSERT(nptb == NULL);
781 			}
782 			continue;
783 		}
784 #endif
785 		KASSERT(vinc == NBSEG);
786 
787 		/* get pointer to PT page */
788 		pmap_ptpage_t *ppg = pmap_pde_to_ptpage(ptb->pde_pde[i]);
789 		UVMHIST_LOG(pmapxtabhist,
790 		    "   va %#jx ptb->pde_pde[%jd] (*%#jx) = %#jx", va, i,
791 		    (uintptr_t)&ptb->pde_pde[i], ptb->pde_pde[i]);
792 		if (ppg == NULL)
793 			continue;
794 
795 		UVMHIST_LOG(pmapxtabhist, " zeroing tab (%#jx)[%jd] (%#jx)",
796 		    (uintptr_t)ptb->pde_pde, i, (uintptr_t)&ptb->pde_pde[i], 0);
797 
798 		ptb->pde_pde[i] = pte_invalid_pde();
799 
800 		pmap_ptpage_free(pmap, ppg, __func__);
801 	}
802 
803 	if (free_ptb) {
804 		UVMHIST_LOG(pmapxtabhist, " ptbp %#jx ptb %#jx",
805 		    (uintptr_t)ptb_p, (uintptr_t)ptb, 0, 0);
806 		const vaddr_t kva = (vaddr_t)ptb;
807 		UVMHIST_LOG(pmapxtabhist, "about to detach",  0, 0, 0, 0);
808 		pmap_page_detach(pmap, &pmap->pm_pdetab_list, kva);
809 		pmap_pdetab_free(ptb);
810 		*ptb_p = NULL;
811 	}
812 }
813 #endif
814 
815 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
816 static void
817 pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stb_p, bool free_stb,
818     pte_callback_t callback, uintptr_t flags, vaddr_t va, vsize_t vinc)
819 {
820 	pmap_segtab_t *stb = *stb_p;
821 
822 	UVMHIST_FUNC(__func__);
823 	UVMHIST_CALLARGS(pmapxtabhist, "pm=%#jx stb_p=%#jx free=%jd",
824 	    (uintptr_t)pmap, (uintptr_t)stb, free_stb, 0);
825 	UVMHIST_LOG(pmapxtabhist, " callback=%#jx flags=%#jx va=%#jx vinc=%#jx",
826 	    (uintptr_t)callback, flags, (uintptr_t)va, (uintptr_t)vinc);
827 
828 	for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
829 	    i < PMAP_SEGTABSIZE;
830 	    i++, va += vinc) {
831 #ifdef _LP64
832 		if (vinc > NBSEG) {
833 			if (stb->seg_seg[i] != NULL) {
834 				UVMHIST_LOG(pmapxtabhist,
835 				    " recursing %jd", i, 0, 0, 0);
836 				pmap_segtab_release(pmap, &stb->seg_seg[i],
837 				    true, callback, flags, va, vinc / NSEGPG);
838 				KASSERT(stb->seg_seg[i] == NULL);
839 			}
840 			continue;
841 		}
842 #endif
843 		KASSERT(vinc == NBSEG);
844 
845 		/* get pointer to segment map */
846 		pmap_ptpage_t *ppg = stb->seg_ppg[i];
847 		if (ppg == NULL)
848 			continue;
849 
850 		/*
851 		 * If our caller wants a callback, do so.
852 		 */
853 		if (callback != NULL) {
854 			(*callback)(pmap, va, va + vinc, ppg->ppg_ptes, flags);
855 		}
856 		pmap_ptpage_free(pmap, ppg, __func__);
857 		stb->seg_ppg[i] = NULL;
858 		UVMHIST_LOG(pmapxtabhist, " zeroing tab[%jd]", i, 0, 0, 0);
859 	}
860 
861 	if (free_stb) {
862 		pmap_check_stb(stb, __func__,
863 		    vinc == NBSEG ? "release seg" : "release xseg");
864 
865 		const vaddr_t kva = (vaddr_t)stb;
866 		UVMHIST_LOG(pmapxtabhist, "about to detach",  0, 0, 0, 0);
867 		pmap_page_detach(pmap, &pmap->pm_segtab_list, kva);
868 		pmap_segtab_free(stb);
869 		*stb_p = NULL;
870 	}
871 }
872 #endif
873 
874 
875 
876 /*
877  * Allocate the top segment table for the pmap.
878  */
879 void
880 pmap_segtab_init(pmap_t pmap)
881 {
882 	UVMHIST_FUNC(__func__);
883 	UVMHIST_CALLARGS(pmaphist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);
884 
885 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
886 	/*
887 	 * Constantly converting from extracted PA to VA is somewhat expensive
888 	 * for systems with hardware page walkers and without an inexpensive
889 	 * way to access arbitrary virtual addresses, so we allocate an extra
890  * root segtab so that it can contain virtual addresses.
891 	 */
892 	pmap->pm_segtab = pmap_segtab_alloc(pmap);
893 #endif
894 #if defined(PMAP_HWPAGEWALKER)
895 	pmap->pm_pdetab = pmap_pdetab_alloc(pmap);
896 	pmap_md_pdetab_init(pmap);
897 #endif
898 }
899 
900 /*
901  *	Retire the given physical map from service.
902  *	Should only be called if the map contains
903  *	no valid mappings.
904  */
905 void
906 pmap_segtab_destroy(pmap_t pmap, pte_callback_t func, uintptr_t flags)
907 {
908 	KASSERT(pmap != pmap_kernel());
909 #ifdef _LP64
910 	const vsize_t vinc = NBXSEG;
911 #else
912 	const vsize_t vinc = NBSEG;
913 #endif
914 
915 #if defined(PMAP_HWPAGEWALKER)
916 	if (pmap->pm_pdetab != NULL) {
917 		pmap_md_pdetab_fini(pmap);
918 		pmap_pdetab_release(pmap, &pmap->pm_pdetab,
919 		    true, pmap->pm_minaddr, vinc);
920 	}
921 #endif
922 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
923 	if (pmap->pm_segtab != NULL) {
924 		pmap_segtab_release(pmap, &pmap->pm_segtab,
925 		    func == NULL, func, flags, pmap->pm_minaddr, vinc);
926 	}
927 #endif
928 
929 #if defined(PMAP_HWPAGEWALKER)
930 #if !defined(PMAP_MAP_PDETABPAGE)
931 	KASSERT((pmap->pm_segtab == NULL) == (pmap->pm_pdetab == NULL));
932 #endif
933 	KASSERT(pmap->pm_pdetab == NULL);
934 #endif
935 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
936 	KASSERT(pmap->pm_segtab == NULL);
937 #endif
938 
939 }
940 
941 /*
942  *	Make a new pmap (vmspace) active for the given process.
943  */
944 void
945 pmap_segtab_activate(struct pmap *pm, struct lwp *l)
946 {
947 	if (l == curlwp) {
948 		KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap);
949 		pmap_md_xtab_activate(pm, l);
950 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
951 		struct cpu_info * const ci = l->l_cpu;
952 		if (pm == pmap_kernel()) {
953 			ci->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
954 #ifdef _LP64
955 			ci->ci_pmap_user_seg0tab = PMAP_INVALID_SEGTAB_ADDRESS;
956 #endif
957 		} else {
958 			ci->ci_pmap_user_segtab = pm->pm_segtab;
959 #ifdef _LP64
960 			ci->ci_pmap_user_seg0tab = pm->pm_segtab->seg_seg[0];
961 #endif
962 		}
963 #endif
964 	}
965 }
966 
967 void
968 pmap_segtab_deactivate(pmap_t pm)
969 {
970 	pmap_md_xtab_deactivate(pm);
971 
972 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
973 	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
974 #ifdef _LP64
975 	curcpu()->ci_pmap_user_seg0tab = NULL;
976 #endif
977 #endif
978 }
979 
980 /*
981  *	Act on the given range of addresses from the specified map.
982  *
983  *	It is assumed that the start and end are properly rounded to
984  *	the page size.
985  */
986 void
987 pmap_pte_process(pmap_t pmap, vaddr_t sva, vaddr_t eva,
988     pte_callback_t callback, uintptr_t flags)
989 {
990 #if 0
991 	printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n",
992 	    __func__, pmap, sva, eva, callback, flags);
993 #endif
994 	while (sva < eva) {
995 		vaddr_t lastseg_va = pmap_trunc_seg(sva) + NBSEG;
996 		if (lastseg_va == 0 || lastseg_va > eva)
997 			lastseg_va = eva;
998 
999 		/*
1000 		 * If VA belongs to an unallocated segment,
1001 		 * skip to the next segment boundary.
1002 		 */
1003 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, sva);
1004 		if (ptep != NULL) {
1005 			/*
1006 			 * Callback to deal with the ptes for this segment.
1007 			 */
1008 			(*callback)(pmap, sva, lastseg_va, ptep, flags);
1009 		}
1010 		/*
1011 		 * In theory we could release pages with no entries,
1012 		 * but that takes more effort than we want here.
1013 		 */
1014 		sva = lastseg_va;
1015 	}
1016 }
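/*
 * Illustrative sketch only -- example_count_valid() is hypothetical and
 * not part of this file.  A pte_callback_t is handed one segment's worth
 * of PTEs at a time by pmap_pte_process(); "flags" is an opaque cookie
 * chosen by the caller (used here to carry a result pointer).
 */
#if 0
static void
example_count_valid(pmap_t pmap, vaddr_t sva, vaddr_t eva,
    pt_entry_t *ptep, uintptr_t flags)
{
	size_t * const countp = (size_t *)flags;

	for (; sva < eva; sva += PAGE_SIZE, ptep++) {
		if (pte_valid_p(*ptep))
			(*countp)++;
	}
}
#endif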
1017 
1018 #if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
1019 static pd_entry_t *
1020 pmap_pdetab_reserve(struct pmap *pmap, vaddr_t va)
1021 #elif defined(PMAP_HWPAGEWALKER)
1022 static pmap_ptpage_t **
1023 pmap_segtab_reserve(struct pmap *pmap, vaddr_t va, pd_entry_t **pde_p)
1024 #else
1025 static pmap_ptpage_t **
1026 pmap_segtab_reserve(struct pmap *pmap, vaddr_t va)
1027 #endif
1028 {
1029 	UVMHIST_FUNC(__func__);
1030 	UVMHIST_CALLARGS(pmaphist, "pm %#jx va %#jx", (uintptr_t)pmap,
1031 	    (uintptr_t)va, 0, 0);
1032 
1033 #if defined(PMAP_HWPAGEWALKER)
1034 	pmap_pdetab_t *ptb = pmap->pm_pdetab;
1035 	UVMHIST_LOG(pmaphist, "pm_pdetab %#jx", (uintptr_t)ptb, 0, 0, 0);
1036 #endif
1037 #if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
1038 	vaddr_t segtab_mask = PMAP_PDETABSIZE - 1;
1039 #ifdef _LP64
1040 	for (size_t segshift = XSEGSHIFT;
1041 	    segshift > SEGSHIFT;
1042 	    segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
1043 		pd_entry_t * const pde_p =
1044 		    &ptb->pde_pde[(va >> segshift) & segtab_mask];
1045 		pd_entry_t opde = *pde_p;
1046 
1047 		UVMHIST_LOG(pmaphist,
1048 		    "ptb %#jx segshift %jd pde_p %#jx opde %#jx",
1049 		    ptb, segshift, pde_p, opde);
1050 
1051 		if (__predict_false(!pte_pde_valid_p(opde))) {
1052 			ptb = pmap_pdetab_alloc(pmap);
1053 			pd_entry_t npde = pte_pde_pdetab(
1054 			    pmap_md_direct_mapped_vaddr_to_paddr((vaddr_t)ptb),
1055 			    pmap == pmap_kernel());
1056 			opde = pte_pde_cas(pde_p, opde, npde);
1057 			if (__predict_false(pte_pde_valid_p(opde))) {
1058 				const vaddr_t kva = (vaddr_t)ptb;
1059 				UVMHIST_LOG(pmapxtabhist, "about to detach",
1060 				    0, 0, 0, 0);
1061 				pmap_page_detach(pmap, &pmap->pm_pdetab_list,
1062 				    kva);
1063 				pmap_pdetab_free(ptb);
1064 			} else {
1065 				opde = npde;
1066 			}
1067 		}
1068 		ptb = pmap_pde_to_pdetab(opde);
1069 		UVMHIST_LOG(pmaphist, "opde %#jx ptb %#jx", opde, ptb, 0, 0);
1070 	}
1071 #elif defined(XSEGSHIFT)
1072 	size_t segshift = XSEGSHIFT;
1073 
1074 	pd_entry_t opde = ptb->pde_pde[(va >> segshift) & segtab_mask];
1075 	KASSERT(pte_pde_valid_p(opde));
1076 	ptb = pmap_pde_to_pdetab(opde);
1077 	segtab_mask = NSEGPG - 1;
1078 #endif /* _LP64 */
1079 	const size_t idx = (va >> SEGSHIFT) & segtab_mask;
1080 
1081 	UVMHIST_LOG(pmaphist, "... returning %#jx (idx %jd)", (uintptr_t)&ptb->pde_pde[idx], idx, 0, 0);
1082 
1083 	return &ptb->pde_pde[idx];
1084 #else /* PMAP_HWPAGEWALKER && PMAP_MAP_PDETABPAGE */
1085 	pmap_segtab_t *stb = pmap->pm_segtab;
1086 	vaddr_t segtab_mask = PMAP_SEGTABSIZE - 1;
1087 #ifdef _LP64
1088 	for (size_t segshift = XSEGSHIFT;
1089 	    segshift > SEGSHIFT;
1090 	    segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
1091 		size_t idx = (va >> segshift) & segtab_mask;
1092 		pmap_segtab_t ** const stb_p = &stb->seg_seg[idx];
1093 #if defined(PMAP_HWPAGEWALKER)
1094 		pd_entry_t * const ptb_p = &ptb->pde_pde[idx];
1095 #endif	/* PMAP_HWPAGEWALKER */
1096 		if (__predict_false((stb = *stb_p) == NULL)) {
1097 			stb = pmap_segtab_alloc(pmap);
1098 #ifdef MULTIPROCESSOR
1099 			pmap_segtab_t *ostb = atomic_cas_ptr(stb_p, NULL, stb);
1100 			if (__predict_false(ostb != NULL)) {
1101 				const vaddr_t kva = (vaddr_t)stb;
1102 				UVMHIST_LOG(pmapxtabhist, "about to detach",
1103 				    0, 0, 0, 0);
1104 				pmap_page_detach(pmap, &pmap->pm_segtab_list,
1105 				    kva);
1106 				pmap_segtab_free(stb);
1107 				stb = ostb;
1108 			}
1109 #else
1110 			*stb_p = stb;
1111 #endif /* MULTIPROCESSOR */
1112 		}
1113 	}
1114 #elif defined(PMAP_HWPAGEWALKER)
1115 	pd_entry_t opde = ptb->pde_pde[(va >> XSEGSHIFT) & segtab_mask];
1116 	KASSERT(pte_pde_valid_p(opde));
1117 	ptb = pmap_pde_to_pdetab(opde);
1118 	segtab_mask = NSEGPG - 1;
1119 
1120 #endif /* _LP64 */
1121 	size_t idx = (va >> SEGSHIFT) & segtab_mask;
1122 #if defined(PMAP_HWPAGEWALKER)
1123 #if defined(XSEGSHIFT) && (XSEGSHIFT != SEGSHIFT)
1124 	*pde_p = &ptb->pde_pde[idx];	/* XXX assumed fix; original line was incomplete */
1125 #else /* XSEGSHIFT */
1126 	*pde_p = &ptb->pde_pde[idx];
1127 #endif /* XSEGSHIFT */
1128 #endif /* PMAP_HWPAGEWALKER */
1129 	return &stb->seg_ppg[idx];
1130 #endif
1131 }
1132 
1133 
1134 /*
1135  *	Return a pointer for the pte that corresponds to the specified virtual
1136  *	address (va) in the target physical map, allocating if needed.
1137  */
1138 pt_entry_t *
1139 pmap_pte_reserve(pmap_t pmap, vaddr_t va, int flags)
1140 {
1141 	UVMHIST_FUNC(__func__);
1142 	UVMHIST_CALLARGS(pmaphist, "pm=%#jx va=%#jx flags=%#jx",
1143 	    (uintptr_t)pmap, (uintptr_t)va, flags, 0);
1144 	pmap_ptpage_t *ppg;
1145 	paddr_t pa = 0;
1146 
1147 #if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
1148 	pd_entry_t * const pde_p = pmap_pdetab_reserve(pmap, va);
1149 	ppg = pmap_pde_to_ptpage(*pde_p);
1150 #elif defined(PMAP_HWPAGEWALKER)
1151 	pd_entry_t *pde_p;
1152 	pmap_ptpage_t ** const ppg_p = pmap_segtab_reserve(pmap, va, &pde_p);
1153 	ppg = *ppg_p;
1154 #else
1155 	pmap_ptpage_t ** const ppg_p = pmap_segtab_reserve(pmap, va);
1156 	ppg = *ppg_p;
1157 #endif
1158 
1159 	if (__predict_false(ppg == NULL)) {
1160 		ppg = pmap_ptpage_alloc(pmap, flags, &pa);
1161 		if (__predict_false(ppg == NULL))
1162 			return NULL;
1163 
1164 #if defined(PMAP_HWPAGEWALKER)
1165 		pd_entry_t npde = pte_pde_ptpage(pa, pmap == pmap_kernel());
1166 #endif
1167 #if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
1168 		pd_entry_t opde = pte_pde_cas(pde_p, pte_invalid_pde(), npde);
1169 		if (__predict_false(pte_pde_valid_p(opde))) {
1170 			pmap_ptpage_free(pmap, ppg, __func__);
1171 			ppg = pmap_pde_to_ptpage(opde);
1172 		}
1173 #else
1174 #ifdef MULTIPROCESSOR
1175 		pmap_ptpage_t *oppg = atomic_cas_ptr(ppg_p, NULL, ppg);
1176 		/*
1177 		 * If another thread already allocated the PT page needed for
1178 		 * this va, free the page we just allocated.
1179 		 */
1180 		if (__predict_false(oppg != NULL)) {
1181 			pmap_ptpage_free(pmap, ppg, __func__);
1182 			ppg = oppg;
1183 #if defined(PMAP_HWPAGEWALKER)
1184 		} else {
1185 			pte_pde_set(pde_p, npde);
1186 #endif
1187 		}
1188 #else /* !MULTIPROCESSOR */
1189 		*ppg_p = ppg;
1190 #endif /* MULTIPROCESSOR */
1191 #endif /* PMAP_HWPAGEWALKER && PMAP_MAP_PDETABPAGE */
1192 	}
1193 
1194 	const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);
1195 
1196 	return ppg->ppg_ptes + pte_idx;
1197 }
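/*
 * Illustrative sketch only of a pmap_enter()-style caller: reserve the
 * PTE slot, then store the new PTE.  How "npte" is built and how the TLB
 * is updated are assumptions about the surrounding MD/MI pmap code, not
 * something this file defines.
 */
#if 0
	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
	if (__predict_false(ptep == NULL))
		return ENOMEM;		/* only reachable with PMAP_CANFAIL */

	*ptep = npte;			/* npte built by MD code, e.g. pte_make_enter() */
	pmap_tlb_update_addr(pmap, va, npte, flags);
#endif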
1198