/*	$NetBSD: pmap_segtab.c,v 1.33 2023/07/23 07:25:36 skrll Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.33 2023/07/23 07:25:36 skrll Exp $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm.h>
#include <uvm/pmap/pmap.h>

#if defined(XSEGSHIFT) && XSEGSHIFT == SEGSHIFT
#undef XSEGSHIFT
#undef XSEGLENGTH
#undef NBXSEG
#undef NXSEGPG
#endif

#define MULT_CTASSERT(a,b)	__CTASSERT((a) < (b) || ((a) % (b) == 0))

__CTASSERT(sizeof(pmap_ptpage_t) == NBPG);

#if defined(PMAP_HWPAGEWALKER)
#ifdef _LP64
MULT_CTASSERT(PMAP_PDETABSIZE, NPDEPG);
MULT_CTASSERT(NPDEPG, PMAP_PDETABSIZE);
#endif /* _LP64 */
MULT_CTASSERT(sizeof(pmap_pdetab_t *), sizeof(pd_entry_t));
MULT_CTASSERT(sizeof(pd_entry_t), sizeof(pmap_pdetab_t));

#if 0
#ifdef _LP64
static const bool separate_pdetab_root_p = NPDEPG != PMAP_PDETABSIZE;
#else
static const bool separate_pdetab_root_p = true;
#endif /* _LP64 */
#endif

typedef struct {
        pmap_pdetab_t *free_pdetab0;    /* free list kept locally */
        pmap_pdetab_t *free_pdetab;     /* free list kept locally */
#ifdef DEBUG
        uint32_t nget;
        uint32_t nput;
        uint32_t npage;
#define PDETAB_ADD(n, v)        (pmap_segtab_info.pdealloc.n += (v))
#else
#define PDETAB_ADD(n, v)        ((void) 0)
#endif /* DEBUG */
} pmap_pdetab_alloc_t;
#endif /* PMAP_HWPAGEWALKER */

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
#ifdef _LP64
__CTASSERT(NSEGPG >= PMAP_SEGTABSIZE);
__CTASSERT(NSEGPG % PMAP_SEGTABSIZE == 0);
#endif
__CTASSERT(NBPG >= sizeof(pmap_segtab_t));

typedef struct {
        pmap_segtab_t *free_segtab0;    /* free list kept locally */
        pmap_segtab_t *free_segtab;     /* free list kept locally */
#ifdef DEBUG
        uint32_t nget;
        uint32_t nput;
        uint32_t npage;
#define SEGTAB_ADD(n, v)        (pmap_segtab_info.segalloc.n += (v))
#else
#define SEGTAB_ADD(n, v)        ((void) 0)
#endif
} pmap_segtab_alloc_t;
#endif /* !PMAP_HWPAGEWALKER || !PMAP_MAP_PDETABPAGE */

struct pmap_segtab_info {
#if defined(PMAP_HWPAGEWALKER)
        pmap_pdetab_alloc_t pdealloc;
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
        pmap_segtab_alloc_t segalloc;
#endif
#ifdef PMAP_PPG_CACHE
        struct pgflist ptp_pgflist;     /* Keep a list of idle page tables. */
#endif
} pmap_segtab_info = {
#ifdef PMAP_PPG_CACHE
        .ptp_pgflist = LIST_HEAD_INITIALIZER(pmap_segtab_info.ptp_pgflist),
#endif
};

kmutex_t pmap_segtab_lock __cacheline_aligned;

#ifndef PMAP_HWPAGEWALKER
/*
 * Check that a seg_ppg[] array is empty.
 *
 * This is used when allocating or freeing a pmap_segtab_t.  The stb
 * should be unused -- meaning, none of the seg_ppg[] pointers are
 * non-NULL.  A segtab is expected to be in this state when it is
 * freshly allocated from the pmap pool, when an allocated but unused
 * segtab is given back in the SMP case (two CPUs raced to allocate the
 * same underlying segtab and the loser releases its copy), when a
 * segtab entry is released to the freelist, and, for SMP, when reserve
 * frees a freshly allocated but unused entry.
 */
static void
pmap_check_stb(pmap_segtab_t *stb, const char *caller, const char *why)
{
#ifdef DEBUG
        for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
                if (stb->seg_ppg[i] != NULL) {
#define DEBUG_NOISY
#ifdef DEBUG_NOISY
                        UVMHIST_FUNC(__func__);
                        UVMHIST_CALLARGS(pmapxtabhist, "stb=%#jx",
                            (uintptr_t)stb, 0, 0, 0);
                        for (size_t j = i; j < PMAP_SEGTABSIZE; j++)
                                if (stb->seg_ppg[j] != NULL)
                                        printf("%s: stb->seg_ppg[%zu] = %p\n",
                                            caller, j, stb->seg_ppg[j]);
#endif
                        panic("%s: pm_segtab.seg_ppg[%zu] != 0 (%p): %s",
                            caller, i, stb->seg_ppg[i], why);
                }
        }
#endif
}
#endif /* PMAP_HWPAGEWALKER */

static inline struct vm_page *
pmap_pte_pagealloc(void)
{
        struct vm_page *pg;

        pg = pmap_md_alloc_poolpage(UVM_PGA_ZERO | UVM_PGA_USERESERVE);
        if (pg) {
#ifdef UVM_PAGE_TRKOWN
                pg->owner_tag = NULL;
#endif
                UVM_PAGE_OWN(pg, "pmap-ptp");
        }

        return pg;
}

#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
static vaddr_t
pmap_pde_to_va(pd_entry_t pde)
{
        if (!pte_pde_valid_p(pde))
                return 0;

        paddr_t pa = pte_pde_to_paddr(pde);
        return pmap_md_direct_map_paddr(pa);
}

#ifdef _LP64
static pmap_pdetab_t *
pmap_pde_to_pdetab(pd_entry_t pde)
{

        return (pmap_pdetab_t *)pmap_pde_to_va(pde);
}
#endif

static pmap_ptpage_t *
pmap_pde_to_ptpage(pd_entry_t pde)
{

        return (pmap_ptpage_t *)pmap_pde_to_va(pde);
}
#endif

#ifdef _LP64
__CTASSERT((XSEGSHIFT - SEGSHIFT) % (PGSHIFT-3) == 0);
#endif

static inline pmap_ptpage_t *
pmap_ptpage(struct pmap *pmap, vaddr_t va)
{
#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
        vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
        pmap_pdetab_t *ptb = pmap->pm_pdetab;

        // UVMHIST_LOG(pmaphist, "pm_pdetab %#jx", ptb, 0, 0, 0);

        KASSERTMSG(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va),
            "pmap_kernel: %s, va %#" PRIxVADDR,
            pmap == pmap_kernel() ? "true" : "false",
            pmap == pmap_kernel() ? va : 0);

#ifdef _LP64
        for (size_t segshift = XSEGSHIFT;
            segshift > SEGSHIFT;
            segshift -= PGSHIFT - 3, pdetab_mask = NSEGPG - 1) {
                ptb = pmap_pde_to_pdetab(ptb->pde_pde[(va >> segshift) & pdetab_mask]);
                if (ptb == NULL)
                        return NULL;
        }
#endif
        return pmap_pde_to_ptpage(ptb->pde_pde[(va >> SEGSHIFT) & pdetab_mask]);
#else
        vaddr_t segtab_mask = PMAP_SEGTABSIZE - 1;
        pmap_segtab_t *stb = pmap->pm_segtab;

        KASSERTMSG(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va),
            "pmap %p va %#" PRIxVADDR, pmap, va);
#ifdef _LP64
        for (size_t segshift = XSEGSHIFT;
            segshift > SEGSHIFT;
            segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
                stb = stb->seg_seg[(va >> segshift) & segtab_mask];
                if (stb == NULL)
                        return NULL;
        }
#endif
        return stb->seg_ppg[(va >> SEGSHIFT) & segtab_mask];
#endif
}

#if defined(PMAP_HWPAGEWALKER)
bool
pmap_pdetab_fixup(struct pmap *pmap, vaddr_t va)
{
        struct pmap * const kpm = pmap_kernel();
        pmap_pdetab_t * const kptb = kpm->pm_pdetab;
        pmap_pdetab_t * const uptb = pmap->pm_pdetab;
        size_t idx = PMAP_PDETABSIZE - 1;
#if !defined(PMAP_MAP_PDETABPAGE)
        __CTASSERT(PMAP_PDETABSIZE == PMAP_SEGTABSIZE);
        pmap_segtab_t * const kstb = &pmap_kern_segtab;
        pmap_segtab_t * const ustb = pmap->pm_segtab;
#endif

        // Regardless of how many levels deep this page table is, we only
        // need to verify the first level PDEs match up.
#ifdef XSEGSHIFT
        idx &= va >> XSEGSHIFT;
#else
        idx &= va >> SEGSHIFT;
#endif
        if (uptb->pde_pde[idx] != kptb->pde_pde[idx]) {
                pte_pde_set(&uptb->pde_pde[idx], kptb->pde_pde[idx]);
#if !defined(PMAP_MAP_PDETABPAGE)
                ustb->seg_seg[idx] = kstb->seg_seg[idx];        // copy KVA of PTP
#endif
                return true;
        }
        return false;
}
#endif /* PMAP_HWPAGEWALKER */


static void
pmap_page_attach(pmap_t pmap, vaddr_t kva, struct vm_page *pg,
    struct pglist *pglist, voff_t off)
{
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx kva %#jx pg %#jx list %#jx",
            (uintptr_t)pmap, (uintptr_t)kva, (uintptr_t)pg, (uintptr_t)pglist);

        struct uvm_object * const uobj = &pmap->pm_uobject;
        if (pg == NULL) {
                paddr_t pa;

                bool ok __diagused = pmap_extract(pmap_kernel(), kva, &pa);
                KASSERT(ok);

                pg = PHYS_TO_VM_PAGE(pa);
                KASSERT(pg != NULL);
        }

        UVMHIST_LOG(pmapxtabhist, "kva %#jx uobj %#jx pg %#jx list %#jx",
            (uintptr_t)kva, (uintptr_t)uobj, (uintptr_t)pg, (uintptr_t)pglist);

        pmap_lock(pmap);
        TAILQ_INSERT_TAIL(pglist, pg, pageq.queue);
        uobj->uo_npages++;
        pmap_unlock(pmap);

        /*
         * Now set each vm_page that maps this page to point to the
         * pmap and set the offset to what we want.
         */
        KASSERTMSG(pg->uobject == NULL, "pg %p pg->uobject %p", pg, pg->uobject);
        pg->uobject = uobj;
        pg->offset = off;
}

static struct vm_page *
pmap_page_detach(pmap_t pmap, struct pglist *list, vaddr_t va)
{
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx kva %#jx list %#jx",
            (uintptr_t)pmap, (uintptr_t)va, (uintptr_t)list, 0);

        paddr_t pa;
        bool ok __diagused = pmap_extract(pmap_kernel(), va, &pa);
        KASSERT(ok);

        struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
        struct uvm_object * const uobj = &pmap->pm_uobject;

        UVMHIST_LOG(pmapxtabhist, "kva %#jx uobj %#jx pg %#jx list %#jx",
            (uintptr_t)va, (uintptr_t)uobj, (uintptr_t)pg, (uintptr_t)list);

        KASSERTMSG(pg->uobject == uobj, "pg->uobject %p vs uobj %p",
            pg->uobject, uobj);

        pmap_lock(pmap);
        TAILQ_REMOVE(list, pg, pageq.queue);
        uobj->uo_npages--;
        pmap_unlock(pmap);

        pg->uobject = NULL;
        pg->offset = 0;

        return pg;
}

#ifndef PMAP_PPG_CACHE
static void
pmap_segtab_pagefree(pmap_t pmap, struct pglist *list, vaddr_t kva, size_t size)
{
#ifdef PMAP_MAP_PTEPAGE
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx list %#jx kva %#jx size %#jx",
            (uintptr_t)pmap, (uintptr_t)list, kva, size);
        KASSERT(size == PAGE_SIZE);
        if (size == PAGE_SIZE) {
                UVMHIST_LOG(pmapxtabhist, "about to detach (kva %#jx)",
                    kva, 0, 0, 0);
                uvm_pagefree(pmap_page_detach(pmap, list, kva));
                return;
        }
#endif
        for (size_t i = 0; i < size; i += PAGE_SIZE) {
                (void)pmap_page_detach(pmap, list, kva + i);
        }

        uvm_km_free(kernel_map, kva, size, UVM_KMF_WIRED);
}
#endif

pt_entry_t *
pmap_pte_lookup(pmap_t pmap, vaddr_t va)
{
        pmap_ptpage_t * const ppg = pmap_ptpage(pmap, va);
        if (ppg == NULL)
                return NULL;

        const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);

        return ppg->ppg_ptes + pte_idx;
}
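
/*
 * Illustrative sketch (comment only, not compiled): how the lookup above
 * decomposes a virtual address on a 32-bit (!_LP64) configuration without
 * PMAP_HWPAGEWALKER.  The segment index selects a pmap_ptpage_t from
 * seg_ppg[] and the PTE index selects an entry within that page; _LP64
 * configurations add the XSEGSHIFT level walked by pmap_ptpage().
 *
 *	const size_t seg_idx = (va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1);
 *	const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);
 *	pmap_ptpage_t * const ppg = pmap->pm_segtab->seg_ppg[seg_idx];
 *	pt_entry_t * const ptep =
 *	    (ppg != NULL) ? &ppg->ppg_ptes[pte_idx] : NULL;
 */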


static pmap_ptpage_t *
pmap_ptpage_alloc(pmap_t pmap, int flags, paddr_t *pa_p)
{
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx flags %#jx pa_p %#jx", (uintptr_t)pmap,
            (uintptr_t)flags, (uintptr_t)pa_p, 0);

        pmap_ptpage_t *ppg = NULL;

#ifdef PMAP_MAP_PTEPAGE
        struct vm_page *pg = NULL;
        paddr_t pa;
#ifdef PMAP_PPG_CACHE
        ppg = pmap_pgcache_alloc(&pmap_segtab_info.ppg_flist);
#endif
        if (ppg == NULL) {
                pg = pmap_pte_pagealloc();
                if (pg == NULL) {
                        if (flags & PMAP_CANFAIL)
                                return NULL;
                        panic("%s: cannot allocate page table page ",
                            __func__);
                }
                pa = VM_PAGE_TO_PHYS(pg);
                ppg = (pmap_ptpage_t *)PMAP_MAP_PTEPAGE(pa);
        } else {
                bool ok __diagused = pmap_extract(pmap_kernel(), (vaddr_t)ppg, &pa);
                KASSERT(ok);
        }

        UVMHIST_LOG(pmapxtabhist, "about to attach", 0, 0, 0, 0);
        pmap_page_attach(pmap, (vaddr_t)ppg, pg, &pmap->pm_ppg_list, 0);

        *pa_p = pa;
#else
        vaddr_t kva = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
            UVM_KMF_WIRED | UVM_KMF_WAITVA
            | (flags & PMAP_CANFAIL ? UVM_KMF_CANFAIL : 0));
        if (kva == 0) {
                if (flags & PMAP_CANFAIL)
                        return NULL;
                panic("%s: cannot allocate page table page", __func__);
        }
        UVMHIST_LOG(pmapxtabhist, "about to attach", 0, 0, 0, 0);
        pmap_page_attach(pmap, kva, NULL, &pmap->pm_ppg_list, 0);
        ppg = (pmap_ptpage_t *)kva;
#endif

        UVMHIST_LOG(pmapxtabhist, "... ppg %#jx", (uintptr_t)ppg, 0, 0, 0);

        return ppg;
}

static void
pmap_ptpage_free(pmap_t pmap, pmap_ptpage_t *ppg, const char *caller)
{
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx va %#jx", (uintptr_t)pmap,
            (uintptr_t)ppg, 0, 0);

        const vaddr_t kva = (vaddr_t)ppg;
        /*
         * All pte arrays should be page aligned.
         */
        if ((kva & PAGE_MASK) != 0) {
                panic("%s: pte entry at %p not page aligned", caller, ppg);
        }

#ifdef DEBUG
        for (size_t j = 0; j < NPTEPG; j++) {
                if (ppg->ppg_ptes[j] != 0) {
                        UVMHIST_LOG(pmapxtabhist,
                            "pte entry %#jx not 0 (%#jx)",
                            (uintptr_t)&ppg->ppg_ptes[j],
                            (uintptr_t)ppg->ppg_ptes[j], 0, 0);
                        for (size_t i = j + 1; i < NPTEPG; i++)
                                if (ppg->ppg_ptes[i] != 0)
                                        UVMHIST_LOG(pmapxtabhist,
                                            "pte[%zu] = %#"PRIxPTE,
                                            i, ppg->ppg_ptes[i], 0, 0);

                        panic("%s: pte entry at %p not 0 (%#" PRIxPTE ")",
                            __func__, &ppg->ppg_ptes[j],
                            ppg->ppg_ptes[j]);
                }
        }
#endif
        //pmap_md_vca_clean(pg, (vaddr_t)ppg, NBPG);
#ifdef PMAP_PPG_CACHE
        UVMHIST_LOG(pmapxtabhist, "about to detach", 0, 0, 0, 0);
        pmap_page_detach(pmap, &pmap->pm_ppg_list, kva);
        pmap_segtab_pagecache(&pmap_segtab_info.ppg_flist, ppg);
#else
        pmap_segtab_pagefree(pmap, &pmap->pm_ppg_list, kva, PAGE_SIZE);
#endif /* PMAP_PPG_CACHE */
}


#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)

static pmap_pdetab_t *
pmap_pdetab_alloc(struct pmap *pmap)
{
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);

        pmap_pdetab_t *ptb;
#ifdef UVMHIST
        bool found_on_freelist = false;
#endif

 again:
        mutex_spin_enter(&pmap_segtab_lock);
        UVMHIST_LOG(pmapxtabhist, "free_pdetab %#jx",
            (uintptr_t)pmap_segtab_info.pdealloc.free_pdetab, 0, 0, 0);
        if (__predict_true((ptb = pmap_segtab_info.pdealloc.free_pdetab) != NULL)) {
                pmap_segtab_info.pdealloc.free_pdetab = ptb->pde_next;

                UVMHIST_LOG(pmapxtabhist, "freelist ptb=%#jx",
                    (uintptr_t)ptb, 0, 0, 0);

                PDETAB_ADD(nget, 1);
                ptb->pde_next = NULL;
#ifdef UVMHIST
                found_on_freelist = true;
#endif
        }
        mutex_spin_exit(&pmap_segtab_lock);

        struct vm_page *ptb_pg = NULL;
        if (__predict_false(ptb == NULL)) {
                ptb_pg = pmap_pte_pagealloc();

                UVMHIST_LOG(pmapxtabhist, "ptb_pg=%#jx",
                    (uintptr_t)ptb_pg, 0, 0, 0);
                if (__predict_false(ptb_pg == NULL)) {
                        /*
                         * XXX What else can we do?  Could we deadlock here?
                         */
                        uvm_wait("pdetab");
                        goto again;
                }

                UVMHIST_LOG(pmapxtabhist, "ptb_pg=%#jx 2",
                    (uintptr_t)ptb_pg, 0, 0, 0);
                PDETAB_ADD(npage, 1);
                const paddr_t ptb_pa = VM_PAGE_TO_PHYS(ptb_pg);
                UVMHIST_LOG(pmapxtabhist, "ptb_pa=%#jx", (uintptr_t)ptb_pa, 0, 0, 0);
                ptb = (pmap_pdetab_t *)PMAP_MAP_PDETABPAGE(ptb_pa);
                UVMHIST_LOG(pmapxtabhist, "new ptb=%#jx", (uintptr_t)ptb, 0,
                    0, 0);

                if (pte_invalid_pde() != 0) {
                        for (size_t i = 0; i < NPDEPG; i++) {
                                ptb->pde_pde[i] = pte_invalid_pde();
                        }
                }
        }

        UVMHIST_LOG(pmapxtabhist, "about to attach", 0, 0, 0, 0);
        pmap_page_attach(pmap, (vaddr_t)ptb, ptb_pg, &pmap->pm_pdetab_list, 0);

        UVMHIST_LOG(pmapxtabhist, "... ptb %#jx found on freelist %d",
            (uintptr_t)ptb, found_on_freelist, 0, 0);

        return ptb;
}


#else
/*
 * Create and return a physical map.
 *
 * If the size specified for the map
 * is zero, the map is an actual physical
 * map, and may be referenced by the
 * hardware.
 *
 * If the size specified is non-zero,
 * the map will be used in software only, and
 * is bounded by that size.
 */
static pmap_segtab_t *
pmap_segtab_alloc(struct pmap *pmap)
{
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);

        pmap_segtab_t *stb;
        bool found_on_freelist = false;

 again:
        mutex_spin_enter(&pmap_segtab_lock);
        if (__predict_true((stb = pmap_segtab_info.segalloc.free_segtab) != NULL)) {
                pmap_segtab_info.segalloc.free_segtab = stb->seg_next;
                SEGTAB_ADD(nget, 1);
                stb->seg_next = NULL;
                found_on_freelist = true;
                UVMHIST_LOG(pmapxtabhist, "freelist stb=%#jx",
                    (uintptr_t)stb, 0, 0, 0);
        }
        mutex_spin_exit(&pmap_segtab_lock);

        struct vm_page *stb_pg = NULL;
        if (__predict_false(stb == NULL)) {
                stb_pg = pmap_pte_pagealloc();

                if (__predict_false(stb_pg == NULL)) {
                        /*
                         * XXX What else can we do?  Could we deadlock here?
                         */
                        uvm_wait("segtab");
                        goto again;
                }
                SEGTAB_ADD(npage, 1);
                const paddr_t stb_pa = VM_PAGE_TO_PHYS(stb_pg);

                stb = (pmap_segtab_t *)PMAP_MAP_SEGTABPAGE(stb_pa);
                UVMHIST_LOG(pmapxtabhist, "new stb=%#jx", (uintptr_t)stb, 0,
                    0, 0);
#if 0
                CTASSERT(NBPG / sizeof(*stb) == 1);
                const size_t n = NBPG / sizeof(*stb);
                if (n > 1) {
                        /*
                         * link all the segtabs in this page together
                         */
                        for (size_t i = 1; i < n - 1; i++) {
                                stb[i].seg_next = &stb[i + 1];
                        }
                        /*
                         * Now link the new segtabs into the free segtab list.
                         */
                        mutex_spin_enter(&pmap_segtab_lock);
                        stb[n - 1].seg_next = pmap_segtab_info.segalloc.free_segtab;
                        pmap_segtab_info.segalloc.free_segtab = stb + 1;
                        SEGTAB_ADD(nput, n - 1);
                        mutex_spin_exit(&pmap_segtab_lock);
                }
#endif
        }

        UVMHIST_LOG(pmapxtabhist, "about to attach", 0, 0, 0, 0);
        pmap_page_attach(pmap, (vaddr_t)stb, stb_pg, &pmap->pm_segtab_list, 0);

        pmap_check_stb(stb, __func__,
            found_on_freelist ? "from free list" : "allocated");

        UVMHIST_LOG(pmapxtabhist, "... stb %#jx found on freelist %zu",
            (uintptr_t)stb, found_on_freelist, 0, 0);

        return stb;
}
#endif

#if defined(PMAP_HWPAGEWALKER)
static void
pmap_pdetab_free(pmap_pdetab_t *ptb)
{
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmaphist, "ptb %#jx", (uintptr_t)ptb, 0, 0, 0);
        /*
         * Insert the pdetab into the pdetab freelist.
         */
        mutex_spin_enter(&pmap_segtab_lock);
        ptb->pde_next = pmap_segtab_info.pdealloc.free_pdetab;
        pmap_segtab_info.pdealloc.free_pdetab = ptb;
        PDETAB_ADD(nput, 1);
        mutex_spin_exit(&pmap_segtab_lock);

}
#endif


#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
/*
 * Insert the segtab into the segtab freelist.
 */
static void
pmap_segtab_free(pmap_segtab_t *stb)
{
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmaphist, "stb %#jx", (uintptr_t)stb, 0, 0, 0);

        /*
         * Insert the segtab into the segtab freelist.
         */
        mutex_spin_enter(&pmap_segtab_lock);
        stb->seg_next = pmap_segtab_info.segalloc.free_segtab;
        pmap_segtab_info.segalloc.free_segtab = stb;
        SEGTAB_ADD(nput, 1);
        mutex_spin_exit(&pmap_segtab_lock);
}
#endif

#if defined(PMAP_HWPAGEWALKER)
static void
pmap_pdetab_release(pmap_t pmap, pmap_pdetab_t **ptb_p, bool free_ptb,
    vaddr_t va, vsize_t vinc)
{
        const vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
        pmap_pdetab_t *ptb = *ptb_p;

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx ptb_p %#jx ptb %#jx free %jd",
            (uintptr_t)pmap, (uintptr_t)ptb_p, (uintptr_t)ptb, free_ptb);
        UVMHIST_LOG(pmapxtabhist, " va=%#jx vinc=%#jx",
            (uintptr_t)va, (uintptr_t)vinc, 0, 0);

        for (size_t i = (va / vinc) & pdetab_mask;
            i < PMAP_PDETABSIZE;
            i++, va += vinc) {
#ifdef _LP64
                if (vinc > NBSEG) {
                        if (pte_pde_valid_p(ptb->pde_pde[i])) {
                                pmap_pdetab_t *nptb =
                                    pmap_pde_to_pdetab(ptb->pde_pde[i]);
                                UVMHIST_LOG(pmapxtabhist,
                                    " va %#jx ptp->pde_pde[%jd] (*%#jx) = %#jx "
                                    "recursing", va, i, &ptb->pde_pde[i],
                                    ptb->pde_pde[i]);
                                pmap_pdetab_release(pmap, &nptb, true,
                                    va, vinc / NPDEPG);
                                ptb->pde_pde[i] = pte_invalid_pde();
                                KASSERT(nptb == NULL);
                        }
                        continue;
                }
#endif
                KASSERT(vinc == NBSEG);

                /* get pointer to PT page */
                pmap_ptpage_t *ppg = pmap_pde_to_ptpage(ptb->pde_pde[i]);
                UVMHIST_LOG(pmapxtabhist,
                    " va %#jx ptb->pde_pde[%jd] (*%#jx) = %#jx", va, i,
                    (uintptr_t)&ptb->pde_pde[i], ptb->pde_pde[i]);
                if (ppg == NULL)
                        continue;

                UVMHIST_LOG(pmapxtabhist, " zeroing tab (%#jx)[%jd] (%#jx)",
                    (uintptr_t)ptb->pde_pde, i, (uintptr_t)&ptb->pde_pde[i], 0);

                ptb->pde_pde[i] = pte_invalid_pde();

                pmap_ptpage_free(pmap, ppg, __func__);
        }

        if (free_ptb) {
                UVMHIST_LOG(pmapxtabhist, " ptbp %#jx ptb %#jx",
                    (uintptr_t)ptb_p, (uintptr_t)ptb, 0, 0);
                const vaddr_t kva = (vaddr_t)ptb;
                UVMHIST_LOG(pmapxtabhist, "about to detach", 0, 0, 0, 0);
                pmap_page_detach(pmap, &pmap->pm_pdetab_list, kva);
                pmap_pdetab_free(ptb);
                *ptb_p = NULL;
        }
}
#endif

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
static void
pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stb_p, bool free_stb,
    pte_callback_t callback, uintptr_t flags, vaddr_t va, vsize_t vinc)
{
        pmap_segtab_t *stb = *stb_p;

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmapxtabhist, "pm=%#jx stb_p=%#jx free=%jd",
            (uintptr_t)pmap, (uintptr_t)stb, free_stb, 0);
        UVMHIST_LOG(pmapxtabhist, " callback=%#jx flags=%#jx va=%#jx vinc=%#jx",
            (uintptr_t)callback, flags, (uintptr_t)va, (uintptr_t)vinc);

        for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
            i < PMAP_SEGTABSIZE;
            i++, va += vinc) {
#ifdef _LP64
                if (vinc > NBSEG) {
                        if (stb->seg_seg[i] != NULL) {
                                UVMHIST_LOG(pmapxtabhist,
                                    " recursing %jd", i, 0, 0, 0);
                                pmap_segtab_release(pmap, &stb->seg_seg[i],
                                    true, callback, flags, va, vinc / NSEGPG);
                                KASSERT(stb->seg_seg[i] == NULL);
                        }
                        continue;
                }
#endif
                KASSERT(vinc == NBSEG);

                /* get pointer to segment map */
                pmap_ptpage_t *ppg = stb->seg_ppg[i];
                if (ppg == NULL)
                        continue;

                /*
                 * If our caller wants a callback, do so.
                 */
                if (callback != NULL) {
                        (*callback)(pmap, va, va + vinc, ppg->ppg_ptes, flags);
                }
                pmap_ptpage_free(pmap, ppg, __func__);
                stb->seg_ppg[i] = NULL;
                UVMHIST_LOG(pmapxtabhist, " zeroing tab[%jd]", i, 0, 0, 0);
        }

        if (free_stb) {
                pmap_check_stb(stb, __func__,
                    vinc == NBSEG ? "release seg" : "release xseg");

                const vaddr_t kva = (vaddr_t)stb;
                UVMHIST_LOG(pmapxtabhist, "about to detach", 0, 0, 0, 0);
                pmap_page_detach(pmap, &pmap->pm_segtab_list, kva);
                pmap_segtab_free(stb);
                *stb_p = NULL;
        }
}
#endif



/*
 * Allocate the top segment table for the pmap.
 */
void
pmap_segtab_init(pmap_t pmap)
{
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmaphist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
        /*
         * Constantly converting from extracted PA to VA is somewhat expensive
         * for systems with hardware page walkers and without an inexpensive
         * way to access arbitrary virtual addresses, so we allocate an extra
         * root segtab so that it can contain non-virtual addresses.
         */
        pmap->pm_segtab = pmap_segtab_alloc(pmap);
#endif
#if defined(PMAP_HWPAGEWALKER)
        pmap->pm_pdetab = pmap_pdetab_alloc(pmap);
        pmap_md_pdetab_init(pmap);
#endif
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_segtab_destroy(pmap_t pmap, pte_callback_t func, uintptr_t flags)
{
        KASSERT(pmap != pmap_kernel());
#ifdef _LP64
        const vsize_t vinc = NBXSEG;
#else
        const vsize_t vinc = NBSEG;
#endif

#if defined(PMAP_HWPAGEWALKER)
        if (pmap->pm_pdetab != NULL) {
                pmap_md_pdetab_fini(pmap);
                pmap_pdetab_release(pmap, &pmap->pm_pdetab,
                    true, pmap->pm_minaddr, vinc);
        }
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
        if (pmap->pm_segtab != NULL) {
                pmap_segtab_release(pmap, &pmap->pm_segtab,
                    func == NULL, func, flags, pmap->pm_minaddr, vinc);
        }
#endif

#if defined(PMAP_HWPAGEWALKER)
#if !defined(PMAP_MAP_PDETABPAGE)
        KASSERT((pmap->pm_segtab == NULL) == (pmap->pm_pdetab == NULL));
#endif
        KASSERT(pmap->pm_pdetab == NULL);
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
        KASSERT(pmap->pm_segtab == NULL);
#endif

}

/*
 * Make a new pmap (vmspace) active for the given process.
 */
void
pmap_segtab_activate(struct pmap *pm, struct lwp *l)
{
        if (l == curlwp) {
                KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap);
                pmap_md_xtab_activate(pm, l);
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
                struct cpu_info * const ci = l->l_cpu;
                if (pm == pmap_kernel()) {
                        ci->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
#ifdef _LP64
                        ci->ci_pmap_user_seg0tab = PMAP_INVALID_SEGTAB_ADDRESS;
#endif
                } else {
                        ci->ci_pmap_user_segtab = pm->pm_segtab;
#ifdef _LP64
                        ci->ci_pmap_user_seg0tab = pm->pm_segtab->seg_seg[0];
#endif
                }
#endif
        }
}

void
pmap_segtab_deactivate(pmap_t pm)
{
        pmap_md_xtab_deactivate(pm);

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
        curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
#ifdef _LP64
        curcpu()->ci_pmap_user_seg0tab = NULL;
#endif
#endif
}

/*
 * Act on the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly rounded to
 * the page size.
 */
void
pmap_pte_process(pmap_t pmap, vaddr_t sva, vaddr_t eva,
    pte_callback_t callback, uintptr_t flags)
{
#if 0
        printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n",
            __func__, pmap, sva, eva, callback, flags);
#endif
        while (sva < eva) {
                vaddr_t lastseg_va = pmap_trunc_seg(sva) + NBSEG;
                if (lastseg_va == 0 || lastseg_va > eva)
                        lastseg_va = eva;

                /*
                 * If VA belongs to an unallocated segment,
                 * skip to the next segment boundary.
                 */
                pt_entry_t * const ptep = pmap_pte_lookup(pmap, sva);
                if (ptep != NULL) {
                        /*
                         * Callback to deal with the ptes for this segment.
                         */
                        (*callback)(pmap, sva, lastseg_va, ptep, flags);
                }
                /*
                 * In theory we could release pages with no entries,
                 * but that takes more effort than we want here.
                 */
                sva = lastseg_va;
        }
}
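
/*
 * Minimal sketch (comment only) of the callback shape implied by the call
 * above: for each allocated segment overlapping [sva, eva) the callback
 * receives the pmap, the sub-range covered by that segment, a pointer to
 * the PTE for the first page of that sub-range, and the caller-supplied
 * flags.  The body is hypothetical and only shows how such a callback
 * would walk the contiguous PTEs of one segment.
 *
 *	static void
 *	example_pte_callback(pmap_t pm, vaddr_t sva, vaddr_t eva,
 *	    pt_entry_t *ptep, uintptr_t flags)
 *	{
 *		for (; sva < eva; sva += PAGE_SIZE, ptep++) {
 *			// inspect or update *ptep for the page at sva
 *		}
 *	}
 */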

#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
static pd_entry_t *
pmap_pdetab_reserve(struct pmap *pmap, vaddr_t va)
#elif defined(PMAP_HWPAGEWALKER)
static pmap_ptpage_t **
pmap_segtab_reserve(struct pmap *pmap, vaddr_t va, pd_entry_t **pde_p)
#else
static pmap_ptpage_t **
pmap_segtab_reserve(struct pmap *pmap, vaddr_t va)
#endif
{
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmaphist, "pm %#jx va %#jx", (uintptr_t)pmap,
            (uintptr_t)va, 0, 0);

#if defined(PMAP_HWPAGEWALKER)
        pmap_pdetab_t *ptb = pmap->pm_pdetab;
        UVMHIST_LOG(pmaphist, "pm_pdetab %#jx", (uintptr_t)ptb, 0, 0, 0);
#endif
#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
        vaddr_t segtab_mask = PMAP_PDETABSIZE - 1;
#ifdef _LP64
        for (size_t segshift = XSEGSHIFT;
            segshift > SEGSHIFT;
            segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
                pd_entry_t * const pde_p =
                    &ptb->pde_pde[(va >> segshift) & segtab_mask];
                pd_entry_t opde = *pde_p;

                UVMHIST_LOG(pmaphist,
                    "ptb %#jx segshift %jd pde_p %#jx opde %#jx",
                    ptb, segshift, pde_p, opde);

                if (__predict_false(!pte_pde_valid_p(opde))) {
                        ptb = pmap_pdetab_alloc(pmap);
                        pd_entry_t npde = pte_pde_pdetab(
                            pmap_md_direct_mapped_vaddr_to_paddr((vaddr_t)ptb),
                            pmap == pmap_kernel());
                        opde = pte_pde_cas(pde_p, opde, npde);
                        if (__predict_false(pte_pde_valid_p(opde))) {
                                const vaddr_t kva = (vaddr_t)ptb;
                                UVMHIST_LOG(pmapxtabhist, "about to detach",
                                    0, 0, 0, 0);
                                pmap_page_detach(pmap, &pmap->pm_pdetab_list,
                                    kva);
                                pmap_pdetab_free(ptb);
                        } else {
                                opde = npde;
                        }
                }
                ptb = pmap_pde_to_pdetab(opde);
                UVMHIST_LOG(pmaphist, "opde %#jx ptb %#jx", opde, ptb, 0, 0);
        }
#elif defined(XSEGSHIFT)
        size_t segshift = XSEGSHIFT;

        pd_entry_t opde = ptb->pde_pde[(va >> segshift) & segtab_mask];
        KASSERT(pte_pde_valid_p(opde));
        ptb = pmap_pde_to_pdetab(opde);
        segtab_mask = NSEGPG - 1;
#endif /* _LP64 */
        const size_t idx = (va >> SEGSHIFT) & segtab_mask;

        UVMHIST_LOG(pmaphist, "... returning %#jx (idx %jd)",
            (uintptr_t)&ptb->pde_pde[idx], idx, 0, 0);

        return &ptb->pde_pde[idx];
#else /* PMAP_HWPAGEWALKER && PMAP_MAP_PDETABPAGE */
        pmap_segtab_t *stb = pmap->pm_segtab;
        vaddr_t segtab_mask = PMAP_SEGTABSIZE - 1;
#ifdef _LP64
        for (size_t segshift = XSEGSHIFT;
            segshift > SEGSHIFT;
            segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
                size_t idx = (va >> segshift) & segtab_mask;
                pmap_segtab_t ** const stb_p = &stb->seg_seg[idx];
#if defined(PMAP_HWPAGEWALKER)
                pmap_pdetab_t ** const ptb_p = &ptb->pde_pde[idx];
#endif /* PMAP_HWPAGEWALKER */
                if (__predict_false((stb = *stb_p) == NULL)) {
                        stb = pmap_segtab_alloc(pmap);
#ifdef MULTIPROCESSOR
                        pmap_segtab_t *ostb = atomic_cas_ptr(stb_p, NULL, stb);
                        if (__predict_false(ostb != NULL)) {
                                const vaddr_t kva = (vaddr_t)stb;
                                UVMHIST_LOG(pmapxtabhist, "about to detach",
                                    0, 0, 0, 0);
                                pmap_page_detach(pmap, &pmap->pm_segtab_list,
                                    kva);
                                pmap_segtab_free(stb);
                                stb = ostb;
                        }
#else
                        *stb_p = stb;
#endif /* MULTIPROCESSOR */
                }
        }
#elif defined(PMAP_HWPAGEWALKER)
        pmap_segtab_t opde = ptb->pde_pde[(va >> segshift) & segtab_mask];
        KASSERT(pte_pde_valid_p(opde));
        ptb = pmap_pde_to_pdetab(opde);
        segtab_mask = NSEGPG - 1;

#endif /* _LP64 */
        size_t idx = (va >> SEGSHIFT) & segtab_mask;
#if defined(PMAP_HWPAGEWALKER)
#if defined(XSEGSHIFT) && (XSEGSHIFT != SEGSHIFT)
        *pte_p = &pmap->pm_segtab
#else /* XSEGSHIFT */
        *pde_p = &ptb->pde_pde[idx];
#endif /* XSEGSHIFT */
#endif /* PMAP_HWPAGEWALKER */
        return &stb->seg_ppg[idx];
#endif
}


/*
 * Return a pointer for the pte that corresponds to the specified virtual
 * address (va) in the target physical map, allocating if needed.
 */
pt_entry_t *
pmap_pte_reserve(pmap_t pmap, vaddr_t va, int flags)
{
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(pmaphist, "pm=%#jx va=%#jx flags=%#jx",
            (uintptr_t)pmap, (uintptr_t)va, flags, 0);
        pmap_ptpage_t *ppg;
        paddr_t pa = 0;

#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
        pd_entry_t * const pde_p = pmap_pdetab_reserve(pmap, va);
        ppg = pmap_pde_to_ptpage(*pde_p);
#elif defined(PMAP_HWPAGEWALKER)
        pd_entry_t *pde_p;
        pmap_ptpage_t ** const ppg_p = pmap_segtab_reserve(pmap, va, &pde_p);
        ppg = *ppg_p;
#else
        pmap_ptpage_t ** const ppg_p = pmap_segtab_reserve(pmap, va);
        ppg = *ppg_p;
#endif

        if (__predict_false(ppg == NULL)) {
                ppg = pmap_ptpage_alloc(pmap, flags, &pa);
                if (__predict_false(ppg == NULL))
                        return NULL;

#if defined(PMAP_HWPAGEWALKER)
                pd_entry_t npde = pte_pde_ptpage(pa, pmap == pmap_kernel());
#endif
#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
                pd_entry_t opde = pte_pde_cas(pde_p, pte_invalid_pde(), npde);
                if (__predict_false(pte_pde_valid_p(opde))) {
                        pmap_ptpage_free(pmap, ppg, __func__);
                        ppg = pmap_pde_to_ptpage(opde);
                }
#else
#ifdef MULTIPROCESSOR
                pmap_ptpage_t *oppg = atomic_cas_ptr(ppg_p, NULL, ppg);
                /*
                 * If another thread allocated the segtab needed for this va,
                 * free the page we just allocated.
                 */
                if (__predict_false(oppg != NULL)) {
                        pmap_ptpage_free(pmap, ppg, __func__);
                        ppg = oppg;
#if defined(PMAP_HWPAGEWALKER)
                } else {
                        pte_pde_set(pde_p, npde);
#endif
                }
#else /* !MULTIPROCESSOR */
                *ppg_p = ppg;
#endif /* MULTIPROCESSOR */
#endif /* PMAP_HWPAGEWALKER && PMAP_MAP_PDETABPAGE */
        }

        const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);

        return ppg->ppg_ptes + pte_idx;
}
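
/*
 * Hypothetical caller sketch (comment only): pmap_pte_reserve() returns
 * NULL only when the caller passes PMAP_CANFAIL and the page table page
 * allocation fails; otherwise pmap_ptpage_alloc() panics instead of
 * returning.  A typical enter path would therefore look like:
 *
 *	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
 *	if (ptep == NULL) {
 *		KASSERT(flags & PMAP_CANFAIL);
 *		return ENOMEM;	// caller chooses how to recover
 *	}
 *	// ... construct and store the new PTE through ptep ...
 */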