/*	$NetBSD: pmap_segtab.c,v 1.11 2019/10/20 07:22:51 skrll Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.11 2019/10/20 07:22:51 skrll Exp $");

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidation or protection-reduction
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and as to when physical maps must be made correct.
 */

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

CTASSERT(NBPG >= sizeof(pmap_segtab_t));
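/*
 * The assertion above is what lets pmap_segtab_alloc() below carve a
 * whole pool page into segtabs: at least one must fit in a page, and
 * any surplus beyond the first is threaded onto the free list.
 */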

struct pmap_segtab_info {
	pmap_segtab_t *free_segtab;	/* free list kept locally */
#ifdef DEBUG
	uint32_t nget_segtab;
	uint32_t nput_segtab;
	uint32_t npage_segtab;
#define	SEGTAB_ADD(n, v)	(pmap_segtab_info.n ## _segtab += (v))
#else
#define	SEGTAB_ADD(n, v)	((void) 0)
#endif
#ifdef PMAP_PTP_CACHE
	struct pgflist ptp_pgflist;	/* Keep a list of idle page tables. */
#endif
} pmap_segtab_info = {
#ifdef PMAP_PTP_CACHE
	.ptp_pgflist = LIST_HEAD_INITIALIZER(pmap_segtab_info.ptp_pgflist),
#endif
};

kmutex_t pmap_segtab_lock __cacheline_aligned;
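/*
 * pmap_segtab_lock guards pmap_segtab_info.free_segtab and, when
 * PMAP_PTP_CACHE is defined, ptp_pgflist.  It is only ever taken with
 * mutex_spin_enter(), so nothing may sleep while holding it.
 */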

static void
pmap_check_stp(pmap_segtab_t *stp, const char *caller, const char *why)
{
#ifdef DEBUG
	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
		if (stp->seg_tab[i] != 0) {
#ifdef DEBUG_NOISY
			for (size_t j = i; j < PMAP_SEGTABSIZE; j++)
				printf("%s: pm_segtab.seg_tab[%zu] = %p\n",
				    caller, j, stp->seg_tab[j]);
#endif
			panic("%s: pm_segtab.seg_tab[%zu] != 0 (%p): %s",
			    caller, i, stp->seg_tab[i], why);
		}
	}
#endif
}

static inline struct vm_page *
pmap_pte_pagealloc(void)
{
	struct vm_page *pg;

	pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_ZERO|UVM_PGA_USERESERVE);
	if (pg) {
#ifdef UVM_PAGE_TRKOWN
		pg->owner_tag = NULL;
#endif
		UVM_PAGE_OWN(pg, "pmap-ptp");
	}

	return pg;
}
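
/*
 * A note on the allocation flags above: UVM_PGA_ZERO returns a
 * zero-filled page, so a fresh page-table page starts with every PTE
 * invalid and needs no explicit memset; UVM_PGA_USERESERVE permits
 * dipping into the reserve pool, since failure here stalls fault
 * handling.
 */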

static inline pt_entry_t *
pmap_segmap(struct pmap *pmap, vaddr_t va)
{
	pmap_segtab_t *stp = pmap->pm_segtab;
	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va),
	    "pmap %p va %#" PRIxVADDR, pmap, va);
#ifdef _LP64
	stp = stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
	if (stp == NULL)
		return NULL;
#endif

	return stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
}

pt_entry_t *
pmap_pte_lookup(pmap_t pmap, vaddr_t va)
{
	pt_entry_t *pte = pmap_segmap(pmap, va);
	if (pte == NULL)
		return NULL;

	return pte + ((va >> PGSHIFT) & (NPTEPG - 1));
}
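
/*
 * Shape of the lookup above (the shift values are machine-dependent;
 * the figures here are a hypothetical sketch assuming 4 KB pages with
 * 1024 PTEs per page-table page, i.e. PGSHIFT = 12, SEGSHIFT = 22):
 *
 *	bits  0..11  offset within the page
 *	bits 12..21  PTE index:     (va >> PGSHIFT) & (NPTEPG - 1)
 *	bits 22..31  segment index: (va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)
 *	bits 32..    (_LP64 only) directory index:
 *	             (va >> XSEGSHIFT) & (NSEGPG - 1)
 */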

static void
pmap_segtab_free(pmap_segtab_t *stp)
{
	/*
	 * Insert the segtab into the segtab freelist.
	 */
	mutex_spin_enter(&pmap_segtab_lock);
	stp->seg_seg[0] = pmap_segtab_info.free_segtab;
	pmap_segtab_info.free_segtab = stp;
	SEGTAB_ADD(nput, 1);
	mutex_spin_exit(&pmap_segtab_lock);
}
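
/*
 * The free list is threaded through seg_seg[0] of each idle segtab,
 * which is why pmap_segtab_alloc() clears that slot before handing a
 * recycled segtab back to its caller.
 */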

static void
pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stp_p, bool free_stp,
	pte_callback_t callback, uintptr_t flags,
	vaddr_t va, vsize_t vinc)
{
	pmap_segtab_t *stp = *stp_p;

	for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
	     i < PMAP_SEGTABSIZE;
	     i++, va += vinc) {
#ifdef _LP64
		if (vinc > NBSEG) {
			if (stp->seg_seg[i] != NULL) {
				pmap_segtab_release(pmap, &stp->seg_seg[i],
				    true, callback, flags, va, vinc / NSEGPG);
				KASSERT(stp->seg_seg[i] == NULL);
			}
			continue;
		}
#endif
		KASSERT(vinc == NBSEG);

		/* get pointer to segment map */
		pt_entry_t *pte = stp->seg_tab[i];
		if (pte == NULL)
			continue;

		/*
		 * If our caller wants a callback, invoke it now.
		 */
		if (callback != NULL) {
			(*callback)(pmap, va, va + vinc, pte, flags);
		}
#ifdef DEBUG
		for (size_t j = 0; j < NPTEPG; j++) {
			if (!pte_zero_p(pte[j]))
				panic("%s: pte entry %p not 0 (%#"PRIxPTE")",
				    __func__, &pte[j], pte_value(pte[j]));
		}
#endif
		// PMAP_UNMAP_POOLPAGE should handle any VCA issues itself
		paddr_t pa = PMAP_UNMAP_POOLPAGE((vaddr_t)pte);
		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
#ifdef PMAP_PTP_CACHE
		mutex_spin_enter(&pmap_segtab_lock);
		LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist, pg, listq.list);
		mutex_spin_exit(&pmap_segtab_lock);
#else
		uvm_pagefree(pg);
#endif

		stp->seg_tab[i] = NULL;
	}

	if (free_stp) {
		pmap_check_stp(stp, __func__,
			       vinc == NBSEG ? "release seg" : "release xseg");
		pmap_segtab_free(stp);
		*stp_p = NULL;
	}
}
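
/*
 * Shape of the descent above: on _LP64 the top-level call walks the
 * seg_seg[] directory with vinc == NBXSEG and recurses with
 * vinc / NSEGPG, so the leaf level always runs with vinc == NBSEG
 * (hence the KASSERT).  Leaves free (or cache) their PTE pages; a
 * segtab itself returns to the free list only when free_stp is true.
 */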

/*
 *	Allocate a segment table.
 *
 *	A segtab is taken from the free list when one is
 *	available; otherwise a fresh pool page is allocated,
 *	carved into as many segtabs as fit, and the surplus
 *	is placed on the free list.
 */
static pmap_segtab_t *
pmap_segtab_alloc(void)
{
	pmap_segtab_t *stp;
	bool found_on_freelist = false;

 again:
	mutex_spin_enter(&pmap_segtab_lock);
	if (__predict_true((stp = pmap_segtab_info.free_segtab) != NULL)) {
		pmap_segtab_info.free_segtab = stp->seg_seg[0];
		stp->seg_seg[0] = NULL;
		SEGTAB_ADD(nget, 1);
		found_on_freelist = true;
	}
	mutex_spin_exit(&pmap_segtab_lock);

	if (__predict_false(stp == NULL)) {
		struct vm_page * const stp_pg = pmap_pte_pagealloc();

		if (__predict_false(stp_pg == NULL)) {
			/*
			 * XXX What else can we do?  Could we deadlock here?
			 */
			uvm_wait("segtab");
			goto again;
		}
		SEGTAB_ADD(npage, 1);
		const paddr_t stp_pa = VM_PAGE_TO_PHYS(stp_pg);

		stp = (pmap_segtab_t *)PMAP_MAP_POOLPAGE(stp_pa);
		const size_t n = NBPG / sizeof(*stp);
		if (n > 1) {
			/*
			 * link all the segtabs in this page together
			 */
			for (size_t i = 1; i < n - 1; i++) {
				stp[i].seg_seg[0] = &stp[i+1];
			}
			/*
			 * Now link the new segtabs into the free segtab list.
			 */
			mutex_spin_enter(&pmap_segtab_lock);
			stp[n-1].seg_seg[0] = pmap_segtab_info.free_segtab;
			pmap_segtab_info.free_segtab = stp + 1;
			SEGTAB_ADD(nput, n - 1);
			mutex_spin_exit(&pmap_segtab_lock);
		}
	}

	pmap_check_stp(stp, __func__,
		       found_on_freelist ? "from free list" : "allocated");

	return stp;
}
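
/*
 * Worked example of the carving above, with hypothetical sizes: if
 * NBPG is 4096 and sizeof(pmap_segtab_t) is 1024, then n = 4.  stp[0]
 * goes to the caller, the loop links stp[1]->stp[2]->stp[3], stp[3]
 * is linked to the old free list head, and stp[1] becomes the new
 * head, so one page allocation seeds three future calls.
 */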

/*
 * Allocate the top segment table for the pmap.
 */
void
pmap_segtab_init(pmap_t pmap)
{

	pmap->pm_segtab = pmap_segtab_alloc();
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_segtab_destroy(pmap_t pmap, pte_callback_t func, uintptr_t flags)
{
	if (pmap->pm_segtab == NULL)
		return;

#ifdef _LP64
	const vsize_t vinc = NBXSEG;
#else
	const vsize_t vinc = NBSEG;
#endif
	pmap_segtab_release(pmap, &pmap->pm_segtab,
	    func == NULL, func, flags, pmap->pm_minaddr, vinc);
}

/*
 *	Make a new pmap (vmspace) active for the given process.
 */
void
pmap_segtab_activate(struct pmap *pm, struct lwp *l)
{
	if (l == curlwp) {
		struct cpu_info * const ci = l->l_cpu;
		KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap);
		if (pm == pmap_kernel()) {
			ci->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
#ifdef _LP64
			ci->ci_pmap_user_seg0tab = PMAP_INVALID_SEGTAB_ADDRESS;
#endif
		} else {
			ci->ci_pmap_user_segtab = pm->pm_segtab;
#ifdef _LP64
			ci->ci_pmap_user_seg0tab = pm->pm_segtab->seg_seg[0];
#endif
		}
	}
}
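
/*
 * Only the current LWP's CPU is updated above; for any other LWP
 * there is no per-CPU state to fix up here, the expectation being
 * that the segtab pointers are loaded when that LWP is next switched
 * onto a CPU.
 */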

/*
 *	Act on the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly rounded to
 *	the page size.
 */
void
pmap_pte_process(pmap_t pmap, vaddr_t sva, vaddr_t eva,
    pte_callback_t callback, uintptr_t flags)
{
#if 0
	printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n",
	    __func__, pmap, sva, eva, callback, flags);
#endif
	while (sva < eva) {
		vaddr_t lastseg_va = pmap_trunc_seg(sva) + NBSEG;
		if (lastseg_va == 0 || lastseg_va > eva)
			lastseg_va = eva;

		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, sva);
		if (ptep != NULL) {
			/*
			 * Callback to deal with the ptes for this segment.
			 */
			(*callback)(pmap, sva, lastseg_va, ptep, flags);
		}
		/*
		 * In theory we could release pages with no entries,
		 * but that takes more effort than we want here.
		 */
		sva = lastseg_va;
	}
}
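
/*
 * Usage sketch (the callback below is hypothetical, not part of this
 * file): a pte_callback_t receives one segment's worth of PTEs per
 * invocation, so a caller counting valid mappings might look like:
 *
 *	static void
 *	count_valid(pmap_t pm, vaddr_t sva, vaddr_t eva,
 *	    pt_entry_t *ptep, uintptr_t flags)
 *	{
 *		for (; sva < eva; sva += NBPG, ptep++) {
 *			if (!pte_zero_p(*ptep))
 *				(*(size_t *)flags)++;
 *		}
 *	}
 *
 *	size_t n = 0;
 *	pmap_pte_process(pm, sva, eva, count_valid, (uintptr_t)&n);
 */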

/*
 *	Return a pointer to the pte that corresponds to the specified virtual
 *	address (va) in the target physical map, allocating it if needed.
 */
pt_entry_t *
pmap_pte_reserve(pmap_t pmap, vaddr_t va, int flags)
{
	pmap_segtab_t *stp = pmap->pm_segtab;
	pt_entry_t *pte;

	pte = pmap_pte_lookup(pmap, va);
	if (__predict_false(pte == NULL)) {
#ifdef _LP64
		pmap_segtab_t ** const stp_p =
		    &stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
		if (__predict_false((stp = *stp_p) == NULL)) {
			pmap_segtab_t *nstp = pmap_segtab_alloc();
#ifdef MULTIPROCESSOR
			pmap_segtab_t *ostp = atomic_cas_ptr(stp_p, NULL, nstp);
			if (__predict_false(ostp != NULL)) {
				pmap_check_stp(nstp, __func__, "reserve");
				pmap_segtab_free(nstp);
				nstp = ostp;
			}
#else
			*stp_p = nstp;
#endif /* MULTIPROCESSOR */
			stp = nstp;
		}
		KASSERT(stp == pmap->pm_segtab->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)]);
#endif /* _LP64 */
		struct vm_page *pg = NULL;
#ifdef PMAP_PTP_CACHE
		mutex_spin_enter(&pmap_segtab_lock);
		if ((pg = LIST_FIRST(&pmap_segtab_info.ptp_pgflist)) != NULL) {
			LIST_REMOVE(pg, listq.list);
			KASSERT(LIST_FIRST(&pmap_segtab_info.ptp_pgflist) != pg);
		}
		mutex_spin_exit(&pmap_segtab_lock);
#endif
		if (pg == NULL)
			pg = pmap_pte_pagealloc();
		if (pg == NULL) {
			if (flags & PMAP_CANFAIL)
				return NULL;
			panic("%s: cannot allocate page table page "
			    "for va %" PRIxVADDR, __func__, va);
		}

		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
		pte = (pt_entry_t *)PMAP_MAP_POOLPAGE(pa);
		pt_entry_t ** const pte_p =
		    &stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
#ifdef MULTIPROCESSOR
		pt_entry_t *opte = atomic_cas_ptr(pte_p, NULL, pte);
		/*
		 * If another thread concurrently installed a PTE page
		 * for this va, free the page we just allocated.
		 */
		if (__predict_false(opte != NULL)) {
#ifdef PMAP_PTP_CACHE
			mutex_spin_enter(&pmap_segtab_lock);
			LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist,
			    pg, listq.list);
			mutex_spin_exit(&pmap_segtab_lock);
#else
			PMAP_UNMAP_POOLPAGE((vaddr_t)pte);
			uvm_pagefree(pg);
#endif
			pte = opte;
		}
#else
		*pte_p = pte;
#endif
		KASSERT(pte == stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)]);

#ifdef DEBUG
		for (size_t i = 0; i < NPTEPG; i++) {
			if (!pte_zero_p(pte[i]))
				panic("%s: new segmap %p not empty @ %zu",
				    __func__, pte, i);
		}
#endif
		pte += (va >> PGSHIFT) & (NPTEPG - 1);
	}

	return pte;
}
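
/*
 * Design note on the MULTIPROCESSOR paths above: installing with
 * atomic_cas_ptr() rather than holding a lock across the allocation
 * means two CPUs may race to populate the same slot.  The loser
 * simply returns its freshly allocated segtab or PTE page (to the
 * free list or the PTP cache) and adopts the winner's pointer, and
 * since pages are pre-zeroed before the pointer is published, no CPU
 * ever sees a partially initialized table.
 */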