/*	$NetBSD: pmap_segtab.c,v 1.14 2020/02/24 12:20:30 rin Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.14 2020/02/24 12:20:30 rin Exp $");

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidation or protection-reduction
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

CTASSERT(NBPG >= sizeof(pmap_segtab_t));
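
/*
 * Layout sketch of the lookup this module implements; the exact
 * index widths are machine-dependent and come from the MD headers:
 *
 *	_LP64:	va = | xseg index | seg index | page index | offset |
 *	else:	va = |              seg index | page index | offset |
 *
 * The xseg index ((va >> XSEGSHIFT) & (NSEGPG - 1)) selects a
 * second-level segtab via seg_seg[], the seg index
 * ((va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)) selects a page table
 * page via seg_tab[], and the page index ((va >> PGSHIFT) &
 * (NPTEPG - 1)) selects the PTE within that page.
 */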

struct pmap_segtab_info {
	pmap_segtab_t *free_segtab;	/* free list kept locally */
#ifdef DEBUG
	uint32_t nget_segtab;
	uint32_t nput_segtab;
	uint32_t npage_segtab;
#define	SEGTAB_ADD(n, v)	(pmap_segtab_info.n ## _segtab += (v))
#else
#define	SEGTAB_ADD(n, v)	((void) 0)
#endif
#ifdef PMAP_PTP_CACHE
	struct pgflist ptp_pgflist;	/* Keep a list of idle page tables. */
#endif
} pmap_segtab_info = {
#ifdef PMAP_PTP_CACHE
	.ptp_pgflist = LIST_HEAD_INITIALIZER(pmap_segtab_info.ptp_pgflist),
#endif
};

kmutex_t pmap_segtab_lock __cacheline_aligned;

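/*
 * DEBUG-only sanity check: panic if the segtab still holds any live
 * page table pointers (dumping the remaining entries under
 * DEBUG_NOISY).  "caller" and "why" only label the panic message.
 */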
static void
pmap_check_stp(pmap_segtab_t *stp, const char *caller, const char *why)
{
#ifdef DEBUG
	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
		if (stp->seg_tab[i] != 0) {
#ifdef DEBUG_NOISY
			for (size_t j = i; j < PMAP_SEGTABSIZE; j++)
				printf("%s: pm_segtab.seg_tab[%zu] = %p\n",
				    caller, j, stp->seg_tab[j]);
#endif
			panic("%s: pm_segtab.seg_tab[%zu] != 0 (%p): %s",
			    caller, i, stp->seg_tab[i], why);
		}
	}
#endif
}

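/*
 * Allocate a zeroed pool page to serve as a page table page.  May
 * return NULL when no pages are available; callers retry or fall
 * back as appropriate.
 */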
static inline struct vm_page *
pmap_pte_pagealloc(void)
{
	struct vm_page *pg;

	pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_ZERO|UVM_PGA_USERESERVE);
	if (pg) {
#ifdef UVM_PAGE_TRKOWN
		pg->owner_tag = NULL;
#endif
		UVM_PAGE_OWN(pg, "pmap-ptp");
	}

	return pg;
}

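/*
 * Return the page table covering the segment of va, or NULL if none
 * has been allocated.  On _LP64 this walks the extra seg_seg[] level
 * first.
 */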
static inline pt_entry_t *
pmap_segmap(struct pmap *pmap, vaddr_t va)
{
	pmap_segtab_t *stp = pmap->pm_segtab;
	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va),
	    "pmap %p va %#" PRIxVADDR, pmap, va);
#ifdef _LP64
	stp = stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
	if (stp == NULL)
		return NULL;
#endif

	return stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
}

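/*
 * Return a pointer to the PTE for va, or NULL if the segment covering
 * va has no page table.
 */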
pt_entry_t *
pmap_pte_lookup(pmap_t pmap, vaddr_t va)
{
	pt_entry_t *pte = pmap_segmap(pmap, va);
	if (pte == NULL)
		return NULL;

	return pte + ((va >> PGSHIFT) & (NPTEPG - 1));
}

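/*
 * Return a segtab to the local free list, chaining it through
 * seg_seg[0].
 */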
static void
pmap_segtab_free(pmap_segtab_t *stp)
{
	/*
	 * Insert the segtab into the segtab freelist.
	 */
	mutex_spin_enter(&pmap_segtab_lock);
	stp->seg_seg[0] = pmap_segtab_info.free_segtab;
	pmap_segtab_info.free_segtab = stp;
	SEGTAB_ADD(nput, 1);
	mutex_spin_exit(&pmap_segtab_lock);
}

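/*
 * Walk the segtab at *stp_p starting at va, stepping by vinc, invoking
 * the callback (if any) on each live page table before its page is
 * freed (or cached under PMAP_PTP_CACHE).  On _LP64 the walk recurses
 * through the seg_seg[] level.  If free_stp is true, the segtab itself
 * is returned to the free list as well.
 */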
static void
pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stp_p, bool free_stp,
	pte_callback_t callback, uintptr_t flags,
	vaddr_t va, vsize_t vinc)
{
	pmap_segtab_t *stp = *stp_p;

	for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
	     i < PMAP_SEGTABSIZE;
	     i++, va += vinc) {
#ifdef _LP64
		if (vinc > NBSEG) {
			if (stp->seg_seg[i] != NULL) {
				pmap_segtab_release(pmap, &stp->seg_seg[i],
				    true, callback, flags, va, vinc / NSEGPG);
				KASSERT(stp->seg_seg[i] == NULL);
			}
			continue;
		}
#endif
		KASSERT(vinc == NBSEG);

		/* get pointer to segment map */
		pt_entry_t *pte = stp->seg_tab[i];
		if (pte == NULL)
			continue;

		/*
		 * If our caller wants a callback, do so.
		 */
		if (callback != NULL) {
			(*callback)(pmap, va, va + vinc, pte, flags);
		}
#ifdef DEBUG
		for (size_t j = 0; j < NPTEPG; j++) {
			if (!pte_zero_p(pte[j]))
				panic("%s: pte entry %p not 0 (%#"PRIxPTE")",
				    __func__, &pte[j], pte_value(pte[j]));
		}
#endif
		// PMAP_UNMAP_POOLPAGE should handle any VCA issues itself
		paddr_t pa = PMAP_UNMAP_POOLPAGE((vaddr_t)pte);
		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
#ifdef PMAP_PTP_CACHE
		mutex_spin_enter(&pmap_segtab_lock);
		LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist, pg, pageq.list);
		mutex_spin_exit(&pmap_segtab_lock);
#else
		uvm_pagefree(pg);
#endif

		stp->seg_tab[i] = NULL;
	}

	if (free_stp) {
		pmap_check_stp(stp, __func__,
			       vinc == NBSEG ? "release seg" : "release xseg");
		pmap_segtab_free(stp);
		*stp_p = NULL;
	}
}

/*
 *	Allocate a segment table.
 *
 *	A segtab is taken from the per-module free list if one is
 *	available; otherwise a fresh pool page is allocated, carved
 *	into as many segtabs as fit, and the extras are placed on the
 *	free list.  Sleeps (via uvm_wait) until a page is available,
 *	so this cannot fail.
 */
static pmap_segtab_t *
pmap_segtab_alloc(void)
{
	pmap_segtab_t *stp;
	bool found_on_freelist = false;

 again:
	mutex_spin_enter(&pmap_segtab_lock);
	if (__predict_true((stp = pmap_segtab_info.free_segtab) != NULL)) {
		pmap_segtab_info.free_segtab = stp->seg_seg[0];
		stp->seg_seg[0] = NULL;
		SEGTAB_ADD(nget, 1);
		found_on_freelist = true;
	}
	mutex_spin_exit(&pmap_segtab_lock);

	if (__predict_false(stp == NULL)) {
		struct vm_page * const stp_pg = pmap_pte_pagealloc();

		if (__predict_false(stp_pg == NULL)) {
			/*
			 * XXX What else can we do?  Could we deadlock here?
			 */
			uvm_wait("segtab");
			goto again;
		}
		SEGTAB_ADD(npage, 1);
		const paddr_t stp_pa = VM_PAGE_TO_PHYS(stp_pg);

		stp = (pmap_segtab_t *)PMAP_MAP_POOLPAGE(stp_pa);
		const size_t n = NBPG / sizeof(*stp);
		if (n > 1) {
			/*
			 * link all the segtabs in this page together
			 */
			for (size_t i = 1; i < n - 1; i++) {
				stp[i].seg_seg[0] = &stp[i+1];
			}
			/*
			 * Now link the new segtabs into the free segtab list.
			 */
			mutex_spin_enter(&pmap_segtab_lock);
			stp[n-1].seg_seg[0] = pmap_segtab_info.free_segtab;
			pmap_segtab_info.free_segtab = stp + 1;
			SEGTAB_ADD(nput, n - 1);
			mutex_spin_exit(&pmap_segtab_lock);
		}
	}

	pmap_check_stp(stp, __func__,
		       found_on_freelist ? "from free list" : "allocated");

	return stp;
}

/*
 * Allocate the top segment table for the pmap.
 */
void
pmap_segtab_init(pmap_t pmap)
{

	pmap->pm_segtab = pmap_segtab_alloc();
}

/*
 *	Retire the given pmap's segment tables from service.  If func
 *	is non-NULL it is applied to each live page table before the
 *	page is released; the page tables must be empty (all PTEs
 *	zero) by the time they are freed.
 */
void
pmap_segtab_destroy(pmap_t pmap, pte_callback_t func, uintptr_t flags)
{
	if (pmap->pm_segtab == NULL)
		return;

#ifdef _LP64
	const vsize_t vinc = NBXSEG;
#else
	const vsize_t vinc = NBSEG;
#endif
	pmap_segtab_release(pmap, &pmap->pm_segtab,
	    func == NULL, func, flags, pmap->pm_minaddr, vinc);
}

/*
 *	Make a new pmap (vmspace) active for the given process.
 */
void
pmap_segtab_activate(struct pmap *pm, struct lwp *l)
{
	if (l == curlwp) {
		struct cpu_info * const ci = l->l_cpu;
		KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap);
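		/*
		 * The kernel pmap has no user segtab, so poison the
		 * per-CPU pointers with PMAP_INVALID_SEGTAB_ADDRESS
		 * rather than leaving stale values behind.
		 */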
		if (pm == pmap_kernel()) {
			ci->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
#ifdef _LP64
			ci->ci_pmap_user_seg0tab = PMAP_INVALID_SEGTAB_ADDRESS;
#endif
		} else {
			ci->ci_pmap_user_segtab = pm->pm_segtab;
#ifdef _LP64
			ci->ci_pmap_user_seg0tab = pm->pm_segtab->seg_seg[0];
#endif
		}
	}
}

/*
 *	Act on the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly rounded to
 *	the page size.
 */
void
pmap_pte_process(pmap_t pmap, vaddr_t sva, vaddr_t eva,
    pte_callback_t callback, uintptr_t flags)
{
#if 0
	printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n",
	    __func__, pmap, sva, eva, callback, flags);
#endif
	while (sva < eva) {
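		/*
		 * Clamp the chunk to the end of this segment; the sum
		 * can wrap to 0 when sva lies in the last segment of
		 * the address space, which the first test catches.
		 */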
		vaddr_t lastseg_va = pmap_trunc_seg(sva) + NBSEG;
		if (lastseg_va == 0 || lastseg_va > eva)
			lastseg_va = eva;

		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, sva);
		if (ptep != NULL) {
			/*
			 * Callback to deal with the ptes for this segment.
			 */
			(*callback)(pmap, sva, lastseg_va, ptep, flags);
		}
		/*
		 * In theory we could release pages with no entries,
		 * but that takes more effort than we want here.
		 */
		sva = lastseg_va;
	}
}

/*
 *	Return a pointer to the PTE that corresponds to the specified
 *	virtual address (va) in the target physical map, allocating
 *	intermediate segtabs and the page table page if needed.
 */
pt_entry_t *
pmap_pte_reserve(pmap_t pmap, vaddr_t va, int flags)
{
	pmap_segtab_t *stp = pmap->pm_segtab;
	pt_entry_t *pte;

	pte = pmap_pte_lookup(pmap, va);
	if (__predict_false(pte == NULL)) {
#ifdef _LP64
		pmap_segtab_t ** const stp_p =
		    &stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
		if (__predict_false((stp = *stp_p) == NULL)) {
			pmap_segtab_t *nstp = pmap_segtab_alloc();
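			/*
			 * Publish the new intermediate segtab with a
			 * CAS; if another CPU beat us to it, free ours
			 * and use the winner's.
			 */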
#ifdef MULTIPROCESSOR
			pmap_segtab_t *ostp = atomic_cas_ptr(stp_p, NULL, nstp);
			if (__predict_false(ostp != NULL)) {
				pmap_check_stp(nstp, __func__, "reserve");
				pmap_segtab_free(nstp);
				nstp = ostp;
			}
#else
			*stp_p = nstp;
#endif /* MULTIPROCESSOR */
			stp = nstp;
		}
		KASSERT(stp == pmap->pm_segtab->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)]);
#endif /* _LP64 */
		struct vm_page *pg = NULL;
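		/*
		 * Under PMAP_PTP_CACHE, prefer a cached page table
		 * page over a fresh allocation.
		 */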
#ifdef PMAP_PTP_CACHE
		mutex_spin_enter(&pmap_segtab_lock);
		if ((pg = LIST_FIRST(&pmap_segtab_info.ptp_pgflist)) != NULL) {
			LIST_REMOVE(pg, pageq.list);
			KASSERT(LIST_FIRST(&pmap_segtab_info.ptp_pgflist) != pg);
		}
		mutex_spin_exit(&pmap_segtab_lock);
#endif
		if (pg == NULL)
			pg = pmap_pte_pagealloc();
		if (pg == NULL) {
			if (flags & PMAP_CANFAIL)
				return NULL;
			panic("%s: cannot allocate page table page "
			    "for va %" PRIxVADDR, __func__, va);
		}

		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
		pte = (pt_entry_t *)PMAP_MAP_POOLPAGE(pa);
		pt_entry_t ** const pte_p =
		    &stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
#ifdef MULTIPROCESSOR
		pt_entry_t *opte = atomic_cas_ptr(pte_p, NULL, pte);
		/*
		 * If another thread allocated the page table page for
		 * this va first, free (or cache) the page we just
		 * allocated and use theirs instead.
		 */
		if (__predict_false(opte != NULL)) {
#ifdef PMAP_PTP_CACHE
			mutex_spin_enter(&pmap_segtab_lock);
			LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist,
			    pg, pageq.list);
			mutex_spin_exit(&pmap_segtab_lock);
#else
			PMAP_UNMAP_POOLPAGE((vaddr_t)pte);
			uvm_pagefree(pg);
#endif
			pte = opte;
		}
#else
		*pte_p = pte;
#endif
		KASSERT(pte == stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)]);

#ifdef DEBUG
		for (size_t i = 0; i < NPTEPG; i++) {
			if (!pte_zero_p(pte[i]))
				panic("%s: new segmap %p not empty @ %zu",
				    __func__, pte, i);
		}
#endif
		pte += (va >> PGSHIFT) & (NPTEPG - 1);
	}

	return pte;
}