1 /*	$NetBSD: pmap_machdep.c,v 1.38 2022/10/26 07:35:20 skrll Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center and by Chris G. Demetriou.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1992, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * This code is derived from software contributed to Berkeley by
38  * the Systems Programming Group of the University of Utah Computer
39  * Science Department and Ralph Campbell.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
66  */
67 
68 #include <sys/cdefs.h>
69 
70 __KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.38 2022/10/26 07:35:20 skrll Exp $");
71 
72 /*
73  *	Manages physical address maps.
74  *
75  *	In addition to hardware address maps, this
76  *	module is called upon to provide software-use-only
77  *	maps which may or may not be stored in the same
78  *	form as hardware maps.  These pseudo-maps are
79  *	used to store intermediate results from copy
80  *	operations to and from address spaces.
81  *
82  *	Since the information managed by this module is
83  *	also stored by the logical address mapping module,
84  *	this module may throw away valid virtual-to-physical
85  *	mappings at almost any time.  However, invalidations
86  *	of virtual-to-physical mappings must be done as
87  *	requested.
88  *
89  *	In order to cope with hardware architectures which
90  *	make virtual-to-physical map invalidates expensive,
91  *	this module may delay invalidate or reduced protection
92  *	operations until such time as they are actually
93  *	necessary.  This module is given full information as
94  *	to which processors are currently using which maps,
95  *	and to when physical maps must be made correct.
96  */
97 
98 /* XXX simonb 2002/02/26
99  *
100  * MIPS3_PLUS is used to conditionally compile the r4k MMU support.
101  * This is bogus - for example, some IDT MIPS-II CPUs have r4k style
102  * MMUs (and 32-bit ones at that).
103  *
104  * On the other hand, it's not likely that we'll ever support the R6000
105  * (is it?), so maybe that can be an "if MIPS2 or greater" check.
106  *
107  * Also along these lines are using totally separate functions for
108  * r3k-style and r4k-style MMUs and removing all the MIPS_HAS_R4K_MMU
109  * checks in the current functions.
110  *
111  * These warnings probably apply to other files under sys/arch/mips.
112  */
113 
114 #include "opt_cputype.h"
115 #include "opt_mips_cache.h"
116 #include "opt_multiprocessor.h"
117 #include "opt_sysv.h"
118 
119 #define __MUTEX_PRIVATE
120 #define __PMAP_PRIVATE
121 
122 #include <sys/param.h>
123 #include <sys/atomic.h>
124 #include <sys/buf.h>
125 #include <sys/cpu.h>
126 #include <sys/kernel.h>
127 #include <sys/mutex.h>
128 #include <sys/pool.h>
129 #include <sys/proc.h>
130 #include <sys/systm.h>
131 #ifdef SYSVSHM
132 #include <sys/shm.h>
133 #endif
134 
135 #include <uvm/uvm.h>
136 #include <uvm/uvm_physseg.h>
137 
138 #include <mips/cache.h>
139 #include <mips/cpuregs.h>
140 #include <mips/locore.h>
141 #include <mips/pte.h>
142 
143 CTASSERT(MIPS_KSEG0_START < 0);
144 CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG0(0x1000) < 0);
145 CTASSERT(MIPS_KSEG1_START < 0);
146 CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG1(0x1000) < 0);
147 CTASSERT(MIPS_KSEG2_START < 0);
148 CTASSERT(MIPS_MAX_MEM_ADDR < 0);
149 CTASSERT(MIPS_RESERVED_ADDR < 0);
150 CTASSERT((uint32_t)MIPS_KSEG0_START == 0x80000000);
151 CTASSERT((uint32_t)MIPS_KSEG1_START == 0xa0000000);
152 CTASSERT((uint32_t)MIPS_KSEG2_START == 0xc0000000);
153 CTASSERT((uint32_t)MIPS_MAX_MEM_ADDR == 0xbe000000);
154 CTASSERT((uint32_t)MIPS_RESERVED_ADDR == 0xbfc80000);
155 CTASSERT(MIPS_KSEG0_P(MIPS_PHYS_TO_KSEG0(0)));
156 CTASSERT(MIPS_KSEG1_P(MIPS_PHYS_TO_KSEG1(0)));
157 #ifdef _LP64
158 CTASSERT(VM_MIN_KERNEL_ADDRESS % NBXSEG == 0);
159 #else
160 CTASSERT(VM_MIN_KERNEL_ADDRESS % NBSEG == 0);
161 #endif
162 
163 //PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
164 PMAP_COUNTER(zeroed_pages, "pages zeroed");
165 PMAP_COUNTER(copied_pages, "pages copied");
166 extern struct evcnt pmap_evcnt_page_cache_evictions;
167 
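/*
 * Mask of the address bits that select the cache "colour" on virtually
 * indexed caches.  pmap_bootstrap() sets it (when virtual aliasing is
 * possible) to the larger of the primary d-cache and i-cache alias
 * masks; pmap_md_cache_indexof() below uses it to pick congruent
 * mapping addresses.
 */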
168 u_int pmap_page_cache_alias_mask;
169 
170 #define pmap_md_cache_indexof(x)	(((vaddr_t)(x)) & pmap_page_cache_alias_mask)
171 
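/*
 * Ephemeral mappings: temporarily map a managed page into kernel VA so
 * it can be zeroed, copied or flushed.  The direct map (cached XKPHYS,
 * or KSEG0 on 32-bit kernels) is used whenever it would not create a
 * bad cache alias; otherwise the page is entered into a per-CPU VA
 * window (ci_pmap_srcbase/ci_pmap_dstbase) at a cache index congruent
 * with the page's previous mapping.  In the latter case the previous
 * PTE is saved through old_pte_p so pmap_md_unmap_ephemeral_page() can
 * restore it.
 */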
172 static register_t
173 pmap_md_map_ephemeral_page(struct vm_page_md *mdpg, bool locked_p, int prot,
174     pt_entry_t *old_pte_p)
175 {
176 	KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
177 
178 	struct vm_page *pg = VM_MD_TO_PAGE(mdpg);
179 	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
180 	pv_entry_t pv = &mdpg->mdpg_first;
181 	register_t va = 0;
182 
183 	UVMHIST_FUNC(__func__);
184 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx, prot=%d, ptep=%#jx)",
185 	    (uintptr_t)pg, prot, (uintptr_t)old_pte_p, 0);
186 
187 	KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
188 
189 	if (!MIPS_CACHE_VIRTUAL_ALIAS || !mips_cache_badalias(pv->pv_va, pa)) {
190 #ifdef _LP64
191 		va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
192 #else
193 		if (pa < MIPS_PHYS_MASK) {
194 			va = MIPS_PHYS_TO_KSEG0(pa);
195 		}
196 #endif
197 	}
198 	if (va == 0) {
199 		/*
200 		 * Make sure to use a congruent mapping to the last mapped
201 		 * address so we don't have to worry about virtual aliases.
202 		 */
203 		kpreempt_disable(); // paired with the one in unmap
204 		struct cpu_info * const ci = curcpu();
205 		if (MIPS_CACHE_VIRTUAL_ALIAS) {
206 			KASSERT(ci->ci_pmap_dstbase != 0);
207 			KASSERT(ci->ci_pmap_srcbase != 0);
208 
209 			const u_int __diagused mask = pmap_page_cache_alias_mask;
210 			KASSERTMSG((ci->ci_pmap_dstbase & mask) == 0,
211 			    "%#"PRIxVADDR, ci->ci_pmap_dstbase);
212 			KASSERTMSG((ci->ci_pmap_srcbase & mask) == 0,
213 			    "%#"PRIxVADDR, ci->ci_pmap_srcbase);
214 		}
215 		vaddr_t nva = (prot & VM_PROT_WRITE
216 			? ci->ci_pmap_dstbase
217 			: ci->ci_pmap_srcbase)
218 		    + pmap_md_cache_indexof(MIPS_CACHE_VIRTUAL_ALIAS
219 			? pv->pv_va
220 			: pa);
221 
222 		va = (intptr_t)nva;
223 		/*
224 		 * Now to make and write the new PTE to map the PA.
225 		 */
226 		const pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, 0);
227 		pt_entry_t * const ptep = pmap_pte_lookup(pmap_kernel(), va);
228 		*old_pte_p = *ptep;		// save
229 		bool rv __diagused;
230 		*ptep = npte;			// update page table
231 
232 		// update the TLB directly making sure we force the new entry
233 		// into it.
234 		rv = tlb_update_addr(va, KERNEL_PID, npte, true);
235 		KASSERTMSG(rv == 1, "va %#"PRIxREGISTER" pte=%#"PRIxPTE" rv=%d",
236 		    va, pte_value(npte), rv);
237 	}
238 	if (MIPS_CACHE_VIRTUAL_ALIAS) {
239 		/*
240 		 * If we are forced to use an incompatible alias, flush the
241 		 * page from the cache so we will copy the correct contents.
242 		 */
243 		if (!locked_p)
244 			(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
245 		if (VM_PAGEMD_CACHED_P(mdpg)
246 		    && mips_cache_badalias(pv->pv_va, va)) {
247 			register_t ova = (intptr_t)trunc_page(pv->pv_va);
248 			mips_dcache_wbinv_range_index(ova, PAGE_SIZE);
249 			/*
250 			 * If there is no active mapping, remember this new one.
251 			 */
252 			if (pv->pv_pmap == NULL)
253 				pv->pv_va = va;
254 		}
255 		if (!locked_p)
256 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
257 	}
258 
259 	UVMHIST_LOG(pmaphist, " <-- done (va=%#lx)", va, 0, 0, 0);
260 
261 	return va;
262 }
263 
264 static void
265 pmap_md_unmap_ephemeral_page(struct vm_page_md *mdpg, bool locked_p,
266     register_t va, pt_entry_t old_pte)
267 {
268 	KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
269 
270 	pv_entry_t pv = &mdpg->mdpg_first;
271 
272 	UVMHIST_FUNC(__func__);
273 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx, va=%#lx, pte=%#"PRIxPTE")",
274 	    (uintptr_t)VM_MD_TO_PAGE(mdpg), va, pte_value(old_pte), 0);
275 
276 	KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
277 
278 	if (MIPS_CACHE_VIRTUAL_ALIAS) {
279 		if (!locked_p)
280 			(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
281 		/*
282 		 * If this page was previously uncached or we had to use an
283 		 * incompatible alias, flush it from the cache.
284 		 */
285 		if (VM_PAGEMD_UNCACHED_P(mdpg)
286 		    || (pv->pv_pmap != NULL
287 			&& mips_cache_badalias(pv->pv_va, va))) {
288 			mips_dcache_wbinv_range(va, PAGE_SIZE);
289 		}
290 		if (!locked_p)
291 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
292 	}
293 	/*
294 	 * If we had to map using a page table entry, restore it now.
295 	 */
296 	if (!pmap_md_direct_mapped_vaddr_p(va)) {
297 		*pmap_pte_lookup(pmap_kernel(), va) = old_pte;
298 		if (pte_valid_p(old_pte)) {
299 			// Update the TLB with the old mapping.
300 			tlb_update_addr(va, KERNEL_PID, old_pte, 0);
301 		} else {
302 			// Invalidate TLB entry if the old pte wasn't valid.
303 			tlb_invalidate_addr(va, KERNEL_PID);
304 		}
305 		kpreempt_enable();	// Restore preemption
306 	}
307 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
308 }
309 
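/*
 * Write back and invalidate a page from the data cache, going through
 * an ephemeral mapping so the flush hits the correct cache index.
 */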
310 static void
311 pmap_md_vca_page_wbinv(struct vm_page_md *mdpg, bool locked_p)
312 {
313 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
314 	pt_entry_t pte;
315 
316 	const register_t va = pmap_md_map_ephemeral_page(mdpg, locked_p,
317 	    VM_PROT_READ, &pte);
318 
319 	mips_dcache_wbinv_range(va, PAGE_SIZE);
320 
321 	pmap_md_unmap_ephemeral_page(mdpg, locked_p, va, pte);
322 }
323 
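/*
 * On 32-bit kernels, pages stolen at boot time are accessed through
 * KSEG0, so refuse to steal anything beyond the physical range that
 * KSEG0 can map (MIPS_PHYS_MASK).
 */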
324 bool
325 pmap_md_ok_to_steal_p(const uvm_physseg_t bank, size_t npgs)
326 {
327 #ifndef _LP64
328 	if (uvm_physseg_get_avail_start(bank) + npgs >= atop(MIPS_PHYS_MASK + 1)) {
329 		aprint_debug("%s: seg not enough in KSEG0 for %zu pages\n",
330 		    __func__, npgs);
331 		return false;
332 	}
333 #endif
334 	return true;
335 }
336 
337 /*
338  *	Bootstrap the system enough to run with virtual memory.
339  */
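/*
 * Roughly: size the kernel page-table array (sysmap), allocate it with
 * uvm_pageboot_alloc(), wire it into the kernel segtab, record the
 * pmap_limits, initialize the pmap and pv-entry pools, and set the
 * kernel ASID.
 */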
340 void
341 pmap_bootstrap(void)
342 {
343 	vsize_t bufsz;
344 	size_t sysmap_size;
345 	pt_entry_t *sysmap;
346 
347 	if (MIPS_CACHE_VIRTUAL_ALIAS && uvmexp.ncolors) {
348 		pmap_page_colormask = (uvmexp.ncolors - 1) << PAGE_SHIFT;
349 		pmap_page_cache_alias_mask = uimax(
350 		    mips_cache_info.mci_cache_alias_mask,
351 		    mips_cache_info.mci_icache_alias_mask);
352 	}
353 
354 #ifdef MULTIPROCESSOR
355 	pmap_t pm = pmap_kernel();
356 	kcpuset_create(&pm->pm_onproc, true);
357 	kcpuset_create(&pm->pm_active, true);
358 	KASSERT(pm->pm_onproc != NULL);
359 	KASSERT(pm->pm_active != NULL);
360 	kcpuset_set(pm->pm_onproc, cpu_number());
361 	kcpuset_set(pm->pm_active, cpu_number());
362 #endif
363 
364 	pmap_bootstrap_common();
365 
366 	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */
367 
368 	/*
369 	 * Compute the number of pages kmem_arena will have.
370 	 */
371 	kmeminit_nkmempages();
372 
373 	/*
374 	 * Figure out how many PTE's are necessary to map the kernel.
375 	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
376 	 */
377 
378 	/* Get size of buffer cache and set an upper limit */
379 	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
380 	bufsz = buf_memcalc();
381 	buf_setvalimit(bufsz);
382 
383 	sysmap_size = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
384 	    bufsz + 16 * NCARGS + pager_map_size) / NBPG +
385 	    (maxproc * UPAGES) + nkmempages;
386 
387 #ifdef SYSVSHM
388 	sysmap_size += shminfo.shmall;
389 #endif
390 #ifdef KSEG2IOBUFSIZE
391 	sysmap_size += (KSEG2IOBUFSIZE >> PGSHIFT);
392 #endif
393 #ifdef _LP64
394 	/*
395 	 * If we are using tmpfs, then we might want to use a great deal of
396 	 * our memory with it.  Make sure we have enough VM to do that.
397 	 */
398 	sysmap_size += physmem;
399 #else
400 	/* XXX: else runs out of space on 256MB sbmips!! */
401 	sysmap_size += 20000;
402 #endif
403 	/* Round up to a whole number of PTE page tables */
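	/*
	 * NPTEPG (PTEs per page-table page) is a power of two, so
	 * "(x + NPTEPG - 1) & -NPTEPG" rounds x up to the next multiple
	 * of NPTEPG (e.g. if NPTEPG were 1024, 1..1024 -> 1024 and
	 * 1025 -> 2048).
	 */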
404 	sysmap_size = (sysmap_size + NPTEPG - 1) & -NPTEPG;
405 
406 	/*
407 	 * Initialize `FYI' variables.	Note we're relying on
408 	 * the fact that BSEARCH sorts the vm_physmem[] array
409 	 * for us.  Must do this before uvm_pageboot_alloc()
410 	 * can be called.
411 	 */
412 	pmap_limits.avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
413 	pmap_limits.avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));
414 	pmap_limits.virtual_end = pmap_limits.virtual_start + (vaddr_t)sysmap_size * NBPG;
415 
416 #ifndef _LP64
417 	if (pmap_limits.virtual_end > VM_MAX_KERNEL_ADDRESS
418 	    || pmap_limits.virtual_end < VM_MIN_KERNEL_ADDRESS) {
419 		printf("%s: changing last kernel VA from %#"PRIxVADDR
420 		    " to %#"PRIxVADDR"\n", __func__,
421 		    pmap_limits.virtual_end, VM_MAX_KERNEL_ADDRESS);
422 		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
423 		sysmap_size =
424 		    (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / NBPG;
425 	}
426 #endif
427 	pmap_pvlist_lock_init(mips_cache_info.mci_pdcache_line_size);
428 
429 	/*
430 	 * Now actually allocate the kernel PTE array (must be done
431 	 * after pmap_limits.virtual_end is initialized).
432 	 */
433 	sysmap = (pt_entry_t *)
434 	    uvm_pageboot_alloc(sizeof(pt_entry_t) * sysmap_size);
435 
436 	vaddr_t va = VM_MIN_KERNEL_ADDRESS;
437 #ifdef _LP64
438 	/*
439 	 * Do we need more than one XSEG's worth of virtual address space?
440 	 * If so, we have to allocate the additional pmap_segtab_t's for them
441 	 * and insert them into the kernel's top level segtab.
442 	 */
443 	const size_t xsegs = (sysmap_size * NBPG + NBXSEG - 1) / NBXSEG;
444 	if (xsegs > 1) {
445 		printf("%s: %zu xsegs required for %zu pages\n",
446 		    __func__, xsegs, sysmap_size);
447 		pmap_segtab_t *stb = (pmap_segtab_t *)
448 		    uvm_pageboot_alloc(sizeof(pmap_segtab_t) * (xsegs - 1));
449 		for (size_t i = 1; i <= xsegs; i++, stb++) {
450 			pmap_kern_segtab.seg_seg[i] = stb;
451 		}
452 	}
453 	pmap_segtab_t ** const xstb = pmap_kern_segtab.seg_seg;
454 #else
455 	const size_t xsegs = 1;
456 	pmap_segtab_t * const stb = &pmap_kern_segtab;
457 #endif
458 	KASSERT(curcpu()->ci_pmap_kern_segtab == &pmap_kern_segtab);
459 
460 	for (size_t k = 0, i = 0; k < xsegs; k++) {
461 #ifdef _LP64
462 		pmap_segtab_t * const stb =
463 		    xstb[(va >> XSEGSHIFT) & (NSEGPG - 1)];
464 #endif
465 		bool done = false;
466 
467 		for (size_t j = (va >> SEGSHIFT) & (NSEGPG - 1);
468 		     !done && i < sysmap_size;
469 		     i += NPTEPG, j++, va += NBSEG) {
470 			/*
471 			 * Now set the page table pointer...
472 			 */
473 			stb->seg_ppg[j] = (pmap_ptpage_t *)&sysmap[i];
474 #ifdef _LP64
475 			/*
476 			 * If we are at end of this XSEG, terminate the loop
477 			 * so we advance to the next one.
478 			 */
479 			done = (j + 1 == NSEGPG);
480 #endif
481 		}
482 	}
483 	KASSERT(pmap_pte_lookup(pmap_kernel(), VM_MIN_KERNEL_ADDRESS) == sysmap);
484 
485 	/* update the top of the kernel VM - pmap_growkernel not required */
486 	pmap_curmaxkvaddr = pmap_limits.virtual_end;
487 	/*
488 	 * Initialize the pools.
489 	 */
490 	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
491 	    &pool_allocator_nointr, IPL_NONE);
492 	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
493 	    &pmap_pv_page_allocator, IPL_NONE);
494 
495 	tlb_set_asid(KERNEL_PID, pmap_kernel());
496 
497 #ifdef MIPS3_PLUS	/* XXX mmu XXX */
498 	/*
499 	 * The R4?00 stores only one copy of the Global bit in the
500 	 * translation lookaside buffer for each 2 page entry.
501 	 * Thus invalid entries must have the Global bit set so
502 	 * when Entry LO and Entry HI G bits are anded together
503  * when the EntryLo0 and EntryLo1 G bits are ANDed together
504 	 */
505 	if (MIPS_HAS_R4K_MMU) {
506 		while (sysmap_size-- > 0) {
507 			*sysmap++ = MIPS3_PG_G;
508 		}
509 	}
510 #endif	/* MIPS3_PLUS */
511 }
512 
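/*
 * Allocate the per-CPU VA windows (ci_pmap_srcbase/ci_pmap_dstbase)
 * used by pmap_md_map_ephemeral_page() when a page cannot safely be
 * reached through the direct map.  Each window is the size of the
 * larger primary cache way and is aligned to that size, so any cache
 * colour can be selected within it.
 */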
513 void
514 pmap_md_alloc_ephemeral_address_space(struct cpu_info *ci)
515 {
516 	struct mips_cache_info * const mci = &mips_cache_info;
517 
518 	/*
519 	 * If we have more memory than can be mapped by KSEG0, we need to
520 	 * allocate enough VA so we can map pages with the right color
521 	 * (to avoid cache alias problems).
522 	 */
523 	if (false
524 #ifndef _LP64
525 	    || pmap_limits.avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START
526 #endif
527 	    || MIPS_CACHE_VIRTUAL_ALIAS
528 	    || MIPS_ICACHE_VIRTUAL_ALIAS) {
529 		vsize_t size = uimax(mci->mci_pdcache_way_size, mci->mci_picache_way_size);
530 		const u_int __diagused mask = pmap_page_cache_alias_mask;
531 
532 		ci->ci_pmap_dstbase = uvm_km_alloc(kernel_map, size, size,
533 		    UVM_KMF_VAONLY);
534 
535 		KASSERT(ci->ci_pmap_dstbase);
536 		KASSERT(!pmap_md_direct_mapped_vaddr_p(ci->ci_pmap_dstbase));
537 		KASSERTMSG((ci->ci_pmap_dstbase & mask) == 0, "%#"PRIxVADDR,
538 		    ci->ci_pmap_dstbase);
539 
540 		ci->ci_pmap_srcbase = uvm_km_alloc(kernel_map, size, size,
541 		    UVM_KMF_VAONLY);
542 		KASSERT(ci->ci_pmap_srcbase);
543 		KASSERT(!pmap_md_direct_mapped_vaddr_p(ci->ci_pmap_srcbase));
544 		KASSERTMSG((ci->ci_pmap_srcbase & mask) == 0, "%#"PRIxVADDR,
545 		    ci->ci_pmap_srcbase);
546 	}
547 }
548 
549 void
550 pmap_md_init(void)
551 {
552 	pmap_md_alloc_ephemeral_address_space(curcpu());
553 
554 #if defined(MIPS3) && 0
555 	if (MIPS_HAS_R4K_MMU) {
556 		/*
557 		 * XXX
558 		 * Disable sosend_loan() in src/sys/kern/uipc_socket.c
559 		 * on MIPS3 CPUs to avoid possible virtual cache aliases
560 		 * and uncached mappings in pmap_enter_pv().
561 		 *
562 		 * Ideally, read-only shared mappings won't cause aliases,
563 		 * so pmap_enter_pv() should handle any shared read-only
564 		 * mappings without uncached ops, as the ARM pmap does.
565 		 *
566 		 * On the other hand, R4000 and R4400 have the virtual
567 		 * coherency exceptions which will happen even on read only
568 		 * mappings, so we always have to disable sosend_loan()
569 		 * on such CPUs.
570 		 */
571 		sock_loan_thresh = -1;
572 	}
573 #endif
574 }
575 
576 /*
577  * XXXJRT -- need a version for each cache type.
578  */
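/*
 * Synchronize the instruction cache with the data cache for a range
 * the kernel has just written into a process' address space (e.g.
 * signal trampolines or ptrace-inserted breakpoints).
 */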
579 void
580 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
581 {
582 	if (MIPS_HAS_R4K_MMU) {
583 		/*
584 		 * XXX
585 		 * shouldn't need to do this for physical d$?
586 		 * should need to do this for virtual i$ if prot == EXEC?
587 		 */
588 		if (p == curlwp->l_proc
589 		    && mips_cache_info.mci_pdcache_way_mask < PAGE_SIZE)
590 		    /* XXX check icache mask too? */
591 			mips_icache_sync_range((intptr_t)va, len);
592 		else
593 			mips_icache_sync_range_index((intptr_t)va, len);
594 	} else {
595 		pmap_t pmap = p->p_vmspace->vm_map.pmap;
596 		kpreempt_disable();
597 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
598 		pt_entry_t entry = (ptep != NULL ? *ptep : 0);
599 		kpreempt_enable();
600 		if (!pte_valid_p(entry))
601 			return;
602 
603 		mips_icache_sync_range(
604 		    MIPS_PHYS_TO_KSEG0(pte_to_paddr(entry) + (va & PGOFSET)),
605 		    len);
606 	}
607 }
608 
609 /*
610  *	pmap_zero_page zeros the specified page.
611  */
612 void
613 pmap_zero_page(paddr_t dst_pa)
614 {
615 	pt_entry_t dst_pte;
616 
617 	UVMHIST_FUNC(__func__);
618 	UVMHIST_CALLARGS(pmaphist, "(pa=%#"PRIxPADDR")", dst_pa, 0, 0, 0);
619 	PMAP_COUNT(zeroed_pages);
620 
621 	struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);
622 	struct vm_page_md * const dst_mdpg = VM_PAGE_TO_MD(dst_pg);
623 
624 	KASSERT(!VM_PAGEMD_EXECPAGE_P(dst_mdpg));
625 
626 	const register_t dst_va = pmap_md_map_ephemeral_page(dst_mdpg, false,
627 	    VM_PROT_READ|VM_PROT_WRITE, &dst_pte);
628 
629 	mips_pagezero(dst_va);
630 
631 	pmap_md_unmap_ephemeral_page(dst_mdpg, false, dst_va, dst_pte);
632 
633 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
634 }
635 
636 /*
637  *	pmap_copy_page copies the specified page.
638  */
639 void
640 pmap_copy_page(paddr_t src_pa, paddr_t dst_pa)
641 {
642 	pt_entry_t src_pte, dst_pte;
643 
644 	UVMHIST_FUNC(__func__);
645 	UVMHIST_CALLARGS(pmaphist, "(src_pa=%#lx, dst_pa=%#lx)", src_pa, dst_pa,
646 	    0, 0);
647 	PMAP_COUNT(copied_pages);
648 
649 	struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src_pa);
650 	struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);
651 
652 	struct vm_page_md * const src_mdpg = VM_PAGE_TO_MD(src_pg);
653 	struct vm_page_md * const dst_mdpg = VM_PAGE_TO_MD(dst_pg);
654 
655 	const register_t src_va = pmap_md_map_ephemeral_page(src_mdpg, false,
656 	    VM_PROT_READ, &src_pte);
657 
658 	KASSERT(VM_PAGEMD_PVLIST_EMPTY_P(dst_mdpg));
659 	KASSERT(!VM_PAGEMD_EXECPAGE_P(dst_mdpg));
660 	const register_t dst_va = pmap_md_map_ephemeral_page(dst_mdpg, false,
661 	    VM_PROT_READ|VM_PROT_WRITE, &dst_pte);
662 
663 	mips_pagecopy(dst_va, src_va);
664 
665 	pmap_md_unmap_ephemeral_page(dst_mdpg, false, dst_va, dst_pte);
666 	pmap_md_unmap_ephemeral_page(src_mdpg, false, src_va, src_pte);
667 
668 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
669 }
670 
671 void
672 pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
673 {
674 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
675 	struct mips_options * const opts = &mips_options;
676 	if (opts->mips_cpu_flags & CPU_MIPS_I_D_CACHE_COHERENT)
677 		return;
678 
679 	/*
680 	 * If onproc is empty, we could do a
681 	 * pmap_page_protect(pg, VM_PROT_NONE) and remove all
682 	 * mappings of the page and clear its execness.  Then
683 	 * the next time page is faulted, it will get icache
684 	 * synched.  But this is easier. :)
685 	 */
686 	if (MIPS_HAS_R4K_MMU) {
687 		if (VM_PAGEMD_CACHED_P(mdpg)) {
688 			/* This was probably mapped cached by UBC so flush it */
689 			pt_entry_t pte;
690 			const register_t tva = pmap_md_map_ephemeral_page(mdpg,
691 			    false, VM_PROT_READ, &pte);
692 
693 			UVMHIST_LOG(pmaphist, "  va %#"PRIxVADDR, tva, 0, 0, 0);
694 			mips_dcache_wbinv_range(tva, PAGE_SIZE);
695 			mips_icache_sync_range(tva, PAGE_SIZE);
696 
697 			pmap_md_unmap_ephemeral_page(mdpg, false, tva, pte);
698 		}
699 	} else {
700 		KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
701 
702 		struct vm_page *pg = VM_MD_TO_PAGE(mdpg);
703 		mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)),
704 		    PAGE_SIZE);
705 	}
706 #ifdef MULTIPROCESSOR
707 	pv_entry_t pv = &mdpg->mdpg_first;
708 	const register_t va = (intptr_t)trunc_page(pv->pv_va);
709 	pmap_tlb_syncicache(va, onproc);
710 #endif
711 }
712 
713 struct vm_page *
714 pmap_md_alloc_poolpage(int flags)
715 {
716 	/*
717 	 * The VM_FREELIST used for pool pages is only set on 32bit
718 	 * kernels.  This is to make sure that we only allocate pages
719 	 * that can be mapped via KSEG0.  On 64bit kernels, all memory
720 	 * can be mapped via XKPHYS so just use the default freelist.
721 	 */
722 	if (mips_poolpage_vmfreelist != VM_FREELIST_DEFAULT)
723 		return uvm_pagealloc_strat(NULL, 0, NULL, flags,
724 		    UVM_PGA_STRAT_ONLY, mips_poolpage_vmfreelist);
725 
726 	return uvm_pagealloc(NULL, 0, NULL, flags);
727 }
728 
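/*
 * Pool pages are accessed through the direct map.  Record the
 * direct-mapped VA in the page's first pv entry so that later mappings
 * can detect (and resolve) cache aliases against it.
 */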
729 vaddr_t
730 pmap_md_map_poolpage(paddr_t pa, size_t len)
731 {
732 
733 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
734 	vaddr_t va = pmap_md_pool_phystov(pa);
735 	KASSERT(cold || pg != NULL);
736 	if (pg != NULL) {
737 		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
738 		pv_entry_t pv = &mdpg->mdpg_first;
739 		vaddr_t last_va = trunc_page(pv->pv_va);
740 
741 		KASSERT(len == PAGE_SIZE || last_va == pa);
742 		KASSERT(pv->pv_pmap == NULL);
743 		KASSERT(pv->pv_next == NULL);
744 		KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
745 
746 		/*
747 		 * If this page was last mapped with an address that
748 		 * might cause aliases, flush the page from the cache.
749 		 */
750 		if (MIPS_CACHE_VIRTUAL_ALIAS
751 		    && mips_cache_badalias(last_va, va)) {
752 			pmap_md_vca_page_wbinv(mdpg, false);
753 		}
754 
755 		pv->pv_va = va;
756 	}
757 	return va;
758 }
759 
760 paddr_t
761 pmap_md_unmap_poolpage(vaddr_t va, size_t len)
762 {
763 	KASSERT(len == PAGE_SIZE);
764 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
765 
766 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
767 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
768 
769 	KASSERT(pg);
770 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
771 
772 	KASSERT(VM_PAGEMD_CACHED_P(mdpg));
773 	KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
774 
775 	pv_entry_t pv = &mdpg->mdpg_first;
776 
777 	/* Note last mapped address for future color check */
778 	pv->pv_va = va;
779 
780 	KASSERT(pv->pv_pmap == NULL);
781 	KASSERT(pv->pv_next == NULL);
782 
783 	return pa;
784 }
785 
786 bool
787 pmap_md_direct_mapped_vaddr_p(register_t va)
788 {
789 #ifndef __mips_o32
790 	if (MIPS_XKPHYS_P(va))
791 		return true;
792 #endif
793 	return MIPS_KSEG0_P(va);
794 }
795 
796 paddr_t
797 pmap_md_direct_mapped_vaddr_to_paddr(register_t va)
798 {
799 	if (MIPS_KSEG0_P(va)) {
800 		return MIPS_KSEG0_TO_PHYS(va);
801 	}
802 #ifndef __mips_o32
803 	if (MIPS_XKPHYS_P(va)) {
804 		return MIPS_XKPHYS_TO_PHYS(va);
805 	}
806 #endif
807 	panic("%s: va %#"PRIxREGISTER" not direct mapped!", __func__, va);
808 }
809 
810 bool
811 pmap_md_io_vaddr_p(vaddr_t va)
812 {
813 #ifdef _LP64
814 	if (MIPS_XKPHYS_P(va)) {
815 		return MIPS_XKPHYS_TO_CCA(va) == CCA_UNCACHED;
816 	}
817 #endif
818 	return MIPS_KSEG1_P(va);
819 }
820 
821 void
822 pmap_md_icache_sync_range_index(vaddr_t va, vsize_t len)
823 {
824 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
825 	mips_icache_sync_range_index(va, len);
826 }
827 
828 void
829 pmap_md_icache_sync_all(void)
830 {
831 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
832 	mips_icache_sync_all();
833 }
834 
835 #ifdef MULTIPROCESSOR
836 void
837 pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
838 {
839 	if (ci->ci_index != 0)
840 		return;
841 	const u_int icache_way_pages =
842 	    mips_cache_info.mci_picache_way_size >> PGSHIFT;
843 
844 	KASSERT(icache_way_pages <= 8*sizeof(pmap_tlb_synci_page_mask));
845 	pmap_tlb_synci_page_mask = icache_way_pages - 1;
846 	pmap_tlb_synci_map_mask = ~(~0 << icache_way_pages);
847 	printf("tlb0: synci page mask %#x and map mask %#x used for %u pages\n",
848 	    pmap_tlb_synci_page_mask, pmap_tlb_synci_map_mask, icache_way_pages);
849 }
850 #endif
851 
852 
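/*
 * tlb_walk() callback: verify that a TLB entry belonging to this pmap
 * matches the in-memory page table, ignoring the software-only WIRED
 * and RO bits.  Entries with a different ASID are skipped.
 */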
853 bool
854 pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
855 {
856 	pmap_t pm = ctx;
857 	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
858 	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
859 
860 	if (asid != pai->pai_asid)
861 		return true;
862 	if (!pte_valid_p(pte)) {
863 		KASSERT(MIPS_HAS_R4K_MMU);
864 		KASSERTMSG(pte == MIPS3_PG_G, "va %#"PRIxVADDR" pte %#"PRIxPTE,
865 		    va, pte_value(pte));
866 		return true;
867 	}
868 
869 	const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
870 	KASSERTMSG(ptep != NULL, "va %#"PRIxVADDR" asid %u pte %#"PRIxPTE,
871 	    va, asid, pte_value(pte));
872 	const pt_entry_t opte = *ptep;
873 	pt_entry_t xpte = opte;
874 	if (MIPS_HAS_R4K_MMU) {
875 		xpte &= ~(MIPS3_PG_WIRED|MIPS3_PG_RO);
876 	} else {
877 		xpte &= ~(MIPS1_PG_WIRED|MIPS1_PG_RO);
878 	}
879 
880 	KASSERTMSG(pte == xpte,
881 	    "pmap=%p va=%#"PRIxVADDR" asid=%u: TLB pte (%#"PRIxPTE
882 	    ") != real pte (%#"PRIxPTE"/%#"PRIxPTE") @ %p",
883 	    pm, va, asid, pte_value(pte), pte_value(xpte), pte_value(opte),
884 	    ptep);
885 
886 	return true;
887 }
888 
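/*
 * Walk every TLB entry on this CPU and invoke "func" for each valid
 * mapping (both the even and odd page of an R4K-style entry pair),
 * stopping early if the callback returns false.
 */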
889 void
890 tlb_walk(void *ctx, tlb_walkfunc_t func)
891 {
892 	kpreempt_disable();
893 	for (size_t i = 0; i < mips_options.mips_num_tlb_entries; i++) {
894 		struct tlbmask tlbmask;
895 		tlb_asid_t asid;
896 		vaddr_t va;
897 		tlb_read_entry(i, &tlbmask);
898 		if (MIPS_HAS_R4K_MMU) {
899 			asid = __SHIFTOUT(tlbmask.tlb_hi, MIPS3_PG_ASID);
900 			va = tlbmask.tlb_hi & MIPS3_PG_HVPN;
901 		} else {
902 			asid = __SHIFTOUT(tlbmask.tlb_hi, MIPS1_TLB_PID);
903 			va = tlbmask.tlb_hi & MIPS1_PG_FRAME;
904 		}
905 		if ((pt_entry_t)tlbmask.tlb_lo0 != 0) {
906 			pt_entry_t pte = tlbmask.tlb_lo0;
907 			tlb_asid_t asid0 = (pte_global_p(pte) ? KERNEL_PID : asid);
908 			if (!(*func)(ctx, va, asid0, pte))
909 				break;
910 		}
911 #if (PGSHIFT & 1) == 0
912 		if (MIPS_HAS_R4K_MMU && (pt_entry_t)tlbmask.tlb_lo1 != 0) {
913 			pt_entry_t pte = tlbmask.tlb_lo1;
914 			tlb_asid_t asid1 = (pte_global_p(pte) ? KERNEL_PID : asid);
915 			if (!(*func)(ctx, va + MIPS3_PG_ODDPG, asid1, pte))
916 				break;
917 		}
918 #endif
919 	}
920 	kpreempt_enable();
921 }
922 
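/*
 * Virtual cache alias handling when a mapping is added for a page that
 * already has other mappings.  With PMAP_NO_PV_UNCACHED the conflicting
 * user mappings are removed (to be faulted back in later); otherwise
 * the page and all of its mappings are downgraded to uncached until
 * the conflict goes away.  Only relevant on virtually indexed caches.
 */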
923 bool
924 pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *ptep)
925 {
926 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
927 	if (!MIPS_HAS_R4K_MMU || !MIPS_CACHE_VIRTUAL_ALIAS)
928 		return false;
929 
930 	/*
931 	 * There is at least one other VA mapping this page.
932 	 * Check if they are cache index compatible.
933 	 */
934 
935 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
936 	pv_entry_t pv = &mdpg->mdpg_first;
937 #if defined(PMAP_NO_PV_UNCACHED)
938 	/*
939 	 * Instead of mapping uncached, which some platforms
940 	 * cannot support, remove incompatible mappings from other pmaps.
941 	 * When this address is touched again, the uvm will
942 	 * fault it in.  Because of this, each page will only
943 	 * be mapped with one index at any given time.
944 	 *
945 	 * We need to deal with all entries on the list - if the first is
946 	 * incompatible with the new mapping then they all will be.
947 	 */
948 	if (__predict_true(!mips_cache_badalias(pv->pv_va, va))) {
949 		return false;
950 	}
951 	KASSERT(pv->pv_pmap != NULL);
952 	bool ret = false;
953 	for (pv_entry_t npv = pv; npv && npv->pv_pmap;) {
954 		if (PV_ISKENTER_P(npv)) {
955 			npv = npv->pv_next;
956 			continue;
957 		}
958 		ret = true;
959 		vaddr_t nva = trunc_page(npv->pv_va);
960 		pmap_t npm = npv->pv_pmap;
961 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
962 		pmap_remove(npm, nva, nva + PAGE_SIZE);
963 
964 		/*
965 		 * pmap_update is not required here as we're the pmap
966 		 * and we know that the invalidation happened or the
967 		 * asid has been released (and activation is deferred)
968 		 *
969 		 * A deferred activation should NOT occur here.
970 		 */
971 		(void)VM_PAGEMD_PVLIST_LOCK(mdpg);
972 
973 		npv = pv;
974 	}
975 	KASSERT(ret == true);
976 
977 	return ret;
978 #else	/* !PMAP_NO_PV_UNCACHED */
979 	if (VM_PAGEMD_CACHED_P(mdpg)) {
980 		/*
981 		 * If this page is cached, then all mappings
982 		 * have the same cache alias so we only need
983 		 * to check the first page to see if it's
984 		 * incompatible with the new mapping.
985 		 *
986 		 * If the mappings are incompatible, map this
987 		 * page as uncached and re-map all the current
988 		 * mappings as uncached until all pages can
989 		 * share the same cache index again.
990 		 */
991 		if (mips_cache_badalias(pv->pv_va, va)) {
992 			pmap_page_cache(mdpg, false);
993 			pmap_md_vca_page_wbinv(mdpg, true);
994 			*ptep = pte_cached_change(*ptep, false);
995 			PMAP_COUNT(page_cache_evictions);
996 		}
997 	} else {
998 		*ptep = pte_cached_change(*ptep, false);
999 		PMAP_COUNT(page_cache_evictions);
1000 	}
1001 	return false;
1002 #endif	/* !PMAP_NO_PV_UNCACHED */
1003 }
1004 
1005 void
1006 pmap_md_vca_clean(struct vm_page_md *mdpg, int op)
1007 {
1008 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1009 	if (!MIPS_HAS_R4K_MMU || !MIPS_CACHE_VIRTUAL_ALIAS)
1010 		return;
1011 
1012 	UVMHIST_LOG(pmaphist, "(mdpg=%#jx, op=%d)", (uintptr_t)mdpg, op, 0, 0);
1013 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
1014 
1015 	if (op == PMAP_WB || op == PMAP_WBINV) {
1016 		pmap_md_vca_page_wbinv(mdpg, true);
1017 	} else if (op == PMAP_INV) {
1018 		KASSERT(op == PMAP_INV && false);
1019 		//mips_dcache_inv_range_index(va, PAGE_SIZE);
1020 	}
1021 }
1022 
1023 /*
1024  * In the PMAP_NO_PV_CACHED case, all conflicts are resolved at mapping
1025  * so nothing needs to be done in removal.
1026  */
1027 void
1028 pmap_md_vca_remove(struct vm_page *pg, vaddr_t va, bool dirty, bool last)
1029 {
1030 #if !defined(PMAP_NO_PV_UNCACHED)
1031 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1032 	if (!MIPS_HAS_R4K_MMU
1033 	    || !MIPS_CACHE_VIRTUAL_ALIAS
1034 	    || !VM_PAGEMD_UNCACHED_P(mdpg))
1035 		return;
1036 
1037 	KASSERT(kpreempt_disabled());
1038 	KASSERT((va & PAGE_MASK) == 0);
1039 
1040 	/*
1041 	 * Page is currently uncached, check if alias mapping has been
1042 	 * removed.  If it was, then reenable caching.
1043 	 */
1044 	(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
1045 	pv_entry_t pv = &mdpg->mdpg_first;
1046 	pv_entry_t pv0 = pv->pv_next;
1047 
1048 	for (; pv0; pv0 = pv0->pv_next) {
1049 		if (mips_cache_badalias(pv->pv_va, pv0->pv_va))
1050 			break;
1051 	}
1052 	if (pv0 == NULL)
1053 		pmap_page_cache(mdpg, true);
1054 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1055 #endif
1056 }
1057 
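/*
 * Translate between direct-mapped kernel VAs and physical addresses
 * for pool pages: cached XKPHYS on 64-bit kernels, KSEG0 otherwise.
 */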
1058 paddr_t
1059 pmap_md_pool_vtophys(vaddr_t va)
1060 {
1061 #ifdef _LP64
1062 	if (MIPS_XKPHYS_P(va))
1063 		return MIPS_XKPHYS_TO_PHYS(va);
1064 #endif
1065 	KASSERT(MIPS_KSEG0_P(va));
1066 	return MIPS_KSEG0_TO_PHYS(va);
1067 }
1068 
1069 vaddr_t
1070 pmap_md_pool_phystov(paddr_t pa)
1071 {
1072 #ifdef _LP64
1073 	KASSERT(mips_options.mips3_xkphys_cached);
1074 	return MIPS_PHYS_TO_XKPHYS_CACHED(pa);
1075 #else
1076 	KASSERT((pa & ~MIPS_PHYS_MASK) == 0);
1077 	return MIPS_PHYS_TO_KSEG0(pa);
1078 #endif
1079 }
1080