xref: /netbsd-src/sys/arch/mips/mips/pmap_machdep.c (revision 196ee94dc2caba7b7142fe526608756e9504ee2b)
1*196ee94dSskrll /*	$NetBSD: pmap_machdep.c,v 1.38 2022/10/26 07:35:20 skrll Exp $	*/
2d7e78fcfSmatt 
3d7e78fcfSmatt /*-
4d7e78fcfSmatt  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5d7e78fcfSmatt  * All rights reserved.
6d7e78fcfSmatt  *
7d7e78fcfSmatt  * This code is derived from software contributed to The NetBSD Foundation
8d7e78fcfSmatt  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9d7e78fcfSmatt  * NASA Ames Research Center and by Chris G. Demetriou.
10d7e78fcfSmatt  *
11d7e78fcfSmatt  * Redistribution and use in source and binary forms, with or without
12d7e78fcfSmatt  * modification, are permitted provided that the following conditions
13d7e78fcfSmatt  * are met:
14d7e78fcfSmatt  * 1. Redistributions of source code must retain the above copyright
15d7e78fcfSmatt  *    notice, this list of conditions and the following disclaimer.
16d7e78fcfSmatt  * 2. Redistributions in binary form must reproduce the above copyright
17d7e78fcfSmatt  *    notice, this list of conditions and the following disclaimer in the
18d7e78fcfSmatt  *    documentation and/or other materials provided with the distribution.
19d7e78fcfSmatt  *
20d7e78fcfSmatt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21d7e78fcfSmatt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22d7e78fcfSmatt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23d7e78fcfSmatt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24d7e78fcfSmatt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25d7e78fcfSmatt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26d7e78fcfSmatt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27d7e78fcfSmatt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28d7e78fcfSmatt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29d7e78fcfSmatt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30d7e78fcfSmatt  * POSSIBILITY OF SUCH DAMAGE.
31d7e78fcfSmatt  */
32d7e78fcfSmatt 
33d7e78fcfSmatt /*
34d7e78fcfSmatt  * Copyright (c) 1992, 1993
35d7e78fcfSmatt  *	The Regents of the University of California.  All rights reserved.
36d7e78fcfSmatt  *
37d7e78fcfSmatt  * This code is derived from software contributed to Berkeley by
38d7e78fcfSmatt  * the Systems Programming Group of the University of Utah Computer
39d7e78fcfSmatt  * Science Department and Ralph Campbell.
40d7e78fcfSmatt  *
41d7e78fcfSmatt  * Redistribution and use in source and binary forms, with or without
42d7e78fcfSmatt  * modification, are permitted provided that the following conditions
43d7e78fcfSmatt  * are met:
44d7e78fcfSmatt  * 1. Redistributions of source code must retain the above copyright
45d7e78fcfSmatt  *    notice, this list of conditions and the following disclaimer.
46d7e78fcfSmatt  * 2. Redistributions in binary form must reproduce the above copyright
47d7e78fcfSmatt  *    notice, this list of conditions and the following disclaimer in the
48d7e78fcfSmatt  *    documentation and/or other materials provided with the distribution.
49d7e78fcfSmatt  * 3. Neither the name of the University nor the names of its contributors
50d7e78fcfSmatt  *    may be used to endorse or promote products derived from this software
51d7e78fcfSmatt  *    without specific prior written permission.
52d7e78fcfSmatt  *
53d7e78fcfSmatt  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54d7e78fcfSmatt  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55d7e78fcfSmatt  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56d7e78fcfSmatt  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57d7e78fcfSmatt  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58d7e78fcfSmatt  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59d7e78fcfSmatt  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60d7e78fcfSmatt  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61d7e78fcfSmatt  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62d7e78fcfSmatt  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63d7e78fcfSmatt  * SUCH DAMAGE.
64d7e78fcfSmatt  *
65d7e78fcfSmatt  *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
66d7e78fcfSmatt  */
67d7e78fcfSmatt 
68d7e78fcfSmatt #include <sys/cdefs.h>
69d7e78fcfSmatt 
70*196ee94dSskrll __KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.38 2022/10/26 07:35:20 skrll Exp $");
71d7e78fcfSmatt 
72d7e78fcfSmatt /*
73d7e78fcfSmatt  *	Manages physical address maps.
74d7e78fcfSmatt  *
75d7e78fcfSmatt  *	In addition to hardware address maps, this
76d7e78fcfSmatt  *	module is called upon to provide software-use-only
77d7e78fcfSmatt  *	maps which may or may not be stored in the same
78d7e78fcfSmatt  *	form as hardware maps.  These pseudo-maps are
79d7e78fcfSmatt  *	used to store intermediate results from copy
80d7e78fcfSmatt  *	operations to and from address spaces.
81d7e78fcfSmatt  *
82d7e78fcfSmatt  *	Since the information managed by this module is
83d7e78fcfSmatt  *	also stored by the logical address mapping module,
84d7e78fcfSmatt  *	this module may throw away valid virtual-to-physical
85d7e78fcfSmatt  *	mappings at almost any time.  However, invalidations
86d7e78fcfSmatt  *	of virtual-to-physical mappings must be done as
87d7e78fcfSmatt  *	requested.
88d7e78fcfSmatt  *
89d7e78fcfSmatt  *	In order to cope with hardware architectures which
90d7e78fcfSmatt  *	make virtual-to-physical map invalidates expensive,
91d7e78fcfSmatt  *	this module may delay invalidate or reduced protection
92d7e78fcfSmatt  *	operations until such time as they are actually
93d7e78fcfSmatt  *	necessary.  This module is given full information as
94d7e78fcfSmatt  *	to which processors are currently using which maps,
95d7e78fcfSmatt  *	and to when physical maps must be made correct.
96d7e78fcfSmatt  */
97d7e78fcfSmatt 
98d7e78fcfSmatt /* XXX simonb 2002/02/26
99d7e78fcfSmatt  *
100d7e78fcfSmatt  * MIPS3_PLUS is used to conditionally compile the r4k MMU support.
101d7e78fcfSmatt  * This is bogus - for example, some IDT MIPS-II CPUs have r4k style
102d7e78fcfSmatt  * MMUs (and 32-bit ones at that).
103d7e78fcfSmatt  *
104d7e78fcfSmatt  * On the other hand, it's not likely that we'll ever support the R6000
105d7e78fcfSmatt  * (is it?), so maybe that can be an "if MIPS2 or greater" check.
106d7e78fcfSmatt  *
107d7e78fcfSmatt  * Also along these lines are using totally separate functions for
108d7e78fcfSmatt  * r3k-style and r4k-style MMUs and removing all the MIPS_HAS_R4K_MMU
109d7e78fcfSmatt  * checks in the current functions.
110d7e78fcfSmatt  *
111d7e78fcfSmatt  * These warnings probably apply to other files under sys/arch/mips.
112d7e78fcfSmatt  */
113d7e78fcfSmatt 
114d7e78fcfSmatt #include "opt_cputype.h"
115d7e78fcfSmatt #include "opt_mips_cache.h"
116bb312a98Sskrll #include "opt_multiprocessor.h"
117bb312a98Sskrll #include "opt_sysv.h"
118d7e78fcfSmatt 
119d7e78fcfSmatt #define __MUTEX_PRIVATE
120d7e78fcfSmatt #define __PMAP_PRIVATE
121d7e78fcfSmatt 
122d7e78fcfSmatt #include <sys/param.h>
123d7e78fcfSmatt #include <sys/atomic.h>
124d7e78fcfSmatt #include <sys/buf.h>
125d7e78fcfSmatt #include <sys/cpu.h>
126d7e78fcfSmatt #include <sys/kernel.h>
127d7e78fcfSmatt #include <sys/mutex.h>
128d7e78fcfSmatt #include <sys/pool.h>
129d7e78fcfSmatt #include <sys/proc.h>
130d7e78fcfSmatt #include <sys/systm.h>
131d7e78fcfSmatt #ifdef SYSVSHM
132d7e78fcfSmatt #include <sys/shm.h>
133d7e78fcfSmatt #endif
134d7e78fcfSmatt 
135d7e78fcfSmatt #include <uvm/uvm.h>
1363b1622faScherry #include <uvm/uvm_physseg.h>
137d7e78fcfSmatt 
138d7e78fcfSmatt #include <mips/cache.h>
139d7e78fcfSmatt #include <mips/cpuregs.h>
140d7e78fcfSmatt #include <mips/locore.h>
141d7e78fcfSmatt #include <mips/pte.h>
142d7e78fcfSmatt 
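/*
 * Compile-time sanity checks: the KSEG0/KSEG1/KSEG2 windows must be
 * sign-extended (negative as intptr_t) and sit at their architecturally
 * fixed 32-bit addresses, and the kernel VA space must start on a
 * segment (xsegment on _LP64) boundary.
 */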
143d7e78fcfSmatt CTASSERT(MIPS_KSEG0_START < 0);
144d7e78fcfSmatt CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG0(0x1000) < 0);
145d7e78fcfSmatt CTASSERT(MIPS_KSEG1_START < 0);
146d7e78fcfSmatt CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG1(0x1000) < 0);
147d7e78fcfSmatt CTASSERT(MIPS_KSEG2_START < 0);
148d7e78fcfSmatt CTASSERT(MIPS_MAX_MEM_ADDR < 0);
149d7e78fcfSmatt CTASSERT(MIPS_RESERVED_ADDR < 0);
150d7e78fcfSmatt CTASSERT((uint32_t)MIPS_KSEG0_START == 0x80000000);
151d7e78fcfSmatt CTASSERT((uint32_t)MIPS_KSEG1_START == 0xa0000000);
152d7e78fcfSmatt CTASSERT((uint32_t)MIPS_KSEG2_START == 0xc0000000);
153d7e78fcfSmatt CTASSERT((uint32_t)MIPS_MAX_MEM_ADDR == 0xbe000000);
154d7e78fcfSmatt CTASSERT((uint32_t)MIPS_RESERVED_ADDR == 0xbfc80000);
155d7e78fcfSmatt CTASSERT(MIPS_KSEG0_P(MIPS_PHYS_TO_KSEG0(0)));
156d7e78fcfSmatt CTASSERT(MIPS_KSEG1_P(MIPS_PHYS_TO_KSEG1(0)));
157d7e78fcfSmatt #ifdef _LP64
158d7e78fcfSmatt CTASSERT(VM_MIN_KERNEL_ADDRESS % NBXSEG == 0);
159d7e78fcfSmatt #else
160d7e78fcfSmatt CTASSERT(VM_MIN_KERNEL_ADDRESS % NBSEG == 0);
161d7e78fcfSmatt #endif
162d7e78fcfSmatt 
163d7e78fcfSmatt //PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
164d7e78fcfSmatt PMAP_COUNTER(zeroed_pages, "pages zeroed");
165d7e78fcfSmatt PMAP_COUNTER(copied_pages, "pages copied");
166d7e78fcfSmatt extern struct evcnt pmap_evcnt_page_cache_evictions;
167d7e78fcfSmatt 
168e6f09596Sskrll u_int pmap_page_cache_alias_mask;
169e6f09596Sskrll 
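/*
 * pmap_md_cache_indexof() extracts the cache-colour bits of an address,
 * using the alias mask computed in pmap_bootstrap() from the primary
 * I-cache and D-cache alias masks.
 */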
170e6f09596Sskrll #define pmap_md_cache_indexof(x)	(((vaddr_t)(x)) & pmap_page_cache_alias_mask)
171e6f09596Sskrll 
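/*
 * Map a managed page at a kernel VA just long enough to operate on its
 * contents (zero, copy, cache flush).  A direct-mapped (XKPHYS/KSEG0)
 * address is used when it cannot produce a harmful virtual-cache alias;
 * otherwise a per-CPU ephemeral VA of a compatible cache colour is
 * entered into the kernel page table and forced into the TLB, with the
 * previous PTE saved so pmap_md_unmap_ephemeral_page() can restore it.
 */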
172d7e78fcfSmatt static register_t
17331d27c36Sskrll pmap_md_map_ephemeral_page(struct vm_page_md *mdpg, bool locked_p, int prot,
174d7e78fcfSmatt     pt_entry_t *old_pte_p)
175d7e78fcfSmatt {
17631d27c36Sskrll 	KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
17731d27c36Sskrll 
17831d27c36Sskrll 	struct vm_page *pg = VM_MD_TO_PAGE(mdpg);
179d7e78fcfSmatt 	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
180d7e78fcfSmatt 	pv_entry_t pv = &mdpg->mdpg_first;
181d7e78fcfSmatt 	register_t va = 0;
182d7e78fcfSmatt 
18331d27c36Sskrll 	UVMHIST_FUNC(__func__);
18431d27c36Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx, prot=%d, ptep=%#jx)",
18531d27c36Sskrll 	    (uintptr_t)pg, prot, (uintptr_t)old_pte_p, 0);
186d7e78fcfSmatt 
187d7e78fcfSmatt 	KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
188d7e78fcfSmatt 
189d7e78fcfSmatt 	if (!MIPS_CACHE_VIRTUAL_ALIAS || !mips_cache_badalias(pv->pv_va, pa)) {
1904dc2832eSskrll #ifdef _LP64
191d7e78fcfSmatt 		va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
192d7e78fcfSmatt #else
193d7e78fcfSmatt 		if (pa < MIPS_PHYS_MASK) {
194d7e78fcfSmatt 			va = MIPS_PHYS_TO_KSEG0(pa);
195d7e78fcfSmatt 		}
196d7e78fcfSmatt #endif
197d7e78fcfSmatt 	}
198d7e78fcfSmatt 	if (va == 0) {
199d7e78fcfSmatt 		/*
200d7e78fcfSmatt 		 * Make sure to use a congruent mapping to the last mapped
201d7e78fcfSmatt 		 * address so we don't have to worry about virtual aliases.
202d7e78fcfSmatt 		 */
203d7e78fcfSmatt 		kpreempt_disable(); // paired with the one in unmap
204d7e78fcfSmatt 		struct cpu_info * const ci = curcpu();
205e6f09596Sskrll 		if (MIPS_CACHE_VIRTUAL_ALIAS) {
206d7e78fcfSmatt 			KASSERT(ci->ci_pmap_dstbase != 0);
207e6f09596Sskrll 			KASSERT(ci->ci_pmap_srcbase != 0);
208d7e78fcfSmatt 
209e6f09596Sskrll 			const u_int __diagused mask = pmap_page_cache_alias_mask;
210e6f09596Sskrll 			KASSERTMSG((ci->ci_pmap_dstbase & mask) == 0,
211e6f09596Sskrll 			    "%#"PRIxVADDR, ci->ci_pmap_dstbase);
212e6f09596Sskrll 			KASSERTMSG((ci->ci_pmap_srcbase & mask) == 0,
213e6f09596Sskrll 			    "%#"PRIxVADDR, ci->ci_pmap_srcbase);
214e6f09596Sskrll 		}
2154dc2832eSskrll 		vaddr_t nva = (prot & VM_PROT_WRITE
216d7e78fcfSmatt 			? ci->ci_pmap_dstbase
217d7e78fcfSmatt 			: ci->ci_pmap_srcbase)
218e6f09596Sskrll 		    + pmap_md_cache_indexof(MIPS_CACHE_VIRTUAL_ALIAS
219d7e78fcfSmatt 			? pv->pv_va
220d7e78fcfSmatt 			: pa);
221d7e78fcfSmatt 
2224dc2832eSskrll 		va = (intptr_t)nva;
223d7e78fcfSmatt 		/*
224d7e78fcfSmatt 		 * Now to make and write the new PTE to map the PA.
225d7e78fcfSmatt 		 */
226d7e78fcfSmatt 		const pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, 0);
227d7e78fcfSmatt 		pt_entry_t * const ptep = pmap_pte_lookup(pmap_kernel(), va);
228d7e78fcfSmatt 		*old_pte_p = *ptep;		// save
229d7e78fcfSmatt 		bool rv __diagused;
230d7e78fcfSmatt 		*ptep = npte;			// update page table
231d7e78fcfSmatt 
232d7e78fcfSmatt 		// update the TLB directly making sure we force the new entry
233d7e78fcfSmatt 		// into it.
234d7e78fcfSmatt 		rv = tlb_update_addr(va, KERNEL_PID, npte, true);
235d7e78fcfSmatt 		KASSERTMSG(rv == 1, "va %#"PRIxREGISTER" pte=%#"PRIxPTE" rv=%d",
236d7e78fcfSmatt 		    va, pte_value(npte), rv);
237d7e78fcfSmatt 	}
238d7e78fcfSmatt 	if (MIPS_CACHE_VIRTUAL_ALIAS) {
239d7e78fcfSmatt 		/*
240d7e78fcfSmatt 		 * If we are forced to use an incompatible alias, flush the
241d7e78fcfSmatt 		 * page from the cache so we will copy the correct contents.
242d7e78fcfSmatt 		 */
243d7e78fcfSmatt 		if (!locked_p)
244d7e78fcfSmatt 			(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
245d7e78fcfSmatt 		if (VM_PAGEMD_CACHED_P(mdpg)
246d7e78fcfSmatt 		    && mips_cache_badalias(pv->pv_va, va)) {
2474dc2832eSskrll 			register_t ova = (intptr_t)trunc_page(pv->pv_va);
2484dc2832eSskrll 			mips_dcache_wbinv_range_index(ova, PAGE_SIZE);
249d7e78fcfSmatt 			/*
250d7e78fcfSmatt 			 * If there is no active mapping, remember this new one.
251d7e78fcfSmatt 			 */
252d7e78fcfSmatt 			if (pv->pv_pmap == NULL)
253d7e78fcfSmatt 				pv->pv_va = va;
254d7e78fcfSmatt 		}
255d7e78fcfSmatt 		if (!locked_p)
256d7e78fcfSmatt 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
257d7e78fcfSmatt 	}
258d7e78fcfSmatt 
259d7e78fcfSmatt 	UVMHIST_LOG(pmaphist, " <-- done (va=%#lx)", va, 0, 0, 0);
260d7e78fcfSmatt 
261d7e78fcfSmatt 	return va;
262d7e78fcfSmatt }
263d7e78fcfSmatt 
264d7e78fcfSmatt static void
26531d27c36Sskrll pmap_md_unmap_ephemeral_page(struct vm_page_md *mdpg, bool locked_p,
26631d27c36Sskrll     register_t va, pt_entry_t old_pte)
267d7e78fcfSmatt {
26831d27c36Sskrll 	KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
26931d27c36Sskrll 
270d7e78fcfSmatt 	pv_entry_t pv = &mdpg->mdpg_first;
271d7e78fcfSmatt 
27231d27c36Sskrll 	UVMHIST_FUNC(__func__);
27331d27c36Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx, va=%#lx, pte=%#"PRIxPTE")",
27431d27c36Sskrll 	    (uintptr_t)VM_MD_TO_PAGE(mdpg), va, pte_value(old_pte), 0);
275d7e78fcfSmatt 
276d7e78fcfSmatt 	KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
277d7e78fcfSmatt 
278d7e78fcfSmatt 	if (MIPS_CACHE_VIRTUAL_ALIAS) {
279d7e78fcfSmatt 		if (!locked_p)
280d7e78fcfSmatt 			(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
281d7e78fcfSmatt 		/*
282d7e78fcfSmatt 		 * If this page was previously uncached or we had to use an
283d7e78fcfSmatt 		 * incompatible alias, flush it from the cache.
284d7e78fcfSmatt 		 */
285d7e78fcfSmatt 		if (VM_PAGEMD_UNCACHED_P(mdpg)
286d7e78fcfSmatt 		    || (pv->pv_pmap != NULL
287d7e78fcfSmatt 			&& mips_cache_badalias(pv->pv_va, va))) {
288d7e78fcfSmatt 			mips_dcache_wbinv_range(va, PAGE_SIZE);
289d7e78fcfSmatt 		}
290d7e78fcfSmatt 		if (!locked_p)
291d7e78fcfSmatt 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
292d7e78fcfSmatt 	}
293d7e78fcfSmatt 	/*
294d7e78fcfSmatt 	 * If we had to map using a page table entry, restore it now.
295d7e78fcfSmatt 	 */
296d7e78fcfSmatt 	if (!pmap_md_direct_mapped_vaddr_p(va)) {
297d7e78fcfSmatt 		*pmap_pte_lookup(pmap_kernel(), va) = old_pte;
298d7e78fcfSmatt 		if (pte_valid_p(old_pte)) {
299d7e78fcfSmatt 			// Update the TLB with the old mapping.
300d7e78fcfSmatt 			tlb_update_addr(va, KERNEL_PID, old_pte, 0);
301d7e78fcfSmatt 		} else {
302d7e78fcfSmatt 			// Invalidate TLB entry if the old pte wasn't valid.
303d7e78fcfSmatt 			tlb_invalidate_addr(va, KERNEL_PID);
304d7e78fcfSmatt 		}
305d7e78fcfSmatt 		kpreempt_enable();	// Restore preemption
306d7e78fcfSmatt 	}
307d7e78fcfSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
308d7e78fcfSmatt }
309d7e78fcfSmatt 
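/*
 * Write back and invalidate a managed page from the data cache, going
 * through an ephemeral mapping so the flush uses a VA of the right
 * cache colour.
 */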
310d7e78fcfSmatt static void
31131d27c36Sskrll pmap_md_vca_page_wbinv(struct vm_page_md *mdpg, bool locked_p)
312d7e78fcfSmatt {
3139cd6c57aSskrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
314d7e78fcfSmatt 	pt_entry_t pte;
315d7e78fcfSmatt 
31631d27c36Sskrll 	const register_t va = pmap_md_map_ephemeral_page(mdpg, locked_p,
317d7e78fcfSmatt 	    VM_PROT_READ, &pte);
318d7e78fcfSmatt 
319d7e78fcfSmatt 	mips_dcache_wbinv_range(va, PAGE_SIZE);
320d7e78fcfSmatt 
32131d27c36Sskrll 	pmap_md_unmap_ephemeral_page(mdpg, locked_p, va, pte);
322d7e78fcfSmatt }
323d7e78fcfSmatt 
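/*
 * On 32-bit kernels only pages that KSEG0 can map directly (physical
 * addresses below MIPS_PHYS_MASK) may be stolen at boot time.
 */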
324d7e78fcfSmatt bool
3253b1622faScherry pmap_md_ok_to_steal_p(const uvm_physseg_t bank, size_t npgs)
326d7e78fcfSmatt {
327d7e78fcfSmatt #ifndef _LP64
3283b1622faScherry 	if (uvm_physseg_get_avail_start(bank) + npgs >= atop(MIPS_PHYS_MASK + 1)) {
3293b1622faScherry 		aprint_debug("%s: seg not enough in KSEG0 for %zu pages\n",
3303b1622faScherry 		    __func__, npgs);
331d7e78fcfSmatt 		return false;
332d7e78fcfSmatt 	}
333d7e78fcfSmatt #endif
334d7e78fcfSmatt 	return true;
335d7e78fcfSmatt }
336d7e78fcfSmatt 
337d7e78fcfSmatt /*
338d7e78fcfSmatt  *	Bootstrap the system enough to run with virtual memory.
339d7e78fcfSmatt  */
340d7e78fcfSmatt void
341d7e78fcfSmatt pmap_bootstrap(void)
342d7e78fcfSmatt {
343d7e78fcfSmatt 	vsize_t bufsz;
344d7e78fcfSmatt 	size_t sysmap_size;
345d7e78fcfSmatt 	pt_entry_t *sysmap;
346d7e78fcfSmatt 
347e6f09596Sskrll 	if (MIPS_CACHE_VIRTUAL_ALIAS && uvmexp.ncolors) {
348d7e78fcfSmatt 		pmap_page_colormask = (uvmexp.ncolors - 1) << PAGE_SHIFT;
349d1579b2dSriastradh 		pmap_page_cache_alias_mask = uimax(
350e6f09596Sskrll 		    mips_cache_info.mci_cache_alias_mask,
351e6f09596Sskrll 		    mips_cache_info.mci_icache_alias_mask);
352e6f09596Sskrll 	}
353d7e78fcfSmatt 
354d7e78fcfSmatt #ifdef MULTIPROCESSOR
355d7e78fcfSmatt 	pmap_t pm = pmap_kernel();
356d7e78fcfSmatt 	kcpuset_create(&pm->pm_onproc, true);
357d7e78fcfSmatt 	kcpuset_create(&pm->pm_active, true);
358d7e78fcfSmatt 	KASSERT(pm->pm_onproc != NULL);
359d7e78fcfSmatt 	KASSERT(pm->pm_active != NULL);
360d7e78fcfSmatt 	kcpuset_set(pm->pm_onproc, cpu_number());
361d7e78fcfSmatt 	kcpuset_set(pm->pm_active, cpu_number());
362d7e78fcfSmatt #endif
36329807ee5Sthorpej 
36429807ee5Sthorpej 	pmap_bootstrap_common();
36529807ee5Sthorpej 
366d7e78fcfSmatt 	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */
367d7e78fcfSmatt 
368d7e78fcfSmatt 	/*
369d7e78fcfSmatt 	 * Compute the number of pages kmem_arena will have.
370d7e78fcfSmatt 	 */
371d7e78fcfSmatt 	kmeminit_nkmempages();
372d7e78fcfSmatt 
373d7e78fcfSmatt 	/*
374d7e78fcfSmatt 	 * Figure out how many PTE's are necessary to map the kernel.
375d7e78fcfSmatt 	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
376d7e78fcfSmatt 	 */
377d7e78fcfSmatt 
378d7e78fcfSmatt 	/* Get size of buffer cache and set an upper limit */
379d7e78fcfSmatt 	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
380d7e78fcfSmatt 	bufsz = buf_memcalc();
381d7e78fcfSmatt 	buf_setvalimit(bufsz);
382d7e78fcfSmatt 
383d7e78fcfSmatt 	sysmap_size = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
384d7e78fcfSmatt 	    bufsz + 16 * NCARGS + pager_map_size) / NBPG +
385d7e78fcfSmatt 	    (maxproc * UPAGES) + nkmempages;
386d7e78fcfSmatt 
387d7e78fcfSmatt #ifdef SYSVSHM
388d7e78fcfSmatt 	sysmap_size += shminfo.shmall;
389d7e78fcfSmatt #endif
390d7e78fcfSmatt #ifdef KSEG2IOBUFSIZE
391d7e78fcfSmatt 	sysmap_size += (KSEG2IOBUFSIZE >> PGSHIFT);
392d7e78fcfSmatt #endif
393d7e78fcfSmatt #ifdef _LP64
394d7e78fcfSmatt 	/*
395d7e78fcfSmatt 	 * If we are using tmpfs, then we might want to use a great deal of
396d7e78fcfSmatt 	 * our memory with it.  Make sure we have enough VM to do that.
397d7e78fcfSmatt 	 */
398d7e78fcfSmatt 	sysmap_size += physmem;
399d7e78fcfSmatt #else
400d7e78fcfSmatt 	/* XXX: else runs out of space on 256MB sbmips!! */
401d7e78fcfSmatt 	sysmap_size += 20000;
402d7e78fcfSmatt #endif
4033398ec50Sskrll 	/* Round up to an even number of pte page tables */
404d7e78fcfSmatt 	sysmap_size = (sysmap_size + NPTEPG - 1) & -NPTEPG;
405d7e78fcfSmatt 
406d7e78fcfSmatt 	/*
407d7e78fcfSmatt 	 * Initialize `FYI' variables.	Note we're relying on
408d7e78fcfSmatt 	 * the fact that BSEARCH sorts the vm_physmem[] array
409d7e78fcfSmatt 	 * for us.  Must do this before uvm_pageboot_alloc()
410d7e78fcfSmatt 	 * can be called.
411d7e78fcfSmatt 	 */
4123b1622faScherry 	pmap_limits.avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
4133b1622faScherry 	pmap_limits.avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));
414d7e78fcfSmatt 	pmap_limits.virtual_end = pmap_limits.virtual_start + (vaddr_t)sysmap_size * NBPG;
415d7e78fcfSmatt 
416d7e78fcfSmatt #ifndef _LP64
417d7e78fcfSmatt 	if (pmap_limits.virtual_end > VM_MAX_KERNEL_ADDRESS
418d7e78fcfSmatt 	    || pmap_limits.virtual_end < VM_MIN_KERNEL_ADDRESS) {
419d7e78fcfSmatt 		printf("%s: changing last kernel VA from %#"PRIxVADDR
420d7e78fcfSmatt 		    " to %#"PRIxVADDR"\n", __func__,
421d7e78fcfSmatt 		    pmap_limits.virtual_end, VM_MAX_KERNEL_ADDRESS);
422d7e78fcfSmatt 		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
423d7e78fcfSmatt 		sysmap_size =
424d7e78fcfSmatt 		    (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / NBPG;
425d7e78fcfSmatt 	}
426d7e78fcfSmatt #endif
427d7e78fcfSmatt 	pmap_pvlist_lock_init(mips_cache_info.mci_pdcache_line_size);
428d7e78fcfSmatt 
429d7e78fcfSmatt 	/*
430d7e78fcfSmatt 	 * Now actually allocate the kernel PTE array (must be done
431d7e78fcfSmatt 	 * after pmap_limits.virtual_end is initialized).
432d7e78fcfSmatt 	 */
433d7e78fcfSmatt 	sysmap = (pt_entry_t *)
434d7e78fcfSmatt 	    uvm_pageboot_alloc(sizeof(pt_entry_t) * sysmap_size);
435d7e78fcfSmatt 
436d7e78fcfSmatt 	vaddr_t va = VM_MIN_KERNEL_ADDRESS;
437d7e78fcfSmatt #ifdef _LP64
438d7e78fcfSmatt 	/*
439d7e78fcfSmatt 	 * Do we need more than one XSEG's worth of virtual address space?
440d7e78fcfSmatt 	 * If so, we have to allocate the additional pmap_segtab_t's for them
441d7e78fcfSmatt 	 * and insert them into the kernel's top level segtab.
442d7e78fcfSmatt 	 */
443d7e78fcfSmatt 	const size_t xsegs = (sysmap_size * NBPG + NBXSEG - 1) / NBXSEG;
444d7e78fcfSmatt 	if (xsegs > 1) {
445d7e78fcfSmatt 		printf("%s: %zu xsegs required for %zu pages\n",
446d7e78fcfSmatt 		    __func__, xsegs, sysmap_size);
4471974c7e7Sskrll 		pmap_segtab_t *stb = (pmap_segtab_t *)
448d7e78fcfSmatt 		    uvm_pageboot_alloc(sizeof(pmap_segtab_t) * (xsegs - 1));
4491974c7e7Sskrll 		for (size_t i = 1; i <= xsegs; i++, stb++) {
4501974c7e7Sskrll 			pmap_kern_segtab.seg_seg[i] = stb;
451d7e78fcfSmatt 		}
452d7e78fcfSmatt 	}
4531974c7e7Sskrll 	pmap_segtab_t ** const xstb = pmap_kern_segtab.seg_seg;
454d7e78fcfSmatt #else
455d7e78fcfSmatt 	const size_t xsegs = 1;
4561974c7e7Sskrll 	pmap_segtab_t * const stb = &pmap_kern_segtab;
457d7e78fcfSmatt #endif
458d7e78fcfSmatt 	KASSERT(curcpu()->ci_pmap_kern_segtab == &pmap_kern_segtab);
459d7e78fcfSmatt 
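	/*
	 * Wire the newly allocated sysmap PTE pages into the kernel
	 * segment table(s), one segment (NBSEG of VA, NPTEPG PTEs) at a
	 * time.
	 */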
460d7e78fcfSmatt 	for (size_t k = 0, i = 0; k < xsegs; k++) {
461d7e78fcfSmatt #ifdef _LP64
4621974c7e7Sskrll 		pmap_segtab_t * const stb =
4631974c7e7Sskrll 		    xstb[(va >> XSEGSHIFT) & (NSEGPG - 1)];
464d7e78fcfSmatt #endif
465d7e78fcfSmatt 		bool done = false;
466d7e78fcfSmatt 
467d7e78fcfSmatt 		for (size_t j = (va >> SEGSHIFT) & (NSEGPG - 1);
468d7e78fcfSmatt 		     !done && i < sysmap_size;
469d7e78fcfSmatt 		     i += NPTEPG, j++, va += NBSEG) {
470d7e78fcfSmatt 			/*
471d7e78fcfSmatt 			 * Now set the page table pointer...
472d7e78fcfSmatt 			 */
473*196ee94dSskrll 			stb->seg_ppg[j] = (pmap_ptpage_t *)&sysmap[i];
474d7e78fcfSmatt #ifdef _LP64
475d7e78fcfSmatt 			/*
476d7e78fcfSmatt 			 * If we are at end of this XSEG, terminate the loop
477d7e78fcfSmatt 			 * so we advance to the next one.
478d7e78fcfSmatt 			 */
479d7e78fcfSmatt 			done = (j + 1 == NSEGPG);
480d7e78fcfSmatt #endif
481d7e78fcfSmatt 		}
482d7e78fcfSmatt 	}
483d7e78fcfSmatt 	KASSERT(pmap_pte_lookup(pmap_kernel(), VM_MIN_KERNEL_ADDRESS) == sysmap);
484d7e78fcfSmatt 
485a99e7efcSskrll 	/* update the top of the kernel VM - pmap_growkernel not required */
486a99e7efcSskrll 	pmap_curmaxkvaddr = pmap_limits.virtual_end;
487d7e78fcfSmatt 	/*
488d7e78fcfSmatt 	 * Initialize the pools.
489d7e78fcfSmatt 	 */
490d7e78fcfSmatt 	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
491d7e78fcfSmatt 	    &pool_allocator_nointr, IPL_NONE);
492d7e78fcfSmatt 	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
493d7e78fcfSmatt 	    &pmap_pv_page_allocator, IPL_NONE);
494d7e78fcfSmatt 
495bf021c82Sskrll 	tlb_set_asid(KERNEL_PID, pmap_kernel());
496d7e78fcfSmatt 
497d7e78fcfSmatt #ifdef MIPS3_PLUS	/* XXX mmu XXX */
498d7e78fcfSmatt 	/*
499d7e78fcfSmatt 	 * The R4?00 stores only one copy of the Global bit in the
500d7e78fcfSmatt 	 * translation lookaside buffer for each 2 page entry.
501d7e78fcfSmatt 	 * Thus invalid entries must have the Global bit set so
502d7e78fcfSmatt  * when the EntryLo0 and EntryLo1 G bits are ANDed together
503d7e78fcfSmatt 	 * they will produce a global bit to store in the tlb.
504d7e78fcfSmatt 	 */
505d7e78fcfSmatt 	if (MIPS_HAS_R4K_MMU) {
506d7e78fcfSmatt 		while (sysmap_size-- > 0) {
507d7e78fcfSmatt 			*sysmap++ = MIPS3_PG_G;
508d7e78fcfSmatt 		}
509d7e78fcfSmatt 	}
510d7e78fcfSmatt #endif	/* MIPS3_PLUS */
511d7e78fcfSmatt }
512d7e78fcfSmatt 
513d7e78fcfSmatt void
514d7e78fcfSmatt pmap_md_alloc_ephemeral_address_space(struct cpu_info *ci)
515d7e78fcfSmatt {
516d7e78fcfSmatt 	struct mips_cache_info * const mci = &mips_cache_info;
517d7e78fcfSmatt 
518d7e78fcfSmatt 	/*
519d7e78fcfSmatt 	 * If we have more memory than can be mapped by KSEG0, we need to
520d7e78fcfSmatt 	 * allocate enough VA so we can map pages with the right color
521d7e78fcfSmatt 	 * (to avoid cache alias problems).
522d7e78fcfSmatt 	 */
523d7e78fcfSmatt 	if (false
524d7e78fcfSmatt #ifndef _LP64
525d7e78fcfSmatt 	    || pmap_limits.avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START
526d7e78fcfSmatt #endif
527d7e78fcfSmatt 	    || MIPS_CACHE_VIRTUAL_ALIAS
528d7e78fcfSmatt 	    || MIPS_ICACHE_VIRTUAL_ALIAS) {
529d1579b2dSriastradh 		vsize_t size = uimax(mci->mci_pdcache_way_size, mci->mci_picache_way_size);
530e6f09596Sskrll 		const u_int __diagused mask = pmap_page_cache_alias_mask;
531e6f09596Sskrll 
532e6f09596Sskrll 		ci->ci_pmap_dstbase = uvm_km_alloc(kernel_map, size, size,
533e6f09596Sskrll 		    UVM_KMF_VAONLY);
534e6f09596Sskrll 
535d7e78fcfSmatt 		KASSERT(ci->ci_pmap_dstbase);
536e6f09596Sskrll 		KASSERT(!pmap_md_direct_mapped_vaddr_p(ci->ci_pmap_dstbase));
537e6f09596Sskrll 		KASSERTMSG((ci->ci_pmap_dstbase & mask) == 0, "%#"PRIxVADDR,
538e6f09596Sskrll 		    ci->ci_pmap_dstbase);
539e6f09596Sskrll 
540e6f09596Sskrll 		ci->ci_pmap_srcbase = uvm_km_alloc(kernel_map, size, size,
541e6f09596Sskrll 		    UVM_KMF_VAONLY);
542d7e78fcfSmatt 		KASSERT(ci->ci_pmap_srcbase);
543e6f09596Sskrll 		KASSERT(!pmap_md_direct_mapped_vaddr_p(ci->ci_pmap_srcbase));
544e6f09596Sskrll 		KASSERTMSG((ci->ci_pmap_srcbase & mask) == 0, "%#"PRIxVADDR,
545e6f09596Sskrll 		    ci->ci_pmap_srcbase);
546d7e78fcfSmatt 	}
547d7e78fcfSmatt }
548d7e78fcfSmatt 
549d7e78fcfSmatt void
550d7e78fcfSmatt pmap_md_init(void)
551d7e78fcfSmatt {
552d7e78fcfSmatt 	pmap_md_alloc_ephemeral_address_space(curcpu());
553d7e78fcfSmatt 
554d7e78fcfSmatt #if defined(MIPS3) && 0
555d7e78fcfSmatt 	if (MIPS_HAS_R4K_MMU) {
556d7e78fcfSmatt 		/*
557d7e78fcfSmatt 		 * XXX
558d7e78fcfSmatt 		 * Disable sosend_loan() in src/sys/kern/uipc_socket.c
559d7e78fcfSmatt 		 * on MIPS3 CPUs to avoid possible virtual cache aliases
560d7e78fcfSmatt 		 * and uncached mappings in pmap_enter_pv().
561d7e78fcfSmatt 		 *
562d7e78fcfSmatt 		 * Ideally, read only shared mapping won't cause aliases
563d7e78fcfSmatt 		 * so pmap_enter_pv() should handle any shared read only
564d7e78fcfSmatt 		 * mappings without uncached ops like ARM pmap.
565d7e78fcfSmatt 		 *
566d7e78fcfSmatt 		 * On the other hand, R4000 and R4400 have the virtual
567d7e78fcfSmatt 		 * coherency exceptions which will happen even on read only
568d7e78fcfSmatt 		 * mappings, so we always have to disable sosend_loan()
569d7e78fcfSmatt 		 * on such CPUs.
570d7e78fcfSmatt 		 */
571d7e78fcfSmatt 		sock_loan_thresh = -1;
572d7e78fcfSmatt 	}
573d7e78fcfSmatt #endif
574d7e78fcfSmatt }
575d7e78fcfSmatt 
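/*
 * pmap_procwr: synchronize the instruction cache with recently written
 * instructions in the given process' address space (va .. va + len).
 */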
576d7e78fcfSmatt /*
577d7e78fcfSmatt  * XXXJRT -- need a version for each cache type.
578d7e78fcfSmatt  */
579d7e78fcfSmatt void
580d7e78fcfSmatt pmap_procwr(struct proc *p, vaddr_t va, size_t len)
581d7e78fcfSmatt {
582d7e78fcfSmatt 	if (MIPS_HAS_R4K_MMU) {
583d7e78fcfSmatt 		/*
584d7e78fcfSmatt 		 * XXX
585d7e78fcfSmatt 		 * shouldn't need to do this for physical d$?
586d7e78fcfSmatt 		 * should need to do this for virtual i$ if prot == EXEC?
587d7e78fcfSmatt 		 */
588d7e78fcfSmatt 		if (p == curlwp->l_proc
589d7e78fcfSmatt 		    && mips_cache_info.mci_pdcache_way_mask < PAGE_SIZE)
590d7e78fcfSmatt 		    /* XXX check icache mask too? */
591e149b2e6Sskrll 			mips_icache_sync_range((intptr_t)va, len);
592d7e78fcfSmatt 		else
593e149b2e6Sskrll 			mips_icache_sync_range_index((intptr_t)va, len);
594d7e78fcfSmatt 	} else {
595d7e78fcfSmatt 		pmap_t pmap = p->p_vmspace->vm_map.pmap;
596d7e78fcfSmatt 		kpreempt_disable();
597d7e78fcfSmatt 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
598d7e78fcfSmatt 		pt_entry_t entry = (ptep != NULL ? *ptep : 0);
599d7e78fcfSmatt 		kpreempt_enable();
600d7e78fcfSmatt 		if (!pte_valid_p(entry))
601d7e78fcfSmatt 			return;
602d7e78fcfSmatt 
603d7e78fcfSmatt 		mips_icache_sync_range(
604d7e78fcfSmatt 		    MIPS_PHYS_TO_KSEG0(pte_to_paddr(entry) + (va & PGOFSET)),
605d7e78fcfSmatt 		    len);
606d7e78fcfSmatt 	}
607d7e78fcfSmatt }
608d7e78fcfSmatt 
609d7e78fcfSmatt /*
610d7e78fcfSmatt  *	pmap_zero_page zeros the specified page.
611d7e78fcfSmatt  */
612d7e78fcfSmatt void
613d7e78fcfSmatt pmap_zero_page(paddr_t dst_pa)
614d7e78fcfSmatt {
615d7e78fcfSmatt 	pt_entry_t dst_pte;
616d7e78fcfSmatt 
61731d27c36Sskrll 	UVMHIST_FUNC(__func__);
61831d27c36Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pa=%#"PRIxPADDR")", dst_pa, 0, 0, 0);
619d7e78fcfSmatt 	PMAP_COUNT(zeroed_pages);
620d7e78fcfSmatt 
621d7e78fcfSmatt 	struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);
62231d27c36Sskrll 	struct vm_page_md * const dst_mdpg = VM_PAGE_TO_MD(dst_pg);
623d7e78fcfSmatt 
62431d27c36Sskrll 	KASSERT(!VM_PAGEMD_EXECPAGE_P(dst_mdpg));
6258fb032d5Sskrll 
62631d27c36Sskrll 	const register_t dst_va = pmap_md_map_ephemeral_page(dst_mdpg, false,
627d7e78fcfSmatt 	    VM_PROT_READ|VM_PROT_WRITE, &dst_pte);
628d7e78fcfSmatt 
629d7e78fcfSmatt 	mips_pagezero(dst_va);
630d7e78fcfSmatt 
63131d27c36Sskrll 	pmap_md_unmap_ephemeral_page(dst_mdpg, false, dst_va, dst_pte);
632d7e78fcfSmatt 
633d7e78fcfSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
634d7e78fcfSmatt }
635d7e78fcfSmatt 
636d7e78fcfSmatt /*
637d7e78fcfSmatt  *	pmap_copy_page copies the specified page.
638d7e78fcfSmatt  */
639d7e78fcfSmatt void
640d7e78fcfSmatt pmap_copy_page(paddr_t src_pa, paddr_t dst_pa)
641d7e78fcfSmatt {
642d7e78fcfSmatt 	pt_entry_t src_pte, dst_pte;
643d7e78fcfSmatt 
64431d27c36Sskrll 	UVMHIST_FUNC(__func__);
64531d27c36Sskrll 	UVMHIST_CALLARGS(pmaphist, "(src_pa=%#lx, dst_pa=%#lx)", src_pa, dst_pa,
64631d27c36Sskrll 	    0, 0);
647d7e78fcfSmatt 	PMAP_COUNT(copied_pages);
648d7e78fcfSmatt 
649d7e78fcfSmatt 	struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src_pa);
650d7e78fcfSmatt 	struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);
651d7e78fcfSmatt 
65231d27c36Sskrll 	struct vm_page_md * const src_mdpg = VM_PAGE_TO_MD(src_pg);
65331d27c36Sskrll 	struct vm_page_md * const dst_mdpg = VM_PAGE_TO_MD(dst_pg);
65431d27c36Sskrll 
65531d27c36Sskrll 	const register_t src_va = pmap_md_map_ephemeral_page(src_mdpg, false,
656d7e78fcfSmatt 	    VM_PROT_READ, &src_pte);
657d7e78fcfSmatt 
65831d27c36Sskrll 	KASSERT(VM_PAGEMD_PVLIST_EMPTY_P(dst_mdpg));
65931d27c36Sskrll 	KASSERT(!VM_PAGEMD_EXECPAGE_P(dst_mdpg));
66031d27c36Sskrll 	const register_t dst_va = pmap_md_map_ephemeral_page(dst_mdpg, false,
661d7e78fcfSmatt 	    VM_PROT_READ|VM_PROT_WRITE, &dst_pte);
662d7e78fcfSmatt 
663d7e78fcfSmatt 	mips_pagecopy(dst_va, src_va);
664d7e78fcfSmatt 
66531d27c36Sskrll 	pmap_md_unmap_ephemeral_page(dst_mdpg, false, dst_va, dst_pte);
66631d27c36Sskrll 	pmap_md_unmap_ephemeral_page(src_mdpg, false, src_va, src_pte);
667d7e78fcfSmatt 
668d7e78fcfSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
669d7e78fcfSmatt }
670d7e78fcfSmatt 
671d7e78fcfSmatt void
67231d27c36Sskrll pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
673d7e78fcfSmatt {
6749cd6c57aSskrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
675d7e78fcfSmatt 	struct mips_options * const opts = &mips_options;
676d7e78fcfSmatt 	if (opts->mips_cpu_flags & CPU_MIPS_I_D_CACHE_COHERENT)
677d7e78fcfSmatt 		return;
678d7e78fcfSmatt 
679d7e78fcfSmatt 	/*
680d7e78fcfSmatt 	 * If onproc is empty, we could do a
681d7e78fcfSmatt 	 * pmap_page_protect(pg, VM_PROT_NONE) and remove all
682d7e78fcfSmatt 	 * mappings of the page and clear its execness.  Then
683d7e78fcfSmatt 	 * the next time page is faulted, it will get icache
684d7e78fcfSmatt 	 * synched.  But this is easier. :)
685d7e78fcfSmatt 	 */
686d7e78fcfSmatt 	if (MIPS_HAS_R4K_MMU) {
687d7e78fcfSmatt 		if (VM_PAGEMD_CACHED_P(mdpg)) {
688328eb1abSskrll 			/* This was probably mapped cached by UBC so flush it */
689b24d4bacSskrll 			pt_entry_t pte;
69031d27c36Sskrll 			const register_t tva = pmap_md_map_ephemeral_page(mdpg,
69131d27c36Sskrll 			    false, VM_PROT_READ, &pte);
692b24d4bacSskrll 
693b24d4bacSskrll 			UVMHIST_LOG(pmaphist, "  va %#"PRIxVADDR, tva, 0, 0, 0);
694b24d4bacSskrll 			mips_dcache_wbinv_range(tva, PAGE_SIZE);
695b24d4bacSskrll 			mips_icache_sync_range(tva, PAGE_SIZE);
696b24d4bacSskrll 
69731d27c36Sskrll 			pmap_md_unmap_ephemeral_page(mdpg, false, tva, pte);
698d7e78fcfSmatt 		}
699d7e78fcfSmatt 	} else {
70031d27c36Sskrll 		KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
70131d27c36Sskrll 
70231d27c36Sskrll 		struct vm_page *pg = VM_MD_TO_PAGE(mdpg);
703d7e78fcfSmatt 		mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)),
704d7e78fcfSmatt 		    PAGE_SIZE);
705d7e78fcfSmatt 	}
706d7e78fcfSmatt #ifdef MULTIPROCESSOR
707b24d4bacSskrll 	pv_entry_t pv = &mdpg->mdpg_first;
708b24d4bacSskrll 	const register_t va = (intptr_t)trunc_page(pv->pv_va);
709d7e78fcfSmatt 	pmap_tlb_syncicache(va, onproc);
710d7e78fcfSmatt #endif
711d7e78fcfSmatt }
712d7e78fcfSmatt 
713d7e78fcfSmatt struct vm_page *
714d7e78fcfSmatt pmap_md_alloc_poolpage(int flags)
715d7e78fcfSmatt {
716d7e78fcfSmatt 	/*
717c4cbc27bSsimonb 	 * The VM_FREELIST used for pool pages is only set on 32bit
718c4cbc27bSsimonb 	 * kernels.  This is to make sure that we only allocate pages
719c4cbc27bSsimonb 	 * that can be mapped via KSEG0.  On 64bit kernels, all memory
720c4cbc27bSsimonb 	 * can be mapped via XKPHYS so just use the default freelist.
721d7e78fcfSmatt 	 */
722d7e78fcfSmatt 	if (mips_poolpage_vmfreelist != VM_FREELIST_DEFAULT)
723d7e78fcfSmatt 		return uvm_pagealloc_strat(NULL, 0, NULL, flags,
724d7e78fcfSmatt 		    UVM_PGA_STRAT_ONLY, mips_poolpage_vmfreelist);
725d7e78fcfSmatt 
726d7e78fcfSmatt 	return uvm_pagealloc(NULL, 0, NULL, flags);
727d7e78fcfSmatt }
728d7e78fcfSmatt 
729d7e78fcfSmatt vaddr_t
730d7e78fcfSmatt pmap_md_map_poolpage(paddr_t pa, size_t len)
731d7e78fcfSmatt {
732d7e78fcfSmatt 
733d7e78fcfSmatt 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
734d7e78fcfSmatt 	vaddr_t va = pmap_md_pool_phystov(pa);
735d7e78fcfSmatt 	KASSERT(cold || pg != NULL);
736d7e78fcfSmatt 	if (pg != NULL) {
737d7e78fcfSmatt 		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
738d7e78fcfSmatt 		pv_entry_t pv = &mdpg->mdpg_first;
739d7e78fcfSmatt 		vaddr_t last_va = trunc_page(pv->pv_va);
740d7e78fcfSmatt 
7418fb032d5Sskrll 		KASSERT(len == PAGE_SIZE || last_va == pa);
742d7e78fcfSmatt 		KASSERT(pv->pv_pmap == NULL);
7438fb032d5Sskrll 		KASSERT(pv->pv_next == NULL);
7448fb032d5Sskrll 		KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
745d7e78fcfSmatt 
746d7e78fcfSmatt 		/*
747d7e78fcfSmatt 		 * If this page was last mapped with an address that
748d7e78fcfSmatt 		 * might cause aliases, flush the page from the cache.
749d7e78fcfSmatt 		 */
750d7e78fcfSmatt 		if (MIPS_CACHE_VIRTUAL_ALIAS
751d7e78fcfSmatt 		    && mips_cache_badalias(last_va, va)) {
75231d27c36Sskrll 			pmap_md_vca_page_wbinv(mdpg, false);
753d7e78fcfSmatt 		}
754d7e78fcfSmatt 
755d7e78fcfSmatt 		pv->pv_va = va;
756d7e78fcfSmatt 	}
757d7e78fcfSmatt 	return va;
758d7e78fcfSmatt }
759d7e78fcfSmatt 
760d7e78fcfSmatt paddr_t
761d7e78fcfSmatt pmap_md_unmap_poolpage(vaddr_t va, size_t len)
762d7e78fcfSmatt {
763d7e78fcfSmatt 	KASSERT(len == PAGE_SIZE);
764d7e78fcfSmatt 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
765d7e78fcfSmatt 
766d7e78fcfSmatt 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
767d7e78fcfSmatt 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
768d7e78fcfSmatt 
769d7e78fcfSmatt 	KASSERT(pg);
77085080b4dSskrll 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
771d7e78fcfSmatt 
772d7e78fcfSmatt 	KASSERT(VM_PAGEMD_CACHED_P(mdpg));
7738fb032d5Sskrll 	KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
7748fb032d5Sskrll 
77508e736b4Sskrll 	pv_entry_t pv = &mdpg->mdpg_first;
77608e736b4Sskrll 
77708e736b4Sskrll 	/* Note last mapped address for future color check */
77808e736b4Sskrll 	pv->pv_va = va;
7796d401601Sskrll 
7808fb032d5Sskrll 	KASSERT(pv->pv_pmap == NULL);
7818fb032d5Sskrll 	KASSERT(pv->pv_next == NULL);
782d7e78fcfSmatt 
783d7e78fcfSmatt 	return pa;
784d7e78fcfSmatt }
785d7e78fcfSmatt 
786d7e78fcfSmatt bool
787d7e78fcfSmatt pmap_md_direct_mapped_vaddr_p(register_t va)
788d7e78fcfSmatt {
789d7e78fcfSmatt #ifndef __mips_o32
790d7e78fcfSmatt 	if (MIPS_XKPHYS_P(va))
791d7e78fcfSmatt 		return true;
792d7e78fcfSmatt #endif
793d7e78fcfSmatt 	return MIPS_KSEG0_P(va);
794d7e78fcfSmatt }
795d7e78fcfSmatt 
796d7e78fcfSmatt paddr_t
797d7e78fcfSmatt pmap_md_direct_mapped_vaddr_to_paddr(register_t va)
798d7e78fcfSmatt {
799d7e78fcfSmatt 	if (MIPS_KSEG0_P(va)) {
800d7e78fcfSmatt 		return MIPS_KSEG0_TO_PHYS(va);
801d7e78fcfSmatt 	}
802d7e78fcfSmatt #ifndef __mips_o32
803d7e78fcfSmatt 	if (MIPS_XKPHYS_P(va)) {
804d7e78fcfSmatt 		return MIPS_XKPHYS_TO_PHYS(va);
805d7e78fcfSmatt 	}
806d7e78fcfSmatt #endif
807d7e78fcfSmatt 	panic("%s: va %#"PRIxREGISTER" not direct mapped!", __func__, va);
808d7e78fcfSmatt }
809d7e78fcfSmatt 
810d7e78fcfSmatt bool
811d7e78fcfSmatt pmap_md_io_vaddr_p(vaddr_t va)
812d7e78fcfSmatt {
813d7e78fcfSmatt #ifdef _LP64
814d7e78fcfSmatt 	if (MIPS_XKPHYS_P(va)) {
815d7e78fcfSmatt 		return MIPS_XKPHYS_TO_CCA(va) == CCA_UNCACHED;
816d7e78fcfSmatt 	}
817d7e78fcfSmatt #endif
818d7e78fcfSmatt 	return MIPS_KSEG1_P(va);
819d7e78fcfSmatt }
820d7e78fcfSmatt 
821d7e78fcfSmatt void
822d7e78fcfSmatt pmap_md_icache_sync_range_index(vaddr_t va, vsize_t len)
823d7e78fcfSmatt {
8249cd6c57aSskrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
825d7e78fcfSmatt 	mips_icache_sync_range_index(va, len);
826d7e78fcfSmatt }
827d7e78fcfSmatt 
828d7e78fcfSmatt void
829d7e78fcfSmatt pmap_md_icache_sync_all(void)
830d7e78fcfSmatt {
8319cd6c57aSskrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
832d7e78fcfSmatt 	mips_icache_sync_all();
833d7e78fcfSmatt }
834d7e78fcfSmatt 
835d7e78fcfSmatt #ifdef MULTIPROCESSOR
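/*
 * Compute the synci page and map masks from the primary I-cache way
 * size; this only needs to be done once, when the boot CPU (index 0)
 * attaches its TLB info.
 */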
836d7e78fcfSmatt void
837d7e78fcfSmatt pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
838d7e78fcfSmatt {
839d7e78fcfSmatt 	if (ci->ci_index != 0)
840d7e78fcfSmatt 		return;
841d7e78fcfSmatt 	const u_int icache_way_pages =
842d7e78fcfSmatt 	    mips_cache_info.mci_picache_way_size >> PGSHIFT;
843d7e78fcfSmatt 
844d7e78fcfSmatt 	KASSERT(icache_way_pages <= 8*sizeof(pmap_tlb_synci_page_mask));
845d7e78fcfSmatt 	pmap_tlb_synci_page_mask = icache_way_pages - 1;
846d7e78fcfSmatt 	pmap_tlb_synci_map_mask = ~(~0 << icache_way_pages);
847d7e78fcfSmatt 	printf("tlb0: synci page mask %#x and map mask %#x used for %u pages\n",
848d7e78fcfSmatt 	    pmap_tlb_synci_page_mask, pmap_tlb_synci_map_mask, icache_way_pages);
849d7e78fcfSmatt }
850d7e78fcfSmatt #endif
851d7e78fcfSmatt 
852d7e78fcfSmatt 
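/*
 * tlb_walk() callback used for TLB consistency checking: each TLB entry
 * belonging to the pmap's current ASID must match its page-table entry
 * once the WIRED and RO bits are masked off.
 */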
853d7e78fcfSmatt bool
854d7e78fcfSmatt pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
855d7e78fcfSmatt {
856d7e78fcfSmatt 	pmap_t pm = ctx;
857d7e78fcfSmatt 	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
858d7e78fcfSmatt 	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
859d7e78fcfSmatt 
860d7e78fcfSmatt 	if (asid != pai->pai_asid)
861d7e78fcfSmatt 		return true;
862d7e78fcfSmatt 	if (!pte_valid_p(pte)) {
863d7e78fcfSmatt 		KASSERT(MIPS_HAS_R4K_MMU);
864d7e78fcfSmatt 		KASSERTMSG(pte == MIPS3_PG_G, "va %#"PRIxVADDR" pte %#"PRIxPTE,
865d7e78fcfSmatt 		    va, pte_value(pte));
866d7e78fcfSmatt 		return true;
867d7e78fcfSmatt 	}
868d7e78fcfSmatt 
869d7e78fcfSmatt 	const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
870d7e78fcfSmatt 	KASSERTMSG(ptep != NULL, "va %#"PRIxVADDR" asid %u pte %#"PRIxPTE,
871d7e78fcfSmatt 	    va, asid, pte_value(pte));
872d7e78fcfSmatt 	const pt_entry_t opte = *ptep;
873d7e78fcfSmatt 	pt_entry_t xpte = opte;
874d7e78fcfSmatt 	if (MIPS_HAS_R4K_MMU) {
875d7e78fcfSmatt 		xpte &= ~(MIPS3_PG_WIRED|MIPS3_PG_RO);
876d7e78fcfSmatt 	} else {
877d7e78fcfSmatt 		xpte &= ~(MIPS1_PG_WIRED|MIPS1_PG_RO);
878d7e78fcfSmatt 	}
879d7e78fcfSmatt 
880d7e78fcfSmatt         KASSERTMSG(pte == xpte,
881d7e78fcfSmatt             "pmap=%p va=%#"PRIxVADDR" asid=%u: TLB pte (%#"PRIxPTE
882d7e78fcfSmatt 	    ") != real pte (%#"PRIxPTE"/%#"PRIxPTE") @ %p",
883d7e78fcfSmatt             pm, va, asid, pte_value(pte), pte_value(xpte), pte_value(opte),
884d7e78fcfSmatt 	    ptep);
885d7e78fcfSmatt 
886d7e78fcfSmatt         return true;
887d7e78fcfSmatt }
888d7e78fcfSmatt 
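/*
 * Walk every TLB entry on the current CPU and invoke "func" for each
 * non-zero lo0/lo1 mapping with its VA, ASID (KERNEL_PID for global
 * entries) and PTE; the walk stops early if "func" returns false.
 */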
889d7e78fcfSmatt void
890d7e78fcfSmatt tlb_walk(void *ctx, tlb_walkfunc_t func)
891d7e78fcfSmatt {
892d7e78fcfSmatt 	kpreempt_disable();
893d7e78fcfSmatt 	for (size_t i = 0; i < mips_options.mips_num_tlb_entries; i++) {
894d7e78fcfSmatt 		struct tlbmask tlbmask;
895d7e78fcfSmatt 		tlb_asid_t asid;
896d7e78fcfSmatt 		vaddr_t va;
897d7e78fcfSmatt 		tlb_read_entry(i, &tlbmask);
898d7e78fcfSmatt 		if (MIPS_HAS_R4K_MMU) {
899d7e78fcfSmatt 			asid = __SHIFTOUT(tlbmask.tlb_hi, MIPS3_PG_ASID);
900d7e78fcfSmatt 			va = tlbmask.tlb_hi & MIPS3_PG_HVPN;
901d7e78fcfSmatt 		} else {
902d7e78fcfSmatt 			asid = __SHIFTOUT(tlbmask.tlb_hi, MIPS1_TLB_PID);
903d7e78fcfSmatt 			va = tlbmask.tlb_hi & MIPS1_PG_FRAME;
904d7e78fcfSmatt 		}
905d7e78fcfSmatt 		if ((pt_entry_t)tlbmask.tlb_lo0 != 0) {
906d7e78fcfSmatt 			pt_entry_t pte = tlbmask.tlb_lo0;
907d7e78fcfSmatt 			tlb_asid_t asid0 = (pte_global_p(pte) ? KERNEL_PID : asid);
908d7e78fcfSmatt 			if (!(*func)(ctx, va, asid0, pte))
909d7e78fcfSmatt 				break;
910d7e78fcfSmatt 		}
911d7e78fcfSmatt #if (PGSHIFT & 1) == 0
912d7e78fcfSmatt 		if (MIPS_HAS_R4K_MMU && (pt_entry_t)tlbmask.tlb_lo1 != 0) {
913d7e78fcfSmatt 			pt_entry_t pte = tlbmask.tlb_lo1;
914d7e78fcfSmatt 			tlb_asid_t asid1 = (pte_global_p(pte) ? KERNEL_PID : asid);
915d7e78fcfSmatt 			if (!(*func)(ctx, va + MIPS3_PG_ODDPG, asid1, pte))
916d7e78fcfSmatt 				break;
917d7e78fcfSmatt 		}
918d7e78fcfSmatt #endif
919d7e78fcfSmatt 	}
920d7e78fcfSmatt 	kpreempt_enable();
921d7e78fcfSmatt }
922d7e78fcfSmatt 
923d7e78fcfSmatt bool
92431d27c36Sskrll pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *ptep)
925d7e78fcfSmatt {
9269cd6c57aSskrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
927d7e78fcfSmatt 	if (!MIPS_HAS_R4K_MMU || !MIPS_CACHE_VIRTUAL_ALIAS)
928d7e78fcfSmatt 		return false;
929d7e78fcfSmatt 
930d7e78fcfSmatt 	/*
931d7e78fcfSmatt 	 * There is at least one other VA mapping this page.
932d7e78fcfSmatt 	 * Check if they are cache index compatible.
933d7e78fcfSmatt 	 */
934d7e78fcfSmatt 
935d7e78fcfSmatt 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
936d7e78fcfSmatt 	pv_entry_t pv = &mdpg->mdpg_first;
937d7e78fcfSmatt #if defined(PMAP_NO_PV_UNCACHED)
938d7e78fcfSmatt 	/*
939d7e78fcfSmatt 	 * Instead of mapping uncached, which some platforms
940d7e78fcfSmatt 	 * cannot support, remove incompatible mappings from other pmaps.
941d7e78fcfSmatt 	 * When this address is touched again, the uvm will
942d7e78fcfSmatt 	 * fault it in.  Because of this, each page will only
943d7e78fcfSmatt 	 * be mapped with one index at any given time.
944d7e78fcfSmatt 	 *
945d7e78fcfSmatt 	 * We need to deal with all entries on the list - if the first is
946d7e78fcfSmatt 	 * incompatible with the new mapping then they all will be.
947d7e78fcfSmatt 	 */
948d7e78fcfSmatt 	if (__predict_true(!mips_cache_badalias(pv->pv_va, va))) {
949d7e78fcfSmatt 		return false;
950d7e78fcfSmatt 	}
9518fb032d5Sskrll 	KASSERT(pv->pv_pmap != NULL);
9528fb032d5Sskrll 	bool ret = false;
9537246cf07Sskrll 	for (pv_entry_t npv = pv; npv && npv->pv_pmap;) {
954202efe03Sskrll 		if (PV_ISKENTER_P(npv)) {
95587240e32Sskrll 			npv = npv->pv_next;
956d7e78fcfSmatt 			continue;
9576a9bc54eSskrll 		}
9588fb032d5Sskrll 		ret = true;
959d7e78fcfSmatt 		vaddr_t nva = trunc_page(npv->pv_va);
960d7e78fcfSmatt 		pmap_t npm = npv->pv_pmap;
961d7e78fcfSmatt 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
962d7e78fcfSmatt 		pmap_remove(npm, nva, nva + PAGE_SIZE);
96315dfb837Sskrll 
96415dfb837Sskrll 		/*
96515dfb837Sskrll 		 * pmap_update is not required here as we're the pmap
96615dfb837Sskrll 		 * and we know that the invalidation happened or the
96715dfb837Sskrll 		 * asid has been released (and activation is deferred)
96815dfb837Sskrll 		 *
96915dfb837Sskrll 		 * A deferred activation should NOT occur here.
97015dfb837Sskrll 		 */
971d7e78fcfSmatt 		(void)VM_PAGEMD_PVLIST_LOCK(mdpg);
9726a9bc54eSskrll 
9736a9bc54eSskrll 		npv = pv;
974d7e78fcfSmatt 	}
9758fb032d5Sskrll 	KASSERT(ret == true);
9768fb032d5Sskrll 
9778fb032d5Sskrll 	return ret;
978d7e78fcfSmatt #else	/* !PMAP_NO_PV_UNCACHED */
979d7e78fcfSmatt 	if (VM_PAGEMD_CACHED_P(mdpg)) {
980d7e78fcfSmatt 		/*
981d7e78fcfSmatt 		 * If this page is cached, then all mappings
982d7e78fcfSmatt 		 * have the same cache alias so we only need
983d7e78fcfSmatt 		 * to check the first page to see if it's
984d7e78fcfSmatt 		 * incompatible with the new mapping.
985d7e78fcfSmatt 		 *
986d7e78fcfSmatt 		 * If the mappings are incompatible, map this
987d7e78fcfSmatt 		 * page as uncached and re-map all the current
988d7e78fcfSmatt 		 * mapping as uncached until all pages can
989d7e78fcfSmatt 		 * share the same cache index again.
990d7e78fcfSmatt 		 */
991d7e78fcfSmatt 		if (mips_cache_badalias(pv->pv_va, va)) {
99231d27c36Sskrll 			pmap_page_cache(mdpg, false);
99331d27c36Sskrll 			pmap_md_vca_page_wbinv(mdpg, true);
994d7e78fcfSmatt 			*ptep = pte_cached_change(*ptep, false);
995d7e78fcfSmatt 			PMAP_COUNT(page_cache_evictions);
996d7e78fcfSmatt 		}
997d7e78fcfSmatt 	} else {
998d7e78fcfSmatt 		*ptep = pte_cached_change(*ptep, false);
999d7e78fcfSmatt 		PMAP_COUNT(page_cache_evictions);
1000d7e78fcfSmatt 	}
1001d7e78fcfSmatt 	return false;
1002d7e78fcfSmatt #endif	/* !PMAP_NO_PV_UNCACHED */
1003d7e78fcfSmatt }
1004d7e78fcfSmatt 
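/*
 * Handle a cache-clean request from the common pmap on a virtually
 * aliased cache: PMAP_WB and PMAP_WBINV write the page back and
 * invalidate it; PMAP_INV alone is not expected here.
 */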
1005d7e78fcfSmatt void
100631d27c36Sskrll pmap_md_vca_clean(struct vm_page_md *mdpg, int op)
1007d7e78fcfSmatt {
10089cd6c57aSskrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1009d7e78fcfSmatt 	if (!MIPS_HAS_R4K_MMU || !MIPS_CACHE_VIRTUAL_ALIAS)
1010d7e78fcfSmatt 		return;
1011d7e78fcfSmatt 
101231d27c36Sskrll 	UVMHIST_LOG(pmaphist, "(mdpg=%#jx, op=%d)", (uintptr_t)mdpg, op, 0, 0);
101331d27c36Sskrll 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
1014d7e78fcfSmatt 
1015d7e78fcfSmatt 	if (op == PMAP_WB || op == PMAP_WBINV) {
101631d27c36Sskrll 		pmap_md_vca_page_wbinv(mdpg, true);
1017d7e78fcfSmatt 	} else if (op == PMAP_INV) {
1018d7e78fcfSmatt 		KASSERT(op == PMAP_INV && false);
1019d7e78fcfSmatt 		//mips_dcache_inv_range_index(va, PAGE_SIZE);
1020d7e78fcfSmatt 	}
1021d7e78fcfSmatt }
1022d7e78fcfSmatt 
1023d7e78fcfSmatt /*
1024d7e78fcfSmatt  * In the PMAP_NO_PV_UNCACHED case, all conflicts are resolved at mapping
1025d7e78fcfSmatt  * so nothing needs to be done in removal.
1026d7e78fcfSmatt  */
1027d7e78fcfSmatt void
1028d7e78fcfSmatt pmap_md_vca_remove(struct vm_page *pg, vaddr_t va, bool dirty, bool last)
1029d7e78fcfSmatt {
1030d7e78fcfSmatt #if !defined(PMAP_NO_PV_UNCACHED)
1031d7e78fcfSmatt 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1032d7e78fcfSmatt 	if (!MIPS_HAS_R4K_MMU
1033d7e78fcfSmatt 	    || !MIPS_CACHE_VIRTUAL_ALIAS
1034d7e78fcfSmatt 	    || !VM_PAGEMD_UNCACHED_P(mdpg))
1035d7e78fcfSmatt 		return;
1036d7e78fcfSmatt 
1037d7e78fcfSmatt 	KASSERT(kpreempt_disabled());
1038d7e78fcfSmatt 	KASSERT((va & PAGE_MASK) == 0);
1039d7e78fcfSmatt 
1040d7e78fcfSmatt 	/*
1041d7e78fcfSmatt 	 * Page is currently uncached, check if alias mapping has been
1042d7e78fcfSmatt 	 * removed.  If it was, then reenable caching.
1043d7e78fcfSmatt 	 */
1044d7e78fcfSmatt 	(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
1045d7e78fcfSmatt 	pv_entry_t pv = &mdpg->mdpg_first;
1046d7e78fcfSmatt 	pv_entry_t pv0 = pv->pv_next;
1047d7e78fcfSmatt 
1048d7e78fcfSmatt 	for (; pv0; pv0 = pv0->pv_next) {
1049d7e78fcfSmatt 		if (mips_cache_badalias(pv->pv_va, pv0->pv_va))
1050d7e78fcfSmatt 			break;
1051d7e78fcfSmatt 	}
1052d7e78fcfSmatt 	if (pv0 == NULL)
105331d27c36Sskrll 		pmap_page_cache(mdpg, true);
1054d7e78fcfSmatt 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1055d7e78fcfSmatt #endif
1056d7e78fcfSmatt }
1057d7e78fcfSmatt 
1058d7e78fcfSmatt paddr_t
1059d7e78fcfSmatt pmap_md_pool_vtophys(vaddr_t va)
1060d7e78fcfSmatt {
1061d7e78fcfSmatt #ifdef _LP64
1062d7e78fcfSmatt 	if (MIPS_XKPHYS_P(va))
1063d7e78fcfSmatt 		return MIPS_XKPHYS_TO_PHYS(va);
1064d7e78fcfSmatt #endif
1065d7e78fcfSmatt 	KASSERT(MIPS_KSEG0_P(va));
1066d7e78fcfSmatt 	return MIPS_KSEG0_TO_PHYS(va);
1067d7e78fcfSmatt }
1068d7e78fcfSmatt 
1069d7e78fcfSmatt vaddr_t
1070d7e78fcfSmatt pmap_md_pool_phystov(paddr_t pa)
1071d7e78fcfSmatt {
1072d7e78fcfSmatt #ifdef _LP64
1073d7e78fcfSmatt 	KASSERT(mips_options.mips3_xkphys_cached);
1074d7e78fcfSmatt 	return MIPS_PHYS_TO_XKPHYS_CACHED(pa);
1075d7e78fcfSmatt #else
1076d7e78fcfSmatt 	KASSERT((pa & ~MIPS_PHYS_MASK) == 0);
1077d7e78fcfSmatt 	return MIPS_PHYS_TO_KSEG0(pa);
1078c61115d3Sskrll #endif
1079d7e78fcfSmatt }
1080