1 /*	$NetBSD: pmap_machdep.c,v 1.30 2020/09/10 17:26:38 skrll Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center and by Chris G. Demetriou.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1992, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * This code is derived from software contributed to Berkeley by
38  * the Systems Programming Group of the University of Utah Computer
39  * Science Department and Ralph Campbell.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
66  */
67 
68 #include <sys/cdefs.h>
69 
70 __KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.30 2020/09/10 17:26:38 skrll Exp $");
71 
72 /*
73  *	Manages physical address maps.
74  *
75  *	In addition to hardware address maps, this
76  *	module is called upon to provide software-use-only
77  *	maps which may or may not be stored in the same
78  *	form as hardware maps.  These pseudo-maps are
79  *	used to store intermediate results from copy
80  *	operations to and from address spaces.
81  *
82  *	Since the information managed by this module is
83  *	also stored by the logical address mapping module,
84  *	this module may throw away valid virtual-to-physical
85  *	mappings at almost any time.  However, invalidations
86  *	of virtual-to-physical mappings must be done as
87  *	requested.
88  *
89  *	In order to cope with hardware architectures which
90  *	make virtual-to-physical map invalidates expensive,
91  *	this module may delay invalidation or reduced-protection
92  *	operations until such time as they are actually
93  *	necessary.  This module is given full information as
94  *	to which processors are currently using which maps,
95  *	and to when physical maps must be made correct.
96  */
97 
98 /* XXX simonb 2002/02/26
99  *
100  * MIPS3_PLUS is used to conditionally compile the r4k MMU support.
101  * This is bogus - for example, some IDT MIPS-II CPUs have r4k style
102  * MMUs (and 32-bit ones at that).
103  *
104  * On the other hand, it's not likely that we'll ever support the R6000
105  * (is it?), so maybe that can be an "if MIPS2 or greater" check.
106  *
107  * Also along these lines would be using totally separate functions for
108  * r3k-style and r4k-style MMUs and removing all the MIPS_HAS_R4K_MMU
109  * checks in the current functions.
110  *
111  * These warnings probably apply to other files under sys/arch/mips.
112  */
113 
114 #include "opt_cputype.h"
115 #include "opt_mips_cache.h"
116 #include "opt_multiprocessor.h"
117 #include "opt_sysv.h"
118 
119 #define __MUTEX_PRIVATE
120 #define __PMAP_PRIVATE
121 
122 #include <sys/param.h>
123 #include <sys/atomic.h>
124 #include <sys/buf.h>
125 #include <sys/cpu.h>
126 #include <sys/kernel.h>
127 #include <sys/mutex.h>
128 #include <sys/pool.h>
129 #include <sys/proc.h>
130 #include <sys/systm.h>
131 #ifdef SYSVSHM
132 #include <sys/shm.h>
133 #endif
134 
135 #include <uvm/uvm.h>
136 #include <uvm/uvm_physseg.h>
137 
138 #include <mips/cache.h>
139 #include <mips/cpuregs.h>
140 #include <mips/locore.h>
141 #include <mips/pte.h>
142 
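/*
 * Compile-time sanity checks: the KSEG0/KSEG1/KSEG2 windows and related
 * constants must be sign-extended negative values (so they survive being
 * held in 64-bit registers) and must match their architecturally defined
 * 32-bit addresses, and the kernel VA space must start on a (X)SEG
 * boundary.
 */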
143 CTASSERT(MIPS_KSEG0_START < 0);
144 CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG0(0x1000) < 0);
145 CTASSERT(MIPS_KSEG1_START < 0);
146 CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG1(0x1000) < 0);
147 CTASSERT(MIPS_KSEG2_START < 0);
148 CTASSERT(MIPS_MAX_MEM_ADDR < 0);
149 CTASSERT(MIPS_RESERVED_ADDR < 0);
150 CTASSERT((uint32_t)MIPS_KSEG0_START == 0x80000000);
151 CTASSERT((uint32_t)MIPS_KSEG1_START == 0xa0000000);
152 CTASSERT((uint32_t)MIPS_KSEG2_START == 0xc0000000);
153 CTASSERT((uint32_t)MIPS_MAX_MEM_ADDR == 0xbe000000);
154 CTASSERT((uint32_t)MIPS_RESERVED_ADDR == 0xbfc80000);
155 CTASSERT(MIPS_KSEG0_P(MIPS_PHYS_TO_KSEG0(0)));
156 CTASSERT(MIPS_KSEG1_P(MIPS_PHYS_TO_KSEG1(0)));
157 #ifdef _LP64
158 CTASSERT(VM_MIN_KERNEL_ADDRESS % NBXSEG == 0);
159 #else
160 CTASSERT(VM_MIN_KERNEL_ADDRESS % NBSEG == 0);
161 #endif
162 
163 //PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
164 PMAP_COUNTER(zeroed_pages, "pages zeroed");
165 PMAP_COUNTER(copied_pages, "pages copied");
166 extern struct evcnt pmap_evcnt_page_cache_evictions;
167 
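/*
 * pmap_page_cache_alias_mask selects the virtual address bits that
 * determine a page's cache color; pmap_md_cache_indexof() extracts those
 * bits so an ephemeral mapping can be made congruent with a page's
 * previous mapping.
 */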
168 u_int pmap_page_cache_alias_mask;
169 
170 #define pmap_md_cache_indexof(x)	(((vaddr_t)(x)) & pmap_page_cache_alias_mask)
171 
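/*
 * Map a page at a temporary kernel VA so its contents can be accessed
 * (for zeroing, copying or cache maintenance).  A direct-mapped address
 * (KSEG0/XKPHYS) is used when it cannot conflict with the page's current
 * mapping; otherwise a per-CPU ephemeral VA of a congruent cache color is
 * entered into the kernel page table and TLB, with the previous PTE saved
 * in *old_pte_p.
 */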
172 static register_t
173 pmap_md_map_ephemeral_page(struct vm_page *pg, bool locked_p, int prot,
174     pt_entry_t *old_pte_p)
175 {
176 	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
177 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
178 	pv_entry_t pv = &mdpg->mdpg_first;
179 	register_t va = 0;
180 
181 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
182 	UVMHIST_LOG(pmaphist, "(pg=%p, prot=%d, ptep=%p)",
183 	    pg, prot, old_pte_p, 0);
184 
185 	KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
186 
187 	if (!MIPS_CACHE_VIRTUAL_ALIAS || !mips_cache_badalias(pv->pv_va, pa)) {
188 #ifdef _LP64
189 		va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
190 #else
191 		if (pa < MIPS_PHYS_MASK) {
192 			va = MIPS_PHYS_TO_KSEG0(pa);
193 		}
194 #endif
195 	}
196 	if (va == 0) {
197 		/*
198 		 * Make sure to use a congruent mapping to the last mapped
199 		 * address so we don't have to worry about virtual aliases.
200 		 */
201 		kpreempt_disable(); // paired with the one in unmap
202 		struct cpu_info * const ci = curcpu();
203 		if (MIPS_CACHE_VIRTUAL_ALIAS) {
204 			KASSERT(ci->ci_pmap_dstbase != 0);
205 			KASSERT(ci->ci_pmap_srcbase != 0);
206 
207 			const u_int __diagused mask = pmap_page_cache_alias_mask;
208 			KASSERTMSG((ci->ci_pmap_dstbase & mask) == 0,
209 			    "%#"PRIxVADDR, ci->ci_pmap_dstbase);
210 			KASSERTMSG((ci->ci_pmap_srcbase & mask) == 0,
211 			    "%#"PRIxVADDR, ci->ci_pmap_srcbase);
212 		}
213 		vaddr_t nva = (prot & VM_PROT_WRITE
214 			? ci->ci_pmap_dstbase
215 			: ci->ci_pmap_srcbase)
216 		    + pmap_md_cache_indexof(MIPS_CACHE_VIRTUAL_ALIAS
217 			? pv->pv_va
218 			: pa);
219 
220 		va = (intptr_t)nva;
221 		/*
222 		 * Now make and write the new PTE to map the PA.
223 		 */
224 		const pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, 0);
225 		pt_entry_t * const ptep = pmap_pte_lookup(pmap_kernel(), va);
226 		*old_pte_p = *ptep;		// save
227 		bool rv __diagused;
228 		*ptep = npte;			// update page table
229 
230 		// update the TLB directly making sure we force the new entry
231 		// into it.
232 		rv = tlb_update_addr(va, KERNEL_PID, npte, true);
233 		KASSERTMSG(rv == 1, "va %#"PRIxREGISTER" pte=%#"PRIxPTE" rv=%d",
234 		    va, pte_value(npte), rv);
235 	}
236 	if (MIPS_CACHE_VIRTUAL_ALIAS) {
237 		/*
238 		 * If we are forced to use an incompatible alias, flush the
239 		 * page from the cache so we will copy the correct contents.
240 		 */
241 		if (!locked_p)
242 			(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
243 		if (VM_PAGEMD_CACHED_P(mdpg)
244 		    && mips_cache_badalias(pv->pv_va, va)) {
245 			register_t ova = (intptr_t)trunc_page(pv->pv_va);
246 			mips_dcache_wbinv_range_index(ova, PAGE_SIZE);
247 			/*
248 			 * If there is no active mapping, remember this new one.
249 			 */
250 			if (pv->pv_pmap == NULL)
251 				pv->pv_va = va;
252 		}
253 		if (!locked_p)
254 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
255 	}
256 
257 	UVMHIST_LOG(pmaphist, " <-- done (va=%#lx)", va, 0, 0, 0);
258 
259 	return va;
260 }
261 
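/*
 * Undo pmap_md_map_ephemeral_page(): flush the page from the cache if it
 * was uncached or mapped at a bad alias, and, if a non-direct-mapped VA
 * was used, restore the saved PTE/TLB entry and re-enable preemption.
 */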
262 static void
263 pmap_md_unmap_ephemeral_page(struct vm_page *pg, bool locked_p, register_t va,
264 	pt_entry_t old_pte)
265 {
266 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
267 	pv_entry_t pv = &mdpg->mdpg_first;
268 
269 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
270 	UVMHIST_LOG(pmaphist, "(pg=%p, va=%#lx, pte=%#"PRIxPTE")",
271 	    pg, va, pte_value(old_pte), 0);
272 
273 	KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
274 
275 	if (MIPS_CACHE_VIRTUAL_ALIAS) {
276 		if (!locked_p)
277 			(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
278 		/*
279 		 * If this page was previously uncached or we had to use an
280 		 * incompatible alias, flush it from the cache.
281 		 */
282 		if (VM_PAGEMD_UNCACHED_P(mdpg)
283 		    || (pv->pv_pmap != NULL
284 			&& mips_cache_badalias(pv->pv_va, va))) {
285 			mips_dcache_wbinv_range(va, PAGE_SIZE);
286 		}
287 		if (!locked_p)
288 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
289 	}
290 	/*
291 	 * If we had to map using a page table entry, restore it now.
292 	 */
293 	if (!pmap_md_direct_mapped_vaddr_p(va)) {
294 		*pmap_pte_lookup(pmap_kernel(), va) = old_pte;
295 		if (pte_valid_p(old_pte)) {
296 			// Update the TLB with the old mapping.
297 			tlb_update_addr(va, KERNEL_PID, old_pte, 0);
298 		} else {
299 			// Invalidate TLB entry if the old pte wasn't valid.
300 			tlb_invalidate_addr(va, KERNEL_PID);
301 		}
302 		kpreempt_enable();	// Restore preemption
303 	}
304 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
305 }
306 
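/*
 * Write back and invalidate a page from the data cache, using an
 * ephemeral mapping so the flush is done at a compatible cache index.
 */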
307 static void
308 pmap_md_vca_page_wbinv(struct vm_page *pg, bool locked_p)
309 {
310 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
311 	pt_entry_t pte;
312 
313 	const register_t va = pmap_md_map_ephemeral_page(pg, locked_p,
314 	    VM_PROT_READ, &pte);
315 
316 	mips_dcache_wbinv_range(va, PAGE_SIZE);
317 
318 	pmap_md_unmap_ephemeral_page(pg, locked_p, va, pte);
319 }
320 
321 bool
322 pmap_md_ok_to_steal_p(const uvm_physseg_t bank, size_t npgs)
323 {
324 #ifndef _LP64
325 	if (uvm_physseg_get_avail_start(bank) + npgs >= atop(MIPS_PHYS_MASK + 1)) {
326 		aprint_debug("%s: not enough space in KSEG0 for %zu pages\n",
327 		    __func__, npgs);
328 		return false;
329 	}
330 #endif
331 	return true;
332 }
333 
334 /*
335  *	Bootstrap the system enough to run with virtual memory.
337  */
338 void
339 pmap_bootstrap(void)
340 {
341 	vsize_t bufsz;
342 	size_t sysmap_size;
343 	pt_entry_t *sysmap;
344 
345 	if (MIPS_CACHE_VIRTUAL_ALIAS && uvmexp.ncolors) {
346 		pmap_page_colormask = (uvmexp.ncolors - 1) << PAGE_SHIFT;
347 		pmap_page_cache_alias_mask = uimax(
348 		    mips_cache_info.mci_cache_alias_mask,
349 		    mips_cache_info.mci_icache_alias_mask);
350 	}
351 
352 #ifdef MULTIPROCESSOR
353 	pmap_t pm = pmap_kernel();
354 	kcpuset_create(&pm->pm_onproc, true);
355 	kcpuset_create(&pm->pm_active, true);
356 	KASSERT(pm->pm_onproc != NULL);
357 	KASSERT(pm->pm_active != NULL);
358 	kcpuset_set(pm->pm_onproc, cpu_number());
359 	kcpuset_set(pm->pm_active, cpu_number());
360 #endif
361 
362 	pmap_bootstrap_common();
363 
364 	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */
365 
366 	/*
367 	 * Compute the number of pages kmem_arena will have.
368 	 */
369 	kmeminit_nkmempages();
370 
371 	/*
372 	 * Figure out how many PTE's are necessary to map the kernel.
373 	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
374 	 */
375 
376 	/* Get size of buffer cache and set an upper limit */
377 	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
378 	bufsz = buf_memcalc();
379 	buf_setvalimit(bufsz);
380 
381 	sysmap_size = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
382 	    bufsz + 16 * NCARGS + pager_map_size) / NBPG +
383 	    (maxproc * UPAGES) + nkmempages;
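	/*
	 * The estimate above covers PTEs for the VM_PHYS_SIZE window,
	 * UBC windows, the buffer cache, exec argument space, the pager
	 * map, per-process u-areas (maxproc * UPAGES) and the kmem arena
	 * (nkmempages).
	 */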
384 
385 #ifdef SYSVSHM
386 	sysmap_size += shminfo.shmall;
387 #endif
388 #ifdef KSEG2IOBUFSIZE
389 	sysmap_size += (KSEG2IOBUFSIZE >> PGSHIFT);
390 #endif
391 #ifdef _LP64
392 	/*
393 	 * If we are using tmpfs, then we might want to use a great deal of
394 	 * our memory with it.  Make sure we have enough VM to do that.
395 	 */
396 	sysmap_size += physmem;
397 #else
398 	/* XXX: else runs out of space on 256MB sbmips!! */
399 	sysmap_size += 20000;
400 #endif
401 	/* Round up to a whole number of PTE page tables */
402 	sysmap_size = (sysmap_size + NPTEPG - 1) & -NPTEPG;
403 
404 	/*
405 	 * Initialize `FYI' variables.	Note we're relying on
406 	 * the fact that BSEARCH sorts the vm_physmem[] array
407 	 * for us.  Must do this before uvm_pageboot_alloc()
408 	 * can be called.
409 	 */
410 	pmap_limits.avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
411 	pmap_limits.avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));
412 	pmap_limits.virtual_end = pmap_limits.virtual_start + (vaddr_t)sysmap_size * NBPG;
413 
414 #ifndef _LP64
415 	if (pmap_limits.virtual_end > VM_MAX_KERNEL_ADDRESS
416 	    || pmap_limits.virtual_end < VM_MIN_KERNEL_ADDRESS) {
417 		printf("%s: changing last kernel VA from %#"PRIxVADDR
418 		    " to %#"PRIxVADDR"\n", __func__,
419 		    pmap_limits.virtual_end, VM_MAX_KERNEL_ADDRESS);
420 		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
421 		sysmap_size =
422 		    (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / NBPG;
423 	}
424 #endif
425 	pmap_pvlist_lock_init(mips_cache_info.mci_pdcache_line_size);
426 
427 	/*
428 	 * Now actually allocate the kernel PTE array (must be done
429 	 * after pmap_limits.virtual_end is initialized).
430 	 */
431 	sysmap = (pt_entry_t *)
432 	    uvm_pageboot_alloc(sizeof(pt_entry_t) * sysmap_size);
433 
434 	vaddr_t va = VM_MIN_KERNEL_ADDRESS;
435 #ifdef _LP64
436 	/*
437 	 * Do we need more than one XSEG's worth of virtual address space?
438 	 * If so, we have to allocate the additional pmap_segtab_t's for them
439 	 * and insert them into the kernel's top level segtab.
440 	 */
441 	const size_t xsegs = (sysmap_size * NBPG + NBXSEG - 1) / NBXSEG;
442 	if (xsegs > 1) {
443 		printf("%s: %zu xsegs required for %zu pages\n",
444 		    __func__, xsegs, sysmap_size);
445 		pmap_segtab_t *stp = (pmap_segtab_t *)
446 		    uvm_pageboot_alloc(sizeof(pmap_segtab_t) * (xsegs - 1));
447 		for (size_t i = 1; i <= xsegs; i++, stp++) {
448 			pmap_kern_segtab.seg_seg[i] = stp;
449 		}
450 	}
451 	pmap_segtab_t ** const xstp = pmap_kern_segtab.seg_seg;
452 #else
453 	const size_t xsegs = 1;
454 	pmap_segtab_t * const stp = &pmap_kern_segtab;
455 #endif
456 	KASSERT(curcpu()->ci_pmap_kern_segtab == &pmap_kern_segtab);
457 
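	/*
	 * Carve sysmap[] into NPTEPG-sized page-table pages and hook each
	 * one into the kernel segtab slot for the corresponding NBSEG of
	 * kernel VA (walking across XSEG-level segtabs on _LP64).
	 */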
458 	for (size_t k = 0, i = 0; k < xsegs; k++) {
459 #ifdef _LP64
460 		pmap_segtab_t * const stp =
461 		    xstp[(va >> XSEGSHIFT) & (NSEGPG - 1)];
462 #endif
463 		bool done = false;
464 
465 		for (size_t j = (va >> SEGSHIFT) & (NSEGPG - 1);
466 		     !done && i < sysmap_size;
467 		     i += NPTEPG, j++, va += NBSEG) {
468 			/*
469 			 * Now set the page table pointer...
470 			 */
471 			stp->seg_tab[j] = &sysmap[i];
472 #ifdef _LP64
473 			/*
474 			 * If we are at the end of this XSEG, terminate the loop
475 			 * so we advance to the next one.
476 			 */
477 			done = (j + 1 == NSEGPG);
478 #endif
479 		}
480 	}
481 	KASSERT(pmap_pte_lookup(pmap_kernel(), VM_MIN_KERNEL_ADDRESS) == sysmap);
482 
483 	/*
484 	 * Initialize the pools.
485 	 */
486 	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
487 	    &pool_allocator_nointr, IPL_NONE);
488 	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
489 	    &pmap_pv_page_allocator, IPL_NONE);
490 
491 	tlb_set_asid(0);
492 
493 #ifdef MIPS3_PLUS	/* XXX mmu XXX */
494 	/*
495 	 * The R4?00 stores only one copy of the Global bit in the
496 	 * translation lookaside buffer for each even/odd page pair.
497 	 * Thus invalid entries must have the Global bit set so that
498 	 * when the EntryLo0 and EntryLo1 G bits are ANDed together
499 	 * they produce a global bit to store in the TLB entry.
500 	 */
501 	if (MIPS_HAS_R4K_MMU) {
502 		while (sysmap_size-- > 0) {
503 			*sysmap++ = MIPS3_PG_G;
504 		}
505 	}
506 #endif	/* MIPS3_PLUS */
507 }
508 
509 void
510 pmap_md_alloc_ephemeral_address_space(struct cpu_info *ci)
511 {
512 	struct mips_cache_info * const mci = &mips_cache_info;
513 
514 	/*
515 	 * If we have more memory than can be mapped by KSEG0, we need to
516 	 * allocate enough VA so we can map pages with the right color
517 	 * (to avoid cache alias problems).
518 	 */
519 	if (false
520 #ifndef _LP64
521 	    || pmap_limits.avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START
522 #endif
523 	    || MIPS_CACHE_VIRTUAL_ALIAS
524 	    || MIPS_ICACHE_VIRTUAL_ALIAS) {
525 		vsize_t size = uimax(mci->mci_pdcache_way_size, mci->mci_picache_way_size);
526 		const u_int __diagused mask = pmap_page_cache_alias_mask;
527 
528 		ci->ci_pmap_dstbase = uvm_km_alloc(kernel_map, size, size,
529 		    UVM_KMF_VAONLY);
530 
531 		KASSERT(ci->ci_pmap_dstbase);
532 		KASSERT(!pmap_md_direct_mapped_vaddr_p(ci->ci_pmap_dstbase));
533 		KASSERTMSG((ci->ci_pmap_dstbase & mask) == 0, "%#"PRIxVADDR,
534 		    ci->ci_pmap_dstbase);
535 
536 		ci->ci_pmap_srcbase = uvm_km_alloc(kernel_map, size, size,
537 		    UVM_KMF_VAONLY);
538 		KASSERT(ci->ci_pmap_srcbase);
539 		KASSERT(!pmap_md_direct_mapped_vaddr_p(ci->ci_pmap_srcbase));
540 		KASSERTMSG((ci->ci_pmap_srcbase & mask) == 0, "%#"PRIxVADDR,
541 		    ci->ci_pmap_srcbase);
542 	}
543 }
544 
545 void
546 pmap_md_init(void)
547 {
548 	pmap_md_alloc_ephemeral_address_space(curcpu());
549 
550 #if defined(MIPS3) && 0
551 	if (MIPS_HAS_R4K_MMU) {
552 		/*
553 		 * XXX
554 		 * Disable sosend_loan() in src/sys/kern/uipc_socket.c
555 		 * on MIPS3 CPUs to avoid possible virtual cache aliases
556 		 * and uncached mappings in pmap_enter_pv().
557 		 *
558 		 * Ideally, read-only shared mappings won't cause aliases,
559 		 * so pmap_enter_pv() should handle any shared read-only
560 		 * mappings without uncached ops, as the ARM pmap does.
561 		 *
562 		 * On the other hand, R4000 and R4400 have the virtual
563 		 * coherency exceptions which will happen even on read only
564 		 * mappings, so we always have to disable sosend_loan()
565 		 * on such CPUs.
566 		 */
567 		sock_loan_thresh = -1;
568 	}
569 #endif
570 }
571 
572 /*
573  * XXXJRT -- need a version for each cache type.
574  */
575 void
576 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
577 {
578 	if (MIPS_HAS_R4K_MMU) {
579 		/*
580 		 * XXX
581 		 * shouldn't need to do this for physical d$?
582 		 * should need to do this for virtual i$ if prot == EXEC?
583 		 */
584 		if (p == curlwp->l_proc
585 		    && mips_cache_info.mci_pdcache_way_mask < PAGE_SIZE)
586 		    /* XXX check icache mask too? */
587 			mips_icache_sync_range((intptr_t)va, len);
588 		else
589 			mips_icache_sync_range_index((intptr_t)va, len);
590 	} else {
591 		pmap_t pmap = p->p_vmspace->vm_map.pmap;
592 		kpreempt_disable();
593 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
594 		pt_entry_t entry = (ptep != NULL ? *ptep : 0);
595 		kpreempt_enable();
596 		if (!pte_valid_p(entry))
597 			return;
598 
599 		mips_icache_sync_range(
600 		    MIPS_PHYS_TO_KSEG0(pte_to_paddr(entry) + (va & PGOFSET)),
601 		    len);
602 	}
603 }
604 
605 /*
606  *	pmap_zero_page zeros the specified page.
607  */
608 void
609 pmap_zero_page(paddr_t dst_pa)
610 {
611 	pt_entry_t dst_pte;
612 
613 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
614 	UVMHIST_LOG(pmaphist, "(pa=%#"PRIxPADDR")", dst_pa, 0, 0, 0);
615 	PMAP_COUNT(zeroed_pages);
616 
617 	struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);
618 
619 	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(dst_pg)));
620 
621 	const register_t dst_va = pmap_md_map_ephemeral_page(dst_pg, false,
622 	    VM_PROT_READ|VM_PROT_WRITE, &dst_pte);
623 
624 	mips_pagezero(dst_va);
625 
626 	pmap_md_unmap_ephemeral_page(dst_pg, false, dst_va, dst_pte);
627 
628 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
629 }
630 
631 /*
632  *	pmap_copy_page copies the specified page.
633  */
634 void
635 pmap_copy_page(paddr_t src_pa, paddr_t dst_pa)
636 {
637 	pt_entry_t src_pte, dst_pte;
638 
639 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
640 	UVMHIST_LOG(pmaphist, "(src_pa=%#lx, dst_pa=%#lx)", src_pa, dst_pa, 0, 0);
641 	PMAP_COUNT(copied_pages);
642 
643 	struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src_pa);
644 	struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);
645 
646 	const register_t src_va = pmap_md_map_ephemeral_page(src_pg, false,
647 	    VM_PROT_READ, &src_pte);
648 
649 	KASSERT(VM_PAGEMD_PVLIST_EMPTY_P(VM_PAGE_TO_MD(dst_pg)));
650 	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(dst_pg)));
651 	const register_t dst_va = pmap_md_map_ephemeral_page(dst_pg, false,
652 	    VM_PROT_READ|VM_PROT_WRITE, &dst_pte);
653 
654 	mips_pagecopy(dst_va, src_va);
655 
656 	pmap_md_unmap_ephemeral_page(dst_pg, false, dst_va, dst_pte);
657 	pmap_md_unmap_ephemeral_page(src_pg, false, src_va, src_pte);
658 
659 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
660 }
661 
662 void
663 pmap_md_page_syncicache(struct vm_page *pg, const kcpuset_t *onproc)
664 {
665 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
666 	struct mips_options * const opts = &mips_options;
667 	if (opts->mips_cpu_flags & CPU_MIPS_I_D_CACHE_COHERENT)
668 		return;
669 
670 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
671 
672 	/*
673 	 * If onproc is empty, we could do a
674 	 * pmap_page_protect(pg, VM_PROT_NONE) and remove all
675 	 * mappings of the page and clear its execness.  Then
676 	 * the next time the page is faulted, it will get icache
677 	 * synched.  But this is easier. :)
678 	 */
679 	if (MIPS_HAS_R4K_MMU) {
680 		if (VM_PAGEMD_CACHED_P(mdpg)) {
681 			/* This was probably mapped cached by UBC so flush it */
682 			pt_entry_t pte;
683 			const register_t tva = pmap_md_map_ephemeral_page(pg, false,
684 			    VM_PROT_READ, &pte);
685 
686 			UVMHIST_LOG(pmaphist, "  va %#"PRIxVADDR, tva, 0, 0, 0);
687 			mips_dcache_wbinv_range(tva, PAGE_SIZE);
688 			mips_icache_sync_range(tva, PAGE_SIZE);
689 
690 			pmap_md_unmap_ephemeral_page(pg, false, tva, pte);
691 		}
692 	} else {
693 		mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)),
694 		    PAGE_SIZE);
695 	}
696 #ifdef MULTIPROCESSOR
697 	pv_entry_t pv = &mdpg->mdpg_first;
698 	const register_t va = (intptr_t)trunc_page(pv->pv_va);
699 	pmap_tlb_syncicache(va, onproc);
700 #endif
701 }
702 
703 struct vm_page *
704 pmap_md_alloc_poolpage(int flags)
705 {
706 	/*
707 	 * The VM_FREELIST used for pool pages is only set on 32bit
708 	 * kernels.  This is to make sure that we only allocate pages
709 	 * that can be mapped via KSEG0.  On 64bit kernels, all memory
710 	 * can be mapped via XKPHYS so just use the default freelist.
711 	 */
712 	if (mips_poolpage_vmfreelist != VM_FREELIST_DEFAULT)
713 		return uvm_pagealloc_strat(NULL, 0, NULL, flags,
714 		    UVM_PGA_STRAT_ONLY, mips_poolpage_vmfreelist);
715 
716 	return uvm_pagealloc(NULL, 0, NULL, flags);
717 }
718 
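/*
 * Return the direct-mapped VA for a pool page, remembering it as the
 * page's last mapped address for later cache-color checks.  If the
 * previous mapping was a bad alias, flush the page from the cache first.
 */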
719 vaddr_t
720 pmap_md_map_poolpage(paddr_t pa, size_t len)
721 {
722 
723 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
724 	vaddr_t va = pmap_md_pool_phystov(pa);
725 	KASSERT(cold || pg != NULL);
726 	if (pg != NULL) {
727 		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
728 		pv_entry_t pv = &mdpg->mdpg_first;
729 		vaddr_t last_va = trunc_page(pv->pv_va);
730 
731 		KASSERT(len == PAGE_SIZE || last_va == pa);
732 		KASSERT(pv->pv_pmap == NULL);
733 		KASSERT(pv->pv_next == NULL);
734 		KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
735 
736 		/*
737 		 * If this page was last mapped with an address that
738 		 * might cause aliases, flush the page from the cache.
739 		 */
740 		if (MIPS_CACHE_VIRTUAL_ALIAS
741 		    && mips_cache_badalias(last_va, va)) {
742 			pmap_md_vca_page_wbinv(pg, false);
743 		}
744 
745 		pv->pv_va = va;
746 	}
747 	return va;
748 }
749 
750 paddr_t
751 pmap_md_unmap_poolpage(vaddr_t va, size_t len)
752 {
753 	KASSERT(len == PAGE_SIZE);
754 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
755 
756 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
757 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
758 
759 	KASSERT(pg);
760 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
761 
762 	KASSERT(VM_PAGEMD_CACHED_P(mdpg));
763 	KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
764 
765 	pv_entry_t pv = &mdpg->mdpg_first;
766 
767 	/* Note last mapped address for future color check */
768 	pv->pv_va = va;
769 
770 	KASSERT(pv->pv_pmap == NULL);
771 	KASSERT(pv->pv_next == NULL);
772 
773 	return pa;
774 }
775 
776 bool
777 pmap_md_direct_mapped_vaddr_p(register_t va)
778 {
779 #ifndef __mips_o32
780 	if (MIPS_XKPHYS_P(va))
781 		return true;
782 #endif
783 	return MIPS_KSEG0_P(va);
784 }
785 
786 paddr_t
787 pmap_md_direct_mapped_vaddr_to_paddr(register_t va)
788 {
789 	if (MIPS_KSEG0_P(va)) {
790 		return MIPS_KSEG0_TO_PHYS(va);
791 	}
792 #ifndef __mips_o32
793 	if (MIPS_XKPHYS_P(va)) {
794 		return MIPS_XKPHYS_TO_PHYS(va);
795 	}
796 #endif
797 	panic("%s: va %#"PRIxREGISTER" not direct mapped!", __func__, va);
798 }
799 
800 bool
801 pmap_md_io_vaddr_p(vaddr_t va)
802 {
803 #ifdef _LP64
804 	if (MIPS_XKPHYS_P(va)) {
805 		return MIPS_XKPHYS_TO_CCA(va) == CCA_UNCACHED;
806 	}
807 #endif
808 	return MIPS_KSEG1_P(va);
809 }
810 
811 void
812 pmap_md_icache_sync_range_index(vaddr_t va, vsize_t len)
813 {
814 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
815 	mips_icache_sync_range_index(va, len);
816 }
817 
818 void
819 pmap_md_icache_sync_all(void)
820 {
821 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
822 	mips_icache_sync_all();
823 }
824 
825 #ifdef MULTIPROCESSOR
826 void
827 pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
828 {
829 	if (ci->ci_index != 0)
830 		return;
831 	const u_int icache_way_pages =
832 	    mips_cache_info.mci_picache_way_size >> PGSHIFT;
833 
834 	KASSERT(icache_way_pages <= 8*sizeof(pmap_tlb_synci_page_mask));
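	/*
	 * pmap_tlb_synci_page_mask selects a VA's page index within one
	 * icache way; pmap_tlb_synci_map_mask has one bit for each page
	 * of an icache way.
	 */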
835 	pmap_tlb_synci_page_mask = icache_way_pages - 1;
836 	pmap_tlb_synci_map_mask = ~(~0 << icache_way_pages);
837 	printf("tlb0: synci page mask %#x and map mask %#x used for %u pages\n",
838 	    pmap_tlb_synci_page_mask, pmap_tlb_synci_map_mask, icache_way_pages);
839 }
840 #endif
841 
842 
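/*
 * tlb_walk() callback: check that a TLB entry belonging to this pmap's
 * ASID matches the corresponding page-table entry (ignoring the wired
 * and read-only bits).
 */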
843 bool
844 pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
845 {
846 	pmap_t pm = ctx;
847 	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
848 	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
849 
850 	if (asid != pai->pai_asid)
851 		return true;
852 	if (!pte_valid_p(pte)) {
853 		KASSERT(MIPS_HAS_R4K_MMU);
854 		KASSERTMSG(pte == MIPS3_PG_G, "va %#"PRIxVADDR" pte %#"PRIxPTE,
855 		    va, pte_value(pte));
856 		return true;
857 	}
858 
859 	const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
860 	KASSERTMSG(ptep != NULL, "va %#"PRIxVADDR" asid %u pte %#"PRIxPTE,
861 	    va, asid, pte_value(pte));
862 	const pt_entry_t opte = *ptep;
863 	pt_entry_t xpte = opte;
864 	if (MIPS_HAS_R4K_MMU) {
865 		xpte &= ~(MIPS3_PG_WIRED|MIPS3_PG_RO);
866 	} else {
867 		xpte &= ~(MIPS1_PG_WIRED|MIPS1_PG_RO);
868 	}
869 
870 	KASSERTMSG(pte == xpte,
871 	    "pmap=%p va=%#"PRIxVADDR" asid=%u: TLB pte (%#"PRIxPTE
872 	    ") != real pte (%#"PRIxPTE"/%#"PRIxPTE") @ %p",
873 	    pm, va, asid, pte_value(pte), pte_value(xpte), pte_value(opte),
874 	    ptep);
875 
876 	return true;
877 }
878 
879 void
880 tlb_walk(void *ctx, tlb_walkfunc_t func)
881 {
882 	kpreempt_disable();
883 	for (size_t i = 0; i < mips_options.mips_num_tlb_entries; i++) {
884 		struct tlbmask tlbmask;
885 		tlb_asid_t asid;
886 		vaddr_t va;
887 		tlb_read_entry(i, &tlbmask);
888 		if (MIPS_HAS_R4K_MMU) {
889 			asid = __SHIFTOUT(tlbmask.tlb_hi, MIPS3_PG_ASID);
890 			va = tlbmask.tlb_hi & MIPS3_PG_HVPN;
891 		} else {
892 			asid = __SHIFTOUT(tlbmask.tlb_hi, MIPS1_TLB_PID);
893 			va = tlbmask.tlb_hi & MIPS1_PG_FRAME;
894 		}
895 		if ((pt_entry_t)tlbmask.tlb_lo0 != 0) {
896 			pt_entry_t pte = tlbmask.tlb_lo0;
897 			tlb_asid_t asid0 = (pte_global_p(pte) ? KERNEL_PID : asid);
898 			if (!(*func)(ctx, va, asid0, pte))
899 				break;
900 		}
901 #if (PGSHIFT & 1) == 0
902 		if (MIPS_HAS_R4K_MMU && (pt_entry_t)tlbmask.tlb_lo1 != 0) {
903 			pt_entry_t pte = tlbmask.tlb_lo1;
904 			tlb_asid_t asid1 = (pte_global_p(pte) ? KERNEL_PID : asid);
905 			if (!(*func)(ctx, va + MIPS3_PG_ODDPG, asid1, pte))
906 				break;
907 		}
908 #endif
909 	}
910 	kpreempt_enable();
911 }
912 
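/*
 * Handle a possible virtual cache alias when a new mapping of pg at va is
 * being entered.  If the new mapping is incompatible, either remove the
 * conflicting user mappings (PMAP_NO_PV_UNCACHED, returning true since
 * the pv list lock was dropped while doing so) or re-map the page
 * uncached and return false.
 */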
913 bool
914 pmap_md_vca_add(struct vm_page *pg, vaddr_t va, pt_entry_t *ptep)
915 {
916 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
917 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
918 	if (!MIPS_HAS_R4K_MMU || !MIPS_CACHE_VIRTUAL_ALIAS)
919 		return false;
920 
921 	/*
922 	 * There is at least one other VA mapping this page.
923 	 * Check if they are cache index compatible.
924 	 */
925 
926 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
927 	pv_entry_t pv = &mdpg->mdpg_first;
928 #if defined(PMAP_NO_PV_UNCACHED)
929 	/*
930 	 * Instead of mapping uncached, which some platforms
931 	 * cannot support, remove incompatible mappings from other pmaps.
932 	 * When this address is touched again, the uvm will
933 	 * fault it in.  Because of this, each page will only
934 	 * be mapped with one index at any given time.
935 	 *
936 	 * We need to deal with all entries on the list - if the first is
937 	 * incompatible with the new mapping then they all will be.
938 	 */
939 	if (__predict_true(!mips_cache_badalias(pv->pv_va, va))) {
940 		return false;
941 	}
942 	KASSERT(pv->pv_pmap != NULL);
943 	bool ret = false;
944 	for (pv_entry_t npv = pv; npv && npv->pv_pmap;) {
945 		if (PV_ISKENTER_P(npv)) {
946 			npv = npv->pv_next;
947 			continue;
948 		}
949 		ret = true;
950 		vaddr_t nva = trunc_page(npv->pv_va);
951 		pmap_t npm = npv->pv_pmap;
952 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
953 		pmap_remove(npm, nva, nva + PAGE_SIZE);
954 
955 		/*
956 		 * pmap_update is not required here as we're the pmap
957 		 * and we know that the invalidation happened or the
958 		 * asid has been released (and activation is deferred)
959 		 *
960 		 * A deferred activation should NOT occur here.
961 		 */
962 		(void)VM_PAGEMD_PVLIST_LOCK(mdpg);
963 
964 		npv = pv;
965 	}
966 	KASSERT(ret == true);
967 
968 	return ret;
969 #else	/* !PMAP_NO_PV_UNCACHED */
970 	if (VM_PAGEMD_CACHED_P(mdpg)) {
971 		/*
972 		 * If this page is cached, then all mappings
973 		 * have the same cache alias, so we only need
974 		 * to check the first mapping to see if it's
975 		 * incompatible with the new mapping.
976 		 *
977 		 * If the mappings are incompatible, map this
978 		 * page as uncached and re-map all the current
979 		 * mappings as uncached until all pages can
980 		 * share the same cache index again.
981 		 */
982 		if (mips_cache_badalias(pv->pv_va, va)) {
983 			pmap_page_cache(pg, false);
984 			pmap_md_vca_page_wbinv(pg, true);
985 			*ptep = pte_cached_change(*ptep, false);
986 			PMAP_COUNT(page_cache_evictions);
987 		}
988 	} else {
989 		*ptep = pte_cached_change(*ptep, false);
990 		PMAP_COUNT(page_cache_evictions);
991 	}
992 	return false;
993 #endif	/* !PMAP_NO_PV_UNCACHED */
994 }
995 
996 void
997 pmap_md_vca_clean(struct vm_page *pg, int op)
998 {
999 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1000 	if (!MIPS_HAS_R4K_MMU || !MIPS_CACHE_VIRTUAL_ALIAS)
1001 		return;
1002 
1003 	UVMHIST_LOG(pmaphist, "(pg=%p, op=%d)", pg, op, 0, 0);
1004 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(VM_PAGE_TO_MD(pg)));
1005 
1006 	if (op == PMAP_WB || op == PMAP_WBINV) {
1007 		pmap_md_vca_page_wbinv(pg, true);
1008 	} else if (op == PMAP_INV) {
1009 		KASSERT(op == PMAP_INV && false);
1010 		//mips_dcache_inv_range_index(va, PAGE_SIZE);
1011 	}
1012 }
1013 
1014 /*
1015  * In the PMAP_NO_PV_UNCACHED case, all conflicts are resolved at mapping
1016  * time, so nothing needs to be done at removal.
1017  */
1018 void
1019 pmap_md_vca_remove(struct vm_page *pg, vaddr_t va, bool dirty, bool last)
1020 {
1021 #if !defined(PMAP_NO_PV_UNCACHED)
1022 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1023 	if (!MIPS_HAS_R4K_MMU
1024 	    || !MIPS_CACHE_VIRTUAL_ALIAS
1025 	    || !VM_PAGEMD_UNCACHED_P(mdpg))
1026 		return;
1027 
1028 	KASSERT(kpreempt_disabled());
1029 	KASSERT(!VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
1030 	KASSERT((va & PAGE_MASK) == 0);
1031 
1032 	/*
1033 	 * Page is currently uncached, check if alias mapping has been
1034 	 * removed.  If it was, then reenable caching.
1035 	 */
1036 	(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
1037 	pv_entry_t pv = &mdpg->mdpg_first;
1038 	pv_entry_t pv0 = pv->pv_next;
1039 
1040 	for (; pv0; pv0 = pv0->pv_next) {
1041 		if (mips_cache_badalias(pv->pv_va, pv0->pv_va))
1042 			break;
1043 	}
1044 	if (pv0 == NULL)
1045 		pmap_page_cache(pg, true);
1046 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1047 #endif
1048 }
1049 
1050 paddr_t
1051 pmap_md_pool_vtophys(vaddr_t va)
1052 {
1053 #ifdef _LP64
1054 	if (MIPS_XKPHYS_P(va))
1055 		return MIPS_XKPHYS_TO_PHYS(va);
1056 #endif
1057 	KASSERT(MIPS_KSEG0_P(va));
1058 	return MIPS_KSEG0_TO_PHYS(va);
1059 }
1060 
1061 vaddr_t
1062 pmap_md_pool_phystov(paddr_t pa)
1063 {
1064 #ifdef _LP64
1065 	KASSERT(mips_options.mips3_xkphys_cached);
1066 	return MIPS_PHYS_TO_XKPHYS_CACHED(pa);
1067 #else
1068 	KASSERT((pa & ~MIPS_PHYS_MASK) == 0);
1069 	return MIPS_PHYS_TO_KSEG0(pa);
1070 #endif
1071 }
1072