/*	$NetBSD: pmap_subr.c,v 1.30 2020/07/06 10:31:24 rin Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_subr.c,v 1.30 2020/07/06 10:31:24 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_altivec.h"
#include "opt_multiprocessor.h"
#include "opt_pmap.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/device.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#if defined (PPC_OEA) || defined (PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
#include <powerpc/oea/vmparam.h>
#ifdef ALTIVEC
#include <powerpc/altivec.h>
#endif
#endif
#include <powerpc/psl.h>
#define	MFMSR()		mfmsr()
#define	MTMSR(psl)	__asm volatile("sync; mtmsr %0; isync" :: "r"(psl))

#ifdef PMAPCOUNTERS
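/*
 * Event counters, attached statically below.  PMAPCOUNT/PMAPCOUNT2
 * bump a counter and compile away when PMAPCOUNTERS is not configured.
 */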
#define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
#define	PMAPCOUNT2(ev)	((ev).ev_count++)

struct evcnt pmap_evcnt_zeroed_pages =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
	"pages zeroed");
struct evcnt pmap_evcnt_copied_pages =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
	"pages copied");
struct evcnt pmap_evcnt_idlezeroed_pages =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
	"pages idle zeroed");

EVCNT_ATTACH_STATIC(pmap_evcnt_zeroed_pages);
EVCNT_ATTACH_STATIC(pmap_evcnt_copied_pages);
EVCNT_ATTACH_STATIC(pmap_evcnt_idlezeroed_pages);

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)

struct evcnt pmap_evcnt_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "pages mapped");
struct evcnt pmap_evcnt_unmappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	    "pmap", "pages unmapped");

struct evcnt pmap_evcnt_kernel_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "kernel pages mapped");
struct evcnt pmap_evcnt_kernel_unmappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_kernel_mappings,
	    "pmap", "kernel pages unmapped");

struct evcnt pmap_evcnt_mappings_replaced =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "page mappings replaced");

struct evcnt pmap_evcnt_exec_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	    "pmap", "exec pages mapped");
struct evcnt pmap_evcnt_exec_cached =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	    "pmap", "exec pages cached");

struct evcnt pmap_evcnt_exec_synced =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages synced");
struct evcnt pmap_evcnt_exec_synced_clear_modify =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages synced (CM)");
struct evcnt pmap_evcnt_exec_synced_pvo_remove =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages synced (PR)");

struct evcnt pmap_evcnt_exec_uncached_page_protect =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (PP)");
struct evcnt pmap_evcnt_exec_uncached_clear_modify =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (CM)");
struct evcnt pmap_evcnt_exec_uncached_zero_page =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (ZP)");
struct evcnt pmap_evcnt_exec_uncached_copy_page =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (CP)");
struct evcnt pmap_evcnt_exec_uncached_pvo_remove =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (PR)");

struct evcnt pmap_evcnt_updates =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "updates");
struct evcnt pmap_evcnt_collects =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "collects");
struct evcnt pmap_evcnt_copies =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "copies");

struct evcnt pmap_evcnt_ptes_spilled =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes spilled from overflow");
struct evcnt pmap_evcnt_ptes_unspilled =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes not spilled");
struct evcnt pmap_evcnt_ptes_evicted =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes evicted");

struct evcnt pmap_evcnt_ptes_primary[8] = {
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[0]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[1]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[2]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[3]"),

    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[4]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[5]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[6]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[7]"),
};
struct evcnt pmap_evcnt_ptes_secondary[8] = {
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[0]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[1]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[2]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[3]"),

    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[4]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[5]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[6]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[7]"),
};
struct evcnt pmap_evcnt_ptes_removed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes removed");
struct evcnt pmap_evcnt_ptes_changed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes changed");
struct evcnt pmap_evcnt_pvos_reclaimed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "pvos reclaimed");
struct evcnt pmap_evcnt_pvos_failed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "pvo allocation failures");

EVCNT_ATTACH_STATIC(pmap_evcnt_mappings);
EVCNT_ATTACH_STATIC(pmap_evcnt_mappings_replaced);
EVCNT_ATTACH_STATIC(pmap_evcnt_unmappings);

EVCNT_ATTACH_STATIC(pmap_evcnt_kernel_mappings);
EVCNT_ATTACH_STATIC(pmap_evcnt_kernel_unmappings);

EVCNT_ATTACH_STATIC(pmap_evcnt_exec_mappings);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_cached);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_synced);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_synced_clear_modify);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_synced_pvo_remove);

EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_page_protect);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_clear_modify);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_zero_page);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_copy_page);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_pvo_remove);

EVCNT_ATTACH_STATIC(pmap_evcnt_updates);
EVCNT_ATTACH_STATIC(pmap_evcnt_collects);
EVCNT_ATTACH_STATIC(pmap_evcnt_copies);

EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_spilled);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_unspilled);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_evicted);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_removed);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_changed);

EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 0);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 1);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 2);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 3);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 4);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 5);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 6);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 7);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 0);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 1);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 2);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 3);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 4);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 5);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 6);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 7);

EVCNT_ATTACH_STATIC(pmap_evcnt_pvos_reclaimed);
EVCNT_ATTACH_STATIC(pmap_evcnt_pvos_failed);
#endif /* PPC_OEA || PPC_OEA64_BRIDGE */
#else
#define	PMAPCOUNT(ev)	((void) 0)
#define	PMAPCOUNT2(ev)	((void) 0)
#endif /* PMAPCOUNTERS */

/*
 * This file uses a sick & twisted method to deal with the common pmap
 * operations of zeroing, copying, and syncing a page with the
 * instruction cache.
 *
 * When a PowerPC CPU takes an exception (interrupt or trap), that
 * exception is handled with the MMU off.  The handler has to explicitly
 * re-enable the MMU before continuing, and the previous MMU state is
 * restored when the exception returns.
 *
 * Therefore, disabling the MMU here cannot interfere with exception
 * handling, so it is safe to turn the MMU off and operate on physical
 * addresses directly, without mapping any pages via a BAT or into a
 * page table.
 *
 * It's also safe to do this regardless of IPL.
 *
 * However, while relocation is off we MUST NOT access the kernel stack
 * in any manner, since it will probably no longer be mapped.  This means
 * no function calls while relocation is off.  The AltiVec routines handle
 * the MSR fiddling themselves so that they can save things on the stack.
 */
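
/*
 * The resulting pattern, used throughout this file, is:
 *
 *	msr = MFMSR();
 *	MTMSR(msr & ~PSL_DR);	<- data translation off
 *	... straight-line loads/stores on physical addresses ...
 *	MTMSR(msr);		<- previous state restored
 */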

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(paddr_t pa)
{
	size_t linewidth;
	register_t msr = 0; /* XXX: gcc */

#if defined(PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	{
		/*
		 * If we are zeroing this page, we must clear the EXEC-ness
		 * of this page since the page contents will have changed.
		 */
		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
		KDASSERT(pg != NULL);
		KDASSERT(LIST_EMPTY(&md->mdpg_pvoh));
#ifdef PMAPCOUNTERS
		if (md->mdpg_attrs & PTE_EXEC) {
			PMAPCOUNT(exec_uncached_zero_page);
		}
#endif
		md->mdpg_attrs &= ~PTE_EXEC;
	}
#endif

	PMAPCOUNT(zeroed_pages);

#ifdef ALTIVEC
	if (pmap_use_altivec) {
		vzeropage(pa);
		return;
	}
#endif

	/*
	 * Turn off data relocation (DMMU off).
	 */
#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	if (pa >= SEGMENT_LENGTH) {
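		/*
		 * Only pages above the 1:1-mapped low segment require
		 * data translation to be switched off.
		 */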
#endif
		msr = MFMSR();
		MTMSR(msr & ~PSL_DR);
#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	}
#endif

	/*
	 * Zero the page.  Since DR may be off, the address is used
	 * directly and is assumed to be valid; we also know that UVM
	 * will never pass us an uncacheable page.  Don't use dcbz if
	 * we don't know the cache line size.
	 */
	if ((linewidth = curcpu()->ci_ci.dcache_line_size) == 0) {
		long *dp = (long *)pa;
		long * const ep = dp + PAGE_SIZE/sizeof(dp[0]);
		do {
			dp[0] = 0; dp[1] = 0; dp[2] = 0; dp[3] = 0;
			dp[4] = 0; dp[5] = 0; dp[6] = 0; dp[7] = 0;
		} while ((dp += 8) < ep);
	} else {
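		/*
		 * dcbz zeroes a whole cache line per operation; issuing
		 * two per iteration halves the loop overhead.
		 */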
		size_t i = 0;
		do {
			__asm ("dcbz %0,%1" :: "b"(pa), "r"(i)); i += linewidth;
			__asm ("dcbz %0,%1" :: "b"(pa), "r"(i)); i += linewidth;
		} while (i < PAGE_SIZE);
	}

	/*
	 * Restore data relocation (DMMU on).
	 */
#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	if (pa >= SEGMENT_LENGTH)
#endif
		MTMSR(msr);
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(paddr_t src, paddr_t dst)
{
	const register_t *sp;
	register_t *dp;
	register_t msr;
	size_t i;

#if defined(PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	{
		/*
		 * Since we are overwriting the destination page, we must
		 * clear its EXEC-ness: the page contents will have changed.
		 */
		struct vm_page *pg = PHYS_TO_VM_PAGE(dst);
		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
		KDASSERT(pg != NULL);
		KDASSERT(LIST_EMPTY(&md->mdpg_pvoh));
#ifdef PMAPCOUNTERS
		if (md->mdpg_attrs & PTE_EXEC) {
			PMAPCOUNT(exec_uncached_copy_page);
		}
#endif
		md->mdpg_attrs &= ~PTE_EXEC;
	}
#endif

	PMAPCOUNT(copied_pages);

#ifdef ALTIVEC
	if (pmap_use_altivec) {
		vcopypage(dst, src);
		return;
	}
#endif

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	if (src < SEGMENT_LENGTH && dst < SEGMENT_LENGTH) {
		/*
		 * Copy the page (memcpy is optimized, right? :)
		 */
		memcpy((void *) dst, (void *) src, PAGE_SIZE);
		return;
	}
#endif

	/*
	 * Turn off data relocation (DMMU off).
	 */
	msr = MFMSR();
	MTMSR(msr & ~PSL_DR);

	/*
	 * Copy the page.  Don't use memcpy as we can't refer to the
	 * kernel stack at this point.
	 */
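	/* Manually unrolled: eight words per iteration, no function calls. */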
	sp = (const register_t *) src;
	dp = (register_t *) dst;
	for (i = 0; i < PAGE_SIZE/sizeof(dp[0]); i += 8, dp += 8, sp += 8) {
		dp[0] = sp[0]; dp[1] = sp[1]; dp[2] = sp[2]; dp[3] = sp[3];
		dp[4] = sp[4]; dp[5] = sp[5]; dp[6] = sp[6]; dp[7] = sp[7];
	}

	/*
	 * Restore data relocation (DMMU on).
	 */
	MTMSR(msr);
}

void
pmap_syncicache(paddr_t pa, psize_t len)
{

/*
 * XXX
 * The MULTIPROCESSOR case is disabled because:
 * - __syncicache() takes a virtual address
 * - this causes crashes on G5
 */
#ifdef MULTIPROCESSOR__
	__syncicache((void *)pa, len);
#else
	const size_t linewidth = curcpu()->ci_ci.icache_line_size;
	register_t msr;
	size_t i;

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	if (pa + len <= SEGMENT_LENGTH) {
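		/*
		 * The whole range lies in the 1:1-mapped low segment, so
		 * sync it through its virtual (== physical) address.
		 */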
		__syncicache((void *)pa, len);
		return;
	}
#endif

	/*
	 * Turn off instruction and data relocation (MMU off).
	 */
	msr = MFMSR();
	MTMSR(msr & ~(PSL_IR|PSL_DR));

	/*
	 * Make sure to start on a cache line boundary; the line size is
	 * assumed to be a power of two, so (linewidth - 1) masks off the
	 * offset within a line.
	 */
	len += pa & (linewidth - 1);
	pa &= ~(linewidth - 1);

	/*
	 * Write out the data cache
	 */
	i = 0;
	do {
		__asm ("dcbst %0,%1" :: "b"(pa), "r"(i)); i += linewidth;
	} while (i < len);

	/*
	 * Wait for it to finish
	 */
	__asm volatile("sync");

	/*
	 * Now invalidate the instruction cache.
	 */
	i = 0;
	do {
		__asm ("icbi %0,%1" :: "b"(pa), "r"(i)); i += linewidth;
	} while (i < len);

	/*
	 * Restore relocation (MMU on).  (This will do the required
	 * sync and isync.)
	 */
	MTMSR(msr);
#endif	/* !MULTIPROCESSOR */
}

bool
pmap_pageidlezero(paddr_t pa)
{
	register_t msr;
	register_t *dp = (register_t *) pa;
	struct cpu_info * const ci = curcpu();
	bool rv = true;
	int i;

#if defined(PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	if (pa < SEGMENT_LENGTH) {
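		/*
		 * The page lies in the 1:1-mapped low segment, so zero it
		 * with translation left on, bailing out promptly if another
		 * process becomes runnable.
		 */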
		for (i = 0; i < PAGE_SIZE / sizeof(dp[0]); i++) {
			if (ci->ci_want_resched)
				return false;
			*dp++ = 0;
		}
		PMAPCOUNT(idlezeroed_pages);
		return true;
	}
#endif

	/*
	 * Turn off instruction and data relocation (MMU off).
	 */
	msr = MFMSR();
	MTMSR(msr & ~(PSL_IR|PSL_DR));

	/*
	 * Zero the page until a process becomes runnable.
	 */
	for (i = 0; i < PAGE_SIZE / sizeof(dp[0]); i++) {
		if (ci->ci_want_resched) {
			rv = false;
			break;
		}
		*dp++ = 0;
	}

	/*
	 * Restore relocation (MMU on).
	 */
	MTMSR(msr);
#ifdef PMAPCOUNTERS
	if (rv)
		PMAPCOUNT(idlezeroed_pages);
#endif
	return rv;
}