xref: /netbsd-src/sys/arch/arm/arm32/pmap.c (revision 5e4c038a45edbc7d63b7c2daa76e29f88b64a4e3)
1 /*	$NetBSD: pmap.c,v 1.99 2002/06/02 14:44:42 drochner Exp $	*/
2 
3 /*
4  * Copyright (c) 2002 Wasabi Systems, Inc.
5  * Copyright (c) 2001 Richard Earnshaw
6  * Copyright (c) 2001 Christopher Gilbert
7  * All rights reserved.
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the company nor the name of the author may be used to
15  *    endorse or promote products derived from this software without specific
16  *    prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
22  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*-
32  * Copyright (c) 1999 The NetBSD Foundation, Inc.
33  * All rights reserved.
34  *
35  * This code is derived from software contributed to The NetBSD Foundation
36  * by Charles M. Hannum.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *        This product includes software developed by the NetBSD
49  *        Foundation, Inc. and its contributors.
50  * 4. Neither the name of The NetBSD Foundation nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
55  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
58  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64  * POSSIBILITY OF SUCH DAMAGE.
65  */
66 
67 /*
68  * Copyright (c) 1994-1998 Mark Brinicombe.
69  * Copyright (c) 1994 Brini.
70  * All rights reserved.
71  *
72  * This code is derived from software written for Brini by Mark Brinicombe
73  *
74  * Redistribution and use in source and binary forms, with or without
75  * modification, are permitted provided that the following conditions
76  * are met:
77  * 1. Redistributions of source code must retain the above copyright
78  *    notice, this list of conditions and the following disclaimer.
79  * 2. Redistributions in binary form must reproduce the above copyright
80  *    notice, this list of conditions and the following disclaimer in the
81  *    documentation and/or other materials provided with the distribution.
82  * 3. All advertising materials mentioning features or use of this software
83  *    must display the following acknowledgement:
84  *	This product includes software developed by Mark Brinicombe.
85  * 4. The name of the author may not be used to endorse or promote products
86  *    derived from this software without specific prior written permission.
87  *
88  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
89  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
90  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
91  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
92  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
93  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
94  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
96  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
97  *
98  * RiscBSD kernel project
99  *
100  * pmap.c
101  *
102  * Machine dependent vm stuff
103  *
104  * Created      : 20/09/94
105  */
106 
107 /*
108  * Performance improvements, UVM changes, overhauls and part-rewrites
109  * were contributed by Neil A. Carson <neil@causality.com>.
110  */
111 
112 /*
113  * The dram block info is currently referenced from the bootconfig.
114  * This should be placed in a separate structure.
115  */
116 
117 /*
118  * Special compilation symbols
119  * PMAP_DEBUG		- Build in pmap_debug_level code
120  */
121 
122 /* Include header files */
123 
124 #include "opt_pmap_debug.h"
125 #include "opt_ddb.h"
126 
127 #include <sys/types.h>
128 #include <sys/param.h>
129 #include <sys/kernel.h>
130 #include <sys/systm.h>
131 #include <sys/proc.h>
132 #include <sys/malloc.h>
133 #include <sys/user.h>
134 #include <sys/pool.h>
135 #include <sys/cdefs.h>
136 
137 #include <uvm/uvm.h>
138 
139 #include <machine/bootconfig.h>
140 #include <machine/bus.h>
141 #include <machine/pmap.h>
142 #include <machine/pcb.h>
143 #include <machine/param.h>
144 #include <arm/arm32/katelib.h>
145 
146 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.99 2002/06/02 14:44:42 drochner Exp $");
147 #ifdef PMAP_DEBUG
148 #define	PDEBUG(_lev_,_stat_) \
149 	if (pmap_debug_level >= (_lev_)) \
150         	((_stat_))
151 int pmap_debug_level = -2;
152 void pmap_dump_pvlist(vaddr_t phys, char *m);
153 
154 /*
155  * for switching to potentially finer grained debugging
156  */
157 #define	PDB_FOLLOW	0x0001
158 #define	PDB_INIT	0x0002
159 #define	PDB_ENTER	0x0004
160 #define	PDB_REMOVE	0x0008
161 #define	PDB_CREATE	0x0010
162 #define	PDB_PTPAGE	0x0020
163 #define	PDB_GROWKERN	0x0040
164 #define	PDB_BITS	0x0080
165 #define	PDB_COLLECT	0x0100
166 #define	PDB_PROTECT	0x0200
167 #define	PDB_MAP_L1	0x0400
168 #define	PDB_BOOTSTRAP	0x1000
169 #define	PDB_PARANOIA	0x2000
170 #define	PDB_WIRING	0x4000
171 #define	PDB_PVDUMP	0x8000
172 
173 int debugmap = 0;
174 int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
175 #define	NPDEBUG(_lev_,_stat_) \
176 	if (pmapdebug & (_lev_)) \
177         	((_stat_))
178 
179 #else	/* PMAP_DEBUG */
180 #define	PDEBUG(_lev_,_stat_) /* Nothing */
181 #define NPDEBUG(_lev_,_stat_) /* Nothing */
182 #endif	/* PMAP_DEBUG */
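
/*
 * Usage sketch for the debug macros: PDEBUG() evaluates its statement when
 * pmap_debug_level is at least the given level, and NPDEBUG() keys off
 * individual PDB_* bits in pmapdebug.  For example (the first call mirrors
 * a real one later in this file, the second is purely illustrative):
 *
 *	PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
 *	NPDEBUG(PDB_ENTER, printf("pmap_enter: va=%08lx\n", va));
 *
 * Both expand to nothing when PMAP_DEBUG is not defined.
 */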
183 
184 struct pmap     kernel_pmap_store;
185 
186 /*
187  * linked list of all non-kernel pmaps
188  */
189 
190 static LIST_HEAD(, pmap) pmaps;
191 
192 /*
193  * pool that pmap structures are allocated from
194  */
195 
196 struct pool pmap_pmap_pool;
197 
198 static pt_entry_t *csrc_pte, *cdst_pte;
199 static vaddr_t csrcp, cdstp;
200 
201 char *memhook;
202 extern caddr_t msgbufaddr;
203 
204 boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
205 /*
206  * locking data structures
207  */
208 
209 static struct lock pmap_main_lock;
210 static struct simplelock pvalloc_lock;
211 static struct simplelock pmaps_lock;
212 #ifdef LOCKDEBUG
213 #define PMAP_MAP_TO_HEAD_LOCK() \
214      (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
215 #define PMAP_MAP_TO_HEAD_UNLOCK() \
216      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
217 
218 #define PMAP_HEAD_TO_MAP_LOCK() \
219      (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
220 #define PMAP_HEAD_TO_MAP_UNLOCK() \
221      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
222 #else
223 #define	PMAP_MAP_TO_HEAD_LOCK()		/* nothing */
224 #define	PMAP_MAP_TO_HEAD_UNLOCK()	/* nothing */
225 #define	PMAP_HEAD_TO_MAP_LOCK()		/* nothing */
226 #define	PMAP_HEAD_TO_MAP_UNLOCK()	/* nothing */
227 #endif /* LOCKDEBUG */
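
/*
 * Illustrative note on the intended locking order (a sketch of the
 * convention, not a formal contract): operations that start from a pmap
 * and walk towards the per-page PV lists take the shared
 * PMAP_MAP_TO_HEAD_LOCK() first, then the pmap's own lock, then the
 * page's pvh_slock; operations that start from a vm_page and walk towards
 * the pmaps mapping it take the exclusive PMAP_HEAD_TO_MAP_LOCK() first.
 * When LOCKDEBUG is not defined the macros above compile to nothing.
 */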
228 
229 /*
230  * pv_page management structures: locked by pvalloc_lock
231  */
232 
233 TAILQ_HEAD(pv_pagelist, pv_page);
234 static struct pv_pagelist pv_freepages;	/* list of pv_pages with free entrys */
235 static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
236 static int pv_nfpvents;			/* # of free pv entries */
237 static struct pv_page *pv_initpage;	/* bootstrap page from kernel_map */
238 static vaddr_t pv_cachedva;		/* cached VA for later use */
239 
240 #define PVE_LOWAT (PVE_PER_PVPAGE / 2)	/* free pv_entry low water mark */
241 #define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
242 					/* high water mark */
243 
244 /*
245  * local prototypes
246  */
247 
248 static struct pv_entry	*pmap_add_pvpage __P((struct pv_page *, boolean_t));
249 static struct pv_entry	*pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
250 #define ALLOCPV_NEED	0	/* need PV now */
251 #define ALLOCPV_TRY	1	/* just try to allocate, don't steal */
252 #define ALLOCPV_NONEED	2	/* don't need PV, just growing cache */
253 static struct pv_entry	*pmap_alloc_pvpage __P((struct pmap *, int));
254 static void		 pmap_enter_pv __P((struct vm_page *,
255 					    struct pv_entry *, struct pmap *,
256 					    vaddr_t, struct vm_page *, int));
257 static void		 pmap_free_pv __P((struct pmap *, struct pv_entry *));
258 static void		 pmap_free_pvs __P((struct pmap *, struct pv_entry *));
259 static void		 pmap_free_pv_doit __P((struct pv_entry *));
260 static void		 pmap_free_pvpage __P((void));
261 static boolean_t	 pmap_is_curpmap __P((struct pmap *));
262 static struct pv_entry	*pmap_remove_pv __P((struct vm_page *, struct pmap *,
263 			vaddr_t));
264 #define PMAP_REMOVE_ALL		0	/* remove all mappings */
265 #define PMAP_REMOVE_SKIPWIRED	1	/* skip wired mappings */
266 
267 static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
268 	u_int, u_int));
269 
270 /*
271  * Structure that describes an L1 table.
272  */
273 struct l1pt {
274 	SIMPLEQ_ENTRY(l1pt)	pt_queue;	/* Queue pointers */
275 	struct pglist		pt_plist;	/* Allocated page list */
276 	vaddr_t			pt_va;		/* Allocated virtual address */
277 	int			pt_flags;	/* Flags */
278 };
279 #define	PTFLAG_STATIC		0x01		/* Statically allocated */
280 #define	PTFLAG_KPT		0x02		/* Kernel pt's are mapped */
281 #define	PTFLAG_CLEAN		0x04		/* L1 is clean */
282 
283 static void pmap_free_l1pt __P((struct l1pt *));
284 static int pmap_allocpagedir __P((struct pmap *));
285 static int pmap_clean_page __P((struct pv_entry *, boolean_t));
286 static void pmap_remove_all __P((struct vm_page *));
287 
288 static int pmap_alloc_ptpt(struct pmap *);
289 static void pmap_free_ptpt(struct pmap *);
290 
291 static struct vm_page	*pmap_alloc_ptp __P((struct pmap *, vaddr_t));
292 static struct vm_page	*pmap_get_ptp __P((struct pmap *, vaddr_t));
293 __inline static void pmap_clearbit __P((struct vm_page *, unsigned int));
294 
295 extern paddr_t physical_start;
296 extern paddr_t physical_freestart;
297 extern paddr_t physical_end;
298 extern paddr_t physical_freeend;
299 extern unsigned int free_pages;
300 extern int max_processes;
301 
302 vaddr_t virtual_avail;
303 vaddr_t virtual_end;
304 vaddr_t pmap_curmaxkvaddr;
305 
306 vaddr_t avail_start;
307 vaddr_t avail_end;
308 
309 extern pv_addr_t systempage;
310 
311 /* Variables used by the L1 page table queue code */
312 SIMPLEQ_HEAD(l1pt_queue, l1pt);
313 static struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
314 static int l1pt_static_queue_count;	    /* items in the static l1 queue */
315 static int l1pt_static_create_count;	    /* static l1 items created */
316 static struct l1pt_queue l1pt_queue;	    /* head of our l1 queue */
317 static int l1pt_queue_count;		    /* items in the l1 queue */
318 static int l1pt_create_count;		    /* stat - L1's create count */
319 static int l1pt_reuse_count;		    /* stat - L1's reused count */
320 
321 /* Local function prototypes (not used outside this file) */
322 void pmap_pinit __P((struct pmap *));
323 void pmap_freepagedir __P((struct pmap *));
324 
325 /* Other function prototypes */
326 extern void bzero_page __P((vaddr_t));
327 extern void bcopy_page __P((vaddr_t, vaddr_t));
328 
329 struct l1pt *pmap_alloc_l1pt __P((void));
330 static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
331      vaddr_t l2pa, boolean_t));
332 
333 static pt_entry_t *pmap_map_ptes __P((struct pmap *));
334 static void pmap_unmap_ptes __P((struct pmap *));
335 
336 __inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
337     pt_entry_t *, boolean_t));
338 static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
339     pt_entry_t *, boolean_t));
340 static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
341     pt_entry_t *, boolean_t));
342 
343 /*
344  * real definition of pv_entry.
345  */
346 
347 struct pv_entry {
348 	struct pv_entry *pv_next;       /* next pv_entry */
349 	struct pmap     *pv_pmap;        /* pmap where mapping lies */
350 	vaddr_t         pv_va;          /* virtual address for mapping */
351 	int             pv_flags;       /* flags */
352 	struct vm_page	*pv_ptp;	/* vm_page for the ptp */
353 };
354 
355 /*
356  * pv_entrys are dynamically allocated in chunks from a single page.
357  * we keep track of how many pv_entrys are in use for each page and
358  * we can free pv_entry pages if needed.  there is one lock for the
359  * entire allocation system.
360  */
361 
362 struct pv_page_info {
363 	TAILQ_ENTRY(pv_page) pvpi_list;
364 	struct pv_entry *pvpi_pvfree;
365 	int pvpi_nfree;
366 };
367 
368 /*
369  * number of pv_entry's in a pv_page
370  * (note: won't work on systems where NBPG isn't a constant)
371  */
372 
373 #define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
374 			sizeof(struct pv_entry))
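
/*
 * Worked example (assuming NBPG == 4096 and 4-byte pointers, as on a
 * typical 32-bit ARM configuration): sizeof(struct pv_page_info) == 16
 * and sizeof(struct pv_entry) == 20, so PVE_PER_PVPAGE == (4096 - 16) / 20
 * == 204, which in turn gives PVE_LOWAT == 102 and PVE_HIWAT == 510.
 */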
375 
376 /*
377  * a pv_page: where pv_entrys are allocated from
378  */
379 
380 struct pv_page {
381 	struct pv_page_info pvinfo;
382 	struct pv_entry pvents[PVE_PER_PVPAGE];
383 };
384 
385 #ifdef MYCROFT_HACK
386 int mycroft_hack = 0;
387 #endif
388 
389 /* Function to set the debug level of the pmap code */
390 
391 #ifdef PMAP_DEBUG
392 void
393 pmap_debug(int level)
394 {
395 	pmap_debug_level = level;
396 	printf("pmap_debug: level=%d\n", pmap_debug_level);
397 }
398 #endif	/* PMAP_DEBUG */
399 
400 __inline static boolean_t
401 pmap_is_curpmap(struct pmap *pmap)
402 {
403 
404 	if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) ||
405 	    pmap == pmap_kernel())
406 		return (TRUE);
407 
408 	return (FALSE);
409 }
410 
411 #include "isadma.h"
412 
413 #if NISADMA > 0
414 /*
415  * Used to protect memory for ISA DMA bounce buffers.  If, when loading
416  * pages into the system, memory intersects with any of these ranges,
417  * the intersecting memory will be loaded into a lower-priority free list.
418  */
419 bus_dma_segment_t *pmap_isa_dma_ranges;
420 int pmap_isa_dma_nranges;
421 
422 /*
423  * Check if a memory range intersects with an ISA DMA range, and
424  * return the page-rounded intersection if it does.  The intersection
425  * will be placed on a lower-priority free list.
426  */
427 static boolean_t
428 pmap_isa_dma_range_intersect(paddr_t pa, psize_t size, paddr_t *pap,
429     psize_t *sizep)
430 {
431 	bus_dma_segment_t *ds;
432 	int i;
433 
434 	if (pmap_isa_dma_ranges == NULL)
435 		return (FALSE);
436 
437 	for (i = 0, ds = pmap_isa_dma_ranges;
438 	     i < pmap_isa_dma_nranges; i++, ds++) {
439 		if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
440 			/*
441 			 * Beginning of region intersects with this range.
442 			 */
443 			*pap = trunc_page(pa);
444 			*sizep = round_page(min(pa + size,
445 			    ds->ds_addr + ds->ds_len) - pa);
446 			return (TRUE);
447 		}
448 		if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
449 			/*
450 			 * End of region intersects with this range.
451 			 */
452 			*pap = trunc_page(ds->ds_addr);
453 			*sizep = round_page(min((pa + size) - ds->ds_addr,
454 			    ds->ds_len));
455 			return (TRUE);
456 		}
457 	}
458 
459 	/*
460 	 * No intersection found.
461 	 */
462 	return (FALSE);
463 }
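
/*
 * Worked example with hypothetical numbers: given a single ISA DMA range
 * with ds_addr == 0x00000000 and ds_len == 0x01000000 (the low 16MB), a
 * call with pa == 0x00800000 and size == 0x01000000 matches the first case
 * above and returns *pap == 0x00800000 and *sizep ==
 * round_page(min(0x01800000, 0x01000000) - 0x00800000) == 0x00800000,
 * i.e. just the part of the region that falls below 16MB.
 */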
464 #endif /* NISADMA > 0 */
465 
466 /*
467  * p v _ e n t r y   f u n c t i o n s
468  */
469 
470 /*
471  * pv_entry allocation functions:
472  *   the main pv_entry allocation functions are:
473  *     pmap_alloc_pv: allocate a pv_entry structure
474  *     pmap_free_pv: free one pv_entry
475  *     pmap_free_pvs: free a list of pv_entrys
476  *
477  * the rest are helper functions
478  */
479 
480 /*
481  * pmap_alloc_pv: inline function to allocate a pv_entry structure
482  * => we lock pvalloc_lock
483  * => if we fail, we call out to pmap_alloc_pvpage
484  * => 3 modes:
485  *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
486  *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
487  *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
488  *			one now
489  *
490  * "try" is for optional functions like pmap_copy().
491  */
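
/*
 * Usage sketch (illustrative only): a caller on the pmap_enter() path that
 * must have a pv_entry would do
 *
 *	pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
 *
 * and typically treat a NULL return as fatal, whereas an optional operation
 * would pass ALLOCPV_TRY and simply skip its work if NULL comes back.
 */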
492 
493 __inline static struct pv_entry *
494 pmap_alloc_pv(struct pmap *pmap, int mode)
495 {
496 	struct pv_page *pvpage;
497 	struct pv_entry *pv;
498 
499 	simple_lock(&pvalloc_lock);
500 
501 	pvpage = TAILQ_FIRST(&pv_freepages);
502 
503 	if (pvpage != NULL) {
504 		pvpage->pvinfo.pvpi_nfree--;
505 		if (pvpage->pvinfo.pvpi_nfree == 0) {
506 			/* nothing left in this one? */
507 			TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
508 		}
509 		pv = pvpage->pvinfo.pvpi_pvfree;
510 		KASSERT(pv);
511 		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
512 		pv_nfpvents--;  /* took one from pool */
513 	} else {
514 		pv = NULL;		/* need more of them */
515 	}
516 
517 	/*
518 	 * if below low water mark or we didn't get a pv_entry we try and
519 	 * create more pv_entrys ...
520 	 */
521 
522 	if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
523 		if (pv == NULL)
524 			pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
525 					       mode : ALLOCPV_NEED);
526 		else
527 			(void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
528 	}
529 
530 	simple_unlock(&pvalloc_lock);
531 	return(pv);
532 }
533 
534 /*
535  * pmap_alloc_pvpage: maybe allocate a new pvpage
536  *
537  * if need_entry is false: try and allocate a new pv_page
538  * if need_entry is true: try and allocate a new pv_page and return a
539  *	new pv_entry from it.   if we are unable to allocate a pv_page
540  *	we make a last ditch effort to steal a pv_page from some other
541  *	mapping.    if that fails, we panic...
542  *
543  * => we assume that the caller holds pvalloc_lock
544  */
545 
546 static struct pv_entry *
547 pmap_alloc_pvpage(struct pmap *pmap, int mode)
548 {
549 	struct vm_page *pg;
550 	struct pv_page *pvpage;
551 	struct pv_entry *pv;
552 	int s;
553 
554 	/*
555 	 * if we need_entry and we've got unused pv_pages, allocate from there
556 	 */
557 
558 	pvpage = TAILQ_FIRST(&pv_unusedpgs);
559 	if (mode != ALLOCPV_NONEED && pvpage != NULL) {
560 
561 		/* move it to pv_freepages list */
562 		TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
563 		TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);
564 
565 		/* allocate a pv_entry */
566 		pvpage->pvinfo.pvpi_nfree--;	/* can't go to zero */
567 		pv = pvpage->pvinfo.pvpi_pvfree;
568 		KASSERT(pv);
569 		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
570 
571 		pv_nfpvents--;  /* took one from pool */
572 		return(pv);
573 	}
574 
575 	/*
576 	 *  see if we've got a cached unmapped VA that we can map a page in.
577 	 * if not, try to allocate one.
578 	 */
579 
580 
581 	if (pv_cachedva == 0) {
582 		s = splvm();
583 		pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
584 		    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
585 		splx(s);
586 		if (pv_cachedva == 0) {
587 			return (NULL);
588 		}
589 	}
590 
591 	pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
592 	    UVM_PGA_USERESERVE);
593 
594 	if (pg == NULL)
595 		return (NULL);
596 	pg->flags &= ~PG_BUSY;	/* never busy */
597 
598 	/*
599 	 * add a mapping for our new pv_page and free its entrys (save one!)
600 	 *
601 	 * NOTE: If we are allocating a PV page for the kernel pmap, the
602 	 * pmap is already locked!  (...but entering the mapping is safe...)
603 	 */
604 
605 	pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
606 		VM_PROT_READ|VM_PROT_WRITE);
607 	pmap_update(pmap_kernel());
608 	pvpage = (struct pv_page *) pv_cachedva;
609 	pv_cachedva = 0;
610 	return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
611 }
612 
613 /*
614  * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
615  *
616  * => caller must hold pvalloc_lock
617  * => if need_entry is true, we allocate and return one pv_entry
618  */
619 
620 static struct pv_entry *
621 pmap_add_pvpage(struct pv_page *pvp, boolean_t need_entry)
622 {
623 	int tofree, lcv;
624 
625 	/* do we need to return one? */
626 	tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;
627 
628 	pvp->pvinfo.pvpi_pvfree = NULL;
629 	pvp->pvinfo.pvpi_nfree = tofree;
630 	for (lcv = 0 ; lcv < tofree ; lcv++) {
631 		pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
632 		pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
633 	}
634 	if (need_entry)
635 		TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
636 	else
637 		TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
638 	pv_nfpvents += tofree;
639 	return((need_entry) ? &pvp->pvents[lcv] : NULL);
640 }
641 
642 /*
643  * pmap_free_pv_doit: actually free a pv_entry
644  *
645  * => do not call this directly!  instead use either
646  *    1. pmap_free_pv ==> free a single pv_entry
647  *    2. pmap_free_pvs => free a list of pv_entrys
648  * => we must be holding pvalloc_lock
649  */
650 
651 __inline static void
652 pmap_free_pv_doit(struct pv_entry *pv)
653 {
654 	struct pv_page *pvp;
655 
656 	pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
657 	pv_nfpvents++;
658 	pvp->pvinfo.pvpi_nfree++;
659 
660 	/* nfree == 1 => fully allocated page just became partly allocated */
661 	if (pvp->pvinfo.pvpi_nfree == 1) {
662 		TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
663 	}
664 
665 	/* free it */
666 	pv->pv_next = pvp->pvinfo.pvpi_pvfree;
667 	pvp->pvinfo.pvpi_pvfree = pv;
668 
669 	/*
670 	 * are all pv_page's pv_entry's free?  move it to unused queue.
671 	 */
672 
673 	if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
674 		TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
675 		TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
676 	}
677 }
678 
679 /*
680  * pmap_free_pv: free a single pv_entry
681  *
682  * => we gain the pvalloc_lock
683  */
684 
685 __inline static void
686 pmap_free_pv(struct pmap *pmap, struct pv_entry *pv)
687 {
688 	simple_lock(&pvalloc_lock);
689 	pmap_free_pv_doit(pv);
690 
691 	/*
692 	 * Can't free the PV page if the PV entries were associated with
693 	 * the kernel pmap; the pmap is already locked.
694 	 */
695 	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
696 	    pmap != pmap_kernel())
697 		pmap_free_pvpage();
698 
699 	simple_unlock(&pvalloc_lock);
700 }
701 
702 /*
703  * pmap_free_pvs: free a list of pv_entrys
704  *
705  * => we gain the pvalloc_lock
706  */
707 
708 __inline static void
709 pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
710 {
711 	struct pv_entry *nextpv;
712 
713 	simple_lock(&pvalloc_lock);
714 
715 	for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
716 		nextpv = pvs->pv_next;
717 		pmap_free_pv_doit(pvs);
718 	}
719 
720 	/*
721 	 * Can't free the PV page if the PV entries were associated with
722 	 * the kernel pmap; the pmap is already locked.
723 	 */
724 	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
725 	    pmap != pmap_kernel())
726 		pmap_free_pvpage();
727 
728 	simple_unlock(&pvalloc_lock);
729 }
730 
731 
732 /*
733  * pmap_free_pvpage: try and free an unused pv_page structure
734  *
735  * => assume caller is holding the pvalloc_lock and that
736  *	there is a page on the pv_unusedpgs list
737  * => if we can't get a lock on the kmem_map we try again later
738  */
739 
740 static void
741 pmap_free_pvpage(void)
742 {
743 	int s;
744 	struct vm_map *map;
745 	struct vm_map_entry *dead_entries;
746 	struct pv_page *pvp;
747 
748 	s = splvm(); /* protect kmem_map */
749 
750 	pvp = TAILQ_FIRST(&pv_unusedpgs);
751 
752 	/*
753 	 * note: watch out for pv_initpage which is allocated out of
754 	 * kernel_map rather than kmem_map.
755 	 */
756 	if (pvp == pv_initpage)
757 		map = kernel_map;
758 	else
759 		map = kmem_map;
760 	if (vm_map_lock_try(map)) {
761 
762 		/* remove pvp from pv_unusedpgs */
763 		TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
764 
765 		/* unmap the page */
766 		dead_entries = NULL;
767 		uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
768 		    &dead_entries);
769 		vm_map_unlock(map);
770 
771 		if (dead_entries != NULL)
772 			uvm_unmap_detach(dead_entries, 0);
773 
774 		pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
775 	}
776 	if (pvp == pv_initpage)
777 		/* no more initpage, we've freed it */
778 		pv_initpage = NULL;
779 
780 	splx(s);
781 }
782 
783 /*
784  * main pv_entry manipulation functions:
785  *   pmap_enter_pv: enter a mapping onto a vm_page list
786  *   pmap_remove_pv: remove a mapping from a vm_page list
787  *
788  * NOTE: pmap_enter_pv expects to lock the pvh itself
789  *       pmap_remove_pv expects the caller to lock the pvh before calling
790  */
791 
792 /*
793  * pmap_enter_pv: enter a mapping onto a vm_page list
794  *
795  * => caller should hold the proper lock on pmap_main_lock
796  * => caller should have pmap locked
797  * => we will gain the lock on the vm_page and allocate the new pv_entry
798  * => caller should adjust ptp's wire_count before calling
799  * => caller should not adjust pmap's wire_count
800  */
801 
802 __inline static void
803 pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
804     vaddr_t va, struct vm_page *ptp, int flags)
805 {
806 	pve->pv_pmap = pmap;
807 	pve->pv_va = va;
808 	pve->pv_ptp = ptp;			/* NULL for kernel pmap */
809 	pve->pv_flags = flags;
810 	simple_lock(&pg->mdpage.pvh_slock);	/* lock vm_page */
811 	pve->pv_next = pg->mdpage.pvh_list;	/* add to ... */
812 	pg->mdpage.pvh_list = pve;		/* ... locked list */
813 	simple_unlock(&pg->mdpage.pvh_slock);	/* unlock, done! */
814 	if (pve->pv_flags & PVF_WIRED)
815 		++pmap->pm_stats.wired_count;
816 }
817 
818 /*
819  * pmap_remove_pv: try to remove a mapping from a pv_list
820  *
821  * => caller should hold proper lock on pmap_main_lock
822  * => pmap should be locked
823  * => caller should hold lock on vm_page [so that attrs can be adjusted]
824  * => caller should adjust ptp's wire_count and free PTP if needed
825  * => caller should NOT adjust pmap's wire_count
826  * => we return the removed pve
827  */
828 
829 __inline static struct pv_entry *
830 pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
831 {
832 	struct pv_entry *pve, **prevptr;
833 
834 	prevptr = &pg->mdpage.pvh_list;		/* previous pv_entry pointer */
835 	pve = *prevptr;
836 	while (pve) {
837 		if (pve->pv_pmap == pmap && pve->pv_va == va) {	/* match? */
838 			*prevptr = pve->pv_next;		/* remove it! */
839 			if (pve->pv_flags & PVF_WIRED)
840 			    --pmap->pm_stats.wired_count;
841 			break;
842 		}
843 		prevptr = &pve->pv_next;		/* previous pointer */
844 		pve = pve->pv_next;			/* advance */
845 	}
846 	return(pve);				/* return removed pve */
847 }
848 
849 /*
850  *
851  * pmap_modify_pv: Update pv flags
852  *
853  * => caller should hold lock on vm_page [so that attrs can be adjusted]
854  * => caller should NOT adjust pmap's wire_count
855  * => caller must call pmap_vac_me_harder() if writable status of a page
856  *    may have changed.
857  * => we return the old flags
858  *
859  * Modify a physical-virtual mapping in the pv table
860  */
861 
862 static /* __inline */ u_int
863 pmap_modify_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg,
864     u_int bic_mask, u_int eor_mask)
865 {
866 	struct pv_entry *npv;
867 	u_int flags, oflags;
868 
869 	/*
870 	 * There is at least one VA mapping this page.
871 	 */
872 
873 	for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
874 		if (pmap == npv->pv_pmap && va == npv->pv_va) {
875 			oflags = npv->pv_flags;
876 			npv->pv_flags = flags =
877 			    ((oflags & ~bic_mask) ^ eor_mask);
878 			if ((flags ^ oflags) & PVF_WIRED) {
879 				if (flags & PVF_WIRED)
880 					++pmap->pm_stats.wired_count;
881 				else
882 					--pmap->pm_stats.wired_count;
883 			}
884 			return (oflags);
885 		}
886 	}
887 	return (0);
888 }
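
/*
 * Illustrative use of the masks (a sketch, not copied from a caller): the
 * new flags are computed as (old & ~bic_mask) ^ eor_mask, so clearing the
 * wired bit would be
 *
 *	(void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
 *
 * while passing PVF_WIRED for both masks sets the bit unconditionally.
 */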
889 
890 /*
891  * Map the specified level 2 pagetable into the level 1 page table for
892  * the given pmap to cover a chunk of virtual address space starting from the
893  * address specified.
894  */
895 static __inline void
896 pmap_map_in_l1(struct pmap *pmap, vaddr_t va, paddr_t l2pa, boolean_t selfref)
897 {
898 	vaddr_t ptva;
899 
900 	/* Calculate the index into the L1 page table. */
901 	ptva = (va >> L1_S_SHIFT) & ~3;
902 
903 	/* Map page table into the L1. */
904 	pmap->pm_pdir[ptva + 0] = L1_C_PROTO | (l2pa + 0x000);
905 	pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
906 	pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
907 	pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);
908 
909 	/* Map the page table into the page table area. */
910 	if (selfref)
911 		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_S_PROTO | l2pa |
912 		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
913 }
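
/*
 * Worked example (assuming the usual ARM L1_S_SHIFT of 20, i.e. 1MB L1
 * sections): for va == 0x00400000, ptva == (0x00400000 >> 20) & ~3 == 4,
 * so L1 slots 4-7 are pointed at the four 1KB quarters of the 4KB page at
 * l2pa, and that single page of L2 tables covers the 4MB of VA from
 * 0x00400000 to 0x007fffff.
 */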
914 
915 #if 0
916 static __inline void
917 pmap_unmap_in_l1(struct pmap *pmap, vaddr_t va)
918 {
919 	vaddr_t ptva;
920 
921 	/* Calculate the index into the L1 page table. */
922 	ptva = (va >> L1_S_SHIFT) & ~3;
923 
924 	/* Unmap page table from the L1. */
925 	pmap->pm_pdir[ptva + 0] = 0;
926 	pmap->pm_pdir[ptva + 1] = 0;
927 	pmap->pm_pdir[ptva + 2] = 0;
928 	pmap->pm_pdir[ptva + 3] = 0;
929 
930 	/* Unmap the page table from the page table area. */
931 	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
932 }
933 #endif
934 
935 /*
936  *	Used to map a range of physical addresses into kernel
937  *	virtual address space.
938  *
939  *	For now, VM is already on; we only need to map the
940  *	specified memory.
941  */
942 vaddr_t
943 pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
944 {
945 	while (spa < epa) {
946 		pmap_kenter_pa(va, spa, prot);
947 		va += NBPG;
948 		spa += NBPG;
949 	}
950 	pmap_update(pmap_kernel());
951 	return(va);
952 }
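
/*
 * Usage sketch (hypothetical caller, not from this file): machine-dependent
 * start-up code could map a physically contiguous region this way, e.g.
 *
 *	va = pmap_map(va, msgbuf_pa, msgbuf_pa + MSGBUFSIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * The value returned is the first VA beyond the mappings just entered.
 */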
953 
954 
955 /*
956  * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
957  *
958  * bootstrap the pmap system. This is called from initarm and allows
959  * the pmap system to initialise any structures it requires.
960  *
961  * Currently this sets up the kernel_pmap that is statically allocated
962  * and also allocates virtual addresses for certain page hooks.
963  * Currently only one page hook is allocated; it is used
964  * to zero physical pages of memory.
965  * It also initialises the start and end address of the kernel data space.
966  */
967 extern paddr_t physical_freestart;
968 extern paddr_t physical_freeend;
969 
970 char *boot_head;
971 
972 void
973 pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
974 {
975 	pt_entry_t *pte;
976 	int loop;
977 	paddr_t start, end;
978 #if NISADMA > 0
979 	paddr_t istart;
980 	psize_t isize;
981 #endif
982 
983 	pmap_kernel()->pm_pdir = kernel_l1pt;
984 	pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
985 	pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
986 	simple_lock_init(&pmap_kernel()->pm_lock);
987 	pmap_kernel()->pm_obj.pgops = NULL;
988 	TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
989 	pmap_kernel()->pm_obj.uo_npages = 0;
990 	pmap_kernel()->pm_obj.uo_refs = 1;
991 
992 	/*
993 	 * Initialize PAGE_SIZE-dependent variables.
994 	 */
995 	uvm_setpagesize();
996 
997 	loop = 0;
998 	while (loop < bootconfig.dramblocks) {
999 		start = (paddr_t)bootconfig.dram[loop].address;
1000 		end = start + (bootconfig.dram[loop].pages * NBPG);
1001 		if (start < physical_freestart)
1002 			start = physical_freestart;
1003 		if (end > physical_freeend)
1004 			end = physical_freeend;
1005 #if 0
1006 		printf("%d: %lx -> %lx\n", loop, start, end - 1);
1007 #endif
1008 #if NISADMA > 0
1009 		if (pmap_isa_dma_range_intersect(start, end - start,
1010 		    &istart, &isize)) {
1011 			/*
1012 			 * Place the pages that intersect with the
1013 			 * ISA DMA range onto the ISA DMA free list.
1014 			 */
1015 #if 0
1016 			printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
1017 			    istart + isize - 1);
1018 #endif
1019 			uvm_page_physload(atop(istart),
1020 			    atop(istart + isize), atop(istart),
1021 			    atop(istart + isize), VM_FREELIST_ISADMA);
1022 
1023 			/*
1024 			 * Load the pieces that come before
1025 			 * the intersection into the default
1026 			 * free list.
1027 			 */
1028 			if (start < istart) {
1029 #if 0
1030 				printf("    BEFORE 0x%lx -> 0x%lx\n",
1031 				    start, istart - 1);
1032 #endif
1033 				uvm_page_physload(atop(start),
1034 				    atop(istart), atop(start),
1035 				    atop(istart), VM_FREELIST_DEFAULT);
1036 			}
1037 
1038 			/*
1039 			 * Load the pieces that come after
1040 			 * the intersection into the default
1041 			 * free list.
1042 			 */
1043 			if ((istart + isize) < end) {
1044 #if 0
1045 				printf("     AFTER 0x%lx -> 0x%lx\n",
1046 				    (istart + isize), end - 1);
1047 #endif
1048 				uvm_page_physload(atop(istart + isize),
1049 				    atop(end), atop(istart + isize),
1050 				    atop(end), VM_FREELIST_DEFAULT);
1051 			}
1052 		} else {
1053 			uvm_page_physload(atop(start), atop(end),
1054 			    atop(start), atop(end), VM_FREELIST_DEFAULT);
1055 		}
1056 #else	/* NISADMA > 0 */
1057 		uvm_page_physload(atop(start), atop(end),
1058 		    atop(start), atop(end), VM_FREELIST_DEFAULT);
1059 #endif /* NISADMA > 0 */
1060 		++loop;
1061 	}
1062 
1063 	virtual_avail = KERNEL_VM_BASE;
1064 	virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;
1065 
1066 	/*
1067 	 * now we allocate the "special" VAs which are used for tmp mappings
1068 	 * by the pmap (and other modules).  we allocate the VAs by advancing
1069 	 * virtual_avail (note that there are no pages mapped at these VAs).
1070 	 * we find the PTE that maps the allocated VA via the linear PTE
1071 	 * mapping.
1072 	 */
1073 
1074 	pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);
1075 
1076 	csrcp = virtual_avail; csrc_pte = pte;
1077 	virtual_avail += PAGE_SIZE; pte++;
1078 
1079 	cdstp = virtual_avail; cdst_pte = pte;
1080 	virtual_avail += PAGE_SIZE; pte++;
1081 
1082 	memhook = (char *) virtual_avail;	/* don't need pte */
1083 	virtual_avail += PAGE_SIZE; pte++;
1084 
1085 	msgbufaddr = (caddr_t) virtual_avail;	/* don't need pte */
1086 	virtual_avail += round_page(MSGBUFSIZE);
1087 	pte += atop(round_page(MSGBUFSIZE));
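
	/*
	 * With the above, the layout starting at KERNEL_VM_BASE is simply:
	 * csrcp, cdstp and memhook, one page each, followed by
	 * round_page(MSGBUFSIZE) bytes for the message buffer;
	 * virtual_avail now points just past the message buffer.
	 */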
1088 
1089 	/*
1090 	 * init the static-global locks and global lists.
1091 	 */
1092 	spinlockinit(&pmap_main_lock, "pmaplk", 0);
1093 	simple_lock_init(&pvalloc_lock);
1094 	simple_lock_init(&pmaps_lock);
1095 	LIST_INIT(&pmaps);
1096 	TAILQ_INIT(&pv_freepages);
1097 	TAILQ_INIT(&pv_unusedpgs);
1098 
1099 	/*
1100 	 * initialize the pmap pool.
1101 	 */
1102 
1103 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1104 		  &pool_allocator_nointr);
1105 
1106 	cpu_dcache_wbinv_all();
1107 }
1108 
1109 /*
1110  * void pmap_init(void)
1111  *
1112  * Initialize the pmap module.
1113  * Called by vm_init() in vm/vm_init.c in order to initialise
1114  * any structures that the pmap system needs to map virtual memory.
1115  */
1116 
1117 extern int physmem;
1118 
1119 void
1120 pmap_init(void)
1121 {
1122 
1123 	/*
1124 	 * Set the available memory vars - These do not map to real memory
1125 	 * addresses, and cannot, as the physical memory is fragmented.
1126 	 * They are used by ps for %mem calculations.
1127 	 * One could argue whether this should be the entire memory or just
1128 	 * the memory that is usable in a user process.
1129 	 */
1130 	avail_start = 0;
1131 	avail_end = physmem * NBPG;
1132 
1133 	/*
1134 	 * now we need to free enough pv_entry structures to allow us to get
1135 	 * the kmem_map/kmem_object allocated and inited (done after this
1136 	 * function is finished).  to do this we allocate one bootstrap page out
1137 	 * of kernel_map and use it to provide an initial pool of pv_entry
1138 	 * structures.   we never free this page.
1139 	 */
1140 
1141 	pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
1142 	if (pv_initpage == NULL)
1143 		panic("pmap_init: pv_initpage");
1144 	pv_cachedva = 0;   /* a VA we have allocated but not used yet */
1145 	pv_nfpvents = 0;
1146 	(void) pmap_add_pvpage(pv_initpage, FALSE);
1147 
1148 	pmap_initialized = TRUE;
1149 
1150 	/* Initialise our L1 page table queues and counters */
1151 	SIMPLEQ_INIT(&l1pt_static_queue);
1152 	l1pt_static_queue_count = 0;
1153 	l1pt_static_create_count = 0;
1154 	SIMPLEQ_INIT(&l1pt_queue);
1155 	l1pt_queue_count = 0;
1156 	l1pt_create_count = 0;
1157 	l1pt_reuse_count = 0;
1158 }
1159 
1160 /*
1161  * pmap_postinit()
1162  *
1163  * This routine is called after the vm and kmem subsystems have been
1164  * initialised. This allows the pmap code to perform any initialisation
1165  * that can only be done once the memory allocation is in place.
1166  */
1167 
1168 void
1169 pmap_postinit(void)
1170 {
1171 	int loop;
1172 	struct l1pt *pt;
1173 
1174 #ifdef PMAP_STATIC_L1S
1175 	for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
1176 #else	/* PMAP_STATIC_L1S */
1177 	for (loop = 0; loop < max_processes; ++loop) {
1178 #endif	/* PMAP_STATIC_L1S */
1179 		/* Allocate a L1 page table */
1180 		pt = pmap_alloc_l1pt();
1181 		if (!pt)
1182 			panic("Cannot allocate static L1 page tables\n");
1183 
1184 		/* Clean it */
1185 		bzero((void *)pt->pt_va, L1_TABLE_SIZE);
1186 		pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
1187 		/* Add the page table to the queue */
1188 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
1189 		++l1pt_static_queue_count;
1190 		++l1pt_static_create_count;
1191 	}
1192 }
1193 
1194 
1195 /*
1196  * Create and return a physical map.
1197  *
1198  * If the size specified for the map is zero, the map is an actual physical
1199  * map, and may be referenced by the hardware.
1200  *
1201  * If the size specified is non-zero, the map will be used in software only,
1202  * and is bounded by that size.
1203  */
1204 
1205 pmap_t
1206 pmap_create(void)
1207 {
1208 	struct pmap *pmap;
1209 
1210 	/*
1211 	 * Fetch pmap entry from the pool
1212 	 */
1213 
1214 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1215 	/* XXX is this really needed! */
1216 	memset(pmap, 0, sizeof(*pmap));
1217 
1218 	simple_lock_init(&pmap->pm_obj.vmobjlock);
1219 	pmap->pm_obj.pgops = NULL;	/* currently not a mappable object */
1220 	TAILQ_INIT(&pmap->pm_obj.memq);
1221 	pmap->pm_obj.uo_npages = 0;
1222 	pmap->pm_obj.uo_refs = 1;
1223 	pmap->pm_stats.wired_count = 0;
1224 	pmap->pm_stats.resident_count = 1;
1225 	pmap->pm_ptphint = NULL;
1226 
1227 	/* Now init the machine part of the pmap */
1228 	pmap_pinit(pmap);
1229 	return(pmap);
1230 }
1231 
1232 /*
1233  * pmap_alloc_l1pt()
1234  *
1235  * This routine allocates physical and virtual memory for an L1 page table
1236  * and wires it.
1237  * An l1pt structure is returned to describe the allocated page table.
1238  *
1239  * This routine is allowed to fail if the required memory cannot be allocated.
1240  * In this case NULL is returned.
1241  */
1242 
1243 struct l1pt *
1244 pmap_alloc_l1pt(void)
1245 {
1246 	paddr_t pa;
1247 	vaddr_t va;
1248 	struct l1pt *pt;
1249 	int error;
1250 	struct vm_page *m;
1251 	pt_entry_t *pte;
1252 
1253 	/* Allocate virtual address space for the L1 page table */
1254 	va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
1255 	if (va == 0) {
1256 #ifdef DIAGNOSTIC
1257 		PDEBUG(0,
1258 		    printf("pmap: Cannot allocate pageable memory for L1\n"));
1259 #endif	/* DIAGNOSTIC */
1260 		return(NULL);
1261 	}
1262 
1263 	/* Allocate memory for the l1pt structure */
1264 	pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
1265 
1266 	/*
1267 	 * Allocate pages from the VM system.
1268 	 */
1269 	error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, physical_end,
1270 	    L1_TABLE_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
1271 	if (error) {
1272 #ifdef DIAGNOSTIC
1273 		PDEBUG(0,
1274 		    printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
1275 		    error));
1276 #endif	/* DIAGNOSTIC */
1277 		/* Release the resources we already have claimed */
1278 		free(pt, M_VMPMAP);
1279 		uvm_km_free(kernel_map, va, L1_TABLE_SIZE);
1280 		return(NULL);
1281 	}
1282 
1283 	/* Map our physical pages into our virtual space */
1284 	pt->pt_va = va;
1285 	m = TAILQ_FIRST(&pt->pt_plist);
1286 	while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
1287 		pa = VM_PAGE_TO_PHYS(m);
1288 
1289 		pte = vtopte(va);
1290 
1291 		/*
1292 		 * Assert that the PTE is invalid.  If it's invalid,
1293 		 * then we are guaranteed that there won't be an entry
1294 		 * for this VA in the TLB.
1295 		 */
1296 		KDASSERT(pmap_pte_v(pte) == 0);
1297 
1298 		*pte = L2_S_PROTO | VM_PAGE_TO_PHYS(m) |
1299 		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
1300 
1301 		va += NBPG;
1302 		m = m->pageq.tqe_next;
1303 	}
1304 
1305 #ifdef DIAGNOSTIC
1306 	if (m)
1307 		panic("pmap_alloc_l1pt: pglist not empty\n");
1308 #endif	/* DIAGNOSTIC */
1309 
1310 	pt->pt_flags = 0;
1311 	return(pt);
1312 }
1313 
1314 /*
1315  * Free a L1 page table previously allocated with pmap_alloc_l1pt().
1316  */
1317 static void
1318 pmap_free_l1pt(struct l1pt *pt)
1319 {
1320 	/* Separate the physical memory from the virtual space */
1321 	pmap_kremove(pt->pt_va, L1_TABLE_SIZE);
1322 	pmap_update(pmap_kernel());
1323 
1324 	/* Return the physical memory */
1325 	uvm_pglistfree(&pt->pt_plist);
1326 
1327 	/* Free the virtual space */
1328 	uvm_km_free(kernel_map, pt->pt_va, L1_TABLE_SIZE);
1329 
1330 	/* Free the l1pt structure */
1331 	free(pt, M_VMPMAP);
1332 }
1333 
1334 /*
1335  * pmap_alloc_ptpt:
1336  *
1337  *	Allocate the page table that maps the PTE array.
1338  */
1339 static int
1340 pmap_alloc_ptpt(struct pmap *pmap)
1341 {
1342 	struct vm_page *pg;
1343 	pt_entry_t *pte;
1344 
1345 	KASSERT(pmap->pm_vptpt == 0);
1346 
1347 	pmap->pm_vptpt = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
1348 	if (pmap->pm_vptpt == 0) {
1349 		PDEBUG(0,
1350 		    printf("pmap_alloc_ptpt: no KVA for PTPT\n"));
1351 		return (ENOMEM);
1352 	}
1353 
1354 	for (;;) {
1355 		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
1356 		if (pg != NULL)
1357 			break;
1358 		uvm_wait("pmap_ptpt");
1359 	}
1360 
1361 	pmap->pm_pptpt = VM_PAGE_TO_PHYS(pg);
1362 
1363 	pte = vtopte(pmap->pm_vptpt);
1364 
1365 	KDASSERT(pmap_pte_v(pte) == 0);
1366 
1367 	*pte = L2_S_PROTO | pmap->pm_pptpt |
1368 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
1369 
1370 	return (0);
1371 }
1372 
1373 /*
1374  * pmap_free_ptpt:
1375  *
1376  *	Free the page table that maps the PTE array.
1377  */
1378 static void
1379 pmap_free_ptpt(struct pmap *pmap)
1380 {
1381 
1382 	pmap_kremove(pmap->pm_vptpt, L2_TABLE_SIZE);
1383 	pmap_update(pmap_kernel());
1384 
1385 	uvm_pagefree(PHYS_TO_VM_PAGE(pmap->pm_pptpt));
1386 
1387 	uvm_km_free(kernel_map, pmap->pm_vptpt, L2_TABLE_SIZE);
1388 }
1389 
1390 /*
1391  * Allocate a page directory.
1392  * This routine will either allocate a new page directory from the pool
1393  * of L1 page tables currently held by the kernel or it will allocate
1394  * a new one via pmap_alloc_l1pt().
1395  * It will then initialise the l1 page table for use.
1396  */
1397 static int
1398 pmap_allocpagedir(struct pmap *pmap)
1399 {
1400 	paddr_t pa;
1401 	struct l1pt *pt;
1402 	int error;
1403 
1404 	PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1405 
1406 	/* Do we have any spare L1's lying around ? */
1407 	if (l1pt_static_queue_count) {
1408 		--l1pt_static_queue_count;
1409 		pt = SIMPLEQ_FIRST(&l1pt_static_queue);
1410 		SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt_queue);
1411 	} else if (l1pt_queue_count) {
1412 		--l1pt_queue_count;
1413 		pt = SIMPLEQ_FIRST(&l1pt_queue);
1414 		SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt_queue);
1415 		++l1pt_reuse_count;
1416 	} else {
1417 		pt = pmap_alloc_l1pt();
1418 		if (!pt)
1419 			return(ENOMEM);
1420 		++l1pt_create_count;
1421 	}
1422 
1423 	/* Store the pointer to the l1 descriptor in the pmap. */
1424 	pmap->pm_l1pt = pt;
1425 
1426 	/* Get the physical address of the start of the l1 */
1427 	pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));
1428 
1429 	/* Store the virtual address of the l1 in the pmap. */
1430 	pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1431 
1432 	/* Clean the L1 if it is dirty */
1433 	if (!(pt->pt_flags & PTFLAG_CLEAN))
1434 		bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));
1435 
1436 	/* Allocate a page table to map all the page tables for this pmap */
1437 	if ((error = pmap_alloc_ptpt(pmap)) != 0) {
1438 		pmap_freepagedir(pmap);
1439 		return (error);
1440 	}
1441 
1442 	/* need to lock this all up for growkernel */
1443 	simple_lock(&pmaps_lock);
1444 
1445 	/* Duplicate the kernel mappings. */
1446 	bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
1447 		(char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
1448 		KERNEL_PD_SIZE);
1449 
1450 	/* Wire in this page table */
1451 	pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, TRUE);
1452 
1453 	pt->pt_flags &= ~PTFLAG_CLEAN;	/* L1 is dirty now */
1454 
1455 	/*
1456 	 * Map the kernel page tables into the new PT map.
1457 	 */
1458 	bcopy((char *)(PTE_BASE
1459 	    + (PTE_BASE >> (PGSHIFT - 2))
1460 	    + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
1461 	    (char *)pmap->pm_vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
1462 	    (KERNEL_PD_SIZE >> 2));
1463 
1464 	LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
1465 	simple_unlock(&pmaps_lock);
1466 
1467 	return(0);
1468 }
1469 
1470 
1471 /*
1472  * Initialize a preallocated and zeroed pmap structure,
1473  * such as one in a vmspace structure.
1474  */
1475 
1476 void
1477 pmap_pinit(struct pmap *pmap)
1478 {
1479 	int backoff = 6;
1480 	int retry = 10;
1481 
1482 	PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1483 
1484 	/* Keep looping until we succeed in allocating a page directory */
1485 	while (pmap_allocpagedir(pmap) != 0) {
1486 		/*
1487 		 * Ok we failed to allocate a suitable block of memory for an
1488 		 * L1 page table. This means that either:
1489 		 * 1. 16KB of virtual address space could not be allocated
1490 		 * 2. 16KB of physically contiguous memory on a 16KB boundary
1491 		 *    could not be allocated.
1492 		 *
1493 		 * Since we cannot fail we will sleep for a while and try
1494 		 * again.
1495 		 *
1496 		 * Searching for a suitable L1 PT is expensive:
1497 		 * to avoid hogging the system when memory is really
1498 		 * scarce, use an exponential back-off so that
1499 		 * eventually we won't retry more than once every 8
1500 		 * seconds.  This should allow other processes to run
1501 		 * to completion and free up resources.
1502 		 */
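		/*
		 * Worked example of the back-off: with backoff == 6 the
		 * timeout is (hz << 3) >> 6 == hz / 8, i.e. 1/8 second;
		 * every 10 failed attempts backoff is decremented, doubling
		 * the timeout, until it bottoms out at (hz << 3) >> 0 ==
		 * 8 * hz, i.e. 8 seconds.
		 */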
1503 		(void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
1504 		    NULL);
1505 		if (--retry == 0) {
1506 			retry = 10;
1507 			if (backoff)
1508 				--backoff;
1509 		}
1510 	}
1511 
1512 	if (vector_page < KERNEL_BASE) {
1513 		/*
1514 		 * Map the vector page.  This will also allocate and map
1515 		 * an L2 table for it.
1516 		 */
1517 		pmap_enter(pmap, vector_page, systempage.pv_pa,
1518 		    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1519 		pmap_update(pmap);
1520 	}
1521 }
1522 
1523 
1524 void
1525 pmap_freepagedir(struct pmap *pmap)
1526 {
1527 	/* Free the memory used for the page table mapping */
1528 	if (pmap->pm_vptpt != 0)
1529 		pmap_free_ptpt(pmap);
1530 
1531 	/* junk the L1 page table */
1532 	if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1533 		/* Add the page table to the queue */
1534 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
1535 		++l1pt_static_queue_count;
1536 	} else if (l1pt_queue_count < 8) {
1537 		/* Add the page table to the queue */
1538 		SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1539 		++l1pt_queue_count;
1540 	} else
1541 		pmap_free_l1pt(pmap->pm_l1pt);
1542 }
1543 
1544 
1545 /*
1546  * Retire the given physical map from service.
1547  * Should only be called if the map contains no valid mappings.
1548  */
1549 
1550 void
1551 pmap_destroy(struct pmap *pmap)
1552 {
1553 	struct vm_page *page;
1554 	int count;
1555 
1556 	if (pmap == NULL)
1557 		return;
1558 
1559 	PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1560 
1561 	/*
1562 	 * Drop reference count
1563 	 */
1564 	simple_lock(&pmap->pm_obj.vmobjlock);
1565 	count = --pmap->pm_obj.uo_refs;
1566 	simple_unlock(&pmap->pm_obj.vmobjlock);
1567 	if (count > 0) {
1568 		return;
1569 	}
1570 
1571 	/*
1572 	 * reference count is zero, free pmap resources and then free pmap.
1573 	 */
1574 
1575 	/*
1576 	 * remove it from global list of pmaps
1577 	 */
1578 
1579 	simple_lock(&pmaps_lock);
1580 	LIST_REMOVE(pmap, pm_list);
1581 	simple_unlock(&pmaps_lock);
1582 
1583 	if (vector_page < KERNEL_BASE) {
1584 		/* Remove the vector page mapping */
1585 		pmap_remove(pmap, vector_page, vector_page + NBPG);
1586 		pmap_update(pmap);
1587 	}
1588 
1589 	/*
1590 	 * Free any page tables still mapped
1591 	 * This is only temporary until pmap_enter can count the number
1592 	 * of mappings made in a page table. Then pmap_remove() can
1593 	 * reduce the count and free the pagetable when the count
1594 	 * reaches zero.  Note that entries in this list should match the
1595 	 * contents of the ptpt, however this is faster than walking 1024
1596 	 * entries looking for pt's
1597 	 * taken from i386 pmap.c
1598 	 */
1599 	/*
1600 	 * vmobjlock must be held while freeing pages
1601 	 */
1602 	simple_lock(&pmap->pm_obj.vmobjlock);
1603 	while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
1604 		KASSERT((page->flags & PG_BUSY) == 0);
1605 		page->wire_count = 0;
1606 		uvm_pagefree(page);
1607 	}
1608 	simple_unlock(&pmap->pm_obj.vmobjlock);
1609 
1610 	/* Free the page dir */
1611 	pmap_freepagedir(pmap);
1612 
1613 	/* return the pmap to the pool */
1614 	pool_put(&pmap_pmap_pool, pmap);
1615 }
1616 
1617 
1618 /*
1619  * void pmap_reference(struct pmap *pmap)
1620  *
1621  * Add a reference to the specified pmap.
1622  */
1623 
1624 void
1625 pmap_reference(struct pmap *pmap)
1626 {
1627 	if (pmap == NULL)
1628 		return;
1629 
1630 	simple_lock(&pmap->pm_lock);
1631 	pmap->pm_obj.uo_refs++;
1632 	simple_unlock(&pmap->pm_lock);
1633 }
1634 
1635 /*
1636  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1637  *
1638  * Return the start and end addresses of the kernel's virtual space.
1639  * These values are setup in pmap_bootstrap and are updated as pages
1640  * are allocated.
1641  */
1642 
1643 void
1644 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1645 {
1646 	*start = virtual_avail;
1647 	*end = virtual_end;
1648 }
1649 
1650 /*
1651  * Activate the address space for the specified process.  If the process
1652  * is the current process, load the new MMU context.
1653  */
1654 void
1655 pmap_activate(struct proc *p)
1656 {
1657 	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
1658 	struct pcb *pcb = &p->p_addr->u_pcb;
1659 
1660 	(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
1661 	    (paddr_t *)&pcb->pcb_pagedir);
1662 
1663 	PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1664 	    p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1665 
1666 	if (p == curproc) {
1667 		PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1668 		setttb((u_int)pcb->pcb_pagedir);
1669 	}
1670 }
1671 
1672 /*
1673  * Deactivate the address space of the specified process.
1674  */
1675 void
1676 pmap_deactivate(struct proc *p)
1677 {
1678 }
1679 
1680 /*
1681  * Perform any deferred pmap operations.
1682  */
1683 void
1684 pmap_update(struct pmap *pmap)
1685 {
1686 
1687 	/*
1688 	 * We haven't deferred any pmap operations, but we do need to
1689 	 * make sure TLB/cache operations have completed.
1690 	 */
1691 	cpu_cpwait();
1692 }
1693 
1694 /*
1695  * pmap_clean_page()
1696  *
1697  * This is a local function used to work out the best strategy to clean
1698  * a single page referenced by its entry in the PV table. It's used by
1699  * pmap_copy_page, pmap_zero_page and maybe some others later on.
1700  *
1701  * Its policy is effectively:
1702  *  o If there are no mappings, we don't bother doing anything with the cache.
1703  *  o If there is one mapping, we clean just that page.
1704  *  o If there are multiple mappings, we clean the entire cache.
1705  *
1706  * So that some functions can be further optimised, it returns 0 if it didn't
1707  * clean the entire cache, or 1 if it did.
1708  *
1709  * XXX One bug in this routine is that if the pv_entry has a single page
1710  * mapped at 0x00000000 a whole cache clean will be performed rather than
1711  * just the 1 page. This should not occur in everyday use, and if it does
1712  * it merely results in a less efficient clean for the page.
1713  */
1714 static int
1715 pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
1716 {
1717 	struct pmap *pmap;
1718 	struct pv_entry *npv;
1719 	int cache_needs_cleaning = 0;
1720 	vaddr_t page_to_clean = 0;
1721 
1722 	if (pv == NULL)
1723 		/* nothing mapped in so nothing to flush */
1724 		return (0);
1725 
1726 	/* Since we flush the cache each time we change curproc, we
1727 	 * only need to flush the page if it is in the current pmap.
1728 	 */
1729 	if (curproc)
1730 		pmap = curproc->p_vmspace->vm_map.pmap;
1731 	else
1732 		pmap = pmap_kernel();
1733 
1734 	for (npv = pv; npv; npv = npv->pv_next) {
1735 		if (npv->pv_pmap == pmap) {
1736 			/* The page is mapped non-cacheable in
1737 			 * this map.  No need to flush the cache.
1738 			 */
1739 			if (npv->pv_flags & PVF_NC) {
1740 #ifdef DIAGNOSTIC
1741 				if (cache_needs_cleaning)
1742 					panic("pmap_clean_page: "
1743 							"cache inconsistency");
1744 #endif
1745 				break;
1746 			}
1747 #if 0
1748 			/*
1749 			 * XXX Can't do this because pmap_protect doesn't
1750 			 * XXX clean the page when it does a write-protect.
1751 			 */
1752 			else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
1753 				continue;
1754 #endif
1755 			if (cache_needs_cleaning){
1756 				page_to_clean = 0;
1757 				break;
1758 			}
1759 			else
1760 				page_to_clean = npv->pv_va;
1761 			cache_needs_cleaning = 1;
1762 		}
1763 	}
1764 
1765 	if (page_to_clean)
1766 		cpu_idcache_wbinv_range(page_to_clean, NBPG);
1767 	else if (cache_needs_cleaning) {
1768 		cpu_idcache_wbinv_all();
1769 		return (1);
1770 	}
1771 	return (0);
1772 }
1773 
1774 /*
1775  * pmap_zero_page()
1776  *
1777  * Zero a given physical page by mapping it at a page hook point.
1778  * In doing the zero page op, the page we zero is mapped cacheable, since on
1779  * StrongARM accesses to non-cached pages are non-burst, making writing
1780  * _any_ bulk data very slow.
1781  */
1782 #if ARM_MMU_GENERIC == 1
1783 void
1784 pmap_zero_page_generic(paddr_t phys)
1785 {
1786 #ifdef DEBUG
1787 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
1788 
1789 	if (pg->mdpage.pvh_list != NULL)
1790 		panic("pmap_zero_page: page has mappings");
1791 #endif
1792 
1793 	KDASSERT((phys & PGOFSET) == 0);
1794 
1795 	/*
1796 	 * Hook in the page, zero it, and purge the cache for that
1797 	 * zeroed page. Invalidate the TLB as needed.
1798 	 */
1799 	*cdst_pte = L2_S_PROTO | phys |
1800 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1801 	cpu_tlb_flushD_SE(cdstp);
1802 	cpu_cpwait();
1803 	bzero_page(cdstp);
1804 	cpu_dcache_wbinv_range(cdstp, NBPG);
1805 }
1806 #endif /* ARM_MMU_GENERIC == 1 */
1807 
1808 #if ARM_MMU_XSCALE == 1
1809 void
1810 pmap_zero_page_xscale(paddr_t phys)
1811 {
1812 #ifdef DEBUG
1813 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
1814 
1815 	if (pg->mdpage.pvh_list != NULL)
1816 		panic("pmap_zero_page: page has mappings");
1817 #endif
1818 
1819 	KDASSERT((phys & PGOFSET) == 0);
1820 
1821 	/*
1822 	 * Hook in the page, zero it, and purge the cache for that
1823 	 * zeroed page. Invalidate the TLB as needed.
1824 	 */
1825 	*cdst_pte = L2_S_PROTO | phys |
1826 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
1827 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
1828 	cpu_tlb_flushD_SE(cdstp);
1829 	cpu_cpwait();
1830 	bzero_page(cdstp);
1831 	xscale_cache_clean_minidata();
1832 }
1833 #endif /* ARM_MMU_XSCALE == 1 */
1834 
1835 /* pmap_pageidlezero()
1836  *
1837  * The same as above, except that we assume that the page is not
1838  * mapped.  This means we never have to flush the cache first.  Called
1839  * from the idle loop.
1840  */
1841 boolean_t
1842 pmap_pageidlezero(paddr_t phys)
1843 {
1844 	int i, *ptr;
1845 	boolean_t rv = TRUE;
1846 #ifdef DEBUG
1847 	struct vm_page *pg;
1848 
1849 	pg = PHYS_TO_VM_PAGE(phys);
1850 	if (pg->mdpage.pvh_list != NULL)
1851 		panic("pmap_pageidlezero: page has mappings");
1852 #endif
1853 
1854 	KDASSERT((phys & PGOFSET) == 0);
1855 
1856 	/*
1857 	 * Hook in the page, zero it, and purge the cache for that
1858 	 * zeroed page. Invalidate the TLB as needed.
1859 	 */
1860 	*cdst_pte = L2_S_PROTO | phys |
1861 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1862 	cpu_tlb_flushD_SE(cdstp);
1863 	cpu_cpwait();
1864 
1865 	for (i = 0, ptr = (int *)cdstp;
1866 			i < (NBPG / sizeof(int)); i++) {
1867 		if (sched_whichqs != 0) {
1868 			/*
1869 			 * A process has become ready.  Abort now,
1870 			 * so we don't keep it waiting while we
1871 			 * do slow memory access to finish this
1872 			 * page.
1873 			 */
1874 			rv = FALSE;
1875 			break;
1876 		}
1877 		*ptr++ = 0;
1878 	}
1879 
1880 	if (rv)
1881 		/*
1882 		 * If we aborted, we'll re-zero this page again later, so
1883 		 * don't purge the cache unless we finished the page.
1884 		 */
1885 		cpu_dcache_wbinv_range(cdstp, NBPG);
1886 	return (rv);
1887 }
1888 
1889 /*
1890  * pmap_copy_page()
1891  *
1892  * Copy one physical page into another, by mapping the pages into
1893  * hook points. The same comment regarding cachability as in
1894  * pmap_zero_page also applies here.
1895  */
1896 #if ARM_MMU_GENERIC == 1
1897 void
1898 pmap_copy_page_generic(paddr_t src, paddr_t dst)
1899 {
1900 	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
1901 #ifdef DEBUG
1902 	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
1903 
1904 	if (dst_pg->mdpage.pvh_list != NULL)
1905 		panic("pmap_copy_page: dst page has mappings");
1906 #endif
1907 
1908 	KDASSERT((src & PGOFSET) == 0);
1909 	KDASSERT((dst & PGOFSET) == 0);
1910 
1911 	/*
1912 	 * Clean the source page.  Hold the source page's lock for
1913 	 * the duration of the copy so that no other mappings can
1914 	 * be created while we have a potentially aliased mapping.
1915 	 */
1916 	simple_lock(&src_pg->mdpage.pvh_slock);
1917 	(void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
1918 
1919 	/*
1920 	 * Map the pages into the page hook points, copy them, and purge
1921 	 * the cache for the appropriate page. Invalidate the TLB
1922 	 * as required.
1923 	 */
1924 	*csrc_pte = L2_S_PROTO | src |
1925 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
1926 	*cdst_pte = L2_S_PROTO | dst |
1927 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1928 	cpu_tlb_flushD_SE(csrcp);
1929 	cpu_tlb_flushD_SE(cdstp);
1930 	cpu_cpwait();
1931 	bcopy_page(csrcp, cdstp);
1932 	cpu_dcache_inv_range(csrcp, NBPG);
1933 	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
1934 	cpu_dcache_wbinv_range(cdstp, NBPG);
1935 }
1936 #endif /* ARM_MMU_GENERIC == 1 */
1937 
1938 #if ARM_MMU_XSCALE == 1
1939 void
1940 pmap_copy_page_xscale(paddr_t src, paddr_t dst)
1941 {
1942 	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
1943 #ifdef DEBUG
1944 	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
1945 
1946 	if (dst_pg->mdpage.pvh_list != NULL)
1947 		panic("pmap_copy_page: dst page has mappings");
1948 #endif
1949 
1950 	KDASSERT((src & PGOFSET) == 0);
1951 	KDASSERT((dst & PGOFSET) == 0);
1952 
1953 	/*
1954 	 * Clean the source page.  Hold the source page's lock for
1955 	 * the duration of the copy so that no other mappings can
1956 	 * be created while we have a potentially aliased mapping.
1957 	 */
1958 	simple_lock(&src_pg->mdpage.pvh_slock);
1959 	(void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
1960 
1961 	/*
1962 	 * Map the pages into the page hook points, copy them, and purge
1963 	 * the cache for the appropriate page. Invalidate the TLB
1964 	 * as required.
1965 	 */
1966 	*csrc_pte = L2_S_PROTO | src |
1967 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
1968 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
1969 	*cdst_pte = L2_S_PROTO | dst |
1970 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
1971 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
1972 	cpu_tlb_flushD_SE(csrcp);
1973 	cpu_tlb_flushD_SE(cdstp);
1974 	cpu_cpwait();
1975 	bcopy_page(csrcp, cdstp);
1976 	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
1977 	xscale_cache_clean_minidata();
1978 }
1979 #endif /* ARM_MMU_XSCALE == 1 */
1980 
1981 #if 0
1982 void
1983 pmap_pte_addref(struct pmap *pmap, vaddr_t va)
1984 {
1985 	pd_entry_t *pde;
1986 	paddr_t pa;
1987 	struct vm_page *m;
1988 
1989 	if (pmap == pmap_kernel())
1990 		return;
1991 
1992 	pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
1993 	pa = pmap_pte_pa(pde);
1994 	m = PHYS_TO_VM_PAGE(pa);
1995 	++m->wire_count;
1996 #ifdef MYCROFT_HACK
1997 	printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1998 	    pmap, va, pde, pa, m, m->wire_count);
1999 #endif
2000 }
2001 
2002 void
2003 pmap_pte_delref(struct pmap *pmap, vaddr_t va)
2004 {
2005 	pd_entry_t *pde;
2006 	paddr_t pa;
2007 	struct vm_page *m;
2008 
2009 	if (pmap == pmap_kernel())
2010 		return;
2011 
2012 	pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
2013 	pa = pmap_pte_pa(pde);
2014 	m = PHYS_TO_VM_PAGE(pa);
2015 	--m->wire_count;
2016 #ifdef MYCROFT_HACK
2017 	printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
2018 	    pmap, va, pde, pa, m, m->wire_count);
2019 #endif
2020 	if (m->wire_count == 0) {
2021 #ifdef MYCROFT_HACK
2022 		printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
2023 		    pmap, va, pde, pa, m);
2024 #endif
2025 		pmap_unmap_in_l1(pmap, va);
2026 		uvm_pagefree(m);
2027 		--pmap->pm_stats.resident_count;
2028 	}
2029 }
2030 #else
2031 #define	pmap_pte_addref(pmap, va)
2032 #define	pmap_pte_delref(pmap, va)
2033 #endif
2034 
2035 /*
2036  * Since we have a virtually indexed cache, we may need to inhibit caching if
2037  * there is more than one mapping and at least one of them is writable.
2038  * Since we purge the cache on every context switch, we only need to check for
2039  * other mappings within the same pmap, or kernel_pmap.
2040  * This function is also called when a page is unmapped, to possibly reenable
2041  * caching on any remaining mappings.
2042  *
2043  * The code implements the following logic, where:
2044  *
2045  * KW = # of kernel read/write pages
2046  * KR = # of kernel read only pages
2047  * UW = # of user read/write pages
2048  * UR = # of user read only pages
2049  * OW = # of user read/write pages in another pmap, then
2050  *
2051  * KC = kernel mapping is cacheable
2052  * UC = user mapping is cacheable
2053  *
2054  *                     KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
2055  *                   +---------------------------------------------
2056  * UW=0,UR=0,OW=0    | ---        KC=1       KC=1       KC=0
2057  * UW=0,UR>0,OW=0    | UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
2058  * UW=0,UR>0,OW>0    | UC=1       KC=0,UC=1  KC=0,UC=0  KC=0,UC=0
2059  * UW=1,UR=0,OW=0    | UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
2060  * UW>1,UR>=0,OW>=0  | UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
2061  *
2062  * Note that the pmap must have it's ptes mapped in, and passed with ptes.
2063  */
2064 __inline static void
2065 pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2066 	boolean_t clear_cache)
2067 {
2068 	if (pmap == pmap_kernel())
2069 		pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
2070 	else
2071 		pmap_vac_me_user(pmap, pg, ptes, clear_cache);
2072 }
2073 
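/*
 * Two worked readings of the table above, as a sanity check: a page with a
 * single user read/write mapping and nothing else (UW=1, UR=0, OW=0,
 * KW=KR=0) falls in the first column and stays cacheable (UC=1).  A page
 * with one kernel read/write mapping plus a user read-only mapping (KW=1,
 * KR=0, UW=0, UR>0, OW=0) has both mappings made non-cacheable (KC=0, UC=0),
 * since the writable kernel alias could otherwise leave stale data in the
 * virtually indexed cache.
 */
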
2074 static void
2075 pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2076 	boolean_t clear_cache)
2077 {
2078 	int user_entries = 0;
2079 	int user_writable = 0;
2080 	int user_cacheable = 0;
2081 	int kernel_entries = 0;
2082 	int kernel_writable = 0;
2083 	int kernel_cacheable = 0;
2084 	struct pv_entry *pv;
2085 	struct pmap *last_pmap = pmap;
2086 
2087 #ifdef DIAGNOSTIC
2088 	if (pmap != pmap_kernel())
2089 		panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
2090 #endif
2091 
2092 	/*
2093 	 * Pass one, see if there are both kernel and user pmaps for
2094 	 * this page.  Calculate whether there are user-writable or
2095 	 * kernel-writable pages.
2096 	 */
2097 	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
2098 		if (pv->pv_pmap != pmap) {
2099 			user_entries++;
2100 			if (pv->pv_flags & PVF_WRITE)
2101 				user_writable++;
2102 			if ((pv->pv_flags & PVF_NC) == 0)
2103 				user_cacheable++;
2104 		} else {
2105 			kernel_entries++;
2106 			if (pv->pv_flags & PVF_WRITE)
2107 				kernel_writable++;
2108 			if ((pv->pv_flags & PVF_NC) == 0)
2109 				kernel_cacheable++;
2110 		}
2111 	}
2112 
2113 	/*
2114 	 * We know we have just been updating a kernel entry, so if
2115 	 * all user pages are already cacheable, then there is nothing
2116 	 * further to do.
2117 	 */
2118 	if (kernel_entries == 0 &&
2119 	    user_cacheable == user_entries)
2120 		return;
2121 
2122 	if (user_entries) {
2123 		/*
2124 		 * Scan over the list again, for each entry, if it
2125 		 * might not be set correctly, call pmap_vac_me_user
2126 		 * to recalculate the settings.
2127 		 */
2128 		for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
2129 			/*
2130 			 * We know kernel mappings will get set
2131 			 * correctly in other calls.  We also know
2132 			 * that if the pmap is the same as last_pmap
2133 			 * then we've just handled this entry.
2134 			 */
2135 			if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
2136 				continue;
2137 			/*
2138 			 * If there are kernel entries and this page
2139 			 * is writable but non-cacheable, then we can
2140 			 * skip this entry also.
2141 			 */
2142 			if (kernel_entries > 0 &&
2143 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
2144 			    (PVF_NC | PVF_WRITE))
2145 				continue;
2146 			/*
2147 			 * Similarly if there are no kernel-writable
2148 			 * entries and the page is already
2149 			 * read-only/cacheable.
2150 			 */
2151 			if (kernel_writable == 0 &&
2152 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
2153 				continue;
2154 			/*
2155 			 * For some of the remaining cases, we know
2156 			 * that we must recalculate, but for others we
2157 			 * can't tell if they are correct or not, so
2158 			 * we recalculate anyway.
2159 			 */
2160 			pmap_unmap_ptes(last_pmap);
2161 			last_pmap = pv->pv_pmap;
2162 			ptes = pmap_map_ptes(last_pmap);
2163 			pmap_vac_me_user(last_pmap, pg, ptes,
2164 			    pmap_is_curpmap(last_pmap));
2165 		}
2166 		/* Restore the pte mapping that was passed to us.  */
2167 		if (last_pmap != pmap) {
2168 			pmap_unmap_ptes(last_pmap);
2169 			ptes = pmap_map_ptes(pmap);
2170 		}
2171 		if (kernel_entries == 0)
2172 			return;
2173 	}
2174 
2175 	pmap_vac_me_user(pmap, pg, ptes, clear_cache);
2176 	return;
2177 }
2178 
2179 static void
2180 pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2181 	boolean_t clear_cache)
2182 {
2183 	struct pmap *kpmap = pmap_kernel();
2184 	struct pv_entry *pv, *npv;
2185 	int entries = 0;
2186 	int writable = 0;
2187 	int cacheable_entries = 0;
2188 	int kern_cacheable = 0;
2189 	int other_writable = 0;
2190 
2191 	pv = pg->mdpage.pvh_list;
2192 	KASSERT(ptes != NULL);
2193 
2194 	/*
2195 	 * Count mappings and writable mappings in this pmap.
2196 	 * Include kernel mappings as part of our own.
2197 	 * Keep a pointer to the first one.
2198 	 */
2199 	for (npv = pv; npv; npv = npv->pv_next) {
2200 		/* Count mappings in the same pmap */
2201 		if (pmap == npv->pv_pmap ||
2202 		    kpmap == npv->pv_pmap) {
2203 			if (entries++ == 0)
2204 				pv = npv;
2205 			/* Cacheable mappings */
2206 			if ((npv->pv_flags & PVF_NC) == 0) {
2207 				cacheable_entries++;
2208 				if (kpmap == npv->pv_pmap)
2209 					kern_cacheable++;
2210 			}
2211 			/* Writable mappings */
2212 			if (npv->pv_flags & PVF_WRITE)
2213 				++writable;
2214 		} else if (npv->pv_flags & PVF_WRITE)
2215 			other_writable = 1;
2216 	}
2217 
2218 	PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
2219 		"writable %d cacheable %d %s\n", pmap, entries, writable,
2220 	    	cacheable_entries, clear_cache ? "clean" : "no clean"));
2221 
2222 	/*
2223 	 * Enable or disable caching as necessary.
2224 	 * Note: the first entry might be part of the kernel pmap,
2225 	 * so we can't assume this is indicative of the state of the
2226 	 * other (maybe non-kpmap) entries.
2227 	 */
2228 	if ((entries > 1 && writable) ||
2229 	    (entries > 0 && pmap == kpmap && other_writable)) {
2230 		if (cacheable_entries == 0)
2231 		    return;
2232 		for (npv = pv; npv; npv = npv->pv_next) {
2233 			if ((pmap == npv->pv_pmap
2234 			    || kpmap == npv->pv_pmap) &&
2235 			    (npv->pv_flags & PVF_NC) == 0) {
2236 				ptes[arm_btop(npv->pv_va)] &= ~L2_S_CACHE_MASK;
2237  				npv->pv_flags |= PVF_NC;
2238 				/*
2239 				 * If this page needs flushing from the
2240 				 * cache, and we aren't going to do it
2241 				 * below, do it now.
2242 				 */
2243 				if ((cacheable_entries < 4 &&
2244 				    (clear_cache || npv->pv_pmap == kpmap)) ||
2245 				    (npv->pv_pmap == kpmap &&
2246 				    !clear_cache && kern_cacheable < 4)) {
2247 					cpu_idcache_wbinv_range(npv->pv_va,
2248 					    NBPG);
2249 					cpu_tlb_flushID_SE(npv->pv_va);
2250 				}
2251 			}
2252 		}
2253 		if ((clear_cache && cacheable_entries >= 4) ||
2254 		    kern_cacheable >= 4) {
2255 			cpu_idcache_wbinv_all();
2256 			cpu_tlb_flushID();
2257 		}
2258 		cpu_cpwait();
2259 	} else if (entries > 0) {
2260 		/*
2261 		 * Turn caching back on for some pages.  If it is a kernel
2262 		 * page, only do so if there are no other writable pages.
2263 		 */
2264 		for (npv = pv; npv; npv = npv->pv_next) {
2265 			if ((pmap == npv->pv_pmap ||
2266 			    (kpmap == npv->pv_pmap && other_writable == 0)) &&
2267 			    (npv->pv_flags & PVF_NC)) {
2268 				ptes[arm_btop(npv->pv_va)] |=
2269 				    pte_l2_s_cache_mode;
2270 				npv->pv_flags &= ~PVF_NC;
2271 			}
2272 		}
2273 	}
2274 }
2275 
2276 /*
2277  * pmap_remove()
2278  *
2279  * pmap_remove is responsible for nuking a number of mappings for a range
2280  * of virtual address space in the current pmap. To do this efficiently
2281  * is interesting, because in a number of cases a wide virtual address
2282  * range may be supplied that contains few actual mappings. So, the
2283  * optimisations are:
2284  *  1. Try to skip over hunks of address space for which an L1 entry
2285  *     does not exist.
2286  *  2. Build up a list of pages we've hit, up to a maximum, so we can
2287  *     maybe do just a partial cache clean. This path of execution is
2288  *     complicated by the fact that the cache must be flushed _before_
2289  *     the PTE is nuked, being a VAC :-)
2290  *  3. Maybe later fast-case a single page, but I don't think this is
2291  *     going to make _that_ much difference overall.
2292  */
2293 
2294 #define PMAP_REMOVE_CLEAN_LIST_SIZE	3
2295 
2296 void
2297 pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
2298 {
2299 	int cleanlist_idx = 0;
2300 	struct pagelist {
2301 		vaddr_t va;
2302 		pt_entry_t *pte;
2303 	} cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
2304 	pt_entry_t *pte = 0, *ptes;
2305 	paddr_t pa;
2306 	int pmap_active;
2307 	struct vm_page *pg;
2308 
2309 	/* Exit quick if there is no pmap */
2310 	if (!pmap)
2311 		return;
2312 
2313 	PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
2314 	    pmap, sva, eva));
2315 
2316 	/*
2317 	 * we lock in the pmap => vm_page direction
2318 	 */
2319 	PMAP_MAP_TO_HEAD_LOCK();
2320 
2321 	ptes = pmap_map_ptes(pmap);
2322 	/* Get a page table pointer */
2323 	while (sva < eva) {
2324 		if (pmap_pde_page(pmap_pde(pmap, sva)))
2325 			break;
2326 		sva = (sva & L1_S_FRAME) + L1_S_SIZE;
2327 	}
2328 
2329 	pte = &ptes[arm_btop(sva)];
2330 	/* Note whether the pmap is active; if so, cache and TLB cleans are needed */
2331 	pmap_active = pmap_is_curpmap(pmap);
2332 
2333 	/* Now loop along */
2334 	while (sva < eva) {
2335 		/* Check if we can move to the next PDE (l1 chunk) */
2336 		if (!(sva & L2_ADDR_BITS))
2337 			if (!pmap_pde_page(pmap_pde(pmap, sva))) {
2338 				sva += L1_S_SIZE;
2339 				pte += arm_btop(L1_S_SIZE);
2340 				continue;
2341 			}
2342 
2343 		/* We've found a valid PTE, so this page of PTEs has to go. */
2344 		if (pmap_pte_v(pte)) {
2345 			/* Update statistics */
2346 			--pmap->pm_stats.resident_count;
2347 
2348 			/*
2349 			 * Add this page to our cache remove list, if we can.
2350 			 * If, however the cache remove list is totally full,
2351 			 * then do a complete cache invalidation taking note
2352 			 * to backtrack the PTE table beforehand, and ignore
2353 			 * the lists in future because there's no longer any
2354 			 * point in bothering with them (we've paid the
2355 			 * penalty, so will carry on unhindered). Otherwise,
2356 			 * when we fall out, we just clean the list.
2357 			 */
2358 			PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
2359 			pa = pmap_pte_pa(pte);
2360 
2361 			if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
2362 				/* Add to the clean list. */
2363 				cleanlist[cleanlist_idx].pte = pte;
2364 				cleanlist[cleanlist_idx].va = sva;
2365 				cleanlist_idx++;
2366 			} else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
2367 				int cnt;
2368 
2369 				/* Nuke everything if needed. */
2370 				if (pmap_active) {
2371 					cpu_idcache_wbinv_all();
2372 					cpu_tlb_flushID();
2373 				}
2374 
2375 				/*
2376 				 * Roll back the previous PTE list,
2377 				 * and zero out the current PTE.
2378 				 */
2379 				for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
2380 					*cleanlist[cnt].pte = 0;
2381 					pmap_pte_delref(pmap, cleanlist[cnt].va);
2382 				}
2383 				*pte = 0;
2384 				pmap_pte_delref(pmap, sva);
2385 				cleanlist_idx++;
2386 			} else {
2387 				/*
2388 				 * We've already nuked the cache and
2389 				 * TLB, so just carry on regardless,
2390 				 * and we won't need to do it again
2391 				 */
2392 				*pte = 0;
2393 				pmap_pte_delref(pmap, sva);
2394 			}
2395 
2396 			/*
2397 			 * Update flags. In a number of circumstances,
2398 			 * we could cluster a lot of these and do a
2399 			 * number of sequential pages in one go.
2400 			 */
2401 			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
2402 				struct pv_entry *pve;
2403 				simple_lock(&pg->mdpage.pvh_slock);
2404 				pve = pmap_remove_pv(pg, pmap, sva);
2405 				pmap_free_pv(pmap, pve);
2406 				pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2407 				simple_unlock(&pg->mdpage.pvh_slock);
2408 			}
2409 		}
2410 		sva += NBPG;
2411 		pte++;
2412 	}
2413 
2414 	pmap_unmap_ptes(pmap);
2415 	/*
2416 	 * Now, if we've fallen through to here, chances are that there
2417 	 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
2418 	 */
2419 	if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
2420 		u_int cnt;
2421 
2422 		for (cnt = 0; cnt < cleanlist_idx; cnt++) {
2423 			if (pmap_active) {
2424 				cpu_idcache_wbinv_range(cleanlist[cnt].va,
2425 				    NBPG);
2426 				*cleanlist[cnt].pte = 0;
2427 				cpu_tlb_flushID_SE(cleanlist[cnt].va);
2428 			} else
2429 				*cleanlist[cnt].pte = 0;
2430 			pmap_pte_delref(pmap, cleanlist[cnt].va);
2431 		}
2432 	}
2433 	PMAP_MAP_TO_HEAD_UNLOCK();
2434 }
2435 
2436 /*
2437  * Routine:	pmap_remove_all
2438  * Function:
2439  *		Removes this physical page from
2440  *		all physical maps in which it resides.
2441  *		Reflects back modify bits to the pager.
2442  */
2443 
2444 static void
2445 pmap_remove_all(struct vm_page *pg)
2446 {
2447 	struct pv_entry *pv, *npv;
2448 	struct pmap *pmap;
2449 	pt_entry_t *pte, *ptes;
2450 
2451 	PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
2452 
2453 	/* set vm_page => pmap locking */
2454 	PMAP_HEAD_TO_MAP_LOCK();
2455 
2456 	simple_lock(&pg->mdpage.pvh_slock);
2457 
2458 	pv = pg->mdpage.pvh_list;
2459 	if (pv == NULL) {
2460 		PDEBUG(0, printf("free page\n"));
2461 		simple_unlock(&pg->mdpage.pvh_slock);
2462 		PMAP_HEAD_TO_MAP_UNLOCK();
2463 		return;
2464 	}
2465 	pmap_clean_page(pv, FALSE);
2466 
2467 	while (pv) {
2468 		pmap = pv->pv_pmap;
2469 		ptes = pmap_map_ptes(pmap);
2470 		pte = &ptes[arm_btop(pv->pv_va)];
2471 
2472 		PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
2473 		    pv->pv_va, pv->pv_flags));
2474 #ifdef DEBUG
2475 		if (pmap_pde_page(pmap_pde(pmap, pv->pv_va)) == 0 ||
2476 		    pmap_pte_v(pte) == 0 ||
2477 		    pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
2478 			panic("pmap_remove_all: bad mapping");
2479 #endif	/* DEBUG */
2480 
2481 		/*
2482 		 * Update statistics
2483 		 */
2484 		--pmap->pm_stats.resident_count;
2485 
2486 		/* Wired bit */
2487 		if (pv->pv_flags & PVF_WIRED)
2488 			--pmap->pm_stats.wired_count;
2489 
2490 		/*
2491 		 * Invalidate the PTEs.
2492 		 * XXX: should cluster them up and invalidate as many
2493 		 * as possible at once.
2494 		 */
2495 
2496 #ifdef needednotdone
2497 reduce wiring count on page table pages as references drop
2498 #endif
2499 
2500 		*pte = 0;
2501 		pmap_pte_delref(pmap, pv->pv_va);
2502 
2503 		npv = pv->pv_next;
2504 		pmap_free_pv(pmap, pv);
2505 		pv = npv;
2506 		pmap_unmap_ptes(pmap);
2507 	}
2508 	pg->mdpage.pvh_list = NULL;
2509 	simple_unlock(&pg->mdpage.pvh_slock);
2510 	PMAP_HEAD_TO_MAP_UNLOCK();
2511 
2512 	PDEBUG(0, printf("done\n"));
2513 	cpu_tlb_flushID();
2514 	cpu_cpwait();
2515 }
2516 
2517 
2518 /*
2519  * Set the physical protection on the specified range of this map as requested.
2520  */
2521 
2522 void
2523 pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
2524 {
2525 	pt_entry_t *pte = NULL, *ptes;
2526 	struct vm_page *pg;
2527 	int armprot;
2528 	int flush = 0;
2529 	paddr_t pa;
2530 
2531 	PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
2532 	    pmap, sva, eva, prot));
2533 
2534 	if (~prot & VM_PROT_READ) {
2535 		/* Just remove the mappings. */
2536 		pmap_remove(pmap, sva, eva);
2537 		/* pmap_update not needed as it should be called by the caller
2538 		 * of pmap_protect */
2539 		return;
2540 	}
2541 	if (prot & VM_PROT_WRITE) {
2542 		/*
2543 		 * If this is a read->write transition, just ignore it and let
2544 		 * uvm_fault() take care of it later.
2545 		 */
2546 		return;
2547 	}
2548 
2549 	/* Need to lock map->head */
2550 	PMAP_MAP_TO_HEAD_LOCK();
2551 
2552 	ptes = pmap_map_ptes(pmap);
2553 
2554 	/*
2555 	 * OK, at this point, we know we're doing write-protect operation.
2556 	 * If the pmap is active, write-back the range.
2557 	 */
2558 	if (pmap_is_curpmap(pmap))
2559 		cpu_dcache_wb_range(sva, eva - sva);
2560 
2561 	/*
2562 	 * We need to acquire a pointer to a page table page before entering
2563 	 * the following loop.
2564 	 */
2565 	while (sva < eva) {
2566 		if (pmap_pde_page(pmap_pde(pmap, sva)))
2567 			break;
2568 		sva = (sva & L1_S_FRAME) + L1_S_SIZE;
2569 	}
2570 
2571 	pte = &ptes[arm_btop(sva)];
2572 
2573 	while (sva < eva) {
2574 		/* only check once in a while */
2575 		if ((sva & L2_ADDR_BITS) == 0) {
2576 			if (!pmap_pde_page(pmap_pde(pmap, sva))) {
2577 				/* We can race ahead here, to the next pde. */
2578 				sva += L1_S_SIZE;
2579 				pte += arm_btop(L1_S_SIZE);
2580 				continue;
2581 			}
2582 		}
2583 
2584 		if (!pmap_pte_v(pte))
2585 			goto next;
2586 
2587 		flush = 1;
2588 
2589 		armprot = 0;
2590 		if (sva < VM_MAXUSER_ADDRESS)
2591 			armprot |= L2_S_PROT_U;
2592 		else if (sva < VM_MAX_ADDRESS)
2593 			armprot |= L2_S_PROT_W;  /* XXX Ekk what is this ? */
2594 		*pte = (*pte & 0xfffff00f) | armprot;
2595 
2596 		pa = pmap_pte_pa(pte);
2597 
2598 		/* Get the physical page index */
2599 
2600 		/* Clear write flag */
2601 		if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
2602 			simple_lock(&pg->mdpage.pvh_slock);
2603 			(void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0);
2604 			pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2605 			simple_unlock(&pg->mdpage.pvh_slock);
2606 		}
2607 
2608 next:
2609 		sva += NBPG;
2610 		pte++;
2611 	}
2612 	pmap_unmap_ptes(pmap);
2613 	PMAP_MAP_TO_HEAD_UNLOCK();
2614 	if (flush)
2615 		cpu_tlb_flushID();
2616 }
2617 
2618 /*
2619  * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2620  * int flags)
2621  *
2622  *      Insert the given physical page (p) at
2623  *      the specified virtual address (v) in the
2624  *      target physical map with the protection requested.
2625  *
2626  *      If specified, the page will be wired down, meaning
2627  *      that the related pte can not be reclaimed.
2628  *
2629  *      NB:  This is the only routine which MAY NOT lazy-evaluate
2630  *      or lose information.  That is, this routine must actually
2631  *      insert this page into the given map NOW.
2632  */
2633 
2634 int
2635 pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2636     int flags)
2637 {
2638 	pt_entry_t *ptes, opte, npte;
2639 	paddr_t opa;
2640 	boolean_t wired = (flags & PMAP_WIRED) != 0;
2641 	struct vm_page *pg;
2642 	struct pv_entry *pve;
2643 	int error, nflags;
2644 
2645 	PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
2646 	    va, pa, pmap, prot, wired));
2647 
2648 #ifdef DIAGNOSTIC
2649 	/* Valid address ? */
2650 	if (va >= (pmap_curmaxkvaddr))
2651 		panic("pmap_enter: too big");
2652 	if (pmap != pmap_kernel() && va != 0) {
2653 		if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2654 			panic("pmap_enter: kernel page in user map");
2655 	} else {
2656 		if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2657 			panic("pmap_enter: user page in kernel map");
2658 		if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2659 			panic("pmap_enter: entering PT page");
2660 	}
2661 #endif
2662 
2663 	KDASSERT(((va | pa) & PGOFSET) == 0);
2664 
2665 	/*
2666 	 * Get a pointer to the page.  Later on in this function, we
2667 	 * test for a managed page by checking pg != NULL.
2668 	 */
2669 	pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
2670 
2671 	/* get lock */
2672 	PMAP_MAP_TO_HEAD_LOCK();
2673 
2674 	/*
2675 	 * map the ptes.  If there's not already an L2 table for this
2676 	 * address, allocate one.
2677 	 */
2678 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
2679 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
2680 		struct vm_page *ptp;
2681 
2682 		/* kernel should be pre-grown */
2683 		KASSERT(pmap != pmap_kernel());
2684 
2685 		/* if failure is allowed then don't try too hard */
2686 		ptp = pmap_get_ptp(pmap, va & L1_S_FRAME);
2687 		if (ptp == NULL) {
2688 			if (flags & PMAP_CANFAIL) {
2689 				error = ENOMEM;
2690 				goto out;
2691 			}
2692 			panic("pmap_enter: get ptp failed");
2693 		}
2694 	}
2695 	opte = ptes[arm_btop(va)];
2696 
2697 	nflags = 0;
2698 	if (prot & VM_PROT_WRITE)
2699 		nflags |= PVF_WRITE;
2700 	if (wired)
2701 		nflags |= PVF_WIRED;
2702 
2703 	/* Is the pte valid ? If so then this page is already mapped */
2704 	if (l2pte_valid(opte)) {
2705 		/* Get the physical address of the current page mapped */
2706 		opa = l2pte_pa(opte);
2707 
2708 		/* Are we mapping the same page ? */
2709 		if (opa == pa) {
2710 			/* Has the wiring changed ? */
2711 			if (pg != NULL) {
2712 				simple_lock(&pg->mdpage.pvh_slock);
2713 				(void) pmap_modify_pv(pmap, va, pg,
2714 				    PVF_WRITE | PVF_WIRED, nflags);
2715 				simple_unlock(&pg->mdpage.pvh_slock);
2716  			}
2717 		} else {
2718 			struct vm_page *opg;
2719 
2720 			/* We are replacing the page with a new one. */
2721 			cpu_idcache_wbinv_range(va, NBPG);
2722 
2723 			/*
2724 			 * If it is part of our managed memory then we
2725 			 * must remove it from the PV list
2726 			 */
2727 			if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
2728 				simple_lock(&opg->mdpage.pvh_slock);
2729 				pve = pmap_remove_pv(opg, pmap, va);
2730 				simple_unlock(&opg->mdpage.pvh_slock);
2731 			} else {
2732 				pve = NULL;
2733 			}
2734 
2735 			goto enter;
2736 		}
2737 	} else {
2738 		opa = 0;
2739 		pve = NULL;
2740 		pmap_pte_addref(pmap, va);
2741 
2742 		/* pte is not valid so we must be hooking in a new page */
2743 		++pmap->pm_stats.resident_count;
2744 
2745 	enter:
2746 		/*
2747 		 * Enter on the PV list if part of our managed memory
2748 		 */
2749 		if (pg != NULL) {
2750 			if (pve == NULL) {
2751 				pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
2752 				if (pve == NULL) {
2753 					if (flags & PMAP_CANFAIL) {
2754 						error = ENOMEM;
2755 						goto out;
2756 					}
2757 					panic("pmap_enter: no pv entries "
2758 					    "available");
2759 				}
2760 			}
2761 			/* enter_pv locks pvh when adding */
2762 			pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
2763 		} else {
2764 			if (pve != NULL)
2765 				pmap_free_pv(pmap, pve);
2766 		}
2767 	}
2768 
2769 	/* Construct the pte, giving the correct access. */
2770 	npte = pa;
2771 
2772 	/* VA 0 is magic. */
2773 	if (pmap != pmap_kernel() && va != vector_page)
2774 		npte |= L2_S_PROT_U;
2775 
2776 	if (pg != NULL) {
2777 #ifdef DIAGNOSTIC
2778 		if ((flags & VM_PROT_ALL) & ~prot)
2779 			panic("pmap_enter: access_type exceeds prot");
2780 #endif
2781 		npte |= pte_l2_s_cache_mode;
2782 		if (flags & VM_PROT_WRITE) {
2783 			npte |= L2_S_PROTO | L2_S_PROT_W;
2784 			pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
2785 		} else if (flags & VM_PROT_ALL) {
2786 			npte |= L2_S_PROTO;
2787 			pg->mdpage.pvh_attrs |= PVF_REF;
2788 		} else
2789 			npte |= L2_TYPE_INV;
2790 	} else {
2791 		if (prot & VM_PROT_WRITE)
2792 			npte |= L2_S_PROTO | L2_S_PROT_W;
2793 		else if (prot & VM_PROT_ALL)
2794 			npte |= L2_S_PROTO;
2795 		else
2796 			npte |= L2_TYPE_INV;
2797 	}
2798 
2799 	ptes[arm_btop(va)] = npte;
2800 
2801 	if (pg != NULL) {
2802 		simple_lock(&pg->mdpage.pvh_slock);
2803  		pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap));
2804 		simple_unlock(&pg->mdpage.pvh_slock);
2805 	}
2806 
2807 	/* Better flush the TLB ... */
2808 	cpu_tlb_flushID_SE(va);
2809 	error = 0;
2810 out:
2811 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
2812 	PMAP_MAP_TO_HEAD_UNLOCK();
2813 
2814 	return error;
2815 }
2816 
2817 /*
2818  * pmap_kenter_pa: enter a kernel mapping
2819  *
2820  * => no need to lock anything assume va is already allocated
2821  * => should be faster than normal pmap enter function
2822  */
2823 void
2824 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
2825 {
2826 	pt_entry_t *pte;
2827 
2828 	pte = vtopte(va);
2829 	KASSERT(!pmap_pte_v(pte));
2830 
2831 	*pte = L2_S_PROTO | pa |
2832 	    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
2833 }
2834 
2835 void
2836 pmap_kremove(vaddr_t va, vsize_t len)
2837 {
2838 	pt_entry_t *pte;
2839 
2840 	for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2841 
2842 		/*
2843 		 * We assume that we will only be called with small
2844 		 * regions of memory.
2845 		 */
2846 
2847 		KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
2848 		pte = vtopte(va);
2849 		cpu_idcache_wbinv_range(va, PAGE_SIZE);
2850 		*pte = 0;
2851 		cpu_tlb_flushID_SE(va);
2852 	}
2853 }
2854 
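/*
 * Illustrative pairing of the two routines above; how `kva' was reserved is
 * an assumption (typically via the kernel VA allocator), not shown here.
 *
 *	pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pg),
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	... access the page through kva ...
 *	pmap_kremove(kva, PAGE_SIZE);
 */
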
2855 /*
2856  * pmap_page_protect:
2857  *
2858  * Lower the permission for all mappings to a given page.
2859  */
2860 
2861 void
2862 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2863 {
2864 
2865 	PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
2866 	    VM_PAGE_TO_PHYS(pg), prot));
2867 
2868 	switch(prot) {
2869 	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
2870 	case VM_PROT_READ|VM_PROT_WRITE:
2871 		return;
2872 
2873 	case VM_PROT_READ:
2874 	case VM_PROT_READ|VM_PROT_EXECUTE:
2875 		pmap_clearbit(pg, PVF_WRITE);
2876 		break;
2877 
2878 	default:
2879 		pmap_remove_all(pg);
2880 		break;
2881 	}
2882 }
2883 
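/*
 * Example invocations, following the switch above: lowering to VM_PROT_READ
 * (or VM_PROT_READ|VM_PROT_EXECUTE) write-protects every existing mapping of
 * the page via pmap_clearbit(pg, PVF_WRITE), while lowering to VM_PROT_NONE
 * falls into the default case and removes every mapping of the page.
 *
 *	pmap_page_protect(pg, VM_PROT_READ);
 *	pmap_page_protect(pg, VM_PROT_NONE);
 */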
2884 
2885 /*
2886  * Routine:	pmap_unwire
2887  * Function:	Clear the wired attribute for a map/virtual-address
2888  *		pair.
2889  * In/out conditions:
2890  *		The mapping must already exist in the pmap.
2891  */
2892 
2893 void
2894 pmap_unwire(struct pmap *pmap, vaddr_t va)
2895 {
2896 	pt_entry_t *ptes;
2897 	struct vm_page *pg;
2898 	paddr_t pa;
2899 
2900 	PMAP_MAP_TO_HEAD_LOCK();
2901 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
2902 
2903 	if (pmap_pde_v(pmap_pde(pmap, va))) {
2904 #ifdef DIAGNOSTIC
2905 		if (l2pte_valid(ptes[arm_btop(va)]) == 0)
2906 			panic("pmap_unwire: invalid L2 PTE");
2907 #endif
2908 		/* Extract the physical address of the page */
2909 		pa = l2pte_pa(ptes[arm_btop(va)]);
2910 
2911 		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
2912 			goto out;
2913 
2914 		/* Update the wired bit in the pv entry for this page. */
2915 		simple_lock(&pg->mdpage.pvh_slock);
2916 		(void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
2917 		simple_unlock(&pg->mdpage.pvh_slock);
2918 	}
2919 #ifdef DIAGNOSTIC
2920 	else {
2921 		panic("pmap_unwire: invalid L1 PTE");
2922 	}
2923 #endif
2924  out:
2925 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
2926 	PMAP_MAP_TO_HEAD_UNLOCK();
2927 }
2928 
2929 /*
2930  * Routine:  pmap_extract
2931  * Function:
2932  *           Extract the physical page address associated
2933  *           with the given map/virtual_address pair.
2934  */
2935 boolean_t
2936 pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
2937 {
2938 	pd_entry_t *pde;
2939 	pt_entry_t *pte, *ptes;
2940 	paddr_t pa;
2941 
2942 	PDEBUG(5, printf("pmap_extract: pmap=%p, va=0x%08lx -> ", pmap, va));
2943 
2944 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
2945 
2946 	pde = pmap_pde(pmap, va);
2947 	pte = &ptes[arm_btop(va)];
2948 
2949 	if (pmap_pde_section(pde)) {
2950 		pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
2951 		PDEBUG(5, printf("section pa=0x%08lx\n", pa));
2952 		goto out;
2953 	} else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
2954 		PDEBUG(5, printf("no mapping\n"));
2955 		goto failed;
2956 	}
2957 
2958 	if ((*pte & L2_TYPE_MASK) == L2_TYPE_L) {
2959 		pa = (*pte & L2_L_FRAME) | (va & L2_L_OFFSET);
2960 		PDEBUG(5, printf("large page pa=0x%08lx\n", pa));
2961 		goto out;
2962 	}
2963 
2964 	pa = (*pte & L2_S_FRAME) | (va & L2_S_OFFSET);
2965 	PDEBUG(5, printf("small page pa=0x%08lx\n", pa));
2966 
2967  out:
2968 	if (pap != NULL)
2969 		*pap = pa;
2970 
2971 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
2972 	return (TRUE);
2973 
2974  failed:
2975 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
2976 	return (FALSE);
2977 }
2978 
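/*
 * Minimal usage sketch for pmap_extract(); the error handling is purely
 * illustrative.
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
 *		panic("no mapping for va 0x%08lx", va);
 */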
2979 
2980 /*
2981  * pmap_copy:
2982  *
2983  *	Copy the range specified by src_addr/len from the source map to the
2984  *	range dst_addr/len in the destination map.
2985  *
2986  *	This routine is only advisory and need not do anything.
2987  */
2988 /* Call deleted in <arm/arm32/pmap.h> */
2989 
2990 #if defined(PMAP_DEBUG)
2991 void
2992 pmap_dump_pvlist(phys, m)
2993 	vaddr_t phys;
2994 	char *m;
2995 {
2996 	struct vm_page *pg;
2997 	struct pv_entry *pv;
2998 
2999 	if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
3000 		printf("INVALID PA\n");
3001 		return;
3002 	}
3003 	simple_lock(&pg->mdpage.pvh_slock);
3004 	printf("%s %08lx:", m, phys);
3005 	if (pg->mdpage.pvh_list == NULL) {
3006 		simple_unlock(&pg->mdpage.pvh_slock);
3007 		printf(" no mappings\n");
3008 		return;
3009 	}
3010 
3011 	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
3012 		printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
3013 		    pv->pv_va, pv->pv_flags);
3014 
3015 	printf("\n");
3016 	simple_unlock(&pg->mdpage.pvh_slock);
3017 }
3018 
3019 #endif	/* PMAP_DEBUG */
3020 
3021 static pt_entry_t *
3022 pmap_map_ptes(struct pmap *pmap)
3023 {
3024 	struct proc *p;
3025 
3026     	/* the kernel's pmap is always accessible */
3027 	if (pmap == pmap_kernel()) {
3028 		return (pt_entry_t *)PTE_BASE;
3029 	}
3030 
3031 	if (pmap_is_curpmap(pmap)) {
3032 		simple_lock(&pmap->pm_obj.vmobjlock);
3033 		return (pt_entry_t *)PTE_BASE;
3034 	}
3035 
3036 	p = curproc;
3037 	KDASSERT(p != NULL);
3038 
3039 	/* need to lock both curpmap and pmap: use ordered locking */
3040 	if ((vaddr_t) pmap < (vaddr_t) p->p_vmspace->vm_map.pmap) {
3041 		simple_lock(&pmap->pm_obj.vmobjlock);
3042 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3043 	} else {
3044 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3045 		simple_lock(&pmap->pm_obj.vmobjlock);
3046 	}
3047 
3048 	pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE, pmap->pm_pptpt,
3049 	    FALSE);
3050 	cpu_tlb_flushD();
3051 	cpu_cpwait();
3052 	return (pt_entry_t *)APTE_BASE;
3053 }
3054 
3055 /*
3056  * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
3057  */
3058 
3059 static void
3060 pmap_unmap_ptes(struct pmap *pmap)
3061 {
3062 
3063 	if (pmap == pmap_kernel()) {
3064 		return;
3065 	}
3066 	if (pmap_is_curpmap(pmap)) {
3067 		simple_unlock(&pmap->pm_obj.vmobjlock);
3068 	} else {
3069 		KDASSERT(curproc != NULL);
3070 		simple_unlock(&pmap->pm_obj.vmobjlock);
3071 		simple_unlock(
3072 		    &curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3073 	}
3074 }
3075 
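/*
 * The two routines above always bracket PTE access, as elsewhere in this
 * file; a minimal sketch of the pattern (pmap_map_ptes locks the pmap,
 * pmap_unmap_ptes unlocks it):
 *
 *	ptes = pmap_map_ptes(pmap);
 *	opte = ptes[arm_btop(va)];
 *	... examine or modify the entry ...
 *	pmap_unmap_ptes(pmap);
 */
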
3076 /*
3077  * Modify pte bits for all ptes corresponding to the given physical address.
3078  * We use `maskbits' rather than `clearbits' because we're always passing
3079  * constants and the latter would require an extra inversion at run-time.
3080  */
3081 
3082 static void
3083 pmap_clearbit(struct vm_page *pg, u_int maskbits)
3084 {
3085 	struct pv_entry *pv;
3086 	pt_entry_t *ptes;
3087 	vaddr_t va;
3088 	int tlbentry;
3089 
3090 	PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
3091 	    VM_PAGE_TO_PHYS(pg), maskbits));
3092 
3093 	tlbentry = 0;
3094 
3095 	PMAP_HEAD_TO_MAP_LOCK();
3096 	simple_lock(&pg->mdpage.pvh_slock);
3097 
3098 	/*
3099 	 * Clear saved attributes (modify, reference)
3100 	 */
3101 	pg->mdpage.pvh_attrs &= ~maskbits;
3102 
3103 	if (pg->mdpage.pvh_list == NULL) {
3104 		simple_unlock(&pg->mdpage.pvh_slock);
3105 		PMAP_HEAD_TO_MAP_UNLOCK();
3106 		return;
3107 	}
3108 
3109 	/*
3110 	 * Loop over all current mappings, setting/clearing as appropriate
3111 	 */
3112 	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
3113 		va = pv->pv_va;
3114 		pv->pv_flags &= ~maskbits;
3115 		ptes = pmap_map_ptes(pv->pv_pmap);	/* locks pmap */
3116 		KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
3117 		if (maskbits & (PVF_WRITE|PVF_MOD)) {
3118 			if ((pv->pv_flags & PVF_NC)) {
3119 				/*
3120 				 * Entry is not cacheable: reenable
3121 				 * the cache, nothing to flush
3122 				 *
3123 				 * Don't turn caching on again if this
3124 				 * is a modified emulation.  This
3125 				 * would be inconsistent with the
3126 				 * settings created by
3127 				 * pmap_vac_me_harder().
3128 				 *
3129 				 * There's no need to call
3130 				 * pmap_vac_me_harder() here: all
3131 				 * pages are losing their write
3132 				 * permission.
3133 				 *
3134 				 */
3135 				if (maskbits & PVF_WRITE) {
3136 					ptes[arm_btop(va)] |=
3137 					    pte_l2_s_cache_mode;
3138 					pv->pv_flags &= ~PVF_NC;
3139 				}
3140 			} else if (pmap_is_curpmap(pv->pv_pmap)) {
3141 				/*
3142 				 * Entry is cacheable: check if the pmap
3143 				 * is current, and if so flush the page;
3144 				 * otherwise it won't be in the cache.
3145 				 */
3146 				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
3147 			}
3148 
3149 			/* make the pte read only */
3150 			ptes[arm_btop(va)] &= ~L2_S_PROT_W;
3151 		}
3152 
3153 		if (maskbits & PVF_REF)
3154 			ptes[arm_btop(va)] =
3155 			    (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_TYPE_INV;
3156 
3157 		if (pmap_is_curpmap(pv->pv_pmap)) {
3158 			/*
3159 			 * if we had cacheable PTEs we'd clean the
3160 			 * pte out to memory here
3161 			 *
3162 			 * flush tlb entry as it's in the current pmap
3163 			 */
3164 			cpu_tlb_flushID_SE(pv->pv_va);
3165 		}
3166 		pmap_unmap_ptes(pv->pv_pmap);		/* unlocks pmap */
3167 	}
3168 	cpu_cpwait();
3169 
3170 	simple_unlock(&pg->mdpage.pvh_slock);
3171 	PMAP_HEAD_TO_MAP_UNLOCK();
3172 }
3173 
3174 /*
3175  * pmap_clear_modify:
3176  *
3177  *	Clear the "modified" attribute for a page.
3178  */
3179 boolean_t
3180 pmap_clear_modify(struct vm_page *pg)
3181 {
3182 	boolean_t rv;
3183 
3184 	if (pg->mdpage.pvh_attrs & PVF_MOD) {
3185 		rv = TRUE;
3186 		pmap_clearbit(pg, PVF_MOD);
3187 	} else
3188 		rv = FALSE;
3189 
3190 	PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
3191 	    VM_PAGE_TO_PHYS(pg), rv));
3192 
3193 	return (rv);
3194 }
3195 
3196 /*
3197  * pmap_clear_reference:
3198  *
3199  *	Clear the "referenced" attribute for a page.
3200  */
3201 boolean_t
3202 pmap_clear_reference(struct vm_page *pg)
3203 {
3204 	boolean_t rv;
3205 
3206 	if (pg->mdpage.pvh_attrs & PVF_REF) {
3207 		rv = TRUE;
3208 		pmap_clearbit(pg, PVF_REF);
3209 	} else
3210 		rv = FALSE;
3211 
3212 	PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
3213 	    VM_PAGE_TO_PHYS(pg), rv));
3214 
3215 	return (rv);
3216 }
3217 
3218 /*
3219  * pmap_is_modified:
3220  *
3221  *	Test if a page has the "modified" attribute.
3222  */
3223 /* See <arm/arm32/pmap.h> */
3224 
3225 /*
3226  * pmap_is_referenced:
3227  *
3228  *	Test if a page has the "referenced" attribute.
3229  */
3230 /* See <arm/arm32/pmap.h> */
3231 
3232 int
3233 pmap_modified_emulation(struct pmap *pmap, vaddr_t va)
3234 {
3235 	pt_entry_t *ptes;
3236 	struct vm_page *pg;
3237 	paddr_t pa;
3238 	u_int flags;
3239 	int rv = 0;
3240 
3241 	PDEBUG(2, printf("pmap_modified_emulation\n"));
3242 
3243 	PMAP_MAP_TO_HEAD_LOCK();
3244 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
3245 
3246 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
3247 		PDEBUG(2, printf("L1 PTE invalid\n"));
3248 		goto out;
3249 	}
3250 
3251 	PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
3252 
3253 	/* Check for an invalid pte */
3254 	if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3255 		goto out;
3256 
3257 	/* This can happen if user code tries to access kernel memory. */
3258 	if ((ptes[arm_btop(va)] & L2_S_PROT_W) != 0)
3259 		goto out;
3260 
3261 	/* Extract the physical address of the page */
3262 	pa = l2pte_pa(ptes[arm_btop(va)]);
3263 	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3264 		goto out;
3265 
3266 	/* Get the current flags for this page. */
3267 	simple_lock(&pg->mdpage.pvh_slock);
3268 
3269 	flags = pmap_modify_pv(pmap, va, pg, 0, 0);
3270 	PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
3271 
3272 	/*
3273 	 * Do the flags say this page is writable ? If not then it is a
3274 	 * genuine write fault. If yes then the write fault is our fault
3275 	 * as we did not reflect the write access in the PTE. Now we know
3276 	 * a write has occurred we can correct this and also set the
3277 	 * modified bit
3278 	 */
3279 	if (~flags & PVF_WRITE) {
3280 	    	simple_unlock(&pg->mdpage.pvh_slock);
3281 		goto out;
3282 	}
3283 
3284 	PDEBUG(0,
3285 	    printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n",
3286 	    va, ptes[arm_btop(va)]));
3287 	pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
3288 
3289 	/*
3290 	 * Re-enable write permissions for the page.  No need to call
3291 	 * pmap_vac_me_harder(), since this is just a
3292 	 * modified-emulation fault, and the PVF_WRITE bit isn't changing.
3293 	 * We've already set the cacheable bits based on the assumption
3294 	 * that we can write to this page.
3295 	 */
3296 	ptes[arm_btop(va)] =
3297 	    (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
3298 	PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
3299 
3300 	simple_unlock(&pg->mdpage.pvh_slock);
3301 
3302 	cpu_tlb_flushID_SE(va);
3303 	cpu_cpwait();
3304 	rv = 1;
3305  out:
3306 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
3307 	PMAP_MAP_TO_HEAD_UNLOCK();
3308 	return (rv);
3309 }
3310 
3311 int
3312 pmap_handled_emulation(struct pmap *pmap, vaddr_t va)
3313 {
3314 	pt_entry_t *ptes;
3315 	struct vm_page *pg;
3316 	paddr_t pa;
3317 	int rv = 0;
3318 
3319 	PDEBUG(2, printf("pmap_handled_emulation\n"));
3320 
3321 	PMAP_MAP_TO_HEAD_LOCK();
3322 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
3323 
3324 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
3325 		PDEBUG(2, printf("L1 PTE invalid\n"));
3326 		goto out;
3327 	}
3328 
3329 	PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
3330 
3331 	/* Check for invalid pte */
3332 	if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3333 		goto out;
3334 
3335 	/* This can happen if user code tries to access kernel memory. */
3336 	if ((ptes[arm_btop(va)] & L2_TYPE_MASK) != L2_TYPE_INV)
3337 		goto out;
3338 
3339 	/* Extract the physical address of the page */
3340 	pa = l2pte_pa(ptes[arm_btop(va)]);
3341 	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3342 		goto out;
3343 
3344 	simple_lock(&pg->mdpage.pvh_slock);
3345 
3346 	/*
3347 	 * OK, we just enable the pte and mark the attributes as handled.
3348 	 * XXX Should we traverse the PV list and enable all PTEs?
3349 	 */
3350 	PDEBUG(0,
3351 	    printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n",
3352 	    va, ptes[arm_btop(va)]));
3353 	pg->mdpage.pvh_attrs |= PVF_REF;
3354 
3355 	ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO;
3356 	PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
3357 
3358 	simple_unlock(&pg->mdpage.pvh_slock);
3359 
3360 	cpu_tlb_flushID_SE(va);
3361 	cpu_cpwait();
3362 	rv = 1;
3363  out:
3364 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
3365 	PMAP_MAP_TO_HEAD_UNLOCK();
3366 	return (rv);
3367 }
3368 
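/*
 * Rough sketch of how the data abort handler is expected to use the two
 * emulation routines above; the surrounding handler logic and the `far'
 * (fault address) variable are assumptions for illustration only.
 *
 *	if (pmap_modified_emulation(pmap, trunc_page(far)))
 *		return;			(write fault fixed up, retry access)
 *	if (pmap_handled_emulation(pmap, trunc_page(far)))
 *		return;			(reference fault fixed up, retry)
 *	(otherwise hand the fault to uvm_fault())
 */
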
3369 /*
3370  * pmap_collect: free resources held by a pmap
3371  *
3372  * => optional function.
3373  * => called when a process is swapped out to free memory.
3374  */
3375 
3376 void
3377 pmap_collect(struct pmap *pmap)
3378 {
3379 }
3380 
3381 /*
3382  * Routine:	pmap_procwr
3383  *
3384  * Function:
3385  *	Synchronize caches corresponding to [addr, addr+len) in p.
3386  *
3387  */
3388 void
3389 pmap_procwr(struct proc *p, vaddr_t va, int len)
3390 {
3391 	/* We only need to do anything if it is the current process. */
3392 	if (p == curproc)
3393 		cpu_icache_sync_range(va, len);
3394 }
3395 /*
3396  * PTP functions
3397  */
3398 
3399 /*
3400  * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
3401  *
3402  * => pmap should NOT be pmap_kernel()
3403  * => pmap should be locked
3404  */
3405 
3406 static struct vm_page *
3407 pmap_get_ptp(struct pmap *pmap, vaddr_t va)
3408 {
3409 	struct vm_page *ptp;
3410 
3411 	if (pmap_pde_page(pmap_pde(pmap, va))) {
3412 
3413 		/* valid... check hint (saves us a PA->PG lookup) */
3414 		if (pmap->pm_ptphint &&
3415 		    (pmap->pm_pdir[pmap_pdei(va)] & L2_S_FRAME) ==
3416 		    VM_PAGE_TO_PHYS(pmap->pm_ptphint))
3417 			return (pmap->pm_ptphint);
3418 		ptp = uvm_pagelookup(&pmap->pm_obj, va);
3419 #ifdef DIAGNOSTIC
3420 		if (ptp == NULL)
3421 			panic("pmap_get_ptp: unmanaged user PTP");
3422 #endif
3423 		pmap->pm_ptphint = ptp;
3424 		return(ptp);
3425 	}
3426 
3427 	/* allocate a new PTP (updates ptphint) */
3428 	return(pmap_alloc_ptp(pmap, va));
3429 }
3430 
3431 /*
3432  * pmap_alloc_ptp: allocate a PTP for a PMAP
3433  *
3434  * => pmap should already be locked by caller
3435  * => we use the ptp's wire_count to count the number of active mappings
3436  *	in the PTP (we start it at one to prevent any chance this PTP
3437  *	will ever leak onto the active/inactive queues)
3438  */
3439 
3440 /*__inline */ static struct vm_page *
3441 pmap_alloc_ptp(struct pmap *pmap, vaddr_t va)
3442 {
3443 	struct vm_page *ptp;
3444 
3445 	ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
3446 		UVM_PGA_USERESERVE|UVM_PGA_ZERO);
3447 	if (ptp == NULL)
3448 		return (NULL);
3449 
3450 	/* got one! */
3451 	ptp->flags &= ~PG_BUSY;	/* never busy */
3452 	ptp->wire_count = 1;	/* no mappings yet */
3453 	pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE);
3454 	pmap->pm_stats.resident_count++;	/* count PTP as resident */
3455 	pmap->pm_ptphint = ptp;
3456 	return (ptp);
3457 }
3458 
3459 vaddr_t
3460 pmap_growkernel(vaddr_t maxkvaddr)
3461 {
3462 	struct pmap *kpm = pmap_kernel(), *pm;
3463 	int s;
3464 	paddr_t ptaddr;
3465 	struct vm_page *ptp;
3466 
3467 	if (maxkvaddr <= pmap_curmaxkvaddr)
3468 		goto out;		/* we are OK */
3469 	NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
3470 		    pmap_curmaxkvaddr, maxkvaddr));
3471 
3472 	/*
3473 	 * whoops!   we need to add kernel PTPs
3474 	 */
3475 
3476 	s = splhigh();	/* to be safe */
3477 	simple_lock(&kpm->pm_obj.vmobjlock);
3478 	/* due to the way the arm pmap works we map 4MB at a time */
3479 	for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr;
3480 	     pmap_curmaxkvaddr += 4 * L1_S_SIZE) {
3481 
3482 		if (uvm.page_init_done == FALSE) {
3483 
3484 			/*
3485 			 * we're growing the kernel pmap early (from
3486 			 * uvm_pageboot_alloc()).  this case must be
3487 			 * handled a little differently.
3488 			 */
3489 
3490 			if (uvm_page_physget(&ptaddr) == FALSE)
3491 				panic("pmap_growkernel: out of memory");
3492 			pmap_zero_page(ptaddr);
3493 
3494 			/* map this page in */
3495 			pmap_map_in_l1(kpm, pmap_curmaxkvaddr, ptaddr, TRUE);
3496 
3497 			/* count PTP as resident */
3498 			kpm->pm_stats.resident_count++;
3499 			continue;
3500 		}
3501 
3502 		/*
3503 		 * THIS *MUST* BE CODED SO AS TO WORK IN THE
3504 		 * pmap_initialized == FALSE CASE!  WE MAY BE
3505 		 * INVOKED WHILE pmap_init() IS RUNNING!
3506 		 */
3507 
3508 		if ((ptp = pmap_alloc_ptp(kpm, pmap_curmaxkvaddr)) == NULL)
3509 			panic("pmap_growkernel: alloc ptp failed");
3510 
3511 		/* distribute new kernel PTP to all active pmaps */
3512 		simple_lock(&pmaps_lock);
3513 		LIST_FOREACH(pm, &pmaps, pm_list) {
3514 			pmap_map_in_l1(pm, pmap_curmaxkvaddr,
3515 			    VM_PAGE_TO_PHYS(ptp), TRUE);
3516 		}
3517 
3518 		simple_unlock(&pmaps_lock);
3519 	}
3520 
3521 	/*
3522 	 * flush out the cache; this is expensive, but growkernel
3523 	 * happens only rarely
3524 	 */
3525 	cpu_tlb_flushD();
3526 	cpu_cpwait();
3527 
3528 	simple_unlock(&kpm->pm_obj.vmobjlock);
3529 	splx(s);
3530 
3531 out:
3532 	return (pmap_curmaxkvaddr);
3533 }
3534 
3535 /************************ Utility routines ****************************/
3536 
3537 /*
3538  * vector_page_setprot:
3539  *
3540  *	Manipulate the protection of the vector page.
3541  */
3542 void
3543 vector_page_setprot(int prot)
3544 {
3545 	pt_entry_t *pte;
3546 
3547 	pte = vtopte(vector_page);
3548 
3549 	*pte = (*pte & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
3550 	cpu_tlb_flushD_SE(vector_page);
3551 	cpu_cpwait();
3552 }
3553 
3554 /************************ Bootstrapping routines ****************************/
3555 
3556 /*
3557  * This list exists for the benefit of pmap_map_chunk().  It keeps track
3558  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
3559  * find them as necessary.
3560  *
3561  * Note that the data on this list is not valid after initarm() returns.
3562  */
3563 SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
3564 
3565 static vaddr_t
3566 kernel_pt_lookup(paddr_t pa)
3567 {
3568 	pv_addr_t *pv;
3569 
3570 	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
3571 		if (pv->pv_pa == pa)
3572 			return (pv->pv_va);
3573 	}
3574 	return (0);
3575 }
3576 
3577 /*
3578  * pmap_map_section:
3579  *
3580  *	Create a single section mapping.
3581  */
3582 void
3583 pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3584 {
3585 	pd_entry_t *pde = (pd_entry_t *) l1pt;
3586 	pd_entry_t fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
3587 
3588 	KASSERT(((va | pa) & L1_S_OFFSET) == 0);
3589 
3590 	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
3591 	    L1_S_PROT(PTE_KERNEL, prot) | fl;
3592 }
3593 
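/*
 * Worked example, assuming the usual ARM 1MB section size (L1_S_SHIFT == 20):
 * mapping va 0xc0100000 computes slot 0xc0100000 >> 20 == 0xc01, i.e. entry
 * 0xc01 of the 4096-entry L1 table, and that single entry then translates
 * the whole 1MB region 0xc0100000-0xc01fffff.
 */
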
3594 /*
3595  * pmap_map_entry:
3596  *
3597  *	Create a single page mapping.
3598  */
3599 void
3600 pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3601 {
3602 	pd_entry_t *pde = (pd_entry_t *) l1pt;
3603 	pt_entry_t fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
3604 	pt_entry_t *pte;
3605 
3606 	KASSERT(((va | pa) & PGOFSET) == 0);
3607 
3608 	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
3609 		panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
3610 
3611 	pte = (pt_entry_t *)
3612 	    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
3613 	if (pte == NULL)
3614 		panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
3615 
3616 	pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
3617 	    L2_S_PROT(PTE_KERNEL, prot) | fl;
3618 }
3619 
3620 /*
3621  * pmap_link_l2pt:
3622  *
3623  *	Link the L2 page table specified by "pa" into the L1
3624  *	page table at the slot for "va".
3625  */
3626 void
3627 pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
3628 {
3629 	pd_entry_t *pde = (pd_entry_t *) l1pt;
3630 	u_int slot = va >> L1_S_SHIFT;
3631 
3632 	KASSERT((l2pv->pv_pa & PGOFSET) == 0);
3633 
3634 	pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000);
3635 	pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400);
3636 	pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800);
3637 	pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00);
3638 
3639 	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
3640 }
3641 
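/*
 * The arithmetic behind the four slots above: a coarse L2 table is 1KB (256
 * entries of 4 bytes), and each entry maps a 4KB small page, so one 1KB
 * subtable covers 1MB -- exactly one L1 slot.  A full 4KB page of L2 entries
 * therefore backs 4 consecutive L1 slots (offsets 0x000, 0x400, 0x800 and
 * 0xc00), covering 4MB of virtual address space.
 */
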
3642 /*
3643  * pmap_map_chunk:
3644  *
3645  *	Map a chunk of memory using the most efficient mappings
3646  *	possible (section, large page, small page) into the
3647  *	provided L1 and L2 tables at the specified virtual address.
3648  */
3649 vsize_t
3650 pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
3651     int prot, int cache)
3652 {
3653 	pd_entry_t *pde = (pd_entry_t *) l1pt;
3654 	pt_entry_t *pte, fl;
3655 	vsize_t resid;
3656 	int i;
3657 
3658 	resid = (size + (NBPG - 1)) & ~(NBPG - 1);
3659 
3660 	if (l1pt == 0)
3661 		panic("pmap_map_chunk: no L1 table provided");
3662 
3663 #ifdef VERBOSE_INIT_ARM
3664 	printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
3665 	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
3666 #endif
3667 
3668 	size = resid;
3669 
3670 	while (resid > 0) {
3671 		/* See if we can use a section mapping. */
3672 		if (((pa | va) & L1_S_OFFSET) == 0 &&
3673 		    resid >= L1_S_SIZE) {
3674 			fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
3675 #ifdef VERBOSE_INIT_ARM
3676 			printf("S");
3677 #endif
3678 			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
3679 			    L1_S_PROT(PTE_KERNEL, prot) | fl;
3680 			va += L1_S_SIZE;
3681 			pa += L1_S_SIZE;
3682 			resid -= L1_S_SIZE;
3683 			continue;
3684 		}
3685 
3686 		/*
3687 		 * Ok, we're going to use an L2 table.  Make sure
3688 		 * one is actually in the corresponding L1 slot
3689 		 * for the current VA.
3690 		 */
3691 		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
3692 			panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
3693 
3694 		pte = (pt_entry_t *)
3695 		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
3696 		if (pte == NULL)
3697 			panic("pmap_map_chunk: can't find L2 table for VA "
3698 			    "0x%08lx", va);
3699 
3700 		/* See if we can use a L2 large page mapping. */
3701 		if (((pa | va) & L2_L_OFFSET) == 0 &&
3702 		    resid >= L2_L_SIZE) {
3703 			fl = (cache == PTE_CACHE) ? pte_l2_l_cache_mode : 0;
3704 #ifdef VERBOSE_INIT_ARM
3705 			printf("L");
3706 #endif
3707 			for (i = 0; i < 16; i++) {
3708 				pte[((va >> PGSHIFT) & 0x3f0) + i] =
3709 				    L2_L_PROTO | pa |
3710 				    L2_L_PROT(PTE_KERNEL, prot) | fl;
3711 			}
3712 			va += L2_L_SIZE;
3713 			pa += L2_L_SIZE;
3714 			resid -= L2_L_SIZE;
3715 			continue;
3716 		}
3717 
3718 		/* Use a small page mapping. */
3719 		fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
3720 #ifdef VERBOSE_INIT_ARM
3721 		printf("P");
3722 #endif
3723 		pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
3724 		    L2_S_PROT(PTE_KERNEL, prot) | fl;
3725 		va += NBPG;
3726 		pa += NBPG;
3727 		resid -= NBPG;
3728 	}
3729 #ifdef VERBOSE_INIT_ARM
3730 	printf("\n");
3731 #endif
3732 	return (size);
3733 }
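/*
 * Illustrative sketch (hypothetical names, not from any particular
 * port): bootstrap code typically maps the kernel image in one call and
 * lets pmap_map_chunk() pick section/large/small mappings as alignment
 * allows:
 *
 *	totalsize = pmap_map_chunk(l1pagetable, KERNEL_BASE,
 *	    physical_start, kerneldatasize,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 *
 * The return value is the page-rounded size that was actually mapped.
 */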
3734 
3735 /********************** PTE initialization routines **************************/
3736 
3737 /*
3738  * These routines are called when the CPU type is identified to set up
3739  * the PTE prototypes, cache modes, etc.
3740  *
3741  * The variables are always here, just in case LKMs need to reference
3742  * them (though they shouldn't).
3743  */
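/*
 * Expected call order, as a hedged sketch (the exact caller is the
 * CPU-identification code; set_cpufuncs() is the usual entry point, but
 * treat the name as illustrative):
 *
 *	set_cpufuncs();		// picks pmap_pte_init_generic(),
 *				// pmap_pte_init_arm9() or
 *				// pmap_pte_init_xscale()
 *	pmap_bootstrap(...);	// may now rely on the prototypes below
 */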
3744 
3745 pt_entry_t	pte_l1_s_cache_mode;
3746 pt_entry_t	pte_l1_s_cache_mask;
3747 
3748 pt_entry_t	pte_l2_l_cache_mode;
3749 pt_entry_t	pte_l2_l_cache_mask;
3750 
3751 pt_entry_t	pte_l2_s_cache_mode;
3752 pt_entry_t	pte_l2_s_cache_mask;
3753 
3754 pt_entry_t	pte_l2_s_prot_u;
3755 pt_entry_t	pte_l2_s_prot_w;
3756 pt_entry_t	pte_l2_s_prot_mask;
3757 
3758 pt_entry_t	pte_l1_s_proto;
3759 pt_entry_t	pte_l1_c_proto;
3760 pt_entry_t	pte_l2_s_proto;
3761 
3762 void		(*pmap_copy_page_func)(paddr_t, paddr_t);
3763 void		(*pmap_zero_page_func)(paddr_t);
3764 
3765 #if ARM_MMU_GENERIC == 1
3766 void
3767 pmap_pte_init_generic(void)
3768 {
3769 
3770 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
3771 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
3772 
3773 	pte_l2_l_cache_mode = L2_B|L2_C;
3774 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
3775 
3776 	pte_l2_s_cache_mode = L2_B|L2_C;
3777 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
3778 
3779 	pte_l2_s_prot_u = L2_S_PROT_U_generic;
3780 	pte_l2_s_prot_w = L2_S_PROT_W_generic;
3781 	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
3782 
3783 	pte_l1_s_proto = L1_S_PROTO_generic;
3784 	pte_l1_c_proto = L1_C_PROTO_generic;
3785 	pte_l2_s_proto = L2_S_PROTO_generic;
3786 
3787 	pmap_copy_page_func = pmap_copy_page_generic;
3788 	pmap_zero_page_func = pmap_zero_page_generic;
3789 }
3790 
3791 #if defined(CPU_ARM9)
3792 void
3793 pmap_pte_init_arm9(void)
3794 {
3795 
3796 	/*
3797 	 * ARM9 is compatible with generic, but we want to use
3798 	 * write-through caching for now.
3799 	 */
3800 	pmap_pte_init_generic();
3801 
3802 	pte_l1_s_cache_mode = L1_S_C;
3803 	pte_l2_l_cache_mode = L2_C;
3804 	pte_l2_s_cache_mode = L2_C;
3805 }
3806 #endif /* CPU_ARM9 */
3807 #endif /* ARM_MMU_GENERIC == 1 */
3808 
3809 #if ARM_MMU_XSCALE == 1
3810 void
3811 pmap_pte_init_xscale(void)
3812 {
3813 	uint32_t auxctl;
3814 
3815 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
3816 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
3817 
3818 	pte_l2_l_cache_mode = L2_B|L2_C;
3819 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
3820 
3821 	pte_l2_s_cache_mode = L2_B|L2_C;
3822 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
3823 
3824 #ifdef XSCALE_CACHE_WRITE_THROUGH
3825 	/*
3826 	 * Some versions of the XScale core have various bugs in
3827 	 * their cache units, the work-around for which is to run
3828 	 * the cache in write-through mode.  Unfortunately, this
3829 	 * has a major (negative) impact on performance.  So, we
3830 	 * go ahead and run fast-and-loose, in the hopes that we
3831 	 * don't line up the planets in a way that will trip the
3832 	 * bugs.
3833 	 *
3834 	 * However, we give you the option to be slow-but-correct.
3835 	 */
3836 	pte_l1_s_cache_mode = L1_S_C;
3837 	pte_l2_l_cache_mode = L2_C;
3838 	pte_l2_s_cache_mode = L2_C;
3839 #endif /* XSCALE_CACHE_WRITE_THROUGH */
3840 
3841 	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
3842 	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
3843 	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
3844 
3845 	pte_l1_s_proto = L1_S_PROTO_xscale;
3846 	pte_l1_c_proto = L1_C_PROTO_xscale;
3847 	pte_l2_s_proto = L2_S_PROTO_xscale;
3848 
3849 	pmap_copy_page_func = pmap_copy_page_xscale;
3850 	pmap_zero_page_func = pmap_zero_page_xscale;
3851 
3852 	/*
3853 	 * Disable ECC protection of page table access, for now.
3854 	 */
3855 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
3856 		: "=r" (auxctl));
3857 	auxctl &= ~XSCALE_AUXCTL_P;
3858 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
3859 		:
3860 		: "r" (auxctl));
3861 }
3862 
3863 /*
3864  * xscale_setup_minidata:
3865  *
3866  *	Set up the mini-data cache clean area.  We require the
3867  *	caller to allocate the right amount of physically and
3868  *	virtually contiguous space.
3869  */
3870 void
3871 xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
3872 {
3873 	extern vaddr_t xscale_minidata_clean_addr;
3874 	extern vsize_t xscale_minidata_clean_size; /* already initialized */
3875 	pd_entry_t *pde = (pd_entry_t *) l1pt;
3876 	pt_entry_t *pte;
3877 	vsize_t size;
3878 	uint32_t auxctl;
3879 
3880 	xscale_minidata_clean_addr = va;
3881 
3882 	/* Round it to page size. */
3883 	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
3884 
3885 	for (; size != 0;
3886 	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
3887 		pte = (pt_entry_t *)
3888 		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
3889 		if (pte == NULL)
3890 			panic("xscale_setup_minidata: can't find L2 table for "
3891 			    "VA 0x%08lx", va);
3892 		pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
3893 		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
3894 		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
3895 	}
3896 
3897 	/*
3898 	 * Configure the mini-data cache for write-back with
3899 	 * read/write-allocate.
3900 	 *
3901 	 * NOTE: In order to reconfigure the mini-data cache, we must
3902 	 * make sure it contains no valid data!  In order to do that,
3903 	 * we must issue a global data cache invalidate command!
3904 	 *
3905 	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
3906 	 * THIS IS VERY IMPORTANT!
3907 	 */
3908 
3909 	/* Invalidate data and mini-data (the transferred register is ignored). */
3910 	__asm __volatile("mcr p15, 0, %0, c7, c6, 0"
3911 		:
3912 		: "r" (0));
3913 
3914 
3915 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
3916 		: "=r" (auxctl));
3917 	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
3918 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
3919 		:
3920 		: "r" (auxctl));
3921 }
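/*
 * Illustrative call (hypothetical caller-side names): a board's
 * bootstrap code reserves xscale_minidata_clean_size bytes of
 * page-aligned, physically and virtually contiguous memory, then,
 * while still running un-cached:
 *
 *	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
 *	    minidataclean.pv_pa);
 */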
3922 #endif /* ARM_MMU_XSCALE == 1 */
3923