/*	$NetBSD: pmap.h,v 1.14 2001/09/10 21:19:23 chris Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _SH3_PMAP_H_
#define _SH3_PMAP_H_

#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <uvm/uvm_object.h>

/*
 * see pte.h for a description of the (i386-derived) MMU terminology
 * and interface used by this pmap.
 *
 * a pmap describes a process's 4GB virtual address space.  this
 * virtual address space can be broken up into 1024 4MB regions which
 * are described by PDEs in the PDP.  the PDEs are defined as follows:
 *
 * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
 * (the following example keeps the original i386 numbers and assumes
 * that KERNBASE is 0xf0000000; on sh3 the actual slots are given by
 * the PDSLOT_* defines below, with the kernel at 0xd0000000)
 *
 * PDE#s	VA range		usage
 * 0->959	0x0 -> 0xefc00000	user address space; note that the
 *					max user address is 0xefbfe000:
 *					the final two pages in the last 4MB
 *					used to be reserved for the UAREA
 *					but are no longer used
 * 959		0xefc00000->		recursive mapping of PDP (used for
 *			0xf0000000	linear mapping of PTPs)
 * 960->1023	0xf0000000->		kernel address space (constant
 *			0xffc00000	across all pmaps/processes)
 * 1023		0xffc00000->		"alternate" recursive PDP mapping
 *			<end>		(for other pmaps)
 *
 *
 * note: a recursive PDP mapping provides a way to map all the PTEs for
 * a 4GB address space into a linear chunk of virtual memory.  in other
 * words, the PTE for page 0 is the first int mapped into the 4MB recursive
 * area.  the PTE for page 1 is the second int.  the very last int in the
 * 4MB range is the PTE that maps VA 0xffffe000 (the last page in the 4GB
 * address space).
 *
 * all pmaps' PDs must have the same values in slots 960->1023 so that
 * the kernel is always mapped in every process.  these values are loaded
 * into the PD at pmap creation time.
 *
 * at any one time only one pmap can be active on a processor.  this is
 * the pmap whose PDP is pointed to by processor register %cr3.  this pmap
 * will have all its PTEs mapped into memory at the recursive mapping
 * point (slot #959 as shown above).  when the pmap code wants to find the
 * PTE for a virtual address, all it has to do is the following:
 *
 * address of PTE = (959 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
 *                = 0xefc00000 + (VA / 4096) * 4
 *
 * what happens if the pmap layer is asked to perform an operation
 * on a pmap that is not the one which is currently active?  in that
 * case we take the PA of the PDP of the non-active pmap and put it in
 * slot 1023 of the active pmap.  this causes the non-active pmap's
 * PTEs to get mapped in the final 4MB of the 4GB address space
 * (i.e. starting at 0xffc00000).
 *
 * the following figure shows the effects of the recursive PDP mapping:
 *
 *   PDP (%cr3)
 *   +----+
 *   |   0| -> PTP#0 that maps VA 0x0 -> 0x400000
 *   |    |
 *   |    |
 *   | 959| -> points back to PDP (%cr3) mapping VA 0xefc00000 -> 0xf0000000
 *   | 960| -> first kernel PTP (maps 0xf0000000 -> 0xf0400000)
 *   |    |
 *   |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
 *   +----+
 *
 * note that the PDE#959 VA (0xefc00000) is defined as "PTE_BASE"
 * note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
 *
 * starting at VA 0xefc00000 the current active PDP (%cr3) acts as a
 * PTP:
 *
 * PTP#959 == PDP(%cr3) => maps VA 0xefc00000 -> 0xf0000000
 *   +----+
 *   |   0| -> maps the contents of PTP#0 at VA 0xefc00000->0xefc01000
 *   |    |
 *   |    |
 *   | 959| -> maps contents of PTP#959 (the PDP) at VA 0xeffbf000
 *   | 960| -> maps contents of first kernel PTP
 *   |    |
 *   |1023|
 *   +----+
 *
 * note that the mapping of the PDP at PTP#959's VA (0xeffbf000) is
 * defined as "PDP_BASE"....  within that mapping there are two
 * defines:
 *   "PDP_PDE" (0xeffbfefc) is the VA of the PDE in the PDP
 *      which points back to itself.
 *   "APDP_PDE" (0xeffbfffc) is the VA of the PDE in the PDP which
 *      establishes the recursive mapping of the alternate pmap.
 *      to set the alternate PDP, one just has to put the correct
 *	PA info in *APDP_PDE.
 *
 * note that in the APTE_BASE space, the APDP appears at VA
 * "APDP_BASE" (0xfffff000).
 */
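
/*
 * For concreteness, a minimal sketch of the lookup described above,
 * using the i386 example numbers from this comment (PTE_BASE ==
 * 0xefc00000); the real code uses the vtopte() macro defined below.
 */
#if 0	/* illustrative sketch only -- never compiled */
pt_entry_t *
example_vtopte(va)
	vaddr_t va;
{
	/* 0xefc00000 + (va / 4096) * 4; pointer arithmetic scales by 4 */
	return ((pt_entry_t *)0xefc00000 + (va >> 12));
}
#endif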

/*
 * the following defines identify the PD slots used as described above.
 */

#define PDSLOT_PTE	((u_int)0x33f)	/* 831: recursive PDP map (old PTDPTDI) */
#define PDSLOT_KERN	((u_int)0x340)	/* 832: start of kernel space (old KPTDI) */
#define PDSLOT_APTE	((u_int)0x37f)	/* 895: alternative recursive slot */

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 */

#define PTE_BASE	((pt_entry_t *)  (PDSLOT_PTE * NBPD) )
#define APTE_BASE	((pt_entry_t *)  (PDSLOT_APTE * NBPD) )
#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
#define PDP_PDE		(PDP_BASE + PDSLOT_PTE)
#define APDP_PDE	(PDP_BASE + PDSLOT_APTE)

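/*
 * For reference -- assuming the usual sh3 values NBPG == 4096,
 * NBPD == 4MB, and 4-byte PDEs (assumptions of this note, not
 * asserted by this header) -- the defines above work out to:
 *
 *	PTE_BASE  == 0x33f * 0x400000            == 0xcfc00000
 *	APTE_BASE == 0x37f * 0x400000            == 0xdfc00000
 *	PDP_BASE  == 0xcfc00000 + 0x33f * 0x1000 == 0xcff3f000
 *	APDP_BASE == 0xdfc00000 + 0x37f * 0x1000 == 0xdff7f000
 *	PDP_PDE   == 0xcff3f000 + 0x33f * 4      == 0xcff3fcfc
 *	APDP_PDE  == 0xcff3f000 + 0x37f * 4      == 0xcff3fdfc
 */
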
/*
 * XXXCDC: tmp xlate from old names:
 * PTDPTDI -> PDSLOT_PTE
 * KPTDI -> PDSLOT_KERN
 * APTDPTDI -> PDSLOT_APTE
 */

/*
 * the following define determines how many PTPs should be set up for the
 * kernel by locore.s at boot time.  this should be large enough to
 * get the VM system running.  once the VM system is running, the
 * pmap module can add more PTPs to the kernel area on demand.
 */

#ifndef NKPTP
#define NKPTP		8	/* 32MB to start */
#endif
#define NKPTP_MIN	8	/* smallest value we allow */
#define NKPTP_MAX	63	/* PDSLOT_APTE - PDSLOT_KERN: the slots */
				/* between kernel space and the APTE slot */

/*
 * various address macros
 *
 *  vtopte: return a pointer to the PTE mapping a VA
 *  kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
 *  ptetov: given a pointer to a PTE, return the VA that it maps
 *  vtophys: translate a VA to the PA mapped to it (a function; see below)
 *
 * plus alternative versions of the above
 */

#define vtopte(VA)	(PTE_BASE + sh3_btop(VA))
#define kvtopte(VA)	vtopte(VA)
#define ptetov(PT)	(sh3_ptob(PT - PTE_BASE))
#define avtopte(VA)	(APTE_BASE + sh3_btop(VA))
#define ptetoav(PT)	(sh3_ptob(PT - APTE_BASE))
#define avtophys(VA)	((*avtopte(VA) & PG_FRAME) | \
			 ((unsigned)(VA) & ~PG_FRAME))

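/*
 * A usage sketch for the macros above (assumes PG_V and PG_FRAME from
 * pte.h; not part of the pmap interface):
 */
#if 0	/* illustrative sketch only -- never compiled */
void
example_lookup(va)
	vaddr_t va;
{
	pt_entry_t *pte = vtopte(va);	/* PTE slot for va (current pmap) */
	vaddr_t pgva = ptetov(pte);	/* recovers va, rounded to a page */
	paddr_t pa;

	if (*pte & PG_V)		/* mapping valid? */
		pa = (*pte & PG_FRAME) | (va & ~PG_FRAME);
}
#endif
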
/*
 * pdei/ptei: generate index into PDP/PTP from a VA
 */
#define	pdei(VA)	(((VA) & PD_MASK) >> PDSHIFT)
#define	ptei(VA)	(((VA) & PT_MASK) >> PGSHIFT)

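/*
 * e.g. (assuming 4MB PDEs and 4KB pages) for va == 0xd0402345:
 * pdei(va) == 0x341 and ptei(va) == 0x2, i.e. the mapping lives in
 * entry 2 of the PTP that PDE slot 0x341 points to.
 */
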
/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
 *           NBPD == number of bytes a PTP can map (4MB)
 */

#define ptp_i2o(I)	((I) * NBPG)	/* index => offset */
#define ptp_o2i(O)	((O) / NBPG)	/* offset => index */
#define ptp_i2v(I)	((I) * NBPD)	/* index => VA */
#define ptp_v2i(V)	((V) / NBPD)	/* VA => index (same as pdei) */

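/*
 * e.g. the first kernel PTP has index PDSLOT_KERN (0x340), so (with
 * the usual 4KB/4MB values) ptp_i2o(0x340) == 0x340000 and
 * ptp_i2v(0x340) == 0xd0000000, the first kernel VA it maps.
 */
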
/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_PVLIST	PG_AVAIL1	/* mapping has entry on pvlist */

#ifdef _KERNEL
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 */

struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define	pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	u_int32_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint;	/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */
};

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT (i386 leftover, unused on sh3) */

/*
 * for each managed physical page we maintain a list of the <PMAP,VA>
 * pairs at which it is mapped.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry;

struct pv_head {
	struct simplelock pvh_lock;	/* locks every pv on this list */
	struct pv_entry *pvh_list;	/* head of list (locked by pvh_lock) */
};

/* These are kept in the vm_physseg array. */
#define	PGA_REFERENCED	0x01		/* page is referenced */
#define	PGA_MODIFIED	0x02		/* page is modified */

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))

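/*
 * e.g. with NBPG == 4096 and ILP32 sizes (a 16-byte pv_page_info and
 * a 16-byte pv_entry -- assumed here for illustration), this gives
 * PVE_PER_PVPAGE == (4096 - 16) / 16 == 255.
 */
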
/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

/*
 * pmap_remove_record: a record of VAs that have been unmapped, used to
 * flush the TLB.  if we unmap more than PMAP_RR_MAX pages we stop
 * recording and just flush everything.
 */

#define PMAP_RR_MAX	16	/* max of 16 pages (64K) */

struct pmap_remove_record {
	int prr_npages;
	vaddr_t prr_vas[PMAP_RR_MAX];
};

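/*
 * A sketch of the usage the comment above describes (the
 * authoritative logic lives in pmap.c): flush the recorded pages
 * individually while they fit, fall back to a full flush on overflow.
 */
#if 0	/* illustrative sketch only -- never compiled */
void
example_flush(prr)
	struct pmap_remove_record *prr;
{
	int i;

	if (prr->prr_npages > PMAP_RR_MAX)
		TLBFLUSH();		/* overflowed: flush everything */
	else
		for (i = 0; i < prr->prr_npages; i++)
			pmap_update_pg(prr->prr_vas[i]);
}
#endif
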
/*
 * pmap_transfer_location: used to pass the current location in the
 * pmap between pmap_transfer and pmap_transfer_ptes [e.g. during
 * a pmap_copy].
 */

struct pmap_transfer_location {
	vaddr_t addr;			/* the address (page-aligned) */
	pt_entry_t *pte;		/* the PTE that maps address */
	struct vm_page *ptp;		/* the PTP that the PTE lives in */
};

/*
 * global kernel variables
 */

/* PTDpaddr: the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int nkpde;			/* current # of PDEs for kernel */
extern int pmap_pg_g;			/* do we support PG_G? */

/*
 * macros
 */

/* XXX XXX XXX */
#ifdef SH4
/* SH4 has a virtually indexed cache, so flush it along with the TLB */
#define	TLBFLUSH()			(cacheflush(), tlbflush())
#else
#define	TLBFLUSH()			tlbflush()
#endif

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_update(pmap)		/* nothing (yet) */

#define	pmap_is_referenced(pg)		pmap_test_attrs(pg, PGA_REFERENCED)
#define	pmap_is_modified(pg)		pmap_test_attrs(pg, PGA_MODIFIED)

#define pmap_copy(DP,SP,D,L,S)		pmap_transfer(DP,SP,D,L,S, FALSE)
#define pmap_move(DP,SP,D,L,S)		pmap_transfer(DP,SP,D,L,S, TRUE)
#define pmap_phys_address(ppn)		sh3_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

/*
 * prototypes
 */

void		pmap_activate __P((struct proc *));
void		pmap_bootstrap __P((vaddr_t));
boolean_t	pmap_change_attrs __P((struct vm_page *, int, int));
void		pmap_deactivate __P((struct proc *));
void		pmap_page_remove __P((struct vm_page *));
static void	pmap_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));
void		pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
boolean_t	pmap_test_attrs __P((struct vm_page *, int));
void		pmap_transfer __P((struct pmap *, struct pmap *, vaddr_t,
				   vsize_t, vaddr_t, boolean_t));
static void	pmap_update_pg __P((vaddr_t));
static void	pmap_update_2pg __P((vaddr_t, vaddr_t));
void		pmap_write_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));

vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
/*
 * XXX Indeed, first we should sort out the physical vs. virtual
 *	address mapping.
 * See
 *	uvm_km.c:uvm_km_free_poolpage1,
 *	vm_page.h:PHYS_TO_VM_PAGE, vm_physseg_find
 *	machdep.c:pmap_bootstrap (uvm_page_physload, etc)
 */
#if 0
/* broken */
#define PMAP_MAP_POOLPAGE(pa)	SH3_PHYS_TO_P1SEG((pa))
#define PMAP_UNMAP_POOLPAGE(va)	SH3_P1SEG_TO_PHYS((va))
#else
#define PMAP_MAP_POOLPAGE(pa)	(pa)
#define PMAP_UNMAP_POOLPAGE(va)	(va)
#endif

/*
 * inline functions
 */

/*
 * pmap_update_pg: flush one page from the TLB
 */

__inline static void
pmap_update_pg(va)
	vaddr_t va;
{
#ifdef SH4
#if 1
	tlbflush();
	cacheflush();
#else
	/*
	 * Associative write to the UTLB address array in the P4 area
	 * (the A bit, bit 7, is set in the address): the hardware
	 * invalidates any entry matching the VPN, since the V bit in
	 * the data written is 0.
	 */
	u_int32_t *addr, data;

	addr = (void *)(0xf6000080 | (va & 0x00003f00)); /* 13-8 */
	data =         (0x00000000 | (va & 0xfffff000)); /* 31-12 */
	*addr = data;
#endif
#else
	/*
	 * Same idea on SH3: associative write to the TLB address array
	 * in the P4 area (A bit set); the matching entry, if any, is
	 * invalidated because the V bit in the data written is 0.
	 */
	u_int32_t *addr, data;

	addr = (void *)(0xf2000080 | (va & 0x0001f000)); /* 16-12 */
	data =         (0x00000000 | (va & 0xfffe0c00)); /* 31-17, 11-10 */

	*addr = data;
#endif
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void
pmap_update_2pg(va, vb)
	vaddr_t va, vb;
{
#ifdef SH4
	tlbflush();
	cacheflush();
#else
	pmap_update_pg(va);
	pmap_update_pg(vb);
#endif
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making a page more protected;
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_protect(pmap, sva, eva, prot)
	struct pmap *pmap;
	vaddr_t sva, eva;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

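/*
 * e.g. a sketch of the three cases above, for a page-aligned range
 * sva..eva:
 *
 *	pmap_protect(pmap, sva, eva, VM_PROT_READ);
 *		-> pmap_write_protect(): downgrade to read-only
 *	pmap_protect(pmap, sva, eva, VM_PROT_NONE);
 *		-> pmap_remove(): revoke all access
 *	pmap_protect(pmap, sva, eva, VM_PROT_READ|VM_PROT_WRITE);
 *		-> no-op: granting access is done lazily at fault time
 */
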
vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));
paddr_t vtophys __P((vaddr_t));
void pmap_emulate_reference __P((struct proc *, vaddr_t, int, int));

/* XXX */
#define PG_U 0		/* referenced bit */

#endif /* _KERNEL */
#endif /* _SH3_PMAP_H_ */