/*	$NetBSD: pmap.h,v 1.71 2007/02/22 06:51:30 thorpej Exp $	   */

/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */


#ifndef PMAP_H
#define PMAP_H

#include <machine/pte.h>
#include <machine/mtpr.h>
#include <machine/pcb.h>

/*
 * Some constants to make life easier.
 */
#define LTOHPS		(PGSHIFT - VAX_PGSHIFT)
#define LTOHPN		(1 << LTOHPS)
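
/*
 * LTOHPS is the shift between the MI ("logical") page size and the
 * 512-byte VAX hardware page; LTOHPN is the number of hardware pages
 * per MI page.  With the usual 4096-byte MI page (cf. the $4096 in
 * pmap_copy_page() below) this gives LTOHPS == 3 and LTOHPN == 8.
 */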

/*
 * Link structure used when more than one process shares a pmap
 * (as after vfork).  This is rarely used.
 */
struct pm_share {
	struct pm_share	*ps_next;
	struct pcb	*ps_pcb;
};

/*
 * Pmap structure.
 */

typedef struct pmap {
	struct pte	*pm_p1ap;	/* Base of allocated p1 pte space */
	int		 pm_count;	/* reference count */
	struct pm_share	*pm_share;	/* PCBs using this pmap */
	struct pte	*pm_p0br;	/* page 0 base register */
	long		 pm_p0lr;	/* page 0 length register */
	struct pte	*pm_p1br;	/* page 1 base register */
	long		 pm_p1lr;	/* page 1 length register */
	struct simplelock pm_lock;	/* Lock entry in MP environment */
	struct pmap_statistics	 pm_stats;	/* Some statistics */
} *pmap_t;

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a struct pv_entry; the list heads
 * live in the pv_table array.
 */

struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry */
	vaddr_t		 pv_vaddr;	/* virtual address of this mapping */
	struct pmap	*pv_pmap;	/* pmap this entry belongs to */
	int		 pv_attr;	/* referenced/modified attribute bits */
};

extern	struct pv_entry *pv_table;

/* Mapping macros used when allocating the system page table (SPT) */
#define MAPVIRT(ptr, count)				\
	ptr = virtual_avail;		\
	virtual_avail += (count) * VAX_NBPG;

#define MAPPHYS(ptr, count, perm)			\
	ptr = avail_start + KERNBASE;	\
	avail_start += (count) * VAX_NBPG;
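
/*
 * MAPVIRT carves "count" VAX pages of kernel virtual space off
 * virtual_avail; MAPPHYS takes them from physical avail_start and
 * yields the KERNBASE-mapped address.  Note that the "perm" argument
 * is ignored here.  A hypothetical bootstrap-time use (the names are
 * illustrative only) might look like:
 *
 *	MAPPHYS(pv_table, pv_pages, VM_PROT_READ|VM_PROT_WRITE);
 *	MAPVIRT(scratchva, 4);
 */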

#ifdef	_KERNEL

extern	struct pmap kernel_pmap_store;

#define pmap_kernel()			(&kernel_pmap_store)

#endif	/* _KERNEL */


/*
 * Fast macros to get the kernel virtual address of a physical page
 * (and vice versa).
 */
#define PMAP_MAP_POOLPAGE(pa)	((pa) | KERNBASE)
#define PMAP_UNMAP_POOLPAGE(va) ((va) & ~KERNBASE)
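
/*
 * These rely on physical memory being double-mapped at KERNBASE
 * (0x80000000), so e.g. physical address 0x1000 is always reachable
 * at virtual address 0x80001000.
 */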

#define PMAP_STEAL_MEMORY

/*
 * This is by far the most used pmap routine; make it inline.
 */
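/*
 * User space is split into the VAX P0 region (0x00000000-0x3fffffff,
 * grows upward, length in pm_p0lr, which also carries the AST bits,
 * hence the ~AST_MASK below) and the P1 region (0x40000000-0x7fffffff,
 * grows downward, pm_p1lr being the lowest valid page index); the
 * checks below reflect that layout.
 */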
__inline static bool
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	int	*pte, sva;

	if (va & KERNBASE) {
		paddr_t pa;

		pa = kvtophys(va); /* Is 0 if not mapped */
		if (pap)
			*pap = pa;
		if (pa)
			return (true);
		return (false);
	}

	sva = PG_PFNUM(va);
	if (va < 0x40000000) {
		if (sva > (pmap->pm_p0lr & ~AST_MASK))
			goto fail;
		pte = (int *)pmap->pm_p0br;
	} else {
		if (sva < pmap->pm_p1lr)
			goto fail;
		pte = (int *)pmap->pm_p1br;
	}
	if (kvtopte(&pte[sva])->pg_pfn && pte[sva]) {
		if (pap)
			*pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
		return (true);
	}
  fail:
	if (pap)
		*pap = 0;
	return (false);
}

bool pmap_clear_modify_long(struct pv_entry *);
bool pmap_clear_reference_long(struct pv_entry *);
bool pmap_is_modified_long(struct pv_entry *);
void pmap_page_protect_long(struct pv_entry *, vm_prot_t);
void pmap_protect_long(pmap_t, vaddr_t, vaddr_t, vm_prot_t);

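/*
 * The inline functions below work on the cached attribute bits in
 * pv_attr (PG_V used as "referenced", PG_M as "modified") and only
 * call into the *_long() versions when the page actually has live
 * mappings to walk.
 */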
__inline static bool
pmap_is_referenced(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_V) != 0;

	return rv;
}

__inline static bool
pmap_clear_reference(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_V) != 0;

	pv->pv_attr &= ~PG_V;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_reference_long(pv);
	return rv;
}

__inline static bool
pmap_clear_modify(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_M) != 0;

	pv->pv_attr &= ~PG_M;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_modify_long(pv);
	return rv;
}

__inline static bool
pmap_is_modified(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);

	if (pv->pv_attr & PG_M)
		return true;
	else
		return pmap_is_modified_long(pv);
}

__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);

	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		pmap_page_protect_long(pv, prot);
}

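/*
 * Fast path: an empty pmap has pm_p0lr == 0 and pm_p1lr == 0x200000
 * (no valid P0 or P1 pages), so a user-space range in such a pmap can
 * simply be ignored; everything else goes to pmap_protect_long().
 */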
__inline static void
pmap_protect(pmap_t pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
{
	if (pmap->pm_p0lr != 0 || pmap->pm_p1lr != 0x200000 ||
	    (start & KERNBASE) != 0)
		pmap_protect_long(pmap, start, end, prot);
}

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/* Routines that are better defined as macros */
#define pmap_phys_address(phys)		((u_int)(phys) << PGSHIFT)
#define pmap_copy(a,b,c,d,e)		/* Don't do anything */
#define pmap_update(pmap)		/* nothing (yet) */
#define pmap_collect(pmap)		/* No need so far */
#define pmap_remove(pmap, start, slut)	pmap_protect(pmap, start, slut, 0)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_reference(pmap)		(pmap)->pm_count++

/* These can be done as efficient inline macros */
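/*
 * Both go through the KERNBASE direct map (PA | 0x80000000) and operate
 * on one 4096-byte MI page; the VAX movc3/movc5 string instructions use
 * r0-r5, hence the clobber lists.
 */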
#define pmap_copy_page(src, dst)			\
	__asm("addl3 $0x80000000,%0,%%r0;"		\
		"addl3 $0x80000000,%1,%%r1;"		\
		"movc3 $4096,(%%r0),(%%r1)"		\
	    :: "r"(src), "r"(dst)			\
	    : "r0","r1","r2","r3","r4","r5");

#define pmap_zero_page(phys)				\
	__asm("addl3 $0x80000000,%0,%%r0;"		\
		"movc5 $0,(%%r0),$0,$4096,(%%r0)"	\
	    :: "r"(phys)				\
	    : "r0","r1","r2","r3","r4","r5");

/* Prototypes */
void	pmap_bootstrap __P((void));
vaddr_t pmap_map __P((vaddr_t, vaddr_t, vaddr_t, int));

#endif /* PMAP_H */