/* $OpenBSD: pmap.h,v 1.40 2016/04/20 05:24:18 landry Exp $ */
/* $NetBSD: pmap.h,v 1.37 2000/11/19 03:16:35 thorpej Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_

#include <sys/mutex.h>
#include <machine/pte.h>

#ifdef _KERNEL

#include <sys/queue.h>

/*
 * Machine-dependent virtual memory state.
 *
 * If we ever support processor numbers higher than 63, we'll have to
 * rethink the CPU mask.
 *
 * Note the pm_asni[] array of ASN information is allocated in
 * pmap_create().  Its size is based on the PCS count from the HWRPB,
 * and it is indexed by processor ID (from `whami').
 *
 * The kernel pmap is a special case; it gets a statically-allocated
 * array which holds enough entries for ALPHA_MAXPROCS.
 */
struct pmap_asn_info {
	unsigned int		pma_asn;	/* address space number */
	unsigned long		pma_asngen;	/* ASN generation number */
};

struct pmap {
	TAILQ_ENTRY(pmap)	pm_list;	/* list of all pmaps */
	pt_entry_t		*pm_lev1map;	/* level 1 map */
	int			pm_count;	/* pmap reference count */
	struct mutex		pm_mtx;		/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	unsigned long		pm_cpus;	/* mask of CPUs using pmap */
	unsigned long		pm_needisync;	/* mask of CPUs needing isync */
	struct pmap_asn_info	pm_asni[1];	/* ASN information */
			/*	variable length		*/
};
typedef struct pmap	*pmap_t;

/*
 * Compute the size of a pmap structure.  Subtract one because one
 * ASN info structure is already included in the pmap structure itself.
 */
#define PMAP_SIZEOF(x)							\
	(ALIGN(sizeof(struct pmap) +					\
	       (sizeof(struct pmap_asn_info) * ((x) - 1))))
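
/*
 * For illustration: on a machine with 4 processor slots,
 * PMAP_SIZEOF(4) evaluates to
 *
 *	ALIGN(sizeof(struct pmap) + 3 * sizeof(struct pmap_asn_info))
 *
 * i.e. the base structure (whose pm_asni[1] already provides one
 * ASN slot) plus three additional slots appended at the end.
 */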

#define	PMAP_ASN_RESERVED	0	/* reserved for Lev1map users */

extern struct pmap	kernel_pmap_store[];

#define pmap_kernel()	kernel_pmap_store

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t; the list head and
 * its mutex live in the page's struct vm_page_md (pvh_list, pvh_mtx),
 * defined at the end of this file.
 */
typedef struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry on list */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	pt_entry_t	*pv_pte;	/* PTE that maps the VA */
} *pv_entry_t;
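
/*
 * Sketch (for illustration only) of how the pmap internals walk all
 * mappings of a page through this list:
 *
 *	struct pv_entry *pv;
 *
 *	mtx_enter(&pg->mdpage.pvh_mtx);
 *	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
 *		... pv->pv_pmap, pv->pv_va and pv->pv_pte describe
 *		... one mapping of the page; operate on it here
 *	}
 *	mtx_leave(&pg->mdpage.pvh_mtx);
 */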

/* pvh_attrs */
#define	PGA_MODIFIED		0x01		/* modified */
#define	PGA_REFERENCED		0x02		/* referenced */

/* pvh_usage */
#define	PGU_NORMAL		0		/* free or normal use */
#define	PGU_PVENT		1		/* PV entries */
#define	PGU_L1PT		2		/* level 1 page table */
#define	PGU_L2PT		3		/* level 2 page table */
#define	PGU_L3PT		4		/* level 3 page table */

#define	PGU_ISPTPAGE(pgu)	((pgu) >= PGU_L1PT)

#if defined(NEW_SCC_DRIVER)
#if defined(DEC_KN8AE)
#define	_PMAP_MAY_USE_PROM_CONSOLE
#endif
#else /* ! NEW_SCC_DRIVER */
#if defined(DEC_3000_300)		\
 || defined(DEC_3000_500)		\
 || defined(DEC_KN8AE) 				/* XXX */
#define _PMAP_MAY_USE_PROM_CONSOLE		/* XXX */
#endif						/* XXX */
#endif /* NEW_SCC_DRIVER */

#if defined(MULTIPROCESSOR)
void	pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, u_long *);
void	pmap_tlb_shootnow(u_long);
void	pmap_do_tlb_shootdown(struct cpu_info *, struct trapframe *);
#define	PMAP_TLB_SHOOTDOWN_CPUSET_DECL		u_long shootset = 0;
#define	PMAP_TLB_SHOOTDOWN(pm, va, pte)					\
	pmap_tlb_shootdown((pm), (va), (pte), &shootset)
#define	PMAP_TLB_SHOOTNOW()						\
	pmap_tlb_shootnow(shootset)
#else
#define	PMAP_TLB_SHOOTDOWN_CPUSET_DECL		/* nothing */
#define	PMAP_TLB_SHOOTDOWN(pm, va, pte)		/* nothing */
#define	PMAP_TLB_SHOOTNOW()			/* nothing */
#endif /* MULTIPROCESSOR */
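
/*
 * Sketch of how the shootdown macros fit together (illustrative; the
 * real callers are in the pmap implementation):
 *
 *	PMAP_TLB_SHOOTDOWN_CPUSET_DECL		... declares `shootset'
 *
 *	... modify or invalidate a PTE ...
 *	PMAP_TLB_SHOOTDOWN(pmap, va, old_pte);	... record target CPUs
 *	PMAP_TLB_SHOOTNOW();			... deliver the shootdowns
 *
 * On non-MULTIPROCESSOR kernels all three macros expand to nothing.
 */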

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_copy(dp, sp, da, l, sa)	/* nothing */
#define pmap_update(pmap)		/* nothing (yet) */

#define pmap_proc_iflush(p, va, len)	/* nothing */
#define pmap_unuse_final(p)		/* nothing */
#define	pmap_remove_holes(vm)		do { /* nothing */ } while (0)

extern	pt_entry_t *VPT;		/* Virtual Page Table */

#define	PMAP_STEAL_MEMORY		/* enable pmap_steal_memory() */
#define PMAP_GROWKERNEL			/* enable pmap_growkernel() */

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
#define	pmap_map_direct(pg)	ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(pg))
#define	pmap_unmap_direct(va)	PHYS_TO_VM_PAGE(ALPHA_K0SEG_TO_PHYS((va)))
#define	__HAVE_PMAP_DIRECT
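
/*
 * For illustration: pool pages are reached through the K0SEG
 * direct-mapped segment, so no PTE is created and nothing needs to
 * be invalidated afterwards:
 *
 *	vaddr_t va = pmap_map_direct(pg);	... K0SEG view of pg
 *	... use the page through va ...
 *	struct vm_page *opg = pmap_unmap_direct(va);	... opg == pg
 */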

paddr_t vtophys(vaddr_t);

/* Machine-specific functions. */
void	pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids);
int	pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int type);
#ifdef _PMAP_MAY_USE_PROM_CONSOLE
int	pmap_uses_prom_console(void);
#endif

#define	pmap_pte_pa(pte)	(PG_PFNUM(*(pte)) << PAGE_SHIFT)
#define	pmap_pte_prot(pte)	(*(pte) & PG_PROT)
#define	pmap_pte_w(pte)		(*(pte) & PG_WIRED)
#define	pmap_pte_v(pte)		(*(pte) & PG_V)
#define	pmap_pte_pv(pte)	(*(pte) & PG_PVLIST)
#define	pmap_pte_asm(pte)	(*(pte) & PG_ASM)
#define	pmap_pte_exec(pte)	(*(pte) & PG_EXEC)

#define	pmap_pte_set_w(pte, v)						\
do {									\
	if (v)								\
		*(pte) |= PG_WIRED;					\
	else								\
		*(pte) &= ~PG_WIRED;					\
} while (0)

#define	pmap_pte_w_chg(pte, nw)	((nw) ^ pmap_pte_w(pte))

#define	pmap_pte_set_prot(pte, np)					\
do {									\
	*(pte) &= ~PG_PROT;						\
	*(pte) |= (np);							\
} while (0)

#define	pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
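
/*
 * For illustration: the *_chg macros XOR the new value against the
 * bits currently in the PTE, yielding nonzero exactly when an update
 * (and hence a TLB invalidation) is actually required:
 *
 *	if (pmap_pte_prot_chg(pte, np)) {
 *		pmap_pte_set_prot(pte, np);
 *		... invalidate the TLB entry for this VA ...
 *	}
 */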

static __inline pt_entry_t *pmap_l2pte(pmap_t, vaddr_t, pt_entry_t *);
static __inline pt_entry_t *pmap_l3pte(pmap_t, vaddr_t, pt_entry_t *);

#define	pmap_l1pte(pmap, v)						\
	(&(pmap)->pm_lev1map[l1pte_index((vaddr_t)(v))])

/*
 * pmap_l2pte:
 *
 *	Return the level 2 PTE for the given VA.  If l1pte is non-NULL,
 *	it is assumed to point to the (valid) level 1 PTE for the VA;
 *	otherwise the level 1 PTE is looked up, and NULL is returned if
 *	it is invalid.
 */
static __inline pt_entry_t *
pmap_l2pte(pmap_t pmap, vaddr_t v, pt_entry_t *l1pte)
{
	pt_entry_t *lev2map;

	if (l1pte == NULL) {
		l1pte = pmap_l1pte(pmap, v);
		if (pmap_pte_v(l1pte) == 0)
			return (NULL);
	}

	lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
	return (&lev2map[l2pte_index(v)]);
}

/*
 * pmap_l3pte:
 *
 *	Return the level 3 PTE for the given VA.  If l2pte is non-NULL,
 *	it is assumed to point to the (valid) level 2 PTE for the VA;
 *	otherwise the level 1 and level 2 PTEs are looked up, and NULL
 *	is returned if either is invalid.
 */
static __inline pt_entry_t *
pmap_l3pte(pmap_t pmap, vaddr_t v, pt_entry_t *l2pte)
{
	pt_entry_t *l1pte, *lev2map, *lev3map;

	if (l2pte == NULL) {
		l1pte = pmap_l1pte(pmap, v);
		if (pmap_pte_v(l1pte) == 0)
			return (NULL);

		lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
		l2pte = &lev2map[l2pte_index(v)];
		if (pmap_pte_v(l2pte) == 0)
			return (NULL);
	}

	lev3map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l2pte));
	return (&lev3map[l3pte_index(v)]);
}

/*
 * Macro for processing deferred I-stream synchronization.
 *
 * The pmap module may defer syncing the user I-stream until the
 * return to userspace, since the IMB PALcode op can be quite
 * expensive.  Because user instructions cannot execute until that
 * return, the sync can safely wait until just before userret().
 */
#define	PMAP_USERRET(pmap)						\
do {									\
	u_long cpu_mask = (1UL << cpu_number());			\
									\
	if ((pmap)->pm_needisync & cpu_mask) {				\
		atomic_clearbits_ulong(&(pmap)->pm_needisync,		\
		    cpu_mask);						\
		alpha_pal_imb();					\
	}								\
} while (0)
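
/*
 * Sketch of the intended call site (illustrative; the actual caller
 * is in the machine-dependent trap/syscall return path):
 *
 *	PMAP_USERRET(vm_map_pmap(&p->p_vmspace->vm_map));
 *	userret(p);
 *
 * so a deferred IMB is issued at most once per return to user mode.
 */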

#endif /* _KERNEL */

/*
 * pmap-specific data stored in the vm_page structure.
 */
struct vm_page_md {
	struct mutex pvh_mtx;
	struct pv_entry *pvh_list;	/* pv entry list */
	int pvh_attrs;			/* page attributes */
};

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	mtx_init(&(pg)->mdpage.pvh_mtx, IPL_VM);			\
	(pg)->mdpage.pvh_list = NULL;					\
	(pg)->mdpage.pvh_attrs = 0;					\
} while (0)

#endif /* _PMAP_MACHINE_ */