/*	$NetBSD: pmap.h,v 1.37 2013/01/07 16:57:28 chs Exp $	*/

/*	$OpenBSD: pmap.h,v 1.35 2007/12/14 18:32:23 deraadt Exp $	*/

/*
 * Copyright (c) 2002-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Pmap header for hppa.
 */

#ifndef	_HPPA_PMAP_H_
#define	_HPPA_PMAP_H_

#ifdef _KERNEL_OPT
#include "opt_cputype.h"
#endif

#include <sys/mutex.h>
#include <machine/pte.h>
#include <machine/cpufunc.h>

#include <uvm/uvm_pglist.h>
#include <uvm/uvm_object.h>

#ifdef	_KERNEL

#define PMAP_NEED_PROCWR
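
/*
 * PMAP_NEED_PROCWR tells the MI kernel that pmap_procwr(p, va, len)
 * must be called after instructions are written into another process'
 * address space (e.g. ptrace() breakpoints), so that the instruction
 * and data caches can be synchronized.
 */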

struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define	pm_lock	pm_obj.vmobjlock
	kmutex_t	pm_obj_lock;	/* lock for pm_obj */
	struct vm_page	*pm_ptphint;
	struct vm_page	*pm_pdir_pg;	/* vm_page for pdir */
	volatile uint32_t *pm_pdir;	/* page dir (read-only after create) */
	pa_space_t	pm_space;	/* space id (read-only after create) */
	u_int		pm_pid;		/* prot id (read-only after create) */

	struct pmap_statistics	pm_stats;
};

#define	PVF_MOD		PTE_PROT(TLB_DIRTY)	/* pg/mp is modified */
#define	PVF_REF		PTE_PROT(TLB_REFTRAP)	/* pg/mp (inv) is referenced */
#define	PVF_WRITE	PTE_PROT(TLB_WRITE)	/* pg/mp is writable */
#define	PVF_EXEC	PTE_PROT(TLB_EXECUTE)	/* pg/mp is executable */
#define	PVF_UNCACHEABLE	PTE_PROT(TLB_UNCACHEABLE)	/* pg/mp is uncacheable */

#define	HPPA_MAX_PID	0xfffa
#define	HPPA_SID_MAX	0x7ffd

/*
 * DON'T CHANGE THIS - this is assumed in lots of places.
 */
#define	HPPA_SID_KERNEL	0
#define	HPPA_PID_KERNEL	2

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry	*pv_next;
	struct pmap	*pv_pmap;	/* the pmap */
	vaddr_t		pv_va;		/* the virtual address + flags */
#define	PV_VAMASK	(~(PAGE_SIZE - 1))
#define	PV_KENTER	0x001

	struct vm_page	*pv_ptp;	/* the vm_page of the PTP */
};
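
/*
 * Virtual addresses are page aligned, so the low bits of pv_va are
 * free to carry flags; PV_VAMASK recovers the address.  PV_KENTER
 * marks a mapping entered via pmap_kenter_pa(), i.e. one that is
 * tracked here but not managed through the usual reference/modify
 * machinery.
 */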

extern int pmap_hptsize;
extern struct pdc_hwtlb pdc_hwtlb;

/*
 * pool quickmaps
 */
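/*
 * Pool pages live in the kernel's equivalent (direct) mapping, so
 * "mapping" one is an identity PA/VA conversion.  On unmap the data
 * cache is purged, and on the PA8x00 CPUs the TLB entry is purged as
 * well, presumably because those CPUs can speculatively fetch through
 * any valid translation.
 */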
static inline vaddr_t
hppa_map_poolpage(paddr_t pa)
{
	return (vaddr_t)pa;
}

static inline paddr_t
hppa_unmap_poolpage(vaddr_t va)
{
	pdcache(HPPA_SID_KERNEL, va, PAGE_SIZE);

#if defined(HP8000_CPU) || defined(HP8200_CPU) || \
    defined(HP8500_CPU) || defined(HP8600_CPU)
	pdtlb(HPPA_SID_KERNEL, va);
#endif

	return (paddr_t)va;
}

#define	PMAP_MAP_POOLPAGE(pa)	hppa_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)	hppa_unmap_poolpage(va)

/*
 * According to the PA-RISC manual, aliased VAs may differ only in
 * their high 12 bits.
 */
#define	PMAP_PREFER(o,h,s,td)	pmap_prefer((o), (h), (td))

static inline void
pmap_prefer(vaddr_t fo, vaddr_t *va, int td)
{
	vaddr_t newva;

	newva = (*va & HPPA_PGAMASK) | (fo & HPPA_PGAOFF);
	if (td) {
		if (newva > *va)
			newva -= HPPA_PGALIAS;
	} else {
		if (newva < *va)
			newva += HPPA_PGALIAS;
	}
	*va = newva;
}
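
/*
 * For example (assuming the 1MB alias boundary from <machine/param.h>,
 * i.e. HPPA_PGAMASK 0xfff00000 and HPPA_PGAOFF 0x000fffff), a
 * bottom-up hint of 0x20080000 for offset 0x23456 becomes 0x20123456:
 * the alias offset is copied from the file offset and the result is
 * bumped up one alias boundary so it does not undershoot the hint.
 */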
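
/*
 * Protection IDs are derived from space IDs as pid = (sid + 1) << 1,
 * which makes HPPA_SID_KERNEL (0) correspond to HPPA_PID_KERNEL (2)
 * above.
 */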
#define	pmap_sid2pid(s)			(((s) + 1) << 1)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_update(p)

#define	pmap_copy(dpmap,spmap,da,len,sa)
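
/*
 * pmap_update() and pmap_copy() are deliberately no-ops: there is no
 * deferred pmap work to flush, and copying mappings at fork time is
 * an optional optimization this pmap does not implement.
 */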

#define	pmap_clear_modify(pg)	pmap_changebit(pg, 0, PTE_PROT(TLB_DIRTY))
#define	pmap_clear_reference(pg) \
				pmap_changebit(pg, PTE_PROT(TLB_REFTRAP), 0)
#define	pmap_is_modified(pg)	pmap_testbit(pg, PTE_PROT(TLB_DIRTY))
#define	pmap_is_referenced(pg)	pmap_testbit(pg, PTE_PROT(TLB_REFTRAP))
#define	pmap_phys_address(ppn)	((ppn) << PAGE_SHIFT)

void	pmap_activate(struct lwp *);

void pmap_bootstrap(vaddr_t);
bool pmap_changebit(struct vm_page *, u_int, u_int);
bool pmap_testbit(struct vm_page *, u_int);
void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva);
void pmap_page_remove(struct vm_page *pg);

void pmap_procwr(struct proc *, vaddr_t, size_t);

static inline void
pmap_deactivate(struct lwp *l)
{
	/* Nothing. */
}

static inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

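/*
 * The hppa_prot[] table maps VM_PROT_* combinations to TLB
 * access-rights bits; mappings in non-kernel pmaps additionally get
 * TLB_USER so they are accessible at user privilege levels.
 */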
static inline int
pmap_prot(struct pmap *pmap, int prot)
{
	extern u_int hppa_prot[];
	return (hppa_prot[prot] | (pmap == pmap_kernel() ? 0 : TLB_USER));
}

static inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & UVM_PROT_WRITE) == 0) {
		if (prot & (UVM_PROT_RX))
			pmap_changebit(pg, 0, PTE_PROT(TLB_WRITE));
		else
			pmap_page_remove(pg);
	}
}

static inline void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & UVM_PROT_WRITE) == 0) {
		if (prot & (UVM_PROT_RX))
			pmap_write_protect(pmap, sva, eva, prot);
		else
			pmap_remove(pmap, sva, eva);
	}
}

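/*
 * VAs in the top quadrant (0xc0000000 and above) always belong to the
 * kernel's space; everything else is translated through the pmap's
 * own space ID.
 */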
#define	pmap_sid(pmap, va) \
	((((va) & 0xc0000000) != 0xc0000000) ? \
	 (pmap)->pm_space : HPPA_SID_KERNEL)

#define __HAVE_VM_PAGE_MD

struct pv_entry;

struct vm_page_md {
	struct pv_entry	*pvh_list;	/* head of list */
	u_int		pvh_attrs;	/* to preserve ref/mod */
};

#define	VM_MDPAGE_INIT(pg) \
do {									\
	(pg)->mdpage.pvh_list = NULL;					\
	(pg)->mdpage.pvh_attrs = 0;					\
} while (0)

#endif /* _KERNEL */

#endif /* _HPPA_PMAP_H_ */