/*	$NetBSD: xenpmap.h,v 1.17 2007/11/28 16:40:40 ad Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

/* Value in the phys-to-machine table for a page with no machine frame. */
#define	INVALID_P2M_ENTRY	(~0UL)

void xpq_queue_machphys_update(paddr_t, paddr_t);
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pde_update(pd_entry_t *, pd_entry_t);
void xpq_queue_pte_update(pt_entry_t *, pt_entry_t);
void xpq_queue_pt_switch(paddr_t);
void xpq_flush_queue(void);
void xpq_queue_set_ldt(vaddr_t, uint32_t);
void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t);
void xpq_queue_unpin_table(paddr_t);
int  xpq_update_foreign(pt_entry_t *, pt_entry_t, int);
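
/*
 * The xpq_* functions accumulate MMU update requests in a queue and
 * submit them to the hypervisor when xpq_flush_queue() is called.
 * A minimal sketch of the intended call sequence (maptp, npte and va
 * are illustrative names; callers are expected to block interrupts
 * with splvm(), as the macros further down do):
 *
 *	int s = splvm();
 *	xpq_queue_pte_update(maptp, npte);
 *	xpq_queue_invlpg(va);
 *	xpq_flush_queue();
 *	splx(s);
 */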

/* Maps pseudo-physical frame numbers to machine frame numbers. */
extern paddr_t *xpmap_phys_to_machine_mapping;

#ifndef XEN
#define	PDE_GET(_pdp)						\
	*(_pdp)
#define PDE_SET(_pdp,_mapdp,_npde)				\
	*(_mapdp) = (_npde)
#define PDE_CLEAR(_pdp,_mapdp)					\
	*(_mapdp) = 0
#define PTE_SET(_ptp,_maptp,_npte)				\
	*(_maptp) = (_npte)
#define PTE_CLEAR(_ptp,_maptp)					\
	*(_maptp) = 0
#define PTE_ATOMIC_SET(_ptp,_maptp,_npte,_opte)			\
	(_opte) = atomic_swap_ulong((volatile unsigned long *)(_maptp), (_npte))
#define PTE_ATOMIC_CLEAR(_ptp,_maptp,_opte)			\
	(_opte) = atomic_swap_ulong((volatile unsigned long *)(_maptp), 0)
#define PDE_CLEARBITS(_pdp,_mapdp,_bits)			\
	*(_mapdp) &= ~(_bits)
#define PTE_ATOMIC_CLEARBITS(_ptp,_maptp,_bits)			\
	atomic_and_ulong((volatile unsigned long *)(_maptp), ~(_bits))
#define PTE_SETBITS(_ptp,_maptp,_bits)				\
	*(_maptp) |= (_bits)
#define PTE_ATOMIC_SETBITS(_ptp,_maptp,_bits)			\
	atomic_or_ulong((volatile unsigned long *)(_maptp), (_bits))
#else

#define	PDE_GET(_pdp)						\
	(pmap_valid_entry(*(_pdp)) ? xpmap_mtop(*(_pdp)) : *(_pdp))
#define PDE_SET(_pdp,_mapdp,_npde) do {				\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), xpmap_ptom((_npde)));	\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_CLEAR(_pdp,_mapdp) do {				\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define	PTE_GET(_ptp)						\
	(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : *(_ptp))
/* The _MA variants take and return machine addresses untranslated. */
#define	PTE_GET_MA(_ptp)					\
	*(_ptp)
#define PTE_SET(_ptp,_maptp,_npte) do {				\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), xpmap_ptom((_npte)));	\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_SET_MA(_ptp,_maptp,_npte) do {			\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), (_npte));		\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_CLEAR(_ptp,_maptp) do {				\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SET(_ptp,_maptp,_npte,_opte) do {		\
	int _s;							\
	(_opte) = PTE_GET(_ptp);				\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), xpmap_ptom((_npte)));	\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SET_MA(_ptp,_maptp,_npte,_opte) do {		\
	int _s;							\
	(_opte) = *(_ptp);					\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), (_npte));		\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEAR(_ptp,_maptp,_opte) do {		\
	int _s;							\
	(_opte) = PTE_GET(_ptp);				\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEAR_MA(_ptp,_maptp,_opte) do {		\
	int _s;							\
	(_opte) = *(_ptp);					\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_CLEARBITS(_pdp,_mapdp,_bits) do {			\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), *(_pdp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_CLEARBITS(_ptp,_maptp,_bits) do {			\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_ATOMIC_CLEARBITS(_pdp,_mapdp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), *(_pdp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEARBITS(_ptp,_maptp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_SETBITS(_ptp,_maptp,_bits) do {			\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_ATOMIC_SETBITS(_pdp,_mapdp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), *(_pdp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SETBITS(_ptp,_maptp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_COPY(_dpdp,_madpdp,_spdp) do {			\
	int _s = splvm();					\
	xpq_queue_pde_update((_madpdp), *(_spdp));		\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define	PTE_UPDATES_FLUSH() do {				\
	int _s = splvm();					\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)

#endif	/* XEN */
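
/*
 * A minimal sketch of how the update macros are used from pmap code.
 * Here ptep is the virtual address of a PTE and maptp the address the
 * hypervisor update is queued against (hypothetical names; the details
 * live in the pmap implementation):
 *
 *	pt_entry_t opte;
 *	PTE_SET(ptep, maptp, npte);		install a new entry
 *	...
 *	PTE_ATOMIC_CLEAR(ptep, maptp, opte);	clear it, saving the old value
 */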

/*
 * On Xen-2, start-of-day virtual memory starts at KERNTEXTOFF
 * (0xc0100000).  On Xen-3 it starts at KERNBASE (0xc0000000) for
 * domain0, so the offset between physical and virtual addresses
 * differs between Xen-2 and Xen-3 for domain0.
 * Starting with Xen 3.0.2, we can add notes so that virtual memory
 * starts at KERNBASE for domU as well.
 */
#if defined(XEN3) && (defined(DOM0OPS) || !defined(XEN_COMPAT_030001))
#define XPMAP_OFFSET	0
#else
#define	XPMAP_OFFSET	(KERNTEXTOFF - KERNBASE)
#endif
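
/*
 * With the i386 layout described above, the non-zero case works out to
 * XPMAP_OFFSET = 0xc0100000 - 0xc0000000 = 0x100000 (1MB), which the
 * conversion helpers below add to or subtract from the pseudo-physical
 * address.
 */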

/*
 * Translate between machine addresses (ma) and pseudo-physical
 * addresses (pa) via the m2p/p2m tables; the non-masked variants
 * carry the offset/flag bits of the argument through unchanged.
 */
static __inline paddr_t
xpmap_mtop(paddr_t mpa)
{
	return ((machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT) +
	    XPMAP_OFFSET) | (mpa & ~PG_FRAME);
}

static __inline paddr_t
xpmap_mtop_masked(paddr_t mpa)
{
	return ((machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT) +
	    XPMAP_OFFSET);
}

static __inline paddr_t
xpmap_ptom(paddr_t ppa)
{
	return (xpmap_phys_to_machine_mapping[(ppa -
	    XPMAP_OFFSET) >> PAGE_SHIFT] << PAGE_SHIFT)
		| (ppa & ~PG_FRAME);
}

static __inline paddr_t
xpmap_ptom_masked(paddr_t ppa)
{
	return (xpmap_phys_to_machine_mapping[(ppa -
	    XPMAP_OFFSET) >> PAGE_SHIFT] << PAGE_SHIFT);
}
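
/*
 * For a page present in both translation tables the conversions are
 * inverses of each other, e.g. (a sketch, for any managed
 * pseudo-physical address ppa):
 *
 *	paddr_t ma = xpmap_ptom(ppa);
 *	KASSERT(xpmap_mtop(ma) == ppa);
 */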

#ifdef XEN3
static inline void
MULTI_update_va_mapping(
	multicall_entry_t *mcl, vaddr_t va,
	paddr_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
#else
	/* On 32-bit, the 64-bit PTE value occupies two argument slots. */
	mcl->args[1] = new_val;
	mcl->args[2] = 0;
	mcl->args[3] = flags;
#endif
}

static inline void
MULTI_update_va_mapping_otherdomain(
	multicall_entry_t *mcl, vaddr_t va,
	paddr_t new_val, unsigned long flags, domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
	mcl->args[3] = domid;
#else
	mcl->args[1] = new_val;
	mcl->args[2] = 0;
	mcl->args[3] = flags;
	mcl->args[4] = domid;
#endif
}
#if defined(__x86_64__)
#define MULTI_UVMFLAGS_INDEX 2
#define MULTI_UVMDOMID_INDEX 3
#else
#define MULTI_UVMFLAGS_INDEX 3
#define MULTI_UVMDOMID_INDEX 4
#endif
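
/*
 * A sketch of batching two va-mapping updates into one hypercall
 * (mcl, va0/va1 and ma0/ma1 are hypothetical; the index macros above
 * locate the flags slot on both architectures):
 *
 *	multicall_entry_t mcl[2];
 *	MULTI_update_va_mapping(&mcl[0], va0, ma0 | PG_V, 0);
 *	MULTI_update_va_mapping(&mcl[1], va1, ma1 | PG_V, 0);
 *	mcl[1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH | UVMF_ALL;
 *	if (HYPERVISOR_multicall(mcl, 2) != 0)
 *		panic("update_va_mapping failed");
 */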

#if defined(__x86_64__)
void xen_set_user_pgd(paddr_t);
#endif

#endif /* XEN3 */

#endif /* _XEN_XENPMAP_H_ */