xref: /netbsd-src/sys/arch/xen/include/xenpmap.h (revision 23c8222edbfb0f0932d88a8351d3a0cf817dfb9e)
1 /*	$NetBSD: xenpmap.h,v 1.3 2004/04/26 22:05:05 cl Exp $	*/
2 
3 /*
4  *
5  * Copyright (c) 2004 Christian Limpach.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by Christian Limpach.
19  * 4. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 
#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

/*
 * Deferred MMU-update queue.  Under Xen, page-table modifications are
 * batched into a queue of requests and submitted to the hypervisor by
 * xpq_flush_queue(); the macros below queue an update and flush
 * immediately.
 */
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pde_update(pd_entry_t *, pd_entry_t);
void xpq_queue_pte_update(pt_entry_t *, pt_entry_t);
/* Like xpq_queue_pte_update(); "unchecked" presumably bypasses the
 * hypervisor's PTE validation — see the xpq implementation. */
void xpq_queue_unchecked_pte_update(pt_entry_t *, pt_entry_t);
void xpq_queue_pt_switch(paddr_t);
void xpq_flush_queue(void);
void xpq_queue_set_ldt(vaddr_t, uint32_t);
void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t, int);
void xpq_queue_unpin_table(paddr_t);

/* Pseudo-physical frame -> machine frame table; indexed by PFN in
 * xpmap_ptom()/xpmap_ptom_masked() below. */
extern paddr_t *xpmap_phys_to_machine_mapping;

/* Values for the level argument of xpq_queue_pin_table(). */
#define	XPQ_PIN_L1_TABLE 1
#define	XPQ_PIN_L2_TABLE 2
54 #ifndef XEN
/*
 * Native (non-Xen) versions: page tables are directly writable, so the
 * accessors are plain loads/stores (or x86 atomic ops for the ATOMIC
 * variants).  Machine addresses equal physical addresses here, so the
 * "_MA" variants alias the plain ones.  The macro set is kept in parity
 * with the Xen definitions in the #else branch so MD code can use any
 * of these accessors unconditionally.
 */
#define	PDE_GET(_pdp)						\
	*(_pdp)
#define PDE_SET(_pdp,_npde)					\
	*(_pdp) = (_npde)
#define PDE_CLEAR(_pdp)						\
	*(_pdp) = 0
#define	PTE_GET(_ptp)						\
	*(_ptp)
#define	PTE_GET_MA(_ptp)					\
	*(_ptp)
#define PTE_SET(_ptp,_npte)					\
	*(_ptp) = (_npte)
#define PTE_SET_MA(_ptp,_npte)					\
	*(_ptp) = (_npte)
#define PTE_SET_MA_UNCHECKED(_ptp,_npte)			\
	*(_ptp) = (_npte)
#define PTE_CLEAR(_ptp)						\
	*(_ptp) = 0
#define PTE_ATOMIC_SET(_ptp,_npte,_opte)			\
	(_opte) = x86_atomic_testset_ul((_ptp), (_npte))
#define PTE_ATOMIC_SET_MA(_ptp,_npte,_opte)			\
	(_opte) = x86_atomic_testset_ul((_ptp), (_npte))
#define PTE_ATOMIC_CLEAR(_ptp,_opte)				\
	(_opte) = x86_atomic_testset_ul((_ptp), 0)
#define PTE_ATOMIC_CLEAR_MA(_ptp,_opte)				\
	(_opte) = x86_atomic_testset_ul((_ptp), 0)
#define PDE_CLEARBITS(_pdp,_bits)				\
	*(_pdp) &= ~(_bits)
#define PTE_CLEARBITS(_ptp,_bits)				\
	*(_ptp) &= ~(_bits)
#define PDE_ATOMIC_CLEARBITS(_pdp,_bits)			\
	x86_atomic_clearbits_l((_pdp), (_bits))
#define PTE_ATOMIC_CLEARBITS(_ptp,_bits)			\
	x86_atomic_clearbits_l((_ptp), (_bits))
#define PTE_SETBITS(_ptp,_bits)					\
	*(_ptp) |= (_bits)
#define PDE_ATOMIC_SETBITS(_pdp,_bits)				\
	x86_atomic_setbits_l((_pdp), (_bits))
#define PTE_ATOMIC_SETBITS(_ptp,_bits)				\
	x86_atomic_setbits_l((_ptp), (_bits))
#define PDE_COPY(_dpdp,_spdp)					\
	*(_dpdp) = *(_spdp)
#define	PTE_UPDATES_FLUSH()	/* nothing: stores are immediate */
77 #else
78 paddr_t *xpmap_phys_to_machine_mapping;
79 
/*
 * Xen versions of the PDE/PTE accessors.  Every store is routed through
 * the xpq_* update queue and pushed to the hypervisor immediately via
 * xpq_flush_queue().  In-table entries hold machine frames: the plain
 * accessors convert to/from pseudo-physical with xpmap_mtop()/
 * xpmap_ptom(), while the "_MA" variants pass machine values through
 * untranslated.
 */
/* Read a PDE; a valid entry's value is translated back to pseudo-phys. */
#define	PDE_GET(_pdp)						\
	(pmap_valid_entry(*(_pdp)) ? xpmap_mtop(*(_pdp)) : *(_pdp))
/* Store a pseudo-physical PDE value (translated to machine). */
#define PDE_SET(_pdp,_npde) do {				\
	xpq_queue_pde_update((_pdp), xpmap_ptom((_npde)));	\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
#define PDE_CLEAR(_pdp) do {					\
	xpq_queue_pde_update((_pdp), 0);			\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
/* Read a PTE; a valid entry's value is translated back to pseudo-phys. */
#define	PTE_GET(_ptp)						\
	(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : *(_ptp))
/* Read a PTE raw, i.e. the machine-address form. */
#define	PTE_GET_MA(_ptp)					\
	*(_ptp)
/* Store a pseudo-physical PTE value (translated to machine). */
#define PTE_SET(_ptp,_npte) do {				\
	xpq_queue_pte_update((_ptp), xpmap_ptom((_npte)));	\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
/* Store a PTE value that is already a machine address. */
#define PTE_SET_MA(_ptp,_npte) do {				\
	xpq_queue_pte_update((_ptp), (_npte));			\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
/* As PTE_SET_MA, but via the unchecked update request. */
#define PTE_SET_MA_UNCHECKED(_ptp,_npte) do {			\
	xpq_queue_unchecked_pte_update((_ptp), (_npte));	\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
#define PTE_CLEAR(_ptp) do {					\
	xpq_queue_pte_update((_ptp), 0);			\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
/*
 * "ATOMIC" swap/clear variants: return the previous entry in _opte,
 * then queue the new value.
 *
 * NOTE(review): these are not atomic in the hardware sense — the old
 * value is read before the update is queued/flushed, so a concurrent
 * hypervisor-side change between the read and the flush could be lost.
 * Callers are presumably expected to provide their own exclusion;
 * confirm against the pmap locking rules.
 */
#define PTE_ATOMIC_SET(_ptp,_npte,_opte) do {			\
	(_opte) = PTE_GET(_ptp);				\
	xpq_queue_pte_update((_ptp), xpmap_ptom((_npte)));	\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SET_MA(_ptp,_npte,_opte) do {		\
	(_opte) = *(_ptp);					\
	xpq_queue_pte_update((_ptp), (_npte));			\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEAR(_ptp,_opte) do {			\
	(_opte) = PTE_GET(_ptp);				\
	xpq_queue_pte_update((_ptp), 0);			\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEAR_MA(_ptp,_opte) do {			\
	(_opte) = *(_ptp);					\
	xpq_queue_pte_update((_ptp), 0);			\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
/*
 * Clear _bits in a PDE (frame bits are masked out of _bits so the
 * mapping itself cannot be clobbered).  Queue through the PDE update
 * function: the original queued this via xpq_queue_pte_update() despite
 * taking a pd_entry_t pointer — made consistent with
 * PDE_ATOMIC_CLEARBITS below.
 */
#define PDE_CLEARBITS(_pdp,_bits) do {				\
	xpq_queue_pde_update((_pdp), *(_pdp) & ~((_bits) & ~PG_FRAME));	\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
/*
 * Bit set/clear helpers.  (_bits) is masked with ~PG_FRAME so only
 * attribute bits, never the frame address, can be modified.  The
 * ATOMIC variants are identical to the plain ones here — both go
 * through the same queue-and-flush sequence.
 */
#define PTE_CLEARBITS(_ptp,_bits) do {				\
	xpq_queue_pte_update((_ptp), *(_ptp) & ~((_bits) & ~PG_FRAME));	\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
#define PDE_ATOMIC_CLEARBITS(_pdp,_bits) do {			\
	xpq_queue_pde_update((_pdp), *(_pdp) & ~((_bits) & ~PG_FRAME));	\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEARBITS(_ptp,_bits) do {			\
	xpq_queue_pte_update((_ptp), *(_ptp) & ~((_bits) & ~PG_FRAME));	\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
#define PTE_SETBITS(_ptp,_bits) do {				\
	xpq_queue_pte_update((_ptp), *(_ptp) | ((_bits) & ~PG_FRAME));	\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
#define PDE_ATOMIC_SETBITS(_pdp,_bits) do {			\
	xpq_queue_pde_update((_pdp), *(_pdp) | ((_bits) & ~PG_FRAME));	\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
/*
 * Set attribute bits in a PTE (frame bits masked out of _bits).
 * Bug fix: this macro operates on a pt_entry_t pointer but queued the
 * update with xpq_queue_pde_update(); use xpq_queue_pte_update() like
 * every other PTE_* macro (copy-paste from PDE_ATOMIC_SETBITS above).
 */
#define PTE_ATOMIC_SETBITS(_ptp,_bits) do {			\
	xpq_queue_pte_update((_ptp), *(_ptp) | ((_bits) & ~PG_FRAME));	\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
/* Copy a PDE verbatim (raw machine value, no translation) into _dpdp. */
#define PDE_COPY(_dpdp,_spdp) do {				\
	xpq_queue_pde_update((_dpdp), *(_spdp));		\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
/* Push any pending queued updates to the hypervisor. */
#define	PTE_UPDATES_FLUSH() do {				\
	xpq_flush_queue();					\
} while (/*CONSTCOND*/0)
165 
166 #endif
167 
168 static __inline paddr_t
169 xpmap_mtop(paddr_t mpa)
170 {
171 	return ((machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT) +
172 	    (KERNTEXTOFF - KERNBASE_LOCORE)) | (mpa & ~PG_FRAME);
173 }
174 
175 static __inline paddr_t
176 xpmap_ptom(paddr_t ppa)
177 {
178 	return (xpmap_phys_to_machine_mapping[(ppa -
179 	    (KERNTEXTOFF - KERNBASE_LOCORE)) >> PAGE_SHIFT] << PAGE_SHIFT)
180 		| (ppa & ~PG_FRAME);
181 }
182 
183 static __inline paddr_t
184 xpmap_ptom_masked(paddr_t ppa)
185 {
186 	return (xpmap_phys_to_machine_mapping[(ppa -
187 	    (KERNTEXTOFF - KERNBASE_LOCORE)) >> PAGE_SHIFT] << PAGE_SHIFT);
188 }
189 
190 #endif /* _XEN_XENPMAP_H_ */
191