/*	$NetBSD: xenpmap.h,v 1.38 2014/05/06 04:26:24 cherry Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif

#include <sys/types.h>
#include <sys/kcpuset.h>

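/*
 * A P2M slot with no backing machine frame holds this all-ones
 * sentinel; see xpmap_ptom_unmap() and xpmap_ptom_isvalid() below.
 */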
#define	INVALID_P2M_ENTRY	(~0UL)

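/*
 * Deferred MMU operations.  The xpq_queue_* functions batch page
 * table updates destined for the hypervisor; nothing is guaranteed
 * to take effect until xpq_flush_queue() submits the pending queue
 * to Xen.
 */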
void xpq_queue_machphys_update(paddr_t, paddr_t);
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pte_update(paddr_t, pt_entry_t);
void xpq_queue_pt_switch(paddr_t);
void xpq_flush_queue(void);
void xpq_queue_set_ldt(vaddr_t, uint32_t);
void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t, int);
void xpq_queue_unpin_table(paddr_t);
int  xpq_update_foreign(paddr_t, pt_entry_t, int);
void xen_vcpu_mcast_invlpg(vaddr_t, vaddr_t, kcpuset_t *);
void xen_vcpu_bcast_invlpg(vaddr_t, vaddr_t);
void xen_mcast_tlbflush(kcpuset_t *);
void xen_bcast_tlbflush(void);
void xen_mcast_invlpg(vaddr_t, kcpuset_t *);
void xen_bcast_invlpg(vaddr_t);
void xen_copy_page(paddr_t, paddr_t);
void xen_pagezero(paddr_t);
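
/*
 * Hypothetical usage sketch (values invented): queue one or more
 * operations, then flush the batch in a single hypercall.
 *
 *	xpq_queue_pte_update(ptepa, npte);	// record the PTE update
 *	xpq_queue_invlpg(va);			// queue the TLB flush
 *	xpq_flush_queue();			// submit the batch to Xen
 */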

void pmap_xen_resume(void);
void pmap_xen_suspend(void);
void pmap_map_recursive_entries(void);
void pmap_unmap_recursive_entries(void);

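/*
 * Propagate a kernel pmap top-level entry update to the per-CPU
 * copies of the upper page tables, where such copies exist.
 */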
#if defined(PAE) || defined(__x86_64__)
void xen_kpm_sync(struct pmap *, int);
#endif /* PAE || __x86_64__ */

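/*
 * Pinning asks Xen to validate a page as a page table of the given
 * level and keep it type-locked until it is unpinned again.
 */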
#define xpq_queue_pin_l1_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE)
#define xpq_queue_pin_l2_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L2_TABLE)
#define xpq_queue_pin_l3_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L3_TABLE)
#define xpq_queue_pin_l4_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L4_TABLE)

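/*
 * Pseudo-physical <-> machine frame translation.  The guest-owned
 * xpmap_phys_to_machine_mapping array maps pseudo-physical frame
 * numbers to machine frame numbers; the reverse lookup goes through
 * the Xen-provided machine_to_phys_mapping table.
 */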
extern unsigned long *xpmap_phys_to_machine_mapping;

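/* mtop: machine to pseudo-physical; _masked drops the page offset. */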
static __inline paddr_t
xpmap_mtop_masked(paddr_t mpa)
{
	return (
	    (paddr_t)machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT);
}

static __inline paddr_t
xpmap_mtop(paddr_t mpa)
{
	return (xpmap_mtop_masked(mpa) | (mpa & ~PG_FRAME));
}

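/* ptom: pseudo-physical to machine; _masked drops the page offset. */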
static __inline paddr_t
xpmap_ptom_masked(paddr_t ppa)
{
	return (
	    (paddr_t)xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT]
	    << PAGE_SHIFT);
}

static __inline paddr_t
xpmap_ptom(paddr_t ppa)
{
	return (xpmap_ptom_masked(ppa) | (ppa & ~PG_FRAME));
}

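/* P2M maintenance: install, invalidate and test a translation. */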
static __inline void
xpmap_ptom_map(paddr_t ppa, paddr_t mpa)
{
	xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT] = mpa >> PAGE_SHIFT;
}

static __inline void
xpmap_ptom_unmap(paddr_t ppa)
{
	xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT] = INVALID_P2M_ENTRY;
}

static __inline bool
xpmap_ptom_isvalid(paddr_t ppa)
{
	return (
	    xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT]
	    != INVALID_P2M_ENTRY);
}

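/*
 * Fill in one multicall entry for an update_va_mapping hypercall.
 * On 32-bit (non-LP64) guests the 64-bit PTE value is split across
 * two argument slots, which shifts the flags/domid arguments up by
 * one; the MULTI_UVM*_INDEX macros below encode that difference.
 */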
static inline void
MULTI_update_va_mapping(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
#else
	mcl->args[1] = (new_val & 0xffffffff);
#ifdef PAE
	mcl->args[2] = (new_val >> 32);
#else
	mcl->args[2] = 0;
#endif
	mcl->args[3] = flags;
#endif
}

static inline void
MULTI_update_va_mapping_otherdomain(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags, domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
	mcl->args[3] = domid;
#else
	mcl->args[1] = (new_val & 0xffffffff);
#ifdef PAE
	mcl->args[2] = (new_val >> 32);
#else
	mcl->args[2] = 0;
#endif
	mcl->args[3] = flags;
	mcl->args[4] = domid;
#endif
}
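
/*
 * Argument slots of the flags and domid parameters in the multicall
 * entries built above.  A hypothetical batched caller (values
 * invented) might look like:
 *
 *	multicall_entry_t mcl[2];
 *	MULTI_update_va_mapping(&mcl[0], va0, pte0, UVMF_INVLPG);
 *	MULTI_update_va_mapping(&mcl[1], va1, pte1, UVMF_INVLPG);
 *	HYPERVISOR_multicall(mcl, 2);
 */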
#if defined(__x86_64__)
#define MULTI_UVMFLAGS_INDEX 2
#define MULTI_UVMDOMID_INDEX 3
#else
#define MULTI_UVMFLAGS_INDEX 3
#define MULTI_UVMDOMID_INDEX 4
#endif

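/*
 * amd64 PV guests run user and kernel mode on separate top-level
 * page tables; this installs the user-mode one.
 */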
#if defined(__x86_64__)
void xen_set_user_pgd(paddr_t);
#endif

#endif /* _XEN_XENPMAP_H_ */