/*	$NetBSD: xenpmap.h,v 1.31 2011/11/08 17:16:52 cherry Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */ 28 29 30 #ifndef _XEN_XENPMAP_H_ 31 #define _XEN_XENPMAP_H_ 32 33 #ifdef _KERNEL_OPT 34 #include "opt_xen.h" 35 #endif 36 37 #define INVALID_P2M_ENTRY (~0UL) 38 39 void xpq_queue_machphys_update(paddr_t, paddr_t); 40 void xpq_queue_invlpg(vaddr_t); 41 void xpq_queue_pte_update(paddr_t, pt_entry_t); 42 void xpq_queue_pt_switch(paddr_t); 43 void xpq_flush_queue(void); 44 void xpq_queue_set_ldt(vaddr_t, uint32_t); 45 void xpq_queue_tlb_flush(void); 46 void xpq_queue_pin_table(paddr_t, int); 47 void xpq_queue_unpin_table(paddr_t); 48 int xpq_update_foreign(paddr_t, pt_entry_t, int); 49 void xen_vcpu_mcast_invlpg(vaddr_t, vaddr_t, uint32_t); 50 void xen_vcpu_bcast_invlpg(vaddr_t, vaddr_t); 51 void xen_mcast_tlbflush(uint32_t); 52 void xen_bcast_tlbflush(void); 53 void xen_mcast_invlpg(vaddr_t, uint32_t); 54 void xen_bcast_invlpg(vaddr_t); 55 56 57 #define xpq_queue_pin_l1_table(pa) \ 58 xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE) 59 #define xpq_queue_pin_l2_table(pa) \ 60 xpq_queue_pin_table(pa, MMUEXT_PIN_L2_TABLE) 61 #define xpq_queue_pin_l3_table(pa) \ 62 xpq_queue_pin_table(pa, MMUEXT_PIN_L3_TABLE) 63 #define xpq_queue_pin_l4_table(pa) \ 64 xpq_queue_pin_table(pa, MMUEXT_PIN_L4_TABLE) 65 66 extern unsigned long *xpmap_phys_to_machine_mapping; 67 68 /* 69 * On Xen-2, the start of the day virtual memory starts at KERNTEXTOFF 70 * (0xc0100000). On Xen-3 for domain0 it starts at KERNBASE (0xc0000000). 71 * So the offset between physical and virtual address is different on 72 * Xen-2 and Xen-3 for domain0. 73 * starting with xen-3.0.2, we can add notes so that virtual memory starts 74 * at KERNBASE for domU as well. 
75 */ 76 #if defined(DOM0OPS) || !defined(XEN_COMPAT_030001) 77 #define XPMAP_OFFSET 0 78 #else 79 #define XPMAP_OFFSET (KERNTEXTOFF - KERNBASE) 80 #endif 81 82 #define mfn_to_pfn(mfn) (machine_to_phys_mapping[(mfn)]) 83 #define pfn_to_mfn(pfn) (xpmap_phys_to_machine_mapping[(pfn)]) 84 85 static __inline paddr_t 86 xpmap_mtop_masked(paddr_t mpa) 87 { 88 return ( 89 ((paddr_t)machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT) 90 + XPMAP_OFFSET); 91 } 92 93 static __inline paddr_t 94 xpmap_mtop(paddr_t mpa) 95 { 96 return (xpmap_mtop_masked(mpa) | (mpa & ~PG_FRAME)); 97 } 98 99 static __inline paddr_t 100 xpmap_ptom_masked(paddr_t ppa) 101 { 102 return (((paddr_t)xpmap_phys_to_machine_mapping[(ppa - 103 XPMAP_OFFSET) >> PAGE_SHIFT]) << PAGE_SHIFT); 104 } 105 106 static __inline paddr_t 107 xpmap_ptom(paddr_t ppa) 108 { 109 return (xpmap_ptom_masked(ppa) | (ppa & ~PG_FRAME)); 110 } 111 112 static inline void 113 MULTI_update_va_mapping( 114 multicall_entry_t *mcl, vaddr_t va, 115 pt_entry_t new_val, unsigned long flags) 116 { 117 mcl->op = __HYPERVISOR_update_va_mapping; 118 mcl->args[0] = va; 119 #if defined(__x86_64__) 120 mcl->args[1] = new_val; 121 mcl->args[2] = flags; 122 #else 123 mcl->args[1] = (new_val & 0xffffffff); 124 #ifdef PAE 125 mcl->args[2] = (new_val >> 32); 126 #else 127 mcl->args[2] = 0; 128 #endif 129 mcl->args[3] = flags; 130 #endif 131 } 132 133 static inline void 134 MULTI_update_va_mapping_otherdomain( 135 multicall_entry_t *mcl, vaddr_t va, 136 pt_entry_t new_val, unsigned long flags, domid_t domid) 137 { 138 mcl->op = __HYPERVISOR_update_va_mapping_otherdomain; 139 mcl->args[0] = va; 140 #if defined(__x86_64__) 141 mcl->args[1] = new_val; 142 mcl->args[2] = flags; 143 mcl->args[3] = domid; 144 #else 145 mcl->args[1] = (new_val & 0xffffffff); 146 #ifdef PAE 147 mcl->args[2] = (new_val >> 32); 148 #else 149 mcl->args[2] = 0; 150 #endif 151 mcl->args[3] = flags; 152 mcl->args[4] = domid; 153 #endif 154 } 155 #if defined(__x86_64__) 156 
#define MULTI_UVMFLAGS_INDEX 2 157 #define MULTI_UVMDOMID_INDEX 3 158 #else 159 #define MULTI_UVMFLAGS_INDEX 3 160 #define MULTI_UVMDOMID_INDEX 4 161 #endif 162 163 #if defined(__x86_64__) 164 void xen_set_user_pgd(paddr_t); 165 #endif 166 167 #endif /* _XEN_XENPMAP_H_ */ 168