/*	$NetBSD: xen_pmap.c,v 1.3 2011/06/12 03:35:50 rmind Exp $	*/

/*
 * Copyright (c) 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright 2001 (c) Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.3 2011/06/12 03:35:50 rmind Exp $");

#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#if !defined(__x86_64__)
#include "opt_kstack_dr0.h"
#endif /* !defined(__x86_64__) */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/xcall.h>

#include <uvm/uvm.h>

#include <dev/isa/isareg.h>

#include <machine/specialreg.h>
#include <machine/gdt.h>
#include <machine/isa_machdep.h>
#include <machine/cpuvar.h>

#include <x86/pmap.h>
#include <x86/pmap_pv.h>

#include <x86/i82489reg.h>
#include <x86/i82489var.h>

#ifdef XEN
#include <xen/xen3-public/xen.h>
#include <xen/hypervisor.h>
#endif

/* flag to be used for kernel mappings: PG_u on Xen/amd64, 0 otherwise */
#if defined(XEN) && defined(__x86_64__)
#define PG_k PG_u
#else
#define PG_k 0
#endif

#define COUNT(x) /* nothing */

static pd_entry_t * const alternate_pdes[] = APDES_INITIALIZER;
extern pd_entry_t * const normal_pdes[];

extern paddr_t pmap_pa_start;	/* PA of first physical page for this domain */
extern paddr_t pmap_pa_end;	/* PA of last physical page for this domain */
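
/*
 * Illustrative sketch, compiled out: this block is not part of the
 * original file or the build.  It shows what the PG_k flag above buys
 * us: on Xen/amd64 the kernel runs in ring 3, so "kernel" PTEs must
 * carry PG_u, while on every other configuration PG_k expands to 0 and
 * drops out.  The helper name and its arguments are hypothetical;
 * compare pmap_kenter_ma() later in this file for the real thing.
 */
#if 0
static void
example_kernel_pte(vaddr_t va, paddr_t ma)
{
	pt_entry_t npte;

	/* writable, valid kernel mapping; PG_k supplies PG_u on Xen/amd64 */
	npte = ma | PG_RW | PG_V | PG_k;
	pmap_pte_set(kvtopte(va), npte);	/* queued MMU update on Xen */
	pmap_pte_flush();		/* push the queue to the hypervisor */
}
#endif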

void
pmap_apte_flush(struct pmap *pmap)
{

	KASSERT(kpreempt_disabled());

	/*
	 * Flush the APTE mapping from all other CPUs that
	 * are using the pmap we are using (whose APTE space
	 * is the one we've just modified).
	 *
	 * XXXthorpej -- find a way to defer the IPI.
	 */
	pmap_tlb_shootdown(pmap, (vaddr_t)-1LL, 0, TLBSHOOT_APTE);
	pmap_tlb_shootnow();
}

/*
 * Unmap the contents of the APDP PDEs
 */
void
pmap_unmap_apdp(void)
{
	int i;

	for (i = 0; i < PDP_SIZE; i++) {
		pmap_pte_set(APDP_PDE+i, 0);
#if defined(PAE)
		/* clear shadow entries too */
		pmap_pte_set(APDP_PDE_SHADOW+i, 0);
#endif
	}
}

/*
 * pmap_map_ptes: map a pmap's PTEs into KVM and lock them in
 *
 * => we lock enough pmaps to keep things locked in
 * => must be undone with pmap_unmap_ptes before returning
 */

void
pmap_map_ptes(struct pmap *pmap, struct pmap **pmap2,
	      pd_entry_t **ptepp, pd_entry_t * const **pdeppp)
{
	pd_entry_t opde, npde;
	struct pmap *ourpmap;
	struct cpu_info *ci;
	struct lwp *l;
	bool iscurrent;
	uint64_t ncsw;
	int s;

	/* the kernel's pmap is always accessible */
	if (pmap == pmap_kernel()) {
		*pmap2 = NULL;
		*ptepp = PTE_BASE;
		*pdeppp = normal_pdes;
		return;
	}
	KASSERT(kpreempt_disabled());

 retry:
	l = curlwp;
	ncsw = l->l_ncsw;
	ourpmap = NULL;
	ci = curcpu();
#if defined(__x86_64__)
	/*
	 * curpmap can only be pmap_kernel() so at this point
	 * pmap_is_curpmap() is always false
	 */
	iscurrent = 0;
	ourpmap = pmap_kernel();
#else /* __x86_64__ */
	if (ci->ci_want_pmapload &&
	    vm_map_pmap(&l->l_proc->p_vmspace->vm_map) == pmap) {
		pmap_load();
		if (l->l_ncsw != ncsw)
			goto retry;
	}
	iscurrent = pmap_is_curpmap(pmap);
	/* if curpmap then we are always mapped */
	if (iscurrent) {
		mutex_enter(pmap->pm_lock);
		*pmap2 = NULL;
		*ptepp = PTE_BASE;
		*pdeppp = normal_pdes;
		goto out;
	}
	ourpmap = ci->ci_pmap;
#endif /* __x86_64__ */

	/* need to lock both curpmap and pmap: use ordered locking */
	pmap_reference(ourpmap);
	if ((uintptr_t) pmap < (uintptr_t) ourpmap) {
		mutex_enter(pmap->pm_lock);
		mutex_enter(ourpmap->pm_lock);
	} else {
		mutex_enter(ourpmap->pm_lock);
		mutex_enter(pmap->pm_lock);
	}

	if (l->l_ncsw != ncsw)
		goto unlock_and_retry;

	/* need to load a new alternate pt space into curpmap? */
	COUNT(apdp_pde_map);
	opde = *APDP_PDE;
	if (!pmap_valid_entry(opde) ||
	    pmap_pte2pa(opde) != pmap_pdirpa(pmap, 0)) {
		int i;
		s = splvm();
		/* Make recursive entry usable in user PGD */
		for (i = 0; i < PDP_SIZE; i++) {
			npde = pmap_pa2pte(
			    pmap_pdirpa(pmap, i * NPDPG)) | PG_k | PG_V;
			xpq_queue_pte_update(
			    xpmap_ptom(pmap_pdirpa(pmap, PDIR_SLOT_PTE + i)),
			    npde);
			xpq_queue_pte_update(xpmap_ptetomach(&APDP_PDE[i]),
			    npde);
#ifdef PAE
			/* update shadow entry too */
			xpq_queue_pte_update(
			    xpmap_ptetomach(&APDP_PDE_SHADOW[i]), npde);
#endif /* PAE */
			xpq_queue_invlpg(
			    (vaddr_t)&pmap->pm_pdir[PDIR_SLOT_PTE + i]);
		}
		if (pmap_valid_entry(opde))
			pmap_apte_flush(ourpmap);
		splx(s);
	}
	*pmap2 = ourpmap;
	*ptepp = APTE_BASE;
	*pdeppp = alternate_pdes;
	KASSERT(l->l_ncsw == ncsw);
#if !defined(__x86_64__)
 out:
#endif
	/*
	 * might have blocked, need to retry?
	 */
	if (l->l_ncsw != ncsw) {
 unlock_and_retry:
		if (ourpmap != NULL) {
			mutex_exit(ourpmap->pm_lock);
			pmap_destroy(ourpmap);
		}
		mutex_exit(pmap->pm_lock);
		goto retry;
	}
}

/*
 * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
 */

void
pmap_unmap_ptes(struct pmap *pmap, struct pmap *pmap2)
{

	if (pmap == pmap_kernel()) {
		return;
	}
	KASSERT(kpreempt_disabled());
	if (pmap2 == NULL) {
		mutex_exit(pmap->pm_lock);
	} else {
#if defined(__x86_64__)
		KASSERT(pmap2 == pmap_kernel());
#else
		KASSERT(curcpu()->ci_pmap == pmap2);
#endif
#if defined(MULTIPROCESSOR)
		pmap_unmap_apdp();
		pmap_pte_flush();
		pmap_apte_flush(pmap2);
#endif /* MULTIPROCESSOR */
		COUNT(apdp_pde_unmap);
		mutex_exit(pmap->pm_lock);
		mutex_exit(pmap2->pm_lock);
		pmap_destroy(pmap2);
	}
}
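
/*
 * Illustrative sketch, compiled out: this block is not part of the
 * original file or the build.  It shows the canonical bracket around
 * the pair above: preemption stays disabled for the whole window, and
 * every pmap_map_ptes() is matched by a pmap_unmap_ptes() before
 * returning.  The helper name is hypothetical; pmap_extract_ma() at
 * the end of this file follows exactly this pattern.
 */
#if 0
static pt_entry_t
example_read_pte(struct pmap *pmap, vaddr_t va)
{
	pt_entry_t *ptes, pte;
	pd_entry_t pde;
	pd_entry_t * const *pdes;
	struct pmap *pmap2;

	kpreempt_disable();
	pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);	/* locks pmap */
	if (pmap_pdes_valid(va, pdes, &pde))
		pte = ptes[pl1_i(va)];		/* ... use the PTEs ... */
	else
		pte = 0;
	pmap_unmap_ptes(pmap, pmap2);			/* unlocks pmap */
	kpreempt_enable();
	return pte;
}
#endif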

int
pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
    u_int flags)
{
	paddr_t ma;

	if (__predict_false(pa < pmap_pa_start || pmap_pa_end <= pa)) {
		ma = pa; /* XXX hack */
	} else {
		ma = xpmap_ptom(pa);
	}

	return pmap_enter_ma(pmap, va, ma, pa, prot, flags, DOMID_SELF);
}

/*
 * pmap_kenter_ma: enter a kernel mapping without R/M (pv_entry) tracking
 *
 * => no need to lock anything, assume va is already allocated
 * => should be faster than normal pmap enter function
 * => we expect a MACHINE address
 */

void
pmap_kenter_ma(vaddr_t va, paddr_t ma, vm_prot_t prot, u_int flags)
{
	pt_entry_t *pte, opte, npte;

	if (va < VM_MIN_KERNEL_ADDRESS)
		pte = vtopte(va);
	else
		pte = kvtopte(va);

	npte = ma | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
	    PG_V | PG_k;
	if (flags & PMAP_NOCACHE)
		npte |= PG_N;

	if ((cpu_feature[2] & CPUID_NOX) && !(prot & VM_PROT_EXECUTE))
		npte |= PG_NX;

	opte = pmap_pte_testset(pte, npte); /* zap! */

	if (pmap_valid_entry(opte)) {
#if defined(MULTIPROCESSOR)
		kpreempt_disable();
		pmap_tlb_shootdown(pmap_kernel(), va, opte, TLBSHOOT_KENTER);
		kpreempt_enable();
#else
		/* Don't bother deferring in the single CPU case. */
		pmap_update_pg(va);
#endif
	}
}

/*
 * pmap_extract_ma: extract a MA for the given VA
 */

bool
pmap_extract_ma(struct pmap *pmap, vaddr_t va, paddr_t *pap)
{
	pt_entry_t *ptes, pte;
	pd_entry_t pde;
	pd_entry_t * const *pdes;
	struct pmap *pmap2;

	kpreempt_disable();
	pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
	if (!pmap_pdes_valid(va, pdes, &pde)) {
		pmap_unmap_ptes(pmap, pmap2);
		kpreempt_enable();
		return false;
	}

	pte = ptes[pl1_i(va)];
	pmap_unmap_ptes(pmap, pmap2);
	kpreempt_enable();

	if (__predict_true((pte & PG_V) != 0)) {
		if (pap != NULL)
			*pap = (pte & PG_FRAME) | (va & (NBPD_L1 - 1));
		return true;
	}

	return false;
}
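
/*
 * Illustrative sketch, compiled out: this block is not part of the
 * original file or the build.  It ties the machine-address interfaces
 * above together: pmap_kenter_ma() installs an untracked kernel
 * mapping of a machine page (which, unlike pmap_enter() which performs
 * the PA->MA translation itself via xpmap_ptom(), need not lie in this
 * domain's pseudo-physical range), and pmap_extract_ma() reads the MA
 * back.  The helper name and the origin of "ma" are hypothetical.
 */
#if 0
static void
example_map_machine_page(vaddr_t va, paddr_t ma)
{
	paddr_t check;

	/* writable kernel mapping of the machine page, no pv tracking */
	pmap_kenter_ma(va, ma, VM_PROT_READ | VM_PROT_WRITE, 0);

	/* the installed translation can be read back as an MA */
	if (pmap_extract_ma(pmap_kernel(), va, &check))
		KASSERT(check == ma);
}
#endif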