/* $NetBSD: hypervisor_machdep.c,v 1.21 2011/12/27 07:47:00 cherry Exp $ */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/******************************************************************************
 * hypervisor.c
 *
 * Communication to/from hypervisor.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.21 2011/12/27 07:47:00 cherry Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pmap.h>

#include <xen/xen.h>
#include <xen/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/xenpmap.h>

#include "opt_xen.h"

/*
 * arch-dependent p2m frame lists list (L3 and L2)
 * used by Xen for save/restore mappings
 */
static unsigned long * l3_p2m_page;
static unsigned long * l2_p2m_page;
static int l2_p2m_page_size; /* size of L2 page, in pages */

static void build_p2m_frame_list_list(void);
static void update_p2m_frame_list_list(void);

// #define PORT_DEBUG 4
// #define EARLY_DEBUG_EVENT

/* callback function type */
typedef void (*iterate_func_t)(struct cpu_info *, unsigned int,
    unsigned int, unsigned int, void *);

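/*
 * Scan the two-level pending bitmap: the selector word *pendingl1 is
 * consumed atomically, each set bit in it selects one word of
 * pendingl2, and every event that is pending, enabled in
 * ci->ci_evtmask and not masked (when a mask array is given) is handed
 * to the iterate_pending callback.  The selected bits are masked and
 * cleared before the callbacks run.  The event channel number is
 * reconstructed as (l1i << LONG_SHIFT) + l2i; e.g. with 64-bit longs
 * (LONG_SHIFT == 6) selector bit 1 and word bit 6 name port 70.
 */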
static inline void
evt_iterate_bits(struct cpu_info *ci, volatile unsigned long *pendingl1,
    volatile unsigned long *pendingl2,
    volatile unsigned long *mask,
    iterate_func_t iterate_pending, void *iterate_args)
{

        KASSERT(pendingl1 != NULL);
        KASSERT(pendingl2 != NULL);

        unsigned long l1, l2;
        unsigned int l1i, l2i, port;

        l1 = xen_atomic_xchg(pendingl1, 0);
        while ((l1i = xen_ffs(l1)) != 0) {
                l1i--;
                l1 &= ~(1UL << l1i);

                l2 = pendingl2[l1i] & (mask != NULL ? ~mask[l1i] : -1UL);
                l2 &= ci->ci_evtmask[l1i];

                if (mask != NULL) xen_atomic_setbits_l(&mask[l1i], l2);
                xen_atomic_clearbits_l(&pendingl2[l1i], l2);

                while ((l2i = xen_ffs(l2)) != 0) {
                        l2i--;
                        l2 &= ~(1UL << l2i);

                        port = (l1i << LONG_SHIFT) + l2i;

                        iterate_pending(ci, port, l1i, l2i, iterate_args);
                }
        }
}

/*
 * Set per-cpu "pending" information for outstanding events that
 * cannot be processed now.
 */

static inline void
evt_set_pending(struct cpu_info *ci, unsigned int port, unsigned int l1i,
    unsigned int l2i, void *args)
{

        KASSERT(args != NULL);
        KASSERT(ci != NULL);

        int *ret = args;

        if (evtsource[port]) {
                hypervisor_set_ipending(evtsource[port]->ev_cpu,
                    evtsource[port]->ev_imask, l1i, l2i);
                evtsource[port]->ev_evcnt.ev_count++;
                if (*ret == 0 && ci->ci_ilevel <
                    evtsource[port]->ev_maxlevel)
                        *ret = 1;
        }
#ifdef DOM0OPS
        else {
                /* set pending event */
                xenevt_setipending(l1i, l2i);
        }
#endif
}

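/*
 * Record pending event channels as software-pending interrupts instead
 * of handling them right away.  Returns nonzero when an event with a
 * level above the current ci_ilevel was deferred, i.e. when the caller
 * has interrupt work to process.
 */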
int stipending(void);
int
stipending(void)
{
        volatile shared_info_t *s = HYPERVISOR_shared_info;
        struct cpu_info *ci;
        volatile struct vcpu_info *vci;
        int ret;

        ret = 0;
        ci = curcpu();
        vci = ci->ci_vcpu;

#if 0
        if (HYPERVISOR_shared_info->events)
                printf("stipending events %08lx mask %08lx ilevel %d\n",
                    HYPERVISOR_shared_info->events,
                    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel);
#endif

#ifdef EARLY_DEBUG_EVENT
        if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
                xen_debug_handler(NULL);
                xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
        }
#endif

        /*
         * we're only called after STIC, so we know that we'll have to
         * STI at the end
         */

        while (vci->evtchn_upcall_pending) {
                cli();

                vci->evtchn_upcall_pending = 0;

                evt_iterate_bits(ci, &vci->evtchn_pending_sel,
                    s->evtchn_pending, s->evtchn_mask,
                    evt_set_pending, &ret);

                sti();
        }

#if 0
        if (ci->ci_ipending & 0x1)
                printf("stipending events %08lx mask %08lx ilevel %d ipending %08x\n",
                    HYPERVISOR_shared_info->events,
                    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel,
                    ci->ci_ipending);
#endif

        return (ret);
}

/* Iterate through pending events and call the event handler */

static inline void
evt_do_hypervisor_callback(struct cpu_info *ci, unsigned int port,
    unsigned int l1i, unsigned int l2i, void *args)
{
        KASSERT(args != NULL);
        KASSERT(ci == curcpu());

        struct intrframe *regs = args;

#ifdef PORT_DEBUG
        if (port == PORT_DEBUG)
                printf("do_hypervisor_callback event %d\n", port);
#endif
        if (evtsource[port])
                call_evtchn_do_event(port, regs);
#ifdef DOM0OPS
        else {
                if (ci->ci_ilevel < IPL_HIGH) {
                        /* fast path */
                        int oipl = ci->ci_ilevel;
                        ci->ci_ilevel = IPL_HIGH;
                        call_xenevt_event(port);
                        ci->ci_ilevel = oipl;
                } else {
                        /* set pending event */
                        xenevt_setipending(l1i, l2i);
                }
        }
#endif
}

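/*
 * Hypervisor event callback: drain vci->evtchn_upcall_pending and
 * dispatch every pending, unmasked event on this CPU through
 * evt_do_hypervisor_callback().
 */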
void
do_hypervisor_callback(struct intrframe *regs)
{
        volatile shared_info_t *s = HYPERVISOR_shared_info;
        struct cpu_info *ci;
        volatile struct vcpu_info *vci;
        int level;

        ci = curcpu();
        vci = ci->ci_vcpu;
        level = ci->ci_ilevel;

        // DDD printf("do_hypervisor_callback\n");

#ifdef EARLY_DEBUG_EVENT
        if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
                xen_debug_handler(NULL);
                xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
        }
#endif

        while (vci->evtchn_upcall_pending) {
                vci->evtchn_upcall_pending = 0;

                evt_iterate_bits(ci, &vci->evtchn_pending_sel,
                    s->evtchn_pending, s->evtchn_mask,
                    evt_do_hypervisor_callback, regs);
        }

#ifdef DIAGNOSTIC
        if (level != ci->ci_ilevel)
                printf("hypervisor done %08x level %d/%d ipending %08x\n",
                    (uint)vci->evtchn_pending_sel,
                    level, ci->ci_ilevel, ci->ci_ipending);
#endif
}

void
hypervisor_send_event(struct cpu_info *ci, unsigned int ev)
{
        KASSERT(ci != NULL);

        volatile shared_info_t *s = HYPERVISOR_shared_info;
        volatile struct vcpu_info *vci = ci->ci_vcpu;

#ifdef PORT_DEBUG
        if (ev == PORT_DEBUG)
                printf("hypervisor_send_event %d\n", ev);
#endif

        xen_atomic_set_bit(&s->evtchn_pending[0], ev);

        if (__predict_false(ci == curcpu())) {
                xen_atomic_set_bit(&vci->evtchn_pending_sel,
                    ev >> LONG_SHIFT);
                xen_atomic_set_bit(&vci->evtchn_upcall_pending, 0);
        }

        xen_atomic_clear_bit(&s->evtchn_mask[0], ev);

        if (__predict_true(ci == curcpu())) {
                hypervisor_force_callback();
        } else {
                if (__predict_false(xen_send_ipi(ci, XEN_IPI_HVCB))) {
                        panic("xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n",
                            (int) ci->ci_cpuid);
                }
        }
}

void
hypervisor_unmask_event(unsigned int ev)
{
        volatile shared_info_t *s = HYPERVISOR_shared_info;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;
        volatile struct vcpu_info *vci;

#ifdef PORT_DEBUG
        if (ev == PORT_DEBUG)
                printf("hypervisor_unmask_event %d\n", ev);
#endif

        xen_atomic_clear_bit(&s->evtchn_mask[0], ev);
        /*
         * The following is basically the equivalent of
         * 'hw_resend_irq'. Just like a real IO-APIC we 'lose the
         * interrupt edge' if the channel is masked.
         */
        if (!xen_atomic_test_bit(&s->evtchn_pending[0], ev))
                return;

        for (CPU_INFO_FOREACH(cii, ci)) {
                if (!xen_atomic_test_bit(&ci->ci_evtmask[0], ev))
                        continue;
                vci = ci->ci_vcpu;
                if (__predict_true(ci == curcpu())) {
                        if (!xen_atomic_test_and_set_bit(&vci->evtchn_pending_sel,
                            ev>>LONG_SHIFT))
                                xen_atomic_set_bit(&vci->evtchn_upcall_pending, 0);
                }
                if (!vci->evtchn_upcall_mask) {
                        if (__predict_true(ci == curcpu())) {
                                hypervisor_force_callback();
                        } else {
                                if (__predict_false(
                                    xen_send_ipi(ci, XEN_IPI_HVCB))) {
                                        panic("xen_send_ipi(cpu%d, "
                                            "XEN_IPI_HVCB) failed\n",
                                            (int) ci->ci_cpuid);
                                }
                        }
                }
        }
}

void
hypervisor_mask_event(unsigned int ev)
{
        volatile shared_info_t *s = HYPERVISOR_shared_info;
#ifdef PORT_DEBUG
        if (ev == PORT_DEBUG)
                printf("hypervisor_mask_event %d\n", ev);
#endif

        xen_atomic_set_bit(&s->evtchn_mask[0], ev);
}

void
hypervisor_clear_event(unsigned int ev)
{
        volatile shared_info_t *s = HYPERVISOR_shared_info;
#ifdef PORT_DEBUG
        if (ev == PORT_DEBUG)
                printf("hypervisor_clear_event %d\n", ev);
#endif

        xen_atomic_clear_bit(&s->evtchn_pending[0], ev);
}

static inline void
evt_enable_event(struct cpu_info *ci, unsigned int port,
    unsigned int l1i, unsigned int l2i, void *args)
{
        KASSERT(ci != NULL);
        KASSERT(args == NULL);
        hypervisor_enable_event(port);
}

void
hypervisor_enable_ipl(unsigned int ipl)
{
        struct cpu_info *ci = curcpu();

        /*
         * enable all events for ipl. As we only set an event in ipl_evt_mask
         * for its lowest IPL, and pending IPLs are processed high to low,
         * we know that all callbacks for this event have been processed.
         */

        evt_iterate_bits(ci, &ci->ci_isources[ipl]->ipl_evt_mask1,
            ci->ci_isources[ipl]->ipl_evt_mask2, NULL,
            evt_enable_event, NULL);

}

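/*
 * Mark the IPLs in iplmask software-pending on ci and remember the
 * event's (l1, l2) bit position in the lowest such IPL's ipl_evt_mask,
 * so hypervisor_enable_ipl() can re-enable the channel once that IPL
 * has been processed.  A remote ci is notified with an XEN_IPI_HVCB IPI.
 */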
void
hypervisor_set_ipending(struct cpu_info *ci, uint32_t iplmask, int l1, int l2)
{
        int ipl;

        /* set pending bit for the appropriate IPLs */
        ci->ci_ipending |= iplmask;

        /*
         * And set event pending bit for the lowest IPL. As IPLs are handled
         * from high to low, this ensures that all callbacks will have been
         * called when we ack the event
         */
        ipl = ffs(iplmask);
        KASSERT(ipl > 0);
        ipl--;
        KASSERT(ipl < NIPL);
        KASSERT(ci->ci_isources[ipl] != NULL);
        ci->ci_isources[ipl]->ipl_evt_mask1 |= 1UL << l1;
        ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
        if (__predict_false(ci != curcpu())) {
                if (xen_send_ipi(ci, XEN_IPI_HVCB)) {
                        panic("hypervisor_set_ipending: "
                            "xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n",
                            (int) ci->ci_cpuid);
                }
        }
}

void
hypervisor_machdep_attach(void)
{
        /* dom0 does not require the arch-dependent P2M translation table */
        if (!xendomain_is_dom0()) {
                build_p2m_frame_list_list();
                sysctl_xen_suspend_setup();
        }
}

void
hypervisor_machdep_resume(void)
{
        /* dom0 does not require the arch-dependent P2M translation table */
        if (!xendomain_is_dom0())
                update_p2m_frame_list_list();
}

/*
 * Generate the p2m_frame_list_list table,
 * needed for guest save/restore
 */
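/*
 * Sizing example (assuming 4 KB pages and an 8-byte xen_pfn_t, as on
 * amd64): fpp is 512, so the single L3 page references up to 512 L2
 * pages, each L2 page covers 512 L1 (p2m) pages of 512 PFNs each, and
 * one L2 page therefore describes 512 * 512 pages, i.e. 1 GB of guest
 * memory.
 */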
static void
build_p2m_frame_list_list(void)
{
        int fpp; /* number of page (frame) pointers per page */
        unsigned long max_pfn;
        /*
         * The p2m list is composed of three levels of indirection,
         * each layer containing MFNs pointing to lower level pages.
         * The indirection is used to convert a given PFN to its MFN.
         * Each N level page can point to @fpp (N-1) level pages.
         * For example, for x86 32bit, we have:
         * - PAGE_SIZE: 4096 bytes
         * - fpp: 1024 (one L3 page can address 1024 L2 pages)
         * An L1 page contains the list of MFNs we are looking for.
         */
        max_pfn = xen_start_info.nr_pages;
        fpp = PAGE_SIZE / sizeof(xen_pfn_t);

        /* we only need one L3 page */
        l3_p2m_page = (vaddr_t *)uvm_km_alloc(kernel_map, PAGE_SIZE,
            PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_NOWAIT);
        if (l3_p2m_page == NULL)
                panic("could not allocate memory for l3_p2m_page");

        /*
         * Determine how many L2 pages we need for the mapping.
         * Each L2 page can map a total of @fpp L1 pages.
         */
        l2_p2m_page_size = howmany(max_pfn, fpp);

        l2_p2m_page = (vaddr_t *)uvm_km_alloc(kernel_map,
            l2_p2m_page_size * PAGE_SIZE,
            PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_NOWAIT);
        if (l2_p2m_page == NULL)
                panic("could not allocate memory for l2_p2m_page");

        /* We now have L3 and L2 pages ready, update L1 mapping */
        update_p2m_frame_list_list();

}

/*
 * Update the L1 p2m_frame_list_list mapping (during guest boot or resume)
 */
static void
update_p2m_frame_list_list(void)
{
        int i;
        int fpp; /* number of page (frame) pointers per page */
        unsigned long max_pfn;

        max_pfn = xen_start_info.nr_pages;
        fpp = PAGE_SIZE / sizeof(xen_pfn_t);

        for (i = 0; i < l2_p2m_page_size; i++) {
                /*
                 * Each time we start a new L2 page,
                 * store its MFN in the L3 page
                 */
                if ((i % fpp) == 0) {
                        l3_p2m_page[i/fpp] = vtomfn(
                            (vaddr_t)&l2_p2m_page[i]);
                }
                /*
                 * We use a shortcut here: since the
                 * @xpmap_phys_to_machine_mapping array already contains
                 * the PFN to MFN mapping, we just set the l2_p2m_page MFN
                 * pointer to the MFN of the corresponding frame of
                 * @xpmap_phys_to_machine_mapping.
                 */
                l2_p2m_page[i] = vtomfn((vaddr_t)
                    &xpmap_phys_to_machine_mapping[i*fpp]);
        }

        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
            vtomfn((vaddr_t)l3_p2m_page);
        HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

}