15084Sjohnlev /*
25084Sjohnlev * CDDL HEADER START
35084Sjohnlev *
45084Sjohnlev * The contents of this file are subject to the terms of the
55084Sjohnlev * Common Development and Distribution License (the "License").
65084Sjohnlev * You may not use this file except in compliance with the License.
75084Sjohnlev *
85084Sjohnlev * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
95084Sjohnlev * or http://www.opensolaris.org/os/licensing.
105084Sjohnlev * See the License for the specific language governing permissions
115084Sjohnlev * and limitations under the License.
125084Sjohnlev *
135084Sjohnlev * When distributing Covered Code, include this CDDL HEADER in each
145084Sjohnlev * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
155084Sjohnlev * If applicable, add the following below this CDDL HEADER, with the
165084Sjohnlev * fields enclosed by brackets "[]" replaced with your own identifying
175084Sjohnlev * information: Portions Copyright [yyyy] [name of copyright owner]
185084Sjohnlev *
195084Sjohnlev * CDDL HEADER END
205084Sjohnlev */
215084Sjohnlev
225084Sjohnlev /*
23*10175SStuart.Maybee@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
245084Sjohnlev * Use is subject to license terms.
255084Sjohnlev */
265084Sjohnlev
275084Sjohnlev
285084Sjohnlev #include <sys/mach_mmu.h>
295084Sjohnlev #include <sys/machsystm.h>
305084Sjohnlev #include <sys/cmn_err.h>
315084Sjohnlev #include <sys/promif.h>
325084Sjohnlev #include <sys/hypervisor.h>
335084Sjohnlev #include <sys/bootconf.h>
345084Sjohnlev #include <sys/ontrap.h>
355084Sjohnlev #include <sys/rwlock.h>
365084Sjohnlev #include <sys/sysmacros.h>
375084Sjohnlev #include <vm/seg_kmem.h>
385084Sjohnlev #include <vm/kboot_mmu.h>
395084Sjohnlev #include <vm/hat_pte.h>
405084Sjohnlev #include <vm/hat.h>
415084Sjohnlev #include <vm/htable.h>
425084Sjohnlev #include <vm/hat_i86.h>
435084Sjohnlev
start_info_t *xen_info;		/* hypervisor-supplied start-of-day info */
ulong_t mfn_count;		/* number of entries in mfn_list */
mfn_t *mfn_list;		/* pfn -> mfn translation table */
mfn_t *mfn_list_pages;		/* pages that make a table of mfn's */
				/* that make up the pa_to_ma table */
mfn_t *mfn_list_pages_page;	/* page of mfn's for mfn_list_pages */
mfn_t cached_max_mfn;		/* cached XENMEM_maximum_ram_page; 0 = stale */
uintptr_t xen_virt_start;
pfn_t *mfn_to_pfn_mapping;	/* sparse mfn -> pfn table; reads may fault */
caddr_t xb_addr;		/* virtual addr for the store_mfn page */
545084Sjohnlev
555084Sjohnlev
565084Sjohnlev /*
575741Smrj * We need to prevent migration or suspension of a domU while it's
585741Smrj * manipulating MFN values, as the MFN values will spontaneously
595741Smrj * change. The next 4 routines provide a mechanism for that.
605741Smrj * The basic idea is to use reader/writer mutex, readers are any thread
615741Smrj * that is manipulating MFNs. Only the thread which is going to actually call
625741Smrj * HYPERVISOR_suspend() will become a writer.
635084Sjohnlev *
645741Smrj * Since various places need to manipulate MFNs and also call the HAT,
655741Smrj * we track if a thread acquires reader status and allow it to recursively
665741Smrj * do so again. This prevents deadlocks if a migration request
675741Smrj * is started and waits for some reader, but then the previous reader needs
685741Smrj * to call into the HAT.
695084Sjohnlev */
#define	NUM_M2P_LOCKS 128
static struct {
	krwlock_t m2p_rwlock;
	char m2p_pad[64 - sizeof (krwlock_t)];	/* 64 byte cache line size */
} m2p_lock[NUM_M2P_LOCKS];

/*
 * Hash a thread to one of the locks; a reader only ever takes its own
 * hashed lock, while the migrating writer must take all of them.
 */
#define	XM2P_HASH	((uintptr_t)curthread->t_tid & (NUM_M2P_LOCKS - 1))
775084Sjohnlev
785084Sjohnlev void
xen_block_migrate(void)795084Sjohnlev xen_block_migrate(void)
805084Sjohnlev {
815084Sjohnlev if (!DOMAIN_IS_INITDOMAIN(xen_info) &&
825741Smrj ++curthread->t_xpvcntr == 1)
835084Sjohnlev rw_enter(&m2p_lock[XM2P_HASH].m2p_rwlock, RW_READER);
845084Sjohnlev }
855084Sjohnlev
865084Sjohnlev void
xen_allow_migrate(void)875084Sjohnlev xen_allow_migrate(void)
885084Sjohnlev {
895084Sjohnlev if (!DOMAIN_IS_INITDOMAIN(xen_info) &&
905741Smrj --curthread->t_xpvcntr == 0)
915084Sjohnlev rw_exit(&m2p_lock[XM2P_HASH].m2p_rwlock);
925084Sjohnlev }
935084Sjohnlev
945084Sjohnlev void
xen_start_migrate(void)955084Sjohnlev xen_start_migrate(void)
965084Sjohnlev {
975084Sjohnlev int i;
985084Sjohnlev
995741Smrj ASSERT(curthread->t_xpvcntr == 0);
1005741Smrj ++curthread->t_xpvcntr; /* this allows calls into HAT */
1015084Sjohnlev for (i = 0; i < NUM_M2P_LOCKS; ++i)
1025084Sjohnlev rw_enter(&m2p_lock[i].m2p_rwlock, RW_WRITER);
1035084Sjohnlev }
1045084Sjohnlev
1055084Sjohnlev void
xen_end_migrate(void)1065084Sjohnlev xen_end_migrate(void)
1075084Sjohnlev {
1085084Sjohnlev int i;
1095084Sjohnlev
1105084Sjohnlev for (i = 0; i < NUM_M2P_LOCKS; ++i)
1115084Sjohnlev rw_exit(&m2p_lock[i].m2p_rwlock);
1125741Smrj ASSERT(curthread->t_xpvcntr == 1);
1135741Smrj --curthread->t_xpvcntr;
1145084Sjohnlev }
1155084Sjohnlev
1165084Sjohnlev /*ARGSUSED*/
1175084Sjohnlev void
set_pteval(paddr_t table,uint_t index,uint_t level,x86pte_t pteval)1185084Sjohnlev set_pteval(paddr_t table, uint_t index, uint_t level, x86pte_t pteval)
1195084Sjohnlev {
1205084Sjohnlev mmu_update_t t;
1215084Sjohnlev maddr_t mtable = pa_to_ma(table);
1225084Sjohnlev int retcnt;
1235084Sjohnlev
1245084Sjohnlev t.ptr = (mtable + index * pte_size) | MMU_NORMAL_PT_UPDATE;
1255084Sjohnlev t.val = pteval;
1265084Sjohnlev if (HYPERVISOR_mmu_update(&t, 1, &retcnt, DOMID_SELF) || retcnt != 1)
1275084Sjohnlev bop_panic("HYPERVISOR_mmu_update() failed");
1285084Sjohnlev }
1295084Sjohnlev
1305084Sjohnlev /*
1315084Sjohnlev * The start_info_t and mfn_list are initially mapped in low "boot" memory.
1325084Sjohnlev * Each has a page aligned address and size. We relocate them up into the
1335084Sjohnlev * kernel's normal address space at this point in time. We also create
1345084Sjohnlev * the arrays that let the hypervisor suspend/resume a domain.
1355084Sjohnlev */
1365084Sjohnlev void
xen_relocate_start_info(void)1375084Sjohnlev xen_relocate_start_info(void)
1385084Sjohnlev {
1395084Sjohnlev maddr_t mach_addr;
1405084Sjohnlev size_t sz;
1415084Sjohnlev size_t sz2;
1425084Sjohnlev offset_t off;
1435084Sjohnlev uintptr_t addr;
1445084Sjohnlev uintptr_t old;
1455084Sjohnlev int i, j;
1465084Sjohnlev
1475084Sjohnlev /*
1485084Sjohnlev * In dom0, we have to account for the console_info structure
1495084Sjohnlev * which might immediately follow the start_info in memory.
1505084Sjohnlev */
1515084Sjohnlev sz = sizeof (start_info_t);
1525084Sjohnlev if (DOMAIN_IS_INITDOMAIN(xen_info) &&
1535084Sjohnlev xen_info->console.dom0.info_off >= sizeof (start_info_t)) {
1545084Sjohnlev sz += xen_info->console.dom0.info_off - sizeof (start_info_t) +
1555084Sjohnlev xen_info->console.dom0.info_size;
1565084Sjohnlev }
1575084Sjohnlev sz = P2ROUNDUP(sz, MMU_PAGESIZE);
1585084Sjohnlev addr = (uintptr_t)vmem_alloc(heap_arena, sz, VM_SLEEP);
1595084Sjohnlev for (off = 0; off < sz; off += MMU_PAGESIZE) {
1605084Sjohnlev mach_addr = pa_to_ma(pfn_to_pa(va_to_pfn(
1615084Sjohnlev (caddr_t)xen_info + off)));
1625084Sjohnlev kbm_map_ma(mach_addr + off, addr + off, 0);
1635084Sjohnlev }
1645084Sjohnlev boot_mapin((caddr_t)addr, sz);
1655084Sjohnlev old = (uintptr_t)xen_info;
1665084Sjohnlev xen_info = (start_info_t *)addr;
1675084Sjohnlev for (off = 0; off < sz; off += MMU_PAGESIZE)
1685084Sjohnlev kbm_unmap(old + off);
1695084Sjohnlev
1705084Sjohnlev /*
1715084Sjohnlev * Relocate the mfn_list, any number of pages.
1725084Sjohnlev */
1735084Sjohnlev sz = P2ROUNDUP(mfn_count * sizeof (mfn_t), MMU_PAGESIZE);
1745084Sjohnlev addr = (uintptr_t)vmem_xalloc(heap_arena, sz, MMU_PAGESIZE, 0,
1755084Sjohnlev 0, 0, 0, VM_SLEEP);
1765084Sjohnlev for (off = 0; off < sz; off += MMU_PAGESIZE) {
1775084Sjohnlev mach_addr =
1785084Sjohnlev pa_to_ma(pfn_to_pa(va_to_pfn((caddr_t)mfn_list + off)));
1795084Sjohnlev kbm_map_ma(mach_addr, addr + off, 0);
1805084Sjohnlev }
1815084Sjohnlev boot_mapin((caddr_t)addr, sz);
1825084Sjohnlev old = (uintptr_t)mfn_list;
1835084Sjohnlev mfn_list = (mfn_t *)addr;
1845084Sjohnlev xen_info->mfn_list = (mfn_t)addr;
1855084Sjohnlev for (off = 0; off < sz; off += MMU_PAGESIZE)
1865084Sjohnlev kbm_unmap(old + off);
1875084Sjohnlev
1885084Sjohnlev /*
1895084Sjohnlev * Create the lists of mfn_list pages needed by suspend/resume.
1905084Sjohnlev * Note we skip this for domain 0 as it can't suspend/resume.
1915084Sjohnlev */
1925084Sjohnlev if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
1935084Sjohnlev sz2 = P2ROUNDUP(mmu_btop(sz) * sizeof (mfn_t), MMU_PAGESIZE);
1945084Sjohnlev mfn_list_pages = kmem_zalloc(sz2, VM_SLEEP);
1955084Sjohnlev mfn_list_pages_page = kmem_zalloc(MMU_PAGESIZE, VM_SLEEP);
1965084Sjohnlev i = 0;
1975084Sjohnlev for (off = 0; off < sz; off += MMU_PAGESIZE) {
1985084Sjohnlev j = mmu_btop(off);
1995084Sjohnlev if (((j * sizeof (mfn_t)) & MMU_PAGEOFFSET) == 0) {
2005084Sjohnlev mfn_list_pages_page[i++] =
2015084Sjohnlev pfn_to_mfn(va_to_pfn(&mfn_list_pages[j]));
2025084Sjohnlev }
2035084Sjohnlev mfn_list_pages[j] =
2045084Sjohnlev pfn_to_mfn(va_to_pfn((caddr_t)mfn_list + off));
2055084Sjohnlev }
2065084Sjohnlev HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
2075084Sjohnlev pfn_to_mfn(va_to_pfn(mfn_list_pages_page));
2085084Sjohnlev HYPERVISOR_shared_info->arch.max_pfn = xen_info->nr_pages;
2095084Sjohnlev }
2105084Sjohnlev
2115084Sjohnlev /*
2125084Sjohnlev * Remap the shared info (for I/O) into high memory, too.
2135084Sjohnlev */
2145084Sjohnlev sz = MMU_PAGESIZE;
2155084Sjohnlev addr = (uintptr_t)vmem_alloc(heap_arena, sz, VM_SLEEP);
2165084Sjohnlev kbm_map_ma(xen_info->shared_info, addr, 0);
2175084Sjohnlev /* shared info has no PFN so don't do: boot_mapin((caddr_t)addr, sz) */
2185084Sjohnlev old = (uintptr_t)HYPERVISOR_shared_info;
2195084Sjohnlev HYPERVISOR_shared_info = (void *)addr;
2205084Sjohnlev kbm_unmap(old);
2215084Sjohnlev
2225084Sjohnlev /*
2235084Sjohnlev * Remap the console info into high memory, too.
2245084Sjohnlev */
2255084Sjohnlev if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
2265084Sjohnlev sz = MMU_PAGESIZE;
2275084Sjohnlev addr = (uintptr_t)vmem_alloc(heap_arena, sz, VM_SLEEP);
2285084Sjohnlev kbm_map_ma(pfn_to_pa(xen_info->console.domU.mfn), addr, 0);
2295084Sjohnlev boot_mapin((caddr_t)addr, sz);
2305084Sjohnlev old = (uintptr_t)HYPERVISOR_console_page;
2315084Sjohnlev HYPERVISOR_console_page = (void *)addr;
2325084Sjohnlev kbm_unmap(old);
2335084Sjohnlev } else {
2345084Sjohnlev HYPERVISOR_console_page = NULL;
2355084Sjohnlev }
2365084Sjohnlev
2375084Sjohnlev /*
2385084Sjohnlev * On domUs we need to have the xenbus page (store_mfn) mapped into
2395084Sjohnlev * the kernel. This is referenced as xb_addr.
2405084Sjohnlev */
2415084Sjohnlev if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
2425084Sjohnlev xb_addr = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
2435084Sjohnlev kbm_map_ma(mfn_to_ma(xen_info->store_mfn),
2445084Sjohnlev (uintptr_t)xb_addr, 0);
2455084Sjohnlev boot_mapin(xb_addr, MMU_PAGESIZE);
2465084Sjohnlev }
2475084Sjohnlev }
2485084Sjohnlev
/*
 * Generate the pfn value to use for a foreign mfn.
 * The returned "pfn" is simply the mfn with PFN_IS_FOREIGN_MFN ORed in,
 * so the original mfn can be recovered later by masking that bit off.
 */
pfn_t
xen_assign_pfn(mfn_t mfn)
{
	pfn_t pfn;

#ifdef DEBUG
	/*
	 * make sure this MFN isn't in our list of MFNs
	 */
	on_trap_data_t otd;
	/*
	 * NOTE(review): t0.t_stk != NULL appears to act as a proxy for
	 * "far enough into boot that on_trap() works" — confirm.
	 */
	uint_t on_trap_ready = (t0.t_stk != NULL);

	if (on_trap_ready) {
		/*
		 * mfn_to_pfn_mapping[] is sparse, so the indexed read may
		 * fault; on_trap() returns non-zero if that happens, in
		 * which case we simply skip the sanity check.
		 */
		if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
			pfn = mfn_to_pfn_mapping[mfn];
			if (pfn < mfn_count && mfn_list[pfn] == mfn)
				panic("xen_assign_pfn() mfn belongs to us");
		}
		no_trap();
	}
#endif /* DEBUG */

	if (mfn == MFN_INVALID)
		panic("xen_assign_pfn(MFN_INVALID) not allowed");
	pfn = (pfn_t)mfn | PFN_IS_FOREIGN_MFN;
	/* if ORing in the bit changed nothing, the mfn already had it set */
	if (pfn == mfn)
		panic("xen_assign_pfn(mfn) PFN_IS_FOREIGN_MFN bit already set");
	return (pfn);
}
2815084Sjohnlev
2825084Sjohnlev void
xen_release_pfn(pfn_t pfn)2835084Sjohnlev xen_release_pfn(pfn_t pfn)
2845084Sjohnlev {
2855084Sjohnlev if (pfn == PFN_INVALID)
2865084Sjohnlev panic("xen_release_pfn(PFN_INVALID) not allowed");
2875084Sjohnlev if ((pfn & PFN_IS_FOREIGN_MFN) == 0)
2885084Sjohnlev panic("mfn high bit not set");
2895084Sjohnlev }
2905084Sjohnlev
2915084Sjohnlev uint_t
pfn_is_foreign(pfn_t pfn)2925084Sjohnlev pfn_is_foreign(pfn_t pfn)
2935084Sjohnlev {
2945084Sjohnlev if (pfn == PFN_INVALID)
2955084Sjohnlev return (0);
2965084Sjohnlev return ((pfn & PFN_IS_FOREIGN_MFN) != 0);
2975084Sjohnlev }
2985084Sjohnlev
2995084Sjohnlev pfn_t
pte2pfn(x86pte_t pte,level_t l)3005084Sjohnlev pte2pfn(x86pte_t pte, level_t l)
3015084Sjohnlev {
3025084Sjohnlev mfn_t mfn = PTE2MFN(pte, l);
3035084Sjohnlev
3045084Sjohnlev if ((pte & PT_SOFTWARE) >= PT_FOREIGN)
3055084Sjohnlev return ((pfn_t)mfn | PFN_IS_FOREIGN_MFN);
3065084Sjohnlev return (mfn_to_pfn(mfn));
3075084Sjohnlev }
3085084Sjohnlev
3095084Sjohnlev mfn_t
pfn_to_mfn(pfn_t pfn)3105084Sjohnlev pfn_to_mfn(pfn_t pfn)
3115084Sjohnlev {
3125084Sjohnlev if (pfn == PFN_INVALID)
3135084Sjohnlev panic("pfn_to_mfn(PFN_INVALID) not allowed");
3145084Sjohnlev
3155084Sjohnlev if (pfn & PFN_IS_FOREIGN_MFN)
3165084Sjohnlev return (pfn & ~PFN_IS_FOREIGN_MFN);
3175084Sjohnlev
3185084Sjohnlev if (pfn >= mfn_count)
3195084Sjohnlev panic("pfn_to_mfn(): illegal PFN 0x%lx", pfn);
3205084Sjohnlev
3215084Sjohnlev return (mfn_list[pfn]);
3225084Sjohnlev }
3235084Sjohnlev
/*
 * This routine translates an MFN back into the corresponding PFN value.
 * It has to be careful since the mfn_to_pfn_mapping[] might fault
 * as that table is sparse. It also has to check for non-faulting, but out of
 * range that exceed the table.
 * Any MFN that cannot be proven to belong to this domain is returned as
 * a "foreign" pfn: the mfn with PFN_IS_FOREIGN_MFN ORed in.
 */
pfn_t
mfn_to_pfn(mfn_t mfn)
{
	pfn_t pfn;
	on_trap_data_t otd;
	/*
	 * NOTE(review): t0.t_stk != NULL appears to act as a proxy for
	 * "far enough into boot that on_trap() works" — confirm.
	 */
	uint_t on_trap_ready = (t0.t_stk != NULL);

	/*
	 * Cleared at a suspend or migrate
	 */
	if (cached_max_mfn == 0)
		cached_max_mfn =
		    HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);

	/* anything beyond the last RAM frame can't be ours: treat as foreign */
	if (cached_max_mfn < mfn)
		return ((pfn_t)mfn | PFN_IS_FOREIGN_MFN);

	/*
	 * on_trap() returns non-zero when the protected table read below
	 * faults; a fault means the mfn has no entry in the sparse table
	 * and is therefore foreign.
	 */
	if (on_trap_ready && on_trap(&otd, OT_DATA_ACCESS)) {
		pfn = (pfn_t)mfn | PFN_IS_FOREIGN_MFN;
	} else {
		pfn = mfn_to_pfn_mapping[mfn];

		/*
		 * An entry that is invalid, out of range, or doesn't
		 * round-trip back to this mfn also means it isn't ours.
		 */
		if (pfn == PFN_INVALID || pfn >= mfn_count ||
		    pfn_to_mfn(pfn) != mfn)
			pfn = (pfn_t)mfn | PFN_IS_FOREIGN_MFN;
	}

	if (on_trap_ready)
		no_trap();

	/*
	 * If khat_running is set then we should be checking
	 * in domUs that migration is blocked while using the
	 * mfn_to_pfn_mapping[] table.
	 */
	ASSERT(!khat_running || DOMAIN_IS_INITDOMAIN(xen_info) ||
	    rw_read_held(&m2p_lock[XM2P_HASH].m2p_rwlock));

	return (pfn);
}
3705084Sjohnlev
3715084Sjohnlev /*
3725084Sjohnlev * From a pseudo-physical address, find the corresponding machine address.
3735084Sjohnlev */
3745084Sjohnlev maddr_t
pa_to_ma(paddr_t pa)3755084Sjohnlev pa_to_ma(paddr_t pa)
3765084Sjohnlev {
3775084Sjohnlev mfn_t mfn = pfn_to_mfn(mmu_btop(pa));
3785084Sjohnlev
3795084Sjohnlev if (mfn == MFN_INVALID)
3805084Sjohnlev panic("pa_to_ma() got MFN_INVALID");
3815084Sjohnlev return (mfn_to_ma(mfn) + (pa & MMU_PAGEOFFSET));
3825084Sjohnlev }
3835084Sjohnlev
3845084Sjohnlev /*
3855084Sjohnlev * From a machine address, find the corresponding pseudo-physical address.
3865084Sjohnlev */
3875084Sjohnlev paddr_t
ma_to_pa(maddr_t ma)3885084Sjohnlev ma_to_pa(maddr_t ma)
3895084Sjohnlev {
3905084Sjohnlev pfn_t pfn = mfn_to_pfn(mmu_btop(ma));
3915084Sjohnlev
3925084Sjohnlev if (pfn == PFN_INVALID)
3935084Sjohnlev panic("ma_to_pa() got PFN_INVALID");
3945084Sjohnlev return (pfn_to_pa(pfn) + (ma & MMU_PAGEOFFSET));
3955084Sjohnlev }
3965084Sjohnlev
/*
 * When calling reassign_pfn(), the page must be (at least) read locked
 * to make sure swrand does not try to grab it.
 */
#ifdef DEBUG
/* DEBUG builds panic if the page backing "pfn" exists and is unlocked */
#define CHECK_PAGE_LOCK(pfn)	{ \
	page_t *pp = page_numtopp_nolock(pfn); \
	if ((pp != NULL) && (!PAGE_LOCKED(pp))) { \
		panic("reassign_pfn() called with unlocked page (pfn 0x%lx)", \
		    pfn); \
	} \
}
#else	/* DEBUG */
/* non-DEBUG builds compile the check away entirely */
#define CHECK_PAGE_LOCK(pfn)
#endif	/* DEBUG */
4125084Sjohnlev
/*
 * Reassign a new machine page to back a physical address.
 * An mfn of MFN_INVALID means the page is being given back to the
 * hypervisor; otherwise both our mfn_list[] and the hypervisor's
 * machine-to-physical table are updated, and any kpm mapping is
 * re-established.
 */
void
reassign_pfn(pfn_t pfn, mfn_t mfn)
{
	int mmu_update_return;
	mmu_update_t t;
	extern void update_contig_pfnlist(pfn_t, mfn_t, mfn_t);

	ASSERT(pfn != PFN_INVALID);
	ASSERT(!pfn_is_foreign(pfn));

	ASSERT(pfn < mfn_count);
	/* keep the contiguous-pfn bookkeeping in sync with the new backing */
	update_contig_pfnlist(pfn, mfn_list[pfn], mfn);
	if (mfn == MFN_INVALID) {
		/* giving the page away: drop kpm mapping, mark unbacked */
		CHECK_PAGE_LOCK(pfn);
		if (kpm_vbase != NULL && xen_kpm_page(pfn, 0) < 0)
			panic("reassign_pfn(): failed to remove kpm mapping");
		mfn_list[pfn] = mfn;
		return;
	}

	/*
	 * Verify that previously given away pages are still page locked.
	 */
	if (mfn_list[pfn] == MFN_INVALID) {
		CHECK_PAGE_LOCK(pfn);
	}
	mfn_list[pfn] = mfn;

	/* tell the hypervisor to update its machine-to-physical table too */
	t.ptr = mfn_to_ma(mfn) | MMU_MACHPHYS_UPDATE;
	t.val = pfn;

	if (HYPERVISOR_mmu_update(&t, 1, &mmu_update_return, DOMID_SELF))
		panic("HYPERVISOR_mmu_update() failed");
	ASSERT(mmu_update_return == 1);

	if (kpm_vbase != NULL && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0)
		panic("reassign_pfn(): failed to enable kpm mapping");
}
454