/*-
 * Copyright (c) 2009-2012,2016-2024 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/kdb.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <dev/hyperv/vmbus/x86/hyperv_machdep.h>
#include <dev/hyperv/vmbus/x86/hyperv_reg.h>
#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_var.h>
#include <dev/hyperv/vmbus/hyperv_common_reg.h>
#include "hyperv_mmu.h"

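/*
 * Build the guest-virtual-address (GVA) list used by the
 * HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST{,_EX} hypercalls.  Each 64-bit
 * entry holds a page-aligned GVA in its upper bits; the lower 12 bits
 * encode how many additional pages after that address to flush.  For
 * example, a 3-page range shorter than HV_TLB_FLUSH_UNIT is encoded
 * as the single entry "start | 2".  Returns the number of entries
 * written to gva_list[].
 */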
static inline int fill_gva_list(uint64_t gva_list[],
    unsigned long start, unsigned long end)
{
	int gva_n = 0;
	unsigned long cur = start, diff;

	do {
		diff = end > cur ? end - cur : 0;

		gva_list[gva_n] = cur;
		/*
		 * Lower 12 bits encode the number of additional
		 * pages to flush (in addition to the 'cur' page).
		 */
		if (diff >= HV_TLB_FLUSH_UNIT) {
			gva_list[gva_n] |= PAGE_MASK;
			cur += HV_TLB_FLUSH_UNIT;
		} else if (diff) {
			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
			cur = end;
		}

		gva_n++;

	} while (cur < end);

	return gva_n;
}

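/*
 * Convert a FreeBSD cpuset into the Hyper-V sparse vpset format:
 * vCPUs are grouped into 64-vCPU banks, valid_bank_mask covers the
 * banks that are included, and each bank_contents[] word carries one
 * bit per vCPU of its bank.  Returns the number of banks used, 0 if
 * the host's maximum VP index does not fit the sparse format, or -1
 * if a CPU has no known vCPU id.
 */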
inline int hv_cpumask_to_vpset(struct hv_vpset *vpset,
    const cpuset_t *cpus, struct vmbus_softc *sc)
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
	int max_vcpu_bank = hv_max_vp_index / HV_VCPUS_PER_SPARSE_BANK;

	/*
	 * vpset.valid_bank_mask can represent up to
	 * HV_MAX_SPARSE_VCPU_BANKS banks.
	 */
	if (max_vcpu_bank >= HV_MAX_SPARSE_VCPU_BANKS)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank, as hv_tlb_flush_ex
	 * structs are not cleared between calls; otherwise we would risk
	 * flushing unneeded vCPUs.
	 */
	for (vcpu_bank = 0; vcpu_bank <= max_vcpu_bank; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty but this is acceptable.
	 */
	CPU_FOREACH_ISSET(cpu, cpus) {
		vcpu = VMBUS_PCPU_GET(sc, vcpuid, cpu);
		if (vcpu == -1)
			return -1;
		vcpu_bank = vcpu / HV_VCPUS_PER_SPARSE_BANK;
		vcpu_offset = vcpu % HV_VCPUS_PER_SPARSE_BANK;
		set_bit(vcpu_offset, (unsigned long *)
		    &vpset->bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}

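/*
 * Hyper-V assisted TLB shootdown.  Instead of sending IPIs to every
 * CPU on which the pmap is active, ask the hypervisor to perform the
 * invalidation through a HVCALL_FLUSH_VIRTUAL_ADDRESS_* hypercall.
 * Falls back to smp_targeted_tlb_shootdown_native() for cache
 * invalidations, when the per-CPU hypercall memory is unavailable, or
 * when a hypercall fails.
 */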
void
hv_vm_tlb_flush(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
    enum invl_op_codes op, struct vmbus_softc *sc, smp_invl_local_cb_t curcpu_cb)
{
	cpuset_t tmp_mask, mask;
	struct hyperv_tlb_flush *flush;
	int cpu, vcpu;
	int max_gvas, gva_n;
	uint64_t status = 0;
	uint64_t cr3;

	/*
	 * Hyper-V does not handle cache invalidation; let the native
	 * code handle it.
	 */
	if (op == INVL_OP_CACHE)
		return smp_targeted_tlb_shootdown_native(pmap, addr1, addr2,
		    curcpu_cb, op);

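	/*
	 * Build the flush request in this CPU's preallocated
	 * hypercall input memory (a page-sized buffer).
	 */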
	flush = *VMBUS_PCPU_PTR(sc, cpu_mem, curcpu);
	if (flush == NULL)
		return smp_targeted_tlb_shootdown_native(pmap, addr1, addr2,
		    curcpu_cb, op);
	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started))
		goto local_cb;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));

	/*
	 * Make a stable copy of the set of CPUs on which the pmap is active.
	 * See if we have to interrupt other CPUs.
	 */
	CPU_COPY(pmap_invalidate_cpu_mask(pmap), &tmp_mask);
	CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask);
	CPU_CLR(curcpu, &tmp_mask);
	if (CPU_EMPTY(&tmp_mask))
		goto local_cb;

	/*
	 * The initiator must have interrupts enabled, which prevents
	 * non-invalidation IPIs that take the smp_ipi_mtx spinlock
	 * from deadlocking with us.  On the other hand, preemption
	 * must be disabled to pin the initiator to the instance of the
	 * pcpu pc_smp_tlb data and scoreboard line.
	 */
	KASSERT((read_rflags() & PSL_I) != 0,
	    ("hv_vm_tlb_flush: interrupts disabled"));
	critical_enter();
	flush->processor_mask = 0;
	cr3 = pmap->pm_cr3;

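	/*
	 * For full TLB invalidations ask the hypervisor to flush all
	 * virtual address spaces; otherwise flush only the address
	 * space identified by this pmap's CR3, with the PCID bits
	 * masked off.
	 */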
	if (op == INVL_OP_TLB || op == INVL_OP_TLB_INVPCID ||
	    op == INVL_OP_TLB_INVPCID_PTI || op == INVL_OP_TLB_PCID) {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	} else {
		flush->address_space = cr3;
		flush->address_space &= ~CR3_PCID_MASK;
		flush->flags = 0;
	}
	if (CPU_CMP(&mask, &all_cpus) == 0) {
		flush->flags |= HV_FLUSH_ALL_PROCESSORS;
	} else {
		if (CPU_FLS(&mask) < mp_ncpus && CPU_FLS(&mask) >= 64)
			goto do_ex_hypercall;

		CPU_FOREACH_ISSET(cpu, &mask) {
			vcpu = VMBUS_PCPU_GET(sc, vcpuid, cpu);
			if (vcpu >= 64)
				goto do_ex_hypercall;

			set_bit(vcpu, &flush->processor_mask);
		}
		if (!flush->processor_mask)
			goto native;
	}
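	/*
	 * max_gvas is the number of gva_list[] entries that fit in the
	 * hypercall input page after the fixed-size header.  Larger
	 * ranges are turned into a full address-space flush instead.
	 */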
	max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
	if (addr2 == 0) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
		status = hypercall_do_md(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
		    (uint64_t)flush, (uint64_t)NULL);
	} else if (addr2 && ((addr2 - addr1) / HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hypercall_do_md(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
		    (uint64_t)flush, (uint64_t)NULL);
	} else {
		gva_n = fill_gva_list(flush->gva_list, addr1, addr2);

		status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
		    gva_n, 0, (uint64_t)flush, (uint64_t)NULL);
	}
	if (status)
		goto native;
	sched_unpin();
	critical_exit();
	return;

local_cb:
	critical_enter();
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
	critical_exit();
	return;
do_ex_hypercall:
	status = hv_flush_tlb_others_ex(pmap, addr1, addr2, mask, op, sc);
	if (status)
		goto native;
	sched_unpin();
	critical_exit();
	return;
native:
	critical_exit();
	return smp_targeted_tlb_shootdown_native(pmap, addr1,
	    addr2, curcpu_cb, op);
}

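/*
 * Extended flush, used when the target set cannot be described by the
 * simple 64-bit processor_mask, i.e. when some vCPU id is 64 or
 * larger.  The target processors are passed as a sparse hv_vpset
 * built by hv_cpumask_to_vpset().  Returns the hypercall status, or
 * EINVAL if the hypercall memory is missing or the
 * HYPERV_X64_EX_PROCESSOR_MASKS_RECOMMENDED bit is not set in
 * hyperv_recommends.
 */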
uint64_t
hv_flush_tlb_others_ex(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
    const cpuset_t mask, enum invl_op_codes op, struct vmbus_softc *sc)
{
	int nr_bank = 0, max_gvas, gva_n;
	struct hv_tlb_flush_ex *flush;
	uint64_t status = 0;
	uint64_t cr3;

	if (*VMBUS_PCPU_PTR(sc, cpu_mem, curcpu) == NULL)
		return EINVAL;
	flush = *VMBUS_PCPU_PTR(sc, cpu_mem, curcpu);

	if (!(hyperv_recommends & HYPERV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
		return EINVAL;

	cr3 = pmap->pm_cr3;
	if (op == INVL_OP_TLB) {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	} else {
		flush->address_space = cr3;
		flush->address_space &= ~CR3_PCID_MASK;
		flush->flags = 0;
	}

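	/*
	 * Describe the target vCPUs as a sparse set of 64-vCPU banks.
	 * The variable-length GVA list is placed right after the last
	 * used bank in bank_contents[].
	 */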
	flush->hv_vp_set.valid_bank_mask = 0;

	flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	nr_bank = hv_cpumask_to_vpset(&flush->hv_vp_set, &mask, sc);
	if (nr_bank < 0)
		return EINVAL;

	/*
	 * We cannot flush more than max_gvas GVA ranges with one hypercall.
	 * Flush the whole address space if we were asked to do more.
	 */
	max_gvas = (PAGE_SIZE - sizeof(*flush) - nr_bank *
	    sizeof(flush->hv_vp_set.bank_contents[0])) /
	    sizeof(flush->hv_vp_set.bank_contents[0]);

	if (addr2 == 0) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
		status = hv_do_rep_hypercall(
		    HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
		    0, nr_bank, (uint64_t)flush, (uint64_t)NULL);
	} else if (addr2 &&
	    ((addr2 - addr1) / HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hv_do_rep_hypercall(
		    HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
		    0, nr_bank, (uint64_t)flush, (uint64_t)NULL);
	} else {
		gva_n = fill_gva_list(&flush->hv_vp_set.bank_contents[nr_bank],
		    addr1, addr2);
		status = hv_do_rep_hypercall(
		    HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
		    gva_n, nr_bank, (uint64_t)flush, (uint64_t)NULL);
	}
	return status;
}