xref: /onnv-gate/usr/src/uts/i86pc/vm/vm_machdep.c (revision 7656:2621e50fdf4a)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/*	All Rights Reserved   */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/buf.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/disp.h>
#include <sys/vm.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cred.h>
#include <sys/exec.h>
#include <sys/exechdr.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_vn.h>
#include <vm/page.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>

#include <sys/cpu.h>
#include <sys/vm_machparam.h>
#include <sys/memlist.h>
#include <sys/bootconf.h> /* XXX the memlist stuff belongs in memlist_plat.h */
#include <vm/hat_i86.h>
#include <sys/x86_archext.h>
#include <sys/elf_386.h>
#include <sys/cmn_err.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>

#include <sys/vtrace.h>
#include <sys/ddidmareq.h>
#include <sys/promif.h>
#include <sys/memnode.h>
#include <sys/stack.h>
#include <util/qsort.h>
#include <sys/taskq.h>

#ifdef __xpv

#include <sys/hypervisor.h>
#include <sys/xen_mmu.h>
#include <sys/balloon_impl.h>

/*
 * domain 0 pages usable for DMA are pre-allocated and kept in
 * distinct lists, ordered by increasing mfn.
 */
static kmutex_t io_pool_lock;
static kmutex_t contig_list_lock;
static page_t *io_pool_4g;	/* pool for 32 bit dma limited devices */
static page_t *io_pool_16m;	/* pool for 24 bit dma limited legacy devices */
static long io_pool_cnt;
static long io_pool_cnt_max = 0;
#define	DEFAULT_IO_POOL_MIN	128
static long io_pool_cnt_min = DEFAULT_IO_POOL_MIN;
static long io_pool_cnt_lowater = 0;
static long io_pool_shrink_attempts; /* how many times did we try to shrink */
static long io_pool_shrinks;	/* how many times did we really shrink */
static long io_pool_grows;	/* how many times did we grow */
static mfn_t start_mfn = 1;
static caddr_t io_pool_kva;	/* used to alloc pages when needed */

static int create_contig_pfnlist(uint_t);

/*
 * percentage of phys mem to hold in the i/o pool
 */
#define	DEFAULT_IO_POOL_PCT	2
static long io_pool_physmem_pct = DEFAULT_IO_POOL_PCT;
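
/*
 * Illustrative arithmetic (a sketch, not part of the build): with the
 * default io_pool_physmem_pct of 2 on a domain 0 that owns 0x100000
 * pages (4G), a physmem * 2 / 100 target works out to roughly 20971
 * pages (~82M), while DEFAULT_IO_POOL_MIN (128 pages) acts as a floor
 * when memory is scarce.  The exact sizing policy lives in the pool
 * grow/shrink code.
 */
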
static void page_io_pool_sub(page_t **, page_t *, page_t *);
int ioalloc_dbg = 0;

#endif /* __xpv */

uint_t vac_colors = 1;

int largepagesupport = 0;
extern uint_t page_create_new;
extern uint_t page_create_exists;
extern uint_t page_create_putbacks;
/*
 * Allow users to disable the kernel's use of SSE.
 */
extern int use_sse_pagecopy, use_sse_pagezero;

/*
 * combined memory ranges from mnode and memranges[] to manage single
 * mnode/mtype dimension in the page lists.
 */
typedef struct {
	pfn_t	mnr_pfnlo;
	pfn_t	mnr_pfnhi;
	int	mnr_mnode;
	int	mnr_memrange;		/* index into memranges[] */
	/* maintain page list stats */
	pgcnt_t	mnr_mt_clpgcnt;		/* cache list cnt */
	pgcnt_t	mnr_mt_flpgcnt[MMU_PAGE_SIZES];	/* free list cnt per szc */
	pgcnt_t	mnr_mt_totcnt;		/* sum of cache and free lists */
#ifdef DEBUG
	struct mnr_mts {		/* mnode/mtype szc stats */
		pgcnt_t	mnr_mts_pgcnt;
		int	mnr_mts_colors;
		pgcnt_t *mnr_mtsc_pgcnt;
	}	*mnr_mts;
#endif
} mnoderange_t;

#define	MEMRANGEHI(mtype)						\
	((mtype > 0) ? memranges[mtype - 1] - 1: physmax)
#define	MEMRANGELO(mtype)	(memranges[mtype])

#define	MTYPE_FREEMEM(mt)	(mnoderanges[mt].mnr_mt_totcnt)

/*
 * As the PC architecture evolved, memory was clumped into several
 * ranges for various historical I/O devices to do DMA.
 * < 16Meg - ISA bus
 * < 2Gig - ???
 * < 4Gig - PCI bus or drivers that don't understand PAE mode
 *
 * These are listed in reverse order, so that we can skip over unused
 * ranges on machines with small memories.
 *
 * For now under the Hypervisor, we'll only ever have one memrange.
 */
#define	PFN_4GIG	0x100000
#define	PFN_16MEG	0x1000
static pfn_t arch_memranges[NUM_MEM_RANGES] = {
    PFN_4GIG,	/* pfn range for 4G and above */
    0x80000,	/* pfn range for 2G-4G */
    PFN_16MEG,	/* pfn range for 16M-2G */
    0x00000,	/* pfn range for 0-16M */
};
pfn_t *memranges = &arch_memranges[0];
int nranges = NUM_MEM_RANGES;
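
/*
 * Worked example (illustrative only): with the table above, memrange 0
 * covers pfns [PFN_4GIG, physmax], memrange 1 covers [0x80000, 0xfffff]
 * (2G-4G), memrange 2 covers [PFN_16MEG, 0x7ffff] and memrange 3 covers
 * [0, 0xfff].  MEMRANGELO(1) evaluates to memranges[1] == 0x80000 and
 * MEMRANGEHI(1) to memranges[0] - 1 == 0xfffff.
 */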

/*
 * This combines mem_node_config and memranges into one data
 * structure to be used for page list management.
 */
mnoderange_t	*mnoderanges;
int		mnoderangecnt;
int		mtype4g;

/*
 * 4g memory management variables for systems with more than 4g of memory:
 *
 * physical memory below 4g is required for 32bit dma devices and, currently,
 * for kmem memory. On systems with more than 4g of memory, the pool of memory
 * below 4g can be depleted without any paging activity given that there is
 * likely to be sufficient memory above 4g.
 *
 * physmax4g is set true if the largest pfn is over 4g. The rest of the
 * 4g memory management code is enabled only when physmax4g is true.
 *
 * maxmem4g is the count of the maximum number of pages on the page lists
 * with physical addresses below 4g. It can be a lot less than 4g given that
 * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
 * agp aperture etc.
 *
 * freemem4g maintains the count of the number of available pages on the
 * page lists with physical addresses below 4g.
 *
 * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
 * 6% (desfree4gshift = 4) of maxmem4g.
 *
 * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
 * and the amount of physical memory above 4g is greater than freemem4g.
 * In this case, page_get_* routines will restrict below 4g allocations
 * for requests that don't specifically require it.
 */

#define	LOTSFREE4G	(maxmem4g >> lotsfree4gshift)
#define	DESFREE4G	(maxmem4g >> desfree4gshift)

#define	RESTRICT4G_ALLOC					\
	(physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))

static pgcnt_t	maxmem4g;
static pgcnt_t	freemem4g;
static int	physmax4g;
static int	desfree4gshift = 4;	/* maxmem4g shift to derive DESFREE4G */
static int	lotsfree4gshift = 3;
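
/*
 * Worked example (illustrative only): if maxmem4g is 0x100000 pages (4G
 * of 4K pages), DESFREE4G is 0x100000 >> 4 == 0x10000 pages (256M, about
 * 6%) and LOTSFREE4G is 0x100000 >> 3 == 0x20000 pages (512M).
 * RESTRICT4G_ALLOC then fires once freemem4g drops below 0x10000 pages
 * while also being less than half of freemem.
 */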

/*
 * 16m memory management:
 *
 * reserve some amount of physical memory below 16m for legacy devices.
 *
 * RESTRICT16M_ALLOC returns true if there are sufficient free pages above
 * 16m or if the 16m pool drops below DESFREE16M.
 *
 * In this case, general page allocations via page_get_{free,cache}list
 * routines will be restricted from allocating from the 16m pool. Allocations
 * that require specific pfn ranges (page_get_anylist) and PG_PANIC allocations
 * are not restricted.
 */

#define	FREEMEM16M	MTYPE_FREEMEM(0)
#define	DESFREE16M	desfree16m
#define	RESTRICT16M_ALLOC(freemem, pgcnt, flags)		\
	((freemem != 0) && ((flags & PG_PANIC) == 0) &&		\
	    ((freemem >= (FREEMEM16M)) ||			\
	    (FREEMEM16M  < (DESFREE16M + pgcnt))))

static pgcnt_t	desfree16m = 0x380;
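
/*
 * Worked example (illustrative only): desfree16m is 0x380 == 896 pages,
 * i.e. 3.5M of the 16m pool.  A non-PG_PANIC request for pgcnt pages is
 * steered away from the 16m pool whenever at least as many pages are
 * free elsewhere (freemem >= FREEMEM16M) or whenever granting it would
 * leave fewer than 896 pages below 16m (FREEMEM16M < 896 + pgcnt).
 */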

/*
 * This can be patched via /etc/system to allow old non-PAE aware device
 * drivers to use kmem_alloc'd memory on 32 bit systems with > 4Gig RAM.
 */
int restricted_kmemalloc = 0;

#ifdef VM_STATS
struct {
	ulong_t	pga_alloc;
	ulong_t	pga_notfullrange;
	ulong_t	pga_nulldmaattr;
	ulong_t	pga_allocok;
	ulong_t	pga_allocfailed;
	ulong_t	pgma_alloc;
	ulong_t	pgma_allocok;
	ulong_t	pgma_allocfailed;
	ulong_t	pgma_allocempty;
} pga_vmstats;
#endif

uint_t mmu_page_sizes;

/* How many page sizes the users can see */
uint_t mmu_exported_page_sizes;

/* page sizes that legacy applications can see */
uint_t mmu_legacy_page_sizes;

/*
 * Number of pages in 1 GB.  Don't enable automatic large pages if we have
 * fewer than this many pages.
 */
pgcnt_t shm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
pgcnt_t privm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
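
/*
 * Worked arithmetic (illustrative only): with the x86 MMU_PAGESHIFT of
 * 12 (4K base pages), 1 << (30 - 12) == 1 << 18 == 262144 pages, which
 * is exactly 1 GB.
 */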

/*
 * Maximum and default segment size tunables for user private
 * and shared anon memory, and user text and initialized data.
 * These can be patched via /etc/system to allow large pages
 * to be used for mapping application private and shared anon memory.
 */
size_t mcntl0_lpsize = MMU_PAGESIZE;
size_t max_uheap_lpsize = MMU_PAGESIZE;
size_t default_uheap_lpsize = MMU_PAGESIZE;
size_t max_ustack_lpsize = MMU_PAGESIZE;
size_t default_ustack_lpsize = MMU_PAGESIZE;
size_t max_privmap_lpsize = MMU_PAGESIZE;
size_t max_uidata_lpsize = MMU_PAGESIZE;
size_t max_utext_lpsize = MMU_PAGESIZE;
size_t max_shm_lpsize = MMU_PAGESIZE;


/*
 * initialized by page_coloring_init().
 */
uint_t	page_colors;
uint_t	page_colors_mask;
uint_t	page_coloring_shift;
int	cpu_page_colors;
static uint_t	l2_colors;

/*
 * Page freelists and cachelists are dynamically allocated once mnoderangecnt
 * and page_colors are calculated from the l2 cache n-way set size.  Within a
 * mnode range, the page freelist and cachelist are hashed into bins based on
 * color. This makes it easier to search for a page within a specific memory
 * range.
 */
#define	PAGE_COLORS_MIN	16

page_t ****page_freelists;
page_t ***page_cachelists;


/*
 * Used by page layer to know about page sizes
 */
hw_pagesize_t hw_page_array[MAX_NUM_LEVEL + 1];

kmutex_t	*fpc_mutex[NPC_MUTEX];
kmutex_t	*cpc_mutex[NPC_MUTEX];

/*
 * Only let one thread at a time try to coalesce large pages, to
 * prevent them from working against each other.
 */
static kmutex_t	contig_lock;
#define	CONTIG_LOCK()	mutex_enter(&contig_lock);
#define	CONTIG_UNLOCK()	mutex_exit(&contig_lock);

#define	PFN_16M		(mmu_btop((uint64_t)0x1000000))

/*
 * Return the optimum page size for a given mapping
 */
/*ARGSUSED*/
size_t
map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int memcntl)
{
	level_t l = 0;
	size_t pgsz = MMU_PAGESIZE;
	size_t max_lpsize;
	uint_t mszc;

	ASSERT(maptype != MAPPGSZ_VA);

	if (maptype != MAPPGSZ_ISM && physmem < privm_lpg_min_physmem) {
		return (MMU_PAGESIZE);
	}

	switch (maptype) {
	case MAPPGSZ_HEAP:
	case MAPPGSZ_STK:
		max_lpsize = memcntl ? mcntl0_lpsize : (maptype ==
		    MAPPGSZ_HEAP ? max_uheap_lpsize : max_ustack_lpsize);
		if (max_lpsize == MMU_PAGESIZE) {
			return (MMU_PAGESIZE);
		}
		if (len == 0) {
			len = (maptype == MAPPGSZ_HEAP) ? p->p_brkbase +
			    p->p_brksize - p->p_bssbase : p->p_stksize;
		}
		len = (maptype == MAPPGSZ_HEAP) ? MAX(len,
		    default_uheap_lpsize) : MAX(len, default_ustack_lpsize);

		/*
		 * use the page size that best fits len
		 */
		for (l = mmu.umax_page_level; l > 0; --l) {
			if (LEVEL_SIZE(l) > max_lpsize || len < LEVEL_SIZE(l)) {
				continue;
			} else {
				pgsz = LEVEL_SIZE(l);
			}
			break;
		}

		mszc = (maptype == MAPPGSZ_HEAP ? p->p_brkpageszc :
		    p->p_stkpageszc);
		if (addr == 0 && (pgsz < hw_page_array[mszc].hp_size)) {
			pgsz = hw_page_array[mszc].hp_size;
		}
		return (pgsz);

	case MAPPGSZ_ISM:
		for (l = mmu.umax_page_level; l > 0; --l) {
			if (len >= LEVEL_SIZE(l))
				return (LEVEL_SIZE(l));
		}
		return (LEVEL_SIZE(0));
	}
	return (pgsz);
}
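
/*
 * Worked example (illustrative only, ignoring the p_brkpageszc/
 * p_stkpageszc adjustment above): for a MAPPGSZ_HEAP request with
 * len = 3M on a system where LEVEL_SIZE(1) is 2M and max_uheap_lpsize
 * has been patched to 2M, the loop skips levels larger than max_lpsize,
 * finds len >= 2M, and returns 2M.  A 1M heap under the same tunables
 * fails the len test at every level and falls back to MMU_PAGESIZE.
 */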

static uint_t
map_szcvec(caddr_t addr, size_t size, uintptr_t off, size_t max_lpsize,
    size_t min_physmem)
{
	caddr_t eaddr = addr + size;
	uint_t szcvec = 0;
	caddr_t raddr;
	caddr_t readdr;
	size_t	pgsz;
	int i;

	if (physmem < min_physmem || max_lpsize <= MMU_PAGESIZE) {
		return (0);
	}

	for (i = mmu_exported_page_sizes - 1; i > 0; i--) {
		pgsz = page_get_pagesize(i);
		if (pgsz > max_lpsize) {
			continue;
		}
		raddr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
		readdr = (caddr_t)P2ALIGN((uintptr_t)eaddr, pgsz);
		if (raddr < addr || raddr >= readdr) {
			continue;
		}
		if (P2PHASE((uintptr_t)addr ^ off, pgsz)) {
			continue;
		}
		/*
		 * Set szcvec to the remaining page sizes.
		 */
		szcvec = ((1 << (i + 1)) - 1) & ~1;
		break;
	}
	return (szcvec);
}
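
/*
 * Worked example (illustrative only): if the largest size code whose
 * alignment and phase checks pass is i == 2, then
 * szcvec = ((1 << 3) - 1) & ~1 == 0x6, i.e. the bits for size codes 1
 * and 2 are set and the always-available 4K code 0 is masked off.
 * Callers treat a zero return as "base pages only".
 */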

/*
 * Return a bit vector of large page size codes that
 * can be used to map [addr, addr + len) region.
 */
/*ARGSUSED*/
uint_t
map_pgszcvec(caddr_t addr, size_t size, uintptr_t off, int flags, int type,
    int memcntl)
{
	size_t max_lpsize = mcntl0_lpsize;

	if (mmu.max_page_level == 0)
		return (0);

	if (flags & MAP_TEXT) {
		if (!memcntl)
			max_lpsize = max_utext_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    shm_lpg_min_physmem));

	} else if (flags & MAP_INITDATA) {
		if (!memcntl)
			max_lpsize = max_uidata_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));

	} else if (type == MAPPGSZC_SHM) {
		if (!memcntl)
			max_lpsize = max_shm_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    shm_lpg_min_physmem));

	} else if (type == MAPPGSZC_HEAP) {
		if (!memcntl)
			max_lpsize = max_uheap_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));

	} else if (type == MAPPGSZC_STACK) {
		if (!memcntl)
			max_lpsize = max_ustack_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));

	} else {
		if (!memcntl)
			max_lpsize = max_privmap_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));
	}
}

/*
 * Handle a pagefault.
 */
faultcode_t
pagefault(
	caddr_t addr,
	enum fault_type type,
	enum seg_rw rw,
	int iskernel)
{
	struct as *as;
	struct hat *hat;
	struct proc *p;
	kthread_t *t;
	faultcode_t res;
	caddr_t base;
	size_t len;
	int err;
	int mapped_red;
	uintptr_t ea;

	ASSERT_STACK_ALIGNED();

	if (INVALID_VADDR(addr))
		return (FC_NOMAP);

	mapped_red = segkp_map_red();

	if (iskernel) {
		as = &kas;
		hat = as->a_hat;
	} else {
		t = curthread;
		p = ttoproc(t);
		as = p->p_as;
		hat = as->a_hat;
	}

	/*
	 * Dispatch pagefault.
	 */
	res = as_fault(hat, as, addr, 1, type, rw);

	/*
	 * If this isn't a potential unmapped hole in the user's
	 * UNIX data or stack segments, just return status info.
	 */
	if (res != FC_NOMAP || iskernel)
		goto out;

	/*
	 * Check to see if we happened to fault on a currently unmapped
	 * part of the UNIX data or stack segments.  If so, create a zfod
	 * mapping there and then try calling the fault routine again.
	 */
	base = p->p_brkbase;
	len = p->p_brksize;

	if (addr < base || addr >= base + len) {		/* data seg? */
		base = (caddr_t)p->p_usrstack - p->p_stksize;
		len = p->p_stksize;
		if (addr < base || addr >= p->p_usrstack) {	/* stack seg? */
			/* not in either UNIX data or stack segments */
			res = FC_NOMAP;
			goto out;
		}
	}

	/*
	 * the rest of this function implements 3.X/4.X/5.X compatibility.
	 * This code is probably not needed anymore.
	 */
	if (p->p_model == DATAMODEL_ILP32) {

		/* expand the gap to the page boundaries on each side */
		ea = P2ROUNDUP((uintptr_t)base + len, MMU_PAGESIZE);
		base = (caddr_t)P2ALIGN((uintptr_t)base, MMU_PAGESIZE);
		len = ea - (uintptr_t)base;

		as_rangelock(as);
		if (as_gap(as, MMU_PAGESIZE, &base, &len, AH_CONTAIN, addr) ==
		    0) {
			err = as_map(as, base, len, segvn_create, zfod_argsp);
			as_rangeunlock(as);
			if (err) {
				res = FC_MAKE_ERR(err);
				goto out;
			}
		} else {
			/*
			 * This page is already mapped by another thread after
			 * we returned from as_fault() above.  We just fall
			 * through to as_fault() below.
			 */
			as_rangeunlock(as);
		}

		res = as_fault(hat, as, addr, 1, F_INVAL, rw);
	}

out:
	if (mapped_red)
		segkp_unmap_red();

	return (res);
}
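
/*
 * Typical invocation (an illustrative sketch, not code from this file):
 * the trap handler resolves a user-mode page fault roughly as
 *
 *	res = pagefault(addr, F_INVAL, rw, 0);
 *	if (res == FC_NOMAP)
 *		(deliver SIGSEGV);
 *
 * with rw derived from the fault's error code (S_READ, S_WRITE, S_EXEC).
 */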

void
map_addr(caddr_t *addrp, size_t len, offset_t off, int vacalign, uint_t flags)
{
	struct proc *p = curproc;
	caddr_t userlimit = (flags & _MAP_LOW32) ?
	    (caddr_t)_userlimit32 : p->p_as->a_userlimit;

	map_addr_proc(addrp, len, off, vacalign, userlimit, curproc, flags);
}

/*ARGSUSED*/
int
map_addr_vacalign_check(caddr_t addr, u_offset_t off)
{
	return (0);
}

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is the highest available below userlimit.
 *
 * Every mapping will have a redzone of a single page on either side of
 * the request. This is done to leave one page unmapped between segments.
 * This is not required, but it's useful for the user because if their
 * program strays across a segment boundary, it will catch a fault
 * immediately making debugging a little easier.  Currently the redzone
 * is mandatory.
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  We decide to completely ignore this hint.
 *	If MAP_ALIGN was specified, addrp contains the minimal alignment, which
 *	must be some "power of two" multiple of pagesize.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *
 *	vacalign is not needed on x86 (it's for virtually addressed caches)
 */
/*ARGSUSED*/
void
map_addr_proc(
	caddr_t *addrp,
	size_t len,
	offset_t off,
	int vacalign,
	caddr_t userlimit,
	struct proc *p,
	uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	size_t align_amount;

	ASSERT32(userlimit == as->a_userlimit);

	base = p->p_brkbase;
#if defined(__amd64)
	/*
	 * XX64 Yes, this needs more work.
	 */
	if (p->p_model == DATAMODEL_NATIVE) {
		if (userlimit < as->a_userlimit) {
			/*
			 * This happens when a program wants to map
			 * something in a range that's accessible to a
			 * program in a smaller address space.  For example,
			 * a 64-bit program calling mmap32(2) to guarantee
			 * that the returned address is below 4Gbytes.
			 */
			ASSERT((uintptr_t)userlimit < ADDRESS_C(0xffffffff));

			if (userlimit > base)
				slen = userlimit - base;
			else {
				*addrp = NULL;
				return;
			}
		} else {
			/*
			 * XX64 This layout is probably wrong .. but in
			 * the event we make the amd64 address space look
			 * like sparcv9 i.e. with the stack -above- the
			 * heap, this bit of code might even be correct.
			 */
			slen = p->p_usrstack - base -
			    (((size_t)rctl_enforced_value(
			    rctlproc_legacy[RLIMIT_STACK],
			    p->p_rctls, p) + PAGEOFFSET) & PAGEMASK);
		}
	} else
#endif
		slen = userlimit - base;

	/* Make len be a multiple of PAGESIZE */
	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 * figure out what the alignment should be
	 *
	 * XX64 -- is there an ELF_AMD64_MAXPGSZ or is it the same????
	 */
	if (len <= ELF_386_MAXPGSZ) {
		/*
		 * Align virtual addresses to ensure that ELF shared libraries
		 * are mapped with the appropriate alignment constraints by
		 * the run-time linker.
		 */
		align_amount = ELF_386_MAXPGSZ;
	} else {
		int l = mmu.umax_page_level;

		while (l && len < LEVEL_SIZE(l))
			--l;

		align_amount = LEVEL_SIZE(l);
	}

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
		align_amount = (uintptr_t)*addrp;

	ASSERT(ISP2(align_amount));
	ASSERT(align_amount == 0 || align_amount >= PAGESIZE);

	off = off & (align_amount - 1);
	/*
	 * Look for a large enough hole starting below userlimit.
	 * After finding it, use the upper part.
	 */
	if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
	    PAGESIZE, off) == 0) {
		caddr_t as_addr;

		/*
		 * addr is the highest possible address to use since we have
		 * a PAGESIZE redzone at the beginning and end.
		 */
		addr = base + slen - (PAGESIZE + len);
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount and
		 * add the offset in.
		 * If addr is greater than as_addr, len would not be large
		 * enough to include the redzone, so we must adjust down
		 * by the alignment amount.
		 */
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
		addr += (uintptr_t)off;
		if (addr > as_addr) {
			addr -= align_amount;
		}

		ASSERT(addr > base);
		ASSERT(addr + len < base + slen);
		ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
		    ((uintptr_t)(off)));
		*addrp = addr;
	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}
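
/*
 * Worked example (illustrative only): suppose the gap search returns
 * base = 0x10000000 and slen = 0x400000, and we want len = 0x42000 with
 * align_amount = 0x10000 and off = 0.  The highest candidate is
 * base + slen - (PAGESIZE + len) == 0x103bd000; rounding down to the
 * 0x10000 boundary yields 0x103b0000, which still leaves the PAGESIZE
 * redzone intact at both ends of the gap.
 */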

int valid_va_range_aligned_wraparound;

/*
 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
 * addresses at least "minlen" long, where the base of the range is at "off"
 * phase from an "align" boundary and there is space for a "redzone"-sized
 * redzone on either side of the range.  On success, 1 is returned and *basep
 * and *lenp are adjusted to describe the acceptable range (including
 * the redzone).  On failure, 0 is returned.
 */
/*ARGSUSED3*/
int
valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
    size_t align, size_t redzone, size_t off)
{
	uintptr_t hi, lo;
	size_t tot_len;

	ASSERT(align == 0 ? off == 0 : off < align);
	ASSERT(ISP2(align));
	ASSERT(align == 0 || align >= PAGESIZE);

	lo = (uintptr_t)*basep;
	hi = lo + *lenp;
	tot_len = minlen + 2 * redzone; /* need at least this much space */

	/*
	 * If hi rolled over the top, try cutting back.
	 */
	if (hi < lo) {
		*lenp = 0UL - lo - 1UL;
		/* See if this really happens. If so, then we figure out why */
		valid_va_range_aligned_wraparound++;
		hi = lo + *lenp;
	}
	if (*lenp < tot_len) {
		return (0);
	}

#if defined(__amd64)
	/*
	 * Deal with a possible hole in the address range between
	 * hole_start and hole_end that should never be mapped.
	 */
	if (lo < hole_start) {
		if (hi > hole_start) {
			if (hi < hole_end) {
				hi = hole_start;
			} else {
				/* lo < hole_start && hi >= hole_end */
				if (dir == AH_LO) {
					/*
					 * prefer lowest range
					 */
					if (hole_start - lo >= tot_len)
						hi = hole_start;
					else if (hi - hole_end >= tot_len)
						lo = hole_end;
					else
						return (0);
				} else {
					/*
					 * prefer highest range
					 */
					if (hi - hole_end >= tot_len)
						lo = hole_end;
					else if (hole_start - lo >= tot_len)
						hi = hole_start;
					else
						return (0);
				}
			}
		}
	} else {
		/* lo >= hole_start */
		if (hi < hole_end)
			return (0);
		if (lo < hole_end)
			lo = hole_end;
	}
#endif

	if (hi - lo < tot_len)
		return (0);

	if (align > 1) {
		uintptr_t tlo = lo + redzone;
		uintptr_t thi = hi - redzone;
		tlo = (uintptr_t)P2PHASEUP(tlo, align, off);
		if (tlo < lo + redzone) {
			return (0);
		}
		if (thi < tlo || thi - tlo < minlen) {
			return (0);
		}
	}

	*basep = (caddr_t)lo;
	*lenp = hi - lo;
	return (1);
}
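
/*
 * Worked example (illustrative only): with *basep = 0x1000,
 * *lenp = 0x100000, minlen = 0x20000, align = 0x10000, off = 0 and
 * redzone = 0x1000, the aligned probe starts at
 * tlo = P2PHASEUP(0x2000, 0x10000, 0) == 0x10000 and thi = 0x100000,
 * so thi - tlo == 0xf0000 >= minlen and the range is accepted with
 * *basep and *lenp returned unchanged.
 */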

/*
 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
 * addresses at least "minlen" long.  On success, 1 is returned and *basep
 * and *lenp are adjusted to describe the acceptable range.  On failure, 0
 * is returned.
 */
int
valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
{
	return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
}

/*
 * Determine whether [addr, addr+len] are valid user addresses.
 */
/*ARGSUSED*/
int
valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
    caddr_t userlimit)
{
	caddr_t eaddr = addr + len;

	if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
		return (RANGE_BADADDR);

#if defined(__amd64)
	/*
	 * Check for the VA hole
	 */
	if (eaddr > (caddr_t)hole_start && addr < (caddr_t)hole_end)
		return (RANGE_BADADDR);
#endif

	return (RANGE_OKAY);
}

/*
 * Return 1 if the page frame is onboard memory, else 0.
 */
int
pf_is_memory(pfn_t pf)
{
	if (pfn_is_foreign(pf))
		return (0);
	return (address_in_memlist(phys_install, pfn_to_pa(pf), 1));
}

/*
 * return the memrange containing pfn
 */
int
memrange_num(pfn_t pfn)
{
	int n;

	for (n = 0; n < nranges - 1; ++n) {
		if (pfn >= memranges[n])
			break;
	}
	return (n);
}
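
/*
 * Worked example (illustrative only): with the default arch_memranges[]
 * table, memrange_num(0x120000) returns 0 (at or above PFN_4GIG),
 * memrange_num(0x90000) returns 1 (2G-4G), and memrange_num(0x500)
 * fails every test and returns 3 (0-16M).
 */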

/*
 * return the mnoderange containing pfn
 */
/*ARGSUSED*/
int
pfn_2_mtype(pfn_t pfn)
{
#if defined(__xpv)
	return (0);
#else
	int	n;

	for (n = mnoderangecnt - 1; n >= 0; n--) {
		if (pfn >= mnoderanges[n].mnr_pfnlo) {
			break;
		}
	}
	return (n);
#endif
}

#if !defined(__xpv)
/*
 * is_contigpage_free:
 *	returns a page list of contiguous pages. It minimally has to return
 *	minctg pages. Caller determines minctg based on the scatter-gather
 *	list length.
 *
 *	pfnp is set to the next page frame to search on return.
 */
static page_t *
is_contigpage_free(
	pfn_t *pfnp,
	pgcnt_t *pgcnt,
	pgcnt_t minctg,
	uint64_t pfnseg,
	int iolock)
{
	int	i = 0;
	pfn_t	pfn = *pfnp;
	page_t	*pp;
	page_t	*plist = NULL;

	/*
	 * fail if pfn + minctg crosses a segment boundary.
	 * Adjust for next starting pfn to begin at segment boundary.
	 */

	if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg)) {
		*pfnp = roundup(*pfnp, pfnseg + 1);
		return (NULL);
	}

	do {
retry:
		pp = page_numtopp_nolock(pfn + i);
		if ((pp == NULL) ||
		    (page_trylock(pp, SE_EXCL) == 0)) {
			(*pfnp)++;
			break;
		}
		if (page_pptonum(pp) != pfn + i) {
			page_unlock(pp);
			goto retry;
		}

		if (!(PP_ISFREE(pp))) {
			page_unlock(pp);
			(*pfnp)++;
			break;
		}

		if (!PP_ISAGED(pp)) {
			page_list_sub(pp, PG_CACHE_LIST);
			page_hashout(pp, (kmutex_t *)NULL);
		} else {
			page_list_sub(pp, PG_FREE_LIST);
		}

		if (iolock)
			page_io_lock(pp);
		page_list_concat(&plist, &pp);

		/*
		 * exit loop when pgcnt satisfied or segment boundary reached.
		 */

	} while ((++i < *pgcnt) && ((pfn + i) & pfnseg));

	*pfnp += i;		/* set to next pfn to search */

	if (i >= minctg) {
		*pgcnt -= i;
		return (plist);
	}

	/*
	 * failure: minctg not satisfied.
	 *
	 * if next request crosses segment boundary, set next pfn
	 * to search from the segment boundary.
	 */
	if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg))
		*pfnp = roundup(*pfnp, pfnseg + 1);

	/* clean up any pages already allocated */

	while (plist) {
		pp = plist;
		page_sub(&plist, pp);
		page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
		if (iolock)
			page_io_unlock(pp);
		page_unlock(pp);
	}

	return (NULL);
}
#endif	/* !__xpv */
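
/*
 * Worked example (illustrative only): for a device with
 * dma_attr_seg == 0xffff (64K segments), pfnseg is 0xf.  A call starting
 * at *pfnp == 0x10e with minctg == 4 fails the boundary check above
 * since (0x111 & 0xf) == 1 is less than (0x10e & 0xf) == 0xe, so *pfnp
 * is rounded up to 0x110 and NULL is returned; the next call scans the
 * fresh 16-page segment.
 */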

/*
 * verify that pages being returned from allocator have correct DMA attribute
 */
#ifndef DEBUG
#define	check_dma(a, b, c) (0)
#else
static void
check_dma(ddi_dma_attr_t *dma_attr, page_t *pp, int cnt)
{
	if (dma_attr == NULL)
		return;

	while (cnt-- > 0) {
		if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) <
		    dma_attr->dma_attr_addr_lo)
			panic("PFN (pp=%p) below dma_attr_addr_lo", (void *)pp);
		if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) >=
		    dma_attr->dma_attr_addr_hi)
			panic("PFN (pp=%p) above dma_attr_addr_hi", (void *)pp);
		pp = pp->p_next;
	}
}
#endif

#if !defined(__xpv)
static page_t *
page_get_contigpage(pgcnt_t *pgcnt, ddi_dma_attr_t *mattr, int iolock)
{
	pfn_t		pfn;
	int		sgllen;
	uint64_t	pfnseg;
	pgcnt_t		minctg;
	page_t		*pplist = NULL, *plist;
	uint64_t	lo, hi;
	pgcnt_t		pfnalign = 0;
	static pfn_t	startpfn;
	static pgcnt_t	lastctgcnt;
	uintptr_t	align;

	CONTIG_LOCK();

	if (mattr) {
		lo = mmu_btop((mattr->dma_attr_addr_lo + MMU_PAGEOFFSET));
		hi = mmu_btop(mattr->dma_attr_addr_hi);
		if (hi >= physmax)
			hi = physmax - 1;
		sgllen = mattr->dma_attr_sgllen;
		pfnseg = mmu_btop(mattr->dma_attr_seg);

		align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
		if (align > MMU_PAGESIZE)
			pfnalign = mmu_btop(align);

		/*
		 * in order to satisfy the request, must minimally
		 * acquire minctg contiguous pages
		 */
		minctg = howmany(*pgcnt, sgllen);

		ASSERT(hi >= lo);

		/*
		 * start from where last searched if the minctg >= lastctgcnt
		 */
		if (minctg < lastctgcnt || startpfn < lo || startpfn > hi)
			startpfn = lo;
	} else {
		hi = physmax - 1;
		lo = 0;
		sgllen = 1;
		pfnseg = mmu.highest_pfn;
		minctg = *pgcnt;

		if (minctg < lastctgcnt)
			startpfn = lo;
	}
	lastctgcnt = minctg;

	ASSERT(pfnseg + 1 >= (uint64_t)minctg);

	/* conserve 16m memory - start search above 16m when possible */
	if (hi > PFN_16M && startpfn < PFN_16M)
		startpfn = PFN_16M;

	pfn = startpfn;
	if (pfnalign)
		pfn = P2ROUNDUP(pfn, pfnalign);

	while (pfn + minctg - 1 <= hi) {

		plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
		if (plist) {
			page_list_concat(&pplist, &plist);
			sgllen--;
			/*
			 * return when contig pages no longer needed
			 */
			if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
				startpfn = pfn;
				CONTIG_UNLOCK();
				check_dma(mattr, pplist, *pgcnt);
				return (pplist);
			}
			minctg = howmany(*pgcnt, sgllen);
		}
		if (pfnalign)
			pfn = P2ROUNDUP(pfn, pfnalign);
	}

	/* cannot find contig pages in specified range */
	if (startpfn == lo) {
		CONTIG_UNLOCK();
		return (NULL);
	}

	/* did not start with lo previously */
	pfn = lo;
	if (pfnalign)
		pfn = P2ROUNDUP(pfn, pfnalign);

	/* allow search to go above startpfn */
	while (pfn < startpfn) {

		plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
		if (plist != NULL) {

			page_list_concat(&pplist, &plist);
			sgllen--;

			/*
			 * return when contig pages no longer needed
			 */
			if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
				startpfn = pfn;
				CONTIG_UNLOCK();
				check_dma(mattr, pplist, *pgcnt);
				return (pplist);
			}
			minctg = howmany(*pgcnt, sgllen);
		}
		if (pfnalign)
			pfn = P2ROUNDUP(pfn, pfnalign);
	}
	CONTIG_UNLOCK();
	return (NULL);
}
#endif	/* !__xpv */

/*
 * mnode_range_cnt() calculates the number of memory ranges for mnode and
 * memranges[]. Used to determine the size of page lists and mnoderanges.
 */
int
mnode_range_cnt(int mnode)
{
#if defined(__xpv)
	ASSERT(mnode == 0);
	return (1);
#else	/* __xpv */
	int	mri;
	int	mnrcnt = 0;

	if (mem_node_config[mnode].exists != 0) {
		mri = nranges - 1;

		/* find the memranges index below contained in mnode range */

		while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
			mri--;

		/*
		 * increment mnode range counter when memranges or mnode
		 * boundary is reached.
		 */
		while (mri >= 0 &&
		    mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
			mnrcnt++;
			if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
				mri--;
			else
				break;
		}
	}
	ASSERT(mnrcnt <= MAX_MNODE_MRANGES);
	return (mnrcnt);
#endif	/* __xpv */
}
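
/*
 * Worked example (illustrative only, assuming physmax covers the span):
 * a single mnode spanning pfns 0 through 0x17ffff (6G) intersects all
 * four default memranges (0-16M, 16M-2G, 2G-4G, above 4G), so
 * mnode_range_cnt() returns 4.  A mnode covering only 0x80000-0xfffff
 * (2G-4G) returns 1.
 */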

/*
 * mnode_range_setup() initializes mnoderanges.
 */
void
mnode_range_setup(mnoderange_t *mnoderanges)
{
	int	mnode, mri;

	for (mnode = 0; mnode < max_mem_nodes; mnode++) {
		if (mem_node_config[mnode].exists == 0)
			continue;

		mri = nranges - 1;

		while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
			mri--;

		while (mri >= 0 && mem_node_config[mnode].physmax >=
		    MEMRANGELO(mri)) {
			mnoderanges->mnr_pfnlo = MAX(MEMRANGELO(mri),
			    mem_node_config[mnode].physbase);
			mnoderanges->mnr_pfnhi = MIN(MEMRANGEHI(mri),
			    mem_node_config[mnode].physmax);
			mnoderanges->mnr_mnode = mnode;
			mnoderanges->mnr_memrange = mri;
			mnoderanges++;
			if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
				mri--;
			else
				break;
		}
	}
}
12790Sstevel@tonic-gate 
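/*
 * mtype_init() returns the mtype (mnoderanges index) at which a page
 * allocation should start searching, defaulting to the highest mtype.
 * PGI_MT_RANGE* bits are or'd into *flags to bound the downward search.
 */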
12805084Sjohnlev /*ARGSUSED*/
12815084Sjohnlev int
12825084Sjohnlev mtype_init(vnode_t *vp, caddr_t vaddr, uint_t *flags, size_t pgsz)
12835084Sjohnlev {
12845084Sjohnlev 	int mtype = mnoderangecnt - 1;
12855084Sjohnlev 
12865084Sjohnlev #if !defined(__xpv)
12875084Sjohnlev #if defined(__i386)
12885084Sjohnlev 	/*
12895084Sjohnlev 	 * set the mtype range
12905084Sjohnlev 	 * - kmem requests need to be below 4g if restricted_kmemalloc is set.
12915084Sjohnlev 	 * - for non kmem requests, set range to above 4g if memory below 4g
12925084Sjohnlev 	 * runs low.
12935084Sjohnlev 	 */
12945084Sjohnlev 	if (restricted_kmemalloc && VN_ISKAS(vp) &&
12955084Sjohnlev 	    (caddr_t)(vaddr) >= kernelheap &&
12965084Sjohnlev 	    (caddr_t)(vaddr) < ekernelheap) {
12975084Sjohnlev 		ASSERT(physmax4g);
12985084Sjohnlev 		mtype = mtype4g;
12995084Sjohnlev 		if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz),
13005084Sjohnlev 		    btop(pgsz), *flags)) {
13015084Sjohnlev 			*flags |= PGI_MT_RANGE16M;
13025084Sjohnlev 		} else {
13035084Sjohnlev 			VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
13045084Sjohnlev 			VM_STAT_COND_ADD((*flags & PG_PANIC),
13055084Sjohnlev 			    vmm_vmstats.pgpanicalloc);
13065084Sjohnlev 			*flags |= PGI_MT_RANGE0;
13075084Sjohnlev 		}
13085084Sjohnlev 		return (mtype);
13095084Sjohnlev 	}
13105084Sjohnlev #endif	/* __i386 */
13115084Sjohnlev 
13125084Sjohnlev 	if (RESTRICT4G_ALLOC) {
13135084Sjohnlev 		VM_STAT_ADD(vmm_vmstats.restrict4gcnt);
13145084Sjohnlev 		/* here only for > 4g systems */
13155084Sjohnlev 		*flags |= PGI_MT_RANGE4G;
13165084Sjohnlev 	} else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), *flags)) {
13175084Sjohnlev 		*flags |= PGI_MT_RANGE16M;
13185084Sjohnlev 	} else {
13195084Sjohnlev 		VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
13205084Sjohnlev 		VM_STAT_COND_ADD((*flags & PG_PANIC), vmm_vmstats.pgpanicalloc);
13215084Sjohnlev 		*flags |= PGI_MT_RANGE0;
13225084Sjohnlev 	}
13235084Sjohnlev #endif /* !__xpv */
13245084Sjohnlev 	return (mtype);
13255084Sjohnlev }
13265084Sjohnlev 
13275084Sjohnlev 
13285084Sjohnlev /* mtype init for page_get_replacement_page */
13295084Sjohnlev /*ARGSUSED*/
13305084Sjohnlev int
13315084Sjohnlev mtype_pgr_init(int *flags, page_t *pp, int mnode, pgcnt_t pgcnt)
13325084Sjohnlev {
13335084Sjohnlev 	int mtype = mnoderangecnt - 1;
13345084Sjohnlev #if !defined(__xpv)
13355084Sjohnlev 	if (RESTRICT16M_ALLOC(freemem, pgcnt, *flags)) {
13365084Sjohnlev 		*flags |= PGI_MT_RANGE16M;
13375084Sjohnlev 	} else {
13385084Sjohnlev 		VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
13395084Sjohnlev 		*flags |= PGI_MT_RANGE0;
13405084Sjohnlev 	}
13415084Sjohnlev #endif
13425084Sjohnlev 	return (mtype);
13435084Sjohnlev }
13445084Sjohnlev 
13450Sstevel@tonic-gate /*
13460Sstevel@tonic-gate  * Determine if the mnode range specified in mtype contains memory belonging
13470Sstevel@tonic-gate  * to memory node mnode.  If flags & PGI_MT_RANGE is set then mtype contains
13481385Skchow  * the high end of the range of indices to search, down to 0, 16m or 4g.
13490Sstevel@tonic-gate  *
13500Sstevel@tonic-gate  * Return the first matching mnode range type index; return -1 if none is found.
13510Sstevel@tonic-gate  */
13520Sstevel@tonic-gate int
13530Sstevel@tonic-gate mtype_func(int mnode, int mtype, uint_t flags)
13540Sstevel@tonic-gate {
13550Sstevel@tonic-gate 	if (flags & PGI_MT_RANGE) {
13565084Sjohnlev 		int	mtlim = 0;
13570Sstevel@tonic-gate 
13580Sstevel@tonic-gate 		if (flags & PGI_MT_NEXT)
13590Sstevel@tonic-gate 			mtype--;
13605084Sjohnlev 		if (flags & PGI_MT_RANGE4G)
13611385Skchow 			mtlim = mtype4g + 1;	/* exclude 0-4g range */
13621385Skchow 		else if (flags & PGI_MT_RANGE16M)
13631385Skchow 			mtlim = 1;		/* exclude 0-16m range */
13640Sstevel@tonic-gate 		while (mtype >= mtlim) {
13650Sstevel@tonic-gate 			if (mnoderanges[mtype].mnr_mnode == mnode)
13660Sstevel@tonic-gate 				return (mtype);
13670Sstevel@tonic-gate 			mtype--;
13680Sstevel@tonic-gate 		}
13695084Sjohnlev 	} else if (mnoderanges[mtype].mnr_mnode == mnode) {
13705084Sjohnlev 		return (mtype);
13710Sstevel@tonic-gate 	}
13720Sstevel@tonic-gate 	return (-1);
13730Sstevel@tonic-gate }
13740Sstevel@tonic-gate 
13750Sstevel@tonic-gate /*
13761373Skchow  * Update the page list max counts with the pfn range specified by the
13771373Skchow  * input parameters.  Called from add_physmem() when physical memory with
13781373Skchow  * page_t's are initially added to the page lists.
13791373Skchow  */
13801373Skchow void
13811373Skchow mtype_modify_max(pfn_t startpfn, long cnt)
13821373Skchow {
13831373Skchow 	int	mtype = 0;
13841373Skchow 	pfn_t	endpfn = startpfn + cnt, pfn;
13851373Skchow 	pgcnt_t	inc;
13861373Skchow 
13871373Skchow 	ASSERT(cnt > 0);
13881373Skchow 
13895084Sjohnlev 	if (!physmax4g)
13905084Sjohnlev 		return;
13915084Sjohnlev 
13921373Skchow 	for (pfn = startpfn; pfn < endpfn; ) {
13931373Skchow 		if (pfn <= mnoderanges[mtype].mnr_pfnhi) {
13941373Skchow 			if (endpfn < mnoderanges[mtype].mnr_pfnhi) {
13951373Skchow 				inc = endpfn - pfn;
13961373Skchow 			} else {
13971373Skchow 				inc = mnoderanges[mtype].mnr_pfnhi - pfn + 1;
13981373Skchow 			}
13995084Sjohnlev 			if (mtype <= mtype4g)
14001373Skchow 				maxmem4g += inc;
14011373Skchow 			pfn += inc;
14021373Skchow 		}
14031373Skchow 		mtype++;
14041373Skchow 		ASSERT(mtype < mnoderangecnt || pfn >= endpfn);
14051373Skchow 	}
14061373Skchow }
14071373Skchow 
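/*
 * Return the memrange index backing the given mtype.
 */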
14085084Sjohnlev int
14095084Sjohnlev mtype_2_mrange(int mtype)
14105084Sjohnlev {
14115084Sjohnlev 	return (mnoderanges[mtype].mnr_memrange);
14125084Sjohnlev }
14135084Sjohnlev 
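/*
 * Return, via *pfnlo and *pfnhi, the inclusive pfn range covered by the
 * given (mnode, mtype) pair.
 */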
14145084Sjohnlev void
14155084Sjohnlev mnodetype_2_pfn(int mnode, int mtype, pfn_t *pfnlo, pfn_t *pfnhi)
14165084Sjohnlev {
14175084Sjohnlev 	ASSERT(mnoderanges[mtype].mnr_mnode == mnode);
14185084Sjohnlev 	*pfnlo = mnoderanges[mtype].mnr_pfnlo;
14195084Sjohnlev 	*pfnhi = mnoderanges[mtype].mnr_pfnhi;
14205084Sjohnlev }
14215084Sjohnlev 
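/*
 * Add the space needed for the DEBUG-only page list counters (one per
 * mtype, page size and color) to ctrs_sz.
 */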
14225084Sjohnlev size_t
14235084Sjohnlev plcnt_sz(size_t ctrs_sz)
14245084Sjohnlev {
14255084Sjohnlev #ifdef DEBUG
14265084Sjohnlev 	int	szc, colors;
14275084Sjohnlev 
14285084Sjohnlev 	ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) * mmu_page_sizes;
14295084Sjohnlev 	for (szc = 0; szc < mmu_page_sizes; szc++) {
14305084Sjohnlev 		colors = page_get_pagecolors(szc);
14315084Sjohnlev 		ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors;
14325084Sjohnlev 	}
14335084Sjohnlev #endif
14345084Sjohnlev 	return (ctrs_sz);
14355084Sjohnlev }
14365084Sjohnlev 
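/*
 * Carve the DEBUG-only counter arrays sized by plcnt_sz() out of the
 * memory at addr; returns the first unused address.
 */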
14375084Sjohnlev caddr_t
14385084Sjohnlev plcnt_init(caddr_t addr)
14395084Sjohnlev {
14405084Sjohnlev #ifdef DEBUG
14415084Sjohnlev 	int	mt, szc, colors;
14425084Sjohnlev 
14435084Sjohnlev 	for (mt = 0; mt < mnoderangecnt; mt++) {
14445084Sjohnlev 		mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr;
14455084Sjohnlev 		addr += (sizeof (struct mnr_mts) * mmu_page_sizes);
14465084Sjohnlev 		for (szc = 0; szc < mmu_page_sizes; szc++) {
14475084Sjohnlev 			colors = page_get_pagecolors(szc);
14485084Sjohnlev 			mnoderanges[mt].mnr_mts[szc].mnr_mts_colors = colors;
14495084Sjohnlev 			mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt =
14505084Sjohnlev 			    (pgcnt_t *)addr;
14515084Sjohnlev 			addr += (sizeof (pgcnt_t) * colors);
14525084Sjohnlev 		}
14535084Sjohnlev 	}
14545084Sjohnlev #endif
14555084Sjohnlev 	return (addr);
14565084Sjohnlev }
14575084Sjohnlev 
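/*
 * Adjust the page list counters for mtype by cnt, which may be negative.
 * PG_CACHE_LIST selects the cache list counter; otherwise the free list
 * counter for the page size is updated.  freemem4g is kept in sync for
 * mtypes at or below the 4g boundary.
 */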
14585084Sjohnlev void
14595084Sjohnlev plcnt_inc_dec(page_t *pp, int mtype, int szc, long cnt, int flags)
14605084Sjohnlev {
14615084Sjohnlev #ifdef DEBUG
14625084Sjohnlev 	int	bin = PP_2_BIN(pp);
14635084Sjohnlev 
14645084Sjohnlev 	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mts_pgcnt, cnt);
14655084Sjohnlev 	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mtsc_pgcnt[bin],
14665084Sjohnlev 	    cnt);
14675084Sjohnlev #endif
14685084Sjohnlev 	ASSERT(mtype == PP_2_MTYPE(pp));
14695084Sjohnlev 	if (physmax4g && mtype <= mtype4g)
14705084Sjohnlev 		atomic_add_long(&freemem4g, cnt);
14715084Sjohnlev 	if (flags & PG_CACHE_LIST)
14725084Sjohnlev 		atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt, cnt);
14735084Sjohnlev 	else
14745466Skchow 		atomic_add_long(&mnoderanges[mtype].mnr_mt_flpgcnt[szc], cnt);
14755466Skchow 	atomic_add_long(&mnoderanges[mtype].mnr_mt_totcnt, cnt);
14765084Sjohnlev }
14775084Sjohnlev 
14781373Skchow /*
1479414Skchow  * Returns the free page count for mnode
1480414Skchow  */
1481414Skchow int
1482414Skchow mnode_pgcnt(int mnode)
1483414Skchow {
1484414Skchow 	int	mtype = mnoderangecnt - 1;
1485414Skchow 	int	flags = PGI_MT_RANGE0;
1486414Skchow 	pgcnt_t	pgcnt = 0;
1487414Skchow 
1488414Skchow 	mtype = mtype_func(mnode, mtype, flags);
1489414Skchow 
1490414Skchow 	while (mtype != -1) {
14911385Skchow 		pgcnt += MTYPE_FREEMEM(mtype);
1492414Skchow 		mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);
1493414Skchow 	}
1494414Skchow 	return (pgcnt);
1495414Skchow }
1496414Skchow 
1497414Skchow /*
14980Sstevel@tonic-gate  * Initialize page coloring variables based on the l2 cache parameters.
14990Sstevel@tonic-gate  * Calculate and return memory needed for page coloring data structures.
15000Sstevel@tonic-gate  */
15010Sstevel@tonic-gate size_t
15020Sstevel@tonic-gate page_coloring_init(uint_t l2_sz, int l2_linesz, int l2_assoc)
15030Sstevel@tonic-gate {
15040Sstevel@tonic-gate 	size_t	colorsz = 0;
15050Sstevel@tonic-gate 	int	i;
15060Sstevel@tonic-gate 	int	colors;
15070Sstevel@tonic-gate 
15085084Sjohnlev #if defined(__xpv)
15095084Sjohnlev 	/*
15105084Sjohnlev 	 * Hypervisor domains currently don't have any concept of NUMA.
15115084Sjohnlev 	 * Hence we'll act like there is only 1 memrange.
15125084Sjohnlev 	 */
15135084Sjohnlev 	i = memrange_num(1);
15145084Sjohnlev #else /* !__xpv */
15150Sstevel@tonic-gate 	/*
15160Sstevel@tonic-gate 	 * Reduce the memory range lists if we don't have large amounts
15170Sstevel@tonic-gate 	 * of memory. This avoids searching known empty free lists.
15180Sstevel@tonic-gate 	 */
15190Sstevel@tonic-gate 	i = memrange_num(physmax);
15200Sstevel@tonic-gate #if defined(__i386)
15210Sstevel@tonic-gate 	if (i > 0)
15220Sstevel@tonic-gate 		restricted_kmemalloc = 0;
15230Sstevel@tonic-gate #endif
15240Sstevel@tonic-gate 	/* physmax greater than 4g */
15250Sstevel@tonic-gate 	if (i == 0)
15260Sstevel@tonic-gate 		physmax4g = 1;
15275084Sjohnlev #endif /* !__xpv */
15285084Sjohnlev 	memranges += i;
15295084Sjohnlev 	nranges -= i;
15300Sstevel@tonic-gate 
15315349Skchow 	ASSERT(mmu_page_sizes <= MMU_PAGE_SIZES);
15325349Skchow 
15330Sstevel@tonic-gate 	ASSERT(ISP2(l2_linesz));
15340Sstevel@tonic-gate 	ASSERT(l2_sz > MMU_PAGESIZE);
15350Sstevel@tonic-gate 
15360Sstevel@tonic-gate 	/* l2_assoc is 0 for fully associative l2 cache */
15370Sstevel@tonic-gate 	if (l2_assoc)
15380Sstevel@tonic-gate 		l2_colors = MAX(1, l2_sz / (l2_assoc * MMU_PAGESIZE));
15390Sstevel@tonic-gate 	else
15400Sstevel@tonic-gate 		l2_colors = 1;
15410Sstevel@tonic-gate 
15427069Svd224797 	ASSERT(ISP2(l2_colors));
15437069Svd224797 
15440Sstevel@tonic-gate 	/* for scalability, configure at least PAGE_COLORS_MIN color bins */
15450Sstevel@tonic-gate 	page_colors = MAX(l2_colors, PAGE_COLORS_MIN);
15460Sstevel@tonic-gate 
15470Sstevel@tonic-gate 	/*
15480Sstevel@tonic-gate 	 * cpu_page_colors is non-zero when a page color may be spread across
15490Sstevel@tonic-gate 	 * multiple bins.
15500Sstevel@tonic-gate 	 */
15510Sstevel@tonic-gate 	if (l2_colors < page_colors)
15520Sstevel@tonic-gate 		cpu_page_colors = l2_colors;
15530Sstevel@tonic-gate 
15540Sstevel@tonic-gate 	ASSERT(ISP2(page_colors));
15550Sstevel@tonic-gate 
15560Sstevel@tonic-gate 	page_colors_mask = page_colors - 1;
15570Sstevel@tonic-gate 
15580Sstevel@tonic-gate 	ASSERT(ISP2(CPUSETSIZE()));
15590Sstevel@tonic-gate 	page_coloring_shift = lowbit(CPUSETSIZE());
15600Sstevel@tonic-gate 
15612961Sdp78419 	/* initialize number of colors per page size */
15622961Sdp78419 	for (i = 0; i <= mmu.max_page_level; i++) {
15632961Sdp78419 		hw_page_array[i].hp_size = LEVEL_SIZE(i);
15642961Sdp78419 		hw_page_array[i].hp_shift = LEVEL_SHIFT(i);
15652961Sdp78419 		hw_page_array[i].hp_pgcnt = LEVEL_SIZE(i) >> LEVEL_SHIFT(0);
15662961Sdp78419 		hw_page_array[i].hp_colors = (page_colors_mask >>
15672961Sdp78419 		    (hw_page_array[i].hp_shift - hw_page_array[0].hp_shift))
15682961Sdp78419 		    + 1;
15693717Sdp78419 		colorequivszc[i] = 0;
15702961Sdp78419 	}
15712961Sdp78419 
15722961Sdp78419 	/*
15732961Sdp78419 	 * The value of cpu_page_colors determines if additional color bins
15742961Sdp78419 	 * need to be checked for a particular color in the page_get routines.
15752961Sdp78419 	 */
15762961Sdp78419 	if (cpu_page_colors != 0) {
15772961Sdp78419 
15782961Sdp78419 		int a = lowbit(page_colors) - lowbit(cpu_page_colors);
15792961Sdp78419 		ASSERT(a > 0);
15802961Sdp78419 		ASSERT(a < 16);
15812961Sdp78419 
15822961Sdp78419 		for (i = 0; i <= mmu.max_page_level; i++) {
15832961Sdp78419 			if ((colors = hw_page_array[i].hp_colors) <= 1) {
15842961Sdp78419 				colorequivszc[i] = 0;
15852961Sdp78419 				continue;
15862961Sdp78419 			}
15872961Sdp78419 			while ((colors >> a) == 0)
15882961Sdp78419 				a--;
15892961Sdp78419 			ASSERT(a >= 0);
15902961Sdp78419 
15912961Sdp78419 			/* upper 4 bits encode the color equiv mask */
15922961Sdp78419 			colorequivszc[i] = (a << 4);
15932961Sdp78419 		}
15942961Sdp78419 	}
15952961Sdp78419 
15965084Sjohnlev 	/* factor in colorequiv to check additional 'equivalent' bins. */
15975084Sjohnlev 	if (colorequiv > 1) {
15985084Sjohnlev 
15995084Sjohnlev 		int a = lowbit(colorequiv) - 1;
16005084Sjohnlev 		if (a > 15)
16015084Sjohnlev 			a = 15;
16025084Sjohnlev 
16035084Sjohnlev 		for (i = 0; i <= mmu.max_page_level; i++) {
16045084Sjohnlev 			if ((colors = hw_page_array[i].hp_colors) <= 1) {
16055084Sjohnlev 				continue;
16065084Sjohnlev 			}
16075084Sjohnlev 			while ((colors >> a) == 0)
16085084Sjohnlev 				a--;
16095084Sjohnlev 			if ((a << 4) > colorequivszc[i]) {
16105084Sjohnlev 				colorequivszc[i] = (a << 4);
16115084Sjohnlev 			}
16125084Sjohnlev 		}
16135084Sjohnlev 	}
16145084Sjohnlev 
16150Sstevel@tonic-gate 	/* size for mnoderanges */
16162961Sdp78419 	for (mnoderangecnt = 0, i = 0; i < max_mem_nodes; i++)
16172961Sdp78419 		mnoderangecnt += mnode_range_cnt(i);
16180Sstevel@tonic-gate 	colorsz = mnoderangecnt * sizeof (mnoderange_t);
16190Sstevel@tonic-gate 
16200Sstevel@tonic-gate 	/* size for fpc_mutex and cpc_mutex */
16210Sstevel@tonic-gate 	colorsz += (2 * max_mem_nodes * sizeof (kmutex_t) * NPC_MUTEX);
16220Sstevel@tonic-gate 
16230Sstevel@tonic-gate 	/* size of page_freelists */
16240Sstevel@tonic-gate 	colorsz += mnoderangecnt * sizeof (page_t ***);
16250Sstevel@tonic-gate 	colorsz += mnoderangecnt * mmu_page_sizes * sizeof (page_t **);
16260Sstevel@tonic-gate 
16270Sstevel@tonic-gate 	for (i = 0; i < mmu_page_sizes; i++) {
16280Sstevel@tonic-gate 		colors = page_get_pagecolors(i);
16290Sstevel@tonic-gate 		colorsz += mnoderangecnt * colors * sizeof (page_t *);
16300Sstevel@tonic-gate 	}
16310Sstevel@tonic-gate 
16320Sstevel@tonic-gate 	/* size of page_cachelists */
16330Sstevel@tonic-gate 	colorsz += mnoderangecnt * sizeof (page_t **);
16340Sstevel@tonic-gate 	colorsz += mnoderangecnt * page_colors * sizeof (page_t *);
16350Sstevel@tonic-gate 
16360Sstevel@tonic-gate 	return (colorsz);
16370Sstevel@tonic-gate }
16380Sstevel@tonic-gate 
16390Sstevel@tonic-gate /*
16400Sstevel@tonic-gate  * Called once at startup to configure page_coloring data structures and
16410Sstevel@tonic-gate  * do the 1st page_free()/page_freelist_add().
16420Sstevel@tonic-gate  */
16430Sstevel@tonic-gate void
16440Sstevel@tonic-gate page_coloring_setup(caddr_t pcmemaddr)
16450Sstevel@tonic-gate {
16460Sstevel@tonic-gate 	int	i;
16470Sstevel@tonic-gate 	int	j;
16480Sstevel@tonic-gate 	int	k;
16490Sstevel@tonic-gate 	caddr_t	addr;
16500Sstevel@tonic-gate 	int	colors;
16510Sstevel@tonic-gate 
16520Sstevel@tonic-gate 	/*
16530Sstevel@tonic-gate 	 * do page coloring setup
16540Sstevel@tonic-gate 	 */
16550Sstevel@tonic-gate 	addr = pcmemaddr;
16560Sstevel@tonic-gate 
16570Sstevel@tonic-gate 	mnoderanges = (mnoderange_t *)addr;
16580Sstevel@tonic-gate 	addr += (mnoderangecnt * sizeof (mnoderange_t));
16590Sstevel@tonic-gate 
16600Sstevel@tonic-gate 	mnode_range_setup(mnoderanges);
16610Sstevel@tonic-gate 
16620Sstevel@tonic-gate 	if (physmax4g)
16630Sstevel@tonic-gate 		mtype4g = pfn_2_mtype(0xfffff);
16640Sstevel@tonic-gate 
16650Sstevel@tonic-gate 	for (k = 0; k < NPC_MUTEX; k++) {
16660Sstevel@tonic-gate 		fpc_mutex[k] = (kmutex_t *)addr;
16670Sstevel@tonic-gate 		addr += (max_mem_nodes * sizeof (kmutex_t));
16680Sstevel@tonic-gate 	}
16690Sstevel@tonic-gate 	for (k = 0; k < NPC_MUTEX; k++) {
16700Sstevel@tonic-gate 		cpc_mutex[k] = (kmutex_t *)addr;
16710Sstevel@tonic-gate 		addr += (max_mem_nodes * sizeof (kmutex_t));
16720Sstevel@tonic-gate 	}
16730Sstevel@tonic-gate 	page_freelists = (page_t ****)addr;
16740Sstevel@tonic-gate 	addr += (mnoderangecnt * sizeof (page_t ***));
16750Sstevel@tonic-gate 
16760Sstevel@tonic-gate 	page_cachelists = (page_t ***)addr;
16770Sstevel@tonic-gate 	addr += (mnoderangecnt * sizeof (page_t **));
16780Sstevel@tonic-gate 
16790Sstevel@tonic-gate 	for (i = 0; i < mnoderangecnt; i++) {
16800Sstevel@tonic-gate 		page_freelists[i] = (page_t ***)addr;
16810Sstevel@tonic-gate 		addr += (mmu_page_sizes * sizeof (page_t **));
16820Sstevel@tonic-gate 
16830Sstevel@tonic-gate 		for (j = 0; j < mmu_page_sizes; j++) {
16840Sstevel@tonic-gate 			colors = page_get_pagecolors(j);
16850Sstevel@tonic-gate 			page_freelists[i][j] = (page_t **)addr;
16860Sstevel@tonic-gate 			addr += (colors * sizeof (page_t *));
16870Sstevel@tonic-gate 		}
16880Sstevel@tonic-gate 		page_cachelists[i] = (page_t **)addr;
16890Sstevel@tonic-gate 		addr += (page_colors * sizeof (page_t *));
16900Sstevel@tonic-gate 	}
16910Sstevel@tonic-gate }
16920Sstevel@tonic-gate 
16935084Sjohnlev #if defined(__xpv)
16945084Sjohnlev /*
16955084Sjohnlev  * Give back 10% of the io_pool pages to the free list.
16965084Sjohnlev  * Don't shrink the pool below some absolute minimum.
16975084Sjohnlev  */
16985084Sjohnlev static void
16995084Sjohnlev page_io_pool_shrink()
17005084Sjohnlev {
17015084Sjohnlev 	int retcnt;
17025084Sjohnlev 	page_t *pp, *pp_first, *pp_last, **curpool;
17035084Sjohnlev 	mfn_t mfn;
17045084Sjohnlev 	int bothpools = 0;
17055084Sjohnlev 
17065084Sjohnlev 	mutex_enter(&io_pool_lock);
17075084Sjohnlev 	io_pool_shrink_attempts++;	/* should be a kstat? */
17085084Sjohnlev 	retcnt = io_pool_cnt / 10;
17095084Sjohnlev 	if (io_pool_cnt - retcnt < io_pool_cnt_min)
17105084Sjohnlev 		retcnt = io_pool_cnt - io_pool_cnt_min;
17115084Sjohnlev 	if (retcnt <= 0)
17125084Sjohnlev 		goto done;
17135084Sjohnlev 	io_pool_shrinks++;	/* should be a kstat? */
17145084Sjohnlev 	curpool = &io_pool_4g;
17155084Sjohnlev domore:
17165084Sjohnlev 	/*
17175084Sjohnlev 	 * Loop through, taking pages from the end of the list
17185084Sjohnlev 	 * (highest mfns), until the amount to return is reached.
17195084Sjohnlev 	 */
17205084Sjohnlev 	for (pp = *curpool; pp && retcnt > 0; ) {
17215084Sjohnlev 		pp_first = pp_last = pp->p_prev;
17225084Sjohnlev 		if (pp_first == *curpool)
17235084Sjohnlev 			break;
17245084Sjohnlev 		retcnt--;
17255084Sjohnlev 		io_pool_cnt--;
17265084Sjohnlev 		page_io_pool_sub(curpool, pp_first, pp_last);
17275084Sjohnlev 		if ((mfn = pfn_to_mfn(pp->p_pagenum)) < start_mfn)
17285084Sjohnlev 			start_mfn = mfn;
17295084Sjohnlev 		page_free(pp_first, 1);
17305084Sjohnlev 		pp = *curpool;
17315084Sjohnlev 	}
17325084Sjohnlev 	if (retcnt != 0 && !bothpools) {
17335084Sjohnlev 		/*
17345084Sjohnlev 		 * If not enough were found in the less constrained pool,
17355084Sjohnlev 		 * try the more constrained one.
17365084Sjohnlev 		 */
17375084Sjohnlev 		curpool = &io_pool_16m;
17385084Sjohnlev 		bothpools = 1;
17395084Sjohnlev 		goto domore;
17405084Sjohnlev 	}
17415084Sjohnlev done:
17425084Sjohnlev 	mutex_exit(&io_pool_lock);
17435084Sjohnlev }
17445084Sjohnlev 
17455084Sjohnlev #endif	/* __xpv */
17465084Sjohnlev 
17475084Sjohnlev uint_t
17485084Sjohnlev page_create_update_flags_x86(uint_t flags)
17495084Sjohnlev {
17505084Sjohnlev #if defined(__xpv)
17515084Sjohnlev 	/*
17525084Sjohnlev 	 * Check whether this is an urgent allocation and free pages are depleted.
17535084Sjohnlev 	 */
17545084Sjohnlev 	if (!(flags & PG_WAIT) && freemem < desfree)
17555084Sjohnlev 		page_io_pool_shrink();
17565084Sjohnlev #else /* !__xpv */
17575084Sjohnlev 	/*
17585084Sjohnlev 	 * page_create_get_something may call this because 4g memory may be
17595084Sjohnlev 	 * depleted. Set flags to allow for relocation of base page below
17605084Sjohnlev 	 * 4g if necessary.
17615084Sjohnlev 	 */
17625084Sjohnlev 	if (physmax4g)
17635084Sjohnlev 		flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI);
17645084Sjohnlev #endif /* __xpv */
17655084Sjohnlev 	return (flags);
17665084Sjohnlev }
17675084Sjohnlev 
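/*
 * Return the page color for a buf; x86 does not color I/O buffers, so
 * this is always 0.
 */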
17680Sstevel@tonic-gate /*ARGSUSED*/
17690Sstevel@tonic-gate int
17700Sstevel@tonic-gate bp_color(struct buf *bp)
17710Sstevel@tonic-gate {
17720Sstevel@tonic-gate 	return (0);
17730Sstevel@tonic-gate }
17740Sstevel@tonic-gate 
17755084Sjohnlev #if defined(__xpv)
17765084Sjohnlev 
17775084Sjohnlev /*
17785084Sjohnlev  * Take pages out of an io_pool
17795084Sjohnlev  */
17805084Sjohnlev static void
17815084Sjohnlev page_io_pool_sub(page_t **poolp, page_t *pp_first, page_t *pp_last)
17825084Sjohnlev {
17835084Sjohnlev 	if (*poolp == pp_first) {
17845084Sjohnlev 		*poolp = pp_last->p_next;
17855084Sjohnlev 		if (*poolp == pp_first)
17865084Sjohnlev 			*poolp = NULL;
17875084Sjohnlev 	}
17885084Sjohnlev 	pp_first->p_prev->p_next = pp_last->p_next;
17895084Sjohnlev 	pp_last->p_next->p_prev = pp_first->p_prev;
17905084Sjohnlev 	pp_first->p_prev = pp_last;
17915084Sjohnlev 	pp_last->p_next = pp_first;
17925084Sjohnlev }
17935084Sjohnlev 
17945084Sjohnlev /*
17955084Sjohnlev  * Put a page on the io_pool list. The list is ordered by increasing MFN.
17965084Sjohnlev  */
17975084Sjohnlev static void
17985084Sjohnlev page_io_pool_add(page_t **poolp, page_t *pp)
17995084Sjohnlev {
18005084Sjohnlev 	page_t	*look;
18015084Sjohnlev 	mfn_t	mfn = mfn_list[pp->p_pagenum];
18025084Sjohnlev 
18035084Sjohnlev 	if (*poolp == NULL) {
18045084Sjohnlev 		*poolp = pp;
18055084Sjohnlev 		pp->p_next = pp;
18065084Sjohnlev 		pp->p_prev = pp;
18075084Sjohnlev 		return;
18085084Sjohnlev 	}
18095084Sjohnlev 
18105084Sjohnlev 	/*
18115084Sjohnlev 	 * Since we try to take pages from the high end of the pool,
18125084Sjohnlev 	 * chances are good that the pages to be put on the list will
18135084Sjohnlev 	 * go at or near the end of the list, so start at the end and
18145084Sjohnlev 	 * work backwards.
18155084Sjohnlev 	 */
18165084Sjohnlev 	look = (*poolp)->p_prev;
18175084Sjohnlev 	while (mfn < mfn_list[look->p_pagenum]) {
18185084Sjohnlev 		look = look->p_prev;
18195084Sjohnlev 		if (look == (*poolp)->p_prev)
18205084Sjohnlev 			break; /* backed all the way to front of list */
18215084Sjohnlev 	}
18225084Sjohnlev 
18235084Sjohnlev 	/* insert after look */
18245084Sjohnlev 	pp->p_prev = look;
18255084Sjohnlev 	pp->p_next = look->p_next;
18265084Sjohnlev 	pp->p_next->p_prev = pp;
18275084Sjohnlev 	look->p_next = pp;
18285084Sjohnlev 	if (mfn < mfn_list[(*poolp)->p_pagenum]) {
18295084Sjohnlev 		/*
18305084Sjohnlev 		 * We inserted a new first list element;
18315084Sjohnlev 		 * adjust the pool pointer to the newly inserted element.
18325084Sjohnlev 		 */
18335084Sjohnlev 		*poolp = pp;
18345084Sjohnlev 	}
18355084Sjohnlev }
18365084Sjohnlev 
18375084Sjohnlev /*
18385084Sjohnlev  * Add a page to the io_pool.  Setting the force flag will force the page
18395084Sjohnlev  * into the io_pool no matter what.
18405084Sjohnlev  */
18415084Sjohnlev static void
18425084Sjohnlev add_page_to_pool(page_t *pp, int force)
18435084Sjohnlev {
18445084Sjohnlev 	page_t *highest;
18455084Sjohnlev 	page_t *freep = NULL;
18465084Sjohnlev 
18475084Sjohnlev 	mutex_enter(&io_pool_lock);
18485084Sjohnlev 	/*
18495084Sjohnlev 	 * Always keep the scarce low memory pages
18505084Sjohnlev 	 */
18515084Sjohnlev 	if (mfn_list[pp->p_pagenum] < PFN_16MEG) {
18525084Sjohnlev 		++io_pool_cnt;
18535084Sjohnlev 		page_io_pool_add(&io_pool_16m, pp);
18545084Sjohnlev 		goto done;
18555084Sjohnlev 	}
18566159Ssmaybe 	if (io_pool_cnt < io_pool_cnt_max || force || io_pool_4g == NULL) {
18575084Sjohnlev 		++io_pool_cnt;
18585084Sjohnlev 		page_io_pool_add(&io_pool_4g, pp);
18595084Sjohnlev 	} else {
18605084Sjohnlev 		highest = io_pool_4g->p_prev;
18615084Sjohnlev 		if (mfn_list[pp->p_pagenum] < mfn_list[highest->p_pagenum]) {
18625084Sjohnlev 			page_io_pool_sub(&io_pool_4g, highest, highest);
18635084Sjohnlev 			page_io_pool_add(&io_pool_4g, pp);
18645084Sjohnlev 			freep = highest;
18655084Sjohnlev 		} else {
18665084Sjohnlev 			freep = pp;
18675084Sjohnlev 		}
18685084Sjohnlev 	}
18695084Sjohnlev done:
18705084Sjohnlev 	mutex_exit(&io_pool_lock);
18715084Sjohnlev 	if (freep)
18725084Sjohnlev 		page_free(freep, 1);
18735084Sjohnlev }
18745084Sjohnlev 
18755084Sjohnlev 
18765084Sjohnlev int contig_pfn_cnt;	/* no of pfns in the contig pfn list */
18775084Sjohnlev int contig_pfn_max;	/* capacity of the contig pfn list */
18785084Sjohnlev int next_alloc_pfn;	/* next position in list to start a contig search */
18795084Sjohnlev int contig_pfnlist_updates;	/* pfn list update count */
18805084Sjohnlev int contig_pfnlist_builds;	/* how many times have we (re)built list */
18815084Sjohnlev int contig_pfnlist_buildfailed;	/* how many times has list build failed */
18825084Sjohnlev int create_contig_pending;	/* nonzero means taskq creating contig list */
18835084Sjohnlev pfn_t *contig_pfn_list = NULL;	/* list of contig pfns in ascending mfn order */
18845084Sjohnlev 
18855084Sjohnlev /*
18865084Sjohnlev  * Function to use in sorting a list of pfns by their underlying mfns.
18875084Sjohnlev  */
18885084Sjohnlev static int
18895084Sjohnlev mfn_compare(const void *pfnp1, const void *pfnp2)
18905084Sjohnlev {
18915084Sjohnlev 	mfn_t mfn1 = mfn_list[*(pfn_t *)pfnp1];
18925084Sjohnlev 	mfn_t mfn2 = mfn_list[*(pfn_t *)pfnp2];
18935084Sjohnlev 
18945084Sjohnlev 	if (mfn1 > mfn2)
18955084Sjohnlev 		return (1);
18965084Sjohnlev 	if (mfn1 < mfn2)
18975084Sjohnlev 		return (-1);
18985084Sjohnlev 	return (0);
18995084Sjohnlev }
19005084Sjohnlev 
19015084Sjohnlev /*
19025084Sjohnlev  * Compact the contig_pfn_list by tossing all the non-contiguous
19035084Sjohnlev  * elements from the list.
19045084Sjohnlev  */
19055084Sjohnlev static void
19065084Sjohnlev compact_contig_pfn_list(void)
19075084Sjohnlev {
19085084Sjohnlev 	pfn_t pfn, lapfn, prev_lapfn;
19095084Sjohnlev 	mfn_t mfn;
19105084Sjohnlev 	int i, newcnt = 0;
19115084Sjohnlev 
19125084Sjohnlev 	prev_lapfn = 0;
19135084Sjohnlev 	for (i = 0; i < contig_pfn_cnt - 1; i++) {
19145084Sjohnlev 		pfn = contig_pfn_list[i];
19155084Sjohnlev 		lapfn = contig_pfn_list[i + 1];
19165084Sjohnlev 		mfn = mfn_list[pfn];
19175084Sjohnlev 		/*
19185084Sjohnlev 		 * See if next pfn is for a contig mfn
19195084Sjohnlev 		 */
19205084Sjohnlev 		if (mfn_list[lapfn] != mfn + 1)
19215084Sjohnlev 			continue;
19225084Sjohnlev 		/*
19235084Sjohnlev 		 * pfn and lookahead are both put in list
19245084Sjohnlev 		 * unless pfn is the previous lookahead.
19255084Sjohnlev 		 */
19265084Sjohnlev 		if (pfn != prev_lapfn)
19275084Sjohnlev 			contig_pfn_list[newcnt++] = pfn;
19285084Sjohnlev 		contig_pfn_list[newcnt++] = lapfn;
19295084Sjohnlev 		prev_lapfn = lapfn;
19305084Sjohnlev 	}
19315084Sjohnlev 	for (i = newcnt; i < contig_pfn_cnt; i++)
19325084Sjohnlev 		contig_pfn_list[i] = 0;
19335084Sjohnlev 	contig_pfn_cnt = newcnt;
19345084Sjohnlev }
19355084Sjohnlev 
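/*
 * Taskq callback used to build the contig pfn list when the original
 * caller could not sleep for memory.
 */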
19365084Sjohnlev /*ARGSUSED*/
19375084Sjohnlev static void
19385084Sjohnlev call_create_contiglist(void *arg)
19395084Sjohnlev {
19405084Sjohnlev 	(void) create_contig_pfnlist(PG_WAIT);
19415084Sjohnlev }
19425084Sjohnlev 
19435084Sjohnlev /*
19445084Sjohnlev  * Create list of freelist pfns that have underlying
19455084Sjohnlev  * contiguous mfns.  The list is kept in ascending mfn order.
19465084Sjohnlev  * Returns 1 if the list was created, 0 otherwise.
19475084Sjohnlev  */
19485084Sjohnlev static int
19495084Sjohnlev create_contig_pfnlist(uint_t flags)
19505084Sjohnlev {
19515084Sjohnlev 	pfn_t pfn;
19525084Sjohnlev 	page_t *pp;
19535529Ssmaybe 	int ret = 1;
19545529Ssmaybe 
19555529Ssmaybe 	mutex_enter(&contig_list_lock);
19565084Sjohnlev 	if (contig_pfn_list != NULL)
19575529Ssmaybe 		goto out;
19585084Sjohnlev 	contig_pfn_max = freemem + (freemem / 10);
19595084Sjohnlev 	contig_pfn_list = kmem_zalloc(contig_pfn_max * sizeof (pfn_t),
19605084Sjohnlev 	    (flags & PG_WAIT) ? KM_SLEEP : KM_NOSLEEP);
19615084Sjohnlev 	if (contig_pfn_list == NULL) {
19625084Sjohnlev 		/*
19635084Sjohnlev 		 * If we could not create the contig list (because
19645084Sjohnlev 		 * we could not sleep for memory), dispatch a taskq that
19655084Sjohnlev 		 * can sleep to get the memory.
19665084Sjohnlev 		 */
19675084Sjohnlev 		if (!create_contig_pending) {
19685084Sjohnlev 			if (taskq_dispatch(system_taskq, call_create_contiglist,
19695084Sjohnlev 			    NULL, TQ_NOSLEEP) != NULL)
19705084Sjohnlev 				create_contig_pending = 1;
19715084Sjohnlev 		}
19725084Sjohnlev 		contig_pfnlist_buildfailed++;	/* count list build failures */
19735529Ssmaybe 		ret = 0;
19745529Ssmaybe 		goto out;
19755084Sjohnlev 	}
19765529Ssmaybe 	create_contig_pending = 0;
19775084Sjohnlev 	ASSERT(contig_pfn_cnt == 0);
19785084Sjohnlev 	for (pfn = 0; pfn < mfn_count; pfn++) {
19795084Sjohnlev 		pp = page_numtopp_nolock(pfn);
19805084Sjohnlev 		if (pp == NULL || !PP_ISFREE(pp))
19815084Sjohnlev 			continue;
19825084Sjohnlev 		contig_pfn_list[contig_pfn_cnt] = pfn;
19835084Sjohnlev 		if (++contig_pfn_cnt == contig_pfn_max)
19845084Sjohnlev 			break;
19855084Sjohnlev 	}
19865084Sjohnlev 	qsort(contig_pfn_list, contig_pfn_cnt, sizeof (pfn_t), mfn_compare);
19875084Sjohnlev 	compact_contig_pfn_list();
19885084Sjohnlev 	/*
19895084Sjohnlev 	 * Make sure next search of the newly created contiguous pfn
19905084Sjohnlev 	 * list starts at the beginning of the list.
19915084Sjohnlev 	 */
19925084Sjohnlev 	next_alloc_pfn = 0;
19935084Sjohnlev 	contig_pfnlist_builds++;	/* count list builds */
19945529Ssmaybe out:
19955529Ssmaybe 	mutex_exit(&contig_list_lock);
19965529Ssmaybe 	return (ret);
19975084Sjohnlev }
19985084Sjohnlev 
19995084Sjohnlev 
20005084Sjohnlev /*
20015084Sjohnlev  * Toss the current contig pfnlist.  Someone is about to do a massive
20025084Sjohnlev  * update to pfn<->mfn mappings.  So we have them destroy the list and lock
20035084Sjohnlev  * it till they are done with their update.
20045084Sjohnlev  */
20055084Sjohnlev void
20065084Sjohnlev clear_and_lock_contig_pfnlist()
20075084Sjohnlev {
20085084Sjohnlev 	pfn_t *listp = NULL;
20095084Sjohnlev 	size_t listsize;
20105084Sjohnlev 
20115529Ssmaybe 	mutex_enter(&contig_list_lock);
20125084Sjohnlev 	if (contig_pfn_list != NULL) {
20135084Sjohnlev 		listp = contig_pfn_list;
20145084Sjohnlev 		listsize = contig_pfn_max * sizeof (pfn_t);
20155084Sjohnlev 		contig_pfn_list = NULL;
20165084Sjohnlev 		contig_pfn_max = contig_pfn_cnt = 0;
20175084Sjohnlev 	}
20185084Sjohnlev 	if (listp != NULL)
20195084Sjohnlev 		kmem_free(listp, listsize);
20205084Sjohnlev }
20215084Sjohnlev 
20225084Sjohnlev /*
20235084Sjohnlev  * Unlock the contig_pfn_list.  The next attempted use of it will cause
20245084Sjohnlev  * it to be re-created.
20255084Sjohnlev  */
20265084Sjohnlev void
20275084Sjohnlev unlock_contig_pfnlist()
20285084Sjohnlev {
20295529Ssmaybe 	mutex_exit(&contig_list_lock);
20305084Sjohnlev }
20315084Sjohnlev 
20325084Sjohnlev /*
20335084Sjohnlev  * Update the contiguous pfn list in response to a pfn <-> mfn reassignment
20345084Sjohnlev  */
20355084Sjohnlev void
20365084Sjohnlev update_contig_pfnlist(pfn_t pfn, mfn_t oldmfn, mfn_t newmfn)
20375084Sjohnlev {
20385084Sjohnlev 	int probe_hi, probe_lo, probe_pos, insert_after, insert_point;
20395084Sjohnlev 	pfn_t probe_pfn;
20405084Sjohnlev 	mfn_t probe_mfn;
20415529Ssmaybe 	int drop_lock = 0;
20425529Ssmaybe 
20435529Ssmaybe 	if (mutex_owner(&contig_list_lock) != curthread) {
20445529Ssmaybe 		drop_lock = 1;
20455529Ssmaybe 		mutex_enter(&contig_list_lock);
20465529Ssmaybe 	}
20475084Sjohnlev 	if (contig_pfn_list == NULL)
20485529Ssmaybe 		goto done;
20495084Sjohnlev 	contig_pfnlist_updates++;
20505084Sjohnlev 	/*
20515084Sjohnlev 	 * Find the pfn in the current list.  Use a binary chop to locate it.
20525084Sjohnlev 	 */
20535084Sjohnlev 	probe_hi = contig_pfn_cnt - 1;
20545084Sjohnlev 	probe_lo = 0;
20555084Sjohnlev 	probe_pos = (probe_hi + probe_lo) / 2;
20565084Sjohnlev 	while ((probe_pfn = contig_pfn_list[probe_pos]) != pfn) {
20575084Sjohnlev 		if (probe_pos == probe_lo) { /* pfn not in list */
20585084Sjohnlev 			probe_pos = -1;
20595084Sjohnlev 			break;
20605084Sjohnlev 		}
20615084Sjohnlev 		if (pfn_to_mfn(probe_pfn) <= oldmfn)
20625084Sjohnlev 			probe_lo = probe_pos;
20635084Sjohnlev 		else
20645084Sjohnlev 			probe_hi = probe_pos;
20655084Sjohnlev 		probe_pos = (probe_hi + probe_lo) / 2;
20665084Sjohnlev 	}
20675084Sjohnlev 	if (probe_pos >= 0) { /* remove pfn from list */
20685084Sjohnlev 		contig_pfn_cnt--;
20695084Sjohnlev 		ovbcopy(&contig_pfn_list[probe_pos + 1],
20705084Sjohnlev 		    &contig_pfn_list[probe_pos],
20715084Sjohnlev 		    (contig_pfn_cnt - probe_pos) * sizeof (pfn_t));
20725084Sjohnlev 	}
20735084Sjohnlev 	if (newmfn == MFN_INVALID)
20745084Sjohnlev 		goto done;
20755084Sjohnlev 	/*
20765084Sjohnlev 	 * Check if new mfn has adjacent mfns in the list
20775084Sjohnlev 	 */
20785084Sjohnlev 	probe_hi = contig_pfn_cnt - 1;
20795084Sjohnlev 	probe_lo = 0;
20805084Sjohnlev 	insert_after = -2;
20815084Sjohnlev 	do {
20825084Sjohnlev 		probe_pos = (probe_hi + probe_lo) / 2;
20835084Sjohnlev 		probe_mfn = pfn_to_mfn(contig_pfn_list[probe_pos]);
20845084Sjohnlev 		if (newmfn == probe_mfn + 1)
20855084Sjohnlev 			insert_after = probe_pos;
20865084Sjohnlev 		else if (newmfn == probe_mfn - 1)
20875084Sjohnlev 			insert_after = probe_pos - 1;
20885084Sjohnlev 		if (probe_pos == probe_lo)
20895084Sjohnlev 			break;
20905084Sjohnlev 		if (probe_mfn <= newmfn)
20915084Sjohnlev 			probe_lo = probe_pos;
20925084Sjohnlev 		else
20935084Sjohnlev 			probe_hi = probe_pos;
20945084Sjohnlev 	} while (insert_after == -2);
20955084Sjohnlev 	/*
20965084Sjohnlev 	 * If there is space in the list and there are adjacent mfns,
20975084Sjohnlev 	 * insert the pfn into its proper place in the list.
20985084Sjohnlev 	 */
20995084Sjohnlev 	if (insert_after != -2 && contig_pfn_cnt + 1 <= contig_pfn_max) {
21005084Sjohnlev 		insert_point = insert_after + 1;
21015084Sjohnlev 		ovbcopy(&contig_pfn_list[insert_point],
21025084Sjohnlev 		    &contig_pfn_list[insert_point + 1],
21035084Sjohnlev 		    (contig_pfn_cnt - insert_point) * sizeof (pfn_t));
21045084Sjohnlev 		contig_pfn_list[insert_point] = pfn;
21055084Sjohnlev 		contig_pfn_cnt++;
21065084Sjohnlev 	}
21075084Sjohnlev done:
21085529Ssmaybe 	if (drop_lock)
21095529Ssmaybe 		mutex_exit(&contig_list_lock);
21105084Sjohnlev }
21115084Sjohnlev 
21125084Sjohnlev /*
21135084Sjohnlev  * Called to (re-)populate the io_pool from the free page lists.
21145084Sjohnlev  */
21155084Sjohnlev long
21165084Sjohnlev populate_io_pool(void)
21175084Sjohnlev {
21185084Sjohnlev 	pfn_t pfn;
21195084Sjohnlev 	mfn_t mfn, max_mfn;
21205084Sjohnlev 	page_t *pp;
21215084Sjohnlev 
21225084Sjohnlev 	/*
21235084Sjohnlev 	 * Figure out the bounds of the pool on first invocation.
21245084Sjohnlev 	 * We use a percentage of memory for the io pool size.
21255084Sjohnlev 	 * We allow that to shrink, but not to less than a fixed minimum.
21265084Sjohnlev 	 */
21275084Sjohnlev 	if (io_pool_cnt_max == 0) {
21285084Sjohnlev 		io_pool_cnt_max = physmem / (100 / io_pool_physmem_pct);
21295084Sjohnlev 		io_pool_cnt_lowater = io_pool_cnt_max;
21305084Sjohnlev 		/*
21315084Sjohnlev 		 * This is the first time in populate_io_pool; grab a va to use
21325084Sjohnlev 		 * when we need to allocate pages.
21335084Sjohnlev 		 */
21345084Sjohnlev 		io_pool_kva = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
21355084Sjohnlev 	}
21365084Sjohnlev 	/*
21375084Sjohnlev 	 * If we are out of pages in the pool, then grow the size of the pool
21385084Sjohnlev 	 */
21396159Ssmaybe 	if (io_pool_cnt == 0) {
21406159Ssmaybe 		/*
21416159Ssmaybe 		 * Grow the max size of the io pool by 5%, but never more than
21426159Ssmaybe 		 * 25% of physical memory.
21436159Ssmaybe 		 */
21446159Ssmaybe 		if (io_pool_cnt_max < physmem / 4)
21456159Ssmaybe 			io_pool_cnt_max += io_pool_cnt_max / 20;
21466159Ssmaybe 	}
21475084Sjohnlev 	io_pool_grows++;	/* should be a kstat? */
21485084Sjohnlev 
21495084Sjohnlev 	/*
21505084Sjohnlev 	 * Get highest mfn on this platform, but limit to the 32 bit DMA max.
21515084Sjohnlev 	 */
21525084Sjohnlev 	(void) mfn_to_pfn(start_mfn);
21535084Sjohnlev 	max_mfn = MIN(cached_max_mfn, PFN_4GIG);
21545084Sjohnlev 	for (mfn = start_mfn; mfn < max_mfn; start_mfn = ++mfn) {
21555084Sjohnlev 		pfn = mfn_to_pfn(mfn);
21565084Sjohnlev 		if (pfn & PFN_IS_FOREIGN_MFN)
21575084Sjohnlev 			continue;
21585084Sjohnlev 		/*
21595084Sjohnlev 		 * try to allocate it from free pages
21605084Sjohnlev 		 */
21615084Sjohnlev 		pp = page_numtopp_alloc(pfn);
21625084Sjohnlev 		if (pp == NULL)
21635084Sjohnlev 			continue;
21645084Sjohnlev 		PP_CLRFREE(pp);
21655084Sjohnlev 		add_page_to_pool(pp, 1);
21665084Sjohnlev 		if (io_pool_cnt >= io_pool_cnt_max)
21675084Sjohnlev 			break;
21685084Sjohnlev 	}
21695084Sjohnlev 
21705084Sjohnlev 	return (io_pool_cnt);
21715084Sjohnlev }
21725084Sjohnlev 
21735084Sjohnlev /*
21745084Sjohnlev  * Destroy a page that was being used for DMA I/O. It may or
21755084Sjohnlev  * may not actually go back to the io_pool.
21765084Sjohnlev  */
21775084Sjohnlev void
21785084Sjohnlev page_destroy_io(page_t *pp)
21795084Sjohnlev {
21805084Sjohnlev 	mfn_t mfn = mfn_list[pp->p_pagenum];
21815084Sjohnlev 
21825084Sjohnlev 	/*
21835084Sjohnlev 	 * When the page was alloc'd, a reservation was made; release it now.
21845084Sjohnlev 	 */
21855084Sjohnlev 	page_unresv(1);
21865084Sjohnlev 	/*
21875084Sjohnlev 	 * Unload translations, if any, then hash out the
21885084Sjohnlev 	 * page to erase its identity.
21895084Sjohnlev 	 */
21905084Sjohnlev 	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
21915084Sjohnlev 	page_hashout(pp, NULL);
21925084Sjohnlev 
21935084Sjohnlev 	/*
21945084Sjohnlev 	 * If the page came from the free lists, just put it back to them.
21955084Sjohnlev 	 * DomU pages always go on the free lists as well.
21965084Sjohnlev 	 */
21975084Sjohnlev 	if (!DOMAIN_IS_INITDOMAIN(xen_info) || mfn >= PFN_4GIG) {
21985084Sjohnlev 		page_free(pp, 1);
21995084Sjohnlev 		return;
22005084Sjohnlev 	}
22015084Sjohnlev 
22025084Sjohnlev 	add_page_to_pool(pp, 0);
22035084Sjohnlev }
22045084Sjohnlev 
22055084Sjohnlev 
22065084Sjohnlev long contig_searches;		/* count of times contig pages requested */
22075084Sjohnlev long contig_search_restarts;	/* count of contig ranges tried */
22085084Sjohnlev long contig_search_failed;	/* count of contig alloc failures */
22095084Sjohnlev 
22105084Sjohnlev /*
22115084Sjohnlev  * Look through the contiguous pfns that are not part of the io_pool for
22125084Sjohnlev  * contiguous free pages.  Return a list of the found pages or NULL.
22135084Sjohnlev  */
22145084Sjohnlev page_t *
22156282Ssmaybe find_contig_free(uint_t npages, uint_t flags, uint64_t pfnseg)
22165084Sjohnlev {
22175084Sjohnlev 	page_t *pp, *plist = NULL;
22186282Ssmaybe 	mfn_t mfn, prev_mfn, start_mfn;
22195084Sjohnlev 	pfn_t pfn;
22205084Sjohnlev 	int pages_needed, pages_requested;
22215084Sjohnlev 	int search_start;
22225084Sjohnlev 
22235084Sjohnlev 	/*
22245084Sjohnlev 	 * create the contig pfn list if not already done
22255084Sjohnlev 	 */
22265529Ssmaybe retry:
22275529Ssmaybe 	mutex_enter(&contig_list_lock);
22285084Sjohnlev 	if (contig_pfn_list == NULL) {
22295529Ssmaybe 		mutex_exit(&contig_list_lock);
22305529Ssmaybe 		if (!create_contig_pfnlist(flags)) {
22315084Sjohnlev 			return (NULL);
22325084Sjohnlev 		}
22335529Ssmaybe 		goto retry;
22345084Sjohnlev 	}
22355084Sjohnlev 	contig_searches++;
22365084Sjohnlev 	/*
22375084Sjohnlev 	 * Search contiguous pfn list for physically contiguous pages not in
22385084Sjohnlev 	 * the io_pool.  Start the search where the last search left off.
22395084Sjohnlev 	 */
22405843Ssmaybe 	pages_requested = pages_needed = npages;
22415084Sjohnlev 	search_start = next_alloc_pfn;
22426282Ssmaybe 	start_mfn = prev_mfn = 0;
22435084Sjohnlev 	while (pages_needed) {
22445084Sjohnlev 		pfn = contig_pfn_list[next_alloc_pfn];
22455084Sjohnlev 		mfn = pfn_to_mfn(pfn);
22466282Ssmaybe 		/*
22476282Ssmaybe 		 * Check that mfn is the first one or contiguous with the
22486282Ssmaybe 		 * previous one, that the page corresponding to mfn is free,
22496282Ssmaybe 		 * and that the mfn range does not cross a segment boundary.
22506282Ssmaybe 		 */
22515084Sjohnlev 		if ((prev_mfn == 0 || mfn == prev_mfn + 1) &&
22526282Ssmaybe 		    (pp = page_numtopp_alloc(pfn)) != NULL &&
22536282Ssmaybe 		    !((mfn & pfnseg) < (start_mfn & pfnseg))) {
22545084Sjohnlev 			PP_CLRFREE(pp);
22555084Sjohnlev 			page_io_pool_add(&plist, pp);
22565084Sjohnlev 			pages_needed--;
22576282Ssmaybe 			if (prev_mfn == 0)
22586282Ssmaybe 				start_mfn = mfn;
22595084Sjohnlev 			prev_mfn = mfn;
22605084Sjohnlev 		} else {
22615084Sjohnlev 			contig_search_restarts++;
22625084Sjohnlev 			/*
22635084Sjohnlev 			 * free partial page list
22645084Sjohnlev 			 */
22655084Sjohnlev 			while (plist != NULL) {
22665084Sjohnlev 				pp = plist;
22675084Sjohnlev 				page_io_pool_sub(&plist, pp, pp);
22685084Sjohnlev 				page_free(pp, 1);
22695084Sjohnlev 			}
22705084Sjohnlev 			pages_needed = pages_requested;
22716282Ssmaybe 			start_mfn = prev_mfn = 0;
22725084Sjohnlev 		}
22735084Sjohnlev 		if (++next_alloc_pfn == contig_pfn_cnt)
22745084Sjohnlev 			next_alloc_pfn = 0;
22755084Sjohnlev 		if (next_alloc_pfn == search_start)
22765084Sjohnlev 			break; /* all pfns searched */
22775084Sjohnlev 	}
22785529Ssmaybe 	mutex_exit(&contig_list_lock);
22795084Sjohnlev 	if (pages_needed) {
22805084Sjohnlev 		contig_search_failed++;
22815084Sjohnlev 		/*
22825084Sjohnlev 		 * Failed to find enough contig pages.
22835084Sjohnlev 		 * Free the partial page list.
22845084Sjohnlev 		 */
22855084Sjohnlev 		while (plist != NULL) {
22865084Sjohnlev 			pp = plist;
22875084Sjohnlev 			page_io_pool_sub(&plist, pp, pp);
22885084Sjohnlev 			page_free(pp, 1);
22895084Sjohnlev 		}
22905084Sjohnlev 	}
22915084Sjohnlev 	return (plist);
22925084Sjohnlev }
22935084Sjohnlev 
22945084Sjohnlev /*
22955843Ssmaybe  * Search the reserved io pool pages for a page range with the
22965843Ssmaybe  * desired characteristics.
22975084Sjohnlev  */
22985084Sjohnlev page_t *
22995843Ssmaybe page_io_pool_alloc(ddi_dma_attr_t *mattr, int contig, pgcnt_t minctg)
23005084Sjohnlev {
23015843Ssmaybe 	page_t *pp_first, *pp_last;
23025843Ssmaybe 	page_t *pp, **poolp;
23035843Ssmaybe 	pgcnt_t nwanted, pfnalign;
23045084Sjohnlev 	uint64_t pfnseg;
23055843Ssmaybe 	mfn_t mfn, tmfn, hi_mfn, lo_mfn;
23065843Ssmaybe 	int align, attempt = 0;
23075843Ssmaybe 
23085843Ssmaybe 	if (minctg == 1)
23095843Ssmaybe 		contig = 0;
23105084Sjohnlev 	lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
23115084Sjohnlev 	hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
23125843Ssmaybe 	pfnseg = mmu_btop(mattr->dma_attr_seg);
23135084Sjohnlev 	align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
23145084Sjohnlev 	if (align > MMU_PAGESIZE)
23155084Sjohnlev 		pfnalign = mmu_btop(align);
23165843Ssmaybe 	else
23175843Ssmaybe 		pfnalign = 0;
23185843Ssmaybe 
23195084Sjohnlev try_again:
23205084Sjohnlev 	/*
23215084Sjohnlev 	 * See if we want pages for a legacy device
23225084Sjohnlev 	 */
23235084Sjohnlev 	if (hi_mfn < PFN_16MEG)
23245084Sjohnlev 		poolp = &io_pool_16m;
23255084Sjohnlev 	else
23265084Sjohnlev 		poolp = &io_pool_4g;
23275084Sjohnlev try_smaller:
23285084Sjohnlev 	/*
23295843Ssmaybe 	 * Take pages from I/O pool. We'll use pages from the highest
23305843Ssmaybe 	 * MFN range possible.
23315084Sjohnlev 	 */
23325084Sjohnlev 	pp_first = pp_last = NULL;
23335084Sjohnlev 	mutex_enter(&io_pool_lock);
23345843Ssmaybe 	nwanted = minctg;
23355843Ssmaybe 	for (pp = *poolp; pp && nwanted > 0; ) {
23365084Sjohnlev 		pp = pp->p_prev;
23375084Sjohnlev 
23385084Sjohnlev 		/*
23395084Sjohnlev 		 * skip pages above allowable range
23405084Sjohnlev 		 */
23415084Sjohnlev 		mfn = mfn_list[pp->p_pagenum];
23425084Sjohnlev 		if (hi_mfn < mfn)
23435084Sjohnlev 			goto skip;
23445084Sjohnlev 
23455084Sjohnlev 		/*
23465084Sjohnlev 		 * stop at pages below allowable range
23475084Sjohnlev 		 */
23485084Sjohnlev 		if (lo_mfn > mfn)
23495084Sjohnlev 			break;
23505084Sjohnlev restart:
23515084Sjohnlev 		if (pp_last == NULL) {
23525084Sjohnlev 			/*
23535084Sjohnlev 			 * Check alignment
23545084Sjohnlev 			 */
23555843Ssmaybe 			tmfn = mfn - (minctg - 1);
23565843Ssmaybe 			if (pfnalign && tmfn != P2ROUNDUP(tmfn, pfnalign))
23575843Ssmaybe 				goto skip; /* not properly aligned */
23585084Sjohnlev 			/*
23595084Sjohnlev 			 * Check segment
23605084Sjohnlev 			 */
23615084Sjohnlev 			if ((mfn & pfnseg) < (tmfn & pfnseg))
23625843Ssmaybe 				goto skip; /* crosses seg boundary */
23635084Sjohnlev 			/*
23645084Sjohnlev 			 * Start building page list
23655084Sjohnlev 			 */
23665084Sjohnlev 			pp_first = pp_last = pp;
23675843Ssmaybe 			nwanted--;
23685084Sjohnlev 		} else {
23695084Sjohnlev 			/*
23705084Sjohnlev 			 * check physical contiguity if required
23715084Sjohnlev 			 */
23725084Sjohnlev 			if (contig &&
23735084Sjohnlev 			    mfn_list[pp_first->p_pagenum] != mfn + 1) {
23745084Sjohnlev 				/*
23755084Sjohnlev 				 * not a contiguous page, restart list.
23765084Sjohnlev 				 */
23775084Sjohnlev 				pp_last = NULL;
23785843Ssmaybe 				nwanted = minctg;
23795084Sjohnlev 				goto restart;
23805084Sjohnlev 			} else { /* add page to list */
23815084Sjohnlev 				pp_first = pp;
23825843Ssmaybe 				nwanted--;
23835084Sjohnlev 			}
23845084Sjohnlev 		}
23855084Sjohnlev skip:
23865084Sjohnlev 		if (pp == *poolp)
23875084Sjohnlev 			break;
23885084Sjohnlev 	}
23895084Sjohnlev 
23905084Sjohnlev 	/*
23915084Sjohnlev 	 * If we didn't find memory, try the more constrained pool, then
23925843Ssmaybe 	 * sweep free pages into the DMA pool and try again.
23935084Sjohnlev 	 */
23945843Ssmaybe 	if (nwanted != 0) {
23955084Sjohnlev 		mutex_exit(&io_pool_lock);
23965084Sjohnlev 		/*
23975843Ssmaybe 		 * If we were looking in the less constrained pool and
23985843Ssmaybe 		 * didn't find pages, try the more constrained pool.
23995084Sjohnlev 		 */
24005084Sjohnlev 		if (poolp == &io_pool_4g) {
24015084Sjohnlev 			poolp = &io_pool_16m;
24025084Sjohnlev 			goto try_smaller;
24035084Sjohnlev 		}
24045084Sjohnlev 		kmem_reap();
24055084Sjohnlev 		if (++attempt < 4) {
24065084Sjohnlev 			/*
24075084Sjohnlev 			 * Grab some more io_pool pages
24085084Sjohnlev 			 */
24095084Sjohnlev 			(void) populate_io_pool();
24105843Ssmaybe 			goto try_again; /* go around and retry */
24115084Sjohnlev 		}
24125843Ssmaybe 		return (NULL);
24135084Sjohnlev 	}
24145084Sjohnlev 	/*
24155084Sjohnlev 	 * Found the pages, now snip them from the list
24165084Sjohnlev 	 */
24175084Sjohnlev 	page_io_pool_sub(poolp, pp_first, pp_last);
24185843Ssmaybe 	io_pool_cnt -= minctg;
24195843Ssmaybe 	/*
24205843Ssmaybe 	 * reset low water mark
24215843Ssmaybe 	 */
24225084Sjohnlev 	if (io_pool_cnt < io_pool_cnt_lowater)
24235843Ssmaybe 		io_pool_cnt_lowater = io_pool_cnt;
24245084Sjohnlev 	mutex_exit(&io_pool_lock);
24255843Ssmaybe 	return (pp_first);
24265843Ssmaybe }
24275843Ssmaybe 
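/*
 * Allocate minctg pages by trading pages with the hypervisor.  The
 * balloon driver exchanges extents of 2^order pages for machine pages
 * that satisfy the caller's DMA address limit; any excess pages from
 * rounding up to a power of two are returned to the free list.
 */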
24285843Ssmaybe page_t *
24295843Ssmaybe page_swap_with_hypervisor(struct vnode *vp, u_offset_t off, caddr_t vaddr,
24305843Ssmaybe     ddi_dma_attr_t *mattr, uint_t flags, pgcnt_t minctg)
24315843Ssmaybe {
24325843Ssmaybe 	uint_t kflags;
24335843Ssmaybe 	int order, extra, extpages, i, contig, nbits, extents;
24345843Ssmaybe 	page_t *pp, *expp, *pp_first, **pplist = NULL;
24355843Ssmaybe 	mfn_t *mfnlist = NULL;
24365843Ssmaybe 
24375843Ssmaybe 	contig = flags & PG_PHYSCONTIG;
24385843Ssmaybe 	if (minctg == 1)
24395843Ssmaybe 		contig = 0;
24405843Ssmaybe 	flags &= ~PG_PHYSCONTIG;
24415843Ssmaybe 	kflags = flags & PG_WAIT ? KM_SLEEP : KM_NOSLEEP;
24425843Ssmaybe 	/*
24435843Ssmaybe 	 * The hypervisor will allocate extents; if we want contig
24445843Ssmaybe 	 * pages, the extent must be >= minctg.
24455843Ssmaybe 	 */
24465843Ssmaybe 	if (contig) {
24475843Ssmaybe 		order = highbit(minctg) - 1;
24485843Ssmaybe 		if (minctg & ((1 << order) - 1))
24495843Ssmaybe 			order++;
24505843Ssmaybe 		extpages = 1 << order;
24515843Ssmaybe 	} else {
24525843Ssmaybe 		order = 0;
24535843Ssmaybe 		extpages = minctg;
24545843Ssmaybe 	}
24555843Ssmaybe 	if (extpages > minctg) {
24565843Ssmaybe 		extra = extpages - minctg;
24575843Ssmaybe 		if (!page_resv(extra, kflags))
24585843Ssmaybe 			return (NULL);
24595843Ssmaybe 	}
24605843Ssmaybe 	pp_first = NULL;
24615843Ssmaybe 	pplist = kmem_alloc(extpages * sizeof (page_t *), kflags);
24625843Ssmaybe 	if (pplist == NULL)
24635843Ssmaybe 		goto balloon_fail;
24645843Ssmaybe 	mfnlist = kmem_alloc(extpages * sizeof (mfn_t), kflags);
24655843Ssmaybe 	if (mfnlist == NULL)
24665843Ssmaybe 		goto balloon_fail;
24675843Ssmaybe 	pp = page_create_va(vp, off, minctg * PAGESIZE, flags, &kvseg, vaddr);
24685843Ssmaybe 	if (pp == NULL)
24695843Ssmaybe 		goto balloon_fail;
24705843Ssmaybe 	pp_first = pp;
24715843Ssmaybe 	if (extpages > minctg) {
24725843Ssmaybe 		/*
24735843Ssmaybe 		 * fill out the rest of the extent pages to swap
24745843Ssmaybe 		 * with the hypervisor
24755843Ssmaybe 		 */
24765843Ssmaybe 		for (i = 0; i < extra; i++) {
24775843Ssmaybe 			expp = page_create_va(vp,
24785843Ssmaybe 			    (u_offset_t)(uintptr_t)io_pool_kva,
24795843Ssmaybe 			    PAGESIZE, flags, &kvseg, io_pool_kva);
24805843Ssmaybe 			if (expp == NULL)
24815843Ssmaybe 				goto balloon_fail;
24825843Ssmaybe 			(void) hat_pageunload(expp, HAT_FORCE_PGUNLOAD);
24835843Ssmaybe 			page_io_unlock(expp);
24845843Ssmaybe 			page_hashout(expp, NULL);
24855843Ssmaybe 			page_io_lock(expp);
24865843Ssmaybe 			/*
24875843Ssmaybe 			 * add page to end of list
24885843Ssmaybe 			 */
24895843Ssmaybe 			expp->p_prev = pp_first->p_prev;
24905843Ssmaybe 			expp->p_next = pp_first;
24915843Ssmaybe 			expp->p_prev->p_next = expp;
24925843Ssmaybe 			pp_first->p_prev = expp;
24935084Sjohnlev 		}
24945843Ssmaybe 
24955843Ssmaybe 	}
24965843Ssmaybe 	for (i = 0; i < extpages; i++) {
24975843Ssmaybe 		pplist[i] = pp;
24985084Sjohnlev 		pp = pp->p_next;
24995843Ssmaybe 	}
25005843Ssmaybe 	nbits = highbit(mattr->dma_attr_addr_hi);
25015843Ssmaybe 	extents = contig ? 1 : minctg;
25025843Ssmaybe 	if (balloon_replace_pages(extents, pplist, nbits, order,
25035843Ssmaybe 	    mfnlist) != extents) {
25045843Ssmaybe 		if (ioalloc_dbg)
25055843Ssmaybe 			cmn_err(CE_NOTE, "request to hypervisor"
25065843Ssmaybe 			    " for %d pages, maxaddr %" PRIx64 " failed",
25075843Ssmaybe 			    extpages, mattr->dma_attr_addr_hi);
25085843Ssmaybe 		goto balloon_fail;
25095843Ssmaybe 	}
25105843Ssmaybe 
25115843Ssmaybe 	kmem_free(pplist, extpages * sizeof (page_t *));
25125843Ssmaybe 	kmem_free(mfnlist, extpages * sizeof (mfn_t));
25135843Ssmaybe 	/*
25145843Ssmaybe 	 * Return any excess pages to free list
25155843Ssmaybe 	 */
25165843Ssmaybe 	if (extpages > minctg) {
25175843Ssmaybe 		for (i = 0; i < extra; i++) {
25185843Ssmaybe 			pp = pp_first->p_prev;
25195843Ssmaybe 			page_sub(&pp_first, pp);
25205843Ssmaybe 			page_io_unlock(pp);
25215843Ssmaybe 			page_unresv(1);
25225843Ssmaybe 			page_free(pp, 1);
25235843Ssmaybe 		}
25245843Ssmaybe 	}
25255084Sjohnlev 	return (pp_first);
25265084Sjohnlev balloon_fail:
25275084Sjohnlev 	/*
25285084Sjohnlev 	 * Return pages to free list and return failure
25295084Sjohnlev 	 */
25305084Sjohnlev 	while (pp_first != NULL) {
25315084Sjohnlev 		pp = pp_first;
25325084Sjohnlev 		page_sub(&pp_first, pp);
25335084Sjohnlev 		page_io_unlock(pp);
25345084Sjohnlev 		if (pp->p_vnode != NULL)
25355084Sjohnlev 			page_hashout(pp, NULL);
25365084Sjohnlev 		page_free(pp, 1);
25375084Sjohnlev 	}
25385084Sjohnlev 	if (pplist)
25395084Sjohnlev 		kmem_free(pplist, extpages * sizeof (page_t *));
25405084Sjohnlev 	if (mfnlist)
25415084Sjohnlev 		kmem_free(mfnlist, extpages * sizeof (mfn_t));
25425843Ssmaybe 	page_unresv(extpages - minctg);
25435843Ssmaybe 	return (NULL);
25445843Ssmaybe }
25455843Ssmaybe 
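/*
 * Free the pages of a partially built list when an allocation cannot
 * be completed.
 */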
25465843Ssmaybe static void
25475843Ssmaybe return_partial_alloc(page_t *plist)
25485843Ssmaybe {
25495843Ssmaybe 	page_t *pp;
25505843Ssmaybe 
25515843Ssmaybe 	while (plist != NULL) {
25525843Ssmaybe 		pp = plist;
25535843Ssmaybe 		page_sub(&plist, pp);
25547173Smrj 		page_io_unlock(pp);
25555843Ssmaybe 		page_destroy_io(pp);
25565843Ssmaybe 	}
25575843Ssmaybe }
25585843Ssmaybe 
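/*
 * Satisfy a dom0 request for pages matching the given DMA attributes.
 * The list is built minctg pages at a time from the free contig pfn
 * list, the reserved io pools, or by exchanging pages with the
 * hypervisor.
 */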
25595843Ssmaybe static page_t *
25605843Ssmaybe page_get_contigpages(
25615843Ssmaybe 	struct vnode	*vp,
25625843Ssmaybe 	u_offset_t	off,
25635843Ssmaybe 	int		*npagesp,
25645843Ssmaybe 	uint_t		flags,
25655843Ssmaybe 	caddr_t		vaddr,
25665843Ssmaybe 	ddi_dma_attr_t	*mattr)
25675843Ssmaybe {
25685843Ssmaybe 	mfn_t	max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
25695843Ssmaybe 	page_t	*plist;	/* list to return */
25705843Ssmaybe 	page_t	*pp, *mcpl;
25715843Ssmaybe 	int	contig, anyaddr, npages, getone = 0;
25725843Ssmaybe 	mfn_t	lo_mfn;
25735843Ssmaybe 	mfn_t	hi_mfn;
25745843Ssmaybe 	pgcnt_t	pfnalign = 0;
25755843Ssmaybe 	int	align, sgllen;
25765843Ssmaybe 	uint64_t pfnseg;
25775843Ssmaybe 	pgcnt_t	minctg;
25785843Ssmaybe 
25795843Ssmaybe 	npages = *npagesp;
25805843Ssmaybe 	ASSERT(mattr != NULL);
25815843Ssmaybe 	lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
25825843Ssmaybe 	hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
25835843Ssmaybe 	sgllen = mattr->dma_attr_sgllen;
25845843Ssmaybe 	pfnseg = mmu_btop(mattr->dma_attr_seg);
25855843Ssmaybe 	align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
25865843Ssmaybe 	if (align > MMU_PAGESIZE)
25875843Ssmaybe 		pfnalign = mmu_btop(align);
25885843Ssmaybe 
25895843Ssmaybe 	/*
25905843Ssmaybe 	 * Clear the contig flag if only one page is needed.
25915843Ssmaybe 	 */
25925843Ssmaybe 	contig = flags & PG_PHYSCONTIG;
25935843Ssmaybe 	if (npages == 1) {
25945843Ssmaybe 		getone = 1;
25955843Ssmaybe 		contig = 0;
25965843Ssmaybe 	}
25975843Ssmaybe 
25985843Ssmaybe 	/*
25995843Ssmaybe 	 * Check if any page in the system will satisfy the request.
26005843Ssmaybe 	 */
26015843Ssmaybe 	anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn && !pfnalign;
26025843Ssmaybe 	if (!contig && anyaddr) {
26035843Ssmaybe 		flags &= ~PG_PHYSCONTIG;
26045843Ssmaybe 		plist = page_create_va(vp, off, npages * MMU_PAGESIZE,
26055843Ssmaybe 		    flags, &kvseg, vaddr);
26065843Ssmaybe 		if (plist != NULL) {
26075843Ssmaybe 			*npagesp = 0;
26085843Ssmaybe 			return (plist);
26095843Ssmaybe 		}
26105843Ssmaybe 	}
26115843Ssmaybe 	plist = NULL;
26125843Ssmaybe 	minctg = howmany(npages, sgllen);
26135843Ssmaybe 	while (npages > sgllen || getone) {
26146015Ssmaybe 		if (minctg > npages)
26156015Ssmaybe 			minctg = npages;
26166015Ssmaybe 		mcpl = NULL;
26175843Ssmaybe 		/*
26185843Ssmaybe 		 * We could just want unconstrained but contig pages.
26195843Ssmaybe 		 */
26206282Ssmaybe 		if (anyaddr && contig) {
26215843Ssmaybe 			/*
26225843Ssmaybe 			 * Look for free contig pages to satisfy the request.
26235843Ssmaybe 			 */
26246282Ssmaybe 			mcpl = find_contig_free(minctg, flags, pfnseg);
26255843Ssmaybe 		}
26265843Ssmaybe 		/*
26275843Ssmaybe 		 * Try the reserved io pools next
26285843Ssmaybe 		 */
26295843Ssmaybe 		if (mcpl == NULL)
26305843Ssmaybe 			mcpl = page_io_pool_alloc(mattr, contig, minctg);
26315843Ssmaybe 		if (mcpl != NULL) {
26325843Ssmaybe 			pp = mcpl;
26335843Ssmaybe 			do {
26345843Ssmaybe 				if (!page_hashin(pp, vp, off, NULL)) {
26355843Ssmaybe 					panic("page_get_contigpages:"
26365843Ssmaybe 					    " hashin failed"
26375843Ssmaybe 					    " pp %p, vp %p, off %llx",
26385843Ssmaybe 					    (void *)pp, (void *)vp, off);
26395843Ssmaybe 				}
26405843Ssmaybe 				off += MMU_PAGESIZE;
26415843Ssmaybe 				PP_CLRFREE(pp);
26425843Ssmaybe 				PP_CLRAGED(pp);
26435843Ssmaybe 				page_set_props(pp, P_REF);
26445843Ssmaybe 				page_io_lock(pp);
26455843Ssmaybe 				pp = pp->p_next;
26465843Ssmaybe 			} while (pp != mcpl);
26475843Ssmaybe 		} else {
26485843Ssmaybe 			/*
26495843Ssmaybe 			 * Hypervisor exchange doesn't handle segment or
26505843Ssmaybe 			 * alignment constraints
26515843Ssmaybe 			 */
26525843Ssmaybe 			if (mattr->dma_attr_seg < mattr->dma_attr_addr_hi ||
26535843Ssmaybe 			    pfnalign)
26545843Ssmaybe 				goto fail;
26555843Ssmaybe 			/*
26565843Ssmaybe 			 * Try exchanging pages with the hypervisor
26575843Ssmaybe 			 */
26585843Ssmaybe 			mcpl = page_swap_with_hypervisor(vp, off, vaddr, mattr,
26595843Ssmaybe 			    flags, minctg);
26605843Ssmaybe 			if (mcpl == NULL)
26615843Ssmaybe 				goto fail;
26625843Ssmaybe 			off += minctg * MMU_PAGESIZE;
26635843Ssmaybe 		}
26645843Ssmaybe 		check_dma(mattr, mcpl, minctg);
26655843Ssmaybe 		/*
26665843Ssmaybe 		 * We now have a run of minctg contiguous pages; add them to
26675843Ssmaybe 		 * the list we will return for this request.
26685843Ssmaybe 		 */
26695843Ssmaybe 		page_list_concat(&plist, &mcpl);
26705843Ssmaybe 		npages -= minctg;
26715843Ssmaybe 		*npagesp = npages;
26725843Ssmaybe 		sgllen--;
26736015Ssmaybe 		if (getone)
26746015Ssmaybe 			break;
26755843Ssmaybe 	}
26765843Ssmaybe 	return (plist);
26775843Ssmaybe fail:
26785843Ssmaybe 	return_partial_alloc(plist);
26795843Ssmaybe 	return (NULL);
26805843Ssmaybe }
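
/*
 * Worked example (editor's illustration, not original source) of the
 * chunking above: with npages = 7 and dma_attr_sgllen = 3, minctg =
 * howmany(7, 3) = 3, so the loop carves off contiguous runs of 3 and 3
 * pages and then exits once npages (1) is no longer > sgllen (1); the
 * leftover page is picked up by the caller's single-page loop in
 * page_create_io() below.
 */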
26815843Ssmaybe 
26825843Ssmaybe /*
26835843Ssmaybe  * Allocator for domain 0 I/O pages. We match the required
26845843Ssmaybe  * DMA attributes and contiguity constraints.
26855843Ssmaybe  */
26865843Ssmaybe /*ARGSUSED*/
26875843Ssmaybe page_t *
26885843Ssmaybe page_create_io(
26895843Ssmaybe 	struct vnode	*vp,
26905843Ssmaybe 	u_offset_t	off,
26915843Ssmaybe 	uint_t		bytes,
26925843Ssmaybe 	uint_t		flags,
26935843Ssmaybe 	struct as	*as,
26945843Ssmaybe 	caddr_t		vaddr,
26955843Ssmaybe 	ddi_dma_attr_t	*mattr)
26965843Ssmaybe {
26975843Ssmaybe 	page_t	*plist = NULL, *pp;
26985843Ssmaybe 	int	npages = 0, contig, anyaddr, pages_req;
26995843Ssmaybe 	mfn_t	lo_mfn;
27005843Ssmaybe 	mfn_t	hi_mfn;
27015843Ssmaybe 	pgcnt_t	pfnalign = 0;
27025843Ssmaybe 	int	align;
27035843Ssmaybe 	int	is_domu = 0;
27045843Ssmaybe 	int	dummy, bytes_got;
27055843Ssmaybe 	mfn_t	max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
27065843Ssmaybe 
27075843Ssmaybe 	ASSERT(mattr != NULL);
27085843Ssmaybe 	lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
27095843Ssmaybe 	hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
27105843Ssmaybe 	align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
27115843Ssmaybe 	if (align > MMU_PAGESIZE)
27125843Ssmaybe 		pfnalign = mmu_btop(align);
27135843Ssmaybe 
27145843Ssmaybe 	/*
27155843Ssmaybe 	 * Clear the contig flag if only one page is needed or the scatter
27165843Ssmaybe 	 * gather list length is >= npages.
27175843Ssmaybe 	 */
27185843Ssmaybe 	pages_req = npages = mmu_btopr(bytes);
27195843Ssmaybe 	contig = (flags & PG_PHYSCONTIG);
27205843Ssmaybe 	bytes = P2ROUNDUP(bytes, MMU_PAGESIZE);
27215843Ssmaybe 	if (bytes == MMU_PAGESIZE || mattr->dma_attr_sgllen >= npages)
27225843Ssmaybe 		contig = 0;
27235843Ssmaybe 
27245843Ssmaybe 	/*
27255843Ssmaybe 	 * Check if any old page in the system is fine.
27265843Ssmaybe 	 * DomU should always go down this path.
27275843Ssmaybe 	 */
27285843Ssmaybe 	is_domu = !DOMAIN_IS_INITDOMAIN(xen_info);
27295843Ssmaybe 	anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn && !pfnalign;
27305843Ssmaybe 	if ((!contig && anyaddr) || is_domu) {
27315843Ssmaybe 		flags &= ~PG_PHYSCONTIG;
27325843Ssmaybe 		plist = page_create_va(vp, off, bytes, flags, &kvseg, vaddr);
27335843Ssmaybe 		if (plist != NULL)
27345843Ssmaybe 			return (plist);
27355843Ssmaybe 		else if (is_domu)
27365843Ssmaybe 			return (NULL); /* no memory available */
27375843Ssmaybe 	}
27385843Ssmaybe 	/*
27395843Ssmaybe 	 * DomU should never reach here
27405843Ssmaybe 	 */
27415843Ssmaybe 	if (contig) {
27425843Ssmaybe 		plist = page_get_contigpages(vp, off, &npages, flags, vaddr,
27435843Ssmaybe 		    mattr);
27445843Ssmaybe 		if (plist == NULL)
27455843Ssmaybe 			goto fail;
27465843Ssmaybe 		bytes_got = (pages_req - npages) << MMU_PAGESHIFT;
27475843Ssmaybe 		vaddr += bytes_got;
27485843Ssmaybe 		off += bytes_got;
27495843Ssmaybe 		/*
27505843Ssmaybe 		 * We now have all the contiguous pages we need, but
27515843Ssmaybe 		 * we may still need additional non-contiguous pages.
27525843Ssmaybe 		 */
27535843Ssmaybe 	}
27545843Ssmaybe 	/*
27555843Ssmaybe 	 * Now loop collecting the requested number of pages. These do
27565843Ssmaybe 	 * not have to be contiguous, but we still use the contig page
27575843Ssmaybe 	 * alloc code to get them since it honors any other constraints
27585843Ssmaybe 	 * the pages may have.
27595843Ssmaybe 	 */
27605843Ssmaybe 	while (npages--) {
27615843Ssmaybe 		dummy = 1;
27625843Ssmaybe 		pp = page_get_contigpages(vp, off, &dummy, flags, vaddr, mattr);
27635843Ssmaybe 		if (pp == NULL)
27645843Ssmaybe 			goto fail;
27655843Ssmaybe 		page_add(&plist, pp);
27665843Ssmaybe 		vaddr += MMU_PAGESIZE;
27675843Ssmaybe 		off += MMU_PAGESIZE;
27685843Ssmaybe 	}
27695843Ssmaybe 	return (plist);
27705843Ssmaybe fail:
27715843Ssmaybe 	/*
27725843Ssmaybe 	 * Failed to get enough pages, return ones we did get
27735843Ssmaybe 	 */
27745843Ssmaybe 	return_partial_alloc(plist);
27755084Sjohnlev 	return (NULL);
27765084Sjohnlev }
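
/*
 * Editor's sketch (hypothetical, not part of the original source): one
 * way a dom0 driver might call page_create_io() above.  The attribute
 * values, the (vp, off) cookie and the function name are invented for
 * illustration; the fields are the standard ddi_dma_attr_t members.
 */
#ifdef notdef
static page_t *
example_dma_page_alloc(struct vnode *vp, u_offset_t off, caddr_t vaddr)
{
	ddi_dma_attr_t attr;

	bzero(&attr, sizeof (attr));
	attr.dma_attr_addr_lo = 0;
	attr.dma_attr_addr_hi = 0xffffffffULL;	/* must sit below 4GB */
	attr.dma_attr_seg = 0xffffffffULL;	/* no segment boundary */
	attr.dma_attr_align = MMU_PAGESIZE;
	attr.dma_attr_minxfer = 1;
	attr.dma_attr_sgllen = 1;		/* one contiguous run */

	/* request four physically contiguous pages below 4GB */
	return (page_create_io(vp, off, 4 * MMU_PAGESIZE,
	    PG_EXCL | PG_WAIT | PG_PHYSCONTIG, &kas, vaddr, &attr));
}
#endif	/* notdef */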
27775084Sjohnlev 
27785084Sjohnlev /*
27795084Sjohnlev  * Lock and return the page with the highest mfn that we can find.  last_mfn
27805084Sjohnlev  * holds the last one found, so the next search can start from there.  We
27815084Sjohnlev  * also keep a counter so that we don't loop forever if the machine has no
27825084Sjohnlev  * free pages.
27835084Sjohnlev  *
27845084Sjohnlev  * This is called from the balloon thread to find pages to give away.  new_high
27855084Sjohnlev  * is used when new MFNs have been added to the system - we will reset our
27865084Sjohnlev  * search if the new MFNs are higher than our current search position.
27875084Sjohnlev  */
27885084Sjohnlev page_t *
27895084Sjohnlev page_get_high_mfn(mfn_t new_high)
27905084Sjohnlev {
27915084Sjohnlev 	static mfn_t last_mfn = 0;
27925084Sjohnlev 	pfn_t pfn;
27935084Sjohnlev 	page_t *pp;
27945084Sjohnlev 	ulong_t loop_count = 0;
27955084Sjohnlev 
27965084Sjohnlev 	if (new_high > last_mfn)
27975084Sjohnlev 		last_mfn = new_high;
27985084Sjohnlev 
27995084Sjohnlev 	for (; loop_count < mfn_count; loop_count++, last_mfn--) {
28005084Sjohnlev 		if (last_mfn == 0) {
28015084Sjohnlev 			last_mfn = cached_max_mfn;
28025084Sjohnlev 		}
28035084Sjohnlev 
28045084Sjohnlev 		pfn = mfn_to_pfn(last_mfn);
28055084Sjohnlev 		if (pfn & PFN_IS_FOREIGN_MFN)
28065084Sjohnlev 			continue;
28075084Sjohnlev 
28085084Sjohnlev 		/* See if the page is free.  If so, lock it. */
28095084Sjohnlev 		pp = page_numtopp_alloc(pfn);
28105084Sjohnlev 		if (pp == NULL)
28115084Sjohnlev 			continue;
28125084Sjohnlev 		PP_CLRFREE(pp);
28135084Sjohnlev 
28145084Sjohnlev 		ASSERT(PAGE_EXCL(pp));
28155084Sjohnlev 		ASSERT(pp->p_vnode == NULL);
28165084Sjohnlev 		ASSERT(!hat_page_is_mapped(pp));
28175084Sjohnlev 		last_mfn--;
28185084Sjohnlev 		return (pp);
28195084Sjohnlev 	}
28205084Sjohnlev 	return (NULL);
28215084Sjohnlev }
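
/*
 * Editor's sketch (hypothetical): the balloon thread's use of
 * page_get_high_mfn() above amounts to a loop like this.
 * balloon_give_back_page() is a made-up name standing in for the real
 * balloon bookkeeping that returns the page's mfn to the hypervisor.
 */
#ifdef notdef
static void
example_balloon_shrink(pgcnt_t npages, mfn_t new_high)
{
	page_t *pp;

	while (npages-- > 0) {
		if ((pp = page_get_high_mfn(new_high)) == NULL)
			break;	/* no free pages left to give away */
		/* pp comes back EXCL-locked, unmapped, with no vnode */
		balloon_give_back_page(pp);	/* hypothetical helper */
	}
}
#endif	/* notdef */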
28225084Sjohnlev 
28235084Sjohnlev #else /* !__xpv */
28245084Sjohnlev 
28250Sstevel@tonic-gate /*
28260Sstevel@tonic-gate  * Get a page from any list within the given mnode.
28270Sstevel@tonic-gate  */
28285084Sjohnlev static page_t *
28290Sstevel@tonic-gate page_get_mnode_anylist(ulong_t origbin, uchar_t szc, uint_t flags,
28300Sstevel@tonic-gate     int mnode, int mtype, ddi_dma_attr_t *dma_attr)
28310Sstevel@tonic-gate {
28322961Sdp78419 	kmutex_t		*pcm;
28332961Sdp78419 	int			i;
28342961Sdp78419 	page_t			*pp;
28352961Sdp78419 	page_t			*first_pp;
28362961Sdp78419 	uint64_t		pgaddr;
28372961Sdp78419 	ulong_t			bin;
28382961Sdp78419 	int			mtypestart;
28392961Sdp78419 	int			plw_initialized;
28402961Sdp78419 	page_list_walker_t	plw;
28410Sstevel@tonic-gate 
28420Sstevel@tonic-gate 	VM_STAT_ADD(pga_vmstats.pgma_alloc);
28430Sstevel@tonic-gate 
28440Sstevel@tonic-gate 	ASSERT((flags & PG_MATCH_COLOR) == 0);
28450Sstevel@tonic-gate 	ASSERT(szc == 0);
28460Sstevel@tonic-gate 	ASSERT(dma_attr != NULL);
28470Sstevel@tonic-gate 
28480Sstevel@tonic-gate 	MTYPE_START(mnode, mtype, flags);
28490Sstevel@tonic-gate 	if (mtype < 0) {
28500Sstevel@tonic-gate 		VM_STAT_ADD(pga_vmstats.pgma_allocempty);
28510Sstevel@tonic-gate 		return (NULL);
28520Sstevel@tonic-gate 	}
28530Sstevel@tonic-gate 
28540Sstevel@tonic-gate 	mtypestart = mtype;
28550Sstevel@tonic-gate 
28560Sstevel@tonic-gate 	bin = origbin;
28570Sstevel@tonic-gate 
28580Sstevel@tonic-gate 	/*
28590Sstevel@tonic-gate 	 * check up to page_colors + 1 bins - origbin may be checked twice
28600Sstevel@tonic-gate 	 * because of BIN_STEP skip
28610Sstevel@tonic-gate 	 */
28620Sstevel@tonic-gate 	do {
28632961Sdp78419 		plw_initialized = 0;
28642961Sdp78419 
28652961Sdp78419 		for (plw.plw_count = 0;
28662961Sdp78419 		    plw.plw_count < page_colors; plw.plw_count++) {
28672961Sdp78419 
28680Sstevel@tonic-gate 			if (PAGE_FREELISTS(mnode, szc, bin, mtype) == NULL)
28690Sstevel@tonic-gate 				goto nextfreebin;
28700Sstevel@tonic-gate 
28710Sstevel@tonic-gate 			pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST);
28720Sstevel@tonic-gate 			mutex_enter(pcm);
28730Sstevel@tonic-gate 			pp = PAGE_FREELISTS(mnode, szc, bin, mtype);
28740Sstevel@tonic-gate 			first_pp = pp;
28750Sstevel@tonic-gate 			while (pp != NULL) {
28760Sstevel@tonic-gate 				if (page_trylock(pp, SE_EXCL) == 0) {
28770Sstevel@tonic-gate 					pp = pp->p_next;
28780Sstevel@tonic-gate 					if (pp == first_pp) {
28790Sstevel@tonic-gate 						pp = NULL;
28800Sstevel@tonic-gate 					}
28810Sstevel@tonic-gate 					continue;
28820Sstevel@tonic-gate 				}
28830Sstevel@tonic-gate 
28840Sstevel@tonic-gate 				ASSERT(PP_ISFREE(pp));
28850Sstevel@tonic-gate 				ASSERT(PP_ISAGED(pp));
28860Sstevel@tonic-gate 				ASSERT(pp->p_vnode == NULL);
28870Sstevel@tonic-gate 				ASSERT(pp->p_hash == NULL);
28880Sstevel@tonic-gate 				ASSERT(pp->p_offset == (u_offset_t)-1);
28890Sstevel@tonic-gate 				ASSERT(pp->p_szc == szc);
28900Sstevel@tonic-gate 				ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
28910Sstevel@tonic-gate 				/* check if page within DMA attributes */
28923446Smrj 				pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
28930Sstevel@tonic-gate 				if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
28940Sstevel@tonic-gate 				    (pgaddr + MMU_PAGESIZE - 1 <=
28950Sstevel@tonic-gate 				    dma_attr->dma_attr_addr_hi)) {
28960Sstevel@tonic-gate 					break;
28970Sstevel@tonic-gate 				}
28980Sstevel@tonic-gate 
28990Sstevel@tonic-gate 				/* continue looking */
29000Sstevel@tonic-gate 				page_unlock(pp);
29010Sstevel@tonic-gate 				pp = pp->p_next;
29020Sstevel@tonic-gate 				if (pp == first_pp)
29030Sstevel@tonic-gate 					pp = NULL;
29040Sstevel@tonic-gate 
29050Sstevel@tonic-gate 			}
29060Sstevel@tonic-gate 			if (pp != NULL) {
29070Sstevel@tonic-gate 				ASSERT(mtype == PP_2_MTYPE(pp));
29080Sstevel@tonic-gate 				ASSERT(pp->p_szc == 0);
29090Sstevel@tonic-gate 
29100Sstevel@tonic-gate 				/* found a page with specified DMA attributes */
29110Sstevel@tonic-gate 				page_sub(&PAGE_FREELISTS(mnode, szc, bin,
29120Sstevel@tonic-gate 				    mtype), pp);
2913414Skchow 				page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST);
29140Sstevel@tonic-gate 
29150Sstevel@tonic-gate 				if ((PP_ISFREE(pp) == 0) ||
29160Sstevel@tonic-gate 				    (PP_ISAGED(pp) == 0)) {
29170Sstevel@tonic-gate 					cmn_err(CE_PANIC, "page %p is not free",
29180Sstevel@tonic-gate 					    (void *)pp);
29190Sstevel@tonic-gate 				}
29200Sstevel@tonic-gate 
29210Sstevel@tonic-gate 				mutex_exit(pcm);
29220Sstevel@tonic-gate 				check_dma(dma_attr, pp, 1);
29230Sstevel@tonic-gate 				VM_STAT_ADD(pga_vmstats.pgma_allocok);
29240Sstevel@tonic-gate 				return (pp);
29250Sstevel@tonic-gate 			}
29260Sstevel@tonic-gate 			mutex_exit(pcm);
29270Sstevel@tonic-gate nextfreebin:
29282961Sdp78419 			if (plw_initialized == 0) {
29292961Sdp78419 				page_list_walk_init(szc, 0, bin, 1, 0, &plw);
29302961Sdp78419 				ASSERT(plw.plw_ceq_dif == page_colors);
29312961Sdp78419 				plw_initialized = 1;
29322961Sdp78419 			}
29330Sstevel@tonic-gate 
29342961Sdp78419 			if (plw.plw_do_split) {
29352961Sdp78419 				pp = page_freelist_split(szc, bin, mnode,
29362961Sdp78419 				    mtype,
2937*7656SSherry.Moore@Sun.COM 				    mmu_btop(dma_attr->dma_attr_addr_lo),
29382961Sdp78419 				    mmu_btop(dma_attr->dma_attr_addr_hi + 1),
29392961Sdp78419 				    &plw);
2940*7656SSherry.Moore@Sun.COM 				if (pp != NULL) {
2941*7656SSherry.Moore@Sun.COM 					check_dma(dma_attr, pp, 1);
29422961Sdp78419 					return (pp);
2943*7656SSherry.Moore@Sun.COM 				}
29442961Sdp78419 			}
29452961Sdp78419 
29462961Sdp78419 			bin = page_list_walk_next_bin(szc, bin, &plw);
29470Sstevel@tonic-gate 		}
29482961Sdp78419 
2949414Skchow 		MTYPE_NEXT(mnode, mtype, flags);
2950414Skchow 	} while (mtype >= 0);
29510Sstevel@tonic-gate 
29520Sstevel@tonic-gate 	/* failed to find a page in the freelist; try it in the cachelist */
29530Sstevel@tonic-gate 
29540Sstevel@tonic-gate 	/* reset mtype start for cachelist search */
29550Sstevel@tonic-gate 	mtype = mtypestart;
29560Sstevel@tonic-gate 	ASSERT(mtype >= 0);
29570Sstevel@tonic-gate 
29580Sstevel@tonic-gate 	/* start with the bin of matching color */
29590Sstevel@tonic-gate 	bin = origbin;
29600Sstevel@tonic-gate 
29610Sstevel@tonic-gate 	do {
29620Sstevel@tonic-gate 		for (i = 0; i <= page_colors; i++) {
29630Sstevel@tonic-gate 			if (PAGE_CACHELISTS(mnode, bin, mtype) == NULL)
29640Sstevel@tonic-gate 				goto nextcachebin;
29650Sstevel@tonic-gate 			pcm = PC_BIN_MUTEX(mnode, bin, PG_CACHE_LIST);
29660Sstevel@tonic-gate 			mutex_enter(pcm);
29670Sstevel@tonic-gate 			pp = PAGE_CACHELISTS(mnode, bin, mtype);
29680Sstevel@tonic-gate 			first_pp = pp;
29690Sstevel@tonic-gate 			while (pp != NULL) {
29700Sstevel@tonic-gate 				if (page_trylock(pp, SE_EXCL) == 0) {
29710Sstevel@tonic-gate 					pp = pp->p_next;
29720Sstevel@tonic-gate 					if (pp == first_pp)
29730Sstevel@tonic-gate 						break;
29740Sstevel@tonic-gate 					continue;
29750Sstevel@tonic-gate 				}
29760Sstevel@tonic-gate 				ASSERT(pp->p_vnode);
29770Sstevel@tonic-gate 				ASSERT(PP_ISAGED(pp) == 0);
29780Sstevel@tonic-gate 				ASSERT(pp->p_szc == 0);
29790Sstevel@tonic-gate 				ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
29800Sstevel@tonic-gate 
29810Sstevel@tonic-gate 				/* check if page within DMA attributes */
29820Sstevel@tonic-gate 
29833446Smrj 				pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
29840Sstevel@tonic-gate 				if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
29850Sstevel@tonic-gate 				    (pgaddr + MMU_PAGESIZE - 1 <=
29860Sstevel@tonic-gate 				    dma_attr->dma_attr_addr_hi)) {
29870Sstevel@tonic-gate 					break;
29880Sstevel@tonic-gate 				}
29890Sstevel@tonic-gate 
29900Sstevel@tonic-gate 				/* continue looking */
29910Sstevel@tonic-gate 				page_unlock(pp);
29920Sstevel@tonic-gate 				pp = pp->p_next;
29930Sstevel@tonic-gate 				if (pp == first_pp)
29940Sstevel@tonic-gate 					pp = NULL;
29950Sstevel@tonic-gate 			}
29960Sstevel@tonic-gate 
29970Sstevel@tonic-gate 			if (pp != NULL) {
29980Sstevel@tonic-gate 				ASSERT(mtype == PP_2_MTYPE(pp));
29990Sstevel@tonic-gate 				ASSERT(pp->p_szc == 0);
30000Sstevel@tonic-gate 
30010Sstevel@tonic-gate 				/* found a page with specified DMA attributes */
30020Sstevel@tonic-gate 				page_sub(&PAGE_CACHELISTS(mnode, bin,
30030Sstevel@tonic-gate 				    mtype), pp);
3004414Skchow 				page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST);
30050Sstevel@tonic-gate 
30060Sstevel@tonic-gate 				mutex_exit(pcm);
30070Sstevel@tonic-gate 				ASSERT(pp->p_vnode);
30080Sstevel@tonic-gate 				ASSERT(PP_ISAGED(pp) == 0);
30090Sstevel@tonic-gate 				check_dma(dma_attr, pp, 1);
30100Sstevel@tonic-gate 				VM_STAT_ADD(pga_vmstats.pgma_allocok);
30110Sstevel@tonic-gate 				return (pp);
30120Sstevel@tonic-gate 			}
30130Sstevel@tonic-gate 			mutex_exit(pcm);
30140Sstevel@tonic-gate nextcachebin:
30150Sstevel@tonic-gate 			bin += (i == 0) ? BIN_STEP : 1;
30160Sstevel@tonic-gate 			bin &= page_colors_mask;
30170Sstevel@tonic-gate 		}
3018414Skchow 		MTYPE_NEXT(mnode, mtype, flags);
3019414Skchow 	} while (mtype >= 0);
30200Sstevel@tonic-gate 
30210Sstevel@tonic-gate 	VM_STAT_ADD(pga_vmstats.pgma_allocfailed);
30220Sstevel@tonic-gate 	return (NULL);
30230Sstevel@tonic-gate }
30240Sstevel@tonic-gate 
30250Sstevel@tonic-gate /*
30260Sstevel@tonic-gate  * This function is similar to page_get_freelist()/page_get_cachelist()
30270Sstevel@tonic-gate  * but it searches both the lists to find a page with the specified
30280Sstevel@tonic-gate  * color (or no color) and DMA attributes. The search is done in the
30290Sstevel@tonic-gate  * freelist first and then in the cache list within the highest memory
30300Sstevel@tonic-gate  * range (based on DMA attributes) before searching in the lower
30310Sstevel@tonic-gate  * memory ranges.
30320Sstevel@tonic-gate  *
30330Sstevel@tonic-gate  * Note: This function is called only by page_create_io().
30340Sstevel@tonic-gate  */
30350Sstevel@tonic-gate /*ARGSUSED*/
30365084Sjohnlev static page_t *
30370Sstevel@tonic-gate page_get_anylist(struct vnode *vp, u_offset_t off, struct as *as, caddr_t vaddr,
30380Sstevel@tonic-gate     size_t size, uint_t flags, ddi_dma_attr_t *dma_attr, lgrp_t	*lgrp)
30390Sstevel@tonic-gate {
30400Sstevel@tonic-gate 	uint_t		bin;
30410Sstevel@tonic-gate 	int		mtype;
30420Sstevel@tonic-gate 	page_t		*pp;
30430Sstevel@tonic-gate 	int		n;
30440Sstevel@tonic-gate 	int		m;
30450Sstevel@tonic-gate 	int		szc;
30460Sstevel@tonic-gate 	int		fullrange;
30470Sstevel@tonic-gate 	int		mnode;
30480Sstevel@tonic-gate 	int		local_failed_stat = 0;
30490Sstevel@tonic-gate 	lgrp_mnode_cookie_t	lgrp_cookie;
30500Sstevel@tonic-gate 
30510Sstevel@tonic-gate 	VM_STAT_ADD(pga_vmstats.pga_alloc);
30520Sstevel@tonic-gate 
30530Sstevel@tonic-gate 	/* only base pagesize currently supported */
30540Sstevel@tonic-gate 	if (size != MMU_PAGESIZE)
30550Sstevel@tonic-gate 		return (NULL);
30560Sstevel@tonic-gate 
30570Sstevel@tonic-gate 	/*
30580Sstevel@tonic-gate 	 * If we're passed a specific lgroup, we use it.  Otherwise,
30590Sstevel@tonic-gate 	 * assume first-touch placement is desired.
30600Sstevel@tonic-gate 	 */
30610Sstevel@tonic-gate 	if (!LGRP_EXISTS(lgrp))
30620Sstevel@tonic-gate 		lgrp = lgrp_home_lgrp();
30630Sstevel@tonic-gate 
30640Sstevel@tonic-gate 	/* LINTED */
30652961Sdp78419 	AS_2_BIN(as, seg, vp, vaddr, bin, 0);
30660Sstevel@tonic-gate 
30670Sstevel@tonic-gate 	/*
30680Sstevel@tonic-gate 	 * Only hold one freelist or cachelist lock at a time, that way we
30690Sstevel@tonic-gate 	 * can start anywhere and not have to worry about lock
30700Sstevel@tonic-gate 	 * ordering.
30710Sstevel@tonic-gate 	 */
30720Sstevel@tonic-gate 	if (dma_attr == NULL) {
30730Sstevel@tonic-gate 		n = 0;
30740Sstevel@tonic-gate 		m = mnoderangecnt - 1;
30750Sstevel@tonic-gate 		fullrange = 1;
30760Sstevel@tonic-gate 		VM_STAT_ADD(pga_vmstats.pga_nulldmaattr);
30770Sstevel@tonic-gate 	} else {
30780Sstevel@tonic-gate 		pfn_t pfnlo = mmu_btop(dma_attr->dma_attr_addr_lo);
30790Sstevel@tonic-gate 		pfn_t pfnhi = mmu_btop(dma_attr->dma_attr_addr_hi);
30800Sstevel@tonic-gate 
30810Sstevel@tonic-gate 		/*
30820Sstevel@tonic-gate 		 * We can only guarantee alignment to a page boundary.
30830Sstevel@tonic-gate 		 */
30840Sstevel@tonic-gate 		if (dma_attr->dma_attr_align > MMU_PAGESIZE)
30850Sstevel@tonic-gate 			return (NULL);
30860Sstevel@tonic-gate 
30870Sstevel@tonic-gate 		n = pfn_2_mtype(pfnlo);
30880Sstevel@tonic-gate 		m = pfn_2_mtype(pfnhi);
30890Sstevel@tonic-gate 
30900Sstevel@tonic-gate 		fullrange = ((pfnlo == mnoderanges[n].mnr_pfnlo) &&
30910Sstevel@tonic-gate 		    (pfnhi >= mnoderanges[m].mnr_pfnhi));
30920Sstevel@tonic-gate 	}
30930Sstevel@tonic-gate 	VM_STAT_COND_ADD(fullrange == 0, pga_vmstats.pga_notfullrange);
30940Sstevel@tonic-gate 
30950Sstevel@tonic-gate 	if (n > m)
30960Sstevel@tonic-gate 		return (NULL);
30970Sstevel@tonic-gate 
30980Sstevel@tonic-gate 	szc = 0;
30990Sstevel@tonic-gate 
31000Sstevel@tonic-gate 	/* cycling through mtype is handled by RANGE0 if n == 0 */
31010Sstevel@tonic-gate 	if (n == 0) {
31020Sstevel@tonic-gate 		flags |= PGI_MT_RANGE0;
31030Sstevel@tonic-gate 		n = m;
31040Sstevel@tonic-gate 	}
31050Sstevel@tonic-gate 
31060Sstevel@tonic-gate 	/*
31070Sstevel@tonic-gate 	 * Try local memory node first, but try remote if we can't
31080Sstevel@tonic-gate 	 * get a page of the right color.
31090Sstevel@tonic-gate 	 */
31100Sstevel@tonic-gate 	LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_HIER);
31110Sstevel@tonic-gate 	while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
31120Sstevel@tonic-gate 		/*
31130Sstevel@tonic-gate 		 * allocate pages from high pfn to low.
31140Sstevel@tonic-gate 		 */
31150Sstevel@tonic-gate 		for (mtype = m; mtype >= n; mtype--) {
31160Sstevel@tonic-gate 			if (fullrange != 0) {
31170Sstevel@tonic-gate 				pp = page_get_mnode_freelist(mnode,
31180Sstevel@tonic-gate 				    bin, mtype, szc, flags);
31190Sstevel@tonic-gate 				if (pp == NULL) {
31200Sstevel@tonic-gate 					pp = page_get_mnode_cachelist(
31215084Sjohnlev 					    bin, flags, mnode, mtype);
31220Sstevel@tonic-gate 				}
31230Sstevel@tonic-gate 			} else {
31240Sstevel@tonic-gate 				pp = page_get_mnode_anylist(bin, szc,
31250Sstevel@tonic-gate 				    flags, mnode, mtype, dma_attr);
31260Sstevel@tonic-gate 			}
31270Sstevel@tonic-gate 			if (pp != NULL) {
31280Sstevel@tonic-gate 				VM_STAT_ADD(pga_vmstats.pga_allocok);
31290Sstevel@tonic-gate 				check_dma(dma_attr, pp, 1);
31300Sstevel@tonic-gate 				return (pp);
31310Sstevel@tonic-gate 			}
31320Sstevel@tonic-gate 		}
31330Sstevel@tonic-gate 		if (!local_failed_stat) {
31340Sstevel@tonic-gate 			lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1);
31350Sstevel@tonic-gate 			local_failed_stat = 1;
31360Sstevel@tonic-gate 		}
31370Sstevel@tonic-gate 	}
31380Sstevel@tonic-gate 	VM_STAT_ADD(pga_vmstats.pga_allocfailed);
31390Sstevel@tonic-gate 
31400Sstevel@tonic-gate 	return (NULL);
31410Sstevel@tonic-gate }
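
/*
 * Editor's illustration (hypothetical numbers): a device with
 * dma_attr_addr_lo = 0 and dma_attr_addr_hi = 16MB - 1 gives pfnlo = 0
 * and pfnhi = 0xfff.  On a machine whose lowest mnoderange covers
 * exactly 0-16MB, n == m and fullrange is 1, so the cheaper
 * freelist/cachelist path runs; a range that only partially covers an
 * mnoderange forces the page_get_mnode_anylist() path instead.
 */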
31420Sstevel@tonic-gate 
31430Sstevel@tonic-gate /*
31440Sstevel@tonic-gate  * page_create_io()
31450Sstevel@tonic-gate  *
31460Sstevel@tonic-gate  * This function is a copy of page_create_va() with an additional
31470Sstevel@tonic-gate  * argument 'mattr' that specifies DMA memory requirements to
31480Sstevel@tonic-gate  * the page list functions. This function is used by the segkmem
31490Sstevel@tonic-gate  * allocator, so it is used only to create new pages (i.e., PG_EXCL
31500Sstevel@tonic-gate  * is set).
31510Sstevel@tonic-gate  *
31520Sstevel@tonic-gate  * Note: This interface is currently used by x86 PSM only and is
31530Sstevel@tonic-gate  *	 not fully specified so the commitment level is only for
31540Sstevel@tonic-gate  *	 private interface specific to x86. This interface uses PSM
31550Sstevel@tonic-gate  *	 specific page_get_anylist() interface.
31560Sstevel@tonic-gate  */
31570Sstevel@tonic-gate 
31580Sstevel@tonic-gate #define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
31590Sstevel@tonic-gate 	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
31600Sstevel@tonic-gate 		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
31610Sstevel@tonic-gate 			break; \
31620Sstevel@tonic-gate 	} \
31630Sstevel@tonic-gate }
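
/*
 * Editor's sketch (hypothetical) of how the macro above is used: with
 * the hash chain mutex held, it leaves 'pp' pointing at the page hashed
 * at (vp, off), or NULL.  Real callers must lock the page and re-check
 * identity, as page_create_io() does below.
 */
#ifdef notdef
static page_t *
example_hash_lookup(struct vnode *vp, u_offset_t off)
{
	page_t *pp;
	uint_t index = PAGE_HASH_FUNC(vp, off);
	kmutex_t *phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	mutex_exit(phm);
	return (pp);	/* NULL if nothing is hashed at (vp, off) */
}
#endif	/* notdef */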
31640Sstevel@tonic-gate 
31650Sstevel@tonic-gate 
31660Sstevel@tonic-gate page_t *
31670Sstevel@tonic-gate page_create_io(
31680Sstevel@tonic-gate 	struct vnode	*vp,
31690Sstevel@tonic-gate 	u_offset_t	off,
31700Sstevel@tonic-gate 	uint_t		bytes,
31710Sstevel@tonic-gate 	uint_t		flags,
31720Sstevel@tonic-gate 	struct as	*as,
31730Sstevel@tonic-gate 	caddr_t		vaddr,
31740Sstevel@tonic-gate 	ddi_dma_attr_t	*mattr)	/* DMA memory attributes if any */
31750Sstevel@tonic-gate {
31760Sstevel@tonic-gate 	page_t		*plist = NULL;
31770Sstevel@tonic-gate 	uint_t		plist_len = 0;
31780Sstevel@tonic-gate 	pgcnt_t		npages;
31790Sstevel@tonic-gate 	page_t		*npp = NULL;
31800Sstevel@tonic-gate 	uint_t		pages_req;
31810Sstevel@tonic-gate 	page_t		*pp;
31820Sstevel@tonic-gate 	kmutex_t	*phm = NULL;
31830Sstevel@tonic-gate 	uint_t		index;
31840Sstevel@tonic-gate 
31850Sstevel@tonic-gate 	TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
31865084Sjohnlev 	    "page_create_start:vp %p off %llx bytes %u flags %x",
31875084Sjohnlev 	    vp, off, bytes, flags);
31880Sstevel@tonic-gate 
31890Sstevel@tonic-gate 	ASSERT((flags & ~(PG_EXCL | PG_WAIT | PG_PHYSCONTIG)) == 0);
31900Sstevel@tonic-gate 
31910Sstevel@tonic-gate 	pages_req = npages = mmu_btopr(bytes);
31920Sstevel@tonic-gate 
31930Sstevel@tonic-gate 	/*
31940Sstevel@tonic-gate 	 * Do the freemem and pcf accounting.
31950Sstevel@tonic-gate 	 */
31960Sstevel@tonic-gate 	if (!page_create_wait(npages, flags)) {
31970Sstevel@tonic-gate 		return (NULL);
31980Sstevel@tonic-gate 	}
31990Sstevel@tonic-gate 
32000Sstevel@tonic-gate 	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
32015084Sjohnlev 	    "page_create_success:vp %p off %llx", vp, off);
32020Sstevel@tonic-gate 
32030Sstevel@tonic-gate 	/*
32040Sstevel@tonic-gate 	 * If satisfying this request has left us with too little
32050Sstevel@tonic-gate 	 * memory, start the wheels turning to get some back.  The
32060Sstevel@tonic-gate 	 * first clause of the test prevents waking up the pageout
32070Sstevel@tonic-gate 	 * daemon in situations where it would decide that there's
32080Sstevel@tonic-gate 	 * nothing to do.
32090Sstevel@tonic-gate 	 */
32100Sstevel@tonic-gate 	if (nscan < desscan && freemem < minfree) {
32110Sstevel@tonic-gate 		TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
32125084Sjohnlev 		    "pageout_cv_signal:freemem %ld", freemem);
32130Sstevel@tonic-gate 		cv_signal(&proc_pageout->p_cv);
32140Sstevel@tonic-gate 	}
32150Sstevel@tonic-gate 
32160Sstevel@tonic-gate 	if (flags & PG_PHYSCONTIG) {
32170Sstevel@tonic-gate 
32180Sstevel@tonic-gate 		plist = page_get_contigpage(&npages, mattr, 1);
32190Sstevel@tonic-gate 		if (plist == NULL) {
32200Sstevel@tonic-gate 			page_create_putback(npages);
32210Sstevel@tonic-gate 			return (NULL);
32220Sstevel@tonic-gate 		}
32230Sstevel@tonic-gate 
32240Sstevel@tonic-gate 		pp = plist;
32250Sstevel@tonic-gate 
32260Sstevel@tonic-gate 		do {
32270Sstevel@tonic-gate 			if (!page_hashin(pp, vp, off, NULL)) {
32280Sstevel@tonic-gate 				panic("pg_creat_io: hashin failed %p %p %llx",
32290Sstevel@tonic-gate 				    (void *)pp, (void *)vp, off);
32300Sstevel@tonic-gate 			}
32310Sstevel@tonic-gate 			VM_STAT_ADD(page_create_new);
32320Sstevel@tonic-gate 			off += MMU_PAGESIZE;
32330Sstevel@tonic-gate 			PP_CLRFREE(pp);
32340Sstevel@tonic-gate 			PP_CLRAGED(pp);
32350Sstevel@tonic-gate 			page_set_props(pp, P_REF);
32360Sstevel@tonic-gate 			pp = pp->p_next;
32370Sstevel@tonic-gate 		} while (pp != plist);
32380Sstevel@tonic-gate 
32390Sstevel@tonic-gate 		if (!npages) {
32400Sstevel@tonic-gate 			check_dma(mattr, plist, pages_req);
32410Sstevel@tonic-gate 			return (plist);
32420Sstevel@tonic-gate 		} else {
32430Sstevel@tonic-gate 			vaddr += (pages_req - npages) << MMU_PAGESHIFT;
32440Sstevel@tonic-gate 		}
32450Sstevel@tonic-gate 
32460Sstevel@tonic-gate 		/*
32470Sstevel@tonic-gate 		 * fall-thru:
32480Sstevel@tonic-gate 		 *
32490Sstevel@tonic-gate 		 * page_get_contigpage returns when npages <= sgllen.
32500Sstevel@tonic-gate 		 * Grab the rest of the non-contig pages below from anylist.
32510Sstevel@tonic-gate 		 */
32520Sstevel@tonic-gate 	}
32530Sstevel@tonic-gate 
32540Sstevel@tonic-gate 	/*
32550Sstevel@tonic-gate 	 * Loop around collecting the requested number of pages.
32560Sstevel@tonic-gate 	 * Most of the time, we have to `create' a new page. With
32570Sstevel@tonic-gate 	 * this in mind, pull the page off the free list before
32580Sstevel@tonic-gate 	 * getting the hash lock.  This will minimize the hash
32590Sstevel@tonic-gate 	 * lock hold time, nesting, and the like.  If it turns
32600Sstevel@tonic-gate 	 * out we don't need the page, we put it back at the end.
32610Sstevel@tonic-gate 	 */
32620Sstevel@tonic-gate 	while (npages--) {
32630Sstevel@tonic-gate 		phm = NULL;
32640Sstevel@tonic-gate 
32650Sstevel@tonic-gate 		index = PAGE_HASH_FUNC(vp, off);
32660Sstevel@tonic-gate top:
32670Sstevel@tonic-gate 		ASSERT(phm == NULL);
32680Sstevel@tonic-gate 		ASSERT(index == PAGE_HASH_FUNC(vp, off));
32690Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
32700Sstevel@tonic-gate 
32710Sstevel@tonic-gate 		if (npp == NULL) {
32720Sstevel@tonic-gate 			/*
32730Sstevel@tonic-gate 			 * Try to get the page of any color either from
32740Sstevel@tonic-gate 			 * the freelist or from the cache list.
32750Sstevel@tonic-gate 			 */
32760Sstevel@tonic-gate 			npp = page_get_anylist(vp, off, as, vaddr, MMU_PAGESIZE,
32770Sstevel@tonic-gate 			    flags & ~PG_MATCH_COLOR, mattr, NULL);
32780Sstevel@tonic-gate 			if (npp == NULL) {
32790Sstevel@tonic-gate 				if (mattr == NULL) {
32800Sstevel@tonic-gate 					/*
32810Sstevel@tonic-gate 					 * Not looking for a special page;
32820Sstevel@tonic-gate 					 * panic!
32830Sstevel@tonic-gate 					 */
32840Sstevel@tonic-gate 					panic("no page found %d", (int)npages);
32850Sstevel@tonic-gate 				}
32860Sstevel@tonic-gate 				/*
32870Sstevel@tonic-gate 				 * No page found! This can happen
32880Sstevel@tonic-gate 				 * if we are looking for a page
32890Sstevel@tonic-gate 				 * within a specific memory range
32900Sstevel@tonic-gate 				 * for DMA purposes. If PG_WAIT is
32910Sstevel@tonic-gate 				 * specified then we wait for a
32920Sstevel@tonic-gate 				 * while and then try again. The
32930Sstevel@tonic-gate 				 * wait could be forever if we
32940Sstevel@tonic-gate 				 * don't get the page(s) we need.
32950Sstevel@tonic-gate 				 *
32960Sstevel@tonic-gate 				 * Note: XXX We really need a mechanism
32970Sstevel@tonic-gate 				 * to wait for pages in the desired
32980Sstevel@tonic-gate 				 * range. For now, we wait for any
32990Sstevel@tonic-gate 				 * pages and see if we can use it.
33000Sstevel@tonic-gate 				 */
33010Sstevel@tonic-gate 
33020Sstevel@tonic-gate 				if ((mattr != NULL) && (flags & PG_WAIT)) {
33030Sstevel@tonic-gate 					delay(10);
33040Sstevel@tonic-gate 					goto top;
33050Sstevel@tonic-gate 				}
33060Sstevel@tonic-gate 				goto fail; /* undo accounting stuff */
33070Sstevel@tonic-gate 			}
33080Sstevel@tonic-gate 
33090Sstevel@tonic-gate 			if (PP_ISAGED(npp) == 0) {
33100Sstevel@tonic-gate 				/*
33110Sstevel@tonic-gate 				 * Since this page came from the
33120Sstevel@tonic-gate 				 * cachelist, we must destroy the
33130Sstevel@tonic-gate 				 * old vnode association.
33140Sstevel@tonic-gate 				 */
33150Sstevel@tonic-gate 				page_hashout(npp, (kmutex_t *)NULL);
33160Sstevel@tonic-gate 			}
33170Sstevel@tonic-gate 		}
33180Sstevel@tonic-gate 
33190Sstevel@tonic-gate 		/*
33200Sstevel@tonic-gate 		 * We own this page!
33210Sstevel@tonic-gate 		 */
33220Sstevel@tonic-gate 		ASSERT(PAGE_EXCL(npp));
33230Sstevel@tonic-gate 		ASSERT(npp->p_vnode == NULL);
33240Sstevel@tonic-gate 		ASSERT(!hat_page_is_mapped(npp));
33250Sstevel@tonic-gate 		PP_CLRFREE(npp);
33260Sstevel@tonic-gate 		PP_CLRAGED(npp);
33270Sstevel@tonic-gate 
33280Sstevel@tonic-gate 		/*
33290Sstevel@tonic-gate 		 * Here we have a page in our hot little mits and are
33300Sstevel@tonic-gate 		 * Here we have a page in our hot little mitts and are
33310Sstevel@tonic-gate 		 * Get the mutex and check to see if it really does
33320Sstevel@tonic-gate 		 * not exist.
33330Sstevel@tonic-gate 		 */
33340Sstevel@tonic-gate 		phm = PAGE_HASH_MUTEX(index);
33350Sstevel@tonic-gate 		mutex_enter(phm);
33360Sstevel@tonic-gate 		PAGE_HASH_SEARCH(index, pp, vp, off);
33370Sstevel@tonic-gate 		if (pp == NULL) {
33380Sstevel@tonic-gate 			VM_STAT_ADD(page_create_new);
33390Sstevel@tonic-gate 			pp = npp;
33400Sstevel@tonic-gate 			npp = NULL;
33410Sstevel@tonic-gate 			if (!page_hashin(pp, vp, off, phm)) {
33420Sstevel@tonic-gate 				/*
33430Sstevel@tonic-gate 				 * Since we hold the page hash mutex and
33440Sstevel@tonic-gate 				 * just searched for this page, page_hashin
33450Sstevel@tonic-gate 				 * had better not fail.  If it does, that
33460Sstevel@tonic-gate 				 * means some thread did not follow the
33470Sstevel@tonic-gate 				 * page hash mutex rules.  Panic now and
33480Sstevel@tonic-gate 				 * get it over with.  As usual, go down
33490Sstevel@tonic-gate 				 * holding all the locks.
33500Sstevel@tonic-gate 				 */
33510Sstevel@tonic-gate 				ASSERT(MUTEX_HELD(phm));
33520Sstevel@tonic-gate 				panic("page_create: hashin fail %p %p %llx %p",
33530Sstevel@tonic-gate 				    (void *)pp, (void *)vp, off, (void *)phm);
33540Sstevel@tonic-gate 
33550Sstevel@tonic-gate 			}
33560Sstevel@tonic-gate 			ASSERT(MUTEX_HELD(phm));
33570Sstevel@tonic-gate 			mutex_exit(phm);
33580Sstevel@tonic-gate 			phm = NULL;
33590Sstevel@tonic-gate 
33600Sstevel@tonic-gate 			/*
33610Sstevel@tonic-gate 			 * Hat layer locking need not be done to set
33620Sstevel@tonic-gate 			 * the following bits since the page is not hashed
33630Sstevel@tonic-gate 			 * and was on the free list (i.e., had no mappings).
33640Sstevel@tonic-gate 			 *
33650Sstevel@tonic-gate 			 * Set the reference bit to protect
33660Sstevel@tonic-gate 			 * against immediate pageout
33670Sstevel@tonic-gate 			 *
33680Sstevel@tonic-gate 			 * XXXmh modify freelist code to set reference
33690Sstevel@tonic-gate 			 * bit so we don't have to do it here.
33700Sstevel@tonic-gate 			 */
33710Sstevel@tonic-gate 			page_set_props(pp, P_REF);
33720Sstevel@tonic-gate 		} else {
33730Sstevel@tonic-gate 			ASSERT(MUTEX_HELD(phm));
33740Sstevel@tonic-gate 			mutex_exit(phm);
33750Sstevel@tonic-gate 			phm = NULL;
33760Sstevel@tonic-gate 			/*
33770Sstevel@tonic-gate 			 * NOTE: This should not happen for pages associated
33780Sstevel@tonic-gate 			 *	 with kernel vnode 'kvp'.
33790Sstevel@tonic-gate 			 */
33800Sstevel@tonic-gate 			/* XX64 - to debug why this happens! */
33813290Sjohansen 			ASSERT(!VN_ISKAS(vp));
33823290Sjohansen 			if (VN_ISKAS(vp))
33830Sstevel@tonic-gate 				cmn_err(CE_NOTE,
33840Sstevel@tonic-gate 				    "page_create: page not expected "
33850Sstevel@tonic-gate 				    "in hash list for kernel vnode - pp 0x%p",
33860Sstevel@tonic-gate 				    (void *)pp);
33870Sstevel@tonic-gate 			VM_STAT_ADD(page_create_exists);
33880Sstevel@tonic-gate 			goto fail;
33890Sstevel@tonic-gate 		}
33900Sstevel@tonic-gate 
33910Sstevel@tonic-gate 		/*
33920Sstevel@tonic-gate 		 * Got a page!  It is locked.  Acquire the i/o
33930Sstevel@tonic-gate 		 * lock since we are going to use the p_next and
33940Sstevel@tonic-gate 		 * p_prev fields to link the requested pages together.
33950Sstevel@tonic-gate 		 */
33960Sstevel@tonic-gate 		page_io_lock(pp);
33970Sstevel@tonic-gate 		page_add(&plist, pp);
33980Sstevel@tonic-gate 		plist = plist->p_next;
33990Sstevel@tonic-gate 		off += MMU_PAGESIZE;
34000Sstevel@tonic-gate 		vaddr += MMU_PAGESIZE;
34010Sstevel@tonic-gate 	}
34020Sstevel@tonic-gate 
34030Sstevel@tonic-gate 	check_dma(mattr, plist, pages_req);
34040Sstevel@tonic-gate 	return (plist);
34050Sstevel@tonic-gate 
34060Sstevel@tonic-gate fail:
34070Sstevel@tonic-gate 	if (npp != NULL) {
34080Sstevel@tonic-gate 		/*
34090Sstevel@tonic-gate 		 * Did not need this page after all.
34100Sstevel@tonic-gate 		 * Put it back on the free list.
34110Sstevel@tonic-gate 		 */
34120Sstevel@tonic-gate 		VM_STAT_ADD(page_create_putbacks);
34130Sstevel@tonic-gate 		PP_SETFREE(npp);
34140Sstevel@tonic-gate 		PP_SETAGED(npp);
34150Sstevel@tonic-gate 		npp->p_offset = (u_offset_t)-1;
34160Sstevel@tonic-gate 		page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
34170Sstevel@tonic-gate 		page_unlock(npp);
34180Sstevel@tonic-gate 	}
34190Sstevel@tonic-gate 
34200Sstevel@tonic-gate 	/*
34210Sstevel@tonic-gate 	 * Give up the pages we already got.
34220Sstevel@tonic-gate 	 */
34230Sstevel@tonic-gate 	while (plist != NULL) {
34240Sstevel@tonic-gate 		pp = plist;
34250Sstevel@tonic-gate 		page_sub(&plist, pp);
34260Sstevel@tonic-gate 		page_io_unlock(pp);
34270Sstevel@tonic-gate 		plist_len++;
34280Sstevel@tonic-gate 		/*LINTED: constant in conditional ctx*/
34290Sstevel@tonic-gate 		VN_DISPOSE(pp, B_INVAL, 0, kcred);
34300Sstevel@tonic-gate 	}
34310Sstevel@tonic-gate 
34320Sstevel@tonic-gate 	/*
34330Sstevel@tonic-gate 	 * VN_DISPOSE does freemem accounting for the pages in plist
34340Sstevel@tonic-gate 	 * by calling page_free. So, we need to undo the pcf accounting
34350Sstevel@tonic-gate 	 * for only the remaining pages.
34360Sstevel@tonic-gate 	 */
34370Sstevel@tonic-gate 	VM_STAT_ADD(page_create_putbacks);
34380Sstevel@tonic-gate 	page_create_putback(pages_req - plist_len);
34390Sstevel@tonic-gate 
34400Sstevel@tonic-gate 	return (NULL);
34410Sstevel@tonic-gate }
34425084Sjohnlev #endif /* !__xpv */
34430Sstevel@tonic-gate 
34440Sstevel@tonic-gate 
34450Sstevel@tonic-gate /*
34460Sstevel@tonic-gate  * Copy the data from the physical page represented by "frompp" to
34470Sstevel@tonic-gate  * that represented by "topp". ppcopy uses CPU->cpu_caddr1 and
34480Sstevel@tonic-gate  * CPU->cpu_caddr2.  It assumes that no one uses either map at interrupt
34490Sstevel@tonic-gate  * level and no one sleeps with an active mapping there.
34500Sstevel@tonic-gate  *
34510Sstevel@tonic-gate  * Note that the ref/mod bits in the page_t's are not affected by
34520Sstevel@tonic-gate  * this operation, hence it is up to the caller to update them appropriately.
34530Sstevel@tonic-gate  */
34543253Smec int
34550Sstevel@tonic-gate ppcopy(page_t *frompp, page_t *topp)
34560Sstevel@tonic-gate {
34570Sstevel@tonic-gate 	caddr_t		pp_addr1;
34580Sstevel@tonic-gate 	caddr_t		pp_addr2;
34593446Smrj 	hat_mempte_t	pte1;
34603446Smrj 	hat_mempte_t	pte2;
34610Sstevel@tonic-gate 	kmutex_t	*ppaddr_mutex;
34623253Smec 	label_t		ljb;
34633253Smec 	int		ret = 1;
34640Sstevel@tonic-gate 
34650Sstevel@tonic-gate 	ASSERT_STACK_ALIGNED();
34660Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(frompp));
34670Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(topp));
34680Sstevel@tonic-gate 
34690Sstevel@tonic-gate 	if (kpm_enable) {
34700Sstevel@tonic-gate 		pp_addr1 = hat_kpm_page2va(frompp, 0);
34710Sstevel@tonic-gate 		pp_addr2 = hat_kpm_page2va(topp, 0);
34720Sstevel@tonic-gate 		kpreempt_disable();
34730Sstevel@tonic-gate 	} else {
34740Sstevel@tonic-gate 		/*
34750Sstevel@tonic-gate 		 * disable preemption so that our CPU can't change under us
34760Sstevel@tonic-gate 		 */
34770Sstevel@tonic-gate 		kpreempt_disable();
34780Sstevel@tonic-gate 
34790Sstevel@tonic-gate 		pp_addr1 = CPU->cpu_caddr1;
34800Sstevel@tonic-gate 		pp_addr2 = CPU->cpu_caddr2;
34813446Smrj 		pte1 = CPU->cpu_caddr1pte;
34823446Smrj 		pte2 = CPU->cpu_caddr2pte;
34830Sstevel@tonic-gate 
34840Sstevel@tonic-gate 		ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
34850Sstevel@tonic-gate 		mutex_enter(ppaddr_mutex);
34860Sstevel@tonic-gate 
34870Sstevel@tonic-gate 		hat_mempte_remap(page_pptonum(frompp), pp_addr1, pte1,
34880Sstevel@tonic-gate 		    PROT_READ | HAT_STORECACHING_OK, HAT_LOAD_NOCONSIST);
34890Sstevel@tonic-gate 		hat_mempte_remap(page_pptonum(topp), pp_addr2, pte2,
34900Sstevel@tonic-gate 		    PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
34910Sstevel@tonic-gate 		    HAT_LOAD_NOCONSIST);
34920Sstevel@tonic-gate 	}
34930Sstevel@tonic-gate 
34943253Smec 	if (on_fault(&ljb)) {
34953253Smec 		ret = 0;
34963253Smec 		goto faulted;
34973253Smec 	}
34980Sstevel@tonic-gate 	if (use_sse_pagecopy)
34995084Sjohnlev #ifdef __xpv
35005084Sjohnlev 		page_copy_no_xmm(pp_addr2, pp_addr1);
35015084Sjohnlev #else
35020Sstevel@tonic-gate 		hwblkpagecopy(pp_addr1, pp_addr2);
35035084Sjohnlev #endif
35040Sstevel@tonic-gate 	else
35050Sstevel@tonic-gate 		bcopy(pp_addr1, pp_addr2, PAGESIZE);
35060Sstevel@tonic-gate 
35073253Smec 	no_fault();
35083253Smec faulted:
35093446Smrj 	if (!kpm_enable) {
35105084Sjohnlev #ifdef __xpv
35115084Sjohnlev 		/*
35125217Sjosephb 		 * We can't leave unused mappings lying about under the
35135217Sjosephb 		 * hypervisor, so blow them away.
35145084Sjohnlev 		 */
35155217Sjosephb 		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr1, 0,
35165217Sjosephb 		    UVMF_INVLPG | UVMF_LOCAL) < 0)
35175217Sjosephb 			panic("HYPERVISOR_update_va_mapping() failed");
35185084Sjohnlev 		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
35195084Sjohnlev 		    UVMF_INVLPG | UVMF_LOCAL) < 0)
35205084Sjohnlev 			panic("HYPERVISOR_update_va_mapping() failed");
35215084Sjohnlev #endif
35220Sstevel@tonic-gate 		mutex_exit(ppaddr_mutex);
35233446Smrj 	}
35240Sstevel@tonic-gate 	kpreempt_enable();
35253253Smec 	return (ret);
35260Sstevel@tonic-gate }
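
/*
 * Editor's sketch (hypothetical): a minimal caller of ppcopy() above.
 * Both pages must already be locked, and the return value must be
 * checked since a fault (e.g. a UE on the source page) aborts the copy.
 */
#ifdef notdef
static int
example_copy_page(page_t *src, page_t *dst)
{
	ASSERT(PAGE_LOCKED(src));
	ASSERT(PAGE_LOCKED(dst));

	/* returns 0 if on_fault() caught a trap during the copy */
	return (ppcopy(src, dst));
}
#endif	/* notdef */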
35270Sstevel@tonic-gate 
35285262Srscott void
35295262Srscott pagezero(page_t *pp, uint_t off, uint_t len)
35305262Srscott {
35315262Srscott 	ASSERT(PAGE_LOCKED(pp));
35325262Srscott 	pfnzero(page_pptonum(pp), off, len);
35335262Srscott }
35345262Srscott 
35350Sstevel@tonic-gate /*
35365262Srscott  * Zero the physical page given by pfn, from off to off + len,
35370Sstevel@tonic-gate  * without changing the reference and modified bits of the page.
35380Sstevel@tonic-gate  *
35390Sstevel@tonic-gate  * We do this using CPU private page address #2; see ppcopy() for more info.
35405262Srscott  * pfnzero() must not be called at interrupt level.
35410Sstevel@tonic-gate  */
35420Sstevel@tonic-gate void
35435262Srscott pfnzero(pfn_t pfn, uint_t off, uint_t len)
35440Sstevel@tonic-gate {
35450Sstevel@tonic-gate 	caddr_t		pp_addr2;
35463446Smrj 	hat_mempte_t	pte2;
35475262Srscott 	kmutex_t	*ppaddr_mutex = NULL;
35480Sstevel@tonic-gate 
35490Sstevel@tonic-gate 	ASSERT_STACK_ALIGNED();
35500Sstevel@tonic-gate 	ASSERT(len <= MMU_PAGESIZE);
35510Sstevel@tonic-gate 	ASSERT(off <= MMU_PAGESIZE);
35520Sstevel@tonic-gate 	ASSERT(off + len <= MMU_PAGESIZE);
35535262Srscott 
35545262Srscott 	if (kpm_enable && !pfn_is_foreign(pfn)) {
35555262Srscott 		pp_addr2 = hat_kpm_pfn2va(pfn);
35560Sstevel@tonic-gate 		kpreempt_disable();
35570Sstevel@tonic-gate 	} else {
35580Sstevel@tonic-gate 		kpreempt_disable();
35590Sstevel@tonic-gate 
35600Sstevel@tonic-gate 		pp_addr2 = CPU->cpu_caddr2;
35613446Smrj 		pte2 = CPU->cpu_caddr2pte;
35620Sstevel@tonic-gate 
35630Sstevel@tonic-gate 		ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
35640Sstevel@tonic-gate 		mutex_enter(ppaddr_mutex);
35650Sstevel@tonic-gate 
35665262Srscott 		hat_mempte_remap(pfn, pp_addr2, pte2,
35670Sstevel@tonic-gate 		    PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
35680Sstevel@tonic-gate 		    HAT_LOAD_NOCONSIST);
35690Sstevel@tonic-gate 	}
35700Sstevel@tonic-gate 
35713446Smrj 	if (use_sse_pagezero) {
35725084Sjohnlev #ifdef __xpv
35735084Sjohnlev 		uint_t rem;
35745084Sjohnlev 
35755084Sjohnlev 		/*
35765084Sjohnlev 		 * zero a byte at a time until properly aligned for
35775084Sjohnlev 		 * block_zero_no_xmm().
35785084Sjohnlev 		 */
35795084Sjohnlev 		while (!P2NPHASE(off, ((uint_t)BLOCKZEROALIGN)) && len-- > 0)
35805084Sjohnlev 			pp_addr2[off++] = 0;
35815084Sjohnlev 
35825084Sjohnlev 		/*
35835084Sjohnlev 		 * Now use faster block_zero_no_xmm() for any range
35845084Sjohnlev 		 * that is properly aligned and sized.
35855084Sjohnlev 		 */
35865084Sjohnlev 		rem = P2PHASE(len, ((uint_t)BLOCKZEROALIGN));
35875084Sjohnlev 		len -= rem;
35885084Sjohnlev 		if (len != 0) {
35895084Sjohnlev 			block_zero_no_xmm(pp_addr2 + off, len);
35905084Sjohnlev 			off += len;
35915084Sjohnlev 		}
35925084Sjohnlev 
35935084Sjohnlev 		/*
35945084Sjohnlev 		 * zero remainder with byte stores.
35955084Sjohnlev 		 */
35965084Sjohnlev 		while (rem-- > 0)
35975084Sjohnlev 			pp_addr2[off++] = 0;
35985084Sjohnlev #else
35990Sstevel@tonic-gate 		hwblkclr(pp_addr2 + off, len);
36005084Sjohnlev #endif
36013446Smrj 	} else {
36020Sstevel@tonic-gate 		bzero(pp_addr2 + off, len);
36033446Smrj 	}
36040Sstevel@tonic-gate 
36055262Srscott 	if (!kpm_enable || pfn_is_foreign(pfn)) {
36065084Sjohnlev #ifdef __xpv
36075262Srscott 		/*
36085262Srscott 		 * On the hypervisor this page might get used for a page
36095262Srscott 		 * table before any intervening change to this mapping,
36105262Srscott 		 * so blow it away.
36115262Srscott 		 */
36125262Srscott 		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
36135262Srscott 		    UVMF_INVLPG) < 0)
36145262Srscott 			panic("HYPERVISOR_update_va_mapping() failed");
36155084Sjohnlev #endif
36160Sstevel@tonic-gate 		mutex_exit(ppaddr_mutex);
36175262Srscott 	}
36185262Srscott 
36190Sstevel@tonic-gate 	kpreempt_enable();
36200Sstevel@tonic-gate }
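
/*
 * For reference (editor's note, assuming the usual <sys/sysmacros.h>
 * definitions): P2NPHASE(x, align) = -(x) & (align - 1) is the distance
 * forward to the next alignment boundary (0 when x is already aligned),
 * and P2PHASE(x, align) = x & (align - 1) is the distance past the
 * previous one.  E.g. with align = 16: P2NPHASE(5, 16) = 11 and
 * P2PHASE(40, 16) = 8.  This is the arithmetic the __xpv path above
 * uses to split the range into a byte-zeroed head, a block-zeroed
 * middle, and a byte-zeroed tail.
 */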
36210Sstevel@tonic-gate 
36220Sstevel@tonic-gate /*
36230Sstevel@tonic-gate  * Platform-dependent page scrub call.
36240Sstevel@tonic-gate  */
36250Sstevel@tonic-gate void
36260Sstevel@tonic-gate pagescrub(page_t *pp, uint_t off, uint_t len)
36270Sstevel@tonic-gate {
36280Sstevel@tonic-gate 	/*
36290Sstevel@tonic-gate 	 * For now, we rely on the fact that pagezero() will
36300Sstevel@tonic-gate 	 * always clear UEs.
36310Sstevel@tonic-gate 	 */
36320Sstevel@tonic-gate 	pagezero(pp, off, len);
36330Sstevel@tonic-gate }
36340Sstevel@tonic-gate 
36350Sstevel@tonic-gate /*
36360Sstevel@tonic-gate  * Set up two private addresses on a given CPU for use in ppcopy().
36370Sstevel@tonic-gate  */
36380Sstevel@tonic-gate void
36390Sstevel@tonic-gate setup_vaddr_for_ppcopy(struct cpu *cpup)
36400Sstevel@tonic-gate {
36410Sstevel@tonic-gate 	void *addr;
36423446Smrj 	hat_mempte_t pte_pa;
36430Sstevel@tonic-gate 
36440Sstevel@tonic-gate 	addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
36453446Smrj 	pte_pa = hat_mempte_setup(addr);
36460Sstevel@tonic-gate 	cpup->cpu_caddr1 = addr;
36473446Smrj 	cpup->cpu_caddr1pte = pte_pa;
36480Sstevel@tonic-gate 
36490Sstevel@tonic-gate 	addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
36503446Smrj 	pte_pa = hat_mempte_setup(addr);
36510Sstevel@tonic-gate 	cpup->cpu_caddr2 = addr;
36523446Smrj 	cpup->cpu_caddr2pte = pte_pa;
36530Sstevel@tonic-gate 
36540Sstevel@tonic-gate 	mutex_init(&cpup->cpu_ppaddr_mutex, NULL, MUTEX_DEFAULT, NULL);
36550Sstevel@tonic-gate }
36560Sstevel@tonic-gate 
36573446Smrj /*
36583446Smrj  * Undo setup_vaddr_for_ppcopy
36593446Smrj  */
36603446Smrj void
36613446Smrj teardown_vaddr_for_ppcopy(struct cpu *cpup)
36623446Smrj {
36633446Smrj 	mutex_destroy(&cpup->cpu_ppaddr_mutex);
36643446Smrj 
36653446Smrj 	hat_mempte_release(cpup->cpu_caddr2, cpup->cpu_caddr2pte);
36663446Smrj 	cpup->cpu_caddr2pte = 0;
36673446Smrj 	vmem_free(heap_arena, cpup->cpu_caddr2, mmu_ptob(1));
36683446Smrj 	cpup->cpu_caddr2 = 0;
36693446Smrj 
36703446Smrj 	hat_mempte_release(cpup->cpu_caddr1, cpup->cpu_caddr1pte);
36713446Smrj 	cpup->cpu_caddr1pte = 0;
36723446Smrj 	vmem_free(heap_arena, cpup->cpu_caddr1, mmu_ptob(1));
36733446Smrj 	cpup->cpu_caddr1 = 0;
36743446Smrj }
36750Sstevel@tonic-gate 
36760Sstevel@tonic-gate /*
36770Sstevel@tonic-gate  * Create the pageout scanner thread. The thread starts at
36780Sstevel@tonic-gate  * 'procedure', belongs to process pp, and runs at priority pri.
36790Sstevel@tonic-gate  */
36800Sstevel@tonic-gate void
36810Sstevel@tonic-gate pageout_init(void (*procedure)(), proc_t *pp, pri_t pri)
36820Sstevel@tonic-gate {
36830Sstevel@tonic-gate 	(void) thread_create(NULL, 0, procedure, NULL, 0, pp, TS_RUN, pri);
36840Sstevel@tonic-gate }
36850Sstevel@tonic-gate 
36860Sstevel@tonic-gate /*
36870Sstevel@tonic-gate  * Function for flushing D-cache when performing module relocations
36880Sstevel@tonic-gate  * to an alternate mapping.  Unnecessary on Intel / AMD platforms.
36890Sstevel@tonic-gate  */
36900Sstevel@tonic-gate void
36910Sstevel@tonic-gate dcache_flushall()
36920Sstevel@tonic-gate {}
36933177Sdp78419 
36943177Sdp78419 size_t
36953177Sdp78419 exec_get_spslew(void)
36963177Sdp78419 {
36973177Sdp78419 	return (0);
36983177Sdp78419 }
36993446Smrj 
37003446Smrj /*
37013446Smrj  * Allocate a memory page.  The 'flags' argument is consulted only for
37023446Smrj  * KM_NOSLEEP when reserving the page.  This is quite a hacked up
37033446Smrj  * method -- it works for now, but really needs to be fixed up a bit.
37043446Smrj  *
37053446Smrj  * We currently use page_create_va() on the kvp with fake offsets,
37063446Smrj  * segments and virt address.  This is pretty bogus, but was copied from the
37073446Smrj  * old hat_i86.c code.  A better approach would be to specify either mnode
37083446Smrj  * random or mnode local and take a page from whatever color has the MOST
37093446Smrj  * available - this would have a minimal impact on page coloring.
37103446Smrj  */
37113446Smrj page_t *
37127589SVikram.Hegde@Sun.COM page_get_physical(int flags)
37133446Smrj {
37143446Smrj 	page_t *pp;
37157589SVikram.Hegde@Sun.COM 	u_offset_t offset = (u_offset_t)1 << 41;	/* in VA hole */
37163446Smrj 	static struct seg tmpseg;
37173446Smrj 	static uintptr_t ctr = 0;
37187589SVikram.Hegde@Sun.COM 	static kmutex_t pgp_mutex;
37193446Smrj 
37203446Smrj 	/*
37213446Smrj 	 * This code is gross, we really need a simpler page allocator.
37223446Smrj 	 *
37233446Smrj 	 * To avoid conflicts with other pages, we get creative with the offset.
37247589SVikram.Hegde@Sun.COM 	 * For 32 bits, we need an offset > 4Gig.
37257589SVikram.Hegde@Sun.COM 	 * For 64 bits, we need an offset somewhere in the VA hole.
37263446Smrj 	 */
37277589SVikram.Hegde@Sun.COM 	if (page_resv(1, flags & KM_NOSLEEP) == 0)
37283446Smrj 		return (NULL);
37293446Smrj 
37307589SVikram.Hegde@Sun.COM 	mutex_enter(&pgp_mutex);
37313446Smrj #ifdef	DEBUG
37323446Smrj 	pp = page_exists(&kvp, offset);
37333446Smrj 	if (pp != NULL)
37347240Srh87107 		panic("page already exists %p", (void *)pp);
37353446Smrj #endif
37363446Smrj 
37375084Sjohnlev 	pp = page_create_va(&kvp, offset, MMU_PAGESIZE, PG_EXCL,
37383446Smrj 	    &tmpseg, (caddr_t)(ctr += MMU_PAGESIZE));	/* changing VA usage */
37397589SVikram.Hegde@Sun.COM 	if (pp != NULL) {
37407589SVikram.Hegde@Sun.COM 		page_io_unlock(pp);
37417589SVikram.Hegde@Sun.COM 		page_hashout(pp, NULL);
37427589SVikram.Hegde@Sun.COM 		page_downgrade(pp);
37437589SVikram.Hegde@Sun.COM 	}
37447589SVikram.Hegde@Sun.COM 	mutex_exit(&pgp_mutex);
37457589SVikram.Hegde@Sun.COM 	return (pp);
37463446Smrj }
37477589SVikram.Hegde@Sun.COM 
37487589SVikram.Hegde@Sun.COM void
37497589SVikram.Hegde@Sun.COM page_free_physical(page_t *pp)
37507589SVikram.Hegde@Sun.COM {
37517589SVikram.Hegde@Sun.COM 	/*
37527589SVikram.Hegde@Sun.COM 	 * Get an exclusive lock; we might have to wait for a kmem reader.
37537589SVikram.Hegde@Sun.COM 	 */
37547589SVikram.Hegde@Sun.COM 	ASSERT(PAGE_SHARED(pp));
37557589SVikram.Hegde@Sun.COM 	if (!page_tryupgrade(pp)) {
37567589SVikram.Hegde@Sun.COM 		page_unlock(pp);
37577589SVikram.Hegde@Sun.COM 		/*
37587589SVikram.Hegde@Sun.COM 		 * RFE: we could change this to not loop forever
37597589SVikram.Hegde@Sun.COM 		 * George Cameron had some idea on how to do that.
37607589SVikram.Hegde@Sun.COM 		 * For now looping works - it's just like sfmmu.
37617589SVikram.Hegde@Sun.COM 		 */
37627589SVikram.Hegde@Sun.COM 		while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM))
37637589SVikram.Hegde@Sun.COM 			continue;
37647589SVikram.Hegde@Sun.COM 	}
37657589SVikram.Hegde@Sun.COM 	page_free(pp, 1);
37667589SVikram.Hegde@Sun.COM 	page_unresv(1);
37677589SVikram.Hegde@Sun.COM }
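
/*
 * Editor's sketch (hypothetical): pairing the two routines above.
 * page_get_physical() returns a SHARED-locked page and
 * page_free_physical() expects exactly that, so a short-lived private
 * page looks like this.
 */
#ifdef notdef
static void
example_private_page(void)
{
	page_t *pp;

	if ((pp = page_get_physical(KM_NOSLEEP)) == NULL)
		return;		/* reservation or allocation failed */
	/* use page_pptonum(pp) while the shared lock is held */
	page_free_physical(pp);	/* upgrades to EXCL, frees, unreserves */
}
#endif	/* notdef */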
3768