/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989  AT&T */
/*	All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/buf.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/disp.h>
#include <sys/vm.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cred.h>
#include <sys/exec.h>
#include <sys/exechdr.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_vn.h>
#include <vm/page.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>

#include <sys/cpu.h>
#include <sys/vm_machparam.h>
#include <sys/memlist.h>
#include <sys/bootconf.h> /* XXX the memlist stuff belongs in memlist_plat.h */
#include <vm/hat_i86.h>
#include <sys/x86_archext.h>
#include <sys/elf_386.h>
#include <sys/cmn_err.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>

#include <sys/vtrace.h>
#include <sys/ddidmareq.h>
#include <sys/promif.h>
#include <sys/memnode.h>
#include <sys/stack.h>
#include <util/qsort.h>
#include <sys/taskq.h>

#ifdef __xpv

#include <sys/hypervisor.h>
#include <sys/xen_mmu.h>
#include <sys/balloon_impl.h>

/*
 * Domain 0 pages usable for DMA are pre-allocated and kept in
 * distinct lists, ordered by increasing mfn.
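 *
 * (An mfn is the hypervisor's machine frame number; DMA engines see
 * machine addresses rather than the domain's pseudo-physical pfns,
 * which is why the pools are kept ordered by mfn.)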
 */
static kmutex_t io_pool_lock;
static kmutex_t contig_list_lock;
static page_t *io_pool_4g;	/* pool for 32 bit dma limited devices */
static page_t *io_pool_16m;	/* pool for 24 bit dma limited legacy devices */
static long io_pool_cnt;
static long io_pool_cnt_max = 0;
#define	DEFAULT_IO_POOL_MIN	128
static long io_pool_cnt_min = DEFAULT_IO_POOL_MIN;
static long io_pool_cnt_lowater = 0;
static long io_pool_shrink_attempts; /* how many times did we try to shrink */
static long io_pool_shrinks;	/* how many times did we really shrink */
static long io_pool_grows;	/* how many times did we grow */
static mfn_t start_mfn = 1;
static caddr_t io_pool_kva;	/* used to alloc pages when needed */

static int create_contig_pfnlist(uint_t);

/*
 * percentage of phys mem to hold in the i/o pool
 */
#define	DEFAULT_IO_POOL_PCT	2
static long io_pool_physmem_pct = DEFAULT_IO_POOL_PCT;
static void page_io_pool_sub(page_t **, page_t *, page_t *);
int ioalloc_dbg = 0;

#endif /* __xpv */

uint_t vac_colors = 1;

int largepagesupport = 0;
extern uint_t page_create_new;
extern uint_t page_create_exists;
extern uint_t page_create_putbacks;
/*
 * Allow users to disable the kernel's use of SSE.
 */
extern int use_sse_pagecopy, use_sse_pagezero;

/*
 * combined memory ranges from mnode and memranges[] to manage single
 * mnode/mtype dimension in the page lists.
 */
typedef struct {
	pfn_t	mnr_pfnlo;
	pfn_t	mnr_pfnhi;
	int	mnr_mnode;
	int	mnr_memrange;		/* index into memranges[] */
	/* maintain page list stats */
	pgcnt_t	mnr_mt_clpgcnt;		/* cache list cnt */
	pgcnt_t	mnr_mt_flpgcnt[MMU_PAGE_SIZES];	/* free list cnt per szc */
	pgcnt_t	mnr_mt_totcnt;		/* sum of cache and free lists */
#ifdef DEBUG
	struct mnr_mts {		/* mnode/mtype szc stats */
		pgcnt_t	mnr_mts_pgcnt;
		int	mnr_mts_colors;
		pgcnt_t	*mnr_mtsc_pgcnt;
	} *mnr_mts;
#endif
} mnoderange_t;

#define	MEMRANGEHI(mtype)						\
	((mtype > 0) ? memranges[mtype - 1] - 1 : physmax)
#define	MEMRANGELO(mtype)	(memranges[mtype])

#define	MTYPE_FREEMEM(mt)	(mnoderanges[mt].mnr_mt_totcnt)

/*
 * As the PC architecture evolved, memory was clumped into several
 * ranges for various historical I/O devices to do DMA.
 * < 16Meg - ISA bus
 * < 2Gig - ???
 * < 4Gig - PCI bus or drivers that don't understand PAE mode
 *
 * These are listed in reverse order, so that we can skip over unused
 * ranges on machines with small memories.
 *
 * For now under the Hypervisor, we'll only ever have one memrange.
 */
#define	PFN_4GIG	0x100000
#define	PFN_16MEG	0x1000
static pfn_t arch_memranges[NUM_MEM_RANGES] = {
	PFN_4GIG,	/* pfn range for 4G and above */
	0x80000,	/* pfn range for 2G-4G */
	PFN_16MEG,	/* pfn range for 16M-2G */
	0x00000,	/* pfn range for 0-16M */
};
pfn_t *memranges = &arch_memranges[0];
int nranges = NUM_MEM_RANGES;

/*
 * This combines mem_node_config and memranges into one data
 * structure to be used for page list management.
 */
mnoderange_t	*mnoderanges;
int		mnoderangecnt;
int		mtype4g;

/*
 * 4g memory management variables for systems with more than 4g of memory:
 *
 * physical memory below 4g is required for 32bit dma devices and, currently,
 * for kmem memory. On systems with more than 4g of memory, the pool of memory
 * below 4g can be depleted without any paging activity given that there is
 * likely to be sufficient memory above 4g.
 *
 * physmax4g is set true if the largest pfn is over 4g. The rest of the
 * 4g memory management code is enabled only when physmax4g is true.
 *
 * maxmem4g is the count of the maximum number of pages on the page lists
 * with physical addresses below 4g. It can be a lot less than 4g given that
 * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
 * agp aperture etc.
 *
 * freemem4g maintains the count of the number of available pages on the
 * page lists with physical addresses below 4g.
 *
 * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
 * 6% (desfree4gshift = 4) of maxmem4g.
 *
 * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
 * and the amount of physical memory above 4g is greater than freemem4g.
 * In this case, page_get_* routines will restrict below 4g allocations
 * for requests that don't specifically require it.
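 *
 * A worked example with hypothetical values: with maxmem4g = 0x60000
 * pages (1.5G usable below 4g), DESFREE4G = 0x60000 >> 4 = 0x6000
 * pages (96M). If freemem4g drops to 0x5000 pages while total freemem
 * is 0x40000 pages, RESTRICT4G_ALLOC is true (0x5000 < 0x6000 and
 * 0xa000 < 0x40000) and unconstrained allocations are steered above 4g.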
 */

#define	LOTSFREE4G	(maxmem4g >> lotsfree4gshift)
#define	DESFREE4G	(maxmem4g >> desfree4gshift)

#define	RESTRICT4G_ALLOC					\
	(physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))

static pgcnt_t	maxmem4g;
static pgcnt_t	freemem4g;
static int	physmax4g;
static int	desfree4gshift = 4;	/* maxmem4g shift to derive DESFREE4G */
static int	lotsfree4gshift = 3;

/*
 * 16m memory management:
 *
 * reserve some amount of physical memory below 16m for legacy devices.
 *
 * RESTRICT16M_ALLOC returns true if there are sufficient free pages above
 * 16m or if the 16m pool drops below DESFREE16M.
 *
 * In this case, general page allocations via page_get_{free,cache}list
 * routines will be restricted from allocating from the 16m pool. Allocations
 * that require specific pfn ranges (page_get_anylist) and PG_PANIC allocations
 * are not restricted.
 */

#define	FREEMEM16M	MTYPE_FREEMEM(0)
#define	DESFREE16M	desfree16m
#define	RESTRICT16M_ALLOC(freemem, pgcnt, flags)		\
	((freemem != 0) && ((flags & PG_PANIC) == 0) &&		\
	    ((freemem >= (FREEMEM16M)) ||			\
	    (FREEMEM16M < (DESFREE16M + pgcnt))))

static pgcnt_t	desfree16m = 0x380;

/*
 * This can be patched via /etc/system to allow old non-PAE aware device
 * drivers to use kmem_alloc'd memory on 32 bit systems with > 4Gig RAM.
 */
int restricted_kmemalloc = 0;

#ifdef VM_STATS
struct {
	ulong_t	pga_alloc;
	ulong_t	pga_notfullrange;
	ulong_t	pga_nulldmaattr;
	ulong_t	pga_allocok;
	ulong_t	pga_allocfailed;
	ulong_t	pgma_alloc;
	ulong_t	pgma_allocok;
	ulong_t	pgma_allocfailed;
	ulong_t	pgma_allocempty;
} pga_vmstats;
#endif

uint_t mmu_page_sizes;

/* How many page sizes the users can see */
uint_t mmu_exported_page_sizes;

/* page sizes that legacy applications can see */
uint_t mmu_legacy_page_sizes;

/*
 * Number of pages in 1 GB.  Don't enable automatic large pages if we have
 * fewer than this many pages.
 */
pgcnt_t shm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
pgcnt_t privm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);

/*
 * Maximum and default segment size tunables for user private
 * and shared anon memory, and user text and initialized data.
 * These can be patched via /etc/system to allow large pages
 * to be used for mapping application private and shared anon memory.
 */
size_t mcntl0_lpsize = MMU_PAGESIZE;
size_t max_uheap_lpsize = MMU_PAGESIZE;
size_t default_uheap_lpsize = MMU_PAGESIZE;
size_t max_ustack_lpsize = MMU_PAGESIZE;
size_t default_ustack_lpsize = MMU_PAGESIZE;
size_t max_privmap_lpsize = MMU_PAGESIZE;
size_t max_uidata_lpsize = MMU_PAGESIZE;
size_t max_utext_lpsize = MMU_PAGESIZE;
size_t max_shm_lpsize = MMU_PAGESIZE;


/*
 * initialized by page_coloring_init().
 */
uint_t	page_colors;
uint_t	page_colors_mask;
uint_t	page_coloring_shift;
int	cpu_page_colors;
static uint_t	l2_colors;

/*
 * Page freelists and cachelists are dynamically allocated once mnoderangecnt
 * and page_colors are calculated from the l2 cache n-way set size.  Within a
 * mnode range, the page freelist and cachelist are hashed into bins based on
 * color. This makes it easier to search for a page within a specific memory
 * range.
 */
#define	PAGE_COLORS_MIN	16

page_t ****page_freelists;
page_t ***page_cachelists;


/*
 * Used by page layer to know about page sizes
 */
hw_pagesize_t hw_page_array[MAX_NUM_LEVEL + 1];

kmutex_t	*fpc_mutex[NPC_MUTEX];
kmutex_t	*cpc_mutex[NPC_MUTEX];

/*
 * Only let one thread at a time try to coalesce large pages, to
 * prevent them from working against each other.
 */
static kmutex_t	contig_lock;
#define	CONTIG_LOCK()	mutex_enter(&contig_lock);
#define	CONTIG_UNLOCK()	mutex_exit(&contig_lock);

#define	PFN_16M		(mmu_btop((uint64_t)0x1000000))

/*
 * Return the optimum page size for a given mapping
 */
/*ARGSUSED*/
size_t
map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int memcntl)
{
	level_t l = 0;
	size_t pgsz = MMU_PAGESIZE;
	size_t max_lpsize;
	uint_t mszc;

	ASSERT(maptype != MAPPGSZ_VA);

	if (maptype != MAPPGSZ_ISM && physmem < privm_lpg_min_physmem) {
		return (MMU_PAGESIZE);
	}

	switch (maptype) {
	case MAPPGSZ_HEAP:
	case MAPPGSZ_STK:
		max_lpsize = memcntl ? mcntl0_lpsize : (maptype ==
		    MAPPGSZ_HEAP ? max_uheap_lpsize : max_ustack_lpsize);
		if (max_lpsize == MMU_PAGESIZE) {
			return (MMU_PAGESIZE);
		}
		if (len == 0) {
			len = (maptype == MAPPGSZ_HEAP) ? p->p_brkbase +
			    p->p_brksize - p->p_bssbase : p->p_stksize;
		}
		len = (maptype == MAPPGSZ_HEAP) ? MAX(len,
		    default_uheap_lpsize) : MAX(len, default_ustack_lpsize);

		/*
		 * use the page size that best fits len
		 */
		for (l = mmu.umax_page_level; l > 0; --l) {
			if (LEVEL_SIZE(l) > max_lpsize || len < LEVEL_SIZE(l)) {
				continue;
			} else {
				pgsz = LEVEL_SIZE(l);
			}
			break;
		}

		mszc = (maptype == MAPPGSZ_HEAP ? p->p_brkpageszc :
		    p->p_stkpageszc);
		if (addr == 0 && (pgsz < hw_page_array[mszc].hp_size)) {
			pgsz = hw_page_array[mszc].hp_size;
		}
		return (pgsz);

	case MAPPGSZ_ISM:
		for (l = mmu.umax_page_level; l > 0; --l) {
			if (len >= LEVEL_SIZE(l))
				return (LEVEL_SIZE(l));
		}
		return (LEVEL_SIZE(0));
	}
	return (pgsz);
}

static uint_t
map_szcvec(caddr_t addr, size_t size, uintptr_t off, size_t max_lpsize,
    size_t min_physmem)
{
	caddr_t eaddr = addr + size;
	uint_t szcvec = 0;
	caddr_t raddr;
	caddr_t readdr;
	size_t pgsz;
	int i;

	if (physmem < min_physmem || max_lpsize <= MMU_PAGESIZE) {
		return (0);
	}

	for (i = mmu_exported_page_sizes - 1; i > 0; i--) {
		pgsz = page_get_pagesize(i);
		if (pgsz > max_lpsize) {
			continue;
		}
		raddr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
		readdr = (caddr_t)P2ALIGN((uintptr_t)eaddr, pgsz);
		if (raddr < addr || raddr >= readdr) {
			continue;
		}
		if (P2PHASE((uintptr_t)addr ^ off, pgsz)) {
			continue;
		}
		/*
		 * Set szcvec to the remaining page sizes.
		 */
		szcvec = ((1 << (i + 1)) - 1) & ~1;
		break;
	}
	return (szcvec);
}

/*
 * Return a bit vector of large page size codes that
 * can be used to map [addr, addr + len) region.
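 *
 * For example, with the x86 page sizes 4K (szc 0), 2M (szc 1) and
 * 1G (szc 2), a region aligned and large enough for 2M pages but not
 * 1G pages yields szcvec = ((1 << 2) - 1) & ~1 = 0x2, i.e. only the
 * 2M size code is set; the base page size bit is always cleared.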
 */
/*ARGSUSED*/
uint_t
map_pgszcvec(caddr_t addr, size_t size, uintptr_t off, int flags, int type,
    int memcntl)
{
	size_t max_lpsize = mcntl0_lpsize;

	if (mmu.max_page_level == 0)
		return (0);

	if (flags & MAP_TEXT) {
		if (!memcntl)
			max_lpsize = max_utext_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    shm_lpg_min_physmem));

	} else if (flags & MAP_INITDATA) {
		if (!memcntl)
			max_lpsize = max_uidata_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));

	} else if (type == MAPPGSZC_SHM) {
		if (!memcntl)
			max_lpsize = max_shm_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    shm_lpg_min_physmem));

	} else if (type == MAPPGSZC_HEAP) {
		if (!memcntl)
			max_lpsize = max_uheap_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));

	} else if (type == MAPPGSZC_STACK) {
		if (!memcntl)
			max_lpsize = max_ustack_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));

	} else {
		if (!memcntl)
			max_lpsize = max_privmap_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));
	}
}

/*
 * Handle a pagefault.
 */
faultcode_t
pagefault(
	caddr_t addr,
	enum fault_type type,
	enum seg_rw rw,
	int iskernel)
{
	struct as *as;
	struct hat *hat;
	struct proc *p;
	kthread_t *t;
	faultcode_t res;
	caddr_t base;
	size_t len;
	int err;
	int mapped_red;
	uintptr_t ea;

	ASSERT_STACK_ALIGNED();

	if (INVALID_VADDR(addr))
		return (FC_NOMAP);

	mapped_red = segkp_map_red();

	if (iskernel) {
		as = &kas;
		hat = as->a_hat;
	} else {
		t = curthread;
		p = ttoproc(t);
		as = p->p_as;
		hat = as->a_hat;
	}

	/*
	 * Dispatch pagefault.
	 */
	res = as_fault(hat, as, addr, 1, type, rw);

	/*
	 * If this isn't a potential unmapped hole in the user's
	 * UNIX data or stack segments, just return status info.
	 */
	if (res != FC_NOMAP || iskernel)
		goto out;

	/*
	 * Check to see if we happened to fault on a currently unmapped
	 * part of the UNIX data or stack segments.  If so, create a zfod
	 * mapping there and then try calling the fault routine again.
	 */
	base = p->p_brkbase;
	len = p->p_brksize;

	if (addr < base || addr >= base + len) {	/* data seg? */
		base = (caddr_t)p->p_usrstack - p->p_stksize;
		len = p->p_stksize;
		if (addr < base || addr >= p->p_usrstack) {	/* stack seg? */
			/* not in either UNIX data or stack segments */
			res = FC_NOMAP;
			goto out;
		}
	}

	/*
	 * The rest of this function implements 3.X/4.X/5.X compatibility.
	 * This code is probably not needed anymore.
	 */
	if (p->p_model == DATAMODEL_ILP32) {

		/* expand the gap to the page boundaries on each side */
		ea = P2ROUNDUP((uintptr_t)base + len, MMU_PAGESIZE);
		base = (caddr_t)P2ALIGN((uintptr_t)base, MMU_PAGESIZE);
		len = ea - (uintptr_t)base;

		as_rangelock(as);
		if (as_gap(as, MMU_PAGESIZE, &base, &len, AH_CONTAIN, addr) ==
		    0) {
			err = as_map(as, base, len, segvn_create, zfod_argsp);
			as_rangeunlock(as);
			if (err) {
				res = FC_MAKE_ERR(err);
				goto out;
			}
		} else {
			/*
			 * This page is already mapped by another thread after
			 * we returned from as_fault() above.  We just fall
			 * through as_fault() below.
			 */
			as_rangeunlock(as);
		}

		res = as_fault(hat, as, addr, 1, F_INVAL, rw);
	}

out:
	if (mapped_red)
		segkp_unmap_red();

	return (res);
}

void
map_addr(caddr_t *addrp, size_t len, offset_t off, int vacalign, uint_t flags)
{
	struct proc *p = curproc;
	caddr_t userlimit = (flags & _MAP_LOW32) ?
	    (caddr_t)_userlimit32 : p->p_as->a_userlimit;

	map_addr_proc(addrp, len, off, vacalign, userlimit, curproc, flags);
}

/*ARGSUSED*/
int
map_addr_vacalign_check(caddr_t addr, u_offset_t off)
{
	return (0);
}

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is the highest available below userlimit.
 *
 * Every mapping will have a redzone of a single page on either side of
 * the request.  This is done to leave one page unmapped between segments.
 * This is not required, but it's useful for the user because if their
 * program strays across a segment boundary, it will catch a fault
 * immediately, making debugging a little easier.  Currently the redzone
 * is mandatory.
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  We decide to completely ignore this hint.
 *	If MAP_ALIGN was specified, addrp contains the minimal alignment, which
 *	must be some "power of two" multiple of pagesize.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *
 * vacalign is not needed on x86 (it's for virtually addressed caches)
 */
/*ARGSUSED*/
void
map_addr_proc(
	caddr_t *addrp,
	size_t len,
	offset_t off,
	int vacalign,
	caddr_t userlimit,
	struct proc *p,
	uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	size_t align_amount;

	ASSERT32(userlimit == as->a_userlimit);

	base = p->p_brkbase;
#if defined(__amd64)
	/*
	 * XX64 Yes, this needs more work.
	 */
	if (p->p_model == DATAMODEL_NATIVE) {
		if (userlimit < as->a_userlimit) {
			/*
			 * This happens when a program wants to map
			 * something in a range that's accessible to a
			 * program in a smaller address space.  For example,
			 * a 64-bit program calling mmap32(2) to guarantee
			 * that the returned address is below 4Gbytes.
			 */
			ASSERT((uintptr_t)userlimit < ADDRESS_C(0xffffffff));

			if (userlimit > base)
				slen = userlimit - base;
			else {
				*addrp = NULL;
				return;
			}
		} else {
			/*
			 * XX64 This layout is probably wrong .. but in
			 * the event we make the amd64 address space look
			 * like sparcv9 i.e. with the stack -above- the
			 * heap, this bit of code might even be correct.
			 */
			slen = p->p_usrstack - base -
			    (((size_t)rctl_enforced_value(
			    rctlproc_legacy[RLIMIT_STACK],
			    p->p_rctls, p) + PAGEOFFSET) & PAGEMASK);
		}
	} else
#endif
		slen = userlimit - base;

	/* Make len be a multiple of PAGESIZE */
	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 * figure out what the alignment should be
	 *
	 * XX64 -- is there an ELF_AMD64_MAXPGSZ or is it the same????
	 */
	if (len <= ELF_386_MAXPGSZ) {
		/*
		 * Align virtual addresses to ensure that ELF shared libraries
		 * are mapped with the appropriate alignment constraints by
		 * the run-time linker.
		 */
		align_amount = ELF_386_MAXPGSZ;
	} else {
		int l = mmu.umax_page_level;

		while (l && len < LEVEL_SIZE(l))
			--l;

		align_amount = LEVEL_SIZE(l);
	}

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
		align_amount = (uintptr_t)*addrp;

	ASSERT(ISP2(align_amount));
	ASSERT(align_amount == 0 || align_amount >= PAGESIZE);

	off = off & (align_amount - 1);
	/*
	 * Look for a large enough hole starting below userlimit.
	 * After finding it, use the upper part.
	 */
	if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
	    PAGESIZE, off) == 0) {
		caddr_t as_addr;

		/*
		 * addr is the highest possible address to use since we have
		 * a PAGESIZE redzone at the beginning and end.
		 */
		addr = base + slen - (PAGESIZE + len);
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount and
		 * add the offset in.
		 * If addr is greater than as_addr, len would not be large
		 * enough to include the redzone, so we must adjust down
		 * by the alignment amount.
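		 *
		 * A hypothetical worked example: with as_addr = 0x2e000,
		 * align_amount = 0x10000 and off = 0x1000, addr rounds
		 * down to 0x20000 and becomes 0x21000 after adding off,
		 * which is fine.  Had off been 0xf000, addr would land
		 * at 0x2f000 > as_addr and be pulled back to 0x1f000.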
		 */
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
		addr += (uintptr_t)off;
		if (addr > as_addr) {
			addr -= align_amount;
		}

		ASSERT(addr > base);
		ASSERT(addr + len < base + slen);
		ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
		    ((uintptr_t)(off)));
		*addrp = addr;
	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}

int valid_va_range_aligned_wraparound;

/*
 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
 * addresses at least "minlen" long, where the base of the range is at "off"
 * phase from an "align" boundary and there is space for a "redzone"-sized
 * redzone on either side of the range.  On success, 1 is returned and *basep
 * and *lenp are adjusted to describe the acceptable range (including
 * the redzone).  On failure, 0 is returned.
 */
/*ARGSUSED3*/
int
valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
    size_t align, size_t redzone, size_t off)
{
	uintptr_t hi, lo;
	size_t tot_len;

	ASSERT(align == 0 ? off == 0 : off < align);
	ASSERT(ISP2(align));
	ASSERT(align == 0 || align >= PAGESIZE);

	lo = (uintptr_t)*basep;
	hi = lo + *lenp;
	tot_len = minlen + 2 * redzone; /* need at least this much space */

	/*
	 * If hi rolled over the top, try cutting back.
	 */
	if (hi < lo) {
		*lenp = 0UL - lo - 1UL;
		/* See if this really happens. If so, we'll figure out why. */
		valid_va_range_aligned_wraparound++;
		hi = lo + *lenp;
	}
	if (*lenp < tot_len) {
		return (0);
	}

#if defined(__amd64)
	/*
	 * Deal with a possible hole in the address range between
	 * hole_start and hole_end that should never be mapped.
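	 *
	 * (On amd64 this is the non-canonical address hole: only
	 * addresses whose upper bits are a sign extension of bit 47
	 * are valid, so the range between hole_start and hole_end can
	 * never be mapped.)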
	 */
	if (lo < hole_start) {
		if (hi > hole_start) {
			if (hi < hole_end) {
				hi = hole_start;
			} else {
				/* lo < hole_start && hi >= hole_end */
				if (dir == AH_LO) {
					/*
					 * prefer lowest range
					 */
					if (hole_start - lo >= tot_len)
						hi = hole_start;
					else if (hi - hole_end >= tot_len)
						lo = hole_end;
					else
						return (0);
				} else {
					/*
					 * prefer highest range
					 */
					if (hi - hole_end >= tot_len)
						lo = hole_end;
					else if (hole_start - lo >= tot_len)
						hi = hole_start;
					else
						return (0);
				}
			}
		}
	} else {
		/* lo >= hole_start */
		if (hi < hole_end)
			return (0);
		if (lo < hole_end)
			lo = hole_end;
	}
#endif

	if (hi - lo < tot_len)
		return (0);

	if (align > 1) {
		uintptr_t tlo = lo + redzone;
		uintptr_t thi = hi - redzone;
		tlo = (uintptr_t)P2PHASEUP(tlo, align, off);
		if (tlo < lo + redzone) {
			return (0);
		}
		if (thi < tlo || thi - tlo < minlen) {
			return (0);
		}
	}

	*basep = (caddr_t)lo;
	*lenp = hi - lo;
	return (1);
}

/*
 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
 * addresses at least "minlen" long.  On success, 1 is returned and *basep
 * and *lenp are adjusted to describe the acceptable range.  On failure, 0
 * is returned.
 */
int
valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
{
	return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
}

/*
 * Determine whether [addr, addr+len] are valid user addresses.
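 *
 * For example, a len of 0, or one large enough that addr + len wraps
 * past the top of the address space, makes eaddr <= addr and fails
 * with RANGE_BADADDR.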
 */
/*ARGSUSED*/
int
valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
    caddr_t userlimit)
{
	caddr_t eaddr = addr + len;

	if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
		return (RANGE_BADADDR);

#if defined(__amd64)
	/*
	 * Check for the VA hole
	 */
	if (eaddr > (caddr_t)hole_start && addr < (caddr_t)hole_end)
		return (RANGE_BADADDR);
#endif

	return (RANGE_OKAY);
}

/*
 * Return 1 if the page frame is onboard memory, else 0.
 */
int
pf_is_memory(pfn_t pf)
{
	if (pfn_is_foreign(pf))
		return (0);
	return (address_in_memlist(phys_install, pfn_to_pa(pf), 1));
}

/*
 * return the memrange containing pfn
 */
int
memrange_num(pfn_t pfn)
{
	int n;

	for (n = 0; n < nranges - 1; ++n) {
		if (pfn >= memranges[n])
			break;
	}
	return (n);
}

/*
 * return the mnoderange containing pfn
 */
/*ARGSUSED*/
int
pfn_2_mtype(pfn_t pfn)
{
#if defined(__xpv)
	return (0);
#else
	int	n;

	for (n = mnoderangecnt - 1; n >= 0; n--) {
		if (pfn >= mnoderanges[n].mnr_pfnlo) {
			break;
		}
	}
	return (n);
#endif
}

#if !defined(__xpv)
/*
 * is_contigpage_free:
 *	returns a page list of contiguous pages. It minimally has to return
 *	minctg pages. Caller determines minctg based on the scatter-gather
 *	list length.
 *
 *	pfnp is set to the next page frame to search on return.
 */
static page_t *
is_contigpage_free(
	pfn_t *pfnp,
	pgcnt_t *pgcnt,
	pgcnt_t minctg,
	uint64_t pfnseg,
	int iolock)
{
	int	i = 0;
	pfn_t	pfn = *pfnp;
	page_t	*pp;
	page_t	*plist = NULL;

	/*
	 * fail if pfn + minctg crosses a segment boundary.
	 * Adjust for next starting pfn to begin at segment boundary.
	 */

	if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg)) {
		*pfnp = roundup(*pfnp, pfnseg + 1);
		return (NULL);
	}

	do {
retry:
		pp = page_numtopp_nolock(pfn + i);
		if ((pp == NULL) ||
		    (page_trylock(pp, SE_EXCL) == 0)) {
			(*pfnp)++;
			break;
		}
		if (page_pptonum(pp) != pfn + i) {
			page_unlock(pp);
			goto retry;
		}

		if (!(PP_ISFREE(pp))) {
			page_unlock(pp);
			(*pfnp)++;
			break;
		}

		if (!PP_ISAGED(pp)) {
			page_list_sub(pp, PG_CACHE_LIST);
			page_hashout(pp, (kmutex_t *)NULL);
		} else {
			page_list_sub(pp, PG_FREE_LIST);
		}

		if (iolock)
			page_io_lock(pp);
		page_list_concat(&plist, &pp);

		/*
		 * exit loop when pgcnt satisfied or segment boundary reached.
		 */

	} while ((++i < *pgcnt) && ((pfn + i) & pfnseg));

	*pfnp += i;		/* set to next pfn to search */

	if (i >= minctg) {
		*pgcnt -= i;
		return (plist);
	}

	/*
	 * failure: minctg not satisfied.
	 *
	 * if next request crosses segment boundary, set next pfn
	 * to search from the segment boundary.
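	 *
	 * A hypothetical worked example: with 16-page segments
	 * (pfnseg = 0xf), *pfnp = 0xe and minctg = 4, the span crosses
	 * a boundary since (0xe + 3) & 0xf = 1 < (0xe & 0xf), so the
	 * search resumes at roundup(0xe, 0x10) = 0x10.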
	 */
	if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg))
		*pfnp = roundup(*pfnp, pfnseg + 1);

	/* clean up any pages already allocated */

	while (plist) {
		pp = plist;
		page_sub(&plist, pp);
		page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
		if (iolock)
			page_io_unlock(pp);
		page_unlock(pp);
	}

	return (NULL);
}
#endif	/* !__xpv */

/*
 * verify that pages being returned from allocator have correct DMA attribute
 */
#ifndef DEBUG
#define	check_dma(a, b, c) (0)
#else
static void
check_dma(ddi_dma_attr_t *dma_attr, page_t *pp, int cnt)
{
	if (dma_attr == NULL)
		return;

	while (cnt-- > 0) {
		if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) <
		    dma_attr->dma_attr_addr_lo)
			panic("PFN (pp=%p) below dma_attr_addr_lo", pp);
		if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) >=
		    dma_attr->dma_attr_addr_hi)
			panic("PFN (pp=%p) above dma_attr_addr_hi", pp);
		pp = pp->p_next;
	}
}
#endif

#if !defined(__xpv)
static page_t *
page_get_contigpage(pgcnt_t *pgcnt, ddi_dma_attr_t *mattr, int iolock)
{
	pfn_t		pfn;
	int		sgllen;
	uint64_t	pfnseg;
	pgcnt_t		minctg;
	page_t		*pplist = NULL, *plist;
	uint64_t	lo, hi;
	pgcnt_t		pfnalign = 0;
	static pfn_t	startpfn;
	static pgcnt_t	lastctgcnt;
	uintptr_t	align;

	CONTIG_LOCK();

	if (mattr) {
		lo = mmu_btop((mattr->dma_attr_addr_lo + MMU_PAGEOFFSET));
		hi = mmu_btop(mattr->dma_attr_addr_hi);
		if (hi >= physmax)
			hi = physmax - 1;
		sgllen = mattr->dma_attr_sgllen;
		pfnseg = mmu_btop(mattr->dma_attr_seg);

		align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
		if (align > MMU_PAGESIZE)
			pfnalign = mmu_btop(align);

		/*
		 * in order to satisfy the request, must minimally
		 * acquire minctg contiguous pages
		 */
		minctg = howmany(*pgcnt, sgllen);

		ASSERT(hi >= lo);

		/*
		 * start from where last searched if the minctg >= lastctgcnt
		 */
		if (minctg < lastctgcnt || startpfn < lo || startpfn > hi)
			startpfn = lo;
	} else {
		hi = physmax - 1;
		lo = 0;
		sgllen = 1;
		pfnseg = mmu.highest_pfn;
		minctg = *pgcnt;

		if (minctg < lastctgcnt)
			startpfn = lo;
	}
	lastctgcnt = minctg;

	ASSERT(pfnseg + 1 >= (uint64_t)minctg);

	/* conserve 16m memory - start search above 16m when possible */
	if (hi > PFN_16M && startpfn < PFN_16M)
		startpfn = PFN_16M;

	pfn = startpfn;
	if (pfnalign)
		pfn = P2ROUNDUP(pfn, pfnalign);

	while (pfn + minctg - 1 <= hi) {

		plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
		if (plist) {
			page_list_concat(&pplist, &plist);
			sgllen--;
			/*
			 * return when contig pages no longer needed
			 */
			if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
				startpfn = pfn;
				CONTIG_UNLOCK();
				check_dma(mattr, pplist, *pgcnt);
				return (pplist);
			}
			minctg = howmany(*pgcnt, sgllen);
		}
		if (pfnalign)
			pfn = P2ROUNDUP(pfn, pfnalign);
	}

	/* cannot find contig pages in specified range */
	if (startpfn == lo) {
		CONTIG_UNLOCK();
		return (NULL);
	}

	/* did not start with lo previously */
	pfn = lo;
	if (pfnalign)
		pfn = P2ROUNDUP(pfn, pfnalign);

	/* allow search to go above startpfn */
	while (pfn < startpfn) {

		plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
		if (plist != NULL) {

			page_list_concat(&pplist, &plist);
			sgllen--;

			/*
			 * return when contig pages no longer needed
			 */
			if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
				startpfn = pfn;
				CONTIG_UNLOCK();
				check_dma(mattr, pplist, *pgcnt);
				return (pplist);
			}
			minctg = howmany(*pgcnt, sgllen);
		}
		if (pfnalign)
			pfn = P2ROUNDUP(pfn, pfnalign);
	}
	CONTIG_UNLOCK();
	return (NULL);
}
#endif	/* !__xpv */

/*
 * mnode_range_cnt() calculates the number of memory ranges for mnode and
 * memranges[].  Used to determine the size of page lists and mnoderanges.
 */
int
mnode_range_cnt(int mnode)
{
#if defined(__xpv)
	ASSERT(mnode == 0);
	return (1);
#else	/* __xpv */
	int	mri;
	int	mnrcnt = 0;

	if (mem_node_config[mnode].exists != 0) {
		mri = nranges - 1;

		/* find the memranges index below contained in mnode range */

		while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
			mri--;

		/*
		 * increment mnode range counter when memranges or mnode
		 * boundary is reached.
		 */
		while (mri >= 0 &&
		    mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
			mnrcnt++;
			if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
				mri--;
			else
				break;
		}
	}
	ASSERT(mnrcnt <= MAX_MNODE_MRANGES);
	return (mnrcnt);
#endif	/* __xpv */
}

/*
 * mnode_range_setup() initializes mnoderanges.
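 *
 * For example, a single mnode spanning pfns from 0 up through 8G worth
 * of memory would be split into four mnoderange entries, one per
 * overlapping arch_memranges entry: 0-16M, 16M-2G, 2G-4G and 4G+.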
 */
void
mnode_range_setup(mnoderange_t *mnoderanges)
{
	int	mnode, mri;

	for (mnode = 0; mnode < max_mem_nodes; mnode++) {
		if (mem_node_config[mnode].exists == 0)
			continue;

		mri = nranges - 1;

		while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
			mri--;

		while (mri >= 0 && mem_node_config[mnode].physmax >=
		    MEMRANGELO(mri)) {
			mnoderanges->mnr_pfnlo = MAX(MEMRANGELO(mri),
			    mem_node_config[mnode].physbase);
			mnoderanges->mnr_pfnhi = MIN(MEMRANGEHI(mri),
			    mem_node_config[mnode].physmax);
			mnoderanges->mnr_mnode = mnode;
			mnoderanges->mnr_memrange = mri;
			mnoderanges++;
			if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
				mri--;
			else
				break;
		}
	}
}

/*ARGSUSED*/
int
mtype_init(vnode_t *vp, caddr_t vaddr, uint_t *flags, size_t pgsz)
{
	int mtype = mnoderangecnt - 1;

#if !defined(__xpv)
#if defined(__i386)
	/*
	 * set the mtype range
	 * - kmem requests need to be below 4g if restricted_kmemalloc is set.
	 * - for non kmem requests, set range to above 4g if memory below 4g
	 *   runs low.
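	 *
	 * For example, when memory below 4g runs low, a non-kmem
	 * request leaves mtype at mnoderangecnt - 1 and sets
	 * PGI_MT_RANGE4G, so the subsequent page_get_* search skips
	 * the below-4g ranges entirely.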
	 */
	if (restricted_kmemalloc && VN_ISKAS(vp) &&
	    (caddr_t)(vaddr) >= kernelheap &&
	    (caddr_t)(vaddr) < ekernelheap) {
		ASSERT(physmax4g);
		mtype = mtype4g;
		if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz),
		    btop(pgsz), *flags)) {
			*flags |= PGI_MT_RANGE16M;
		} else {
			VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
			VM_STAT_COND_ADD((*flags & PG_PANIC),
			    vmm_vmstats.pgpanicalloc);
			*flags |= PGI_MT_RANGE0;
		}
		return (mtype);
	}
#endif	/* __i386 */

	if (RESTRICT4G_ALLOC) {
		VM_STAT_ADD(vmm_vmstats.restrict4gcnt);
		/* here only for > 4g systems */
		*flags |= PGI_MT_RANGE4G;
	} else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), *flags)) {
		*flags |= PGI_MT_RANGE16M;
	} else {
		VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
		VM_STAT_COND_ADD((*flags & PG_PANIC), vmm_vmstats.pgpanicalloc);
		*flags |= PGI_MT_RANGE0;
	}
#endif /* !__xpv */
	return (mtype);
}


/* mtype init for page_get_replacement_page */
/*ARGSUSED*/
int
mtype_pgr_init(int *flags, page_t *pp, int mnode, pgcnt_t pgcnt)
{
	int mtype = mnoderangecnt - 1;
#if !defined(__xpv)
	if (RESTRICT16M_ALLOC(freemem, pgcnt, *flags)) {
		*flags |= PGI_MT_RANGE16M;
	} else {
		VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
		*flags |= PGI_MT_RANGE0;
	}
#endif
	return (mtype);
}

/*
 * Determine if the mnode range specified in mtype contains memory belonging
 * to memory node mnode.  If flags & PGI_MT_RANGE is set then mtype contains
 * the range of indices from high pfn to 0, 16m or 4g.
 *
 * Return the first mnode range type index found, otherwise return -1 if
 * none is found.
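 *
 * For example, with PGI_MT_RANGE16M the search floor mtlim is 1, so
 * the scan over mnoderanges skips the 0-16m range (mtype 0);
 * PGI_MT_RANGE4G raises the floor to mtype4g + 1, skipping everything
 * below 4g.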
13530Sstevel@tonic-gate */ 13540Sstevel@tonic-gate int 13550Sstevel@tonic-gate mtype_func(int mnode, int mtype, uint_t flags) 13560Sstevel@tonic-gate { 13570Sstevel@tonic-gate if (flags & PGI_MT_RANGE) { 13585084Sjohnlev int mtlim = 0; 13590Sstevel@tonic-gate 13600Sstevel@tonic-gate if (flags & PGI_MT_NEXT) 13610Sstevel@tonic-gate mtype--; 13625084Sjohnlev if (flags & PGI_MT_RANGE4G) 13631385Skchow mtlim = mtype4g + 1; /* exclude 0-4g range */ 13641385Skchow else if (flags & PGI_MT_RANGE16M) 13651385Skchow mtlim = 1; /* exclude 0-16m range */ 13660Sstevel@tonic-gate while (mtype >= mtlim) { 13670Sstevel@tonic-gate if (mnoderanges[mtype].mnr_mnode == mnode) 13680Sstevel@tonic-gate return (mtype); 13690Sstevel@tonic-gate mtype--; 13700Sstevel@tonic-gate } 13715084Sjohnlev } else if (mnoderanges[mtype].mnr_mnode == mnode) { 13725084Sjohnlev return (mtype); 13730Sstevel@tonic-gate } 13740Sstevel@tonic-gate return (-1); 13750Sstevel@tonic-gate } 13760Sstevel@tonic-gate 13770Sstevel@tonic-gate /* 13781373Skchow * Update the page list max counts with the pfn range specified by the 13791373Skchow * input parameters. Called from add_physmem() when physical memory with 13801373Skchow * page_t's are initially added to the page lists. 13811373Skchow */ 13821373Skchow void 13831373Skchow mtype_modify_max(pfn_t startpfn, long cnt) 13841373Skchow { 13851373Skchow int mtype = 0; 13861373Skchow pfn_t endpfn = startpfn + cnt, pfn; 13871373Skchow pgcnt_t inc; 13881373Skchow 13891373Skchow ASSERT(cnt > 0); 13901373Skchow 13915084Sjohnlev if (!physmax4g) 13925084Sjohnlev return; 13935084Sjohnlev 13941373Skchow for (pfn = startpfn; pfn < endpfn; ) { 13951373Skchow if (pfn <= mnoderanges[mtype].mnr_pfnhi) { 13961373Skchow if (endpfn < mnoderanges[mtype].mnr_pfnhi) { 13971373Skchow inc = endpfn - pfn; 13981373Skchow } else { 13991373Skchow inc = mnoderanges[mtype].mnr_pfnhi - pfn + 1; 14001373Skchow } 14015084Sjohnlev if (mtype <= mtype4g) 14021373Skchow maxmem4g += inc; 14031373Skchow pfn += inc; 14041373Skchow } 14051373Skchow mtype++; 14061373Skchow ASSERT(mtype < mnoderangecnt || pfn >= endpfn); 14071373Skchow } 14081373Skchow } 14091373Skchow 14105084Sjohnlev int 14115084Sjohnlev mtype_2_mrange(int mtype) 14125084Sjohnlev { 14135084Sjohnlev return (mnoderanges[mtype].mnr_memrange); 14145084Sjohnlev } 14155084Sjohnlev 14165084Sjohnlev void 14175084Sjohnlev mnodetype_2_pfn(int mnode, int mtype, pfn_t *pfnlo, pfn_t *pfnhi) 14185084Sjohnlev { 14195084Sjohnlev ASSERT(mnoderanges[mtype].mnr_mnode == mnode); 14205084Sjohnlev *pfnlo = mnoderanges[mtype].mnr_pfnlo; 14215084Sjohnlev *pfnhi = mnoderanges[mtype].mnr_pfnhi; 14225084Sjohnlev } 14235084Sjohnlev 14245084Sjohnlev size_t 14255084Sjohnlev plcnt_sz(size_t ctrs_sz) 14265084Sjohnlev { 14275084Sjohnlev #ifdef DEBUG 14285084Sjohnlev int szc, colors; 14295084Sjohnlev 14305084Sjohnlev ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) * mmu_page_sizes; 14315084Sjohnlev for (szc = 0; szc < mmu_page_sizes; szc++) { 14325084Sjohnlev colors = page_get_pagecolors(szc); 14335084Sjohnlev ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors; 14345084Sjohnlev } 14355084Sjohnlev #endif 14365084Sjohnlev return (ctrs_sz); 14375084Sjohnlev } 14385084Sjohnlev 14395084Sjohnlev caddr_t 14405084Sjohnlev plcnt_init(caddr_t addr) 14415084Sjohnlev { 14425084Sjohnlev #ifdef DEBUG 14435084Sjohnlev int mt, szc, colors; 14445084Sjohnlev 14455084Sjohnlev for (mt = 0; mt < mnoderangecnt; mt++) { 14465084Sjohnlev mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr; 14475084Sjohnlev addr += 
(sizeof (struct mnr_mts) * mmu_page_sizes); 14485084Sjohnlev for (szc = 0; szc < mmu_page_sizes; szc++) { 14495084Sjohnlev colors = page_get_pagecolors(szc); 14505084Sjohnlev mnoderanges[mt].mnr_mts[szc].mnr_mts_colors = colors; 14515084Sjohnlev mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt = 14525084Sjohnlev (pgcnt_t *)addr; 14535084Sjohnlev addr += (sizeof (pgcnt_t) * colors); 14545084Sjohnlev } 14555084Sjohnlev } 14565084Sjohnlev #endif 14575084Sjohnlev return (addr); 14585084Sjohnlev } 14595084Sjohnlev 14605084Sjohnlev void 14615084Sjohnlev plcnt_inc_dec(page_t *pp, int mtype, int szc, long cnt, int flags) 14625084Sjohnlev { 14635084Sjohnlev #ifdef DEBUG 14645084Sjohnlev int bin = PP_2_BIN(pp); 14655084Sjohnlev 14665084Sjohnlev atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mts_pgcnt, cnt); 14675084Sjohnlev atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mtsc_pgcnt[bin], 14685084Sjohnlev cnt); 14695084Sjohnlev #endif 14705084Sjohnlev ASSERT(mtype == PP_2_MTYPE(pp)); 14715084Sjohnlev if (physmax4g && mtype <= mtype4g) 14725084Sjohnlev atomic_add_long(&freemem4g, cnt); 14735084Sjohnlev if (flags & PG_CACHE_LIST) 14745084Sjohnlev atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt, cnt); 14755084Sjohnlev else 14765466Skchow atomic_add_long(&mnoderanges[mtype].mnr_mt_flpgcnt[szc], cnt); 14775466Skchow atomic_add_long(&mnoderanges[mtype].mnr_mt_totcnt, cnt); 14785084Sjohnlev } 14795084Sjohnlev 14801373Skchow /* 1481414Skchow * Returns the free page count for mnode 1482414Skchow */ 1483414Skchow int 1484414Skchow mnode_pgcnt(int mnode) 1485414Skchow { 1486414Skchow int mtype = mnoderangecnt - 1; 1487414Skchow int flags = PGI_MT_RANGE0; 1488414Skchow pgcnt_t pgcnt = 0; 1489414Skchow 1490414Skchow mtype = mtype_func(mnode, mtype, flags); 1491414Skchow 1492414Skchow while (mtype != -1) { 14931385Skchow pgcnt += MTYPE_FREEMEM(mtype); 1494414Skchow mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT); 1495414Skchow } 1496414Skchow return (pgcnt); 1497414Skchow } 1498414Skchow 1499414Skchow /* 15000Sstevel@tonic-gate * Initialize page coloring variables based on the l2 cache parameters. 15010Sstevel@tonic-gate * Calculate and return memory needed for page coloring data structures. 15020Sstevel@tonic-gate */ 15030Sstevel@tonic-gate size_t 15040Sstevel@tonic-gate page_coloring_init(uint_t l2_sz, int l2_linesz, int l2_assoc) 15050Sstevel@tonic-gate { 15060Sstevel@tonic-gate size_t colorsz = 0; 15070Sstevel@tonic-gate int i; 15080Sstevel@tonic-gate int colors; 15090Sstevel@tonic-gate 15105084Sjohnlev #if defined(__xpv) 15115084Sjohnlev /* 15125084Sjohnlev * Hypervisor domains currently don't have any concept of NUMA. 15135084Sjohnlev * Hence we'll act like there is only 1 memrange. 15145084Sjohnlev */ 15155084Sjohnlev i = memrange_num(1); 15165084Sjohnlev #else /* !__xpv */ 15170Sstevel@tonic-gate /* 15180Sstevel@tonic-gate * Reduce the memory ranges lists if we don't have large amounts 15190Sstevel@tonic-gate * of memory. This avoids searching known empty free lists. 
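	 *
	 * For example (assuming the usual x86 memranges table of 4g+,
	 * 2g-4g, 16m-2g and 0-16m), a 3gb machine has physmax in the
	 * 2g-4g entry, so:
	 *
	 *	i = memrange_num(physmax);	// 1 on that 3gb machine
	 *	memranges += i;			// skip the empty 4g+ entry
	 *	nranges -= i;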
15200Sstevel@tonic-gate */ 15210Sstevel@tonic-gate i = memrange_num(physmax); 15220Sstevel@tonic-gate #if defined(__i386) 15230Sstevel@tonic-gate if (i > 0) 15240Sstevel@tonic-gate restricted_kmemalloc = 0; 15250Sstevel@tonic-gate #endif 15260Sstevel@tonic-gate /* physmax greater than 4g */ 15270Sstevel@tonic-gate if (i == 0) 15280Sstevel@tonic-gate physmax4g = 1; 15295084Sjohnlev #endif /* !__xpv */ 15305084Sjohnlev memranges += i; 15315084Sjohnlev nranges -= i; 15320Sstevel@tonic-gate 15335349Skchow ASSERT(mmu_page_sizes <= MMU_PAGE_SIZES); 15345349Skchow 15350Sstevel@tonic-gate ASSERT(ISP2(l2_sz)); 15360Sstevel@tonic-gate ASSERT(ISP2(l2_linesz)); 15370Sstevel@tonic-gate ASSERT(l2_sz > MMU_PAGESIZE); 15380Sstevel@tonic-gate 15390Sstevel@tonic-gate /* l2_assoc is 0 for fully associative l2 cache */ 15400Sstevel@tonic-gate if (l2_assoc) 15410Sstevel@tonic-gate l2_colors = MAX(1, l2_sz / (l2_assoc * MMU_PAGESIZE)); 15420Sstevel@tonic-gate else 15430Sstevel@tonic-gate l2_colors = 1; 15440Sstevel@tonic-gate 15450Sstevel@tonic-gate /* for scalability, configure at least PAGE_COLORS_MIN color bins */ 15460Sstevel@tonic-gate page_colors = MAX(l2_colors, PAGE_COLORS_MIN); 15470Sstevel@tonic-gate 15480Sstevel@tonic-gate /* 15490Sstevel@tonic-gate * cpu_page_colors is non-zero when a page color may be spread across 15500Sstevel@tonic-gate * multiple bins. 15510Sstevel@tonic-gate */ 15520Sstevel@tonic-gate if (l2_colors < page_colors) 15530Sstevel@tonic-gate cpu_page_colors = l2_colors; 15540Sstevel@tonic-gate 15550Sstevel@tonic-gate ASSERT(ISP2(page_colors)); 15560Sstevel@tonic-gate 15570Sstevel@tonic-gate page_colors_mask = page_colors - 1; 15580Sstevel@tonic-gate 15590Sstevel@tonic-gate ASSERT(ISP2(CPUSETSIZE())); 15600Sstevel@tonic-gate page_coloring_shift = lowbit(CPUSETSIZE()); 15610Sstevel@tonic-gate 15622961Sdp78419 /* initialize number of colors per page size */ 15632961Sdp78419 for (i = 0; i <= mmu.max_page_level; i++) { 15642961Sdp78419 hw_page_array[i].hp_size = LEVEL_SIZE(i); 15652961Sdp78419 hw_page_array[i].hp_shift = LEVEL_SHIFT(i); 15662961Sdp78419 hw_page_array[i].hp_pgcnt = LEVEL_SIZE(i) >> LEVEL_SHIFT(0); 15672961Sdp78419 hw_page_array[i].hp_colors = (page_colors_mask >> 15682961Sdp78419 (hw_page_array[i].hp_shift - hw_page_array[0].hp_shift)) 15692961Sdp78419 + 1; 15703717Sdp78419 colorequivszc[i] = 0; 15712961Sdp78419 } 15722961Sdp78419 15732961Sdp78419 /* 15742961Sdp78419 * The value of cpu_page_colors determines if additional color bins 15752961Sdp78419 * need to be checked for a particular color in the page_get routines. 15762961Sdp78419 */ 15772961Sdp78419 if (cpu_page_colors != 0) { 15782961Sdp78419 15792961Sdp78419 int a = lowbit(page_colors) - lowbit(cpu_page_colors); 15802961Sdp78419 ASSERT(a > 0); 15812961Sdp78419 ASSERT(a < 16); 15822961Sdp78419 15832961Sdp78419 for (i = 0; i <= mmu.max_page_level; i++) { 15842961Sdp78419 if ((colors = hw_page_array[i].hp_colors) <= 1) { 15852961Sdp78419 colorequivszc[i] = 0; 15862961Sdp78419 continue; 15872961Sdp78419 } 15882961Sdp78419 while ((colors >> a) == 0) 15892961Sdp78419 a--; 15902961Sdp78419 ASSERT(a >= 0); 15912961Sdp78419 15922961Sdp78419 /* higher 4 bits encodes color equiv mask */ 15932961Sdp78419 colorequivszc[i] = (a << 4); 15942961Sdp78419 } 15952961Sdp78419 } 15962961Sdp78419 15975084Sjohnlev /* factor in colorequiv to check additional 'equivalent' bins. 
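	 *
	 * Worked example (hypothetical tunable value): with
	 * hp_colors == 64 and colorequiv == 4, a = lowbit(4) - 1 = 2, so
	 * (2 << 4) lands in the high nibble of colorequivszc[i] and each
	 * color lookup will also scan the other 3 bins in its 4-bin
	 * equivalence class.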
*/ 15985084Sjohnlev if (colorequiv > 1) { 15995084Sjohnlev 16005084Sjohnlev int a = lowbit(colorequiv) - 1; 16015084Sjohnlev if (a > 15) 16025084Sjohnlev a = 15; 16035084Sjohnlev 16045084Sjohnlev for (i = 0; i <= mmu.max_page_level; i++) { 16055084Sjohnlev if ((colors = hw_page_array[i].hp_colors) <= 1) { 16065084Sjohnlev continue; 16075084Sjohnlev } 16085084Sjohnlev while ((colors >> a) == 0) 16095084Sjohnlev a--; 16105084Sjohnlev if ((a << 4) > colorequivszc[i]) { 16115084Sjohnlev colorequivszc[i] = (a << 4); 16125084Sjohnlev } 16135084Sjohnlev } 16145084Sjohnlev } 16155084Sjohnlev 16160Sstevel@tonic-gate /* size for mnoderanges */ 16172961Sdp78419 for (mnoderangecnt = 0, i = 0; i < max_mem_nodes; i++) 16182961Sdp78419 mnoderangecnt += mnode_range_cnt(i); 16190Sstevel@tonic-gate colorsz = mnoderangecnt * sizeof (mnoderange_t); 16200Sstevel@tonic-gate 16210Sstevel@tonic-gate /* size for fpc_mutex and cpc_mutex */ 16220Sstevel@tonic-gate colorsz += (2 * max_mem_nodes * sizeof (kmutex_t) * NPC_MUTEX); 16230Sstevel@tonic-gate 16240Sstevel@tonic-gate /* size of page_freelists */ 16250Sstevel@tonic-gate colorsz += mnoderangecnt * sizeof (page_t ***); 16260Sstevel@tonic-gate colorsz += mnoderangecnt * mmu_page_sizes * sizeof (page_t **); 16270Sstevel@tonic-gate 16280Sstevel@tonic-gate for (i = 0; i < mmu_page_sizes; i++) { 16290Sstevel@tonic-gate colors = page_get_pagecolors(i); 16300Sstevel@tonic-gate colorsz += mnoderangecnt * colors * sizeof (page_t *); 16310Sstevel@tonic-gate } 16320Sstevel@tonic-gate 16330Sstevel@tonic-gate /* size of page_cachelists */ 16340Sstevel@tonic-gate colorsz += mnoderangecnt * sizeof (page_t **); 16350Sstevel@tonic-gate colorsz += mnoderangecnt * page_colors * sizeof (page_t *); 16360Sstevel@tonic-gate 16370Sstevel@tonic-gate return (colorsz); 16380Sstevel@tonic-gate } 16390Sstevel@tonic-gate 16400Sstevel@tonic-gate /* 16410Sstevel@tonic-gate * Called once at startup to configure page_coloring data structures and 16420Sstevel@tonic-gate * does the 1st page_free()/page_freelist_add(). 
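 *
 * Sketch of the expected call pattern (boot_alloc and its argument names
 * are hypothetical stand-ins for the real boot-time allocation path):
 *
 *	size_t sz = page_coloring_init(l2sz, l2linesz, l2assoc);
 *	page_coloring_setup(boot_alloc(sz));
 *
 * i.e. the caller must hand in a buffer at least as large as
 * page_coloring_init()'s return value; this routine only carves it up.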
16430Sstevel@tonic-gate */ 16440Sstevel@tonic-gate void 16450Sstevel@tonic-gate page_coloring_setup(caddr_t pcmemaddr) 16460Sstevel@tonic-gate { 16470Sstevel@tonic-gate int i; 16480Sstevel@tonic-gate int j; 16490Sstevel@tonic-gate int k; 16500Sstevel@tonic-gate caddr_t addr; 16510Sstevel@tonic-gate int colors; 16520Sstevel@tonic-gate 16530Sstevel@tonic-gate /* 16540Sstevel@tonic-gate * do page coloring setup 16550Sstevel@tonic-gate */ 16560Sstevel@tonic-gate addr = pcmemaddr; 16570Sstevel@tonic-gate 16580Sstevel@tonic-gate mnoderanges = (mnoderange_t *)addr; 16590Sstevel@tonic-gate addr += (mnoderangecnt * sizeof (mnoderange_t)); 16600Sstevel@tonic-gate 16610Sstevel@tonic-gate mnode_range_setup(mnoderanges); 16620Sstevel@tonic-gate 16630Sstevel@tonic-gate if (physmax4g) 16640Sstevel@tonic-gate mtype4g = pfn_2_mtype(0xfffff); 16650Sstevel@tonic-gate 16660Sstevel@tonic-gate for (k = 0; k < NPC_MUTEX; k++) { 16670Sstevel@tonic-gate fpc_mutex[k] = (kmutex_t *)addr; 16680Sstevel@tonic-gate addr += (max_mem_nodes * sizeof (kmutex_t)); 16690Sstevel@tonic-gate } 16700Sstevel@tonic-gate for (k = 0; k < NPC_MUTEX; k++) { 16710Sstevel@tonic-gate cpc_mutex[k] = (kmutex_t *)addr; 16720Sstevel@tonic-gate addr += (max_mem_nodes * sizeof (kmutex_t)); 16730Sstevel@tonic-gate } 16740Sstevel@tonic-gate page_freelists = (page_t ****)addr; 16750Sstevel@tonic-gate addr += (mnoderangecnt * sizeof (page_t ***)); 16760Sstevel@tonic-gate 16770Sstevel@tonic-gate page_cachelists = (page_t ***)addr; 16780Sstevel@tonic-gate addr += (mnoderangecnt * sizeof (page_t **)); 16790Sstevel@tonic-gate 16800Sstevel@tonic-gate for (i = 0; i < mnoderangecnt; i++) { 16810Sstevel@tonic-gate page_freelists[i] = (page_t ***)addr; 16820Sstevel@tonic-gate addr += (mmu_page_sizes * sizeof (page_t **)); 16830Sstevel@tonic-gate 16840Sstevel@tonic-gate for (j = 0; j < mmu_page_sizes; j++) { 16850Sstevel@tonic-gate colors = page_get_pagecolors(j); 16860Sstevel@tonic-gate page_freelists[i][j] = (page_t **)addr; 16870Sstevel@tonic-gate addr += (colors * sizeof (page_t *)); 16880Sstevel@tonic-gate } 16890Sstevel@tonic-gate page_cachelists[i] = (page_t **)addr; 16900Sstevel@tonic-gate addr += (page_colors * sizeof (page_t *)); 16910Sstevel@tonic-gate } 16920Sstevel@tonic-gate } 16930Sstevel@tonic-gate 16945084Sjohnlev #if defined(__xpv) 16955084Sjohnlev /* 16965084Sjohnlev * Give back 10% of the io_pool pages to the free list. 16975084Sjohnlev * Don't shrink the pool below some absolute minimum. 16985084Sjohnlev */ 16995084Sjohnlev static void 17005084Sjohnlev page_io_pool_shrink() 17015084Sjohnlev { 17025084Sjohnlev int retcnt; 17035084Sjohnlev page_t *pp, *pp_first, *pp_last, **curpool; 17045084Sjohnlev mfn_t mfn; 17055084Sjohnlev int bothpools = 0; 17065084Sjohnlev 17075084Sjohnlev mutex_enter(&io_pool_lock); 17085084Sjohnlev io_pool_shrink_attempts++; /* should be a kstat? */ 17095084Sjohnlev retcnt = io_pool_cnt / 10; 17105084Sjohnlev if (io_pool_cnt - retcnt < io_pool_cnt_min) 17115084Sjohnlev retcnt = io_pool_cnt - io_pool_cnt_min; 17125084Sjohnlev if (retcnt <= 0) 17135084Sjohnlev goto done; 17145084Sjohnlev io_pool_shrinks++; /* should be a kstat? */ 17155084Sjohnlev curpool = &io_pool_4g; 17165084Sjohnlev domore: 17175084Sjohnlev /* 17185084Sjohnlev * Loop through taking pages from the end of the list 17195084Sjohnlev * (highest mfns) till amount to return reached. 
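	 *
	 * (Worked example of the clipping above, with hypothetical counts:
	 * given io_pool_cnt == 1000 and io_pool_cnt_min == 950, retcnt
	 * starts at 1000 / 10 == 100 and is clipped to 1000 - 950 == 50,
	 * so the pool never shrinks below its minimum.)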
17205084Sjohnlev */ 17215084Sjohnlev for (pp = *curpool; pp && retcnt > 0; ) { 17225084Sjohnlev pp_first = pp_last = pp->p_prev; 17235084Sjohnlev if (pp_first == *curpool) 17245084Sjohnlev break; 17255084Sjohnlev retcnt--; 17265084Sjohnlev io_pool_cnt--; 17275084Sjohnlev page_io_pool_sub(curpool, pp_first, pp_last); 17285084Sjohnlev if ((mfn = pfn_to_mfn(pp->p_pagenum)) < start_mfn) 17295084Sjohnlev start_mfn = mfn; 17305084Sjohnlev page_free(pp_first, 1); 17315084Sjohnlev pp = *curpool; 17325084Sjohnlev } 17335084Sjohnlev if (retcnt != 0 && !bothpools) { 17345084Sjohnlev /* 17355084Sjohnlev * If not enough found in less constrained pool try the 17365084Sjohnlev * more constrained one. 17375084Sjohnlev */ 17385084Sjohnlev curpool = &io_pool_16m; 17395084Sjohnlev bothpools = 1; 17405084Sjohnlev goto domore; 17415084Sjohnlev } 17425084Sjohnlev done: 17435084Sjohnlev mutex_exit(&io_pool_lock); 17445084Sjohnlev } 17455084Sjohnlev 17465084Sjohnlev #endif /* __xpv */ 17475084Sjohnlev 17485084Sjohnlev uint_t 17495084Sjohnlev page_create_update_flags_x86(uint_t flags) 17505084Sjohnlev { 17515084Sjohnlev #if defined(__xpv) 17525084Sjohnlev /* 17535084Sjohnlev * Check this is an urgent allocation and free pages are depleted. 17545084Sjohnlev */ 17555084Sjohnlev if (!(flags & PG_WAIT) && freemem < desfree) 17565084Sjohnlev page_io_pool_shrink(); 17575084Sjohnlev #else /* !__xpv */ 17585084Sjohnlev /* 17595084Sjohnlev * page_create_get_something may call this because 4g memory may be 17605084Sjohnlev * depleted. Set flags to allow for relocation of base page below 17615084Sjohnlev * 4g if necessary. 17625084Sjohnlev */ 17635084Sjohnlev if (physmax4g) 17645084Sjohnlev flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI); 17655084Sjohnlev #endif /* __xpv */ 17665084Sjohnlev return (flags); 17675084Sjohnlev } 17685084Sjohnlev 17690Sstevel@tonic-gate /*ARGSUSED*/ 17700Sstevel@tonic-gate int 17710Sstevel@tonic-gate bp_color(struct buf *bp) 17720Sstevel@tonic-gate { 17730Sstevel@tonic-gate return (0); 17740Sstevel@tonic-gate } 17750Sstevel@tonic-gate 17765084Sjohnlev #if defined(__xpv) 17775084Sjohnlev 17785084Sjohnlev /* 17795084Sjohnlev * Take pages out of an io_pool 17805084Sjohnlev */ 17815084Sjohnlev static void 17825084Sjohnlev page_io_pool_sub(page_t **poolp, page_t *pp_first, page_t *pp_last) 17835084Sjohnlev { 17845084Sjohnlev if (*poolp == pp_first) { 17855084Sjohnlev *poolp = pp_last->p_next; 17865084Sjohnlev if (*poolp == pp_first) 17875084Sjohnlev *poolp = NULL; 17885084Sjohnlev } 17895084Sjohnlev pp_first->p_prev->p_next = pp_last->p_next; 17905084Sjohnlev pp_last->p_next->p_prev = pp_first->p_prev; 17915084Sjohnlev pp_first->p_prev = pp_last; 17925084Sjohnlev pp_last->p_next = pp_first; 17935084Sjohnlev } 17945084Sjohnlev 17955084Sjohnlev /* 17965084Sjohnlev * Put a page on the io_pool list. The list is ordered by increasing MFN. 17975084Sjohnlev */ 17985084Sjohnlev static void 17995084Sjohnlev page_io_pool_add(page_t **poolp, page_t *pp) 18005084Sjohnlev { 18015084Sjohnlev page_t *look; 18025084Sjohnlev mfn_t mfn = mfn_list[pp->p_pagenum]; 18035084Sjohnlev 18045084Sjohnlev if (*poolp == NULL) { 18055084Sjohnlev *poolp = pp; 18065084Sjohnlev pp->p_next = pp; 18075084Sjohnlev pp->p_prev = pp; 18085084Sjohnlev return; 18095084Sjohnlev } 18105084Sjohnlev 18115084Sjohnlev /* 18125084Sjohnlev * Since we try to take pages from the high end of the pool 18135084Sjohnlev * chances are good that the pages to be put on the list will 18145084Sjohnlev * go at or near the end of the list. 
so start at the end and 18155084Sjohnlev * work backwards. 18165084Sjohnlev */ 18175084Sjohnlev look = (*poolp)->p_prev; 18185084Sjohnlev while (mfn < mfn_list[look->p_pagenum]) { 18195084Sjohnlev look = look->p_prev; 18205084Sjohnlev if (look == (*poolp)->p_prev) 18215084Sjohnlev break; /* backed all the way to front of list */ 18225084Sjohnlev } 18235084Sjohnlev 18245084Sjohnlev /* insert after look */ 18255084Sjohnlev pp->p_prev = look; 18265084Sjohnlev pp->p_next = look->p_next; 18275084Sjohnlev pp->p_next->p_prev = pp; 18285084Sjohnlev look->p_next = pp; 18295084Sjohnlev if (mfn < mfn_list[(*poolp)->p_pagenum]) { 18305084Sjohnlev /* 18315084Sjohnlev * we inserted a new first list element 18325084Sjohnlev * adjust pool pointer to newly inserted element 18335084Sjohnlev */ 18345084Sjohnlev *poolp = pp; 18355084Sjohnlev } 18365084Sjohnlev } 18375084Sjohnlev 18385084Sjohnlev /* 18395084Sjohnlev * Add a page to the io_pool. Setting the force flag will force the page 18405084Sjohnlev * into the io_pool no matter what. 18415084Sjohnlev */ 18425084Sjohnlev static void 18435084Sjohnlev add_page_to_pool(page_t *pp, int force) 18445084Sjohnlev { 18455084Sjohnlev page_t *highest; 18465084Sjohnlev page_t *freep = NULL; 18475084Sjohnlev 18485084Sjohnlev mutex_enter(&io_pool_lock); 18495084Sjohnlev /* 18505084Sjohnlev * Always keep the scarce low memory pages 18515084Sjohnlev */ 18525084Sjohnlev if (mfn_list[pp->p_pagenum] < PFN_16MEG) { 18535084Sjohnlev ++io_pool_cnt; 18545084Sjohnlev page_io_pool_add(&io_pool_16m, pp); 18555084Sjohnlev goto done; 18565084Sjohnlev } 18575084Sjohnlev if (io_pool_cnt < io_pool_cnt_max || force) { 18585084Sjohnlev ++io_pool_cnt; 18595084Sjohnlev page_io_pool_add(&io_pool_4g, pp); 18605084Sjohnlev } else { 18615084Sjohnlev highest = io_pool_4g->p_prev; 18625084Sjohnlev if (mfn_list[pp->p_pagenum] < mfn_list[highest->p_pagenum]) { 18635084Sjohnlev page_io_pool_sub(&io_pool_4g, highest, highest); 18645084Sjohnlev page_io_pool_add(&io_pool_4g, pp); 18655084Sjohnlev freep = highest; 18665084Sjohnlev } else { 18675084Sjohnlev freep = pp; 18685084Sjohnlev } 18695084Sjohnlev } 18705084Sjohnlev done: 18715084Sjohnlev mutex_exit(&io_pool_lock); 18725084Sjohnlev if (freep) 18735084Sjohnlev page_free(freep, 1); 18745084Sjohnlev } 18755084Sjohnlev 18765084Sjohnlev 18775084Sjohnlev int contig_pfn_cnt; /* no of pfns in the contig pfn list */ 18785084Sjohnlev int contig_pfn_max; /* capacity of the contig pfn list */ 18795084Sjohnlev int next_alloc_pfn; /* next position in list to start a contig search */ 18805084Sjohnlev int contig_pfnlist_updates; /* pfn list update count */ 18815084Sjohnlev int contig_pfnlist_builds; /* how many times have we (re)built list */ 18825084Sjohnlev int contig_pfnlist_buildfailed; /* how many times has list build failed */ 18835084Sjohnlev int create_contig_pending; /* nonzero means taskq creating contig list */ 18845084Sjohnlev pfn_t *contig_pfn_list = NULL; /* list of contig pfns in ascending mfn order */ 18855084Sjohnlev 18865084Sjohnlev /* 18875084Sjohnlev * Function to use in sorting a list of pfns by their underlying mfns. 
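 *
 * It is handed to qsort() below as in this sketch:
 *
 *	qsort(contig_pfn_list, contig_pfn_cnt, sizeof (pfn_t), mfn_compare);
 *
 * The array elements are pfns, but the sort key is mfn_list[pfn], so the
 * resulting order is ascending machine addresses.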
18885084Sjohnlev */ 18895084Sjohnlev static int 18905084Sjohnlev mfn_compare(const void *pfnp1, const void *pfnp2) 18915084Sjohnlev { 18925084Sjohnlev mfn_t mfn1 = mfn_list[*(pfn_t *)pfnp1]; 18935084Sjohnlev mfn_t mfn2 = mfn_list[*(pfn_t *)pfnp2]; 18945084Sjohnlev 18955084Sjohnlev if (mfn1 > mfn2) 18965084Sjohnlev return (1); 18975084Sjohnlev if (mfn1 < mfn2) 18985084Sjohnlev return (-1); 18995084Sjohnlev return (0); 19005084Sjohnlev } 19015084Sjohnlev 19025084Sjohnlev /* 19035084Sjohnlev * Compact the contig_pfn_list by tossing all the non-contiguous 19045084Sjohnlev * elements from the list. 19055084Sjohnlev */ 19065084Sjohnlev static void 19075084Sjohnlev compact_contig_pfn_list(void) 19085084Sjohnlev { 19095084Sjohnlev pfn_t pfn, lapfn, prev_lapfn; 19105084Sjohnlev mfn_t mfn; 19115084Sjohnlev int i, newcnt = 0; 19125084Sjohnlev 19135084Sjohnlev prev_lapfn = 0; 19145084Sjohnlev for (i = 0; i < contig_pfn_cnt - 1; i++) { 19155084Sjohnlev pfn = contig_pfn_list[i]; 19165084Sjohnlev lapfn = contig_pfn_list[i + 1]; 19175084Sjohnlev mfn = mfn_list[pfn]; 19185084Sjohnlev /* 19195084Sjohnlev * See if next pfn is for a contig mfn 19205084Sjohnlev */ 19215084Sjohnlev if (mfn_list[lapfn] != mfn + 1) 19225084Sjohnlev continue; 19235084Sjohnlev /* 19245084Sjohnlev * pfn and lookahead are both put in list 19255084Sjohnlev * unless pfn is the previous lookahead. 19265084Sjohnlev */ 19275084Sjohnlev if (pfn != prev_lapfn) 19285084Sjohnlev contig_pfn_list[newcnt++] = pfn; 19295084Sjohnlev contig_pfn_list[newcnt++] = lapfn; 19305084Sjohnlev prev_lapfn = lapfn; 19315084Sjohnlev } 19325084Sjohnlev for (i = newcnt; i < contig_pfn_cnt; i++) 19335084Sjohnlev contig_pfn_list[i] = 0; 19345084Sjohnlev contig_pfn_cnt = newcnt; 19355084Sjohnlev } 19365084Sjohnlev 19375084Sjohnlev /*ARGSUSED*/ 19385084Sjohnlev static void 19395084Sjohnlev call_create_contiglist(void *arg) 19405084Sjohnlev { 19415084Sjohnlev (void) create_contig_pfnlist(PG_WAIT); 19425084Sjohnlev } 19435084Sjohnlev 19445084Sjohnlev /* 19455084Sjohnlev * Create list of freelist pfns that have underlying 19465084Sjohnlev * contiguous mfns. The list is kept in ascending mfn order. 19475084Sjohnlev * returns 1 if list created else 0. 19485084Sjohnlev */ 19495084Sjohnlev static int 19505084Sjohnlev create_contig_pfnlist(uint_t flags) 19515084Sjohnlev { 19525084Sjohnlev pfn_t pfn; 19535084Sjohnlev page_t *pp; 19545529Ssmaybe int ret = 1; 19555529Ssmaybe 19565529Ssmaybe mutex_enter(&contig_list_lock); 19575084Sjohnlev if (contig_pfn_list != NULL) 19585529Ssmaybe goto out; 19595084Sjohnlev contig_pfn_max = freemem + (freemem / 10); 19605084Sjohnlev contig_pfn_list = kmem_zalloc(contig_pfn_max * sizeof (pfn_t), 19615084Sjohnlev (flags & PG_WAIT) ? KM_SLEEP : KM_NOSLEEP); 19625084Sjohnlev if (contig_pfn_list == NULL) { 19635084Sjohnlev /* 19645084Sjohnlev * If we could not create the contig list (because 19655084Sjohnlev * we could not sleep for memory). Dispatch a taskq that can 19665084Sjohnlev * sleep to get the memory. 
19675084Sjohnlev */ 19685084Sjohnlev if (!create_contig_pending) { 19695084Sjohnlev if (taskq_dispatch(system_taskq, call_create_contiglist, 19705084Sjohnlev NULL, TQ_NOSLEEP) != NULL) 19715084Sjohnlev create_contig_pending = 1; 19725084Sjohnlev } 19735084Sjohnlev contig_pfnlist_buildfailed++; /* count list build failures */ 19745529Ssmaybe ret = 0; 19755529Ssmaybe goto out; 19765084Sjohnlev } 19775529Ssmaybe create_contig_pending = 0; 19785084Sjohnlev ASSERT(contig_pfn_cnt == 0); 19795084Sjohnlev for (pfn = 0; pfn < mfn_count; pfn++) { 19805084Sjohnlev pp = page_numtopp_nolock(pfn); 19815084Sjohnlev if (pp == NULL || !PP_ISFREE(pp)) 19825084Sjohnlev continue; 19835084Sjohnlev contig_pfn_list[contig_pfn_cnt] = pfn; 19845084Sjohnlev if (++contig_pfn_cnt == contig_pfn_max) 19855084Sjohnlev break; 19865084Sjohnlev } 19875084Sjohnlev qsort(contig_pfn_list, contig_pfn_cnt, sizeof (pfn_t), mfn_compare); 19885084Sjohnlev compact_contig_pfn_list(); 19895084Sjohnlev /* 19905084Sjohnlev * Make sure next search of the newly created contiguous pfn 19915084Sjohnlev * list starts at the beginning of the list. 19925084Sjohnlev */ 19935084Sjohnlev next_alloc_pfn = 0; 19945084Sjohnlev contig_pfnlist_builds++; /* count list builds */ 19955529Ssmaybe out: 19965529Ssmaybe mutex_exit(&contig_list_lock); 19975529Ssmaybe return (ret); 19985084Sjohnlev } 19995084Sjohnlev 20005084Sjohnlev 20015084Sjohnlev /* 20025084Sjohnlev * Toss the current contig pfnlist. Someone is about to do a massive 20035084Sjohnlev * update to pfn<->mfn mappings. So we have them destroy the list and lock 20045084Sjohnlev * it till they are done with their update. 20055084Sjohnlev */ 20065084Sjohnlev void 20075084Sjohnlev clear_and_lock_contig_pfnlist() 20085084Sjohnlev { 20095084Sjohnlev pfn_t *listp = NULL; 20105084Sjohnlev size_t listsize; 20115084Sjohnlev 20125529Ssmaybe mutex_enter(&contig_list_lock); 20135084Sjohnlev if (contig_pfn_list != NULL) { 20145084Sjohnlev listp = contig_pfn_list; 20155084Sjohnlev listsize = contig_pfn_max * sizeof (pfn_t); 20165084Sjohnlev contig_pfn_list = NULL; 20175084Sjohnlev contig_pfn_max = contig_pfn_cnt = 0; 20185084Sjohnlev } 20195084Sjohnlev if (listp != NULL) 20205084Sjohnlev kmem_free(listp, listsize); 20215084Sjohnlev } 20225084Sjohnlev 20235084Sjohnlev /* 20245084Sjohnlev * Unlock the contig_pfn_list. The next attempted use of it will cause 20255084Sjohnlev * it to be re-created. 20265084Sjohnlev */ 20275084Sjohnlev void 20285084Sjohnlev unlock_contig_pfnlist() 20295084Sjohnlev { 20305529Ssmaybe mutex_exit(&contig_list_lock); 20315084Sjohnlev } 20325084Sjohnlev 20335084Sjohnlev /* 20345084Sjohnlev * Update the contiguous pfn list in response to a pfn <-> mfn reassignment 20355084Sjohnlev */ 20365084Sjohnlev void 20375084Sjohnlev update_contig_pfnlist(pfn_t pfn, mfn_t oldmfn, mfn_t newmfn) 20385084Sjohnlev { 20395084Sjohnlev int probe_hi, probe_lo, probe_pos, insert_after, insert_point; 20405084Sjohnlev pfn_t probe_pfn; 20415084Sjohnlev mfn_t probe_mfn; 20425529Ssmaybe int drop_lock = 0; 20435529Ssmaybe 20445529Ssmaybe if (mutex_owner(&contig_list_lock) != curthread) { 20455529Ssmaybe drop_lock = 1; 20465529Ssmaybe mutex_enter(&contig_list_lock); 20475529Ssmaybe } 20485084Sjohnlev if (contig_pfn_list == NULL) 20495529Ssmaybe goto done; 20505084Sjohnlev contig_pfnlist_updates++; 20515084Sjohnlev /* 20525084Sjohnlev * Find the pfn in the current list. Use a binary chop to locate it. 
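	 *
	 * A user-level analog of the chop below (sketch only; key() stands
	 * for the pfn_to_mfn() lookup and want_key for oldmfn):
	 *
	 *	while ((val = list[pos]) != want) {
	 *		if (pos == lo)
	 *			return (-1);	// not in list
	 *		if (key(val) <= want_key)
	 *			lo = pos;
	 *		else
	 *			hi = pos;
	 *		pos = (hi + lo) / 2;
	 *	}
	 *
	 * Note the search compares by mfn but terminates on pfn equality,
	 * since the list is sorted by underlying mfn.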
	 */
	probe_hi = contig_pfn_cnt - 1;
	probe_lo = 0;
	probe_pos = (probe_hi + probe_lo) / 2;
	while ((probe_pfn = contig_pfn_list[probe_pos]) != pfn) {
		if (probe_pos == probe_lo) { /* pfn not in list */
			probe_pos = -1;
			break;
		}
		if (pfn_to_mfn(probe_pfn) <= oldmfn)
			probe_lo = probe_pos;
		else
			probe_hi = probe_pos;
		probe_pos = (probe_hi + probe_lo) / 2;
	}
	if (probe_pos >= 0) { /* remove pfn from list */
		contig_pfn_cnt--;
		ovbcopy(&contig_pfn_list[probe_pos + 1],
		    &contig_pfn_list[probe_pos],
		    (contig_pfn_cnt - probe_pos) * sizeof (pfn_t));
	}
	if (newmfn == MFN_INVALID)
		goto done;
	/*
	 * Check if the new mfn has adjacent mfns in the list
	 */
	probe_hi = contig_pfn_cnt - 1;
	probe_lo = 0;
	insert_after = -2;
	do {
		probe_pos = (probe_hi + probe_lo) / 2;
		probe_mfn = pfn_to_mfn(contig_pfn_list[probe_pos]);
		if (newmfn == probe_mfn + 1)
			insert_after = probe_pos;
		else if (newmfn == probe_mfn - 1)
			insert_after = probe_pos - 1;
		if (probe_pos == probe_lo)
			break;
		if (probe_mfn <= newmfn)
			probe_lo = probe_pos;
		else
			probe_hi = probe_pos;
	} while (insert_after == -2);
	/*
	 * If there is space in the list and there are adjacent mfns,
	 * insert the pfn into its proper place in the list.
	 */
	if (insert_after != -2 && contig_pfn_cnt + 1 <= contig_pfn_max) {
		insert_point = insert_after + 1;
		ovbcopy(&contig_pfn_list[insert_point],
		    &contig_pfn_list[insert_point + 1],
		    (contig_pfn_cnt - insert_point) * sizeof (pfn_t));
		contig_pfn_list[insert_point] = pfn;
		contig_pfn_cnt++;
	}
done:
	if (drop_lock)
		mutex_exit(&contig_list_lock);
}

/*
 * Called to (re-)populate the io_pool from the free page lists.
 */
long
populate_io_pool(void)
{
	pfn_t pfn;
	mfn_t mfn, max_mfn;
	page_t *pp;

	/*
	 * Figure out the bounds of the pool on first invocation.
	 * We use a percentage of memory for the io pool size; we allow it
	 * to shrink, but not below a fixed minimum.
	 */
	if (io_pool_cnt_max == 0) {
		io_pool_cnt_max = physmem / (100 / io_pool_physmem_pct);
		io_pool_cnt_lowater = io_pool_cnt_max;
		/*
		 * This is the first time in populate_io_pool, grab a va to use
		 * when we need to allocate pages.
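		 *
		 * (Sizing note on the io_pool_cnt_max computation above,
		 * with a hypothetical percentage: the integer idiom
		 * physmem / (100 / pct) requires that io_pool_physmem_pct
		 * divide 100 evenly; pct == 2 gives physmem / 50, i.e.
		 * 2% of memory.)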
21345084Sjohnlev */ 21355084Sjohnlev io_pool_kva = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP); 21365084Sjohnlev } 21375084Sjohnlev /* 21385084Sjohnlev * If we are out of pages in the pool, then grow the size of the pool 21395084Sjohnlev */ 21405084Sjohnlev if (io_pool_cnt == 0) 21415084Sjohnlev io_pool_cnt_max += io_pool_cnt_max / 20; /* grow by 5% */ 21425084Sjohnlev io_pool_grows++; /* should be a kstat? */ 21435084Sjohnlev 21445084Sjohnlev /* 21455084Sjohnlev * Get highest mfn on this platform, but limit to the 32 bit DMA max. 21465084Sjohnlev */ 21475084Sjohnlev (void) mfn_to_pfn(start_mfn); 21485084Sjohnlev max_mfn = MIN(cached_max_mfn, PFN_4GIG); 21495084Sjohnlev for (mfn = start_mfn; mfn < max_mfn; start_mfn = ++mfn) { 21505084Sjohnlev pfn = mfn_to_pfn(mfn); 21515084Sjohnlev if (pfn & PFN_IS_FOREIGN_MFN) 21525084Sjohnlev continue; 21535084Sjohnlev /* 21545084Sjohnlev * try to allocate it from free pages 21555084Sjohnlev */ 21565084Sjohnlev pp = page_numtopp_alloc(pfn); 21575084Sjohnlev if (pp == NULL) 21585084Sjohnlev continue; 21595084Sjohnlev PP_CLRFREE(pp); 21605084Sjohnlev add_page_to_pool(pp, 1); 21615084Sjohnlev if (io_pool_cnt >= io_pool_cnt_max) 21625084Sjohnlev break; 21635084Sjohnlev } 21645084Sjohnlev 21655084Sjohnlev return (io_pool_cnt); 21665084Sjohnlev } 21675084Sjohnlev 21685084Sjohnlev /* 21695084Sjohnlev * Destroy a page that was being used for DMA I/O. It may or 21705084Sjohnlev * may not actually go back to the io_pool. 21715084Sjohnlev */ 21725084Sjohnlev void 21735084Sjohnlev page_destroy_io(page_t *pp) 21745084Sjohnlev { 21755084Sjohnlev mfn_t mfn = mfn_list[pp->p_pagenum]; 21765084Sjohnlev 21775084Sjohnlev /* 21785084Sjohnlev * When the page was alloc'd a reservation was made, release it now 21795084Sjohnlev */ 21805084Sjohnlev page_unresv(1); 21815084Sjohnlev /* 21825084Sjohnlev * Unload translations, if any, then hash out the 21835084Sjohnlev * page to erase its identity. 21845084Sjohnlev */ 21855084Sjohnlev (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 21865084Sjohnlev page_hashout(pp, NULL); 21875084Sjohnlev 21885084Sjohnlev /* 21895084Sjohnlev * If the page came from the free lists, just put it back to them. 21905084Sjohnlev * DomU pages always go on the free lists as well. 21915084Sjohnlev */ 21925084Sjohnlev if (!DOMAIN_IS_INITDOMAIN(xen_info) || mfn >= PFN_4GIG) { 21935084Sjohnlev page_free(pp, 1); 21945084Sjohnlev return; 21955084Sjohnlev } 21965084Sjohnlev 21975084Sjohnlev add_page_to_pool(pp, 0); 21985084Sjohnlev } 21995084Sjohnlev 22005084Sjohnlev 22015084Sjohnlev long contig_searches; /* count of times contig pages requested */ 22025084Sjohnlev long contig_search_restarts; /* count of contig ranges tried */ 22035084Sjohnlev long contig_search_failed; /* count of contig alloc failures */ 22045084Sjohnlev 22055084Sjohnlev /* 22065084Sjohnlev * Look thru the contiguous pfns that are not part of the io_pool for 22075084Sjohnlev * contiguous free pages. Return a list of the found pages or NULL. 
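 *
 * Call sketch (this is how page_create_io() below reaches it):
 *
 *	if (anyaddr && contig && pfnseg >= max_mfn)
 *		pp_first = find_contig_free(bytes, flags);
 *
 * The returned pages are linked into the same p_next/p_prev ring format the
 * io_pool lists use, via page_io_pool_add().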
22085084Sjohnlev */ 22095084Sjohnlev page_t * 22105084Sjohnlev find_contig_free(uint_t bytes, uint_t flags) 22115084Sjohnlev { 22125084Sjohnlev page_t *pp, *plist = NULL; 22135084Sjohnlev mfn_t mfn, prev_mfn; 22145084Sjohnlev pfn_t pfn; 22155084Sjohnlev int pages_needed, pages_requested; 22165084Sjohnlev int search_start; 22175084Sjohnlev 22185084Sjohnlev /* 22195084Sjohnlev * create the contig pfn list if not already done 22205084Sjohnlev */ 22215529Ssmaybe retry: 22225529Ssmaybe mutex_enter(&contig_list_lock); 22235084Sjohnlev if (contig_pfn_list == NULL) { 22245529Ssmaybe mutex_exit(&contig_list_lock); 22255529Ssmaybe if (!create_contig_pfnlist(flags)) { 22265084Sjohnlev return (NULL); 22275084Sjohnlev } 22285529Ssmaybe goto retry; 22295084Sjohnlev } 22305084Sjohnlev contig_searches++; 22315084Sjohnlev /* 22325084Sjohnlev * Search contiguous pfn list for physically contiguous pages not in 22335084Sjohnlev * the io_pool. Start the search where the last search left off. 22345084Sjohnlev */ 22355084Sjohnlev pages_requested = pages_needed = mmu_btop(bytes); 22365084Sjohnlev search_start = next_alloc_pfn; 22375084Sjohnlev prev_mfn = 0; 22385084Sjohnlev while (pages_needed) { 22395084Sjohnlev pfn = contig_pfn_list[next_alloc_pfn]; 22405084Sjohnlev mfn = pfn_to_mfn(pfn); 22415084Sjohnlev if ((prev_mfn == 0 || mfn == prev_mfn + 1) && 22425084Sjohnlev (pp = page_numtopp_alloc(pfn)) != NULL) { 22435084Sjohnlev PP_CLRFREE(pp); 22445084Sjohnlev page_io_pool_add(&plist, pp); 22455084Sjohnlev pages_needed--; 22465084Sjohnlev prev_mfn = mfn; 22475084Sjohnlev } else { 22485084Sjohnlev contig_search_restarts++; 22495084Sjohnlev /* 22505084Sjohnlev * free partial page list 22515084Sjohnlev */ 22525084Sjohnlev while (plist != NULL) { 22535084Sjohnlev pp = plist; 22545084Sjohnlev page_io_pool_sub(&plist, pp, pp); 22555084Sjohnlev page_free(pp, 1); 22565084Sjohnlev } 22575084Sjohnlev pages_needed = pages_requested; 22585084Sjohnlev prev_mfn = 0; 22595084Sjohnlev } 22605084Sjohnlev if (++next_alloc_pfn == contig_pfn_cnt) 22615084Sjohnlev next_alloc_pfn = 0; 22625084Sjohnlev if (next_alloc_pfn == search_start) 22635084Sjohnlev break; /* all pfns searched */ 22645084Sjohnlev } 22655529Ssmaybe mutex_exit(&contig_list_lock); 22665084Sjohnlev if (pages_needed) { 22675084Sjohnlev contig_search_failed++; 22685084Sjohnlev /* 22695084Sjohnlev * Failed to find enough contig pages. 22705084Sjohnlev * free partial page list 22715084Sjohnlev */ 22725084Sjohnlev while (plist != NULL) { 22735084Sjohnlev pp = plist; 22745084Sjohnlev page_io_pool_sub(&plist, pp, pp); 22755084Sjohnlev page_free(pp, 1); 22765084Sjohnlev } 22775084Sjohnlev } 22785084Sjohnlev return (plist); 22795084Sjohnlev } 22805084Sjohnlev 22815084Sjohnlev /* 22825084Sjohnlev * Allocator for domain 0 I/O pages. We match the required 22835084Sjohnlev * DMA attributes and contiguity constraints. 
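 *
 * Sketch of how the DMA attributes reduce to pfn terms in the code below
 * (device numbers are hypothetical): a device limited to 32-bit addresses,
 * 64K alignment and a 1M segment gives
 *
 *	lo_mfn   = mmu_btop(0x0);		// 0
 *	hi_mfn   = mmu_btop(0xffffffff);	// 0xfffff
 *	pfnalign = mmu_btop(0x10000);		// 16-page alignment
 *	pfnseg   = mmu_btop(0xfffff);		// 0xff, pages per segment - 1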
22845084Sjohnlev */ 22855084Sjohnlev /*ARGSUSED*/ 22865084Sjohnlev page_t * 22875084Sjohnlev page_create_io( 22885084Sjohnlev struct vnode *vp, 22895084Sjohnlev u_offset_t off, 22905084Sjohnlev uint_t bytes, 22915084Sjohnlev uint_t flags, 22925084Sjohnlev struct as *as, 22935084Sjohnlev caddr_t vaddr, 22945084Sjohnlev ddi_dma_attr_t *mattr) 22955084Sjohnlev { 22965084Sjohnlev mfn_t max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL); 22975084Sjohnlev page_t *pp_first; /* list to return */ 22985084Sjohnlev page_t *pp_last; /* last in list to return */ 22995084Sjohnlev page_t *pp, **poolp, **pplist = NULL, *expp; 23005084Sjohnlev int i, extpages = 0, npages = 0, contig, anyaddr, extra; 23015084Sjohnlev mfn_t lo_mfn; 23025084Sjohnlev mfn_t hi_mfn; 23035084Sjohnlev mfn_t mfn, tmfn; 23045084Sjohnlev mfn_t *mfnlist = 0; 23055084Sjohnlev pgcnt_t pfnalign = 0; 23065084Sjohnlev int align, order, nbits, extents; 23075084Sjohnlev uint64_t pfnseg; 23085084Sjohnlev int attempt = 0, is_domu = 0; 23095084Sjohnlev int asked_hypervisor = 0; 23105084Sjohnlev uint_t kflags; 23115084Sjohnlev 23125084Sjohnlev ASSERT(mattr != NULL); 23135084Sjohnlev lo_mfn = mmu_btop(mattr->dma_attr_addr_lo); 23145084Sjohnlev hi_mfn = mmu_btop(mattr->dma_attr_addr_hi); 23155084Sjohnlev align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer); 23165084Sjohnlev if (align > MMU_PAGESIZE) 23175084Sjohnlev pfnalign = mmu_btop(align); 23185084Sjohnlev pfnseg = mmu_btop(mattr->dma_attr_seg); 23195084Sjohnlev 23205084Sjohnlev /* 23215084Sjohnlev * Clear the contig flag if only one page is needed. 23225084Sjohnlev */ 23235084Sjohnlev contig = (flags & PG_PHYSCONTIG); 23245084Sjohnlev flags &= ~PG_PHYSCONTIG; 23255084Sjohnlev bytes = P2ROUNDUP(bytes, MMU_PAGESIZE); 23265084Sjohnlev if (bytes == MMU_PAGESIZE) 23275084Sjohnlev contig = 0; 23285084Sjohnlev 23295084Sjohnlev /* 23305084Sjohnlev * Check if any old page in the system is fine. 23315084Sjohnlev * DomU should always go down this path. 23325084Sjohnlev */ 23335084Sjohnlev is_domu = !DOMAIN_IS_INITDOMAIN(xen_info); 23345084Sjohnlev anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn && !pfnalign; 23355084Sjohnlev if ((!contig && anyaddr) || is_domu) { 23365084Sjohnlev pp = page_create_va(vp, off, bytes, flags, &kvseg, vaddr); 23375084Sjohnlev if (pp) 23385084Sjohnlev return (pp); 23395084Sjohnlev else if (is_domu) 23405084Sjohnlev return (NULL); /* no memory available */ 23415084Sjohnlev } 23425084Sjohnlev /* 23435084Sjohnlev * DomU should never reach here 23445084Sjohnlev */ 23455084Sjohnlev try_again: 23465084Sjohnlev /* 23475084Sjohnlev * We could just want unconstrained but contig pages. 23485084Sjohnlev */ 23495084Sjohnlev if (anyaddr && contig && pfnseg >= max_mfn) { 23505084Sjohnlev /* 23515084Sjohnlev * Look for free contig pages to satisfy the request. 23525084Sjohnlev */ 23535084Sjohnlev pp_first = find_contig_free(bytes, flags); 23545084Sjohnlev if (pp_first != NULL) 23555084Sjohnlev goto done; 23565084Sjohnlev } 23575084Sjohnlev /* 23585084Sjohnlev * See if we want pages for a legacy device 23595084Sjohnlev */ 23605084Sjohnlev if (hi_mfn < PFN_16MEG) 23615084Sjohnlev poolp = &io_pool_16m; 23625084Sjohnlev else 23635084Sjohnlev poolp = &io_pool_4g; 23645084Sjohnlev try_smaller: 23655084Sjohnlev /* 23665084Sjohnlev * Take pages from I/O pool. We'll use pages from the highest MFN 23675084Sjohnlev * range possible. 
23685084Sjohnlev */ 23695084Sjohnlev pp_first = pp_last = NULL; 23705084Sjohnlev npages = mmu_btop(bytes); 23715084Sjohnlev mutex_enter(&io_pool_lock); 23725084Sjohnlev for (pp = *poolp; pp && npages > 0; ) { 23735084Sjohnlev pp = pp->p_prev; 23745084Sjohnlev 23755084Sjohnlev /* 23765084Sjohnlev * skip pages above allowable range 23775084Sjohnlev */ 23785084Sjohnlev mfn = mfn_list[pp->p_pagenum]; 23795084Sjohnlev if (hi_mfn < mfn) 23805084Sjohnlev goto skip; 23815084Sjohnlev 23825084Sjohnlev /* 23835084Sjohnlev * stop at pages below allowable range 23845084Sjohnlev */ 23855084Sjohnlev if (lo_mfn > mfn) 23865084Sjohnlev break; 23875084Sjohnlev restart: 23885084Sjohnlev if (pp_last == NULL) { 23895084Sjohnlev /* 23905084Sjohnlev * Check alignment 23915084Sjohnlev */ 23925084Sjohnlev tmfn = mfn - (npages - 1); 23935084Sjohnlev if (pfnalign) { 23945084Sjohnlev if (tmfn != P2ROUNDUP(tmfn, pfnalign)) 23955084Sjohnlev goto skip; /* not properly aligned */ 23965084Sjohnlev } 23975084Sjohnlev /* 23985084Sjohnlev * Check segment 23995084Sjohnlev */ 24005084Sjohnlev if ((mfn & pfnseg) < (tmfn & pfnseg)) 24015084Sjohnlev goto skip; /* crosses segment boundary */ 24025084Sjohnlev /* 24035084Sjohnlev * Start building page list 24045084Sjohnlev */ 24055084Sjohnlev pp_first = pp_last = pp; 24065084Sjohnlev npages--; 24075084Sjohnlev } else { 24085084Sjohnlev /* 24095084Sjohnlev * check physical contiguity if required 24105084Sjohnlev */ 24115084Sjohnlev if (contig && 24125084Sjohnlev mfn_list[pp_first->p_pagenum] != mfn + 1) { 24135084Sjohnlev /* 24145084Sjohnlev * not a contiguous page, restart list. 24155084Sjohnlev */ 24165084Sjohnlev pp_last = NULL; 24175084Sjohnlev npages = mmu_btop(bytes); 24185084Sjohnlev goto restart; 24195084Sjohnlev } else { /* add page to list */ 24205084Sjohnlev pp_first = pp; 24215084Sjohnlev --npages; 24225084Sjohnlev } 24235084Sjohnlev } 24245084Sjohnlev skip: 24255084Sjohnlev if (pp == *poolp) 24265084Sjohnlev break; 24275084Sjohnlev } 24285084Sjohnlev 24295084Sjohnlev /* 24305084Sjohnlev * If we didn't find memory. Try the more constrained pool, then 24315084Sjohnlev * sweep free pages into the DMA pool and try again. If we fail 24325084Sjohnlev * repeatedly, ask the Hypervisor for help. 24335084Sjohnlev */ 24345084Sjohnlev if (npages != 0) { 24355084Sjohnlev mutex_exit(&io_pool_lock); 24365084Sjohnlev /* 24375084Sjohnlev * If we were looking in the less constrained pool and didn't 24385084Sjohnlev * find pages, try the more constrained pool. 24395084Sjohnlev */ 24405084Sjohnlev if (poolp == &io_pool_4g) { 24415084Sjohnlev poolp = &io_pool_16m; 24425084Sjohnlev goto try_smaller; 24435084Sjohnlev } 24445084Sjohnlev kmem_reap(); 24455084Sjohnlev if (++attempt < 4) { 24465084Sjohnlev /* 24475084Sjohnlev * Grab some more io_pool pages 24485084Sjohnlev */ 24495084Sjohnlev (void) populate_io_pool(); 24505084Sjohnlev goto try_again; 24515084Sjohnlev } 24525084Sjohnlev 24535084Sjohnlev if (asked_hypervisor++) 24545084Sjohnlev return (NULL); /* really out of luck */ 24555084Sjohnlev /* 24565084Sjohnlev * Hypervisor exchange doesn't handle segment or alignment 24575084Sjohnlev * constraints 24585084Sjohnlev */ 24595084Sjohnlev if (mattr->dma_attr_seg < mattr->dma_attr_addr_hi || pfnalign) 24605084Sjohnlev return (NULL); 24615084Sjohnlev /* 24625084Sjohnlev * Try exchanging pages with the hypervisor. 24635084Sjohnlev */ 24645084Sjohnlev npages = mmu_btop(bytes); 24655084Sjohnlev kflags = flags & PG_WAIT ? 
KM_SLEEP : KM_NOSLEEP; 24665084Sjohnlev /* 24675084Sjohnlev * Hypervisor will allocate extents, if we want contig pages 24685084Sjohnlev * extent must be >= npages 24695084Sjohnlev */ 24705084Sjohnlev if (contig) { 24715084Sjohnlev order = highbit(npages) - 1; 24725084Sjohnlev if (npages & ((1 << order) - 1)) 24735084Sjohnlev order++; 24745084Sjohnlev extpages = 1 << order; 24755084Sjohnlev } else { 24765084Sjohnlev order = 0; 24775084Sjohnlev extpages = npages; 24785084Sjohnlev } 24795084Sjohnlev if (extpages > npages) { 24805084Sjohnlev extra = extpages - npages; 24815084Sjohnlev if (!page_resv(extra, kflags)) 24825084Sjohnlev return (NULL); 24835084Sjohnlev } 24845084Sjohnlev pplist = kmem_alloc(extpages * sizeof (page_t *), kflags); 24855084Sjohnlev if (pplist == NULL) 24865084Sjohnlev goto fail; 24875084Sjohnlev mfnlist = kmem_alloc(extpages * sizeof (mfn_t), kflags); 24885084Sjohnlev if (mfnlist == NULL) 24895084Sjohnlev goto fail; 24905084Sjohnlev pp = page_create_va(vp, off, npages * PAGESIZE, flags, 24915084Sjohnlev &kvseg, vaddr); 24925084Sjohnlev if (pp == NULL) 24935084Sjohnlev goto fail; 24945084Sjohnlev pp_first = pp; 24955084Sjohnlev if (extpages > npages) { 24965084Sjohnlev /* 24975084Sjohnlev * fill out the rest of extent pages to swap with the 24985084Sjohnlev * hypervisor 24995084Sjohnlev */ 25005084Sjohnlev for (i = 0; i < extra; i++) { 25015084Sjohnlev expp = page_create_va(vp, 25025084Sjohnlev (u_offset_t)(uintptr_t)io_pool_kva, 25035084Sjohnlev PAGESIZE, flags, &kvseg, io_pool_kva); 25045084Sjohnlev if (expp == NULL) 25055084Sjohnlev goto balloon_fail; 25065084Sjohnlev (void) hat_pageunload(expp, HAT_FORCE_PGUNLOAD); 25075084Sjohnlev page_io_unlock(expp); 25085084Sjohnlev page_hashout(expp, NULL); 25095084Sjohnlev page_io_lock(expp); 25105084Sjohnlev /* 25115084Sjohnlev * add page to end of list 25125084Sjohnlev */ 25135084Sjohnlev expp->p_prev = pp_first->p_prev; 25145084Sjohnlev expp->p_next = pp_first; 25155084Sjohnlev expp->p_prev->p_next = expp; 25165084Sjohnlev pp_first->p_prev = expp; 25175084Sjohnlev } 25185084Sjohnlev 25195084Sjohnlev } 25205084Sjohnlev for (i = 0; i < extpages; i++) { 25215084Sjohnlev pplist[i] = pp; 25225084Sjohnlev pp = pp->p_next; 25235084Sjohnlev } 25245084Sjohnlev nbits = highbit(mattr->dma_attr_addr_hi); 25255084Sjohnlev extents = contig ? 
1 : npages; 25265084Sjohnlev if (balloon_replace_pages(extents, pplist, nbits, order, 25275529Ssmaybe mfnlist) != extents) { 25285529Ssmaybe if (ioalloc_dbg) 25295529Ssmaybe cmn_err(CE_NOTE, "request to hypervisor for" 25305529Ssmaybe " %d pages, maxaddr %" PRIx64 " failed", 25315529Ssmaybe extpages, mattr->dma_attr_addr_hi); 25325084Sjohnlev goto balloon_fail; 25335529Ssmaybe } 25345084Sjohnlev 25355084Sjohnlev kmem_free(pplist, extpages * sizeof (page_t *)); 25365084Sjohnlev kmem_free(mfnlist, extpages * sizeof (mfn_t)); 25375084Sjohnlev /* 25385084Sjohnlev * Return any excess pages to free list 25395084Sjohnlev */ 25405084Sjohnlev if (extpages > npages) { 25415084Sjohnlev for (i = 0; i < extra; i++) { 25425084Sjohnlev pp = pp_first->p_prev; 25435084Sjohnlev page_sub(&pp_first, pp); 25445084Sjohnlev page_io_unlock(pp); 25455084Sjohnlev page_unresv(1); 25465084Sjohnlev page_free(pp, 1); 25475084Sjohnlev } 25485084Sjohnlev } 25495084Sjohnlev check_dma(mattr, pp_first, mmu_btop(bytes)); 25505084Sjohnlev return (pp_first); 25515084Sjohnlev } 25525084Sjohnlev 25535084Sjohnlev /* 25545084Sjohnlev * Found the pages, now snip them from the list 25555084Sjohnlev */ 25565084Sjohnlev page_io_pool_sub(poolp, pp_first, pp_last); 25575084Sjohnlev io_pool_cnt -= mmu_btop(bytes); 25585084Sjohnlev if (io_pool_cnt < io_pool_cnt_lowater) 25595084Sjohnlev io_pool_cnt_lowater = io_pool_cnt; /* io pool low water mark */ 25605084Sjohnlev mutex_exit(&io_pool_lock); 25615084Sjohnlev done: 25625084Sjohnlev check_dma(mattr, pp_first, mmu_btop(bytes)); 25635084Sjohnlev pp = pp_first; 25645084Sjohnlev do { 25655084Sjohnlev if (!page_hashin(pp, vp, off, NULL)) { 25665084Sjohnlev panic("pg_create_io: hashin failed pp %p, vp %p," 25675084Sjohnlev " off %llx", 25685084Sjohnlev (void *)pp, (void *)vp, off); 25695084Sjohnlev } 25705084Sjohnlev off += MMU_PAGESIZE; 25715084Sjohnlev PP_CLRFREE(pp); 25725084Sjohnlev PP_CLRAGED(pp); 25735084Sjohnlev page_set_props(pp, P_REF); 25745084Sjohnlev page_io_lock(pp); 25755084Sjohnlev pp = pp->p_next; 25765084Sjohnlev } while (pp != pp_first); 25775084Sjohnlev return (pp_first); 25785084Sjohnlev balloon_fail: 25795084Sjohnlev /* 25805084Sjohnlev * Return pages to free list and return failure 25815084Sjohnlev */ 25825084Sjohnlev while (pp_first != NULL) { 25835084Sjohnlev pp = pp_first; 25845084Sjohnlev page_sub(&pp_first, pp); 25855084Sjohnlev page_io_unlock(pp); 25865084Sjohnlev if (pp->p_vnode != NULL) 25875084Sjohnlev page_hashout(pp, NULL); 25885084Sjohnlev page_free(pp, 1); 25895084Sjohnlev } 25905084Sjohnlev fail: 25915084Sjohnlev if (pplist) 25925084Sjohnlev kmem_free(pplist, extpages * sizeof (page_t *)); 25935084Sjohnlev if (mfnlist) 25945084Sjohnlev kmem_free(mfnlist, extpages * sizeof (mfn_t)); 25955084Sjohnlev page_unresv(extpages - npages); 25965084Sjohnlev return (NULL); 25975084Sjohnlev } 25985084Sjohnlev 25995084Sjohnlev /* 26005084Sjohnlev * Lock and return the page with the highest mfn that we can find. last_mfn 26015084Sjohnlev * holds the last one found, so the next search can start from there. We 26025084Sjohnlev * also keep a counter so that we don't loop forever if the machine has no 26035084Sjohnlev * free pages. 26045084Sjohnlev * 26055084Sjohnlev * This is called from the balloon thread to find pages to give away. new_high 26065084Sjohnlev * is used when new mfn's have been added to the system - we will reset our 26075084Sjohnlev * search if the new mfn's are higher than our current search position. 
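 *
 * Scan shape (sketch of the loop below): a descending walk that wraps at 0
 * and gives up after mfn_count probes, so it terminates even when no free
 * page exists:
 *
 *	for (; loop_count < mfn_count; loop_count++, last_mfn--) {
 *		if (last_mfn == 0)
 *			last_mfn = cached_max_mfn;	// wrap to the top
 *		// ... skip foreign mfns, try to lock the page ...
 *	}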
26085084Sjohnlev */ 26095084Sjohnlev page_t * 26105084Sjohnlev page_get_high_mfn(mfn_t new_high) 26115084Sjohnlev { 26125084Sjohnlev static mfn_t last_mfn = 0; 26135084Sjohnlev pfn_t pfn; 26145084Sjohnlev page_t *pp; 26155084Sjohnlev ulong_t loop_count = 0; 26165084Sjohnlev 26175084Sjohnlev if (new_high > last_mfn) 26185084Sjohnlev last_mfn = new_high; 26195084Sjohnlev 26205084Sjohnlev for (; loop_count < mfn_count; loop_count++, last_mfn--) { 26215084Sjohnlev if (last_mfn == 0) { 26225084Sjohnlev last_mfn = cached_max_mfn; 26235084Sjohnlev } 26245084Sjohnlev 26255084Sjohnlev pfn = mfn_to_pfn(last_mfn); 26265084Sjohnlev if (pfn & PFN_IS_FOREIGN_MFN) 26275084Sjohnlev continue; 26285084Sjohnlev 26295084Sjohnlev /* See if the page is free. If so, lock it. */ 26305084Sjohnlev pp = page_numtopp_alloc(pfn); 26315084Sjohnlev if (pp == NULL) 26325084Sjohnlev continue; 26335084Sjohnlev PP_CLRFREE(pp); 26345084Sjohnlev 26355084Sjohnlev ASSERT(PAGE_EXCL(pp)); 26365084Sjohnlev ASSERT(pp->p_vnode == NULL); 26375084Sjohnlev ASSERT(!hat_page_is_mapped(pp)); 26385084Sjohnlev last_mfn--; 26395084Sjohnlev return (pp); 26405084Sjohnlev } 26415084Sjohnlev return (NULL); 26425084Sjohnlev } 26435084Sjohnlev 26445084Sjohnlev #else /* !__xpv */ 26455084Sjohnlev 26460Sstevel@tonic-gate /* 26470Sstevel@tonic-gate * get a page from any list with the given mnode 26480Sstevel@tonic-gate */ 26495084Sjohnlev static page_t * 26500Sstevel@tonic-gate page_get_mnode_anylist(ulong_t origbin, uchar_t szc, uint_t flags, 26510Sstevel@tonic-gate int mnode, int mtype, ddi_dma_attr_t *dma_attr) 26520Sstevel@tonic-gate { 26532961Sdp78419 kmutex_t *pcm; 26542961Sdp78419 int i; 26552961Sdp78419 page_t *pp; 26562961Sdp78419 page_t *first_pp; 26572961Sdp78419 uint64_t pgaddr; 26582961Sdp78419 ulong_t bin; 26592961Sdp78419 int mtypestart; 26602961Sdp78419 int plw_initialized; 26612961Sdp78419 page_list_walker_t plw; 26620Sstevel@tonic-gate 26630Sstevel@tonic-gate VM_STAT_ADD(pga_vmstats.pgma_alloc); 26640Sstevel@tonic-gate 26650Sstevel@tonic-gate ASSERT((flags & PG_MATCH_COLOR) == 0); 26660Sstevel@tonic-gate ASSERT(szc == 0); 26670Sstevel@tonic-gate ASSERT(dma_attr != NULL); 26680Sstevel@tonic-gate 26690Sstevel@tonic-gate MTYPE_START(mnode, mtype, flags); 26700Sstevel@tonic-gate if (mtype < 0) { 26710Sstevel@tonic-gate VM_STAT_ADD(pga_vmstats.pgma_allocempty); 26720Sstevel@tonic-gate return (NULL); 26730Sstevel@tonic-gate } 26740Sstevel@tonic-gate 26750Sstevel@tonic-gate mtypestart = mtype; 26760Sstevel@tonic-gate 26770Sstevel@tonic-gate bin = origbin; 26780Sstevel@tonic-gate 26790Sstevel@tonic-gate /* 26800Sstevel@tonic-gate * check up to page_colors + 1 bins - origbin may be checked twice 26810Sstevel@tonic-gate * because of BIN_STEP skip 26820Sstevel@tonic-gate */ 26830Sstevel@tonic-gate do { 26842961Sdp78419 plw_initialized = 0; 26852961Sdp78419 26862961Sdp78419 for (plw.plw_count = 0; 26872961Sdp78419 plw.plw_count < page_colors; plw.plw_count++) { 26882961Sdp78419 26890Sstevel@tonic-gate if (PAGE_FREELISTS(mnode, szc, bin, mtype) == NULL) 26900Sstevel@tonic-gate goto nextfreebin; 26910Sstevel@tonic-gate 26920Sstevel@tonic-gate pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST); 26930Sstevel@tonic-gate mutex_enter(pcm); 26940Sstevel@tonic-gate pp = PAGE_FREELISTS(mnode, szc, bin, mtype); 26950Sstevel@tonic-gate first_pp = pp; 26960Sstevel@tonic-gate while (pp != NULL) { 26970Sstevel@tonic-gate if (page_trylock(pp, SE_EXCL) == 0) { 26980Sstevel@tonic-gate pp = pp->p_next; 26990Sstevel@tonic-gate if (pp == first_pp) { 
27000Sstevel@tonic-gate pp = NULL; 27010Sstevel@tonic-gate } 27020Sstevel@tonic-gate continue; 27030Sstevel@tonic-gate } 27040Sstevel@tonic-gate 27050Sstevel@tonic-gate ASSERT(PP_ISFREE(pp)); 27060Sstevel@tonic-gate ASSERT(PP_ISAGED(pp)); 27070Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL); 27080Sstevel@tonic-gate ASSERT(pp->p_hash == NULL); 27090Sstevel@tonic-gate ASSERT(pp->p_offset == (u_offset_t)-1); 27100Sstevel@tonic-gate ASSERT(pp->p_szc == szc); 27110Sstevel@tonic-gate ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode); 27120Sstevel@tonic-gate /* check if page within DMA attributes */ 27133446Smrj pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum)); 27140Sstevel@tonic-gate if ((pgaddr >= dma_attr->dma_attr_addr_lo) && 27150Sstevel@tonic-gate (pgaddr + MMU_PAGESIZE - 1 <= 27160Sstevel@tonic-gate dma_attr->dma_attr_addr_hi)) { 27170Sstevel@tonic-gate break; 27180Sstevel@tonic-gate } 27190Sstevel@tonic-gate 27200Sstevel@tonic-gate /* continue looking */ 27210Sstevel@tonic-gate page_unlock(pp); 27220Sstevel@tonic-gate pp = pp->p_next; 27230Sstevel@tonic-gate if (pp == first_pp) 27240Sstevel@tonic-gate pp = NULL; 27250Sstevel@tonic-gate 27260Sstevel@tonic-gate } 27270Sstevel@tonic-gate if (pp != NULL) { 27280Sstevel@tonic-gate ASSERT(mtype == PP_2_MTYPE(pp)); 27290Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 27300Sstevel@tonic-gate 27310Sstevel@tonic-gate /* found a page with specified DMA attributes */ 27320Sstevel@tonic-gate page_sub(&PAGE_FREELISTS(mnode, szc, bin, 27330Sstevel@tonic-gate mtype), pp); 2734414Skchow page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST); 27350Sstevel@tonic-gate 27360Sstevel@tonic-gate if ((PP_ISFREE(pp) == 0) || 27370Sstevel@tonic-gate (PP_ISAGED(pp) == 0)) { 27380Sstevel@tonic-gate cmn_err(CE_PANIC, "page %p is not free", 27390Sstevel@tonic-gate (void *)pp); 27400Sstevel@tonic-gate } 27410Sstevel@tonic-gate 27420Sstevel@tonic-gate mutex_exit(pcm); 27430Sstevel@tonic-gate check_dma(dma_attr, pp, 1); 27440Sstevel@tonic-gate VM_STAT_ADD(pga_vmstats.pgma_allocok); 27450Sstevel@tonic-gate return (pp); 27460Sstevel@tonic-gate } 27470Sstevel@tonic-gate mutex_exit(pcm); 27480Sstevel@tonic-gate nextfreebin: 27492961Sdp78419 if (plw_initialized == 0) { 27502961Sdp78419 page_list_walk_init(szc, 0, bin, 1, 0, &plw); 27512961Sdp78419 ASSERT(plw.plw_ceq_dif == page_colors); 27522961Sdp78419 plw_initialized = 1; 27532961Sdp78419 } 27540Sstevel@tonic-gate 27552961Sdp78419 if (plw.plw_do_split) { 27562961Sdp78419 pp = page_freelist_split(szc, bin, mnode, 27572961Sdp78419 mtype, 27582961Sdp78419 mmu_btop(dma_attr->dma_attr_addr_hi + 1), 27592961Sdp78419 &plw); 27602961Sdp78419 if (pp != NULL) 27612961Sdp78419 return (pp); 27622961Sdp78419 } 27632961Sdp78419 27642961Sdp78419 bin = page_list_walk_next_bin(szc, bin, &plw); 27650Sstevel@tonic-gate } 27662961Sdp78419 2767414Skchow MTYPE_NEXT(mnode, mtype, flags); 2768414Skchow } while (mtype >= 0); 27690Sstevel@tonic-gate 27700Sstevel@tonic-gate /* failed to find a page in the freelist; try it in the cachelist */ 27710Sstevel@tonic-gate 27720Sstevel@tonic-gate /* reset mtype start for cachelist search */ 27730Sstevel@tonic-gate mtype = mtypestart; 27740Sstevel@tonic-gate ASSERT(mtype >= 0); 27750Sstevel@tonic-gate 27760Sstevel@tonic-gate /* start with the bin of matching color */ 27770Sstevel@tonic-gate bin = origbin; 27780Sstevel@tonic-gate 27790Sstevel@tonic-gate do { 27800Sstevel@tonic-gate for (i = 0; i <= page_colors; i++) { 27810Sstevel@tonic-gate if (PAGE_CACHELISTS(mnode, bin, mtype) == NULL) 27820Sstevel@tonic-gate goto nextcachebin; 
	do {
		for (i = 0; i <= page_colors; i++) {
			if (PAGE_CACHELISTS(mnode, bin, mtype) == NULL)
				goto nextcachebin;
			pcm = PC_BIN_MUTEX(mnode, bin, PG_CACHE_LIST);
			mutex_enter(pcm);
			pp = PAGE_CACHELISTS(mnode, bin, mtype);
			first_pp = pp;
			while (pp != NULL) {
				if (page_trylock(pp, SE_EXCL) == 0) {
					pp = pp->p_next;
					if (pp == first_pp)
						break;
					continue;
				}
				ASSERT(pp->p_vnode);
				ASSERT(PP_ISAGED(pp) == 0);
				ASSERT(pp->p_szc == 0);
				ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);

				/* check if page within DMA attributes */

				pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
				if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
				    (pgaddr + MMU_PAGESIZE - 1 <=
				    dma_attr->dma_attr_addr_hi)) {
					break;
				}

				/* continue looking */
				page_unlock(pp);
				pp = pp->p_next;
				if (pp == first_pp)
					pp = NULL;
			}

			if (pp != NULL) {
				ASSERT(mtype == PP_2_MTYPE(pp));
				ASSERT(pp->p_szc == 0);

				/* found a page with specified DMA attributes */
				page_sub(&PAGE_CACHELISTS(mnode, bin,
				    mtype), pp);
				page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST);

				mutex_exit(pcm);
				ASSERT(pp->p_vnode);
				ASSERT(PP_ISAGED(pp) == 0);
				check_dma(dma_attr, pp, 1);
				VM_STAT_ADD(pga_vmstats.pgma_allocok);
				return (pp);
			}
			mutex_exit(pcm);
nextcachebin:
			bin += (i == 0) ? BIN_STEP : 1;
			bin &= page_colors_mask;
		}
		MTYPE_NEXT(mnode, mtype, flags);
	} while (mtype >= 0);

	VM_STAT_ADD(pga_vmstats.pgma_allocfailed);
	return (NULL);
}
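
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * could use page_get_anylist(), defined below, to pull a single page
 * that a 32-bit DMA engine can address.  The EXAMPLE_SKETCH guard, the
 * function, and the attribute values are hypothetical; real callers take
 * the limits from their hardware.
 */
#ifdef EXAMPLE_SKETCH	/* never defined; sketch only */
static page_t *page_get_anylist(struct vnode *, u_offset_t, struct as *,
    caddr_t, size_t, uint_t, ddi_dma_attr_t *, lgrp_t *);

static page_t *
example_get_dma32_page(struct vnode *vp, u_offset_t off)
{
	ddi_dma_attr_t attr;

	bzero(&attr, sizeof (attr));
	attr.dma_attr_addr_lo = 0;
	attr.dma_attr_addr_hi = 0xffffffffULL;	/* stay below 4GB */
	attr.dma_attr_align = MMU_PAGESIZE;	/* only page alignment works */

	return (page_get_anylist(vp, off, &kas, NULL, MMU_PAGESIZE,
	    PG_EXCL | PG_WAIT, &attr, NULL));
}
#endif	/* EXAMPLE_SKETCH */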

/*
 * This function is similar to page_get_freelist()/page_get_cachelist()
 * but it searches both lists to find a page with the specified
 * color (or no color) and DMA attributes.  The search is done in the
 * freelist first and then in the cache list within the highest memory
 * range (based on DMA attributes) before searching in the lower
 * memory ranges.
 *
 * Note: This function is called only by page_create_io().
 */
/*ARGSUSED*/
static page_t *
page_get_anylist(struct vnode *vp, u_offset_t off, struct as *as, caddr_t vaddr,
    size_t size, uint_t flags, ddi_dma_attr_t *dma_attr, lgrp_t *lgrp)
{
	uint_t bin;
	int mtype;
	page_t *pp;
	int n;
	int m;
	int szc;
	int fullrange;
	int mnode;
	int local_failed_stat = 0;
	lgrp_mnode_cookie_t lgrp_cookie;

	VM_STAT_ADD(pga_vmstats.pga_alloc);

	/* only base pagesize currently supported */
	if (size != MMU_PAGESIZE)
		return (NULL);

	/*
	 * If we're passed a specific lgroup, we use it.  Otherwise,
	 * assume first-touch placement is desired.
	 */
	if (!LGRP_EXISTS(lgrp))
		lgrp = lgrp_home_lgrp();

	/* LINTED */
	AS_2_BIN(as, seg, vp, vaddr, bin, 0);

	/*
	 * Only hold one freelist or cachelist lock at a time, that way we
	 * can start anywhere and not have to worry about lock
	 * ordering.
	 */
	if (dma_attr == NULL) {
		n = 0;
		m = mnoderangecnt - 1;
		fullrange = 1;
		VM_STAT_ADD(pga_vmstats.pga_nulldmaattr);
	} else {
		pfn_t pfnlo = mmu_btop(dma_attr->dma_attr_addr_lo);
		pfn_t pfnhi = mmu_btop(dma_attr->dma_attr_addr_hi);

		/*
		 * We can only guarantee alignment to a page boundary.
		 */
		if (dma_attr->dma_attr_align > MMU_PAGESIZE)
			return (NULL);

		n = pfn_2_mtype(pfnlo);
		m = pfn_2_mtype(pfnhi);

		fullrange = ((pfnlo == mnoderanges[n].mnr_pfnlo) &&
		    (pfnhi >= mnoderanges[m].mnr_pfnhi));
	}
	VM_STAT_COND_ADD(fullrange == 0, pga_vmstats.pga_notfullrange);

	if (n > m)
		return (NULL);

	szc = 0;

	/* cycling thru mtype handled by RANGE0 if n == 0 */
	if (n == 0) {
		flags |= PGI_MT_RANGE0;
		n = m;
	}

	/*
	 * Try local memory node first, but try remote if we can't
	 * get a page of the right color.
	 */
	LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_HIER);
	while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
		/*
		 * allocate pages from high pfn to low.
		 */
		for (mtype = m; mtype >= n; mtype--) {
			if (fullrange != 0) {
				pp = page_get_mnode_freelist(mnode,
				    bin, mtype, szc, flags);
				if (pp == NULL) {
					pp = page_get_mnode_cachelist(
					    bin, flags, mnode, mtype);
				}
			} else {
				pp = page_get_mnode_anylist(bin, szc,
				    flags, mnode, mtype, dma_attr);
			}
			if (pp != NULL) {
				VM_STAT_ADD(pga_vmstats.pga_allocok);
				check_dma(dma_attr, pp, 1);
				return (pp);
			}
		}
		if (!local_failed_stat) {
			lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1);
			local_failed_stat = 1;
		}
	}
	VM_STAT_ADD(pga_vmstats.pga_allocfailed);

	return (NULL);
}
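
/*
 * Illustrative sketch (not part of the original file): how a PSM-style
 * consumer might call page_create_io(), defined below, to back an I/O
 * buffer with pages below 16MB (e.g. for ISA DMA).  The guard, the
 * function, and the attribute values are hypothetical; the local extern
 * matches the definition that follows.
 */
#ifdef EXAMPLE_SKETCH	/* never defined; sketch only */
extern page_t *page_create_io(struct vnode *, u_offset_t, uint_t, uint_t,
    struct as *, caddr_t, ddi_dma_attr_t *);

static page_t *
example_create_isa_pages(struct vnode *vp, u_offset_t off, uint_t bytes)
{
	ddi_dma_attr_t attr;

	bzero(&attr, sizeof (attr));
	attr.dma_attr_addr_lo = 0;
	attr.dma_attr_addr_hi = 0xffffffULL;	/* ISA limit: below 16MB */
	attr.dma_attr_align = MMU_PAGESIZE;

	return (page_create_io(vp, off, bytes, PG_EXCL | PG_WAIT,
	    &kas, NULL, &attr));
}
#endif	/* EXAMPLE_SKETCH */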

/*
 * page_create_io()
 *
 * This function is a copy of page_create_va() with an additional
 * argument 'mattr' that specifies DMA memory requirements to
 * the page list functions.  This function is used by the segkmem
 * allocator so it is used only to create new pages (i.e., PG_EXCL
 * is set).
 *
 * Note: This interface is currently used by x86 PSM only and is
 * not fully specified, so the commitment level is only that of a
 * private interface specific to x86.  This interface uses the
 * PSM-specific page_get_anylist() interface.
 */

#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
			break; \
	} \
}


page_t *
page_create_io(
	struct vnode	*vp,
	u_offset_t	off,
	uint_t		bytes,
	uint_t		flags,
	struct as	*as,
	caddr_t		vaddr,
	ddi_dma_attr_t	*mattr)	/* DMA memory attributes if any */
{
	page_t		*plist = NULL;
	uint_t		plist_len = 0;
	pgcnt_t		npages;
	page_t		*npp = NULL;
	uint_t		pages_req;
	page_t		*pp;
	kmutex_t	*phm = NULL;
	uint_t		index;

	TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
	    "page_create_start:vp %p off %llx bytes %u flags %x",
	    vp, off, bytes, flags);

	ASSERT((flags & ~(PG_EXCL | PG_WAIT | PG_PHYSCONTIG)) == 0);

	pages_req = npages = mmu_btopr(bytes);

	/*
	 * Do the freemem and pcf accounting.
	 */
	if (!page_create_wait(npages, flags)) {
		return (NULL);
	}

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
	    "page_create_success:vp %p off %llx", vp, off);

	/*
	 * If satisfying this request has left us with too little
	 * memory, start the wheels turning to get some back.  The
	 * first clause of the test prevents waking up the pageout
	 * daemon in situations where it would decide that there's
	 * nothing to do.
	 */
	if (nscan < desscan && freemem < minfree) {
		TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
		    "pageout_cv_signal:freemem %ld", freemem);
		cv_signal(&proc_pageout->p_cv);
	}
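
	/*
	 * For physically contiguous requests, grab the whole run up
	 * front.  page_get_contigpage() may return early once the
	 * remainder fits the device's scatter/gather limit; any pages
	 * still owed are then collected one at a time below.
	 */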
	if (flags & PG_PHYSCONTIG) {

		plist = page_get_contigpage(&npages, mattr, 1);
		if (plist == NULL) {
			page_create_putback(npages);
			return (NULL);
		}

		pp = plist;

		do {
			if (!page_hashin(pp, vp, off, NULL)) {
				panic("page_create_io: hashin failed %p %p %llx",
				    (void *)pp, (void *)vp, off);
			}
			VM_STAT_ADD(page_create_new);
			off += MMU_PAGESIZE;
			PP_CLRFREE(pp);
			PP_CLRAGED(pp);
			page_set_props(pp, P_REF);
			pp = pp->p_next;
		} while (pp != plist);

		if (!npages) {
			check_dma(mattr, plist, pages_req);
			return (plist);
		} else {
			vaddr += (pages_req - npages) << MMU_PAGESHIFT;
		}

		/*
		 * fall-thru:
		 *
		 * page_get_contigpage returns when npages <= sgllen.
		 * Grab the rest of the non-contig pages below from anylist.
		 */
	}

	/*
	 * Loop around collecting the requested number of pages.
	 * Most of the time, we have to `create' a new page.  With
	 * this in mind, pull the page off the free list before
	 * getting the hash lock.  This will minimize the hash
	 * lock hold time, nesting, and the like.  If it turns
	 * out we don't need the page, we put it back at the end.
	 */
	while (npages--) {
		phm = NULL;

		index = PAGE_HASH_FUNC(vp, off);
top:
		ASSERT(phm == NULL);
		ASSERT(index == PAGE_HASH_FUNC(vp, off));
		ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));

		if (npp == NULL) {
			/*
			 * Try to get the page of any color either from
			 * the freelist or from the cache list.
			 */
			npp = page_get_anylist(vp, off, as, vaddr, MMU_PAGESIZE,
			    flags & ~PG_MATCH_COLOR, mattr, NULL);
			if (npp == NULL) {
				if (mattr == NULL) {
					/*
					 * Not looking for a special page;
					 * panic!
					 */
					panic("no page found %d", (int)npages);
				}
				/*
				 * No page found!  This can happen
				 * if we are looking for a page
				 * within a specific memory range
				 * for DMA purposes.  If PG_WAIT is
				 * specified then we wait for a
				 * while and then try again.  The
				 * wait could be forever if we
				 * don't get the page(s) we need.
				 *
				 * Note: XXX We really need a mechanism
				 * to wait for pages in the desired
				 * range.  For now, we wait for any
				 * pages and see if we can use them.
				 */

				if ((mattr != NULL) && (flags & PG_WAIT)) {
					delay(10);
					goto top;
				}
				goto fail; /* undo accounting stuff */
			}

			if (PP_ISAGED(npp) == 0) {
				/*
				 * Since this page came from the
				 * cachelist, we must destroy the
				 * old vnode association.
				 */
				page_hashout(npp, (kmutex_t *)NULL);
			}
		}

		/*
		 * We own this page!
		 */
		ASSERT(PAGE_EXCL(npp));
		ASSERT(npp->p_vnode == NULL);
		ASSERT(!hat_page_is_mapped(npp));
		PP_CLRFREE(npp);
		PP_CLRAGED(npp);

		/*
		 * Here we have a page in our hot little mitts and are
		 * just waiting to stuff it on the appropriate lists.
		 * Get the mutex and check to see if it really does
		 * not exist.
		 */
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		PAGE_HASH_SEARCH(index, pp, vp, off);
		if (pp == NULL) {
			VM_STAT_ADD(page_create_new);
			pp = npp;
			npp = NULL;
			if (!page_hashin(pp, vp, off, phm)) {
				/*
				 * Since we hold the page hash mutex and
				 * just searched for this page, page_hashin
				 * had better not fail.  If it does, that
				 * means some thread did not follow the
				 * page hash mutex rules.  Panic now and
				 * get it over with.  As usual, go down
				 * holding all the locks.
				 */
				ASSERT(MUTEX_HELD(phm));
				panic("page_create: hashin fail %p %p %llx %p",
				    (void *)pp, (void *)vp, off, (void *)phm);

			}
			ASSERT(MUTEX_HELD(phm));
			mutex_exit(phm);
			phm = NULL;

			/*
			 * Hat layer locking need not be done to set
			 * the following bits since the page is not hashed
			 * and was on the free list (i.e., had no mappings).
			 *
			 * Set the reference bit to protect
			 * against immediate pageout
			 *
			 * XXXmh modify freelist code to set reference
			 * bit so we don't have to do it here.
			 */
			page_set_props(pp, P_REF);
		} else {
			ASSERT(MUTEX_HELD(phm));
			mutex_exit(phm);
			phm = NULL;
			/*
			 * NOTE: This should not happen for pages associated
			 * with kernel vnode 'kvp'.
			 */
			/* XX64 - to debug why this happens! */
			ASSERT(!VN_ISKAS(vp));
			if (VN_ISKAS(vp))
				cmn_err(CE_NOTE,
				    "page_create: page not expected "
				    "in hash list for kernel vnode - pp 0x%p",
				    (void *)pp);
			VM_STAT_ADD(page_create_exists);
			goto fail;
		}

		/*
		 * Got a page!  It is locked.  Acquire the i/o
		 * lock since we are going to use the p_next and
		 * p_prev fields to link the requested pages together.
		 */
		page_io_lock(pp);
		page_add(&plist, pp);
		plist = plist->p_next;
		off += MMU_PAGESIZE;
		vaddr += MMU_PAGESIZE;
	}

	check_dma(mattr, plist, pages_req);
	return (plist);
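
	/*
	 * Error exit: put back any unused candidate page, dispose of
	 * the pages collected so far, and undo the page_create_wait()
	 * accounting for whatever VN_DISPOSE() does not free.
	 */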
fail:
	if (npp != NULL) {
		/*
		 * Did not need this page after all.
		 * Put it back on the free list.
		 */
		VM_STAT_ADD(page_create_putbacks);
		PP_SETFREE(npp);
		PP_SETAGED(npp);
		npp->p_offset = (u_offset_t)-1;
		page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
		page_unlock(npp);
	}

	/*
	 * Give up the pages we already got.
	 */
	while (plist != NULL) {
		pp = plist;
		page_sub(&plist, pp);
		page_io_unlock(pp);
		plist_len++;
		/*LINTED: constant in conditional ctx*/
		VN_DISPOSE(pp, B_INVAL, 0, kcred);
	}

	/*
	 * VN_DISPOSE does freemem accounting for the pages in plist
	 * by calling page_free.  So, we need to undo the pcf accounting
	 * for only the remaining pages.
	 */
	VM_STAT_ADD(page_create_putbacks);
	page_create_putback(pages_req - plist_len);

	return (NULL);
}
#endif /* !__xpv */
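
/*
 * Illustrative sketch (not part of the original file): ppcopy(), defined
 * below, duplicates one locked page into another and returns 0 if the
 * copy faulted.  The guard and the function are hypothetical.
 */
#ifdef EXAMPLE_SKETCH	/* never defined; sketch only */
static int
example_duplicate_page(page_t *src, page_t *dst)
{
	ASSERT(PAGE_LOCKED(src));
	ASSERT(PAGE_LOCKED(dst));

	return (ppcopy(src, dst));	/* 1 on success, 0 on fault */
}
#endif	/* EXAMPLE_SKETCH */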

/*
 * Copy the data from the physical page represented by "frompp" to
 * that represented by "topp".  ppcopy uses CPU->cpu_caddr1 and
 * CPU->cpu_caddr2.  It assumes that no one uses either map at interrupt
 * level and no one sleeps with an active mapping there.
 *
 * Note that the ref/mod bits in the page_t's are not affected by
 * this operation, hence it is up to the caller to update them appropriately.
 */
int
ppcopy(page_t *frompp, page_t *topp)
{
	caddr_t		pp_addr1;
	caddr_t		pp_addr2;
	hat_mempte_t	pte1;
	hat_mempte_t	pte2;
	kmutex_t	*ppaddr_mutex;
	label_t		ljb;
	int		ret = 1;

	ASSERT_STACK_ALIGNED();
	ASSERT(PAGE_LOCKED(frompp));
	ASSERT(PAGE_LOCKED(topp));

	if (kpm_enable) {
		pp_addr1 = hat_kpm_page2va(frompp, 0);
		pp_addr2 = hat_kpm_page2va(topp, 0);
		kpreempt_disable();
	} else {
		/*
		 * disable preemption so that the CPU we are running
		 * on can't change underneath us
		 */
		kpreempt_disable();

		pp_addr1 = CPU->cpu_caddr1;
		pp_addr2 = CPU->cpu_caddr2;
		pte1 = CPU->cpu_caddr1pte;
		pte2 = CPU->cpu_caddr2pte;

		ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
		mutex_enter(ppaddr_mutex);

		hat_mempte_remap(page_pptonum(frompp), pp_addr1, pte1,
		    PROT_READ | HAT_STORECACHING_OK, HAT_LOAD_NOCONSIST);
		hat_mempte_remap(page_pptonum(topp), pp_addr2, pte2,
		    PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
		    HAT_LOAD_NOCONSIST);
	}

	if (on_fault(&ljb)) {
		ret = 0;
		goto faulted;
	}
	if (use_sse_pagecopy)
#ifdef __xpv
		page_copy_no_xmm(pp_addr2, pp_addr1);
#else
		hwblkpagecopy(pp_addr1, pp_addr2);
#endif
	else
		bcopy(pp_addr1, pp_addr2, PAGESIZE);

	no_fault();
faulted:
	if (!kpm_enable) {
#ifdef __xpv
		/*
		 * We can't leave unused mappings laying about under the
		 * hypervisor, so blow them away.
		 */
		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr1, 0,
		    UVMF_INVLPG | UVMF_LOCAL) < 0)
			panic("HYPERVISOR_update_va_mapping() failed");
		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
		    UVMF_INVLPG | UVMF_LOCAL) < 0)
			panic("HYPERVISOR_update_va_mapping() failed");
#endif
		mutex_exit(ppaddr_mutex);
	}
	kpreempt_enable();
	return (ret);
}

void
pagezero(page_t *pp, uint_t off, uint_t len)
{
	ASSERT(PAGE_LOCKED(pp));
	pfnzero(page_pptonum(pp), off, len);
}
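
/*
 * Illustrative sketch (not part of the original file): pagezero() zeroes
 * by page_t, pfnzero() by physical frame number; either can clear a
 * partial page.  The guard and the function are hypothetical.
 */
#ifdef EXAMPLE_SKETCH	/* never defined; sketch only */
static void
example_clear_page(page_t *pp)
{
	ASSERT(PAGE_LOCKED(pp));
	pagezero(pp, 0, MMU_PAGESIZE);	/* zero the whole page */
}
#endif	/* EXAMPLE_SKETCH */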

/*
 * Zero the physical page from off to off + len given by pfn
 * without changing the reference and modified bits of page.
 *
 * This uses the CPU's private page address #2; see ppcopy() for more info.
 * pfnzero() must not be called at interrupt level.
 */
void
pfnzero(pfn_t pfn, uint_t off, uint_t len)
{
	caddr_t		pp_addr2;
	hat_mempte_t	pte2;
	kmutex_t	*ppaddr_mutex = NULL;

	ASSERT_STACK_ALIGNED();
	ASSERT(len <= MMU_PAGESIZE);
	ASSERT(off <= MMU_PAGESIZE);
	ASSERT(off + len <= MMU_PAGESIZE);

	if (kpm_enable && !pfn_is_foreign(pfn)) {
		pp_addr2 = hat_kpm_pfn2va(pfn);
		kpreempt_disable();
	} else {
		kpreempt_disable();

		pp_addr2 = CPU->cpu_caddr2;
		pte2 = CPU->cpu_caddr2pte;

		ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
		mutex_enter(ppaddr_mutex);

		hat_mempte_remap(pfn, pp_addr2, pte2,
		    PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
		    HAT_LOAD_NOCONSIST);
	}

	if (use_sse_pagezero) {
#ifdef __xpv
		uint_t rem;

		/*
		 * zero a byte at a time until properly aligned for
		 * block_zero_no_xmm().
		 */
		while (!P2NPHASE(off, ((uint_t)BLOCKZEROALIGN)) && len-- > 0)
			pp_addr2[off++] = 0;

		/*
		 * Now use faster block_zero_no_xmm() for any range
		 * that is properly aligned and sized.
		 */
		rem = P2PHASE(len, ((uint_t)BLOCKZEROALIGN));
		len -= rem;
		if (len != 0) {
			block_zero_no_xmm(pp_addr2 + off, len);
			off += len;
		}

		/*
		 * zero remainder with byte stores.
		 */
		while (rem-- > 0)
			pp_addr2[off++] = 0;
#else
		hwblkclr(pp_addr2 + off, len);
#endif
	} else {
		bzero(pp_addr2 + off, len);
	}

	if (!kpm_enable || pfn_is_foreign(pfn)) {
#ifdef __xpv
		/*
		 * On the hypervisor this page might get used for a page
		 * table before any intervening change to this mapping,
		 * so blow it away.
		 */
		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
		    UVMF_INVLPG) < 0)
			panic("HYPERVISOR_update_va_mapping() failed");
#endif
		mutex_exit(ppaddr_mutex);
	}

	kpreempt_enable();
}

/*
 * Platform-dependent page scrub call.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	/*
	 * For now, we rely on the fact that pagezero() will
	 * always clear UEs.
	 */
	pagezero(pp, off, len);
}

/*
 * Set up two private addresses on a given CPU for use in ppcopy().
 */
void
setup_vaddr_for_ppcopy(struct cpu *cpup)
{
	void *addr;
	hat_mempte_t pte_pa;

	addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
	pte_pa = hat_mempte_setup(addr);
	cpup->cpu_caddr1 = addr;
	cpup->cpu_caddr1pte = pte_pa;

	addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
	pte_pa = hat_mempte_setup(addr);
	cpup->cpu_caddr2 = addr;
	cpup->cpu_caddr2pte = pte_pa;

	mutex_init(&cpup->cpu_ppaddr_mutex, NULL, MUTEX_DEFAULT, NULL);
}

/*
 * Undo setup_vaddr_for_ppcopy
 */
void
teardown_vaddr_for_ppcopy(struct cpu *cpup)
{
	mutex_destroy(&cpup->cpu_ppaddr_mutex);

	hat_mempte_release(cpup->cpu_caddr2, cpup->cpu_caddr2pte);
	cpup->cpu_caddr2pte = 0;
	vmem_free(heap_arena, cpup->cpu_caddr2, mmu_ptob(1));
	cpup->cpu_caddr2 = 0;

	hat_mempte_release(cpup->cpu_caddr1, cpup->cpu_caddr1pte);
	cpup->cpu_caddr1pte = 0;
	vmem_free(heap_arena, cpup->cpu_caddr1, mmu_ptob(1));
	cpup->cpu_caddr1 = 0;
}
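
/*
 * Illustrative sketch (not part of the original file): the two routines
 * above are paired across a CPU's lifetime.  The guard and the function
 * are hypothetical; the real callers live in the CPU configuration code.
 */
#ifdef EXAMPLE_SKETCH	/* never defined; sketch only */
static void
example_ppcopy_vaddr_lifecycle(struct cpu *cp)
{
	setup_vaddr_for_ppcopy(cp);	/* before the CPU may ppcopy() */
	/* ... CPU is online and may use ppcopy()/pfnzero() ... */
	teardown_vaddr_for_ppcopy(cp);	/* after the CPU is quiesced */
}
#endif	/* EXAMPLE_SKETCH */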

/*
 * Create the pageout scanner thread.  The thread starts in routine
 * 'procedure', belongs to process pp, and runs at priority pri.
 */
void
pageout_init(void (*procedure)(), proc_t *pp, pri_t pri)
{
	(void) thread_create(NULL, 0, procedure, NULL, 0, pp, TS_RUN, pri);
}

/*
 * Function for flushing D-cache when performing module relocations
 * to an alternate mapping.  Unnecessary on Intel / AMD platforms.
 */
void
dcache_flushall()
{}

size_t
exec_get_spslew(void)
{
	return (0);
}

/*
 * Allocate a memory page.  The argument 'seed' can be any pseudo-random
 * number to vary where the pages come from.  This is quite a hacked up
 * method -- it works for now, but really needs to be fixed up a bit.
 *
 * We currently use page_create_va() on the kvp with fake offsets,
 * segments and virt address.  This is pretty bogus, but was copied from
 * the old hat_i86.c code.  A better approach would be to specify either
 * mnode random or mnode local and take a page from whatever color has
 * the MOST available - this would have a minimal impact on page coloring.
 */
page_t *
page_get_physical(uintptr_t seed)
{
	page_t *pp;
	u_offset_t offset;
	static struct seg tmpseg;
	static uintptr_t ctr = 0;

	/*
	 * This code is gross, we really need a simpler page allocator.
	 *
	 * We need to assign an offset for the page to call
	 * page_create_va().  To avoid conflicts with other pages, we
	 * get creative with the offset.
	 * For 32 bits, we pick an offset > 4Gig
	 * For 64 bits, pick an offset somewhere in the VA hole.
	 */
	offset = seed;
	if (offset > kernelbase)
		offset -= kernelbase;
	offset <<= MMU_PAGESHIFT;
#if defined(__amd64)
	offset += mmu.hole_start;	/* something in VA hole */
#else
	offset += 1ULL << 40;		/* something > 4 Gig */
#endif

	if (page_resv(1, KM_NOSLEEP) == 0)
		return (NULL);

#ifdef DEBUG
	pp = page_exists(&kvp, offset);
	if (pp != NULL)
		panic("page already exists %p", (void *)pp);
#endif

	pp = page_create_va(&kvp, offset, MMU_PAGESIZE, PG_EXCL,
	    &tmpseg, (caddr_t)(ctr += MMU_PAGESIZE));	/* changing VA usage */
	if (pp == NULL)
		return (NULL);
	page_io_unlock(pp);
	page_hashout(pp, NULL);
	return (pp);
}
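
/*
 * Illustrative sketch (not part of the original file): any convenient
 * pseudo-random value serves as the seed for page_get_physical(); the
 * hypothetical caller below just uses the high-resolution clock.
 */
#ifdef EXAMPLE_SKETCH	/* never defined; sketch only */
static page_t *
example_grab_scratch_page(void)
{
	return (page_get_physical((uintptr_t)gethrtime()));
}
#endif	/* EXAMPLE_SKETCH */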