/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/buf.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/disp.h>
#include <sys/vm.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cred.h>
#include <sys/exec.h>
#include <sys/exechdr.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_vn.h>
#include <vm/page.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>

#include <sys/cpu.h>
#include <sys/vm_machparam.h>
#include <sys/memlist.h>
#include <sys/bootconf.h> /* XXX the memlist stuff belongs in memlist_plat.h */
#include <vm/hat_i86.h>
#include <sys/x86_archext.h>
#include <sys/elf_386.h>
#include <sys/cmn_err.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>

#include <sys/vtrace.h>
#include <sys/ddidmareq.h>
#include <sys/promif.h>
#include <sys/memnode.h>
#include <sys/stack.h>
#include <util/qsort.h>
#include <sys/taskq.h>

#ifdef __xpv

#include <sys/hypervisor.h>
#include <sys/xen_mmu.h>
#include <sys/balloon_impl.h>

/*
 * domain 0 pages usable for DMA are kept pre-allocated and kept in
 * distinct lists, ordered by increasing mfn.
 */
static kmutex_t io_pool_lock;
static page_t *io_pool_4g;	/* pool for 32 bit dma limited devices */
static page_t *io_pool_16m;	/* pool for 24 bit dma limited legacy devices */
static long io_pool_cnt;
static long io_pool_cnt_max = 0;
#define	DEFAULT_IO_POOL_MIN	128
static long io_pool_cnt_min = DEFAULT_IO_POOL_MIN;
static long io_pool_cnt_lowater = 0;
static long io_pool_shrink_attempts; /* how many times did we try to shrink */
static long io_pool_shrinks;	/* how many times did we really shrink */
static long io_pool_grows;	/* how many times did we grow */
static mfn_t start_mfn = 1;
static caddr_t io_pool_kva;	/* used to alloc pages when needed */

static int create_contig_pfnlist(uint_t);

/*
 * percentage of phys mem to hold in the i/o pool
 */
#define	DEFAULT_IO_POOL_PCT	2
static long io_pool_physmem_pct = DEFAULT_IO_POOL_PCT;
static void page_io_pool_sub(page_t **, page_t *, page_t *);

#endif /* __xpv */

uint_t vac_colors = 1;

int largepagesupport = 0;
extern uint_t page_create_new;
extern uint_t page_create_exists;
extern uint_t page_create_putbacks;
/*
 * Allow users to disable the kernel's use of SSE.
 */
extern int use_sse_pagecopy, use_sse_pagezero;

/*
 * combined memory ranges from mnode and memranges[] to manage single
 * mnode/mtype dimension in the page lists.
 */
typedef struct {
        pfn_t   mnr_pfnlo;
        pfn_t   mnr_pfnhi;
        int     mnr_mnode;
        int     mnr_memrange;           /* index into memranges[] */
        /* maintain page list stats */
        pgcnt_t mnr_mt_clpgcnt;         /* cache list cnt */
        pgcnt_t mnr_mt_flpgcnt[MMU_PAGE_SIZES]; /* free list cnt per szc */
        pgcnt_t mnr_mt_totcnt;          /* sum of cache and free lists */
#ifdef DEBUG
        struct mnr_mts {                /* mnode/mtype szc stats */
                pgcnt_t mnr_mts_pgcnt;
                int     mnr_mts_colors;
                pgcnt_t *mnr_mtsc_pgcnt;
        } *mnr_mts;
#endif
} mnoderange_t;

#define	MEMRANGEHI(mtype)						\
	((mtype > 0) ? memranges[mtype - 1] - 1 : physmax)
#define	MEMRANGELO(mtype)	(memranges[mtype])

#define	MTYPE_FREEMEM(mt)	(mnoderanges[mt].mnr_mt_totcnt)
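
/*
 * Worked example of the memrange macros above (illustrative only): with the
 * default arch_memranges below, memranges[] is { 0x100000, 0x80000, 0x1000,
 * 0 }, so for mtype 3 (the 0-16M range) MEMRANGELO(3) == 0 and
 * MEMRANGEHI(3) == memranges[2] - 1 == 0xfff, while for mtype 0 (4G and
 * above) MEMRANGELO(0) == 0x100000 and MEMRANGEHI(0) == physmax.
 */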

/*
 * As the PC architecture evolved, memory was clumped into several
 * ranges for various historical I/O devices to do DMA.
 * < 16Meg - ISA bus
 * < 2Gig - ???
 * < 4Gig - PCI bus or drivers that don't understand PAE mode
 *
 * These are listed in reverse order, so that we can skip over unused
 * ranges on machines with small memories.
 *
 * For now under the Hypervisor, we'll only ever have one memrange.
 */
#define	PFN_4GIG	0x100000
#define	PFN_16MEG	0x1000
static pfn_t arch_memranges[NUM_MEM_RANGES] = {
        PFN_4GIG,       /* pfn range for 4G and above */
        0x80000,        /* pfn range for 2G-4G */
        PFN_16MEG,      /* pfn range for 16M-2G */
        0x00000,        /* pfn range for 0-16M */
};
pfn_t *memranges = &arch_memranges[0];
int nranges = NUM_MEM_RANGES;

/*
 * This combines mem_node_config and memranges into one data
 * structure to be used for page list management.
 */
mnoderange_t	*mnoderanges;
int	mnoderangecnt;
int	mtype4g;

/*
 * 4g memory management variables for systems with more than 4g of memory:
 *
 * physical memory below 4g is required for 32bit dma devices and, currently,
 * for kmem memory. On systems with more than 4g of memory, the pool of memory
 * below 4g can be depleted without any paging activity given that there is
 * likely to be sufficient memory above 4g.
 *
 * physmax4g is set true if the largest pfn is over 4g. The rest of the
 * 4g memory management code is enabled only when physmax4g is true.
 *
 * maxmem4g is the count of the maximum number of pages on the page lists
 * with physical addresses below 4g. It can be a lot less than 4g given that
 * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
 * agp aperture etc.
 *
 * freemem4g maintains the count of the number of available pages on the
 * page lists with physical addresses below 4g.
 *
 * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
 * 1/16 (desfree4gshift = 4) of maxmem4g.
 *
 * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
 * and the amount of physical memory above 4g is greater than freemem4g.
 * In this case, page_get_* routines will restrict below 4g allocations
 * for requests that don't specifically require it.
 */

#define	LOTSFREE4G	(maxmem4g >> lotsfree4gshift)
#define	DESFREE4G	(maxmem4g >> desfree4gshift)

#define	RESTRICT4G_ALLOC					\
	(physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))

static pgcnt_t	maxmem4g;
static pgcnt_t	freemem4g;
static int	physmax4g;
static int	desfree4gshift = 4;	/* maxmem4g shift to derive DESFREE4G */
static int	lotsfree4gshift = 3;
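
/*
 * Worked example (hypothetical numbers, for illustration only): on a machine
 * with 8G of RAM where 3G is addressable below 4g, maxmem4g is about 0xc0000
 * pages, so DESFREE4G == maxmem4g >> 4 == 0xc000 pages (~192M). If freemem4g
 * then drops to 0x8000 pages while freemem is 0x40000 pages, RESTRICT4G_ALLOC
 * evaluates true (0x8000 < 0xc000 and 0x10000 < 0x40000) and unconstrained
 * allocations are steered above 4g.
 */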

/*
 * 16m memory management:
 *
 * reserve some amount of physical memory below 16m for legacy devices.
 *
 * RESTRICT16M_ALLOC returns true if there are sufficient free pages above
 * 16m or if the 16m pool drops below DESFREE16M.
 *
 * In this case, general page allocations via page_get_{free,cache}list
 * routines will be restricted from allocating from the 16m pool. Allocations
 * that require specific pfn ranges (page_get_anylist) and PG_PANIC allocations
 * are not restricted.
 */

#define	FREEMEM16M	MTYPE_FREEMEM(0)
#define	DESFREE16M	desfree16m
#define	RESTRICT16M_ALLOC(freemem, pgcnt, flags)		\
	((freemem != 0) && ((flags & PG_PANIC) == 0) &&		\
	    ((freemem >= (FREEMEM16M)) ||			\
	    (FREEMEM16M < (DESFREE16M + pgcnt))))

static pgcnt_t	desfree16m = 0x380;

/*
 * This can be patched via /etc/system to allow old non-PAE aware device
 * drivers to use kmem_alloc'd memory on 32 bit systems with > 4Gig RAM.
 */
int restricted_kmemalloc = 0;

#ifdef VM_STATS
struct {
        ulong_t pga_alloc;
        ulong_t pga_notfullrange;
        ulong_t pga_nulldmaattr;
        ulong_t pga_allocok;
        ulong_t pga_allocfailed;
        ulong_t pgma_alloc;
        ulong_t pgma_allocok;
        ulong_t pgma_allocfailed;
        ulong_t pgma_allocempty;
} pga_vmstats;
#endif

uint_t mmu_page_sizes;

/* How many page sizes the users can see */
uint_t mmu_exported_page_sizes;

/* page sizes that legacy applications can see */
uint_t mmu_legacy_page_sizes;

/*
 * Number of pages in 1 GB.  Don't enable automatic large pages if we have
 * fewer than this many pages.
 */
pgcnt_t shm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
pgcnt_t privm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);

/*
 * Maximum and default segment size tunables for user private
 * and shared anon memory, and user text and initialized data.
 * These can be patched via /etc/system to allow large pages
 * to be used for mapping application private and shared anon memory.
 */
size_t mcntl0_lpsize = MMU_PAGESIZE;
size_t max_uheap_lpsize = MMU_PAGESIZE;
size_t default_uheap_lpsize = MMU_PAGESIZE;
size_t max_ustack_lpsize = MMU_PAGESIZE;
size_t default_ustack_lpsize = MMU_PAGESIZE;
size_t max_privmap_lpsize = MMU_PAGESIZE;
size_t max_uidata_lpsize = MMU_PAGESIZE;
size_t max_utext_lpsize = MMU_PAGESIZE;
size_t max_shm_lpsize = MMU_PAGESIZE;
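
/*
 * For example, to let 2M pages back application heap and shared anon memory,
 * an administrator might add lines such as the following to /etc/system
 * (values are illustrative, not a recommendation):
 *
 *	set max_uheap_lpsize=0x200000
 *	set max_shm_lpsize=0x200000
 */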

/*
 * initialized by page_coloring_init().
 */
uint_t	page_colors;
uint_t	page_colors_mask;
uint_t	page_coloring_shift;
int	cpu_page_colors;
static uint_t	l2_colors;

/*
 * Page freelists and cachelists are dynamically allocated once mnoderangecnt
 * and page_colors are calculated from the l2 cache n-way set size.  Within a
 * mnode range, the page freelist and cachelist are hashed into bins based on
 * color. This makes it easier to search for a page within a specific memory
 * range.
 */
#define	PAGE_COLORS_MIN	16

page_t ****page_freelists;
page_t ***page_cachelists;


/*
 * Used by page layer to know about page sizes
 */
hw_pagesize_t hw_page_array[MAX_NUM_LEVEL + 1];

kmutex_t	*fpc_mutex[NPC_MUTEX];
kmutex_t	*cpc_mutex[NPC_MUTEX];

/*
 * Only let one thread at a time try to coalesce large pages, to
 * prevent them from working against each other.
 */
static kmutex_t	contig_lock;
#define	CONTIG_LOCK()	mutex_enter(&contig_lock);
#define	CONTIG_UNLOCK()	mutex_exit(&contig_lock);

#define	PFN_16M		(mmu_btop((uint64_t)0x1000000))

/*
 * Return the optimum page size for a given mapping
 */
/*ARGSUSED*/
size_t
map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int memcntl)
{
        level_t l = 0;
        size_t pgsz = MMU_PAGESIZE;
        size_t max_lpsize;
        uint_t mszc;

        ASSERT(maptype != MAPPGSZ_VA);

        if (maptype != MAPPGSZ_ISM && physmem < privm_lpg_min_physmem) {
                return (MMU_PAGESIZE);
        }

        switch (maptype) {
        case MAPPGSZ_HEAP:
        case MAPPGSZ_STK:
                max_lpsize = memcntl ? mcntl0_lpsize : (maptype ==
                    MAPPGSZ_HEAP ? max_uheap_lpsize : max_ustack_lpsize);
                if (max_lpsize == MMU_PAGESIZE) {
                        return (MMU_PAGESIZE);
                }
                if (len == 0) {
                        len = (maptype == MAPPGSZ_HEAP) ? p->p_brkbase +
                            p->p_brksize - p->p_bssbase : p->p_stksize;
                }
                len = (maptype == MAPPGSZ_HEAP) ? MAX(len,
                    default_uheap_lpsize) : MAX(len, default_ustack_lpsize);

                /*
                 * use the page size that best fits len
                 */
                for (l = mmu.umax_page_level; l > 0; --l) {
                        if (LEVEL_SIZE(l) > max_lpsize || len < LEVEL_SIZE(l)) {
                                continue;
                        } else {
                                pgsz = LEVEL_SIZE(l);
                        }
                        break;
                }

                mszc = (maptype == MAPPGSZ_HEAP ? p->p_brkpageszc :
                    p->p_stkpageszc);
                if (addr == 0 && (pgsz < hw_page_array[mszc].hp_size)) {
                        pgsz = hw_page_array[mszc].hp_size;
                }
                return (pgsz);

        case MAPPGSZ_ISM:
                for (l = mmu.umax_page_level; l > 0; --l) {
                        if (len >= LEVEL_SIZE(l))
                                return (LEVEL_SIZE(l));
                }
                return (LEVEL_SIZE(0));
        }
        return (pgsz);
}

static uint_t
map_szcvec(caddr_t addr, size_t size, uintptr_t off, size_t max_lpsize,
    size_t min_physmem)
{
        caddr_t eaddr = addr + size;
        uint_t szcvec = 0;
        caddr_t raddr;
        caddr_t readdr;
        size_t  pgsz;
        int i;

        if (physmem < min_physmem || max_lpsize <= MMU_PAGESIZE) {
                return (0);
        }

        for (i = mmu_exported_page_sizes - 1; i > 0; i--) {
                pgsz = page_get_pagesize(i);
                if (pgsz > max_lpsize) {
                        continue;
                }
                raddr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
                readdr = (caddr_t)P2ALIGN((uintptr_t)eaddr, pgsz);
                if (raddr < addr || raddr >= readdr) {
                        continue;
                }
                if (P2PHASE((uintptr_t)addr ^ off, pgsz)) {
                        continue;
                }
                /*
                 * Set szcvec to the remaining page sizes.
                 */
                szcvec = ((1 << (i + 1)) - 1) & ~1;
                break;
        }
        return (szcvec);
}
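
/*
 * Worked example of the szcvec computation above (illustrative only): if the
 * largest size code whose pages fit and align within the region is i == 2,
 * then szcvec == ((1 << 3) - 1) & ~1 == 0x6, i.e. bits 1 and 2 are set and
 * the region may be mapped with size code 1 or 2. Bit 0 is masked off since
 * base pages are always usable and need no szc vector bit.
 */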

/*
 * Return a bit vector of large page size codes that
 * can be used to map [addr, addr + len) region.
 */
/*ARGSUSED*/
uint_t
map_pgszcvec(caddr_t addr, size_t size, uintptr_t off, int flags, int type,
    int memcntl)
{
        size_t max_lpsize = mcntl0_lpsize;

        if (mmu.max_page_level == 0)
                return (0);

        if (flags & MAP_TEXT) {
                if (!memcntl)
                        max_lpsize = max_utext_lpsize;
                return (map_szcvec(addr, size, off, max_lpsize,
                    shm_lpg_min_physmem));

        } else if (flags & MAP_INITDATA) {
                if (!memcntl)
                        max_lpsize = max_uidata_lpsize;
                return (map_szcvec(addr, size, off, max_lpsize,
                    privm_lpg_min_physmem));

        } else if (type == MAPPGSZC_SHM) {
                if (!memcntl)
                        max_lpsize = max_shm_lpsize;
                return (map_szcvec(addr, size, off, max_lpsize,
                    shm_lpg_min_physmem));

        } else if (type == MAPPGSZC_HEAP) {
                if (!memcntl)
                        max_lpsize = max_uheap_lpsize;
                return (map_szcvec(addr, size, off, max_lpsize,
                    privm_lpg_min_physmem));

        } else if (type == MAPPGSZC_STACK) {
                if (!memcntl)
                        max_lpsize = max_ustack_lpsize;
                return (map_szcvec(addr, size, off, max_lpsize,
                    privm_lpg_min_physmem));

        } else {
                if (!memcntl)
                        max_lpsize = max_privmap_lpsize;
                return (map_szcvec(addr, size, off, max_lpsize,
                    privm_lpg_min_physmem));
        }
}

/*
 * Handle a pagefault.
 */
faultcode_t
pagefault(
        caddr_t addr,
        enum fault_type type,
        enum seg_rw rw,
        int iskernel)
{
        struct as *as;
        struct hat *hat;
        struct proc *p;
        kthread_t *t;
        faultcode_t res;
        caddr_t base;
        size_t len;
        int err;
        int mapped_red;
        uintptr_t ea;

        ASSERT_STACK_ALIGNED();

        if (INVALID_VADDR(addr))
                return (FC_NOMAP);

        mapped_red = segkp_map_red();

        if (iskernel) {
                as = &kas;
                hat = as->a_hat;
        } else {
                t = curthread;
                p = ttoproc(t);
                as = p->p_as;
                hat = as->a_hat;
        }

        /*
         * Dispatch pagefault.
         */
        res = as_fault(hat, as, addr, 1, type, rw);

        /*
         * If this isn't a potential unmapped hole in the user's
         * UNIX data or stack segments, just return status info.
         */
        if (res != FC_NOMAP || iskernel)
                goto out;

        /*
         * Check to see if we happened to fault on a currently unmapped
         * part of the UNIX data or stack segments.  If so, create a zfod
         * mapping there and then try calling the fault routine again.
         */
        base = p->p_brkbase;
        len = p->p_brksize;

        if (addr < base || addr >= base + len) {        /* data seg? */
                base = (caddr_t)p->p_usrstack - p->p_stksize;
                len = p->p_stksize;
                if (addr < base || addr >= p->p_usrstack) {     /* stack seg? */
                        /* not in either UNIX data or stack segments */
                        res = FC_NOMAP;
                        goto out;
                }
        }

        /*
         * The rest of this function implements 3.X/4.X/5.X compatibility.
         * This code is probably not needed anymore.
         */
        if (p->p_model == DATAMODEL_ILP32) {

                /* expand the gap to the page boundaries on each side */
                ea = P2ROUNDUP((uintptr_t)base + len, MMU_PAGESIZE);
                base = (caddr_t)P2ALIGN((uintptr_t)base, MMU_PAGESIZE);
                len = ea - (uintptr_t)base;

                as_rangelock(as);
                if (as_gap(as, MMU_PAGESIZE, &base, &len, AH_CONTAIN, addr) ==
                    0) {
                        err = as_map(as, base, len, segvn_create, zfod_argsp);
                        as_rangeunlock(as);
                        if (err) {
                                res = FC_MAKE_ERR(err);
                                goto out;
                        }
                } else {
                        /*
                         * This page is already mapped by another thread after
                         * we returned from as_fault() above.  We just fall
                         * through as_fault() below.
                         */
                        as_rangeunlock(as);
                }

                res = as_fault(hat, as, addr, 1, F_INVAL, rw);
        }

out:
        if (mapped_red)
                segkp_unmap_red();

        return (res);
}

void
map_addr(caddr_t *addrp, size_t len, offset_t off, int vacalign, uint_t flags)
{
        struct proc *p = curproc;
        caddr_t userlimit = (flags & _MAP_LOW32) ?
            (caddr_t)_userlimit32 : p->p_as->a_userlimit;

        map_addr_proc(addrp, len, off, vacalign, userlimit, curproc, flags);
}

/*ARGSUSED*/
int
map_addr_vacalign_check(caddr_t addr, u_offset_t off)
{
        return (0);
}

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is the highest available below userlimit.
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  We decide to completely ignore this hint.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *
 *	align is not needed on x86 (it's for virtually addressed caches)
 */
/*ARGSUSED*/
void
map_addr_proc(
        caddr_t *addrp,
        size_t len,
        offset_t off,
        int vacalign,
        caddr_t userlimit,
        struct proc *p,
        uint_t flags)
{
        struct as *as = p->p_as;
        caddr_t addr;
        caddr_t base;
        size_t slen;
        size_t align_amount;

        ASSERT32(userlimit == as->a_userlimit);

        base = p->p_brkbase;
#if defined(__amd64)
        /*
         * XX64 Yes, this needs more work.
         */
        if (p->p_model == DATAMODEL_NATIVE) {
                if (userlimit < as->a_userlimit) {
                        /*
                         * This happens when a program wants to map
                         * something in a range that's accessible to a
                         * program in a smaller address space.  For example,
                         * a 64-bit program calling mmap32(2) to guarantee
                         * that the returned address is below 4Gbytes.
                         */
                        ASSERT((uintptr_t)userlimit < ADDRESS_C(0xffffffff));

                        if (userlimit > base)
                                slen = userlimit - base;
                        else {
                                *addrp = NULL;
                                return;
                        }
                } else {
                        /*
                         * XX64 This layout is probably wrong .. but in
                         * the event we make the amd64 address space look
                         * like sparcv9 i.e. with the stack -above- the
                         * heap, this bit of code might even be correct.
                         */
                        slen = p->p_usrstack - base -
                            (((size_t)rctl_enforced_value(
                            rctlproc_legacy[RLIMIT_STACK],
                            p->p_rctls, p) + PAGEOFFSET) & PAGEMASK);
                }
        } else
#endif
                slen = userlimit - base;

        len = (len + PAGEOFFSET) & PAGEMASK;

        /*
         * Redzone for each side of the request. This is done to leave
         * one page unmapped between segments. This is not required, but
         * it's useful for the user because if their program strays across
         * a segment boundary, it will catch a fault immediately making
         * debugging a little easier.
         */
        len += 2 * MMU_PAGESIZE;

        /*
         * figure out what the alignment should be
         *
         * XX64 -- is there an ELF_AMD64_MAXPGSZ or is it the same????
         */
        if (len <= ELF_386_MAXPGSZ) {
                /*
                 * Align virtual addresses to ensure that ELF shared libraries
                 * are mapped with the appropriate alignment constraints by
                 * the run-time linker.
                 */
                align_amount = ELF_386_MAXPGSZ;
        } else {
                int l = mmu.umax_page_level;

                while (l && len < LEVEL_SIZE(l))
                        --l;

                align_amount = LEVEL_SIZE(l);
        }

        if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
                align_amount = (uintptr_t)*addrp;

        len += align_amount;

        /*
         * Look for a large enough hole starting below userlimit.
         * After finding it, use the upper part.  Addition of PAGESIZE
         * is for the redzone as described above.
         */
        if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
                caddr_t as_addr;

                addr = base + slen - len + MMU_PAGESIZE;
                as_addr = addr;
                /*
                 * Round address DOWN to the alignment amount,
                 * add the offset, and if this address is less
                 * than the original address, add alignment amount.
                 */
                addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
                addr += (uintptr_t)(off & (align_amount - 1));
                if (addr < as_addr)
                        addr += align_amount;

                ASSERT(addr <= (as_addr + align_amount));
                ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
                    ((uintptr_t)(off & (align_amount - 1))));
                *addrp = addr;
        } else {
                *addrp = NULL;  /* no more virtual space */
        }
}
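
/*
 * Worked example of the rounding above (illustrative numbers): with
 * align_amount == 0x400000 (4M), addr == 0x2fff5000 and off == 0x3000,
 * rounding down gives 0x2fc00000 and adding the offset phase gives
 * 0x2fc03000. That is below the original 0x2fff5000, so 0x400000 is added
 * back, yielding 0x30003000, an address that shares off's phase within a
 * 4M-aligned region and still sits inside the hole found by as_gap().
 */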

/*
 * Determine whether [base, base+len] contains a valid range of
 * addresses at least minlen long. base and len are adjusted if
 * required to provide a valid range.
 */
/*ARGSUSED3*/
int
valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
{
        uintptr_t hi, lo;

        lo = (uintptr_t)*basep;
        hi = lo + *lenp;

        /*
         * If hi rolled over the top, try cutting back.
         */
        if (hi < lo) {
                if (0 - lo + hi < minlen)
                        return (0);
                if (0 - lo < minlen)
                        return (0);
                *lenp = 0 - lo;
        } else if (hi - lo < minlen) {
                return (0);
        }
#if defined(__amd64)
        /*
         * Deal with a possible hole in the address range between
         * hole_start and hole_end that should never be mapped.
         */
        if (lo < hole_start) {
                if (hi > hole_start) {
                        if (hi < hole_end) {
                                hi = hole_start;
                        } else {
                                /* lo < hole_start && hi >= hole_end */
                                if (dir == AH_LO) {
                                        /*
                                         * prefer lowest range
                                         */
                                        if (hole_start - lo >= minlen)
                                                hi = hole_start;
                                        else if (hi - hole_end >= minlen)
                                                lo = hole_end;
                                        else
                                                return (0);
                                } else {
                                        /*
                                         * prefer highest range
                                         */
                                        if (hi - hole_end >= minlen)
                                                lo = hole_end;
                                        else if (hole_start - lo >= minlen)
                                                hi = hole_start;
                                        else
                                                return (0);
                                }
                        }
                }
        } else {
                /* lo >= hole_start */
                if (hi < hole_end)
                        return (0);
                if (lo < hole_end)
                        lo = hole_end;
        }

        if (hi - lo < minlen)
                return (0);

        *basep = (caddr_t)lo;
        *lenp = hi - lo;
#endif
        return (1);
}

/*
 * Determine whether [addr, addr+len] are valid user addresses.
 */
/*ARGSUSED*/
int
valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
    caddr_t userlimit)
{
        caddr_t eaddr = addr + len;

        if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
                return (RANGE_BADADDR);

#if defined(__amd64)
        /*
         * Check for the VA hole
         */
        if (eaddr > (caddr_t)hole_start && addr < (caddr_t)hole_end)
                return (RANGE_BADADDR);
#endif

        return (RANGE_OKAY);
}
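
/*
 * Note on the VA hole tests above: on amd64, [hole_start, hole_end) is the
 * non-canonical region that the hardware cannot map. valid_usr_range() uses
 * the standard interval-overlap test (eaddr > hole_start && addr < hole_end)
 * to reject any request that intersects the hole, while valid_va_range()
 * instead trims the request to whichever side of the hole still satisfies
 * minlen.
 */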

/*
 * Return 1 if the page frame is onboard memory, else 0.
 */
int
pf_is_memory(pfn_t pf)
{
        if (pfn_is_foreign(pf))
                return (0);
        return (address_in_memlist(phys_install, pfn_to_pa(pf), 1));
}

/*
 * return the memrange containing pfn
 */
int
memrange_num(pfn_t pfn)
{
        int n;

        for (n = 0; n < nranges - 1; ++n) {
                if (pfn >= memranges[n])
                        break;
        }
        return (n);
}

/*
 * return the mnoderange containing pfn
 */
/*ARGSUSED*/
int
pfn_2_mtype(pfn_t pfn)
{
#if defined(__xpv)
        return (0);
#else
        int     n;

        for (n = mnoderangecnt - 1; n >= 0; n--) {
                if (pfn >= mnoderanges[n].mnr_pfnlo) {
                        break;
                }
        }
        return (n);
#endif
}

#if !defined(__xpv)
/*
 * is_contigpage_free:
 *	returns a page list of contiguous pages. It minimally has to return
 *	minctg pages. Caller determines minctg based on the scatter-gather
 *	list length.
 *
 *	pfnp is set to the next page frame to search on return.
 */
static page_t *
is_contigpage_free(
        pfn_t *pfnp,
        pgcnt_t *pgcnt,
        pgcnt_t minctg,
        uint64_t pfnseg,
        int iolock)
{
        int     i = 0;
        pfn_t   pfn = *pfnp;
        page_t  *pp;
        page_t  *plist = NULL;

        /*
         * fail if pfn + minctg crosses a segment boundary.
         * Adjust for next starting pfn to begin at segment boundary.
         */

        if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg)) {
                *pfnp = roundup(*pfnp, pfnseg + 1);
                return (NULL);
        }
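
        /*
         * Illustration of the boundary test above (hypothetical values): a
         * device with dma_attr_seg == 0xffffff gives pfnseg == 0xfff. If
         * *pfnp == 0xffe and minctg == 4, then (0xffe + 3) & 0xfff == 1,
         * which is less than 0xffe & 0xfff, so the run would cross a 16M
         * segment boundary; *pfnp is advanced to 0x1000, the next boundary.
         */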

        do {
retry:
                pp = page_numtopp_nolock(pfn + i);
                if ((pp == NULL) ||
                    (page_trylock(pp, SE_EXCL) == 0)) {
                        (*pfnp)++;
                        break;
                }
                if (page_pptonum(pp) != pfn + i) {
                        page_unlock(pp);
                        goto retry;
                }

                if (!(PP_ISFREE(pp))) {
                        page_unlock(pp);
                        (*pfnp)++;
                        break;
                }

                if (!PP_ISAGED(pp)) {
                        page_list_sub(pp, PG_CACHE_LIST);
                        page_hashout(pp, (kmutex_t *)NULL);
                } else {
                        page_list_sub(pp, PG_FREE_LIST);
                }

                if (iolock)
                        page_io_lock(pp);
                page_list_concat(&plist, &pp);

                /*
                 * exit loop when pgcnt satisfied or segment boundary reached.
                 */

        } while ((++i < *pgcnt) && ((pfn + i) & pfnseg));

        *pfnp += i;             /* set to next pfn to search */

        if (i >= minctg) {
                *pgcnt -= i;
                return (plist);
        }

        /*
         * failure: minctg not satisfied.
         *
         * if next request crosses segment boundary, set next pfn
         * to search from the segment boundary.
         */
        if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg))
                *pfnp = roundup(*pfnp, pfnseg + 1);

        /* clean up any pages already allocated */

        while (plist) {
                pp = plist;
                page_sub(&plist, pp);
                page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
                if (iolock)
                        page_io_unlock(pp);
                page_unlock(pp);
        }

        return (NULL);
}
#endif	/* !__xpv */

/*
 * verify that pages being returned from allocator have correct DMA attribute
 */
#ifndef DEBUG
#define	check_dma(a, b, c) (0)
#else
static void
check_dma(ddi_dma_attr_t *dma_attr, page_t *pp, int cnt)
{
        if (dma_attr == NULL)
                return;

        while (cnt-- > 0) {
                if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) <
                    dma_attr->dma_attr_addr_lo)
                        panic("PFN (pp=%p) below dma_attr_addr_lo", pp);
                if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) >=
                    dma_attr->dma_attr_addr_hi)
                        panic("PFN (pp=%p) above dma_attr_addr_hi", pp);
                pp = pp->p_next;
        }
}
#endif

#if !defined(__xpv)
static page_t *
page_get_contigpage(pgcnt_t *pgcnt, ddi_dma_attr_t *mattr, int iolock)
{
        pfn_t           pfn;
        int             sgllen;
        uint64_t        pfnseg;
        pgcnt_t         minctg;
        page_t          *pplist = NULL, *plist;
        uint64_t        lo, hi;
        pgcnt_t         pfnalign = 0;
        static pfn_t    startpfn;
        static pgcnt_t  lastctgcnt;
        uintptr_t       align;

        CONTIG_LOCK();

        if (mattr) {
                lo = mmu_btop((mattr->dma_attr_addr_lo + MMU_PAGEOFFSET));
                hi = mmu_btop(mattr->dma_attr_addr_hi);
                if (hi >= physmax)
                        hi = physmax - 1;
                sgllen = mattr->dma_attr_sgllen;
                pfnseg = mmu_btop(mattr->dma_attr_seg);

                align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
                if (align > MMU_PAGESIZE)
                        pfnalign = mmu_btop(align);

                /*
                 * in order to satisfy the request, must minimally
                 * acquire minctg contiguous pages
                 */
                minctg = howmany(*pgcnt, sgllen);

                ASSERT(hi >= lo);

                /*
                 * start from where last searched if the minctg >= lastctgcnt
                 */
                if (minctg < lastctgcnt || startpfn < lo || startpfn > hi)
                        startpfn = lo;
        } else {
                hi = physmax - 1;
                lo = 0;
                sgllen = 1;
                pfnseg = mmu.highest_pfn;
                minctg = *pgcnt;

                if (minctg < lastctgcnt)
                        startpfn = lo;
        }
        lastctgcnt = minctg;

        ASSERT(pfnseg + 1 >= (uint64_t)minctg);

        /* conserve 16m memory - start search above 16m when possible */
        if (hi > PFN_16M && startpfn < PFN_16M)
                startpfn = PFN_16M;

        pfn = startpfn;
        if (pfnalign)
                pfn = P2ROUNDUP(pfn, pfnalign);

        while (pfn + minctg - 1 <= hi) {

                plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
                if (plist) {
                        page_list_concat(&pplist, &plist);
                        sgllen--;
                        /*
                         * return when contig pages no longer needed
                         */
                        if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
                                startpfn = pfn;
                                CONTIG_UNLOCK();
                                check_dma(mattr, pplist, *pgcnt);
                                return (pplist);
                        }
                        minctg = howmany(*pgcnt, sgllen);
                }
                if (pfnalign)
                        pfn = P2ROUNDUP(pfn, pfnalign);
        }

        /* cannot find contig pages in specified range */
        if (startpfn == lo) {
                CONTIG_UNLOCK();
                return (NULL);
        }

        /* did not start with lo previously */
        pfn = lo;
        if (pfnalign)
                pfn = P2ROUNDUP(pfn, pfnalign);

        /* allow search to go above startpfn */
        while (pfn < startpfn) {

                plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
                if (plist != NULL) {

                        page_list_concat(&pplist, &plist);
                        sgllen--;

                        /*
                         * return when contig pages no longer needed
                         */
                        if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
                                startpfn = pfn;
                                CONTIG_UNLOCK();
                                check_dma(mattr, pplist, *pgcnt);
                                return (pplist);
                        }
                        minctg = howmany(*pgcnt, sgllen);
                }
                if (pfnalign)
                        pfn = P2ROUNDUP(pfn, pfnalign);
        }
        CONTIG_UNLOCK();
        return (NULL);
}
#endif	/* !__xpv */

/*
 * mnode_range_cnt() calculates the number of memory ranges for mnode and
 * memranges[]. Used to determine the size of page lists and mnoderanges.
 */
int
mnode_range_cnt(int mnode)
{
#if defined(__xpv)
        ASSERT(mnode == 0);
        return (1);
#else	/* __xpv */
        int     mri;
        int     mnrcnt = 0;

        if (mem_node_config[mnode].exists != 0) {
                mri = nranges - 1;

                /* find the memranges index below contained in mnode range */

                while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
                        mri--;

                /*
                 * increment mnode range counter when memranges or mnode
                 * boundary is reached.
                 */
                while (mri >= 0 &&
                    mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
                        mnrcnt++;
                        if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
                                mri--;
                        else
                                break;
                }
        }
        ASSERT(mnrcnt <= MAX_MNODE_MRANGES);
        return (mnrcnt);
#endif	/* __xpv */
}
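
/*
 * Worked example (hypothetical layout): an mnode spanning pfns 0x40000
 * through 0x200000 (1G through 8G) intersects the 16M-2G, 2G-4G and 4G+
 * memranges, so mnode_range_cnt() returns 3 and three mnoderange_t entries
 * are later filled in for it by mnode_range_setup() below.
 */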

/*
 * mnode_range_setup() initializes mnoderanges.
 */
void
mnode_range_setup(mnoderange_t *mnoderanges)
{
        int     mnode, mri;

        for (mnode = 0; mnode < max_mem_nodes; mnode++) {
                if (mem_node_config[mnode].exists == 0)
                        continue;

                mri = nranges - 1;

                while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
                        mri--;

                while (mri >= 0 && mem_node_config[mnode].physmax >=
                    MEMRANGELO(mri)) {
                        mnoderanges->mnr_pfnlo = MAX(MEMRANGELO(mri),
                            mem_node_config[mnode].physbase);
                        mnoderanges->mnr_pfnhi = MIN(MEMRANGEHI(mri),
                            mem_node_config[mnode].physmax);
                        mnoderanges->mnr_mnode = mnode;
                        mnoderanges->mnr_memrange = mri;
                        mnoderanges++;
                        if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
                                mri--;
                        else
                                break;
                }
        }
}

/*ARGSUSED*/
int
mtype_init(vnode_t *vp, caddr_t vaddr, uint_t *flags, size_t pgsz)
{
        int mtype = mnoderangecnt - 1;

#if !defined(__xpv)
#if defined(__i386)
        /*
         * set the mtype range
         * - kmem requests need to be below 4g if restricted_kmemalloc is set.
         * - for non kmem requests, set range to above 4g if memory below 4g
         * runs low.
         */
        if (restricted_kmemalloc && VN_ISKAS(vp) &&
            (caddr_t)(vaddr) >= kernelheap &&
            (caddr_t)(vaddr) < ekernelheap) {
                ASSERT(physmax4g);
                mtype = mtype4g;
                if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz),
                    btop(pgsz), *flags)) {
                        *flags |= PGI_MT_RANGE16M;
                } else {
                        VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
                        VM_STAT_COND_ADD((*flags & PG_PANIC),
                            vmm_vmstats.pgpanicalloc);
                        *flags |= PGI_MT_RANGE0;
                }
                return (mtype);
        }
#endif	/* __i386 */

        if (RESTRICT4G_ALLOC) {
                VM_STAT_ADD(vmm_vmstats.restrict4gcnt);
                /* here only for > 4g systems */
                *flags |= PGI_MT_RANGE4G;
        } else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), *flags)) {
                *flags |= PGI_MT_RANGE16M;
        } else {
                VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
                VM_STAT_COND_ADD((*flags & PG_PANIC), vmm_vmstats.pgpanicalloc);
                *flags |= PGI_MT_RANGE0;
        }
#endif	/* !__xpv */
        return (mtype);
}


/* mtype init for page_get_replacement_page */
/*ARGSUSED*/
int
mtype_pgr_init(int *flags, page_t *pp, int mnode, pgcnt_t pgcnt)
{
        int mtype = mnoderangecnt - 1;
#if !defined(__xpv)
        if (RESTRICT16M_ALLOC(freemem, pgcnt, *flags)) {
                *flags |= PGI_MT_RANGE16M;
        } else {
                VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
                *flags |= PGI_MT_RANGE0;
        }
#endif
        return (mtype);
}
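
/*
 * Worked example of RESTRICT16M_ALLOC as used above (illustrative numbers):
 * with desfree16m == 0x380 and FREEMEM16M == 0x300, a non-panic request for
 * 0x10 pages with freemem == 0x5000 sets PGI_MT_RANGE16M because freemem >=
 * FREEMEM16M. Even when memory overall is scarce (freemem, say, 0x200 <
 * FREEMEM16M), the 16m pool stays off limits while FREEMEM16M < DESFREE16M +
 * pgcnt, i.e. while the pool is already below its desired level.
 */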

/*
 * Determine if the mnode range specified in mtype contains memory belonging
 * to memory node mnode.  If flags & PGI_MT_RANGE is set then mtype contains
 * the range of indices from high pfn to 0, 16m or 4g.
 *
 * Return first mnode range type index found otherwise return -1 if none found.
 */
int
mtype_func(int mnode, int mtype, uint_t flags)
{
        if (flags & PGI_MT_RANGE) {
                int     mtlim = 0;

                if (flags & PGI_MT_NEXT)
                        mtype--;
                if (flags & PGI_MT_RANGE4G)
                        mtlim = mtype4g + 1;    /* exclude 0-4g range */
                else if (flags & PGI_MT_RANGE16M)
                        mtlim = 1;              /* exclude 0-16m range */
                while (mtype >= mtlim) {
                        if (mnoderanges[mtype].mnr_mnode == mnode)
                                return (mtype);
                        mtype--;
                }
        } else if (mnoderanges[mtype].mnr_mnode == mnode) {
                return (mtype);
        }
        return (-1);
}

/*
 * Update the page list max counts with the pfn range specified by the
 * input parameters. Called from add_physmem() when physical memory with
 * page_t's is initially added to the page lists.
 */
void
mtype_modify_max(pfn_t startpfn, long cnt)
{
        int     mtype = 0;
        pfn_t   endpfn = startpfn + cnt, pfn;
        pgcnt_t inc;

        ASSERT(cnt > 0);

        if (!physmax4g)
                return;

        for (pfn = startpfn; pfn < endpfn; ) {
                if (pfn <= mnoderanges[mtype].mnr_pfnhi) {
                        if (endpfn < mnoderanges[mtype].mnr_pfnhi) {
                                inc = endpfn - pfn;
                        } else {
                                inc = mnoderanges[mtype].mnr_pfnhi - pfn + 1;
                        }
                        if (mtype <= mtype4g)
                                maxmem4g += inc;
                        pfn += inc;
                }
                mtype++;
                ASSERT(mtype < mnoderangecnt || pfn >= endpfn);
        }
}

int
mtype_2_mrange(int mtype)
{
        return (mnoderanges[mtype].mnr_memrange);
}

void
mnodetype_2_pfn(int mnode, int mtype, pfn_t *pfnlo, pfn_t *pfnhi)
{
        ASSERT(mnoderanges[mtype].mnr_mnode == mnode);
        *pfnlo = mnoderanges[mtype].mnr_pfnlo;
        *pfnhi = mnoderanges[mtype].mnr_pfnhi;
}
(sizeof (struct mnr_mts) * mmu_page_sizes); 13985084Sjohnlev for (szc = 0; szc < mmu_page_sizes; szc++) { 13995084Sjohnlev colors = page_get_pagecolors(szc); 14005084Sjohnlev mnoderanges[mt].mnr_mts[szc].mnr_mts_colors = colors; 14015084Sjohnlev mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt = 14025084Sjohnlev (pgcnt_t *)addr; 14035084Sjohnlev addr += (sizeof (pgcnt_t) * colors); 14045084Sjohnlev } 14055084Sjohnlev } 14065084Sjohnlev #endif 14075084Sjohnlev return (addr); 14085084Sjohnlev } 14095084Sjohnlev 14105084Sjohnlev void 14115084Sjohnlev plcnt_inc_dec(page_t *pp, int mtype, int szc, long cnt, int flags) 14125084Sjohnlev { 14135084Sjohnlev #ifdef DEBUG 14145084Sjohnlev int bin = PP_2_BIN(pp); 14155084Sjohnlev 14165084Sjohnlev atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mts_pgcnt, cnt); 14175084Sjohnlev atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mtsc_pgcnt[bin], 14185084Sjohnlev cnt); 14195084Sjohnlev #endif 14205084Sjohnlev ASSERT(mtype == PP_2_MTYPE(pp)); 14215084Sjohnlev if (physmax4g && mtype <= mtype4g) 14225084Sjohnlev atomic_add_long(&freemem4g, cnt); 14235084Sjohnlev if (flags & PG_CACHE_LIST) 14245084Sjohnlev atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt, cnt); 14255084Sjohnlev else 1426*5466Skchow atomic_add_long(&mnoderanges[mtype].mnr_mt_flpgcnt[szc], cnt); 1427*5466Skchow atomic_add_long(&mnoderanges[mtype].mnr_mt_totcnt, cnt); 14285084Sjohnlev } 14295084Sjohnlev 14301373Skchow /* 1431414Skchow * Returns the free page count for mnode 1432414Skchow */ 1433414Skchow int 1434414Skchow mnode_pgcnt(int mnode) 1435414Skchow { 1436414Skchow int mtype = mnoderangecnt - 1; 1437414Skchow int flags = PGI_MT_RANGE0; 1438414Skchow pgcnt_t pgcnt = 0; 1439414Skchow 1440414Skchow mtype = mtype_func(mnode, mtype, flags); 1441414Skchow 1442414Skchow while (mtype != -1) { 14431385Skchow pgcnt += MTYPE_FREEMEM(mtype); 1444414Skchow mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT); 1445414Skchow } 1446414Skchow return (pgcnt); 1447414Skchow } 1448414Skchow 1449414Skchow /* 14500Sstevel@tonic-gate * Initialize page coloring variables based on the l2 cache parameters. 14510Sstevel@tonic-gate * Calculate and return memory needed for page coloring data structures. 14520Sstevel@tonic-gate */ 14530Sstevel@tonic-gate size_t 14540Sstevel@tonic-gate page_coloring_init(uint_t l2_sz, int l2_linesz, int l2_assoc) 14550Sstevel@tonic-gate { 14560Sstevel@tonic-gate size_t colorsz = 0; 14570Sstevel@tonic-gate int i; 14580Sstevel@tonic-gate int colors; 14590Sstevel@tonic-gate 14605084Sjohnlev #if defined(__xpv) 14615084Sjohnlev /* 14625084Sjohnlev * Hypervisor domains currently don't have any concept of NUMA. 14635084Sjohnlev * Hence we'll act like there is only 1 memrange. 14645084Sjohnlev */ 14655084Sjohnlev i = memrange_num(1); 14665084Sjohnlev #else /* !__xpv */ 14670Sstevel@tonic-gate /* 14680Sstevel@tonic-gate * Reduce the memory ranges lists if we don't have large amounts 14690Sstevel@tonic-gate * of memory. This avoids searching known empty free lists. 
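 *
 * For example (editor's sketch of the adjustment below): memrange_num(physmax)
 * is 0 only when physmax lies in the topmost, above-4g range; on a smaller
 * machine it returns some i > 0, and advancing memranges by i while
 * shrinking nranges by i drops the higher ranges that this machine can
 * never populate.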
14700Sstevel@tonic-gate */ 14710Sstevel@tonic-gate i = memrange_num(physmax); 14720Sstevel@tonic-gate #if defined(__i386) 14730Sstevel@tonic-gate if (i > 0) 14740Sstevel@tonic-gate restricted_kmemalloc = 0; 14750Sstevel@tonic-gate #endif 14760Sstevel@tonic-gate /* physmax greater than 4g */ 14770Sstevel@tonic-gate if (i == 0) 14780Sstevel@tonic-gate physmax4g = 1; 14795084Sjohnlev #endif /* !__xpv */ 14805084Sjohnlev memranges += i; 14815084Sjohnlev nranges -= i; 14820Sstevel@tonic-gate 14835349Skchow ASSERT(mmu_page_sizes <= MMU_PAGE_SIZES); 14845349Skchow 14850Sstevel@tonic-gate ASSERT(ISP2(l2_sz)); 14860Sstevel@tonic-gate ASSERT(ISP2(l2_linesz)); 14870Sstevel@tonic-gate ASSERT(l2_sz > MMU_PAGESIZE); 14880Sstevel@tonic-gate 14890Sstevel@tonic-gate /* l2_assoc is 0 for fully associative l2 cache */ 14900Sstevel@tonic-gate if (l2_assoc) 14910Sstevel@tonic-gate l2_colors = MAX(1, l2_sz / (l2_assoc * MMU_PAGESIZE)); 14920Sstevel@tonic-gate else 14930Sstevel@tonic-gate l2_colors = 1; 14940Sstevel@tonic-gate 14950Sstevel@tonic-gate /* for scalability, configure at least PAGE_COLORS_MIN color bins */ 14960Sstevel@tonic-gate page_colors = MAX(l2_colors, PAGE_COLORS_MIN); 14970Sstevel@tonic-gate 14980Sstevel@tonic-gate /* 14990Sstevel@tonic-gate * cpu_page_colors is non-zero when a page color may be spread across 15000Sstevel@tonic-gate * multiple bins. 15010Sstevel@tonic-gate */ 15020Sstevel@tonic-gate if (l2_colors < page_colors) 15030Sstevel@tonic-gate cpu_page_colors = l2_colors; 15040Sstevel@tonic-gate 15050Sstevel@tonic-gate ASSERT(ISP2(page_colors)); 15060Sstevel@tonic-gate 15070Sstevel@tonic-gate page_colors_mask = page_colors - 1; 15080Sstevel@tonic-gate 15090Sstevel@tonic-gate ASSERT(ISP2(CPUSETSIZE())); 15100Sstevel@tonic-gate page_coloring_shift = lowbit(CPUSETSIZE()); 15110Sstevel@tonic-gate 15122961Sdp78419 /* initialize number of colors per page size */ 15132961Sdp78419 for (i = 0; i <= mmu.max_page_level; i++) { 15142961Sdp78419 hw_page_array[i].hp_size = LEVEL_SIZE(i); 15152961Sdp78419 hw_page_array[i].hp_shift = LEVEL_SHIFT(i); 15162961Sdp78419 hw_page_array[i].hp_pgcnt = LEVEL_SIZE(i) >> LEVEL_SHIFT(0); 15172961Sdp78419 hw_page_array[i].hp_colors = (page_colors_mask >> 15182961Sdp78419 (hw_page_array[i].hp_shift - hw_page_array[0].hp_shift)) 15192961Sdp78419 + 1; 15203717Sdp78419 colorequivszc[i] = 0; 15212961Sdp78419 } 15222961Sdp78419 15232961Sdp78419 /* 15242961Sdp78419 * The value of cpu_page_colors determines if additional color bins 15252961Sdp78419 * need to be checked for a particular color in the page_get routines. 15262961Sdp78419 */ 15272961Sdp78419 if (cpu_page_colors != 0) { 15282961Sdp78419 15292961Sdp78419 int a = lowbit(page_colors) - lowbit(cpu_page_colors); 15302961Sdp78419 ASSERT(a > 0); 15312961Sdp78419 ASSERT(a < 16); 15322961Sdp78419 15332961Sdp78419 for (i = 0; i <= mmu.max_page_level; i++) { 15342961Sdp78419 if ((colors = hw_page_array[i].hp_colors) <= 1) { 15352961Sdp78419 colorequivszc[i] = 0; 15362961Sdp78419 continue; 15372961Sdp78419 } 15382961Sdp78419 while ((colors >> a) == 0) 15392961Sdp78419 a--; 15402961Sdp78419 ASSERT(a >= 0); 15412961Sdp78419 15422961Sdp78419 /* higher 4 bits encodes color equiv mask */ 15432961Sdp78419 colorequivszc[i] = (a << 4); 15442961Sdp78419 } 15452961Sdp78419 } 15462961Sdp78419 15475084Sjohnlev /* factor in colorequiv to check additional 'equivalent' bins. 
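 *
 * Worked example (editor's sketch, using the encoding established above):
 * with cpu_page_colors = 4 and page_colors = 16, a = lowbit(16) - lowbit(4)
 * = 2, so for each page size with at least (1 << a) colors the upper
 * nibble of colorequivszc[i] becomes 2, i.e. the value 0x20: groups of
 * 1 << 2 = 4 adjacent bins are then searched as color-equivalent.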
*/ 15485084Sjohnlev if (colorequiv > 1) { 15495084Sjohnlev 15505084Sjohnlev int a = lowbit(colorequiv) - 1; 15515084Sjohnlev if (a > 15) 15525084Sjohnlev a = 15; 15535084Sjohnlev 15545084Sjohnlev for (i = 0; i <= mmu.max_page_level; i++) { 15555084Sjohnlev if ((colors = hw_page_array[i].hp_colors) <= 1) { 15565084Sjohnlev continue; 15575084Sjohnlev } 15585084Sjohnlev while ((colors >> a) == 0) 15595084Sjohnlev a--; 15605084Sjohnlev if ((a << 4) > colorequivszc[i]) { 15615084Sjohnlev colorequivszc[i] = (a << 4); 15625084Sjohnlev } 15635084Sjohnlev } 15645084Sjohnlev } 15655084Sjohnlev 15660Sstevel@tonic-gate /* size for mnoderanges */ 15672961Sdp78419 for (mnoderangecnt = 0, i = 0; i < max_mem_nodes; i++) 15682961Sdp78419 mnoderangecnt += mnode_range_cnt(i); 15690Sstevel@tonic-gate colorsz = mnoderangecnt * sizeof (mnoderange_t); 15700Sstevel@tonic-gate 15710Sstevel@tonic-gate /* size for fpc_mutex and cpc_mutex */ 15720Sstevel@tonic-gate colorsz += (2 * max_mem_nodes * sizeof (kmutex_t) * NPC_MUTEX); 15730Sstevel@tonic-gate 15740Sstevel@tonic-gate /* size of page_freelists */ 15750Sstevel@tonic-gate colorsz += mnoderangecnt * sizeof (page_t ***); 15760Sstevel@tonic-gate colorsz += mnoderangecnt * mmu_page_sizes * sizeof (page_t **); 15770Sstevel@tonic-gate 15780Sstevel@tonic-gate for (i = 0; i < mmu_page_sizes; i++) { 15790Sstevel@tonic-gate colors = page_get_pagecolors(i); 15800Sstevel@tonic-gate colorsz += mnoderangecnt * colors * sizeof (page_t *); 15810Sstevel@tonic-gate } 15820Sstevel@tonic-gate 15830Sstevel@tonic-gate /* size of page_cachelists */ 15840Sstevel@tonic-gate colorsz += mnoderangecnt * sizeof (page_t **); 15850Sstevel@tonic-gate colorsz += mnoderangecnt * page_colors * sizeof (page_t *); 15860Sstevel@tonic-gate 15870Sstevel@tonic-gate return (colorsz); 15880Sstevel@tonic-gate } 15890Sstevel@tonic-gate 15900Sstevel@tonic-gate /* 15910Sstevel@tonic-gate * Called once at startup to configure page_coloring data structures and 15920Sstevel@tonic-gate * does the 1st page_free()/page_freelist_add(). 
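 *
 * Layout note (editor's summary of the carving done below): the single
 * buffer passed in, sized by page_coloring_init() above, is consumed
 * sequentially in this order:
 *	mnoderanges array;
 *	NPC_MUTEX arrays of fpc_mutex, then of cpc_mutex;
 *	page_freelists pointer tables plus per-pagesize, per-color heads;
 *	page_cachelists pointer tables plus per-color heads.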
15930Sstevel@tonic-gate */ 15940Sstevel@tonic-gate void 15950Sstevel@tonic-gate page_coloring_setup(caddr_t pcmemaddr) 15960Sstevel@tonic-gate { 15970Sstevel@tonic-gate int i; 15980Sstevel@tonic-gate int j; 15990Sstevel@tonic-gate int k; 16000Sstevel@tonic-gate caddr_t addr; 16010Sstevel@tonic-gate int colors; 16020Sstevel@tonic-gate 16030Sstevel@tonic-gate /* 16040Sstevel@tonic-gate * do page coloring setup 16050Sstevel@tonic-gate */ 16060Sstevel@tonic-gate addr = pcmemaddr; 16070Sstevel@tonic-gate 16080Sstevel@tonic-gate mnoderanges = (mnoderange_t *)addr; 16090Sstevel@tonic-gate addr += (mnoderangecnt * sizeof (mnoderange_t)); 16100Sstevel@tonic-gate 16110Sstevel@tonic-gate mnode_range_setup(mnoderanges); 16120Sstevel@tonic-gate 16130Sstevel@tonic-gate if (physmax4g) 16140Sstevel@tonic-gate mtype4g = pfn_2_mtype(0xfffff); 16150Sstevel@tonic-gate 16160Sstevel@tonic-gate for (k = 0; k < NPC_MUTEX; k++) { 16170Sstevel@tonic-gate fpc_mutex[k] = (kmutex_t *)addr; 16180Sstevel@tonic-gate addr += (max_mem_nodes * sizeof (kmutex_t)); 16190Sstevel@tonic-gate } 16200Sstevel@tonic-gate for (k = 0; k < NPC_MUTEX; k++) { 16210Sstevel@tonic-gate cpc_mutex[k] = (kmutex_t *)addr; 16220Sstevel@tonic-gate addr += (max_mem_nodes * sizeof (kmutex_t)); 16230Sstevel@tonic-gate } 16240Sstevel@tonic-gate page_freelists = (page_t ****)addr; 16250Sstevel@tonic-gate addr += (mnoderangecnt * sizeof (page_t ***)); 16260Sstevel@tonic-gate 16270Sstevel@tonic-gate page_cachelists = (page_t ***)addr; 16280Sstevel@tonic-gate addr += (mnoderangecnt * sizeof (page_t **)); 16290Sstevel@tonic-gate 16300Sstevel@tonic-gate for (i = 0; i < mnoderangecnt; i++) { 16310Sstevel@tonic-gate page_freelists[i] = (page_t ***)addr; 16320Sstevel@tonic-gate addr += (mmu_page_sizes * sizeof (page_t **)); 16330Sstevel@tonic-gate 16340Sstevel@tonic-gate for (j = 0; j < mmu_page_sizes; j++) { 16350Sstevel@tonic-gate colors = page_get_pagecolors(j); 16360Sstevel@tonic-gate page_freelists[i][j] = (page_t **)addr; 16370Sstevel@tonic-gate addr += (colors * sizeof (page_t *)); 16380Sstevel@tonic-gate } 16390Sstevel@tonic-gate page_cachelists[i] = (page_t **)addr; 16400Sstevel@tonic-gate addr += (page_colors * sizeof (page_t *)); 16410Sstevel@tonic-gate } 16420Sstevel@tonic-gate } 16430Sstevel@tonic-gate 16445084Sjohnlev #if defined(__xpv) 16455084Sjohnlev /* 16465084Sjohnlev * Give back 10% of the io_pool pages to the free list. 16475084Sjohnlev * Don't shrink the pool below some absolute minimum. 16485084Sjohnlev */ 16495084Sjohnlev static void 16505084Sjohnlev page_io_pool_shrink() 16515084Sjohnlev { 16525084Sjohnlev int retcnt; 16535084Sjohnlev page_t *pp, *pp_first, *pp_last, **curpool; 16545084Sjohnlev mfn_t mfn; 16555084Sjohnlev int bothpools = 0; 16565084Sjohnlev 16575084Sjohnlev mutex_enter(&io_pool_lock); 16585084Sjohnlev io_pool_shrink_attempts++; /* should be a kstat? */ 16595084Sjohnlev retcnt = io_pool_cnt / 10; 16605084Sjohnlev if (io_pool_cnt - retcnt < io_pool_cnt_min) 16615084Sjohnlev retcnt = io_pool_cnt - io_pool_cnt_min; 16625084Sjohnlev if (retcnt <= 0) 16635084Sjohnlev goto done; 16645084Sjohnlev io_pool_shrinks++; /* should be a kstat? */ 16655084Sjohnlev curpool = &io_pool_4g; 16665084Sjohnlev domore: 16675084Sjohnlev /* 16685084Sjohnlev * Loop through taking pages from the end of the list 16695084Sjohnlev * (highest mfns) till amount to return reached. 
16705084Sjohnlev */ 16715084Sjohnlev for (pp = *curpool; pp && retcnt > 0; ) { 16725084Sjohnlev pp_first = pp_last = pp->p_prev; 16735084Sjohnlev if (pp_first == *curpool) 16745084Sjohnlev break; 16755084Sjohnlev retcnt--; 16765084Sjohnlev io_pool_cnt--; 16775084Sjohnlev page_io_pool_sub(curpool, pp_first, pp_last); 16785084Sjohnlev if ((mfn = pfn_to_mfn(pp->p_pagenum)) < start_mfn) 16795084Sjohnlev start_mfn = mfn; 16805084Sjohnlev page_free(pp_first, 1); 16815084Sjohnlev pp = *curpool; 16825084Sjohnlev } 16835084Sjohnlev if (retcnt != 0 && !bothpools) { 16845084Sjohnlev /* 16855084Sjohnlev * If not enough found in less constrained pool try the 16865084Sjohnlev * more constrained one. 16875084Sjohnlev */ 16885084Sjohnlev curpool = &io_pool_16m; 16895084Sjohnlev bothpools = 1; 16905084Sjohnlev goto domore; 16915084Sjohnlev } 16925084Sjohnlev done: 16935084Sjohnlev mutex_exit(&io_pool_lock); 16945084Sjohnlev } 16955084Sjohnlev 16965084Sjohnlev #endif /* __xpv */ 16975084Sjohnlev 16985084Sjohnlev uint_t 16995084Sjohnlev page_create_update_flags_x86(uint_t flags) 17005084Sjohnlev { 17015084Sjohnlev #if defined(__xpv) 17025084Sjohnlev /* 17035084Sjohnlev * Check this is an urgent allocation and free pages are depleted. 17045084Sjohnlev */ 17055084Sjohnlev if (!(flags & PG_WAIT) && freemem < desfree) 17065084Sjohnlev page_io_pool_shrink(); 17075084Sjohnlev #else /* !__xpv */ 17085084Sjohnlev /* 17095084Sjohnlev * page_create_get_something may call this because 4g memory may be 17105084Sjohnlev * depleted. Set flags to allow for relocation of base page below 17115084Sjohnlev * 4g if necessary. 17125084Sjohnlev */ 17135084Sjohnlev if (physmax4g) 17145084Sjohnlev flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI); 17155084Sjohnlev #endif /* __xpv */ 17165084Sjohnlev return (flags); 17175084Sjohnlev } 17185084Sjohnlev 17190Sstevel@tonic-gate /*ARGSUSED*/ 17200Sstevel@tonic-gate int 17210Sstevel@tonic-gate bp_color(struct buf *bp) 17220Sstevel@tonic-gate { 17230Sstevel@tonic-gate return (0); 17240Sstevel@tonic-gate } 17250Sstevel@tonic-gate 17265084Sjohnlev #if defined(__xpv) 17275084Sjohnlev 17285084Sjohnlev /* 17295084Sjohnlev * Take pages out of an io_pool 17305084Sjohnlev */ 17315084Sjohnlev static void 17325084Sjohnlev page_io_pool_sub(page_t **poolp, page_t *pp_first, page_t *pp_last) 17335084Sjohnlev { 17345084Sjohnlev if (*poolp == pp_first) { 17355084Sjohnlev *poolp = pp_last->p_next; 17365084Sjohnlev if (*poolp == pp_first) 17375084Sjohnlev *poolp = NULL; 17385084Sjohnlev } 17395084Sjohnlev pp_first->p_prev->p_next = pp_last->p_next; 17405084Sjohnlev pp_last->p_next->p_prev = pp_first->p_prev; 17415084Sjohnlev pp_first->p_prev = pp_last; 17425084Sjohnlev pp_last->p_next = pp_first; 17435084Sjohnlev } 17445084Sjohnlev 17455084Sjohnlev /* 17465084Sjohnlev * Put a page on the io_pool list. The list is ordered by increasing MFN. 17475084Sjohnlev */ 17485084Sjohnlev static void 17495084Sjohnlev page_io_pool_add(page_t **poolp, page_t *pp) 17505084Sjohnlev { 17515084Sjohnlev page_t *look; 17525084Sjohnlev mfn_t mfn = mfn_list[pp->p_pagenum]; 17535084Sjohnlev 17545084Sjohnlev if (*poolp == NULL) { 17555084Sjohnlev *poolp = pp; 17565084Sjohnlev pp->p_next = pp; 17575084Sjohnlev pp->p_prev = pp; 17585084Sjohnlev return; 17595084Sjohnlev } 17605084Sjohnlev 17615084Sjohnlev /* 17625084Sjohnlev * Since we try to take pages from the high end of the pool 17635084Sjohnlev * chances are good that the pages to be put on the list will 17645084Sjohnlev * go at or near the end of the list. 
so start at the end and 17655084Sjohnlev * work backwards. 17665084Sjohnlev */ 17675084Sjohnlev look = (*poolp)->p_prev; 17685084Sjohnlev while (mfn < mfn_list[look->p_pagenum]) { 17695084Sjohnlev look = look->p_prev; 17705084Sjohnlev if (look == (*poolp)->p_prev) 17715084Sjohnlev break; /* backed all the way to front of list */ 17725084Sjohnlev } 17735084Sjohnlev 17745084Sjohnlev /* insert after look */ 17755084Sjohnlev pp->p_prev = look; 17765084Sjohnlev pp->p_next = look->p_next; 17775084Sjohnlev pp->p_next->p_prev = pp; 17785084Sjohnlev look->p_next = pp; 17795084Sjohnlev if (mfn < mfn_list[(*poolp)->p_pagenum]) { 17805084Sjohnlev /* 17815084Sjohnlev * we inserted a new first list element 17825084Sjohnlev * adjust pool pointer to newly inserted element 17835084Sjohnlev */ 17845084Sjohnlev *poolp = pp; 17855084Sjohnlev } 17865084Sjohnlev } 17875084Sjohnlev 17885084Sjohnlev /* 17895084Sjohnlev * Add a page to the io_pool. Setting the force flag will force the page 17905084Sjohnlev * into the io_pool no matter what. 17915084Sjohnlev */ 17925084Sjohnlev static void 17935084Sjohnlev add_page_to_pool(page_t *pp, int force) 17945084Sjohnlev { 17955084Sjohnlev page_t *highest; 17965084Sjohnlev page_t *freep = NULL; 17975084Sjohnlev 17985084Sjohnlev mutex_enter(&io_pool_lock); 17995084Sjohnlev /* 18005084Sjohnlev * Always keep the scarce low memory pages 18015084Sjohnlev */ 18025084Sjohnlev if (mfn_list[pp->p_pagenum] < PFN_16MEG) { 18035084Sjohnlev ++io_pool_cnt; 18045084Sjohnlev page_io_pool_add(&io_pool_16m, pp); 18055084Sjohnlev goto done; 18065084Sjohnlev } 18075084Sjohnlev if (io_pool_cnt < io_pool_cnt_max || force) { 18085084Sjohnlev ++io_pool_cnt; 18095084Sjohnlev page_io_pool_add(&io_pool_4g, pp); 18105084Sjohnlev } else { 18115084Sjohnlev highest = io_pool_4g->p_prev; 18125084Sjohnlev if (mfn_list[pp->p_pagenum] < mfn_list[highest->p_pagenum]) { 18135084Sjohnlev page_io_pool_sub(&io_pool_4g, highest, highest); 18145084Sjohnlev page_io_pool_add(&io_pool_4g, pp); 18155084Sjohnlev freep = highest; 18165084Sjohnlev } else { 18175084Sjohnlev freep = pp; 18185084Sjohnlev } 18195084Sjohnlev } 18205084Sjohnlev done: 18215084Sjohnlev mutex_exit(&io_pool_lock); 18225084Sjohnlev if (freep) 18235084Sjohnlev page_free(freep, 1); 18245084Sjohnlev } 18255084Sjohnlev 18265084Sjohnlev 18275084Sjohnlev int contig_pfn_cnt; /* no of pfns in the contig pfn list */ 18285084Sjohnlev int contig_pfn_max; /* capacity of the contig pfn list */ 18295084Sjohnlev int next_alloc_pfn; /* next position in list to start a contig search */ 18305084Sjohnlev int contig_pfnlist_updates; /* pfn list update count */ 18315084Sjohnlev int contig_pfnlist_locked; /* contig pfn list locked against use */ 18325084Sjohnlev int contig_pfnlist_builds; /* how many times have we (re)built list */ 18335084Sjohnlev int contig_pfnlist_buildfailed; /* how many times has list build failed */ 18345084Sjohnlev int create_contig_pending; /* nonzero means taskq creating contig list */ 18355084Sjohnlev pfn_t *contig_pfn_list = NULL; /* list of contig pfns in ascending mfn order */ 18365084Sjohnlev 18375084Sjohnlev /* 18385084Sjohnlev * Function to use in sorting a list of pfns by their underlying mfns. 
18395084Sjohnlev */
18405084Sjohnlev static int
18415084Sjohnlev mfn_compare(const void *pfnp1, const void *pfnp2)
18425084Sjohnlev {
18435084Sjohnlev mfn_t mfn1 = mfn_list[*(pfn_t *)pfnp1];
18445084Sjohnlev mfn_t mfn2 = mfn_list[*(pfn_t *)pfnp2];
18455084Sjohnlev
18465084Sjohnlev if (mfn1 > mfn2)
18475084Sjohnlev return (1);
18485084Sjohnlev if (mfn1 < mfn2)
18495084Sjohnlev return (-1);
18505084Sjohnlev return (0);
18515084Sjohnlev }
18525084Sjohnlev
18535084Sjohnlev /*
18545084Sjohnlev * Compact the contig_pfn_list by tossing all the non-contiguous
18555084Sjohnlev * elements from the list.
18565084Sjohnlev */
18575084Sjohnlev static void
18585084Sjohnlev compact_contig_pfn_list(void)
18595084Sjohnlev {
18605084Sjohnlev pfn_t pfn, lapfn, prev_lapfn;
18615084Sjohnlev mfn_t mfn;
18625084Sjohnlev int i, newcnt = 0;
18635084Sjohnlev
18645084Sjohnlev prev_lapfn = 0;
18655084Sjohnlev for (i = 0; i < contig_pfn_cnt - 1; i++) {
18665084Sjohnlev pfn = contig_pfn_list[i];
18675084Sjohnlev lapfn = contig_pfn_list[i + 1];
18685084Sjohnlev mfn = mfn_list[pfn];
18695084Sjohnlev /*
18705084Sjohnlev * See if the next pfn is for a contig mfn
18715084Sjohnlev */
18725084Sjohnlev if (mfn_list[lapfn] != mfn + 1)
18735084Sjohnlev continue;
18745084Sjohnlev /*
18755084Sjohnlev * pfn and lookahead are both put in the list
18765084Sjohnlev * unless pfn is the previous lookahead.
18775084Sjohnlev */
18785084Sjohnlev if (pfn != prev_lapfn)
18795084Sjohnlev contig_pfn_list[newcnt++] = pfn;
18805084Sjohnlev contig_pfn_list[newcnt++] = lapfn;
18815084Sjohnlev prev_lapfn = lapfn;
18825084Sjohnlev }
18835084Sjohnlev for (i = newcnt; i < contig_pfn_cnt; i++)
18845084Sjohnlev contig_pfn_list[i] = 0;
18855084Sjohnlev contig_pfn_cnt = newcnt;
18865084Sjohnlev }
18875084Sjohnlev
18885084Sjohnlev /*ARGSUSED*/
18895084Sjohnlev static void
18905084Sjohnlev call_create_contiglist(void *arg)
18915084Sjohnlev {
18925084Sjohnlev mutex_enter(&io_pool_lock);
18935084Sjohnlev (void) create_contig_pfnlist(PG_WAIT);
18945084Sjohnlev create_contig_pending = 0;
18955084Sjohnlev mutex_exit(&io_pool_lock);
18965084Sjohnlev }
18975084Sjohnlev
18985084Sjohnlev /*
18995084Sjohnlev * Create a list of freelist pfns that have underlying
19005084Sjohnlev * contiguous mfns. The list is kept in ascending mfn order.
19015084Sjohnlev * Returns 1 if the list was created, else 0.
19025084Sjohnlev */
19035084Sjohnlev static int
19045084Sjohnlev create_contig_pfnlist(uint_t flags)
19055084Sjohnlev {
19065084Sjohnlev pfn_t pfn;
19075084Sjohnlev page_t *pp;
19085084Sjohnlev
19095084Sjohnlev if (contig_pfn_list != NULL)
19105084Sjohnlev return (1);
19115084Sjohnlev ASSERT(!contig_pfnlist_locked);
19125084Sjohnlev contig_pfn_max = freemem + (freemem / 10);
19135084Sjohnlev contig_pfn_list = kmem_zalloc(contig_pfn_max * sizeof (pfn_t),
19145084Sjohnlev (flags & PG_WAIT) ? KM_SLEEP : KM_NOSLEEP);
19155084Sjohnlev if (contig_pfn_list == NULL) {
19165084Sjohnlev /*
19175084Sjohnlev * If we could not create the contig list (because
19185084Sjohnlev * we could not sleep for memory), dispatch a taskq that can
19195084Sjohnlev * sleep to get the memory.
19205084Sjohnlev */ 19215084Sjohnlev if (!create_contig_pending) { 19225084Sjohnlev if (taskq_dispatch(system_taskq, call_create_contiglist, 19235084Sjohnlev NULL, TQ_NOSLEEP) != NULL) 19245084Sjohnlev create_contig_pending = 1; 19255084Sjohnlev } 19265084Sjohnlev contig_pfnlist_buildfailed++; /* count list build failures */ 19275084Sjohnlev return (0); 19285084Sjohnlev } 19295084Sjohnlev ASSERT(contig_pfn_cnt == 0); 19305084Sjohnlev for (pfn = 0; pfn < mfn_count; pfn++) { 19315084Sjohnlev pp = page_numtopp_nolock(pfn); 19325084Sjohnlev if (pp == NULL || !PP_ISFREE(pp)) 19335084Sjohnlev continue; 19345084Sjohnlev contig_pfn_list[contig_pfn_cnt] = pfn; 19355084Sjohnlev if (++contig_pfn_cnt == contig_pfn_max) 19365084Sjohnlev break; 19375084Sjohnlev } 19385084Sjohnlev qsort(contig_pfn_list, contig_pfn_cnt, sizeof (pfn_t), mfn_compare); 19395084Sjohnlev compact_contig_pfn_list(); 19405084Sjohnlev /* 19415084Sjohnlev * Make sure next search of the newly created contiguous pfn 19425084Sjohnlev * list starts at the beginning of the list. 19435084Sjohnlev */ 19445084Sjohnlev next_alloc_pfn = 0; 19455084Sjohnlev contig_pfnlist_builds++; /* count list builds */ 19465084Sjohnlev return (1); 19475084Sjohnlev } 19485084Sjohnlev 19495084Sjohnlev 19505084Sjohnlev /* 19515084Sjohnlev * Toss the current contig pfnlist. Someone is about to do a massive 19525084Sjohnlev * update to pfn<->mfn mappings. So we have them destroy the list and lock 19535084Sjohnlev * it till they are done with their update. 19545084Sjohnlev */ 19555084Sjohnlev void 19565084Sjohnlev clear_and_lock_contig_pfnlist() 19575084Sjohnlev { 19585084Sjohnlev pfn_t *listp = NULL; 19595084Sjohnlev size_t listsize; 19605084Sjohnlev 19615084Sjohnlev mutex_enter(&io_pool_lock); 19625084Sjohnlev ASSERT(!contig_pfnlist_locked); 19635084Sjohnlev if (contig_pfn_list != NULL) { 19645084Sjohnlev listp = contig_pfn_list; 19655084Sjohnlev listsize = contig_pfn_max * sizeof (pfn_t); 19665084Sjohnlev contig_pfn_list = NULL; 19675084Sjohnlev contig_pfn_max = contig_pfn_cnt = 0; 19685084Sjohnlev } 19695084Sjohnlev contig_pfnlist_locked = 1; 19705084Sjohnlev mutex_exit(&io_pool_lock); 19715084Sjohnlev if (listp != NULL) 19725084Sjohnlev kmem_free(listp, listsize); 19735084Sjohnlev } 19745084Sjohnlev 19755084Sjohnlev /* 19765084Sjohnlev * Unlock the contig_pfn_list. The next attempted use of it will cause 19775084Sjohnlev * it to be re-created. 19785084Sjohnlev */ 19795084Sjohnlev void 19805084Sjohnlev unlock_contig_pfnlist() 19815084Sjohnlev { 19825084Sjohnlev mutex_enter(&io_pool_lock); 19835084Sjohnlev ASSERT(contig_pfnlist_locked); 19845084Sjohnlev contig_pfnlist_locked = 0; 19855084Sjohnlev mutex_exit(&io_pool_lock); 19865084Sjohnlev } 19875084Sjohnlev 19885084Sjohnlev /* 19895084Sjohnlev * Update the contiguous pfn list in response to a pfn <-> mfn reassignment 19905084Sjohnlev */ 19915084Sjohnlev void 19925084Sjohnlev update_contig_pfnlist(pfn_t pfn, mfn_t oldmfn, mfn_t newmfn) 19935084Sjohnlev { 19945084Sjohnlev int probe_hi, probe_lo, probe_pos, insert_after, insert_point; 19955084Sjohnlev pfn_t probe_pfn; 19965084Sjohnlev mfn_t probe_mfn; 19975084Sjohnlev 19985084Sjohnlev if (contig_pfn_list == NULL) 19995084Sjohnlev return; 20005084Sjohnlev mutex_enter(&io_pool_lock); 20015084Sjohnlev contig_pfnlist_updates++; 20025084Sjohnlev /* 20035084Sjohnlev * Find the pfn in the current list. Use a binary chop to locate it. 
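 *
 * Note that the chop below keys on oldmfn: the list is still sorted by
 * the pfn->mfn ordering that existed before this reassignment, so the
 * mfn the page used to have, not newmfn, locates pfn's old position.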
20045084Sjohnlev */
20055084Sjohnlev probe_hi = contig_pfn_cnt - 1;
20065084Sjohnlev probe_lo = 0;
20075084Sjohnlev probe_pos = (probe_hi + probe_lo) / 2;
20085084Sjohnlev while ((probe_pfn = contig_pfn_list[probe_pos]) != pfn) {
20095084Sjohnlev if (probe_pos == probe_lo) { /* pfn not in list */
20105084Sjohnlev probe_pos = -1;
20115084Sjohnlev break;
20125084Sjohnlev }
20135084Sjohnlev if (pfn_to_mfn(probe_pfn) <= oldmfn)
20145084Sjohnlev probe_lo = probe_pos;
20155084Sjohnlev else
20165084Sjohnlev probe_hi = probe_pos;
20175084Sjohnlev probe_pos = (probe_hi + probe_lo) / 2;
20185084Sjohnlev }
20195084Sjohnlev if (probe_pos >= 0) { /* remove pfn from list */
20205084Sjohnlev contig_pfn_cnt--;
20215084Sjohnlev ovbcopy(&contig_pfn_list[probe_pos + 1],
20225084Sjohnlev &contig_pfn_list[probe_pos],
20235084Sjohnlev (contig_pfn_cnt - probe_pos) * sizeof (pfn_t));
20245084Sjohnlev }
20255084Sjohnlev if (newmfn == MFN_INVALID)
20265084Sjohnlev goto done;
20275084Sjohnlev /*
20285084Sjohnlev * Check if the new mfn has adjacent mfns in the list
20295084Sjohnlev */
20305084Sjohnlev probe_hi = contig_pfn_cnt - 1;
20315084Sjohnlev probe_lo = 0;
20325084Sjohnlev insert_after = -2;
20335084Sjohnlev do {
20345084Sjohnlev probe_pos = (probe_hi + probe_lo) / 2;
20355084Sjohnlev probe_mfn = pfn_to_mfn(contig_pfn_list[probe_pos]);
20365084Sjohnlev if (newmfn == probe_mfn + 1)
20375084Sjohnlev insert_after = probe_pos;
20385084Sjohnlev else if (newmfn == probe_mfn - 1)
20395084Sjohnlev insert_after = probe_pos - 1;
20405084Sjohnlev if (probe_pos == probe_lo)
20415084Sjohnlev break;
20425084Sjohnlev if (probe_mfn <= newmfn)
20435084Sjohnlev probe_lo = probe_pos;
20445084Sjohnlev else
20455084Sjohnlev probe_hi = probe_pos;
20465084Sjohnlev } while (insert_after == -2);
20475084Sjohnlev /*
20485084Sjohnlev * If there is space in the list and there are adjacent mfns,
20495084Sjohnlev * insert the pfn into its proper place in the list.
20505084Sjohnlev */
20515084Sjohnlev if (insert_after != -2 && contig_pfn_cnt + 1 <= contig_pfn_max) {
20525084Sjohnlev insert_point = insert_after + 1;
20535084Sjohnlev ovbcopy(&contig_pfn_list[insert_point],
20545084Sjohnlev &contig_pfn_list[insert_point + 1],
20555084Sjohnlev (contig_pfn_cnt - insert_point) * sizeof (pfn_t));
20565084Sjohnlev contig_pfn_list[insert_point] = pfn;
20575084Sjohnlev contig_pfn_cnt++;
20585084Sjohnlev }
20595084Sjohnlev done:
20605084Sjohnlev mutex_exit(&io_pool_lock);
20615084Sjohnlev }
20625084Sjohnlev
20635084Sjohnlev /*
20645084Sjohnlev * Called to (re-)populate the io_pool from the free page lists.
20655084Sjohnlev */
20665084Sjohnlev long
20675084Sjohnlev populate_io_pool(void)
20685084Sjohnlev {
20695084Sjohnlev pfn_t pfn;
20705084Sjohnlev mfn_t mfn, max_mfn;
20715084Sjohnlev page_t *pp;
20725084Sjohnlev
20735084Sjohnlev /*
20745084Sjohnlev * Figure out the bounds of the pool on first invocation.
20755084Sjohnlev * We use a percentage of memory for the io pool size.
20765084Sjohnlev * We allow that to shrink, but not below a fixed minimum.
20775084Sjohnlev */
20785084Sjohnlev if (io_pool_cnt_max == 0) {
20795084Sjohnlev io_pool_cnt_max = physmem / (100 / io_pool_physmem_pct);
20805084Sjohnlev io_pool_cnt_lowater = io_pool_cnt_max;
20815084Sjohnlev /*
20825084Sjohnlev * This is the first time in populate_io_pool, grab a va to use
20835084Sjohnlev * when we need to allocate pages.
20845084Sjohnlev */
20855084Sjohnlev io_pool_kva = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
20865084Sjohnlev }
20875084Sjohnlev /*
20885084Sjohnlev * If we are out of pages in the pool, then grow the size of the pool
20895084Sjohnlev */
20905084Sjohnlev if (io_pool_cnt == 0) {
20915084Sjohnlev io_pool_cnt_max += io_pool_cnt_max / 20; /* grow by 5% */
20925084Sjohnlev io_pool_grows++; /* should be a kstat? */
20935084Sjohnlev }
20945084Sjohnlev /*
20955084Sjohnlev * Get highest mfn on this platform, but limit to the 32 bit DMA max.
20965084Sjohnlev */
20975084Sjohnlev (void) mfn_to_pfn(start_mfn);
20985084Sjohnlev max_mfn = MIN(cached_max_mfn, PFN_4GIG);
20995084Sjohnlev for (mfn = start_mfn; mfn < max_mfn; start_mfn = ++mfn) {
21005084Sjohnlev pfn = mfn_to_pfn(mfn);
21015084Sjohnlev if (pfn & PFN_IS_FOREIGN_MFN)
21025084Sjohnlev continue;
21035084Sjohnlev /*
21045084Sjohnlev * try to allocate it from free pages
21055084Sjohnlev */
21065084Sjohnlev pp = page_numtopp_alloc(pfn);
21075084Sjohnlev if (pp == NULL)
21085084Sjohnlev continue;
21095084Sjohnlev PP_CLRFREE(pp);
21105084Sjohnlev add_page_to_pool(pp, 1);
21115084Sjohnlev if (io_pool_cnt >= io_pool_cnt_max)
21125084Sjohnlev break;
21135084Sjohnlev }
21145084Sjohnlev
21155084Sjohnlev return (io_pool_cnt);
21165084Sjohnlev }
21175084Sjohnlev
21185084Sjohnlev /*
21195084Sjohnlev * Destroy a page that was being used for DMA I/O. It may or
21205084Sjohnlev * may not actually go back to the io_pool.
21215084Sjohnlev */
21225084Sjohnlev void
21235084Sjohnlev page_destroy_io(page_t *pp)
21245084Sjohnlev {
21255084Sjohnlev mfn_t mfn = mfn_list[pp->p_pagenum];
21265084Sjohnlev
21275084Sjohnlev /*
21285084Sjohnlev * When the page was alloc'd, a reservation was made; release it now.
21295084Sjohnlev */
21305084Sjohnlev page_unresv(1);
21315084Sjohnlev /*
21325084Sjohnlev * Unload translations, if any, then hash out the
21335084Sjohnlev * page to erase its identity.
21345084Sjohnlev */
21355084Sjohnlev (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
21365084Sjohnlev page_hashout(pp, NULL);
21375084Sjohnlev
21385084Sjohnlev /*
21395084Sjohnlev * If the page came from the free lists, just put it back on them.
21405084Sjohnlev * DomU pages always go on the free lists as well.
21415084Sjohnlev */
21425084Sjohnlev if (!DOMAIN_IS_INITDOMAIN(xen_info) || mfn >= PFN_4GIG) {
21435084Sjohnlev page_free(pp, 1);
21445084Sjohnlev return;
21455084Sjohnlev }
21465084Sjohnlev
21475084Sjohnlev add_page_to_pool(pp, 0);
21485084Sjohnlev }
21495084Sjohnlev
21505084Sjohnlev
21515084Sjohnlev long contig_searches; /* count of times contig pages requested */
21525084Sjohnlev long contig_search_restarts; /* count of contig ranges tried */
21535084Sjohnlev long contig_search_failed; /* count of contig alloc failures */
21545084Sjohnlev
21555084Sjohnlev /*
21565084Sjohnlev * Look thru the contiguous pfns that are not part of the io_pool for
21575084Sjohnlev * contiguous free pages. Return a list of the found pages or NULL.
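 *
 * The scan below resumes at next_alloc_pfn, where the previous call
 * left off, and wraps around the list at most once; whenever a probe
 * breaks contiguity the partial page list is freed and the scan simply
 * continues forward from that point.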
21585084Sjohnlev */ 21595084Sjohnlev page_t * 21605084Sjohnlev find_contig_free(uint_t bytes, uint_t flags) 21615084Sjohnlev { 21625084Sjohnlev page_t *pp, *plist = NULL; 21635084Sjohnlev mfn_t mfn, prev_mfn; 21645084Sjohnlev pfn_t pfn; 21655084Sjohnlev int pages_needed, pages_requested; 21665084Sjohnlev int search_start; 21675084Sjohnlev 21685084Sjohnlev /* 21695084Sjohnlev * create the contig pfn list if not already done 21705084Sjohnlev */ 21715084Sjohnlev if (contig_pfn_list == NULL) { 21725084Sjohnlev if (contig_pfnlist_locked) { 21735084Sjohnlev return (NULL); 21745084Sjohnlev } else { 21755084Sjohnlev if (!create_contig_pfnlist(flags)) 21765084Sjohnlev return (NULL); 21775084Sjohnlev } 21785084Sjohnlev } 21795084Sjohnlev contig_searches++; 21805084Sjohnlev /* 21815084Sjohnlev * Search contiguous pfn list for physically contiguous pages not in 21825084Sjohnlev * the io_pool. Start the search where the last search left off. 21835084Sjohnlev */ 21845084Sjohnlev pages_requested = pages_needed = mmu_btop(bytes); 21855084Sjohnlev search_start = next_alloc_pfn; 21865084Sjohnlev prev_mfn = 0; 21875084Sjohnlev while (pages_needed) { 21885084Sjohnlev pfn = contig_pfn_list[next_alloc_pfn]; 21895084Sjohnlev mfn = pfn_to_mfn(pfn); 21905084Sjohnlev if ((prev_mfn == 0 || mfn == prev_mfn + 1) && 21915084Sjohnlev (pp = page_numtopp_alloc(pfn)) != NULL) { 21925084Sjohnlev PP_CLRFREE(pp); 21935084Sjohnlev page_io_pool_add(&plist, pp); 21945084Sjohnlev pages_needed--; 21955084Sjohnlev prev_mfn = mfn; 21965084Sjohnlev } else { 21975084Sjohnlev contig_search_restarts++; 21985084Sjohnlev /* 21995084Sjohnlev * free partial page list 22005084Sjohnlev */ 22015084Sjohnlev while (plist != NULL) { 22025084Sjohnlev pp = plist; 22035084Sjohnlev page_io_pool_sub(&plist, pp, pp); 22045084Sjohnlev page_free(pp, 1); 22055084Sjohnlev } 22065084Sjohnlev pages_needed = pages_requested; 22075084Sjohnlev prev_mfn = 0; 22085084Sjohnlev } 22095084Sjohnlev if (++next_alloc_pfn == contig_pfn_cnt) 22105084Sjohnlev next_alloc_pfn = 0; 22115084Sjohnlev if (next_alloc_pfn == search_start) 22125084Sjohnlev break; /* all pfns searched */ 22135084Sjohnlev } 22145084Sjohnlev if (pages_needed) { 22155084Sjohnlev contig_search_failed++; 22165084Sjohnlev /* 22175084Sjohnlev * Failed to find enough contig pages. 22185084Sjohnlev * free partial page list 22195084Sjohnlev */ 22205084Sjohnlev while (plist != NULL) { 22215084Sjohnlev pp = plist; 22225084Sjohnlev page_io_pool_sub(&plist, pp, pp); 22235084Sjohnlev page_free(pp, 1); 22245084Sjohnlev } 22255084Sjohnlev } 22265084Sjohnlev return (plist); 22275084Sjohnlev } 22285084Sjohnlev 22295084Sjohnlev /* 22305084Sjohnlev * Allocator for domain 0 I/O pages. We match the required 22315084Sjohnlev * DMA attributes and contiguity constraints. 
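 *
 * A sketch of how a request is routed below: a legacy device whose
 * dma_attr_addr_hi computes hi_mfn < PFN_16MEG draws from io_pool_16m;
 * everything else starts with io_pool_4g and falls back to the more
 * constrained 16m pool only when the first pool comes up short.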
22325084Sjohnlev */ 22335084Sjohnlev /*ARGSUSED*/ 22345084Sjohnlev page_t * 22355084Sjohnlev page_create_io( 22365084Sjohnlev struct vnode *vp, 22375084Sjohnlev u_offset_t off, 22385084Sjohnlev uint_t bytes, 22395084Sjohnlev uint_t flags, 22405084Sjohnlev struct as *as, 22415084Sjohnlev caddr_t vaddr, 22425084Sjohnlev ddi_dma_attr_t *mattr) 22435084Sjohnlev { 22445084Sjohnlev mfn_t max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL); 22455084Sjohnlev page_t *pp_first; /* list to return */ 22465084Sjohnlev page_t *pp_last; /* last in list to return */ 22475084Sjohnlev page_t *pp, **poolp, **pplist = NULL, *expp; 22485084Sjohnlev int i, extpages = 0, npages = 0, contig, anyaddr, extra; 22495084Sjohnlev mfn_t lo_mfn; 22505084Sjohnlev mfn_t hi_mfn; 22515084Sjohnlev mfn_t mfn, tmfn; 22525084Sjohnlev mfn_t *mfnlist = 0; 22535084Sjohnlev pgcnt_t pfnalign = 0; 22545084Sjohnlev int align, order, nbits, extents; 22555084Sjohnlev uint64_t pfnseg; 22565084Sjohnlev int attempt = 0, is_domu = 0; 22575084Sjohnlev int asked_hypervisor = 0; 22585084Sjohnlev uint_t kflags; 22595084Sjohnlev 22605084Sjohnlev ASSERT(mattr != NULL); 22615084Sjohnlev lo_mfn = mmu_btop(mattr->dma_attr_addr_lo); 22625084Sjohnlev hi_mfn = mmu_btop(mattr->dma_attr_addr_hi); 22635084Sjohnlev align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer); 22645084Sjohnlev if (align > MMU_PAGESIZE) 22655084Sjohnlev pfnalign = mmu_btop(align); 22665084Sjohnlev pfnseg = mmu_btop(mattr->dma_attr_seg); 22675084Sjohnlev 22685084Sjohnlev /* 22695084Sjohnlev * Clear the contig flag if only one page is needed. 22705084Sjohnlev */ 22715084Sjohnlev contig = (flags & PG_PHYSCONTIG); 22725084Sjohnlev flags &= ~PG_PHYSCONTIG; 22735084Sjohnlev bytes = P2ROUNDUP(bytes, MMU_PAGESIZE); 22745084Sjohnlev if (bytes == MMU_PAGESIZE) 22755084Sjohnlev contig = 0; 22765084Sjohnlev 22775084Sjohnlev /* 22785084Sjohnlev * Check if any old page in the system is fine. 22795084Sjohnlev * DomU should always go down this path. 22805084Sjohnlev */ 22815084Sjohnlev is_domu = !DOMAIN_IS_INITDOMAIN(xen_info); 22825084Sjohnlev anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn && !pfnalign; 22835084Sjohnlev if ((!contig && anyaddr) || is_domu) { 22845084Sjohnlev pp = page_create_va(vp, off, bytes, flags, &kvseg, vaddr); 22855084Sjohnlev if (pp) 22865084Sjohnlev return (pp); 22875084Sjohnlev else if (is_domu) 22885084Sjohnlev return (NULL); /* no memory available */ 22895084Sjohnlev } 22905084Sjohnlev /* 22915084Sjohnlev * DomU should never reach here 22925084Sjohnlev */ 22935084Sjohnlev try_again: 22945084Sjohnlev /* 22955084Sjohnlev * We could just want unconstrained but contig pages. 22965084Sjohnlev */ 22975084Sjohnlev if (anyaddr && contig && pfnseg >= max_mfn) { 22985084Sjohnlev /* 22995084Sjohnlev * Look for free contig pages to satisfy the request. 23005084Sjohnlev */ 23015084Sjohnlev mutex_enter(&io_pool_lock); 23025084Sjohnlev pp_first = find_contig_free(bytes, flags); 23035084Sjohnlev mutex_exit(&io_pool_lock); 23045084Sjohnlev if (pp_first != NULL) 23055084Sjohnlev goto done; 23065084Sjohnlev } 23075084Sjohnlev /* 23085084Sjohnlev * See if we want pages for a legacy device 23095084Sjohnlev */ 23105084Sjohnlev if (hi_mfn < PFN_16MEG) 23115084Sjohnlev poolp = &io_pool_16m; 23125084Sjohnlev else 23135084Sjohnlev poolp = &io_pool_4g; 23145084Sjohnlev try_smaller: 23155084Sjohnlev /* 23165084Sjohnlev * Take pages from I/O pool. We'll use pages from the highest MFN 23175084Sjohnlev * range possible. 
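 *
 * Because each pool is kept in ascending mfn order, the loop below
 * starts at the list tail (p_prev of the head) and walks backwards,
 * skipping mfns above the allowable range and stopping once it walks
 * below it.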
23185084Sjohnlev */
23195084Sjohnlev pp_first = pp_last = NULL;
23205084Sjohnlev npages = mmu_btop(bytes);
23215084Sjohnlev mutex_enter(&io_pool_lock);
23225084Sjohnlev for (pp = *poolp; pp && npages > 0; ) {
23235084Sjohnlev pp = pp->p_prev;
23245084Sjohnlev
23255084Sjohnlev /*
23265084Sjohnlev * skip pages above allowable range
23275084Sjohnlev */
23285084Sjohnlev mfn = mfn_list[pp->p_pagenum];
23295084Sjohnlev if (hi_mfn < mfn)
23305084Sjohnlev goto skip;
23315084Sjohnlev
23325084Sjohnlev /*
23335084Sjohnlev * stop at pages below allowable range
23345084Sjohnlev */
23355084Sjohnlev if (lo_mfn > mfn)
23365084Sjohnlev break;
23375084Sjohnlev restart:
23385084Sjohnlev if (pp_last == NULL) {
23395084Sjohnlev /*
23405084Sjohnlev * Check alignment
23415084Sjohnlev */
23425084Sjohnlev tmfn = mfn - (npages - 1);
23435084Sjohnlev if (pfnalign) {
23445084Sjohnlev if (tmfn != P2ROUNDUP(tmfn, pfnalign))
23455084Sjohnlev goto skip; /* not properly aligned */
23465084Sjohnlev }
23475084Sjohnlev /*
23485084Sjohnlev * Check segment
23495084Sjohnlev */
23505084Sjohnlev if ((mfn & pfnseg) < (tmfn & pfnseg))
23515084Sjohnlev goto skip; /* crosses segment boundary */
23525084Sjohnlev /*
23535084Sjohnlev * Start building the page list
23545084Sjohnlev */
23555084Sjohnlev pp_first = pp_last = pp;
23565084Sjohnlev npages--;
23575084Sjohnlev } else {
23585084Sjohnlev /*
23595084Sjohnlev * check physical contiguity if required
23605084Sjohnlev */
23615084Sjohnlev if (contig &&
23625084Sjohnlev mfn_list[pp_first->p_pagenum] != mfn + 1) {
23635084Sjohnlev /*
23645084Sjohnlev * not a contiguous page, restart the list.
23655084Sjohnlev */
23665084Sjohnlev pp_last = NULL;
23675084Sjohnlev npages = mmu_btop(bytes);
23685084Sjohnlev goto restart;
23695084Sjohnlev } else { /* add page to list */
23705084Sjohnlev pp_first = pp;
23715084Sjohnlev --npages;
23725084Sjohnlev }
23735084Sjohnlev }
23745084Sjohnlev skip:
23755084Sjohnlev if (pp == *poolp)
23765084Sjohnlev break;
23775084Sjohnlev }
23785084Sjohnlev
23795084Sjohnlev /*
23805084Sjohnlev * If we didn't find memory, try the more constrained pool, then
23815084Sjohnlev * sweep free pages into the DMA pool and try again. If we fail
23825084Sjohnlev * repeatedly, ask the Hypervisor for help.
23835084Sjohnlev */
23845084Sjohnlev if (npages != 0) {
23855084Sjohnlev mutex_exit(&io_pool_lock);
23865084Sjohnlev /*
23875084Sjohnlev * If we were looking in the less constrained pool and didn't
23885084Sjohnlev * find pages, try the more constrained pool.
23895084Sjohnlev */
23905084Sjohnlev if (poolp == &io_pool_4g) {
23915084Sjohnlev poolp = &io_pool_16m;
23925084Sjohnlev goto try_smaller;
23935084Sjohnlev }
23945084Sjohnlev kmem_reap();
23955084Sjohnlev if (++attempt < 4) {
23965084Sjohnlev /*
23975084Sjohnlev * Grab some more io_pool pages
23985084Sjohnlev */
23995084Sjohnlev (void) populate_io_pool();
24005084Sjohnlev goto try_again;
24015084Sjohnlev }
24025084Sjohnlev
24035084Sjohnlev if (asked_hypervisor++)
24045084Sjohnlev return (NULL); /* really out of luck */
24055084Sjohnlev /*
24065084Sjohnlev * Hypervisor exchange doesn't handle segment or alignment
24075084Sjohnlev * constraints
24085084Sjohnlev */
24095084Sjohnlev if (mattr->dma_attr_seg < mattr->dma_attr_addr_hi || pfnalign)
24105084Sjohnlev return (NULL);
24115084Sjohnlev /*
24125084Sjohnlev * Try exchanging pages with the hypervisor.
24135084Sjohnlev */
24145084Sjohnlev npages = mmu_btop(bytes);
24155084Sjohnlev kflags = flags & PG_WAIT ?
KM_SLEEP : KM_NOSLEEP; 24165084Sjohnlev /* 24175084Sjohnlev * Hypervisor will allocate extents, if we want contig pages 24185084Sjohnlev * extent must be >= npages 24195084Sjohnlev */ 24205084Sjohnlev if (contig) { 24215084Sjohnlev order = highbit(npages) - 1; 24225084Sjohnlev if (npages & ((1 << order) - 1)) 24235084Sjohnlev order++; 24245084Sjohnlev extpages = 1 << order; 24255084Sjohnlev } else { 24265084Sjohnlev order = 0; 24275084Sjohnlev extpages = npages; 24285084Sjohnlev } 24295084Sjohnlev if (extpages > npages) { 24305084Sjohnlev extra = extpages - npages; 24315084Sjohnlev if (!page_resv(extra, kflags)) 24325084Sjohnlev return (NULL); 24335084Sjohnlev } 24345084Sjohnlev pplist = kmem_alloc(extpages * sizeof (page_t *), kflags); 24355084Sjohnlev if (pplist == NULL) 24365084Sjohnlev goto fail; 24375084Sjohnlev mfnlist = kmem_alloc(extpages * sizeof (mfn_t), kflags); 24385084Sjohnlev if (mfnlist == NULL) 24395084Sjohnlev goto fail; 24405084Sjohnlev pp = page_create_va(vp, off, npages * PAGESIZE, flags, 24415084Sjohnlev &kvseg, vaddr); 24425084Sjohnlev if (pp == NULL) 24435084Sjohnlev goto fail; 24445084Sjohnlev pp_first = pp; 24455084Sjohnlev if (extpages > npages) { 24465084Sjohnlev /* 24475084Sjohnlev * fill out the rest of extent pages to swap with the 24485084Sjohnlev * hypervisor 24495084Sjohnlev */ 24505084Sjohnlev for (i = 0; i < extra; i++) { 24515084Sjohnlev expp = page_create_va(vp, 24525084Sjohnlev (u_offset_t)(uintptr_t)io_pool_kva, 24535084Sjohnlev PAGESIZE, flags, &kvseg, io_pool_kva); 24545084Sjohnlev if (expp == NULL) 24555084Sjohnlev goto balloon_fail; 24565084Sjohnlev (void) hat_pageunload(expp, HAT_FORCE_PGUNLOAD); 24575084Sjohnlev page_io_unlock(expp); 24585084Sjohnlev page_hashout(expp, NULL); 24595084Sjohnlev page_io_lock(expp); 24605084Sjohnlev /* 24615084Sjohnlev * add page to end of list 24625084Sjohnlev */ 24635084Sjohnlev expp->p_prev = pp_first->p_prev; 24645084Sjohnlev expp->p_next = pp_first; 24655084Sjohnlev expp->p_prev->p_next = expp; 24665084Sjohnlev pp_first->p_prev = expp; 24675084Sjohnlev } 24685084Sjohnlev 24695084Sjohnlev } 24705084Sjohnlev for (i = 0; i < extpages; i++) { 24715084Sjohnlev pplist[i] = pp; 24725084Sjohnlev pp = pp->p_next; 24735084Sjohnlev } 24745084Sjohnlev nbits = highbit(mattr->dma_attr_addr_hi); 24755084Sjohnlev extents = contig ? 
1 : npages; 24765084Sjohnlev if (balloon_replace_pages(extents, pplist, nbits, order, 24775084Sjohnlev mfnlist) != extents) 24785084Sjohnlev goto balloon_fail; 24795084Sjohnlev 24805084Sjohnlev kmem_free(pplist, extpages * sizeof (page_t *)); 24815084Sjohnlev kmem_free(mfnlist, extpages * sizeof (mfn_t)); 24825084Sjohnlev /* 24835084Sjohnlev * Return any excess pages to free list 24845084Sjohnlev */ 24855084Sjohnlev if (extpages > npages) { 24865084Sjohnlev for (i = 0; i < extra; i++) { 24875084Sjohnlev pp = pp_first->p_prev; 24885084Sjohnlev page_sub(&pp_first, pp); 24895084Sjohnlev page_io_unlock(pp); 24905084Sjohnlev page_unresv(1); 24915084Sjohnlev page_free(pp, 1); 24925084Sjohnlev } 24935084Sjohnlev } 24945084Sjohnlev check_dma(mattr, pp_first, mmu_btop(bytes)); 24955084Sjohnlev return (pp_first); 24965084Sjohnlev } 24975084Sjohnlev 24985084Sjohnlev /* 24995084Sjohnlev * Found the pages, now snip them from the list 25005084Sjohnlev */ 25015084Sjohnlev page_io_pool_sub(poolp, pp_first, pp_last); 25025084Sjohnlev io_pool_cnt -= mmu_btop(bytes); 25035084Sjohnlev if (io_pool_cnt < io_pool_cnt_lowater) 25045084Sjohnlev io_pool_cnt_lowater = io_pool_cnt; /* io pool low water mark */ 25055084Sjohnlev mutex_exit(&io_pool_lock); 25065084Sjohnlev done: 25075084Sjohnlev check_dma(mattr, pp_first, mmu_btop(bytes)); 25085084Sjohnlev pp = pp_first; 25095084Sjohnlev do { 25105084Sjohnlev if (!page_hashin(pp, vp, off, NULL)) { 25115084Sjohnlev panic("pg_create_io: hashin failed pp %p, vp %p," 25125084Sjohnlev " off %llx", 25135084Sjohnlev (void *)pp, (void *)vp, off); 25145084Sjohnlev } 25155084Sjohnlev off += MMU_PAGESIZE; 25165084Sjohnlev PP_CLRFREE(pp); 25175084Sjohnlev PP_CLRAGED(pp); 25185084Sjohnlev page_set_props(pp, P_REF); 25195084Sjohnlev page_io_lock(pp); 25205084Sjohnlev pp = pp->p_next; 25215084Sjohnlev } while (pp != pp_first); 25225084Sjohnlev return (pp_first); 25235084Sjohnlev balloon_fail: 25245084Sjohnlev /* 25255084Sjohnlev * Return pages to free list and return failure 25265084Sjohnlev */ 25275084Sjohnlev while (pp_first != NULL) { 25285084Sjohnlev pp = pp_first; 25295084Sjohnlev page_sub(&pp_first, pp); 25305084Sjohnlev page_io_unlock(pp); 25315084Sjohnlev if (pp->p_vnode != NULL) 25325084Sjohnlev page_hashout(pp, NULL); 25335084Sjohnlev page_free(pp, 1); 25345084Sjohnlev } 25355084Sjohnlev fail: 25365084Sjohnlev if (pplist) 25375084Sjohnlev kmem_free(pplist, extpages * sizeof (page_t *)); 25385084Sjohnlev if (mfnlist) 25395084Sjohnlev kmem_free(mfnlist, extpages * sizeof (mfn_t)); 25405084Sjohnlev page_unresv(extpages - npages); 25415084Sjohnlev return (NULL); 25425084Sjohnlev } 25435084Sjohnlev 25445084Sjohnlev /* 25455084Sjohnlev * Lock and return the page with the highest mfn that we can find. last_mfn 25465084Sjohnlev * holds the last one found, so the next search can start from there. We 25475084Sjohnlev * also keep a counter so that we don't loop forever if the machine has no 25485084Sjohnlev * free pages. 25495084Sjohnlev * 25505084Sjohnlev * This is called from the balloon thread to find pages to give away. new_high 25515084Sjohnlev * is used when new mfn's have been added to the system - we will reset our 25525084Sjohnlev * search if the new mfn's are higher than our current search position. 
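 *
 * The scan below walks last_mfn downward, wrapping to cached_max_mfn
 * when it reaches zero; loop_count bounds the walk to one full pass so
 * a machine with no free pages cannot spin here forever.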
25535084Sjohnlev */ 25545084Sjohnlev page_t * 25555084Sjohnlev page_get_high_mfn(mfn_t new_high) 25565084Sjohnlev { 25575084Sjohnlev static mfn_t last_mfn = 0; 25585084Sjohnlev pfn_t pfn; 25595084Sjohnlev page_t *pp; 25605084Sjohnlev ulong_t loop_count = 0; 25615084Sjohnlev 25625084Sjohnlev if (new_high > last_mfn) 25635084Sjohnlev last_mfn = new_high; 25645084Sjohnlev 25655084Sjohnlev for (; loop_count < mfn_count; loop_count++, last_mfn--) { 25665084Sjohnlev if (last_mfn == 0) { 25675084Sjohnlev last_mfn = cached_max_mfn; 25685084Sjohnlev } 25695084Sjohnlev 25705084Sjohnlev pfn = mfn_to_pfn(last_mfn); 25715084Sjohnlev if (pfn & PFN_IS_FOREIGN_MFN) 25725084Sjohnlev continue; 25735084Sjohnlev 25745084Sjohnlev /* See if the page is free. If so, lock it. */ 25755084Sjohnlev pp = page_numtopp_alloc(pfn); 25765084Sjohnlev if (pp == NULL) 25775084Sjohnlev continue; 25785084Sjohnlev PP_CLRFREE(pp); 25795084Sjohnlev 25805084Sjohnlev ASSERT(PAGE_EXCL(pp)); 25815084Sjohnlev ASSERT(pp->p_vnode == NULL); 25825084Sjohnlev ASSERT(!hat_page_is_mapped(pp)); 25835084Sjohnlev last_mfn--; 25845084Sjohnlev return (pp); 25855084Sjohnlev } 25865084Sjohnlev return (NULL); 25875084Sjohnlev } 25885084Sjohnlev 25895084Sjohnlev #else /* !__xpv */ 25905084Sjohnlev 25910Sstevel@tonic-gate /* 25920Sstevel@tonic-gate * get a page from any list with the given mnode 25930Sstevel@tonic-gate */ 25945084Sjohnlev static page_t * 25950Sstevel@tonic-gate page_get_mnode_anylist(ulong_t origbin, uchar_t szc, uint_t flags, 25960Sstevel@tonic-gate int mnode, int mtype, ddi_dma_attr_t *dma_attr) 25970Sstevel@tonic-gate { 25982961Sdp78419 kmutex_t *pcm; 25992961Sdp78419 int i; 26002961Sdp78419 page_t *pp; 26012961Sdp78419 page_t *first_pp; 26022961Sdp78419 uint64_t pgaddr; 26032961Sdp78419 ulong_t bin; 26042961Sdp78419 int mtypestart; 26052961Sdp78419 int plw_initialized; 26062961Sdp78419 page_list_walker_t plw; 26070Sstevel@tonic-gate 26080Sstevel@tonic-gate VM_STAT_ADD(pga_vmstats.pgma_alloc); 26090Sstevel@tonic-gate 26100Sstevel@tonic-gate ASSERT((flags & PG_MATCH_COLOR) == 0); 26110Sstevel@tonic-gate ASSERT(szc == 0); 26120Sstevel@tonic-gate ASSERT(dma_attr != NULL); 26130Sstevel@tonic-gate 26140Sstevel@tonic-gate MTYPE_START(mnode, mtype, flags); 26150Sstevel@tonic-gate if (mtype < 0) { 26160Sstevel@tonic-gate VM_STAT_ADD(pga_vmstats.pgma_allocempty); 26170Sstevel@tonic-gate return (NULL); 26180Sstevel@tonic-gate } 26190Sstevel@tonic-gate 26200Sstevel@tonic-gate mtypestart = mtype; 26210Sstevel@tonic-gate 26220Sstevel@tonic-gate bin = origbin; 26230Sstevel@tonic-gate 26240Sstevel@tonic-gate /* 26250Sstevel@tonic-gate * check up to page_colors + 1 bins - origbin may be checked twice 26260Sstevel@tonic-gate * because of BIN_STEP skip 26270Sstevel@tonic-gate */ 26280Sstevel@tonic-gate do { 26292961Sdp78419 plw_initialized = 0; 26302961Sdp78419 26312961Sdp78419 for (plw.plw_count = 0; 26322961Sdp78419 plw.plw_count < page_colors; plw.plw_count++) { 26332961Sdp78419 26340Sstevel@tonic-gate if (PAGE_FREELISTS(mnode, szc, bin, mtype) == NULL) 26350Sstevel@tonic-gate goto nextfreebin; 26360Sstevel@tonic-gate 26370Sstevel@tonic-gate pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST); 26380Sstevel@tonic-gate mutex_enter(pcm); 26390Sstevel@tonic-gate pp = PAGE_FREELISTS(mnode, szc, bin, mtype); 26400Sstevel@tonic-gate first_pp = pp; 26410Sstevel@tonic-gate while (pp != NULL) { 26420Sstevel@tonic-gate if (page_trylock(pp, SE_EXCL) == 0) { 26430Sstevel@tonic-gate pp = pp->p_next; 26440Sstevel@tonic-gate if (pp == first_pp) { 
26450Sstevel@tonic-gate pp = NULL; 26460Sstevel@tonic-gate } 26470Sstevel@tonic-gate continue; 26480Sstevel@tonic-gate } 26490Sstevel@tonic-gate 26500Sstevel@tonic-gate ASSERT(PP_ISFREE(pp)); 26510Sstevel@tonic-gate ASSERT(PP_ISAGED(pp)); 26520Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL); 26530Sstevel@tonic-gate ASSERT(pp->p_hash == NULL); 26540Sstevel@tonic-gate ASSERT(pp->p_offset == (u_offset_t)-1); 26550Sstevel@tonic-gate ASSERT(pp->p_szc == szc); 26560Sstevel@tonic-gate ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode); 26570Sstevel@tonic-gate /* check if page within DMA attributes */ 26583446Smrj pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum)); 26590Sstevel@tonic-gate if ((pgaddr >= dma_attr->dma_attr_addr_lo) && 26600Sstevel@tonic-gate (pgaddr + MMU_PAGESIZE - 1 <= 26610Sstevel@tonic-gate dma_attr->dma_attr_addr_hi)) { 26620Sstevel@tonic-gate break; 26630Sstevel@tonic-gate } 26640Sstevel@tonic-gate 26650Sstevel@tonic-gate /* continue looking */ 26660Sstevel@tonic-gate page_unlock(pp); 26670Sstevel@tonic-gate pp = pp->p_next; 26680Sstevel@tonic-gate if (pp == first_pp) 26690Sstevel@tonic-gate pp = NULL; 26700Sstevel@tonic-gate 26710Sstevel@tonic-gate } 26720Sstevel@tonic-gate if (pp != NULL) { 26730Sstevel@tonic-gate ASSERT(mtype == PP_2_MTYPE(pp)); 26740Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 26750Sstevel@tonic-gate 26760Sstevel@tonic-gate /* found a page with specified DMA attributes */ 26770Sstevel@tonic-gate page_sub(&PAGE_FREELISTS(mnode, szc, bin, 26780Sstevel@tonic-gate mtype), pp); 2679414Skchow page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST); 26800Sstevel@tonic-gate 26810Sstevel@tonic-gate if ((PP_ISFREE(pp) == 0) || 26820Sstevel@tonic-gate (PP_ISAGED(pp) == 0)) { 26830Sstevel@tonic-gate cmn_err(CE_PANIC, "page %p is not free", 26840Sstevel@tonic-gate (void *)pp); 26850Sstevel@tonic-gate } 26860Sstevel@tonic-gate 26870Sstevel@tonic-gate mutex_exit(pcm); 26880Sstevel@tonic-gate check_dma(dma_attr, pp, 1); 26890Sstevel@tonic-gate VM_STAT_ADD(pga_vmstats.pgma_allocok); 26900Sstevel@tonic-gate return (pp); 26910Sstevel@tonic-gate } 26920Sstevel@tonic-gate mutex_exit(pcm); 26930Sstevel@tonic-gate nextfreebin: 26942961Sdp78419 if (plw_initialized == 0) { 26952961Sdp78419 page_list_walk_init(szc, 0, bin, 1, 0, &plw); 26962961Sdp78419 ASSERT(plw.plw_ceq_dif == page_colors); 26972961Sdp78419 plw_initialized = 1; 26982961Sdp78419 } 26990Sstevel@tonic-gate 27002961Sdp78419 if (plw.plw_do_split) { 27012961Sdp78419 pp = page_freelist_split(szc, bin, mnode, 27022961Sdp78419 mtype, 27032961Sdp78419 mmu_btop(dma_attr->dma_attr_addr_hi + 1), 27042961Sdp78419 &plw); 27052961Sdp78419 if (pp != NULL) 27062961Sdp78419 return (pp); 27072961Sdp78419 } 27082961Sdp78419 27092961Sdp78419 bin = page_list_walk_next_bin(szc, bin, &plw); 27100Sstevel@tonic-gate } 27112961Sdp78419 2712414Skchow MTYPE_NEXT(mnode, mtype, flags); 2713414Skchow } while (mtype >= 0); 27140Sstevel@tonic-gate 27150Sstevel@tonic-gate /* failed to find a page in the freelist; try it in the cachelist */ 27160Sstevel@tonic-gate 27170Sstevel@tonic-gate /* reset mtype start for cachelist search */ 27180Sstevel@tonic-gate mtype = mtypestart; 27190Sstevel@tonic-gate ASSERT(mtype >= 0); 27200Sstevel@tonic-gate 27210Sstevel@tonic-gate /* start with the bin of matching color */ 27220Sstevel@tonic-gate bin = origbin; 27230Sstevel@tonic-gate 27240Sstevel@tonic-gate do { 27250Sstevel@tonic-gate for (i = 0; i <= page_colors; i++) { 27260Sstevel@tonic-gate if (PAGE_CACHELISTS(mnode, bin, mtype) == NULL) 27270Sstevel@tonic-gate goto nextcachebin; 
27280Sstevel@tonic-gate pcm = PC_BIN_MUTEX(mnode, bin, PG_CACHE_LIST); 27290Sstevel@tonic-gate mutex_enter(pcm); 27300Sstevel@tonic-gate pp = PAGE_CACHELISTS(mnode, bin, mtype); 27310Sstevel@tonic-gate first_pp = pp; 27320Sstevel@tonic-gate while (pp != NULL) { 27330Sstevel@tonic-gate if (page_trylock(pp, SE_EXCL) == 0) { 27340Sstevel@tonic-gate pp = pp->p_next; 27350Sstevel@tonic-gate if (pp == first_pp) 27360Sstevel@tonic-gate break; 27370Sstevel@tonic-gate continue; 27380Sstevel@tonic-gate } 27390Sstevel@tonic-gate ASSERT(pp->p_vnode); 27400Sstevel@tonic-gate ASSERT(PP_ISAGED(pp) == 0); 27410Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 27420Sstevel@tonic-gate ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode); 27430Sstevel@tonic-gate 27440Sstevel@tonic-gate /* check if page within DMA attributes */ 27450Sstevel@tonic-gate 27463446Smrj pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum)); 27470Sstevel@tonic-gate if ((pgaddr >= dma_attr->dma_attr_addr_lo) && 27480Sstevel@tonic-gate (pgaddr + MMU_PAGESIZE - 1 <= 27490Sstevel@tonic-gate dma_attr->dma_attr_addr_hi)) { 27500Sstevel@tonic-gate break; 27510Sstevel@tonic-gate } 27520Sstevel@tonic-gate 27530Sstevel@tonic-gate /* continue looking */ 27540Sstevel@tonic-gate page_unlock(pp); 27550Sstevel@tonic-gate pp = pp->p_next; 27560Sstevel@tonic-gate if (pp == first_pp) 27570Sstevel@tonic-gate pp = NULL; 27580Sstevel@tonic-gate } 27590Sstevel@tonic-gate 27600Sstevel@tonic-gate if (pp != NULL) { 27610Sstevel@tonic-gate ASSERT(mtype == PP_2_MTYPE(pp)); 27620Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 27630Sstevel@tonic-gate 27640Sstevel@tonic-gate /* found a page with specified DMA attributes */ 27650Sstevel@tonic-gate page_sub(&PAGE_CACHELISTS(mnode, bin, 27660Sstevel@tonic-gate mtype), pp); 2767414Skchow page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST); 27680Sstevel@tonic-gate 27690Sstevel@tonic-gate mutex_exit(pcm); 27700Sstevel@tonic-gate ASSERT(pp->p_vnode); 27710Sstevel@tonic-gate ASSERT(PP_ISAGED(pp) == 0); 27720Sstevel@tonic-gate check_dma(dma_attr, pp, 1); 27730Sstevel@tonic-gate VM_STAT_ADD(pga_vmstats.pgma_allocok); 27740Sstevel@tonic-gate return (pp); 27750Sstevel@tonic-gate } 27760Sstevel@tonic-gate mutex_exit(pcm); 27770Sstevel@tonic-gate nextcachebin: 27780Sstevel@tonic-gate bin += (i == 0) ? BIN_STEP : 1; 27790Sstevel@tonic-gate bin &= page_colors_mask; 27800Sstevel@tonic-gate } 2781414Skchow MTYPE_NEXT(mnode, mtype, flags); 2782414Skchow } while (mtype >= 0); 27830Sstevel@tonic-gate 27840Sstevel@tonic-gate VM_STAT_ADD(pga_vmstats.pgma_allocfailed); 27850Sstevel@tonic-gate return (NULL); 27860Sstevel@tonic-gate } 27870Sstevel@tonic-gate 27880Sstevel@tonic-gate /* 27890Sstevel@tonic-gate * This function is similar to page_get_freelist()/page_get_cachelist() 27900Sstevel@tonic-gate * but it searches both the lists to find a page with the specified 27910Sstevel@tonic-gate * color (or no color) and DMA attributes. The search is done in the 27920Sstevel@tonic-gate * freelist first and then in the cache list within the highest memory 27930Sstevel@tonic-gate * range (based on DMA attributes) before searching in the lower 27940Sstevel@tonic-gate * memory ranges. 27950Sstevel@tonic-gate * 27960Sstevel@tonic-gate * Note: This function is called only by page_create_io(). 
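 *
 * A sketch of the range selection done below: the DMA limits map to
 * mtypes n = pfn_2_mtype(pfnlo) and m = pfn_2_mtype(pfnhi), and
 * fullrange is set only when [pfnlo, pfnhi] completely covers those
 * mtypes; only then can the cheaper page_get_mnode_freelist() and
 * page_get_mnode_cachelist() paths be used in place of
 * page_get_mnode_anylist().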
 */
/*ARGSUSED*/
static page_t *
page_get_anylist(struct vnode *vp, u_offset_t off, struct as *as, caddr_t vaddr,
    size_t size, uint_t flags, ddi_dma_attr_t *dma_attr, lgrp_t *lgrp)
{
	uint_t		bin;
	int		mtype;
	page_t		*pp;
	int		n;
	int		m;
	int		szc;
	int		fullrange;
	int		mnode;
	int		local_failed_stat = 0;
	lgrp_mnode_cookie_t	lgrp_cookie;

	VM_STAT_ADD(pga_vmstats.pga_alloc);

	/* only base pagesize currently supported */
	if (size != MMU_PAGESIZE)
		return (NULL);

	/*
	 * If we're passed a specific lgroup, we use it. Otherwise,
	 * assume first-touch placement is desired.
	 */
	if (!LGRP_EXISTS(lgrp))
		lgrp = lgrp_home_lgrp();

	/* LINTED */
	AS_2_BIN(as, seg, vp, vaddr, bin, 0);

	/*
	 * Only hold one freelist or cachelist lock at a time, that way we
	 * can start anywhere and not have to worry about lock
	 * ordering.
	 */
	if (dma_attr == NULL) {
		n = 0;
		m = mnoderangecnt - 1;
		fullrange = 1;
		VM_STAT_ADD(pga_vmstats.pga_nulldmaattr);
	} else {
		pfn_t pfnlo = mmu_btop(dma_attr->dma_attr_addr_lo);
		pfn_t pfnhi = mmu_btop(dma_attr->dma_attr_addr_hi);

		/*
		 * We can only guarantee alignment to a page boundary.
		 */
		if (dma_attr->dma_attr_align > MMU_PAGESIZE)
			return (NULL);

		n = pfn_2_mtype(pfnlo);
		m = pfn_2_mtype(pfnhi);

		fullrange = ((pfnlo == mnoderanges[n].mnr_pfnlo) &&
		    (pfnhi >= mnoderanges[m].mnr_pfnhi));
	}
	VM_STAT_COND_ADD(fullrange == 0, pga_vmstats.pga_notfullrange);

	if (n > m)
		return (NULL);

	szc = 0;

	/* cycling through mtype is handled by PGI_MT_RANGE0 if n == 0 */
	if (n == 0) {
		flags |= PGI_MT_RANGE0;
		n = m;
	}

	/*
	 * Try local memory node first, but try remote if we can't
	 * get a page of the right color.
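	 * (LGRP_SRCH_HIER makes lgrp_memnode_choose() walk the lgroup
	 * hierarchy outward from the chosen lgroup, so local memnodes
	 * are offered before remote ones.)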
	 */
	LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_HIER);
	while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
		/*
		 * allocate pages from high pfn to low.
		 */
		for (mtype = m; mtype >= n; mtype--) {
			if (fullrange != 0) {
				pp = page_get_mnode_freelist(mnode,
				    bin, mtype, szc, flags);
				if (pp == NULL) {
					pp = page_get_mnode_cachelist(
					    bin, flags, mnode, mtype);
				}
			} else {
				pp = page_get_mnode_anylist(bin, szc,
				    flags, mnode, mtype, dma_attr);
			}
			if (pp != NULL) {
				VM_STAT_ADD(pga_vmstats.pga_allocok);
				check_dma(dma_attr, pp, 1);
				return (pp);
			}
		}
		if (!local_failed_stat) {
			lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1);
			local_failed_stat = 1;
		}
	}
	VM_STAT_ADD(pga_vmstats.pga_allocfailed);

	return (NULL);
}

/*
 * page_create_io()
 *
 * This function is a copy of page_create_va() with an additional
 * argument 'mattr' that specifies DMA memory requirements to
 * the page list functions. This function is used by the segkmem
 * allocator, so it is only used to create new pages (i.e., PG_EXCL is
 * set).
 *
 * Note: This interface is currently used by the x86 PSM only and is
 * not fully specified, so the commitment level is only that of a
 * private interface specific to x86. This interface uses the
 * PSM-specific page_get_anylist() interface.
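 *
 * A minimal usage sketch (hypothetical, for illustration only; 'attr'
 * stands in for a caller's DMA attributes):
 *
 *	pp = page_create_io(&kvp, off, MMU_PAGESIZE,
 *	    PG_EXCL | PG_WAIT, &kas, vaddr, &attr);
 *
 * Passing a NULL mattr places no physical-address restriction on the
 * pages returned.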
 */

#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
			break; \
	} \
}


page_t *
page_create_io(
	struct vnode	*vp,
	u_offset_t	off,
	uint_t		bytes,
	uint_t		flags,
	struct as	*as,
	caddr_t		vaddr,
	ddi_dma_attr_t	*mattr)	/* DMA memory attributes if any */
{
	page_t		*plist = NULL;
	uint_t		plist_len = 0;
	pgcnt_t		npages;
	page_t		*npp = NULL;
	uint_t		pages_req;
	page_t		*pp;
	kmutex_t	*phm = NULL;
	uint_t		index;

	TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
	    "page_create_start:vp %p off %llx bytes %u flags %x",
	    vp, off, bytes, flags);

	ASSERT((flags & ~(PG_EXCL | PG_WAIT | PG_PHYSCONTIG)) == 0);

	pages_req = npages = mmu_btopr(bytes);

	/*
	 * Do the freemem and pcf accounting.
	 */
	if (!page_create_wait(npages, flags)) {
		return (NULL);
	}

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
	    "page_create_success:vp %p off %llx", vp, off);

	/*
	 * If satisfying this request has left us with too little
	 * memory, start the wheels turning to get some back. The
	 * first clause of the test prevents waking up the pageout
	 * daemon in situations where it would decide that there's
	 * nothing to do.
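	 * (If nscan >= desscan, the scanner has already met its scan
	 * target for the current cycle, so a wakeup would accomplish
	 * nothing.)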
	 */
	if (nscan < desscan && freemem < minfree) {
		TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
		    "pageout_cv_signal:freemem %ld", freemem);
		cv_signal(&proc_pageout->p_cv);
	}

	if (flags & PG_PHYSCONTIG) {

		plist = page_get_contigpage(&npages, mattr, 1);
		if (plist == NULL) {
			page_create_putback(npages);
			return (NULL);
		}

		pp = plist;

		do {
			if (!page_hashin(pp, vp, off, NULL)) {
				panic("page_create_io: hashin failed %p %p %llx",
				    (void *)pp, (void *)vp, off);
			}
			VM_STAT_ADD(page_create_new);
			off += MMU_PAGESIZE;
			PP_CLRFREE(pp);
			PP_CLRAGED(pp);
			page_set_props(pp, P_REF);
			pp = pp->p_next;
		} while (pp != plist);

		if (!npages) {
			check_dma(mattr, plist, pages_req);
			return (plist);
		} else {
			vaddr += (pages_req - npages) << MMU_PAGESHIFT;
		}

		/*
		 * fall-thru:
		 *
		 * page_get_contigpage returns when npages <= sgllen.
		 * Grab the rest of the non-contig pages below from anylist.
		 */
	}

	/*
	 * Loop around collecting the requested number of pages.
	 * Most of the time, we have to `create' a new page. With
	 * this in mind, pull the page off the free list before
	 * getting the hash lock. This will minimize the hash
	 * lock hold time, nesting, and the like. If it turns
	 * out we don't need the page, we put it back at the end.
	 */
	while (npages--) {
		phm = NULL;

		index = PAGE_HASH_FUNC(vp, off);
top:
		ASSERT(phm == NULL);
		ASSERT(index == PAGE_HASH_FUNC(vp, off));
		ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));

		if (npp == NULL) {
			/*
			 * Try to get the page of any color either from
			 * the freelist or from the cache list.
			 */
			npp = page_get_anylist(vp, off, as, vaddr, MMU_PAGESIZE,
			    flags & ~PG_MATCH_COLOR, mattr, NULL);
			if (npp == NULL) {
				if (mattr == NULL) {
					/*
					 * Not looking for a special page;
					 * panic!
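					 * With a NULL mattr the memory was
					 * already reserved by
					 * page_create_wait(), so a failure
					 * here indicates broken pcf
					 * accounting.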
					 */
					panic("no page found %d", (int)npages);
				}
				/*
				 * No page found! This can happen
				 * if we are looking for a page
				 * within a specific memory range
				 * for DMA purposes. If PG_WAIT is
				 * specified then we wait for a
				 * while and then try again. The
				 * wait could be forever if we
				 * don't get the page(s) we need.
				 *
				 * Note: XXX We really need a mechanism
				 * to wait for pages in the desired
				 * range. For now, we wait for any
				 * pages and see if we can use them.
				 */

				if ((mattr != NULL) && (flags & PG_WAIT)) {
					delay(10);
					goto top;
				}
				goto fail;	/* undo accounting stuff */
			}

			if (PP_ISAGED(npp) == 0) {
				/*
				 * Since this page came from the
				 * cachelist, we must destroy the
				 * old vnode association.
				 */
				page_hashout(npp, (kmutex_t *)NULL);
			}
		}

		/*
		 * We own this page!
		 */
		ASSERT(PAGE_EXCL(npp));
		ASSERT(npp->p_vnode == NULL);
		ASSERT(!hat_page_is_mapped(npp));
		PP_CLRFREE(npp);
		PP_CLRAGED(npp);

		/*
		 * Here we have a page in our hot little mitts and are
		 * just waiting to stuff it on the appropriate lists.
		 * Get the mutex and check to see if it really does
		 * not exist.
		 */
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		PAGE_HASH_SEARCH(index, pp, vp, off);
		if (pp == NULL) {
			VM_STAT_ADD(page_create_new);
			pp = npp;
			npp = NULL;
			if (!page_hashin(pp, vp, off, phm)) {
				/*
				 * Since we hold the page hash mutex and
				 * just searched for this page, page_hashin
				 * had better not fail. If it does, that
				 * means some thread did not follow the
				 * page hash mutex rules. Panic now and
				 * get it over with. As usual, go down
				 * holding all the locks.
				 */
				ASSERT(MUTEX_HELD(phm));
				panic("page_create: hashin fail %p %p %llx %p",
				    (void *)pp, (void *)vp, off, (void *)phm);

			}
			ASSERT(MUTEX_HELD(phm));
			mutex_exit(phm);
			phm = NULL;

			/*
			 * Hat layer locking need not be done to set
			 * the following bits since the page is not hashed
			 * and was on the free list (i.e., had no mappings).
			 *
			 * Set the reference bit to protect
			 * against immediate pageout.
			 *
			 * XXXmh modify freelist code to set reference
			 * bit so we don't have to do it here.
			 */
			page_set_props(pp, P_REF);
		} else {
			ASSERT(MUTEX_HELD(phm));
			mutex_exit(phm);
			phm = NULL;
			/*
			 * NOTE: This should not happen for pages associated
			 * with kernel vnode 'kvp'.
			 */
			/* XX64 - to debug why this happens! */
			ASSERT(!VN_ISKAS(vp));
			if (VN_ISKAS(vp))
				cmn_err(CE_NOTE,
				    "page_create: page not expected "
				    "in hash list for kernel vnode - pp 0x%p",
				    (void *)pp);
			VM_STAT_ADD(page_create_exists);
			goto fail;
		}

		/*
		 * Got a page! It is locked. Acquire the i/o
		 * lock since we are going to use the p_next and
		 * p_prev fields to link the requested pages together.
		 */
		page_io_lock(pp);
		page_add(&plist, pp);
		plist = plist->p_next;
		off += MMU_PAGESIZE;
		vaddr += MMU_PAGESIZE;
	}

	check_dma(mattr, plist, pages_req);
	return (plist);

fail:
	if (npp != NULL) {
		/*
		 * Did not need this page after all.
		 * Put it back on the free list.
		 */
		VM_STAT_ADD(page_create_putbacks);
		PP_SETFREE(npp);
		PP_SETAGED(npp);
		npp->p_offset = (u_offset_t)-1;
		page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
		page_unlock(npp);
	}

	/*
	 * Give up the pages we already got.
	 */
	while (plist != NULL) {
		pp = plist;
		page_sub(&plist, pp);
		page_io_unlock(pp);
		plist_len++;
		/*LINTED: constant in conditional ctx*/
		VN_DISPOSE(pp, B_INVAL, 0, kcred);
	}

	/*
	 * VN_DISPOSE does freemem accounting for the pages in plist
	 * by calling page_free. So, we need to undo the pcf accounting
	 * for only the remaining pages.
	 */
	VM_STAT_ADD(page_create_putbacks);
	page_create_putback(pages_req - plist_len);

	return (NULL);
}
#endif /* !__xpv */


/*
 * Copy the data from the physical page represented by "frompp" to
 * that represented by "topp". ppcopy uses CPU->cpu_caddr1 and
 * CPU->cpu_caddr2. It assumes that no one uses either map at interrupt
 * level and no one sleeps with an active mapping there.
 *
 * Note that the ref/mod bits in the page_t's are not affected by
 * this operation, hence it is up to the caller to update them
 * appropriately.
 */
int
ppcopy(page_t *frompp, page_t *topp)
{
	caddr_t		pp_addr1;
	caddr_t		pp_addr2;
	hat_mempte_t	pte1;
	hat_mempte_t	pte2;
	kmutex_t	*ppaddr_mutex;
	label_t		ljb;
	int		ret = 1;

	ASSERT_STACK_ALIGNED();
	ASSERT(PAGE_LOCKED(frompp));
	ASSERT(PAGE_LOCKED(topp));

	if (kpm_enable) {
		pp_addr1 = hat_kpm_page2va(frompp, 0);
		pp_addr2 = hat_kpm_page2va(topp, 0);
		kpreempt_disable();
	} else {
		/*
		 * disable preemption so that the CPU can't change
		 * underneath us while we use its private addresses
		 */
		kpreempt_disable();

		pp_addr1 = CPU->cpu_caddr1;
		pp_addr2 = CPU->cpu_caddr2;
		pte1 = CPU->cpu_caddr1pte;
		pte2 = CPU->cpu_caddr2pte;

		ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
		mutex_enter(ppaddr_mutex);

		hat_mempte_remap(page_pptonum(frompp), pp_addr1, pte1,
		    PROT_READ | HAT_STORECACHING_OK, HAT_LOAD_NOCONSIST);
		hat_mempte_remap(page_pptonum(topp), pp_addr2, pte2,
		    PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
		    HAT_LOAD_NOCONSIST);
	}

	if (on_fault(&ljb)) {
		ret = 0;
		goto faulted;
	}
	if (use_sse_pagecopy)
#ifdef __xpv

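		/*
		 * Hypervisor build: use the copy routine that avoids
		 * the XMM registers.
		 */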
		page_copy_no_xmm(pp_addr2, pp_addr1);
#else
		hwblkpagecopy(pp_addr1, pp_addr2);
#endif
	else
		bcopy(pp_addr1, pp_addr2, PAGESIZE);

	no_fault();
faulted:
	if (!kpm_enable) {
#ifdef __xpv
		/*
		 * We can't leave unused mappings lying about under the
		 * hypervisor, so blow them away.
		 */
		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr1, 0,
		    UVMF_INVLPG | UVMF_LOCAL) < 0)
			panic("HYPERVISOR_update_va_mapping() failed");
		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
		    UVMF_INVLPG | UVMF_LOCAL) < 0)
			panic("HYPERVISOR_update_va_mapping() failed");
#endif
		mutex_exit(ppaddr_mutex);
	}
	kpreempt_enable();
	return (ret);
}

void
pagezero(page_t *pp, uint_t off, uint_t len)
{
	ASSERT(PAGE_LOCKED(pp));
	pfnzero(page_pptonum(pp), off, len);
}

/*
 * Zero the physical page given by pfn, from off to off + len,
 * without changing the reference and modified bits of the page.
 *
 * We do this using CPU private page address #2; see ppcopy() for more
 * info. pfnzero() must not be called at interrupt level.
 */
void
pfnzero(pfn_t pfn, uint_t off, uint_t len)
{
	caddr_t		pp_addr2;
	hat_mempte_t	pte2;
	kmutex_t	*ppaddr_mutex = NULL;

	ASSERT_STACK_ALIGNED();
	ASSERT(len <= MMU_PAGESIZE);
	ASSERT(off <= MMU_PAGESIZE);
	ASSERT(off + len <= MMU_PAGESIZE);

	if (kpm_enable && !pfn_is_foreign(pfn)) {
		pp_addr2 = hat_kpm_pfn2va(pfn);
		kpreempt_disable();
	} else {
		kpreempt_disable();

		pp_addr2 = CPU->cpu_caddr2;
		pte2 = CPU->cpu_caddr2pte;

		ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
		mutex_enter(ppaddr_mutex);

		hat_mempte_remap(pfn, pp_addr2, pte2,
		    PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
		    HAT_LOAD_NOCONSIST);
	}

	if (use_sse_pagezero) {
#ifdef __xpv
		uint_t rem;

		/*
		 * zero a byte at a time until properly aligned for
		 * block_zero_no_xmm().
		 */
		while (P2NPHASE(off, ((uint_t)BLOCKZEROALIGN)) && len-- > 0)
			pp_addr2[off++] = 0;

		/*
		 * Now use the faster block_zero_no_xmm() for any range
		 * that is properly aligned and sized.
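		 * For example (illustrative, assuming BLOCKZEROALIGN is
		 * 64): with an aligned off and len == 200, rem is 8, so
		 * 192 bytes are block-zeroed and the trailing 8 bytes are
		 * handled by the byte loop below.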
		 */
		rem = P2PHASE(len, ((uint_t)BLOCKZEROALIGN));
		len -= rem;
		if (len != 0) {
			block_zero_no_xmm(pp_addr2 + off, len);
			off += len;
		}

		/*
		 * zero the remainder with byte stores.
		 */
		while (rem-- > 0)
			pp_addr2[off++] = 0;
#else
		hwblkclr(pp_addr2 + off, len);
#endif
	} else {
		bzero(pp_addr2 + off, len);
	}

	if (!kpm_enable || pfn_is_foreign(pfn)) {
#ifdef __xpv
		/*
		 * On the hypervisor this page might get used for a page
		 * table before any intervening change to this mapping,
		 * so blow it away.
		 */
		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
		    UVMF_INVLPG) < 0)
			panic("HYPERVISOR_update_va_mapping() failed");
#endif
		mutex_exit(ppaddr_mutex);
	}

	kpreempt_enable();
}

/*
 * Platform-dependent page scrub call.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	/*
	 * For now, we rely on the fact that pagezero() will
	 * always clear UEs.
	 */
	pagezero(pp, off, len);
}

/*
 * Set up two private addresses for a given CPU to use in ppcopy().
 */
void
setup_vaddr_for_ppcopy(struct cpu *cpup)
{
	void *addr;
	hat_mempte_t pte_pa;

	addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
	pte_pa = hat_mempte_setup(addr);
	cpup->cpu_caddr1 = addr;
	cpup->cpu_caddr1pte = pte_pa;

	addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
	pte_pa = hat_mempte_setup(addr);
	cpup->cpu_caddr2 = addr;
	cpup->cpu_caddr2pte = pte_pa;

	mutex_init(&cpup->cpu_ppaddr_mutex, NULL, MUTEX_DEFAULT, NULL);
}

/*
 * Undo setup_vaddr_for_ppcopy().
 */
void
teardown_vaddr_for_ppcopy(struct cpu *cpup)
{
	mutex_destroy(&cpup->cpu_ppaddr_mutex);

	hat_mempte_release(cpup->cpu_caddr2, cpup->cpu_caddr2pte);
	cpup->cpu_caddr2pte = 0;
	vmem_free(heap_arena, cpup->cpu_caddr2, mmu_ptob(1));
	cpup->cpu_caddr2 = 0;

	hat_mempte_release(cpup->cpu_caddr1, cpup->cpu_caddr1pte);
	cpup->cpu_caddr1pte = 0;
	vmem_free(heap_arena, cpup->cpu_caddr1, mmu_ptob(1));
	cpup->cpu_caddr1 = 0;
}

/*
 * Create the pageout scanner thread. The thread starts executing
 * 'procedure' in process pp at priority pri.
 */
void
pageout_init(void (*procedure)(), proc_t *pp, pri_t pri)
{
	(void) thread_create(NULL, 0, procedure, NULL, 0, pp, TS_RUN, pri);
}

/*
 * Function for flushing D-cache when performing module relocations
 * to an alternate mapping. Unnecessary on Intel / AMD platforms.
 */
void
dcache_flushall()
{}

size_t
exec_get_spslew(void)
{
	return (0);
}

/*
 * Allocate a memory page. The argument 'seed' can be any pseudo-random
 * number to vary where the pages come from. This is quite a hacked-up
 * method -- it works for now, but really needs to be fixed up a bit.
 *
 * We currently use page_create_va() on the kvp with fake offsets,
 * segments and virt address. This is pretty bogus, but was copied from
 * the old hat_i86.c code. A better approach would be to specify either
 * mnode random or mnode local and take a page from whatever color has
 * the MOST available -- this would have a minimal impact on page
 * coloring.
 */
page_t *
page_get_physical(uintptr_t seed)
{
	page_t *pp;
	u_offset_t offset;
	static struct seg tmpseg;
	static uintptr_t ctr = 0;

	/*
	 * This code is gross; we really need a simpler page allocator.
	 *
	 * We need to assign an offset for the page to call
	 * page_create_va(). To avoid conflicts with other pages, we get
	 * creative with the offset.
	 * For 32 bits, we pick an offset > 4Gig.
	 * For 64 bits, we pick an offset somewhere in the VA hole.
	 */
	offset = seed;
	if (offset > kernelbase)
		offset -= kernelbase;
	offset <<= MMU_PAGESHIFT;
#if defined(__amd64)
	offset += mmu.hole_start;	/* something in VA hole */
#else
	offset += 1ULL << 40;		/* something > 4 Gig */
#endif

	if (page_resv(1, KM_NOSLEEP) == 0)
		return (NULL);

#ifdef DEBUG
	pp = page_exists(&kvp, offset);
	if (pp != NULL)
		panic("page already exists %p", pp);
#endif

	pp = page_create_va(&kvp, offset, MMU_PAGESIZE, PG_EXCL,
	    &tmpseg, (caddr_t)(ctr += MMU_PAGESIZE));	/* changing VA usage */
	if (pp == NULL)
		return (NULL);
	page_io_unlock(pp);
	page_hashout(pp, NULL);
	return (pp);
}
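
/*
 * Illustrative sketch (an assumption added for clarity, not part of
 * the original file): page_get_physical() returns an EXCL-locked,
 * unhashed page with one page reserved via page_resv(). A consumer
 * would presumably release it along these lines:
 *
 *	page_t *pp = page_get_physical((uintptr_t)some_seed);
 *	if (pp != NULL) {
 *		... use page_pptonum(pp) as a physical page ...
 *		page_free(pp, 1);
 *		page_unresv(1);
 *	}
 */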