/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - physical page management.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/tnf_probe.h>
#include <sys/condvar_impl.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/strlog.h>
#include <sys/mman.h>
#include <sys/ontrap.h>
#include <sys/lgrp.h>
#include <sys/vfs.h>

#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/pvn.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <sys/vm_usage.h>
#include <fs/fs_subr.h>

static int nopageage = 0;

static pgcnt_t max_page_get;	/* max page_get request size in pages */
pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */

/*
 * freemem_lock protects all freemem variables:
 * availrmem.  Also this lock protects the globals which track the
 * availrmem changes for accurate kernel footprint calculation.
 * See below for an explanation of these
 * globals.
 */
kmutex_t freemem_lock;
pgcnt_t availrmem;
pgcnt_t availrmem_initial;

/*
 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size.  Historically pp_kernel is used for
 * kernel size and is based on availrmem.  But availrmem is adjusted for
 * locked pages in the system, not just for kernel locked pages.
 * These new counters will track the pages locked through segvn and
 * by explicit user locking.
 *
 * segvn_pages_locked : This keeps track, on a global basis, of how many
 * pages are currently locked because of I/O.
 *
 * pages_locked : How many pages are locked because of user specified
 * locking through mlock or plock.
 *
 * pages_useclaim, pages_claimed : These two variables track the
 * claim adjustments because of the protection changes on a segvn segment.
 *
 * All these globals are protected by the same lock which protects availrmem.
 */
pgcnt_t segvn_pages_locked;
pgcnt_t pages_locked;
pgcnt_t pages_useclaim;
pgcnt_t pages_claimed;


/*
 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
 */
static kmutex_t new_freemem_lock;
static uint_t freemem_wait;	/* someone waiting for freemem */
static kcondvar_t freemem_cv;

/*
 * The logical page free list is maintained as two lists, the 'free'
 * and the 'cache' lists.
 * The free list contains those pages that should be reused first.
 *
 * The implementation of the lists is machine dependent.
 * page_get_freelist(), page_get_cachelist(),
 * page_list_sub(), and page_list_add()
 * form the interface to the machine dependent implementation.
 *
 * Pages with p_free set are on the cache list.
 * Pages with p_free and p_age set are on the free list.
 *
 * A page may be locked while on either list.
 */

/*
 * free list accounting stuff.
 *
 *
 * Spread out the value for the number of pages on the
 * page free and page cache lists.  If there is just one
 * value, then it must be under just one lock.
 * The lock contention and cache traffic are a real bother.
 *
 * When we acquire and then drop a single pcf lock
 * we can start in the middle of the array of pcf structures.
 * If we acquire more than one pcf lock at a time, we need to
 * start at the front to avoid deadlocking.
 *
 * pcf_count holds the number of pages in each pool.
 *
 * pcf_block is set when page_create_get_something() has asked the
 * PSM page freelist and page cachelist routines without specifying
 * a color and nothing came back.  This is used to block anything
 * else from moving pages from one list to the other while the
 * lists are searched again.  If a page is freed while pcf_block is
 * set, then pcf_reserve is incremented.  pcgs_unblock() takes care
 * of clearing pcf_block, doing the wakeups, etc.
 */
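
/*
 * Added note (not in the original source): a minimal sketch of the
 * single-bucket locking rule described above, assuming the PCF_INDEX()
 * macro defined below.  One pcf lock may be taken starting anywhere in
 * the array, e.g. at the current CPU's bucket:
 *
 *	struct pcf *p = &pcf[PCF_INDEX()];
 *	mutex_enter(&p->pcf_lock);
 *	...adjust p->pcf_count...
 *	mutex_exit(&p->pcf_lock);
 *
 * When more than one pcf lock is needed, callers must start at pcf[0],
 * as pcf_acquire_all() further down in this file does, so that all
 * multi-lock holders agree on the ordering and cannot deadlock.
 */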

#if NCPU <= 4
#define	PAD	2
#define	PCF_FANOUT	4
static uint_t pcf_mask = PCF_FANOUT - 1;
#else
#define	PAD	10
#ifdef sun4v
#define	PCF_FANOUT	32
#else
#define	PCF_FANOUT	128
#endif
static uint_t pcf_mask = PCF_FANOUT - 1;
#endif

struct pcf {
    kmutex_t	pcf_lock;	/* protects the structure */
    uint_t	pcf_count;	/* page count */
    uint_t	pcf_wait;	/* number of waiters */
    uint_t	pcf_block;	/* pcgs flag to page_free() */
    uint_t	pcf_reserve;	/* pages freed after pcf_block set */
    uint_t	pcf_fill[PAD];	/* to line up on the caches */
};

static struct pcf pcf[PCF_FANOUT];
#define	PCF_INDEX()	((CPU->cpu_id) & (pcf_mask))

kmutex_t pcgs_lock;		/* serializes page_create_get_ */
kmutex_t pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
kmutex_t pcgs_wait_lock;	/* used for delay in pcgs */
static kcondvar_t pcgs_cv;	/* cv for delay in pcgs */

#ifdef VM_STATS

/*
 * No locks, but so what, they are only statistics.
 */

static struct page_tcnt {
    int	pc_free_cache;		/* free's into cache list */
    int	pc_free_dontneed;	/* free's with dontneed */
    int	pc_free_pageout;	/* free's from pageout */
    int	pc_free_free;		/* free's into free list */
    int	pc_free_pages;		/* free's into large page free list */
    int	pc_destroy_pages;	/* large page destroy's */
    int	pc_get_cache;		/* get's from cache list */
    int	pc_get_free;		/* get's from free list */
    int	pc_reclaim;		/* reclaim's */
    int	pc_abortfree;		/* abort's of free pages */
    int	pc_find_hit;		/* find's that find page */
    int	pc_find_miss;		/* find's that don't find page */
    int	pc_destroy_free;	/* # of free pages destroyed */
#define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
    int	pc_find_hashlen[PC_HASH_CNT+1];
    int	pc_addclaim_pages;
    int	pc_subclaim_pages;
    int	pc_free_replacement_page[2];
    int	pc_try_demote_pages[6];
    int	pc_demote_pages[2];
} pagecnt;

uint_t hashin_count;
uint_t hashin_not_held;
uint_t hashin_already;

uint_t hashout_count;
uint_t hashout_not_held;

uint_t page_create_count;
uint_t page_create_not_enough;
uint_t page_create_not_enough_again;
uint_t page_create_zero;
uint_t page_create_hashout;
uint_t page_create_page_lock_failed;
uint_t page_create_trylock_failed;
uint_t page_create_found_one;
uint_t page_create_hashin_failed;
uint_t page_create_dropped_phm;

uint_t page_create_new;
uint_t page_create_exists;
uint_t page_create_putbacks;
uint_t page_create_overshoot;

uint_t page_reclaim_zero;
uint_t page_reclaim_zero_locked;

uint_t page_rename_exists;
uint_t page_rename_count;

uint_t page_lookup_cnt[20];
uint_t page_lookup_nowait_cnt[10];
uint_t page_find_cnt;
uint_t page_exists_cnt;
uint_t page_exists_forreal_cnt;
uint_t page_lookup_dev_cnt;
uint_t get_cachelist_cnt;
uint_t page_create_cnt[10];
uint_t alloc_pages[8];
uint_t page_exphcontg[19];
uint_t page_create_large_cnt[10];

/*
 * Collects statistics.
 */
#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
    uint_t mylen = 0; \
\
    for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \
        if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
            break; \
    } \
    if ((pp) != NULL) \
        pagecnt.pc_find_hit++; \
    else \
        pagecnt.pc_find_miss++; \
    if (mylen > PC_HASH_CNT) \
        mylen = PC_HASH_CNT; \
    pagecnt.pc_find_hashlen[mylen]++; \
}

#else	/* VM_STATS */

/*
 * Don't collect statistics
 */
#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
    for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
        if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
            break; \
    } \
}

#endif	/* VM_STATS */



#ifdef DEBUG
#define	MEMSEG_SEARCH_STATS
#endif

#ifdef MEMSEG_SEARCH_STATS
struct memseg_stats {
    uint_t nsearch;
    uint_t nlastwon;
    uint_t nhashwon;
    uint_t nnotfound;
} memseg_stats;

#define	MEMSEG_STAT_INCR(v) \
    atomic_add_32(&memseg_stats.v, 1)
#else
#define	MEMSEG_STAT_INCR(x)
#endif

struct memseg *memsegs;		/* list of memory segments */


static void page_init_mem_config(void);
static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
static void page_do_hashout(page_t *);

static void page_demote_vp_pages(page_t *);

/*
 * vm subsystem related initialization
 */
void
vm_init(void)
{
    boolean_t callb_vm_cpr(void *, int);

    (void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
    page_init_mem_config();
    page_retire_init();
    vm_usage_init();
}

/*
 * This function is called at startup and when memory is added or deleted.
 */
void
init_pages_pp_maximum()
{
    static pgcnt_t p_min;
    static pgcnt_t pages_pp_maximum_startup;
    static pgcnt_t avrmem_delta;
    static int init_done;
    static int user_set;	/* true if set in /etc/system */

    if (init_done == 0) {

        /* If the user specified a value, save it */
        if (pages_pp_maximum != 0) {
            user_set = 1;
            pages_pp_maximum_startup = pages_pp_maximum;
        }

        /*
         * Setting of pages_pp_maximum is based first time
         * on the value of availrmem just after the start-up
         * allocations.  To preserve this relationship at run
         * time, use a delta from availrmem_initial.
         */
        ASSERT(availrmem_initial >= availrmem);
        avrmem_delta = availrmem_initial - availrmem;

        /* The allowable floor of pages_pp_maximum */
        p_min = tune.t_minarmem + 100;

        /* Make sure we don't come through here again. */
        init_done = 1;
    }
    /*
     * Determine pages_pp_maximum, the number of currently available
     * pages (availrmem) that can't be `locked'.  If not set by
     * the user, we set it to 4% of the currently available memory
     * plus 4MB.
     * But we also insist that it be greater than tune.t_minarmem;
     * otherwise a process could lock down a lot of memory, get swapped
     * out, and never have enough to get swapped back in.
     */
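    /*
     * Added worked example (not in the original source, assuming 4K
     * pages): with availrmem_initial - avrmem_delta == 262144 pages
     * (1GB of RAM), the default computed below is
     * 262144 / 25 + btop(4 * 1024 * 1024) = 10485 + 1024 = 11509 pages,
     * i.e. roughly 4% of available memory plus 4MB.
     */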
    if (user_set)
        pages_pp_maximum = pages_pp_maximum_startup;
    else
        pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
            + btop(4 * 1024 * 1024);

    if (pages_pp_maximum <= p_min) {
        pages_pp_maximum = p_min;
    }
}

void
set_max_page_get(pgcnt_t target_total_pages)
{
    max_page_get = target_total_pages / 2;
}

static pgcnt_t pending_delete;

/*ARGSUSED*/
static void
page_mem_config_post_add(
    void *arg,
    pgcnt_t delta_pages)
{
    set_max_page_get(total_pages - pending_delete);
    init_pages_pp_maximum();
}

/*ARGSUSED*/
static int
page_mem_config_pre_del(
    void *arg,
    pgcnt_t delta_pages)
{
    pgcnt_t nv;

    nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
    set_max_page_get(total_pages - nv);
    return (0);
}

/*ARGSUSED*/
static void
page_mem_config_post_del(
    void *arg,
    pgcnt_t delta_pages,
    int cancelled)
{
    pgcnt_t nv;

    nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
    set_max_page_get(total_pages - nv);
    if (!cancelled)
        init_pages_pp_maximum();
}

static kphysm_setup_vector_t page_mem_config_vec = {
    KPHYSM_SETUP_VECTOR_VERSION,
    page_mem_config_post_add,
    page_mem_config_pre_del,
    page_mem_config_post_del,
};

static void
page_init_mem_config(void)
{
    int ret;

    ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
    ASSERT(ret == 0);
}

/*
 * Evenly spread out the PCF counters for large free pages
 */
static void
page_free_large_ctr(pgcnt_t npages)
{
    static struct pcf *p = pcf;
    pgcnt_t lump;

    freemem += npages;

    lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT;

    while (npages > 0) {

        ASSERT(!p->pcf_block);

        if (lump < npages) {
            p->pcf_count += (uint_t)lump;
            npages -= lump;
        } else {
            p->pcf_count += (uint_t)npages;
            npages = 0;
        }

        ASSERT(!p->pcf_wait);

        if (++p > &pcf[PCF_FANOUT - 1])
            p = pcf;
    }

    ASSERT(npages == 0);
}

/*
 * Add a physical chunk of memory to the system free lists during startup.
 * Platform specific startup() allocates the memory for the page structs.
 *
 * num  - number of page structures
 * base - page number (pfn) to be associated with the first page.
 *
 * Since we are doing this during startup (i.e. single threaded), we will
 * use shortcut routines to avoid any locking overhead while putting all
 * these pages on the freelists.
 *
 * NOTE: Any changes performed to page_free() must also be performed to
 *	 add_physmem() since this is how we initialize all page_t's at
 *	 boot time.
 */
void
add_physmem(
    page_t *pp,
    pgcnt_t num,
    pfn_t pnum)
{
    page_t *root = NULL;
    uint_t szc = page_num_pagesizes() - 1;
    pgcnt_t large = page_get_pagecnt(szc);
    pgcnt_t cnt = 0;

    TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
        "add_physmem:pp %p num %lu", pp, num);

    /*
     * Arbitrarily limit the max page_get request
     * to 1/2 of the page structs we have.
     */
    total_pages += num;
    set_max_page_get(total_pages);

    PLCNT_MODIFY_MAX(pnum, (long)num);

    /*
     * The physical space for the pages array
     * representing ram pages has already been
     * allocated.  Here we initialize each lock
     * in the page structure, and put each on
     * the free list
     */
    for (; num; pp++, pnum++, num--) {

        /*
         * this needs to fill in the page number
         * and do any other arch specific initialization
         */
        add_physmem_cb(pp, pnum);

        pp->p_lckcnt = 0;
        pp->p_cowcnt = 0;
        pp->p_slckcnt = 0;

        /*
         * Initialize the page lock as unlocked, since nobody
         * can see or access this page yet.
         */
        pp->p_selock = 0;

        /*
         * Initialize IO lock
         */
        page_iolock_init(pp);

        /*
         * initialize other fields in the page_t
         */
        PP_SETFREE(pp);
        page_clr_all_props(pp);
        PP_SETAGED(pp);
        pp->p_offset = (u_offset_t)-1;
        pp->p_next = pp;
        pp->p_prev = pp;

        /*
         * Simple case: System doesn't support large pages.
         */
        if (szc == 0) {
            pp->p_szc = 0;
            page_free_at_startup(pp);
            continue;
        }

        /*
         * Handle unaligned pages, we collect them up onto
         * the root page until we have a full large page.
         */
        if (!IS_P2ALIGNED(pnum, large)) {

            /*
             * If not in a large page,
             * just free as small page.
             */
            if (root == NULL) {
                pp->p_szc = 0;
                page_free_at_startup(pp);
                continue;
            }

            /*
             * Link a constituent page into the large page.
             */
            pp->p_szc = szc;
            page_list_concat(&root, &pp);

            /*
             * When large page is fully formed, free it.
             */
            if (++cnt == large) {
                page_free_large_ctr(cnt);
                page_list_add_pages(root, PG_LIST_ISINIT);
                root = NULL;
                cnt = 0;
            }
            continue;
        }

        /*
         * At this point we have a page number which
         * is aligned.  We assert that we aren't already
         * in a different large page.
         */
        ASSERT(IS_P2ALIGNED(pnum, large));
        ASSERT(root == NULL && cnt == 0);

        /*
         * If insufficient number of pages left to form
         * a large page, just free the small page.
         */
        if (num < large) {
            pp->p_szc = 0;
            page_free_at_startup(pp);
            continue;
        }

        /*
         * Otherwise start a new large page.
         */
        pp->p_szc = szc;
        cnt++;
        root = pp;
    }
    ASSERT(root == NULL && cnt == 0);
}

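/*
 * Added note (not in the original source): an illustrative caller pattern
 * for page_lookup() below.  On success the page is returned holding the
 * requested shared or exclusive lock and must be released with
 * page_unlock() when the caller is done with it:
 *
 *	pp = page_lookup(vp, off, SE_SHARED);
 *	if (pp != NULL) {
 *		...examine or map the page...
 *		page_unlock(pp);
 *	}
 */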
/*
 * Find a page representing the specified [vp, offset].
 * If we find the page but it is in transit coming in,
 * it will have an "exclusive" lock and we wait for
 * the i/o to complete.  A page found on the free list
 * is always reclaimed and then locked.  On success, the page
 * is locked, its data is valid and it isn't on the free
 * list, while a NULL is returned if the page doesn't exist.
 */
page_t *
page_lookup(vnode_t *vp, u_offset_t off, se_t se)
{
    return (page_lookup_create(vp, off, se, NULL, NULL, 0));
}

/*
 * Find a page representing the specified [vp, offset].
 * We either return the one we found or, if passed in,
 * create one with identity of [vp, offset] of the
 * pre-allocated page.  If we find an existing page but it is
 * in transit coming in, it will have an "exclusive" lock
 * and we wait for the i/o to complete.  A page found on
 * the free list is always reclaimed and then locked.
 * On success, the page is locked, its data is valid and
 * it isn't on the free list, while a NULL is returned
 * if the page doesn't exist and newpp is NULL.
 */
page_t *
page_lookup_create(
    vnode_t *vp,
    u_offset_t off,
    se_t se,
    page_t *newpp,
    spgcnt_t *nrelocp,
    int flags)
{
    page_t *pp;
    kmutex_t *phm;
    ulong_t index;
    uint_t hash_locked;
    uint_t es;

    ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
    VM_STAT_ADD(page_lookup_cnt[0]);
    ASSERT(newpp ? PAGE_EXCL(newpp) : 1);

    /*
     * Acquire the appropriate page hash lock since
     * we have to search the hash list.  Pages that
     * hash to this list can't change identity while
     * this lock is held.
     */
    hash_locked = 0;
    index = PAGE_HASH_FUNC(vp, off);
    phm = NULL;
top:
    PAGE_HASH_SEARCH(index, pp, vp, off);
    if (pp != NULL) {
        VM_STAT_ADD(page_lookup_cnt[1]);
        es = (newpp != NULL) ? 1 : 0;
        es |= flags;
        if (!hash_locked) {
            VM_STAT_ADD(page_lookup_cnt[2]);
            if (!page_try_reclaim_lock(pp, se, es)) {
                /*
                 * On a miss, acquire the phm.  Then
                 * next time, page_lock() will be called,
                 * causing a wait if the page is busy.
                 * Just looping with page_trylock() would
                 * get pretty boring.
                 */
                VM_STAT_ADD(page_lookup_cnt[3]);
                phm = PAGE_HASH_MUTEX(index);
                mutex_enter(phm);
                hash_locked = 1;
                goto top;
            }
        } else {
            VM_STAT_ADD(page_lookup_cnt[4]);
            if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
                VM_STAT_ADD(page_lookup_cnt[5]);
                goto top;
            }
        }

        /*
         * Since `pp' is locked it can not change identity now.
         * Reconfirm we locked the correct page.
         *
         * Both the p_vnode and p_offset *must* be cast volatile
         * to force a reload of their values: The PAGE_HASH_SEARCH
         * macro will have stuffed p_vnode and p_offset into
         * registers before calling page_trylock(); another thread,
         * actually holding the hash lock, could have changed the
         * page's identity in memory, but our registers would not
         * be changed, fooling the reconfirmation.  If the hash
         * lock was held during the search, the casting would
         * not be needed.
         */
        VM_STAT_ADD(page_lookup_cnt[6]);
        if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
            ((volatile u_offset_t)(pp->p_offset) != off)) {
            VM_STAT_ADD(page_lookup_cnt[7]);
            if (hash_locked) {
                panic("page_lookup_create: lost page %p",
                    (void *)pp);
                /*NOTREACHED*/
            }
            page_unlock(pp);
            phm = PAGE_HASH_MUTEX(index);
            mutex_enter(phm);
            hash_locked = 1;
            goto top;
        }

        /*
         * If page_trylock() was called, then pp may still be on
         * the cachelist (can't be on the free list, it would not
         * have been found in the search).  If it is on the
         * cachelist it must be pulled now.  To pull the page from
         * the cachelist, it must be exclusively locked.
         *
         * The other big difference between page_trylock() and
         * page_lock(), is that page_lock() will pull the
         * page from whatever free list (the cache list in this
         * case) the page is on.  If page_trylock() was used
         * above, then we have to do the reclaim ourselves.
         */
        if ((!hash_locked) && (PP_ISFREE(pp))) {
            ASSERT(PP_ISAGED(pp) == 0);
            VM_STAT_ADD(page_lookup_cnt[8]);

            /*
             * page_reclaim will ensure that we
             * have this page exclusively
             */

            if (!page_reclaim(pp, NULL)) {
                /*
                 * Page_reclaim dropped whatever lock
                 * we held.
                 */
                VM_STAT_ADD(page_lookup_cnt[9]);
                phm = PAGE_HASH_MUTEX(index);
                mutex_enter(phm);
                hash_locked = 1;
                goto top;
            } else if (se == SE_SHARED && newpp == NULL) {
                VM_STAT_ADD(page_lookup_cnt[10]);
                page_downgrade(pp);
            }
        }

        if (hash_locked) {
            mutex_exit(phm);
        }

        if (newpp != NULL && pp->p_szc < newpp->p_szc &&
            PAGE_EXCL(pp) && nrelocp != NULL) {
            ASSERT(nrelocp != NULL);
            (void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
                NULL);
            if (*nrelocp > 0) {
                VM_STAT_COND_ADD(*nrelocp == 1,
                    page_lookup_cnt[11]);
                VM_STAT_COND_ADD(*nrelocp > 1,
                    page_lookup_cnt[12]);
                pp = newpp;
                se = SE_EXCL;
            } else {
                if (se == SE_SHARED) {
                    page_downgrade(pp);
                }
                VM_STAT_ADD(page_lookup_cnt[13]);
            }
        } else if (newpp != NULL && nrelocp != NULL) {
            if (PAGE_EXCL(pp) && se == SE_SHARED) {
                page_downgrade(pp);
            }
            VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
                page_lookup_cnt[14]);
            VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
                page_lookup_cnt[15]);
            VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
                page_lookup_cnt[16]);
        } else if (newpp != NULL && PAGE_EXCL(pp)) {
            se = SE_EXCL;
        }
    } else if (!hash_locked) {
        VM_STAT_ADD(page_lookup_cnt[17]);
        phm = PAGE_HASH_MUTEX(index);
        mutex_enter(phm);
        hash_locked = 1;
        goto top;
    } else if (newpp != NULL) {
        /*
         * If we have a preallocated page then
         * insert it now and basically behave like
         * page_create.
         */
        VM_STAT_ADD(page_lookup_cnt[18]);
        /*
         * Since we hold the page hash mutex and
         * just searched for this page, page_hashin
         * had better not fail.  If it does, that
         * means some thread did not follow the
         * page hash mutex rules.  Panic now and
         * get it over with.  As usual, go down
         * holding all the locks.
         */
        ASSERT(MUTEX_HELD(phm));
        if (!page_hashin(newpp, vp, off, phm)) {
            ASSERT(MUTEX_HELD(phm));
            panic("page_lookup_create: hashin failed %p %p %llx %p",
                (void *)newpp, (void *)vp, off, (void *)phm);
            /*NOTREACHED*/
        }
        ASSERT(MUTEX_HELD(phm));
        mutex_exit(phm);
        phm = NULL;
        page_set_props(newpp, P_REF);
        page_io_lock(newpp);
        pp = newpp;
        se = SE_EXCL;
    } else {
        VM_STAT_ADD(page_lookup_cnt[19]);
        mutex_exit(phm);
    }

    ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

    ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);

    return (pp);
}

/*
 * Search the hash list for the page representing the
 * specified [vp, offset] and return it locked.  Skip
 * free pages and pages that cannot be locked as requested.
 * Used while attempting to kluster pages.
 */
page_t *
page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
{
    page_t *pp;
    kmutex_t *phm;
    ulong_t index;
    uint_t locked;

    ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
    VM_STAT_ADD(page_lookup_nowait_cnt[0]);

    index = PAGE_HASH_FUNC(vp, off);
    PAGE_HASH_SEARCH(index, pp, vp, off);
    locked = 0;
    if (pp == NULL) {
top:
        VM_STAT_ADD(page_lookup_nowait_cnt[1]);
        locked = 1;
        phm = PAGE_HASH_MUTEX(index);
        mutex_enter(phm);
        PAGE_HASH_SEARCH(index, pp, vp, off);
    }

    if (pp == NULL || PP_ISFREE(pp)) {
        VM_STAT_ADD(page_lookup_nowait_cnt[2]);
        pp = NULL;
    } else {
        if (!page_trylock(pp, se)) {
            VM_STAT_ADD(page_lookup_nowait_cnt[3]);
            pp = NULL;
        } else {
            VM_STAT_ADD(page_lookup_nowait_cnt[4]);
            /*
             * See the comment in page_lookup()
             */
            if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
                ((u_offset_t)(pp->p_offset) != off)) {
                VM_STAT_ADD(page_lookup_nowait_cnt[5]);
                if (locked) {
panic("page_lookup_nowait %p", 9310Sstevel@tonic-gate (void *)pp); 9320Sstevel@tonic-gate /*NOTREACHED*/ 9330Sstevel@tonic-gate } 9340Sstevel@tonic-gate page_unlock(pp); 9350Sstevel@tonic-gate goto top; 9360Sstevel@tonic-gate } 9370Sstevel@tonic-gate if (PP_ISFREE(pp)) { 9380Sstevel@tonic-gate VM_STAT_ADD(page_lookup_nowait_cnt[6]); 9390Sstevel@tonic-gate page_unlock(pp); 9400Sstevel@tonic-gate pp = NULL; 9410Sstevel@tonic-gate } 9420Sstevel@tonic-gate } 9430Sstevel@tonic-gate } 9440Sstevel@tonic-gate if (locked) { 9450Sstevel@tonic-gate VM_STAT_ADD(page_lookup_nowait_cnt[7]); 9460Sstevel@tonic-gate mutex_exit(phm); 9470Sstevel@tonic-gate } 9480Sstevel@tonic-gate 9490Sstevel@tonic-gate ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1); 9500Sstevel@tonic-gate 9510Sstevel@tonic-gate return (pp); 9520Sstevel@tonic-gate } 9530Sstevel@tonic-gate 9540Sstevel@tonic-gate /* 9550Sstevel@tonic-gate * Search the hash list for a page with the specified [vp, off] 9560Sstevel@tonic-gate * that is known to exist and is already locked. This routine 9570Sstevel@tonic-gate * is typically used by segment SOFTUNLOCK routines. 9580Sstevel@tonic-gate */ 9590Sstevel@tonic-gate page_t * 9600Sstevel@tonic-gate page_find(vnode_t *vp, u_offset_t off) 9610Sstevel@tonic-gate { 9620Sstevel@tonic-gate page_t *pp; 9630Sstevel@tonic-gate kmutex_t *phm; 9640Sstevel@tonic-gate ulong_t index; 9650Sstevel@tonic-gate 9660Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 9670Sstevel@tonic-gate VM_STAT_ADD(page_find_cnt); 9680Sstevel@tonic-gate 9690Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, off); 9700Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(index); 9710Sstevel@tonic-gate 9720Sstevel@tonic-gate mutex_enter(phm); 9730Sstevel@tonic-gate PAGE_HASH_SEARCH(index, pp, vp, off); 9740Sstevel@tonic-gate mutex_exit(phm); 9750Sstevel@tonic-gate 9761338Selowe ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr); 9770Sstevel@tonic-gate return (pp); 9780Sstevel@tonic-gate } 9790Sstevel@tonic-gate 9800Sstevel@tonic-gate /* 9810Sstevel@tonic-gate * Determine whether a page with the specified [vp, off] 9820Sstevel@tonic-gate * currently exists in the system. Obviously this should 9830Sstevel@tonic-gate * only be considered as a hint since nothing prevents the 9840Sstevel@tonic-gate * page from disappearing or appearing immediately after 9850Sstevel@tonic-gate * the return from this routine. Subsequently, we don't 9860Sstevel@tonic-gate * even bother to lock the list. 9870Sstevel@tonic-gate */ 9880Sstevel@tonic-gate page_t * 9890Sstevel@tonic-gate page_exists(vnode_t *vp, u_offset_t off) 9900Sstevel@tonic-gate { 9910Sstevel@tonic-gate page_t *pp; 9920Sstevel@tonic-gate ulong_t index; 9930Sstevel@tonic-gate 9940Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 9950Sstevel@tonic-gate VM_STAT_ADD(page_exists_cnt); 9960Sstevel@tonic-gate 9970Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, off); 9980Sstevel@tonic-gate PAGE_HASH_SEARCH(index, pp, vp, off); 9990Sstevel@tonic-gate 10000Sstevel@tonic-gate return (pp); 10010Sstevel@tonic-gate } 10020Sstevel@tonic-gate 10030Sstevel@tonic-gate /* 10040Sstevel@tonic-gate * Determine if physically contiguous pages exist for [vp, off] - [vp, off + 10050Sstevel@tonic-gate * page_size(szc)) range. if they exist and ppa is not NULL fill ppa array 10060Sstevel@tonic-gate * with these pages locked SHARED. If necessary reclaim pages from 10070Sstevel@tonic-gate * freelist. Return 1 if contiguous pages exist and 0 otherwise. 
 *
 * If we fail to lock the pages, we still return 1 if the pages exist and
 * are contiguous, but in that case the return value is just a hint and
 * the ppa array won't be filled.  The caller should initialize ppa[0] as
 * NULL to distinguish these two cases.
 *
 * Returns 0 if the pages don't exist or are not physically contiguous.
 *
 * This routine doesn't work for anonymous(swapfs) pages.
 */
int
page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
{
    pgcnt_t pages;
    pfn_t pfn;
    page_t *rootpp;
    pgcnt_t i;
    pgcnt_t j;
    u_offset_t save_off = off;
    ulong_t index;
    kmutex_t *phm;
    page_t *pp;
    uint_t pszc;
    int loopcnt = 0;

    ASSERT(szc != 0);
    ASSERT(vp != NULL);
    ASSERT(!IS_SWAPFSVP(vp));
    ASSERT(vp != &kvp);

again:
    if (++loopcnt > 3) {
        VM_STAT_ADD(page_exphcontg[0]);
        return (0);
    }

    index = PAGE_HASH_FUNC(vp, off);
    phm = PAGE_HASH_MUTEX(index);

    mutex_enter(phm);
    PAGE_HASH_SEARCH(index, pp, vp, off);
    mutex_exit(phm);

    VM_STAT_ADD(page_exphcontg[1]);

    if (pp == NULL) {
        VM_STAT_ADD(page_exphcontg[2]);
        return (0);
    }

    pages = page_get_pagecnt(szc);
    rootpp = pp;
    pfn = rootpp->p_pagenum;

    if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
        VM_STAT_ADD(page_exphcontg[3]);
        if (!page_trylock(pp, SE_SHARED)) {
            VM_STAT_ADD(page_exphcontg[4]);
            return (1);
        }
        if (pp->p_szc != pszc || pp->p_vnode != vp ||
            pp->p_offset != off) {
            VM_STAT_ADD(page_exphcontg[5]);
            page_unlock(pp);
            off = save_off;
            goto again;
        }
        /*
         * Since szc was non zero and the vnode and offset matched
         * after we locked the page, it can't become free on us.
         */
        ASSERT(!PP_ISFREE(pp));
        if (!IS_P2ALIGNED(pfn, pages)) {
            page_unlock(pp);
            return (0);
        }
        ppa[0] = pp;
        pp++;
        off += PAGESIZE;
        pfn++;
        for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
            if (!page_trylock(pp, SE_SHARED)) {
                VM_STAT_ADD(page_exphcontg[6]);
                pp--;
                while (i-- > 0) {
                    page_unlock(pp);
                    pp--;
                }
                ppa[0] = NULL;
                return (1);
            }
            if (pp->p_szc != pszc) {
                VM_STAT_ADD(page_exphcontg[7]);
                page_unlock(pp);
                pp--;
                while (i-- > 0) {
                    page_unlock(pp);
                    pp--;
                }
                ppa[0] = NULL;
                off = save_off;
                goto again;
            }
            /*
             * szc is the same as for the previous, already locked
             * pages with the right identity.  Since this page had
             * the correct szc after we locked it, it can't get
             * freed or destroyed and therefore must have the
             * expected identity.
             */
            ASSERT(!PP_ISFREE(pp));
            if (pp->p_vnode != vp ||
                pp->p_offset != off) {
                panic("page_exists_physcontig: "
                    "large page identity doesn't match");
            }
            ppa[i] = pp;
            ASSERT(pp->p_pagenum == pfn);
        }
        VM_STAT_ADD(page_exphcontg[8]);
        ppa[pages] = NULL;
        return (1);
    } else if (pszc >= szc) {
        VM_STAT_ADD(page_exphcontg[9]);
        if (!IS_P2ALIGNED(pfn, pages)) {
            return (0);
        }
        return (1);
    }

    if (!IS_P2ALIGNED(pfn, pages)) {
        VM_STAT_ADD(page_exphcontg[10]);
        return (0);
    }

    if (page_numtomemseg_nolock(pfn) !=
        page_numtomemseg_nolock(pfn + pages - 1)) {
        VM_STAT_ADD(page_exphcontg[11]);
        return (0);
    }

    /*
     * We loop up 4 times across pages to promote page size.
     * We're extra cautious to promote page size atomically with respect
     * to everybody else.  But we can probably optimize into 1 loop if
     * this becomes an issue.
     */

    for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
        ASSERT(pp->p_pagenum == pfn);
        if (!page_trylock(pp, SE_EXCL)) {
            VM_STAT_ADD(page_exphcontg[12]);
            break;
        }
        if (pp->p_vnode != vp ||
            pp->p_offset != off) {
            VM_STAT_ADD(page_exphcontg[13]);
            page_unlock(pp);
            break;
        }
        if (pp->p_szc >= szc) {
            ASSERT(i == 0);
            page_unlock(pp);
            off = save_off;
            goto again;
        }
    }

    if (i != pages) {
        VM_STAT_ADD(page_exphcontg[14]);
        --pp;
        while (i-- > 0) {
            page_unlock(pp);
            --pp;
        }
        return (0);
    }

    pp = rootpp;
    for (i = 0; i < pages; i++, pp++) {
        if (PP_ISFREE(pp)) {
            VM_STAT_ADD(page_exphcontg[15]);
            ASSERT(!PP_ISAGED(pp));
            ASSERT(pp->p_szc == 0);
            if (!page_reclaim(pp, NULL)) {
                break;
            }
        } else {
            ASSERT(pp->p_szc < szc);
            VM_STAT_ADD(page_exphcontg[16]);
            (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
        }
    }
    if (i < pages) {
        VM_STAT_ADD(page_exphcontg[17]);
        /*
         * page_reclaim failed because we were out of memory.
         * drop the rest of the locks and return because this page
         * must be already reallocated anyway.
         */
        pp = rootpp;
        for (j = 0; j < pages; j++, pp++) {
            if (j != i) {
                page_unlock(pp);
            }
        }
        return (0);
    }

    off = save_off;
    pp = rootpp;
    for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
        ASSERT(PAGE_EXCL(pp));
        ASSERT(!PP_ISFREE(pp));
        ASSERT(!hat_page_is_mapped(pp));
        ASSERT(pp->p_vnode == vp);
        ASSERT(pp->p_offset == off);
        pp->p_szc = szc;
    }
    pp = rootpp;
    for (i = 0; i < pages; i++, pp++) {
        if (ppa == NULL) {
            page_unlock(pp);
        } else {
            ppa[i] = pp;
            page_downgrade(ppa[i]);
        }
    }
    if (ppa != NULL) {
        ppa[pages] = NULL;
    }
    VM_STAT_ADD(page_exphcontg[18]);
    ASSERT(vp->v_pages != NULL);
    return (1);
}

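/*
 * Added note (not in the original source): an illustrative call of
 * page_exists_physcontig() above.  The ppa array must have room for
 * page_get_pagecnt(szc) + 1 entries (it is NULL-terminated on success),
 * and ppa[0] is primed with NULL so the "pages exist but could not be
 * locked" hint can be told apart from a successful fill:
 *
 *	ppa[0] = NULL;
 *	if (page_exists_physcontig(vp, off, szc, ppa) && ppa[0] != NULL) {
 *		...pages are locked SE_SHARED and listed in ppa[]...
 *	}
 */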
12480Sstevel@tonic-gate */ 12490Sstevel@tonic-gate int 12500Sstevel@tonic-gate page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc) 12510Sstevel@tonic-gate { 12520Sstevel@tonic-gate page_t *pp; 12530Sstevel@tonic-gate kmutex_t *phm; 12540Sstevel@tonic-gate ulong_t index; 12550Sstevel@tonic-gate int rc = 0; 12560Sstevel@tonic-gate 12570Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 12580Sstevel@tonic-gate ASSERT(szc != NULL); 12590Sstevel@tonic-gate VM_STAT_ADD(page_exists_forreal_cnt); 12600Sstevel@tonic-gate 12610Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, off); 12620Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(index); 12630Sstevel@tonic-gate 12640Sstevel@tonic-gate mutex_enter(phm); 12650Sstevel@tonic-gate PAGE_HASH_SEARCH(index, pp, vp, off); 12660Sstevel@tonic-gate if (pp != NULL) { 12670Sstevel@tonic-gate *szc = pp->p_szc; 12680Sstevel@tonic-gate rc = 1; 12690Sstevel@tonic-gate } 12700Sstevel@tonic-gate mutex_exit(phm); 12710Sstevel@tonic-gate return (rc); 12720Sstevel@tonic-gate } 12730Sstevel@tonic-gate 12740Sstevel@tonic-gate /* wakeup threads waiting for pages in page_create_get_something() */ 12750Sstevel@tonic-gate void 12760Sstevel@tonic-gate wakeup_pcgs(void) 12770Sstevel@tonic-gate { 12780Sstevel@tonic-gate if (!CV_HAS_WAITERS(&pcgs_cv)) 12790Sstevel@tonic-gate return; 12800Sstevel@tonic-gate cv_broadcast(&pcgs_cv); 12810Sstevel@tonic-gate } 12820Sstevel@tonic-gate 12830Sstevel@tonic-gate /* 12840Sstevel@tonic-gate * 'freemem' is used all over the kernel as an indication of how many 12850Sstevel@tonic-gate * pages are free (either on the cache list or on the free page list) 12860Sstevel@tonic-gate * in the system. In very few places is a really accurate 'freemem' 12870Sstevel@tonic-gate * needed. To avoid contention of the lock protecting a the 12880Sstevel@tonic-gate * single freemem, it was spread out into NCPU buckets. Set_freemem 12890Sstevel@tonic-gate * sets freemem to the total of all NCPU buckets. It is called from 12900Sstevel@tonic-gate * clock() on each TICK. 12910Sstevel@tonic-gate */ 12920Sstevel@tonic-gate void 12930Sstevel@tonic-gate set_freemem() 12940Sstevel@tonic-gate { 12950Sstevel@tonic-gate struct pcf *p; 12960Sstevel@tonic-gate ulong_t t; 12970Sstevel@tonic-gate uint_t i; 12980Sstevel@tonic-gate 12990Sstevel@tonic-gate t = 0; 13000Sstevel@tonic-gate p = pcf; 13010Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 13020Sstevel@tonic-gate t += p->pcf_count; 13030Sstevel@tonic-gate p++; 13040Sstevel@tonic-gate } 13050Sstevel@tonic-gate freemem = t; 13060Sstevel@tonic-gate 13070Sstevel@tonic-gate /* 13080Sstevel@tonic-gate * Don't worry about grabbing mutex. It's not that 13090Sstevel@tonic-gate * critical if we miss a tick or two. This is 13100Sstevel@tonic-gate * where we wakeup possible delayers in 13110Sstevel@tonic-gate * page_create_get_something(). 13120Sstevel@tonic-gate */ 13130Sstevel@tonic-gate wakeup_pcgs(); 13140Sstevel@tonic-gate } 13150Sstevel@tonic-gate 13160Sstevel@tonic-gate ulong_t 13170Sstevel@tonic-gate get_freemem() 13180Sstevel@tonic-gate { 13190Sstevel@tonic-gate struct pcf *p; 13200Sstevel@tonic-gate ulong_t t; 13210Sstevel@tonic-gate uint_t i; 13220Sstevel@tonic-gate 13230Sstevel@tonic-gate t = 0; 13240Sstevel@tonic-gate p = pcf; 13250Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 13260Sstevel@tonic-gate t += p->pcf_count; 13270Sstevel@tonic-gate p++; 13280Sstevel@tonic-gate } 13290Sstevel@tonic-gate /* 13300Sstevel@tonic-gate * We just calculated it, might as well set it. 
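The fan-out described here trades accuracy for scalability: writers touch only their own bucket, and a periodic reader sums the buckets to refresh the global value, which is the set_freemem()/get_freemem() pattern. A stand-alone user-land sketch of that idea (bucket count and all names are hypothetical, not kernel code):

    #include <pthread.h>

    #define NBUCKETS    4               /* stands in for PCF_FANOUT */

    struct bucket {
        pthread_mutex_t b_lock;         /* must be pthread_mutex_init()ed */
        unsigned long   b_count;
    };

    static struct bucket buckets[NBUCKETS];
    static unsigned long approx_total;  /* plays the role of freemem */

    /* Writers lock only their own bucket, so they rarely contend. */
    static void
    bucket_add(unsigned int cpu, long delta)
    {
        struct bucket *b = &buckets[cpu % NBUCKETS];

        pthread_mutex_lock(&b->b_lock);
        b->b_count += delta;
        pthread_mutex_unlock(&b->b_lock);
    }

    /* A periodic reader sums the buckets locklessly, like set_freemem(). */
    static void
    refresh_total(void)
    {
        unsigned long t = 0;
        int i;

        for (i = 0; i < NBUCKETS; i++)
            t += buckets[i].b_count;
        approx_total = t;               /* slightly stale is good enough */
    }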
13310Sstevel@tonic-gate */ 13320Sstevel@tonic-gate freemem = t; 13330Sstevel@tonic-gate return (t); 13340Sstevel@tonic-gate } 13350Sstevel@tonic-gate 13360Sstevel@tonic-gate /* 13370Sstevel@tonic-gate * Acquire all of the page cache & free (pcf) locks. 13380Sstevel@tonic-gate */ 13390Sstevel@tonic-gate void 13400Sstevel@tonic-gate pcf_acquire_all() 13410Sstevel@tonic-gate { 13420Sstevel@tonic-gate struct pcf *p; 13430Sstevel@tonic-gate uint_t i; 13440Sstevel@tonic-gate 13450Sstevel@tonic-gate p = pcf; 13460Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 13470Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 13480Sstevel@tonic-gate p++; 13490Sstevel@tonic-gate } 13500Sstevel@tonic-gate } 13510Sstevel@tonic-gate 13520Sstevel@tonic-gate /* 13530Sstevel@tonic-gate * Release all the pcf_locks. 13540Sstevel@tonic-gate */ 13550Sstevel@tonic-gate void 13560Sstevel@tonic-gate pcf_release_all() 13570Sstevel@tonic-gate { 13580Sstevel@tonic-gate struct pcf *p; 13590Sstevel@tonic-gate uint_t i; 13600Sstevel@tonic-gate 13610Sstevel@tonic-gate p = pcf; 13620Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 13630Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 13640Sstevel@tonic-gate p++; 13650Sstevel@tonic-gate } 13660Sstevel@tonic-gate } 13670Sstevel@tonic-gate 13680Sstevel@tonic-gate /* 13690Sstevel@tonic-gate * Inform the VM system that we need some pages freed up. 13700Sstevel@tonic-gate * Calls must be symmetric, e.g.: 13710Sstevel@tonic-gate * 13720Sstevel@tonic-gate * page_needfree(100); 13730Sstevel@tonic-gate * wait a bit; 13740Sstevel@tonic-gate * page_needfree(-100); 13750Sstevel@tonic-gate */ 13760Sstevel@tonic-gate void 13770Sstevel@tonic-gate page_needfree(spgcnt_t npages) 13780Sstevel@tonic-gate { 13790Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 13800Sstevel@tonic-gate needfree += npages; 13810Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 13820Sstevel@tonic-gate } 13830Sstevel@tonic-gate 13840Sstevel@tonic-gate /* 13850Sstevel@tonic-gate * Throttle for page_create(): try to prevent freemem from dropping 13860Sstevel@tonic-gate * below throttlefree. We can't provide a 100% guarantee because 13870Sstevel@tonic-gate * KM_NOSLEEP allocations, page_reclaim(), and various other things 13880Sstevel@tonic-gate * nibble away at the freelist. However, we can block all PG_WAIT 13890Sstevel@tonic-gate * allocations until memory becomes available. The motivation is 13900Sstevel@tonic-gate * that several things can fall apart when there's no free memory: 13910Sstevel@tonic-gate * 13920Sstevel@tonic-gate * (1) If pageout() needs memory to push a page, the system deadlocks. 13930Sstevel@tonic-gate * 13940Sstevel@tonic-gate * (2) By (broken) specification, timeout(9F) can neither fail nor 13950Sstevel@tonic-gate * block, so it has no choice but to panic the system if it 13960Sstevel@tonic-gate * cannot allocate a callout structure. 13970Sstevel@tonic-gate * 13980Sstevel@tonic-gate * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block; 13990Sstevel@tonic-gate * it panics if it cannot allocate a callback structure. 14000Sstevel@tonic-gate * 14010Sstevel@tonic-gate * (4) Untold numbers of third-party drivers have not yet been hardened 14020Sstevel@tonic-gate * against KM_NOSLEEP and/or allocb() failures; they simply assume 14030Sstevel@tonic-gate * success and panic the system with a data fault on failure. 14040Sstevel@tonic-gate * (The long-term solution to this particular problem is to ship 14050Sstevel@tonic-gate * hostile fault-injecting DEBUG kernels with the DDK.) 
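For page_needfree(), the symmetry requirement above can be wrapped so the matching negative call is never forgotten. A hedged sketch of such a caller (the helper name is hypothetical; page_needfree() is defined in this file and delay() is the standard kernel delay routine):

    /*
     * Hypothetical sketch: advertise a temporary deficit to the pageout
     * daemon, give it a moment to react, then withdraw the request.  The
     * two page_needfree() calls must always balance, as required above.
     */
    static void
    advertise_need_and_wait(spgcnt_t npages)
    {
        page_needfree(npages);      /* "this many will be needed soon" */
        delay(hz);                  /* wait a bit, per the example above */
        page_needfree(-npages);     /* symmetric withdrawal */
    }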
14060Sstevel@tonic-gate * 14070Sstevel@tonic-gate * It is theoretically impossible to guarantee success of non-blocking 14080Sstevel@tonic-gate * allocations, but in practice, this throttle is very hard to break. 14090Sstevel@tonic-gate */ 14100Sstevel@tonic-gate static int 14110Sstevel@tonic-gate page_create_throttle(pgcnt_t npages, int flags) 14120Sstevel@tonic-gate { 14130Sstevel@tonic-gate ulong_t fm; 14140Sstevel@tonic-gate uint_t i; 14150Sstevel@tonic-gate pgcnt_t tf; /* effective value of throttlefree */ 14160Sstevel@tonic-gate 14170Sstevel@tonic-gate /* 14180Sstevel@tonic-gate * Never deny pages when: 14190Sstevel@tonic-gate * - it's a thread that cannot block [NOMEMWAIT()] 14200Sstevel@tonic-gate * - the allocation cannot block and must not fail 14210Sstevel@tonic-gate * - the allocation cannot block and has the pageout dispensation (PG_PUSHPAGE) 14220Sstevel@tonic-gate */ 14230Sstevel@tonic-gate if (NOMEMWAIT() || 14240Sstevel@tonic-gate ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) || 14250Sstevel@tonic-gate ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE)) 14260Sstevel@tonic-gate return (1); 14270Sstevel@tonic-gate 14280Sstevel@tonic-gate /* 14290Sstevel@tonic-gate * If the allocation can't block, we look favorably upon it 14300Sstevel@tonic-gate * unless we're below pageout_reserve. In that case we fail 14310Sstevel@tonic-gate * the allocation because we want to make sure there are a few 14320Sstevel@tonic-gate * pages available for pageout. 14330Sstevel@tonic-gate */ 14340Sstevel@tonic-gate if ((flags & PG_WAIT) == 0) 14350Sstevel@tonic-gate return (freemem >= npages + pageout_reserve); 14360Sstevel@tonic-gate 14370Sstevel@tonic-gate /* Calculate the effective throttlefree value */ 14380Sstevel@tonic-gate tf = throttlefree - 14390Sstevel@tonic-gate ((flags & PG_PUSHPAGE) ? pageout_reserve : 0); 14400Sstevel@tonic-gate 14410Sstevel@tonic-gate cv_signal(&proc_pageout->p_cv); 14420Sstevel@tonic-gate 14430Sstevel@tonic-gate while (freemem < npages + tf) { 14440Sstevel@tonic-gate pcf_acquire_all(); 14450Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 14460Sstevel@tonic-gate fm = 0; 14470Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 14480Sstevel@tonic-gate fm += pcf[i].pcf_count; 14490Sstevel@tonic-gate pcf[i].pcf_wait++; 14500Sstevel@tonic-gate mutex_exit(&pcf[i].pcf_lock); 14510Sstevel@tonic-gate } 14520Sstevel@tonic-gate freemem = fm; 14530Sstevel@tonic-gate needfree += npages; 14540Sstevel@tonic-gate freemem_wait++; 14550Sstevel@tonic-gate cv_wait(&freemem_cv, &new_freemem_lock); 14560Sstevel@tonic-gate freemem_wait--; 14570Sstevel@tonic-gate needfree -= npages; 14580Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 14590Sstevel@tonic-gate } 14600Sstevel@tonic-gate return (1); 14610Sstevel@tonic-gate } 14620Sstevel@tonic-gate 14630Sstevel@tonic-gate /* 14640Sstevel@tonic-gate * page_create_wait() is called to either coalesce pages from the 14650Sstevel@tonic-gate * different pcf buckets or to wait because there simply are not 14660Sstevel@tonic-gate * enough pages to satisfy the caller's request.
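Reduced to policy, page_create_throttle() above makes a three-way decision: exempt callers always pass, non-blocking callers pass only while the pageout reserve is intact, and blocking callers wait for freemem to recover. A stand-alone sketch of just that decision, with the tunables passed as parameters so it can be exercised in isolation (names hypothetical):

    /*
     * Simplified model of the throttle policy above:
     *   - exempt callers (cannot block, must not fail, pageout itself) pass;
     *   - non-blocking callers pass only while the pageout reserve is intact;
     *   - blocking callers are told to wait until freemem recovers.
     */
    enum throttle_verdict { ALLOW, DENY, MUST_WAIT };

    static enum throttle_verdict
    throttle_model(unsigned long freemem, unsigned long npages,
        unsigned long pageout_reserve, unsigned long throttlefree,
        int can_block, int exempt)
    {
        if (exempt)
            return (ALLOW);
        if (!can_block)
            return (freemem >= npages + pageout_reserve ? ALLOW : DENY);
        return (freemem >= npages + throttlefree ? ALLOW : MUST_WAIT);
    }

page_create_wait(), introduced above, handles the waiting half of this policy for callers that have already gone through the pcf accounting.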
14670Sstevel@tonic-gate * 14680Sstevel@tonic-gate * Sadly, this is called from platform/vm/vm_machdep.c 14690Sstevel@tonic-gate */ 14700Sstevel@tonic-gate int 14710Sstevel@tonic-gate page_create_wait(size_t npages, uint_t flags) 14720Sstevel@tonic-gate { 14730Sstevel@tonic-gate pgcnt_t total; 14740Sstevel@tonic-gate uint_t i; 14750Sstevel@tonic-gate struct pcf *p; 14760Sstevel@tonic-gate 14770Sstevel@tonic-gate /* 14780Sstevel@tonic-gate * Wait until there are enough free pages to satisfy our 14790Sstevel@tonic-gate * entire request. 14800Sstevel@tonic-gate * We set needfree += npages before prodding pageout, to make sure 14810Sstevel@tonic-gate * it does real work when npages > lotsfree > freemem. 14820Sstevel@tonic-gate */ 14830Sstevel@tonic-gate VM_STAT_ADD(page_create_not_enough); 14840Sstevel@tonic-gate 14850Sstevel@tonic-gate ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1); 14860Sstevel@tonic-gate checkagain: 14870Sstevel@tonic-gate if ((flags & PG_NORELOC) && 14880Sstevel@tonic-gate kcage_freemem < kcage_throttlefree + npages) 14890Sstevel@tonic-gate (void) kcage_create_throttle(npages, flags); 14900Sstevel@tonic-gate 14910Sstevel@tonic-gate if (freemem < npages + throttlefree) 14920Sstevel@tonic-gate if (!page_create_throttle(npages, flags)) 14930Sstevel@tonic-gate return (0); 14940Sstevel@tonic-gate 14950Sstevel@tonic-gate /* 14960Sstevel@tonic-gate * Since page_create_va() looked at every 14970Sstevel@tonic-gate * bucket, assume we are going to have to wait. 14980Sstevel@tonic-gate * Get all of the pcf locks. 14990Sstevel@tonic-gate */ 15000Sstevel@tonic-gate total = 0; 15010Sstevel@tonic-gate p = pcf; 15020Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 15030Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 15040Sstevel@tonic-gate total += p->pcf_count; 15050Sstevel@tonic-gate if (total >= npages) { 15060Sstevel@tonic-gate /* 15070Sstevel@tonic-gate * Wow! There are enough pages laying around 15080Sstevel@tonic-gate * to satisfy the request. Do the accounting, 15090Sstevel@tonic-gate * drop the locks we acquired, and go back. 15100Sstevel@tonic-gate * 15110Sstevel@tonic-gate * freemem is not protected by any lock. So, 15120Sstevel@tonic-gate * we cannot have any assertion containing 15130Sstevel@tonic-gate * freemem. 15140Sstevel@tonic-gate */ 15150Sstevel@tonic-gate freemem -= npages; 15160Sstevel@tonic-gate 15170Sstevel@tonic-gate while (p >= pcf) { 15180Sstevel@tonic-gate if (p->pcf_count <= npages) { 15190Sstevel@tonic-gate npages -= p->pcf_count; 15200Sstevel@tonic-gate p->pcf_count = 0; 15210Sstevel@tonic-gate } else { 15220Sstevel@tonic-gate p->pcf_count -= (uint_t)npages; 15230Sstevel@tonic-gate npages = 0; 15240Sstevel@tonic-gate } 15250Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 15260Sstevel@tonic-gate p--; 15270Sstevel@tonic-gate } 15280Sstevel@tonic-gate ASSERT(npages == 0); 15290Sstevel@tonic-gate return (1); 15300Sstevel@tonic-gate } 15310Sstevel@tonic-gate p++; 15320Sstevel@tonic-gate } 15330Sstevel@tonic-gate 15340Sstevel@tonic-gate /* 15350Sstevel@tonic-gate * All of the pcf locks are held, there are not enough pages 15360Sstevel@tonic-gate * to satisfy the request (npages < total). 15370Sstevel@tonic-gate * Be sure to acquire the new_freemem_lock before dropping 15380Sstevel@tonic-gate * the pcf locks. This prevents dropping wakeups in page_free(). 15390Sstevel@tonic-gate * The order is always pcf_lock then new_freemem_lock. 15400Sstevel@tonic-gate * 15410Sstevel@tonic-gate * Since we hold all the pcf locks, it is a good time to set freemem. 
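The accounting step above debits the request from the buckets it has already locked, walking backwards from the bucket where the running total crossed the request. The same walk over an ordinary array, as a stand-alone sketch (names hypothetical; locking is assumed to be handled by the caller):

    #include <stddef.h>

    /*
     * Debit `need' units from counters[0..last], assuming the caller already
     * holds whatever locks protect them, mirroring the backwards
     * "while (p >= pcf)" walk above.  Returns the amount still uncovered,
     * which is 0 when the request was fully satisfied.
     */
    static size_t
    debit_buckets(unsigned int *counters, int last, size_t need)
    {
        int i;

        for (i = last; i >= 0 && need > 0; i--) {
            if (counters[i] <= need) {
                need -= counters[i];
                counters[i] = 0;
            } else {
                counters[i] -= (unsigned int)need;
                need = 0;
            }
        }
        return (need);
    }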
15420Sstevel@tonic-gate * 15430Sstevel@tonic-gate * If the caller does not want to wait, return now. 15440Sstevel@tonic-gate * Else turn the pageout daemon loose to find something 15450Sstevel@tonic-gate * and wait till it does. 15460Sstevel@tonic-gate * 15470Sstevel@tonic-gate */ 15480Sstevel@tonic-gate freemem = total; 15490Sstevel@tonic-gate 15500Sstevel@tonic-gate if ((flags & PG_WAIT) == 0) { 15510Sstevel@tonic-gate pcf_release_all(); 15520Sstevel@tonic-gate 15530Sstevel@tonic-gate TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM, 15540Sstevel@tonic-gate "page_create_nomem:npages %ld freemem %ld", npages, freemem); 15550Sstevel@tonic-gate return (0); 15560Sstevel@tonic-gate } 15570Sstevel@tonic-gate 15580Sstevel@tonic-gate ASSERT(proc_pageout != NULL); 15590Sstevel@tonic-gate cv_signal(&proc_pageout->p_cv); 15600Sstevel@tonic-gate 15610Sstevel@tonic-gate TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START, 15620Sstevel@tonic-gate "page_create_sleep_start: freemem %ld needfree %ld", 15630Sstevel@tonic-gate freemem, needfree); 15640Sstevel@tonic-gate 15650Sstevel@tonic-gate /* 15660Sstevel@tonic-gate * We are going to wait. 15670Sstevel@tonic-gate * We currently hold all of the pcf_locks, 15680Sstevel@tonic-gate * get the new_freemem_lock (it protects freemem_wait), 15690Sstevel@tonic-gate * before dropping the pcf_locks. 15700Sstevel@tonic-gate */ 15710Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 15720Sstevel@tonic-gate 15730Sstevel@tonic-gate p = pcf; 15740Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 15750Sstevel@tonic-gate p->pcf_wait++; 15760Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 15770Sstevel@tonic-gate p++; 15780Sstevel@tonic-gate } 15790Sstevel@tonic-gate 15800Sstevel@tonic-gate needfree += npages; 15810Sstevel@tonic-gate freemem_wait++; 15820Sstevel@tonic-gate 15830Sstevel@tonic-gate cv_wait(&freemem_cv, &new_freemem_lock); 15840Sstevel@tonic-gate 15850Sstevel@tonic-gate freemem_wait--; 15860Sstevel@tonic-gate needfree -= npages; 15870Sstevel@tonic-gate 15880Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 15890Sstevel@tonic-gate 15900Sstevel@tonic-gate TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END, 15910Sstevel@tonic-gate "page_create_sleep_end: freemem %ld needfree %ld", 15920Sstevel@tonic-gate freemem, needfree); 15930Sstevel@tonic-gate 15940Sstevel@tonic-gate VM_STAT_ADD(page_create_not_enough_again); 15950Sstevel@tonic-gate goto checkagain; 15960Sstevel@tonic-gate } 15970Sstevel@tonic-gate 15980Sstevel@tonic-gate /* 15990Sstevel@tonic-gate * A routine to do the opposite of page_create_wait(). 16000Sstevel@tonic-gate */ 16010Sstevel@tonic-gate void 16020Sstevel@tonic-gate page_create_putback(spgcnt_t npages) 16030Sstevel@tonic-gate { 16040Sstevel@tonic-gate struct pcf *p; 16050Sstevel@tonic-gate pgcnt_t lump; 16060Sstevel@tonic-gate uint_t *which; 16070Sstevel@tonic-gate 16080Sstevel@tonic-gate /* 16090Sstevel@tonic-gate * When a contiguous lump is broken up, we have to 16100Sstevel@tonic-gate * deal with lots of pages (min 64) so lets spread 16110Sstevel@tonic-gate * the wealth around. 
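The spreading described here amounts to dividing the returned pages into roughly equal lumps, one per bucket, rounding the lump size up so no single bucket absorbs the whole remainder. A small stand-alone sketch of that division (bucket count and names hypothetical):

    #include <stddef.h>

    /*
     * Return `npages' credits to `nbuckets' counters in roughly equal lumps,
     * the way the code below spreads a broken-up large page across pcf[].
     */
    static void
    spread_credits(unsigned int *counters, int nbuckets, size_t npages)
    {
        /* round up, so the lump covers the request in nbuckets steps */
        size_t lump = (npages + nbuckets - 1) / nbuckets;
        int i;

        for (i = 0; i < nbuckets && npages > 0; i++) {
            size_t give = (lump < npages) ? lump : npages;

            counters[i] += (unsigned int)give;
            npages -= give;
        }
    }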
16120Sstevel@tonic-gate */ 16130Sstevel@tonic-gate lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT; 16140Sstevel@tonic-gate freemem += npages; 16150Sstevel@tonic-gate 16160Sstevel@tonic-gate for (p = pcf; (npages > 0) && (p < &pcf[PCF_FANOUT]); p++) { 16170Sstevel@tonic-gate which = &p->pcf_count; 16180Sstevel@tonic-gate 16190Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 16200Sstevel@tonic-gate 16210Sstevel@tonic-gate if (p->pcf_block) { 16220Sstevel@tonic-gate which = &p->pcf_reserve; 16230Sstevel@tonic-gate } 16240Sstevel@tonic-gate 16250Sstevel@tonic-gate if (lump < npages) { 16260Sstevel@tonic-gate *which += (uint_t)lump; 16270Sstevel@tonic-gate npages -= lump; 16280Sstevel@tonic-gate } else { 16290Sstevel@tonic-gate *which += (uint_t)npages; 16300Sstevel@tonic-gate npages = 0; 16310Sstevel@tonic-gate } 16320Sstevel@tonic-gate 16330Sstevel@tonic-gate if (p->pcf_wait) { 16340Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 16350Sstevel@tonic-gate /* 16360Sstevel@tonic-gate * Check to see if some other thread 16370Sstevel@tonic-gate * is actually waiting. Another bucket 16380Sstevel@tonic-gate * may have woken it up by now. If there 16390Sstevel@tonic-gate * are no waiters, then set our pcf_wait 16400Sstevel@tonic-gate * count to zero to avoid coming in here 16410Sstevel@tonic-gate * next time. 16420Sstevel@tonic-gate */ 16430Sstevel@tonic-gate if (freemem_wait) { 16440Sstevel@tonic-gate if (npages > 1) { 16450Sstevel@tonic-gate cv_broadcast(&freemem_cv); 16460Sstevel@tonic-gate } else { 16470Sstevel@tonic-gate cv_signal(&freemem_cv); 16480Sstevel@tonic-gate } 16490Sstevel@tonic-gate p->pcf_wait--; 16500Sstevel@tonic-gate } else { 16510Sstevel@tonic-gate p->pcf_wait = 0; 16520Sstevel@tonic-gate } 16530Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 16540Sstevel@tonic-gate } 16550Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 16560Sstevel@tonic-gate } 16570Sstevel@tonic-gate ASSERT(npages == 0); 16580Sstevel@tonic-gate } 16590Sstevel@tonic-gate 16600Sstevel@tonic-gate /* 16610Sstevel@tonic-gate * A helper routine for page_create_get_something. 16620Sstevel@tonic-gate * The indenting got too deep down there. 16630Sstevel@tonic-gate * Unblock the pcf counters. Any pages freed after 16640Sstevel@tonic-gate * pcf_block got set are moved to pcf_count and 16650Sstevel@tonic-gate * wakeups (cv_broadcast() or cv_signal()) are done as needed. 16660Sstevel@tonic-gate */ 16670Sstevel@tonic-gate static void 16680Sstevel@tonic-gate pcgs_unblock(void) 16690Sstevel@tonic-gate { 16700Sstevel@tonic-gate int i; 16710Sstevel@tonic-gate struct pcf *p; 16720Sstevel@tonic-gate 16730Sstevel@tonic-gate /* Update freemem while we're here.
*/ 16740Sstevel@tonic-gate freemem = 0; 16750Sstevel@tonic-gate p = pcf; 16760Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 16770Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 16780Sstevel@tonic-gate ASSERT(p->pcf_count == 0); 16790Sstevel@tonic-gate p->pcf_count = p->pcf_reserve; 16800Sstevel@tonic-gate p->pcf_block = 0; 16810Sstevel@tonic-gate freemem += p->pcf_count; 16820Sstevel@tonic-gate if (p->pcf_wait) { 16830Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 16840Sstevel@tonic-gate if (freemem_wait) { 16850Sstevel@tonic-gate if (p->pcf_reserve > 1) { 16860Sstevel@tonic-gate cv_broadcast(&freemem_cv); 16870Sstevel@tonic-gate p->pcf_wait = 0; 16880Sstevel@tonic-gate } else { 16890Sstevel@tonic-gate cv_signal(&freemem_cv); 16900Sstevel@tonic-gate p->pcf_wait--; 16910Sstevel@tonic-gate } 16920Sstevel@tonic-gate } else { 16930Sstevel@tonic-gate p->pcf_wait = 0; 16940Sstevel@tonic-gate } 16950Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 16960Sstevel@tonic-gate } 16970Sstevel@tonic-gate p->pcf_reserve = 0; 16980Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 16990Sstevel@tonic-gate p++; 17000Sstevel@tonic-gate } 17010Sstevel@tonic-gate } 17020Sstevel@tonic-gate 17030Sstevel@tonic-gate /* 17040Sstevel@tonic-gate * Called from page_create_va() when both the cache and free lists 17050Sstevel@tonic-gate * have been checked once. 17060Sstevel@tonic-gate * 17070Sstevel@tonic-gate * Either returns a page or panics since the accounting was done 17080Sstevel@tonic-gate * way before we got here. 17090Sstevel@tonic-gate * 17100Sstevel@tonic-gate * We don't come here often, so leave the accounting on permanently. 17110Sstevel@tonic-gate */ 17120Sstevel@tonic-gate 17130Sstevel@tonic-gate #define MAX_PCGS 100 17140Sstevel@tonic-gate 17150Sstevel@tonic-gate #ifdef DEBUG 17160Sstevel@tonic-gate #define PCGS_TRIES 100 17170Sstevel@tonic-gate #else /* DEBUG */ 17180Sstevel@tonic-gate #define PCGS_TRIES 10 17190Sstevel@tonic-gate #endif /* DEBUG */ 17200Sstevel@tonic-gate 17210Sstevel@tonic-gate #ifdef VM_STATS 17220Sstevel@tonic-gate uint_t pcgs_counts[PCGS_TRIES]; 17230Sstevel@tonic-gate uint_t pcgs_too_many; 17240Sstevel@tonic-gate uint_t pcgs_entered; 17250Sstevel@tonic-gate uint_t pcgs_entered_noreloc; 17260Sstevel@tonic-gate uint_t pcgs_locked; 17270Sstevel@tonic-gate uint_t pcgs_cagelocked; 17280Sstevel@tonic-gate #endif /* VM_STATS */ 17290Sstevel@tonic-gate 17300Sstevel@tonic-gate static page_t * 17310Sstevel@tonic-gate page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg, 17320Sstevel@tonic-gate caddr_t vaddr, uint_t flags) 17330Sstevel@tonic-gate { 17340Sstevel@tonic-gate uint_t count; 17350Sstevel@tonic-gate page_t *pp; 17360Sstevel@tonic-gate uint_t locked, i; 17370Sstevel@tonic-gate struct pcf *p; 17380Sstevel@tonic-gate lgrp_t *lgrp; 17390Sstevel@tonic-gate int cagelocked = 0; 17400Sstevel@tonic-gate 17410Sstevel@tonic-gate VM_STAT_ADD(pcgs_entered); 17420Sstevel@tonic-gate 17430Sstevel@tonic-gate /* 17440Sstevel@tonic-gate * Tap any reserve freelists: if we fail now, we'll die 17450Sstevel@tonic-gate * since the page(s) we're looking for have already been 17460Sstevel@tonic-gate * accounted for. 
17470Sstevel@tonic-gate */ 17480Sstevel@tonic-gate flags |= PG_PANIC; 17490Sstevel@tonic-gate 17500Sstevel@tonic-gate if ((flags & PG_NORELOC) != 0) { 17510Sstevel@tonic-gate VM_STAT_ADD(pcgs_entered_noreloc); 17520Sstevel@tonic-gate /* 17530Sstevel@tonic-gate * Requests for free pages from critical threads 17540Sstevel@tonic-gate * such as pageout still won't throttle here, but 17550Sstevel@tonic-gate * we must try again, to give the cageout thread 17560Sstevel@tonic-gate * another chance to catch up. Since we already 17570Sstevel@tonic-gate * accounted for the pages, we had better get them 17580Sstevel@tonic-gate * this time. 17590Sstevel@tonic-gate * 17600Sstevel@tonic-gate * N.B. All non-critical threads acquire the pcgs_cagelock 17610Sstevel@tonic-gate * to serialize access to the freelists. This implements a 17620Sstevel@tonic-gate * turnstile-type synchronization to avoid starvation of 17630Sstevel@tonic-gate * critical requests for PG_NORELOC memory by non-critical 17640Sstevel@tonic-gate * threads: all non-critical threads must acquire a 'ticket' 17650Sstevel@tonic-gate * before passing through, which entails making sure 17660Sstevel@tonic-gate * kcage_freemem won't fall below minfree prior to grabbing 17670Sstevel@tonic-gate * pages from the freelists. 17680Sstevel@tonic-gate */ 17690Sstevel@tonic-gate if (kcage_create_throttle(1, flags) == KCT_NONCRIT) { 17700Sstevel@tonic-gate mutex_enter(&pcgs_cagelock); 17710Sstevel@tonic-gate cagelocked = 1; 17720Sstevel@tonic-gate VM_STAT_ADD(pcgs_cagelocked); 17730Sstevel@tonic-gate } 17740Sstevel@tonic-gate } 17750Sstevel@tonic-gate 17760Sstevel@tonic-gate /* 17770Sstevel@tonic-gate * Time to get serious. 17780Sstevel@tonic-gate * We failed to get a `correctly colored' page from both the 17790Sstevel@tonic-gate * free and cache lists. 17800Sstevel@tonic-gate * We escalate in stages. 17810Sstevel@tonic-gate * 17820Sstevel@tonic-gate * First try both lists without worrying about color. 17830Sstevel@tonic-gate * 17840Sstevel@tonic-gate * Then, grab all page accounting locks (ie. pcf[]) and 17850Sstevel@tonic-gate * steal any pages that they have and set the pcf_block flag to 17860Sstevel@tonic-gate * stop deletions from the lists. This will help because 17870Sstevel@tonic-gate * a page can get added to the free list while we are looking 17880Sstevel@tonic-gate * at the cache list, then another page could be added to the cache 17890Sstevel@tonic-gate * list allowing the page on the free list to be removed as we 17900Sstevel@tonic-gate * move from looking at the cache list to the free list. This 17910Sstevel@tonic-gate * could happen over and over. We would never find the page 17920Sstevel@tonic-gate * we have accounted for. 17930Sstevel@tonic-gate * 17940Sstevel@tonic-gate * Noreloc pages are a subset of the global (relocatable) page pool. 17950Sstevel@tonic-gate * They are not tracked separately in the pcf bins, so it is 17960Sstevel@tonic-gate * impossible to know when doing pcf accounting if the available 17970Sstevel@tonic-gate * page(s) are noreloc pages or not. When looking for a noreloc page 17980Sstevel@tonic-gate * it is quite easy to end up here even if the global (relocatable) 17990Sstevel@tonic-gate * page pool has plenty of free pages but the noreloc pool is empty. 18000Sstevel@tonic-gate * 18010Sstevel@tonic-gate * When the noreloc pool is empty (or low), additional noreloc pages 18020Sstevel@tonic-gate * are created by converting pages from the global page pool.
This 18030Sstevel@tonic-gate * process will stall during pcf accounting if the pcf bins are 18040Sstevel@tonic-gate * already locked. Such is the case when a noreloc allocation is 18050Sstevel@tonic-gate * looping here in page_create_get_something waiting for more noreloc 18060Sstevel@tonic-gate * pages to appear. 18070Sstevel@tonic-gate * 18080Sstevel@tonic-gate * Short of adding a new field to the pcf bins to accurately track 18090Sstevel@tonic-gate * the number of free noreloc pages, we instead do not grab the 18100Sstevel@tonic-gate * pcgs_lock, do not set the pcf blocks and do not timeout when 18110Sstevel@tonic-gate * allocating a noreloc page. This allows noreloc allocations to 18120Sstevel@tonic-gate * loop without blocking global page pool allocations. 18130Sstevel@tonic-gate * 18140Sstevel@tonic-gate * NOTE: the behaviour of page_create_get_something has not changed 18150Sstevel@tonic-gate * for the case of global page pool allocations. 18160Sstevel@tonic-gate */ 18170Sstevel@tonic-gate 18180Sstevel@tonic-gate flags &= ~PG_MATCH_COLOR; 18190Sstevel@tonic-gate locked = 0; 18201385Skchow #if defined(__i386) || defined(__amd64) 18210Sstevel@tonic-gate /* 18220Sstevel@tonic-gate * page_create_get_something may be called because 4g memory may be 18230Sstevel@tonic-gate * depleted. Set flags to allow for relocation of base page below 18240Sstevel@tonic-gate * 4g if necessary. 18250Sstevel@tonic-gate */ 18260Sstevel@tonic-gate if (physmax4g) 18270Sstevel@tonic-gate flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI); 18280Sstevel@tonic-gate #endif 18290Sstevel@tonic-gate 18300Sstevel@tonic-gate lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 18310Sstevel@tonic-gate 18320Sstevel@tonic-gate for (count = 0; kcage_on || count < MAX_PCGS; count++) { 18330Sstevel@tonic-gate pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 18340Sstevel@tonic-gate flags, lgrp); 18350Sstevel@tonic-gate if (pp == NULL) { 18360Sstevel@tonic-gate pp = page_get_cachelist(vp, off, seg, vaddr, 18370Sstevel@tonic-gate flags, lgrp); 18380Sstevel@tonic-gate } 18390Sstevel@tonic-gate if (pp == NULL) { 18400Sstevel@tonic-gate /* 18410Sstevel@tonic-gate * Serialize. Don't fight with other pcgs(). 18420Sstevel@tonic-gate */ 18430Sstevel@tonic-gate if (!locked && (!kcage_on || !(flags & PG_NORELOC))) { 18440Sstevel@tonic-gate mutex_enter(&pcgs_lock); 18450Sstevel@tonic-gate VM_STAT_ADD(pcgs_locked); 18460Sstevel@tonic-gate locked = 1; 18470Sstevel@tonic-gate p = pcf; 18480Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 18490Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 18500Sstevel@tonic-gate ASSERT(p->pcf_block == 0); 18510Sstevel@tonic-gate p->pcf_block = 1; 18520Sstevel@tonic-gate p->pcf_reserve = p->pcf_count; 18530Sstevel@tonic-gate p->pcf_count = 0; 18540Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 18550Sstevel@tonic-gate p++; 18560Sstevel@tonic-gate } 18570Sstevel@tonic-gate freemem = 0; 18580Sstevel@tonic-gate } 18590Sstevel@tonic-gate 18600Sstevel@tonic-gate if (count) { 18610Sstevel@tonic-gate /* 18620Sstevel@tonic-gate * Since page_free() puts pages on 18630Sstevel@tonic-gate * a list then accounts for it, we 18640Sstevel@tonic-gate * just have to wait for page_free() 18650Sstevel@tonic-gate * to unlock any page it was working 18660Sstevel@tonic-gate * with. The page_lock()-page_reclaim() 18670Sstevel@tonic-gate * path falls in the same boat. 
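Stripped of the cage, color and lgroup details, the loop above has a simple shape: try every source, and if all fail, freeze the counters so pages stop disappearing, sleep until something is freed, and retry a bounded number of times. A stand-alone sketch of that skeleton (every name below is a hypothetical stand-in; the kernel version panics rather than returning NULL, because the pages were already accounted for):

    #include <stddef.h>

    #define MAX_TRIES   100             /* stands in for MAX_PCGS */

    /* Trivial stand-ins so the sketch is self-contained. */
    static void *try_freelist(void)  { return (NULL); }
    static void *try_cachelist(void) { return (NULL); }
    static void freeze_counters(void)   { }     /* like setting pcf_block */
    static void unfreeze_counters(void) { }     /* like pcgs_unblock() */
    static void wait_for_free_event(void) { }   /* like waiting on pcgs_cv */

    static void *
    get_something(void)
    {
        void *obj = NULL;
        int tries, frozen = 0;

        for (tries = 0; tries < MAX_TRIES; tries++) {
            if ((obj = try_freelist()) != NULL ||
                (obj = try_cachelist()) != NULL)
                break;
            if (!frozen) {
                freeze_counters();      /* stop concurrent drains */
                frozen = 1;
            }
            wait_for_free_event();      /* woken by the next free */
        }
        if (frozen)
            unfreeze_counters();
        return (obj);                   /* the kernel version panics instead */
    }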
18680Sstevel@tonic-gate * 18690Sstevel@tonic-gate * We don't need to check on the 18700Sstevel@tonic-gate * PG_WAIT flag, we have already 18710Sstevel@tonic-gate * accounted for the page we are 18720Sstevel@tonic-gate * looking for in page_create_va(). 18730Sstevel@tonic-gate * 18740Sstevel@tonic-gate * We just wait a moment to let any 18750Sstevel@tonic-gate * locked pages on the lists free up, 18760Sstevel@tonic-gate * then continue around and try again. 18770Sstevel@tonic-gate * 18780Sstevel@tonic-gate * Will be awakened by set_freemem(). 18790Sstevel@tonic-gate */ 18800Sstevel@tonic-gate mutex_enter(&pcgs_wait_lock); 18810Sstevel@tonic-gate cv_wait(&pcgs_cv, &pcgs_wait_lock); 18820Sstevel@tonic-gate mutex_exit(&pcgs_wait_lock); 18830Sstevel@tonic-gate } 18840Sstevel@tonic-gate } else { 18850Sstevel@tonic-gate #ifdef VM_STATS 18860Sstevel@tonic-gate if (count >= PCGS_TRIES) { 18870Sstevel@tonic-gate VM_STAT_ADD(pcgs_too_many); 18880Sstevel@tonic-gate } else { 18890Sstevel@tonic-gate VM_STAT_ADD(pcgs_counts[count]); 18900Sstevel@tonic-gate } 18910Sstevel@tonic-gate #endif 18920Sstevel@tonic-gate if (locked) { 18930Sstevel@tonic-gate pcgs_unblock(); 18940Sstevel@tonic-gate mutex_exit(&pcgs_lock); 18950Sstevel@tonic-gate } 18960Sstevel@tonic-gate if (cagelocked) 18970Sstevel@tonic-gate mutex_exit(&pcgs_cagelock); 18980Sstevel@tonic-gate return (pp); 18990Sstevel@tonic-gate } 19000Sstevel@tonic-gate } 19010Sstevel@tonic-gate /* 19020Sstevel@tonic-gate * we go down holding the pcf locks. 19030Sstevel@tonic-gate */ 19040Sstevel@tonic-gate panic("no %spage found %d", 19050Sstevel@tonic-gate ((flags & PG_NORELOC) ? "non-reloc " : ""), count); 19060Sstevel@tonic-gate /*NOTREACHED*/ 19070Sstevel@tonic-gate } 19080Sstevel@tonic-gate 19090Sstevel@tonic-gate /* 19100Sstevel@tonic-gate * Create enough pages for "bytes" worth of data starting at 19110Sstevel@tonic-gate * "off" in "vp". 19120Sstevel@tonic-gate * 19130Sstevel@tonic-gate * Where flag must be one of: 19140Sstevel@tonic-gate * 19150Sstevel@tonic-gate * PG_EXCL: Exclusive create (fail if any page already 19160Sstevel@tonic-gate * exists in the page cache) which does not 19170Sstevel@tonic-gate * wait for memory to become available. 19180Sstevel@tonic-gate * 19190Sstevel@tonic-gate * PG_WAIT: Non-exclusive create which can wait for 19200Sstevel@tonic-gate * memory to become available. 19210Sstevel@tonic-gate * 19220Sstevel@tonic-gate * PG_PHYSCONTIG: Allocate physically contiguous pages. 19230Sstevel@tonic-gate * (Not Supported) 19240Sstevel@tonic-gate * 19250Sstevel@tonic-gate * A doubly linked list of pages is returned to the caller. Each page 19260Sstevel@tonic-gate * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock) 19270Sstevel@tonic-gate * lock. 19280Sstevel@tonic-gate * 19290Sstevel@tonic-gate * Unable to change the parameters to page_create() in a minor release, 19300Sstevel@tonic-gate * we renamed page_create() to page_create_va(), changed all known calls 19310Sstevel@tonic-gate * from page_create() to page_create_va(), and created this wrapper. 19320Sstevel@tonic-gate * 19330Sstevel@tonic-gate * Upon a major release, we should break compatibility by deleting this 19340Sstevel@tonic-gate * wrapper, and replacing all the strings "page_create_va", with "page_create". 19350Sstevel@tonic-gate * 19360Sstevel@tonic-gate * NOTE: There is a copy of this interface as page_create_io() in 19370Sstevel@tonic-gate * i86/vm/vm_machdep.c. Any bugs fixed here should be applied 19380Sstevel@tonic-gate * there. 
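As a usage sketch of the contract documented above (a hypothetical caller, with error handling reduced to the essentials), a single-page, wait-for-memory, exclusive create might look like the following; whether to downgrade or fully unlock at the end is the caller's choice:

    /*
     * Hypothetical caller: allocate one page for [vp, off], waiting for
     * memory if necessary.  Per the contract above, the page comes back
     * with both the exclusive lock and the i/o lock held; the caller fills
     * it in, drops the i/o lock, and here downgrades to a shared lock.
     */
    static page_t *
    alloc_one_page(vnode_t *vp, u_offset_t off, struct seg *seg, caddr_t addr)
    {
        page_t *pp;

        pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL, seg, addr);
        if (pp == NULL)
            return (NULL);      /* PG_EXCL and the page already existed */

        /* ... initialize the page contents here ... */

        page_io_unlock(pp);
        page_downgrade(pp);     /* keep SE_SHARED for the caller, say */
        return (pp);
    }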
19390Sstevel@tonic-gate */ 19400Sstevel@tonic-gate page_t * 19410Sstevel@tonic-gate page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags) 19420Sstevel@tonic-gate { 19430Sstevel@tonic-gate caddr_t random_vaddr; 19440Sstevel@tonic-gate struct seg kseg; 19450Sstevel@tonic-gate 19460Sstevel@tonic-gate #ifdef DEBUG 19470Sstevel@tonic-gate cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p", 19480Sstevel@tonic-gate (void *)caller()); 19490Sstevel@tonic-gate #endif 19500Sstevel@tonic-gate 19510Sstevel@tonic-gate random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^ 19520Sstevel@tonic-gate (uintptr_t)(off >> PAGESHIFT)); 19530Sstevel@tonic-gate kseg.s_as = &kas; 19540Sstevel@tonic-gate 19550Sstevel@tonic-gate return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr)); 19560Sstevel@tonic-gate } 19570Sstevel@tonic-gate 19580Sstevel@tonic-gate #ifdef DEBUG 19590Sstevel@tonic-gate uint32_t pg_alloc_pgs_mtbf = 0; 19600Sstevel@tonic-gate #endif 19610Sstevel@tonic-gate 19620Sstevel@tonic-gate /* 19630Sstevel@tonic-gate * Used for large page support. It will attempt to allocate 19640Sstevel@tonic-gate * a large page(s) off the freelist. 19650Sstevel@tonic-gate * 19660Sstevel@tonic-gate * Returns non zero on failure. 19670Sstevel@tonic-gate */ 19680Sstevel@tonic-gate int 1969749Ssusans page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr, 1970749Ssusans page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz) 19710Sstevel@tonic-gate { 19720Sstevel@tonic-gate pgcnt_t npgs, curnpgs, totpgs; 19730Sstevel@tonic-gate size_t pgsz; 19740Sstevel@tonic-gate page_t *pplist = NULL, *pp; 19750Sstevel@tonic-gate int err = 0; 19760Sstevel@tonic-gate lgrp_t *lgrp; 19770Sstevel@tonic-gate 19780Sstevel@tonic-gate ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1)); 19790Sstevel@tonic-gate 19800Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[0]); 19810Sstevel@tonic-gate 19820Sstevel@tonic-gate #ifdef DEBUG 19830Sstevel@tonic-gate if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) { 19840Sstevel@tonic-gate return (ENOMEM); 19850Sstevel@tonic-gate } 19860Sstevel@tonic-gate #endif 19870Sstevel@tonic-gate 19880Sstevel@tonic-gate pgsz = page_get_pagesize(szc); 19890Sstevel@tonic-gate totpgs = curnpgs = npgs = pgsz >> PAGESHIFT; 19900Sstevel@tonic-gate 19910Sstevel@tonic-gate ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0); 19920Sstevel@tonic-gate /* 19930Sstevel@tonic-gate * One must be NULL but not both. 19940Sstevel@tonic-gate * And one must be non NULL but not both. 
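The basepp/ppa contract just stated allows two calling styles; a hedged sketch of the array style, where the caller wants each constituent page individually (hypothetical helper, minimal error handling):

    /*
     * Hypothetical caller using the ppa style: request one large page of
     * size code `szc' and receive its constituent small pages in an array
     * the caller sized to page_get_pagecnt(szc) entries.  basepp is passed
     * as NULL, since exactly one of the two may be used.
     */
    static int
    grab_large_page(struct vnode *vp, struct seg *seg, caddr_t addr,
        uint_t szc, page_t *ppa[])
    {
        int err;

        err = page_alloc_pages(vp, seg, addr, NULL, ppa, szc, 0);
        if (err != 0)
            return (err);       /* ENOMEM; nothing is left held by us */

        /* ppa[0 .. page_get_pagecnt(szc) - 1] now hold the pages */
        return (0);
    }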
19950Sstevel@tonic-gate */ 19960Sstevel@tonic-gate ASSERT(basepp != NULL || ppa != NULL); 19970Sstevel@tonic-gate ASSERT(basepp == NULL || ppa == NULL); 19980Sstevel@tonic-gate 19990Sstevel@tonic-gate (void) page_create_wait(npgs, PG_WAIT); 20000Sstevel@tonic-gate 20010Sstevel@tonic-gate while (npgs && szc) { 20020Sstevel@tonic-gate lgrp = lgrp_mem_choose(seg, addr, pgsz); 2003749Ssusans pp = page_get_freelist(vp, 0, seg, addr, pgsz, 0, lgrp); 20040Sstevel@tonic-gate if (pp != NULL) { 20050Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[1]); 20060Sstevel@tonic-gate page_list_concat(&pplist, &pp); 20070Sstevel@tonic-gate ASSERT(npgs >= curnpgs); 20080Sstevel@tonic-gate npgs -= curnpgs; 20090Sstevel@tonic-gate } else if (anypgsz) { 20100Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[2]); 20110Sstevel@tonic-gate szc--; 20120Sstevel@tonic-gate pgsz = page_get_pagesize(szc); 20130Sstevel@tonic-gate curnpgs = pgsz >> PAGESHIFT; 20140Sstevel@tonic-gate } else { 20150Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[3]); 20160Sstevel@tonic-gate ASSERT(npgs == totpgs); 20170Sstevel@tonic-gate page_create_putback(npgs); 20180Sstevel@tonic-gate return (ENOMEM); 20190Sstevel@tonic-gate } 20200Sstevel@tonic-gate } 20210Sstevel@tonic-gate if (szc == 0) { 20220Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[4]); 20230Sstevel@tonic-gate ASSERT(npgs != 0); 20240Sstevel@tonic-gate page_create_putback(npgs); 20250Sstevel@tonic-gate err = ENOMEM; 20260Sstevel@tonic-gate } else if (basepp != NULL) { 20270Sstevel@tonic-gate ASSERT(npgs == 0); 20280Sstevel@tonic-gate ASSERT(ppa == NULL); 20290Sstevel@tonic-gate *basepp = pplist; 20300Sstevel@tonic-gate } 20310Sstevel@tonic-gate 20320Sstevel@tonic-gate npgs = totpgs - npgs; 20330Sstevel@tonic-gate pp = pplist; 20340Sstevel@tonic-gate 20350Sstevel@tonic-gate /* 20360Sstevel@tonic-gate * Clear the free and age bits. Also if we were passed in a ppa then 20370Sstevel@tonic-gate * fill it in with all the constituent pages from the large page. But 20380Sstevel@tonic-gate * if we failed to allocate all the pages just free what we got. 
20390Sstevel@tonic-gate */ 20400Sstevel@tonic-gate while (npgs != 0) { 20410Sstevel@tonic-gate ASSERT(PP_ISFREE(pp)); 20420Sstevel@tonic-gate ASSERT(PP_ISAGED(pp)); 20430Sstevel@tonic-gate if (ppa != NULL || err != 0) { 20440Sstevel@tonic-gate if (err == 0) { 20450Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[5]); 20460Sstevel@tonic-gate PP_CLRFREE(pp); 20470Sstevel@tonic-gate PP_CLRAGED(pp); 20480Sstevel@tonic-gate page_sub(&pplist, pp); 20490Sstevel@tonic-gate *ppa++ = pp; 20500Sstevel@tonic-gate npgs--; 20510Sstevel@tonic-gate } else { 20520Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[6]); 20530Sstevel@tonic-gate ASSERT(pp->p_szc != 0); 20540Sstevel@tonic-gate curnpgs = page_get_pagecnt(pp->p_szc); 20550Sstevel@tonic-gate page_list_break(&pp, &pplist, curnpgs); 20560Sstevel@tonic-gate page_list_add_pages(pp, 0); 20570Sstevel@tonic-gate page_create_putback(curnpgs); 20580Sstevel@tonic-gate ASSERT(npgs >= curnpgs); 20590Sstevel@tonic-gate npgs -= curnpgs; 20600Sstevel@tonic-gate } 20610Sstevel@tonic-gate pp = pplist; 20620Sstevel@tonic-gate } else { 20630Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[7]); 20640Sstevel@tonic-gate PP_CLRFREE(pp); 20650Sstevel@tonic-gate PP_CLRAGED(pp); 20660Sstevel@tonic-gate pp = pp->p_next; 20670Sstevel@tonic-gate npgs--; 20680Sstevel@tonic-gate } 20690Sstevel@tonic-gate } 20700Sstevel@tonic-gate return (err); 20710Sstevel@tonic-gate } 20720Sstevel@tonic-gate 20730Sstevel@tonic-gate /* 20740Sstevel@tonic-gate * Get a single large page off of the freelists, and set it up for use. 20750Sstevel@tonic-gate * Number of bytes requested must be a supported page size. 20760Sstevel@tonic-gate * 20770Sstevel@tonic-gate * Note that this call may fail even if there is sufficient 20780Sstevel@tonic-gate * memory available or PG_WAIT is set, so the caller must 20790Sstevel@tonic-gate * be willing to fallback on page_create_va(), block and retry, 20800Sstevel@tonic-gate * or fail the requester. 20810Sstevel@tonic-gate */ 20820Sstevel@tonic-gate page_t * 20830Sstevel@tonic-gate page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 20840Sstevel@tonic-gate struct seg *seg, caddr_t vaddr, void *arg) 20850Sstevel@tonic-gate { 20860Sstevel@tonic-gate pgcnt_t npages, pcftotal; 20870Sstevel@tonic-gate page_t *pp; 20880Sstevel@tonic-gate page_t *rootpp; 20890Sstevel@tonic-gate lgrp_t *lgrp; 20900Sstevel@tonic-gate uint_t enough; 20910Sstevel@tonic-gate uint_t pcf_index; 20920Sstevel@tonic-gate uint_t i; 20930Sstevel@tonic-gate struct pcf *p; 20940Sstevel@tonic-gate struct pcf *q; 20950Sstevel@tonic-gate lgrp_id_t *lgrpid = (lgrp_id_t *)arg; 20960Sstevel@tonic-gate 20970Sstevel@tonic-gate ASSERT(vp != NULL); 20980Sstevel@tonic-gate 20990Sstevel@tonic-gate ASSERT((flags & ~(PG_EXCL | PG_WAIT | 21000Sstevel@tonic-gate PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 21010Sstevel@tonic-gate /* but no others */ 21020Sstevel@tonic-gate 21030Sstevel@tonic-gate ASSERT((flags & PG_EXCL) == PG_EXCL); 21040Sstevel@tonic-gate 21050Sstevel@tonic-gate npages = btop(bytes); 21060Sstevel@tonic-gate 21070Sstevel@tonic-gate if (!kcage_on || panicstr) { 21080Sstevel@tonic-gate /* 21090Sstevel@tonic-gate * Cage is OFF, or we are single threaded in 21100Sstevel@tonic-gate * panic, so make everything a RELOC request. 21110Sstevel@tonic-gate */ 21120Sstevel@tonic-gate flags &= ~PG_NORELOC; 21130Sstevel@tonic-gate } 21140Sstevel@tonic-gate 21150Sstevel@tonic-gate /* 21160Sstevel@tonic-gate * Make sure there's adequate physical memory available. 
21170Sstevel@tonic-gate * Note: PG_WAIT is ignored here. 21180Sstevel@tonic-gate */ 21190Sstevel@tonic-gate if (freemem <= throttlefree + npages) { 21200Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[1]); 21210Sstevel@tonic-gate return (NULL); 21220Sstevel@tonic-gate } 21230Sstevel@tonic-gate 21240Sstevel@tonic-gate /* 21250Sstevel@tonic-gate * If cage is on, dampen draw from cage when available 21260Sstevel@tonic-gate * cage space is low. 21270Sstevel@tonic-gate */ 21280Sstevel@tonic-gate if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) && 21290Sstevel@tonic-gate kcage_freemem < kcage_throttlefree + npages) { 21300Sstevel@tonic-gate 21310Sstevel@tonic-gate /* 21320Sstevel@tonic-gate * The cage is on, the caller wants PG_NORELOC 21330Sstevel@tonic-gate * pages and available cage memory is very low. 21340Sstevel@tonic-gate * Call kcage_create_throttle() to attempt to 21350Sstevel@tonic-gate * control demand on the cage. 21360Sstevel@tonic-gate */ 21370Sstevel@tonic-gate if (kcage_create_throttle(npages, flags) == KCT_FAILURE) { 21380Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[2]); 21390Sstevel@tonic-gate return (NULL); 21400Sstevel@tonic-gate } 21410Sstevel@tonic-gate } 21420Sstevel@tonic-gate 21430Sstevel@tonic-gate enough = 0; 21440Sstevel@tonic-gate pcf_index = PCF_INDEX(); 21450Sstevel@tonic-gate p = &pcf[pcf_index]; 21460Sstevel@tonic-gate q = &pcf[PCF_FANOUT]; 21470Sstevel@tonic-gate for (pcftotal = 0, i = 0; i < PCF_FANOUT; i++) { 21480Sstevel@tonic-gate if (p->pcf_count > npages) { 21490Sstevel@tonic-gate /* 21500Sstevel@tonic-gate * a good one to try. 21510Sstevel@tonic-gate */ 21520Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 21530Sstevel@tonic-gate if (p->pcf_count > npages) { 21540Sstevel@tonic-gate p->pcf_count -= (uint_t)npages; 21550Sstevel@tonic-gate /* 21560Sstevel@tonic-gate * freemem is not protected by any lock. 21570Sstevel@tonic-gate * Thus, we cannot have any assertion 21580Sstevel@tonic-gate * containing freemem here. 21590Sstevel@tonic-gate */ 21600Sstevel@tonic-gate freemem -= npages; 21610Sstevel@tonic-gate enough = 1; 21620Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 21630Sstevel@tonic-gate break; 21640Sstevel@tonic-gate } 21650Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 21660Sstevel@tonic-gate } 21670Sstevel@tonic-gate pcftotal += p->pcf_count; 21680Sstevel@tonic-gate p++; 21690Sstevel@tonic-gate if (p >= q) { 21700Sstevel@tonic-gate p = pcf; 21710Sstevel@tonic-gate } 21720Sstevel@tonic-gate } 21730Sstevel@tonic-gate 21740Sstevel@tonic-gate if (!enough) { 21750Sstevel@tonic-gate /* If there isn't enough memory available, give up. */ 21760Sstevel@tonic-gate if (pcftotal < npages) { 21770Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[3]); 21780Sstevel@tonic-gate return (NULL); 21790Sstevel@tonic-gate } 21800Sstevel@tonic-gate 21810Sstevel@tonic-gate /* try to collect pages from several pcf bins */ 21820Sstevel@tonic-gate for (p = pcf, pcftotal = 0, i = 0; i < PCF_FANOUT; i++) { 21830Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 21840Sstevel@tonic-gate pcftotal += p->pcf_count; 21850Sstevel@tonic-gate if (pcftotal >= npages) { 21860Sstevel@tonic-gate /* 21870Sstevel@tonic-gate * Wow! There are enough pages laying around 21880Sstevel@tonic-gate * to satisfy the request. Do the accounting, 21890Sstevel@tonic-gate * drop the locks we acquired, and go back. 21900Sstevel@tonic-gate * 21910Sstevel@tonic-gate * freemem is not protected by any lock. 
So, 21920Sstevel@tonic-gate * we cannot have any assertion containing 21930Sstevel@tonic-gate * freemem. 21940Sstevel@tonic-gate */ 21950Sstevel@tonic-gate pgcnt_t tpages = npages; 21960Sstevel@tonic-gate freemem -= npages; 21970Sstevel@tonic-gate while (p >= pcf) { 21980Sstevel@tonic-gate if (p->pcf_count <= tpages) { 21990Sstevel@tonic-gate tpages -= p->pcf_count; 22000Sstevel@tonic-gate p->pcf_count = 0; 22010Sstevel@tonic-gate } else { 22020Sstevel@tonic-gate p->pcf_count -= (uint_t)tpages; 22030Sstevel@tonic-gate tpages = 0; 22040Sstevel@tonic-gate } 22050Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 22060Sstevel@tonic-gate p--; 22070Sstevel@tonic-gate } 22080Sstevel@tonic-gate ASSERT(tpages == 0); 22090Sstevel@tonic-gate break; 22100Sstevel@tonic-gate } 22110Sstevel@tonic-gate p++; 22120Sstevel@tonic-gate } 22130Sstevel@tonic-gate if (i == PCF_FANOUT) { 22140Sstevel@tonic-gate /* failed to collect pages - release the locks */ 22150Sstevel@tonic-gate while (--p >= pcf) { 22160Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 22170Sstevel@tonic-gate } 22180Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[4]); 22190Sstevel@tonic-gate return (NULL); 22200Sstevel@tonic-gate } 22210Sstevel@tonic-gate } 22220Sstevel@tonic-gate 22230Sstevel@tonic-gate /* 22240Sstevel@tonic-gate * This is where this function behaves fundamentally differently 22250Sstevel@tonic-gate * than page_create_va(); since we're intending to map the page 22260Sstevel@tonic-gate * with a single TTE, we have to get it as a physically contiguous 22270Sstevel@tonic-gate * hardware pagesize chunk. If we can't, we fail. 22280Sstevel@tonic-gate */ 22290Sstevel@tonic-gate if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max && 22300Sstevel@tonic-gate LGRP_EXISTS(lgrp_table[*lgrpid])) 22310Sstevel@tonic-gate lgrp = lgrp_table[*lgrpid]; 22320Sstevel@tonic-gate else 22330Sstevel@tonic-gate lgrp = lgrp_mem_choose(seg, vaddr, bytes); 22340Sstevel@tonic-gate 22350Sstevel@tonic-gate if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr, 22360Sstevel@tonic-gate bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) { 22370Sstevel@tonic-gate page_create_putback(npages); 22380Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[5]); 22390Sstevel@tonic-gate return (NULL); 22400Sstevel@tonic-gate } 22410Sstevel@tonic-gate 22420Sstevel@tonic-gate /* 22430Sstevel@tonic-gate * if we got the page with the wrong mtype give it back this is a 22440Sstevel@tonic-gate * workaround for CR 6249718. When CR 6249718 is fixed we never get 22450Sstevel@tonic-gate * inside "if" and the workaround becomes just a nop 22460Sstevel@tonic-gate */ 22470Sstevel@tonic-gate if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 22480Sstevel@tonic-gate page_list_add_pages(rootpp, 0); 22490Sstevel@tonic-gate page_create_putback(npages); 22500Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[6]); 22510Sstevel@tonic-gate return (NULL); 22520Sstevel@tonic-gate } 22530Sstevel@tonic-gate 22540Sstevel@tonic-gate /* 22550Sstevel@tonic-gate * If satisfying this request has left us with too little 22560Sstevel@tonic-gate * memory, start the wheels turning to get some back. The 22570Sstevel@tonic-gate * first clause of the test prevents waking up the pageout 22580Sstevel@tonic-gate * daemon in situations where it would decide that there's 22590Sstevel@tonic-gate * nothing to do. 
22600Sstevel@tonic-gate */ 22610Sstevel@tonic-gate if (nscan < desscan && freemem < minfree) { 22620Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 22630Sstevel@tonic-gate "pageout_cv_signal:freemem %ld", freemem); 22640Sstevel@tonic-gate cv_signal(&proc_pageout->p_cv); 22650Sstevel@tonic-gate } 22660Sstevel@tonic-gate 22670Sstevel@tonic-gate pp = rootpp; 22680Sstevel@tonic-gate while (npages--) { 22690Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 22700Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL); 22710Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(pp)); 22720Sstevel@tonic-gate PP_CLRFREE(pp); 22730Sstevel@tonic-gate PP_CLRAGED(pp); 22740Sstevel@tonic-gate if (!page_hashin(pp, vp, off, NULL)) 22750Sstevel@tonic-gate panic("page_create_large: hashin failed: page %p", 22760Sstevel@tonic-gate (void *)pp); 22770Sstevel@tonic-gate page_io_lock(pp); 22780Sstevel@tonic-gate off += PAGESIZE; 22790Sstevel@tonic-gate pp = pp->p_next; 22800Sstevel@tonic-gate } 22810Sstevel@tonic-gate 22820Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[0]); 22830Sstevel@tonic-gate return (rootpp); 22840Sstevel@tonic-gate } 22850Sstevel@tonic-gate 22860Sstevel@tonic-gate page_t * 22870Sstevel@tonic-gate page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 22880Sstevel@tonic-gate struct seg *seg, caddr_t vaddr) 22890Sstevel@tonic-gate { 22900Sstevel@tonic-gate page_t *plist = NULL; 22910Sstevel@tonic-gate pgcnt_t npages; 22920Sstevel@tonic-gate pgcnt_t found_on_free = 0; 22930Sstevel@tonic-gate pgcnt_t pages_req; 22940Sstevel@tonic-gate page_t *npp = NULL; 22950Sstevel@tonic-gate uint_t enough; 22960Sstevel@tonic-gate uint_t i; 22970Sstevel@tonic-gate uint_t pcf_index; 22980Sstevel@tonic-gate struct pcf *p; 22990Sstevel@tonic-gate struct pcf *q; 23000Sstevel@tonic-gate lgrp_t *lgrp; 23010Sstevel@tonic-gate 23020Sstevel@tonic-gate TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 23030Sstevel@tonic-gate "page_create_start:vp %p off %llx bytes %lu flags %x", 23040Sstevel@tonic-gate vp, off, bytes, flags); 23050Sstevel@tonic-gate 23060Sstevel@tonic-gate ASSERT(bytes != 0 && vp != NULL); 23070Sstevel@tonic-gate 23080Sstevel@tonic-gate if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 23090Sstevel@tonic-gate panic("page_create: invalid flags"); 23100Sstevel@tonic-gate /*NOTREACHED*/ 23110Sstevel@tonic-gate } 23120Sstevel@tonic-gate ASSERT((flags & ~(PG_EXCL | PG_WAIT | 23130Sstevel@tonic-gate PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 23140Sstevel@tonic-gate /* but no others */ 23150Sstevel@tonic-gate 23160Sstevel@tonic-gate pages_req = npages = btopr(bytes); 23170Sstevel@tonic-gate /* 23180Sstevel@tonic-gate * Try to see whether request is too large to *ever* be 23190Sstevel@tonic-gate * satisfied, in order to prevent deadlock. We arbitrarily 23200Sstevel@tonic-gate * decide to limit maximum size requests to max_page_get. 
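Given that limit, a PG_WAIT request at or above max_page_get will hang by design, so a caller that can legitimately generate very large requests may want to refuse them up front. A hedged sketch of such a guard (hypothetical in-file helper):

    /*
     * Hypothetical guard: refuse over-sized requests up front rather than
     * letting a PG_WAIT allocation hang on a request that can never be met.
     */
    static page_t *
    bounded_page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
        struct seg *seg, caddr_t addr)
    {
        if (btopr(bytes) >= max_page_get)
            return (NULL);          /* caller must split the request */
        return (page_create_va(vp, off, bytes, flags, seg, addr));
    }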
23210Sstevel@tonic-gate */ 23220Sstevel@tonic-gate if (npages >= max_page_get) { 23230Sstevel@tonic-gate if ((flags & PG_WAIT) == 0) { 23240Sstevel@tonic-gate TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 23250Sstevel@tonic-gate "page_create_toobig:vp %p off %llx npages " 23260Sstevel@tonic-gate "%lu max_page_get %lu", 23270Sstevel@tonic-gate vp, off, npages, max_page_get); 23280Sstevel@tonic-gate return (NULL); 23290Sstevel@tonic-gate } else { 23300Sstevel@tonic-gate cmn_err(CE_WARN, 23310Sstevel@tonic-gate "Request for too much kernel memory " 23320Sstevel@tonic-gate "(%lu bytes), will hang forever", bytes); 23330Sstevel@tonic-gate for (;;) 23340Sstevel@tonic-gate delay(1000000000); 23350Sstevel@tonic-gate } 23360Sstevel@tonic-gate } 23370Sstevel@tonic-gate 23380Sstevel@tonic-gate if (!kcage_on || panicstr) { 23390Sstevel@tonic-gate /* 23400Sstevel@tonic-gate * Cage is OFF, or we are single threaded in 23410Sstevel@tonic-gate * panic, so make everything a RELOC request. 23420Sstevel@tonic-gate */ 23430Sstevel@tonic-gate flags &= ~PG_NORELOC; 23440Sstevel@tonic-gate } 23450Sstevel@tonic-gate 23460Sstevel@tonic-gate if (freemem <= throttlefree + npages) 23470Sstevel@tonic-gate if (!page_create_throttle(npages, flags)) 23480Sstevel@tonic-gate return (NULL); 23490Sstevel@tonic-gate 23500Sstevel@tonic-gate /* 23510Sstevel@tonic-gate * If cage is on, dampen draw from cage when available 23520Sstevel@tonic-gate * cage space is low. 23530Sstevel@tonic-gate */ 23540Sstevel@tonic-gate if ((flags & PG_NORELOC) && 23550Sstevel@tonic-gate kcage_freemem < kcage_throttlefree + npages) { 23560Sstevel@tonic-gate 23570Sstevel@tonic-gate /* 23580Sstevel@tonic-gate * The cage is on, the caller wants PG_NORELOC 23590Sstevel@tonic-gate * pages and available cage memory is very low. 23600Sstevel@tonic-gate * Call kcage_create_throttle() to attempt to 23610Sstevel@tonic-gate * control demand on the cage. 23620Sstevel@tonic-gate */ 23630Sstevel@tonic-gate if (kcage_create_throttle(npages, flags) == KCT_FAILURE) 23640Sstevel@tonic-gate return (NULL); 23650Sstevel@tonic-gate } 23660Sstevel@tonic-gate 23670Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[0]); 23680Sstevel@tonic-gate 23690Sstevel@tonic-gate enough = 0; 23700Sstevel@tonic-gate pcf_index = PCF_INDEX(); 23710Sstevel@tonic-gate 23720Sstevel@tonic-gate p = &pcf[pcf_index]; 23730Sstevel@tonic-gate q = &pcf[PCF_FANOUT]; 23740Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 23750Sstevel@tonic-gate if (p->pcf_count > npages) { 23760Sstevel@tonic-gate /* 23770Sstevel@tonic-gate * a good one to try. 23780Sstevel@tonic-gate */ 23790Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 23800Sstevel@tonic-gate if (p->pcf_count > npages) { 23810Sstevel@tonic-gate p->pcf_count -= (uint_t)npages; 23820Sstevel@tonic-gate /* 23830Sstevel@tonic-gate * freemem is not protected by any lock. 23840Sstevel@tonic-gate * Thus, we cannot have any assertion 23850Sstevel@tonic-gate * containing freemem here. 23860Sstevel@tonic-gate */ 23870Sstevel@tonic-gate freemem -= npages; 23880Sstevel@tonic-gate enough = 1; 23890Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 23900Sstevel@tonic-gate break; 23910Sstevel@tonic-gate } 23920Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 23930Sstevel@tonic-gate } 23940Sstevel@tonic-gate p++; 23950Sstevel@tonic-gate if (p >= q) { 23960Sstevel@tonic-gate p = pcf; 23970Sstevel@tonic-gate } 23980Sstevel@tonic-gate } 23990Sstevel@tonic-gate 24000Sstevel@tonic-gate if (!enough) { 24010Sstevel@tonic-gate /* 24020Sstevel@tonic-gate * Have to look harder. 
If npages is greater than 24030Sstevel@tonic-gate * one, then we might have to coalesce the counters. 24040Sstevel@tonic-gate * 24050Sstevel@tonic-gate * Go wait. We come back having accounted 24060Sstevel@tonic-gate * for the memory. 24070Sstevel@tonic-gate */ 24080Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[1]); 24090Sstevel@tonic-gate if (!page_create_wait(npages, flags)) { 24100Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[2]); 24110Sstevel@tonic-gate return (NULL); 24120Sstevel@tonic-gate } 24130Sstevel@tonic-gate } 24140Sstevel@tonic-gate 24150Sstevel@tonic-gate TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 24160Sstevel@tonic-gate "page_create_success:vp %p off %llx", vp, off); 24170Sstevel@tonic-gate 24180Sstevel@tonic-gate /* 24190Sstevel@tonic-gate * If satisfying this request has left us with too little 24200Sstevel@tonic-gate * memory, start the wheels turning to get some back. The 24210Sstevel@tonic-gate * first clause of the test prevents waking up the pageout 24220Sstevel@tonic-gate * daemon in situations where it would decide that there's 24230Sstevel@tonic-gate * nothing to do. 24240Sstevel@tonic-gate */ 24250Sstevel@tonic-gate if (nscan < desscan && freemem < minfree) { 24260Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 24270Sstevel@tonic-gate "pageout_cv_signal:freemem %ld", freemem); 24280Sstevel@tonic-gate cv_signal(&proc_pageout->p_cv); 24290Sstevel@tonic-gate } 24300Sstevel@tonic-gate 24310Sstevel@tonic-gate /* 24320Sstevel@tonic-gate * Loop around collecting the requested number of pages. 24330Sstevel@tonic-gate * Most of the time, we have to `create' a new page. With 24340Sstevel@tonic-gate * this in mind, pull the page off the free list before 24350Sstevel@tonic-gate * getting the hash lock. This will minimize the hash 24360Sstevel@tonic-gate * lock hold time, nesting, and the like. If it turns 24370Sstevel@tonic-gate * out we don't need the page, we put it back at the end. 24380Sstevel@tonic-gate */ 24390Sstevel@tonic-gate while (npages--) { 24400Sstevel@tonic-gate page_t *pp; 24410Sstevel@tonic-gate kmutex_t *phm = NULL; 24420Sstevel@tonic-gate ulong_t index; 24430Sstevel@tonic-gate 24440Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, off); 24450Sstevel@tonic-gate top: 24460Sstevel@tonic-gate ASSERT(phm == NULL); 24470Sstevel@tonic-gate ASSERT(index == PAGE_HASH_FUNC(vp, off)); 24480Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 24490Sstevel@tonic-gate 24500Sstevel@tonic-gate if (npp == NULL) { 24510Sstevel@tonic-gate /* 24520Sstevel@tonic-gate * Try to get a page from the freelist (ie, 24530Sstevel@tonic-gate * a page with no [vp, off] tag). If that 24540Sstevel@tonic-gate * fails, use the cachelist. 24550Sstevel@tonic-gate * 24560Sstevel@tonic-gate * During the first attempt at both the free 24570Sstevel@tonic-gate * and cache lists we try for the correct color. 24580Sstevel@tonic-gate */ 24590Sstevel@tonic-gate /* 24600Sstevel@tonic-gate * XXXX-how do we deal with virtual indexed 24610Sstevel@tonic-gate * caches and colors?
24620Sstevel@tonic-gate */
24630Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[4]);
24640Sstevel@tonic-gate /*
24650Sstevel@tonic-gate * Get lgroup to allocate next page of shared memory
24660Sstevel@tonic-gate * from and use it to specify where to allocate
24670Sstevel@tonic-gate * the physical memory
24680Sstevel@tonic-gate */
24690Sstevel@tonic-gate lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
24700Sstevel@tonic-gate npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
24710Sstevel@tonic-gate flags | PG_MATCH_COLOR, lgrp);
24720Sstevel@tonic-gate if (npp == NULL) {
24730Sstevel@tonic-gate npp = page_get_cachelist(vp, off, seg,
24740Sstevel@tonic-gate vaddr, flags | PG_MATCH_COLOR, lgrp);
24750Sstevel@tonic-gate if (npp == NULL) {
24760Sstevel@tonic-gate npp = page_create_get_something(vp,
24770Sstevel@tonic-gate off, seg, vaddr,
24780Sstevel@tonic-gate flags & ~PG_MATCH_COLOR);
24790Sstevel@tonic-gate }
24800Sstevel@tonic-gate
24810Sstevel@tonic-gate if (PP_ISAGED(npp) == 0) {
24820Sstevel@tonic-gate /*
24830Sstevel@tonic-gate * Since this page came from the
24840Sstevel@tonic-gate * cachelist, we must destroy the
24850Sstevel@tonic-gate * old vnode association.
24860Sstevel@tonic-gate */
24870Sstevel@tonic-gate page_hashout(npp, NULL);
24880Sstevel@tonic-gate }
24890Sstevel@tonic-gate }
24900Sstevel@tonic-gate }
24910Sstevel@tonic-gate
24920Sstevel@tonic-gate /*
24930Sstevel@tonic-gate * We own this page!
24940Sstevel@tonic-gate */
24950Sstevel@tonic-gate ASSERT(PAGE_EXCL(npp));
24960Sstevel@tonic-gate ASSERT(npp->p_vnode == NULL);
24970Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(npp));
24980Sstevel@tonic-gate PP_CLRFREE(npp);
24990Sstevel@tonic-gate PP_CLRAGED(npp);
25000Sstevel@tonic-gate
25010Sstevel@tonic-gate /*
25020Sstevel@tonic-gate * Here we have a page in our hot little mitts and are
25030Sstevel@tonic-gate * just waiting to stuff it on the appropriate lists.
25040Sstevel@tonic-gate * Get the mutex and check to see if it really does
25050Sstevel@tonic-gate * not exist.
25060Sstevel@tonic-gate */
25070Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(index);
25080Sstevel@tonic-gate mutex_enter(phm);
25090Sstevel@tonic-gate PAGE_HASH_SEARCH(index, pp, vp, off);
25100Sstevel@tonic-gate if (pp == NULL) {
25110Sstevel@tonic-gate VM_STAT_ADD(page_create_new);
25120Sstevel@tonic-gate pp = npp;
25130Sstevel@tonic-gate npp = NULL;
25140Sstevel@tonic-gate if (!page_hashin(pp, vp, off, phm)) {
25150Sstevel@tonic-gate /*
25160Sstevel@tonic-gate * Since we hold the page hash mutex and
25170Sstevel@tonic-gate * just searched for this page, page_hashin
25180Sstevel@tonic-gate * had better not fail. If it does, that
25190Sstevel@tonic-gate * means some thread did not follow the
25200Sstevel@tonic-gate * page hash mutex rules. Panic now and
25210Sstevel@tonic-gate * get it over with. As usual, go down
25220Sstevel@tonic-gate * holding all the locks.
25230Sstevel@tonic-gate */ 25240Sstevel@tonic-gate ASSERT(MUTEX_HELD(phm)); 25250Sstevel@tonic-gate panic("page_create: " 25260Sstevel@tonic-gate "hashin failed %p %p %llx %p", 25270Sstevel@tonic-gate (void *)pp, (void *)vp, off, (void *)phm); 25280Sstevel@tonic-gate /*NOTREACHED*/ 25290Sstevel@tonic-gate } 25300Sstevel@tonic-gate ASSERT(MUTEX_HELD(phm)); 25310Sstevel@tonic-gate mutex_exit(phm); 25320Sstevel@tonic-gate phm = NULL; 25330Sstevel@tonic-gate 25340Sstevel@tonic-gate /* 25350Sstevel@tonic-gate * Hat layer locking need not be done to set 25360Sstevel@tonic-gate * the following bits since the page is not hashed 25370Sstevel@tonic-gate * and was on the free list (i.e., had no mappings). 25380Sstevel@tonic-gate * 25390Sstevel@tonic-gate * Set the reference bit to protect 25400Sstevel@tonic-gate * against immediate pageout 25410Sstevel@tonic-gate * 25420Sstevel@tonic-gate * XXXmh modify freelist code to set reference 25430Sstevel@tonic-gate * bit so we don't have to do it here. 25440Sstevel@tonic-gate */ 25450Sstevel@tonic-gate page_set_props(pp, P_REF); 25460Sstevel@tonic-gate found_on_free++; 25470Sstevel@tonic-gate } else { 25480Sstevel@tonic-gate VM_STAT_ADD(page_create_exists); 25490Sstevel@tonic-gate if (flags & PG_EXCL) { 25500Sstevel@tonic-gate /* 25510Sstevel@tonic-gate * Found an existing page, and the caller 25520Sstevel@tonic-gate * wanted all new pages. Undo all of the work 25530Sstevel@tonic-gate * we have done. 25540Sstevel@tonic-gate */ 25550Sstevel@tonic-gate mutex_exit(phm); 25560Sstevel@tonic-gate phm = NULL; 25570Sstevel@tonic-gate while (plist != NULL) { 25580Sstevel@tonic-gate pp = plist; 25590Sstevel@tonic-gate page_sub(&plist, pp); 25600Sstevel@tonic-gate page_io_unlock(pp); 25610Sstevel@tonic-gate /* large pages should not end up here */ 25620Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 25630Sstevel@tonic-gate /*LINTED: constant in conditional ctx*/ 25640Sstevel@tonic-gate VN_DISPOSE(pp, B_INVAL, 0, kcred); 25650Sstevel@tonic-gate } 25660Sstevel@tonic-gate VM_STAT_ADD(page_create_found_one); 25670Sstevel@tonic-gate goto fail; 25680Sstevel@tonic-gate } 25690Sstevel@tonic-gate ASSERT(flags & PG_WAIT); 25700Sstevel@tonic-gate if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 25710Sstevel@tonic-gate /* 25720Sstevel@tonic-gate * Start all over again if we blocked trying 25730Sstevel@tonic-gate * to lock the page. 25740Sstevel@tonic-gate */ 25750Sstevel@tonic-gate mutex_exit(phm); 25760Sstevel@tonic-gate VM_STAT_ADD(page_create_page_lock_failed); 25770Sstevel@tonic-gate phm = NULL; 25780Sstevel@tonic-gate goto top; 25790Sstevel@tonic-gate } 25800Sstevel@tonic-gate mutex_exit(phm); 25810Sstevel@tonic-gate phm = NULL; 25820Sstevel@tonic-gate 25830Sstevel@tonic-gate if (PP_ISFREE(pp)) { 25840Sstevel@tonic-gate ASSERT(PP_ISAGED(pp) == 0); 25850Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_get_cache); 25860Sstevel@tonic-gate page_list_sub(pp, PG_CACHE_LIST); 25870Sstevel@tonic-gate PP_CLRFREE(pp); 25880Sstevel@tonic-gate found_on_free++; 25890Sstevel@tonic-gate } 25900Sstevel@tonic-gate } 25910Sstevel@tonic-gate 25920Sstevel@tonic-gate /* 25930Sstevel@tonic-gate * Got a page! It is locked. Acquire the i/o 25940Sstevel@tonic-gate * lock since we are going to use the p_next and 25950Sstevel@tonic-gate * p_prev fields to link the requested pages together. 
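 *
 * A small detail worth spelling out: page_add() below pushes the
 * page on the front of plist, and the following
 *
 *	plist = plist->p_next;
 *
 * advances the head past it again, so when the while loop finishes
 * plist points at the page for the original offset and the list
 * runs in increasing-offset order (circular, threaded through
 * p_next/p_prev).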
25960Sstevel@tonic-gate */ 25970Sstevel@tonic-gate page_io_lock(pp); 25980Sstevel@tonic-gate page_add(&plist, pp); 25990Sstevel@tonic-gate plist = plist->p_next; 26000Sstevel@tonic-gate off += PAGESIZE; 26010Sstevel@tonic-gate vaddr += PAGESIZE; 26020Sstevel@tonic-gate } 26030Sstevel@tonic-gate 26040Sstevel@tonic-gate ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 26050Sstevel@tonic-gate fail: 26060Sstevel@tonic-gate if (npp != NULL) { 26070Sstevel@tonic-gate /* 26080Sstevel@tonic-gate * Did not need this page after all. 26090Sstevel@tonic-gate * Put it back on the free list. 26100Sstevel@tonic-gate */ 26110Sstevel@tonic-gate VM_STAT_ADD(page_create_putbacks); 26120Sstevel@tonic-gate PP_SETFREE(npp); 26130Sstevel@tonic-gate PP_SETAGED(npp); 26140Sstevel@tonic-gate npp->p_offset = (u_offset_t)-1; 26150Sstevel@tonic-gate page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 26160Sstevel@tonic-gate page_unlock(npp); 26170Sstevel@tonic-gate 26180Sstevel@tonic-gate } 26190Sstevel@tonic-gate 26200Sstevel@tonic-gate ASSERT(pages_req >= found_on_free); 26210Sstevel@tonic-gate 26220Sstevel@tonic-gate { 26230Sstevel@tonic-gate uint_t overshoot = (uint_t)(pages_req - found_on_free); 26240Sstevel@tonic-gate 26250Sstevel@tonic-gate if (overshoot) { 26260Sstevel@tonic-gate VM_STAT_ADD(page_create_overshoot); 26270Sstevel@tonic-gate p = &pcf[pcf_index]; 26280Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 26290Sstevel@tonic-gate if (p->pcf_block) { 26300Sstevel@tonic-gate p->pcf_reserve += overshoot; 26310Sstevel@tonic-gate } else { 26320Sstevel@tonic-gate p->pcf_count += overshoot; 26330Sstevel@tonic-gate if (p->pcf_wait) { 26340Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 26350Sstevel@tonic-gate if (freemem_wait) { 26360Sstevel@tonic-gate cv_signal(&freemem_cv); 26370Sstevel@tonic-gate p->pcf_wait--; 26380Sstevel@tonic-gate } else { 26390Sstevel@tonic-gate p->pcf_wait = 0; 26400Sstevel@tonic-gate } 26410Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 26420Sstevel@tonic-gate } 26430Sstevel@tonic-gate } 26440Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 26450Sstevel@tonic-gate /* freemem is approximate, so this test OK */ 26460Sstevel@tonic-gate if (!p->pcf_block) 26470Sstevel@tonic-gate freemem += overshoot; 26480Sstevel@tonic-gate } 26490Sstevel@tonic-gate } 26500Sstevel@tonic-gate 26510Sstevel@tonic-gate return (plist); 26520Sstevel@tonic-gate } 26530Sstevel@tonic-gate 26540Sstevel@tonic-gate /* 26550Sstevel@tonic-gate * One or more constituent pages of this large page has been marked 26560Sstevel@tonic-gate * toxic. Simply demote the large page to PAGESIZE pages and let 26570Sstevel@tonic-gate * page_free() handle it. This routine should only be called by 26580Sstevel@tonic-gate * large page free routines (page_free_pages() and page_destroy_pages(). 26590Sstevel@tonic-gate * All pages are locked SE_EXCL and have already been marked free. 
26600Sstevel@tonic-gate */ 26610Sstevel@tonic-gate static void 26620Sstevel@tonic-gate page_free_toxic_pages(page_t *rootpp) 26630Sstevel@tonic-gate { 26640Sstevel@tonic-gate page_t *tpp; 26650Sstevel@tonic-gate pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 26660Sstevel@tonic-gate uint_t szc = rootpp->p_szc; 26670Sstevel@tonic-gate 26680Sstevel@tonic-gate for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 26690Sstevel@tonic-gate ASSERT(tpp->p_szc == szc); 26700Sstevel@tonic-gate ASSERT((PAGE_EXCL(tpp) && 26710Sstevel@tonic-gate !page_iolock_assert(tpp)) || panicstr); 26720Sstevel@tonic-gate tpp->p_szc = 0; 26730Sstevel@tonic-gate } 26740Sstevel@tonic-gate 26750Sstevel@tonic-gate while (rootpp != NULL) { 26760Sstevel@tonic-gate tpp = rootpp; 26770Sstevel@tonic-gate page_sub(&rootpp, tpp); 26780Sstevel@tonic-gate ASSERT(PP_ISFREE(tpp)); 26790Sstevel@tonic-gate PP_CLRFREE(tpp); 26800Sstevel@tonic-gate page_free(tpp, 1); 26810Sstevel@tonic-gate } 26820Sstevel@tonic-gate } 26830Sstevel@tonic-gate 26840Sstevel@tonic-gate /* 26850Sstevel@tonic-gate * Put page on the "free" list. 26860Sstevel@tonic-gate * The free list is really two lists maintained by 26870Sstevel@tonic-gate * the PSM of whatever machine we happen to be on. 26880Sstevel@tonic-gate */ 26890Sstevel@tonic-gate void 26900Sstevel@tonic-gate page_free(page_t *pp, int dontneed) 26910Sstevel@tonic-gate { 26920Sstevel@tonic-gate struct pcf *p; 26930Sstevel@tonic-gate uint_t pcf_index; 26940Sstevel@tonic-gate 26950Sstevel@tonic-gate ASSERT((PAGE_EXCL(pp) && 26960Sstevel@tonic-gate !page_iolock_assert(pp)) || panicstr); 26970Sstevel@tonic-gate 26980Sstevel@tonic-gate if (PP_ISFREE(pp)) { 26990Sstevel@tonic-gate panic("page_free: page %p is free", (void *)pp); 27000Sstevel@tonic-gate } 27010Sstevel@tonic-gate 27020Sstevel@tonic-gate if (pp->p_szc != 0) { 27030Sstevel@tonic-gate if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 27040Sstevel@tonic-gate pp->p_vnode == &kvp) { 27050Sstevel@tonic-gate panic("page_free: anon or kernel " 27060Sstevel@tonic-gate "or no vnode large page %p", (void *)pp); 27070Sstevel@tonic-gate } 27080Sstevel@tonic-gate page_demote_vp_pages(pp); 27090Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 27100Sstevel@tonic-gate } 27110Sstevel@tonic-gate 27120Sstevel@tonic-gate /* 27130Sstevel@tonic-gate * The page_struct_lock need not be acquired to examine these 27140Sstevel@tonic-gate * fields since the page has an "exclusive" lock. 27150Sstevel@tonic-gate */ 27162414Saguzovsk if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 27172414Saguzovsk pp->p_slckcnt != 0) { 27182414Saguzovsk panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 27192414Saguzovsk "slckcnt = %d", pp, page_pptonum(pp), pp->p_lckcnt, 27202414Saguzovsk pp->p_cowcnt, pp->p_slckcnt); 27210Sstevel@tonic-gate /*NOTREACHED*/ 27220Sstevel@tonic-gate } 27230Sstevel@tonic-gate 27240Sstevel@tonic-gate ASSERT(!hat_page_getshare(pp)); 27250Sstevel@tonic-gate 27260Sstevel@tonic-gate PP_SETFREE(pp); 27270Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 27280Sstevel@tonic-gate !hat_ismod(pp)); 27290Sstevel@tonic-gate page_clr_all_props(pp); 27300Sstevel@tonic-gate ASSERT(!hat_page_getshare(pp)); 27310Sstevel@tonic-gate 27320Sstevel@tonic-gate /* 27330Sstevel@tonic-gate * Now we add the page to the head of the free list. 
27340Sstevel@tonic-gate * But if this page is associated with a paged vnode 27350Sstevel@tonic-gate * then we adjust the head forward so that the page is 27360Sstevel@tonic-gate * effectively at the end of the list. 27370Sstevel@tonic-gate */ 27380Sstevel@tonic-gate if (pp->p_vnode == NULL) { 27390Sstevel@tonic-gate /* 27400Sstevel@tonic-gate * Page has no identity, put it on the free list. 27410Sstevel@tonic-gate */ 27420Sstevel@tonic-gate PP_SETAGED(pp); 27430Sstevel@tonic-gate pp->p_offset = (u_offset_t)-1; 27440Sstevel@tonic-gate page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 27450Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_free); 27460Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 27470Sstevel@tonic-gate "page_free_free:pp %p", pp); 27480Sstevel@tonic-gate } else { 27490Sstevel@tonic-gate PP_CLRAGED(pp); 27500Sstevel@tonic-gate 27510Sstevel@tonic-gate if (!dontneed || nopageage) { 27520Sstevel@tonic-gate /* move it to the tail of the list */ 27530Sstevel@tonic-gate page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 27540Sstevel@tonic-gate 27550Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_cache); 27560Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 27570Sstevel@tonic-gate "page_free_cache_tail:pp %p", pp); 27580Sstevel@tonic-gate } else { 27590Sstevel@tonic-gate page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 27600Sstevel@tonic-gate 27610Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_dontneed); 27620Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 27630Sstevel@tonic-gate "page_free_cache_head:pp %p", pp); 27640Sstevel@tonic-gate } 27650Sstevel@tonic-gate } 27660Sstevel@tonic-gate page_unlock(pp); 27670Sstevel@tonic-gate 27680Sstevel@tonic-gate /* 27690Sstevel@tonic-gate * Now do the `freemem' accounting. 27700Sstevel@tonic-gate */ 27710Sstevel@tonic-gate pcf_index = PCF_INDEX(); 27720Sstevel@tonic-gate p = &pcf[pcf_index]; 27730Sstevel@tonic-gate 27740Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 27750Sstevel@tonic-gate if (p->pcf_block) { 27760Sstevel@tonic-gate p->pcf_reserve += 1; 27770Sstevel@tonic-gate } else { 27780Sstevel@tonic-gate p->pcf_count += 1; 27790Sstevel@tonic-gate if (p->pcf_wait) { 27800Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 27810Sstevel@tonic-gate /* 27820Sstevel@tonic-gate * Check to see if some other thread 27830Sstevel@tonic-gate * is actually waiting. Another bucket 27840Sstevel@tonic-gate * may have woken it up by now. If there 27850Sstevel@tonic-gate * are no waiters, then set our pcf_wait 27860Sstevel@tonic-gate * count to zero to avoid coming in here 27870Sstevel@tonic-gate * next time. Also, since only one page 27880Sstevel@tonic-gate * was put on the free list, just wake 27890Sstevel@tonic-gate * up one waiter. 27900Sstevel@tonic-gate */ 27910Sstevel@tonic-gate if (freemem_wait) { 27920Sstevel@tonic-gate cv_signal(&freemem_cv); 27930Sstevel@tonic-gate p->pcf_wait--; 27940Sstevel@tonic-gate } else { 27950Sstevel@tonic-gate p->pcf_wait = 0; 27960Sstevel@tonic-gate } 27970Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 27980Sstevel@tonic-gate } 27990Sstevel@tonic-gate } 28000Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 28010Sstevel@tonic-gate 28020Sstevel@tonic-gate /* freemem is approximate, so this test OK */ 28030Sstevel@tonic-gate if (!p->pcf_block) 28040Sstevel@tonic-gate freemem += 1; 28050Sstevel@tonic-gate } 28060Sstevel@tonic-gate 28070Sstevel@tonic-gate /* 28080Sstevel@tonic-gate * Put page on the "free" list during intial startup. 
28090Sstevel@tonic-gate * This happens during initial single threaded execution. 28100Sstevel@tonic-gate */ 28110Sstevel@tonic-gate void 28120Sstevel@tonic-gate page_free_at_startup(page_t *pp) 28130Sstevel@tonic-gate { 28140Sstevel@tonic-gate struct pcf *p; 28150Sstevel@tonic-gate uint_t pcf_index; 28160Sstevel@tonic-gate 28170Sstevel@tonic-gate page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 28180Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_free); 28190Sstevel@tonic-gate 28200Sstevel@tonic-gate /* 28210Sstevel@tonic-gate * Now do the `freemem' accounting. 28220Sstevel@tonic-gate */ 28230Sstevel@tonic-gate pcf_index = PCF_INDEX(); 28240Sstevel@tonic-gate p = &pcf[pcf_index]; 28250Sstevel@tonic-gate 28260Sstevel@tonic-gate ASSERT(p->pcf_block == 0); 28270Sstevel@tonic-gate ASSERT(p->pcf_wait == 0); 28280Sstevel@tonic-gate p->pcf_count += 1; 28290Sstevel@tonic-gate 28300Sstevel@tonic-gate /* freemem is approximate, so this is OK */ 28310Sstevel@tonic-gate freemem += 1; 28320Sstevel@tonic-gate } 28330Sstevel@tonic-gate 28340Sstevel@tonic-gate void 28350Sstevel@tonic-gate page_free_pages(page_t *pp) 28360Sstevel@tonic-gate { 28370Sstevel@tonic-gate page_t *tpp, *rootpp = NULL; 28380Sstevel@tonic-gate pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 28390Sstevel@tonic-gate pgcnt_t i; 28400Sstevel@tonic-gate uint_t szc = pp->p_szc; 28410Sstevel@tonic-gate 28420Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_pages); 28430Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 28440Sstevel@tonic-gate "page_free_free:pp %p", pp); 28450Sstevel@tonic-gate 28460Sstevel@tonic-gate ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 28470Sstevel@tonic-gate if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 28480Sstevel@tonic-gate panic("page_free_pages: not root page %p", (void *)pp); 28490Sstevel@tonic-gate /*NOTREACHED*/ 28500Sstevel@tonic-gate } 28510Sstevel@tonic-gate 2852414Skchow for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 28530Sstevel@tonic-gate ASSERT((PAGE_EXCL(tpp) && 28540Sstevel@tonic-gate !page_iolock_assert(tpp)) || panicstr); 28550Sstevel@tonic-gate if (PP_ISFREE(tpp)) { 28560Sstevel@tonic-gate panic("page_free_pages: page %p is free", (void *)tpp); 28570Sstevel@tonic-gate /*NOTREACHED*/ 28580Sstevel@tonic-gate } 28590Sstevel@tonic-gate if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 28602414Saguzovsk tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 28610Sstevel@tonic-gate panic("page_free_pages %p", (void *)tpp); 28620Sstevel@tonic-gate /*NOTREACHED*/ 28630Sstevel@tonic-gate } 28640Sstevel@tonic-gate 28650Sstevel@tonic-gate ASSERT(!hat_page_getshare(tpp)); 28660Sstevel@tonic-gate ASSERT(tpp->p_vnode == NULL); 28670Sstevel@tonic-gate ASSERT(tpp->p_szc == szc); 28680Sstevel@tonic-gate 28690Sstevel@tonic-gate PP_SETFREE(tpp); 28700Sstevel@tonic-gate page_clr_all_props(tpp); 28710Sstevel@tonic-gate PP_SETAGED(tpp); 28720Sstevel@tonic-gate tpp->p_offset = (u_offset_t)-1; 28730Sstevel@tonic-gate ASSERT(tpp->p_next == tpp); 28740Sstevel@tonic-gate ASSERT(tpp->p_prev == tpp); 28750Sstevel@tonic-gate page_list_concat(&rootpp, &tpp); 28760Sstevel@tonic-gate } 28770Sstevel@tonic-gate ASSERT(rootpp == pp); 28780Sstevel@tonic-gate 28790Sstevel@tonic-gate page_list_add_pages(rootpp, 0); 28800Sstevel@tonic-gate page_create_putback(pgcnt); 28810Sstevel@tonic-gate } 28820Sstevel@tonic-gate 28830Sstevel@tonic-gate int free_pages = 1; 28840Sstevel@tonic-gate 28850Sstevel@tonic-gate /* 28860Sstevel@tonic-gate * This routine attempts to return pages to the cachelist via page_release(). 
28870Sstevel@tonic-gate * It does not *have* to be successful in all cases, since the pageout scanner 28880Sstevel@tonic-gate * will catch any pages it misses. It does need to be fast and not introduce 28890Sstevel@tonic-gate * too much overhead. 28900Sstevel@tonic-gate * 28910Sstevel@tonic-gate * If a page isn't found on the unlocked sweep of the page_hash bucket, we 28920Sstevel@tonic-gate * don't lock and retry. This is ok, since the page scanner will eventually 28930Sstevel@tonic-gate * find any page we miss in free_vp_pages(). 28940Sstevel@tonic-gate */ 28950Sstevel@tonic-gate void 28960Sstevel@tonic-gate free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 28970Sstevel@tonic-gate { 28980Sstevel@tonic-gate page_t *pp; 28990Sstevel@tonic-gate u_offset_t eoff; 29000Sstevel@tonic-gate extern int swap_in_range(vnode_t *, u_offset_t, size_t); 29010Sstevel@tonic-gate 29020Sstevel@tonic-gate eoff = off + len; 29030Sstevel@tonic-gate 29040Sstevel@tonic-gate if (free_pages == 0) 29050Sstevel@tonic-gate return; 29060Sstevel@tonic-gate if (swap_in_range(vp, off, len)) 29070Sstevel@tonic-gate return; 29080Sstevel@tonic-gate 29090Sstevel@tonic-gate for (; off < eoff; off += PAGESIZE) { 29100Sstevel@tonic-gate 29110Sstevel@tonic-gate /* 29120Sstevel@tonic-gate * find the page using a fast, but inexact search. It'll be OK 29130Sstevel@tonic-gate * if a few pages slip through the cracks here. 29140Sstevel@tonic-gate */ 29150Sstevel@tonic-gate pp = page_exists(vp, off); 29160Sstevel@tonic-gate 29170Sstevel@tonic-gate /* 29180Sstevel@tonic-gate * If we didn't find the page (it may not exist), the page 29190Sstevel@tonic-gate * is free, looks still in use (shared), or we can't lock it, 29200Sstevel@tonic-gate * just give up. 29210Sstevel@tonic-gate */ 29220Sstevel@tonic-gate if (pp == NULL || 29230Sstevel@tonic-gate PP_ISFREE(pp) || 29240Sstevel@tonic-gate page_share_cnt(pp) > 0 || 29250Sstevel@tonic-gate !page_trylock(pp, SE_EXCL)) 29260Sstevel@tonic-gate continue; 29270Sstevel@tonic-gate 29280Sstevel@tonic-gate /* 29290Sstevel@tonic-gate * Once we have locked pp, verify that it's still the 29300Sstevel@tonic-gate * correct page and not already free 29310Sstevel@tonic-gate */ 29320Sstevel@tonic-gate ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 29330Sstevel@tonic-gate if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 29340Sstevel@tonic-gate page_unlock(pp); 29350Sstevel@tonic-gate continue; 29360Sstevel@tonic-gate } 29370Sstevel@tonic-gate 29380Sstevel@tonic-gate /* 29390Sstevel@tonic-gate * try to release the page... 29400Sstevel@tonic-gate */ 29410Sstevel@tonic-gate (void) page_release(pp, 1); 29420Sstevel@tonic-gate } 29430Sstevel@tonic-gate } 29440Sstevel@tonic-gate 29450Sstevel@tonic-gate /* 29460Sstevel@tonic-gate * Reclaim the given page from the free list. 29470Sstevel@tonic-gate * Returns 1 on success or 0 on failure. 29480Sstevel@tonic-gate * 29490Sstevel@tonic-gate * The page is unlocked if it can't be reclaimed (when freemem == 0). 29500Sstevel@tonic-gate * If `lock' is non-null, it will be dropped and re-acquired if 29510Sstevel@tonic-gate * the routine must wait while freemem is 0. 29520Sstevel@tonic-gate * 29530Sstevel@tonic-gate * As it turns out, boot_getpages() does this. It picks a page, 29540Sstevel@tonic-gate * based on where OBP mapped in some address, gets its pfn, searches 29550Sstevel@tonic-gate * the memsegs, locks the page, then pulls it off the free list! 
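 *
 * A typical caller-side pattern, purely for illustration (the names
 * here are placeholders, not a quote of any particular caller):
 *
 *	if (PP_ISFREE(pp)) {
 *		if (!page_reclaim(pp, phm))
 *			goto top;	(phm was dropped; revalidate)
 *	}
 *
 * i.e. the page must already be held SE_EXCL and marked free, and a
 * zero return means the page lock was given up (and `lock', if
 * supplied, was dropped and re-acquired), so the caller has to start
 * its lookup over.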
29560Sstevel@tonic-gate */ 29570Sstevel@tonic-gate int 29580Sstevel@tonic-gate page_reclaim(page_t *pp, kmutex_t *lock) 29590Sstevel@tonic-gate { 29600Sstevel@tonic-gate struct pcf *p; 29610Sstevel@tonic-gate uint_t pcf_index; 29620Sstevel@tonic-gate struct cpu *cpup; 29630Sstevel@tonic-gate uint_t i; 29641074Smec pgcnt_t npgs, need; 29651074Smec pgcnt_t collected = 0; 29660Sstevel@tonic-gate 29670Sstevel@tonic-gate ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 29680Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 2969917Selowe 2970917Selowe npgs = page_get_pagecnt(pp->p_szc); 29710Sstevel@tonic-gate 29720Sstevel@tonic-gate /* 29730Sstevel@tonic-gate * If `freemem' is 0, we cannot reclaim this page from the 29740Sstevel@tonic-gate * freelist, so release every lock we might hold: the page, 29750Sstevel@tonic-gate * and the `lock' before blocking. 29760Sstevel@tonic-gate * 29770Sstevel@tonic-gate * The only way `freemem' can become 0 while there are pages 29780Sstevel@tonic-gate * marked free (have their p->p_free bit set) is when the 29790Sstevel@tonic-gate * system is low on memory and doing a page_create(). In 29800Sstevel@tonic-gate * order to guarantee that once page_create() starts acquiring 29810Sstevel@tonic-gate * pages it will be able to get all that it needs since `freemem' 29820Sstevel@tonic-gate * was decreased by the requested amount. So, we need to release 29830Sstevel@tonic-gate * this page, and let page_create() have it. 29840Sstevel@tonic-gate * 29850Sstevel@tonic-gate * Since `freemem' being zero is not supposed to happen, just 29860Sstevel@tonic-gate * use the usual hash stuff as a starting point. If that bucket 29870Sstevel@tonic-gate * is empty, then assume the worst, and start at the beginning 29880Sstevel@tonic-gate * of the pcf array. If we always start at the beginning 29890Sstevel@tonic-gate * when acquiring more than one pcf lock, there won't be any 29900Sstevel@tonic-gate * deadlock problems. 29910Sstevel@tonic-gate */ 29920Sstevel@tonic-gate 29930Sstevel@tonic-gate /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 29940Sstevel@tonic-gate 2995973Selowe if (freemem <= throttlefree && !page_create_throttle(npgs, 0)) { 29960Sstevel@tonic-gate pcf_acquire_all(); 29970Sstevel@tonic-gate goto page_reclaim_nomem; 29980Sstevel@tonic-gate } 29990Sstevel@tonic-gate 30000Sstevel@tonic-gate pcf_index = PCF_INDEX(); 30010Sstevel@tonic-gate p = &pcf[pcf_index]; 30020Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 3003917Selowe if (p->pcf_count >= npgs) { 3004917Selowe collected = npgs; 3005917Selowe p->pcf_count -= npgs; 30060Sstevel@tonic-gate } 30070Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 3008917Selowe need = npgs - collected; 3009917Selowe 3010917Selowe if (need > 0) { 30110Sstevel@tonic-gate VM_STAT_ADD(page_reclaim_zero); 30120Sstevel@tonic-gate /* 30130Sstevel@tonic-gate * Check again. Its possible that some other thread 30140Sstevel@tonic-gate * could have been right behind us, and added one 30150Sstevel@tonic-gate * to a list somewhere. Acquire each of the pcf locks 30160Sstevel@tonic-gate * until we find a page. 
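 *
 * (Background on the pcf[] array, since this is the slow path that
 * sweeps all of it: free-page accounting is spread over PCF_FANOUT
 * buckets so that unrelated CPUs usually touch different buckets;
 * the fast path above only tried the single bucket chosen by
 * PCF_INDEX().  The sweep below is, in simplified form,
 *
 *	need = npgs - collected;
 *	for (p = pcf, i = 0; i < PCF_FANOUT && need > 0; i++, p++) {
 *		mutex_enter(&p->pcf_lock);
 *		take = MIN(p->pcf_count, need);	("take" is shorthand)
 *		p->pcf_count -= take;
 *		need -= take;
 *	}
 *
 * except that the real code keeps every acquired pcf_lock held, so
 * that it can either finish the accounting or convert the held
 * locks into pcf_wait bookkeeping when it comes up short.)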
30170Sstevel@tonic-gate */ 30180Sstevel@tonic-gate p = pcf; 30190Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 30200Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 3021917Selowe if (p->pcf_count) { 3022917Selowe if (p->pcf_count >= need) { 3023917Selowe p->pcf_count -= need; 3024917Selowe collected += need; 3025917Selowe need = 0; 3026917Selowe break; 3027917Selowe } else if (p->pcf_count) { 3028917Selowe collected += p->pcf_count; 3029917Selowe need -= p->pcf_count; 3030917Selowe p->pcf_count = 0; 3031917Selowe } 30320Sstevel@tonic-gate } 30330Sstevel@tonic-gate p++; 30340Sstevel@tonic-gate } 30350Sstevel@tonic-gate 3036917Selowe if (need > 0) { 30370Sstevel@tonic-gate page_reclaim_nomem: 30380Sstevel@tonic-gate /* 30390Sstevel@tonic-gate * We really can't have page `pp'. 30400Sstevel@tonic-gate * Time for the no-memory dance with 30410Sstevel@tonic-gate * page_free(). This is just like 30420Sstevel@tonic-gate * page_create_wait(). Plus the added 30430Sstevel@tonic-gate * attraction of releasing whatever mutex 30440Sstevel@tonic-gate * we held when we were called with in `lock'. 30450Sstevel@tonic-gate * Page_unlock() will wakeup any thread 30460Sstevel@tonic-gate * waiting around for this page. 30470Sstevel@tonic-gate */ 30480Sstevel@tonic-gate if (lock) { 30490Sstevel@tonic-gate VM_STAT_ADD(page_reclaim_zero_locked); 30500Sstevel@tonic-gate mutex_exit(lock); 30510Sstevel@tonic-gate } 30520Sstevel@tonic-gate page_unlock(pp); 30530Sstevel@tonic-gate 30540Sstevel@tonic-gate /* 30550Sstevel@tonic-gate * get this before we drop all the pcf locks. 30560Sstevel@tonic-gate */ 30570Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 30580Sstevel@tonic-gate 30590Sstevel@tonic-gate p = pcf; 3060917Selowe p->pcf_count += collected; 30610Sstevel@tonic-gate for (i = 0; i < PCF_FANOUT; i++) { 30620Sstevel@tonic-gate p->pcf_wait++; 30630Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 30640Sstevel@tonic-gate p++; 30650Sstevel@tonic-gate } 30660Sstevel@tonic-gate 30670Sstevel@tonic-gate freemem_wait++; 30680Sstevel@tonic-gate cv_wait(&freemem_cv, &new_freemem_lock); 30690Sstevel@tonic-gate freemem_wait--; 30700Sstevel@tonic-gate 30710Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 30720Sstevel@tonic-gate 30730Sstevel@tonic-gate if (lock) { 30740Sstevel@tonic-gate mutex_enter(lock); 30750Sstevel@tonic-gate } 30760Sstevel@tonic-gate return (0); 30770Sstevel@tonic-gate } 30780Sstevel@tonic-gate 30790Sstevel@tonic-gate /* 3080917Selowe * We beat the PCF bins over the head until 3081917Selowe * we got the memory that we wanted. 30820Sstevel@tonic-gate * The pcf accounting has been done, 30830Sstevel@tonic-gate * though none of the pcf_wait flags have been set, 30840Sstevel@tonic-gate * drop the locks and continue on. 30850Sstevel@tonic-gate */ 3086917Selowe ASSERT(collected == npgs); 30870Sstevel@tonic-gate while (p >= pcf) { 30880Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 30890Sstevel@tonic-gate p--; 30900Sstevel@tonic-gate } 30910Sstevel@tonic-gate } 30920Sstevel@tonic-gate 30930Sstevel@tonic-gate /* 30940Sstevel@tonic-gate * freemem is not protected by any lock. Thus, we cannot 30950Sstevel@tonic-gate * have any assertion containing freemem here. 
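 *
 * (freemem is only an approximate, lock-free shadow of what the
 * pcf[] counts say, which is why tests elsewhere in this file are
 * annotated "freemem is approximate, so this test OK".)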
30960Sstevel@tonic-gate */ 3097917Selowe freemem -= npgs; 30980Sstevel@tonic-gate 30990Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_reclaim); 31000Sstevel@tonic-gate if (PP_ISAGED(pp)) { 3101917Selowe if (npgs > 1) { 3102917Selowe page_list_sub_pages(pp, pp->p_szc); 3103917Selowe } else { 3104917Selowe page_list_sub(pp, PG_FREE_LIST); 3105917Selowe } 31060Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 31070Sstevel@tonic-gate "page_reclaim_free:pp %p", pp); 31080Sstevel@tonic-gate } else { 3109917Selowe ASSERT(npgs == 1); 31100Sstevel@tonic-gate page_list_sub(pp, PG_CACHE_LIST); 31110Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 31120Sstevel@tonic-gate "page_reclaim_cache:pp %p", pp); 31130Sstevel@tonic-gate } 31140Sstevel@tonic-gate 31150Sstevel@tonic-gate /* 31160Sstevel@tonic-gate * clear the p_free & p_age bits since this page is no longer 31170Sstevel@tonic-gate * on the free list. Notice that there was a brief time where 31180Sstevel@tonic-gate * a page is marked as free, but is not on the list. 31190Sstevel@tonic-gate * 31200Sstevel@tonic-gate * Set the reference bit to protect against immediate pageout. 31210Sstevel@tonic-gate */ 3122973Selowe for (i = 0; i < npgs; i++, pp++) { 3123917Selowe PP_CLRFREE(pp); 3124917Selowe PP_CLRAGED(pp); 3125917Selowe page_set_props(pp, P_REF); 3126917Selowe } 31270Sstevel@tonic-gate 31280Sstevel@tonic-gate CPU_STATS_ENTER_K(); 31290Sstevel@tonic-gate cpup = CPU; /* get cpup now that CPU cannot change */ 31300Sstevel@tonic-gate CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 31310Sstevel@tonic-gate CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 31320Sstevel@tonic-gate CPU_STATS_EXIT_K(); 31330Sstevel@tonic-gate 31340Sstevel@tonic-gate return (1); 31350Sstevel@tonic-gate } 31360Sstevel@tonic-gate 31370Sstevel@tonic-gate 31380Sstevel@tonic-gate 31390Sstevel@tonic-gate /* 31400Sstevel@tonic-gate * Destroy identity of the page and put it back on 31410Sstevel@tonic-gate * the page free list. Assumes that the caller has 31420Sstevel@tonic-gate * acquired the "exclusive" lock on the page. 31430Sstevel@tonic-gate */ 31440Sstevel@tonic-gate void 31450Sstevel@tonic-gate page_destroy(page_t *pp, int dontfree) 31460Sstevel@tonic-gate { 31470Sstevel@tonic-gate ASSERT((PAGE_EXCL(pp) && 31480Sstevel@tonic-gate !page_iolock_assert(pp)) || panicstr); 31492414Saguzovsk ASSERT(pp->p_slckcnt == 0 || panicstr); 31500Sstevel@tonic-gate 31510Sstevel@tonic-gate if (pp->p_szc != 0) { 31520Sstevel@tonic-gate if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 31530Sstevel@tonic-gate pp->p_vnode == &kvp) { 31540Sstevel@tonic-gate panic("page_destroy: anon or kernel or no vnode " 31550Sstevel@tonic-gate "large page %p", (void *)pp); 31560Sstevel@tonic-gate } 31570Sstevel@tonic-gate page_demote_vp_pages(pp); 31580Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 31590Sstevel@tonic-gate } 31600Sstevel@tonic-gate 31610Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 31620Sstevel@tonic-gate 31630Sstevel@tonic-gate /* 31640Sstevel@tonic-gate * Unload translations, if any, then hash out the 31650Sstevel@tonic-gate * page to erase its identity. 31660Sstevel@tonic-gate */ 31670Sstevel@tonic-gate (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 31680Sstevel@tonic-gate page_hashout(pp, NULL); 31690Sstevel@tonic-gate 31700Sstevel@tonic-gate if (!dontfree) { 31710Sstevel@tonic-gate /* 31720Sstevel@tonic-gate * Acquire the "freemem_lock" for availrmem. 
31730Sstevel@tonic-gate * The page_struct_lock need not be acquired for lckcnt 31740Sstevel@tonic-gate * and cowcnt since the page has an "exclusive" lock. 31750Sstevel@tonic-gate */ 31760Sstevel@tonic-gate if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 31770Sstevel@tonic-gate mutex_enter(&freemem_lock); 31780Sstevel@tonic-gate if (pp->p_lckcnt != 0) { 31790Sstevel@tonic-gate availrmem++; 31800Sstevel@tonic-gate pp->p_lckcnt = 0; 31810Sstevel@tonic-gate } 31820Sstevel@tonic-gate if (pp->p_cowcnt != 0) { 31830Sstevel@tonic-gate availrmem += pp->p_cowcnt; 31840Sstevel@tonic-gate pp->p_cowcnt = 0; 31850Sstevel@tonic-gate } 31860Sstevel@tonic-gate mutex_exit(&freemem_lock); 31870Sstevel@tonic-gate } 31880Sstevel@tonic-gate /* 31890Sstevel@tonic-gate * Put the page on the "free" list. 31900Sstevel@tonic-gate */ 31910Sstevel@tonic-gate page_free(pp, 0); 31920Sstevel@tonic-gate } 31930Sstevel@tonic-gate } 31940Sstevel@tonic-gate 31950Sstevel@tonic-gate void 31960Sstevel@tonic-gate page_destroy_pages(page_t *pp) 31970Sstevel@tonic-gate { 31980Sstevel@tonic-gate 31990Sstevel@tonic-gate page_t *tpp, *rootpp = NULL; 32000Sstevel@tonic-gate pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 32010Sstevel@tonic-gate pgcnt_t i, pglcks = 0; 32020Sstevel@tonic-gate uint_t szc = pp->p_szc; 32030Sstevel@tonic-gate 32040Sstevel@tonic-gate ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 32050Sstevel@tonic-gate 32060Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_destroy_pages); 32070Sstevel@tonic-gate 32080Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 32090Sstevel@tonic-gate 32100Sstevel@tonic-gate if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 32110Sstevel@tonic-gate panic("page_destroy_pages: not root page %p", (void *)pp); 32120Sstevel@tonic-gate /*NOTREACHED*/ 32130Sstevel@tonic-gate } 32140Sstevel@tonic-gate 3215414Skchow for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 32160Sstevel@tonic-gate ASSERT((PAGE_EXCL(tpp) && 32170Sstevel@tonic-gate !page_iolock_assert(tpp)) || panicstr); 32182414Saguzovsk ASSERT(tpp->p_slckcnt == 0 || panicstr); 32190Sstevel@tonic-gate (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 32200Sstevel@tonic-gate page_hashout(tpp, NULL); 32210Sstevel@tonic-gate ASSERT(tpp->p_offset == (u_offset_t)-1); 32220Sstevel@tonic-gate if (tpp->p_lckcnt != 0) { 32230Sstevel@tonic-gate pglcks++; 32240Sstevel@tonic-gate tpp->p_lckcnt = 0; 32250Sstevel@tonic-gate } else if (tpp->p_cowcnt != 0) { 32260Sstevel@tonic-gate pglcks += tpp->p_cowcnt; 32270Sstevel@tonic-gate tpp->p_cowcnt = 0; 32280Sstevel@tonic-gate } 32290Sstevel@tonic-gate ASSERT(!hat_page_getshare(tpp)); 32300Sstevel@tonic-gate ASSERT(tpp->p_vnode == NULL); 32310Sstevel@tonic-gate ASSERT(tpp->p_szc == szc); 32320Sstevel@tonic-gate 32330Sstevel@tonic-gate PP_SETFREE(tpp); 32340Sstevel@tonic-gate page_clr_all_props(tpp); 32350Sstevel@tonic-gate PP_SETAGED(tpp); 32360Sstevel@tonic-gate ASSERT(tpp->p_next == tpp); 32370Sstevel@tonic-gate ASSERT(tpp->p_prev == tpp); 32380Sstevel@tonic-gate page_list_concat(&rootpp, &tpp); 32390Sstevel@tonic-gate } 32400Sstevel@tonic-gate 32410Sstevel@tonic-gate ASSERT(rootpp == pp); 32420Sstevel@tonic-gate if (pglcks != 0) { 32430Sstevel@tonic-gate mutex_enter(&freemem_lock); 32440Sstevel@tonic-gate availrmem += pglcks; 32450Sstevel@tonic-gate mutex_exit(&freemem_lock); 32460Sstevel@tonic-gate } 32470Sstevel@tonic-gate 32480Sstevel@tonic-gate page_list_add_pages(rootpp, 0); 32490Sstevel@tonic-gate page_create_putback(pgcnt); 32500Sstevel@tonic-gate } 
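
/*
 * A short map of the free/destroy entry points in this part of the
 * file, as a reader's aid (approximate; each routine's own comment
 * is the authoritative statement of its rules):
 *
 *	page_free(pp, dontneed)		pp locked SE_EXCL and not free;
 *					keeps its identity on the cachelist
 *					if it has a vnode, otherwise goes to
 *					the freelist.
 *	page_free_pages(pp)		frees every constituent of a large
 *					page (identity-less) to the freelist.
 *	page_destroy(pp, dontfree)	unloads translations, hashes the page
 *					out, returns any lckcnt/cowcnt hold to
 *					availrmem, then page_free()s it unless
 *					dontfree is set.
 *	page_destroy_pages(pp)		the large-page analogue of
 *					page_destroy().
 *	page_destroy_free(pp)		page already free on the cachelist;
 *					strips its identity and moves it to
 *					the freelist.
 */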
32510Sstevel@tonic-gate 32520Sstevel@tonic-gate /* 32530Sstevel@tonic-gate * Similar to page_destroy(), but destroys pages which are 32540Sstevel@tonic-gate * locked and known to be on the page free list. Since 32550Sstevel@tonic-gate * the page is known to be free and locked, no one can access 32560Sstevel@tonic-gate * it. 32570Sstevel@tonic-gate * 32580Sstevel@tonic-gate * Also, the number of free pages does not change. 32590Sstevel@tonic-gate */ 32600Sstevel@tonic-gate void 32610Sstevel@tonic-gate page_destroy_free(page_t *pp) 32620Sstevel@tonic-gate { 32630Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 32640Sstevel@tonic-gate ASSERT(PP_ISFREE(pp)); 32650Sstevel@tonic-gate ASSERT(pp->p_vnode); 32660Sstevel@tonic-gate ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 32670Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(pp)); 32680Sstevel@tonic-gate ASSERT(PP_ISAGED(pp) == 0); 32690Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 32700Sstevel@tonic-gate 32710Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_destroy_free); 32720Sstevel@tonic-gate page_list_sub(pp, PG_CACHE_LIST); 32730Sstevel@tonic-gate 32740Sstevel@tonic-gate page_hashout(pp, NULL); 32750Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL); 32760Sstevel@tonic-gate ASSERT(pp->p_offset == (u_offset_t)-1); 32770Sstevel@tonic-gate ASSERT(pp->p_hash == NULL); 32780Sstevel@tonic-gate 32790Sstevel@tonic-gate PP_SETAGED(pp); 32800Sstevel@tonic-gate page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 32810Sstevel@tonic-gate page_unlock(pp); 32820Sstevel@tonic-gate 32830Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 32840Sstevel@tonic-gate if (freemem_wait) { 32850Sstevel@tonic-gate cv_signal(&freemem_cv); 32860Sstevel@tonic-gate } 32870Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 32880Sstevel@tonic-gate } 32890Sstevel@tonic-gate 32900Sstevel@tonic-gate /* 32910Sstevel@tonic-gate * Rename the page "opp" to have an identity specified 32920Sstevel@tonic-gate * by [vp, off]. If a page already exists with this name 32930Sstevel@tonic-gate * it is locked and destroyed. Note that the page's 32940Sstevel@tonic-gate * translations are not unloaded during the rename. 32950Sstevel@tonic-gate * 32960Sstevel@tonic-gate * This routine is used by the anon layer to "steal" the 32970Sstevel@tonic-gate * original page and is not unlike destroying a page and 32980Sstevel@tonic-gate * creating a new page using the same page frame. 32990Sstevel@tonic-gate * 33000Sstevel@tonic-gate * XXX -- Could deadlock if caller 1 tries to rename A to B while 33010Sstevel@tonic-gate * caller 2 tries to rename B to A. 
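 *	(Concretely: each caller enters holding the exclusive lock on
 *	its own source page and then blocks in page_lock() on the page
 *	that currently owns the destination name -- which is exactly
 *	the page the other caller is holding, a classic ABBA wait.)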
33020Sstevel@tonic-gate */ 33030Sstevel@tonic-gate void 33040Sstevel@tonic-gate page_rename(page_t *opp, vnode_t *vp, u_offset_t off) 33050Sstevel@tonic-gate { 33060Sstevel@tonic-gate page_t *pp; 33070Sstevel@tonic-gate int olckcnt = 0; 33080Sstevel@tonic-gate int ocowcnt = 0; 33090Sstevel@tonic-gate kmutex_t *phm; 33100Sstevel@tonic-gate ulong_t index; 33110Sstevel@tonic-gate 33120Sstevel@tonic-gate ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp)); 33130Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 33140Sstevel@tonic-gate ASSERT(PP_ISFREE(opp) == 0); 33150Sstevel@tonic-gate 33160Sstevel@tonic-gate VM_STAT_ADD(page_rename_count); 33170Sstevel@tonic-gate 33180Sstevel@tonic-gate TRACE_3(TR_FAC_VM, TR_PAGE_RENAME, 33190Sstevel@tonic-gate "page rename:pp %p vp %p off %llx", opp, vp, off); 33200Sstevel@tonic-gate 332163Saguzovsk /* 332263Saguzovsk * CacheFS may call page_rename for a large NFS page 332363Saguzovsk * when both CacheFS and NFS mount points are used 332463Saguzovsk * by applications. Demote this large page before 332563Saguzovsk * renaming it, to ensure that there are no "partial" 332663Saguzovsk * large pages left lying around. 332763Saguzovsk */ 332863Saguzovsk if (opp->p_szc != 0) { 332963Saguzovsk vnode_t *ovp = opp->p_vnode; 333063Saguzovsk ASSERT(ovp != NULL); 333163Saguzovsk ASSERT(!IS_SWAPFSVP(ovp)); 333263Saguzovsk ASSERT(ovp != &kvp); 333363Saguzovsk page_demote_vp_pages(opp); 333463Saguzovsk ASSERT(opp->p_szc == 0); 333563Saguzovsk } 333663Saguzovsk 33370Sstevel@tonic-gate page_hashout(opp, NULL); 33380Sstevel@tonic-gate PP_CLRAGED(opp); 33390Sstevel@tonic-gate 33400Sstevel@tonic-gate /* 33410Sstevel@tonic-gate * Acquire the appropriate page hash lock, since 33420Sstevel@tonic-gate * we're going to rename the page. 33430Sstevel@tonic-gate */ 33440Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, off); 33450Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(index); 33460Sstevel@tonic-gate mutex_enter(phm); 33470Sstevel@tonic-gate top: 33480Sstevel@tonic-gate /* 33490Sstevel@tonic-gate * Look for an existing page with this name and destroy it if found. 33500Sstevel@tonic-gate * By holding the page hash lock all the way to the page_hashin() 33510Sstevel@tonic-gate * call, we are assured that no page can be created with this 33520Sstevel@tonic-gate * identity. In the case when the phm lock is dropped to undo any 33530Sstevel@tonic-gate * hat layer mappings, the existing page is held with an "exclusive" 33540Sstevel@tonic-gate * lock, again preventing another page from being created with 33550Sstevel@tonic-gate * this identity. 33560Sstevel@tonic-gate */ 33570Sstevel@tonic-gate PAGE_HASH_SEARCH(index, pp, vp, off); 33580Sstevel@tonic-gate if (pp != NULL) { 33590Sstevel@tonic-gate VM_STAT_ADD(page_rename_exists); 33600Sstevel@tonic-gate 33610Sstevel@tonic-gate /* 33620Sstevel@tonic-gate * As it turns out, this is one of only two places where 33630Sstevel@tonic-gate * page_lock() needs to hold the passed in lock in the 33640Sstevel@tonic-gate * successful case. In all of the others, the lock could 33650Sstevel@tonic-gate * be dropped as soon as the attempt is made to lock 33660Sstevel@tonic-gate * the page. It is tempting to add yet another arguement, 33670Sstevel@tonic-gate * PL_KEEP or PL_DROP, to let page_lock know what to do. 33680Sstevel@tonic-gate */ 33690Sstevel@tonic-gate if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) { 33700Sstevel@tonic-gate /* 33710Sstevel@tonic-gate * Went to sleep because the page could not 33720Sstevel@tonic-gate * be locked. 
We were woken up when the page 33730Sstevel@tonic-gate * was unlocked, or when the page was destroyed. 33740Sstevel@tonic-gate * In either case, `phm' was dropped while we 33750Sstevel@tonic-gate * slept. Hence we should not just roar through 33760Sstevel@tonic-gate * this loop. 33770Sstevel@tonic-gate */ 33780Sstevel@tonic-gate goto top; 33790Sstevel@tonic-gate } 33800Sstevel@tonic-gate 338163Saguzovsk /* 338263Saguzovsk * If an existing page is a large page, then demote 338363Saguzovsk * it to ensure that no "partial" large pages are 338463Saguzovsk * "created" after page_rename. An existing page 338563Saguzovsk * can be a CacheFS page, and can't belong to swapfs. 338663Saguzovsk */ 33870Sstevel@tonic-gate if (hat_page_is_mapped(pp)) { 33880Sstevel@tonic-gate /* 33890Sstevel@tonic-gate * Unload translations. Since we hold the 33900Sstevel@tonic-gate * exclusive lock on this page, the page 33910Sstevel@tonic-gate * can not be changed while we drop phm. 33920Sstevel@tonic-gate * This is also not a lock protocol violation, 33930Sstevel@tonic-gate * but rather the proper way to do things. 33940Sstevel@tonic-gate */ 33950Sstevel@tonic-gate mutex_exit(phm); 33960Sstevel@tonic-gate (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 339763Saguzovsk if (pp->p_szc != 0) { 339863Saguzovsk ASSERT(!IS_SWAPFSVP(vp)); 339963Saguzovsk ASSERT(vp != &kvp); 340063Saguzovsk page_demote_vp_pages(pp); 340163Saguzovsk ASSERT(pp->p_szc == 0); 340263Saguzovsk } 340363Saguzovsk mutex_enter(phm); 340463Saguzovsk } else if (pp->p_szc != 0) { 340563Saguzovsk ASSERT(!IS_SWAPFSVP(vp)); 340663Saguzovsk ASSERT(vp != &kvp); 340763Saguzovsk mutex_exit(phm); 340863Saguzovsk page_demote_vp_pages(pp); 340963Saguzovsk ASSERT(pp->p_szc == 0); 34100Sstevel@tonic-gate mutex_enter(phm); 34110Sstevel@tonic-gate } 34120Sstevel@tonic-gate page_hashout(pp, phm); 34130Sstevel@tonic-gate } 34140Sstevel@tonic-gate /* 34150Sstevel@tonic-gate * Hash in the page with the new identity. 34160Sstevel@tonic-gate */ 34170Sstevel@tonic-gate if (!page_hashin(opp, vp, off, phm)) { 34180Sstevel@tonic-gate /* 34190Sstevel@tonic-gate * We were holding phm while we searched for [vp, off] 34200Sstevel@tonic-gate * and only dropped phm if we found and locked a page. 34210Sstevel@tonic-gate * If we can't create this page now, then some thing 34220Sstevel@tonic-gate * is really broken. 34230Sstevel@tonic-gate */ 34240Sstevel@tonic-gate panic("page_rename: Can't hash in page: %p", (void *)pp); 34250Sstevel@tonic-gate /*NOTREACHED*/ 34260Sstevel@tonic-gate } 34270Sstevel@tonic-gate 34280Sstevel@tonic-gate ASSERT(MUTEX_HELD(phm)); 34290Sstevel@tonic-gate mutex_exit(phm); 34300Sstevel@tonic-gate 34310Sstevel@tonic-gate /* 34320Sstevel@tonic-gate * Now that we have dropped phm, lets get around to finishing up 34330Sstevel@tonic-gate * with pp. 34340Sstevel@tonic-gate */ 34350Sstevel@tonic-gate if (pp != NULL) { 34360Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(pp)); 34370Sstevel@tonic-gate /* for now large pages should not end up here */ 34380Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 34390Sstevel@tonic-gate /* 34400Sstevel@tonic-gate * Save the locks for transfer to the new page and then 34410Sstevel@tonic-gate * clear them so page_free doesn't think they're important. 34420Sstevel@tonic-gate * The page_struct_lock need not be acquired for lckcnt and 34430Sstevel@tonic-gate * cowcnt since the page has an "exclusive" lock. 
34440Sstevel@tonic-gate */ 34450Sstevel@tonic-gate olckcnt = pp->p_lckcnt; 34460Sstevel@tonic-gate ocowcnt = pp->p_cowcnt; 34470Sstevel@tonic-gate pp->p_lckcnt = pp->p_cowcnt = 0; 34480Sstevel@tonic-gate 34490Sstevel@tonic-gate /* 34500Sstevel@tonic-gate * Put the page on the "free" list after we drop 34510Sstevel@tonic-gate * the lock. The less work under the lock the better. 34520Sstevel@tonic-gate */ 34530Sstevel@tonic-gate /*LINTED: constant in conditional context*/ 34540Sstevel@tonic-gate VN_DISPOSE(pp, B_FREE, 0, kcred); 34550Sstevel@tonic-gate } 34560Sstevel@tonic-gate 34570Sstevel@tonic-gate /* 34580Sstevel@tonic-gate * Transfer the lock count from the old page (if any). 34590Sstevel@tonic-gate * The page_struct_lock need not be acquired for lckcnt and 34600Sstevel@tonic-gate * cowcnt since the page has an "exclusive" lock. 34610Sstevel@tonic-gate */ 34620Sstevel@tonic-gate opp->p_lckcnt += olckcnt; 34630Sstevel@tonic-gate opp->p_cowcnt += ocowcnt; 34640Sstevel@tonic-gate } 34650Sstevel@tonic-gate 34660Sstevel@tonic-gate /* 34670Sstevel@tonic-gate * low level routine to add page `pp' to the hash and vp chains for [vp, offset] 34680Sstevel@tonic-gate * 34690Sstevel@tonic-gate * Pages are normally inserted at the start of a vnode's v_pages list. 34700Sstevel@tonic-gate * If the vnode is VMODSORT and the page is modified, it goes at the end. 34710Sstevel@tonic-gate * This can happen when a modified page is relocated for DR. 34720Sstevel@tonic-gate * 34730Sstevel@tonic-gate * Returns 1 on success and 0 on failure. 34740Sstevel@tonic-gate */ 34750Sstevel@tonic-gate static int 34760Sstevel@tonic-gate page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset) 34770Sstevel@tonic-gate { 34780Sstevel@tonic-gate page_t **listp; 34790Sstevel@tonic-gate page_t *tp; 34800Sstevel@tonic-gate ulong_t index; 34810Sstevel@tonic-gate 34820Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 34830Sstevel@tonic-gate ASSERT(vp != NULL); 34840Sstevel@tonic-gate ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 34850Sstevel@tonic-gate 34860Sstevel@tonic-gate /* 34870Sstevel@tonic-gate * Be sure to set these up before the page is inserted on the hash 34880Sstevel@tonic-gate * list. As soon as the page is placed on the list some other 34890Sstevel@tonic-gate * thread might get confused and wonder how this page could 34900Sstevel@tonic-gate * possibly hash to this list. 34910Sstevel@tonic-gate */ 34920Sstevel@tonic-gate pp->p_vnode = vp; 34930Sstevel@tonic-gate pp->p_offset = offset; 34940Sstevel@tonic-gate 34950Sstevel@tonic-gate /* 34960Sstevel@tonic-gate * record if this page is on a swap vnode 34970Sstevel@tonic-gate */ 34980Sstevel@tonic-gate if ((vp->v_flag & VISSWAP) != 0) 34990Sstevel@tonic-gate PP_SETSWAP(pp); 35000Sstevel@tonic-gate 35010Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, offset); 35020Sstevel@tonic-gate ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index))); 35030Sstevel@tonic-gate listp = &page_hash[index]; 35040Sstevel@tonic-gate 35050Sstevel@tonic-gate /* 35060Sstevel@tonic-gate * If this page is already hashed in, fail this attempt to add it. 
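 *
 * (Two separate lists are maintained here: the global page_hash[]
 * bucket, a NULL-terminated singly linked chain through p_hash
 * selected by PAGE_HASH_FUNC(vp, offset), and the vnode's own
 * v_pages list, a circular doubly linked list through
 * p_vpnext/p_vpprev.  The duplicate check below is simply a walk of
 * the hash chain, roughly
 *
 *	for (tp = *listp; tp != NULL; tp = tp->p_hash)
 *		if (tp->p_vnode == vp && tp->p_offset == offset)
 *			(already there, fail)
 *
 * and is only safe because the caller holds the bucket's hash
 * mutex, as asserted above.)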
35070Sstevel@tonic-gate */ 35080Sstevel@tonic-gate for (tp = *listp; tp != NULL; tp = tp->p_hash) { 35090Sstevel@tonic-gate if (tp->p_vnode == vp && tp->p_offset == offset) { 35100Sstevel@tonic-gate pp->p_vnode = NULL; 35110Sstevel@tonic-gate pp->p_offset = (u_offset_t)(-1); 35120Sstevel@tonic-gate return (0); 35130Sstevel@tonic-gate } 35140Sstevel@tonic-gate } 35150Sstevel@tonic-gate pp->p_hash = *listp; 35160Sstevel@tonic-gate *listp = pp; 35170Sstevel@tonic-gate 35180Sstevel@tonic-gate /* 35190Sstevel@tonic-gate * Add the page to the vnode's list of pages 35200Sstevel@tonic-gate */ 35210Sstevel@tonic-gate if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp)) 35220Sstevel@tonic-gate listp = &vp->v_pages->p_vpprev->p_vpnext; 35230Sstevel@tonic-gate else 35240Sstevel@tonic-gate listp = &vp->v_pages; 35250Sstevel@tonic-gate 35260Sstevel@tonic-gate page_vpadd(listp, pp); 35270Sstevel@tonic-gate 35280Sstevel@tonic-gate return (1); 35290Sstevel@tonic-gate } 35300Sstevel@tonic-gate 35310Sstevel@tonic-gate /* 35320Sstevel@tonic-gate * Add page `pp' to both the hash and vp chains for [vp, offset]. 35330Sstevel@tonic-gate * 35340Sstevel@tonic-gate * Returns 1 on success and 0 on failure. 35350Sstevel@tonic-gate * If hold is passed in, it is not dropped. 35360Sstevel@tonic-gate */ 35370Sstevel@tonic-gate int 35380Sstevel@tonic-gate page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold) 35390Sstevel@tonic-gate { 35400Sstevel@tonic-gate kmutex_t *phm = NULL; 35410Sstevel@tonic-gate kmutex_t *vphm; 35420Sstevel@tonic-gate int rc; 35430Sstevel@tonic-gate 35440Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 35450Sstevel@tonic-gate 35460Sstevel@tonic-gate TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN, 35470Sstevel@tonic-gate "page_hashin:pp %p vp %p offset %llx", 35480Sstevel@tonic-gate pp, vp, offset); 35490Sstevel@tonic-gate 35500Sstevel@tonic-gate VM_STAT_ADD(hashin_count); 35510Sstevel@tonic-gate 35520Sstevel@tonic-gate if (hold != NULL) 35530Sstevel@tonic-gate phm = hold; 35540Sstevel@tonic-gate else { 35550Sstevel@tonic-gate VM_STAT_ADD(hashin_not_held); 35560Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset)); 35570Sstevel@tonic-gate mutex_enter(phm); 35580Sstevel@tonic-gate } 35590Sstevel@tonic-gate 35600Sstevel@tonic-gate vphm = page_vnode_mutex(vp); 35610Sstevel@tonic-gate mutex_enter(vphm); 35620Sstevel@tonic-gate rc = page_do_hashin(pp, vp, offset); 35630Sstevel@tonic-gate mutex_exit(vphm); 35640Sstevel@tonic-gate if (hold == NULL) 35650Sstevel@tonic-gate mutex_exit(phm); 35660Sstevel@tonic-gate if (rc == 0) 35670Sstevel@tonic-gate VM_STAT_ADD(hashin_already); 35680Sstevel@tonic-gate return (rc); 35690Sstevel@tonic-gate } 35700Sstevel@tonic-gate 35710Sstevel@tonic-gate /* 35720Sstevel@tonic-gate * Remove page ``pp'' from the hash and vp chains and remove vp association. 35730Sstevel@tonic-gate * All mutexes must be held 35740Sstevel@tonic-gate */ 35750Sstevel@tonic-gate static void 35760Sstevel@tonic-gate page_do_hashout(page_t *pp) 35770Sstevel@tonic-gate { 35780Sstevel@tonic-gate page_t **hpp; 35790Sstevel@tonic-gate page_t *hp; 35800Sstevel@tonic-gate vnode_t *vp = pp->p_vnode; 35810Sstevel@tonic-gate 35820Sstevel@tonic-gate ASSERT(vp != NULL); 35830Sstevel@tonic-gate ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 35840Sstevel@tonic-gate 35850Sstevel@tonic-gate /* 35860Sstevel@tonic-gate * First, take pp off of its hash chain. 
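 *
 * The unlink below uses the usual pointer-to-pointer idiom: hpp
 * starts at the address of the bucket head and is walked forward to
 * the address of whichever p_hash field currently points at pp, so
 * the single store
 *
 *	*hpp = pp->p_hash;
 *
 * removes the page whether it was first in the bucket or somewhere
 * in the middle of the chain.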
35870Sstevel@tonic-gate */ 35880Sstevel@tonic-gate hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)]; 35890Sstevel@tonic-gate 35900Sstevel@tonic-gate for (;;) { 35910Sstevel@tonic-gate hp = *hpp; 35920Sstevel@tonic-gate if (hp == pp) 35930Sstevel@tonic-gate break; 35940Sstevel@tonic-gate if (hp == NULL) { 35950Sstevel@tonic-gate panic("page_do_hashout"); 35960Sstevel@tonic-gate /*NOTREACHED*/ 35970Sstevel@tonic-gate } 35980Sstevel@tonic-gate hpp = &hp->p_hash; 35990Sstevel@tonic-gate } 36000Sstevel@tonic-gate *hpp = pp->p_hash; 36010Sstevel@tonic-gate 36020Sstevel@tonic-gate /* 36030Sstevel@tonic-gate * Now remove it from its associated vnode. 36040Sstevel@tonic-gate */ 36050Sstevel@tonic-gate if (vp->v_pages) 36060Sstevel@tonic-gate page_vpsub(&vp->v_pages, pp); 36070Sstevel@tonic-gate 36080Sstevel@tonic-gate pp->p_hash = NULL; 36090Sstevel@tonic-gate page_clr_all_props(pp); 36100Sstevel@tonic-gate PP_CLRSWAP(pp); 36110Sstevel@tonic-gate pp->p_vnode = NULL; 36120Sstevel@tonic-gate pp->p_offset = (u_offset_t)-1; 36130Sstevel@tonic-gate } 36140Sstevel@tonic-gate 36150Sstevel@tonic-gate /* 36160Sstevel@tonic-gate * Remove page ``pp'' from the hash and vp chains and remove vp association. 36170Sstevel@tonic-gate * 36180Sstevel@tonic-gate * When `phm' is non-NULL it contains the address of the mutex protecting the 36190Sstevel@tonic-gate * hash list pp is on. It is not dropped. 36200Sstevel@tonic-gate */ 36210Sstevel@tonic-gate void 36220Sstevel@tonic-gate page_hashout(page_t *pp, kmutex_t *phm) 36230Sstevel@tonic-gate { 36240Sstevel@tonic-gate vnode_t *vp; 36250Sstevel@tonic-gate ulong_t index; 36260Sstevel@tonic-gate kmutex_t *nphm; 36270Sstevel@tonic-gate kmutex_t *vphm; 36280Sstevel@tonic-gate kmutex_t *sep; 36290Sstevel@tonic-gate 36300Sstevel@tonic-gate ASSERT(phm != NULL ? MUTEX_HELD(phm) : 1); 36310Sstevel@tonic-gate ASSERT(pp->p_vnode != NULL); 36320Sstevel@tonic-gate ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr); 36330Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode))); 36340Sstevel@tonic-gate 36350Sstevel@tonic-gate vp = pp->p_vnode; 36360Sstevel@tonic-gate 36370Sstevel@tonic-gate TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT, 36380Sstevel@tonic-gate "page_hashout:pp %p vp %p", pp, vp); 36390Sstevel@tonic-gate 36400Sstevel@tonic-gate /* Kernel probe */ 36410Sstevel@tonic-gate TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */, 36420Sstevel@tonic-gate tnf_opaque, vnode, vp, 36430Sstevel@tonic-gate tnf_offset, offset, pp->p_offset); 36440Sstevel@tonic-gate 36450Sstevel@tonic-gate /* 36460Sstevel@tonic-gate * 36470Sstevel@tonic-gate */ 36480Sstevel@tonic-gate VM_STAT_ADD(hashout_count); 36490Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, pp->p_offset); 36500Sstevel@tonic-gate if (phm == NULL) { 36510Sstevel@tonic-gate VM_STAT_ADD(hashout_not_held); 36520Sstevel@tonic-gate nphm = PAGE_HASH_MUTEX(index); 36530Sstevel@tonic-gate mutex_enter(nphm); 36540Sstevel@tonic-gate } 36550Sstevel@tonic-gate ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1); 36560Sstevel@tonic-gate 36570Sstevel@tonic-gate 36580Sstevel@tonic-gate /* 36590Sstevel@tonic-gate * grab page vnode mutex and remove it... 
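 *
 * (Lock ordering note: the page hash mutex -- phm from the caller,
 * or nphm taken above -- is acquired before the vnode's page mutex
 * here, which matches the order used by page_hashin(), so the
 * hash-in and hash-out paths cannot deadlock against each other.)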
36600Sstevel@tonic-gate */ 36610Sstevel@tonic-gate vphm = page_vnode_mutex(vp); 36620Sstevel@tonic-gate mutex_enter(vphm); 36630Sstevel@tonic-gate 36640Sstevel@tonic-gate page_do_hashout(pp); 36650Sstevel@tonic-gate 36660Sstevel@tonic-gate mutex_exit(vphm); 36670Sstevel@tonic-gate if (phm == NULL) 36680Sstevel@tonic-gate mutex_exit(nphm); 36690Sstevel@tonic-gate 36700Sstevel@tonic-gate /* 36710Sstevel@tonic-gate * Wake up processes waiting for this page. The page's 36720Sstevel@tonic-gate * identity has been changed, and is probably not the 36730Sstevel@tonic-gate * desired page any longer. 36740Sstevel@tonic-gate */ 36750Sstevel@tonic-gate sep = page_se_mutex(pp); 36760Sstevel@tonic-gate mutex_enter(sep); 3677800Sstans pp->p_selock &= ~SE_EWANTED; 36780Sstevel@tonic-gate if (CV_HAS_WAITERS(&pp->p_cv)) 36790Sstevel@tonic-gate cv_broadcast(&pp->p_cv); 36800Sstevel@tonic-gate mutex_exit(sep); 36810Sstevel@tonic-gate } 36820Sstevel@tonic-gate 36830Sstevel@tonic-gate /* 36840Sstevel@tonic-gate * Add the page to the front of a linked list of pages 36850Sstevel@tonic-gate * using the p_next & p_prev pointers for the list. 36860Sstevel@tonic-gate * The caller is responsible for protecting the list pointers. 36870Sstevel@tonic-gate */ 36880Sstevel@tonic-gate void 36890Sstevel@tonic-gate page_add(page_t **ppp, page_t *pp) 36900Sstevel@tonic-gate { 36910Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 36920Sstevel@tonic-gate 36930Sstevel@tonic-gate page_add_common(ppp, pp); 36940Sstevel@tonic-gate } 36950Sstevel@tonic-gate 36960Sstevel@tonic-gate 36970Sstevel@tonic-gate 36980Sstevel@tonic-gate /* 36990Sstevel@tonic-gate * Common code for page_add() and mach_page_add() 37000Sstevel@tonic-gate */ 37010Sstevel@tonic-gate void 37020Sstevel@tonic-gate page_add_common(page_t **ppp, page_t *pp) 37030Sstevel@tonic-gate { 37040Sstevel@tonic-gate if (*ppp == NULL) { 37050Sstevel@tonic-gate pp->p_next = pp->p_prev = pp; 37060Sstevel@tonic-gate } else { 37070Sstevel@tonic-gate pp->p_next = *ppp; 37080Sstevel@tonic-gate pp->p_prev = (*ppp)->p_prev; 37090Sstevel@tonic-gate (*ppp)->p_prev = pp; 37100Sstevel@tonic-gate pp->p_prev->p_next = pp; 37110Sstevel@tonic-gate } 37120Sstevel@tonic-gate *ppp = pp; 37130Sstevel@tonic-gate } 37140Sstevel@tonic-gate 37150Sstevel@tonic-gate 37160Sstevel@tonic-gate /* 37170Sstevel@tonic-gate * Remove this page from a linked list of pages 37180Sstevel@tonic-gate * using the p_next & p_prev pointers for the list. 37190Sstevel@tonic-gate * 37200Sstevel@tonic-gate * The caller is responsible for protecting the list pointers. 37210Sstevel@tonic-gate */ 37220Sstevel@tonic-gate void 37230Sstevel@tonic-gate page_sub(page_t **ppp, page_t *pp) 37240Sstevel@tonic-gate { 37250Sstevel@tonic-gate ASSERT((PP_ISFREE(pp)) ? 
1 : 37260Sstevel@tonic-gate (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 37270Sstevel@tonic-gate 37280Sstevel@tonic-gate if (*ppp == NULL || pp == NULL) { 37290Sstevel@tonic-gate panic("page_sub: bad arg(s): pp %p, *ppp %p", 37300Sstevel@tonic-gate (void *)pp, (void *)(*ppp)); 37310Sstevel@tonic-gate /*NOTREACHED*/ 37320Sstevel@tonic-gate } 37330Sstevel@tonic-gate 37340Sstevel@tonic-gate page_sub_common(ppp, pp); 37350Sstevel@tonic-gate } 37360Sstevel@tonic-gate 37370Sstevel@tonic-gate 37380Sstevel@tonic-gate /* 37390Sstevel@tonic-gate * Common code for page_sub() and mach_page_sub() 37400Sstevel@tonic-gate */ 37410Sstevel@tonic-gate void 37420Sstevel@tonic-gate page_sub_common(page_t **ppp, page_t *pp) 37430Sstevel@tonic-gate { 37440Sstevel@tonic-gate if (*ppp == pp) 37450Sstevel@tonic-gate *ppp = pp->p_next; /* go to next page */ 37460Sstevel@tonic-gate 37470Sstevel@tonic-gate if (*ppp == pp) 37480Sstevel@tonic-gate *ppp = NULL; /* page list is gone */ 37490Sstevel@tonic-gate else { 37500Sstevel@tonic-gate pp->p_prev->p_next = pp->p_next; 37510Sstevel@tonic-gate pp->p_next->p_prev = pp->p_prev; 37520Sstevel@tonic-gate } 37530Sstevel@tonic-gate pp->p_prev = pp->p_next = pp; /* make pp a list of one */ 37540Sstevel@tonic-gate } 37550Sstevel@tonic-gate 37560Sstevel@tonic-gate 37570Sstevel@tonic-gate /* 37580Sstevel@tonic-gate * Break page list oppp into two lists with npages in the first list. 37590Sstevel@tonic-gate * The tail is returned in nppp. 37600Sstevel@tonic-gate */ 37610Sstevel@tonic-gate void 37620Sstevel@tonic-gate page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages) 37630Sstevel@tonic-gate { 37640Sstevel@tonic-gate page_t *s1pp = *oppp; 37650Sstevel@tonic-gate page_t *s2pp; 37660Sstevel@tonic-gate page_t *e1pp, *e2pp; 37670Sstevel@tonic-gate long n = 0; 37680Sstevel@tonic-gate 37690Sstevel@tonic-gate if (s1pp == NULL) { 37700Sstevel@tonic-gate *nppp = NULL; 37710Sstevel@tonic-gate return; 37720Sstevel@tonic-gate } 37730Sstevel@tonic-gate if (npages == 0) { 37740Sstevel@tonic-gate *nppp = s1pp; 37750Sstevel@tonic-gate *oppp = NULL; 37760Sstevel@tonic-gate return; 37770Sstevel@tonic-gate } 37780Sstevel@tonic-gate for (n = 0, s2pp = *oppp; n < npages; n++) { 37790Sstevel@tonic-gate s2pp = s2pp->p_next; 37800Sstevel@tonic-gate } 37810Sstevel@tonic-gate /* Fix head and tail of new lists */ 37820Sstevel@tonic-gate e1pp = s2pp->p_prev; 37830Sstevel@tonic-gate e2pp = s1pp->p_prev; 37840Sstevel@tonic-gate s1pp->p_prev = e1pp; 37850Sstevel@tonic-gate e1pp->p_next = s1pp; 37860Sstevel@tonic-gate s2pp->p_prev = e2pp; 37870Sstevel@tonic-gate e2pp->p_next = s2pp; 37880Sstevel@tonic-gate 37890Sstevel@tonic-gate /* second list empty */ 37900Sstevel@tonic-gate if (s2pp == s1pp) { 37910Sstevel@tonic-gate *oppp = s1pp; 37920Sstevel@tonic-gate *nppp = NULL; 37930Sstevel@tonic-gate } else { 37940Sstevel@tonic-gate *oppp = s1pp; 37950Sstevel@tonic-gate *nppp = s2pp; 37960Sstevel@tonic-gate } 37970Sstevel@tonic-gate } 37980Sstevel@tonic-gate 37990Sstevel@tonic-gate /* 38000Sstevel@tonic-gate * Concatenate page list nppp onto the end of list ppp.
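 *
 * A usage sketch (illustrative only; "pl", "tail" and "npages" are
 * hypothetical locals, not part of this file): break the first npages off
 * a list and later splice the remainder back on:
 *
 *	page_t *tail;
 *
 *	page_list_break(&pl, &tail, npages);
 *	page_list_concat(&pl, &tail);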
38010Sstevel@tonic-gate */ 38020Sstevel@tonic-gate void 38030Sstevel@tonic-gate page_list_concat(page_t **ppp, page_t **nppp) 38040Sstevel@tonic-gate { 38050Sstevel@tonic-gate page_t *s1pp, *s2pp, *e1pp, *e2pp; 38060Sstevel@tonic-gate 38070Sstevel@tonic-gate if (*nppp == NULL) { 38080Sstevel@tonic-gate return; 38090Sstevel@tonic-gate } 38100Sstevel@tonic-gate if (*ppp == NULL) { 38110Sstevel@tonic-gate *ppp = *nppp; 38120Sstevel@tonic-gate return; 38130Sstevel@tonic-gate } 38140Sstevel@tonic-gate s1pp = *ppp; 38150Sstevel@tonic-gate e1pp = s1pp->p_prev; 38160Sstevel@tonic-gate s2pp = *nppp; 38170Sstevel@tonic-gate e2pp = s2pp->p_prev; 38180Sstevel@tonic-gate s1pp->p_prev = e2pp; 38190Sstevel@tonic-gate e2pp->p_next = s1pp; 38200Sstevel@tonic-gate e1pp->p_next = s2pp; 38210Sstevel@tonic-gate s2pp->p_prev = e1pp; 38220Sstevel@tonic-gate } 38230Sstevel@tonic-gate 38240Sstevel@tonic-gate /* 38250Sstevel@tonic-gate * return the next page in the page list 38260Sstevel@tonic-gate */ 38270Sstevel@tonic-gate page_t * 38280Sstevel@tonic-gate page_list_next(page_t *pp) 38290Sstevel@tonic-gate { 38300Sstevel@tonic-gate return (pp->p_next); 38310Sstevel@tonic-gate } 38320Sstevel@tonic-gate 38330Sstevel@tonic-gate 38340Sstevel@tonic-gate /* 38350Sstevel@tonic-gate * Add the page to the front of the linked list of pages 38360Sstevel@tonic-gate * using p_vpnext/p_vpprev pointers for the list. 38370Sstevel@tonic-gate * 38380Sstevel@tonic-gate * The caller is responsible for protecting the lists. 38390Sstevel@tonic-gate */ 38400Sstevel@tonic-gate void 38410Sstevel@tonic-gate page_vpadd(page_t **ppp, page_t *pp) 38420Sstevel@tonic-gate { 38430Sstevel@tonic-gate if (*ppp == NULL) { 38440Sstevel@tonic-gate pp->p_vpnext = pp->p_vpprev = pp; 38450Sstevel@tonic-gate } else { 38460Sstevel@tonic-gate pp->p_vpnext = *ppp; 38470Sstevel@tonic-gate pp->p_vpprev = (*ppp)->p_vpprev; 38480Sstevel@tonic-gate (*ppp)->p_vpprev = pp; 38490Sstevel@tonic-gate pp->p_vpprev->p_vpnext = pp; 38500Sstevel@tonic-gate } 38510Sstevel@tonic-gate *ppp = pp; 38520Sstevel@tonic-gate } 38530Sstevel@tonic-gate 38540Sstevel@tonic-gate /* 38550Sstevel@tonic-gate * Remove this page from the linked list of pages 38560Sstevel@tonic-gate * using p_vpnext/p_vpprev pointers for the list. 38570Sstevel@tonic-gate * 38580Sstevel@tonic-gate * The caller is responsible for protecting the lists. 38590Sstevel@tonic-gate */ 38600Sstevel@tonic-gate void 38610Sstevel@tonic-gate page_vpsub(page_t **ppp, page_t *pp) 38620Sstevel@tonic-gate { 38630Sstevel@tonic-gate if (*ppp == NULL || pp == NULL) { 38640Sstevel@tonic-gate panic("page_vpsub: bad arg(s): pp %p, *ppp %p", 38650Sstevel@tonic-gate (void *)pp, (void *)(*ppp)); 38660Sstevel@tonic-gate /*NOTREACHED*/ 38670Sstevel@tonic-gate } 38680Sstevel@tonic-gate 38690Sstevel@tonic-gate if (*ppp == pp) 38700Sstevel@tonic-gate *ppp = pp->p_vpnext; /* go to next page */ 38710Sstevel@tonic-gate 38720Sstevel@tonic-gate if (*ppp == pp) 38730Sstevel@tonic-gate *ppp = NULL; /* page list is gone */ 38740Sstevel@tonic-gate else { 38750Sstevel@tonic-gate pp->p_vpprev->p_vpnext = pp->p_vpnext; 38760Sstevel@tonic-gate pp->p_vpnext->p_vpprev = pp->p_vpprev; 38770Sstevel@tonic-gate } 38780Sstevel@tonic-gate pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */ 38790Sstevel@tonic-gate } 38800Sstevel@tonic-gate 38810Sstevel@tonic-gate /* 38820Sstevel@tonic-gate * Lock a physical page into memory "long term". Used to support "lock 38830Sstevel@tonic-gate * in memory" functions. 
Accepts the page to be locked, and a cow variable 38840Sstevel@tonic-gate * to indicate whether the lock will travel to the new page during 38850Sstevel@tonic-gate * a potential copy-on-write. 38860Sstevel@tonic-gate */ 38870Sstevel@tonic-gate int 38880Sstevel@tonic-gate page_pp_lock( 38890Sstevel@tonic-gate page_t *pp, /* page to be locked */ 38900Sstevel@tonic-gate int cow, /* cow lock */ 38910Sstevel@tonic-gate int kernel) /* must succeed -- ignore checking */ 38920Sstevel@tonic-gate { 38930Sstevel@tonic-gate int r = 0; /* result -- assume failure */ 38940Sstevel@tonic-gate 38950Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp)); 38960Sstevel@tonic-gate 38970Sstevel@tonic-gate page_struct_lock(pp); 38980Sstevel@tonic-gate /* 38990Sstevel@tonic-gate * Acquire the "freemem_lock" for availrmem. 39000Sstevel@tonic-gate */ 39010Sstevel@tonic-gate if (cow) { 39020Sstevel@tonic-gate mutex_enter(&freemem_lock); 39030Sstevel@tonic-gate if ((availrmem > pages_pp_maximum) && 39040Sstevel@tonic-gate (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 39050Sstevel@tonic-gate availrmem--; 39060Sstevel@tonic-gate pages_locked++; 39070Sstevel@tonic-gate mutex_exit(&freemem_lock); 39080Sstevel@tonic-gate r = 1; 39090Sstevel@tonic-gate if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 39100Sstevel@tonic-gate cmn_err(CE_WARN, 39110Sstevel@tonic-gate "COW lock limit reached on pfn 0x%lx", 39120Sstevel@tonic-gate page_pptonum(pp)); 39130Sstevel@tonic-gate } 39140Sstevel@tonic-gate } else 39150Sstevel@tonic-gate mutex_exit(&freemem_lock); 39160Sstevel@tonic-gate } else { 39170Sstevel@tonic-gate if (pp->p_lckcnt) { 39180Sstevel@tonic-gate if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 39190Sstevel@tonic-gate r = 1; 39200Sstevel@tonic-gate if (++pp->p_lckcnt == 39210Sstevel@tonic-gate (ushort_t)PAGE_LOCK_MAXIMUM) { 39220Sstevel@tonic-gate cmn_err(CE_WARN, "Page lock limit " 39230Sstevel@tonic-gate "reached on pfn 0x%lx", 39240Sstevel@tonic-gate page_pptonum(pp)); 39250Sstevel@tonic-gate } 39260Sstevel@tonic-gate } 39270Sstevel@tonic-gate } else { 39280Sstevel@tonic-gate if (kernel) { 39290Sstevel@tonic-gate /* availrmem accounting done by caller */ 39300Sstevel@tonic-gate ++pp->p_lckcnt; 39310Sstevel@tonic-gate r = 1; 39320Sstevel@tonic-gate } else { 39330Sstevel@tonic-gate mutex_enter(&freemem_lock); 39340Sstevel@tonic-gate if (availrmem > pages_pp_maximum) { 39350Sstevel@tonic-gate availrmem--; 39360Sstevel@tonic-gate pages_locked++; 39370Sstevel@tonic-gate ++pp->p_lckcnt; 39380Sstevel@tonic-gate r = 1; 39390Sstevel@tonic-gate } 39400Sstevel@tonic-gate mutex_exit(&freemem_lock); 39410Sstevel@tonic-gate } 39420Sstevel@tonic-gate } 39430Sstevel@tonic-gate } 39440Sstevel@tonic-gate page_struct_unlock(pp); 39450Sstevel@tonic-gate return (r); 39460Sstevel@tonic-gate } 39470Sstevel@tonic-gate 39480Sstevel@tonic-gate /* 39490Sstevel@tonic-gate * Decommit a lock on a physical page frame. Account for cow locks if 39500Sstevel@tonic-gate * appropriate. 39510Sstevel@tonic-gate */ 39520Sstevel@tonic-gate void 39530Sstevel@tonic-gate page_pp_unlock( 39540Sstevel@tonic-gate page_t *pp, /* page to be unlocked */ 39550Sstevel@tonic-gate int cow, /* expect cow lock */ 39560Sstevel@tonic-gate int kernel) /* this was a kernel lock */ 39570Sstevel@tonic-gate { 39580Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp)); 39590Sstevel@tonic-gate 39600Sstevel@tonic-gate page_struct_lock(pp); 39610Sstevel@tonic-gate /* 39620Sstevel@tonic-gate * Acquire the "freemem_lock" for availrmem.
39630Sstevel@tonic-gate * If cowcnt or lckcnt is already 0 do nothing; i.e., we 39640Sstevel@tonic-gate * could be called to unlock even if nothing is locked. This could 39650Sstevel@tonic-gate * happen if locked file pages were truncated (removing the lock) 39660Sstevel@tonic-gate * and the file was grown again and new pages faulted in; the new 39670Sstevel@tonic-gate * pages are unlocked but the segment still thinks they're locked. 39680Sstevel@tonic-gate */ 39690Sstevel@tonic-gate if (cow) { 39700Sstevel@tonic-gate if (pp->p_cowcnt) { 39710Sstevel@tonic-gate mutex_enter(&freemem_lock); 39720Sstevel@tonic-gate pp->p_cowcnt--; 39730Sstevel@tonic-gate availrmem++; 39740Sstevel@tonic-gate pages_locked--; 39750Sstevel@tonic-gate mutex_exit(&freemem_lock); 39760Sstevel@tonic-gate } 39770Sstevel@tonic-gate } else { 39780Sstevel@tonic-gate if (pp->p_lckcnt && --pp->p_lckcnt == 0) { 39790Sstevel@tonic-gate if (!kernel) { 39800Sstevel@tonic-gate mutex_enter(&freemem_lock); 39810Sstevel@tonic-gate availrmem++; 39820Sstevel@tonic-gate pages_locked--; 39830Sstevel@tonic-gate mutex_exit(&freemem_lock); 39840Sstevel@tonic-gate } 39850Sstevel@tonic-gate } 39860Sstevel@tonic-gate } 39870Sstevel@tonic-gate page_struct_unlock(pp); 39880Sstevel@tonic-gate } 39890Sstevel@tonic-gate 39900Sstevel@tonic-gate /* 39910Sstevel@tonic-gate * This routine reserves availrmem for npages; 39920Sstevel@tonic-gate * flags: KM_NOSLEEP or KM_SLEEP 39930Sstevel@tonic-gate * returns 1 on success or 0 on failure 39940Sstevel@tonic-gate */ 39950Sstevel@tonic-gate int 39960Sstevel@tonic-gate page_resv(pgcnt_t npages, uint_t flags) 39970Sstevel@tonic-gate { 39980Sstevel@tonic-gate mutex_enter(&freemem_lock); 39990Sstevel@tonic-gate while (availrmem < tune.t_minarmem + npages) { 40000Sstevel@tonic-gate if (flags & KM_NOSLEEP) { 40010Sstevel@tonic-gate mutex_exit(&freemem_lock); 40020Sstevel@tonic-gate return (0); 40030Sstevel@tonic-gate } 40040Sstevel@tonic-gate mutex_exit(&freemem_lock); 40050Sstevel@tonic-gate page_needfree(npages); 40060Sstevel@tonic-gate kmem_reap(); 40070Sstevel@tonic-gate delay(hz >> 2); 40080Sstevel@tonic-gate page_needfree(-(spgcnt_t)npages); 40090Sstevel@tonic-gate mutex_enter(&freemem_lock); 40100Sstevel@tonic-gate } 40110Sstevel@tonic-gate availrmem -= npages; 40120Sstevel@tonic-gate mutex_exit(&freemem_lock); 40130Sstevel@tonic-gate return (1); 40140Sstevel@tonic-gate } 40150Sstevel@tonic-gate 40160Sstevel@tonic-gate /* 40170Sstevel@tonic-gate * This routine unreserves availrmem for npages; 40180Sstevel@tonic-gate */ 40190Sstevel@tonic-gate void 40200Sstevel@tonic-gate page_unresv(pgcnt_t npages) 40210Sstevel@tonic-gate { 40220Sstevel@tonic-gate mutex_enter(&freemem_lock); 40230Sstevel@tonic-gate availrmem += npages; 40240Sstevel@tonic-gate mutex_exit(&freemem_lock); 40250Sstevel@tonic-gate } 40260Sstevel@tonic-gate 40270Sstevel@tonic-gate /* 40280Sstevel@tonic-gate * See Statement at the beginning of segvn_lockop() regarding 40290Sstevel@tonic-gate * the way we handle cowcnts and lckcnts. 40300Sstevel@tonic-gate * 40310Sstevel@tonic-gate * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage 40320Sstevel@tonic-gate * that breaks COW has PROT_WRITE. 40330Sstevel@tonic-gate * 40340Sstevel@tonic-gate * Note that, we may also break COW in case we are softlocking 40350Sstevel@tonic-gate * on read access during physio; 40360Sstevel@tonic-gate * in this softlock case, the vpage may not have PROT_WRITE.
40370Sstevel@tonic-gate * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp' 40380Sstevel@tonic-gate * if the vpage doesn't have PROT_WRITE. 40390Sstevel@tonic-gate * 40400Sstevel@tonic-gate * This routine is never called if we are stealing a page 40410Sstevel@tonic-gate * in anon_private. 40420Sstevel@tonic-gate * 40430Sstevel@tonic-gate * The caller subtracted from availrmem for read only mapping. 40440Sstevel@tonic-gate * if lckcnt is 1 increment availrmem. 40450Sstevel@tonic-gate */ 40460Sstevel@tonic-gate void 40470Sstevel@tonic-gate page_pp_useclaim( 40480Sstevel@tonic-gate page_t *opp, /* original page frame losing lock */ 40490Sstevel@tonic-gate page_t *npp, /* new page frame gaining lock */ 40500Sstevel@tonic-gate uint_t write_perm) /* set if vpage has PROT_WRITE */ 40510Sstevel@tonic-gate { 40520Sstevel@tonic-gate int payback = 0; 40530Sstevel@tonic-gate 40540Sstevel@tonic-gate ASSERT(PAGE_LOCKED(opp)); 40550Sstevel@tonic-gate ASSERT(PAGE_LOCKED(npp)); 40560Sstevel@tonic-gate 40570Sstevel@tonic-gate page_struct_lock(opp); 40580Sstevel@tonic-gate 40590Sstevel@tonic-gate ASSERT(npp->p_cowcnt == 0); 40600Sstevel@tonic-gate ASSERT(npp->p_lckcnt == 0); 40610Sstevel@tonic-gate 40620Sstevel@tonic-gate /* Don't use claim if nothing is locked (see page_pp_unlock above) */ 40630Sstevel@tonic-gate if ((write_perm && opp->p_cowcnt != 0) || 40640Sstevel@tonic-gate (!write_perm && opp->p_lckcnt != 0)) { 40650Sstevel@tonic-gate 40660Sstevel@tonic-gate if (write_perm) { 40670Sstevel@tonic-gate npp->p_cowcnt++; 40680Sstevel@tonic-gate ASSERT(opp->p_cowcnt != 0); 40690Sstevel@tonic-gate opp->p_cowcnt--; 40700Sstevel@tonic-gate } else { 40710Sstevel@tonic-gate 40720Sstevel@tonic-gate ASSERT(opp->p_lckcnt != 0); 40730Sstevel@tonic-gate 40740Sstevel@tonic-gate /* 40750Sstevel@tonic-gate * We didn't need availrmem decremented if p_lckcnt on 40760Sstevel@tonic-gate * original page is 1. Here, we are unlocking 40770Sstevel@tonic-gate * read-only copy belonging to original page and 40780Sstevel@tonic-gate * are locking a copy belonging to new page. 40790Sstevel@tonic-gate */ 40800Sstevel@tonic-gate if (opp->p_lckcnt == 1) 40810Sstevel@tonic-gate payback = 1; 40820Sstevel@tonic-gate 40830Sstevel@tonic-gate npp->p_lckcnt++; 40840Sstevel@tonic-gate opp->p_lckcnt--; 40850Sstevel@tonic-gate } 40860Sstevel@tonic-gate } 40870Sstevel@tonic-gate if (payback) { 40880Sstevel@tonic-gate mutex_enter(&freemem_lock); 40890Sstevel@tonic-gate availrmem++; 40900Sstevel@tonic-gate pages_useclaim--; 40910Sstevel@tonic-gate mutex_exit(&freemem_lock); 40920Sstevel@tonic-gate } 40930Sstevel@tonic-gate page_struct_unlock(opp); 40940Sstevel@tonic-gate } 40950Sstevel@tonic-gate 40960Sstevel@tonic-gate /* 40970Sstevel@tonic-gate * Simple claim adjust functions -- used to support changes in 40980Sstevel@tonic-gate * claims due to changes in access permissions. Used by segvn_setprot(). 
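 *
 * A calling sketch (illustrative only; "pp" is a page the caller has
 * already locked, as both routines assert):
 *
 *	if (page_addclaim(pp) == 0)
 *		return (0);		claim failed, leave protections alone
 *	...
 *	(void) page_subclaim(pp);	reverse transition later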
40990Sstevel@tonic-gate */ 41000Sstevel@tonic-gate int 41010Sstevel@tonic-gate page_addclaim(page_t *pp) 41020Sstevel@tonic-gate { 41030Sstevel@tonic-gate int r = 0; /* result */ 41040Sstevel@tonic-gate 41050Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp)); 41060Sstevel@tonic-gate 41070Sstevel@tonic-gate page_struct_lock(pp); 41080Sstevel@tonic-gate ASSERT(pp->p_lckcnt != 0); 41090Sstevel@tonic-gate 41100Sstevel@tonic-gate if (pp->p_lckcnt == 1) { 41110Sstevel@tonic-gate if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 41120Sstevel@tonic-gate --pp->p_lckcnt; 41130Sstevel@tonic-gate r = 1; 41140Sstevel@tonic-gate if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 41150Sstevel@tonic-gate cmn_err(CE_WARN, 41160Sstevel@tonic-gate "COW lock limit reached on pfn 0x%lx", 41170Sstevel@tonic-gate page_pptonum(pp)); 41180Sstevel@tonic-gate } 41190Sstevel@tonic-gate } 41200Sstevel@tonic-gate } else { 41210Sstevel@tonic-gate mutex_enter(&freemem_lock); 41220Sstevel@tonic-gate if ((availrmem > pages_pp_maximum) && 41230Sstevel@tonic-gate (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 41240Sstevel@tonic-gate --availrmem; 41250Sstevel@tonic-gate ++pages_claimed; 41260Sstevel@tonic-gate mutex_exit(&freemem_lock); 41270Sstevel@tonic-gate --pp->p_lckcnt; 41280Sstevel@tonic-gate r = 1; 41290Sstevel@tonic-gate if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 41300Sstevel@tonic-gate cmn_err(CE_WARN, 41310Sstevel@tonic-gate "COW lock limit reached on pfn 0x%lx", 41320Sstevel@tonic-gate page_pptonum(pp)); 41330Sstevel@tonic-gate } 41340Sstevel@tonic-gate } else 41350Sstevel@tonic-gate mutex_exit(&freemem_lock); 41360Sstevel@tonic-gate } 41370Sstevel@tonic-gate page_struct_unlock(pp); 41380Sstevel@tonic-gate return (r); 41390Sstevel@tonic-gate } 41400Sstevel@tonic-gate 41410Sstevel@tonic-gate int 41420Sstevel@tonic-gate page_subclaim(page_t *pp) 41430Sstevel@tonic-gate { 41440Sstevel@tonic-gate int r = 0; 41450Sstevel@tonic-gate 41460Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp)); 41470Sstevel@tonic-gate 41480Sstevel@tonic-gate page_struct_lock(pp); 41490Sstevel@tonic-gate ASSERT(pp->p_cowcnt != 0); 41500Sstevel@tonic-gate 41510Sstevel@tonic-gate if (pp->p_lckcnt) { 41520Sstevel@tonic-gate if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 41530Sstevel@tonic-gate r = 1; 41540Sstevel@tonic-gate /* 41550Sstevel@tonic-gate * for availrmem 41560Sstevel@tonic-gate */ 41570Sstevel@tonic-gate mutex_enter(&freemem_lock); 41580Sstevel@tonic-gate availrmem++; 41590Sstevel@tonic-gate pages_claimed--; 41600Sstevel@tonic-gate mutex_exit(&freemem_lock); 41610Sstevel@tonic-gate 41620Sstevel@tonic-gate pp->p_cowcnt--; 41630Sstevel@tonic-gate 41640Sstevel@tonic-gate if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 41650Sstevel@tonic-gate cmn_err(CE_WARN, 41660Sstevel@tonic-gate "Page lock limit reached on pfn 0x%lx", 41670Sstevel@tonic-gate page_pptonum(pp)); 41680Sstevel@tonic-gate } 41690Sstevel@tonic-gate } 41700Sstevel@tonic-gate } else { 41710Sstevel@tonic-gate r = 1; 41720Sstevel@tonic-gate pp->p_cowcnt--; 41730Sstevel@tonic-gate pp->p_lckcnt++; 41740Sstevel@tonic-gate } 41750Sstevel@tonic-gate page_struct_unlock(pp); 41760Sstevel@tonic-gate return (r); 41770Sstevel@tonic-gate } 41780Sstevel@tonic-gate 41790Sstevel@tonic-gate int 41800Sstevel@tonic-gate page_addclaim_pages(page_t **ppa) 41810Sstevel@tonic-gate { 41820Sstevel@tonic-gate 41830Sstevel@tonic-gate pgcnt_t lckpgs = 0, pg_idx; 41840Sstevel@tonic-gate 41850Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_addclaim_pages); 41860Sstevel@tonic-gate 41870Sstevel@tonic-gate 
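	/*
	 * page_llock serializes whole-array claim transfers: the loop below
	 * first validates every page, and only after the availrmem check
	 * succeeds are the lckcnt/cowcnt values actually moved.
	 */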
mutex_enter(&page_llock); 41880Sstevel@tonic-gate for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 41890Sstevel@tonic-gate 41900Sstevel@tonic-gate ASSERT(PAGE_LOCKED(ppa[pg_idx])); 41910Sstevel@tonic-gate ASSERT(ppa[pg_idx]->p_lckcnt != 0); 41920Sstevel@tonic-gate if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 41930Sstevel@tonic-gate mutex_exit(&page_llock); 41940Sstevel@tonic-gate return (0); 41950Sstevel@tonic-gate } 41960Sstevel@tonic-gate if (ppa[pg_idx]->p_lckcnt > 1) 41970Sstevel@tonic-gate lckpgs++; 41980Sstevel@tonic-gate } 41990Sstevel@tonic-gate 42000Sstevel@tonic-gate if (lckpgs != 0) { 42010Sstevel@tonic-gate mutex_enter(&freemem_lock); 42020Sstevel@tonic-gate if (availrmem >= pages_pp_maximum + lckpgs) { 42030Sstevel@tonic-gate availrmem -= lckpgs; 42040Sstevel@tonic-gate pages_claimed += lckpgs; 42050Sstevel@tonic-gate } else { 42060Sstevel@tonic-gate mutex_exit(&freemem_lock); 42070Sstevel@tonic-gate mutex_exit(&page_llock); 42080Sstevel@tonic-gate return (0); 42090Sstevel@tonic-gate } 42100Sstevel@tonic-gate mutex_exit(&freemem_lock); 42110Sstevel@tonic-gate } 42120Sstevel@tonic-gate 42130Sstevel@tonic-gate for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 42140Sstevel@tonic-gate ppa[pg_idx]->p_lckcnt--; 42150Sstevel@tonic-gate ppa[pg_idx]->p_cowcnt++; 42160Sstevel@tonic-gate } 42170Sstevel@tonic-gate mutex_exit(&page_llock); 42180Sstevel@tonic-gate return (1); 42190Sstevel@tonic-gate } 42200Sstevel@tonic-gate 42210Sstevel@tonic-gate int 42220Sstevel@tonic-gate page_subclaim_pages(page_t **ppa) 42230Sstevel@tonic-gate { 42240Sstevel@tonic-gate pgcnt_t ulckpgs = 0, pg_idx; 42250Sstevel@tonic-gate 42260Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_subclaim_pages); 42270Sstevel@tonic-gate 42280Sstevel@tonic-gate mutex_enter(&page_llock); 42290Sstevel@tonic-gate for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 42300Sstevel@tonic-gate 42310Sstevel@tonic-gate ASSERT(PAGE_LOCKED(ppa[pg_idx])); 42320Sstevel@tonic-gate ASSERT(ppa[pg_idx]->p_cowcnt != 0); 42330Sstevel@tonic-gate if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 42340Sstevel@tonic-gate mutex_exit(&page_llock); 42350Sstevel@tonic-gate return (0); 42360Sstevel@tonic-gate } 42370Sstevel@tonic-gate if (ppa[pg_idx]->p_lckcnt != 0) 42380Sstevel@tonic-gate ulckpgs++; 42390Sstevel@tonic-gate } 42400Sstevel@tonic-gate 42410Sstevel@tonic-gate if (ulckpgs != 0) { 42420Sstevel@tonic-gate mutex_enter(&freemem_lock); 42430Sstevel@tonic-gate availrmem += ulckpgs; 42440Sstevel@tonic-gate pages_claimed -= ulckpgs; 42450Sstevel@tonic-gate mutex_exit(&freemem_lock); 42460Sstevel@tonic-gate } 42470Sstevel@tonic-gate 42480Sstevel@tonic-gate for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 42490Sstevel@tonic-gate ppa[pg_idx]->p_cowcnt--; 42500Sstevel@tonic-gate ppa[pg_idx]->p_lckcnt++; 42510Sstevel@tonic-gate 42520Sstevel@tonic-gate } 42530Sstevel@tonic-gate mutex_exit(&page_llock); 42540Sstevel@tonic-gate return (1); 42550Sstevel@tonic-gate } 42560Sstevel@tonic-gate 42570Sstevel@tonic-gate page_t * 42580Sstevel@tonic-gate page_numtopp(pfn_t pfnum, se_t se) 42590Sstevel@tonic-gate { 42600Sstevel@tonic-gate page_t *pp; 42610Sstevel@tonic-gate 42620Sstevel@tonic-gate retry: 42630Sstevel@tonic-gate pp = page_numtopp_nolock(pfnum); 42640Sstevel@tonic-gate if (pp == NULL) { 42650Sstevel@tonic-gate return ((page_t *)NULL); 42660Sstevel@tonic-gate } 42670Sstevel@tonic-gate 42680Sstevel@tonic-gate /* 42690Sstevel@tonic-gate * Acquire the appropriate lock on the page. 
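 * If the pfn-to-page_t binding changes while we are blocked, start over.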
42700Sstevel@tonic-gate */ 42710Sstevel@tonic-gate while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) { 42720Sstevel@tonic-gate if (page_pptonum(pp) != pfnum) 42730Sstevel@tonic-gate goto retry; 42740Sstevel@tonic-gate continue; 42750Sstevel@tonic-gate } 42760Sstevel@tonic-gate 42770Sstevel@tonic-gate if (page_pptonum(pp) != pfnum) { 42780Sstevel@tonic-gate page_unlock(pp); 42790Sstevel@tonic-gate goto retry; 42800Sstevel@tonic-gate } 42810Sstevel@tonic-gate 42820Sstevel@tonic-gate return (pp); 42830Sstevel@tonic-gate } 42840Sstevel@tonic-gate 42850Sstevel@tonic-gate page_t * 42860Sstevel@tonic-gate page_numtopp_noreclaim(pfn_t pfnum, se_t se) 42870Sstevel@tonic-gate { 42880Sstevel@tonic-gate page_t *pp; 42890Sstevel@tonic-gate 42900Sstevel@tonic-gate retry: 42910Sstevel@tonic-gate pp = page_numtopp_nolock(pfnum); 42920Sstevel@tonic-gate if (pp == NULL) { 42930Sstevel@tonic-gate return ((page_t *)NULL); 42940Sstevel@tonic-gate } 42950Sstevel@tonic-gate 42960Sstevel@tonic-gate /* 42970Sstevel@tonic-gate * Acquire the appropriate lock on the page. 42980Sstevel@tonic-gate */ 42990Sstevel@tonic-gate while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) { 43000Sstevel@tonic-gate if (page_pptonum(pp) != pfnum) 43010Sstevel@tonic-gate goto retry; 43020Sstevel@tonic-gate continue; 43030Sstevel@tonic-gate } 43040Sstevel@tonic-gate 43050Sstevel@tonic-gate if (page_pptonum(pp) != pfnum) { 43060Sstevel@tonic-gate page_unlock(pp); 43070Sstevel@tonic-gate goto retry; 43080Sstevel@tonic-gate } 43090Sstevel@tonic-gate 43100Sstevel@tonic-gate return (pp); 43110Sstevel@tonic-gate } 43120Sstevel@tonic-gate 43130Sstevel@tonic-gate /* 43140Sstevel@tonic-gate * This routine is like page_numtopp, but will only return page structs 43150Sstevel@tonic-gate * for pages which are ok for loading into hardware using the page struct. 43160Sstevel@tonic-gate */ 43170Sstevel@tonic-gate page_t * 43180Sstevel@tonic-gate page_numtopp_nowait(pfn_t pfnum, se_t se) 43190Sstevel@tonic-gate { 43200Sstevel@tonic-gate page_t *pp; 43210Sstevel@tonic-gate 43220Sstevel@tonic-gate retry: 43230Sstevel@tonic-gate pp = page_numtopp_nolock(pfnum); 43240Sstevel@tonic-gate if (pp == NULL) { 43250Sstevel@tonic-gate return ((page_t *)NULL); 43260Sstevel@tonic-gate } 43270Sstevel@tonic-gate 43280Sstevel@tonic-gate /* 43290Sstevel@tonic-gate * Try to acquire the appropriate lock on the page. 43300Sstevel@tonic-gate */ 43310Sstevel@tonic-gate if (PP_ISFREE(pp)) 43320Sstevel@tonic-gate pp = NULL; 43330Sstevel@tonic-gate else { 43340Sstevel@tonic-gate if (!page_trylock(pp, se)) 43350Sstevel@tonic-gate pp = NULL; 43360Sstevel@tonic-gate else { 43370Sstevel@tonic-gate if (page_pptonum(pp) != pfnum) { 43380Sstevel@tonic-gate page_unlock(pp); 43390Sstevel@tonic-gate goto retry; 43400Sstevel@tonic-gate } 43410Sstevel@tonic-gate if (PP_ISFREE(pp)) { 43420Sstevel@tonic-gate page_unlock(pp); 43430Sstevel@tonic-gate pp = NULL; 43440Sstevel@tonic-gate } 43450Sstevel@tonic-gate } 43460Sstevel@tonic-gate } 43470Sstevel@tonic-gate return (pp); 43480Sstevel@tonic-gate } 43490Sstevel@tonic-gate 43500Sstevel@tonic-gate /* 43510Sstevel@tonic-gate * Returns a count of dirty pages that are in the process 43520Sstevel@tonic-gate * of being written out. If 'cleanit' is set, try to push the page. 
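 * "Pushing" a page here means an asynchronous VOP_PUTPAGE() with
 * B_ASYNC | B_FREE on each candidate; the criteria a page must meet are
 * listed below.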
43530Sstevel@tonic-gate */ 43540Sstevel@tonic-gate pgcnt_t 43550Sstevel@tonic-gate page_busy(int cleanit) 43560Sstevel@tonic-gate { 43570Sstevel@tonic-gate page_t *page0 = page_first(); 43580Sstevel@tonic-gate page_t *pp = page0; 43590Sstevel@tonic-gate pgcnt_t nppbusy = 0; 43600Sstevel@tonic-gate u_offset_t off; 43610Sstevel@tonic-gate 43620Sstevel@tonic-gate do { 43630Sstevel@tonic-gate vnode_t *vp = pp->p_vnode; 43640Sstevel@tonic-gate 43650Sstevel@tonic-gate /* 43660Sstevel@tonic-gate * A page is a candidate for syncing if it is: 43670Sstevel@tonic-gate * 43680Sstevel@tonic-gate * (a) On neither the freelist nor the cachelist 43690Sstevel@tonic-gate * (b) Hashed onto a vnode 43700Sstevel@tonic-gate * (c) Not a kernel page 43710Sstevel@tonic-gate * (d) Dirty 43720Sstevel@tonic-gate * (e) Not part of a swapfile 43730Sstevel@tonic-gate * (f) a page which belongs to a real vnode; eg has a non-null 43740Sstevel@tonic-gate * v_vfsp pointer. 43750Sstevel@tonic-gate * (g) Backed by a filesystem which doesn't have a 43760Sstevel@tonic-gate * stubbed-out sync operation 43770Sstevel@tonic-gate */ 43780Sstevel@tonic-gate if (!PP_ISFREE(pp) && vp != NULL && vp != &kvp && 43790Sstevel@tonic-gate hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL && 43800Sstevel@tonic-gate vfs_can_sync(vp->v_vfsp)) { 43810Sstevel@tonic-gate nppbusy++; 43820Sstevel@tonic-gate vfs_syncprogress(); 43830Sstevel@tonic-gate 43840Sstevel@tonic-gate if (!cleanit) 43850Sstevel@tonic-gate continue; 43860Sstevel@tonic-gate if (!page_trylock(pp, SE_EXCL)) 43870Sstevel@tonic-gate continue; 43880Sstevel@tonic-gate 43890Sstevel@tonic-gate if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) || 43900Sstevel@tonic-gate pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 43910Sstevel@tonic-gate !(hat_pagesync(pp, 43920Sstevel@tonic-gate HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) { 43930Sstevel@tonic-gate page_unlock(pp); 43940Sstevel@tonic-gate continue; 43950Sstevel@tonic-gate } 43960Sstevel@tonic-gate off = pp->p_offset; 43970Sstevel@tonic-gate VN_HOLD(vp); 43980Sstevel@tonic-gate page_unlock(pp); 43990Sstevel@tonic-gate (void) VOP_PUTPAGE(vp, off, PAGESIZE, 44000Sstevel@tonic-gate B_ASYNC | B_FREE, kcred); 44010Sstevel@tonic-gate VN_RELE(vp); 44020Sstevel@tonic-gate } 44030Sstevel@tonic-gate } while ((pp = page_next(pp)) != page0); 44040Sstevel@tonic-gate 44050Sstevel@tonic-gate return (nppbusy); 44060Sstevel@tonic-gate } 44070Sstevel@tonic-gate 44080Sstevel@tonic-gate void page_invalidate_pages(void); 44090Sstevel@tonic-gate 44100Sstevel@tonic-gate /* 44110Sstevel@tonic-gate * callback handler to vm sub-system 44120Sstevel@tonic-gate * 44130Sstevel@tonic-gate * callers make sure no recursive entries to this func. 44140Sstevel@tonic-gate */ 44150Sstevel@tonic-gate /*ARGSUSED*/ 44160Sstevel@tonic-gate boolean_t 44170Sstevel@tonic-gate callb_vm_cpr(void *arg, int code) 44180Sstevel@tonic-gate { 44190Sstevel@tonic-gate if (code == CB_CODE_CPR_CHKPT) 44200Sstevel@tonic-gate page_invalidate_pages(); 44210Sstevel@tonic-gate return (B_TRUE); 44220Sstevel@tonic-gate } 44230Sstevel@tonic-gate 44240Sstevel@tonic-gate /* 44250Sstevel@tonic-gate * Invalidate all pages of the system. 44260Sstevel@tonic-gate * It shouldn't be called until all user page activities are all stopped. 
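 * Dirty pages are pushed out with B_INVAL and clean ones are destroyed;
 * pages that cannot be locked SE_EXCL are counted as busy and the whole
 * scan is retried up to MAXRETRIES times.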
44270Sstevel@tonic-gate */ 44280Sstevel@tonic-gate void 44290Sstevel@tonic-gate page_invalidate_pages() 44300Sstevel@tonic-gate { 44310Sstevel@tonic-gate page_t *pp; 44320Sstevel@tonic-gate page_t *page0; 44330Sstevel@tonic-gate pgcnt_t nbusypages; 44340Sstevel@tonic-gate int retry = 0; 44350Sstevel@tonic-gate const int MAXRETRIES = 4; 44360Sstevel@tonic-gate #if defined(__sparc) 44370Sstevel@tonic-gate extern struct vnode prom_ppages; 44380Sstevel@tonic-gate #endif /* __sparc */ 44390Sstevel@tonic-gate 44400Sstevel@tonic-gate top: 44410Sstevel@tonic-gate /* 44420Sstevel@tonic-gate * Flush dirty pages and destroy the clean ones. 44430Sstevel@tonic-gate */ 44440Sstevel@tonic-gate nbusypages = 0; 44450Sstevel@tonic-gate 44460Sstevel@tonic-gate pp = page0 = page_first(); 44470Sstevel@tonic-gate do { 44480Sstevel@tonic-gate struct vnode *vp; 44490Sstevel@tonic-gate u_offset_t offset; 44500Sstevel@tonic-gate int mod; 44510Sstevel@tonic-gate 44520Sstevel@tonic-gate /* 44530Sstevel@tonic-gate * skip the page if it has no vnode or if it is associated 44540Sstevel@tonic-gate * with the kernel vnode or prom allocated kernel mem. 44550Sstevel@tonic-gate */ 44560Sstevel@tonic-gate #if defined(__sparc) 44570Sstevel@tonic-gate if ((vp = pp->p_vnode) == NULL || vp == &kvp || 44580Sstevel@tonic-gate vp == &prom_ppages) 44590Sstevel@tonic-gate #else /* x86 doesn't have prom or prom_ppage */ 44600Sstevel@tonic-gate if ((vp = pp->p_vnode) == NULL || vp == &kvp) 44610Sstevel@tonic-gate #endif /* __sparc */ 44620Sstevel@tonic-gate continue; 44630Sstevel@tonic-gate 44640Sstevel@tonic-gate /* 44650Sstevel@tonic-gate * skip the page which has already been freed and invalidated. 44660Sstevel@tonic-gate */ 44670Sstevel@tonic-gate if (PP_ISFREE(pp) && PP_ISAGED(pp)) 44680Sstevel@tonic-gate continue; 44690Sstevel@tonic-gate 44700Sstevel@tonic-gate /* 44710Sstevel@tonic-gate * skip pages that are already locked or can't be "exclusively" 44720Sstevel@tonic-gate * locked or are already free. After we lock the page, check 44730Sstevel@tonic-gate * the free and age bits again to be sure it's not destroyed 44740Sstevel@tonic-gate * yet. 44750Sstevel@tonic-gate * To achieve max. parallelization, we use page_trylock instead 44760Sstevel@tonic-gate * of page_lock so that we don't get blocked on individual pages 44770Sstevel@tonic-gate * while we have thousands of other pages to process. 44780Sstevel@tonic-gate */ 44790Sstevel@tonic-gate if (!page_trylock(pp, SE_EXCL)) { 44800Sstevel@tonic-gate nbusypages++; 44810Sstevel@tonic-gate continue; 44820Sstevel@tonic-gate } else if (PP_ISFREE(pp)) { 44830Sstevel@tonic-gate if (!PP_ISAGED(pp)) { 44840Sstevel@tonic-gate page_destroy_free(pp); 44850Sstevel@tonic-gate } else { 44860Sstevel@tonic-gate page_unlock(pp); 44870Sstevel@tonic-gate } 44880Sstevel@tonic-gate continue; 44890Sstevel@tonic-gate } 44900Sstevel@tonic-gate /* 44910Sstevel@tonic-gate * Is this page involved in some I/O? shared? 44920Sstevel@tonic-gate * 44930Sstevel@tonic-gate * The page_struct_lock need not be acquired to 44940Sstevel@tonic-gate * examine these fields since the page has an 44950Sstevel@tonic-gate * "exclusive" lock.
44960Sstevel@tonic-gate */ 44970Sstevel@tonic-gate if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 44980Sstevel@tonic-gate page_unlock(pp); 44990Sstevel@tonic-gate continue; 45000Sstevel@tonic-gate } 45010Sstevel@tonic-gate 45020Sstevel@tonic-gate if (vp->v_type == VCHR) { 45030Sstevel@tonic-gate panic("vp->v_type == VCHR"); 45040Sstevel@tonic-gate /*NOTREACHED*/ 45050Sstevel@tonic-gate } 45060Sstevel@tonic-gate 45070Sstevel@tonic-gate if (!page_try_demote_pages(pp)) { 45080Sstevel@tonic-gate page_unlock(pp); 45090Sstevel@tonic-gate continue; 45100Sstevel@tonic-gate } 45110Sstevel@tonic-gate 45120Sstevel@tonic-gate /* 45130Sstevel@tonic-gate * Check the modified bit. Leave the bits alone in hardware 45140Sstevel@tonic-gate * (they will be modified if we do the putpage). 45150Sstevel@tonic-gate */ 45160Sstevel@tonic-gate mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) 45170Sstevel@tonic-gate & P_MOD); 45180Sstevel@tonic-gate if (mod) { 45190Sstevel@tonic-gate offset = pp->p_offset; 45200Sstevel@tonic-gate /* 45210Sstevel@tonic-gate * Hold the vnode before releasing the page lock 45220Sstevel@tonic-gate * to prevent it from being freed and re-used by 45230Sstevel@tonic-gate * some other thread. 45240Sstevel@tonic-gate */ 45250Sstevel@tonic-gate VN_HOLD(vp); 45260Sstevel@tonic-gate page_unlock(pp); 45270Sstevel@tonic-gate /* 45280Sstevel@tonic-gate * No error return is checked here. Callers such as 45290Sstevel@tonic-gate * cpr deal with the dirty pages at dump time 45300Sstevel@tonic-gate * if this putpage fails. 45310Sstevel@tonic-gate */ 45320Sstevel@tonic-gate (void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL, 45330Sstevel@tonic-gate kcred); 45340Sstevel@tonic-gate VN_RELE(vp); 45350Sstevel@tonic-gate } else { 45360Sstevel@tonic-gate page_destroy(pp, 0); 45370Sstevel@tonic-gate } 45380Sstevel@tonic-gate } while ((pp = page_next(pp)) != page0); 45390Sstevel@tonic-gate if (nbusypages && retry++ < MAXRETRIES) { 45400Sstevel@tonic-gate delay(1); 45410Sstevel@tonic-gate goto top; 45420Sstevel@tonic-gate } 45430Sstevel@tonic-gate } 45440Sstevel@tonic-gate 45450Sstevel@tonic-gate /* 45460Sstevel@tonic-gate * Replace the page "old" with the page "new" on the page hash and vnode lists 45470Sstevel@tonic-gate * 45480Sstevel@tonic-gate * the replacement must be done in place, i.e. the equivalent sequence: 45490Sstevel@tonic-gate * 45500Sstevel@tonic-gate * vp = old->p_vnode; 45510Sstevel@tonic-gate * off = old->p_offset; 45520Sstevel@tonic-gate * page_do_hashout(old) 45530Sstevel@tonic-gate * page_do_hashin(new, vp, off) 45540Sstevel@tonic-gate * 45550Sstevel@tonic-gate * doesn't work, since 45560Sstevel@tonic-gate * 1) if old is the only page on the vnode, the v_pages list has a window 45570Sstevel@tonic-gate * where it looks empty. This will break file system assumptions. 45580Sstevel@tonic-gate * and 45590Sstevel@tonic-gate * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
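 *
 * Instead the new page is substituted for the old one in place, on both
 * the hash chain and the v_pages list, with the hash and vnode mutexes
 * held for the whole exchange (see the ASSERTs below).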
45600Sstevel@tonic-gate */ 45610Sstevel@tonic-gate static void 45620Sstevel@tonic-gate page_do_relocate_hash(page_t *new, page_t *old) 45630Sstevel@tonic-gate { 45640Sstevel@tonic-gate page_t **hash_list; 45650Sstevel@tonic-gate vnode_t *vp = old->p_vnode; 45660Sstevel@tonic-gate kmutex_t *sep; 45670Sstevel@tonic-gate 45680Sstevel@tonic-gate ASSERT(PAGE_EXCL(old)); 45690Sstevel@tonic-gate ASSERT(PAGE_EXCL(new)); 45700Sstevel@tonic-gate ASSERT(vp != NULL); 45710Sstevel@tonic-gate ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 45720Sstevel@tonic-gate ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset)))); 45730Sstevel@tonic-gate 45740Sstevel@tonic-gate /* 45750Sstevel@tonic-gate * First find old page on the page hash list 45760Sstevel@tonic-gate */ 45770Sstevel@tonic-gate hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)]; 45780Sstevel@tonic-gate 45790Sstevel@tonic-gate for (;;) { 45800Sstevel@tonic-gate if (*hash_list == old) 45810Sstevel@tonic-gate break; 45820Sstevel@tonic-gate if (*hash_list == NULL) { 45830Sstevel@tonic-gate panic("page_do_hashout"); 45840Sstevel@tonic-gate /*NOTREACHED*/ 45850Sstevel@tonic-gate } 45860Sstevel@tonic-gate hash_list = &(*hash_list)->p_hash; 45870Sstevel@tonic-gate } 45880Sstevel@tonic-gate 45890Sstevel@tonic-gate /* 45900Sstevel@tonic-gate * update new and replace old with new on the page hash list 45910Sstevel@tonic-gate */ 45920Sstevel@tonic-gate new->p_vnode = old->p_vnode; 45930Sstevel@tonic-gate new->p_offset = old->p_offset; 45940Sstevel@tonic-gate new->p_hash = old->p_hash; 45950Sstevel@tonic-gate *hash_list = new; 45960Sstevel@tonic-gate 45970Sstevel@tonic-gate if ((new->p_vnode->v_flag & VISSWAP) != 0) 45980Sstevel@tonic-gate PP_SETSWAP(new); 45990Sstevel@tonic-gate 46000Sstevel@tonic-gate /* 46010Sstevel@tonic-gate * replace old with new on the vnode's page list 46020Sstevel@tonic-gate */ 46030Sstevel@tonic-gate if (old->p_vpnext == old) { 46040Sstevel@tonic-gate new->p_vpnext = new; 46050Sstevel@tonic-gate new->p_vpprev = new; 46060Sstevel@tonic-gate } else { 46070Sstevel@tonic-gate new->p_vpnext = old->p_vpnext; 46080Sstevel@tonic-gate new->p_vpprev = old->p_vpprev; 46090Sstevel@tonic-gate new->p_vpnext->p_vpprev = new; 46100Sstevel@tonic-gate new->p_vpprev->p_vpnext = new; 46110Sstevel@tonic-gate } 46120Sstevel@tonic-gate if (vp->v_pages == old) 46130Sstevel@tonic-gate vp->v_pages = new; 46140Sstevel@tonic-gate 46150Sstevel@tonic-gate /* 46160Sstevel@tonic-gate * clear out the old page 46170Sstevel@tonic-gate */ 46180Sstevel@tonic-gate old->p_hash = NULL; 46190Sstevel@tonic-gate old->p_vpnext = NULL; 46200Sstevel@tonic-gate old->p_vpprev = NULL; 46210Sstevel@tonic-gate old->p_vnode = NULL; 46220Sstevel@tonic-gate PP_CLRSWAP(old); 46230Sstevel@tonic-gate old->p_offset = (u_offset_t)-1; 46240Sstevel@tonic-gate page_clr_all_props(old); 46250Sstevel@tonic-gate 46260Sstevel@tonic-gate /* 46270Sstevel@tonic-gate * Wake up processes waiting for this page. The page's 46280Sstevel@tonic-gate * identity has been changed, and is probably not the 46290Sstevel@tonic-gate * desired page any longer. 
46300Sstevel@tonic-gate */ 46310Sstevel@tonic-gate sep = page_se_mutex(old); 46320Sstevel@tonic-gate mutex_enter(sep); 4633800Sstans old->p_selock &= ~SE_EWANTED; 46340Sstevel@tonic-gate if (CV_HAS_WAITERS(&old->p_cv)) 46350Sstevel@tonic-gate cv_broadcast(&old->p_cv); 46360Sstevel@tonic-gate mutex_exit(sep); 46370Sstevel@tonic-gate } 46380Sstevel@tonic-gate 46390Sstevel@tonic-gate /* 46400Sstevel@tonic-gate * This function moves the identity of page "pp_old" to page "pp_new". 46410Sstevel@tonic-gate * Both pages must be locked on entry. "pp_new" is free, has no identity, 46420Sstevel@tonic-gate * and need not be hashed out from anywhere. 46430Sstevel@tonic-gate */ 46440Sstevel@tonic-gate void 46450Sstevel@tonic-gate page_relocate_hash(page_t *pp_new, page_t *pp_old) 46460Sstevel@tonic-gate { 46470Sstevel@tonic-gate vnode_t *vp = pp_old->p_vnode; 46480Sstevel@tonic-gate u_offset_t off = pp_old->p_offset; 46490Sstevel@tonic-gate kmutex_t *phm, *vphm; 46500Sstevel@tonic-gate 46510Sstevel@tonic-gate /* 46520Sstevel@tonic-gate * Rehash two pages 46530Sstevel@tonic-gate */ 46540Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp_old)); 46550Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp_new)); 46560Sstevel@tonic-gate ASSERT(vp != NULL); 46570Sstevel@tonic-gate ASSERT(pp_new->p_vnode == NULL); 46580Sstevel@tonic-gate 46590Sstevel@tonic-gate /* 46600Sstevel@tonic-gate * hashout then hashin while holding the mutexes 46610Sstevel@tonic-gate */ 46620Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off)); 46630Sstevel@tonic-gate mutex_enter(phm); 46640Sstevel@tonic-gate vphm = page_vnode_mutex(vp); 46650Sstevel@tonic-gate mutex_enter(vphm); 46660Sstevel@tonic-gate 46670Sstevel@tonic-gate page_do_relocate_hash(pp_new, pp_old); 46680Sstevel@tonic-gate 46690Sstevel@tonic-gate mutex_exit(vphm); 46700Sstevel@tonic-gate mutex_exit(phm); 46710Sstevel@tonic-gate 46720Sstevel@tonic-gate /* 46730Sstevel@tonic-gate * The page_struct_lock need not be acquired for lckcnt and 46740Sstevel@tonic-gate * cowcnt since the page has an "exclusive" lock. 46750Sstevel@tonic-gate */ 46760Sstevel@tonic-gate ASSERT(pp_new->p_lckcnt == 0); 46770Sstevel@tonic-gate ASSERT(pp_new->p_cowcnt == 0); 46780Sstevel@tonic-gate pp_new->p_lckcnt = pp_old->p_lckcnt; 46790Sstevel@tonic-gate pp_new->p_cowcnt = pp_old->p_cowcnt; 46800Sstevel@tonic-gate pp_old->p_lckcnt = pp_old->p_cowcnt = 0; 46810Sstevel@tonic-gate 46820Sstevel@tonic-gate /* The following comment preserved from page_flip(). */ 46830Sstevel@tonic-gate /* XXX - Do we need to protect fsdata? */ 46840Sstevel@tonic-gate pp_new->p_fsdata = pp_old->p_fsdata; 46850Sstevel@tonic-gate } 46860Sstevel@tonic-gate 46870Sstevel@tonic-gate /* 46880Sstevel@tonic-gate * Helper routine used to lock all remaining members of a 46890Sstevel@tonic-gate * large page. The caller is responsible for passing in a locked 46900Sstevel@tonic-gate * pp. If pp is a large page, then it succeeds in locking all the 46910Sstevel@tonic-gate * remaining constituent pages or it returns with only the 46920Sstevel@tonic-gate * original page locked. 46930Sstevel@tonic-gate * 46940Sstevel@tonic-gate * Returns 1 on success, 0 on failure. 46950Sstevel@tonic-gate * 46960Sstevel@tonic-gate * If success is returned this routine guarantees that p_szc for all constituent 46970Sstevel@tonic-gate * pages of a large page pp belongs to can't change. To achieve this we 46980Sstevel@tonic-gate * recheck szc of pp after locking all constituent pages and retry if szc 46990Sstevel@tonic-gate * changed (it could only decrease).
Since hat_page_demote() needs an EXCL 47000Sstevel@tonic-gate * lock on one of the constituent pages it can't be running after all constituent 47010Sstevel@tonic-gate * pages are locked. hat_page_demote() with a lock on a constituent page 47020Sstevel@tonic-gate * outside of this large page (i.e. pp belonged to a larger large page) is 47030Sstevel@tonic-gate * already done with all constituent pages of pp since the root's p_szc is 47040Sstevel@tonic-gate * changed last. Therefore there is no need to synchronize with hat_page_demote() that 47050Sstevel@tonic-gate * locked a constituent page outside of pp's current large page. 47060Sstevel@tonic-gate */ 47070Sstevel@tonic-gate #ifdef DEBUG 47080Sstevel@tonic-gate uint32_t gpg_trylock_mtbf = 0; 47090Sstevel@tonic-gate #endif 47100Sstevel@tonic-gate 47110Sstevel@tonic-gate int 47120Sstevel@tonic-gate group_page_trylock(page_t *pp, se_t se) 47130Sstevel@tonic-gate { 47140Sstevel@tonic-gate page_t *tpp; 47150Sstevel@tonic-gate pgcnt_t npgs, i, j; 47160Sstevel@tonic-gate uint_t pszc = pp->p_szc; 47170Sstevel@tonic-gate 47180Sstevel@tonic-gate #ifdef DEBUG 47190Sstevel@tonic-gate if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) { 47200Sstevel@tonic-gate return (0); 47210Sstevel@tonic-gate } 47220Sstevel@tonic-gate #endif 47230Sstevel@tonic-gate 47240Sstevel@tonic-gate if (pp != PP_GROUPLEADER(pp, pszc)) { 47250Sstevel@tonic-gate return (0); 47260Sstevel@tonic-gate } 47270Sstevel@tonic-gate 47280Sstevel@tonic-gate retry: 47290Sstevel@tonic-gate ASSERT(PAGE_LOCKED_SE(pp, se)); 47300Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp)); 47310Sstevel@tonic-gate if (pszc == 0) { 47320Sstevel@tonic-gate return (1); 47330Sstevel@tonic-gate } 47340Sstevel@tonic-gate npgs = page_get_pagecnt(pszc); 47350Sstevel@tonic-gate tpp = pp + 1; 47360Sstevel@tonic-gate for (i = 1; i < npgs; i++, tpp++) { 47370Sstevel@tonic-gate if (!page_trylock(tpp, se)) { 47380Sstevel@tonic-gate tpp = pp + 1; 47390Sstevel@tonic-gate for (j = 1; j < i; j++, tpp++) { 47400Sstevel@tonic-gate page_unlock(tpp); 47410Sstevel@tonic-gate } 47420Sstevel@tonic-gate return (0); 47430Sstevel@tonic-gate } 47440Sstevel@tonic-gate } 47450Sstevel@tonic-gate if (pp->p_szc != pszc) { 47460Sstevel@tonic-gate ASSERT(pp->p_szc < pszc); 47470Sstevel@tonic-gate ASSERT(pp->p_vnode != NULL && pp->p_vnode != &kvp && 47480Sstevel@tonic-gate !IS_SWAPFSVP(pp->p_vnode)); 47490Sstevel@tonic-gate tpp = pp + 1; 47500Sstevel@tonic-gate for (i = 1; i < npgs; i++, tpp++) { 47510Sstevel@tonic-gate page_unlock(tpp); 47520Sstevel@tonic-gate } 47530Sstevel@tonic-gate pszc = pp->p_szc; 47540Sstevel@tonic-gate goto retry; 47550Sstevel@tonic-gate } 47560Sstevel@tonic-gate return (1); 47570Sstevel@tonic-gate } 47580Sstevel@tonic-gate 47590Sstevel@tonic-gate void 47600Sstevel@tonic-gate group_page_unlock(page_t *pp) 47610Sstevel@tonic-gate { 47620Sstevel@tonic-gate page_t *tpp; 47630Sstevel@tonic-gate pgcnt_t npgs, i; 47640Sstevel@tonic-gate 47650Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp)); 47660Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp)); 47670Sstevel@tonic-gate ASSERT(pp == PP_PAGEROOT(pp)); 47680Sstevel@tonic-gate npgs = page_get_pagecnt(pp->p_szc); 47690Sstevel@tonic-gate for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) { 47700Sstevel@tonic-gate page_unlock(tpp); 47710Sstevel@tonic-gate } 47720Sstevel@tonic-gate } 47730Sstevel@tonic-gate 47740Sstevel@tonic-gate /* 47750Sstevel@tonic-gate * returns 47760Sstevel@tonic-gate * 0 : on success and *nrelocp is number of relocated PAGESIZE pages 47770Sstevel@tonic-gate * ERANGE : this is not a base
page 47780Sstevel@tonic-gate * EBUSY : failure to get locks on the page/pages 47790Sstevel@tonic-gate * ENOMEM : failure to obtain replacement pages 47800Sstevel@tonic-gate * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel 47810Sstevel@tonic-gate * 47820Sstevel@tonic-gate * Return with all constituent members of target and replacement 47830Sstevel@tonic-gate * SE_EXCL locked. It is the callers responsibility to drop the 47840Sstevel@tonic-gate * locks. 47850Sstevel@tonic-gate */ 47860Sstevel@tonic-gate int 47870Sstevel@tonic-gate do_page_relocate( 47880Sstevel@tonic-gate page_t **target, 47890Sstevel@tonic-gate page_t **replacement, 47900Sstevel@tonic-gate int grouplock, 47910Sstevel@tonic-gate spgcnt_t *nrelocp, 47920Sstevel@tonic-gate lgrp_t *lgrp) 47930Sstevel@tonic-gate { 47940Sstevel@tonic-gate #ifdef DEBUG 47950Sstevel@tonic-gate page_t *first_repl; 47960Sstevel@tonic-gate #endif /* DEBUG */ 47970Sstevel@tonic-gate page_t *repl; 47980Sstevel@tonic-gate page_t *targ; 47990Sstevel@tonic-gate page_t *pl = NULL; 48000Sstevel@tonic-gate uint_t ppattr; 48010Sstevel@tonic-gate pfn_t pfn, repl_pfn; 48020Sstevel@tonic-gate uint_t szc; 48030Sstevel@tonic-gate spgcnt_t npgs, i; 48040Sstevel@tonic-gate int repl_contig = 0; 48050Sstevel@tonic-gate uint_t flags = 0; 48060Sstevel@tonic-gate spgcnt_t dofree = 0; 48070Sstevel@tonic-gate 48080Sstevel@tonic-gate *nrelocp = 0; 48090Sstevel@tonic-gate 48100Sstevel@tonic-gate #if defined(__sparc) 48110Sstevel@tonic-gate /* 48120Sstevel@tonic-gate * We need to wait till OBP has completed 48130Sstevel@tonic-gate * its boot-time handoff of its resources to the kernel 48140Sstevel@tonic-gate * before we allow page relocation 48150Sstevel@tonic-gate */ 48160Sstevel@tonic-gate if (page_relocate_ready == 0) { 48170Sstevel@tonic-gate return (EAGAIN); 48180Sstevel@tonic-gate } 48190Sstevel@tonic-gate #endif 48200Sstevel@tonic-gate 48210Sstevel@tonic-gate /* 48220Sstevel@tonic-gate * If this is not a base page, 48230Sstevel@tonic-gate * just return with 0x0 pages relocated. 48240Sstevel@tonic-gate */ 48250Sstevel@tonic-gate targ = *target; 48260Sstevel@tonic-gate ASSERT(PAGE_EXCL(targ)); 48270Sstevel@tonic-gate ASSERT(!PP_ISFREE(targ)); 48280Sstevel@tonic-gate szc = targ->p_szc; 48290Sstevel@tonic-gate ASSERT(szc < mmu_page_sizes); 48300Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 48310Sstevel@tonic-gate pfn = targ->p_pagenum; 48320Sstevel@tonic-gate if (pfn != PFN_BASE(pfn, szc)) { 48330Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]); 48340Sstevel@tonic-gate return (ERANGE); 48350Sstevel@tonic-gate } 48360Sstevel@tonic-gate 48370Sstevel@tonic-gate if ((repl = *replacement) != NULL && repl->p_szc >= szc) { 48380Sstevel@tonic-gate repl_pfn = repl->p_pagenum; 48390Sstevel@tonic-gate if (repl_pfn != PFN_BASE(repl_pfn, szc)) { 48400Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]); 48410Sstevel@tonic-gate return (ERANGE); 48420Sstevel@tonic-gate } 48430Sstevel@tonic-gate repl_contig = 1; 48440Sstevel@tonic-gate } 48450Sstevel@tonic-gate 48460Sstevel@tonic-gate /* 48470Sstevel@tonic-gate * We must lock all members of this large page or we cannot 48480Sstevel@tonic-gate * relocate any part of it. 
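 * group_page_trylock() either locks every constituent page or returns
 * with only the original page locked, so a failure here can be backed
 * out of safely.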
48490Sstevel@tonic-gate */ 48500Sstevel@tonic-gate if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) { 48510Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]); 48520Sstevel@tonic-gate return (EBUSY); 48530Sstevel@tonic-gate } 48540Sstevel@tonic-gate 48550Sstevel@tonic-gate /* 48560Sstevel@tonic-gate * reread szc it could have been decreased before 48570Sstevel@tonic-gate * group_page_trylock() was done. 48580Sstevel@tonic-gate */ 48590Sstevel@tonic-gate szc = targ->p_szc; 48600Sstevel@tonic-gate ASSERT(szc < mmu_page_sizes); 48610Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 48620Sstevel@tonic-gate ASSERT(pfn == PFN_BASE(pfn, szc)); 48630Sstevel@tonic-gate 48640Sstevel@tonic-gate npgs = page_get_pagecnt(targ->p_szc); 48650Sstevel@tonic-gate 48660Sstevel@tonic-gate if (repl == NULL) { 48670Sstevel@tonic-gate dofree = npgs; /* Size of target page in MMU pages */ 48680Sstevel@tonic-gate if (!page_create_wait(dofree, 0)) { 48690Sstevel@tonic-gate if (grouplock != 0) { 48700Sstevel@tonic-gate group_page_unlock(targ); 48710Sstevel@tonic-gate } 48720Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 48730Sstevel@tonic-gate return (ENOMEM); 48740Sstevel@tonic-gate } 48750Sstevel@tonic-gate 48760Sstevel@tonic-gate /* 48770Sstevel@tonic-gate * seg kmem pages require that the target and replacement 48780Sstevel@tonic-gate * page be the same pagesize. 48790Sstevel@tonic-gate */ 48800Sstevel@tonic-gate flags = (targ->p_vnode == &kvp) ? PGR_SAMESZC : 0; 48810Sstevel@tonic-gate repl = page_get_replacement_page(targ, lgrp, flags); 48820Sstevel@tonic-gate if (repl == NULL) { 48830Sstevel@tonic-gate if (grouplock != 0) { 48840Sstevel@tonic-gate group_page_unlock(targ); 48850Sstevel@tonic-gate } 48860Sstevel@tonic-gate page_create_putback(dofree); 48870Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 48880Sstevel@tonic-gate return (ENOMEM); 48890Sstevel@tonic-gate } 48900Sstevel@tonic-gate } 48910Sstevel@tonic-gate #ifdef DEBUG 48920Sstevel@tonic-gate else { 48930Sstevel@tonic-gate ASSERT(PAGE_LOCKED(repl)); 48940Sstevel@tonic-gate } 48950Sstevel@tonic-gate #endif /* DEBUG */ 48960Sstevel@tonic-gate 48970Sstevel@tonic-gate #if defined(__sparc) 48980Sstevel@tonic-gate /* 48990Sstevel@tonic-gate * Let hat_page_relocate() complete the relocation if it's kernel page 49000Sstevel@tonic-gate */ 49010Sstevel@tonic-gate if (targ->p_vnode == &kvp) { 49020Sstevel@tonic-gate *replacement = repl; 49030Sstevel@tonic-gate if (hat_page_relocate(target, replacement, nrelocp) != 0) { 49040Sstevel@tonic-gate if (grouplock != 0) { 49050Sstevel@tonic-gate group_page_unlock(targ); 49060Sstevel@tonic-gate } 49070Sstevel@tonic-gate if (dofree) { 49080Sstevel@tonic-gate *replacement = NULL; 49090Sstevel@tonic-gate page_free_replacement_page(repl); 49100Sstevel@tonic-gate page_create_putback(dofree); 49110Sstevel@tonic-gate } 49120Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]); 49130Sstevel@tonic-gate return (EAGAIN); 49140Sstevel@tonic-gate } 49150Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 49160Sstevel@tonic-gate return (0); 49170Sstevel@tonic-gate } 49180Sstevel@tonic-gate #else 49190Sstevel@tonic-gate #if defined(lint) 49200Sstevel@tonic-gate dofree = dofree; 49210Sstevel@tonic-gate #endif 49220Sstevel@tonic-gate #endif 49230Sstevel@tonic-gate 49240Sstevel@tonic-gate #ifdef DEBUG 49250Sstevel@tonic-gate first_repl = repl; 49260Sstevel@tonic-gate #endif /* DEBUG */ 49270Sstevel@tonic-gate 49280Sstevel@tonic-gate for (i = 
0; i < npgs; i++) { 49290Sstevel@tonic-gate ASSERT(PAGE_EXCL(targ)); 49302414Saguzovsk ASSERT(targ->p_slckcnt == 0); 49312414Saguzovsk ASSERT(repl->p_slckcnt == 0); 49320Sstevel@tonic-gate 49330Sstevel@tonic-gate (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD); 49340Sstevel@tonic-gate 49350Sstevel@tonic-gate ASSERT(hat_page_getshare(targ) == 0); 49360Sstevel@tonic-gate ASSERT(!PP_ISFREE(targ)); 49370Sstevel@tonic-gate ASSERT(targ->p_pagenum == (pfn + i)); 49380Sstevel@tonic-gate ASSERT(repl_contig == 0 || 49390Sstevel@tonic-gate repl->p_pagenum == (repl_pfn + i)); 49400Sstevel@tonic-gate 49410Sstevel@tonic-gate /* 49420Sstevel@tonic-gate * Copy the page contents and attributes then 49430Sstevel@tonic-gate * relocate the page in the page hash. 49440Sstevel@tonic-gate */ 49450Sstevel@tonic-gate ppcopy(targ, repl); 49460Sstevel@tonic-gate ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO)); 49470Sstevel@tonic-gate page_clr_all_props(repl); 49480Sstevel@tonic-gate page_set_props(repl, ppattr); 49490Sstevel@tonic-gate page_relocate_hash(repl, targ); 49500Sstevel@tonic-gate 49510Sstevel@tonic-gate ASSERT(hat_page_getshare(targ) == 0); 49520Sstevel@tonic-gate ASSERT(hat_page_getshare(repl) == 0); 49530Sstevel@tonic-gate /* 49540Sstevel@tonic-gate * Now clear the props on targ, after the 49550Sstevel@tonic-gate * page_relocate_hash(), they no longer 49560Sstevel@tonic-gate * have any meaning. 49570Sstevel@tonic-gate */ 49580Sstevel@tonic-gate page_clr_all_props(targ); 49590Sstevel@tonic-gate ASSERT(targ->p_next == targ); 49600Sstevel@tonic-gate ASSERT(targ->p_prev == targ); 49610Sstevel@tonic-gate page_list_concat(&pl, &targ); 49620Sstevel@tonic-gate 49630Sstevel@tonic-gate targ++; 49640Sstevel@tonic-gate if (repl_contig != 0) { 49650Sstevel@tonic-gate repl++; 49660Sstevel@tonic-gate } else { 49670Sstevel@tonic-gate repl = repl->p_next; 49680Sstevel@tonic-gate } 49690Sstevel@tonic-gate } 49700Sstevel@tonic-gate /* assert that we have come full circle with repl */ 49710Sstevel@tonic-gate ASSERT(repl_contig == 1 || first_repl == repl); 49720Sstevel@tonic-gate 49730Sstevel@tonic-gate *target = pl; 49740Sstevel@tonic-gate if (*replacement == NULL) { 49750Sstevel@tonic-gate ASSERT(first_repl == repl); 49760Sstevel@tonic-gate *replacement = repl; 49770Sstevel@tonic-gate } 49780Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 49790Sstevel@tonic-gate *nrelocp = npgs; 49800Sstevel@tonic-gate return (0); 49810Sstevel@tonic-gate } 49820Sstevel@tonic-gate /* 49830Sstevel@tonic-gate * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated. 
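 *
 * A calling sketch (illustrative only; "targ" is an SE_EXCL locked,
 * non-free base page and the local names below are hypothetical):
 *
 *	page_t *repl = NULL;
 *	spgcnt_t nreloc;
 *
 *	if (page_relocate(&targ, &repl, 1, 1, &nreloc, NULL) == 0)
 *		"repl" now heads "nreloc" SE_EXCL locked replacement pages
 *		and the old pages have been freed (freetarget was 1).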
49840Sstevel@tonic-gate */ 49850Sstevel@tonic-gate int 49860Sstevel@tonic-gate page_relocate( 49870Sstevel@tonic-gate page_t **target, 49880Sstevel@tonic-gate page_t **replacement, 49890Sstevel@tonic-gate int grouplock, 49900Sstevel@tonic-gate int freetarget, 49910Sstevel@tonic-gate spgcnt_t *nrelocp, 49920Sstevel@tonic-gate lgrp_t *lgrp) 49930Sstevel@tonic-gate { 49940Sstevel@tonic-gate spgcnt_t ret; 49950Sstevel@tonic-gate 49960Sstevel@tonic-gate /* do_page_relocate returns 0 on success or errno value */ 49970Sstevel@tonic-gate ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp); 49980Sstevel@tonic-gate 49990Sstevel@tonic-gate if (ret != 0 || freetarget == 0) { 50000Sstevel@tonic-gate return (ret); 50010Sstevel@tonic-gate } 50020Sstevel@tonic-gate if (*nrelocp == 1) { 50030Sstevel@tonic-gate ASSERT(*target != NULL); 50040Sstevel@tonic-gate page_free(*target, 1); 50050Sstevel@tonic-gate } else { 50060Sstevel@tonic-gate page_t *tpp = *target; 50070Sstevel@tonic-gate uint_t szc = tpp->p_szc; 50080Sstevel@tonic-gate pgcnt_t npgs = page_get_pagecnt(szc); 50090Sstevel@tonic-gate ASSERT(npgs > 1); 50100Sstevel@tonic-gate ASSERT(szc != 0); 50110Sstevel@tonic-gate do { 50120Sstevel@tonic-gate ASSERT(PAGE_EXCL(tpp)); 50130Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(tpp)); 50140Sstevel@tonic-gate ASSERT(tpp->p_szc == szc); 50150Sstevel@tonic-gate PP_SETFREE(tpp); 50160Sstevel@tonic-gate PP_SETAGED(tpp); 50170Sstevel@tonic-gate npgs--; 50180Sstevel@tonic-gate } while ((tpp = tpp->p_next) != *target); 50190Sstevel@tonic-gate ASSERT(npgs == 0); 50200Sstevel@tonic-gate page_list_add_pages(*target, 0); 50210Sstevel@tonic-gate npgs = page_get_pagecnt(szc); 50220Sstevel@tonic-gate page_create_putback(npgs); 50230Sstevel@tonic-gate } 50240Sstevel@tonic-gate return (ret); 50250Sstevel@tonic-gate } 50260Sstevel@tonic-gate 50270Sstevel@tonic-gate /* 50280Sstevel@tonic-gate * it is up to the caller to deal with pcf accounting. 50290Sstevel@tonic-gate */ 50300Sstevel@tonic-gate void 50310Sstevel@tonic-gate page_free_replacement_page(page_t *pplist) 50320Sstevel@tonic-gate { 50330Sstevel@tonic-gate page_t *pp; 50340Sstevel@tonic-gate 50350Sstevel@tonic-gate while (pplist != NULL) { 50360Sstevel@tonic-gate /* 50370Sstevel@tonic-gate * pp_targ is a linked list. 
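 * Peel pages off its head one small page, or one large page worth of
 * constituents, at a time.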
50380Sstevel@tonic-gate */ 50390Sstevel@tonic-gate pp = pplist; 50400Sstevel@tonic-gate if (pp->p_szc == 0) { 50410Sstevel@tonic-gate page_sub(&pplist, pp); 50420Sstevel@tonic-gate page_clr_all_props(pp); 50430Sstevel@tonic-gate PP_SETFREE(pp); 50440Sstevel@tonic-gate PP_SETAGED(pp); 50450Sstevel@tonic-gate page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 50460Sstevel@tonic-gate page_unlock(pp); 50470Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]); 50480Sstevel@tonic-gate } else { 50490Sstevel@tonic-gate spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc); 50500Sstevel@tonic-gate page_t *tpp; 50510Sstevel@tonic-gate page_list_break(&pp, &pplist, curnpgs); 50520Sstevel@tonic-gate tpp = pp; 50530Sstevel@tonic-gate do { 50540Sstevel@tonic-gate ASSERT(PAGE_EXCL(tpp)); 50550Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(tpp)); 50560Sstevel@tonic-gate page_clr_all_props(pp); 50570Sstevel@tonic-gate PP_SETFREE(tpp); 50580Sstevel@tonic-gate PP_SETAGED(tpp); 50590Sstevel@tonic-gate } while ((tpp = tpp->p_next) != pp); 50600Sstevel@tonic-gate page_list_add_pages(pp, 0); 50610Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]); 50620Sstevel@tonic-gate } 50630Sstevel@tonic-gate } 50640Sstevel@tonic-gate } 50650Sstevel@tonic-gate 50660Sstevel@tonic-gate /* 50670Sstevel@tonic-gate * Relocate target to non-relocatable replacement page. 50680Sstevel@tonic-gate */ 50690Sstevel@tonic-gate int 50700Sstevel@tonic-gate page_relocate_cage(page_t **target, page_t **replacement) 50710Sstevel@tonic-gate { 50720Sstevel@tonic-gate page_t *tpp, *rpp; 50730Sstevel@tonic-gate spgcnt_t pgcnt, npgs; 50740Sstevel@tonic-gate int result; 50750Sstevel@tonic-gate 50760Sstevel@tonic-gate tpp = *target; 50770Sstevel@tonic-gate 50780Sstevel@tonic-gate ASSERT(PAGE_EXCL(tpp)); 50790Sstevel@tonic-gate ASSERT(tpp->p_szc == 0); 50800Sstevel@tonic-gate 50810Sstevel@tonic-gate pgcnt = btop(page_get_pagesize(tpp->p_szc)); 50820Sstevel@tonic-gate 50830Sstevel@tonic-gate do { 50840Sstevel@tonic-gate (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC); 50850Sstevel@tonic-gate rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC); 50860Sstevel@tonic-gate if (rpp == NULL) { 50870Sstevel@tonic-gate page_create_putback(pgcnt); 50880Sstevel@tonic-gate kcage_cageout_wakeup(); 50890Sstevel@tonic-gate } 50900Sstevel@tonic-gate } while (rpp == NULL); 50910Sstevel@tonic-gate 50920Sstevel@tonic-gate ASSERT(PP_ISNORELOC(rpp)); 50930Sstevel@tonic-gate 50940Sstevel@tonic-gate result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL); 50950Sstevel@tonic-gate 50960Sstevel@tonic-gate if (result == 0) { 50970Sstevel@tonic-gate *replacement = rpp; 50980Sstevel@tonic-gate if (pgcnt != npgs) 50990Sstevel@tonic-gate panic("page_relocate_cage: partial relocation"); 51000Sstevel@tonic-gate } 51010Sstevel@tonic-gate 51020Sstevel@tonic-gate return (result); 51030Sstevel@tonic-gate } 51040Sstevel@tonic-gate 51050Sstevel@tonic-gate /* 51060Sstevel@tonic-gate * Release the page lock on a page, place on cachelist 51070Sstevel@tonic-gate * tail if no longer mapped. Caller can let us know if 51080Sstevel@tonic-gate * the page is known to be clean. 
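 *
 * Editorial example (hypothetical, not part of the original source): a
 * caller that has just finished I/O on a page it holds at least SHARED
 * locked might dispose of it like this, keyed off the PGREL_* values
 * that page_release() below returns:
 *
 *	switch (page_release(pp, 1)) {
 *	case PGREL_CLEAN:
 *		break;		page was freed to the cachelist tail
 *	case PGREL_MOD:
 *		break;		page is dirty; only the lock was dropped
 *	case PGREL_NOTREL:
 *		break;		page still in use; only the lock was dropped
 *	}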
51090Sstevel@tonic-gate */ 51100Sstevel@tonic-gate int 51110Sstevel@tonic-gate page_release(page_t *pp, int checkmod) 51120Sstevel@tonic-gate { 51130Sstevel@tonic-gate int status; 51140Sstevel@tonic-gate 51150Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) && 51160Sstevel@tonic-gate (pp->p_vnode != NULL)); 51170Sstevel@tonic-gate 51180Sstevel@tonic-gate if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) && 51190Sstevel@tonic-gate ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) && 51200Sstevel@tonic-gate pp->p_lckcnt == 0 && pp->p_cowcnt == 0 && 51210Sstevel@tonic-gate !hat_page_is_mapped(pp)) { 51220Sstevel@tonic-gate 51230Sstevel@tonic-gate /* 51240Sstevel@tonic-gate * If page is modified, unlock it 51250Sstevel@tonic-gate * 51260Sstevel@tonic-gate * (p_nrm & P_MOD) bit has the latest stuff because: 51270Sstevel@tonic-gate * (1) We found that this page doesn't have any mappings 51280Sstevel@tonic-gate * _after_ holding SE_EXCL and 51290Sstevel@tonic-gate * (2) We didn't drop SE_EXCL lock after the check in (1) 51300Sstevel@tonic-gate */ 51310Sstevel@tonic-gate if (checkmod && hat_ismod(pp)) { 51320Sstevel@tonic-gate page_unlock(pp); 51330Sstevel@tonic-gate status = PGREL_MOD; 51340Sstevel@tonic-gate } else { 51350Sstevel@tonic-gate /*LINTED: constant in conditional context*/ 51360Sstevel@tonic-gate VN_DISPOSE(pp, B_FREE, 0, kcred); 51370Sstevel@tonic-gate status = PGREL_CLEAN; 51380Sstevel@tonic-gate } 51390Sstevel@tonic-gate } else { 51400Sstevel@tonic-gate page_unlock(pp); 51410Sstevel@tonic-gate status = PGREL_NOTREL; 51420Sstevel@tonic-gate } 51430Sstevel@tonic-gate return (status); 51440Sstevel@tonic-gate } 51450Sstevel@tonic-gate 5146917Selowe /* 5147917Selowe * Given a constituent page, try to demote the large page on the freelist. 5148917Selowe * 5149917Selowe * Returns nonzero if the page could be demoted successfully. Returns with 5150917Selowe * the constituent page still locked. 5151917Selowe */ 5152917Selowe int 5153917Selowe page_try_demote_free_pages(page_t *pp) 5154917Selowe { 5155917Selowe page_t *rootpp = pp; 5156917Selowe pfn_t pfn = page_pptonum(pp); 5157917Selowe spgcnt_t npgs; 5158917Selowe uint_t szc = pp->p_szc; 5159917Selowe 5160917Selowe ASSERT(PP_ISFREE(pp)); 5161917Selowe ASSERT(PAGE_EXCL(pp)); 5162917Selowe 5163917Selowe /* 5164917Selowe * Adjust rootpp and lock it, if `pp' is not the base 5165917Selowe * constituent page. 5166917Selowe */ 5167917Selowe npgs = page_get_pagecnt(pp->p_szc); 5168917Selowe if (npgs == 1) { 5169917Selowe return (0); 5170917Selowe } 5171917Selowe 5172917Selowe if (!IS_P2ALIGNED(pfn, npgs)) { 5173917Selowe pfn = P2ALIGN(pfn, npgs); 5174917Selowe rootpp = page_numtopp_nolock(pfn); 5175917Selowe } 5176917Selowe 5177917Selowe if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) { 5178917Selowe return (0); 5179917Selowe } 5180917Selowe 5181917Selowe if (rootpp->p_szc != szc) { 5182917Selowe if (pp != rootpp) 5183917Selowe page_unlock(rootpp); 5184917Selowe return (0); 5185917Selowe } 5186917Selowe 5187917Selowe page_demote_free_pages(rootpp); 5188917Selowe 5189917Selowe if (pp != rootpp) 5190917Selowe page_unlock(rootpp); 5191917Selowe 5192917Selowe ASSERT(PP_ISFREE(pp)); 5193917Selowe ASSERT(PAGE_EXCL(pp)); 5194917Selowe return (1); 5195917Selowe } 5196917Selowe 5197917Selowe /* 5198917Selowe * Given a constituent page, try to demote the large page. 5199917Selowe * 5200917Selowe * Returns nonzero if the page could be demoted successfully. Returns with 5201917Selowe * the constituent page still locked. 
5202917Selowe */ 52030Sstevel@tonic-gate int 52040Sstevel@tonic-gate page_try_demote_pages(page_t *pp) 52050Sstevel@tonic-gate { 52060Sstevel@tonic-gate page_t *tpp, *rootpp = pp; 52070Sstevel@tonic-gate pfn_t pfn = page_pptonum(pp); 52080Sstevel@tonic-gate spgcnt_t i, npgs; 52090Sstevel@tonic-gate uint_t szc = pp->p_szc; 52100Sstevel@tonic-gate vnode_t *vp = pp->p_vnode; 52110Sstevel@tonic-gate 5212917Selowe ASSERT(PAGE_EXCL(pp)); 52130Sstevel@tonic-gate 52140Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]); 52150Sstevel@tonic-gate 5216917Selowe if (pp->p_szc == 0) { 52170Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]); 52180Sstevel@tonic-gate return (1); 52190Sstevel@tonic-gate } 52200Sstevel@tonic-gate 52210Sstevel@tonic-gate if (vp != NULL && !IS_SWAPFSVP(vp) && vp != &kvp) { 52220Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]); 5223917Selowe page_demote_vp_pages(pp); 52240Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 52250Sstevel@tonic-gate return (1); 52260Sstevel@tonic-gate } 52270Sstevel@tonic-gate 52280Sstevel@tonic-gate /* 5229917Selowe * Adjust rootpp if passed in is not the base 52300Sstevel@tonic-gate * constituent page. 52310Sstevel@tonic-gate */ 5232917Selowe npgs = page_get_pagecnt(pp->p_szc); 52330Sstevel@tonic-gate ASSERT(npgs > 1); 52340Sstevel@tonic-gate if (!IS_P2ALIGNED(pfn, npgs)) { 52350Sstevel@tonic-gate pfn = P2ALIGN(pfn, npgs); 52360Sstevel@tonic-gate rootpp = page_numtopp_nolock(pfn); 52370Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]); 52380Sstevel@tonic-gate ASSERT(rootpp->p_vnode != NULL); 52390Sstevel@tonic-gate ASSERT(rootpp->p_szc == szc); 52400Sstevel@tonic-gate } 52410Sstevel@tonic-gate 52420Sstevel@tonic-gate /* 52430Sstevel@tonic-gate * We can't demote kernel pages since we can't hat_unload() 52440Sstevel@tonic-gate * the mappings. 52450Sstevel@tonic-gate */ 52460Sstevel@tonic-gate if (rootpp->p_vnode == &kvp) 52470Sstevel@tonic-gate return (0); 52480Sstevel@tonic-gate 52490Sstevel@tonic-gate /* 52500Sstevel@tonic-gate * Attempt to lock all constituent pages except the page passed 52510Sstevel@tonic-gate * in since it's already locked. 52520Sstevel@tonic-gate */ 5253414Skchow for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 52540Sstevel@tonic-gate ASSERT(!PP_ISFREE(tpp)); 52550Sstevel@tonic-gate ASSERT(tpp->p_vnode != NULL); 52560Sstevel@tonic-gate 52570Sstevel@tonic-gate if (tpp != pp && !page_trylock(tpp, SE_EXCL)) 52580Sstevel@tonic-gate break; 52590Sstevel@tonic-gate ASSERT(tpp->p_szc == rootpp->p_szc); 52600Sstevel@tonic-gate ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i); 52610Sstevel@tonic-gate } 52620Sstevel@tonic-gate 52630Sstevel@tonic-gate /* 5264917Selowe * If we failed to lock them all then unlock what we have 5265917Selowe * locked so far and bail. 
52660Sstevel@tonic-gate 	 */
52670Sstevel@tonic-gate 	if (i < npgs) {
52680Sstevel@tonic-gate 		tpp = rootpp;
52690Sstevel@tonic-gate 		while (i-- > 0) {
52700Sstevel@tonic-gate 			if (tpp != pp)
52710Sstevel@tonic-gate 				page_unlock(tpp);
5272414Skchow 			tpp++;
52730Sstevel@tonic-gate 		}
52740Sstevel@tonic-gate 		VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
52750Sstevel@tonic-gate 		return (0);
52760Sstevel@tonic-gate 	}
52770Sstevel@tonic-gate
5278414Skchow 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
52790Sstevel@tonic-gate 		ASSERT(PAGE_EXCL(tpp));
52802414Saguzovsk 		ASSERT(tpp->p_slckcnt == 0);
5281917Selowe 		(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
52820Sstevel@tonic-gate 		tpp->p_szc = 0;
52830Sstevel@tonic-gate 	}
52840Sstevel@tonic-gate
52850Sstevel@tonic-gate 	/*
52860Sstevel@tonic-gate 	 * Unlock all pages except the page passed in.
52870Sstevel@tonic-gate 	 */
5288414Skchow 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
52890Sstevel@tonic-gate 		ASSERT(!hat_page_is_mapped(tpp));
52900Sstevel@tonic-gate 		if (tpp != pp)
52910Sstevel@tonic-gate 			page_unlock(tpp);
52920Sstevel@tonic-gate 	}
5293917Selowe
52940Sstevel@tonic-gate 	VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]);
52950Sstevel@tonic-gate 	return (1);
52960Sstevel@tonic-gate }
52970Sstevel@tonic-gate
52980Sstevel@tonic-gate /*
52990Sstevel@tonic-gate  * Called by page_free() and page_destroy() to demote the page size code
53000Sstevel@tonic-gate  * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero
53010Sstevel@tonic-gate  * p_szc on the free list, nor can we just clear p_szc of a single page_t
53020Sstevel@tonic-gate  * within a large page since it will break other code that relies on p_szc
53030Sstevel@tonic-gate  * being the same for all page_t's of a large page). Anonymous pages should
53040Sstevel@tonic-gate  * never end up here because anon_map_getpages() cannot deal with p_szc
53050Sstevel@tonic-gate  * changes after a single constituent page is locked. While anonymous and
53060Sstevel@tonic-gate  * kernel large pages are demoted or freed an entire large page at a time
53070Sstevel@tonic-gate  * with all constituent pages locked EXCL, for file system pages we
53080Sstevel@tonic-gate  * have to be able to demote a large page (i.e. decrease all constituent pages'
53090Sstevel@tonic-gate  * p_szc) with just an EXCL lock on one of the constituent pages. The reason
53100Sstevel@tonic-gate  * we can easily deal with anonymous page demotion an entire large page at a
53110Sstevel@tonic-gate  * time is that those operations originate at the address space level and
53120Sstevel@tonic-gate  * concern the entire large page region, with actual demotion only done when
53130Sstevel@tonic-gate  * pages are not shared with any other processes (therefore we can always get
53140Sstevel@tonic-gate  * an EXCL lock on all anonymous constituent pages after clearing the segment
53150Sstevel@tonic-gate  * page cache). However file system pages can be truncated or invalidated at a
53160Sstevel@tonic-gate  * PAGESIZE level from the file system side and end up in page_free() or
53170Sstevel@tonic-gate  * page_destroy() (we also allow only part of the large page to be SOFTLOCKed,
53180Sstevel@tonic-gate  * and therefore pageout should be able to demote a large page by EXCL locking
53190Sstevel@tonic-gate  * any constituent page that is not under SOFTLOCK). In those cases we cannot
53200Sstevel@tonic-gate  * rely on being able to lock EXCL all constituent pages.
53210Sstevel@tonic-gate  *
53220Sstevel@tonic-gate  * To prevent szc changes on file system pages one has to lock all constituent
53230Sstevel@tonic-gate  * pages at least SHARED (or call page_szc_lock()). The only subsystem that
53240Sstevel@tonic-gate  * doesn't rely on locking all constituent pages (or using page_szc_lock()) to
53250Sstevel@tonic-gate  * prevent szc changes is the hat layer, which uses its own page level mlist
53260Sstevel@tonic-gate  * locks. hat assumes that szc doesn't change after the mlist lock for a page
53270Sstevel@tonic-gate  * is taken. Therefore we need to change szc under hat level locks if we only
53280Sstevel@tonic-gate  * have an EXCL lock on a single constituent page and hat still references any
53290Sstevel@tonic-gate  * of the constituent pages. (Note we can't "ignore" the hat layer by simply
53300Sstevel@tonic-gate  * doing hat_pageunload() on all constituent pages without having EXCL locks
53310Sstevel@tonic-gate  * on all of the constituent pages). We use the hat_page_demote() call to
53320Sstevel@tonic-gate  * safely demote szc of all constituent pages under hat locks when we only
53330Sstevel@tonic-gate  * have an EXCL lock on one of the constituent pages.
53340Sstevel@tonic-gate  *
53350Sstevel@tonic-gate  * This routine calls page_szc_lock() before calling hat_page_demote() to
53360Sstevel@tonic-gate  * allow segvn in one special case not to lock all constituent pages SHARED
53370Sstevel@tonic-gate  * before calling hat_memload_array(), which relies on p_szc not changing even
53380Sstevel@tonic-gate  * before the hat level mlist lock is taken. In that case segvn uses
53390Sstevel@tonic-gate  * page_szc_lock() to prevent hat_page_demote() from changing p_szc values.
53400Sstevel@tonic-gate  *
53410Sstevel@tonic-gate  * Anonymous or kernel page demotion still has to lock all pages exclusively
53420Sstevel@tonic-gate  * and do hat_pageunload() on all constituent pages before demoting the page;
53430Sstevel@tonic-gate  * therefore there's no need for anonymous or kernel page demotion to use the
53440Sstevel@tonic-gate  * hat_page_demote() mechanism.
53450Sstevel@tonic-gate  *
53460Sstevel@tonic-gate  * hat_page_demote() removes all large mappings that map pp and then decreases
53470Sstevel@tonic-gate  * p_szc starting from the last constituent page of the large page. Working
53480Sstevel@tonic-gate  * from the tail of a large page in pfn decreasing order allows one looking at
53490Sstevel@tonic-gate  * the root page to know that hat_page_demote() is done for the root's szc
53500Sstevel@tonic-gate  * area. e.g. if a root page has szc 1 one knows it only has to lock all
53510Sstevel@tonic-gate  * constituent pages within the szc 1 area to prevent szc changes because the
53520Sstevel@tonic-gate  * hat_page_demote() that started on this page when it had szc > 1 is done for
53530Sstevel@tonic-gate  * this szc 1 area.
53540Sstevel@tonic-gate  *
53550Sstevel@tonic-gate  * We are guaranteed that all constituent pages of pp's large page belong to
53560Sstevel@tonic-gate  * the same vnode with consecutive offsets increasing in the direction of
53570Sstevel@tonic-gate  * the pfn, i.e. the identity of constituent pages can't change until their
53580Sstevel@tonic-gate  * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove
53590Sstevel@tonic-gate  * large mappings to pp even though we don't lock any constituent page except
53590Sstevel@tonic-gate  * pp (i.e. we won't unload e.g. a kernel locked page).
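 *
 * Editorial sketch (hypothetical, not from the original source): per the
 * rule above, code that cannot afford to lock all constituent pages at
 * least SHARED can still read a stable p_szc by bracketing the read with
 * page_szc_lock(), the same lock this routine takes before calling
 * hat_page_demote() (the NULL check mirrors the use in
 * page_demote_vp_pages() below):
 *
 *	kmutex_t *mtx;
 *	uint_t szc;
 *
 *	mtx = page_szc_lock(pp);
 *	szc = pp->p_szc;
 *	if (mtx != NULL)
 *		mutex_exit(mtx);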
53600Sstevel@tonic-gate */ 53610Sstevel@tonic-gate static void 53620Sstevel@tonic-gate page_demote_vp_pages(page_t *pp) 53630Sstevel@tonic-gate { 53640Sstevel@tonic-gate kmutex_t *mtx; 53650Sstevel@tonic-gate 53660Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 53670Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp)); 53680Sstevel@tonic-gate ASSERT(pp->p_vnode != NULL); 53690Sstevel@tonic-gate ASSERT(!IS_SWAPFSVP(pp->p_vnode)); 53700Sstevel@tonic-gate ASSERT(pp->p_vnode != &kvp); 53710Sstevel@tonic-gate 53720Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_demote_pages[0]); 53730Sstevel@tonic-gate 53740Sstevel@tonic-gate mtx = page_szc_lock(pp); 53750Sstevel@tonic-gate if (mtx != NULL) { 53760Sstevel@tonic-gate hat_page_demote(pp); 53770Sstevel@tonic-gate mutex_exit(mtx); 53780Sstevel@tonic-gate } 53790Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 53800Sstevel@tonic-gate } 53810Sstevel@tonic-gate 53820Sstevel@tonic-gate /* 53830Sstevel@tonic-gate * Mark any existing pages for migration in the given range 53840Sstevel@tonic-gate */ 53850Sstevel@tonic-gate void 53860Sstevel@tonic-gate page_mark_migrate(struct seg *seg, caddr_t addr, size_t len, 53870Sstevel@tonic-gate struct anon_map *amp, ulong_t anon_index, vnode_t *vp, 53880Sstevel@tonic-gate u_offset_t vnoff, int rflag) 53890Sstevel@tonic-gate { 53900Sstevel@tonic-gate struct anon *ap; 53910Sstevel@tonic-gate vnode_t *curvp; 53920Sstevel@tonic-gate lgrp_t *from; 53930Sstevel@tonic-gate pgcnt_t i; 53940Sstevel@tonic-gate pgcnt_t nlocked; 53950Sstevel@tonic-gate u_offset_t off; 53960Sstevel@tonic-gate pfn_t pfn; 53970Sstevel@tonic-gate size_t pgsz; 53980Sstevel@tonic-gate size_t segpgsz; 53990Sstevel@tonic-gate pgcnt_t pages; 54000Sstevel@tonic-gate uint_t pszc; 54010Sstevel@tonic-gate page_t **ppa; 54020Sstevel@tonic-gate pgcnt_t ppa_nentries; 54030Sstevel@tonic-gate page_t *pp; 54040Sstevel@tonic-gate caddr_t va; 54050Sstevel@tonic-gate ulong_t an_idx; 54060Sstevel@tonic-gate anon_sync_obj_t cookie; 54070Sstevel@tonic-gate 54080Sstevel@tonic-gate ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 54090Sstevel@tonic-gate 54100Sstevel@tonic-gate /* 54110Sstevel@tonic-gate * Don't do anything if don't need to do lgroup optimizations 54120Sstevel@tonic-gate * on this system 54130Sstevel@tonic-gate */ 54140Sstevel@tonic-gate if (!lgrp_optimizations()) 54150Sstevel@tonic-gate return; 54160Sstevel@tonic-gate 54170Sstevel@tonic-gate /* 54180Sstevel@tonic-gate * Align address and length to (potentially large) page boundary 54190Sstevel@tonic-gate */ 54200Sstevel@tonic-gate segpgsz = page_get_pagesize(seg->s_szc); 54210Sstevel@tonic-gate addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz); 54220Sstevel@tonic-gate if (rflag) 54230Sstevel@tonic-gate len = P2ROUNDUP(len, segpgsz); 54240Sstevel@tonic-gate 54250Sstevel@tonic-gate /* 54260Sstevel@tonic-gate * Allocate page array to accomodate largest page size 54270Sstevel@tonic-gate */ 54280Sstevel@tonic-gate pgsz = page_get_pagesize(page_num_pagesizes() - 1); 54290Sstevel@tonic-gate ppa_nentries = btop(pgsz); 54300Sstevel@tonic-gate ppa = kmem_zalloc(ppa_nentries * sizeof (page_t *), KM_SLEEP); 54310Sstevel@tonic-gate 54320Sstevel@tonic-gate /* 54330Sstevel@tonic-gate * Do one (large) page at a time 54340Sstevel@tonic-gate */ 54350Sstevel@tonic-gate va = addr; 54360Sstevel@tonic-gate while (va < addr + len) { 54370Sstevel@tonic-gate /* 54380Sstevel@tonic-gate * Lookup (root) page for vnode and offset corresponding to 54390Sstevel@tonic-gate * this virtual address 54400Sstevel@tonic-gate * Try anonmap first since 
there may be copy-on-write
54410Sstevel@tonic-gate 		 * pages, but initialize vnode pointer and offset using
54420Sstevel@tonic-gate 		 * vnode arguments just in case there isn't an amp.
54430Sstevel@tonic-gate 		 */
54440Sstevel@tonic-gate 		curvp = vp;
54450Sstevel@tonic-gate 		off = vnoff + va - seg->s_base;
54460Sstevel@tonic-gate 		if (amp) {
54470Sstevel@tonic-gate 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
54480Sstevel@tonic-gate 			an_idx = anon_index + seg_page(seg, va);
54490Sstevel@tonic-gate 			anon_array_enter(amp, an_idx, &cookie);
54500Sstevel@tonic-gate 			ap = anon_get_ptr(amp->ahp, an_idx);
54510Sstevel@tonic-gate 			if (ap)
54520Sstevel@tonic-gate 				swap_xlate(ap, &curvp, &off);
54530Sstevel@tonic-gate 			anon_array_exit(&cookie);
54540Sstevel@tonic-gate 			ANON_LOCK_EXIT(&amp->a_rwlock);
54550Sstevel@tonic-gate 		}
54560Sstevel@tonic-gate
54570Sstevel@tonic-gate 		pp = NULL;
54580Sstevel@tonic-gate 		if (curvp)
54590Sstevel@tonic-gate 			pp = page_lookup(curvp, off, SE_SHARED);
54600Sstevel@tonic-gate
54610Sstevel@tonic-gate 		/*
54620Sstevel@tonic-gate 		 * If there isn't a page at this virtual address,
54630Sstevel@tonic-gate 		 * skip to next page
54640Sstevel@tonic-gate 		 */
54650Sstevel@tonic-gate 		if (pp == NULL) {
54660Sstevel@tonic-gate 			va += PAGESIZE;
54670Sstevel@tonic-gate 			continue;
54680Sstevel@tonic-gate 		}
54690Sstevel@tonic-gate
54700Sstevel@tonic-gate 		/*
54710Sstevel@tonic-gate 		 * Figure out which lgroup this page is in for kstats
54720Sstevel@tonic-gate 		 */
54730Sstevel@tonic-gate 		pfn = page_pptonum(pp);
54740Sstevel@tonic-gate 		from = lgrp_pfn_to_lgrp(pfn);
54750Sstevel@tonic-gate
54760Sstevel@tonic-gate 		/*
54770Sstevel@tonic-gate 		 * Get page size, and round up and skip to next page boundary
54780Sstevel@tonic-gate 		 * if unaligned address
54790Sstevel@tonic-gate 		 */
54800Sstevel@tonic-gate 		pszc = pp->p_szc;
54810Sstevel@tonic-gate 		pgsz = page_get_pagesize(pszc);
54820Sstevel@tonic-gate 		pages = btop(pgsz);
54830Sstevel@tonic-gate 		if (!IS_P2ALIGNED(va, pgsz) ||
54840Sstevel@tonic-gate 		    !IS_P2ALIGNED(pfn, pages) ||
54850Sstevel@tonic-gate 		    pgsz > segpgsz) {
54860Sstevel@tonic-gate 			pgsz = MIN(pgsz, segpgsz);
54870Sstevel@tonic-gate 			page_unlock(pp);
54880Sstevel@tonic-gate 			i = btop(P2END((uintptr_t)va, pgsz) -
54890Sstevel@tonic-gate 			    (uintptr_t)va);
54900Sstevel@tonic-gate 			va = (caddr_t)P2END((uintptr_t)va, pgsz);
54910Sstevel@tonic-gate 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, i);
54920Sstevel@tonic-gate 			continue;
54930Sstevel@tonic-gate 		}
54940Sstevel@tonic-gate
54950Sstevel@tonic-gate 		/*
54960Sstevel@tonic-gate 		 * Upgrade to exclusive lock on page
54970Sstevel@tonic-gate 		 */
54980Sstevel@tonic-gate 		if (!page_tryupgrade(pp)) {
54990Sstevel@tonic-gate 			page_unlock(pp);
55000Sstevel@tonic-gate 			va += pgsz;
55010Sstevel@tonic-gate 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
55020Sstevel@tonic-gate 			    btop(pgsz));
55030Sstevel@tonic-gate 			continue;
55040Sstevel@tonic-gate 		}
55050Sstevel@tonic-gate
55060Sstevel@tonic-gate 		/*
55070Sstevel@tonic-gate 		 * Remember pages locked exclusively and how many
55080Sstevel@tonic-gate 		 */
55090Sstevel@tonic-gate 		ppa[0] = pp;
55100Sstevel@tonic-gate 		nlocked = 1;
55110Sstevel@tonic-gate
55120Sstevel@tonic-gate 		/*
55130Sstevel@tonic-gate 		 * Lock constituent pages if this is large page
55140Sstevel@tonic-gate 		 */
55150Sstevel@tonic-gate 		if (pages > 1) {
55160Sstevel@tonic-gate 			/*
55170Sstevel@tonic-gate 			 * Lock all constituents except root page, since it
55180Sstevel@tonic-gate 			 * should be locked already.
55190Sstevel@tonic-gate */ 55200Sstevel@tonic-gate for (i = 1; i < pages; i++) { 5521414Skchow pp++; 55220Sstevel@tonic-gate if (!page_trylock(pp, SE_EXCL)) { 55230Sstevel@tonic-gate break; 55240Sstevel@tonic-gate } 55250Sstevel@tonic-gate if (PP_ISFREE(pp) || 55260Sstevel@tonic-gate pp->p_szc != pszc) { 55270Sstevel@tonic-gate /* 55280Sstevel@tonic-gate * hat_page_demote() raced in with us. 55290Sstevel@tonic-gate */ 55300Sstevel@tonic-gate ASSERT(!IS_SWAPFSVP(curvp)); 55310Sstevel@tonic-gate page_unlock(pp); 55320Sstevel@tonic-gate break; 55330Sstevel@tonic-gate } 55340Sstevel@tonic-gate ppa[nlocked] = pp; 55350Sstevel@tonic-gate nlocked++; 55360Sstevel@tonic-gate } 55370Sstevel@tonic-gate } 55380Sstevel@tonic-gate 55390Sstevel@tonic-gate /* 55400Sstevel@tonic-gate * If all constituent pages couldn't be locked, 55410Sstevel@tonic-gate * unlock pages locked so far and skip to next page. 55420Sstevel@tonic-gate */ 55430Sstevel@tonic-gate if (nlocked != pages) { 55440Sstevel@tonic-gate for (i = 0; i < nlocked; i++) 55450Sstevel@tonic-gate page_unlock(ppa[i]); 55460Sstevel@tonic-gate va += pgsz; 55470Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 55480Sstevel@tonic-gate btop(pgsz)); 55490Sstevel@tonic-gate continue; 55500Sstevel@tonic-gate } 55510Sstevel@tonic-gate 55520Sstevel@tonic-gate /* 55530Sstevel@tonic-gate * hat_page_demote() can no longer happen 55540Sstevel@tonic-gate * since last cons page had the right p_szc after 55550Sstevel@tonic-gate * all cons pages were locked. all cons pages 55560Sstevel@tonic-gate * should now have the same p_szc. 55570Sstevel@tonic-gate */ 55580Sstevel@tonic-gate 55590Sstevel@tonic-gate /* 55600Sstevel@tonic-gate * All constituent pages locked successfully, so mark 55610Sstevel@tonic-gate * large page for migration and unload the mappings of 55620Sstevel@tonic-gate * constituent pages, so a fault will occur on any part of the 55630Sstevel@tonic-gate * large page 55640Sstevel@tonic-gate */ 55650Sstevel@tonic-gate PP_SETMIGRATE(ppa[0]); 55660Sstevel@tonic-gate for (i = 0; i < nlocked; i++) { 55670Sstevel@tonic-gate pp = ppa[i]; 55680Sstevel@tonic-gate (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 55690Sstevel@tonic-gate ASSERT(hat_page_getshare(pp) == 0); 55700Sstevel@tonic-gate page_unlock(pp); 55710Sstevel@tonic-gate } 55720Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked); 55730Sstevel@tonic-gate 55740Sstevel@tonic-gate va += pgsz; 55750Sstevel@tonic-gate } 55760Sstevel@tonic-gate kmem_free(ppa, ppa_nentries * sizeof (page_t *)); 55770Sstevel@tonic-gate } 55780Sstevel@tonic-gate 55790Sstevel@tonic-gate /* 55800Sstevel@tonic-gate * Migrate any pages that have been marked for migration in the given range 55810Sstevel@tonic-gate */ 55820Sstevel@tonic-gate void 55830Sstevel@tonic-gate page_migrate( 55840Sstevel@tonic-gate struct seg *seg, 55850Sstevel@tonic-gate caddr_t addr, 55860Sstevel@tonic-gate page_t **ppa, 55870Sstevel@tonic-gate pgcnt_t npages) 55880Sstevel@tonic-gate { 55890Sstevel@tonic-gate lgrp_t *from; 55900Sstevel@tonic-gate lgrp_t *to; 55910Sstevel@tonic-gate page_t *newpp; 55920Sstevel@tonic-gate page_t *pp; 55930Sstevel@tonic-gate pfn_t pfn; 55940Sstevel@tonic-gate size_t pgsz; 55950Sstevel@tonic-gate spgcnt_t page_cnt; 55960Sstevel@tonic-gate spgcnt_t i; 55970Sstevel@tonic-gate uint_t pszc; 55980Sstevel@tonic-gate 55990Sstevel@tonic-gate ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 56000Sstevel@tonic-gate 56010Sstevel@tonic-gate while (npages > 0) { 56020Sstevel@tonic-gate pp 
= *ppa; 56030Sstevel@tonic-gate pszc = pp->p_szc; 56040Sstevel@tonic-gate pgsz = page_get_pagesize(pszc); 56050Sstevel@tonic-gate page_cnt = btop(pgsz); 56060Sstevel@tonic-gate 56070Sstevel@tonic-gate /* 56080Sstevel@tonic-gate * Check to see whether this page is marked for migration 56090Sstevel@tonic-gate * 56100Sstevel@tonic-gate * Assume that root page of large page is marked for 56110Sstevel@tonic-gate * migration and none of the other constituent pages 56120Sstevel@tonic-gate * are marked. This really simplifies clearing the 56130Sstevel@tonic-gate * migrate bit by not having to clear it from each 56140Sstevel@tonic-gate * constituent page. 56150Sstevel@tonic-gate * 56160Sstevel@tonic-gate * note we don't want to relocate an entire large page if 56170Sstevel@tonic-gate * someone is only using one subpage. 56180Sstevel@tonic-gate */ 56190Sstevel@tonic-gate if (npages < page_cnt) 56200Sstevel@tonic-gate break; 56210Sstevel@tonic-gate 56220Sstevel@tonic-gate /* 56230Sstevel@tonic-gate * Is it marked for migration? 56240Sstevel@tonic-gate */ 56250Sstevel@tonic-gate if (!PP_ISMIGRATE(pp)) 56260Sstevel@tonic-gate goto next; 56270Sstevel@tonic-gate 56280Sstevel@tonic-gate /* 56290Sstevel@tonic-gate * Determine lgroups that page is being migrated between 56300Sstevel@tonic-gate */ 56310Sstevel@tonic-gate pfn = page_pptonum(pp); 56320Sstevel@tonic-gate if (!IS_P2ALIGNED(pfn, page_cnt)) { 56330Sstevel@tonic-gate break; 56340Sstevel@tonic-gate } 56350Sstevel@tonic-gate from = lgrp_pfn_to_lgrp(pfn); 56360Sstevel@tonic-gate to = lgrp_mem_choose(seg, addr, pgsz); 56370Sstevel@tonic-gate 56380Sstevel@tonic-gate /* 56390Sstevel@tonic-gate * Check to see whether we are trying to migrate page to lgroup 56400Sstevel@tonic-gate * where it is allocated already 56410Sstevel@tonic-gate */ 56420Sstevel@tonic-gate if (to == from) { 56430Sstevel@tonic-gate PP_CLRMIGRATE(pp); 56440Sstevel@tonic-gate goto next; 56450Sstevel@tonic-gate } 56460Sstevel@tonic-gate 56470Sstevel@tonic-gate /* 56480Sstevel@tonic-gate * Need to get exclusive lock's to migrate 56490Sstevel@tonic-gate */ 56500Sstevel@tonic-gate for (i = 0; i < page_cnt; i++) { 56510Sstevel@tonic-gate ASSERT(PAGE_LOCKED(ppa[i])); 56520Sstevel@tonic-gate if (page_pptonum(ppa[i]) != pfn + i || 56530Sstevel@tonic-gate ppa[i]->p_szc != pszc) { 56540Sstevel@tonic-gate break; 56550Sstevel@tonic-gate } 56560Sstevel@tonic-gate if (!page_tryupgrade(ppa[i])) { 56570Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, 56580Sstevel@tonic-gate LGRP_PM_FAIL_LOCK_PGS, 56590Sstevel@tonic-gate page_cnt); 56600Sstevel@tonic-gate break; 56610Sstevel@tonic-gate } 56620Sstevel@tonic-gate } 56630Sstevel@tonic-gate if (i != page_cnt) { 56640Sstevel@tonic-gate while (--i != -1) { 56650Sstevel@tonic-gate page_downgrade(ppa[i]); 56660Sstevel@tonic-gate } 56670Sstevel@tonic-gate goto next; 56680Sstevel@tonic-gate } 56690Sstevel@tonic-gate 56700Sstevel@tonic-gate (void) page_create_wait(page_cnt, PG_WAIT); 56710Sstevel@tonic-gate newpp = page_get_replacement_page(pp, to, PGR_SAMESZC); 56720Sstevel@tonic-gate if (newpp == NULL) { 56730Sstevel@tonic-gate page_create_putback(page_cnt); 56740Sstevel@tonic-gate for (i = 0; i < page_cnt; i++) { 56750Sstevel@tonic-gate page_downgrade(ppa[i]); 56760Sstevel@tonic-gate } 56770Sstevel@tonic-gate lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS, 56780Sstevel@tonic-gate page_cnt); 56790Sstevel@tonic-gate goto next; 56800Sstevel@tonic-gate } 56810Sstevel@tonic-gate ASSERT(newpp->p_szc == pszc); 56820Sstevel@tonic-gate /* 56830Sstevel@tonic-gate * 
Clear migrate bit and relocate page
56840Sstevel@tonic-gate 		 */
56850Sstevel@tonic-gate 		PP_CLRMIGRATE(pp);
56860Sstevel@tonic-gate 		if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) {
56870Sstevel@tonic-gate 			panic("page_migrate: page_relocate failed");
56880Sstevel@tonic-gate 		}
56890Sstevel@tonic-gate 		ASSERT(page_cnt * PAGESIZE == pgsz);
56900Sstevel@tonic-gate
56910Sstevel@tonic-gate 		/*
56920Sstevel@tonic-gate 		 * Keep stats for number of pages migrated from and to
56930Sstevel@tonic-gate 		 * each lgroup
56940Sstevel@tonic-gate 		 */
56950Sstevel@tonic-gate 		lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt);
56960Sstevel@tonic-gate 		lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt);
56970Sstevel@tonic-gate 		/*
56980Sstevel@tonic-gate 		 * update the page_t array we were passed in and
56990Sstevel@tonic-gate 		 * unlink constituent pages of a large page.
57000Sstevel@tonic-gate 		 */
57010Sstevel@tonic-gate 		for (i = 0; i < page_cnt; ++i, ++pp) {
57020Sstevel@tonic-gate 			ASSERT(PAGE_EXCL(newpp));
57030Sstevel@tonic-gate 			ASSERT(newpp->p_szc == pszc);
57040Sstevel@tonic-gate 			ppa[i] = newpp;
57050Sstevel@tonic-gate 			pp = newpp;
57060Sstevel@tonic-gate 			page_sub(&newpp, pp);
57070Sstevel@tonic-gate 			page_downgrade(pp);
57080Sstevel@tonic-gate 		}
57090Sstevel@tonic-gate 		ASSERT(newpp == NULL);
57100Sstevel@tonic-gate next:
57110Sstevel@tonic-gate 		addr += pgsz;
57120Sstevel@tonic-gate 		ppa += page_cnt;
57130Sstevel@tonic-gate 		npages -= page_cnt;
57140Sstevel@tonic-gate 	}
57150Sstevel@tonic-gate }
57160Sstevel@tonic-gate
57170Sstevel@tonic-gate ulong_t mem_waiters = 0;
57180Sstevel@tonic-gate ulong_t max_count = 20;
57190Sstevel@tonic-gate #define	MAX_DELAY	0x1ff
57200Sstevel@tonic-gate
57210Sstevel@tonic-gate /*
57220Sstevel@tonic-gate  * Check if enough memory is available to proceed.
57230Sstevel@tonic-gate  * Depending on system configuration and how much memory is
57240Sstevel@tonic-gate  * reserved for swap we need to check against two variables.
57250Sstevel@tonic-gate  * e.g. on systems with little physical swap availrmem can be
57260Sstevel@tonic-gate  * a more reliable indicator of how much memory is available.
57270Sstevel@tonic-gate  * On systems with large physical swap freemem can be a better indicator.
57280Sstevel@tonic-gate  * If freemem drops below the threshold level don't return an error
57290Sstevel@tonic-gate  * immediately but wake up pageout to free memory and block.
57300Sstevel@tonic-gate  * This is done a number of times. If pageout is not able to free
57310Sstevel@tonic-gate  * memory within a certain time, return an error.
57320Sstevel@tonic-gate  * The same applies for availrmem, but kmem_reap is used to
57330Sstevel@tonic-gate  * free memory.
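 *
 * Editorial sketch of a hypothetical caller (names and the error path are
 * illustrative, not from this file): a subsystem about to consume 'size'
 * bytes of memory could gate itself on this routine and fail softly
 * rather than push the system into a shortfall:
 *
 *	pgcnt_t npages = btopr(size);
 *
 *	if (!page_mem_avail(npages))
 *		return (ENOMEM);	pageout/kmem_reap could not help in time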
57340Sstevel@tonic-gate  */
57350Sstevel@tonic-gate int
57360Sstevel@tonic-gate page_mem_avail(pgcnt_t npages)
57370Sstevel@tonic-gate {
57380Sstevel@tonic-gate 	ulong_t count;
57390Sstevel@tonic-gate
57400Sstevel@tonic-gate #if defined(__i386)
57410Sstevel@tonic-gate 	if (freemem > desfree + npages &&
57420Sstevel@tonic-gate 	    availrmem > swapfs_reserve + npages &&
57430Sstevel@tonic-gate 	    btop(vmem_size(heap_arena, VMEM_FREE)) > tune.t_minarmem +
57440Sstevel@tonic-gate 	    npages)
57450Sstevel@tonic-gate 		return (1);
57460Sstevel@tonic-gate #else
57470Sstevel@tonic-gate 	if (freemem > desfree + npages &&
57480Sstevel@tonic-gate 	    availrmem > swapfs_reserve + npages)
57490Sstevel@tonic-gate 		return (1);
57500Sstevel@tonic-gate #endif
57510Sstevel@tonic-gate
57520Sstevel@tonic-gate 	count = max_count;
57530Sstevel@tonic-gate 	atomic_add_long(&mem_waiters, 1);
57540Sstevel@tonic-gate
57550Sstevel@tonic-gate 	while (freemem < desfree + npages && --count) {
57560Sstevel@tonic-gate 		cv_signal(&proc_pageout->p_cv);
57570Sstevel@tonic-gate 		if (delay_sig(hz + (mem_waiters & MAX_DELAY))) {
57580Sstevel@tonic-gate 			atomic_add_long(&mem_waiters, -1);
57590Sstevel@tonic-gate 			return (0);
57600Sstevel@tonic-gate 		}
57610Sstevel@tonic-gate 	}
57620Sstevel@tonic-gate 	if (count == 0) {
57630Sstevel@tonic-gate 		atomic_add_long(&mem_waiters, -1);
57640Sstevel@tonic-gate 		return (0);
57650Sstevel@tonic-gate 	}
57660Sstevel@tonic-gate
57670Sstevel@tonic-gate 	count = max_count;
57680Sstevel@tonic-gate 	while (availrmem < swapfs_reserve + npages && --count) {
57690Sstevel@tonic-gate 		kmem_reap();
57700Sstevel@tonic-gate 		if (delay_sig(hz + (mem_waiters & MAX_DELAY))) {
57710Sstevel@tonic-gate 			atomic_add_long(&mem_waiters, -1);
57720Sstevel@tonic-gate 			return (0);
57730Sstevel@tonic-gate 		}
57740Sstevel@tonic-gate 	}
57750Sstevel@tonic-gate 	atomic_add_long(&mem_waiters, -1);
57760Sstevel@tonic-gate 	if (count == 0)
57770Sstevel@tonic-gate 		return (0);
57780Sstevel@tonic-gate
57790Sstevel@tonic-gate #if defined(__i386)
57800Sstevel@tonic-gate 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
57810Sstevel@tonic-gate 	    tune.t_minarmem + npages)
57820Sstevel@tonic-gate 		return (0);
57830Sstevel@tonic-gate #endif
57840Sstevel@tonic-gate 	return (1);
57850Sstevel@tonic-gate }
57860Sstevel@tonic-gate
57872048Sstans #define	MAX_CNT	60	/* max num of iterations */
57882048Sstans /*
57892048Sstans  * Reclaim/reserve availrmem for npages.
57902048Sstans  * If there is not enough memory, start reaping the seg and kmem caches.
57912048Sstans  * Start the pageout scanner (via page_needfree()).
57922048Sstans  * Exit after ~MAX_CNT seconds regardless of how much memory has been released.
57932048Sstans  * Note: There is no guarantee that any availrmem will be freed as
57942048Sstans  * this memory typically is locked (kernel heap) or reserved for swap.
57952048Sstans  * Also, due to memory fragmentation the kmem allocator may not be able
57962048Sstans  * to free any memory (a single user-allocated buffer can prevent
57972048Sstans  * freeing a slab or a page).
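 *
 * Editorial sketch of a hypothetical caller (illustrative only): with
 * adjust != 0 a successful return has already debited availrmem, so a
 * caller that later releases the memory would typically credit it back
 * under freemem_lock, as the other availrmem accounting in this file does:
 *
 *	if (!page_reclaim_mem(npages, 0, 1))
 *		return (EAGAIN);	could not reserve within ~MAX_CNT seconds
 *	...
 *	mutex_enter(&freemem_lock);
 *	availrmem += npages;
 *	mutex_exit(&freemem_lock);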
57982048Sstans */ 57992048Sstans int 58002048Sstans page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust) 58012048Sstans { 58022048Sstans int i = 0; 58032048Sstans int ret = 0; 58042048Sstans pgcnt_t deficit; 58052048Sstans pgcnt_t old_availrmem; 58062048Sstans 58072048Sstans mutex_enter(&freemem_lock); 58082048Sstans old_availrmem = availrmem - 1; 58092048Sstans while ((availrmem < tune.t_minarmem + npages + epages) && 58102048Sstans (old_availrmem < availrmem) && (i++ < MAX_CNT)) { 58112048Sstans old_availrmem = availrmem; 58122048Sstans deficit = tune.t_minarmem + npages + epages - availrmem; 58132048Sstans mutex_exit(&freemem_lock); 58142048Sstans page_needfree(deficit); 58152048Sstans seg_preap(); 58162048Sstans kmem_reap(); 58172048Sstans delay(hz); 58182048Sstans page_needfree(-(spgcnt_t)deficit); 58192048Sstans mutex_enter(&freemem_lock); 58202048Sstans } 58212048Sstans 58222048Sstans if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) { 58232048Sstans availrmem -= npages; 58242048Sstans ret = 1; 58252048Sstans } 58262048Sstans 58272048Sstans mutex_exit(&freemem_lock); 58282048Sstans 58292048Sstans return (ret); 58302048Sstans } 58310Sstevel@tonic-gate 58320Sstevel@tonic-gate /* 58330Sstevel@tonic-gate * Search the memory segments to locate the desired page. Within a 58340Sstevel@tonic-gate * segment, pages increase linearly with one page structure per 58350Sstevel@tonic-gate * physical page frame (size PAGESIZE). The search begins 58360Sstevel@tonic-gate * with the segment that was accessed last, to take advantage of locality. 58370Sstevel@tonic-gate * If the hint misses, we start from the beginning of the sorted memseg list 58380Sstevel@tonic-gate */ 58390Sstevel@tonic-gate 58400Sstevel@tonic-gate 58410Sstevel@tonic-gate /* 58420Sstevel@tonic-gate * Some data structures for pfn to pp lookup. 
58430Sstevel@tonic-gate */ 58440Sstevel@tonic-gate ulong_t mhash_per_slot; 58450Sstevel@tonic-gate struct memseg *memseg_hash[N_MEM_SLOTS]; 58460Sstevel@tonic-gate 58470Sstevel@tonic-gate page_t * 58480Sstevel@tonic-gate page_numtopp_nolock(pfn_t pfnum) 58490Sstevel@tonic-gate { 58500Sstevel@tonic-gate struct memseg *seg; 58510Sstevel@tonic-gate page_t *pp; 5852414Skchow vm_cpu_data_t *vc = CPU->cpu_vm_data; 5853414Skchow 5854414Skchow ASSERT(vc != NULL); 58550Sstevel@tonic-gate 58560Sstevel@tonic-gate MEMSEG_STAT_INCR(nsearch); 58570Sstevel@tonic-gate 58580Sstevel@tonic-gate /* Try last winner first */ 5859414Skchow if (((seg = vc->vc_pnum_memseg) != NULL) && 58600Sstevel@tonic-gate (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 58610Sstevel@tonic-gate MEMSEG_STAT_INCR(nlastwon); 58620Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base); 58630Sstevel@tonic-gate if (pp->p_pagenum == pfnum) 58640Sstevel@tonic-gate return ((page_t *)pp); 58650Sstevel@tonic-gate } 58660Sstevel@tonic-gate 58670Sstevel@tonic-gate /* Else Try hash */ 58680Sstevel@tonic-gate if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 58690Sstevel@tonic-gate (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 58700Sstevel@tonic-gate MEMSEG_STAT_INCR(nhashwon); 5871414Skchow vc->vc_pnum_memseg = seg; 58720Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base); 58730Sstevel@tonic-gate if (pp->p_pagenum == pfnum) 58740Sstevel@tonic-gate return ((page_t *)pp); 58750Sstevel@tonic-gate } 58760Sstevel@tonic-gate 58770Sstevel@tonic-gate /* Else Brute force */ 58780Sstevel@tonic-gate for (seg = memsegs; seg != NULL; seg = seg->next) { 58790Sstevel@tonic-gate if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5880414Skchow vc->vc_pnum_memseg = seg; 58810Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base); 58820Sstevel@tonic-gate return ((page_t *)pp); 58830Sstevel@tonic-gate } 58840Sstevel@tonic-gate } 5885414Skchow vc->vc_pnum_memseg = NULL; 58860Sstevel@tonic-gate MEMSEG_STAT_INCR(nnotfound); 58870Sstevel@tonic-gate return ((page_t *)NULL); 58880Sstevel@tonic-gate 58890Sstevel@tonic-gate } 58900Sstevel@tonic-gate 58910Sstevel@tonic-gate struct memseg * 58920Sstevel@tonic-gate page_numtomemseg_nolock(pfn_t pfnum) 58930Sstevel@tonic-gate { 58940Sstevel@tonic-gate struct memseg *seg; 58950Sstevel@tonic-gate page_t *pp; 58960Sstevel@tonic-gate 58970Sstevel@tonic-gate /* Try hash */ 58980Sstevel@tonic-gate if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 58990Sstevel@tonic-gate (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 59000Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base); 59010Sstevel@tonic-gate if (pp->p_pagenum == pfnum) 59020Sstevel@tonic-gate return (seg); 59030Sstevel@tonic-gate } 59040Sstevel@tonic-gate 59050Sstevel@tonic-gate /* Else Brute force */ 59060Sstevel@tonic-gate for (seg = memsegs; seg != NULL; seg = seg->next) { 59070Sstevel@tonic-gate if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 59080Sstevel@tonic-gate return (seg); 59090Sstevel@tonic-gate } 59100Sstevel@tonic-gate } 59110Sstevel@tonic-gate return ((struct memseg *)NULL); 59120Sstevel@tonic-gate } 59130Sstevel@tonic-gate 59140Sstevel@tonic-gate /* 59150Sstevel@tonic-gate * Given a page and a count return the page struct that is 59160Sstevel@tonic-gate * n structs away from the current one in the global page 59170Sstevel@tonic-gate * list. 
59180Sstevel@tonic-gate * 59190Sstevel@tonic-gate * This function wraps to the first page upon 59200Sstevel@tonic-gate * reaching the end of the memseg list. 59210Sstevel@tonic-gate */ 59220Sstevel@tonic-gate page_t * 59230Sstevel@tonic-gate page_nextn(page_t *pp, ulong_t n) 59240Sstevel@tonic-gate { 59250Sstevel@tonic-gate struct memseg *seg; 59260Sstevel@tonic-gate page_t *ppn; 5927414Skchow vm_cpu_data_t *vc = (vm_cpu_data_t *)CPU->cpu_vm_data; 5928414Skchow 5929414Skchow ASSERT(vc != NULL); 5930414Skchow 5931414Skchow if (((seg = vc->vc_pnext_memseg) == NULL) || 59320Sstevel@tonic-gate (seg->pages_base == seg->pages_end) || 59330Sstevel@tonic-gate !(pp >= seg->pages && pp < seg->epages)) { 59340Sstevel@tonic-gate 59350Sstevel@tonic-gate for (seg = memsegs; seg; seg = seg->next) { 59360Sstevel@tonic-gate if (pp >= seg->pages && pp < seg->epages) 59370Sstevel@tonic-gate break; 59380Sstevel@tonic-gate } 59390Sstevel@tonic-gate 59400Sstevel@tonic-gate if (seg == NULL) { 59410Sstevel@tonic-gate /* Memory delete got in, return something valid. */ 59420Sstevel@tonic-gate /* TODO: fix me. */ 59430Sstevel@tonic-gate seg = memsegs; 59440Sstevel@tonic-gate pp = seg->pages; 59450Sstevel@tonic-gate } 59460Sstevel@tonic-gate } 59470Sstevel@tonic-gate 59480Sstevel@tonic-gate /* check for wraparound - possible if n is large */ 59490Sstevel@tonic-gate while ((ppn = (pp + n)) >= seg->epages || ppn < pp) { 59500Sstevel@tonic-gate n -= seg->epages - pp; 59510Sstevel@tonic-gate seg = seg->next; 59520Sstevel@tonic-gate if (seg == NULL) 59530Sstevel@tonic-gate seg = memsegs; 59540Sstevel@tonic-gate pp = seg->pages; 59550Sstevel@tonic-gate } 5956414Skchow vc->vc_pnext_memseg = seg; 59570Sstevel@tonic-gate return (ppn); 59580Sstevel@tonic-gate } 59590Sstevel@tonic-gate 59600Sstevel@tonic-gate /* 59610Sstevel@tonic-gate * Initialize for a loop using page_next_scan_large(). 59620Sstevel@tonic-gate */ 59630Sstevel@tonic-gate page_t * 59640Sstevel@tonic-gate page_next_scan_init(void **cookie) 59650Sstevel@tonic-gate { 59660Sstevel@tonic-gate ASSERT(cookie != NULL); 59670Sstevel@tonic-gate *cookie = (void *)memsegs; 59680Sstevel@tonic-gate return ((page_t *)memsegs->pages); 59690Sstevel@tonic-gate } 59700Sstevel@tonic-gate 59710Sstevel@tonic-gate /* 59720Sstevel@tonic-gate * Return the next page in a scan of page_t's, assuming we want 59730Sstevel@tonic-gate * to skip over sub-pages within larger page sizes. 59740Sstevel@tonic-gate * 59750Sstevel@tonic-gate * The cookie is used to keep track of the current memseg. 
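 *
 * Editorial sketch (hypothetical) of a calling pattern: the scan cookie
 * comes from page_next_scan_init(), the running count bounds the walk
 * (total_pages is a natural bound since the walk wraps), and each step
 * lands on the base constituent page of the next szc region:
 *
 *	void	*cookie;
 *	ulong_t	cnt = 0;
 *	page_t	*pp;
 *
 *	pp = page_next_scan_init(&cookie);
 *	while (cnt < total_pages) {
 *		(examine pp here)
 *		pp = page_next_scan_large(pp, &cnt, &cookie);
 *	}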
59760Sstevel@tonic-gate */ 59770Sstevel@tonic-gate page_t * 59780Sstevel@tonic-gate page_next_scan_large( 59790Sstevel@tonic-gate page_t *pp, 59800Sstevel@tonic-gate ulong_t *n, 59810Sstevel@tonic-gate void **cookie) 59820Sstevel@tonic-gate { 59830Sstevel@tonic-gate struct memseg *seg = (struct memseg *)*cookie; 59840Sstevel@tonic-gate page_t *new_pp; 59850Sstevel@tonic-gate ulong_t cnt; 59860Sstevel@tonic-gate pfn_t pfn; 59870Sstevel@tonic-gate 59880Sstevel@tonic-gate 59890Sstevel@tonic-gate /* 59900Sstevel@tonic-gate * get the count of page_t's to skip based on the page size 59910Sstevel@tonic-gate */ 59920Sstevel@tonic-gate ASSERT(pp != NULL); 59930Sstevel@tonic-gate if (pp->p_szc == 0) { 59940Sstevel@tonic-gate cnt = 1; 59950Sstevel@tonic-gate } else { 59960Sstevel@tonic-gate pfn = page_pptonum(pp); 59970Sstevel@tonic-gate cnt = page_get_pagecnt(pp->p_szc); 59980Sstevel@tonic-gate cnt -= pfn & (cnt - 1); 59990Sstevel@tonic-gate } 60000Sstevel@tonic-gate *n += cnt; 60010Sstevel@tonic-gate new_pp = pp + cnt; 60020Sstevel@tonic-gate 60030Sstevel@tonic-gate /* 60040Sstevel@tonic-gate * Catch if we went past the end of the current memory segment. If so, 60050Sstevel@tonic-gate * just move to the next segment with pages. 60060Sstevel@tonic-gate */ 60070Sstevel@tonic-gate if (new_pp >= seg->epages) { 60080Sstevel@tonic-gate do { 60090Sstevel@tonic-gate seg = seg->next; 60100Sstevel@tonic-gate if (seg == NULL) 60110Sstevel@tonic-gate seg = memsegs; 60120Sstevel@tonic-gate } while (seg->pages == seg->epages); 60130Sstevel@tonic-gate new_pp = seg->pages; 60140Sstevel@tonic-gate *cookie = (void *)seg; 60150Sstevel@tonic-gate } 60160Sstevel@tonic-gate 60170Sstevel@tonic-gate return (new_pp); 60180Sstevel@tonic-gate } 60190Sstevel@tonic-gate 60200Sstevel@tonic-gate 60210Sstevel@tonic-gate /* 60220Sstevel@tonic-gate * Returns next page in list. Note: this function wraps 60230Sstevel@tonic-gate * to the first page in the list upon reaching the end 60240Sstevel@tonic-gate * of the list. Callers should be aware of this fact. 60250Sstevel@tonic-gate */ 60260Sstevel@tonic-gate 60270Sstevel@tonic-gate /* We should change this be a #define */ 60280Sstevel@tonic-gate 60290Sstevel@tonic-gate page_t * 60300Sstevel@tonic-gate page_next(page_t *pp) 60310Sstevel@tonic-gate { 60320Sstevel@tonic-gate return (page_nextn(pp, 1)); 60330Sstevel@tonic-gate } 60340Sstevel@tonic-gate 60350Sstevel@tonic-gate page_t * 60360Sstevel@tonic-gate page_first() 60370Sstevel@tonic-gate { 60380Sstevel@tonic-gate return ((page_t *)memsegs->pages); 60390Sstevel@tonic-gate } 60400Sstevel@tonic-gate 60410Sstevel@tonic-gate 60420Sstevel@tonic-gate /* 60430Sstevel@tonic-gate * This routine is called at boot with the initial memory configuration 60440Sstevel@tonic-gate * and when memory is added or removed. 60450Sstevel@tonic-gate */ 60460Sstevel@tonic-gate void 60470Sstevel@tonic-gate build_pfn_hash() 60480Sstevel@tonic-gate { 60490Sstevel@tonic-gate pfn_t cur; 60500Sstevel@tonic-gate pgcnt_t index; 60510Sstevel@tonic-gate struct memseg *pseg; 60520Sstevel@tonic-gate int i; 60530Sstevel@tonic-gate 60540Sstevel@tonic-gate /* 60550Sstevel@tonic-gate * Clear memseg_hash array. 60560Sstevel@tonic-gate * Since memory add/delete is designed to operate concurrently 60570Sstevel@tonic-gate * with normal operation, the hash rebuild must be able to run 60580Sstevel@tonic-gate * concurrently with page_numtopp_nolock(). 
To support this 60590Sstevel@tonic-gate * functionality, assignments to memseg_hash array members must 60600Sstevel@tonic-gate * be done atomically. 60610Sstevel@tonic-gate * 60620Sstevel@tonic-gate * NOTE: bzero() does not currently guarantee this for kernel 60630Sstevel@tonic-gate * threads, and cannot be used here. 60640Sstevel@tonic-gate */ 60650Sstevel@tonic-gate for (i = 0; i < N_MEM_SLOTS; i++) 60660Sstevel@tonic-gate memseg_hash[i] = NULL; 60670Sstevel@tonic-gate 60680Sstevel@tonic-gate hat_kpm_mseghash_clear(N_MEM_SLOTS); 60690Sstevel@tonic-gate 60700Sstevel@tonic-gate /* 60710Sstevel@tonic-gate * Physmax is the last valid pfn. 60720Sstevel@tonic-gate */ 60730Sstevel@tonic-gate mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT; 60740Sstevel@tonic-gate for (pseg = memsegs; pseg != NULL; pseg = pseg->next) { 60750Sstevel@tonic-gate index = MEMSEG_PFN_HASH(pseg->pages_base); 60760Sstevel@tonic-gate cur = pseg->pages_base; 60770Sstevel@tonic-gate do { 60780Sstevel@tonic-gate if (index >= N_MEM_SLOTS) 60790Sstevel@tonic-gate index = MEMSEG_PFN_HASH(cur); 60800Sstevel@tonic-gate 60810Sstevel@tonic-gate if (memseg_hash[index] == NULL || 60820Sstevel@tonic-gate memseg_hash[index]->pages_base > pseg->pages_base) { 60830Sstevel@tonic-gate memseg_hash[index] = pseg; 60840Sstevel@tonic-gate hat_kpm_mseghash_update(index, pseg); 60850Sstevel@tonic-gate } 60860Sstevel@tonic-gate cur += mhash_per_slot; 60870Sstevel@tonic-gate index++; 60880Sstevel@tonic-gate } while (cur < pseg->pages_end); 60890Sstevel@tonic-gate } 60900Sstevel@tonic-gate } 60910Sstevel@tonic-gate 60920Sstevel@tonic-gate /* 60930Sstevel@tonic-gate * Return the pagenum for the pp 60940Sstevel@tonic-gate */ 60950Sstevel@tonic-gate pfn_t 60960Sstevel@tonic-gate page_pptonum(page_t *pp) 60970Sstevel@tonic-gate { 60980Sstevel@tonic-gate return (pp->p_pagenum); 60990Sstevel@tonic-gate } 61000Sstevel@tonic-gate 61010Sstevel@tonic-gate /* 61020Sstevel@tonic-gate * interface to the referenced and modified etc bits 61030Sstevel@tonic-gate * in the PSM part of the page struct 61040Sstevel@tonic-gate * when no locking is desired. 61050Sstevel@tonic-gate */ 61060Sstevel@tonic-gate void 61070Sstevel@tonic-gate page_set_props(page_t *pp, uint_t flags) 61080Sstevel@tonic-gate { 61090Sstevel@tonic-gate ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0); 61100Sstevel@tonic-gate pp->p_nrm |= (uchar_t)flags; 61110Sstevel@tonic-gate } 61120Sstevel@tonic-gate 61130Sstevel@tonic-gate void 61140Sstevel@tonic-gate page_clr_all_props(page_t *pp) 61150Sstevel@tonic-gate { 61160Sstevel@tonic-gate pp->p_nrm = 0; 61170Sstevel@tonic-gate } 61180Sstevel@tonic-gate 61190Sstevel@tonic-gate /* 6120917Selowe * Clear p_lckcnt and p_cowcnt, adjusting freemem if required. 6121917Selowe */ 6122917Selowe int 6123917Selowe page_clear_lck_cow(page_t *pp, int adjust) 6124917Selowe { 6125917Selowe int f_amount; 6126917Selowe 6127917Selowe ASSERT(PAGE_EXCL(pp)); 6128917Selowe 6129917Selowe /* 6130917Selowe * The page_struct_lock need not be acquired here since 6131917Selowe * we require the caller hold the page exclusively locked. 
6132917Selowe 	 */
6133917Selowe 	f_amount = 0;
6134917Selowe 	if (pp->p_lckcnt) {
6135917Selowe 		f_amount = 1;
6136917Selowe 		pp->p_lckcnt = 0;
6137917Selowe 	}
6138917Selowe 	if (pp->p_cowcnt) {
6139917Selowe 		f_amount += pp->p_cowcnt;
6140917Selowe 		pp->p_cowcnt = 0;
6141917Selowe 	}
6142917Selowe
6143917Selowe 	if (adjust && f_amount) {
6144917Selowe 		mutex_enter(&freemem_lock);
6145917Selowe 		availrmem += f_amount;
6146917Selowe 		mutex_exit(&freemem_lock);
6147917Selowe 	}
6148917Selowe
6149917Selowe 	return (f_amount);
6150917Selowe }
6151917Selowe
6152917Selowe /*
61530Sstevel@tonic-gate  * The following function is called from free_vp_pages()
61540Sstevel@tonic-gate  * for an inexact estimate of a newly free'd page...
61550Sstevel@tonic-gate  */
61560Sstevel@tonic-gate ulong_t
61570Sstevel@tonic-gate page_share_cnt(page_t *pp)
61580Sstevel@tonic-gate {
61590Sstevel@tonic-gate 	return (hat_page_getshare(pp));
61600Sstevel@tonic-gate }
61610Sstevel@tonic-gate
61620Sstevel@tonic-gate int
61630Sstevel@tonic-gate page_isshared(page_t *pp)
61640Sstevel@tonic-gate {
61650Sstevel@tonic-gate 	return (hat_page_getshare(pp) > 1);
61660Sstevel@tonic-gate }
61670Sstevel@tonic-gate
61680Sstevel@tonic-gate int
61690Sstevel@tonic-gate page_isfree(page_t *pp)
61700Sstevel@tonic-gate {
61710Sstevel@tonic-gate 	return (PP_ISFREE(pp));
61720Sstevel@tonic-gate }
61730Sstevel@tonic-gate
61740Sstevel@tonic-gate int
61750Sstevel@tonic-gate page_isref(page_t *pp)
61760Sstevel@tonic-gate {
61770Sstevel@tonic-gate 	return (hat_page_getattr(pp, P_REF));
61780Sstevel@tonic-gate }
61790Sstevel@tonic-gate
61800Sstevel@tonic-gate int
61810Sstevel@tonic-gate page_ismod(page_t *pp)
61820Sstevel@tonic-gate {
61830Sstevel@tonic-gate 	return (hat_page_getattr(pp, P_MOD));
61840Sstevel@tonic-gate }
6185
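/*
 * Editorial example, not part of the original file: a minimal sketch of
 * how the walkers and predicates above could be combined to take an
 * inexact census of modified pages.  page_next() wraps around the memseg
 * list, so the walk is bounded by total_pages, and nothing here locks the
 * pages, so the count is only advisory.  Guarded out so it is never built.
 */
#ifdef notdef
static pgcnt_t
page_count_mod_sketch(void)
{
	page_t	*pp = page_first();
	pgcnt_t	n;
	pgcnt_t	nmod = 0;

	for (n = 0; n < total_pages; n++) {
		if (page_ismod(pp))
			nmod++;
		pp = page_next(pp);
	}
	return (nmod);
}
#endif	/* notdef */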