/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/tuneable.h>
#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/as.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <sys/buf.h>
#include <sys/swap.h>
#include <sys/atomic.h>
#include <vm/seg_spt.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/shm.h>
#include <sys/shm_impl.h>
#include <sys/lgrp.h>
#include <sys/vmsystm.h>
#include <sys/policy.h>
#include <sys/project.h>
#include <sys/tnf_probe.h>
#include <sys/zone.h>

#define	SEGSPTADDR	(caddr_t)0x0

/*
 * # pages used for spt
 */
size_t	spt_used;

/*
 * segspt_minfree is the memory left for the system after ISM
 * locks its pages; it is set up to 5% of availrmem in
 * sptcreate when ISM is created. ISM should not use more
 * than ~90% of availrmem; if it does, then the performance
 * of the system may decrease. Machines with large memories may
 * be able to use up more memory for ISM, so we set the default
 * segspt_minfree to 5% (which gives ISM a maximum of 95% of
 * availrmem). If somebody wants even more memory for ISM (risking
 * hanging the system) they can patch segspt_minfree to a smaller
 * number.
 */
pgcnt_t segspt_minfree = 0;
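
/*
 * Worked example (added note): on a system where availrmem is
 * 1,000,000 pages, sptcreate() sets segspt_minfree to
 * 1,000,000 / 20 = 50,000 pages, so ISM/DISM may lock at most
 * 950,000 pages (95%) before running into this floor.
 */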

static int segspt_create(struct seg *seg, caddr_t argsp);
static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_free(struct seg *seg);
static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);

static void
segspt_badop()
{
	panic("segspt_badop called");
	/*NOTREACHED*/
}

#define	SEGSPT_BADOP(t)	(t(*)())segspt_badop

struct seg_ops segspt_ops = {
	SEGSPT_BADOP(int),		/* dup */
	segspt_unmap,
	segspt_free,
	SEGSPT_BADOP(int),		/* fault */
	SEGSPT_BADOP(faultcode_t),	/* faulta */
	SEGSPT_BADOP(int),		/* setprot */
	SEGSPT_BADOP(int),		/* checkprot */
	SEGSPT_BADOP(int),		/* kluster */
	SEGSPT_BADOP(size_t),		/* swapout */
	SEGSPT_BADOP(int),		/* sync */
	SEGSPT_BADOP(size_t),		/* incore */
	SEGSPT_BADOP(int),		/* lockop */
	SEGSPT_BADOP(int),		/* getprot */
	SEGSPT_BADOP(u_offset_t),	/* getoffset */
	SEGSPT_BADOP(int),		/* gettype */
	SEGSPT_BADOP(int),		/* getvp */
	SEGSPT_BADOP(int),		/* advise */
	SEGSPT_BADOP(void),		/* dump */
	SEGSPT_BADOP(int),		/* pagelock */
	SEGSPT_BADOP(int),		/* setpgsz */
	SEGSPT_BADOP(int),		/* getmemid */
	segspt_getpolicy,		/* getpolicy */
	SEGSPT_BADOP(int),		/* capable */
};

static int segspt_shmdup(struct seg *seg, struct seg *newseg);
static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_shmfree(struct seg *seg);
static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
		caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
		register size_t len, register uint_t prot);
static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
		uint_t prot);
static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t segspt_shmswapout(struct seg *seg);
static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
		register char *vec);
static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
		int attr, uint_t flags);
static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
		int attr, int op, ulong_t *lockmap, size_t pos);
static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
		uint_t *protv);
static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
static int segspt_shmgettype(struct seg *seg, caddr_t addr);
static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
		uint_t behav);
static void segspt_shmdump(struct seg *seg);
static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
		struct page ***, enum lock_type, enum seg_rw);
static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
static int segspt_shmcapable(struct seg *, segcapability_t);

struct seg_ops segspt_shmops = {
	segspt_shmdup,
	segspt_shmunmap,
	segspt_shmfree,
	segspt_shmfault,
	segspt_shmfaulta,
	segspt_shmsetprot,
	segspt_shmcheckprot,
	segspt_shmkluster,
	segspt_shmswapout,
	segspt_shmsync,
	segspt_shmincore,
	segspt_shmlockop,
	segspt_shmgetprot,
	segspt_shmgetoffset,
	segspt_shmgettype,
	segspt_shmgetvp,
	segspt_shmadvise,	/* advise */
	segspt_shmdump,
	segspt_shmpagelock,
	segspt_shmsetpgsz,
	segspt_shmgetmemid,
	segspt_shmgetpolicy,
	segspt_shmcapable,
};

static void segspt_purge(struct seg *seg);
static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
		enum seg_rw, int);
static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
		page_t **ppa);
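
/*
 * Illustrative sketch (added; not from the original source): the SysV
 * shared memory code in shm.c is expected to drive these entry points
 * roughly as follows when a segment is created and torn down.  The
 * exact flag plumbing at the call site is an assumption here.
 *
 *	struct seg *sptseg;
 *	int err;
 *
 *	// DISM if SHM_PAGEABLE is set in flags, classic ISM otherwise
 *	err = sptcreate(size, &sptseg, amp, prot, flags, share_szc);
 *	if (err != 0)
 *		return (err);		// e.g. EINVAL or ENOMEM
 *	...
 *	// on final teardown of the shared mapping:
 *	sptdestroy(sptseg->s_as, amp);
 */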

/*ARGSUSED*/
int
sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
	uint_t prot, uint_t flags, uint_t share_szc)
{
	int	err;
	struct	as	*newas;
	struct	segspt_crargs sptcargs;

#ifdef DEBUG
	TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
	    tnf_ulong, size, size );
#endif
	if (segspt_minfree == 0)	/* leave min 5% of availrmem */
		segspt_minfree = availrmem/20;	/* for the system */

	if (!hat_supported(HAT_SHARED_PT, (void *)0))
		return (EINVAL);

	/*
	 * get a new as for this shared memory segment
	 */
	newas = as_alloc();
	newas->a_proc = NULL;
	sptcargs.amp = amp;
	sptcargs.prot = prot;
	sptcargs.flags = flags;
	sptcargs.szc = share_szc;
	/*
	 * create a shared page table (spt) segment
	 */

	if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
		as_free(newas);
		return (err);
	}
	*sptseg = sptcargs.seg_spt;
	return (0);
}

void
sptdestroy(struct as *as, struct anon_map *amp)
{

#ifdef DEBUG
	TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
#endif
	(void) as_unmap(as, SEGSPTADDR, amp->size);
	as_free(as);
}

/*
 * called from seg_free().
 * free (i.e., unlock, unmap, return to free list)
 * all the pages in the given seg.
 */
void
segspt_free(struct seg	*seg)
{
	struct spt_data *sptd = (struct spt_data *)seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	if (sptd != NULL) {
		if (sptd->spt_realsize)
			segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);

		if (sptd->spt_ppa_lckcnt)
			kmem_free(sptd->spt_ppa_lckcnt,
			    sizeof (*sptd->spt_ppa_lckcnt)
			    * btopr(sptd->spt_amp->size));
		kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
		cv_destroy(&sptd->spt_cv);
		mutex_destroy(&sptd->spt_lock);
		kmem_free(sptd, sizeof (*sptd));
	}
}

/*ARGSUSED*/
static int
segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
	uint_t flags)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	return (0);
}
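
/*
 * Added note: segspt_shmincore() below reports residency for an
 * attached shared segment.  For classic ISM every page was created
 * and locked in segspt_create(), so each vector byte is simply marked
 * in-core, locked and anon.  For DISM (SHM_PAGEABLE) residency is
 * checked per anon slot, and SEG_PAGE_LOCKED is reported from the
 * DISM_PG_LOCKED bit in shm_vpage[], which the DISM lock operation
 * (segspt_shmlockop()) maintains.
 */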
/*ARGSUSED*/
static size_t
segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	caddr_t	eo_seg;
	pgcnt_t	npages;
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg	*sptseg;
	struct spt_data *sptd;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
#ifdef lint
	seg = seg;
#endif
	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		eo_seg = addr + len;
		while (addr < eo_seg) {
			/* page exists, and it's locked. */
			*vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
			    SEG_PAGE_ANON;
			addr += PAGESIZE;
		}
		return (len);
	} else {
		struct	anon_map *amp = shmd->shm_amp;
		struct	anon	*ap;
		page_t		*pp;
		pgcnt_t		anon_index;
		struct vnode	*vp;
		u_offset_t	off;
		ulong_t		i;
		int		ret;
		anon_sync_obj_t	cookie;

		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		anon_index = seg_page(seg, addr);
		npages = btopr(len);
		if (anon_index + npages > btopr(shmd->shm_amp->size)) {
			return (EINVAL);
		}
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, anon_index++) {
			ret = 0;
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap != NULL) {
				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup_nowait(vp, off, SE_SHARED);
				if (pp != NULL) {
					ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
					page_unlock(pp);
				}
			} else {
				anon_array_exit(&cookie);
			}
			if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
				ret |= SEG_PAGE_LOCKED;
			}
			*vec++ = (char)ret;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		return (len);
	}
}

static int
segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
	size_t share_size;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * seg.s_size may have been rounded up to the largest page size
	 * in shmat().
	 * XXX This should be cleaned up. sptdestroy should take a length
	 * argument, the same one passed to sptcreate. Then this rounding
	 * would not be needed (or could be done in shm.c).
	 * Only the check for the full segment would be needed.
	 *
	 * XXX -- shouldn't raddr == 0 always? These tests don't seem
	 * to be useful at all.
	 */
	share_size = page_get_pagesize(seg->s_szc);
	ssize = P2ROUNDUP(ssize, share_size);

	if (raddr == seg->s_base && ssize == seg->s_size) {
		seg_free(seg);
		return (0);
	} else
		return (EINVAL);
}

int
segspt_create(struct seg *seg, caddr_t argsp)
{
	int		err;
	caddr_t		addr = seg->s_base;
	struct spt_data *sptd;
	struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
	struct anon_map *amp = sptcargs->amp;
	struct kshmid	*sp = amp->a_sp;
	struct	cred	*cred = CRED();
	ulong_t		i, j, anon_index = 0;
	pgcnt_t		npages = btopr(amp->size);
	struct vnode	*vp;
	page_t		**ppa;
	uint_t		hat_flags;
	size_t		pgsz;
	pgcnt_t		pgcnt;
	caddr_t		a;
	pgcnt_t		pidx;
	size_t		sz;
	proc_t		*procp = curproc;
	rctl_qty_t	lockedbytes = 0;
	kproject_t	*proj;

	/*
	 * We are holding the a_lock on the underlying dummy as,
	 * so we can make calls to the HAT layer.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(sp != NULL);

#ifdef DEBUG
	TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
	    tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
#endif
	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if (err = anon_swap_adjust(npages))
			return (err);
	}
	err = ENOMEM;

	if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
		goto out1;

	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
		    KM_NOSLEEP)) == NULL)
			goto out2;
	}

	mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);

	if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
		goto out3;

	seg->s_ops = &segspt_ops;
	sptd->spt_vp = vp;
	sptd->spt_amp = amp;
	sptd->spt_prot = sptcargs->prot;
	sptd->spt_flags = sptcargs->flags;
	seg->s_data = (caddr_t)sptd;
	sptd->spt_ppa = NULL;
	sptd->spt_ppa_lckcnt = NULL;
	seg->s_szc = sptcargs->szc;
	cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
	sptd->spt_gen = 0;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	if (seg->s_szc > amp->a_szc) {
		amp->a_szc = seg->s_szc;
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);

	/*
	 * Set policy to affect initial allocation of pages in
	 * anon_map_createpages()
	 */
	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
	    NULL, 0, ptob(npages));

	if (sptcargs->flags & SHM_PAGEABLE) {
		size_t	share_sz;
		pgcnt_t new_npgs, more_pgs;
		struct anon_hdr *nahp;
		zone_t *zone;

		share_sz = page_get_pagesize(seg->s_szc);
		if (!IS_P2ALIGNED(amp->size, share_sz)) {
			/*
			 * We are rounding up the size of the anon array
			 * on a 4 M boundary because we always create 4 M
			 * of page(s) when locking and faulting pages; that
			 * way we don't have to check for all corner cases,
			 * e.g. whether there is enough space to allocate a
			 * 4 M page.
			 */
			new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
			more_pgs = new_npgs - npages;

			/*
			 * The zone will never be NULL, as a fully created
			 * shm always has an owning zone.
			 */
			zone = sp->shm_perm.ipc_zone;
			ASSERT(zone != NULL);
			if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
				err = ENOMEM;
				goto out4;
			}

			nahp = anon_create(new_npgs, ANON_SLEEP);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
			    ANON_SLEEP);
			anon_release(amp->ahp, npages);
			amp->ahp = nahp;
			ASSERT(amp->swresv == ptob(npages));
			amp->swresv = amp->size = ptob(new_npgs);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			npages = new_npgs;
		}

		sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
		    sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
		sptd->spt_pcachecnt = 0;
		sptd->spt_realsize = ptob(npages);
		sptcargs->seg_spt = seg;
		return (0);
	}

	/*
	 * get array of pages for each anon slot in amp
	 */
	if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
	    seg, addr, S_CREATE, cred)) != 0)
		goto out4;

	mutex_enter(&sp->shm_mlock);

	/* May be partially locked, so count bytes to charge for locking */
	for (i = 0; i < npages; i++)
		if (ppa[i]->p_lckcnt == 0)
			lockedbytes += PAGESIZE;

	proj = sp->shm_perm.ipc_proj;

	if (lockedbytes > 0) {
		mutex_enter(&procp->p_lock);
		if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
			mutex_exit(&procp->p_lock);
			mutex_exit(&sp->shm_mlock);
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
			err = ENOMEM;
			goto out4;
		}
		mutex_exit(&procp->p_lock);
	}

	/*
	 * addr is the initial address corresponding to the first page
	 * on the ppa list
	 */
	for (i = 0; i < npages; i++) {
		/* attempt to lock all pages */
		if (page_pp_lock(ppa[i], 0, 1) == 0) {
			/*
			 * if unable to lock any page, unlock all
			 * of them and return error
			 */
			for (j = 0; j < i; j++)
				page_pp_unlock(ppa[j], 0, 1);
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
			rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
			mutex_exit(&sp->shm_mlock);
			err = ENOMEM;
			goto out4;
		}
	}
	mutex_exit(&sp->shm_mlock);

	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * for the entire life of the segment, for example platforms
	 * that do not support Dynamic Reconfiguration.
	 */
	hat_flags = HAT_LOAD_SHARE;
	if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
		hat_flags |= HAT_LOAD_LOCK;

	/*
	 * Load translations one large page at a time
	 * to make sure we don't create mappings bigger than
	 * the segment's size code, in case the underlying pages
	 * are shared with a segvn segment that uses a bigger
	 * size code than we do.
	 */
	pgsz = page_get_pagesize(seg->s_szc);
	pgcnt = page_get_pagecnt(seg->s_szc);
	for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
		sz = MIN(pgsz, ptob(npages - pidx));
		hat_memload_array(seg->s_as->a_hat, a, sz,
		    &ppa[pidx], sptd->spt_prot, hat_flags);
	}
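
	/*
	 * Worked example (added note; figures assume an 8K base page
	 * and a 4M share size code): pgsz is 4M and pgcnt is 512, so
	 * a 10M ISM segment (npages = 1280) is loaded by the loop
	 * above in three hat_memload_array() calls covering 4M, 4M
	 * and finally 2M.
	 */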

	/*
	 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
	 * we will leave the pages locked SE_SHARED for the life
	 * of the ISM segment. This will prevent any calls to
	 * hat_pageunload() on this ISM segment for those platforms.
	 */
	if (!(hat_flags & HAT_LOAD_LOCK)) {
		/*
		 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
		 * we no longer need to hold the SE_SHARED lock on the pages,
		 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
		 * SE_SHARED lock on the pages as necessary.
		 */
		for (i = 0; i < npages; i++)
			page_unlock(ppa[i]);
	}
	sptd->spt_pcachecnt = 0;
	kmem_free(ppa, ((sizeof (page_t *)) * npages));
	sptd->spt_realsize = ptob(npages);
	atomic_add_long(&spt_used, npages);
	sptcargs->seg_spt = seg;
	return (0);

out4:
	seg->s_data = NULL;
	kmem_free(vp, sizeof (*vp));
	cv_destroy(&sptd->spt_cv);
out3:
	mutex_destroy(&sptd->spt_lock);
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		kmem_free(ppa, (sizeof (*ppa) * npages));
out2:
	kmem_free(sptd, sizeof (*sptd));
out1:
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		anon_swap_restore(npages);
	return (err);
}
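
/*
 * Added note: segspt_free_pages() undoes the work of segspt_create().
 * It unloads the HAT translations and, for classic ISM, drops the
 * per-page p_lckcnt hold taken by page_pp_lock(), credits the project
 * locked-memory rctl for pages whose lock count drops to zero, and
 * invalidates the pages; a large page is destroyed only after all of
 * its constituent pages have been visited (the root/curnpgs
 * bookkeeping below).
 */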
/*ARGSUSED*/
void
segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
{
	struct page	*pp;
	struct spt_data *sptd = (struct spt_data *)seg->s_data;
	pgcnt_t		npages;
	ulong_t		anon_idx;
	struct anon_map *amp;
	struct anon	*ap;
	struct vnode	*vp;
	u_offset_t	off;
	uint_t		hat_flags;
	int		root = 0;
	pgcnt_t		pgs, curnpgs = 0;
	page_t		*rootpp;
	rctl_qty_t	unlocked_bytes = 0;
	kproject_t	*proj;
	kshmid_t	*sp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	npages = btop(len);

	hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
	if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
	    (sptd->spt_flags & SHM_PAGEABLE)) {
		hat_flags = HAT_UNLOAD_UNMAP;
	}

	hat_unload(seg->s_as->a_hat, addr, len, hat_flags);

	amp = sptd->spt_amp;
	if (sptd->spt_flags & SHM_PAGEABLE)
		npages = btop(amp->size);

	ASSERT(amp != NULL);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		sp = amp->a_sp;
		proj = sp->shm_perm.ipc_proj;
		mutex_enter(&sp->shm_mlock);
	}
	for (anon_idx = 0; anon_idx < npages; anon_idx++) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
				panic("segspt_free_pages: null app");
				/*NOTREACHED*/
			}
		} else {
			if ((ap = anon_get_next_ptr(amp->ahp,
			    &anon_idx)) == NULL)
				continue;
		}
		ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
		swap_xlate(ap, &vp, &off);

		/*
		 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
		 * the pages won't be holding an SE_SHARED lock at this
		 * point.
		 *
		 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
		 * the pages are still held SE_SHARED locked from the
		 * original segspt_create().
		 *
		 * Our goal is to get an SE_EXCL lock on each page, remove
		 * the permanent lock on it and invalidate the page.
		 */
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if (hat_flags == HAT_UNLOAD_UNMAP)
				pp = page_lookup(vp, off, SE_EXCL);
			else {
				if ((pp = page_find(vp, off)) == NULL) {
					panic("segspt_free_pages: "
					    "page not locked");
					/*NOTREACHED*/
				}
				if (!page_tryupgrade(pp)) {
					page_unlock(pp);
					pp = page_lookup(vp, off, SE_EXCL);
				}
			}
			if (pp == NULL) {
				panic("segspt_free_pages: "
				    "page not in the system");
				/*NOTREACHED*/
			}
			ASSERT(pp->p_lckcnt > 0);
			page_pp_unlock(pp, 0, 1);
			if (pp->p_lckcnt == 0)
				unlocked_bytes += PAGESIZE;
		} else {
			if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
				continue;
		}
		/*
		 * It's logical to invalidate the pages here as in most cases
		 * these were created by segspt.
		 */
		if (pp->p_szc != 0) {
			if (root == 0) {
				ASSERT(curnpgs == 0);
				root = 1;
				rootpp = pp;
				pgs = curnpgs = page_get_pagecnt(pp->p_szc);
				ASSERT(pgs > 1);
				ASSERT(IS_P2ALIGNED(pgs, pgs));
				ASSERT(!(page_pptonum(pp) & (pgs - 1)));
				curnpgs--;
			} else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
				ASSERT(curnpgs == 1);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - 1));
				page_destroy_pages(rootpp);
				root = 0;
				curnpgs = 0;
			} else {
				ASSERT(curnpgs > 1);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - curnpgs));
				curnpgs--;
			}
		} else {
			if (root != 0 || curnpgs != 0) {
				panic("segspt_free_pages: bad large page");
				/*NOTREACHED*/
			}
			/*LINTED: constant in conditional context */
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
		}
	}
	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		if (unlocked_bytes > 0)
			rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
		mutex_exit(&sp->shm_mlock);
	}
	if (root != 0 || curnpgs != 0) {
		panic("segspt_free_pages: bad large page");
		/*NOTREACHED*/
	}

	/*
	 * mark that pages have been released
	 */
	sptd->spt_realsize = 0;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		atomic_add_long(&spt_used, -npages);
		anon_swap_restore(npages);
	}
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_getpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map *amp;
	ulong_t anon_index;
	lgrp_mem_policy_info_t *policy_info;
	struct spt_data *spt_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segspt
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count, which must be
	 * nonzero for an existing segment.
	 * Need to grab readers lock on policy tree though.
	 */
	spt_data = (struct spt_data *)seg->s_data;
	if (spt_data == NULL)
		return (NULL);
	amp = spt_data->spt_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
}
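
/*
 * Added note: segspt_dismpagelock() below is the SEGOP_PAGELOCK entry
 * point for DISM.  as_pagelock() calls it with L_PAGELOCK to obtain a
 * page_t array covering an I/O range, and later with L_PAGEUNLOCK to
 * release that hold; whenever it returns ENOTSUP the caller falls
 * back to the slower F_SOFTLOCK fault path.
 */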

/*
 * DISM only.
 * Return locked pages over a given range.
 *
 * We will cache all DISM locked pages and save the pplist for the
 * entire segment in the ppa field of the underlying DISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
 */
/*ARGSUSED*/
static int
segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	struct	shm_data *shmd = (struct shm_data *)seg->s_data;
	struct	seg	*sptseg = shmd->shm_sptseg;
	struct	spt_data *sptd = sptseg->s_data;
	pgcnt_t pg_idx, npages, tot_npages, npgs;
	struct	page **pplist, **pl, **ppa, *pp;
	struct	anon_map *amp;
	spgcnt_t	an_idx;
	int	ret = ENOTSUP;
	uint_t	pl_built = 0;
	struct	anon *ap;
	struct	vnode *vp;
	u_offset_t off;
	pgcnt_t claim_availrmem = 0;
	uint_t	szc;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);

	/*
	 * We want to lock/unlock the entire ISM segment. Therefore,
	 * we will be using the underlying sptseg and its base address
	 * and length for the caching arguments.
	 */
	ASSERT(sptseg);
	ASSERT(sptd);

	pg_idx = seg_page(seg, addr);
	npages = btopr(len);

	/*
	 * check if the request is larger than number of pages covered
	 * by amp
	 */
	if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
		*ppp = NULL;
		return (ENOTSUP);
	}

	if (type == L_PAGEUNLOCK) {
		ASSERT(sptd->spt_ppa != NULL);

		seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);

		/*
		 * If someone is blocked while unmapping, we purge
		 * segment page cache and thus reclaim pplist synchronously
		 * without waiting for seg_pasync_thread. This speeds up
		 * unmapping in cases where munmap(2) is called while
		 * raw async i/o is still in progress or where a thread
		 * exits on data fault in a multithreaded application.
		 */
		if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
		    (AS_ISUNMAPWAIT(seg->s_as) &&
		    shmd->shm_softlockcnt > 0)) {
			segspt_purge(seg);
		}
		return (0);
	}

	/* The L_PAGELOCK case ... */

	if (sptd->spt_flags & DISM_PPA_CHANGED) {
		segspt_purge(seg);
		/*
		 * for DISM the ppa needs to be rebuilt since
		 * the number of locked pages could have changed
		 */
		*ppp = NULL;
		return (ENOTSUP);
	}

	/*
	 * First try to find pages in segment page cache, without
	 * holding the segment lock.
	 */
	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    S_WRITE, SEGP_FORCE_WIRED);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		ppa = sptd->spt_ppa;
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				seg_pinactive(seg, NULL, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
				*ppp = NULL;
				return (ENOTSUP);
			}
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
			} else {
				an_idx++;
			}
		}
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		*ppp = &(sptd->spt_ppa[pg_idx]);
		return (0);
	}
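
	/*
	 * Added note: the cache lookup is repeated below under
	 * sptd->spt_lock because another thread may have built and
	 * inserted the ppa[] between the lock-free check above and our
	 * acquisition of the mutex (a double-checked lookup).
	 */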
	mutex_enter(&sptd->spt_lock);
	/*
	 * try to find pages in segment page cache with mutex
	 */
	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    S_WRITE, SEGP_FORCE_WIRED);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		ppa = sptd->spt_ppa;
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				mutex_exit(&sptd->spt_lock);
				seg_pinactive(seg, NULL, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
				*ppp = NULL;
				return (ENOTSUP);
			}
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
			} else {
				an_idx++;
			}
		}
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		mutex_exit(&sptd->spt_lock);
		*ppp = &(sptd->spt_ppa[pg_idx]);
		return (0);
	}

	if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    SEGP_FORCE_WIRED) == SEGP_FAIL) {
		mutex_exit(&sptd->spt_lock);
		*ppp = NULL;
		return (ENOTSUP);
	}

	/*
	 * No need to worry about protections because DISM pages are always rw.
	 */
	pl = pplist = NULL;
	amp = sptd->spt_amp;

	/*
	 * Do we need to build the ppa array?
	 */
	if (sptd->spt_ppa == NULL) {
		pgcnt_t lpg_cnt = 0;

		pl_built = 1;
		tot_npages = btopr(sptd->spt_amp->size);

		ASSERT(sptd->spt_pcachecnt == 0);
		pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
		pl = pplist;

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		for (an_idx = 0; an_idx < tot_npages; ) {
			ap = anon_get_ptr(amp->ahp, an_idx);
			/*
			 * Cache only mlocked pages. For large pages,
			 * if one (constituent) page is mlocked,
			 * all pages for that large page
			 * are cached also. This is for quick
			 * lookups of the ppa array.
			 */
			if ((ap != NULL) && (lpg_cnt != 0 ||
			    (sptd->spt_ppa_lckcnt[an_idx] != 0))) {

				swap_xlate(ap, &vp, &off);
				pp = page_lookup(vp, off, SE_SHARED);
				ASSERT(pp != NULL);
				if (lpg_cnt == 0) {
					lpg_cnt++;
					/*
					 * For a small page, we are done --
					 * lpg_cnt is reset to 0 below.
					 *
					 * For a large page, we are guaranteed
					 * to find the anon structures of all
					 * constituent pages and a non-zero
					 * lpg_cnt ensures that we don't test
					 * for mlock for these. We are done
					 * when lpg_cnt reaches (npgs + 1).
					 * If we are not the first constituent
					 * page, restart at the first one.
					 */
					npgs = page_get_pagecnt(pp->p_szc);
					if (!IS_P2ALIGNED(an_idx, npgs)) {
						an_idx = P2ALIGN(an_idx, npgs);
						page_unlock(pp);
						continue;
					}
				}
				if (++lpg_cnt > npgs)
					lpg_cnt = 0;

				/*
				 * availrmem is decremented only
				 * for unlocked pages
				 */
				if (sptd->spt_ppa_lckcnt[an_idx] == 0)
					claim_availrmem++;
				pplist[an_idx] = pp;
			}
			an_idx++;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);

		if (claim_availrmem) {
			mutex_enter(&freemem_lock);
			if (availrmem < tune.t_minarmem + claim_availrmem) {
				mutex_exit(&freemem_lock);
				ret = ENOTSUP;
				claim_availrmem = 0;
				goto insert_fail;
			} else {
				availrmem -= claim_availrmem;
			}
			mutex_exit(&freemem_lock);
		}

		sptd->spt_ppa = pl;
	} else {
		/*
		 * We already have a valid ppa[].
		 */
		pl = sptd->spt_ppa;
	}

	ASSERT(pl != NULL);

	ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
	    segspt_reclaim);
	if (ret == SEGP_FAIL) {
		/*
		 * seg_pinsert failed. We return
		 * ENOTSUP, so that the as_pagelock() code will
		 * then try the slower F_SOFTLOCK path.
		 */
		if (pl_built) {
			/*
			 * No one else has referenced the ppa[].
			 * We created it and we need to destroy it.
			 */
			sptd->spt_ppa = NULL;
		}
		ret = ENOTSUP;
		goto insert_fail;
	}

	/*
	 * In either case, we increment softlockcnt on the 'real' segment.
	 */
	sptd->spt_pcachecnt++;
	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);

	ppa = sptd->spt_ppa;
	for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
		if (ppa[an_idx] == NULL) {
			mutex_exit(&sptd->spt_lock);
			seg_pinactive(seg, NULL, seg->s_base,
			    sptd->spt_amp->size,
			    pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
			*ppp = NULL;
			return (ENOTSUP);
		}
		if ((szc = ppa[an_idx]->p_szc) != 0) {
			npgs = page_get_pagecnt(szc);
			an_idx = P2ROUNDUP(an_idx + 1, npgs);
		} else {
			an_idx++;
		}
	}
	/*
	 * We can now drop the sptd->spt_lock since the ppa[]
	 * exists and we have incremented pcachecnt.
	 */
	mutex_exit(&sptd->spt_lock);

	/*
	 * Since we cache the entire segment, we want to
	 * set ppp to point to the first slot that corresponds
	 * to the requested addr, i.e. pg_idx.
	 */
	*ppp = &(sptd->spt_ppa[pg_idx]);
	return (0);

insert_fail:
	/*
	 * We will only reach this code if we tried and failed.
	 *
	 * And we can drop the lock on the dummy seg, once we've failed
	 * to set up a new ppa[].
	 */
	mutex_exit(&sptd->spt_lock);

	if (pl_built) {
		if (claim_availrmem) {
			mutex_enter(&freemem_lock);
			availrmem += claim_availrmem;
			mutex_exit(&freemem_lock);
		}

		/*
		 * We created pl and we need to destroy it.
		 */
		pplist = pl;
		for (an_idx = 0; an_idx < tot_npages; an_idx++) {
			if (pplist[an_idx] != NULL)
				page_unlock(pplist[an_idx]);
		}
		kmem_free(pl, sizeof (page_t *) * tot_npages);
	}

	if (shmd->shm_softlockcnt <= 0) {
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}
	*ppp = NULL;
	return (ret);
}
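
/*
 * Added note: segspt_shmpagelock() below is the classic-ISM
 * counterpart of segspt_dismpagelock() above.  Because every ISM page
 * is created locked in segspt_create(), the ppa[] built here covers
 * the whole segment unconditionally and no claim_availrmem accounting
 * is required, unlike the DISM case, which caches only mlocked pages.
 */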

/*
 * Return locked pages over a given range.
 *
 * We will cache the entire ISM segment and save the pplist for the
 * entire segment in the ppa field of the underlying ISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
 */
/*ARGSUSED*/
static int
segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t np, page_index, npages;
	caddr_t a, spt_base;
	struct page **pplist, **pl, *pp;
	struct anon_map *amp;
	ulong_t anon_index;
	int ret = ENOTSUP;
	uint_t	pl_built = 0;
	struct anon *ap;
	struct vnode *vp;
	u_offset_t off;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);

	/*
	 * We want to lock/unlock the entire ISM segment. Therefore,
	 * we will be using the underlying sptseg and its base address
	 * and length for the caching arguments.
	 */
	ASSERT(sptseg);
	ASSERT(sptd);

	if (sptd->spt_flags & SHM_PAGEABLE) {
		return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
	}

	page_index = seg_page(seg, addr);
	npages = btopr(len);

	/*
	 * check if the request is larger than number of pages covered
	 * by amp
	 */
	if (page_index + npages > btopr(sptd->spt_amp->size)) {
		*ppp = NULL;
		return (ENOTSUP);
	}

	if (type == L_PAGEUNLOCK) {

		ASSERT(sptd->spt_ppa != NULL);

		seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);

		/*
		 * If someone is blocked while unmapping, we purge
		 * segment page cache and thus reclaim pplist synchronously
		 * without waiting for seg_pasync_thread. This speeds up
		 * unmapping in cases where munmap(2) is called while
		 * raw async i/o is still in progress or where a thread
		 * exits on data fault in a multithreaded application.
		 */
12320Sstevel@tonic-gate */ 12330Sstevel@tonic-gate if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 12340Sstevel@tonic-gate segspt_purge(seg); 12350Sstevel@tonic-gate } 12360Sstevel@tonic-gate return (0); 1237*6695Saguzovsk } 12380Sstevel@tonic-gate 1239*6695Saguzovsk /* The L_PAGELOCK case... */ 12400Sstevel@tonic-gate 12410Sstevel@tonic-gate /* 12420Sstevel@tonic-gate * First try to find pages in segment page cache, without 12430Sstevel@tonic-gate * holding the segment lock. 12440Sstevel@tonic-gate */ 1245*6695Saguzovsk pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size, 1246*6695Saguzovsk S_WRITE, SEGP_FORCE_WIRED); 12470Sstevel@tonic-gate if (pplist != NULL) { 12480Sstevel@tonic-gate ASSERT(sptd->spt_ppa == pplist); 12490Sstevel@tonic-gate ASSERT(sptd->spt_ppa[page_index]); 12500Sstevel@tonic-gate /* 12510Sstevel@tonic-gate * Since we cache the entire ISM segment, we want to 12520Sstevel@tonic-gate * set ppp to point to the first slot that corresponds 12530Sstevel@tonic-gate * to the requested addr, i.e. page_index. 12540Sstevel@tonic-gate */ 12550Sstevel@tonic-gate *ppp = &(sptd->spt_ppa[page_index]); 12560Sstevel@tonic-gate return (0); 12570Sstevel@tonic-gate } 12580Sstevel@tonic-gate 12590Sstevel@tonic-gate mutex_enter(&sptd->spt_lock); 12600Sstevel@tonic-gate 12610Sstevel@tonic-gate /* 12620Sstevel@tonic-gate * try to find pages in segment page cache 12630Sstevel@tonic-gate */ 1264*6695Saguzovsk pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size, 1265*6695Saguzovsk S_WRITE, SEGP_FORCE_WIRED); 12660Sstevel@tonic-gate if (pplist != NULL) { 12670Sstevel@tonic-gate ASSERT(sptd->spt_ppa == pplist); 12680Sstevel@tonic-gate /* 12690Sstevel@tonic-gate * Since we cache the entire segment, we want to 12700Sstevel@tonic-gate * set ppp to point to the first slot that corresponds 12710Sstevel@tonic-gate * to the requested addr, i.e. page_index. 12720Sstevel@tonic-gate */ 12730Sstevel@tonic-gate mutex_exit(&sptd->spt_lock); 12740Sstevel@tonic-gate *ppp = &(sptd->spt_ppa[page_index]); 12750Sstevel@tonic-gate return (0); 12760Sstevel@tonic-gate } 12770Sstevel@tonic-gate 1278*6695Saguzovsk if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size, 1279*6695Saguzovsk SEGP_FORCE_WIRED) == SEGP_FAIL) { 12800Sstevel@tonic-gate mutex_exit(&sptd->spt_lock); 12810Sstevel@tonic-gate *ppp = NULL; 12820Sstevel@tonic-gate return (ENOTSUP); 12830Sstevel@tonic-gate } 12840Sstevel@tonic-gate 12850Sstevel@tonic-gate /* 12860Sstevel@tonic-gate * No need to worry about protections because ISM pages 12870Sstevel@tonic-gate * are always rw. 12880Sstevel@tonic-gate */ 12890Sstevel@tonic-gate pl = pplist = NULL; 12900Sstevel@tonic-gate 12910Sstevel@tonic-gate /* 12920Sstevel@tonic-gate * Do we need to build the ppa array? 12930Sstevel@tonic-gate */ 12940Sstevel@tonic-gate if (sptd->spt_ppa == NULL) { 12950Sstevel@tonic-gate ASSERT(sptd->spt_ppa == pplist); 12960Sstevel@tonic-gate 12970Sstevel@tonic-gate spt_base = sptseg->s_base; 12980Sstevel@tonic-gate pl_built = 1; 12990Sstevel@tonic-gate 13000Sstevel@tonic-gate /* 13010Sstevel@tonic-gate * availrmem is decremented once during anon_swap_adjust() 13020Sstevel@tonic-gate * and is incremented during the anon_unresv(), which is 13030Sstevel@tonic-gate * called from shm_rm_amp() when the segment is destroyed. 
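		 *
		 * (Informal note, inferred from the surrounding code:
		 * since non-pageable ISM pages stay charged against
		 * availrmem for the life of the segment, this path,
		 * unlike the DISM pagelock path above, has no
		 * claim_availrmem adjustment to make while building or
		 * tearing down the cached pplist.)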
13040Sstevel@tonic-gate 		 */
13050Sstevel@tonic-gate 		amp = sptd->spt_amp;
13060Sstevel@tonic-gate 		ASSERT(amp != NULL);
13070Sstevel@tonic-gate 
13080Sstevel@tonic-gate 		/* pcachecnt is protected by sptd->spt_lock */
13090Sstevel@tonic-gate 		ASSERT(sptd->spt_pcachecnt == 0);
13100Sstevel@tonic-gate 		pplist = kmem_zalloc(sizeof (page_t *)
13110Sstevel@tonic-gate 		    * btopr(sptd->spt_amp->size), KM_SLEEP);
13120Sstevel@tonic-gate 		pl = pplist;
13130Sstevel@tonic-gate 
13140Sstevel@tonic-gate 		anon_index = seg_page(sptseg, spt_base);
13150Sstevel@tonic-gate 
13160Sstevel@tonic-gate 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
13170Sstevel@tonic-gate 		for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
13180Sstevel@tonic-gate 		    a += PAGESIZE, anon_index++, pplist++) {
13190Sstevel@tonic-gate 			ap = anon_get_ptr(amp->ahp, anon_index);
13200Sstevel@tonic-gate 			ASSERT(ap != NULL);
13210Sstevel@tonic-gate 			swap_xlate(ap, &vp, &off);
13220Sstevel@tonic-gate 			pp = page_lookup(vp, off, SE_SHARED);
13230Sstevel@tonic-gate 			ASSERT(pp != NULL);
13240Sstevel@tonic-gate 			*pplist = pp;
13250Sstevel@tonic-gate 		}
13260Sstevel@tonic-gate 		ANON_LOCK_EXIT(&amp->a_rwlock);
13270Sstevel@tonic-gate 
13280Sstevel@tonic-gate 		if (a < (spt_base + sptd->spt_amp->size)) {
13290Sstevel@tonic-gate 			ret = ENOTSUP;
13300Sstevel@tonic-gate 			goto insert_fail;
13310Sstevel@tonic-gate 		}
13320Sstevel@tonic-gate 		sptd->spt_ppa = pl;
13330Sstevel@tonic-gate 	} else {
13340Sstevel@tonic-gate 		/*
13350Sstevel@tonic-gate 		 * We already have a valid ppa[].
13360Sstevel@tonic-gate 		 */
13370Sstevel@tonic-gate 		pl = sptd->spt_ppa;
13380Sstevel@tonic-gate 	}
13390Sstevel@tonic-gate 
13400Sstevel@tonic-gate 	ASSERT(pl != NULL);
13410Sstevel@tonic-gate 
1342*6695Saguzovsk 	ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1343*6695Saguzovsk 	    sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1344*6695Saguzovsk 	    segspt_reclaim);
13450Sstevel@tonic-gate 	if (ret == SEGP_FAIL) {
13460Sstevel@tonic-gate 		/*
13470Sstevel@tonic-gate 		 * seg_pinsert failed. We return
13480Sstevel@tonic-gate 		 * ENOTSUP, so that the as_pagelock() code will
13490Sstevel@tonic-gate 		 * then try the slower F_SOFTLOCK path.
13500Sstevel@tonic-gate 		 */
13510Sstevel@tonic-gate 		if (pl_built) {
13520Sstevel@tonic-gate 			/*
13530Sstevel@tonic-gate 			 * No one else has referenced the ppa[].
13540Sstevel@tonic-gate 			 * We created it and we need to destroy it.
13550Sstevel@tonic-gate 			 */
13560Sstevel@tonic-gate 			sptd->spt_ppa = NULL;
13570Sstevel@tonic-gate 		}
13580Sstevel@tonic-gate 		ret = ENOTSUP;
13590Sstevel@tonic-gate 		goto insert_fail;
13600Sstevel@tonic-gate 	}
13610Sstevel@tonic-gate 
13620Sstevel@tonic-gate 	/*
13630Sstevel@tonic-gate 	 * In either case, we increment softlockcnt on the 'real' segment.
13640Sstevel@tonic-gate 	 */
13650Sstevel@tonic-gate 	sptd->spt_pcachecnt++;
13660Sstevel@tonic-gate 	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
13670Sstevel@tonic-gate 
13680Sstevel@tonic-gate 	/*
13690Sstevel@tonic-gate 	 * We can now drop the sptd->spt_lock since the ppa[]
13700Sstevel@tonic-gate 	 * exists and we have incremented pcachecnt.
13710Sstevel@tonic-gate 	 */
13720Sstevel@tonic-gate 	mutex_exit(&sptd->spt_lock);
13730Sstevel@tonic-gate 
13740Sstevel@tonic-gate 	/*
13750Sstevel@tonic-gate 	 * Since we cache the entire segment, we want to
13760Sstevel@tonic-gate 	 * set ppp to point to the first slot that corresponds
13770Sstevel@tonic-gate 	 * to the requested addr, i.e. page_index.
13780Sstevel@tonic-gate */ 13790Sstevel@tonic-gate *ppp = &(sptd->spt_ppa[page_index]); 1380*6695Saguzovsk return (0); 13810Sstevel@tonic-gate 13820Sstevel@tonic-gate insert_fail: 13830Sstevel@tonic-gate /* 13840Sstevel@tonic-gate * We will only reach this code if we tried and failed. 13850Sstevel@tonic-gate * 13860Sstevel@tonic-gate * And we can drop the lock on the dummy seg, once we've failed 13870Sstevel@tonic-gate * to set up a new ppa[]. 13880Sstevel@tonic-gate */ 13890Sstevel@tonic-gate mutex_exit(&sptd->spt_lock); 13900Sstevel@tonic-gate 13910Sstevel@tonic-gate if (pl_built) { 13920Sstevel@tonic-gate /* 13930Sstevel@tonic-gate * We created pl and we need to destroy it. 13940Sstevel@tonic-gate */ 13950Sstevel@tonic-gate pplist = pl; 13960Sstevel@tonic-gate np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT); 13970Sstevel@tonic-gate while (np) { 13980Sstevel@tonic-gate page_unlock(*pplist); 13990Sstevel@tonic-gate np--; 14000Sstevel@tonic-gate pplist++; 14010Sstevel@tonic-gate } 14025224Smec kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size)); 14030Sstevel@tonic-gate } 14040Sstevel@tonic-gate if (shmd->shm_softlockcnt <= 0) { 14050Sstevel@tonic-gate if (AS_ISUNMAPWAIT(seg->s_as)) { 14060Sstevel@tonic-gate mutex_enter(&seg->s_as->a_contents); 14070Sstevel@tonic-gate if (AS_ISUNMAPWAIT(seg->s_as)) { 14080Sstevel@tonic-gate AS_CLRUNMAPWAIT(seg->s_as); 14090Sstevel@tonic-gate cv_broadcast(&seg->s_as->a_cv); 14100Sstevel@tonic-gate } 14110Sstevel@tonic-gate mutex_exit(&seg->s_as->a_contents); 14120Sstevel@tonic-gate } 14130Sstevel@tonic-gate } 14140Sstevel@tonic-gate *ppp = NULL; 14150Sstevel@tonic-gate return (ret); 14160Sstevel@tonic-gate } 14170Sstevel@tonic-gate 14180Sstevel@tonic-gate /* 14190Sstevel@tonic-gate * purge any cached pages in the I/O page cache 14200Sstevel@tonic-gate */ 14210Sstevel@tonic-gate static void 14220Sstevel@tonic-gate segspt_purge(struct seg *seg) 14230Sstevel@tonic-gate { 1424*6695Saguzovsk seg_ppurge(seg, NULL, SEGP_FORCE_WIRED); 14250Sstevel@tonic-gate } 14260Sstevel@tonic-gate 14270Sstevel@tonic-gate static int 1428*6695Saguzovsk segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 1429*6695Saguzovsk enum seg_rw rw, int async) 14300Sstevel@tonic-gate { 1431*6695Saguzovsk struct seg *seg = (struct seg *)ptag; 14320Sstevel@tonic-gate struct shm_data *shmd = (struct shm_data *)seg->s_data; 14330Sstevel@tonic-gate struct seg *sptseg; 14340Sstevel@tonic-gate struct spt_data *sptd; 14350Sstevel@tonic-gate pgcnt_t npages, i, free_availrmem = 0; 14360Sstevel@tonic-gate int done = 0; 14370Sstevel@tonic-gate 14380Sstevel@tonic-gate #ifdef lint 14390Sstevel@tonic-gate addr = addr; 14400Sstevel@tonic-gate #endif 14410Sstevel@tonic-gate sptseg = shmd->shm_sptseg; 14420Sstevel@tonic-gate sptd = sptseg->s_data; 14430Sstevel@tonic-gate npages = (len >> PAGESHIFT); 14440Sstevel@tonic-gate ASSERT(npages); 14450Sstevel@tonic-gate ASSERT(sptd->spt_pcachecnt != 0); 14460Sstevel@tonic-gate ASSERT(sptd->spt_ppa == pplist); 14470Sstevel@tonic-gate ASSERT(npages == btopr(sptd->spt_amp->size)); 1448*6695Saguzovsk ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1449*6695Saguzovsk 14500Sstevel@tonic-gate /* 14510Sstevel@tonic-gate * Acquire the lock on the dummy seg and destroy the 14520Sstevel@tonic-gate * ppa array IF this is the last pcachecnt. 
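	 *
	 * (Informal note: spt_pcachecnt counts how many pcache entries
	 * currently reference this segment's shared ppa[], one per
	 * pagelocking shm segment; only the reclaim of the last entry
	 * may tear the array down.)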
14530Sstevel@tonic-gate 	 */
14540Sstevel@tonic-gate 	mutex_enter(&sptd->spt_lock);
14550Sstevel@tonic-gate 	if (--sptd->spt_pcachecnt == 0) {
14560Sstevel@tonic-gate 		for (i = 0; i < npages; i++) {
14570Sstevel@tonic-gate 			if (pplist[i] == NULL) {
14580Sstevel@tonic-gate 				continue;
14590Sstevel@tonic-gate 			}
14600Sstevel@tonic-gate 			if (rw == S_WRITE) {
14610Sstevel@tonic-gate 				hat_setrefmod(pplist[i]);
14620Sstevel@tonic-gate 			} else {
14630Sstevel@tonic-gate 				hat_setref(pplist[i]);
14640Sstevel@tonic-gate 			}
14650Sstevel@tonic-gate 			if ((sptd->spt_flags & SHM_PAGEABLE) &&
14662768Ssl108498 			    (sptd->spt_ppa_lckcnt[i] == 0))
14670Sstevel@tonic-gate 				free_availrmem++;
14680Sstevel@tonic-gate 			page_unlock(pplist[i]);
14690Sstevel@tonic-gate 		}
1470*6695Saguzovsk 		if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
14710Sstevel@tonic-gate 			mutex_enter(&freemem_lock);
14720Sstevel@tonic-gate 			availrmem += free_availrmem;
14730Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
14740Sstevel@tonic-gate 		}
14750Sstevel@tonic-gate 		/*
14760Sstevel@tonic-gate 		 * Since we want to cache/uncache the entire ISM segment,
14770Sstevel@tonic-gate 		 * we will track the pplist in a segspt specific field
14780Sstevel@tonic-gate 		 * ppa, that is initialized at the time we add an entry to
14790Sstevel@tonic-gate 		 * the cache.
14800Sstevel@tonic-gate 		 */
14810Sstevel@tonic-gate 		ASSERT(sptd->spt_pcachecnt == 0);
14820Sstevel@tonic-gate 		kmem_free(pplist, sizeof (page_t *) * npages);
14830Sstevel@tonic-gate 		sptd->spt_ppa = NULL;
14840Sstevel@tonic-gate 		sptd->spt_flags &= ~DISM_PPA_CHANGED;
14855224Smec 		sptd->spt_gen++;
14865224Smec 		cv_broadcast(&sptd->spt_cv);
14870Sstevel@tonic-gate 		done = 1;
14880Sstevel@tonic-gate 	}
14890Sstevel@tonic-gate 	mutex_exit(&sptd->spt_lock);
1490*6695Saguzovsk 
1491*6695Saguzovsk 	/*
1492*6695Saguzovsk 	 * If we are the pcache async thread, or were called via
1493*6695Saguzovsk 	 * seg_ppurge_wiredpp(), we may not hold the AS lock (in that case
1494*6695Saguzovsk 	 * the async argument is not 0). This means that if softlockcnt
1495*6695Saguzovsk 	 * drops to 0 after the decrement below, the address space may get
1496*6695Saguzovsk 	 * freed. We can't allow that, since after the softlock decrement
1497*6695Saguzovsk 	 * to 0 we still need to access the as structure for a possible
1498*6695Saguzovsk 	 * wakeup of unmap waiters. To prevent the as from disappearing we
1499*6695Saguzovsk 	 * take this segment's shm_segfree_syncmtx. segspt_shmfree() also
1500*6695Saguzovsk 	 * takes this mutex as a barrier to make sure this routine
1501*6695Saguzovsk 	 * completes before the segment is freed.
1502*6695Saguzovsk 	 *
1503*6695Saguzovsk 	 * The second complication in the async case is the possibility of
1504*6695Saguzovsk 	 * a missed wakeup of an unmap wait thread. When we don't hold the
1505*6695Saguzovsk 	 * as lock here, we may take the a_contents lock before an unmap
1506*6695Saguzovsk 	 * wait thread that was first to see softlockcnt still not 0, and
1507*6695Saguzovsk 	 * so fail to wake it up. To avoid this race we set the
1508*6695Saguzovsk 	 * nounmapwait flag in the as whenever we drop softlockcnt to 0
1509*6695Saguzovsk 	 * with async not 0; the unmapwait thread then will not block.
1510*6695Saguzovsk 	 */
1511*6695Saguzovsk 	if (async)
1512*6695Saguzovsk 		mutex_enter(&shmd->shm_segfree_syncmtx);
1513*6695Saguzovsk 
15140Sstevel@tonic-gate 	/*
15150Sstevel@tonic-gate 	 * Now decrement softlockcnt.
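	 *
	 * Illustrative ordering of the race described above, with async
	 * set and no nounmapwait flag:
	 *
	 *	unmapper:	sees shm_softlockcnt != 0
	 *	reclaim:	drops shm_softlockcnt to 0, takes
	 *			a_contents, finds AS_ISUNMAPWAIT clear,
	 *			exits
	 *	unmapper:	takes a_contents, sets AS_UNMAPWAIT,
	 *			sleeps with no wakeup coming
	 *
	 * AS_SETNOUNMAPWAIT below closes that window.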
15160Sstevel@tonic-gate 	 */
1517*6695Saguzovsk 	ASSERT(shmd->shm_softlockcnt > 0);
15180Sstevel@tonic-gate 	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
15190Sstevel@tonic-gate 
15200Sstevel@tonic-gate 	if (shmd->shm_softlockcnt <= 0) {
1521*6695Saguzovsk 		if (async || AS_ISUNMAPWAIT(seg->s_as)) {
15220Sstevel@tonic-gate 			mutex_enter(&seg->s_as->a_contents);
1523*6695Saguzovsk 			if (async)
1524*6695Saguzovsk 				AS_SETNOUNMAPWAIT(seg->s_as);
15250Sstevel@tonic-gate 			if (AS_ISUNMAPWAIT(seg->s_as)) {
15260Sstevel@tonic-gate 				AS_CLRUNMAPWAIT(seg->s_as);
15270Sstevel@tonic-gate 				cv_broadcast(&seg->s_as->a_cv);
15280Sstevel@tonic-gate 			}
15290Sstevel@tonic-gate 			mutex_exit(&seg->s_as->a_contents);
15300Sstevel@tonic-gate 		}
15310Sstevel@tonic-gate 	}
1532*6695Saguzovsk 
1533*6695Saguzovsk 	if (async)
1534*6695Saguzovsk 		mutex_exit(&shmd->shm_segfree_syncmtx);
1535*6695Saguzovsk 
15360Sstevel@tonic-gate 	return (done);
15370Sstevel@tonic-gate }
15380Sstevel@tonic-gate 
15390Sstevel@tonic-gate /*
15400Sstevel@tonic-gate  * Do an F_SOFTUNLOCK call over the range requested.
15410Sstevel@tonic-gate  * The range must have already been F_SOFTLOCK'ed.
15420Sstevel@tonic-gate  *
15430Sstevel@tonic-gate  * The calls to acquire and release the anon map lock mutex were
15440Sstevel@tonic-gate  * removed in order to avoid a deadly embrace during a DR
15450Sstevel@tonic-gate  * memory delete operation. (E.g. DR blocks while waiting for an
15460Sstevel@tonic-gate  * exclusive lock on a page that is being used for kaio; the
15470Sstevel@tonic-gate  * thread that will complete the kaio and call segspt_softunlock
15480Sstevel@tonic-gate  * blocks on the anon map lock; another thread holding the anon
15490Sstevel@tonic-gate  * map lock blocks on another page lock via the segspt_shmfault
15500Sstevel@tonic-gate  * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
15510Sstevel@tonic-gate  *
15520Sstevel@tonic-gate  * The appropriateness of the removal is based upon the following:
15530Sstevel@tonic-gate  * 1. If we are holding a segment's reader lock and the page is held
15540Sstevel@tonic-gate  * shared, then the corresponding element in the anonmap which points
15550Sstevel@tonic-gate  * to the anon struct cannot change and there is no need to acquire
15560Sstevel@tonic-gate  * the anonymous map lock.
15570Sstevel@tonic-gate  * 2. Threads in segspt_softunlock have a reader lock on the segment
15580Sstevel@tonic-gate  * and already have the shared page lock, so we are guaranteed that
15590Sstevel@tonic-gate  * the anon map slot cannot change and therefore can call anon_get_ptr()
15600Sstevel@tonic-gate  * without grabbing the anonymous map lock.
15610Sstevel@tonic-gate  * 3. Threads that softlock a shared page break copy-on-write, even if
15620Sstevel@tonic-gate  * it's a read. Thus cow faults can be ignored with respect to soft
15630Sstevel@tonic-gate  * unlocking, since the breaking of cow means that the anon slot(s)
15640Sstevel@tonic-gate  * will not be shared.
15650Sstevel@tonic-gate */ 15660Sstevel@tonic-gate static void 15670Sstevel@tonic-gate segspt_softunlock(struct seg *seg, caddr_t sptseg_addr, 15680Sstevel@tonic-gate size_t len, enum seg_rw rw) 15690Sstevel@tonic-gate { 15700Sstevel@tonic-gate struct shm_data *shmd = (struct shm_data *)seg->s_data; 15710Sstevel@tonic-gate struct seg *sptseg; 15720Sstevel@tonic-gate struct spt_data *sptd; 15730Sstevel@tonic-gate page_t *pp; 15740Sstevel@tonic-gate caddr_t adr; 15750Sstevel@tonic-gate struct vnode *vp; 15760Sstevel@tonic-gate u_offset_t offset; 15770Sstevel@tonic-gate ulong_t anon_index; 15780Sstevel@tonic-gate struct anon_map *amp; /* XXX - for locknest */ 15790Sstevel@tonic-gate struct anon *ap = NULL; 15800Sstevel@tonic-gate pgcnt_t npages; 15810Sstevel@tonic-gate 15820Sstevel@tonic-gate ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 15830Sstevel@tonic-gate 15840Sstevel@tonic-gate sptseg = shmd->shm_sptseg; 15850Sstevel@tonic-gate sptd = sptseg->s_data; 15860Sstevel@tonic-gate 15870Sstevel@tonic-gate /* 15880Sstevel@tonic-gate * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 15890Sstevel@tonic-gate * and therefore their pages are SE_SHARED locked 15900Sstevel@tonic-gate * for the entire life of the segment. 15910Sstevel@tonic-gate */ 15920Sstevel@tonic-gate if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) && 15935224Smec ((sptd->spt_flags & SHM_PAGEABLE) == 0)) { 15940Sstevel@tonic-gate goto softlock_decrement; 15950Sstevel@tonic-gate } 15960Sstevel@tonic-gate 15970Sstevel@tonic-gate /* 15980Sstevel@tonic-gate * Any thread is free to do a page_find and 15990Sstevel@tonic-gate * page_unlock() on the pages within this seg. 16000Sstevel@tonic-gate * 16010Sstevel@tonic-gate * We are already holding the as->a_lock on the user's 16020Sstevel@tonic-gate * real segment, but we need to hold the a_lock on the 16030Sstevel@tonic-gate * underlying dummy as. This is mostly to satisfy the 16040Sstevel@tonic-gate * underlying HAT layer. 16050Sstevel@tonic-gate */ 16060Sstevel@tonic-gate AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 16070Sstevel@tonic-gate hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); 16080Sstevel@tonic-gate AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 16090Sstevel@tonic-gate 16100Sstevel@tonic-gate amp = sptd->spt_amp; 16110Sstevel@tonic-gate ASSERT(amp != NULL); 16120Sstevel@tonic-gate anon_index = seg_page(sptseg, sptseg_addr); 16130Sstevel@tonic-gate 16140Sstevel@tonic-gate for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) { 16150Sstevel@tonic-gate ap = anon_get_ptr(amp->ahp, anon_index++); 16160Sstevel@tonic-gate ASSERT(ap != NULL); 16170Sstevel@tonic-gate swap_xlate(ap, &vp, &offset); 16180Sstevel@tonic-gate 16190Sstevel@tonic-gate /* 16200Sstevel@tonic-gate * Use page_find() instead of page_lookup() to 16210Sstevel@tonic-gate * find the page since we know that it has a 16220Sstevel@tonic-gate * "shared" lock. 
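		 *
		 * (Informal note: page_find() only looks the page up and
		 * relies on the SE_SHARED lock the earlier F_SOFTLOCK
		 * left behind, whereas page_lookup() would try to
		 * acquire a page lock itself and could block here.)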
16230Sstevel@tonic-gate */ 16240Sstevel@tonic-gate pp = page_find(vp, offset); 16250Sstevel@tonic-gate ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 16260Sstevel@tonic-gate if (pp == NULL) { 16270Sstevel@tonic-gate panic("segspt_softunlock: " 16280Sstevel@tonic-gate "addr %p, ap %p, vp %p, off %llx", 16290Sstevel@tonic-gate (void *)adr, (void *)ap, (void *)vp, offset); 16300Sstevel@tonic-gate /*NOTREACHED*/ 16310Sstevel@tonic-gate } 16320Sstevel@tonic-gate 16330Sstevel@tonic-gate if (rw == S_WRITE) { 16340Sstevel@tonic-gate hat_setrefmod(pp); 16350Sstevel@tonic-gate } else if (rw != S_OTHER) { 16360Sstevel@tonic-gate hat_setref(pp); 16370Sstevel@tonic-gate } 16380Sstevel@tonic-gate page_unlock(pp); 16390Sstevel@tonic-gate } 16400Sstevel@tonic-gate 16410Sstevel@tonic-gate softlock_decrement: 16420Sstevel@tonic-gate npages = btopr(len); 1643*6695Saguzovsk ASSERT(shmd->shm_softlockcnt >= npages); 16440Sstevel@tonic-gate atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 16450Sstevel@tonic-gate if (shmd->shm_softlockcnt == 0) { 16460Sstevel@tonic-gate /* 16470Sstevel@tonic-gate * All SOFTLOCKS are gone. Wakeup any waiting 16480Sstevel@tonic-gate * unmappers so they can try again to unmap. 16490Sstevel@tonic-gate * Check for waiters first without the mutex 16500Sstevel@tonic-gate * held so we don't always grab the mutex on 16510Sstevel@tonic-gate * softunlocks. 16520Sstevel@tonic-gate */ 16530Sstevel@tonic-gate if (AS_ISUNMAPWAIT(seg->s_as)) { 16540Sstevel@tonic-gate mutex_enter(&seg->s_as->a_contents); 16550Sstevel@tonic-gate if (AS_ISUNMAPWAIT(seg->s_as)) { 16560Sstevel@tonic-gate AS_CLRUNMAPWAIT(seg->s_as); 16570Sstevel@tonic-gate cv_broadcast(&seg->s_as->a_cv); 16580Sstevel@tonic-gate } 16590Sstevel@tonic-gate mutex_exit(&seg->s_as->a_contents); 16600Sstevel@tonic-gate } 16610Sstevel@tonic-gate } 16620Sstevel@tonic-gate } 16630Sstevel@tonic-gate 16640Sstevel@tonic-gate int 16650Sstevel@tonic-gate segspt_shmattach(struct seg *seg, caddr_t *argsp) 16660Sstevel@tonic-gate { 16670Sstevel@tonic-gate struct shm_data *shmd_arg = (struct shm_data *)argsp; 16680Sstevel@tonic-gate struct shm_data *shmd; 16690Sstevel@tonic-gate struct anon_map *shm_amp = shmd_arg->shm_amp; 16700Sstevel@tonic-gate struct spt_data *sptd; 16710Sstevel@tonic-gate int error = 0; 16720Sstevel@tonic-gate 16730Sstevel@tonic-gate ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 16740Sstevel@tonic-gate 16750Sstevel@tonic-gate shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 16760Sstevel@tonic-gate if (shmd == NULL) 16770Sstevel@tonic-gate return (ENOMEM); 16780Sstevel@tonic-gate 16790Sstevel@tonic-gate shmd->shm_sptas = shmd_arg->shm_sptas; 16800Sstevel@tonic-gate shmd->shm_amp = shm_amp; 16810Sstevel@tonic-gate shmd->shm_sptseg = shmd_arg->shm_sptseg; 16820Sstevel@tonic-gate 16830Sstevel@tonic-gate (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 16840Sstevel@tonic-gate NULL, 0, seg->s_size); 16850Sstevel@tonic-gate 1686*6695Saguzovsk mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL); 1687*6695Saguzovsk 16880Sstevel@tonic-gate seg->s_data = (void *)shmd; 16890Sstevel@tonic-gate seg->s_ops = &segspt_shmops; 16900Sstevel@tonic-gate seg->s_szc = shmd->shm_sptseg->s_szc; 16910Sstevel@tonic-gate sptd = shmd->shm_sptseg->s_data; 16920Sstevel@tonic-gate 16930Sstevel@tonic-gate if (sptd->spt_flags & SHM_PAGEABLE) { 16940Sstevel@tonic-gate if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 16950Sstevel@tonic-gate KM_NOSLEEP)) == NULL) { 
16960Sstevel@tonic-gate seg->s_data = (void *)NULL; 16970Sstevel@tonic-gate kmem_free(shmd, (sizeof (*shmd))); 16980Sstevel@tonic-gate return (ENOMEM); 16990Sstevel@tonic-gate } 17000Sstevel@tonic-gate shmd->shm_lckpgs = 0; 17010Sstevel@tonic-gate if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 17020Sstevel@tonic-gate if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 17030Sstevel@tonic-gate shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 17040Sstevel@tonic-gate seg->s_size, seg->s_szc)) != 0) { 17050Sstevel@tonic-gate kmem_free(shmd->shm_vpage, 17065224Smec btopr(shm_amp->size)); 17070Sstevel@tonic-gate } 17080Sstevel@tonic-gate } 17090Sstevel@tonic-gate } else { 17100Sstevel@tonic-gate error = hat_share(seg->s_as->a_hat, seg->s_base, 17115224Smec shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 17125224Smec seg->s_size, seg->s_szc); 17130Sstevel@tonic-gate } 17140Sstevel@tonic-gate if (error) { 17150Sstevel@tonic-gate seg->s_szc = 0; 17160Sstevel@tonic-gate seg->s_data = (void *)NULL; 17170Sstevel@tonic-gate kmem_free(shmd, (sizeof (*shmd))); 17180Sstevel@tonic-gate } else { 17190Sstevel@tonic-gate ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 17200Sstevel@tonic-gate shm_amp->refcnt++; 17210Sstevel@tonic-gate ANON_LOCK_EXIT(&shm_amp->a_rwlock); 17220Sstevel@tonic-gate } 17230Sstevel@tonic-gate return (error); 17240Sstevel@tonic-gate } 17250Sstevel@tonic-gate 17260Sstevel@tonic-gate int 17270Sstevel@tonic-gate segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 17280Sstevel@tonic-gate { 17290Sstevel@tonic-gate struct shm_data *shmd = (struct shm_data *)seg->s_data; 17300Sstevel@tonic-gate int reclaim = 1; 17310Sstevel@tonic-gate 17320Sstevel@tonic-gate ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 17330Sstevel@tonic-gate retry: 17340Sstevel@tonic-gate if (shmd->shm_softlockcnt > 0) { 17350Sstevel@tonic-gate if (reclaim == 1) { 17360Sstevel@tonic-gate segspt_purge(seg); 17370Sstevel@tonic-gate reclaim = 0; 17380Sstevel@tonic-gate goto retry; 17390Sstevel@tonic-gate } 17400Sstevel@tonic-gate return (EAGAIN); 17410Sstevel@tonic-gate } 17420Sstevel@tonic-gate 17430Sstevel@tonic-gate if (ssize != seg->s_size) { 17440Sstevel@tonic-gate #ifdef DEBUG 17450Sstevel@tonic-gate cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 17460Sstevel@tonic-gate ssize, seg->s_size); 17470Sstevel@tonic-gate #endif 17480Sstevel@tonic-gate return (EINVAL); 17490Sstevel@tonic-gate } 17500Sstevel@tonic-gate 17510Sstevel@tonic-gate (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK, 17520Sstevel@tonic-gate NULL, 0); 17530Sstevel@tonic-gate hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc); 17540Sstevel@tonic-gate 17550Sstevel@tonic-gate seg_free(seg); 17560Sstevel@tonic-gate 17570Sstevel@tonic-gate return (0); 17580Sstevel@tonic-gate } 17590Sstevel@tonic-gate 17600Sstevel@tonic-gate void 17610Sstevel@tonic-gate segspt_shmfree(struct seg *seg) 17620Sstevel@tonic-gate { 17630Sstevel@tonic-gate struct shm_data *shmd = (struct shm_data *)seg->s_data; 17640Sstevel@tonic-gate struct anon_map *shm_amp = shmd->shm_amp; 17650Sstevel@tonic-gate 17660Sstevel@tonic-gate ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 17670Sstevel@tonic-gate 17680Sstevel@tonic-gate (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, 17695224Smec MC_UNLOCK, NULL, 0); 17700Sstevel@tonic-gate 17710Sstevel@tonic-gate /* 17720Sstevel@tonic-gate * Need to increment refcnt when attaching 17730Sstevel@tonic-gate * and decrement when detaching because of dup(). 
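	 *
	 * (E.g. fork(2): the child attaches via segspt_shmdup(), which
	 * takes an extra hold on the amp, and each detach drops one
	 * hold here.)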
17740Sstevel@tonic-gate */ 17750Sstevel@tonic-gate ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 17760Sstevel@tonic-gate shm_amp->refcnt--; 17770Sstevel@tonic-gate ANON_LOCK_EXIT(&shm_amp->a_rwlock); 17780Sstevel@tonic-gate 17790Sstevel@tonic-gate if (shmd->shm_vpage) { /* only for DISM */ 17800Sstevel@tonic-gate kmem_free(shmd->shm_vpage, btopr(shm_amp->size)); 17810Sstevel@tonic-gate shmd->shm_vpage = NULL; 17820Sstevel@tonic-gate } 1783*6695Saguzovsk 1784*6695Saguzovsk /* 1785*6695Saguzovsk * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's 1786*6695Saguzovsk * still working with this segment without holding as lock. 1787*6695Saguzovsk */ 1788*6695Saguzovsk ASSERT(shmd->shm_softlockcnt == 0); 1789*6695Saguzovsk mutex_enter(&shmd->shm_segfree_syncmtx); 1790*6695Saguzovsk mutex_destroy(&shmd->shm_segfree_syncmtx); 1791*6695Saguzovsk 17920Sstevel@tonic-gate kmem_free(shmd, sizeof (*shmd)); 17930Sstevel@tonic-gate } 17940Sstevel@tonic-gate 17950Sstevel@tonic-gate /*ARGSUSED*/ 17960Sstevel@tonic-gate int 17970Sstevel@tonic-gate segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 17980Sstevel@tonic-gate { 17990Sstevel@tonic-gate ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 18000Sstevel@tonic-gate 18010Sstevel@tonic-gate /* 18020Sstevel@tonic-gate * Shared page table is more than shared mapping. 18030Sstevel@tonic-gate * Individual process sharing page tables can't change prot 18040Sstevel@tonic-gate * because there is only one set of page tables. 18050Sstevel@tonic-gate * This will be allowed after private page table is 18060Sstevel@tonic-gate * supported. 18070Sstevel@tonic-gate */ 18080Sstevel@tonic-gate /* need to return correct status error? */ 18090Sstevel@tonic-gate return (0); 18100Sstevel@tonic-gate } 18110Sstevel@tonic-gate 18120Sstevel@tonic-gate 18130Sstevel@tonic-gate faultcode_t 18140Sstevel@tonic-gate segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, 18150Sstevel@tonic-gate size_t len, enum fault_type type, enum seg_rw rw) 18160Sstevel@tonic-gate { 18170Sstevel@tonic-gate struct shm_data *shmd = (struct shm_data *)seg->s_data; 18180Sstevel@tonic-gate struct seg *sptseg = shmd->shm_sptseg; 18190Sstevel@tonic-gate struct as *curspt = shmd->shm_sptas; 18200Sstevel@tonic-gate struct spt_data *sptd = sptseg->s_data; 18210Sstevel@tonic-gate pgcnt_t npages; 18222414Saguzovsk size_t size; 18230Sstevel@tonic-gate caddr_t segspt_addr, shm_addr; 18240Sstevel@tonic-gate page_t **ppa; 18250Sstevel@tonic-gate int i; 18260Sstevel@tonic-gate ulong_t an_idx = 0; 18270Sstevel@tonic-gate int err = 0; 1828721Smec int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0); 18292414Saguzovsk size_t pgsz; 18302414Saguzovsk pgcnt_t pgcnt; 18312414Saguzovsk caddr_t a; 18322414Saguzovsk pgcnt_t pidx; 18330Sstevel@tonic-gate 18340Sstevel@tonic-gate #ifdef lint 18350Sstevel@tonic-gate hat = hat; 18360Sstevel@tonic-gate #endif 18370Sstevel@tonic-gate ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 18380Sstevel@tonic-gate 18390Sstevel@tonic-gate /* 18400Sstevel@tonic-gate * Because of the way spt is implemented 18410Sstevel@tonic-gate * the realsize of the segment does not have to be 18420Sstevel@tonic-gate * equal to the segment size itself. The segment size is 18430Sstevel@tonic-gate * often in multiples of a page size larger than PAGESIZE. 18440Sstevel@tonic-gate * The realsize is rounded up to the nearest PAGESIZE 18450Sstevel@tonic-gate * based on what the user requested. 
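	 *
	 * A hypothetical example of the distinction: a user request of
	 * 6400 bytes gives an spt_realsize of one whole PAGESIZE page
	 * (8K on an 8K-page machine), while the segment size itself may
	 * be rounded all the way up to a large page boundary.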
	 * This is a bit of
18460Sstevel@tonic-gate 	 * ugliness that is historical but not easily fixed
18470Sstevel@tonic-gate 	 * without re-designing the higher levels of ISM.
18480Sstevel@tonic-gate 	 */
18490Sstevel@tonic-gate 	ASSERT(addr >= seg->s_base);
18500Sstevel@tonic-gate 	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
18510Sstevel@tonic-gate 		return (FC_NOMAP);
18520Sstevel@tonic-gate 	/*
18530Sstevel@tonic-gate 	 * For all of the following cases except F_PROT, we need to
18540Sstevel@tonic-gate 	 * make any necessary adjustments to addr and len
18550Sstevel@tonic-gate 	 * and get all of the necessary page_t's into an array called ppa[].
18560Sstevel@tonic-gate 	 *
18570Sstevel@tonic-gate 	 * The code in shmat() forces base addr and len of ISM segment
18580Sstevel@tonic-gate 	 * to be aligned to largest page size supported. Therefore,
18590Sstevel@tonic-gate 	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
18600Sstevel@tonic-gate 	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
18610Sstevel@tonic-gate 	 * in large pagesize chunks, or else we will screw up the HAT
18620Sstevel@tonic-gate 	 * layer by calling hat_memload_array() with differing page sizes
18630Sstevel@tonic-gate 	 * over a given virtual range.
18640Sstevel@tonic-gate 	 */
18652414Saguzovsk 	pgsz = page_get_pagesize(sptseg->s_szc);
18662414Saguzovsk 	pgcnt = page_get_pagecnt(sptseg->s_szc);
18672414Saguzovsk 	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
18682414Saguzovsk 	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
18690Sstevel@tonic-gate 	npages = btopr(size);
18700Sstevel@tonic-gate 
18710Sstevel@tonic-gate 	/*
18720Sstevel@tonic-gate 	 * Now we need to convert from addr in segshm to addr in segspt.
18730Sstevel@tonic-gate 	 */
18740Sstevel@tonic-gate 	an_idx = seg_page(seg, shm_addr);
18750Sstevel@tonic-gate 	segspt_addr = sptseg->s_base + ptob(an_idx);
18760Sstevel@tonic-gate 
18770Sstevel@tonic-gate 	ASSERT((segspt_addr + ptob(npages)) <=
18785224Smec 	    (sptseg->s_base + sptd->spt_realsize));
18790Sstevel@tonic-gate 	ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
18800Sstevel@tonic-gate 
18810Sstevel@tonic-gate 	switch (type) {
18820Sstevel@tonic-gate 
18830Sstevel@tonic-gate 	case F_SOFTLOCK:
18840Sstevel@tonic-gate 
18850Sstevel@tonic-gate 		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
18860Sstevel@tonic-gate 		/*
18870Sstevel@tonic-gate 		 * Fall through to the F_INVAL case to load up the hat layer
18880Sstevel@tonic-gate 		 * entries with the HAT_LOAD_LOCK flag.
18890Sstevel@tonic-gate */ 18900Sstevel@tonic-gate /* FALLTHRU */ 18910Sstevel@tonic-gate case F_INVAL: 18920Sstevel@tonic-gate 18930Sstevel@tonic-gate if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 18940Sstevel@tonic-gate return (FC_NOMAP); 18950Sstevel@tonic-gate 18960Sstevel@tonic-gate ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP); 18970Sstevel@tonic-gate 18980Sstevel@tonic-gate err = spt_anon_getpages(sptseg, segspt_addr, size, ppa); 18990Sstevel@tonic-gate if (err != 0) { 19000Sstevel@tonic-gate if (type == F_SOFTLOCK) { 19010Sstevel@tonic-gate atomic_add_long((ulong_t *)( 19020Sstevel@tonic-gate &(shmd->shm_softlockcnt)), -npages); 19030Sstevel@tonic-gate } 19040Sstevel@tonic-gate goto dism_err; 19050Sstevel@tonic-gate } 19060Sstevel@tonic-gate AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 19072414Saguzovsk a = segspt_addr; 19082414Saguzovsk pidx = 0; 19090Sstevel@tonic-gate if (type == F_SOFTLOCK) { 19100Sstevel@tonic-gate 19110Sstevel@tonic-gate /* 19120Sstevel@tonic-gate * Load up the translation keeping it 19130Sstevel@tonic-gate * locked and don't unlock the page. 19140Sstevel@tonic-gate */ 19152414Saguzovsk for (; pidx < npages; a += pgsz, pidx += pgcnt) { 19162414Saguzovsk hat_memload_array(sptseg->s_as->a_hat, 19172414Saguzovsk a, pgsz, &ppa[pidx], sptd->spt_prot, 19182414Saguzovsk HAT_LOAD_LOCK | HAT_LOAD_SHARE); 19192414Saguzovsk } 19200Sstevel@tonic-gate } else { 19210Sstevel@tonic-gate if (hat == seg->s_as->a_hat) { 19220Sstevel@tonic-gate 19230Sstevel@tonic-gate /* 19240Sstevel@tonic-gate * Migrate pages marked for migration 19250Sstevel@tonic-gate */ 19260Sstevel@tonic-gate if (lgrp_optimizations()) 19270Sstevel@tonic-gate page_migrate(seg, shm_addr, ppa, 19280Sstevel@tonic-gate npages); 19290Sstevel@tonic-gate 19300Sstevel@tonic-gate /* CPU HAT */ 19312414Saguzovsk for (; pidx < npages; 19322414Saguzovsk a += pgsz, pidx += pgcnt) { 19332414Saguzovsk hat_memload_array(sptseg->s_as->a_hat, 19342414Saguzovsk a, pgsz, &ppa[pidx], 19352414Saguzovsk sptd->spt_prot, 19362414Saguzovsk HAT_LOAD_SHARE); 19372414Saguzovsk } 19380Sstevel@tonic-gate } else { 19390Sstevel@tonic-gate /* XHAT. Pass real address */ 19400Sstevel@tonic-gate hat_memload_array(hat, shm_addr, 19410Sstevel@tonic-gate size, ppa, sptd->spt_prot, HAT_LOAD_SHARE); 19420Sstevel@tonic-gate } 19430Sstevel@tonic-gate 19440Sstevel@tonic-gate /* 19450Sstevel@tonic-gate * And now drop the SE_SHARED lock(s). 
19460Sstevel@tonic-gate */ 1947721Smec if (dyn_ism_unmap) { 1948721Smec for (i = 0; i < npages; i++) { 1949721Smec page_unlock(ppa[i]); 1950721Smec } 1951721Smec } 19520Sstevel@tonic-gate } 19530Sstevel@tonic-gate 1954721Smec if (!dyn_ism_unmap) { 19550Sstevel@tonic-gate if (hat_share(seg->s_as->a_hat, shm_addr, 19560Sstevel@tonic-gate curspt->a_hat, segspt_addr, ptob(npages), 19570Sstevel@tonic-gate seg->s_szc) != 0) { 19580Sstevel@tonic-gate panic("hat_share err in DISM fault"); 19590Sstevel@tonic-gate /* NOTREACHED */ 19600Sstevel@tonic-gate } 1961721Smec if (type == F_INVAL) { 1962721Smec for (i = 0; i < npages; i++) { 1963721Smec page_unlock(ppa[i]); 1964721Smec } 1965721Smec } 19660Sstevel@tonic-gate } 19670Sstevel@tonic-gate AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 19680Sstevel@tonic-gate dism_err: 19690Sstevel@tonic-gate kmem_free(ppa, npages * sizeof (page_t *)); 19700Sstevel@tonic-gate return (err); 19710Sstevel@tonic-gate 19720Sstevel@tonic-gate case F_SOFTUNLOCK: 19730Sstevel@tonic-gate 19740Sstevel@tonic-gate /* 19750Sstevel@tonic-gate * This is a bit ugly, we pass in the real seg pointer, 19760Sstevel@tonic-gate * but the segspt_addr is the virtual address within the 19770Sstevel@tonic-gate * dummy seg. 19780Sstevel@tonic-gate */ 19790Sstevel@tonic-gate segspt_softunlock(seg, segspt_addr, size, rw); 19800Sstevel@tonic-gate return (0); 19810Sstevel@tonic-gate 19820Sstevel@tonic-gate case F_PROT: 19830Sstevel@tonic-gate 19840Sstevel@tonic-gate /* 19850Sstevel@tonic-gate * This takes care of the unusual case where a user 19860Sstevel@tonic-gate * allocates a stack in shared memory and a register 19870Sstevel@tonic-gate * window overflow is written to that stack page before 19880Sstevel@tonic-gate * it is otherwise modified. 19890Sstevel@tonic-gate * 19900Sstevel@tonic-gate * We can get away with this because ISM segments are 19910Sstevel@tonic-gate * always rw. Other than this unusual case, there 19920Sstevel@tonic-gate * should be no instances of protection violations. 
19930Sstevel@tonic-gate 		 */
19940Sstevel@tonic-gate 		return (0);
19950Sstevel@tonic-gate 
19960Sstevel@tonic-gate 	default:
19970Sstevel@tonic-gate #ifdef DEBUG
19980Sstevel@tonic-gate 		panic("segspt_dismfault default type?");
19990Sstevel@tonic-gate #else
20000Sstevel@tonic-gate 		return (FC_NOMAP);
20010Sstevel@tonic-gate #endif
20020Sstevel@tonic-gate 	}
20030Sstevel@tonic-gate }
20040Sstevel@tonic-gate 
20050Sstevel@tonic-gate 
20060Sstevel@tonic-gate faultcode_t
20070Sstevel@tonic-gate segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
20080Sstevel@tonic-gate 	size_t len, enum fault_type type, enum seg_rw rw)
20090Sstevel@tonic-gate {
20100Sstevel@tonic-gate 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
20110Sstevel@tonic-gate 	struct seg *sptseg = shmd->shm_sptseg;
20120Sstevel@tonic-gate 	struct as *curspt = shmd->shm_sptas;
20130Sstevel@tonic-gate 	struct spt_data *sptd = sptseg->s_data;
20140Sstevel@tonic-gate 	pgcnt_t npages;
20152414Saguzovsk 	size_t size;
20160Sstevel@tonic-gate 	caddr_t sptseg_addr, shm_addr;
20170Sstevel@tonic-gate 	page_t *pp, **ppa;
20180Sstevel@tonic-gate 	int	i;
20190Sstevel@tonic-gate 	u_offset_t offset;
20200Sstevel@tonic-gate 	ulong_t anon_index = 0;
20210Sstevel@tonic-gate 	struct vnode *vp;
20220Sstevel@tonic-gate 	struct anon_map *amp;		/* XXX - for locknest */
20230Sstevel@tonic-gate 	struct anon *ap = NULL;
20242414Saguzovsk 	size_t		pgsz;
20252414Saguzovsk 	pgcnt_t		pgcnt;
20262414Saguzovsk 	caddr_t		a;
20272414Saguzovsk 	pgcnt_t		pidx;
20282414Saguzovsk 	size_t		sz;
20290Sstevel@tonic-gate 
20300Sstevel@tonic-gate #ifdef lint
20310Sstevel@tonic-gate 	hat = hat;
20320Sstevel@tonic-gate #endif
20330Sstevel@tonic-gate 
20340Sstevel@tonic-gate 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
20350Sstevel@tonic-gate 
20360Sstevel@tonic-gate 	if (sptd->spt_flags & SHM_PAGEABLE) {
20370Sstevel@tonic-gate 		return (segspt_dismfault(hat, seg, addr, len, type, rw));
20380Sstevel@tonic-gate 	}
20390Sstevel@tonic-gate 
20400Sstevel@tonic-gate 	/*
20410Sstevel@tonic-gate 	 * Because of the way spt is implemented
20420Sstevel@tonic-gate 	 * the realsize of the segment does not have to be
20430Sstevel@tonic-gate 	 * equal to the segment size itself. The segment size is
20440Sstevel@tonic-gate 	 * often in multiples of a page size larger than PAGESIZE.
20450Sstevel@tonic-gate 	 * The realsize is rounded up to the nearest PAGESIZE
20460Sstevel@tonic-gate 	 * based on what the user requested. This is a bit of
20470Sstevel@tonic-gate 	 * ugliness that is historical but not easily fixed
20480Sstevel@tonic-gate 	 * without re-designing the higher levels of ISM.
20490Sstevel@tonic-gate 	 */
20500Sstevel@tonic-gate 	ASSERT(addr >= seg->s_base);
20510Sstevel@tonic-gate 	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
20520Sstevel@tonic-gate 		return (FC_NOMAP);
20530Sstevel@tonic-gate 	/*
20540Sstevel@tonic-gate 	 * For all of the following cases except F_PROT, we need to
20550Sstevel@tonic-gate 	 * make any necessary adjustments to addr and len
20560Sstevel@tonic-gate 	 * and get all of the necessary page_t's into an array called ppa[].
20570Sstevel@tonic-gate 	 *
20580Sstevel@tonic-gate 	 * The code in shmat() forces base addr and len of ISM segment
20590Sstevel@tonic-gate 	 * to be aligned to largest page size supported. Therefore,
20600Sstevel@tonic-gate 	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
20610Sstevel@tonic-gate 	 * pagesize" chunks.
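	 *
	 * Worked example with illustrative numbers: for a 4M underlying
	 * page size, a fault of len 0x2000 at addr = seg->s_base + 0x5000
	 * computes
	 *
	 *	shm_addr = P2ALIGN(addr, 4M)			= s_base
	 *	size	 = P2ROUNDUP(addr + len - shm_addr, 4M)	= 4M
	 *	npages	 = btopr(size)				= 512
	 *
	 * (512 8K pages), so the whole large page is handled at once.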
	 * We want to make sure that we HAT_LOAD_LOCK
20620Sstevel@tonic-gate 	 * in large pagesize chunks, or else we will screw up the HAT
20630Sstevel@tonic-gate 	 * layer by calling hat_memload_array() with differing page sizes
20640Sstevel@tonic-gate 	 * over a given virtual range.
20650Sstevel@tonic-gate 	 */
20662414Saguzovsk 	pgsz = page_get_pagesize(sptseg->s_szc);
20672414Saguzovsk 	pgcnt = page_get_pagecnt(sptseg->s_szc);
20682414Saguzovsk 	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
20692414Saguzovsk 	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
20700Sstevel@tonic-gate 	npages = btopr(size);
20710Sstevel@tonic-gate 
20720Sstevel@tonic-gate 	/*
20730Sstevel@tonic-gate 	 * Now we need to convert from addr in segshm to addr in segspt.
20740Sstevel@tonic-gate 	 */
20750Sstevel@tonic-gate 	anon_index = seg_page(seg, shm_addr);
20760Sstevel@tonic-gate 	sptseg_addr = sptseg->s_base + ptob(anon_index);
20770Sstevel@tonic-gate 
20780Sstevel@tonic-gate 	/*
20790Sstevel@tonic-gate 	 * And now we may have to adjust npages downward if we have
20800Sstevel@tonic-gate 	 * exceeded the realsize of the segment or initial anon
20810Sstevel@tonic-gate 	 * allocations.
20820Sstevel@tonic-gate 	 */
20830Sstevel@tonic-gate 	if ((sptseg_addr + ptob(npages)) >
20840Sstevel@tonic-gate 	    (sptseg->s_base + sptd->spt_realsize))
20850Sstevel@tonic-gate 		size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
20860Sstevel@tonic-gate 
20870Sstevel@tonic-gate 	npages = btopr(size);
20880Sstevel@tonic-gate 
20890Sstevel@tonic-gate 	ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
20900Sstevel@tonic-gate 	ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
20910Sstevel@tonic-gate 
20920Sstevel@tonic-gate 	switch (type) {
20930Sstevel@tonic-gate 
20940Sstevel@tonic-gate 	case F_SOFTLOCK:
20950Sstevel@tonic-gate 
20960Sstevel@tonic-gate 		/*
20970Sstevel@tonic-gate 		 * availrmem is decremented once during anon_swap_adjust()
20980Sstevel@tonic-gate 		 * and is incremented during the anon_unresv(), which is
20990Sstevel@tonic-gate 		 * called from shm_rm_amp() when the segment is destroyed.
21000Sstevel@tonic-gate 		 */
21010Sstevel@tonic-gate 		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
21020Sstevel@tonic-gate 		/*
21030Sstevel@tonic-gate 		 * Some platforms assume that ISM pages are SE_SHARED
21040Sstevel@tonic-gate 		 * locked for the entire life of the segment.
21050Sstevel@tonic-gate 		 */
21060Sstevel@tonic-gate 		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
21070Sstevel@tonic-gate 			return (0);
21080Sstevel@tonic-gate 		/*
21090Sstevel@tonic-gate 		 * Fall through to the F_INVAL case to load up the hat layer
21100Sstevel@tonic-gate 		 * entries with the HAT_LOAD_LOCK flag.
21110Sstevel@tonic-gate 		 */
21120Sstevel@tonic-gate 
21130Sstevel@tonic-gate 		/* FALLTHRU */
21140Sstevel@tonic-gate 	case F_INVAL:
21150Sstevel@tonic-gate 
21160Sstevel@tonic-gate 		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
21170Sstevel@tonic-gate 			return (FC_NOMAP);
21180Sstevel@tonic-gate 
21190Sstevel@tonic-gate 		/*
21200Sstevel@tonic-gate 		 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
21210Sstevel@tonic-gate 		 * may still rely on this call to hat_share(). That
21220Sstevel@tonic-gate 		 * would imply that those hats can fault on a
21230Sstevel@tonic-gate 		 * HAT_LOAD_LOCK translation, which would seem
21240Sstevel@tonic-gate 		 * contradictory.
21250Sstevel@tonic-gate 		 */
21260Sstevel@tonic-gate 		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
21270Sstevel@tonic-gate 			if (hat_share(seg->s_as->a_hat, seg->s_base,
21280Sstevel@tonic-gate 			    curspt->a_hat, sptseg->s_base,
21290Sstevel@tonic-gate 			    sptseg->s_size, sptseg->s_szc) != 0) {
21300Sstevel@tonic-gate 				panic("hat_share error in ISM fault");
21310Sstevel@tonic-gate 				/*NOTREACHED*/
21320Sstevel@tonic-gate 			}
21330Sstevel@tonic-gate 			return (0);
21340Sstevel@tonic-gate 		}
21350Sstevel@tonic-gate 		ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
21360Sstevel@tonic-gate 
21370Sstevel@tonic-gate 		/*
21380Sstevel@tonic-gate 		 * I see no need to lock the real seg here,
21390Sstevel@tonic-gate 		 * because all of our work will be on the underlying
21400Sstevel@tonic-gate 		 * dummy seg.
21410Sstevel@tonic-gate 		 *
21420Sstevel@tonic-gate 		 * sptseg_addr and npages now account for large pages.
21430Sstevel@tonic-gate 		 */
21440Sstevel@tonic-gate 		amp = sptd->spt_amp;
21450Sstevel@tonic-gate 		ASSERT(amp != NULL);
21460Sstevel@tonic-gate 		anon_index = seg_page(sptseg, sptseg_addr);
21470Sstevel@tonic-gate 
21480Sstevel@tonic-gate 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
21490Sstevel@tonic-gate 		for (i = 0; i < npages; i++) {
21500Sstevel@tonic-gate 			ap = anon_get_ptr(amp->ahp, anon_index++);
21510Sstevel@tonic-gate 			ASSERT(ap != NULL);
21520Sstevel@tonic-gate 			swap_xlate(ap, &vp, &offset);
21530Sstevel@tonic-gate 			pp = page_lookup(vp, offset, SE_SHARED);
21540Sstevel@tonic-gate 			ASSERT(pp != NULL);
21550Sstevel@tonic-gate 			ppa[i] = pp;
21560Sstevel@tonic-gate 		}
21570Sstevel@tonic-gate 		ANON_LOCK_EXIT(&amp->a_rwlock);
21580Sstevel@tonic-gate 		ASSERT(i == npages);
21590Sstevel@tonic-gate 
21600Sstevel@tonic-gate 		/*
21610Sstevel@tonic-gate 		 * We are already holding the as->a_lock on the user's
21620Sstevel@tonic-gate 		 * real segment, but we need to hold the a_lock on the
21630Sstevel@tonic-gate 		 * underlying dummy as. This is mostly to satisfy the
21640Sstevel@tonic-gate 		 * underlying HAT layer.
21650Sstevel@tonic-gate 		 */
21660Sstevel@tonic-gate 		AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
21672414Saguzovsk 		a = sptseg_addr;
21682414Saguzovsk 		pidx = 0;
21690Sstevel@tonic-gate 		if (type == F_SOFTLOCK) {
21700Sstevel@tonic-gate 			/*
21710Sstevel@tonic-gate 			 * Load up the translation keeping it
21720Sstevel@tonic-gate 			 * locked and don't unlock the page.
21730Sstevel@tonic-gate 			 */
21742414Saguzovsk 			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
21752414Saguzovsk 				sz = MIN(pgsz, ptob(npages - pidx));
21762414Saguzovsk 				hat_memload_array(sptseg->s_as->a_hat, a,
21772414Saguzovsk 				    sz, &ppa[pidx], sptd->spt_prot,
21782414Saguzovsk 				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
21792414Saguzovsk 			}
21800Sstevel@tonic-gate 		} else {
21810Sstevel@tonic-gate 			if (hat == seg->s_as->a_hat) {
21820Sstevel@tonic-gate 
21830Sstevel@tonic-gate 				/*
21840Sstevel@tonic-gate 				 * Migrate pages marked for migration.
21850Sstevel@tonic-gate 				 */
21860Sstevel@tonic-gate 				if (lgrp_optimizations())
21870Sstevel@tonic-gate 					page_migrate(seg, shm_addr, ppa,
21880Sstevel@tonic-gate 					    npages);
21890Sstevel@tonic-gate 
21900Sstevel@tonic-gate 				/* CPU HAT */
21912414Saguzovsk 				for (; pidx < npages;
21922414Saguzovsk 				    a += pgsz, pidx += pgcnt) {
21932414Saguzovsk 					sz = MIN(pgsz, ptob(npages - pidx));
21942414Saguzovsk 					hat_memload_array(sptseg->s_as->a_hat,
21952414Saguzovsk 					    a, sz, &ppa[pidx],
21962414Saguzovsk 					    sptd->spt_prot, HAT_LOAD_SHARE);
21972414Saguzovsk 				}
21980Sstevel@tonic-gate 			} else {
21990Sstevel@tonic-gate 				/* XHAT.
Pass real address */ 22000Sstevel@tonic-gate hat_memload_array(hat, shm_addr, 22010Sstevel@tonic-gate ptob(npages), ppa, sptd->spt_prot, 22020Sstevel@tonic-gate HAT_LOAD_SHARE); 22030Sstevel@tonic-gate } 22040Sstevel@tonic-gate 22050Sstevel@tonic-gate /* 22060Sstevel@tonic-gate * And now drop the SE_SHARED lock(s). 22070Sstevel@tonic-gate */ 22080Sstevel@tonic-gate for (i = 0; i < npages; i++) 22090Sstevel@tonic-gate page_unlock(ppa[i]); 22100Sstevel@tonic-gate } 22110Sstevel@tonic-gate AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 22120Sstevel@tonic-gate 22130Sstevel@tonic-gate kmem_free(ppa, sizeof (page_t *) * npages); 22140Sstevel@tonic-gate return (0); 22150Sstevel@tonic-gate case F_SOFTUNLOCK: 22160Sstevel@tonic-gate 22170Sstevel@tonic-gate /* 22180Sstevel@tonic-gate * This is a bit ugly, we pass in the real seg pointer, 22190Sstevel@tonic-gate * but the sptseg_addr is the virtual address within the 22200Sstevel@tonic-gate * dummy seg. 22210Sstevel@tonic-gate */ 22220Sstevel@tonic-gate segspt_softunlock(seg, sptseg_addr, ptob(npages), rw); 22230Sstevel@tonic-gate return (0); 22240Sstevel@tonic-gate 22250Sstevel@tonic-gate case F_PROT: 22260Sstevel@tonic-gate 22270Sstevel@tonic-gate /* 22280Sstevel@tonic-gate * This takes care of the unusual case where a user 22290Sstevel@tonic-gate * allocates a stack in shared memory and a register 22300Sstevel@tonic-gate * window overflow is written to that stack page before 22310Sstevel@tonic-gate * it is otherwise modified. 22320Sstevel@tonic-gate * 22330Sstevel@tonic-gate * We can get away with this because ISM segments are 22340Sstevel@tonic-gate * always rw. Other than this unusual case, there 22350Sstevel@tonic-gate * should be no instances of protection violations. 22360Sstevel@tonic-gate */ 22370Sstevel@tonic-gate return (0); 22380Sstevel@tonic-gate 22390Sstevel@tonic-gate default: 22400Sstevel@tonic-gate #ifdef DEBUG 22410Sstevel@tonic-gate cmn_err(CE_WARN, "segspt_shmfault default type?"); 22420Sstevel@tonic-gate #endif 22430Sstevel@tonic-gate return (FC_NOMAP); 22440Sstevel@tonic-gate } 22450Sstevel@tonic-gate } 22460Sstevel@tonic-gate 22470Sstevel@tonic-gate /*ARGSUSED*/ 22480Sstevel@tonic-gate static faultcode_t 22490Sstevel@tonic-gate segspt_shmfaulta(struct seg *seg, caddr_t addr) 22500Sstevel@tonic-gate { 22510Sstevel@tonic-gate return (0); 22520Sstevel@tonic-gate } 22530Sstevel@tonic-gate 22540Sstevel@tonic-gate /*ARGSUSED*/ 22550Sstevel@tonic-gate static int 22560Sstevel@tonic-gate segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta) 22570Sstevel@tonic-gate { 22580Sstevel@tonic-gate return (0); 22590Sstevel@tonic-gate } 22600Sstevel@tonic-gate 22610Sstevel@tonic-gate /*ARGSUSED*/ 22620Sstevel@tonic-gate static size_t 22630Sstevel@tonic-gate segspt_shmswapout(struct seg *seg) 22640Sstevel@tonic-gate { 22650Sstevel@tonic-gate return (0); 22660Sstevel@tonic-gate } 22670Sstevel@tonic-gate 22680Sstevel@tonic-gate /* 22690Sstevel@tonic-gate * duplicate the shared page tables 22700Sstevel@tonic-gate */ 22710Sstevel@tonic-gate int 22720Sstevel@tonic-gate segspt_shmdup(struct seg *seg, struct seg *newseg) 22730Sstevel@tonic-gate { 22740Sstevel@tonic-gate struct shm_data *shmd = (struct shm_data *)seg->s_data; 22750Sstevel@tonic-gate struct anon_map *amp = shmd->shm_amp; 22760Sstevel@tonic-gate struct shm_data *shmd_new; 22770Sstevel@tonic-gate struct seg *spt_seg = shmd->shm_sptseg; 22780Sstevel@tonic-gate struct spt_data *sptd = spt_seg->s_data; 2279721Smec int error = 0; 22800Sstevel@tonic-gate 22810Sstevel@tonic-gate 
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
22820Sstevel@tonic-gate 
22830Sstevel@tonic-gate 	shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
22840Sstevel@tonic-gate 	newseg->s_data = (void *)shmd_new;
22850Sstevel@tonic-gate 	shmd_new->shm_sptas = shmd->shm_sptas;
22860Sstevel@tonic-gate 	shmd_new->shm_amp = amp;
22870Sstevel@tonic-gate 	shmd_new->shm_sptseg = shmd->shm_sptseg;
22880Sstevel@tonic-gate 	newseg->s_ops = &segspt_shmops;
22890Sstevel@tonic-gate 	newseg->s_szc = seg->s_szc;
22900Sstevel@tonic-gate 	ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
22910Sstevel@tonic-gate 
22920Sstevel@tonic-gate 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
22930Sstevel@tonic-gate 	amp->refcnt++;
22940Sstevel@tonic-gate 	ANON_LOCK_EXIT(&amp->a_rwlock);
22950Sstevel@tonic-gate 
22960Sstevel@tonic-gate 	if (sptd->spt_flags & SHM_PAGEABLE) {
22970Sstevel@tonic-gate 		shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
22980Sstevel@tonic-gate 		shmd_new->shm_lckpgs = 0;
2299721Smec 		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2300721Smec 			if ((error = hat_share(newseg->s_as->a_hat,
2301721Smec 			    newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2302721Smec 			    seg->s_size, seg->s_szc)) != 0) {
2303721Smec 				kmem_free(shmd_new->shm_vpage,
23045224Smec 				    btopr(amp->size));
2305721Smec 			}
2306721Smec 		}
2307721Smec 		return (error);
2308721Smec 	} else {
2309721Smec 		return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2310721Smec 		    shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2311721Smec 		    seg->s_szc));
2312721Smec 
23130Sstevel@tonic-gate 	}
23140Sstevel@tonic-gate }
23150Sstevel@tonic-gate 
23160Sstevel@tonic-gate /*ARGSUSED*/
23170Sstevel@tonic-gate int
23180Sstevel@tonic-gate segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
23190Sstevel@tonic-gate {
23200Sstevel@tonic-gate 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
23210Sstevel@tonic-gate 	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
23220Sstevel@tonic-gate 
23230Sstevel@tonic-gate 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
23240Sstevel@tonic-gate 
23250Sstevel@tonic-gate 	/*
23260Sstevel@tonic-gate 	 * ISM segment is always rw.
23270Sstevel@tonic-gate 	 */
23280Sstevel@tonic-gate 	return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
23290Sstevel@tonic-gate }
23300Sstevel@tonic-gate 
23310Sstevel@tonic-gate /*
23320Sstevel@tonic-gate  * Return an array of locked large pages; for empty slots, allocate
23330Sstevel@tonic-gate  * private zero-filled anon pages.
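 *
 * Typical call, as made from segspt_dismfault() above (shown informally,
 * not as a definitive interface contract):
 *
 *	ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
 *	err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
 *
 * where segspt_addr and size are already aligned to the underlying
 * sptseg's large page size, as the alignment ASSERT below requires.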
23340Sstevel@tonic-gate  */
23350Sstevel@tonic-gate static int
23360Sstevel@tonic-gate spt_anon_getpages(
23370Sstevel@tonic-gate 	struct seg *sptseg,
23380Sstevel@tonic-gate 	caddr_t sptaddr,
23390Sstevel@tonic-gate 	size_t len,
23400Sstevel@tonic-gate 	page_t *ppa[])
23410Sstevel@tonic-gate {
23420Sstevel@tonic-gate 	struct spt_data *sptd = sptseg->s_data;
23430Sstevel@tonic-gate 	struct anon_map *amp = sptd->spt_amp;
23440Sstevel@tonic-gate 	enum seg_rw rw = sptd->spt_prot;
23450Sstevel@tonic-gate 	uint_t szc = sptseg->s_szc;
23460Sstevel@tonic-gate 	size_t pg_sz, share_sz = page_get_pagesize(szc);
23470Sstevel@tonic-gate 	pgcnt_t lp_npgs;
23480Sstevel@tonic-gate 	caddr_t lp_addr, e_sptaddr;
23490Sstevel@tonic-gate 	uint_t vpprot, ppa_szc = 0;
23500Sstevel@tonic-gate 	struct vpage *vpage = NULL;
23510Sstevel@tonic-gate 	ulong_t j, ppa_idx;
23520Sstevel@tonic-gate 	int err, ierr = 0;
23530Sstevel@tonic-gate 	pgcnt_t an_idx;
23540Sstevel@tonic-gate 	anon_sync_obj_t cookie;
23555224Smec 	int anon_locked = 0;
23565224Smec 	pgcnt_t amp_pgs;
23575224Smec 
23580Sstevel@tonic-gate 
23590Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
23600Sstevel@tonic-gate 	ASSERT(len != 0);
23610Sstevel@tonic-gate 
23620Sstevel@tonic-gate 	pg_sz = share_sz;
23630Sstevel@tonic-gate 	lp_npgs = btop(pg_sz);
23640Sstevel@tonic-gate 	lp_addr = sptaddr;
23650Sstevel@tonic-gate 	e_sptaddr = sptaddr + len;
23660Sstevel@tonic-gate 	an_idx = seg_page(sptseg, sptaddr);
23670Sstevel@tonic-gate 	ppa_idx = 0;
23680Sstevel@tonic-gate 
23690Sstevel@tonic-gate 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
23705224Smec 
23715224Smec 	amp_pgs = page_get_pagecnt(amp->a_szc);
23725224Smec 
23730Sstevel@tonic-gate 	/*CONSTCOND*/
23740Sstevel@tonic-gate 	while (1) {
23750Sstevel@tonic-gate 		for (; lp_addr < e_sptaddr;
23765224Smec 		    an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
23770Sstevel@tonic-gate 
23785224Smec 			/*
23795224Smec 			 * If we're currently locked, and we get to a new
23805224Smec 			 * page, unlock our current anon chunk.
23815224Smec 			 */
23825224Smec 			if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
23835224Smec 				anon_array_exit(&cookie);
23845224Smec 				anon_locked = 0;
23855224Smec 			}
23865224Smec 			if (!anon_locked) {
23875224Smec 				anon_array_enter(amp, an_idx, &cookie);
23885224Smec 				anon_locked = 1;
23895224Smec 			}
23900Sstevel@tonic-gate 			ppa_szc = (uint_t)-1;
23910Sstevel@tonic-gate 			ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
23920Sstevel@tonic-gate 			    lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
23934426Saguzovsk 			    &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
23940Sstevel@tonic-gate 
23950Sstevel@tonic-gate 			if (ierr != 0) {
23960Sstevel@tonic-gate 				if (ierr > 0) {
23970Sstevel@tonic-gate 					err = FC_MAKE_ERR(ierr);
23980Sstevel@tonic-gate 					goto lpgs_err;
23990Sstevel@tonic-gate 				}
24000Sstevel@tonic-gate 				break;
24010Sstevel@tonic-gate 			}
24020Sstevel@tonic-gate 		}
24030Sstevel@tonic-gate 		if (lp_addr == e_sptaddr) {
24040Sstevel@tonic-gate 			break;
24050Sstevel@tonic-gate 		}
24060Sstevel@tonic-gate 		ASSERT(lp_addr < e_sptaddr);
24070Sstevel@tonic-gate 
24080Sstevel@tonic-gate 		/*
24090Sstevel@tonic-gate 		 * ierr == -1 means we failed to allocate a large page.
24100Sstevel@tonic-gate 		 * So do a size-down operation.
24110Sstevel@tonic-gate 		 *
24120Sstevel@tonic-gate 		 * ierr == -2 means some other process that privately shares
24130Sstevel@tonic-gate 		 * pages with this process has allocated a larger page and we
24140Sstevel@tonic-gate 		 * need to retry with larger pages. So do a size-up
24150Sstevel@tonic-gate 		 * operation.
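		 *
		 * For example (illustrative values): with segvn_anypgsz
		 * set and szc == 2, an ierr of -1 retries the loop with
		 * szc == 1, while an ierr of -2 retries with szc == 3,
		 * never stepping above the sptseg's own s_szc.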
		/*
		 * ierr == -1 means we failed to allocate a large page,
		 * so do a size-down operation.
		 *
		 * ierr == -2 means some other process that privately shares
		 * pages with this process has allocated a larger page and we
		 * need to retry with larger pages.  So do a size-up
		 * operation.  This relies on the fact that large pages are
		 * never partially shared, i.e. if we share any constituent
		 * page of a large page with another process we must share
		 * the entire large page.  Note this cannot happen for the
		 * SOFTLOCK case, unless the current address (lp_addr) is at
		 * the beginning of the next page size boundary, because the
		 * other process couldn't have relocated locked pages.
		 */
		ASSERT(ierr == -1 || ierr == -2);
		if (segvn_anypgsz) {
			ASSERT(ierr == -2 || szc != 0);
			ASSERT(ierr == -1 || szc < sptseg->s_szc);
			szc = (ierr == -1) ? szc - 1 : szc + 1;
		} else {
			/*
			 * For faults and segvn_anypgsz == 0
			 * we need to be careful not to loop forever
			 * if existing page is found with szc other
			 * than 0 or seg->s_szc. This could be due
			 * to page relocations on behalf of DR or
			 * more likely large page creation. For this
			 * case simply re-size to existing page's szc
			 * if returned by anon_map_getpages().
			 */
			if (ppa_szc == (uint_t)-1) {
				szc = (ierr == -1) ? 0 : sptseg->s_szc;
			} else {
				ASSERT(ppa_szc <= sptseg->s_szc);
				ASSERT(ierr == -2 || ppa_szc < szc);
				ASSERT(ierr == -1 || ppa_szc > szc);
				szc = ppa_szc;
			}
		}
		pg_sz = page_get_pagesize(szc);
		lp_npgs = btop(pg_sz);
		ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
	}
	if (anon_locked) {
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	return (0);

lpgs_err:
	if (anon_locked) {
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	for (j = 0; j < ppa_idx; j++)
		page_unlock(ppa[j]);
	return (err);
}

/*
 * Count the number of bytes in a set of spt pages that are currently not
 * locked.
 */
static rctl_qty_t
spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
{
	ulong_t i;
	rctl_qty_t unlocked = 0;

	for (i = 0; i < npages; i++) {
		if (ppa[i]->p_lckcnt == 0)
			unlocked += PAGESIZE;
	}
	return (unlocked);
}
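/*
 * Lock the pages named by ppa[] into memory, updating the per-segment
 * DISM bookkeeping: shm_vpage[] gets DISM_PG_LOCKED, spt_ppa_lckcnt[]
 * counts lock requests per page (capped at DISM_LOCK_MAX), and *locked
 * returns the number of bytes newly locked so the caller can settle the
 * locked-memory rctl.  Fails with EAGAIN if page_pp_lock() cannot lock
 * a page.
 */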
int
spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    page_t **ppa, ulong_t *lockmap, size_t pos,
    rctl_qty_t *locked)
{
	struct shm_data *shmd = seg->s_data;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	ulong_t i;
	int kernel;

	/* return the number of bytes actually locked */
	*locked = 0;
	for (i = 0; i < npages; anon_index++, pos++, i++) {
		if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
			if (sptd->spt_ppa_lckcnt[anon_index] <
			    (ushort_t)DISM_LOCK_MAX) {
				if (++sptd->spt_ppa_lckcnt[anon_index] ==
				    (ushort_t)DISM_LOCK_MAX) {
					cmn_err(CE_WARN,
					    "DISM page lock limit "
					    "reached on DISM offset 0x%lx\n",
					    anon_index << PAGESHIFT);
				}
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[anon_index]) ? 1 : 0;
				if (!page_pp_lock(ppa[i], 0, kernel)) {
					sptd->spt_ppa_lckcnt[anon_index]--;
					return (EAGAIN);
				}
				/* if this is a newly locked page, count it */
				if (ppa[i]->p_lckcnt == 1) {
					*locked += PAGESIZE;
				}
				shmd->shm_lckpgs++;
				shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
				if (lockmap != NULL)
					BT_SET(lockmap, pos);
			}
		}
	}
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
{
	struct shm_data *shmd = seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	struct kshmid *sp = sptd->spt_amp->a_sp;
	pgcnt_t npages, a_npages;
	page_t **ppa;
	pgcnt_t an_idx, a_an_idx, ppa_idx;
	caddr_t spt_addr, a_addr;	/* spt and aligned address */
	size_t a_len;			/* aligned len */
	size_t share_sz;
	ulong_t i;
	int sts = 0;
	rctl_qty_t unlocked = 0;
	rctl_qty_t locked = 0;
	struct proc *p = curproc;
	kproject_t *proj;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(sp != NULL);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		return (0);
	}

	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	an_idx = seg_page(seg, addr);
	npages = btopr(len);

	if (an_idx + npages > btopr(shmd->shm_amp->size)) {
		return (ENOMEM);
	}
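	/*
	 * This entry point is typically reached via memcntl(2) with
	 * MC_LOCK/MC_UNLOCK (e.g. mlock(3C)/munlock(3C) on a DISM
	 * mapping); plain ISM pages are locked at creation, which is
	 * why the !SHM_PAGEABLE case above returns immediately.
	 */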
	/*
	 * A shm's project never changes, so no lock needed.
	 * The shm has a hold on the project, so it will not go away.
	 * Since we have a mapping to shm within this zone, we know
	 * that the zone will not go away.
	 */
	proj = sp->shm_perm.ipc_proj;

	if (op == MC_LOCK) {

		/*
		 * Need to align the addr and size request if they are not
		 * aligned so we can always allocate large page(s); however,
		 * we only lock what was requested in the initial request.
		 */
		share_sz = page_get_pagesize(sptseg->s_szc);
		a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
		a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
		    share_sz);
		a_npages = btop(a_len);
		a_an_idx = seg_page(seg, a_addr);
		spt_addr = sptseg->s_base + ptob(a_an_idx);
		ppa_idx = an_idx - a_an_idx;

		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
		    KM_NOSLEEP)) == NULL) {
			return (ENOMEM);
		}

		/*
		 * Don't cache any new pages for IO and
		 * flush any cached pages.
		 */
		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
		if (sts != 0) {
			mutex_exit(&sptd->spt_lock);
			kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
			return (sts);
		}

		mutex_enter(&sp->shm_mlock);
		/* enforce locked memory rctl */
		unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);

		mutex_enter(&p->p_lock);
		if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
			mutex_exit(&p->p_lock);
			sts = EAGAIN;
		} else {
			mutex_exit(&p->p_lock);
			sts = spt_lockpages(seg, an_idx, npages,
			    &ppa[ppa_idx], lockmap, pos, &locked);

			/*
			 * correct locked count if not all pages could be
			 * locked
			 */
			if ((unlocked - locked) > 0) {
				rctl_decr_locked_mem(NULL, proj,
				    (unlocked - locked), 0);
			}
		}
		/*
		 * unlock pages
		 */
		for (i = 0; i < a_npages; i++)
			page_unlock(ppa[i]);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sp->shm_mlock);
		mutex_exit(&sptd->spt_lock);

		kmem_free(ppa, ((sizeof (page_t *)) * a_npages));

	} else if (op == MC_UNLOCK) { /* unlock */
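		/*
		 * Walk the locked pages, drop each p_lckcnt hold taken in
		 * spt_lockpages(), return any fully unlocked bytes to the
		 * project's locked-memory rctl, and finally purge the
		 * cached page array so seg_pcache re-evaluates the segment.
		 */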
		struct anon_map *amp;
		struct anon *ap;
		struct vnode *vp;
		u_offset_t off;
		struct page *pp;
		int kernel;
		anon_sync_obj_t cookie;
		rctl_qty_t unlocked = 0;
		page_t **ppa;

		amp = sptd->spt_amp;
		mutex_enter(&sptd->spt_lock);
		if (shmd->shm_lckpgs == 0) {
			mutex_exit(&sptd->spt_lock);
			return (0);
		}
		/*
		 * Don't cache new IO pages.
		 */
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		mutex_enter(&sp->shm_mlock);
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, an_idx++) {
			if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) {
				anon_array_enter(amp, an_idx, &cookie);
				ap = anon_get_ptr(amp->ahp, an_idx);
				ASSERT(ap);
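				/*
				 * Translate the anon slot to its backing
				 * (vnode, offset) pair so the page can be
				 * found with page_lookup() below.
				 */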
				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup(vp, off, SE_SHARED);
				ASSERT(pp);
				/*
				 * availrmem is decremented only for
				 * pages which are not in the seg pcache;
				 * for pages in the seg pcache, availrmem
				 * was decremented in _dismpagelock()
				 * (if they were not locked here).
				 */
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[an_idx]) ? 1 : 0;
				ASSERT(pp->p_lckcnt > 0);
				page_pp_unlock(pp, 0, kernel);
				if (pp->p_lckcnt == 0)
					unlocked += PAGESIZE;
				page_unlock(pp);
				shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED;
				sptd->spt_ppa_lckcnt[an_idx]--;
				shmd->shm_lckpgs--;
			}
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		if ((ppa = sptd->spt_ppa) != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		rctl_decr_locked_mem(NULL, proj, unlocked, 0);
		mutex_exit(&sp->shm_mlock);

		if (ppa != NULL)
			seg_ppurge_wiredpp(ppa);
	}
	return (sts);
}

/*ARGSUSED*/
int
segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	while (--pgno >= 0)
		*protv++ = sptd->spt_prot;
	return (0);
}

/*ARGSUSED*/
u_offset_t
segspt_shmgetoffset(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/* Offset does not matter in ISM memory */

	return ((u_offset_t)0);
}
/* ARGSUSED */
int
segspt_shmgettype(struct seg *seg, caddr_t addr)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * The shared memory mapping is always MAP_SHARED; swap is only
	 * reserved for DISM.
	 */
	return (MAP_SHARED |
	    ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
}

/*ARGSUSED*/
int
segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	*vpp = sptd->spt_vp;
	return (0);
}

/*
 * We need to wait for pending IO to complete to a DISM segment in order for
 * pages to get kicked out of the seg_pcache.  120 seconds should be more
 * than enough time to wait.
 */
static clock_t spt_pcache_wait = 120;

/*ARGSUSED*/
static int
segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	struct anon_map *amp;
	pgcnt_t pg_idx;
	ushort_t gen;
	clock_t end_lbolt;
	int writer;
	page_t **ppa;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if (behav == MADV_FREE) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
			return (0);

		amp = sptd->spt_amp;
		pg_idx = seg_page(seg, addr);

		mutex_enter(&sptd->spt_lock);
		if ((ppa = sptd->spt_ppa) == NULL) {
			mutex_exit(&sptd->spt_lock);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			anon_disclaim(amp, pg_idx, len);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (0);
		}

		sptd->spt_flags |= DISM_PPA_CHANGED;
		gen = sptd->spt_gen;

		mutex_exit(&sptd->spt_lock);

		/*
		 * Purge all DISM cached pages
		 */
		seg_ppurge_wiredpp(ppa);

		/*
		 * Drop the AS_LOCK so that other threads can grab it
		 * in the as_pageunlock path and hopefully get the segment
		 * kicked out of the seg_pcache.  We bump the shm_softlockcnt
		 * to keep this segment resident.
		 */
		writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
		AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);

		mutex_enter(&sptd->spt_lock);
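		/*
		 * lbolt counts clock ticks since boot and hz is ticks per
		 * second, so the deadline computed below falls
		 * spt_pcache_wait (120) seconds from now.
		 */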
		end_lbolt = lbolt + (hz * spt_pcache_wait);

		/*
		 * Try to wait for pages to get kicked out of the seg_pcache.
		 */
		while (sptd->spt_gen == gen &&
		    (sptd->spt_flags & DISM_PPA_CHANGED) &&
		    lbolt < end_lbolt) {
			if (!cv_timedwait_sig(&sptd->spt_cv,
			    &sptd->spt_lock, end_lbolt)) {
				break;
			}
		}

		mutex_exit(&sptd->spt_lock);

		/* Regrab the AS_LOCK and release our hold on the segment */
		AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
		    writer ? RW_WRITER : RW_READER);
		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
		if (shmd->shm_softlockcnt <= 0) {
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				mutex_enter(&seg->s_as->a_contents);
				if (AS_ISUNMAPWAIT(seg->s_as)) {
					AS_CLRUNMAPWAIT(seg->s_as);
					cv_broadcast(&seg->s_as->a_cv);
				}
				mutex_exit(&seg->s_as->a_contents);
			}
		}

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		anon_disclaim(amp, pg_idx, len);
		ANON_LOCK_EXIT(&amp->a_rwlock);
	} else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
	    behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
		int already_set;
		ulong_t anon_index;
		lgrp_mem_policy_t policy;
		caddr_t shm_addr;
		size_t share_size;
		size_t size;
		struct seg *sptseg = shmd->shm_sptseg;
		caddr_t sptseg_addr;

		/*
		 * Align address and length to page size of underlying segment
		 */
		share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
		shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
		size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
		    share_size);

		amp = shmd->shm_amp;
		anon_index = seg_page(seg, shm_addr);

		/*
		 * And now we may have to adjust size downward if we have
		 * exceeded the realsize of the segment or initial anon
		 * allocations.
		 */
		sptseg_addr = sptseg->s_base + ptob(anon_index);
		if ((sptseg_addr + size) >
		    (sptseg->s_base + sptd->spt_realsize))
			size = (sptseg->s_base + sptd->spt_realsize) -
			    sptseg_addr;
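		/*
		 * MADV_ACCESS_LWP advises that the next LWP to touch this
		 * range will access it heavily, MADV_ACCESS_MANY that many
		 * processes or LWPs will, and MADV_ACCESS_DEFAULT resets
		 * to the default placement; lgrp_madv_to_policy() below
		 * translates these into lgroup memory allocation policies.
		 */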
		/*
		 * Set memory allocation policy for this segment
		 */
		policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
		already_set = lgrp_shm_policy_set(policy, amp, anon_index,
		    NULL, 0, len);

		/*
		 * If this memory allocation policy is already set and is
		 * not one that must be reapplied, don't bother reapplying
		 * it.
		 */
		if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
			return (0);

		/*
		 * Mark any existing pages in the given range for
		 * migration, flushing the I/O page cache and using the
		 * underlying segment to calculate the anon index and to
		 * obtain the anonmap and vnode pointer.
		 */
		if (shmd->shm_softlockcnt > 0)
			segspt_purge(seg);

		page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
	}

	return (0);
}

/*ARGSUSED*/
void
segspt_shmdump(struct seg *seg)
{
	/* no-op for ISM segment */
}

/*ARGSUSED*/
static faultcode_t
segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

/*
 * Get a memory ID for an addr in a given segment
 */
static int
segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon *ap;
	size_t anon_index;
	struct anon_map *amp = shmd->shm_amp;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	anon_sync_obj_t cookie;

	anon_index = seg_page(seg, addr);

	if (addr > (seg->s_base + sptd->spt_realsize)) {
		return (EFAULT);
	}

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	anon_array_enter(amp, anon_index, &cookie);
	ap = anon_get_ptr(amp->ahp, anon_index);
	if (ap == NULL) {
		struct page *pp;
		caddr_t spt_addr = sptseg->s_base + ptob(anon_index);

		pp = anon_zero(sptseg, spt_addr, &ap, kcred);
		if (pp == NULL) {
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (ENOMEM);
		}
		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
		page_unlock(pp);
	}
	anon_array_exit(&cookie);
	ANON_LOCK_EXIT(&amp->a_rwlock);
	memidp->val[0] = (uintptr_t)ap;
	memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
	return (0);
}
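/*
 * The (val[0], val[1]) pair above identifies the backing anon slot plus
 * the page offset; consumers such as meminfo(2) use this memory ID to
 * look up physical memory properties for the address.
 */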
/*
 * Get memory allocation policy info for the specified address in the
 * given segment
 */
static lgrp_mem_policy_info_t *
segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map *amp;
	ulong_t anon_index;
	lgrp_mem_policy_info_t *policy_info;
	struct shm_data *shm_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segshm.
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count, which must be
	 * nonzero for an existing segment.  We do need to grab the
	 * readers lock on the policy tree, though.
	 */
	shm_data = (struct shm_data *)seg->s_data;
	if (shm_data == NULL)
		return (NULL);
	amp = shm_data->shm_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
}

/*ARGSUSED*/
static int
segspt_shmcapable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
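/*
 * segspt_shmcapable() unconditionally returns 0: shared page table
 * segments advertise none of the optional capabilities a caller might
 * probe for through the segment capable() entry point.
 */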