1*0Sstevel@tonic-gate /* 2*0Sstevel@tonic-gate * CDDL HEADER START 3*0Sstevel@tonic-gate * 4*0Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5*0Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 6*0Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 7*0Sstevel@tonic-gate * with the License. 8*0Sstevel@tonic-gate * 9*0Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10*0Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 11*0Sstevel@tonic-gate * See the License for the specific language governing permissions 12*0Sstevel@tonic-gate * and limitations under the License. 13*0Sstevel@tonic-gate * 14*0Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 15*0Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16*0Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 17*0Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 18*0Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 19*0Sstevel@tonic-gate * 20*0Sstevel@tonic-gate * CDDL HEADER END 21*0Sstevel@tonic-gate */ 22*0Sstevel@tonic-gate /* 23*0Sstevel@tonic-gate * Copyright 2004 Sun Microsystems, Inc. All rights reserved. 24*0Sstevel@tonic-gate * Use is subject to license terms. 25*0Sstevel@tonic-gate */ 26*0Sstevel@tonic-gate 27*0Sstevel@tonic-gate /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 28*0Sstevel@tonic-gate /* All Rights Reserved */ 29*0Sstevel@tonic-gate 30*0Sstevel@tonic-gate /* 31*0Sstevel@tonic-gate * Portions of this source code were derived from Berkeley 4.3 BSD 32*0Sstevel@tonic-gate * under license from the Regents of the University of California. 33*0Sstevel@tonic-gate */ 34*0Sstevel@tonic-gate 35*0Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 36*0Sstevel@tonic-gate 37*0Sstevel@tonic-gate /* 38*0Sstevel@tonic-gate * segkp is a segment driver that administers the allocation and deallocation 39*0Sstevel@tonic-gate * of pageable variable size chunks of kernel virtual address space. Each 40*0Sstevel@tonic-gate * allocated resource is page-aligned. 41*0Sstevel@tonic-gate * 42*0Sstevel@tonic-gate * The user may specify whether the resource should be initialized to 0, 43*0Sstevel@tonic-gate * include a redzone, or locked in memory. 44*0Sstevel@tonic-gate */ 45*0Sstevel@tonic-gate 46*0Sstevel@tonic-gate #include <sys/types.h> 47*0Sstevel@tonic-gate #include <sys/t_lock.h> 48*0Sstevel@tonic-gate #include <sys/thread.h> 49*0Sstevel@tonic-gate #include <sys/param.h> 50*0Sstevel@tonic-gate #include <sys/errno.h> 51*0Sstevel@tonic-gate #include <sys/sysmacros.h> 52*0Sstevel@tonic-gate #include <sys/systm.h> 53*0Sstevel@tonic-gate #include <sys/buf.h> 54*0Sstevel@tonic-gate #include <sys/mman.h> 55*0Sstevel@tonic-gate #include <sys/vnode.h> 56*0Sstevel@tonic-gate #include <sys/cmn_err.h> 57*0Sstevel@tonic-gate #include <sys/swap.h> 58*0Sstevel@tonic-gate #include <sys/tuneable.h> 59*0Sstevel@tonic-gate #include <sys/kmem.h> 60*0Sstevel@tonic-gate #include <sys/vmem.h> 61*0Sstevel@tonic-gate #include <sys/cred.h> 62*0Sstevel@tonic-gate #include <sys/dumphdr.h> 63*0Sstevel@tonic-gate #include <sys/debug.h> 64*0Sstevel@tonic-gate #include <sys/vtrace.h> 65*0Sstevel@tonic-gate #include <sys/stack.h> 66*0Sstevel@tonic-gate #include <sys/atomic.h> 67*0Sstevel@tonic-gate #include <sys/archsystm.h> 68*0Sstevel@tonic-gate #include <sys/lgrp.h> 69*0Sstevel@tonic-gate 70*0Sstevel@tonic-gate #include <vm/as.h> 71*0Sstevel@tonic-gate #include <vm/seg.h> 72*0Sstevel@tonic-gate #include <vm/seg_kp.h> 73*0Sstevel@tonic-gate #include <vm/seg_kmem.h> 74*0Sstevel@tonic-gate #include <vm/anon.h> 75*0Sstevel@tonic-gate #include <vm/page.h> 76*0Sstevel@tonic-gate #include <vm/hat.h> 77*0Sstevel@tonic-gate #include <sys/bitmap.h> 78*0Sstevel@tonic-gate 79*0Sstevel@tonic-gate /* 80*0Sstevel@tonic-gate * Private seg op routines 81*0Sstevel@tonic-gate */ 82*0Sstevel@tonic-gate static void segkp_badop(void); 83*0Sstevel@tonic-gate static void segkp_dump(struct seg *seg); 84*0Sstevel@tonic-gate static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len, 85*0Sstevel@tonic-gate uint_t prot); 86*0Sstevel@tonic-gate static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta); 87*0Sstevel@tonic-gate static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len, 88*0Sstevel@tonic-gate struct page ***page, enum lock_type type, 89*0Sstevel@tonic-gate enum seg_rw rw); 90*0Sstevel@tonic-gate static void segkp_insert(struct seg *seg, struct segkp_data *kpd); 91*0Sstevel@tonic-gate static void segkp_delete(struct seg *seg, struct segkp_data *kpd); 92*0Sstevel@tonic-gate static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags, 93*0Sstevel@tonic-gate struct segkp_data **tkpd, struct anon_map *amp); 94*0Sstevel@tonic-gate static void segkp_release_internal(struct seg *seg, 95*0Sstevel@tonic-gate struct segkp_data *kpd, size_t len); 96*0Sstevel@tonic-gate static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr, 97*0Sstevel@tonic-gate size_t len, struct segkp_data *kpd, uint_t flags); 98*0Sstevel@tonic-gate static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr, 99*0Sstevel@tonic-gate size_t len, struct segkp_data *kpd, uint_t flags); 100*0Sstevel@tonic-gate static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr); 101*0Sstevel@tonic-gate static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp); 102*0Sstevel@tonic-gate static lgrp_mem_policy_info_t *segkp_getpolicy(struct seg *seg, 103*0Sstevel@tonic-gate caddr_t addr); 104*0Sstevel@tonic-gate 105*0Sstevel@tonic-gate /* 106*0Sstevel@tonic-gate * Lock used to protect the hash table(s) and caches. 107*0Sstevel@tonic-gate */ 108*0Sstevel@tonic-gate static kmutex_t segkp_lock; 109*0Sstevel@tonic-gate 110*0Sstevel@tonic-gate /* 111*0Sstevel@tonic-gate * The segkp caches 112*0Sstevel@tonic-gate */ 113*0Sstevel@tonic-gate static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE]; 114*0Sstevel@tonic-gate 115*0Sstevel@tonic-gate #define SEGKP_BADOP(t) (t(*)())segkp_badop 116*0Sstevel@tonic-gate 117*0Sstevel@tonic-gate /* 118*0Sstevel@tonic-gate * When there are fewer than red_minavail bytes left on the stack, 119*0Sstevel@tonic-gate * segkp_map_red() will map in the redzone (if called). 5000 seems 120*0Sstevel@tonic-gate * to work reasonably well... 121*0Sstevel@tonic-gate */ 122*0Sstevel@tonic-gate long red_minavail = 5000; 123*0Sstevel@tonic-gate 124*0Sstevel@tonic-gate /* 125*0Sstevel@tonic-gate * will be set to 1 for 32 bit x86 systems only, in startup.c 126*0Sstevel@tonic-gate */ 127*0Sstevel@tonic-gate int segkp_fromheap = 0; 128*0Sstevel@tonic-gate ulong_t *segkp_bitmap; 129*0Sstevel@tonic-gate 130*0Sstevel@tonic-gate /* 131*0Sstevel@tonic-gate * If segkp_map_red() is called with the redzone already mapped and 132*0Sstevel@tonic-gate * with less than RED_DEEP_THRESHOLD bytes available on the stack, 133*0Sstevel@tonic-gate * then the stack situation has become quite serious; if much more stack 134*0Sstevel@tonic-gate * is consumed, we have the potential of scrogging the next thread/LWP 135*0Sstevel@tonic-gate * structure. To help debug the "can't happen" panics which may 136*0Sstevel@tonic-gate * result from this condition, we record lbolt and the calling thread 137*0Sstevel@tonic-gate * in red_deep_lbolt and red_deep_thread respectively. 138*0Sstevel@tonic-gate */ 139*0Sstevel@tonic-gate #define RED_DEEP_THRESHOLD 2000 140*0Sstevel@tonic-gate 141*0Sstevel@tonic-gate clock_t red_deep_lbolt; 142*0Sstevel@tonic-gate kthread_t *red_deep_thread; 143*0Sstevel@tonic-gate 144*0Sstevel@tonic-gate uint32_t red_nmapped; 145*0Sstevel@tonic-gate uint32_t red_closest = UINT_MAX; 146*0Sstevel@tonic-gate uint32_t red_ndoubles; 147*0Sstevel@tonic-gate 148*0Sstevel@tonic-gate pgcnt_t anon_segkp_pages_locked; /* See vm/anon.h */ 149*0Sstevel@tonic-gate 150*0Sstevel@tonic-gate static struct seg_ops segkp_ops = { 151*0Sstevel@tonic-gate SEGKP_BADOP(int), /* dup */ 152*0Sstevel@tonic-gate SEGKP_BADOP(int), /* unmap */ 153*0Sstevel@tonic-gate SEGKP_BADOP(void), /* free */ 154*0Sstevel@tonic-gate segkp_fault, 155*0Sstevel@tonic-gate SEGKP_BADOP(faultcode_t), /* faulta */ 156*0Sstevel@tonic-gate SEGKP_BADOP(int), /* setprot */ 157*0Sstevel@tonic-gate segkp_checkprot, 158*0Sstevel@tonic-gate segkp_kluster, 159*0Sstevel@tonic-gate SEGKP_BADOP(size_t), /* swapout */ 160*0Sstevel@tonic-gate SEGKP_BADOP(int), /* sync */ 161*0Sstevel@tonic-gate SEGKP_BADOP(size_t), /* incore */ 162*0Sstevel@tonic-gate SEGKP_BADOP(int), /* lockop */ 163*0Sstevel@tonic-gate SEGKP_BADOP(int), /* getprot */ 164*0Sstevel@tonic-gate SEGKP_BADOP(u_offset_t), /* getoffset */ 165*0Sstevel@tonic-gate SEGKP_BADOP(int), /* gettype */ 166*0Sstevel@tonic-gate SEGKP_BADOP(int), /* getvp */ 167*0Sstevel@tonic-gate SEGKP_BADOP(int), /* advise */ 168*0Sstevel@tonic-gate segkp_dump, /* dump */ 169*0Sstevel@tonic-gate segkp_pagelock, /* pagelock */ 170*0Sstevel@tonic-gate SEGKP_BADOP(int), /* setpgsz */ 171*0Sstevel@tonic-gate segkp_getmemid, /* getmemid */ 172*0Sstevel@tonic-gate segkp_getpolicy, /* getpolicy */ 173*0Sstevel@tonic-gate }; 174*0Sstevel@tonic-gate 175*0Sstevel@tonic-gate 176*0Sstevel@tonic-gate static void 177*0Sstevel@tonic-gate segkp_badop(void) 178*0Sstevel@tonic-gate { 179*0Sstevel@tonic-gate panic("segkp_badop"); 180*0Sstevel@tonic-gate /*NOTREACHED*/ 181*0Sstevel@tonic-gate } 182*0Sstevel@tonic-gate 183*0Sstevel@tonic-gate static void segkpinit_mem_config(struct seg *); 184*0Sstevel@tonic-gate 185*0Sstevel@tonic-gate static uint32_t segkp_indel; 186*0Sstevel@tonic-gate 187*0Sstevel@tonic-gate /* 188*0Sstevel@tonic-gate * Allocate the segment specific private data struct and fill it in 189*0Sstevel@tonic-gate * with the per kp segment mutex, anon ptr. array and hash table. 190*0Sstevel@tonic-gate */ 191*0Sstevel@tonic-gate int 192*0Sstevel@tonic-gate segkp_create(struct seg *seg) 193*0Sstevel@tonic-gate { 194*0Sstevel@tonic-gate struct segkp_segdata *kpsd; 195*0Sstevel@tonic-gate size_t np; 196*0Sstevel@tonic-gate 197*0Sstevel@tonic-gate ASSERT(seg != NULL && seg->s_as == &kas); 198*0Sstevel@tonic-gate ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock)); 199*0Sstevel@tonic-gate 200*0Sstevel@tonic-gate if (seg->s_size & PAGEOFFSET) { 201*0Sstevel@tonic-gate panic("Bad segkp size"); 202*0Sstevel@tonic-gate /*NOTREACHED*/ 203*0Sstevel@tonic-gate } 204*0Sstevel@tonic-gate 205*0Sstevel@tonic-gate kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP); 206*0Sstevel@tonic-gate 207*0Sstevel@tonic-gate /* 208*0Sstevel@tonic-gate * Allocate the virtual memory for segkp and initialize it 209*0Sstevel@tonic-gate */ 210*0Sstevel@tonic-gate if (segkp_fromheap) { 211*0Sstevel@tonic-gate np = btop(kvseg.s_size); 212*0Sstevel@tonic-gate segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP); 213*0Sstevel@tonic-gate kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE, 214*0Sstevel@tonic-gate vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP); 215*0Sstevel@tonic-gate } else { 216*0Sstevel@tonic-gate segkp_bitmap = NULL; 217*0Sstevel@tonic-gate np = btop(seg->s_size); 218*0Sstevel@tonic-gate kpsd->kpsd_arena = vmem_create("segkp", seg->s_base, 219*0Sstevel@tonic-gate seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE, 220*0Sstevel@tonic-gate VM_SLEEP); 221*0Sstevel@tonic-gate } 222*0Sstevel@tonic-gate 223*0Sstevel@tonic-gate kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE); 224*0Sstevel@tonic-gate 225*0Sstevel@tonic-gate kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *), 226*0Sstevel@tonic-gate KM_SLEEP); 227*0Sstevel@tonic-gate seg->s_data = (void *)kpsd; 228*0Sstevel@tonic-gate seg->s_ops = &segkp_ops; 229*0Sstevel@tonic-gate segkpinit_mem_config(seg); 230*0Sstevel@tonic-gate return (0); 231*0Sstevel@tonic-gate } 232*0Sstevel@tonic-gate 233*0Sstevel@tonic-gate 234*0Sstevel@tonic-gate /* 235*0Sstevel@tonic-gate * Find a free 'freelist' and initialize it with the appropriate attributes 236*0Sstevel@tonic-gate */ 237*0Sstevel@tonic-gate void * 238*0Sstevel@tonic-gate segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags) 239*0Sstevel@tonic-gate { 240*0Sstevel@tonic-gate int i; 241*0Sstevel@tonic-gate 242*0Sstevel@tonic-gate if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED)) 243*0Sstevel@tonic-gate return ((void *)-1); 244*0Sstevel@tonic-gate 245*0Sstevel@tonic-gate mutex_enter(&segkp_lock); 246*0Sstevel@tonic-gate for (i = 0; i < SEGKP_MAX_CACHE; i++) { 247*0Sstevel@tonic-gate if (segkp_cache[i].kpf_inuse) 248*0Sstevel@tonic-gate continue; 249*0Sstevel@tonic-gate segkp_cache[i].kpf_inuse = 1; 250*0Sstevel@tonic-gate segkp_cache[i].kpf_max = maxsize; 251*0Sstevel@tonic-gate segkp_cache[i].kpf_flags = flags; 252*0Sstevel@tonic-gate segkp_cache[i].kpf_seg = seg; 253*0Sstevel@tonic-gate segkp_cache[i].kpf_len = len; 254*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 255*0Sstevel@tonic-gate return ((void *)(uintptr_t)i); 256*0Sstevel@tonic-gate } 257*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 258*0Sstevel@tonic-gate return ((void *)-1); 259*0Sstevel@tonic-gate } 260*0Sstevel@tonic-gate 261*0Sstevel@tonic-gate /* 262*0Sstevel@tonic-gate * Free all the cache resources. 263*0Sstevel@tonic-gate */ 264*0Sstevel@tonic-gate void 265*0Sstevel@tonic-gate segkp_cache_free(void) 266*0Sstevel@tonic-gate { 267*0Sstevel@tonic-gate struct segkp_data *kpd; 268*0Sstevel@tonic-gate struct seg *seg; 269*0Sstevel@tonic-gate int i; 270*0Sstevel@tonic-gate 271*0Sstevel@tonic-gate mutex_enter(&segkp_lock); 272*0Sstevel@tonic-gate for (i = 0; i < SEGKP_MAX_CACHE; i++) { 273*0Sstevel@tonic-gate if (!segkp_cache[i].kpf_inuse) 274*0Sstevel@tonic-gate continue; 275*0Sstevel@tonic-gate /* 276*0Sstevel@tonic-gate * Disconnect the freelist and process each element 277*0Sstevel@tonic-gate */ 278*0Sstevel@tonic-gate kpd = segkp_cache[i].kpf_list; 279*0Sstevel@tonic-gate seg = segkp_cache[i].kpf_seg; 280*0Sstevel@tonic-gate segkp_cache[i].kpf_list = NULL; 281*0Sstevel@tonic-gate segkp_cache[i].kpf_count = 0; 282*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 283*0Sstevel@tonic-gate 284*0Sstevel@tonic-gate while (kpd != NULL) { 285*0Sstevel@tonic-gate struct segkp_data *next; 286*0Sstevel@tonic-gate 287*0Sstevel@tonic-gate next = kpd->kp_next; 288*0Sstevel@tonic-gate segkp_release_internal(seg, kpd, kpd->kp_len); 289*0Sstevel@tonic-gate kpd = next; 290*0Sstevel@tonic-gate } 291*0Sstevel@tonic-gate mutex_enter(&segkp_lock); 292*0Sstevel@tonic-gate } 293*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 294*0Sstevel@tonic-gate } 295*0Sstevel@tonic-gate 296*0Sstevel@tonic-gate /* 297*0Sstevel@tonic-gate * There are 2 entries into segkp_get_internal. The first includes a cookie 298*0Sstevel@tonic-gate * used to access a pool of cached segkp resources. The second does not 299*0Sstevel@tonic-gate * use the cache. 300*0Sstevel@tonic-gate */ 301*0Sstevel@tonic-gate caddr_t 302*0Sstevel@tonic-gate segkp_get(struct seg *seg, size_t len, uint_t flags) 303*0Sstevel@tonic-gate { 304*0Sstevel@tonic-gate struct segkp_data *kpd = NULL; 305*0Sstevel@tonic-gate 306*0Sstevel@tonic-gate if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) { 307*0Sstevel@tonic-gate kpd->kp_cookie = -1; 308*0Sstevel@tonic-gate return (stom(kpd->kp_base, flags)); 309*0Sstevel@tonic-gate } 310*0Sstevel@tonic-gate return (NULL); 311*0Sstevel@tonic-gate } 312*0Sstevel@tonic-gate 313*0Sstevel@tonic-gate /* 314*0Sstevel@tonic-gate * Return a 'cached' segkp address 315*0Sstevel@tonic-gate */ 316*0Sstevel@tonic-gate caddr_t 317*0Sstevel@tonic-gate segkp_cache_get(void *cookie) 318*0Sstevel@tonic-gate { 319*0Sstevel@tonic-gate struct segkp_cache *freelist = NULL; 320*0Sstevel@tonic-gate struct segkp_data *kpd = NULL; 321*0Sstevel@tonic-gate int index = (int)(uintptr_t)cookie; 322*0Sstevel@tonic-gate struct seg *seg; 323*0Sstevel@tonic-gate size_t len; 324*0Sstevel@tonic-gate uint_t flags; 325*0Sstevel@tonic-gate 326*0Sstevel@tonic-gate if (index < 0 || index >= SEGKP_MAX_CACHE) 327*0Sstevel@tonic-gate return (NULL); 328*0Sstevel@tonic-gate freelist = &segkp_cache[index]; 329*0Sstevel@tonic-gate 330*0Sstevel@tonic-gate mutex_enter(&segkp_lock); 331*0Sstevel@tonic-gate seg = freelist->kpf_seg; 332*0Sstevel@tonic-gate flags = freelist->kpf_flags; 333*0Sstevel@tonic-gate if (freelist->kpf_list != NULL) { 334*0Sstevel@tonic-gate kpd = freelist->kpf_list; 335*0Sstevel@tonic-gate freelist->kpf_list = kpd->kp_next; 336*0Sstevel@tonic-gate freelist->kpf_count--; 337*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 338*0Sstevel@tonic-gate kpd->kp_next = NULL; 339*0Sstevel@tonic-gate segkp_insert(seg, kpd); 340*0Sstevel@tonic-gate return (stom(kpd->kp_base, flags)); 341*0Sstevel@tonic-gate } 342*0Sstevel@tonic-gate len = freelist->kpf_len; 343*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 344*0Sstevel@tonic-gate if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) { 345*0Sstevel@tonic-gate kpd->kp_cookie = index; 346*0Sstevel@tonic-gate return (stom(kpd->kp_base, flags)); 347*0Sstevel@tonic-gate } 348*0Sstevel@tonic-gate return (NULL); 349*0Sstevel@tonic-gate } 350*0Sstevel@tonic-gate 351*0Sstevel@tonic-gate caddr_t 352*0Sstevel@tonic-gate segkp_get_withanonmap( 353*0Sstevel@tonic-gate struct seg *seg, 354*0Sstevel@tonic-gate size_t len, 355*0Sstevel@tonic-gate uint_t flags, 356*0Sstevel@tonic-gate struct anon_map *amp) 357*0Sstevel@tonic-gate { 358*0Sstevel@tonic-gate struct segkp_data *kpd = NULL; 359*0Sstevel@tonic-gate 360*0Sstevel@tonic-gate ASSERT(amp != NULL); 361*0Sstevel@tonic-gate flags |= KPD_HASAMP; 362*0Sstevel@tonic-gate if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) { 363*0Sstevel@tonic-gate kpd->kp_cookie = -1; 364*0Sstevel@tonic-gate return (stom(kpd->kp_base, flags)); 365*0Sstevel@tonic-gate } 366*0Sstevel@tonic-gate return (NULL); 367*0Sstevel@tonic-gate } 368*0Sstevel@tonic-gate 369*0Sstevel@tonic-gate /* 370*0Sstevel@tonic-gate * This does the real work of segkp allocation. 371*0Sstevel@tonic-gate * Return to client base addr. len must be page-aligned. A null value is 372*0Sstevel@tonic-gate * returned if there are no more vm resources (e.g. pages, swap). The len 373*0Sstevel@tonic-gate * and base recorded in the private data structure include the redzone 374*0Sstevel@tonic-gate * and the redzone length (if applicable). If the user requests a redzone 375*0Sstevel@tonic-gate * either the first or last page is left unmapped depending whether stacks 376*0Sstevel@tonic-gate * grow to low or high memory. 377*0Sstevel@tonic-gate * 378*0Sstevel@tonic-gate * The client may also specify a no-wait flag. If that is set then the 379*0Sstevel@tonic-gate * request will choose a non-blocking path when requesting resources. 380*0Sstevel@tonic-gate * The default is make the client wait. 381*0Sstevel@tonic-gate */ 382*0Sstevel@tonic-gate static caddr_t 383*0Sstevel@tonic-gate segkp_get_internal( 384*0Sstevel@tonic-gate struct seg *seg, 385*0Sstevel@tonic-gate size_t len, 386*0Sstevel@tonic-gate uint_t flags, 387*0Sstevel@tonic-gate struct segkp_data **tkpd, 388*0Sstevel@tonic-gate struct anon_map *amp) 389*0Sstevel@tonic-gate { 390*0Sstevel@tonic-gate struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data; 391*0Sstevel@tonic-gate struct segkp_data *kpd; 392*0Sstevel@tonic-gate caddr_t vbase = NULL; /* always first virtual, may not be mapped */ 393*0Sstevel@tonic-gate pgcnt_t np = 0; /* number of pages in the resource */ 394*0Sstevel@tonic-gate pgcnt_t segkpindex; 395*0Sstevel@tonic-gate long i; 396*0Sstevel@tonic-gate caddr_t va; 397*0Sstevel@tonic-gate pgcnt_t pages = 0; 398*0Sstevel@tonic-gate ulong_t anon_idx = 0; 399*0Sstevel@tonic-gate int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP; 400*0Sstevel@tonic-gate caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base; 401*0Sstevel@tonic-gate 402*0Sstevel@tonic-gate if (len & PAGEOFFSET) { 403*0Sstevel@tonic-gate panic("segkp_get: len is not page-aligned"); 404*0Sstevel@tonic-gate /*NOTREACHED*/ 405*0Sstevel@tonic-gate } 406*0Sstevel@tonic-gate 407*0Sstevel@tonic-gate ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL)); 408*0Sstevel@tonic-gate 409*0Sstevel@tonic-gate /* Only allow KPD_NO_ANON if we are going to lock it down */ 410*0Sstevel@tonic-gate if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON) 411*0Sstevel@tonic-gate return (NULL); 412*0Sstevel@tonic-gate 413*0Sstevel@tonic-gate if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL) 414*0Sstevel@tonic-gate return (NULL); 415*0Sstevel@tonic-gate /* 416*0Sstevel@tonic-gate * Fix up the len to reflect the REDZONE if applicable 417*0Sstevel@tonic-gate */ 418*0Sstevel@tonic-gate if (flags & KPD_HASREDZONE) 419*0Sstevel@tonic-gate len += PAGESIZE; 420*0Sstevel@tonic-gate np = btop(len); 421*0Sstevel@tonic-gate 422*0Sstevel@tonic-gate vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT); 423*0Sstevel@tonic-gate if (vbase == NULL) { 424*0Sstevel@tonic-gate kmem_free(kpd, sizeof (struct segkp_data)); 425*0Sstevel@tonic-gate return (NULL); 426*0Sstevel@tonic-gate } 427*0Sstevel@tonic-gate 428*0Sstevel@tonic-gate /* If locking, reserve physical memory */ 429*0Sstevel@tonic-gate if (flags & KPD_LOCKED) { 430*0Sstevel@tonic-gate pages = btop(SEGKP_MAPLEN(len, flags)); 431*0Sstevel@tonic-gate if (page_resv(pages, kmflag) == 0) { 432*0Sstevel@tonic-gate vmem_free(SEGKP_VMEM(seg), vbase, len); 433*0Sstevel@tonic-gate kmem_free(kpd, sizeof (struct segkp_data)); 434*0Sstevel@tonic-gate return (NULL); 435*0Sstevel@tonic-gate } 436*0Sstevel@tonic-gate if ((flags & KPD_NO_ANON) == 0) 437*0Sstevel@tonic-gate atomic_add_long(&anon_segkp_pages_locked, pages); 438*0Sstevel@tonic-gate } 439*0Sstevel@tonic-gate 440*0Sstevel@tonic-gate /* 441*0Sstevel@tonic-gate * Reserve sufficient swap space for this vm resource. We'll 442*0Sstevel@tonic-gate * actually allocate it in the loop below, but reserving it 443*0Sstevel@tonic-gate * here allows us to back out more gracefully than if we 444*0Sstevel@tonic-gate * had an allocation failure in the body of the loop. 445*0Sstevel@tonic-gate * 446*0Sstevel@tonic-gate * Note that we don't need swap space for the red zone page. 447*0Sstevel@tonic-gate */ 448*0Sstevel@tonic-gate if (amp != NULL) { 449*0Sstevel@tonic-gate ASSERT((flags & KPD_NO_ANON) == 0); 450*0Sstevel@tonic-gate /* The reserve has been done and the anon_hdr is separate. */ 451*0Sstevel@tonic-gate anon_idx = 0; 452*0Sstevel@tonic-gate kpd->kp_anon_idx = anon_idx; 453*0Sstevel@tonic-gate kpd->kp_anon = amp->ahp; 454*0Sstevel@tonic-gate 455*0Sstevel@tonic-gate TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u", 456*0Sstevel@tonic-gate kpd, vbase, len, flags, 1); 457*0Sstevel@tonic-gate 458*0Sstevel@tonic-gate } else if ((flags & KPD_NO_ANON) == 0) { 459*0Sstevel@tonic-gate if (anon_resv(SEGKP_MAPLEN(len, flags)) == 0) { 460*0Sstevel@tonic-gate if (flags & KPD_LOCKED) { 461*0Sstevel@tonic-gate atomic_add_long(&anon_segkp_pages_locked, 462*0Sstevel@tonic-gate -pages); 463*0Sstevel@tonic-gate page_unresv(pages); 464*0Sstevel@tonic-gate } 465*0Sstevel@tonic-gate vmem_free(SEGKP_VMEM(seg), vbase, len); 466*0Sstevel@tonic-gate kmem_free(kpd, sizeof (struct segkp_data)); 467*0Sstevel@tonic-gate return (NULL); 468*0Sstevel@tonic-gate } 469*0Sstevel@tonic-gate anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT; 470*0Sstevel@tonic-gate kpd->kp_anon_idx = anon_idx; 471*0Sstevel@tonic-gate kpd->kp_anon = kpsd->kpsd_anon; 472*0Sstevel@tonic-gate 473*0Sstevel@tonic-gate TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u", 474*0Sstevel@tonic-gate kpd, vbase, len, flags, 1); 475*0Sstevel@tonic-gate } else { 476*0Sstevel@tonic-gate kpd->kp_anon = NULL; 477*0Sstevel@tonic-gate kpd->kp_anon_idx = 0; 478*0Sstevel@tonic-gate } 479*0Sstevel@tonic-gate 480*0Sstevel@tonic-gate /* 481*0Sstevel@tonic-gate * Allocate page and anon resources for the virtual address range 482*0Sstevel@tonic-gate * except the redzone 483*0Sstevel@tonic-gate */ 484*0Sstevel@tonic-gate if (segkp_fromheap) 485*0Sstevel@tonic-gate segkpindex = btop((uintptr_t)(vbase - kvseg.s_base)); 486*0Sstevel@tonic-gate for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) { 487*0Sstevel@tonic-gate page_t *pl[2]; 488*0Sstevel@tonic-gate struct vnode *vp; 489*0Sstevel@tonic-gate anoff_t off; 490*0Sstevel@tonic-gate int err; 491*0Sstevel@tonic-gate page_t *pp = NULL; 492*0Sstevel@tonic-gate 493*0Sstevel@tonic-gate /* 494*0Sstevel@tonic-gate * Mark this page to be a segkp page in the bitmap. 495*0Sstevel@tonic-gate */ 496*0Sstevel@tonic-gate if (segkp_fromheap) { 497*0Sstevel@tonic-gate BT_ATOMIC_SET(segkp_bitmap, segkpindex); 498*0Sstevel@tonic-gate segkpindex++; 499*0Sstevel@tonic-gate } 500*0Sstevel@tonic-gate 501*0Sstevel@tonic-gate /* 502*0Sstevel@tonic-gate * If this page is the red zone page, we don't need swap 503*0Sstevel@tonic-gate * space for it. Note that we skip over the code that 504*0Sstevel@tonic-gate * establishes MMU mappings, so that the page remains 505*0Sstevel@tonic-gate * invalid. 506*0Sstevel@tonic-gate */ 507*0Sstevel@tonic-gate if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i) 508*0Sstevel@tonic-gate continue; 509*0Sstevel@tonic-gate 510*0Sstevel@tonic-gate if (kpd->kp_anon != NULL) { 511*0Sstevel@tonic-gate struct anon *ap; 512*0Sstevel@tonic-gate 513*0Sstevel@tonic-gate ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i) 514*0Sstevel@tonic-gate == NULL); 515*0Sstevel@tonic-gate /* 516*0Sstevel@tonic-gate * Determine the "vp" and "off" of the anon slot. 517*0Sstevel@tonic-gate */ 518*0Sstevel@tonic-gate ap = anon_alloc(NULL, 0); 519*0Sstevel@tonic-gate if (amp != NULL) 520*0Sstevel@tonic-gate ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 521*0Sstevel@tonic-gate (void) anon_set_ptr(kpd->kp_anon, anon_idx + i, 522*0Sstevel@tonic-gate ap, ANON_SLEEP); 523*0Sstevel@tonic-gate if (amp != NULL) 524*0Sstevel@tonic-gate ANON_LOCK_EXIT(&->a_rwlock); 525*0Sstevel@tonic-gate swap_xlate(ap, &vp, &off); 526*0Sstevel@tonic-gate 527*0Sstevel@tonic-gate /* 528*0Sstevel@tonic-gate * Create a page with the specified identity. The 529*0Sstevel@tonic-gate * page is returned with the "shared" lock held. 530*0Sstevel@tonic-gate */ 531*0Sstevel@tonic-gate err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 532*0Sstevel@tonic-gate NULL, pl, PAGESIZE, seg, va, S_CREATE, 533*0Sstevel@tonic-gate kcred); 534*0Sstevel@tonic-gate if (err) { 535*0Sstevel@tonic-gate /* 536*0Sstevel@tonic-gate * XXX - This should not fail. 537*0Sstevel@tonic-gate */ 538*0Sstevel@tonic-gate panic("segkp_get: no pages"); 539*0Sstevel@tonic-gate /*NOTREACHED*/ 540*0Sstevel@tonic-gate } 541*0Sstevel@tonic-gate pp = pl[0]; 542*0Sstevel@tonic-gate } else { 543*0Sstevel@tonic-gate ASSERT(page_exists(&kvp, 544*0Sstevel@tonic-gate (u_offset_t)(uintptr_t)va) == NULL); 545*0Sstevel@tonic-gate 546*0Sstevel@tonic-gate if ((pp = page_create_va(&kvp, 547*0Sstevel@tonic-gate (u_offset_t)(uintptr_t)va, PAGESIZE, 548*0Sstevel@tonic-gate (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL | 549*0Sstevel@tonic-gate PG_NORELOC, seg, va)) == NULL) { 550*0Sstevel@tonic-gate /* 551*0Sstevel@tonic-gate * Legitimize resource; then destroy it. 552*0Sstevel@tonic-gate * Easier than trying to unwind here. 553*0Sstevel@tonic-gate */ 554*0Sstevel@tonic-gate kpd->kp_flags = flags; 555*0Sstevel@tonic-gate kpd->kp_base = vbase; 556*0Sstevel@tonic-gate kpd->kp_len = len; 557*0Sstevel@tonic-gate segkp_release_internal(seg, kpd, va - vbase); 558*0Sstevel@tonic-gate return (NULL); 559*0Sstevel@tonic-gate } 560*0Sstevel@tonic-gate page_io_unlock(pp); 561*0Sstevel@tonic-gate } 562*0Sstevel@tonic-gate 563*0Sstevel@tonic-gate if (flags & KPD_ZERO) 564*0Sstevel@tonic-gate pagezero(pp, 0, PAGESIZE); 565*0Sstevel@tonic-gate 566*0Sstevel@tonic-gate /* 567*0Sstevel@tonic-gate * Load and lock an MMU translation for the page. 568*0Sstevel@tonic-gate */ 569*0Sstevel@tonic-gate hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE), 570*0Sstevel@tonic-gate ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD)); 571*0Sstevel@tonic-gate 572*0Sstevel@tonic-gate /* 573*0Sstevel@tonic-gate * Now, release lock on the page. 574*0Sstevel@tonic-gate */ 575*0Sstevel@tonic-gate if (flags & KPD_LOCKED) 576*0Sstevel@tonic-gate page_downgrade(pp); 577*0Sstevel@tonic-gate else 578*0Sstevel@tonic-gate page_unlock(pp); 579*0Sstevel@tonic-gate } 580*0Sstevel@tonic-gate 581*0Sstevel@tonic-gate kpd->kp_flags = flags; 582*0Sstevel@tonic-gate kpd->kp_base = vbase; 583*0Sstevel@tonic-gate kpd->kp_len = len; 584*0Sstevel@tonic-gate segkp_insert(seg, kpd); 585*0Sstevel@tonic-gate *tkpd = kpd; 586*0Sstevel@tonic-gate return (stom(kpd->kp_base, flags)); 587*0Sstevel@tonic-gate } 588*0Sstevel@tonic-gate 589*0Sstevel@tonic-gate /* 590*0Sstevel@tonic-gate * Release the resource to cache if the pool(designate by the cookie) 591*0Sstevel@tonic-gate * has less than the maximum allowable. If inserted in cache, 592*0Sstevel@tonic-gate * segkp_delete insures element is taken off of active list. 593*0Sstevel@tonic-gate */ 594*0Sstevel@tonic-gate void 595*0Sstevel@tonic-gate segkp_release(struct seg *seg, caddr_t vaddr) 596*0Sstevel@tonic-gate { 597*0Sstevel@tonic-gate struct segkp_cache *freelist; 598*0Sstevel@tonic-gate struct segkp_data *kpd = NULL; 599*0Sstevel@tonic-gate 600*0Sstevel@tonic-gate if ((kpd = segkp_find(seg, vaddr)) == NULL) { 601*0Sstevel@tonic-gate panic("segkp_release: null kpd"); 602*0Sstevel@tonic-gate /*NOTREACHED*/ 603*0Sstevel@tonic-gate } 604*0Sstevel@tonic-gate 605*0Sstevel@tonic-gate if (kpd->kp_cookie != -1) { 606*0Sstevel@tonic-gate freelist = &segkp_cache[kpd->kp_cookie]; 607*0Sstevel@tonic-gate mutex_enter(&segkp_lock); 608*0Sstevel@tonic-gate if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) { 609*0Sstevel@tonic-gate segkp_delete(seg, kpd); 610*0Sstevel@tonic-gate kpd->kp_next = freelist->kpf_list; 611*0Sstevel@tonic-gate freelist->kpf_list = kpd; 612*0Sstevel@tonic-gate freelist->kpf_count++; 613*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 614*0Sstevel@tonic-gate return; 615*0Sstevel@tonic-gate } else { 616*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 617*0Sstevel@tonic-gate kpd->kp_cookie = -1; 618*0Sstevel@tonic-gate } 619*0Sstevel@tonic-gate } 620*0Sstevel@tonic-gate segkp_release_internal(seg, kpd, kpd->kp_len); 621*0Sstevel@tonic-gate } 622*0Sstevel@tonic-gate 623*0Sstevel@tonic-gate /* 624*0Sstevel@tonic-gate * Free the entire resource. segkp_unlock gets called with the start of the 625*0Sstevel@tonic-gate * mapped portion of the resource. The length is the size of the mapped 626*0Sstevel@tonic-gate * portion 627*0Sstevel@tonic-gate */ 628*0Sstevel@tonic-gate static void 629*0Sstevel@tonic-gate segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len) 630*0Sstevel@tonic-gate { 631*0Sstevel@tonic-gate caddr_t va; 632*0Sstevel@tonic-gate long i; 633*0Sstevel@tonic-gate long redzone; 634*0Sstevel@tonic-gate size_t np; 635*0Sstevel@tonic-gate page_t *pp; 636*0Sstevel@tonic-gate struct vnode *vp; 637*0Sstevel@tonic-gate anoff_t off; 638*0Sstevel@tonic-gate struct anon *ap; 639*0Sstevel@tonic-gate pgcnt_t segkpindex; 640*0Sstevel@tonic-gate 641*0Sstevel@tonic-gate ASSERT(kpd != NULL); 642*0Sstevel@tonic-gate ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1); 643*0Sstevel@tonic-gate np = btop(len); 644*0Sstevel@tonic-gate 645*0Sstevel@tonic-gate /* Remove from active hash list */ 646*0Sstevel@tonic-gate if (kpd->kp_cookie == -1) { 647*0Sstevel@tonic-gate mutex_enter(&segkp_lock); 648*0Sstevel@tonic-gate segkp_delete(seg, kpd); 649*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 650*0Sstevel@tonic-gate } 651*0Sstevel@tonic-gate 652*0Sstevel@tonic-gate /* 653*0Sstevel@tonic-gate * Precompute redzone page index. 654*0Sstevel@tonic-gate */ 655*0Sstevel@tonic-gate redzone = -1; 656*0Sstevel@tonic-gate if (kpd->kp_flags & KPD_HASREDZONE) 657*0Sstevel@tonic-gate redzone = KPD_REDZONE(kpd); 658*0Sstevel@tonic-gate 659*0Sstevel@tonic-gate 660*0Sstevel@tonic-gate va = kpd->kp_base; 661*0Sstevel@tonic-gate 662*0Sstevel@tonic-gate hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT), 663*0Sstevel@tonic-gate ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD)); 664*0Sstevel@tonic-gate /* 665*0Sstevel@tonic-gate * Free up those anon resources that are quiescent. 666*0Sstevel@tonic-gate */ 667*0Sstevel@tonic-gate if (segkp_fromheap) 668*0Sstevel@tonic-gate segkpindex = btop((uintptr_t)(va - kvseg.s_base)); 669*0Sstevel@tonic-gate for (i = 0; i < np; i++, va += PAGESIZE) { 670*0Sstevel@tonic-gate 671*0Sstevel@tonic-gate /* 672*0Sstevel@tonic-gate * Clear the bit for this page from the bitmap. 673*0Sstevel@tonic-gate */ 674*0Sstevel@tonic-gate if (segkp_fromheap) { 675*0Sstevel@tonic-gate BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex); 676*0Sstevel@tonic-gate segkpindex++; 677*0Sstevel@tonic-gate } 678*0Sstevel@tonic-gate 679*0Sstevel@tonic-gate if (i == redzone) 680*0Sstevel@tonic-gate continue; 681*0Sstevel@tonic-gate if (kpd->kp_anon) { 682*0Sstevel@tonic-gate /* 683*0Sstevel@tonic-gate * Free up anon resources and destroy the 684*0Sstevel@tonic-gate * associated pages. 685*0Sstevel@tonic-gate * 686*0Sstevel@tonic-gate * Release the lock if there is one. Have to get the 687*0Sstevel@tonic-gate * page to do this, unfortunately. 688*0Sstevel@tonic-gate */ 689*0Sstevel@tonic-gate if (kpd->kp_flags & KPD_LOCKED) { 690*0Sstevel@tonic-gate ap = anon_get_ptr(kpd->kp_anon, 691*0Sstevel@tonic-gate kpd->kp_anon_idx + i); 692*0Sstevel@tonic-gate swap_xlate(ap, &vp, &off); 693*0Sstevel@tonic-gate /* Find the shared-locked page. */ 694*0Sstevel@tonic-gate pp = page_find(vp, (u_offset_t)off); 695*0Sstevel@tonic-gate if (pp == NULL) { 696*0Sstevel@tonic-gate panic("segkp_release: " 697*0Sstevel@tonic-gate "kp_anon: no page to unlock "); 698*0Sstevel@tonic-gate /*NOTREACHED*/ 699*0Sstevel@tonic-gate } 700*0Sstevel@tonic-gate page_unlock(pp); 701*0Sstevel@tonic-gate } 702*0Sstevel@tonic-gate if ((kpd->kp_flags & KPD_HASAMP) == 0) { 703*0Sstevel@tonic-gate anon_free(kpd->kp_anon, kpd->kp_anon_idx + i, 704*0Sstevel@tonic-gate PAGESIZE); 705*0Sstevel@tonic-gate anon_unresv(PAGESIZE); 706*0Sstevel@tonic-gate } 707*0Sstevel@tonic-gate TRACE_5(TR_FAC_VM, 708*0Sstevel@tonic-gate TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u", 709*0Sstevel@tonic-gate kpd, va, PAGESIZE, 0, 0); 710*0Sstevel@tonic-gate } else { 711*0Sstevel@tonic-gate if (kpd->kp_flags & KPD_LOCKED) { 712*0Sstevel@tonic-gate pp = page_find(&kvp, (u_offset_t)(uintptr_t)va); 713*0Sstevel@tonic-gate if (pp == NULL) { 714*0Sstevel@tonic-gate panic("segkp_release: " 715*0Sstevel@tonic-gate "no page to unlock"); 716*0Sstevel@tonic-gate /*NOTREACHED*/ 717*0Sstevel@tonic-gate } 718*0Sstevel@tonic-gate /* 719*0Sstevel@tonic-gate * We should just upgrade the lock here 720*0Sstevel@tonic-gate * but there is no upgrade that waits. 721*0Sstevel@tonic-gate */ 722*0Sstevel@tonic-gate page_unlock(pp); 723*0Sstevel@tonic-gate } 724*0Sstevel@tonic-gate pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va, 725*0Sstevel@tonic-gate SE_EXCL); 726*0Sstevel@tonic-gate if (pp != NULL) 727*0Sstevel@tonic-gate page_destroy(pp, 0); 728*0Sstevel@tonic-gate } 729*0Sstevel@tonic-gate } 730*0Sstevel@tonic-gate 731*0Sstevel@tonic-gate /* If locked, release physical memory reservation */ 732*0Sstevel@tonic-gate if (kpd->kp_flags & KPD_LOCKED) { 733*0Sstevel@tonic-gate pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)); 734*0Sstevel@tonic-gate if ((kpd->kp_flags & KPD_NO_ANON) == 0) 735*0Sstevel@tonic-gate atomic_add_long(&anon_segkp_pages_locked, -pages); 736*0Sstevel@tonic-gate page_unresv(pages); 737*0Sstevel@tonic-gate } 738*0Sstevel@tonic-gate 739*0Sstevel@tonic-gate vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len); 740*0Sstevel@tonic-gate kmem_free(kpd, sizeof (struct segkp_data)); 741*0Sstevel@tonic-gate } 742*0Sstevel@tonic-gate 743*0Sstevel@tonic-gate /* 744*0Sstevel@tonic-gate * segkp_map_red() will check the current frame pointer against the 745*0Sstevel@tonic-gate * stack base. If the amount of stack remaining is questionable 746*0Sstevel@tonic-gate * (less than red_minavail), then segkp_map_red() will map in the redzone 747*0Sstevel@tonic-gate * and return 1. Otherwise, it will return 0. segkp_map_red() can 748*0Sstevel@tonic-gate * _only_ be called when: 749*0Sstevel@tonic-gate * 750*0Sstevel@tonic-gate * - it is safe to sleep on page_create_va(). 751*0Sstevel@tonic-gate * - the caller is non-swappable. 752*0Sstevel@tonic-gate * 753*0Sstevel@tonic-gate * It is up to the caller to remember whether segkp_map_red() successfully 754*0Sstevel@tonic-gate * mapped the redzone, and, if so, to call segkp_unmap_red() at a later 755*0Sstevel@tonic-gate * time. Note that the caller must _remain_ non-swappable until after 756*0Sstevel@tonic-gate * calling segkp_unmap_red(). 757*0Sstevel@tonic-gate * 758*0Sstevel@tonic-gate * Currently, this routine is only called from pagefault() (which necessarily 759*0Sstevel@tonic-gate * satisfies the above conditions). 760*0Sstevel@tonic-gate */ 761*0Sstevel@tonic-gate #if defined(STACK_GROWTH_DOWN) 762*0Sstevel@tonic-gate int 763*0Sstevel@tonic-gate segkp_map_red(void) 764*0Sstevel@tonic-gate { 765*0Sstevel@tonic-gate uintptr_t fp = STACK_BIAS + (uintptr_t)getfp(); 766*0Sstevel@tonic-gate #ifndef _LP64 767*0Sstevel@tonic-gate caddr_t stkbase; 768*0Sstevel@tonic-gate #endif 769*0Sstevel@tonic-gate 770*0Sstevel@tonic-gate ASSERT(curthread->t_schedflag & TS_DONT_SWAP); 771*0Sstevel@tonic-gate 772*0Sstevel@tonic-gate /* 773*0Sstevel@tonic-gate * Optimize for the common case where we simply return. 774*0Sstevel@tonic-gate */ 775*0Sstevel@tonic-gate if ((curthread->t_red_pp == NULL) && 776*0Sstevel@tonic-gate (fp - (uintptr_t)curthread->t_stkbase >= red_minavail)) 777*0Sstevel@tonic-gate return (0); 778*0Sstevel@tonic-gate 779*0Sstevel@tonic-gate #if defined(_LP64) 780*0Sstevel@tonic-gate /* 781*0Sstevel@tonic-gate * XXX We probably need something better than this. 782*0Sstevel@tonic-gate */ 783*0Sstevel@tonic-gate panic("kernel stack overflow"); 784*0Sstevel@tonic-gate /*NOTREACHED*/ 785*0Sstevel@tonic-gate #else /* _LP64 */ 786*0Sstevel@tonic-gate if (curthread->t_red_pp == NULL) { 787*0Sstevel@tonic-gate page_t *red_pp; 788*0Sstevel@tonic-gate struct seg kseg; 789*0Sstevel@tonic-gate 790*0Sstevel@tonic-gate caddr_t red_va = (caddr_t) 791*0Sstevel@tonic-gate (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) - 792*0Sstevel@tonic-gate PAGESIZE); 793*0Sstevel@tonic-gate 794*0Sstevel@tonic-gate ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) == 795*0Sstevel@tonic-gate NULL); 796*0Sstevel@tonic-gate 797*0Sstevel@tonic-gate /* 798*0Sstevel@tonic-gate * Allocate the physical for the red page. 799*0Sstevel@tonic-gate */ 800*0Sstevel@tonic-gate /* 801*0Sstevel@tonic-gate * No PG_NORELOC here to avoid waits. Unlikely to get 802*0Sstevel@tonic-gate * a relocate happening in the short time the page exists 803*0Sstevel@tonic-gate * and it will be OK anyway. 804*0Sstevel@tonic-gate */ 805*0Sstevel@tonic-gate 806*0Sstevel@tonic-gate kseg.s_as = &kas; 807*0Sstevel@tonic-gate red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va, 808*0Sstevel@tonic-gate PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va); 809*0Sstevel@tonic-gate ASSERT(red_pp != NULL); 810*0Sstevel@tonic-gate 811*0Sstevel@tonic-gate /* 812*0Sstevel@tonic-gate * So we now have a page to jam into the redzone... 813*0Sstevel@tonic-gate */ 814*0Sstevel@tonic-gate page_io_unlock(red_pp); 815*0Sstevel@tonic-gate 816*0Sstevel@tonic-gate hat_memload(kas.a_hat, red_va, red_pp, 817*0Sstevel@tonic-gate (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK); 818*0Sstevel@tonic-gate page_downgrade(red_pp); 819*0Sstevel@tonic-gate 820*0Sstevel@tonic-gate /* 821*0Sstevel@tonic-gate * The page is left SE_SHARED locked so we can hold on to 822*0Sstevel@tonic-gate * the page_t pointer. 823*0Sstevel@tonic-gate */ 824*0Sstevel@tonic-gate curthread->t_red_pp = red_pp; 825*0Sstevel@tonic-gate 826*0Sstevel@tonic-gate atomic_add_32(&red_nmapped, 1); 827*0Sstevel@tonic-gate while (fp - (uintptr_t)curthread->t_stkbase < red_closest) { 828*0Sstevel@tonic-gate (void) cas32(&red_closest, red_closest, 829*0Sstevel@tonic-gate (uint32_t)(fp - (uintptr_t)curthread->t_stkbase)); 830*0Sstevel@tonic-gate } 831*0Sstevel@tonic-gate return (1); 832*0Sstevel@tonic-gate } 833*0Sstevel@tonic-gate 834*0Sstevel@tonic-gate stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase & 835*0Sstevel@tonic-gate (uintptr_t)PAGEMASK) - PAGESIZE); 836*0Sstevel@tonic-gate 837*0Sstevel@tonic-gate atomic_add_32(&red_ndoubles, 1); 838*0Sstevel@tonic-gate 839*0Sstevel@tonic-gate if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) { 840*0Sstevel@tonic-gate /* 841*0Sstevel@tonic-gate * Oh boy. We're already deep within the mapped-in 842*0Sstevel@tonic-gate * redzone page, and the caller is trying to prepare 843*0Sstevel@tonic-gate * for a deep stack run. We're running without a 844*0Sstevel@tonic-gate * redzone right now: if the caller plows off the 845*0Sstevel@tonic-gate * end of the stack, it'll plow another thread or 846*0Sstevel@tonic-gate * LWP structure. That situation could result in 847*0Sstevel@tonic-gate * a very hard-to-debug panic, so, in the spirit of 848*0Sstevel@tonic-gate * recording the name of one's killer in one's own 849*0Sstevel@tonic-gate * blood, we're going to record lbolt and the calling 850*0Sstevel@tonic-gate * thread. 851*0Sstevel@tonic-gate */ 852*0Sstevel@tonic-gate red_deep_lbolt = lbolt; 853*0Sstevel@tonic-gate red_deep_thread = curthread; 854*0Sstevel@tonic-gate } 855*0Sstevel@tonic-gate 856*0Sstevel@tonic-gate /* 857*0Sstevel@tonic-gate * If this is a DEBUG kernel, and we've run too deep for comfort, toss. 858*0Sstevel@tonic-gate */ 859*0Sstevel@tonic-gate ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD); 860*0Sstevel@tonic-gate return (0); 861*0Sstevel@tonic-gate #endif /* _LP64 */ 862*0Sstevel@tonic-gate } 863*0Sstevel@tonic-gate 864*0Sstevel@tonic-gate void 865*0Sstevel@tonic-gate segkp_unmap_red(void) 866*0Sstevel@tonic-gate { 867*0Sstevel@tonic-gate page_t *pp; 868*0Sstevel@tonic-gate caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase & 869*0Sstevel@tonic-gate (uintptr_t)PAGEMASK) - PAGESIZE); 870*0Sstevel@tonic-gate 871*0Sstevel@tonic-gate ASSERT(curthread->t_red_pp != NULL); 872*0Sstevel@tonic-gate ASSERT(curthread->t_schedflag & TS_DONT_SWAP); 873*0Sstevel@tonic-gate 874*0Sstevel@tonic-gate /* 875*0Sstevel@tonic-gate * Because we locked the mapping down, we can't simply rely 876*0Sstevel@tonic-gate * on page_destroy() to clean everything up; we need to call 877*0Sstevel@tonic-gate * hat_unload() to explicitly unlock the mapping resources. 878*0Sstevel@tonic-gate */ 879*0Sstevel@tonic-gate hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK); 880*0Sstevel@tonic-gate 881*0Sstevel@tonic-gate pp = curthread->t_red_pp; 882*0Sstevel@tonic-gate 883*0Sstevel@tonic-gate ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va)); 884*0Sstevel@tonic-gate 885*0Sstevel@tonic-gate /* 886*0Sstevel@tonic-gate * Need to upgrade the SE_SHARED lock to SE_EXCL. 887*0Sstevel@tonic-gate */ 888*0Sstevel@tonic-gate if (!page_tryupgrade(pp)) { 889*0Sstevel@tonic-gate /* 890*0Sstevel@tonic-gate * As there is now wait for upgrade, release the 891*0Sstevel@tonic-gate * SE_SHARED lock and wait for SE_EXCL. 892*0Sstevel@tonic-gate */ 893*0Sstevel@tonic-gate page_unlock(pp); 894*0Sstevel@tonic-gate pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL); 895*0Sstevel@tonic-gate /* pp may be NULL here, hence the test below */ 896*0Sstevel@tonic-gate } 897*0Sstevel@tonic-gate 898*0Sstevel@tonic-gate /* 899*0Sstevel@tonic-gate * Destroy the page, with dontfree set to zero (i.e. free it). 900*0Sstevel@tonic-gate */ 901*0Sstevel@tonic-gate if (pp != NULL) 902*0Sstevel@tonic-gate page_destroy(pp, 0); 903*0Sstevel@tonic-gate curthread->t_red_pp = NULL; 904*0Sstevel@tonic-gate } 905*0Sstevel@tonic-gate #else 906*0Sstevel@tonic-gate #error Red stacks only supported with downwards stack growth. 907*0Sstevel@tonic-gate #endif 908*0Sstevel@tonic-gate 909*0Sstevel@tonic-gate /* 910*0Sstevel@tonic-gate * Handle a fault on an address corresponding to one of the 911*0Sstevel@tonic-gate * resources in the segkp segment. 912*0Sstevel@tonic-gate */ 913*0Sstevel@tonic-gate faultcode_t 914*0Sstevel@tonic-gate segkp_fault( 915*0Sstevel@tonic-gate struct hat *hat, 916*0Sstevel@tonic-gate struct seg *seg, 917*0Sstevel@tonic-gate caddr_t vaddr, 918*0Sstevel@tonic-gate size_t len, 919*0Sstevel@tonic-gate enum fault_type type, 920*0Sstevel@tonic-gate enum seg_rw rw) 921*0Sstevel@tonic-gate { 922*0Sstevel@tonic-gate struct segkp_data *kpd = NULL; 923*0Sstevel@tonic-gate int err; 924*0Sstevel@tonic-gate 925*0Sstevel@tonic-gate ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock)); 926*0Sstevel@tonic-gate 927*0Sstevel@tonic-gate /* 928*0Sstevel@tonic-gate * Sanity checks. 929*0Sstevel@tonic-gate */ 930*0Sstevel@tonic-gate if (type == F_PROT) { 931*0Sstevel@tonic-gate panic("segkp_fault: unexpected F_PROT fault"); 932*0Sstevel@tonic-gate /*NOTREACHED*/ 933*0Sstevel@tonic-gate } 934*0Sstevel@tonic-gate 935*0Sstevel@tonic-gate if ((kpd = segkp_find(seg, vaddr)) == NULL) 936*0Sstevel@tonic-gate return (FC_NOMAP); 937*0Sstevel@tonic-gate 938*0Sstevel@tonic-gate mutex_enter(&kpd->kp_lock); 939*0Sstevel@tonic-gate 940*0Sstevel@tonic-gate if (type == F_SOFTLOCK) { 941*0Sstevel@tonic-gate ASSERT(!(kpd->kp_flags & KPD_LOCKED)); 942*0Sstevel@tonic-gate /* 943*0Sstevel@tonic-gate * The F_SOFTLOCK case has more stringent 944*0Sstevel@tonic-gate * range requirements: the given range must exactly coincide 945*0Sstevel@tonic-gate * with the resource's mapped portion. Note reference to 946*0Sstevel@tonic-gate * redzone is handled since vaddr would not equal base 947*0Sstevel@tonic-gate */ 948*0Sstevel@tonic-gate if (vaddr != stom(kpd->kp_base, kpd->kp_flags) || 949*0Sstevel@tonic-gate len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) { 950*0Sstevel@tonic-gate mutex_exit(&kpd->kp_lock); 951*0Sstevel@tonic-gate return (FC_MAKE_ERR(EFAULT)); 952*0Sstevel@tonic-gate } 953*0Sstevel@tonic-gate 954*0Sstevel@tonic-gate if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) { 955*0Sstevel@tonic-gate mutex_exit(&kpd->kp_lock); 956*0Sstevel@tonic-gate return (FC_MAKE_ERR(err)); 957*0Sstevel@tonic-gate } 958*0Sstevel@tonic-gate kpd->kp_flags |= KPD_LOCKED; 959*0Sstevel@tonic-gate mutex_exit(&kpd->kp_lock); 960*0Sstevel@tonic-gate return (0); 961*0Sstevel@tonic-gate } 962*0Sstevel@tonic-gate 963*0Sstevel@tonic-gate if (type == F_INVAL) { 964*0Sstevel@tonic-gate ASSERT(!(kpd->kp_flags & KPD_NO_ANON)); 965*0Sstevel@tonic-gate 966*0Sstevel@tonic-gate /* 967*0Sstevel@tonic-gate * Check if we touched the redzone. Somewhat optimistic 968*0Sstevel@tonic-gate * here if we are touching the redzone of our own stack 969*0Sstevel@tonic-gate * since we wouldn't have a stack to get this far... 970*0Sstevel@tonic-gate */ 971*0Sstevel@tonic-gate if ((kpd->kp_flags & KPD_HASREDZONE) && 972*0Sstevel@tonic-gate btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd)) 973*0Sstevel@tonic-gate panic("segkp_fault: accessing redzone"); 974*0Sstevel@tonic-gate 975*0Sstevel@tonic-gate /* 976*0Sstevel@tonic-gate * This fault may occur while the page is being F_SOFTLOCK'ed. 977*0Sstevel@tonic-gate * Return since a 2nd segkp_load is unnecessary and also would 978*0Sstevel@tonic-gate * result in the page being locked twice and eventually 979*0Sstevel@tonic-gate * hang the thread_reaper thread. 980*0Sstevel@tonic-gate */ 981*0Sstevel@tonic-gate if (kpd->kp_flags & KPD_LOCKED) { 982*0Sstevel@tonic-gate mutex_exit(&kpd->kp_lock); 983*0Sstevel@tonic-gate return (0); 984*0Sstevel@tonic-gate } 985*0Sstevel@tonic-gate 986*0Sstevel@tonic-gate err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags); 987*0Sstevel@tonic-gate mutex_exit(&kpd->kp_lock); 988*0Sstevel@tonic-gate return (err ? FC_MAKE_ERR(err) : 0); 989*0Sstevel@tonic-gate } 990*0Sstevel@tonic-gate 991*0Sstevel@tonic-gate if (type == F_SOFTUNLOCK) { 992*0Sstevel@tonic-gate uint_t flags; 993*0Sstevel@tonic-gate 994*0Sstevel@tonic-gate /* 995*0Sstevel@tonic-gate * Make sure the addr is LOCKED and it has anon backing 996*0Sstevel@tonic-gate * before unlocking 997*0Sstevel@tonic-gate */ 998*0Sstevel@tonic-gate if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON) { 999*0Sstevel@tonic-gate panic("segkp_fault: bad unlock"); 1000*0Sstevel@tonic-gate /*NOTREACHED*/ 1001*0Sstevel@tonic-gate } 1002*0Sstevel@tonic-gate 1003*0Sstevel@tonic-gate if (vaddr != stom(kpd->kp_base, kpd->kp_flags) || 1004*0Sstevel@tonic-gate len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) { 1005*0Sstevel@tonic-gate panic("segkp_fault: bad range"); 1006*0Sstevel@tonic-gate /*NOTREACHED*/ 1007*0Sstevel@tonic-gate } 1008*0Sstevel@tonic-gate 1009*0Sstevel@tonic-gate if (rw == S_WRITE) 1010*0Sstevel@tonic-gate flags = kpd->kp_flags | KPD_WRITEDIRTY; 1011*0Sstevel@tonic-gate else 1012*0Sstevel@tonic-gate flags = kpd->kp_flags; 1013*0Sstevel@tonic-gate err = segkp_unlock(hat, seg, vaddr, len, kpd, flags); 1014*0Sstevel@tonic-gate kpd->kp_flags &= ~KPD_LOCKED; 1015*0Sstevel@tonic-gate mutex_exit(&kpd->kp_lock); 1016*0Sstevel@tonic-gate return (err ? FC_MAKE_ERR(err) : 0); 1017*0Sstevel@tonic-gate } 1018*0Sstevel@tonic-gate mutex_exit(&kpd->kp_lock); 1019*0Sstevel@tonic-gate panic("segkp_fault: bogus fault type: %d\n", type); 1020*0Sstevel@tonic-gate /*NOTREACHED*/ 1021*0Sstevel@tonic-gate } 1022*0Sstevel@tonic-gate 1023*0Sstevel@tonic-gate /* 1024*0Sstevel@tonic-gate * Check that the given protections suffice over the range specified by 1025*0Sstevel@tonic-gate * vaddr and len. For this segment type, the only issue is whether or 1026*0Sstevel@tonic-gate * not the range lies completely within the mapped part of an allocated 1027*0Sstevel@tonic-gate * resource. 1028*0Sstevel@tonic-gate */ 1029*0Sstevel@tonic-gate /* ARGSUSED */ 1030*0Sstevel@tonic-gate static int 1031*0Sstevel@tonic-gate segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot) 1032*0Sstevel@tonic-gate { 1033*0Sstevel@tonic-gate struct segkp_data *kpd = NULL; 1034*0Sstevel@tonic-gate caddr_t mbase; 1035*0Sstevel@tonic-gate size_t mlen; 1036*0Sstevel@tonic-gate 1037*0Sstevel@tonic-gate if ((kpd = segkp_find(seg, vaddr)) == NULL) 1038*0Sstevel@tonic-gate return (EACCES); 1039*0Sstevel@tonic-gate 1040*0Sstevel@tonic-gate mutex_enter(&kpd->kp_lock); 1041*0Sstevel@tonic-gate mbase = stom(kpd->kp_base, kpd->kp_flags); 1042*0Sstevel@tonic-gate mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags); 1043*0Sstevel@tonic-gate if (len > mlen || vaddr < mbase || 1044*0Sstevel@tonic-gate ((vaddr + len) > (mbase + mlen))) { 1045*0Sstevel@tonic-gate mutex_exit(&kpd->kp_lock); 1046*0Sstevel@tonic-gate return (EACCES); 1047*0Sstevel@tonic-gate } 1048*0Sstevel@tonic-gate mutex_exit(&kpd->kp_lock); 1049*0Sstevel@tonic-gate return (0); 1050*0Sstevel@tonic-gate } 1051*0Sstevel@tonic-gate 1052*0Sstevel@tonic-gate 1053*0Sstevel@tonic-gate /* 1054*0Sstevel@tonic-gate * Check to see if it makes sense to do kluster/read ahead to 1055*0Sstevel@tonic-gate * addr + delta relative to the mapping at addr. We assume here 1056*0Sstevel@tonic-gate * that delta is a signed PAGESIZE'd multiple (which can be negative). 1057*0Sstevel@tonic-gate * 1058*0Sstevel@tonic-gate * For seg_u we always "approve" of this action from our standpoint. 1059*0Sstevel@tonic-gate */ 1060*0Sstevel@tonic-gate /*ARGSUSED*/ 1061*0Sstevel@tonic-gate static int 1062*0Sstevel@tonic-gate segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 1063*0Sstevel@tonic-gate { 1064*0Sstevel@tonic-gate return (0); 1065*0Sstevel@tonic-gate } 1066*0Sstevel@tonic-gate 1067*0Sstevel@tonic-gate /* 1068*0Sstevel@tonic-gate * Load and possibly lock intra-slot resources in the range given by 1069*0Sstevel@tonic-gate * vaddr and len. 1070*0Sstevel@tonic-gate */ 1071*0Sstevel@tonic-gate static int 1072*0Sstevel@tonic-gate segkp_load( 1073*0Sstevel@tonic-gate struct hat *hat, 1074*0Sstevel@tonic-gate struct seg *seg, 1075*0Sstevel@tonic-gate caddr_t vaddr, 1076*0Sstevel@tonic-gate size_t len, 1077*0Sstevel@tonic-gate struct segkp_data *kpd, 1078*0Sstevel@tonic-gate uint_t flags) 1079*0Sstevel@tonic-gate { 1080*0Sstevel@tonic-gate caddr_t va; 1081*0Sstevel@tonic-gate caddr_t vlim; 1082*0Sstevel@tonic-gate ulong_t i; 1083*0Sstevel@tonic-gate uint_t lock; 1084*0Sstevel@tonic-gate 1085*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&kpd->kp_lock)); 1086*0Sstevel@tonic-gate 1087*0Sstevel@tonic-gate len = P2ROUNDUP(len, PAGESIZE); 1088*0Sstevel@tonic-gate 1089*0Sstevel@tonic-gate /* If locking, reserve physical memory */ 1090*0Sstevel@tonic-gate if (flags & KPD_LOCKED) { 1091*0Sstevel@tonic-gate pgcnt_t pages = btop(len); 1092*0Sstevel@tonic-gate if ((kpd->kp_flags & KPD_NO_ANON) == 0) 1093*0Sstevel@tonic-gate atomic_add_long(&anon_segkp_pages_locked, pages); 1094*0Sstevel@tonic-gate (void) page_resv(pages, KM_SLEEP); 1095*0Sstevel@tonic-gate } 1096*0Sstevel@tonic-gate 1097*0Sstevel@tonic-gate /* 1098*0Sstevel@tonic-gate * Loop through the pages in the given range. 1099*0Sstevel@tonic-gate */ 1100*0Sstevel@tonic-gate va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK); 1101*0Sstevel@tonic-gate vaddr = va; 1102*0Sstevel@tonic-gate vlim = va + len; 1103*0Sstevel@tonic-gate lock = flags & KPD_LOCKED; 1104*0Sstevel@tonic-gate i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT; 1105*0Sstevel@tonic-gate for (; va < vlim; va += PAGESIZE, i++) { 1106*0Sstevel@tonic-gate page_t *pl[2]; /* second element NULL terminator */ 1107*0Sstevel@tonic-gate struct vnode *vp; 1108*0Sstevel@tonic-gate anoff_t off; 1109*0Sstevel@tonic-gate int err; 1110*0Sstevel@tonic-gate struct anon *ap; 1111*0Sstevel@tonic-gate 1112*0Sstevel@tonic-gate /* 1113*0Sstevel@tonic-gate * Summon the page. If it's not resident, arrange 1114*0Sstevel@tonic-gate * for synchronous i/o to pull it in. 1115*0Sstevel@tonic-gate */ 1116*0Sstevel@tonic-gate ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i); 1117*0Sstevel@tonic-gate swap_xlate(ap, &vp, &off); 1118*0Sstevel@tonic-gate 1119*0Sstevel@tonic-gate /* 1120*0Sstevel@tonic-gate * The returned page list will have exactly one entry, 1121*0Sstevel@tonic-gate * which is returned to us already kept. 1122*0Sstevel@tonic-gate */ 1123*0Sstevel@tonic-gate err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL, 1124*0Sstevel@tonic-gate pl, PAGESIZE, seg, va, S_READ, kcred); 1125*0Sstevel@tonic-gate 1126*0Sstevel@tonic-gate if (err) { 1127*0Sstevel@tonic-gate /* 1128*0Sstevel@tonic-gate * Back out of what we've done so far. 1129*0Sstevel@tonic-gate */ 1130*0Sstevel@tonic-gate (void) segkp_unlock(hat, seg, vaddr, 1131*0Sstevel@tonic-gate (va - vaddr), kpd, flags); 1132*0Sstevel@tonic-gate return (err); 1133*0Sstevel@tonic-gate } 1134*0Sstevel@tonic-gate 1135*0Sstevel@tonic-gate /* 1136*0Sstevel@tonic-gate * Load an MMU translation for the page. 1137*0Sstevel@tonic-gate */ 1138*0Sstevel@tonic-gate hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE), 1139*0Sstevel@tonic-gate lock ? HAT_LOAD_LOCK : HAT_LOAD); 1140*0Sstevel@tonic-gate 1141*0Sstevel@tonic-gate if (!lock) { 1142*0Sstevel@tonic-gate /* 1143*0Sstevel@tonic-gate * Now, release "shared" lock on the page. 1144*0Sstevel@tonic-gate */ 1145*0Sstevel@tonic-gate page_unlock(pl[0]); 1146*0Sstevel@tonic-gate } 1147*0Sstevel@tonic-gate } 1148*0Sstevel@tonic-gate return (0); 1149*0Sstevel@tonic-gate } 1150*0Sstevel@tonic-gate 1151*0Sstevel@tonic-gate /* 1152*0Sstevel@tonic-gate * At the very least unload the mmu-translations and unlock the range if locked 1153*0Sstevel@tonic-gate * Can be called with the following flag value KPD_WRITEDIRTY which specifies 1154*0Sstevel@tonic-gate * any dirty pages should be written to disk. 1155*0Sstevel@tonic-gate */ 1156*0Sstevel@tonic-gate static int 1157*0Sstevel@tonic-gate segkp_unlock( 1158*0Sstevel@tonic-gate struct hat *hat, 1159*0Sstevel@tonic-gate struct seg *seg, 1160*0Sstevel@tonic-gate caddr_t vaddr, 1161*0Sstevel@tonic-gate size_t len, 1162*0Sstevel@tonic-gate struct segkp_data *kpd, 1163*0Sstevel@tonic-gate uint_t flags) 1164*0Sstevel@tonic-gate { 1165*0Sstevel@tonic-gate caddr_t va; 1166*0Sstevel@tonic-gate caddr_t vlim; 1167*0Sstevel@tonic-gate ulong_t i; 1168*0Sstevel@tonic-gate struct page *pp; 1169*0Sstevel@tonic-gate struct vnode *vp; 1170*0Sstevel@tonic-gate anoff_t off; 1171*0Sstevel@tonic-gate struct anon *ap; 1172*0Sstevel@tonic-gate 1173*0Sstevel@tonic-gate #ifdef lint 1174*0Sstevel@tonic-gate seg = seg; 1175*0Sstevel@tonic-gate #endif /* lint */ 1176*0Sstevel@tonic-gate 1177*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&kpd->kp_lock)); 1178*0Sstevel@tonic-gate 1179*0Sstevel@tonic-gate /* 1180*0Sstevel@tonic-gate * Loop through the pages in the given range. It is assumed 1181*0Sstevel@tonic-gate * segkp_unlock is called with page aligned base 1182*0Sstevel@tonic-gate */ 1183*0Sstevel@tonic-gate va = vaddr; 1184*0Sstevel@tonic-gate vlim = va + len; 1185*0Sstevel@tonic-gate i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT; 1186*0Sstevel@tonic-gate hat_unload(hat, va, len, 1187*0Sstevel@tonic-gate ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD)); 1188*0Sstevel@tonic-gate for (; va < vlim; va += PAGESIZE, i++) { 1189*0Sstevel@tonic-gate /* 1190*0Sstevel@tonic-gate * Find the page associated with this part of the 1191*0Sstevel@tonic-gate * slot, tracking it down through its associated swap 1192*0Sstevel@tonic-gate * space. 1193*0Sstevel@tonic-gate */ 1194*0Sstevel@tonic-gate ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i); 1195*0Sstevel@tonic-gate swap_xlate(ap, &vp, &off); 1196*0Sstevel@tonic-gate 1197*0Sstevel@tonic-gate if (flags & KPD_LOCKED) { 1198*0Sstevel@tonic-gate if ((pp = page_find(vp, off)) == NULL) { 1199*0Sstevel@tonic-gate if (flags & KPD_LOCKED) { 1200*0Sstevel@tonic-gate panic("segkp_softunlock: missing page"); 1201*0Sstevel@tonic-gate /*NOTREACHED*/ 1202*0Sstevel@tonic-gate } 1203*0Sstevel@tonic-gate } 1204*0Sstevel@tonic-gate } else { 1205*0Sstevel@tonic-gate /* 1206*0Sstevel@tonic-gate * Nothing to do if the slot is not locked and the 1207*0Sstevel@tonic-gate * page doesn't exist. 1208*0Sstevel@tonic-gate */ 1209*0Sstevel@tonic-gate if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) 1210*0Sstevel@tonic-gate continue; 1211*0Sstevel@tonic-gate } 1212*0Sstevel@tonic-gate 1213*0Sstevel@tonic-gate /* 1214*0Sstevel@tonic-gate * If the page doesn't have any translations, is 1215*0Sstevel@tonic-gate * dirty and not being shared, then push it out 1216*0Sstevel@tonic-gate * asynchronously and avoid waiting for the 1217*0Sstevel@tonic-gate * pageout daemon to do it for us. 1218*0Sstevel@tonic-gate * 1219*0Sstevel@tonic-gate * XXX - Do we really need to get the "exclusive" 1220*0Sstevel@tonic-gate * lock via an upgrade? 1221*0Sstevel@tonic-gate */ 1222*0Sstevel@tonic-gate if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) && 1223*0Sstevel@tonic-gate hat_ismod(pp) && page_tryupgrade(pp)) { 1224*0Sstevel@tonic-gate /* 1225*0Sstevel@tonic-gate * Hold the vnode before releasing the page lock to 1226*0Sstevel@tonic-gate * prevent it from being freed and re-used by some 1227*0Sstevel@tonic-gate * other thread. 1228*0Sstevel@tonic-gate */ 1229*0Sstevel@tonic-gate VN_HOLD(vp); 1230*0Sstevel@tonic-gate page_unlock(pp); 1231*0Sstevel@tonic-gate 1232*0Sstevel@tonic-gate /* 1233*0Sstevel@tonic-gate * Want most powerful credentials we can get so 1234*0Sstevel@tonic-gate * use kcred. 1235*0Sstevel@tonic-gate */ 1236*0Sstevel@tonic-gate (void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 1237*0Sstevel@tonic-gate B_ASYNC | B_FREE, kcred); 1238*0Sstevel@tonic-gate VN_RELE(vp); 1239*0Sstevel@tonic-gate } else { 1240*0Sstevel@tonic-gate page_unlock(pp); 1241*0Sstevel@tonic-gate } 1242*0Sstevel@tonic-gate } 1243*0Sstevel@tonic-gate 1244*0Sstevel@tonic-gate /* If unlocking, release physical memory */ 1245*0Sstevel@tonic-gate if (flags & KPD_LOCKED) { 1246*0Sstevel@tonic-gate pgcnt_t pages = btopr(len); 1247*0Sstevel@tonic-gate if ((kpd->kp_flags & KPD_NO_ANON) == 0) 1248*0Sstevel@tonic-gate atomic_add_long(&anon_segkp_pages_locked, -pages); 1249*0Sstevel@tonic-gate page_unresv(pages); 1250*0Sstevel@tonic-gate } 1251*0Sstevel@tonic-gate return (0); 1252*0Sstevel@tonic-gate } 1253*0Sstevel@tonic-gate 1254*0Sstevel@tonic-gate /* 1255*0Sstevel@tonic-gate * Insert the kpd in the hash table. 1256*0Sstevel@tonic-gate */ 1257*0Sstevel@tonic-gate static void 1258*0Sstevel@tonic-gate segkp_insert(struct seg *seg, struct segkp_data *kpd) 1259*0Sstevel@tonic-gate { 1260*0Sstevel@tonic-gate struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data; 1261*0Sstevel@tonic-gate int index; 1262*0Sstevel@tonic-gate 1263*0Sstevel@tonic-gate /* 1264*0Sstevel@tonic-gate * Insert the kpd based on the address that will be returned 1265*0Sstevel@tonic-gate * via segkp_release. 1266*0Sstevel@tonic-gate */ 1267*0Sstevel@tonic-gate index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags)); 1268*0Sstevel@tonic-gate mutex_enter(&segkp_lock); 1269*0Sstevel@tonic-gate kpd->kp_next = kpsd->kpsd_hash[index]; 1270*0Sstevel@tonic-gate kpsd->kpsd_hash[index] = kpd; 1271*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 1272*0Sstevel@tonic-gate } 1273*0Sstevel@tonic-gate 1274*0Sstevel@tonic-gate /* 1275*0Sstevel@tonic-gate * Remove kpd from the hash table. 1276*0Sstevel@tonic-gate */ 1277*0Sstevel@tonic-gate static void 1278*0Sstevel@tonic-gate segkp_delete(struct seg *seg, struct segkp_data *kpd) 1279*0Sstevel@tonic-gate { 1280*0Sstevel@tonic-gate struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data; 1281*0Sstevel@tonic-gate struct segkp_data **kpp; 1282*0Sstevel@tonic-gate int index; 1283*0Sstevel@tonic-gate 1284*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&segkp_lock)); 1285*0Sstevel@tonic-gate 1286*0Sstevel@tonic-gate index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags)); 1287*0Sstevel@tonic-gate for (kpp = &kpsd->kpsd_hash[index]; 1288*0Sstevel@tonic-gate *kpp != NULL; kpp = &((*kpp)->kp_next)) { 1289*0Sstevel@tonic-gate if (*kpp == kpd) { 1290*0Sstevel@tonic-gate *kpp = kpd->kp_next; 1291*0Sstevel@tonic-gate return; 1292*0Sstevel@tonic-gate } 1293*0Sstevel@tonic-gate } 1294*0Sstevel@tonic-gate panic("segkp_delete: unable to find element to delete"); 1295*0Sstevel@tonic-gate /*NOTREACHED*/ 1296*0Sstevel@tonic-gate } 1297*0Sstevel@tonic-gate 1298*0Sstevel@tonic-gate /* 1299*0Sstevel@tonic-gate * Find the kpd associated with a vaddr. 1300*0Sstevel@tonic-gate * 1301*0Sstevel@tonic-gate * Most of the callers of segkp_find will pass the vaddr that 1302*0Sstevel@tonic-gate * hashes to the desired index, but there are cases where 1303*0Sstevel@tonic-gate * this is not true in which case we have to (potentially) scan 1304*0Sstevel@tonic-gate * the whole table looking for it. This should be very rare 1305*0Sstevel@tonic-gate * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the 1306*0Sstevel@tonic-gate * middle of the segkp_data region). 1307*0Sstevel@tonic-gate */ 1308*0Sstevel@tonic-gate static struct segkp_data * 1309*0Sstevel@tonic-gate segkp_find(struct seg *seg, caddr_t vaddr) 1310*0Sstevel@tonic-gate { 1311*0Sstevel@tonic-gate struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data; 1312*0Sstevel@tonic-gate struct segkp_data *kpd; 1313*0Sstevel@tonic-gate int i; 1314*0Sstevel@tonic-gate int stop; 1315*0Sstevel@tonic-gate 1316*0Sstevel@tonic-gate i = stop = SEGKP_HASH(vaddr); 1317*0Sstevel@tonic-gate mutex_enter(&segkp_lock); 1318*0Sstevel@tonic-gate do { 1319*0Sstevel@tonic-gate for (kpd = kpsd->kpsd_hash[i]; kpd != NULL; 1320*0Sstevel@tonic-gate kpd = kpd->kp_next) { 1321*0Sstevel@tonic-gate if (vaddr >= kpd->kp_base && 1322*0Sstevel@tonic-gate vaddr < kpd->kp_base + kpd->kp_len) { 1323*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 1324*0Sstevel@tonic-gate return (kpd); 1325*0Sstevel@tonic-gate } 1326*0Sstevel@tonic-gate } 1327*0Sstevel@tonic-gate if (--i < 0) 1328*0Sstevel@tonic-gate i = SEGKP_HASHSZ - 1; /* Wrap */ 1329*0Sstevel@tonic-gate } while (i != stop); 1330*0Sstevel@tonic-gate mutex_exit(&segkp_lock); 1331*0Sstevel@tonic-gate return (NULL); /* Not found */ 1332*0Sstevel@tonic-gate } 1333*0Sstevel@tonic-gate 1334*0Sstevel@tonic-gate /* 1335*0Sstevel@tonic-gate * returns size of swappable area. 1336*0Sstevel@tonic-gate */ 1337*0Sstevel@tonic-gate size_t 1338*0Sstevel@tonic-gate swapsize(caddr_t v) 1339*0Sstevel@tonic-gate { 1340*0Sstevel@tonic-gate struct segkp_data *kpd; 1341*0Sstevel@tonic-gate 1342*0Sstevel@tonic-gate if ((kpd = segkp_find(segkp, v)) != NULL) 1343*0Sstevel@tonic-gate return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)); 1344*0Sstevel@tonic-gate else 1345*0Sstevel@tonic-gate return (NULL); 1346*0Sstevel@tonic-gate } 1347*0Sstevel@tonic-gate 1348*0Sstevel@tonic-gate /* 1349*0Sstevel@tonic-gate * Dump out all the active segkp pages 1350*0Sstevel@tonic-gate */ 1351*0Sstevel@tonic-gate static void 1352*0Sstevel@tonic-gate segkp_dump(struct seg *seg) 1353*0Sstevel@tonic-gate { 1354*0Sstevel@tonic-gate int i; 1355*0Sstevel@tonic-gate struct segkp_data *kpd; 1356*0Sstevel@tonic-gate struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data; 1357*0Sstevel@tonic-gate 1358*0Sstevel@tonic-gate for (i = 0; i < SEGKP_HASHSZ; i++) { 1359*0Sstevel@tonic-gate for (kpd = kpsd->kpsd_hash[i]; 1360*0Sstevel@tonic-gate kpd != NULL; kpd = kpd->kp_next) { 1361*0Sstevel@tonic-gate pfn_t pfn; 1362*0Sstevel@tonic-gate caddr_t addr; 1363*0Sstevel@tonic-gate caddr_t eaddr; 1364*0Sstevel@tonic-gate 1365*0Sstevel@tonic-gate addr = kpd->kp_base; 1366*0Sstevel@tonic-gate eaddr = addr + kpd->kp_len; 1367*0Sstevel@tonic-gate while (addr < eaddr) { 1368*0Sstevel@tonic-gate ASSERT(seg->s_as == &kas); 1369*0Sstevel@tonic-gate pfn = hat_getpfnum(seg->s_as->a_hat, addr); 1370*0Sstevel@tonic-gate if (pfn != PFN_INVALID) 1371*0Sstevel@tonic-gate dump_addpage(seg->s_as, addr, pfn); 1372*0Sstevel@tonic-gate addr += PAGESIZE; 1373*0Sstevel@tonic-gate dump_timeleft = dump_timeout; 1374*0Sstevel@tonic-gate } 1375*0Sstevel@tonic-gate } 1376*0Sstevel@tonic-gate } 1377*0Sstevel@tonic-gate } 1378*0Sstevel@tonic-gate 1379*0Sstevel@tonic-gate /*ARGSUSED*/ 1380*0Sstevel@tonic-gate static int 1381*0Sstevel@tonic-gate segkp_pagelock(struct seg *seg, caddr_t addr, size_t len, 1382*0Sstevel@tonic-gate struct page ***ppp, enum lock_type type, enum seg_rw rw) 1383*0Sstevel@tonic-gate { 1384*0Sstevel@tonic-gate return (ENOTSUP); 1385*0Sstevel@tonic-gate } 1386*0Sstevel@tonic-gate 1387*0Sstevel@tonic-gate /*ARGSUSED*/ 1388*0Sstevel@tonic-gate static int 1389*0Sstevel@tonic-gate segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 1390*0Sstevel@tonic-gate { 1391*0Sstevel@tonic-gate return (ENODEV); 1392*0Sstevel@tonic-gate } 1393*0Sstevel@tonic-gate 1394*0Sstevel@tonic-gate /*ARGSUSED*/ 1395*0Sstevel@tonic-gate static lgrp_mem_policy_info_t * 1396*0Sstevel@tonic-gate segkp_getpolicy(struct seg *seg, caddr_t addr) 1397*0Sstevel@tonic-gate { 1398*0Sstevel@tonic-gate return (NULL); 1399*0Sstevel@tonic-gate } 1400*0Sstevel@tonic-gate 1401*0Sstevel@tonic-gate #include <sys/mem_config.h> 1402*0Sstevel@tonic-gate 1403*0Sstevel@tonic-gate /*ARGSUSED*/ 1404*0Sstevel@tonic-gate static void 1405*0Sstevel@tonic-gate segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages) 1406*0Sstevel@tonic-gate {} 1407*0Sstevel@tonic-gate 1408*0Sstevel@tonic-gate /* 1409*0Sstevel@tonic-gate * During memory delete, turn off caches so that pages are not held. 1410*0Sstevel@tonic-gate * A better solution may be to unlock the pages while they are 1411*0Sstevel@tonic-gate * in the cache so that they may be collected naturally. 1412*0Sstevel@tonic-gate */ 1413*0Sstevel@tonic-gate 1414*0Sstevel@tonic-gate /*ARGSUSED*/ 1415*0Sstevel@tonic-gate static int 1416*0Sstevel@tonic-gate segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages) 1417*0Sstevel@tonic-gate { 1418*0Sstevel@tonic-gate atomic_add_32(&segkp_indel, 1); 1419*0Sstevel@tonic-gate segkp_cache_free(); 1420*0Sstevel@tonic-gate return (0); 1421*0Sstevel@tonic-gate } 1422*0Sstevel@tonic-gate 1423*0Sstevel@tonic-gate /*ARGSUSED*/ 1424*0Sstevel@tonic-gate static void 1425*0Sstevel@tonic-gate segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled) 1426*0Sstevel@tonic-gate { 1427*0Sstevel@tonic-gate atomic_add_32(&segkp_indel, -1); 1428*0Sstevel@tonic-gate } 1429*0Sstevel@tonic-gate 1430*0Sstevel@tonic-gate static kphysm_setup_vector_t segkp_mem_config_vec = { 1431*0Sstevel@tonic-gate KPHYSM_SETUP_VECTOR_VERSION, 1432*0Sstevel@tonic-gate segkp_mem_config_post_add, 1433*0Sstevel@tonic-gate segkp_mem_config_pre_del, 1434*0Sstevel@tonic-gate segkp_mem_config_post_del, 1435*0Sstevel@tonic-gate }; 1436*0Sstevel@tonic-gate 1437*0Sstevel@tonic-gate static void 1438*0Sstevel@tonic-gate segkpinit_mem_config(struct seg *seg) 1439*0Sstevel@tonic-gate { 1440*0Sstevel@tonic-gate int ret; 1441*0Sstevel@tonic-gate 1442*0Sstevel@tonic-gate ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg); 1443*0Sstevel@tonic-gate ASSERT(ret == 0); 1444*0Sstevel@tonic-gate } 1445