/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable, variable-size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or be locked in memory.
 */
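
/*
 * Usage sketch (illustrative only, not code from elsewhere in the tree):
 * a client allocating a zeroed, redzone-protected, pageable resource
 * from the global segkp segment, then freeing it. len is assumed to be
 * a multiple of PAGESIZE, as segkp_get_internal() requires:
 *
 *	caddr_t va = segkp_get(segkp, len, KPD_HASREDZONE | KPD_ZERO);
 *	if (va == NULL)
 *		return (ENOMEM);
 *	...	(use [va, va + len) as pageable kernel memory)
 *	segkp_release(segkp, va);
 */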

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <sys/bitmap.h>

/*
 * Private seg op routines
 */
static void	segkp_badop(void);
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct	segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t	*segkp_getpolicy(struct seg *seg,
    caddr_t addr);
static int	segkp_capable(struct seg *seg, segcapability_t capability);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

#define	SEGKP_BADOP(t)	(t(*)())segkp_badop

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called). 5000 seems
 * to work reasonably well...
 */
long		red_minavail = 5000;

/*
 * will be set to 1 for 32-bit x86 systems only, in startup.c
 */
int	segkp_fromheap = 0;
ulong_t *segkp_bitmap;

/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious; if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure. To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */

static struct seg_ops segkp_ops = {
	SEGKP_BADOP(int),		/* dup */
	SEGKP_BADOP(int),		/* unmap */
	SEGKP_BADOP(void),		/* free */
	segkp_fault,
	SEGKP_BADOP(faultcode_t),	/* faulta */
	SEGKP_BADOP(int),		/* setprot */
	segkp_checkprot,
	segkp_kluster,
	SEGKP_BADOP(size_t),		/* swapout */
	SEGKP_BADOP(int),		/* sync */
	SEGKP_BADOP(size_t),		/* incore */
	SEGKP_BADOP(int),		/* lockop */
	SEGKP_BADOP(int),		/* getprot */
	SEGKP_BADOP(u_offset_t),	/* getoffset */
	SEGKP_BADOP(int),		/* gettype */
	SEGKP_BADOP(int),		/* getvp */
	SEGKP_BADOP(int),		/* advise */
	segkp_dump,			/* dump */
	segkp_pagelock,			/* pagelock */
	SEGKP_BADOP(int),		/* setpgsz */
	segkp_getmemid,			/* getmemid */
	segkp_getpolicy,		/* getpolicy */
	segkp_capable,			/* capable */
};


static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;

/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		segkp_bitmap = NULL;
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}


/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
	int i;

	if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
		return ((void *)-1);

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (segkp_cache[i].kpf_inuse)
			continue;
		segkp_cache[i].kpf_inuse = 1;
		segkp_cache[i].kpf_max = maxsize;
		segkp_cache[i].kpf_flags = flags;
		segkp_cache[i].kpf_seg = seg;
		segkp_cache[i].kpf_len = len;
		mutex_exit(&segkp_lock);
		return ((void *)(uintptr_t)i);
	}
	mutex_exit(&segkp_lock);
	return ((void *)-1);
}
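
/*
 * Usage sketch (illustrative only): a client with many same-sized,
 * same-flagged allocations can set up a freelist once and allocate
 * from it. segkp_cache_get() falls back to a fresh allocation when the
 * freelist is empty, and segkp_release() returns entries to the
 * freelist while it holds fewer than the stated maximum (16 here);
 * ptob(btopr()) rounds the hypothetical size up to whole pages:
 *
 *	void *cookie = segkp_cache_init(segkp, 16,
 *	    ptob(btopr(DEFAULTSTKSZ)), KPD_HASREDZONE | KPD_LOCKED);
 *	caddr_t va = segkp_cache_get(cookie);
 *	...
 *	segkp_release(segkp, va);
 */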

/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
	struct segkp_data *kpd;
	struct seg *seg;
	int i;

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (!segkp_cache[i].kpf_inuse)
			continue;
		/*
		 * Disconnect the freelist and process each element
		 */
		kpd = segkp_cache[i].kpf_list;
		seg = segkp_cache[i].kpf_seg;
		segkp_cache[i].kpf_list = NULL;
		segkp_cache[i].kpf_count = 0;
		mutex_exit(&segkp_lock);

		while (kpd != NULL) {
			struct segkp_data *next;

			next = kpd->kp_next;
			segkp_release_internal(seg, kpd, kpd->kp_len);
			kpd = next;
		}
		mutex_enter(&segkp_lock);
	}
	mutex_exit(&segkp_lock);
}

/*
 * There are 2 entries into segkp_get_internal. The first includes a cookie
 * used to access a pool of cached segkp resources. The second does not
 * use the cache.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
	struct segkp_data *kpd = NULL;

	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
3110Sstevel@tonic-gate
3120Sstevel@tonic-gate /*
3130Sstevel@tonic-gate * Return a 'cached' segkp address
3140Sstevel@tonic-gate */
3150Sstevel@tonic-gate caddr_t
segkp_cache_get(void * cookie)3160Sstevel@tonic-gate segkp_cache_get(void *cookie)
3170Sstevel@tonic-gate {
3180Sstevel@tonic-gate struct segkp_cache *freelist = NULL;
3190Sstevel@tonic-gate struct segkp_data *kpd = NULL;
3200Sstevel@tonic-gate int index = (int)(uintptr_t)cookie;
3210Sstevel@tonic-gate struct seg *seg;
3220Sstevel@tonic-gate size_t len;
3230Sstevel@tonic-gate uint_t flags;
3240Sstevel@tonic-gate
3250Sstevel@tonic-gate if (index < 0 || index >= SEGKP_MAX_CACHE)
3260Sstevel@tonic-gate return (NULL);
3270Sstevel@tonic-gate freelist = &segkp_cache[index];
3280Sstevel@tonic-gate
3290Sstevel@tonic-gate mutex_enter(&segkp_lock);
3300Sstevel@tonic-gate seg = freelist->kpf_seg;
3310Sstevel@tonic-gate flags = freelist->kpf_flags;
3320Sstevel@tonic-gate if (freelist->kpf_list != NULL) {
3330Sstevel@tonic-gate kpd = freelist->kpf_list;
3340Sstevel@tonic-gate freelist->kpf_list = kpd->kp_next;
3350Sstevel@tonic-gate freelist->kpf_count--;
3360Sstevel@tonic-gate mutex_exit(&segkp_lock);
3370Sstevel@tonic-gate kpd->kp_next = NULL;
3380Sstevel@tonic-gate segkp_insert(seg, kpd);
3390Sstevel@tonic-gate return (stom(kpd->kp_base, flags));
3400Sstevel@tonic-gate }
3410Sstevel@tonic-gate len = freelist->kpf_len;
3420Sstevel@tonic-gate mutex_exit(&segkp_lock);
3430Sstevel@tonic-gate if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
3440Sstevel@tonic-gate kpd->kp_cookie = index;
3450Sstevel@tonic-gate return (stom(kpd->kp_base, flags));
3460Sstevel@tonic-gate }
3470Sstevel@tonic-gate return (NULL);
3480Sstevel@tonic-gate }
3490Sstevel@tonic-gate
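/*
 * Like segkp_get(), but the resource is backed by the caller-supplied
 * anon_map; any swap reservation required for it is assumed to have
 * been done by the caller already (see the amp != NULL case in
 * segkp_get_internal() below).
 */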
caddr_t
segkp_get_withanonmap(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct anon_map *amp)
{
	struct segkp_data *kpd = NULL;

	ASSERT(amp != NULL);
	flags |= KPD_HASAMP;
	if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

/*
 * This does the real work of segkp allocation.
 * Returns the base address to the client. len must be page-aligned. A null
 * value is returned if there are no more vm resources (e.g. pages, swap).
 * The len and base recorded in the private data structure include the
 * redzone and the redzone length (if applicable). If the user requests a
 * redzone, either the first or last page is left unmapped depending on
 * whether stacks grow to low or high memory.
 *
 * The client may also specify a no-wait flag. If that is set then the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
static caddr_t
segkp_get_internal(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct segkp_data **tkpd,
	struct anon_map *amp)
{
	struct segkp_segdata	*kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data	*kpd;
	caddr_t vbase = NULL;	/* always first virtual, may not be mapped */
	pgcnt_t np = 0;		/* number of pages in the resource */
	pgcnt_t segkpindex;
	long i;
	caddr_t va;
	pgcnt_t pages = 0;
	ulong_t anon_idx = 0;
	int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;

	if (len & PAGEOFFSET) {
		panic("segkp_get: len is not page-aligned");
		/*NOTREACHED*/
	}

	ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

	/* Only allow KPD_NO_ANON if we are going to lock it down */
	if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
		return (NULL);

	if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
		return (NULL);
	/*
	 * Fix up the len to reflect the REDZONE if applicable
	 */
	if (flags & KPD_HASREDZONE)
		len += PAGESIZE;
	np = btop(len);

	vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
	if (vbase == NULL) {
		kmem_free(kpd, sizeof (struct segkp_data));
		return (NULL);
	}

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pages = btop(SEGKP_MAPLEN(len, flags));
		if (page_resv(pages, kmflag) == 0) {
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		if ((flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
	}

	/*
	 * Reserve sufficient swap space for this vm resource. We'll
	 * actually allocate it in the loop below, but reserving it
	 * here allows us to back out more gracefully than if we
	 * had an allocation failure in the body of the loop.
	 *
	 * Note that we don't need swap space for the red zone page.
	 */
	if (amp != NULL) {
		/*
		 * The swap reservation has been done, if required, and the
		 * anon_hdr is separate.
		 */
		anon_idx = 0;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = amp->ahp;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);

	} else if ((flags & KPD_NO_ANON) == 0) {
		if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
			if (flags & KPD_LOCKED) {
				atomic_add_long(&anon_segkp_pages_locked,
				    -pages);
				page_unresv(pages);
			}
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		atomic_add_long(&anon_segkp_pages_resv,
		    btop(SEGKP_MAPLEN(len, flags)));
		anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = kpsd->kpsd_anon;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);
	} else {
		kpd->kp_anon = NULL;
		kpd->kp_anon_idx = 0;
	}

	/*
	 * Allocate page and anon resources for the virtual address range
	 * except the redzone
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
	for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
		page_t		*pl[2];
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		page_t		*pp = NULL;

		/*
		 * Mark this page to be a segkp page in the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_SET(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		/*
		 * If this page is the red zone page, we don't need swap
		 * space for it. Note that we skip over the code that
		 * establishes MMU mappings, so that the page remains
		 * invalid.
		 */
		if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
			continue;

		if (kpd->kp_anon != NULL) {
			struct anon *ap;

			ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
			    == NULL);
			/*
			 * Determine the "vp" and "off" of the anon slot.
			 */
			ap = anon_alloc(NULL, 0);
			if (amp != NULL)
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
			    ap, ANON_SLEEP);
			if (amp != NULL)
				ANON_LOCK_EXIT(&amp->a_rwlock);
			swap_xlate(ap, &vp, &off);

			/*
			 * Create a page with the specified identity. The
			 * page is returned with the "shared" lock held.
			 */
			err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
			    NULL, pl, PAGESIZE, seg, va, S_CREATE,
			    kcred, NULL);
			if (err) {
				/*
				 * XXX - This should not fail.
				 */
				panic("segkp_get: no pages");
				/*NOTREACHED*/
			}
			pp = pl[0];
		} else {
			ASSERT(page_exists(&kvp,
			    (u_offset_t)(uintptr_t)va) == NULL);

			if ((pp = page_create_va(&kvp,
			    (u_offset_t)(uintptr_t)va, PAGESIZE,
			    (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
			    PG_NORELOC, seg, va)) == NULL) {
				/*
				 * Legitimize resource; then destroy it.
				 * Easier than trying to unwind here.
				 */
				kpd->kp_flags = flags;
				kpd->kp_base = vbase;
				kpd->kp_len = len;
				segkp_release_internal(seg, kpd, va - vbase);
				return (NULL);
			}
			page_io_unlock(pp);
		}

		if (flags & KPD_ZERO)
			pagezero(pp, 0, PAGESIZE);

		/*
		 * Load and lock an MMU translation for the page.
		 */
		hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
		    ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

		/*
		 * Now, release lock on the page.
		 */
		if (flags & KPD_LOCKED) {
			/*
			 * Indicate to page_retire framework that this
			 * page can only be retired when it is freed.
			 */
			PP_SETRAF(pp);
			page_downgrade(pp);
		} else
			page_unlock(pp);
	}

	kpd->kp_flags = flags;
	kpd->kp_base = vbase;
	kpd->kp_len = len;
	segkp_insert(seg, kpd);
	*tkpd = kpd;
	return (stom(kpd->kp_base, flags));
}

/*
 * Release the resource to the cache if the pool (designated by the cookie)
 * has less than the maximum allowable. If inserted in cache,
 * segkp_delete ensures the element is taken off of the active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
	struct segkp_cache *freelist;
	struct segkp_data *kpd = NULL;

	if ((kpd = segkp_find(seg, vaddr)) == NULL) {
		panic("segkp_release: null kpd");
		/*NOTREACHED*/
	}

	if (kpd->kp_cookie != -1) {
		freelist = &segkp_cache[kpd->kp_cookie];
		mutex_enter(&segkp_lock);
		if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
			segkp_delete(seg, kpd);
			kpd->kp_next = freelist->kpf_list;
			freelist->kpf_list = kpd;
			freelist->kpf_count++;
			mutex_exit(&segkp_lock);
			return;
		} else {
			mutex_exit(&segkp_lock);
			kpd->kp_cookie = -1;
		}
	}
	segkp_release_internal(seg, kpd, kpd->kp_len);
}

/*
 * Free the entire resource. segkp_unlock gets called with the start of the
 * mapped portion of the resource. The length is the size of the mapped
 * portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
	caddr_t		va;
	long		i;
	long		redzone;
	size_t		np;
	page_t		*pp;
	struct vnode	*vp;
	anoff_t		off;
	struct anon	*ap;
	pgcnt_t		segkpindex;

	ASSERT(kpd != NULL);
	ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
	np = btop(len);

	/* Remove from active hash list */
	if (kpd->kp_cookie == -1) {
		mutex_enter(&segkp_lock);
		segkp_delete(seg, kpd);
		mutex_exit(&segkp_lock);
	}

	/*
	 * Precompute redzone page index.
	 */
	redzone = -1;
	if (kpd->kp_flags & KPD_HASREDZONE)
		redzone = KPD_REDZONE(kpd);


	va = kpd->kp_base;

	hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
	    ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	/*
	 * Free up those anon resources that are quiescent.
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(va - kvseg.s_base));
	for (i = 0; i < np; i++, va += PAGESIZE) {

		/*
		 * Clear the bit for this page from the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		if (i == redzone)
			continue;
		if (kpd->kp_anon) {
			/*
			 * Free up anon resources and destroy the
			 * associated pages.
			 *
			 * Release the lock if there is one. Have to get the
			 * page to do this, unfortunately.
			 */
			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(vp, (u_offset_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock ");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);

				page_unlock(pp);
			}
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv_zone(PAGESIZE, NULL);
				atomic_add_long(&anon_segkp_pages_resv,
				    -1);
			}
			TRACE_5(TR_FAC_VM,
			    TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
			    kpd, va, PAGESIZE, 0, 0);
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
			    SE_EXCL);
			if (pp != NULL)
				page_destroy(pp, 0);
		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}

/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base. If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1. Otherwise, it will return 0. segkp_map_red() can
 * _only_ be called when:
 *
 *   - it is safe to sleep on page_create_va().
 *   - the caller is non-swappable.
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time. Note that the caller must _remain_ non-swappable until after
 * calling segkp_unmap_red().
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
#if defined(STACK_GROWTH_DOWN)
int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
	caddr_t stkbase;
#endif

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX	We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else /* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
		    NULL);

		/*
		 * Allocate the physical for the red page.
		 */
		/*
		 * No PG_NORELOC here to avoid waits. Unlikely to get
		 * a relocate happening in the short time the page exists
		 * and it will be OK anyway.
		 */

		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_add_32(&red_nmapped, 1);
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) cas32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_add_32(&red_ndoubles, 1);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy. We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run. We're running without a
		 * redzone right now: if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure. That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record hrestime and the calling
		 * thread.
		 */
		red_deep_hires = hrestime.tv_nsec;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif /* _LP64 */
}
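
/*
 * Typical caller pattern (an illustrative sketch; pagefault() is the
 * only real caller today): remember whether the redzone was mapped and
 * undo it when the deep-stack work completes, remaining non-swappable
 * for the whole interval:
 *
 *	int mapped = segkp_map_red();
 *	...	(deep stack processing)
 *	if (mapped)
 *		segkp_unmap_red();
 */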

void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up; we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no wait for upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
		page_unlock(pp);
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
		/* pp may be NULL here, hence the test below */
	}

	/*
	 * Destroy the page, with dontfree set to zero (i.e. free it).
	 */
	if (pp != NULL)
		page_destroy(pp, 0);
	curthread->t_red_pp = NULL;
}
#else
#error	Red stacks only supported with downwards stack growth.
#endif

/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	enum fault_type type,
	enum seg_rw rw)
{
	struct segkp_data	*kpd = NULL;
	int	err;

	ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

	/*
	 * Sanity checks.
	 */
	if (type == F_PROT) {
		panic("segkp_fault: unexpected F_PROT fault");
		/*NOTREACHED*/
	}

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (FC_NOMAP);

	mutex_enter(&kpd->kp_lock);

	if (type == F_SOFTLOCK) {
		ASSERT(!(kpd->kp_flags & KPD_LOCKED));
		/*
		 * The F_SOFTLOCK case has more stringent
		 * range requirements: the given range must exactly coincide
		 * with the resource's mapped portion. Note reference to
		 * redzone is handled since vaddr would not equal base
		 */
		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(EFAULT));
		}

		if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(err));
		}
		kpd->kp_flags |= KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (0);
	}

	if (type == F_INVAL) {
		ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

		/*
		 * Check if we touched the redzone. Somewhat optimistic
		 * here if we are touching the redzone of our own stack
		 * since we wouldn't have a stack to get this far...
		 */
		if ((kpd->kp_flags & KPD_HASREDZONE) &&
		    btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
			panic("segkp_fault: accessing redzone");

		/*
		 * This fault may occur while the page is being F_SOFTLOCK'ed.
		 * Return since a 2nd segkp_load is unnecessary and also would
		 * result in the page being locked twice and eventually
		 * hang the thread_reaper thread.
		 */
		if (kpd->kp_flags & KPD_LOCKED) {
			mutex_exit(&kpd->kp_lock);
			return (0);
		}

		err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}

	if (type == F_SOFTUNLOCK) {
		uint_t	flags;

		/*
		 * Make sure the addr is LOCKED and it has anon backing
		 * before unlocking
		 */
		if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
			panic("segkp_fault: bad unlock");
			/*NOTREACHED*/
		}

		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			panic("segkp_fault: bad range");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE)
			flags = kpd->kp_flags | KPD_WRITEDIRTY;
		else
			flags = kpd->kp_flags;
		err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
		kpd->kp_flags &= ~KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}
	mutex_exit(&kpd->kp_lock);
	panic("segkp_fault: bogus fault type: %d\n", type);
	/*NOTREACHED*/
}
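
/*
 * Illustrative sequence (a sketch, not code from this file): kernel
 * clients reach the F_SOFTLOCK/F_SOFTUNLOCK protocol above via
 * as_fault() on the kernel address space, e.g. to hold the pages for
 * I/O and release them afterwards:
 *
 *	(void) as_fault(kas.a_hat, &kas, vaddr, len, F_SOFTLOCK, S_WRITE);
 *	...	(perform the I/O)
 *	(void) as_fault(kas.a_hat, &kas, vaddr, len, F_SOFTUNLOCK, S_WRITE);
 *
 * Per the checks above, the F_SOFTLOCK range must exactly match the
 * resource's mapped portion.
 */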

/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len. For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
	struct segkp_data *kpd = NULL;
	caddr_t mbase;
	size_t mlen;

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (EACCES);

	mutex_enter(&kpd->kp_lock);
	mbase = stom(kpd->kp_base, kpd->kp_flags);
	mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
	if (len > mlen || vaddr < mbase ||
	    ((vaddr + len) > (mbase + mlen))) {
		mutex_exit(&kpd->kp_lock);
		return (EACCES);
	}
	mutex_exit(&kpd->kp_lock);
	return (0);
}


/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr. We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For seg_u we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	uint_t lock;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btop(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
		(void) page_resv(pages, KM_SLEEP);
	}

	/*
	 * Loop through the pages in the given range.
	 */
	va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	vaddr = va;
	vlim = va + len;
	lock = flags & KPD_LOCKED;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	for (; va < vlim; va += PAGESIZE, i++) {
		page_t		*pl[2];	/* second element NULL terminator */
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		struct anon	*ap;

		/*
		 * Summon the page.  If it's not resident, arrange
		 * for synchronous i/o to pull it in.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		/*
		 * The returned page list will have exactly one entry,
		 * which is returned to us already kept.
		 */
		err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
		    pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

		if (err) {
			/*
			 * Back out of what we've done so far.
			 */
			(void) segkp_unlock(hat, seg, vaddr,
			    (va - vaddr), kpd, flags);
			return (err);
		}

		/*
		 * Load an MMU translation for the page.
		 */
		hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
		    lock ? HAT_LOAD_LOCK : HAT_LOAD);

		if (!lock) {
			/*
			 * Now, release "shared" lock on the page.
			 */
			page_unlock(pl[0]);
		}
	}
	return (0);
}

/*
 * At the very least, unload the mmu-translations and unlock the range if
 * locked. Can be called with the flag value KPD_WRITEDIRTY, which specifies
 * that any dirty pages should be written to disk.
 */
static int
segkp_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	struct anon *ap;

#ifdef lint
	seg = seg;
#endif /* lint */

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	/*
	 * Loop through the pages in the given range.  It is assumed
	 * that segkp_unlock is called with a page-aligned base.
	 */
	va = vaddr;
	vlim = va + len;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	hat_unload(hat, va, len,
	    ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	for (; va < vlim; va += PAGESIZE, i++) {
		/*
		 * Find the page associated with this part of the
		 * slot, tracking it down through its associated swap
		 * space.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);
		if (flags & KPD_LOCKED) {
			/*
			 * A locked slot's pages must be resident.
			 */
			if ((pp = page_find(vp, off)) == NULL) {
				panic("segkp_unlock: missing page");
				/*NOTREACHED*/
			}
		} else {
			/*
			 * Nothing to do if the slot is not locked and the
			 * page doesn't exist.
			 */
			if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
				continue;
		}

		/*
		 * If the page doesn't have any translations, is
		 * dirty and not being shared, then push it out
		 * asynchronously and avoid waiting for the
		 * pageout daemon to do it for us.
		 *
		 * XXX - Do we really need to get the "exclusive"
		 * lock via an upgrade?
		 */
		if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
		    hat_ismod(pp) && page_tryupgrade(pp)) {
			/*
			 * Hold the vnode before releasing the page lock to
			 * prevent it from being freed and re-used by some
			 * other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * Want most powerful credentials we can get so
			 * use kcred.
			 */
			(void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred, NULL);
			VN_RELE(vp);
		} else {
			page_unlock(pp);
		}
	}

	/* If the range was locked, release the physical memory reserved */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btopr(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}
	return (0);
}
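
/*
 * Illustrative (hypothetical) caller sketch, not part of this driver:
 * tear down a locked slot's translations and push any dirty pages to
 * swap.  Per the ASSERT above, kp_lock must be held across the call.
 *
 *	mutex_enter(&kpd->kp_lock);
 *	(void) segkp_unlock(kas.a_hat, segkp, kpd->kp_base, kpd->kp_len,
 *	    kpd, kpd->kp_flags | KPD_WRITEDIRTY);
 *	mutex_exit(&kpd->kp_lock);
 */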

/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	int index;

	/*
	 * Insert the kpd based on the address that will be returned
	 * via segkp_release.
	 */
	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	mutex_enter(&segkp_lock);
	kpd->kp_next = kpsd->kpsd_hash[index];
	kpsd->kpsd_hash[index] = kpd;
	mutex_exit(&segkp_lock);
}
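
/*
 * The insertion above is a constant-time push onto the head of the
 * hash chain -- the generic singly-linked idiom (illustrative names,
 * not symbols from this file):
 *
 *	node->next = head[h];
 *	head[h] = node;
 *
 * Lookup order within a bucket is therefore most-recently-inserted
 * first.
 */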

/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data **kpp;
	int index;

	ASSERT(MUTEX_HELD(&segkp_lock));

	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	for (kpp = &kpsd->kpsd_hash[index];
	    *kpp != NULL; kpp = &((*kpp)->kp_next)) {
		if (*kpp == kpd) {
			*kpp = kpd->kp_next;
			return;
		}
	}
	panic("segkp_delete: unable to find element to delete");
	/*NOTREACHED*/
}
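
/*
 * Note the pointer-to-pointer walk in segkp_delete: because kpp points
 * at the link itself (first &kpsd_hash[index], then each &kp_next), the
 * unlink "*kpp = kpd->kp_next" handles the head of the chain and
 * interior elements identically, with no head-of-list special case.
 */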

/*
 * Find the kpd associated with a vaddr.
 *
 * Most callers of segkp_find will pass a vaddr that hashes to the
 * desired index, but there are cases where this is not true, in which
 * case we may have to scan the whole table looking for it.  This should
 * be very rare (e.g. a segkp_fault(F_INVAL) on an address somewhere in
 * the middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data *kpd;
	int i;
	int stop;

	i = stop = SEGKP_HASH(vaddr);
	mutex_enter(&segkp_lock);
	do {
		for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
		    kpd = kpd->kp_next) {
			if (vaddr >= kpd->kp_base &&
			    vaddr < kpd->kp_base + kpd->kp_len) {
				mutex_exit(&segkp_lock);
				return (kpd);
			}
		}
		if (--i < 0)
			i = SEGKP_HASHSZ - 1;	/* Wrap */
	} while (i != stop);
	mutex_exit(&segkp_lock);
	return (NULL);		/* Not found */
}
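
/*
 * The scan above starts at the bucket vaddr hashes to and walks
 * downward with wraparound, so every bucket is visited at most once
 * before the search gives up.  For example, if SEGKP_HASHSZ were 256
 * (an illustrative value only) and vaddr hashed to bucket 37, the
 * visit order would be 37, 36, ..., 0, 255, 254, ..., 38.
 */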

/*
 * Return the size of the swappable area for the resource containing v,
 * or 0 if no such resource is found.
 */
size_t
swapsize(caddr_t v)
{
	struct segkp_data *kpd;

	if ((kpd = segkp_find(segkp, v)) != NULL)
		return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
	else
		return (0);
}
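
/*
 * Hypothetical use (sketch only, not taken from this file): the swapper
 * can size a thread's swappable stack area directly from the stack's
 * base address.
 *
 *	size_t stksz = swapsize(tp->t_swap);	(tp is a kthread_t *)
 */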

/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
	int i;
	struct segkp_data *kpd;
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

	for (i = 0; i < SEGKP_HASHSZ; i++) {
		for (kpd = kpsd->kpsd_hash[i];
		    kpd != NULL; kpd = kpd->kp_next) {
			pfn_t pfn;
			caddr_t addr;
			caddr_t eaddr;

			addr = kpd->kp_base;
			eaddr = addr + kpd->kp_len;
			while (addr < eaddr) {
				ASSERT(seg->s_as == &kas);
				pfn = hat_getpfnum(seg->s_as->a_hat, addr);
				if (pfn != PFN_INVALID)
					dump_addpage(seg->s_as, addr, pfn);
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}
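
/*
 * Resetting dump_timeleft on every page above is a watchdog kick: it
 * keeps the crash-dump timeout from firing while a large set of segkp
 * slots is walked and their resident pages are added to the dump.
 */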

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkp_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
1422670Selowe
14230Sstevel@tonic-gate #include <sys/mem_config.h>
14240Sstevel@tonic-gate
14250Sstevel@tonic-gate /*ARGSUSED*/
14260Sstevel@tonic-gate static void
segkp_mem_config_post_add(void * arg,pgcnt_t delta_pages)14270Sstevel@tonic-gate segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
14280Sstevel@tonic-gate {}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_add_32(&segkp_indel, 1);
	segkp_cache_free();
	return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_add_32(&segkp_indel, -1);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}
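
/*
 * kphysm_setup_func_register() hooks the post-add/pre-delete/post-delete
 * callbacks in segkp_mem_config_vec into the memory DR framework; the
 * (void *)seg argument is handed back to each callback as 'arg'.  The
 * matching cleanup (hypothetical here, since segkp persists for the
 * life of the system) would be:
 *
 *	kphysm_setup_func_unregister(&segkp_mem_config_vec, (void *)seg);
 */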