/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "c_synonyms.h"
#include "umem_base.h"
#include "vmem_base.h"

#include <unistd.h>

/*
 * The following functions are for pre- and post-fork1(2) handling.  See
 * "Lock Ordering" in lib/libumem/common/umem.c for the lock ordering used.
 */

static void
umem_lockup_cache(umem_cache_t *cp)
{
	int idx;
	int ncpus = cp->cache_cpu_mask + 1;

	for (idx = 0; idx < ncpus; idx++)
		(void) mutex_lock(&cp->cache_cpu[idx].cc_lock);

	(void) mutex_lock(&cp->cache_depot_lock);
	(void) mutex_lock(&cp->cache_lock);
}

static void
umem_release_cache(umem_cache_t *cp)
{
	int idx;
	int ncpus = cp->cache_cpu_mask + 1;

	(void) mutex_unlock(&cp->cache_lock);
	(void) mutex_unlock(&cp->cache_depot_lock);

	for (idx = 0; idx < ncpus; idx++)
		(void) mutex_unlock(&cp->cache_cpu[idx].cc_lock);
}

static void
umem_lockup_log_header(umem_log_header_t *lhp)
{
	int idx;

	if (lhp == NULL)
		return;
	for (idx = 0; idx < umem_max_ncpus; idx++)
		(void) mutex_lock(&lhp->lh_cpu[idx].clh_lock);

	(void) mutex_lock(&lhp->lh_lock);
}

static void
umem_release_log_header(umem_log_header_t *lhp)
{
	int idx;

	if (lhp == NULL)
		return;

	(void) mutex_unlock(&lhp->lh_lock);

	for (idx = 0; idx < umem_max_ncpus; idx++)
		(void) mutex_unlock(&lhp->lh_cpu[idx].clh_lock);
}

static void
umem_lockup(void)
{
	umem_cache_t *cp;

	(void) mutex_lock(&umem_init_lock);
	/*
	 * If another thread is busy initializing the library, we must
	 * wait for it to complete (by calling umem_init()) before allowing
	 * the fork() to proceed.
	 */
	if (umem_ready == UMEM_READY_INITING && umem_init_thr != thr_self()) {
		(void) mutex_unlock(&umem_init_lock);
		(void) umem_init();
		(void) mutex_lock(&umem_init_lock);
	}

	vmem_lockup();
	vmem_sbrk_lockup();

	(void) mutex_lock(&umem_cache_lock);
	(void) mutex_lock(&umem_update_lock);
	(void) mutex_lock(&umem_flags_lock);

	umem_lockup_cache(&umem_null_cache);
	for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
	    cp = cp->cache_prev)
		umem_lockup_cache(cp);

	umem_lockup_log_header(umem_transaction_log);
	umem_lockup_log_header(umem_content_log);
	umem_lockup_log_header(umem_failure_log);
	umem_lockup_log_header(umem_slab_log);

	(void) cond_broadcast(&umem_update_cv);
}

static void
umem_do_release(int as_child)
{
	umem_cache_t *cp;
	int cleanup_update = 0;

	/*
	 * Clean up the update state if we are the child process and
	 * another thread was processing updates.
	 */
	if (as_child) {
		if (umem_update_thr != thr_self()) {
			umem_update_thr = 0;
			cleanup_update = 1;
		}
		if (umem_st_update_thr != thr_self()) {
			umem_st_update_thr = 0;
			cleanup_update = 1;
		}
	}

	if (cleanup_update) {
		umem_reaping = UMEM_REAP_DONE;

		for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
		    cp = cp->cache_next) {
			if (cp->cache_uflags & UMU_NOTIFY)
				cp->cache_uflags &= ~UMU_NOTIFY;

			/*
			 * If the cache is active, we just re-add it to
			 * the update list.  This will re-do any active
			 * updates on the cache, but that won't break
			 * anything.
			 *
			 * The worst that can happen is a cache has
			 * its magazines rescaled twice, instead of once.
			 */
			if (cp->cache_uflags & UMU_ACTIVE) {
				umem_cache_t *cnext, *cprev;

				ASSERT(cp->cache_unext == NULL &&
				    cp->cache_uprev == NULL);

				cp->cache_uflags &= ~UMU_ACTIVE;
				cp->cache_unext = cnext = &umem_null_cache;
				cp->cache_uprev = cprev =
				    umem_null_cache.cache_uprev;
				cnext->cache_uprev = cp;
				cprev->cache_unext = cp;
			}
		}
	}

	umem_release_log_header(umem_slab_log);
	umem_release_log_header(umem_failure_log);
	umem_release_log_header(umem_content_log);
	umem_release_log_header(umem_transaction_log);

	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
	    cp = cp->cache_next)
		umem_release_cache(cp);
	umem_release_cache(&umem_null_cache);

	(void) mutex_unlock(&umem_flags_lock);
	(void) mutex_unlock(&umem_update_lock);
	(void) mutex_unlock(&umem_cache_lock);

	vmem_sbrk_release();
	vmem_release();

	(void) mutex_unlock(&umem_init_lock);
}

static void
umem_release(void)
{
	umem_do_release(0);
}

static void
umem_release_child(void)
{
	umem_do_release(1);
}

void
umem_forkhandler_init(void)
{
	/*
	 * There is no way to unregister these atfork functions,
	 * but we don't need to.  The dynamic linker and libc take
	 * care of unregistering them if/when the library is unloaded.
	 */
	(void) pthread_atfork(umem_lockup, umem_release, umem_release_child);
}
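
/*
 * Illustrative sketch only (hence under #if 0, not part of libumem): how
 * the handlers registered above behave from an application's point of
 * view.  umem_lockup() runs in the parent before the address space is
 * duplicated, so the child's copies of the umem/vmem locks are in a known
 * state; umem_release() then unlocks them in the parent, and
 * umem_release_child() unlocks them (and clears the update-thread state)
 * in the child before fork1() returns.  The helper name
 * child_alloc_after_fork() below is hypothetical.
 */
#if 0
#include <umem.h>
#include <unistd.h>
#include <sys/wait.h>

static void
child_alloc_after_fork(void)
{
	pid_t pid;

	pid = fork1();	/* the prepare handler umem_lockup() runs first */

	if (pid == 0) {
		/* child: umem_release_child() has already run */
		void *buf = umem_alloc(128, UMEM_DEFAULT);

		if (buf != NULL)
			umem_free(buf, 128);
		_exit(0);
	}

	/* parent: umem_release() has already run */
	if (pid > 0)
		(void) waitpid(pid, NULL, 0);
}
#endif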