10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
50Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
60Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
70Sstevel@tonic-gate  * with the License.
80Sstevel@tonic-gate  *
90Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
100Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
110Sstevel@tonic-gate  * See the License for the specific language governing permissions
120Sstevel@tonic-gate  * and limitations under the License.
130Sstevel@tonic-gate  *
140Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
150Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
160Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
170Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
180Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
190Sstevel@tonic-gate  *
200Sstevel@tonic-gate  * CDDL HEADER END
210Sstevel@tonic-gate  */
220Sstevel@tonic-gate /*
23*652Sjwadams  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
240Sstevel@tonic-gate  * Use is subject to license terms.
250Sstevel@tonic-gate  */
260Sstevel@tonic-gate 
270Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
280Sstevel@tonic-gate 
290Sstevel@tonic-gate #include "mtlib.h"
300Sstevel@tonic-gate #include "umem_base.h"
310Sstevel@tonic-gate #include "vmem_base.h"
320Sstevel@tonic-gate 
330Sstevel@tonic-gate #include <unistd.h>
340Sstevel@tonic-gate 
350Sstevel@tonic-gate /*
36*652Sjwadams  * The following functions are for pre- and post-fork1(2) handling.  See
37*652Sjwadams  * "Lock Ordering" in lib/libumem/common/umem.c for the lock ordering used.
380Sstevel@tonic-gate  */
390Sstevel@tonic-gate 
400Sstevel@tonic-gate static void
410Sstevel@tonic-gate umem_lockup_cache(umem_cache_t *cp)
420Sstevel@tonic-gate {
430Sstevel@tonic-gate 	int idx;
440Sstevel@tonic-gate 	int ncpus = cp->cache_cpu_mask + 1;
450Sstevel@tonic-gate 
460Sstevel@tonic-gate 	for (idx = 0; idx < ncpus; idx++)
470Sstevel@tonic-gate 		(void) mutex_lock(&cp->cache_cpu[idx].cc_lock);
480Sstevel@tonic-gate 
490Sstevel@tonic-gate 	(void) mutex_lock(&cp->cache_depot_lock);
500Sstevel@tonic-gate 	(void) mutex_lock(&cp->cache_lock);
510Sstevel@tonic-gate }
520Sstevel@tonic-gate 
530Sstevel@tonic-gate static void
540Sstevel@tonic-gate umem_release_cache(umem_cache_t *cp)
550Sstevel@tonic-gate {
560Sstevel@tonic-gate 	int idx;
570Sstevel@tonic-gate 	int ncpus = cp->cache_cpu_mask + 1;
580Sstevel@tonic-gate 
590Sstevel@tonic-gate 	(void) mutex_unlock(&cp->cache_lock);
600Sstevel@tonic-gate 	(void) mutex_unlock(&cp->cache_depot_lock);
610Sstevel@tonic-gate 
620Sstevel@tonic-gate 	for (idx = 0; idx < ncpus; idx++)
630Sstevel@tonic-gate 		(void) mutex_unlock(&cp->cache_cpu[idx].cc_lock);
640Sstevel@tonic-gate }
650Sstevel@tonic-gate 
660Sstevel@tonic-gate static void
670Sstevel@tonic-gate umem_lockup_log_header(umem_log_header_t *lhp)
680Sstevel@tonic-gate {
690Sstevel@tonic-gate 	int idx;
700Sstevel@tonic-gate 	if (lhp == NULL)
710Sstevel@tonic-gate 		return;
720Sstevel@tonic-gate 	for (idx = 0; idx < umem_max_ncpus; idx++)
730Sstevel@tonic-gate 		(void) mutex_lock(&lhp->lh_cpu[idx].clh_lock);
740Sstevel@tonic-gate 
750Sstevel@tonic-gate 	(void) mutex_lock(&lhp->lh_lock);
760Sstevel@tonic-gate }
770Sstevel@tonic-gate 
780Sstevel@tonic-gate static void
790Sstevel@tonic-gate umem_release_log_header(umem_log_header_t *lhp)
800Sstevel@tonic-gate {
810Sstevel@tonic-gate 	int idx;
820Sstevel@tonic-gate 	if (lhp == NULL)
830Sstevel@tonic-gate 		return;
840Sstevel@tonic-gate 
850Sstevel@tonic-gate 	(void) mutex_unlock(&lhp->lh_lock);
860Sstevel@tonic-gate 
870Sstevel@tonic-gate 	for (idx = 0; idx < umem_max_ncpus; idx++)
880Sstevel@tonic-gate 		(void) mutex_unlock(&lhp->lh_cpu[idx].clh_lock);
890Sstevel@tonic-gate }
900Sstevel@tonic-gate 
/*
 * pre-fork1(2) handler: acquire every lock in libumem so that no lock
 * is held by a thread that will not exist in the child.  The acquisition
 * order here must match the library's documented lock ordering (see the
 * "Lock Ordering" comment referenced at the top of this file); the
 * release functions below drop everything in exactly the reverse order.
 */
static void
umem_lockup(void)
{
	umem_cache_t *cp;

	(void) mutex_lock(&umem_init_lock);
	/*
	 * If another thread is busy initializing the library, we must
	 * wait for it to complete (by calling umem_init()) before allowing
	 * the fork() to proceed.
	 */
	if (umem_ready == UMEM_READY_INITING && umem_init_thr != thr_self()) {
		(void) mutex_unlock(&umem_init_lock);
		(void) umem_init();
		(void) mutex_lock(&umem_init_lock);
	}

	/* Lock down the vmem arenas and the sbrk backend beneath them. */
	vmem_lockup();
	vmem_sbrk_lockup();

	(void) mutex_lock(&umem_cache_lock);
	(void) mutex_lock(&umem_update_lock);
	(void) mutex_lock(&umem_flags_lock);

	/*
	 * Lock every cache; umem_null_cache anchors the doubly-linked
	 * cache list, so it is locked first and the list is walked from it.
	 */
	umem_lockup_cache(&umem_null_cache);
	for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
	    cp = cp->cache_prev)
		umem_lockup_cache(cp);

	umem_lockup_log_header(umem_transaction_log);
	umem_lockup_log_header(umem_content_log);
	umem_lockup_log_header(umem_failure_log);
	umem_lockup_log_header(umem_slab_log);

	/*
	 * NOTE(review): broadcast on umem_update_cv while holding
	 * umem_update_lock — presumably so any thread blocked in
	 * cond_wait re-evaluates state once the locks are released
	 * after fork; confirm against umem.c's update-thread logic.
	 */
	(void) cond_broadcast(&umem_update_cv);

}
1280Sstevel@tonic-gate 
/*
 * Common post-fork1(2) handler, run in both parent (as_child == 0) and
 * child (as_child == 1).  In the child it first repairs update-thread
 * state (the update threads do not survive fork), then drops every lock
 * taken by umem_lockup(), in exactly the reverse acquisition order.
 */
static void
umem_do_release(int as_child)
{
	umem_cache_t *cp;
	int cleanup_update = 0;

	/*
	 * Clean up the update state if we are the child process and
	 * another thread was processing updates.
	 */
	if (as_child) {
		/*
		 * If the recorded updater is not the (sole surviving)
		 * forking thread, that updater is gone in the child:
		 * clear the thread id and flag its work for re-queueing.
		 */
		if (umem_update_thr != thr_self()) {
			umem_update_thr = 0;
			cleanup_update = 1;
		}
		if (umem_st_update_thr != thr_self()) {
			umem_st_update_thr = 0;
			cleanup_update = 1;
		}
	}

	if (cleanup_update) {
		umem_reaping = UMEM_REAP_DONE;

		for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
		    cp = cp->cache_next) {
			if (cp->cache_uflags & UMU_NOTIFY)
				cp->cache_uflags &= ~UMU_NOTIFY;

			/*
			 * If the cache is active, we just re-add it to
			 * the update list.  This will re-do any active
			 * updates on the cache, but that won't break
			 * anything.
			 *
			 * The worst that can happen is a cache has
			 * its magazines rescaled twice, instead of once.
			 */
			if (cp->cache_uflags & UMU_ACTIVE) {
				umem_cache_t *cnext, *cprev;

				ASSERT(cp->cache_unext == NULL &&
				    cp->cache_uprev == NULL);

				/* splice cp onto the tail of the update list */
				cp->cache_uflags &= ~UMU_ACTIVE;
				cp->cache_unext = cnext = &umem_null_cache;
				cp->cache_uprev = cprev =
				    umem_null_cache.cache_uprev;
				cnext->cache_uprev = cp;
				cprev->cache_unext = cp;
			}
		}
	}

	/* Release everything in the reverse of umem_lockup()'s order. */
	umem_release_log_header(umem_slab_log);
	umem_release_log_header(umem_failure_log);
	umem_release_log_header(umem_content_log);
	umem_release_log_header(umem_transaction_log);

	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
	    cp = cp->cache_next)
		umem_release_cache(cp);
	umem_release_cache(&umem_null_cache);

	(void) mutex_unlock(&umem_flags_lock);
	(void) mutex_unlock(&umem_update_lock);
	(void) mutex_unlock(&umem_cache_lock);

	vmem_sbrk_release();
	vmem_release();

	(void) mutex_unlock(&umem_init_lock);
}
202*652Sjwadams 
/* post-fork1(2) handler for the parent process */
static void
umem_release(void)
{
	umem_do_release(0);
}
208*652Sjwadams 
/* post-fork1(2) handler for the child process */
static void
umem_release_child(void)
{
	umem_do_release(1);
}
2140Sstevel@tonic-gate 
/*
 * Register the pre- and post-fork handlers so libumem's locks are held
 * across fork1(2) and consistently released in both parent and child.
 */
void
umem_forkhandler_init(void)
{
	/*
	 * There is no way to unregister these atfork functions,
	 * but we don't need to.  The dynamic linker and libc take
	 * care of unregistering them if/when the library is unloaded.
	 */
	(void) pthread_atfork(umem_lockup, umem_release, umem_release_child);
}
225