/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E%	SMI"

#include "umem_base.h"
#include "vmem_base.h"

#include <unistd.h>

/*
 * The following functions are for pre- and post-fork1(2) handling.  See
 * "Lock Ordering" in lib/libumem/common/umem.c for the lock ordering used.
 */

390Sstevel@tonic-gate static void
umem_lockup_cache(umem_cache_t * cp)400Sstevel@tonic-gate umem_lockup_cache(umem_cache_t *cp)
410Sstevel@tonic-gate {
420Sstevel@tonic-gate int idx;
430Sstevel@tonic-gate int ncpus = cp->cache_cpu_mask + 1;
440Sstevel@tonic-gate
450Sstevel@tonic-gate for (idx = 0; idx < ncpus; idx++)
460Sstevel@tonic-gate (void) mutex_lock(&cp->cache_cpu[idx].cc_lock);
470Sstevel@tonic-gate
480Sstevel@tonic-gate (void) mutex_lock(&cp->cache_depot_lock);
490Sstevel@tonic-gate (void) mutex_lock(&cp->cache_lock);
500Sstevel@tonic-gate }
510Sstevel@tonic-gate
520Sstevel@tonic-gate static void
umem_release_cache(umem_cache_t * cp)530Sstevel@tonic-gate umem_release_cache(umem_cache_t *cp)
540Sstevel@tonic-gate {
550Sstevel@tonic-gate int idx;
560Sstevel@tonic-gate int ncpus = cp->cache_cpu_mask + 1;
570Sstevel@tonic-gate
580Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_lock);
590Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_depot_lock);
600Sstevel@tonic-gate
610Sstevel@tonic-gate for (idx = 0; idx < ncpus; idx++)
620Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_cpu[idx].cc_lock);
630Sstevel@tonic-gate }
640Sstevel@tonic-gate
650Sstevel@tonic-gate static void
umem_lockup_log_header(umem_log_header_t * lhp)660Sstevel@tonic-gate umem_lockup_log_header(umem_log_header_t *lhp)
670Sstevel@tonic-gate {
680Sstevel@tonic-gate int idx;
690Sstevel@tonic-gate if (lhp == NULL)
700Sstevel@tonic-gate return;
710Sstevel@tonic-gate for (idx = 0; idx < umem_max_ncpus; idx++)
720Sstevel@tonic-gate (void) mutex_lock(&lhp->lh_cpu[idx].clh_lock);
730Sstevel@tonic-gate
740Sstevel@tonic-gate (void) mutex_lock(&lhp->lh_lock);
750Sstevel@tonic-gate }
760Sstevel@tonic-gate
770Sstevel@tonic-gate static void
umem_release_log_header(umem_log_header_t * lhp)780Sstevel@tonic-gate umem_release_log_header(umem_log_header_t *lhp)
790Sstevel@tonic-gate {
800Sstevel@tonic-gate int idx;
810Sstevel@tonic-gate if (lhp == NULL)
820Sstevel@tonic-gate return;
830Sstevel@tonic-gate
840Sstevel@tonic-gate (void) mutex_unlock(&lhp->lh_lock);
850Sstevel@tonic-gate
860Sstevel@tonic-gate for (idx = 0; idx < umem_max_ncpus; idx++)
870Sstevel@tonic-gate (void) mutex_unlock(&lhp->lh_cpu[idx].clh_lock);
880Sstevel@tonic-gate }
890Sstevel@tonic-gate
/*
 * pthread_atfork() "prepare" handler: acquire every lock the library
 * owns, in the documented lock order, so that no lock is held
 * mid-operation by another thread at the instant fork1(2) snapshots
 * the address space.  Undone by umem_release()/umem_release_child()
 * via umem_do_release().
 */
static void
umem_lockup(void)
{
	umem_cache_t *cp;

	(void) mutex_lock(&umem_init_lock);
	/*
	 * If another thread is busy initializing the library, we must
	 * wait for it to complete (by calling umem_init()) before allowing
	 * the fork() to proceed.
	 */
	if (umem_ready == UMEM_READY_INITING && umem_init_thr != thr_self()) {
		(void) mutex_unlock(&umem_init_lock);
		(void) umem_init();
		(void) mutex_lock(&umem_init_lock);
	}

	/* The vmem subsystem and the sbrk heap lock up before the caches. */
	vmem_lockup();
	vmem_sbrk_lockup();

	(void) mutex_lock(&umem_cache_lock);
	(void) mutex_lock(&umem_update_lock);
	(void) mutex_lock(&umem_flags_lock);

	/*
	 * Lock the null cache first, then walk the cache list backwards
	 * (cache_prev); umem_do_release() walks forwards (cache_next), so
	 * caches are unlocked in reverse acquisition order.
	 */
	umem_lockup_cache(&umem_null_cache);
	for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
	    cp = cp->cache_prev)
		umem_lockup_cache(cp);

	umem_lockup_log_header(umem_transaction_log);
	umem_lockup_log_header(umem_content_log);
	umem_lockup_log_header(umem_failure_log);
	umem_lockup_log_header(umem_slab_log);

	/*
	 * NOTE(review): wakes any threads waiting on umem_update_cv;
	 * presumably so they re-evaluate state once the locks are dropped
	 * after the fork — confirm against umem.c's update-thread logic.
	 */
	(void) cond_broadcast(&umem_update_cv);

}

1280Sstevel@tonic-gate static void
umem_do_release(int as_child)129652Sjwadams umem_do_release(int as_child)
1300Sstevel@tonic-gate {
1310Sstevel@tonic-gate umem_cache_t *cp;
132652Sjwadams int cleanup_update = 0;
1330Sstevel@tonic-gate
1340Sstevel@tonic-gate /*
135652Sjwadams * Clean up the update state if we are the child process and
136652Sjwadams * another thread was processing updates.
1370Sstevel@tonic-gate */
138652Sjwadams if (as_child) {
139652Sjwadams if (umem_update_thr != thr_self()) {
140652Sjwadams umem_update_thr = 0;
141652Sjwadams cleanup_update = 1;
142652Sjwadams }
143652Sjwadams if (umem_st_update_thr != thr_self()) {
144652Sjwadams umem_st_update_thr = 0;
145652Sjwadams cleanup_update = 1;
146652Sjwadams }
147652Sjwadams }
1480Sstevel@tonic-gate
149652Sjwadams if (cleanup_update) {
1500Sstevel@tonic-gate umem_reaping = UMEM_REAP_DONE;
1510Sstevel@tonic-gate
1520Sstevel@tonic-gate for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
1530Sstevel@tonic-gate cp = cp->cache_next) {
1540Sstevel@tonic-gate if (cp->cache_uflags & UMU_NOTIFY)
1550Sstevel@tonic-gate cp->cache_uflags &= ~UMU_NOTIFY;
1560Sstevel@tonic-gate
1570Sstevel@tonic-gate /*
1580Sstevel@tonic-gate * If the cache is active, we just re-add it to
1590Sstevel@tonic-gate * the update list. This will re-do any active
1600Sstevel@tonic-gate * updates on the cache, but that won't break
1610Sstevel@tonic-gate * anything.
1620Sstevel@tonic-gate *
1630Sstevel@tonic-gate * The worst that can happen is a cache has
1640Sstevel@tonic-gate * its magazines rescaled twice, instead of once.
1650Sstevel@tonic-gate */
1660Sstevel@tonic-gate if (cp->cache_uflags & UMU_ACTIVE) {
1670Sstevel@tonic-gate umem_cache_t *cnext, *cprev;
1680Sstevel@tonic-gate
1690Sstevel@tonic-gate ASSERT(cp->cache_unext == NULL &&
1700Sstevel@tonic-gate cp->cache_uprev == NULL);
1710Sstevel@tonic-gate
1720Sstevel@tonic-gate cp->cache_uflags &= ~UMU_ACTIVE;
1730Sstevel@tonic-gate cp->cache_unext = cnext = &umem_null_cache;
1740Sstevel@tonic-gate cp->cache_uprev = cprev =
1750Sstevel@tonic-gate umem_null_cache.cache_uprev;
1760Sstevel@tonic-gate cnext->cache_uprev = cp;
1770Sstevel@tonic-gate cprev->cache_unext = cp;
1780Sstevel@tonic-gate }
1790Sstevel@tonic-gate }
1800Sstevel@tonic-gate }
1810Sstevel@tonic-gate
182652Sjwadams umem_release_log_header(umem_slab_log);
183652Sjwadams umem_release_log_header(umem_failure_log);
184652Sjwadams umem_release_log_header(umem_content_log);
185652Sjwadams umem_release_log_header(umem_transaction_log);
186652Sjwadams
187652Sjwadams for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
188652Sjwadams cp = cp->cache_next)
189652Sjwadams umem_release_cache(cp);
190652Sjwadams umem_release_cache(&umem_null_cache);
191652Sjwadams
192652Sjwadams (void) mutex_unlock(&umem_flags_lock);
193652Sjwadams (void) mutex_unlock(&umem_update_lock);
194652Sjwadams (void) mutex_unlock(&umem_cache_lock);
195652Sjwadams
196652Sjwadams vmem_sbrk_release();
197652Sjwadams vmem_release();
198652Sjwadams
199652Sjwadams (void) mutex_unlock(&umem_init_lock);
200652Sjwadams }
201652Sjwadams
/*
 * pthread_atfork() "parent" handler: drop all locks taken by
 * umem_lockup(); no update-thread cleanup is needed in the parent.
 */
static void
umem_release(void)
{
	umem_do_release(0);
}

/*
 * pthread_atfork() "child" handler: drop all locks taken by
 * umem_lockup() and reset the update-thread state left behind by
 * threads that do not exist in the child.
 */
static void
umem_release_child(void)
{
	umem_do_release(1);
}

/*
 * Register the fork1(2) handlers (prepare/parent/child) with libc.
 * Called once during library initialization.
 */
void
umem_forkhandler_init(void)
{
	/*
	 * There is no way to unregister these atfork functions,
	 * but we don't need to.  The dynamic linker and libc take
	 * care of unregistering them if/when the library is unloaded.
	 */
	(void) pthread_atfork(umem_lockup, umem_release, umem_release_child);
}