/*	$NetBSD: pthread_atfork.c,v 1.15 2020/05/15 14:37:21 joerg Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: pthread_atfork.c,v 1.15 2020/05/15 14:37:21 joerg Exp $");
#endif /* LIBC_SCCS and not lint */

#include "namespace.h"

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/queue.h>
#include "extern.h"
#include "reentrant.h"

#ifdef __weak_alias
__weak_alias(pthread_atfork, _pthread_atfork)
__weak_alias(fork, _fork)
#endif /* __weak_alias */

pid_t	__fork(void);	/* XXX */
pid_t	__locked_fork(int *) __weak;	/* XXX */

pid_t
__locked_fork(int *my_errno)
{
	return __fork();
}

struct atfork_callback {
	SIMPLEQ_ENTRY(atfork_callback) next;
	void (*fn)(void);
};

/*
 * Hypothetically, we could protect the queues with a rwlock which is
 * write-locked by pthread_atfork() and read-locked by fork(), but
 * since the intended use of the functions is obtaining locks to hold
 * across the fork, forking is going to be serialized anyway.
 */
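/*
 * One callback record is allocated statically so that the first
 * registration can succeed without touching the heap; af_alloc()
 * hands out this record first and falls back to malloc() afterwards.
 */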
static struct atfork_callback atfork_builtin;
#ifdef _REENTRANT
static mutex_t atfork_lock = MUTEX_INITIALIZER;
#endif
SIMPLEQ_HEAD(atfork_callback_q, atfork_callback);

static struct atfork_callback_q prepareq = SIMPLEQ_HEAD_INITIALIZER(prepareq);
static struct atfork_callback_q parentq = SIMPLEQ_HEAD_INITIALIZER(parentq);
static struct atfork_callback_q childq = SIMPLEQ_HEAD_INITIALIZER(childq);

static struct atfork_callback *
af_alloc(void)
{

	if (atfork_builtin.fn == NULL)
		return &atfork_builtin;

	return malloc(sizeof(atfork_builtin));
}

static void
af_free(struct atfork_callback *af)
{

	if (af != &atfork_builtin)
		free(af);
}

int
pthread_atfork(void (*prepare)(void), void (*parent)(void),
    void (*child)(void))
{
	struct atfork_callback *newprepare, *newparent, *newchild;

	newprepare = newparent = newchild = NULL;

	mutex_lock(&atfork_lock);
	if (prepare != NULL) {
		newprepare = af_alloc();
		if (newprepare == NULL) {
			mutex_unlock(&atfork_lock);
			return ENOMEM;
		}
		newprepare->fn = prepare;
	}

	if (parent != NULL) {
		newparent = af_alloc();
		if (newparent == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			mutex_unlock(&atfork_lock);
			return ENOMEM;
		}
		newparent->fn = parent;
	}

	if (child != NULL) {
		newchild = af_alloc();
		if (newchild == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			if (newparent != NULL)
				af_free(newparent);
			mutex_unlock(&atfork_lock);
			return ENOMEM;
		}
		newchild->fn = child;
	}

	/*
	 * The order in which the functions are called is specified as
	 * LIFO for the prepare handler and FIFO for the others; insert
	 * at the head and tail as appropriate so that SIMPLEQ_FOREACH()
	 * produces the right order.
	 */
	if (prepare)
		SIMPLEQ_INSERT_HEAD(&prepareq, newprepare, next);
	if (parent)
		SIMPLEQ_INSERT_TAIL(&parentq, newparent, next);
	if (child)
		SIMPLEQ_INSERT_TAIL(&childq, newchild, next);
	mutex_unlock(&atfork_lock);

	return 0;
}

pid_t
fork(void)
{
	struct atfork_callback *iter;
	pid_t ret;

	mutex_lock(&atfork_lock);
	SIMPLEQ_FOREACH(iter, &prepareq, next)
		(*iter->fn)();
	_malloc_prefork();

	ret = __locked_fork(&errno);

	if (ret != 0) {
		/*
		 * We are the parent. It doesn't matter here whether
		 * the fork call succeeded or failed.
		 */
		_malloc_postfork();
		SIMPLEQ_FOREACH(iter, &parentq, next)
			(*iter->fn)();
		mutex_unlock(&atfork_lock);
	} else {
		/* We are the child */
		_malloc_postfork_child();
		SIMPLEQ_FOREACH(iter, &childq, next)
			(*iter->fn)();
		/*
		 * Note: We are explicitly *not* unlocking
		 * atfork_lock.  Unlocking atfork_lock is problematic,
		 * because if any threads in the parent blocked on it
		 * between the initial lock and the fork() syscall,
		 * unlocking in the child will try to schedule
		 * threads, and either the internal mutex interlock or
		 * the runqueue spinlock could have been held at the
		 * moment of fork().  Since the other threads do not
		 * exist in this process, the spinlock will never be
		 * unlocked, and we would wedge.
		 * Instead, we reinitialize atfork_lock, since we know
		 * that the state of the atfork lists is consistent here,
		 * and that there are no other threads to be affected by
		 * the forcible cleaning of the queue.
		 * This permits double-forking to work, although
		 * it requires knowing that it's "safe" to initialize
		 * a locked mutex in this context.
		 *
		 * The problem exists for users of this interface,
		 * too, since the intended use of pthread_atfork() is
		 * to acquire locks across the fork call to ensure
		 * that the child sees consistent state.  There's not
		 * much that can usefully be done in a child handler,
		 * and conventional wisdom discourages using them, but
		 * they're part of the interface, so here we are...
		 */
		mutex_init(&atfork_lock, NULL);
	}

	return ret;
}
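
/*
 * Illustrative sketch of the usage pattern described above: a library
 * takes its own lock in a prepare handler so the child inherits the
 * lock, and the data it protects, in a consistent state.  Everything
 * inside this guard is hypothetical example code; PTHREAD_ATFORK_EXAMPLE,
 * lib_lock, and the example_* handlers are invented for illustration.
 */
#ifdef PTHREAD_ATFORK_EXAMPLE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lib_lock = PTHREAD_MUTEX_INITIALIZER;

/* Prepare handlers run LIFO before the fork: take the lock. */
static void
example_prepare(void)
{
	pthread_mutex_lock(&lib_lock);
}

/* Parent handlers run FIFO after the fork: release the lock. */
static void
example_parent(void)
{
	pthread_mutex_unlock(&lib_lock);
}

/*
 * Child handlers also run FIFO.  The child is single-threaded, so
 * releasing the inherited lock is about all that is safe to do here.
 */
static void
example_child(void)
{
	pthread_mutex_unlock(&lib_lock);
}

int
main(void)
{
	pid_t pid;

	if (pthread_atfork(example_prepare, example_parent,
	    example_child) != 0)
		return 1;

	pid = fork();
	if (pid == 0)
		printf("child: lib_lock consistent and released\n");
	else if (pid > 0)
		printf("parent: lib_lock released by parent handler\n");
	return 0;
}
#endif /* PTHREAD_ATFORK_EXAMPLE */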