1 /* $NetBSD: pthread_atfork.c,v 1.18 2024/01/20 14:52:47 christos Exp $ */
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 #if defined(LIBC_SCCS) && !defined(lint)
34 __RCSID("$NetBSD: pthread_atfork.c,v 1.18 2024/01/20 14:52:47 christos Exp $");
35 #endif /* LIBC_SCCS and not lint */
36
37 #include "namespace.h"
38
39 #include <errno.h>
40 #include <stdlib.h>
41 #include <unistd.h>
42 #include <sys/queue.h>
43 #include "extern.h"
44 #include "reentrant.h"
45
46 #ifdef __weak_alias
__weak_alias(pthread_atfork,_pthread_atfork)47 __weak_alias(pthread_atfork, _pthread_atfork)
48 __weak_alias(fork, _fork)
49 #endif /* __weak_alias */
50
51 pid_t
52 __locked_fork(int *my_errno)
53 {
54 return __fork();
55 }
56
/*
 * One registered atfork handler.  Records are linked into the
 * prepare/parent/child queues below and are never removed once
 * successfully registered.
 */
struct atfork_callback {
	SIMPLEQ_ENTRY(atfork_callback) next;	/* queue linkage */
	void (*fn)(void);	/* handler to invoke around fork() */
};
61
/*
 * Hypothetically, we could protect the queues with a rwlock which is
 * write-locked by pthread_atfork() and read-locked by fork(), but
 * since the intended use of the functions is obtaining locks to hold
 * across the fork, forking is going to be serialized anyway.
 */
/*
 * Statically preallocated callback record; af_alloc() hands it out
 * while its fn is still NULL, so the first registration cannot fail
 * for lack of memory.
 */
static struct atfork_callback atfork_builtin;
#ifdef _REENTRANT
/* Serializes pthread_atfork() against fork(). */
static mutex_t atfork_lock = MUTEX_INITIALIZER;
#endif
SIMPLEQ_HEAD(atfork_callback_q, atfork_callback);

/* Handler queues; see pthread_atfork() for the required call order. */
static struct atfork_callback_q prepareq = SIMPLEQ_HEAD_INITIALIZER(prepareq);
static struct atfork_callback_q parentq = SIMPLEQ_HEAD_INITIALIZER(parentq);
static struct atfork_callback_q childq = SIMPLEQ_HEAD_INITIALIZER(childq);
77
78 static struct atfork_callback *
af_alloc(void)79 af_alloc(void)
80 {
81
82 if (atfork_builtin.fn == NULL)
83 return &atfork_builtin;
84
85 return malloc(sizeof(atfork_builtin));
86 }
87
88 static void
af_free(struct atfork_callback * af)89 af_free(struct atfork_callback *af)
90 {
91
92 if (af != &atfork_builtin)
93 free(af);
94 }
95
/*
 * pthread_atfork: register handlers to be run around fork().
 *
 * prepare handlers run before fork() in the parent (LIFO order);
 * parent handlers run in the parent after fork() (FIFO); child
 * handlers run in the child after fork() (FIFO).  Any of the three
 * may be NULL.  Returns 0 on success or ENOMEM if a callback record
 * could not be allocated.
 *
 * All signals are masked while atfork_lock is held — presumably so a
 * signal handler cannot re-enter fork()/pthread_atfork() and deadlock
 * on the lock; TODO confirm against libpthread's use of this lock.
 */
int
pthread_atfork(void (*prepare)(void), void (*parent)(void),
    void (*child)(void))
{
	struct atfork_callback *newprepare, *newparent, *newchild;
	sigset_t mask, omask;
	int error;

	newprepare = newparent = newchild = NULL;

	sigfillset(&mask);
	thr_sigsetmask(SIG_SETMASK, &mask, &omask);

	mutex_lock(&atfork_lock);
	/*
	 * Allocate all three records up front so that on failure we can
	 * back out without having inserted anything into the queues.
	 */
	if (prepare != NULL) {
		newprepare = af_alloc();
		if (newprepare == NULL) {
			error = ENOMEM;
			goto out;
		}
		newprepare->fn = prepare;
	}

	if (parent != NULL) {
		newparent = af_alloc();
		if (newparent == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			error = ENOMEM;
			goto out;
		}
		newparent->fn = parent;
	}

	if (child != NULL) {
		newchild = af_alloc();
		if (newchild == NULL) {
			if (newprepare != NULL)
				af_free(newprepare);
			if (newparent != NULL)
				af_free(newparent);
			error = ENOMEM;
			goto out;
		}
		newchild->fn = child;
	}

	/*
	 * The order in which the functions are called is specified as
	 * LIFO for the prepare handler and FIFO for the others; insert
	 * at the head and tail as appropriate so that SIMPLEQ_FOREACH()
	 * produces the right order.
	 */
	if (prepare)
		SIMPLEQ_INSERT_HEAD(&prepareq, newprepare, next);
	if (parent)
		SIMPLEQ_INSERT_TAIL(&parentq, newparent, next);
	if (child)
		SIMPLEQ_INSERT_TAIL(&childq, newchild, next);
	error = 0;

out:	mutex_unlock(&atfork_lock);
	thr_sigsetmask(SIG_SETMASK, &omask, NULL);
	return error;
}
161
/*
 * fork: wrapper that runs the registered atfork handlers around the
 * underlying fork.
 *
 * With atfork_lock held: run the prepare handlers and
 * _malloc_prefork(), call __locked_fork(), then run the parent or
 * child handlers as appropriate.  The child does NOT unlock
 * atfork_lock — it reinitializes it; see the comment below.
 */
pid_t
fork(void)
{
	struct atfork_callback *iter;
	pid_t ret;

	mutex_lock(&atfork_lock);
	/* Prepare handlers run LIFO (queue was built head-first). */
	SIMPLEQ_FOREACH(iter, &prepareq, next)
		(*iter->fn)();
	_malloc_prefork();

	ret = __locked_fork(&errno);

	if (ret != 0) {
		/*
		 * We are the parent. It doesn't matter here whether
		 * the fork call succeeded or failed.
		 */
		_malloc_postfork();
		SIMPLEQ_FOREACH(iter, &parentq, next)
			(*iter->fn)();
		mutex_unlock(&atfork_lock);
	} else {
		/* We are the child */
		_malloc_postfork_child();
		SIMPLEQ_FOREACH(iter, &childq, next)
			(*iter->fn)();
		/*
		 * Note: We are explicitly *not* unlocking
		 * atfork_lock. Unlocking atfork_lock is problematic,
		 * because if any threads in the parent blocked on it
		 * between the initial lock and the fork() syscall,
		 * unlocking in the child will try to schedule
		 * threads, and either the internal mutex interlock or
		 * the runqueue spinlock could have been held at the
		 * moment of fork(). Since the other threads do not
		 * exist in this process, the spinlock will never be
		 * unlocked, and we would wedge.
		 * Instead, we reinitialize atfork_lock, since we know
		 * that the state of the atfork lists is consistent here,
		 * and that there are no other threads to be affected by
		 * the forcible cleaning of the queue.
		 * This permits double-forking to work, although
		 * it requires knowing that it's "safe" to initialize
		 * a locked mutex in this context.
		 *
		 * The problem exists for users of this interface,
		 * too, since the intended use of pthread_atfork() is
		 * to acquire locks across the fork call to ensure
		 * that the child sees consistent state. There's not
		 * much that can usefully be done in a child handler,
		 * and conventional wisdom discourages using them, but
		 * they're part of the interface, so here we are...
		 */
		mutex_init(&atfork_lock, NULL);
	}

	return ret;
}
221