/*	$OpenBSD: atexit.c,v 1.26 2017/12/05 21:11:10 kettenis Exp $ */
/*
 * Copyright (c) 2002 Daniel Hartmeier
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include <elf.h>
#pragma weak _DYNAMIC
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "atexit.h"
#include "atfork.h"
#include "thread_private.h"
#include "tib.h"

/*
 * Weak reference to the dynamic linker's dlctl(); resolves to NULL in
 * statically linked programs, so callers must test _DYNAMIC first.
 */
typeof(dlctl) dlctl asm("_dlctl") __attribute__((weak));

/*
 * One thread-local destructor registered via __cxa_thread_atexit().
 * Nodes form a singly linked LIFO list rooted at tib->tib_atexit,
 * so destructors run in reverse registration order.
 */
struct thread_atexit_fn {
	void (*func)(void *);		/* destructor to invoke */
	void *arg;			/* argument passed to func */
	struct thread_atexit_fn *next;	/* next (older) registration */
};

/* Head of the linked list of handler pages (most recent page first). */
struct atexit *__atexit;

/*
 * Set whenever a handler is registered; tells __cxa_finalize() that the
 * handler list changed under it and it must restart its scan.
 */
static int restartloop;

/* define and initialize the list */
struct atfork_listhead _atfork_list = TAILQ_HEAD_INITIALIZER(_atfork_list);


/*
 * Function pointers are stored in a linked list of pages. The list
 * is initially empty, and pages are allocated on demand. The first
 * function pointer in the first allocated page (the last one in
 * the linked list) is reserved for the cleanup function.
 *
 * Outside the following functions, all pages are mprotect()'ed
 * to prevent unintentional/malicious corruption.
 */

/*
 * Register a function to be performed at exit or when a shared object
 * with the given dso handle is unloaded dynamically.  Also used as
 * the backend for atexit().
 * For more info on this API, see:
 *
 *	http://www.codesourcery.com/cxx-abi/abi.html#dso-dtor
 *
 * Returns 0 on success, -1 on allocation or mprotect failure.
 */
int
__cxa_atexit(void (*func)(void *), void *arg, void *dso)
{
	struct atexit *p = __atexit;
	struct atexit_fn *fnp;
	int pgsize = getpagesize();
	int ret = -1;

	/* sanity: a handler page must at least hold the header */
	if (pgsize < sizeof(*p))
		return (-1);
	_ATEXIT_LOCK();
	/* re-read the list head now that we hold the lock */
	p = __atexit;
	if (p != NULL) {
		if (p->ind + 1 >= p->max)
			p = NULL;	/* current page full: allocate a new one */
		else if (mprotect(p, pgsize, PROT_READ | PROT_WRITE))
			goto unlock;	/* can't unprotect the page for writing */
	}
	if (p == NULL) {
		p = mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
		if (p == MAP_FAILED)
			goto unlock;
		if (__atexit == NULL) {
			/*
			 * Very first page: slot 0 is reserved for the
			 * cleanup function (see __atexit_register_cleanup).
			 */
			memset(&p->fns[0], 0, sizeof(p->fns[0]));
			p->ind = 1;
		} else
			p->ind = 0;
		/* how many handler slots fit after the page header */
		p->max = (pgsize - ((char *)&p->fns[0] - (char *)p)) /
		    sizeof(p->fns[0]);
		p->next = __atexit;
		__atexit = p;
	}
	fnp = &p->fns[p->ind++];
	fnp->fn_ptr = func;
	fnp->fn_arg = arg;
	fnp->fn_dso = dso;
	/* re-protect the page read-only against corruption */
	if (mprotect(p, pgsize, PROT_READ))
		goto unlock;
	/* tell any in-progress __cxa_finalize() to rescan the list */
	restartloop = 1;
	ret = 0;
unlock:
	_ATEXIT_UNLOCK();
	return (ret);
}
DEF_STRONG(__cxa_atexit);

/*
 * Copy of atexit() used by libc and anything staticly linked into the
 * executable.
 * This passes NULL for the dso, so the callbacks are only
 * invoked by exit() and not dlclose()
 */
int
atexit(void (*fn)(void))
{
	return (__cxa_atexit((void (*)(void *))fn, NULL, NULL));
}
DEF_STRONG(atexit);

__weak_alias(__cxa_thread_atexit, __cxa_thread_atexit_impl);

/*
 * Register a destructor to run when the calling thread exits.
 * Handlers are pushed onto the thread's TIB-rooted LIFO list and later
 * run by _thread_finalize().  Returns 0 on success, -1 if out of memory.
 */
int
__cxa_thread_atexit_impl(void (*func)(void *), void *arg, void *dso)
{
	struct thread_atexit_fn *fnp;
	struct tib *tib = TIB_GET();

	fnp = calloc(1, sizeof(struct thread_atexit_fn));
	if (fnp == NULL)
		return -1;

	/*
	 * If dynamically linked, ask ld.so to hold a reference on the
	 * dso -- presumably so it cannot be unloaded while a thread
	 * still has destructors pointing into it (TODO: confirm
	 * DL_REFERENCE semantics against ld.so).
	 */
	if (_DYNAMIC)
		dlctl(NULL, DL_REFERENCE, dso);

	fnp->func = func;
	fnp->arg = arg;
	fnp->next = tib->tib_atexit;
	tib->tib_atexit = fnp;

	return 0;
}

/*
 * Run and free all thread-local destructors registered by the calling
 * thread, newest first.  Called on thread exit and from __cxa_finalize()
 * during process exit.
 */
void
_thread_finalize(void)
{
	struct tib *tib = TIB_GET();

	while (tib->tib_atexit) {
		struct thread_atexit_fn *fnp = tib->tib_atexit;
		/* unlink before calling: handler may register new entries */
		tib->tib_atexit = fnp->next;
		fnp->func(fnp->arg);
		free(fnp);
	}
}

/*
 * Call all handlers registered with __cxa_atexit() for the shared
 * object owning 'dso'.
 * Note: if 'dso' is NULL, then all remaining handlers are called.
 */
void
__cxa_finalize(void *dso)
{
	struct atexit *p, *q;
	struct atexit_fn fn;
	int n, pgsize = getpagesize();
	static int call_depth;	/* guards against recursive exit() */

	/* process exit: run this thread's thread-local destructors first */
	if (dso == NULL)
		_thread_finalize();

	_ATEXIT_LOCK();
	call_depth++;

restart:
	restartloop = 0;
	/* newest page first; within a page, newest handler first */
	for (p = __atexit; p != NULL; p = p->next) {
		for (n = p->ind; --n >= 0;) {
			if (p->fns[n].fn_ptr == NULL)
				continue;	/* already called */
			if (dso != NULL && dso != p->fns[n].fn_dso)
				continue;	/* wrong DSO */

			/*
			 * Mark handler as having been already called to avoid
			 * dupes and loops, then call the appropriate function.
			 */
			/* copy out before clearing: we drop the lock to call */
			fn = p->fns[n];
			if (mprotect(p, pgsize, PROT_READ | PROT_WRITE) == 0) {
				p->fns[n].fn_ptr = NULL;
				mprotect(p, pgsize, PROT_READ);
			}
			/* handler may itself call atexit()/exit(); unlock */
			_ATEXIT_UNLOCK();
			(*fn.fn_ptr)(fn.fn_arg);
			_ATEXIT_LOCK();
			/* a handler registered something: rescan from the top */
			if (restartloop)
				goto restart;
		}
	}

	call_depth--;

	/*
	 * If called via exit(), unmap the pages since we have now run
	 * all the handlers.  We defer this until calldepth == 0 so that
	 * we don't unmap things prematurely if called recursively.
	 */
	if (dso == NULL && call_depth == 0) {
		for (p = __atexit; p != NULL; ) {
			q = p;
			p = p->next;
			munmap(q, pgsize);
		}
		__atexit = NULL;
	}
	_ATEXIT_UNLOCK();

	/*
	 * If unloading a DSO, unregister any atfork handlers registered
	 * by it.  Skip the locking if the list is currently empty.
	 */
	if (dso != NULL && TAILQ_FIRST(&_atfork_list) != NULL) {
		struct atfork_fn *af, *afnext;

		_ATFORK_LOCK();
		TAILQ_FOREACH_SAFE(af, &_atfork_list, fn_next, afnext)
			if (af->fn_dso == dso) {
				TAILQ_REMOVE(&_atfork_list, af, fn_next);
				free(af);
			}
		_ATFORK_UNLOCK();

	}
}
DEF_STRONG(__cxa_finalize);

/*
 * Register the cleanup function: stored in the reserved slot 0 of the
 * oldest page (allocating it if needed) so it always runs last.
 * Silently does nothing on allocation/mprotect failure.
 */
void
__atexit_register_cleanup(void (*func)(void))
{
	struct atexit *p;
	int pgsize = getpagesize();

	if (pgsize < sizeof(*p))
		return;
	_ATEXIT_LOCK();
	/* walk to the last (oldest) page, which owns reserved slot 0 */
	p = __atexit;
	while (p != NULL && p->next != NULL)
		p = p->next;
	if (p == NULL) {
		/* no pages yet: allocate the first one */
		p = mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
		if (p == MAP_FAILED)
			goto unlock;
		p->ind = 1;	/* slot 0 is the cleanup slot */
		p->max = (pgsize - ((char *)&p->fns[0] - (char *)p)) /
		    sizeof(p->fns[0]);
		p->next = NULL;
		__atexit = p;
	} else {
		if (mprotect(p, pgsize, PROT_READ | PROT_WRITE))
			goto unlock;
	}
	/* (re)install the cleanup function in the reserved slot */
	p->fns[0].fn_ptr = (void (*)(void *))func;
	p->fns[0].fn_arg = NULL;
	p->fns[0].fn_dso = NULL;
	/* re-protect the page; failure here leaves it writable but valid */
	mprotect(p, pgsize, PROT_READ);
	/* force any in-progress __cxa_finalize() scan to restart */
	restartloop = 1;
unlock:
	_ATEXIT_UNLOCK();
}