/*	$NetBSD: tls.c,v 1.19 2023/06/07 13:50:04 joerg Exp $	*/
/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Joerg Sonnenberger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: tls.c,v 1.19 2023/06/07 13:50:04 joerg Exp $");

#include <sys/param.h>
#include <sys/ucontext.h>
#include <lwp.h>
#include <stdalign.h>
#include <stddef.h>
#include <string.h>
#include "debug.h"
#include "rtld.h"

#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)

static struct tls_tcb *_rtld_tls_allocate_locked(void);
static void *_rtld_tls_module_allocate(struct tls_tcb *, size_t);

#ifndef TLS_DTV_OFFSET
#define	TLS_DTV_OFFSET	0
#endif

static size_t _rtld_tls_static_space;	/* Static TLS space allocated */
static size_t _rtld_tls_static_offset;	/* Next offset for static TLS to use */
size_t _rtld_tls_dtv_generation = 1;
size_t _rtld_tls_max_index = 1;

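/*
 * DTV layout: the vector is allocated with two leading cells and the
 * stored pointer is advanced past the first one, so dtv[-1] holds the
 * number of usable entries and dtv[0] the generation the vector was
 * built for; dtv[1..max_index] point at the per-module TLS blocks.
 */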
#define	DTV_GENERATION(dtv)		((size_t)((dtv)[0]))
#define	DTV_MAX_INDEX(dtv)		((size_t)((dtv)[-1]))
#define	SET_DTV_GENERATION(dtv, val)	(dtv)[0] = (void *)(size_t)(val)
#define	SET_DTV_MAX_INDEX(dtv, val)	(dtv)[-1] = (void *)(size_t)(val)

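/*
 * Slow path of __tls_get_addr: takes the rtld exclusive lock, replaces
 * the thread's DTV with a larger copy if it is older than the current
 * generation, and allocates the module's TLS block on demand before
 * returning the requested address.
 */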
void *
_rtld_tls_get_addr(void *tls, size_t idx, size_t offset)
{
	struct tls_tcb *tcb = tls;
	void **dtv, **new_dtv;
	sigset_t mask;

	_rtld_exclusive_enter(&mask);

	dtv = tcb->tcb_dtv;

	if (__predict_false(DTV_GENERATION(dtv) != _rtld_tls_dtv_generation)) {
		size_t to_copy = DTV_MAX_INDEX(dtv);

		new_dtv = xcalloc((2 + _rtld_tls_max_index) * sizeof(*dtv));
		++new_dtv;
		if (to_copy > _rtld_tls_max_index)
			to_copy = _rtld_tls_max_index;
		memcpy(new_dtv + 1, dtv + 1, to_copy * sizeof(*dtv));
		xfree(dtv - 1);
		dtv = tcb->tcb_dtv = new_dtv;
		SET_DTV_MAX_INDEX(dtv, _rtld_tls_max_index);
		SET_DTV_GENERATION(dtv, _rtld_tls_dtv_generation);
	}

	if (__predict_false(dtv[idx] == NULL))
		dtv[idx] = _rtld_tls_module_allocate(tcb, idx);

	_rtld_exclusive_exit(&mask);

	return (uint8_t *)dtv[idx] + offset;
}

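/*
 * Called once at startup: freeze the static TLS size (the offsets handed
 * out so far plus the reservation for later dlopen'ed objects), allocate
 * the initial thread's TCB and install it as the thread pointer.
 */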
void
_rtld_tls_initial_allocation(void)
{
	struct tls_tcb *tcb;

	_rtld_tls_static_space = _rtld_tls_static_offset +
	    RTLD_STATIC_TLS_RESERVATION;

#ifndef __HAVE_TLS_VARIANT_I
	_rtld_tls_static_space = roundup2(_rtld_tls_static_space,
	    alignof(max_align_t));
#endif
	dbg(("_rtld_tls_static_space %zu", _rtld_tls_static_space));

	tcb = _rtld_tls_allocate_locked();
#ifdef __HAVE___LWP_SETTCB
	__lwp_settcb(tcb);
#else
	_lwp_setprivate(tcb);
#endif
}

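/*
 * Allocate the TCB, the static TLS area and a fresh DTV for one thread.
 * Variant I places the TCB at the start of the allocation with the TLS
 * blocks following at increasing offsets; Variant II places the TCB at
 * the end, with the blocks below it and tcb_self pointing back at the
 * TCB.  The initialisation image of every object living in static TLS
 * is copied into place.  The caller provides the necessary locking.
 */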
static struct tls_tcb *
_rtld_tls_allocate_locked(void)
{
	Obj_Entry *obj;
	struct tls_tcb *tcb;
	uint8_t *p, *q;

	p = xcalloc(_rtld_tls_static_space + sizeof(struct tls_tcb));
#ifdef __HAVE_TLS_VARIANT_I
	tcb = (struct tls_tcb *)p;
	p += sizeof(struct tls_tcb);
#else
	p += _rtld_tls_static_space;
	tcb = (struct tls_tcb *)p;
	tcb->tcb_self = tcb;
#endif
	dbg(("lwp %d tls tcb %p", _lwp_self(), tcb));
	tcb->tcb_dtv = xcalloc(sizeof(*tcb->tcb_dtv) * (2 + _rtld_tls_max_index));
	++tcb->tcb_dtv;
	SET_DTV_MAX_INDEX(tcb->tcb_dtv, _rtld_tls_max_index);
	SET_DTV_GENERATION(tcb->tcb_dtv, _rtld_tls_dtv_generation);

	for (obj = _rtld_objlist; obj != NULL; obj = obj->next) {
		if (obj->tls_static) {
#ifdef __HAVE_TLS_VARIANT_I
			q = p + obj->tlsoffset;
#else
			q = p - obj->tlsoffset;
#endif
			dbg(("%s: [lwp %d] tls dtv %p index %zu offset %zu",
			    obj->path, _lwp_self(),
			    q, obj->tlsindex, obj->tlsoffset));
			if (obj->tlsinitsize)
				memcpy(q, obj->tlsinit, obj->tlsinitsize);
			tcb->tcb_dtv[obj->tlsindex] = q;
		}
	}

	return tcb;
}

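/*
 * Allocate the TLS state for a new thread; takes the exclusive lock
 * around _rtld_tls_allocate_locked().
 */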
struct tls_tcb *
_rtld_tls_allocate(void)
{
	struct tls_tcb *tcb;
	sigset_t mask;

	_rtld_exclusive_enter(&mask);
	tcb = _rtld_tls_allocate_locked();
	_rtld_exclusive_exit(&mask);

	return tcb;
}

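/*
 * Release a thread's TLS state: free every DTV entry that points outside
 * the static area (i.e. dynamically allocated module blocks), then the
 * DTV itself and the combined TCB/static TLS allocation.
 */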
void
_rtld_tls_free(struct tls_tcb *tcb)
{
	size_t i, max_index;
	uint8_t *p, *p_end;
	sigset_t mask;

	_rtld_exclusive_enter(&mask);

#ifdef __HAVE_TLS_VARIANT_I
	p = (uint8_t *)tcb;
#else
	p = (uint8_t *)tcb - _rtld_tls_static_space;
#endif
	p_end = p + _rtld_tls_static_space;

	max_index = DTV_MAX_INDEX(tcb->tcb_dtv);
	for (i = 1; i <= max_index; ++i) {
		if ((uint8_t *)tcb->tcb_dtv[i] < p ||
		    (uint8_t *)tcb->tcb_dtv[i] >= p_end)
			xfree(tcb->tcb_dtv[i]);
	}
	xfree(tcb->tcb_dtv - 1);
	xfree(p);

	_rtld_exclusive_exit(&mask);
}

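/*
 * Return this thread's TLS block for the module with the given DTV
 * index.  Objects placed in static TLS resolve to a pointer into the
 * thread's static area; everything else gets a freshly allocated block
 * initialised from the module's TLS image.
 */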
static void *
_rtld_tls_module_allocate(struct tls_tcb *tcb, size_t idx)
{
	Obj_Entry *obj;
	uint8_t *p;

	for (obj = _rtld_objlist; obj != NULL; obj = obj->next) {
		if (obj->tlsindex == idx)
			break;
	}
	if (obj == NULL) {
		_rtld_error("Module for TLS index %zu missing", idx);
		_rtld_die();
	}
	if (obj->tls_static) {
#ifdef __HAVE_TLS_VARIANT_I
		p = (uint8_t *)tcb + obj->tlsoffset + sizeof(struct tls_tcb);
#else
		p = (uint8_t *)tcb - obj->tlsoffset;
#endif
		return p;
	}

	p = xmalloc(obj->tlssize);
	memcpy(p, obj->tlsinit, obj->tlsinitsize);
	memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);

	obj->tls_dynamic = 1;

	return p;
}

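/*
 * Reserve static TLS space for an object.  Variant I assigns offsets
 * upwards from the TCB, Variant II downwards from the thread pointer.
 * Once the initial allocation has been made, only objects with
 * zero-initialised TLS that still fit into the reservation can be
 * added; anything else must use dynamic TLS.
 */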
int
_rtld_tls_offset_allocate(Obj_Entry *obj)
{
	size_t offset, next_offset;

	if (obj->tls_dynamic)
		return -1;

	if (obj->tls_static)
		return 0;
	if (obj->tlssize == 0) {
		obj->tlsoffset = 0;
		obj->tls_static = 1;
		return 0;
	}

#ifdef __HAVE_TLS_VARIANT_I
	offset = roundup2(_rtld_tls_static_offset, obj->tlsalign);
	next_offset = offset + obj->tlssize;
#else
	offset = roundup2(_rtld_tls_static_offset + obj->tlssize,
	    obj->tlsalign);
	next_offset = offset;
#endif

	/*
	 * Check if the static allocation was already done.
	 * This happens if dynamically loaded modules want to use
	 * static TLS space.
	 *
	 * XXX Keep an actual free list and callbacks for initialisation.
	 */
	if (_rtld_tls_static_space) {
		if (obj->tlsinitsize) {
			_rtld_error("%s: Use of initialized "
			    "Thread Local Storage with model initial-exec "
			    "and dlopen is not supported",
			    obj->path);
			return -1;
		}
		if (next_offset > _rtld_tls_static_space) {
			_rtld_error("%s: No space available "
			    "for static Thread Local Storage",
			    obj->path);
			return -1;
		}
	}
	obj->tlsoffset = offset;
	dbg(("%s: static tls offset 0x%zx size %zu\n",
	    obj->path, obj->tlsoffset, obj->tlssize));
	_rtld_tls_static_offset = next_offset;
	obj->tls_static = 1;

	return 0;
}

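/*
 * Called when an object is unloaded.  The static offset itself is not
 * reclaimed (see the XXX above about keeping a free list); only the
 * flag is cleared.
 */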
void
_rtld_tls_offset_free(Obj_Entry *obj)
{

	/*
	 * XXX See above.
	 */
	obj->tls_static = 0;
	return;
}

#if defined(__HAVE_COMMON___TLS_GET_ADDR) && defined(RTLD_LOADER)
/*
 * The fast path is access to an already allocated DTV entry.
 * This checks the current limit and the entry without needing any
 * locking. Entries are only freed on dlclose() and it is an application
 * bug if code of the module is still running at that point.
 */
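/*
 * The argument is a pair of size_t values: the module's DTV index and
 * the offset of the variable within that module's block, to which the
 * machine-dependent TLS_DTV_OFFSET bias is added.
 */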
void *
__tls_get_addr(void *arg_)
{
	size_t *arg = (size_t *)arg_;
	void **dtv;
#ifdef __HAVE___LWP_GETTCB_FAST
	struct tls_tcb * const tcb = __lwp_gettcb_fast();
#else
	struct tls_tcb * const tcb = __lwp_getprivate_fast();
#endif
	size_t idx = arg[0], offset = arg[1] + TLS_DTV_OFFSET;

	dtv = tcb->tcb_dtv;

	if (__predict_true(idx < DTV_MAX_INDEX(dtv) && dtv[idx] != NULL))
		return (uint8_t *)dtv[idx] + offset;

	return _rtld_tls_get_addr(tcb, idx, offset);
}
#endif
#endif /* __HAVE_TLS_VARIANT_I || __HAVE_TLS_VARIANT_II */
327