/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997-2020 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#else
#if (defined(__GTHREAD_MUTEX_INIT) || defined(__GTHREAD_MUTEX_INIT_FUNCTION)) \
    && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
#define ATOMIC_FDE_FAST_PATH 1
#endif
#endif

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
#ifdef ATOMIC_FDE_FAST_PATH
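/* Nonzero once any object has been registered through the interfaces
   below; lets _Unwind_Find_FDE bail out without taking the mutex on
   targets where unwind info normally arrives some other way.  */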
static int any_objects_registered;
#endif

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#define init_object_mutex_once()
#else
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static __gthread_mutex_t object_mutex;

static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* ??? Several targets include this file with parts of gthr.h stubbed out
   and expect no locking to be done.  */
#define init_object_mutex_once()
static __gthread_mutex_t object_mutex;
#endif
#endif

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;
#ifdef ATOMIC_FDE_FAST_PATH
  /* Set flag that at least one library has registered FDEs.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (!any_objects_registered)
    __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
#endif

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
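
/* A minimal usage sketch (hypothetical caller, not part of this file):
   a JIT that emits its own .eh_frame data can hand it to the unwinder
   with __register_frame and release it with __deregister_frame.  The
   buffer must be a well-formed .eh_frame section, terminated by a zero
   length word, and must stay live while registered.  */
#if 0
static void
jit_install_eh_frame (void *eh_frame_buf)
{
  __register_frame (eh_frame_buf);      /* Allocates and links a struct
                                           object for this buffer.  */
}

static void
jit_remove_eh_frame (void *eh_frame_buf)
{
  __deregister_frame (eh_frame_buf);    /* Unlinks and frees it again.  */
}
#endif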

/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;
#ifdef ATOMIC_FDE_FAST_PATH
  /* Set flag that at least one library has registered FDEs.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (!any_objects_registered)
    __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
#endif

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
#if 0
  gcc_assert (ob);
#endif
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}


/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

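  /* The upper nibble of the encoding (excluding the DW_EH_PE_indirect
     bit) selects what the encoded value is relative to.  */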
  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  if (__builtin_expect (cie->version >= 4, 0))
    {
      if (p[0] != sizeof (void *) || p[1] != 0)
        return DW_EH_PE_omit;   /* We are not prepared to handle unexpected
                                   address sizes or segment selectors.  */
      p += 2;                   /* Skip address size and segment size.  */
    }

  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = read_uleb128 (p, &utmp); /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp); /* Skip data alignment.  */
  if (cie->version == 1)       /* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;                       /* Skip 'z' */
  p = read_uleb128 (p, &utmp); /* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* aarch64 b-key pointer authentication.  */
      else if (*aug == 'B')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}
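
/* Worked example (illustrative): for the common augmentation "zR",
   P points just past the augmentation length when the loop starts, so
   the first 'R' immediately yields the encoding byte, e.g. 0x1b for
   DW_EH_PE_pcrel | DW_EH_PE_sdata4.  For "zPLR" the loop must first
   step over the personality encoding and pointer ('P') and the LSDA
   encoding byte ('L') before reaching the 'R' operand.  */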

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}


/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       const fde *x, const fde *y)
{
  _Unwind_Ptr x_ptr, y_ptr;
  memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
  memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);


/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
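
/* For the sample sequence above, fde_split leaves the increasing run
   101 102 103 105 108 110 111 115 119 125 126 129 130 in LINEAR and
   moves 127 128 190 160 to ERRATIC; only those four strays are then
   heapsorted and merged back into the linear run.  */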

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */
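
/* In effect the chain is a stack of the current increasing run: each new
   entry pops every chained entry larger than itself (NULLing its slot,
   which marks it erratic) and then pushes itself.  Slots left non-NULL
   at the end are exactly the members of the retained linear sequence.  */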

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (const fde *const*) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}

#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
                int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
        ++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
        {
          SWAP (a[i], a[j]);
          i = j;
        }
      else
        break;
    }
}

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}

/* Merge V1 and V2, both sorted, and put the result into V1.  */
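/* V2 is merged in from its back end so V1 can be grown in place without
   a scratch buffer: each element of V1 shifts at most once, so the total
   work is linear in the two counts.  */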
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}


/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          if (encoding == DW_EH_PE_omit)
            return -1;
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
        mask = -1;

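      /* E.g. a 4-byte encoding on an LP64 target gives
         mask == 0xffffffff, so an entry whose low 32 address bits are
         all zero is treated as deleted.  */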
      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          _Unwind_Ptr ptr;
          memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
          if (ptr == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            {
              size_t cur_count = classify_object_over_fdes (ob, *p);
              if (cur_count == (size_t) -1)
                goto unhandled_fdes;
              count += cur_count;
            }
        }
      else
        {
          count = classify_object_over_fdes (ob, ob->u.single);
          if (count == (size_t) -1)
            {
              static const fde terminator;
            unhandled_fdes:
              ob->s.i = 0;
              ob->s.b.encoding = DW_EH_PE_omit;
              ob->u.single = &terminator;
              return;
            }
        }

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
          pc_begin = pc_array[0];
          pc_range = pc_array[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              const fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}

const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

#ifdef ATOMIC_FDE_FAST_PATH
  /* For targets where unwind info is usually not registered through these
     APIs anymore, avoid taking a global lock.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (__builtin_expect (!__atomic_load_n (&any_objects_registered,
                                          __ATOMIC_RELAXED), 1))
    return NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}
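
/* Illustrative only (hypothetical caller): a backtracer or personality
   routine would look up the FDE covering a return address roughly like
   this, backing the PC up by one so a return address that points just
   past the end of the calling function still falls within its FDE.  */
#if 0
static const fde *
find_fde_for_return_address (_Unwind_Ptr ra)
{
  struct dwarf_eh_bases bases;
  return _Unwind_Find_FDE ((void *) (ra - 1), &bases);
}
#endif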