/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997-2019 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#else
#if (defined(__GTHREAD_MUTEX_INIT) || defined(__GTHREAD_MUTEX_INIT_FUNCTION)) \
    && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
#define ATOMIC_FDE_FAST_PATH 1
#endif
#endif

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
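
/* Illustratively (addresses hypothetical): after a third object has
   been registered but before any lookup has touched it, the lists
   could be

     unseen_objects: obC
     seen_objects:   obB (pc_begin 0x7f1234560000) -> obA (pc_begin 0x400000)

   i.e. seen_objects stays sorted by decreasing pc_begin, while fresh
   registrations simply prepend to unseen_objects.  */
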
#ifdef ATOMIC_FDE_FAST_PATH
static int any_objects_registered;
#endif

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#define init_object_mutex_once()
#else
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static __gthread_mutex_t object_mutex;

static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* ???  Several targets include this file with stubbed-out parts of gthr.h
   and expect no locking to be done.  */
#define init_object_mutex_once()
static __gthread_mutex_t object_mutex;
#endif
#endif

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
			     void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;
#ifdef ATOMIC_FDE_FAST_PATH
  /* Set flag that at least one library has registered FDEs.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (!any_objects_registered)
    __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
#endif

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  if (ob == NULL)
    return;
  __register_frame_info (begin, ob);
}
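
/* A minimal usage sketch (hypothetical caller, not part of this file):
   a JIT that emits its own zero-terminated .eh_frame blob can hand it
   to the entry points above, and must withdraw it again before freeing
   the memory.  */
#if 0
static void
jit_install_eh_frame (void *eh_frame_blob)
{
  /* EH_FRAME_BLOB must contain CIE/FDE records followed by a
     terminating zero length word, as in a .eh_frame section.  */
  __register_frame (eh_frame_blob);
}

static void
jit_withdraw_eh_frame (void *eh_frame_blob)
{
  __deregister_frame (eh_frame_blob);
}
#endif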

/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;
#ifdef ATOMIC_FDE_FAST_PATH
  /* Set flag that at least one library has registered FDEs.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (!any_objects_registered)
    __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
#endif

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  if (ob == NULL)
    return;
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
	ob = *p;
	*p = ob->next;
	goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
	if ((*p)->u.sort->orig_data == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    free (ob->u.sort);
	    goto out;
	  }
      }
    else
      {
	if ((*p)->u.single == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    goto out;
	  }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
#if 0
  gcc_assert (ob);
#endif
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}


/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}
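
/* As a reminder of the encoding byte layout: the low four bits select
   the value format and the 0x70 bits select the base applied above.
   For instance DW_EH_PE_datarel | DW_EH_PE_sdata4 (0x30 | 0x0b = 0x3b)
   denotes a signed 4-byte offset from ob->dbase.  */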

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  if (__builtin_expect (cie->version >= 4, 0))
    {
      if (p[0] != sizeof (void *) || p[1] != 0)
	return DW_EH_PE_omit;		/* We are not prepared to handle unexpected
					   address sizes or segment selectors.  */
      p += 2;				/* Skip address size and segment size.  */
    }

  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = read_uleb128 (p, &utmp);		/* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);		/* Skip data alignment.  */
  if (cie->version == 1)		/* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;				/* Skip 'z' */
  p = read_uleb128 (p, &utmp);		/* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
	return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
	{
	  /* ??? Avoid dereferencing indirect pointers, since we're
	     faking the base address.  Gotta keep DW_EH_PE_aligned
	     intact, however.  */
	  p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
	}
      /* LSDA encoding.  */
      else if (*aug == 'L')
	p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
	return DW_EH_PE_absptr;
      aug++;
    }
}
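
/* For example, a typical version 1 CIE with augmentation "zPLR" stores,
   after the augmentation string: code alignment (uleb128), data
   alignment (sleb128), return address column (one byte), augmentation
   data length (uleb128), then one personality encoding byte plus the
   encoded personality pointer for 'P', one LSDA encoding byte for 'L',
   and finally the FDE pointer encoding byte for 'R', which is what the
   loop above returns.  */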

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}


/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time. But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
		       const fde *x, const fde *y)
{
  _Unwind_Ptr x_ptr, y_ptr;
  memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
  memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
				x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
				y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);


/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur. They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
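
/* Working through the example sequence above: fde_split keeps the
   greedily-grown ascending run
     101 102 103 105 108 110 111 115 119 125 126 129 130
   in LINEAR and spills the out-of-place elements
     127 128 190 160
   into ERRATIC; frame_heapsort then orders the erratic ones and
   fde_merge folds them back into LINEAR.  */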

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
	accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC. This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct. Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
	   probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
	   probe = chain_end)
	{
	  chain_end = (const fde *const*) erratic->array[probe - linear->array];
	  erratic->array[probe - linear->array] = NULL;
	}
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}

#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
		int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
	++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
	{
	  SWAP (a[i], a[j]);
	  i = j;
	}
      else
	break;
    }
}

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
		struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}

/* Merge V1 and V2, both sorted, and put the result into V1.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
	{
	  i2--;
	  fde2 = v2->array[i2];
	  while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
	    {
	      v1->array[i1+i2] = v1->array[i1-1];
	      i1--;
	    }
	  v1->array[i1+i2] = fde2;
	}
      while (i2 > 0);
      v1->count += v2->count;
    }
}
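
/* For instance (values standing for encoded pc_begin addresses),
   merging v1 = {101, 105, 110} with v2 = {103, 111} in place proceeds
   from the back: 111 lands in v1[4], 110 shifts up to make room for
   103, and the result is {101, 103, 105, 110, 111}.  */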

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
	 so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
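
/* A self-contained sketch of the same split/sort/merge pipeline on
   plain integers (illustrative only, not part of the library; qsort
   stands in for frame_heapsort and a plain two-way merge stands in
   for the in-place fde_merge).  */
#if 0
#include <stdio.h>
#include <stdlib.h>

static int
int_cmp (const void *a, const void *b)
{
  int x = *(const int *) a, y = *(const int *) b;
  return (x > y) - (x < y);
}

int
main (void)
{
  int linear[17] = { 101, 102, 103, 127, 128, 105, 108, 110, 190,
		     111, 115, 119, 125, 160, 126, 129, 130 };
  int erratic[17], merged[17];
  int n = 17, nlin = 0, nerr = 0, i, i1, i2, k;

  /* Greedy split, equivalent in effect to fde_split: grow an ascending
     run in place and spill anything that would break it into ERRATIC.  */
  for (i = 0; i < n; i++)
    {
      int v = linear[i];
      while (nlin > 0 && linear[nlin - 1] > v)
	erratic[nerr++] = linear[--nlin];
      linear[nlin++] = v;
    }

  /* Order the erratic leftovers; the real code heapsorts them.  */
  qsort (erratic, nerr, sizeof (int), int_cmp);

  /* Merge the two sorted runs.  */
  for (i1 = i2 = k = 0; k < n; k++)
    if (i2 >= nerr || (i1 < nlin && linear[i1] <= erratic[i2]))
      merged[k] = linear[i1++];
    else
      merged[k] = erratic[i2++];

  for (i = 0; i < n; i++)
    printf ("%d ", merged[i]);	/* Prints the 17 values fully sorted.  */
  printf ("\n");
  return 0;
}
#endif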


/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
	 objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
	{
	  last_cie = this_cie;
	  encoding = get_cie_encoding (this_cie);
	  if (encoding == DW_EH_PE_omit)
	    return -1;
	  base = base_from_object (encoding, ob);
	  if (ob->s.b.encoding == DW_EH_PE_omit)
	    ob->s.b.encoding = encoding;
	  else if (ob->s.b.encoding != encoding)
	    ob->s.b.mixed_encoding = 1;
	}

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
				    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
	 In these cases, the function address will be NULL, but if
	 the encoding is smaller than a pointer a true NULL may not
	 be representable.  Assume 0 in the representable bits is NULL.  */
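      /* For example, a 4-byte encoding on a target with 8-byte pointers
	 yields mask == 0xffffffff below, so only the low 32 bits of
	 PC_BEGIN are tested against zero.  */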
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
	mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
	mask = -1;

      if ((pc_begin & mask) == 0)
	continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
	ob->pc_begin = (void *) pc_begin;
    }

  return count;
}

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  _Unwind_Ptr ptr;
	  memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
	  if (ptr == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr pc_begin, mask;

	  read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
					&pc_begin);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
	{
	  fde **p = ob->u.array;
	  for (count = 0; *p; ++p)
	    {
	      size_t cur_count = classify_object_over_fdes (ob, *p);
	      if (cur_count == (size_t) -1)
		goto unhandled_fdes;
	      count += cur_count;
	    }
	}
      else
	{
	  count = classify_object_over_fdes (ob, ob->u.single);
	  if (count == (size_t) -1)
	    {
	      static const fde terminator;
	    unhandled_fdes:
	      ob->s.i = 0;
	      ob->s.b.encoding = DW_EH_PE_omit;
	      ob->u.single = &terminator;
	      return;
	    }
	}

      /* The count field we have in the main struct object is somewhat
	 limited, but should suffice for virtually all cases.  If the
	 counted value doesn't fit, re-write a zero.  The worst that
	 happens is that we re-count next time -- admittedly non-trivial
	 in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
	ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
	add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
	  pc_begin = pc_array[0];
	  pc_range = pc_array[1];
	  if (pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr mask;
	  const unsigned char *p;

	  p = read_encoded_value_with_base (encoding, base,
					    this_fde->pc_begin, &pc_begin);
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
	return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
	hi = i;
      else if (pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
					&pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
					base_from_object (encoding, ob),
					f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
	 that we've not processed this object before.  A quick range
	 check is in order.  */
      if (pc < ob->pc_begin)
	return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
	return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
	return binary_search_unencoded_fdes (ob, pc);
      else
	return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long, slow, laborious linear search, since we've no memory.  */
      if (ob->s.b.from_array)
	{
	  fde **p;
	  for (p = ob->u.array; *p ; p++)
	    {
	      const fde *f = linear_search_fdes (ob, *p, pc);
	      if (f)
		return f;
	    }
	  return NULL;
	}
      else
	return linear_search_fdes (ob, ob->u.single, pc);
    }
}

const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

#ifdef ATOMIC_FDE_FAST_PATH
  /* For targets where unwind info is usually not registered through these
     APIs anymore, avoid taking a global lock.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (__builtin_expect (!__atomic_load_n (&any_objects_registered,
					  __ATOMIC_RELAXED), 1))
    return NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
	f = search_object (ob, pc);
	if (f)
	  goto fini;
	break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
	if ((*p)->pc_begin < ob->pc_begin)
	  break;
      ob->next = *p;
      *p = ob;

      if (f)
	goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
	encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
				    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}

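/* An illustrative caller (hypothetical, not part of this file): an
   unwinder-style lookup obtains the FDE and base addresses for a PC
   roughly like so.  */
#if 0
static void
example_lookup (void *pc)
{
  struct dwarf_eh_bases bases;
  const fde *f = _Unwind_Find_FDE (pc, &bases);

  if (f != NULL)
    {
      /* bases.func is the start of the function containing PC;
	 bases.tbase and bases.dbase are the text/data bases used to
	 decode relative pointer encodings in F.  */
    }
}
#endif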