xref: /netbsd-src/external/gpl3/gcc/dist/libgcc/unwind-dw2-fde.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* Subroutines needed for unwinding stack frames for exception handling.  */
2 /* Copyright (C) 1997-2016 Free Software Foundation, Inc.
3    Contributed by Jason Merrill <jason@cygnus.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
20 
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
24 <http://www.gnu.org/licenses/>.  */
25 
26 #ifndef _Unwind_Find_FDE
27 #include "tconfig.h"
28 #include "tsystem.h"
29 #include "coretypes.h"
30 #include "tm.h"
31 #include "libgcc_tm.h"
32 #include "dwarf2.h"
33 #include "unwind.h"
34 #define NO_BASE_OF_ENCODED_VALUE
35 #include "unwind-pe.h"
36 #include "unwind-dw2-fde.h"
37 #include "gthr.h"
38 #endif
39 
40 /* The unseen_objects list contains objects that have been registered
41    but not yet categorized in any way.  The seen_objects list has had
42    its pc_begin and count fields initialized at minimum, and is sorted
43    by decreasing value of pc_begin.  */
44 static struct object *unseen_objects;
45 static struct object *seen_objects;
46 
#ifdef __GTHREAD_MUTEX_INIT
/* The target supplies a static initializer: the mutex is usable at
   load time and no once-routine is required.  */
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#define init_object_mutex_once()
#else
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static __gthread_mutex_t object_mutex;

/* Run the target's mutex-init function; invoked exactly once via
   __gthread_once from init_object_mutex_once below.  */
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* ???  Several targets include this file with stubbing parts of gthr.h
   and expect no locking to be done.  */
#define init_object_mutex_once()
static __gthread_mutex_t object_mutex;
#endif
#endif
73 
/* Called from crtbegin.o to register the unwind info for an object.
   BEGIN points at the object's .eh_frame data; OB is caller-provided
   (and caller-owned) bookkeeping storage; TBASE and DBASE are the
   bases for DW_EH_PE_textrel / DW_EH_PE_datarel encoded pointers.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
			     void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  /* pc_begin is maintained as a minimum over all FDEs, so start it at
     the top of the address space; classification lowers it later.  */
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  /* DW_EH_PE_omit here means "encoding not yet determined"; it is
     filled in lazily by classify_object_over_fdes.  */
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Prepend to the unclassified list; classification happens on the
     first _Unwind_Find_FDE lookup that reaches this object.  */
  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
102 
/* Like __register_frame_info_bases, but with no text/data base
   addresses (only absolute, pc-relative or aligned encodings work).  */
void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}
108 
109 void
110 __register_frame (void *begin)
111 {
112   struct object *ob;
113 
114   /* If .eh_frame is empty, don't register at all.  */
115   if (*(uword *) begin == 0)
116     return;
117 
118   ob = malloc (sizeof (struct object));
119   __register_frame_info (begin, ob);
120 }
121 
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  /* Same setup as __register_frame_info_bases, except u.array holds a
     null-terminated table of .eh_frame pointers and from_array tells
     later passes to iterate over that table.  */
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
146 
/* Register a table of unwind entries with no text/data bases.  */
void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}
152 
153 void
154 __register_frame_table (void *begin)
155 {
156   struct object *ob = malloc (sizeof (struct object));
157   __register_frame_info_table (begin, ob);
158 }
159 
/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

/* Remove the object registered for BEGIN from whichever list holds it
   and return the struct object supplied at registration time, or NULL
   if it was never registered (e.g. empty .eh_frame).  The caller owns
   (and may free) the returned object.  */
void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* The object may still be unclassified; match on the raw pointer.  */
  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
	ob = *p;
	*p = ob->next;
	goto out;
      }

  /* Otherwise search the classified list.  For sorted objects the
     original pointer was stashed in u.sort->orig_data and the malloc'd
     fde_vector must be freed here, since callers only free the struct
     object itself.  */
  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
	if ((*p)->u.sort->orig_data == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    free (ob->u.sort);
	    goto out;
	  }
      }
    else
      {
	if ((*p)->u.single == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    goto out;
	  }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
#if 0
  gcc_assert (ob);
#endif
  return (void *) ob;
}
221 
/* Deregistration counterpart of __register_frame_info; returns the
   struct object supplied at registration time (or NULL).  */
void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}
227 
/* Undo __register_frame: recover the object we malloc'd there and
   free it.  */
void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}
235 
236 
237 /* Like base_of_encoded_value, but take the base from a struct object
238    instead of an _Unwind_Context.  */
239 
240 static _Unwind_Ptr
241 base_from_object (unsigned char encoding, struct object *ob)
242 {
243   if (encoding == DW_EH_PE_omit)
244     return 0;
245 
246   switch (encoding & 0x70)
247     {
248     case DW_EH_PE_absptr:
249     case DW_EH_PE_pcrel:
250     case DW_EH_PE_aligned:
251       return 0;
252 
253     case DW_EH_PE_textrel:
254       return (_Unwind_Ptr) ob->tbase;
255     case DW_EH_PE_datarel:
256       return (_Unwind_Ptr) ob->dbase;
257     default:
258       gcc_unreachable ();
259     }
260 }
261 
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  if (__builtin_expect (cie->version >= 4, 0))
    {
      if (p[0] != sizeof (void *) || p[1] != 0)
	return DW_EH_PE_omit;		/* We are not prepared to handle unexpected
					   address sizes or segment selectors.  */
      p += 2;				/* Skip address size and segment size.  */
    }

  if (aug[0] != 'z')
    return DW_EH_PE_absptr;		/* No augmentation data: default.  */

  /* Skip the CIE fields that precede the augmentation data.  */
  p = read_uleb128 (p, &utmp);		/* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);		/* Skip data alignment.  */
  if (cie->version == 1)		/* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;				/* Skip 'z' */
  p = read_uleb128 (p, &utmp);		/* Skip augmentation length.  */
  /* Walk the augmentation characters in step with their data until we
     reach 'R' (the FDE pointer encoding) or an unknown character.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
	return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
	{
	  /* ??? Avoid dereferencing indirect pointers, since we're
	     faking the base address.  Gotta keep DW_EH_PE_aligned
	     intact, however.  */
	  p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
	}
      /* LSDA encoding.  */
      else if (*aug == 'L')
	p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
	return DW_EH_PE_absptr;
      aug++;
    }
}
317 
/* Return the pointer encoding used by F's FDE, read from its CIE.  */
static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
323 
324 
325 /* Sorting an array of FDEs by address.
326    (Ideally we would have the linker sort the FDEs so we don't have to do
327    it at run time. But the linkers are not yet prepared for this.)  */
328 
329 /* Comparison routines.  Three variants of increasing complexity.  */
330 
331 static int
332 fde_unencoded_compare (struct object *ob __attribute__((unused)),
333 		       const fde *x, const fde *y)
334 {
335   _Unwind_Ptr x_ptr, y_ptr;
336   memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
337   memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));
338 
339   if (x_ptr > y_ptr)
340     return 1;
341   if (x_ptr < y_ptr)
342     return -1;
343   return 0;
344 }
345 
346 static int
347 fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
348 {
349   _Unwind_Ptr base, x_ptr, y_ptr;
350 
351   base = base_from_object (ob->s.b.encoding, ob);
352   read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
353   read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
354 
355   if (x_ptr > y_ptr)
356     return 1;
357   if (x_ptr < y_ptr)
358     return -1;
359   return 0;
360 }
361 
362 static int
363 fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
364 {
365   int x_encoding, y_encoding;
366   _Unwind_Ptr x_ptr, y_ptr;
367 
368   x_encoding = get_fde_encoding (x);
369   read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
370 				x->pc_begin, &x_ptr);
371 
372   y_encoding = get_fde_encoding (y);
373   read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
374 				y->pc_begin, &y_ptr);
375 
376   if (x_ptr > y_ptr)
377     return 1;
378   if (x_ptr < y_ptr)
379     return -1;
380   return 0;
381 }
382 
/* Common signature of the three comparison routines above; end_fde_sort
   selects one based on the object's encoding flags.  */
typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);
384 
385 
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur. They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

struct fde_accumulator
{
  struct fde_vector *linear;	/* All FDEs; eventually the sorted result.  */
  struct fde_vector *erratic;	/* Scratch space; may be NULL on OOM.  */
};
402 
/* Allocate the LINEAR and ERRATIC vectors, each sized for COUNT fde
   pointers.  Returns nonzero iff the linear vector was obtained;
   accu->erratic may still be NULL in that case, which end_fde_sort
   tolerates by heapsorting the linear vector in place.  (When this
   returns zero, accu is left untouched/uninitialized and must not be
   used.)  */

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
	accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}
421 
422 static inline void
423 fde_insert (struct fde_accumulator *accu, const fde *this_fde)
424 {
425   if (accu->linear)
426     accu->linear->array[accu->linear->count++] = this_fde;
427 }
428 
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC. This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries.  We construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *linear, struct fde_vector *erratic)
{
  /* MARKER exists only so its address can serve as a unique chain
     terminator; its contents are never read or written.  */
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct. Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  /* Build, inside ERRATIC, a linked chain of the entries that form a
     non-decreasing subsequence: each erratic->array[i] points at the
     chain cell of the previous in-sequence entry.  Entries popped off
     the chain (their successor compares smaller) get NULLed out.  */
  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
	   probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
	   probe = chain_end)
	{
	  chain_end = (const fde *const*) erratic->array[probe - linear->array];
	  erratic->array[probe - linear->array] = NULL;
	}
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}
481 
/* Exchange two fde pointers; paired with the #undef in frame_heapsort.  */
#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
		int lo, int hi)
{
  int i, j;

  /* Descend from LO, stepping to the larger of the two children
     (children of i are 2i+1 and 2i+2), until the heap property holds
     or we run past HI.  */
  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
	++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
	{
	  SWAP (a[i], a[j]);
	  i = j;
	}
      else
	break;
    }
}
509 
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
		struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  /* NOTE(review): M is int while N is size_t; for n == 0 the value of
     n/2-1 converted to int is implementation-defined (in practice -1,
     skipping both loops) — confirm no caller passes a count exceeding
     INT_MAX.  */
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}
543 
/* Merge V1 and V2, both sorted, and put the result into V1.
   V1 must have room for v1->count + v2->count entries.  Works from the
   back of V1 so the merge is done in place: for each element of V2
   (largest first), shift the larger tail of V1 up and drop the V2
   element into the gap.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
	{
	  i2--;
	  fde2 = v2->array[i2];
	  /* Slide v1 entries greater than FDE2 toward the end; their
	     final slot is offset by the I2 v2-entries still to come.  */
	  while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
	    {
	      v1->array[i1+i2] = v1->array[i1-1];
	      i1--;
	    }
	  v1->array[i1+i2] = fde2;
	}
      while (i2 > 0);
      v1->count += v2->count;
    }
}
571 
/* Finish the sort started by start_fde_sort: pick the comparison
   routine matching OB's encoding complexity, split/heapsort/merge when
   the erratic scratch vector exists, else heapsort the linear vector
   in place.  COUNT is used only for sanity checks.  */
static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      /* O(N) split, O(n log n) sort of the few erratic entries, then a
	 back-to-front in-place merge.  */
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
	 so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
601 
602 
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way, or (size_t)-1 if an FDE uses an
   encoding we cannot handle (CIE version >= 4 with unexpected
   address size / segment selector).  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
	 objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
	{
	  /* Cache the last CIE: consecutive FDEs typically share one.  */
	  last_cie = this_cie;
	  encoding = get_cie_encoding (this_cie);
	  if (encoding == DW_EH_PE_omit)
	    return -1;
	  base = base_from_object (encoding, ob);
	  if (ob->s.b.encoding == DW_EH_PE_omit)
	    ob->s.b.encoding = encoding;
	  else if (ob->s.b.encoding != encoding)
	    ob->s.b.mixed_encoding = 1;
	}

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
				    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
	 In these cases, the function address will be NULL, but if
	 the encoding is smaller than a pointer a true NULL may not
	 be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
	mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
	mask = -1;

      if ((pc_begin & mask) == 0)
	continue;

      count += 1;
      /* Track the lowest pc covered by this object.  */
      if ((void *) pc_begin < ob->pc_begin)
	ob->pc_begin = (void *) pc_begin;
    }

  return count;
}
663 
/* Walk the fde array at THIS_FDE and insert every live FDE into ACCU's
   linear vector.  Assumes classify_object_over_fdes has already set
   OB's encoding/mixed_encoding bits.  Removed link-once functions
   (pc_begin reads as 0 in the representable bits) are skipped, as are
   CIEs.  */

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  /* Raw pointer; may be unaligned, so read it bytewise.  */
	  _Unwind_Ptr ptr;
	  memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
	  if (ptr == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr pc_begin, mask;

	  read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
					&pc_begin);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      fde_insert (accu, this_fde);
    }
}
723 
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  /* A cached non-zero count from a previous (allocation-failed) attempt
     lets us skip the classification pass.  */
  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
	{
	  fde **p = ob->u.array;
	  for (count = 0; *p; ++p)
	    {
	      size_t cur_count = classify_object_over_fdes (ob, *p);
	      if (cur_count == (size_t) -1)
		goto unhandled_fdes;
	      count += cur_count;
	    }
	}
      else
	{
	  count = classify_object_over_fdes (ob, ob->u.single);
	  if (count == (size_t) -1)
	    {
	      /* Unsupported encoding: point the object at an empty
		 terminator FDE so future searches find nothing instead
		 of re-parsing bad data.  */
	      static const fde terminator;
	    unhandled_fdes:
	      ob->s.i = 0;
	      ob->s.b.encoding = DW_EH_PE_omit;
	      ob->u.single = &terminator;
	      return;
	    }
	}

      /* The count field we have in the main struct object is somewhat
	 limited, but should suffice for virtually all cases.  If the
	 counted value doesn't fit, re-write a zero.  The worst that
	 happens is that we re-count next time -- admittedly non-trivial
	 in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
	ob->s.b.count = 0;
    }

  /* No memory for the sorted array: leave OB unsorted; searches will
     fall back to the linear path and we may retry here later.  */
  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
	add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}
794 
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  Returns the matching FDE or NULL.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  /* pc_begin and pc_range are stored as two consecutive raw
	     pointers in this case.  */
	  const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
	  pc_begin = pc_array[0];
	  pc_range = pc_array[1];
	  if (pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr mask;
	  const unsigned char *p;

	  /* pc_range follows pc_begin and uses only the value format
	     (low nibble) of the encoding, with no base applied.  */
	  p = read_encoded_value_with_base (encoding, base,
					    this_fde->pc_begin, &pc_begin);
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      /* Unsigned compare implements pc_begin <= pc < pc_begin+pc_range.  */
      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
	return this_fde;
    }

  return NULL;
}
865 
/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

/* Variant for DW_EH_PE_absptr objects: pc_begin/pc_range are two raw,
   possibly unaligned words, fetched with memcpy.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  /* Invariant: any match lies in vec->array[lo..hi).  */
  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
	hi = i;
      else if (pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}
894 
/* Binary search variant for objects whose FDEs share one encoding, so
   the base can be computed once outside the loop.  */

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      /* pc_range follows pc_begin and uses only the value format
	 (low nibble) of the encoding, with no base applied.  */
      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
					&pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}
924 
/* Binary search variant for mixed-encoding objects: each probe must
   look up its own FDE's encoding and base before decoding.  */

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
					base_from_object (encoding, ob),
					f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}
955 
/* Find the FDE in OB covering PC, sorting the object's FDE table
   first if possible.  Returns NULL when PC is not covered.  Caller
   must hold object_mutex.  */
static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
	 that we've not processed this object before.  A quick range
	 check is in order.  */
      if (pc < ob->pc_begin)
	return NULL;
    }

  if (ob->s.b.sorted)
    {
      /* Pick the binary search matching the object's encoding mix.  */
      if (ob->s.b.mixed_encoding)
	return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
	return binary_search_unencoded_fdes (ob, pc);
      else
	return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
	{
	  fde **p;
	  for (p = ob->u.array; *p ; p++)
	    {
	      const fde *f = linear_search_fdes (ob, *p, pc);
	      if (f)
		return f;
	    }
	  return NULL;
	}
      else
	return linear_search_fdes (ob, ob->u.single, pc);
    }
}
999 
/* Main entry point for the unwinder: find the FDE covering PC among
   all registered objects, filling BASES (tbase/dbase/func) from the
   owning object on success.  Returns NULL if no FDE covers PC.  */
const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
	f = search_object (ob, pc);
	if (f)
	  goto fini;
	break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
	if ((*p)->pc_begin < ob->pc_begin)
	  break;
      ob->next = *p;
      *p = ob;

      if (f)
	goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  /* Note OB is read after the mutex is dropped; it points at the
     object that produced F, found while the lock was held.  */
  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      /* Re-decode the FDE's pc_begin to report the function start.  */
      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
	encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
				    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}
1061