/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
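  /* The encoding starts out as DW_EH_PE_omit; classify_object_over_fdes
     fills in the real FDE encoding (or flags a mixed encoding) the first
     time this object is searched.  */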
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = (struct object *) malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}

/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = (struct object *) malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll abort.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

  __gthread_mutex_unlock (&object_mutex);
  abort ();

 out:
  __gthread_mutex_unlock (&object_mutex);
  return (void *) ob;
}

void *
__deregister_frame_info (void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}


/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

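  /* The 0x70 bits of the encoding select which base the value is relative
     to; the low nibble only describes the value format and is irrelevant
     here.  */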
  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    }
  abort ();
}

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _Unwind_Word utmp;
  _Unwind_Sword stmp;

  aug = cie->augmentation;
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = aug + strlen (aug) + 1;   /* Skip the augmentation string.  */
  p = read_uleb128 (p, &utmp);  /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);  /* Skip data alignment.  */
  p++;                          /* Skip return address column.  */

  aug++;                        /* Skip 'z' */
  p = read_uleb128 (p, &utmp);  /* Skip augmentation length.  */
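  /* AUG now walks the augmentation characters after 'z' while P walks the
     corresponding augmentation data, so the two advance in lockstep.  */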
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}

static inline int
get_fde_encoding (struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}


/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       fde *x, fde *y)
{
  _Unwind_Ptr x_ptr = *(_Unwind_Ptr *) x->pc_begin;
  _Unwind_Ptr y_ptr = *(_Unwind_Ptr *) y->pc_begin;

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, fde *x, fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, fde *x, fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, fde *, fde *);


/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
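/* The split below peels off a long non-decreasing subsequence in a single
   pass; only the leftover erratic entries are heap-sorted before being
   merged back, so the common mostly-sorted case stays close to O(N).  */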

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (fde *) * count;
  if ((accu->linear = (struct fde_vector *) malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = (struct fde_vector *) malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */
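/* Concretely: ERRATIC->array[i] temporarily holds a link to the element
   that preceded LINEAR->array[i] in the candidate chain (or &MARKER at the
   chain head).  Popping an element follows that link and NULLs the slot,
   so after the scan the non-NULL slots mark exactly the surviving linear
   sequence.  */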

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static fde *marker;
  size_t count = linear->count;
  fde **chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  if (sizeof (fde *) != sizeof (fde **))
    abort ();

  for (i = 0; i < count; i++)
    {
      fde **probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (fde **) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
#define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)
  size_t n = erratic->count;
  size_t m = n;
  size_t i;

  while (m > 0)
    {
      /* Invariant: a[m..n-1] is a heap.  */
      m--;
      for (i = m; 2*i+1 < n; )
        {
          if (2*i+2 < n
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+2]);
              i = 2*i+2;
            }
          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+1]);
              i = 2*i+1;
            }
          else
            break;
        }
    }
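  /* The array is now a max-heap.  Repeatedly swap the maximum to the end
     and sift the new root down over the shrinking prefix.  */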
  while (n > 1)
    {
      /* Invariant: a[0..n-1] is a heap.  */
      n--;
      SWAP (a[0], a[n]);
      for (i = 0; 2*i+1 < n; )
        {
          if (2*i+2 < n
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+2]);
              i = 2*i+2;
            }
          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+1]);
              i = 2*i+1;
            }
          else
            break;
        }
    }
#undef SWAP
}

/* Merge V1 and V2, both sorted, and put the result into V1.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
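      /* Merge from the back so entries of V1 that are still unread are
         never overwritten; V1 was allocated large enough to hold both
         vectors.  */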
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  if (accu->linear && accu->linear->count != count)
    abort ();

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      if (accu->linear->count + accu->erratic->count != count)
        abort ();
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}


/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, fde *this_fde)
{
  struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;
      else
        mask = -1;

      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, fde *this_fde)
{
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          if (*(_Unwind_Ptr *) this_fde->pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);
        }
      else
        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

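  /* If we cannot allocate the sort arrays, simply return: the object is
     left unsorted and search_object falls back to a linear search.  */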
  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static fde *
linear_search_fdes (struct object *ob, fde *this_fde, void *pc)
{
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          pc_begin = ((_Unwind_Ptr *) this_fde->pc_begin)[0];
          pc_range = ((_Unwind_Ptr *) this_fde->pc_begin)[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

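/* All three variants keep the usual invariant that any FDE containing PC
   lies within [lo, hi); they differ only in how pc_begin and pc_range are
   decoded.  */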
static inline fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;

      pc_begin = ((void **) f->pc_begin)[0];
      pc_range = ((uaddr *) f->pc_begin)[1];

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, since we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}

fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;
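      /* This keeps seen_objects sorted by decreasing pc_begin, which the
         search loop above relies on.  */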

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, (_Unwind_Ptr *)&bases->func);
    }

  return f;
}
