xref: /netbsd-src/external/gpl3/gdb.old/dist/gdb/dwarf2/expr.c (revision 8e33eff89e26cf71871ead62f0d5063e1313c33a)
1 /* DWARF 2 Expression Evaluator.
2 
3    Copyright (C) 2001-2023 Free Software Foundation, Inc.
4 
5    Contributed by Daniel Berlin (dan@dberlin.org)
6 
7    This file is part of GDB.
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation; either version 3 of the License, or
12    (at your option) any later version.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21 
22 #include "defs.h"
23 #include "block.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "dwarf2.h"
29 #include "dwarf2/expr.h"
30 #include "dwarf2/loc.h"
31 #include "dwarf2/read.h"
32 #include "frame.h"
33 #include "gdbsupport/underlying.h"
34 #include "gdbarch.h"
35 #include "objfiles.h"
36 
37 /* This holds gdbarch-specific types used by the DWARF expression
38    evaluator.  See comments in execute_stack_op.  */
39 
struct dwarf_gdbarch_types
{
  /* Signed integer types used as the "address type" for DWARF stack
     operations, one slot per supported address size: index 0 is
     2 bytes, 1 is 4 bytes, 2 is 8 bytes (see address_type, which
     lazily fills these in).  */
  struct type *dw_types[3] {};
};
44 
45 /* Cookie for gdbarch data.  */
46 
47 static const registry<gdbarch>::key<dwarf_gdbarch_types> dwarf_arch_cookie;
48 
49 /* Ensure that a FRAME is defined, throw an exception otherwise.  */
50 
51 static void
52 ensure_have_frame (frame_info_ptr frame, const char *op_name)
53 {
54   if (frame == nullptr)
55     throw_error (GENERIC_ERROR,
56 		 _("%s evaluation requires a frame."), op_name);
57 }
58 
59 /* Ensure that a PER_CU is defined and throw an exception otherwise.  */
60 
61 static void
62 ensure_have_per_cu (dwarf2_per_cu_data *per_cu, const char* op_name)
63 {
64   if (per_cu == nullptr)
65     throw_error (GENERIC_ERROR,
66 		 _("%s evaluation requires a compilation unit."), op_name);
67 }
68 
69 /* Return the number of bytes overlapping a contiguous chunk of N_BITS
70    bits whose first bit is located at bit offset START.  */
71 
72 static size_t
73 bits_to_bytes (ULONGEST start, ULONGEST n_bits)
74 {
75   return (start % HOST_CHAR_BIT + n_bits + HOST_CHAR_BIT - 1) / HOST_CHAR_BIT;
76 }
77 
78 /* See expr.h.  */
79 
CORE_ADDR
read_addr_from_reg (frame_info_ptr frame, int reg)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  /* Map the DWARF register number to GDB's numbering; errors out if
     there is no mapping.  */
  int regnum = dwarf_reg_to_regnum_or_error (gdbarch, reg);

  /* Let the architecture convert the register contents to an
     address (handles pointer representations, sign extension,
     etc.).  */
  return address_from_register (regnum, frame);
}
88 
/* Closure attached to a computed (pieced) value; shared between all
   copies of the value via reference counting (see
   copy_pieced_value_closure / free_pieced_value_closure).  */

struct piece_closure
{
  /* Reference count.  */
  int refc = 0;

  /* The objfile from which this closure's expression came.  */
  dwarf2_per_objfile *per_objfile = nullptr;

  /* The CU from which this closure's expression came.  */
  dwarf2_per_cu_data *per_cu = nullptr;

  /* The pieces describing this variable.  */
  std::vector<dwarf_expr_piece> pieces;

  /* Frame ID of frame to which a register value is relative, used
     only by DWARF_VALUE_REGISTER.  */
  struct frame_id frame_id;
};
107 
108 /* Allocate a closure for a value formed from separately-described
109    PIECES.  */
110 
111 static piece_closure *
112 allocate_piece_closure (dwarf2_per_cu_data *per_cu,
113 			dwarf2_per_objfile *per_objfile,
114 			std::vector<dwarf_expr_piece> &&pieces,
115 			frame_info_ptr frame)
116 {
117   piece_closure *c = new piece_closure;
118 
119   c->refc = 1;
120   /* We must capture this here due to sharing of DWARF state.  */
121   c->per_objfile = per_objfile;
122   c->per_cu = per_cu;
123   c->pieces = std::move (pieces);
124   if (frame == nullptr)
125     c->frame_id = null_frame_id;
126   else
127     c->frame_id = get_frame_id (frame);
128 
129   for (dwarf_expr_piece &piece : c->pieces)
130     if (piece.location == DWARF_VALUE_STACK)
131       value_incref (piece.v.value);
132 
133   return c;
134 }
135 
136 /* Read or write a pieced value V.  If FROM != NULL, operate in "write
137    mode": copy FROM into the pieces comprising V.  If FROM == NULL,
138    operate in "read mode": fetch the contents of the (lazy) value V by
139    composing it from its pieces.  If CHECK_OPTIMIZED is true, then no
140    reading or writing is done; instead the return value of this
141    function is true if any piece is optimized out.  When
142    CHECK_OPTIMIZED is true, FROM must be nullptr.  */
143 
static bool
rw_pieced_value (value *v, value *from, bool check_optimized)
{
  int i;
  /* OFFSET walks the destination (bits consumed so far); MAX_OFFSET
     bounds it.  */
  LONGEST offset = 0, max_offset;
  gdb_byte *v_contents;
  const gdb_byte *from_contents;
  piece_closure *c
    = (piece_closure *) value_computed_closure (v);
  /* Scratch buffer for non-byte-aligned transfers.  */
  gdb::byte_vector buffer;
  bool bits_big_endian = type_byte_order (value_type (v)) == BFD_ENDIAN_BIG;

  gdb_assert (!check_optimized || from == nullptr);
  if (from != nullptr)
    {
      /* Write mode: source bits come from FROM.  */
      from_contents = value_contents (from).data ();
      v_contents = nullptr;
    }
  else
    {
      if (value_type (v) != value_enclosing_type (v))
	internal_error (_("Should not be able to create a lazy value with "
			  "an enclosing type"));
      if (check_optimized)
	v_contents = nullptr;
      else
	v_contents = value_contents_raw (v).data ();
      from_contents = nullptr;
    }

  /* Number of leading bits of the composite that are not part of V
     (e.g. because V is a sub-object or bitfield of the described
     variable).  */
  ULONGEST bits_to_skip = 8 * value_offset (v);
  if (value_bitsize (v))
    {
      bits_to_skip += (8 * value_offset (value_parent (v))
		       + value_bitpos (v));
      if (from != nullptr
	  && (type_byte_order (value_type (from))
	      == BFD_ENDIAN_BIG))
	{
	  /* Use the least significant bits of FROM.  */
	  max_offset = 8 * value_type (from)->length ();
	  offset = max_offset - value_bitsize (v);
	}
      else
	max_offset = value_bitsize (v);
    }
  else
    max_offset = 8 * value_type (v)->length ();

  /* Advance to the first non-skipped piece.  */
  for (i = 0; i < c->pieces.size () && bits_to_skip >= c->pieces[i].size; i++)
    bits_to_skip -= c->pieces[i].size;

  /* Transfer piece by piece; BITS_TO_SKIP is consumed inside the
     first iteration and reset to zero at the bottom of the loop.  */
  for (; i < c->pieces.size () && offset < max_offset; i++)
    {
      dwarf_expr_piece *p = &c->pieces[i];
      size_t this_size_bits, this_size;

      this_size_bits = p->size - bits_to_skip;
      if (this_size_bits > max_offset - offset)
	this_size_bits = max_offset - offset;

      switch (p->location)
	{
	case DWARF_VALUE_REGISTER:
	  {
	    frame_info_ptr frame = frame_find_by_id (c->frame_id);
	    gdbarch *arch = get_frame_arch (frame);
	    int gdb_regnum = dwarf_reg_to_regnum_or_error (arch, p->v.regno);
	    ULONGEST reg_bits = 8 * register_size (arch, gdb_regnum);
	    int optim, unavail;

	    if (gdbarch_byte_order (arch) == BFD_ENDIAN_BIG
		&& p->offset + p->size < reg_bits)
	      {
		/* Big-endian, and we want less than full size.  */
		bits_to_skip += reg_bits - (p->offset + p->size);
	      }
	    else
	      bits_to_skip += p->offset;

	    this_size = bits_to_bytes (bits_to_skip, this_size_bits);
	    buffer.resize (this_size);

	    if (from == nullptr)
	      {
		/* Read mode.  */
		if (!get_frame_register_bytes (frame, gdb_regnum,
					       bits_to_skip / 8,
					       buffer, &optim, &unavail))
		  {
		    if (optim)
		      {
			if (check_optimized)
			  return true;
			mark_value_bits_optimized_out (v, offset,
						       this_size_bits);
		      }
		    if (unavail && !check_optimized)
		      mark_value_bits_unavailable (v, offset,
						   this_size_bits);
		    break;
		  }

		if (!check_optimized)
		  copy_bitwise (v_contents, offset,
				buffer.data (), bits_to_skip % 8,
				this_size_bits, bits_big_endian);
	      }
	    else
	      {
		/* Write mode.  */
		if (bits_to_skip % 8 != 0 || this_size_bits % 8 != 0)
		  {
		    /* Data is copied non-byte-aligned into the register.
		       Need some bits from original register value.  */
		    get_frame_register_bytes (frame, gdb_regnum,
					      bits_to_skip / 8,
					      buffer, &optim, &unavail);
		    if (optim)
		      throw_error (OPTIMIZED_OUT_ERROR,
				   _("Can't do read-modify-write to "
				     "update bitfield; containing word "
				     "has been optimized out"));
		    if (unavail)
		      throw_error (NOT_AVAILABLE_ERROR,
				   _("Can't do read-modify-write to "
				     "update bitfield; containing word "
				     "is unavailable"));
		  }

		copy_bitwise (buffer.data (), bits_to_skip % 8,
			      from_contents, offset,
			      this_size_bits, bits_big_endian);
		put_frame_register_bytes (frame, gdb_regnum,
					  bits_to_skip / 8,
					  buffer);
	      }
	  }
	  break;

	case DWARF_VALUE_MEMORY:
	  {
	    if (check_optimized)
	      break;

	    bits_to_skip += p->offset;

	    CORE_ADDR start_addr = p->v.mem.addr + bits_to_skip / 8;

	    if (bits_to_skip % 8 == 0 && this_size_bits % 8 == 0
		&& offset % 8 == 0)
	      {
		/* Everything is byte-aligned; no buffer needed.  */
		if (from != nullptr)
		  write_memory_with_notification (start_addr,
						  (from_contents
						   + offset / 8),
						  this_size_bits / 8);
		else
		  read_value_memory (v, offset,
				     p->v.mem.in_stack_memory,
				     p->v.mem.addr + bits_to_skip / 8,
				     v_contents + offset / 8,
				     this_size_bits / 8);
		break;
	      }

	    this_size = bits_to_bytes (bits_to_skip, this_size_bits);
	    buffer.resize (this_size);

	    if (from == nullptr)
	      {
		/* Read mode.  */
		read_value_memory (v, offset,
				   p->v.mem.in_stack_memory,
				   p->v.mem.addr + bits_to_skip / 8,
				   buffer.data (), this_size);
		copy_bitwise (v_contents, offset,
			      buffer.data (), bits_to_skip % 8,
			      this_size_bits, bits_big_endian);
	      }
	    else
	      {
		/* Write mode.  */
		if (bits_to_skip % 8 != 0 || this_size_bits % 8 != 0)
		  {
		    if (this_size <= 8)
		      {
			/* Perform a single read for small sizes.  */
			read_memory (start_addr, buffer.data (),
				     this_size);
		      }
		    else
		      {
			/* Only the first and last bytes can possibly have
			   any bits reused.  */
			read_memory (start_addr, buffer.data (), 1);
			read_memory (start_addr + this_size - 1,
				     &buffer[this_size - 1], 1);
		      }
		  }

		copy_bitwise (buffer.data (), bits_to_skip % 8,
			      from_contents, offset,
			      this_size_bits, bits_big_endian);
		write_memory_with_notification (start_addr,
						buffer.data (),
						this_size);
	      }
	  }
	  break;

	case DWARF_VALUE_STACK:
	  {
	    if (check_optimized)
	      break;

	    /* Stack values are read-only; writing marks the bits
	       optimized out rather than erroring.  */
	    if (from != nullptr)
	      {
		mark_value_bits_optimized_out (v, offset, this_size_bits);
		break;
	      }

	    gdbarch *objfile_gdbarch = c->per_objfile->objfile->arch ();
	    ULONGEST stack_value_size_bits
	      = 8 * value_type (p->v.value)->length ();

	    /* Use zeroes if piece reaches beyond stack value.  */
	    if (p->offset + p->size > stack_value_size_bits)
	      break;

	    /* Piece is anchored at least significant bit end.  */
	    if (gdbarch_byte_order (objfile_gdbarch) == BFD_ENDIAN_BIG)
	      bits_to_skip += stack_value_size_bits - p->offset - p->size;
	    else
	      bits_to_skip += p->offset;

	    copy_bitwise (v_contents, offset,
			  value_contents_all (p->v.value).data (),
			  bits_to_skip,
			  this_size_bits, bits_big_endian);
	  }
	  break;

	case DWARF_VALUE_LITERAL:
	  {
	    if (check_optimized)
	      break;

	    /* Literals are read-only, same treatment as stack
	       values.  */
	    if (from != nullptr)
	      {
		mark_value_bits_optimized_out (v, offset, this_size_bits);
		break;
	      }

	    ULONGEST literal_size_bits = 8 * p->v.literal.length;
	    size_t n = this_size_bits;

	    /* Cut off at the end of the implicit value.  */
	    bits_to_skip += p->offset;
	    if (bits_to_skip >= literal_size_bits)
	      break;
	    if (n > literal_size_bits - bits_to_skip)
	      n = literal_size_bits - bits_to_skip;

	    copy_bitwise (v_contents, offset,
			  p->v.literal.data, bits_to_skip,
			  n, bits_big_endian);
	  }
	  break;

	case DWARF_VALUE_IMPLICIT_POINTER:
	    if (from != nullptr)
	      {
		mark_value_bits_optimized_out (v, offset, this_size_bits);
		break;
	      }

	  /* These bits show up as zeros -- but do not cause the value to
	     be considered optimized-out.  */
	  break;

	case DWARF_VALUE_OPTIMIZED_OUT:
	  if (check_optimized)
	    return true;
	  mark_value_bits_optimized_out (v, offset, this_size_bits);
	  break;

	default:
	  internal_error (_("invalid location type"));
	}

      offset += this_size_bits;
      bits_to_skip = 0;
    }

  return false;
}
443 
/* An implementation of an lval_funcs method to read a pieced
   value.  */

static void
read_pieced_value (value *v)
{
  rw_pieced_value (v, nullptr, false);
}
449 
/* An implementation of an lval_funcs method to write a pieced
   value: copy FROM into the pieces of TO.  */

static void
write_pieced_value (value *to, value *from)
{
  rw_pieced_value (to, from, false);
}
455 
/* An implementation of an lval_funcs method to see whether any piece
   of the value is optimized out; no data is transferred.  */

static bool
is_optimized_out_pieced_value (value *v)
{
  return rw_pieced_value (v, nullptr, true);
}
461 
462 /* An implementation of an lval_funcs method to see whether a value is
463    a synthetic pointer.  */
464 
static int
check_pieced_synthetic_pointer (const value *value, LONGEST bit_offset,
				int bit_length)
{
  piece_closure *c = (piece_closure *) value_computed_closure (value);
  int i;

  /* Account for the value's own offset (and bit position for
     bitfields) within the composite.  */
  bit_offset += 8 * value_offset (value);
  if (value_bitsize (value))
    bit_offset += value_bitpos (value);

  /* Walk the pieces overlapping [BIT_OFFSET, BIT_OFFSET+BIT_LENGTH);
     every overlapping piece must be an implicit pointer.  */
  for (i = 0; i < c->pieces.size () && bit_length > 0; i++)
    {
      dwarf_expr_piece *p = &c->pieces[i];
      size_t this_size_bits = p->size;

      if (bit_offset > 0)
	{
	  if (bit_offset >= this_size_bits)
	    {
	      /* Piece lies entirely before the requested range.  */
	      bit_offset -= this_size_bits;
	      continue;
	    }

	  bit_length -= this_size_bits - bit_offset;
	  bit_offset = 0;
	}
      else
	bit_length -= this_size_bits;

      if (p->location != DWARF_VALUE_IMPLICIT_POINTER)
	return 0;
    }

  return 1;
}
501 
502 /* An implementation of an lval_funcs method to indirect through a
503    pointer.  This handles the synthetic pointer case when needed.  */
504 
static value *
indirect_pieced_value (value *value)
{
  piece_closure *c
    = (piece_closure *) value_computed_closure (value);
  int i;
  dwarf_expr_piece *piece = NULL;

  /* Only pointer-typed values can be indirected this way.  */
  struct type *type = check_typedef (value_type (value));
  if (type->code () != TYPE_CODE_PTR)
    return NULL;

  int bit_length = 8 * type->length ();
  LONGEST bit_offset = 8 * value_offset (value);
  if (value_bitsize (value))
    bit_offset += value_bitpos (value);

  /* Find the piece covering the pointer; it must be a single
     implicit-pointer piece spanning the whole pointer.  */
  for (i = 0; i < c->pieces.size () && bit_length > 0; i++)
    {
      dwarf_expr_piece *p = &c->pieces[i];
      size_t this_size_bits = p->size;

      if (bit_offset > 0)
	{
	  if (bit_offset >= this_size_bits)
	    {
	      bit_offset -= this_size_bits;
	      continue;
	    }

	  bit_length -= this_size_bits - bit_offset;
	  bit_offset = 0;
	}
      else
	bit_length -= this_size_bits;

      if (p->location != DWARF_VALUE_IMPLICIT_POINTER)
	return NULL;

      if (bit_length != 0)
	error (_("Invalid use of DW_OP_implicit_pointer"));

      piece = p;
      break;
    }

  gdb_assert (piece != NULL && c->per_cu != nullptr);
  frame_info_ptr frame = get_selected_frame (_("No frame selected."));

  /* This is an offset requested by GDB, such as value subscripts.
     However, due to how synthetic pointers are implemented, this is
     always presented to us as a pointer type.  This means we have to
     sign-extend it manually as appropriate.  Use raw
     extract_signed_integer directly rather than value_as_address and
     sign extend afterwards on architectures that would need it
     (mostly everywhere except MIPS, which has signed addresses) as
     the later would go through gdbarch_pointer_to_address and thus
     return a CORE_ADDR with high bits set on architectures that
     encode address spaces and other things in CORE_ADDR.  */
  bfd_endian byte_order = gdbarch_byte_order (get_frame_arch (frame));
  LONGEST byte_offset
    = extract_signed_integer (value_contents (value), byte_order);
  byte_offset += piece->v.ptr.offset;

  return indirect_synthetic_pointer (piece->v.ptr.die_sect_off,
				     byte_offset, c->per_cu,
				     c->per_objfile, frame, type);
}
573 
574 /* Implementation of the coerce_ref method of lval_funcs for synthetic C++
575    references.  */
576 
577 static value *
578 coerce_pieced_ref (const value *value)
579 {
580   struct type *type = check_typedef (value_type (value));
581 
582   if (value_bits_synthetic_pointer (value, value_embedded_offset (value),
583 				    TARGET_CHAR_BIT * type->length ()))
584     {
585       const piece_closure *closure
586 	= (piece_closure *) value_computed_closure (value);
587       frame_info_ptr frame
588 	= get_selected_frame (_("No frame selected."));
589 
590       /* gdb represents synthetic pointers as pieced values with a single
591 	 piece.  */
592       gdb_assert (closure != NULL);
593       gdb_assert (closure->pieces.size () == 1);
594 
595       return indirect_synthetic_pointer
596 	(closure->pieces[0].v.ptr.die_sect_off,
597 	 closure->pieces[0].v.ptr.offset,
598 	 closure->per_cu, closure->per_objfile, frame, type);
599     }
600   else
601     {
602       /* Else: not a synthetic reference; do nothing.  */
603       return NULL;
604     }
605 }
606 
/* An implementation of an lval_funcs method to duplicate a closure:
   the closure is shared, so just bump the reference count.  */

static void *
copy_pieced_value_closure (const value *v)
{
  piece_closure *c = (piece_closure *) value_computed_closure (v);

  ++c->refc;
  return c;
}
615 
/* An implementation of an lval_funcs method to release a closure:
   drop one reference and, on the last release, undo the value_incref
   done by allocate_piece_closure and delete the closure.  */

static void
free_pieced_value_closure (value *v)
{
  piece_closure *c = (piece_closure *) value_computed_closure (v);

  --c->refc;
  if (c->refc == 0)
    {
      for (dwarf_expr_piece &p : c->pieces)
	if (p.location == DWARF_VALUE_STACK)
	  value_decref (p.v.value);

      delete c;
    }
}
631 
632 /* Functions for accessing a variable described by DW_OP_piece.  */
static const struct lval_funcs pieced_value_funcs = {
  read_pieced_value,			/* read */
  write_pieced_value,			/* write */
  is_optimized_out_pieced_value,	/* is_optimized_out */
  indirect_pieced_value,		/* indirect */
  coerce_pieced_ref,			/* coerce_ref */
  check_pieced_synthetic_pointer,	/* check_synthetic_pointer */
  copy_pieced_value_closure,		/* copy_closure */
  free_pieced_value_closure		/* free_closure */
};
643 
644 /* Given context CTX, section offset SECT_OFF, and compilation unit
645    data PER_CU, execute the "variable value" operation on the DIE
646    found at SECT_OFF.  */
647 
static value *
sect_variable_value (sect_offset sect_off,
		     dwarf2_per_cu_data *per_cu,
		     dwarf2_per_objfile *per_objfile)
{
  const char *var_name = nullptr;
  struct type *die_type
    = dwarf2_fetch_die_type_sect_off (sect_off, per_cu, per_objfile,
				      &var_name);

  if (die_type == NULL)
    error (_("Bad DW_OP_GNU_variable_value DIE."));

  /* Note: Things still work when the following test is removed.  This
     test and error is here to conform to the proposed specification.  */
  if (die_type->code () != TYPE_CODE_INT
      && die_type->code () != TYPE_CODE_ENUM
      && die_type->code () != TYPE_CODE_RANGE
      && die_type->code () != TYPE_CODE_PTR)
    error (_("Type of DW_OP_GNU_variable_value DIE must be an integer or pointer."));

  /* If the DIE has a name, try evaluating the variable directly by
     name first.  */
  if (var_name != nullptr)
    {
      value *result = compute_var_value (var_name);
      if (result != nullptr)
	return result;
    }

  /* Otherwise treat the DIE reference like a synthetic pointer to the
     variable and indirect through it.  */
  struct type *type = lookup_pointer_type (die_type);
  frame_info_ptr frame = get_selected_frame (_("No frame selected."));
  return indirect_synthetic_pointer (sect_off, 0, per_cu, per_objfile, frame,
				     type, true);
}
681 
682 /* Return the type used for DWARF operations where the type is
683    unspecified in the DWARF spec.  Only certain sizes are
684    supported.  */
685 
struct type *
dwarf_expr_context::address_type () const
{
  gdbarch *arch = this->m_per_objfile->objfile->arch ();
  /* Types are cached per-gdbarch in the registry.  */
  dwarf_gdbarch_types *types = dwarf_arch_cookie.get (arch);
  if (types == nullptr)
    types = dwarf_arch_cookie.emplace (arch);
  int ndx;

  /* Only 2-, 4- and 8-byte address sizes are supported; see
     dwarf_gdbarch_types::dw_types.  */
  if (this->m_addr_size == 2)
    ndx = 0;
  else if (this->m_addr_size == 4)
    ndx = 1;
  else if (this->m_addr_size == 8)
    ndx = 2;
  else
    error (_("Unsupported address size in DWARF expressions: %d bits"),
	   8 * this->m_addr_size);

  /* Lazily create the signed address type the first time this size is
     requested for this architecture.  */
  if (types->dw_types[ndx] == NULL)
    types->dw_types[ndx]
      = arch_integer_type (arch, 8 * this->m_addr_size,
			   0, "<signed DWARF address type>");

  return types->dw_types[ndx];
}
712 
713 /* Create a new context for the expression evaluator.  */
714 
/* Create a new context for the expression evaluator.  ADDR_SIZE is
   the CU's address size in bytes; PER_OBJFILE supplies the objfile
   and architecture used during evaluation.  */

dwarf_expr_context::dwarf_expr_context (dwarf2_per_objfile *per_objfile,
					int addr_size)
: m_addr_size (addr_size),
  m_per_objfile (per_objfile)
{
}
721 
722 /* Push VALUE onto the stack.  */
723 
/* Push VALUE onto the stack.  IN_STACK_MEMORY records whether the
   value is known to live in stack (frame) memory.  */

void
dwarf_expr_context::push (struct value *value, bool in_stack_memory)
{
  this->m_stack.emplace_back (value, in_stack_memory);
}
729 
730 /* Push VALUE onto the stack.  */
731 
/* Push the address VALUE onto the stack, wrapped in the evaluator's
   signed DWARF address type.  */

void
dwarf_expr_context::push_address (CORE_ADDR value, bool in_stack_memory)
{
  push (value_from_ulongest (address_type (), value), in_stack_memory);
}
737 
738 /* Pop the top item off of the stack.  */
739 
/* Pop the top item off of the stack; errors on underflow.  */

void
dwarf_expr_context::pop ()
{
  if (this->m_stack.empty ())
    error (_("dwarf expression stack underflow"));

  this->m_stack.pop_back ();
}
748 
749 /* Retrieve the N'th item on the stack.  */
750 
/* Retrieve the N'th item on the stack, where 0 is the top of the
   stack; errors if the stack is not deep enough.  */

struct value *
dwarf_expr_context::fetch (int n)
{
  if (this->m_stack.size () <= n)
     error (_("Asked for position %d of stack, "
	      "stack only has %zu elements on it."),
	    n, this->m_stack.size ());
  return this->m_stack[this->m_stack.size () - (1 + n)].value;
}
760 
761 /* See expr.h.  */
762 
void
dwarf_expr_context::get_frame_base (const gdb_byte **start,
				    size_t * length)
{
  /* DW_OP_fbreg needs a frame to find the function's frame-base
     expression.  */
  ensure_have_frame (this->m_frame, "DW_OP_fbreg");

  const block *bl = get_frame_block (this->m_frame, NULL);

  if (bl == NULL)
    error (_("frame address is not available."));

  /* Use block_linkage_function, which returns a real (not inlined)
     function, instead of get_frame_function, which may return an
     inlined function.  */
  symbol *framefunc = block_linkage_function (bl);

  /* If we found a frame-relative symbol then it was certainly within
     some function associated with a frame. If we can't find the frame,
     something has gone wrong.  */
  gdb_assert (framefunc != NULL);

  /* Fill *START/*LENGTH with the DW_AT_frame_base expression valid at
     the frame's PC.  */
  func_get_frame_base_dwarf_block (framefunc,
				   get_frame_address_in_block (this->m_frame),
				   start, length);
}
788 
789 /* See expr.h.  */
790 
struct type *
dwarf_expr_context::get_base_type (cu_offset die_cu_off)
{
  /* Without a CU we cannot resolve the DIE reference; fall back to the
     architecture's plain int.  */
  if (this->m_per_cu == nullptr)
    return builtin_type (this->m_per_objfile->objfile->arch ())->builtin_int;

  struct type *result = dwarf2_get_die_type (die_cu_off, this->m_per_cu,
					     this->m_per_objfile);

  if (result == nullptr)
    error (_("Could not find type for operation"));

  return result;
}
805 
806 /* See expr.h.  */
807 
void
dwarf_expr_context::dwarf_call (cu_offset die_cu_off)
{
  ensure_have_per_cu (this->m_per_cu, "DW_OP_call");

  frame_info_ptr frame = this->m_frame;

  /* The PC is only needed if the callee's location is PC-dependent
     (e.g. a location list), so require a frame lazily.  */
  auto get_pc_from_frame = [frame] ()
    {
      ensure_have_frame (frame, "DW_OP_call");
      return get_frame_address_in_block (frame);
    };

  dwarf2_locexpr_baton block
    = dwarf2_fetch_die_loc_cu_off (die_cu_off, this->m_per_cu,
				   this->m_per_objfile, get_pc_from_frame);

  /* DW_OP_call_ref is currently not supported.  */
  gdb_assert (block.per_cu == this->m_per_cu);

  /* Evaluate the callee's expression on the current stack.  */
  this->eval (block.data, block.size);
}
830 
831 /* See expr.h.  */
832 
833 void
834 dwarf_expr_context::read_mem (gdb_byte *buf, CORE_ADDR addr,
835 			      size_t length)
836 {
837   if (length == 0)
838     return;
839 
840   /* Prefer the passed-in memory, if it exists.  */
841   if (this->m_addr_info != nullptr)
842     {
843       CORE_ADDR offset = addr - this->m_addr_info->addr;
844 
845       if (offset < this->m_addr_info->valaddr.size ()
846 	  && offset + length <= this->m_addr_info->valaddr.size ())
847 	{
848 	  memcpy (buf, this->m_addr_info->valaddr.data (), length);
849 	  return;
850 	}
851     }
852 
853   read_memory (addr, buf, length);
854 }
855 
856 /* See expr.h.  */
857 
void
dwarf_expr_context::push_dwarf_reg_entry_value (call_site_parameter_kind kind,
						call_site_parameter_u kind_u,
						int deref_size)
{
  ensure_have_per_cu (this->m_per_cu, "DW_OP_entry_value");
  ensure_have_frame (this->m_frame, "DW_OP_entry_value");

  dwarf2_per_cu_data *caller_per_cu;
  dwarf2_per_objfile *caller_per_objfile;
  frame_info_ptr caller_frame = get_prev_frame (this->m_frame);
  /* Find the call-site parameter in the caller matching KIND/KIND_U;
     also yields the caller's CU and objfile.  */
  call_site_parameter *parameter
    = dwarf_expr_reg_to_entry_parameter (this->m_frame, kind, kind_u,
					 &caller_per_cu,
					 &caller_per_objfile);
  /* DEREF_SIZE == -1 selects DW_AT_call_value; otherwise use
     DW_AT_call_data_value.  */
  const gdb_byte *data_src
    = deref_size == -1 ? parameter->value : parameter->data_value;
  size_t size
    = deref_size == -1 ? parameter->value_size : parameter->data_value_size;

  /* DEREF_SIZE size is not verified here.  */
  if (data_src == nullptr)
    throw_error (NO_ENTRY_VALUE_ERROR,
		 _("Cannot resolve DW_AT_call_data_value"));

  /* We are about to evaluate an expression in the context of the caller
     of the current frame.  This evaluation context may be different from
     the current (callee's) context), so temporarily set the caller's context.

     It is possible for the caller to be from a different objfile from the
     callee if the call is made through a function pointer.  */
  scoped_restore save_frame = make_scoped_restore (&this->m_frame,
						   caller_frame);
  scoped_restore save_per_cu = make_scoped_restore (&this->m_per_cu,
						    caller_per_cu);
  scoped_restore save_addr_info = make_scoped_restore (&this->m_addr_info,
						       nullptr);
  scoped_restore save_per_objfile = make_scoped_restore (&this->m_per_objfile,
							 caller_per_objfile);

  /* The caller's CU may use a different address size.  */
  scoped_restore save_addr_size = make_scoped_restore (&this->m_addr_size);
  this->m_addr_size = this->m_per_cu->addr_size ();

  this->eval (data_src, size);
}
903 
904 /* See expr.h.  */
905 
value *
dwarf_expr_context::fetch_result (struct type *type, struct type *subobj_type,
				  LONGEST subobj_offset, bool as_lval)
{
  value *retval = nullptr;
  gdbarch *arch = this->m_per_objfile->objfile->arch ();

  if (type == nullptr)
    type = address_type ();

  if (subobj_type == nullptr)
    subobj_type = type;

  /* Ensure that, if TYPE or SUBOBJ_TYPE are typedefs, their length is filled
     in instead of being zero.  */
  check_typedef (type);
  check_typedef (subobj_type);

  if (this->m_pieces.size () > 0)
    {
      /* Composite location: build a pieced (computed) value.  */
      ULONGEST bit_size = 0;

      for (dwarf_expr_piece &piece : this->m_pieces)
	bit_size += piece.size;
      /* Complain if the expression is larger than the size of the
	 outer type.  */
      if (bit_size > 8 * type->length ())
	invalid_synthetic_pointer ();

      piece_closure *c
	= allocate_piece_closure (this->m_per_cu, this->m_per_objfile,
				  std::move (this->m_pieces), this->m_frame);
      retval = allocate_computed_value (subobj_type,
					&pieced_value_funcs, c);
      set_value_offset (retval, subobj_offset);
    }
  else
    {
      /* If AS_LVAL is false, means that the implicit conversion
	 from a location description to value is expected.  */
      if (!as_lval)
	this->m_location = DWARF_VALUE_STACK;

      switch (this->m_location)
	{
	case DWARF_VALUE_REGISTER:
	  {
	    /* The register number is on top of the DWARF stack.  */
	    gdbarch *f_arch = get_frame_arch (this->m_frame);
	    int dwarf_regnum
	      = longest_to_int (value_as_long (this->fetch (0)));
	    int gdb_regnum = dwarf_reg_to_regnum_or_error (f_arch,
							   dwarf_regnum);

	    if (subobj_offset != 0)
	      error (_("cannot use offset on synthetic pointer to register"));

	    gdb_assert (this->m_frame != NULL);

	    retval = value_from_register (subobj_type, gdb_regnum,
					  this->m_frame);
	    if (value_optimized_out (retval))
	      {
		/* This means the register has undefined value / was
		   not saved.  As we're computing the location of some
		   variable etc. in the program, not a value for
		   inspecting a register ($pc, $sp, etc.), return a
		   generic optimized out value instead, so that we show
		   <optimized out> instead of <not saved>.  */
		value *tmp = allocate_value (subobj_type);
		value_contents_copy (tmp, 0, retval, 0,
				     subobj_type->length ());
		retval = tmp;
	      }
	  }
	  break;

	case DWARF_VALUE_MEMORY:
	  {
	    struct type *ptr_type;
	    CORE_ADDR address = this->fetch_address (0);
	    bool in_stack_memory = this->fetch_in_stack_memory (0);

	    /* DW_OP_deref_size (and possibly other operations too) may
	       create a pointer instead of an address.  Ideally, the
	       pointer to address conversion would be performed as part
	       of those operations, but the type of the object to
	       which the address refers is not known at the time of
	       the operation.  Therefore, we do the conversion here
	       since the type is readily available.  */

	    switch (subobj_type->code ())
	      {
		case TYPE_CODE_FUNC:
		case TYPE_CODE_METHOD:
		  ptr_type = builtin_type (arch)->builtin_func_ptr;
		  break;
		default:
		  ptr_type = builtin_type (arch)->builtin_data_ptr;
		  break;
	      }
	    address = value_as_address (value_from_pointer (ptr_type, address));

	    retval = value_at_lazy (subobj_type,
				    address + subobj_offset);
	    if (in_stack_memory)
	      set_value_stack (retval, 1);
	  }
	  break;

	case DWARF_VALUE_STACK:
	  {
	    value *val = this->fetch (0);
	    size_t n = value_type (val)->length ();
	    size_t len = subobj_type->length ();
	    size_t max = type->length ();

	    if (subobj_offset + len > max)
	      invalid_synthetic_pointer ();

	    retval = allocate_value (subobj_type);

	    /* The given offset is relative to the actual object.  */
	    if (gdbarch_byte_order (arch) == BFD_ENDIAN_BIG)
	      subobj_offset += n - max;

	    copy (value_contents_all (val).slice (subobj_offset, len),
		  value_contents_raw (retval));
	  }
	  break;

	case DWARF_VALUE_LITERAL:
	  {
	    /* Implicit value: copy the requested sub-object out of the
	       literal block (m_data/m_len).  */
	    size_t n = subobj_type->length ();

	    if (subobj_offset + n > this->m_len)
	      invalid_synthetic_pointer ();

	    retval = allocate_value (subobj_type);
	    bfd_byte *contents = value_contents_raw (retval).data ();
	    memcpy (contents, this->m_data + subobj_offset, n);
	  }
	  break;

	case DWARF_VALUE_OPTIMIZED_OUT:
	  retval = allocate_optimized_out_value (subobj_type);
	  break;

	  /* DWARF_VALUE_IMPLICIT_POINTER was converted to a pieced
	     operation by execute_stack_op.  */
	case DWARF_VALUE_IMPLICIT_POINTER:
	  /* DWARF_VALUE_OPTIMIZED_OUT can't occur in this context --
	     it can only be encountered when making a piece.  */
	default:
	  internal_error (_("invalid location type"));
	}
    }

  set_value_initialized (retval, this->m_initialized);

  return retval;
}
1067 
1068 /* See expr.h.  */
1069 
1070 value *
1071 dwarf_expr_context::evaluate (const gdb_byte *addr, size_t len, bool as_lval,
1072 			      dwarf2_per_cu_data *per_cu, frame_info_ptr frame,
1073 			      const struct property_addr_info *addr_info,
1074 			      struct type *type, struct type *subobj_type,
1075 			      LONGEST subobj_offset)
1076 {
1077   this->m_per_cu = per_cu;
1078   this->m_frame = frame;
1079   this->m_addr_info = addr_info;
1080 
1081   eval (addr, len);
1082   return fetch_result (type, subobj_type, subobj_offset, as_lval);
1083 }
1084 
1085 /* Require that TYPE be an integral type; throw an exception if not.  */
1086 
1087 static void
1088 dwarf_require_integral (struct type *type)
1089 {
1090   if (type->code () != TYPE_CODE_INT
1091       && type->code () != TYPE_CODE_CHAR
1092       && type->code () != TYPE_CODE_BOOL)
1093     error (_("integral type expected in DWARF expression"));
1094 }
1095 
1096 /* Return the unsigned form of TYPE.  TYPE is necessarily an integral
1097    type.  */
1098 
1099 static struct type *
1100 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
1101 {
1102   switch (type->length ())
1103     {
1104     case 1:
1105       return builtin_type (gdbarch)->builtin_uint8;
1106     case 2:
1107       return builtin_type (gdbarch)->builtin_uint16;
1108     case 4:
1109       return builtin_type (gdbarch)->builtin_uint32;
1110     case 8:
1111       return builtin_type (gdbarch)->builtin_uint64;
1112     default:
1113       error (_("no unsigned variant found for type, while evaluating "
1114 	       "DWARF expression"));
1115     }
1116 }
1117 
1118 /* Return the signed form of TYPE.  TYPE is necessarily an integral
1119    type.  */
1120 
1121 static struct type *
1122 get_signed_type (struct gdbarch *gdbarch, struct type *type)
1123 {
1124   switch (type->length ())
1125     {
1126     case 1:
1127       return builtin_type (gdbarch)->builtin_int8;
1128     case 2:
1129       return builtin_type (gdbarch)->builtin_int16;
1130     case 4:
1131       return builtin_type (gdbarch)->builtin_int32;
1132     case 8:
1133       return builtin_type (gdbarch)->builtin_int64;
1134     default:
1135       error (_("no signed variant found for type, while evaluating "
1136 	       "DWARF expression"));
1137     }
1138 }
1139 
1140 /* Retrieve the N'th item on the stack, converted to an address.  */
1141 
1142 CORE_ADDR
1143 dwarf_expr_context::fetch_address (int n)
1144 {
1145   gdbarch *arch = this->m_per_objfile->objfile->arch ();
1146   value *result_val = fetch (n);
1147   bfd_endian byte_order = gdbarch_byte_order (arch);
1148   ULONGEST result;
1149 
1150   dwarf_require_integral (value_type (result_val));
1151   result = extract_unsigned_integer (value_contents (result_val), byte_order);
1152 
1153   /* For most architectures, calling extract_unsigned_integer() alone
1154      is sufficient for extracting an address.  However, some
1155      architectures (e.g. MIPS) use signed addresses and using
1156      extract_unsigned_integer() will not produce a correct
1157      result.  Make sure we invoke gdbarch_integer_to_address()
1158      for those architectures which require it.  */
1159   if (gdbarch_integer_to_address_p (arch))
1160     {
1161       gdb_byte *buf = (gdb_byte *) alloca (this->m_addr_size);
1162       type *int_type = get_unsigned_type (arch,
1163 					  value_type (result_val));
1164 
1165       store_unsigned_integer (buf, this->m_addr_size, byte_order, result);
1166       return gdbarch_integer_to_address (arch, int_type, buf);
1167     }
1168 
1169   return (CORE_ADDR) result;
1170 }
1171 
1172 /* Retrieve the in_stack_memory flag of the N'th item on the stack.  */
1173 
1174 bool
1175 dwarf_expr_context::fetch_in_stack_memory (int n)
1176 {
1177   if (this->m_stack.size () <= n)
1178      error (_("Asked for position %d of stack, "
1179 	      "stack only has %zu elements on it."),
1180 	    n, this->m_stack.size ());
1181   return this->m_stack[this->m_stack.size () - (1 + n)].in_stack_memory;
1182 }
1183 
1184 /* Return true if the expression stack is empty.  */
1185 
1186 bool
1187 dwarf_expr_context::stack_empty_p () const
1188 {
1189   return m_stack.empty ();
1190 }
1191 
1192 /* Add a new piece to the dwarf_expr_context's piece list.  */
void
dwarf_expr_context::add_piece (ULONGEST size, ULONGEST offset)
{
  /* Append a new piece describing the current location state.  SIZE
     is the piece size in bits and OFFSET the bit offset into the
     location (see struct dwarf_expr_piece).  */
  this->m_pieces.emplace_back ();
  dwarf_expr_piece &p = this->m_pieces.back ();

  p.location = this->m_location;
  p.size = size;
  p.offset = offset;

  /* NOTE: the branch order matters.  A literal carries its own data
     and does not use the stack, so it must be recognized before the
     empty-stack check below.  */
  if (p.location == DWARF_VALUE_LITERAL)
    {
      p.v.literal.data = this->m_data;
      p.v.literal.length = this->m_len;
    }
  else if (stack_empty_p ())
    {
      /* An empty stack means this piece's location was never pushed;
	 treat it as optimized out.  */
      p.location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      this->m_location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p.location == DWARF_VALUE_MEMORY)
    {
      /* Memory pieces record the address and whether it lies in
	 stack memory (top of stack, not popped).  */
      p.v.mem.addr = fetch_address (0);
      p.v.mem.in_stack_memory = fetch_in_stack_memory (0);
    }
  else if (p.location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* For implicit pointers, m_len holds the referenced DIE's
	 section offset and the stack top holds the byte offset.  */
      p.v.ptr.die_sect_off = (sect_offset) this->m_len;
      p.v.ptr.offset = value_as_long (fetch (0));
    }
  else if (p.location == DWARF_VALUE_REGISTER)
    p.v.regno = value_as_long (fetch (0));
  else
    {
      /* DWARF_VALUE_STACK: keep the computed value itself.  */
      p.v.value = fetch (0);
    }
}
1234 
1235 /* Evaluate the expression at ADDR (LEN bytes long).  */
1236 
1237 void
1238 dwarf_expr_context::eval (const gdb_byte *addr, size_t len)
1239 {
1240   int old_recursion_depth = this->m_recursion_depth;
1241 
1242   execute_stack_op (addr, addr + len);
1243 
1244   /* RECURSION_DEPTH becomes invalid if an exception was thrown here.  */
1245 
1246   gdb_assert (this->m_recursion_depth == old_recursion_depth);
1247 }
1248 
1249 /* Helper to read a uleb128 value or throw an error.  */
1250 
1251 const gdb_byte *
1252 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
1253 		   uint64_t *r)
1254 {
1255   buf = gdb_read_uleb128 (buf, buf_end, r);
1256   if (buf == NULL)
1257     error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
1258   return buf;
1259 }
1260 
1261 /* Helper to read a sleb128 value or throw an error.  */
1262 
1263 const gdb_byte *
1264 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
1265 		   int64_t *r)
1266 {
1267   buf = gdb_read_sleb128 (buf, buf_end, r);
1268   if (buf == NULL)
1269     error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
1270   return buf;
1271 }
1272 
1273 const gdb_byte *
1274 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
1275 {
1276   buf = gdb_skip_leb128 (buf, buf_end);
1277   if (buf == NULL)
1278     error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
1279   return buf;
1280 }
1281 
1282 
1283 /* Check that the current operator is either at the end of an
1284    expression, or that it is followed by a composition operator or by
1285    DW_OP_GNU_uninit (which should terminate the expression).  */
1286 
1287 void
1288 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
1289 				const char *op_name)
1290 {
1291   if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
1292       && *op_ptr != DW_OP_GNU_uninit)
1293     error (_("DWARF-2 expression error: `%s' operations must be "
1294 	     "used either alone or in conjunction with DW_OP_piece "
1295 	     "or DW_OP_bit_piece."),
1296 	   op_name);
1297 }
1298 
1299 /* Return true iff the types T1 and T2 are "the same".  This only does
1300    checks that might reasonably be needed to compare DWARF base
1301    types.  */
1302 
1303 static int
1304 base_types_equal_p (struct type *t1, struct type *t2)
1305 {
1306   if (t1->code () != t2->code ())
1307     return 0;
1308   if (t1->is_unsigned () != t2->is_unsigned ())
1309     return 0;
1310   return t1->length () == t2->length ();
1311 }
1312 
1313 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
1314    DWARF register number.  Otherwise return -1.  */
1315 
int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  /* An empty block cannot name a register.  */
  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      /* DW_OP_reg0..DW_OP_reg31 encode the register in the opcode
	 itself and must be the sole byte of the block.  */
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_regval_type || *buf == DW_OP_GNU_regval_type)
    {
      /* Operands: ULEB128 register number, then a LEB128 type DIE
	 offset which we skip (only the register matters here).  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
	return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      /* Operand: ULEB128 register number.  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
    }
  else
    return -1;
  /* The operator must consume the whole block, and the register
     number must fit in an int without truncation (the comparison
     fails if casting to int changed the value).  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
1353 
1354 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
1355    DW_OP_deref* return the DWARF register number.  Otherwise return -1.
1356    DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
1357    size from DW_OP_deref_size.  */
1358 
1359 int
1360 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
1361 				CORE_ADDR *deref_size_return)
1362 {
1363   uint64_t dwarf_reg;
1364   int64_t offset;
1365 
1366   if (buf_end <= buf)
1367     return -1;
1368 
1369   if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
1370     {
1371       dwarf_reg = *buf - DW_OP_breg0;
1372       buf++;
1373       if (buf >= buf_end)
1374 	return -1;
1375     }
1376   else if (*buf == DW_OP_bregx)
1377     {
1378       buf++;
1379       buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
1380       if (buf == NULL)
1381 	return -1;
1382       if ((int) dwarf_reg != dwarf_reg)
1383        return -1;
1384     }
1385   else
1386     return -1;
1387 
1388   buf = gdb_read_sleb128 (buf, buf_end, &offset);
1389   if (buf == NULL)
1390     return -1;
1391   if (offset != 0)
1392     return -1;
1393 
1394   if (*buf == DW_OP_deref)
1395     {
1396       buf++;
1397       *deref_size_return = -1;
1398     }
1399   else if (*buf == DW_OP_deref_size)
1400     {
1401       buf++;
1402       if (buf >= buf_end)
1403        return -1;
1404       *deref_size_return = *buf++;
1405     }
1406   else
1407     return -1;
1408 
1409   if (buf != buf_end)
1410     return -1;
1411 
1412   return dwarf_reg;
1413 }
1414 
1415 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
1416    in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.  */
1417 
1418 int
1419 dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
1420 			  CORE_ADDR *fb_offset_return)
1421 {
1422   int64_t fb_offset;
1423 
1424   if (buf_end <= buf)
1425     return 0;
1426 
1427   if (*buf != DW_OP_fbreg)
1428     return 0;
1429   buf++;
1430 
1431   buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
1432   if (buf == NULL)
1433     return 0;
1434   *fb_offset_return = fb_offset;
1435   if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
1436     return 0;
1437 
1438   return 1;
1439 }
1440 
1441 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
1442    in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
1443    The matched SP register number depends on GDBARCH.  */
1444 
1445 int
1446 dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
1447 			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
1448 {
1449   uint64_t dwarf_reg;
1450   int64_t sp_offset;
1451 
1452   if (buf_end <= buf)
1453     return 0;
1454   if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
1455     {
1456       dwarf_reg = *buf - DW_OP_breg0;
1457       buf++;
1458     }
1459   else
1460     {
1461       if (*buf != DW_OP_bregx)
1462        return 0;
1463       buf++;
1464       buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
1465       if (buf == NULL)
1466 	return 0;
1467     }
1468 
1469   if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
1470       != gdbarch_sp_regnum (gdbarch))
1471     return 0;
1472 
1473   buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
1474   if (buf == NULL)
1475     return 0;
1476   *sp_offset_return = sp_offset;
1477   if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
1478     return 0;
1479 
1480   return 1;
1481 }
1482 
1483 /* The engine for the expression evaluator.  Using the context in this
1484    object, evaluate the expression between OP_PTR and OP_END.  */
1485 
1486 void
1487 dwarf_expr_context::execute_stack_op (const gdb_byte *op_ptr,
1488 				      const gdb_byte *op_end)
1489 {
1490   gdbarch *arch = this->m_per_objfile->objfile->arch ();
1491   bfd_endian byte_order = gdbarch_byte_order (arch);
1492   /* Old-style "untyped" DWARF values need special treatment in a
1493      couple of places, specifically DW_OP_mod and DW_OP_shr.  We need
1494      a special type for these values so we can distinguish them from
1495      values that have an explicit type, because explicitly-typed
1496      values do not need special treatment.  This special type must be
1497      different (in the `==' sense) from any base type coming from the
1498      CU.  */
1499   type *address_type = this->address_type ();
1500 
1501   this->m_location = DWARF_VALUE_MEMORY;
1502   this->m_initialized = 1;  /* Default is initialized.  */
1503 
1504   if (this->m_recursion_depth > this->m_max_recursion_depth)
1505     error (_("DWARF-2 expression error: Loop detected (%d)."),
1506 	   this->m_recursion_depth);
1507   this->m_recursion_depth++;
1508 
1509   while (op_ptr < op_end)
1510     {
1511       dwarf_location_atom op = (dwarf_location_atom) *op_ptr++;
1512       ULONGEST result;
1513       /* Assume the value is not in stack memory.
1514 	 Code that knows otherwise sets this to true.
1515 	 Some arithmetic on stack addresses can probably be assumed to still
1516 	 be a stack address, but we skip this complication for now.
1517 	 This is just an optimization, so it's always ok to punt
1518 	 and leave this as false.  */
1519       bool in_stack_memory = false;
1520       uint64_t uoffset, reg;
1521       int64_t offset;
1522       value *result_val = NULL;
1523 
1524       /* The DWARF expression might have a bug causing an infinite
1525 	 loop.  In that case, quitting is the only way out.  */
1526       QUIT;
1527 
1528       switch (op)
1529 	{
1530 	case DW_OP_lit0:
1531 	case DW_OP_lit1:
1532 	case DW_OP_lit2:
1533 	case DW_OP_lit3:
1534 	case DW_OP_lit4:
1535 	case DW_OP_lit5:
1536 	case DW_OP_lit6:
1537 	case DW_OP_lit7:
1538 	case DW_OP_lit8:
1539 	case DW_OP_lit9:
1540 	case DW_OP_lit10:
1541 	case DW_OP_lit11:
1542 	case DW_OP_lit12:
1543 	case DW_OP_lit13:
1544 	case DW_OP_lit14:
1545 	case DW_OP_lit15:
1546 	case DW_OP_lit16:
1547 	case DW_OP_lit17:
1548 	case DW_OP_lit18:
1549 	case DW_OP_lit19:
1550 	case DW_OP_lit20:
1551 	case DW_OP_lit21:
1552 	case DW_OP_lit22:
1553 	case DW_OP_lit23:
1554 	case DW_OP_lit24:
1555 	case DW_OP_lit25:
1556 	case DW_OP_lit26:
1557 	case DW_OP_lit27:
1558 	case DW_OP_lit28:
1559 	case DW_OP_lit29:
1560 	case DW_OP_lit30:
1561 	case DW_OP_lit31:
1562 	  result = op - DW_OP_lit0;
1563 	  result_val = value_from_ulongest (address_type, result);
1564 	  break;
1565 
1566 	case DW_OP_addr:
1567 	  result = extract_unsigned_integer (op_ptr,
1568 					     this->m_addr_size, byte_order);
1569 	  op_ptr += this->m_addr_size;
1570 	  /* Some versions of GCC emit DW_OP_addr before
1571 	     DW_OP_GNU_push_tls_address.  In this case the value is an
1572 	     index, not an address.  We don't support things like
1573 	     branching between the address and the TLS op.  */
1574 	  if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
1575 	    result += this->m_per_objfile->objfile->text_section_offset ();
1576 	  result_val = value_from_ulongest (address_type, result);
1577 	  break;
1578 
1579 	case DW_OP_addrx:
1580 	case DW_OP_GNU_addr_index:
1581 	  ensure_have_per_cu (this->m_per_cu, "DW_OP_addrx");
1582 
1583 	  op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1584 	  result = dwarf2_read_addr_index (this->m_per_cu, this->m_per_objfile,
1585 					   uoffset);
1586 	  result += this->m_per_objfile->objfile->text_section_offset ();
1587 	  result_val = value_from_ulongest (address_type, result);
1588 	  break;
1589 	case DW_OP_GNU_const_index:
1590 	  ensure_have_per_cu (this->m_per_cu, "DW_OP_GNU_const_index");
1591 
1592 	  op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1593 	  result = dwarf2_read_addr_index (this->m_per_cu, this->m_per_objfile,
1594 					   uoffset);
1595 	  result_val = value_from_ulongest (address_type, result);
1596 	  break;
1597 
1598 	case DW_OP_const1u:
1599 	  result = extract_unsigned_integer (op_ptr, 1, byte_order);
1600 	  result_val = value_from_ulongest (address_type, result);
1601 	  op_ptr += 1;
1602 	  break;
1603 	case DW_OP_const1s:
1604 	  result = extract_signed_integer (op_ptr, 1, byte_order);
1605 	  result_val = value_from_ulongest (address_type, result);
1606 	  op_ptr += 1;
1607 	  break;
1608 	case DW_OP_const2u:
1609 	  result = extract_unsigned_integer (op_ptr, 2, byte_order);
1610 	  result_val = value_from_ulongest (address_type, result);
1611 	  op_ptr += 2;
1612 	  break;
1613 	case DW_OP_const2s:
1614 	  result = extract_signed_integer (op_ptr, 2, byte_order);
1615 	  result_val = value_from_ulongest (address_type, result);
1616 	  op_ptr += 2;
1617 	  break;
1618 	case DW_OP_const4u:
1619 	  result = extract_unsigned_integer (op_ptr, 4, byte_order);
1620 	  result_val = value_from_ulongest (address_type, result);
1621 	  op_ptr += 4;
1622 	  break;
1623 	case DW_OP_const4s:
1624 	  result = extract_signed_integer (op_ptr, 4, byte_order);
1625 	  result_val = value_from_ulongest (address_type, result);
1626 	  op_ptr += 4;
1627 	  break;
1628 	case DW_OP_const8u:
1629 	  result = extract_unsigned_integer (op_ptr, 8, byte_order);
1630 	  result_val = value_from_ulongest (address_type, result);
1631 	  op_ptr += 8;
1632 	  break;
1633 	case DW_OP_const8s:
1634 	  result = extract_signed_integer (op_ptr, 8, byte_order);
1635 	  result_val = value_from_ulongest (address_type, result);
1636 	  op_ptr += 8;
1637 	  break;
1638 	case DW_OP_constu:
1639 	  op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1640 	  result = uoffset;
1641 	  result_val = value_from_ulongest (address_type, result);
1642 	  break;
1643 	case DW_OP_consts:
1644 	  op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1645 	  result = offset;
1646 	  result_val = value_from_ulongest (address_type, result);
1647 	  break;
1648 
1649 	/* The DW_OP_reg operations are required to occur alone in
1650 	   location expressions.  */
1651 	case DW_OP_reg0:
1652 	case DW_OP_reg1:
1653 	case DW_OP_reg2:
1654 	case DW_OP_reg3:
1655 	case DW_OP_reg4:
1656 	case DW_OP_reg5:
1657 	case DW_OP_reg6:
1658 	case DW_OP_reg7:
1659 	case DW_OP_reg8:
1660 	case DW_OP_reg9:
1661 	case DW_OP_reg10:
1662 	case DW_OP_reg11:
1663 	case DW_OP_reg12:
1664 	case DW_OP_reg13:
1665 	case DW_OP_reg14:
1666 	case DW_OP_reg15:
1667 	case DW_OP_reg16:
1668 	case DW_OP_reg17:
1669 	case DW_OP_reg18:
1670 	case DW_OP_reg19:
1671 	case DW_OP_reg20:
1672 	case DW_OP_reg21:
1673 	case DW_OP_reg22:
1674 	case DW_OP_reg23:
1675 	case DW_OP_reg24:
1676 	case DW_OP_reg25:
1677 	case DW_OP_reg26:
1678 	case DW_OP_reg27:
1679 	case DW_OP_reg28:
1680 	case DW_OP_reg29:
1681 	case DW_OP_reg30:
1682 	case DW_OP_reg31:
1683 	  dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");
1684 
1685 	  result = op - DW_OP_reg0;
1686 	  result_val = value_from_ulongest (address_type, result);
1687 	  this->m_location = DWARF_VALUE_REGISTER;
1688 	  break;
1689 
1690 	case DW_OP_regx:
1691 	  op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1692 	  dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
1693 
1694 	  result = reg;
1695 	  result_val = value_from_ulongest (address_type, result);
1696 	  this->m_location = DWARF_VALUE_REGISTER;
1697 	  break;
1698 
1699 	case DW_OP_implicit_value:
1700 	  {
1701 	    uint64_t len;
1702 
1703 	    op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1704 	    if (op_ptr + len > op_end)
1705 	      error (_("DW_OP_implicit_value: too few bytes available."));
1706 	    this->m_len = len;
1707 	    this->m_data = op_ptr;
1708 	    this->m_location = DWARF_VALUE_LITERAL;
1709 	    op_ptr += len;
1710 	    dwarf_expr_require_composition (op_ptr, op_end,
1711 					    "DW_OP_implicit_value");
1712 	  }
1713 	  goto no_push;
1714 
1715 	case DW_OP_stack_value:
1716 	  this->m_location = DWARF_VALUE_STACK;
1717 	  dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
1718 	  goto no_push;
1719 
1720 	case DW_OP_implicit_pointer:
1721 	case DW_OP_GNU_implicit_pointer:
1722 	  {
1723 	    int64_t len;
1724 	    ensure_have_per_cu (this->m_per_cu, "DW_OP_implicit_pointer");
1725 
1726 	    int ref_addr_size = this->m_per_cu->ref_addr_size ();
1727 
1728 	    /* The referred-to DIE of sect_offset kind.  */
1729 	    this->m_len = extract_unsigned_integer (op_ptr, ref_addr_size,
1730 						  byte_order);
1731 	    op_ptr += ref_addr_size;
1732 
1733 	    /* The byte offset into the data.  */
1734 	    op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
1735 	    result = (ULONGEST) len;
1736 	    result_val = value_from_ulongest (address_type, result);
1737 
1738 	    this->m_location = DWARF_VALUE_IMPLICIT_POINTER;
1739 	    dwarf_expr_require_composition (op_ptr, op_end,
1740 					    "DW_OP_implicit_pointer");
1741 	  }
1742 	  break;
1743 
1744 	case DW_OP_breg0:
1745 	case DW_OP_breg1:
1746 	case DW_OP_breg2:
1747 	case DW_OP_breg3:
1748 	case DW_OP_breg4:
1749 	case DW_OP_breg5:
1750 	case DW_OP_breg6:
1751 	case DW_OP_breg7:
1752 	case DW_OP_breg8:
1753 	case DW_OP_breg9:
1754 	case DW_OP_breg10:
1755 	case DW_OP_breg11:
1756 	case DW_OP_breg12:
1757 	case DW_OP_breg13:
1758 	case DW_OP_breg14:
1759 	case DW_OP_breg15:
1760 	case DW_OP_breg16:
1761 	case DW_OP_breg17:
1762 	case DW_OP_breg18:
1763 	case DW_OP_breg19:
1764 	case DW_OP_breg20:
1765 	case DW_OP_breg21:
1766 	case DW_OP_breg22:
1767 	case DW_OP_breg23:
1768 	case DW_OP_breg24:
1769 	case DW_OP_breg25:
1770 	case DW_OP_breg26:
1771 	case DW_OP_breg27:
1772 	case DW_OP_breg28:
1773 	case DW_OP_breg29:
1774 	case DW_OP_breg30:
1775 	case DW_OP_breg31:
1776 	  {
1777 	    op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1778 	    ensure_have_frame (this->m_frame, "DW_OP_breg");
1779 
1780 	    result = read_addr_from_reg (this->m_frame, op - DW_OP_breg0);
1781 	    result += offset;
1782 	    result_val = value_from_ulongest (address_type, result);
1783 	  }
1784 	  break;
1785 	case DW_OP_bregx:
1786 	  {
1787 	    op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1788 	    op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1789 	    ensure_have_frame (this->m_frame, "DW_OP_bregx");
1790 
1791 	    result = read_addr_from_reg (this->m_frame, reg);
1792 	    result += offset;
1793 	    result_val = value_from_ulongest (address_type, result);
1794 	  }
1795 	  break;
1796 	case DW_OP_fbreg:
1797 	  {
1798 	    const gdb_byte *datastart;
1799 	    size_t datalen;
1800 
1801 	    op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1802 
1803 	    /* Rather than create a whole new context, we simply
1804 	       backup the current stack locally and install a new empty stack,
1805 	       then reset it afterwards, effectively erasing whatever the
1806 	       recursive call put there.  */
1807 	    std::vector<dwarf_stack_value> saved_stack = std::move (this->m_stack);
1808 	    this->m_stack.clear ();
1809 
1810 	    /* FIXME: cagney/2003-03-26: This code should be using
1811 	       get_frame_base_address(), and then implement a dwarf2
1812 	       specific this_base method.  */
1813 	    this->get_frame_base (&datastart, &datalen);
1814 	    eval (datastart, datalen);
1815 	    if (this->m_location == DWARF_VALUE_MEMORY)
1816 	      result = fetch_address (0);
1817 	    else if (this->m_location == DWARF_VALUE_REGISTER)
1818 	      result
1819 		= read_addr_from_reg (this->m_frame, value_as_long (fetch (0)));
1820 	    else
1821 	      error (_("Not implemented: computing frame "
1822 		       "base using explicit value operator"));
1823 	    result = result + offset;
1824 	    result_val = value_from_ulongest (address_type, result);
1825 	    in_stack_memory = true;
1826 
1827 	    /* Restore the content of the original stack.  */
1828 	    this->m_stack = std::move (saved_stack);
1829 
1830 	    this->m_location = DWARF_VALUE_MEMORY;
1831 	  }
1832 	  break;
1833 
1834 	case DW_OP_dup:
1835 	  result_val = fetch (0);
1836 	  in_stack_memory = fetch_in_stack_memory (0);
1837 	  break;
1838 
1839 	case DW_OP_drop:
1840 	  pop ();
1841 	  goto no_push;
1842 
1843 	case DW_OP_pick:
1844 	  offset = *op_ptr++;
1845 	  result_val = fetch (offset);
1846 	  in_stack_memory = fetch_in_stack_memory (offset);
1847 	  break;
1848 
1849 	case DW_OP_swap:
1850 	  {
1851 	    if (this->m_stack.size () < 2)
1852 	       error (_("Not enough elements for "
1853 			"DW_OP_swap.  Need 2, have %zu."),
1854 		      this->m_stack.size ());
1855 
1856 	    dwarf_stack_value &t1 = this->m_stack[this->m_stack.size () - 1];
1857 	    dwarf_stack_value &t2 = this->m_stack[this->m_stack.size () - 2];
1858 	    std::swap (t1, t2);
1859 	    goto no_push;
1860 	  }
1861 
1862 	case DW_OP_over:
1863 	  result_val = fetch (1);
1864 	  in_stack_memory = fetch_in_stack_memory (1);
1865 	  break;
1866 
1867 	case DW_OP_rot:
1868 	  {
1869 	    if (this->m_stack.size () < 3)
1870 	       error (_("Not enough elements for "
1871 			"DW_OP_rot.  Need 3, have %zu."),
1872 		      this->m_stack.size ());
1873 
1874 	    dwarf_stack_value temp = this->m_stack[this->m_stack.size () - 1];
1875 	    this->m_stack[this->m_stack.size () - 1]
1876 	      = this->m_stack[this->m_stack.size () - 2];
1877 	    this->m_stack[this->m_stack.size () - 2]
1878 	       = this->m_stack[this->m_stack.size () - 3];
1879 	    this->m_stack[this->m_stack.size () - 3] = temp;
1880 	    goto no_push;
1881 	  }
1882 
1883 	case DW_OP_deref:
1884 	case DW_OP_deref_size:
1885 	case DW_OP_deref_type:
1886 	case DW_OP_GNU_deref_type:
1887 	  {
1888 	    int addr_size = (op == DW_OP_deref ? this->m_addr_size : *op_ptr++);
1889 	    gdb_byte *buf = (gdb_byte *) alloca (addr_size);
1890 	    CORE_ADDR addr = fetch_address (0);
1891 	    struct type *type;
1892 
1893 	    pop ();
1894 
1895 	    if (op == DW_OP_deref_type || op == DW_OP_GNU_deref_type)
1896 	      {
1897 		op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1898 		cu_offset type_die_cu_off = (cu_offset) uoffset;
1899 		type = get_base_type (type_die_cu_off);
1900 	      }
1901 	    else
1902 	      type = address_type;
1903 
1904 	    this->read_mem (buf, addr, addr_size);
1905 
1906 	    /* If the size of the object read from memory is different
1907 	       from the type length, we need to zero-extend it.  */
1908 	    if (type->length () != addr_size)
1909 	      {
1910 		ULONGEST datum =
1911 		  extract_unsigned_integer (buf, addr_size, byte_order);
1912 
1913 		buf = (gdb_byte *) alloca (type->length ());
1914 		store_unsigned_integer (buf, type->length (),
1915 					byte_order, datum);
1916 	      }
1917 
1918 	    result_val = value_from_contents_and_address (type, buf, addr);
1919 	    break;
1920 	  }
1921 
1922 	case DW_OP_abs:
1923 	case DW_OP_neg:
1924 	case DW_OP_not:
1925 	case DW_OP_plus_uconst:
1926 	  {
1927 	    /* Unary operations.  */
1928 	    result_val = fetch (0);
1929 	    pop ();
1930 
1931 	    switch (op)
1932 	      {
1933 	      case DW_OP_abs:
1934 		if (value_less (result_val,
1935 				value_zero (value_type (result_val), not_lval)))
1936 		  result_val = value_neg (result_val);
1937 		break;
1938 	      case DW_OP_neg:
1939 		result_val = value_neg (result_val);
1940 		break;
1941 	      case DW_OP_not:
1942 		dwarf_require_integral (value_type (result_val));
1943 		result_val = value_complement (result_val);
1944 		break;
1945 	      case DW_OP_plus_uconst:
1946 		dwarf_require_integral (value_type (result_val));
1947 		result = value_as_long (result_val);
1948 		op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1949 		result += reg;
1950 		result_val = value_from_ulongest (address_type, result);
1951 		break;
1952 	      }
1953 	  }
1954 	  break;
1955 
	case DW_OP_and:
	case DW_OP_div:
	case DW_OP_minus:
	case DW_OP_mod:
	case DW_OP_mul:
	case DW_OP_or:
	case DW_OP_plus:
	case DW_OP_shl:
	case DW_OP_shr:
	case DW_OP_shra:
	case DW_OP_xor:
	case DW_OP_le:
	case DW_OP_ge:
	case DW_OP_eq:
	case DW_OP_lt:
	case DW_OP_gt:
	case DW_OP_ne:
	  {
	    /* Binary operations.  */
	    struct value *first, *second;

	    /* The top of the stack is the second operand; the entry
	       beneath it is the first.  */
	    second = fetch (0);
	    pop ();

	    first = fetch (0);
	    pop ();

	    if (! base_types_equal_p (value_type (first), value_type (second)))
	      error (_("Incompatible types on DWARF stack"));

	    switch (op)
	      {
	      case DW_OP_and:
		dwarf_require_integral (value_type (first));
		dwarf_require_integral (value_type (second));
		result_val = value_binop (first, second, BINOP_BITWISE_AND);
		break;
	      case DW_OP_div:
		result_val = value_binop (first, second, BINOP_DIV);
		break;
	      case DW_OP_minus:
		result_val = value_binop (first, second, BINOP_SUB);
		break;
	      case DW_OP_mod:
		{
		  int cast_back = 0;
		  struct type *orig_type = value_type (first);

		  /* We have to special-case "old-style" untyped values
		     -- these must have mod computed using unsigned
		     math.  */
		  if (orig_type == address_type)
		    {
		      struct type *utype = get_unsigned_type (arch, orig_type);

		      cast_back = 1;
		      first = value_cast (utype, first);
		      second = value_cast (utype, second);
		    }
		  /* Note that value_binop doesn't handle float or
		     decimal float here.  This seems unimportant.  */
		  result_val = value_binop (first, second, BINOP_MOD);
		  if (cast_back)
		    result_val = value_cast (orig_type, result_val);
		}
		break;
	      case DW_OP_mul:
		result_val = value_binop (first, second, BINOP_MUL);
		break;
	      case DW_OP_or:
		dwarf_require_integral (value_type (first));
		dwarf_require_integral (value_type (second));
		result_val = value_binop (first, second, BINOP_BITWISE_IOR);
		break;
	      case DW_OP_plus:
		result_val = value_binop (first, second, BINOP_ADD);
		break;
	      case DW_OP_shl:
		dwarf_require_integral (value_type (first));
		dwarf_require_integral (value_type (second));
		result_val = value_binop (first, second, BINOP_LSH);
		break;
	      case DW_OP_shr:
		dwarf_require_integral (value_type (first));
		dwarf_require_integral (value_type (second));
		/* DW_OP_shr is a logical right shift, so force an
		   unsigned first operand before shifting.  */
		if (!value_type (first)->is_unsigned ())
		  {
		    struct type *utype
		      = get_unsigned_type (arch, value_type (first));

		    first = value_cast (utype, first);
		  }

		result_val = value_binop (first, second, BINOP_RSH);
		/* Make sure we wind up with the same type we started
		   with.  */
		if (value_type (result_val) != value_type (second))
		  result_val = value_cast (value_type (second), result_val);
		break;
	      case DW_OP_shra:
		dwarf_require_integral (value_type (first));
		dwarf_require_integral (value_type (second));
		/* DW_OP_shra is an arithmetic right shift, so force a
		   signed first operand before shifting.  */
		if (value_type (first)->is_unsigned ())
		  {
		    struct type *stype
		      = get_signed_type (arch, value_type (first));

		    first = value_cast (stype, first);
		  }

		result_val = value_binop (first, second, BINOP_RSH);
		/* Make sure we wind up with the same type we started
		   with.  */
		if (value_type (result_val) != value_type (second))
		  result_val = value_cast (value_type (second), result_val);
		break;
	      case DW_OP_xor:
		dwarf_require_integral (value_type (first));
		dwarf_require_integral (value_type (second));
		result_val = value_binop (first, second, BINOP_BITWISE_XOR);
		break;
	      case DW_OP_le:
		/* A <= B is !(B < A).  */
		result = ! value_less (second, first);
		result_val = value_from_ulongest (address_type, result);
		break;
	      case DW_OP_ge:
		/* A >= B is !(A < B).  */
		result = ! value_less (first, second);
		result_val = value_from_ulongest (address_type, result);
		break;
	      case DW_OP_eq:
		result = value_equal (first, second);
		result_val = value_from_ulongest (address_type, result);
		break;
	      case DW_OP_lt:
		result = value_less (first, second);
		result_val = value_from_ulongest (address_type, result);
		break;
	      case DW_OP_gt:
		/* A > B is B < A.  */
		result = value_less (second, first);
		result_val = value_from_ulongest (address_type, result);
		break;
	      case DW_OP_ne:
		result = ! value_equal (first, second);
		result_val = value_from_ulongest (address_type, result);
		break;
	      default:
		internal_error (_("Can't be reached."));
	      }
	  }
	  break;
2109 
	case DW_OP_call_frame_cfa:
	  /* Push the canonical frame address of the current frame;
	     the result is known to live in stack memory.  */
	  ensure_have_frame (this->m_frame, "DW_OP_call_frame_cfa");

	  result = dwarf2_frame_cfa (this->m_frame);
	  result_val = value_from_ulongest (address_type, result);
	  in_stack_memory = true;
	  break;

	case DW_OP_GNU_push_tls_address:
	case DW_OP_form_tls_address:
	  /* Variable is at a constant offset in the thread-local
	  storage block into the objfile for the current thread and
	  the dynamic linker module containing this expression.  Here
	  we return the offset from that base.  The top of the
	  stack has the offset from the beginning of the thread
	  control block at which the variable is located.  Nothing
	  should follow this operator, so the top of stack would be
	  returned.  */
	  result = value_as_long (fetch (0));
	  pop ();
	  result = target_translate_tls_address (this->m_per_objfile->objfile,
						 result);
	  result_val = value_from_ulongest (address_type, result);
	  break;
2134 
	case DW_OP_skip:
	  /* Unconditional branch: a signed 16-bit offset relative to
	     the first byte after the operand.  */
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  op_ptr += offset;
	  goto no_push;

	case DW_OP_bra:
	  {
	    struct value *val;

	    /* Conditional branch: taken when the popped top of stack
	       is non-zero.  */
	    offset = extract_signed_integer (op_ptr, 2, byte_order);
	    op_ptr += 2;
	    val = fetch (0);
	    dwarf_require_integral (value_type (val));
	    if (value_as_long (val) != 0)
	      op_ptr += offset;
	    pop ();
	  }
	  goto no_push;

	case DW_OP_nop:
	  goto no_push;
2157 
	case DW_OP_piece:
	  {
	    uint64_t size;

	    /* Record the piece; the ULEB128 operand is a size in
	       bytes, while add_piece works in bits.  */
	    op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
	    add_piece (8 * size, 0);

	    /* Pop off the address/regnum, and reset the location
	       type.  */
	    if (this->m_location != DWARF_VALUE_LITERAL
		&& this->m_location != DWARF_VALUE_OPTIMIZED_OUT)
	      pop ();
	    this->m_location = DWARF_VALUE_MEMORY;
	  }
	  goto no_push;

	case DW_OP_bit_piece:
	  {
	    uint64_t size, uleb_offset;

	    /* Record the piece; here both operands are already
	       expressed in bits.  */
	    op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
	    op_ptr = safe_read_uleb128 (op_ptr, op_end, &uleb_offset);
	    add_piece (size, uleb_offset);

	    /* Pop off the address/regnum, and reset the location
	       type.  */
	    if (this->m_location != DWARF_VALUE_LITERAL
		&& this->m_location != DWARF_VALUE_OPTIMIZED_OUT)
	      pop ();
	    this->m_location = DWARF_VALUE_MEMORY;
	  }
	  goto no_push;
2192 
	case DW_OP_GNU_uninit:
	  if (op_ptr != op_end)
	    error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
		   "be the very last op."));

	  /* Mark the resulting value as not yet initialized.  */
	  this->m_initialized = 0;
	  goto no_push;

	case DW_OP_call2:
	  {
	    /* Call a DWARF procedure named by a 2-byte CU-relative
	       DIE offset.  */
	    cu_offset cu_off
	      = (cu_offset) extract_unsigned_integer (op_ptr, 2, byte_order);
	    op_ptr += 2;
	    this->dwarf_call (cu_off);
	  }
	  goto no_push;

	case DW_OP_call4:
	  {
	    /* Same as DW_OP_call2, but with a 4-byte DIE offset.  */
	    cu_offset cu_off
	      = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
	    op_ptr += 4;
	    this->dwarf_call (cu_off);
	  }
	  goto no_push;
2218 
	case DW_OP_GNU_variable_value:
	  {
	    ensure_have_per_cu (this->m_per_cu, "DW_OP_GNU_variable_value");
	    int ref_addr_size = this->m_per_cu->ref_addr_size ();

	    /* The operand is a section-relative DIE reference; push
	       the value of the variable that DIE describes.  */
	    sect_offset sect_off
	      = (sect_offset) extract_unsigned_integer (op_ptr,
							ref_addr_size,
							byte_order);
	    op_ptr += ref_addr_size;
	    result_val = sect_variable_value (sect_off, this->m_per_cu,
					      this->m_per_objfile);
	    result_val = value_cast (address_type, result_val);
	  }
	  break;
2234 
	case DW_OP_entry_value:
	case DW_OP_GNU_entry_value:
	  {
	    uint64_t len;
	    CORE_ADDR deref_size;
	    union call_site_parameter_u kind_u;

	    op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
	    if (op_ptr + len > op_end)
	      error (_("DW_OP_entry_value: too few bytes available."));

	    /* First try the plain DW_OP_regN form of the
	       sub-expression.  */
	    kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
	    if (kind_u.dwarf_reg != -1)
	      {
		op_ptr += len;
		this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
						  kind_u,
						  -1 /* deref_size */);
		goto no_push;
	      }

	    /* Otherwise try DW_OP_bregN(0) followed by DW_OP_deref*.  */
	    kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
							       op_ptr + len,
							       &deref_size);
	    if (kind_u.dwarf_reg != -1)
	      {
		if (deref_size == -1)
		  deref_size = this->m_addr_size;
		op_ptr += len;
		this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
						  kind_u, deref_size);
		goto no_push;
	      }

	    /* error throws, so there is no fall-through into the next
	       case.  */
	    error (_("DWARF-2 expression error: DW_OP_entry_value is "
		     "supported only for single DW_OP_reg* "
		     "or for DW_OP_breg*(0)+DW_OP_deref*"));
	  }

	case DW_OP_GNU_parameter_ref:
	  {
	    union call_site_parameter_u kind_u;

	    /* The operand is a 4-byte CU-relative offset of the
	       call-site parameter DIE.  */
	    kind_u.param_cu_off
	      = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
	    op_ptr += 4;
	    this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_PARAM_OFFSET,
					      kind_u,
					      -1 /* deref_size */);
	  }
	  goto no_push;
2286 
	case DW_OP_const_type:
	case DW_OP_GNU_const_type:
	  {
	    int n;
	    const gdb_byte *data;
	    struct type *type;

	    /* Operands: ULEB128 CU-relative offset of the type DIE,
	       a 1-byte size, then that many bytes of constant data.  */
	    op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
	    cu_offset type_die_cu_off = (cu_offset) uoffset;

	    n = *op_ptr++;
	    data = op_ptr;
	    op_ptr += n;

	    type = get_base_type (type_die_cu_off);

	    if (type->length () != n)
	      error (_("DW_OP_const_type has different sizes for type and data"));

	    result_val = value_from_contents (type, data);
	  }
	  break;

	case DW_OP_regval_type:
	case DW_OP_GNU_regval_type:
	  {
	    /* Operands: ULEB128 DWARF register number, then ULEB128
	       CU-relative offset of the type DIE.  */
	    op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
	    op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
	    cu_offset type_die_cu_off = (cu_offset) uoffset;

	    ensure_have_frame (this->m_frame, "DW_OP_regval_type");

	    struct type *type = get_base_type (type_die_cu_off);
	    int regnum
	      = dwarf_reg_to_regnum_or_error (get_frame_arch (this->m_frame),
					      reg);
	    result_val = value_from_register (type, regnum, this->m_frame);
	  }
	  break;
2326 
	case DW_OP_convert:
	case DW_OP_GNU_convert:
	case DW_OP_reinterpret:
	case DW_OP_GNU_reinterpret:
	  {
	    struct type *type;

	    op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
	    cu_offset type_die_cu_off = (cu_offset) uoffset;

	    /* A zero type DIE offset denotes the generic (address)
	       type.  */
	    if (to_underlying (type_die_cu_off) == 0)
	      type = address_type;
	    else
	      type = get_base_type (type_die_cu_off);

	    result_val = fetch (0);
	    pop ();

	    /* DW_OP_convert casts by value; DW_OP_reinterpret keeps
	       the bits and only changes the type, so the sizes must
	       agree.  */
	    if (op == DW_OP_convert || op == DW_OP_GNU_convert)
	      result_val = value_cast (type, result_val);
	    else if (type == value_type (result_val))
	      {
		/* Nothing.  */
	      }
	    else if (type->length ()
		     != value_type (result_val)->length ())
	      error (_("DW_OP_reinterpret has wrong size"));
	    else
	      result_val
		= value_from_contents (type,
				       value_contents_all (result_val).data ());
	  }
	  break;
2360 
	case DW_OP_push_object_address:
	  /* Return the address of the object we are currently observing.  */
	  if (this->m_addr_info == nullptr
	      || (this->m_addr_info->valaddr.data () == nullptr
		  && this->m_addr_info->addr == 0))
	    error (_("Location address is not set."));

	  result_val
	    = value_from_ulongest (address_type, this->m_addr_info->addr);
	  break;

	default:
	  error (_("Unhandled dwarf expression opcode 0x%x"), op);
	}

      /* Most things push a result value.  Opcodes that do not jump to
	 no_push above, bypassing this.  */
      gdb_assert (result_val != NULL);
      push (result_val, in_stack_memory);
    no_push:
      ;
    }

  /* To simplify our main caller, if the result is an implicit
     pointer, then make a pieced value.  This is ok because we can't
     have implicit pointers in contexts where pieces are invalid.  */
  if (this->m_location == DWARF_VALUE_IMPLICIT_POINTER)
    add_piece (8 * this->m_addr_size, 0);

  this->m_recursion_depth--;
  gdb_assert (this->m_recursion_depth >= 0);
}
2392