xref: /netbsd-src/external/gpl3/gdb/dist/gdb/value.c (revision e6c7e151de239c49d2e38720a061ed9d1fa99309)
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2 
3    Copyright (C) 1986-2019 Free Software Foundation, Inc.
4 
5    This file is part of GDB.
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3 of the License, or
10    (at your option) any later version.
11 
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44 #include "common/selftest.h"
45 #include "common/array-view.h"
46 
47 /* Definition of a user function.  */
48 struct internal_function
49 {
50   /* The name of the function.  It is a bit odd to have this in the
51      function itself -- the user might use a differently-named
52      convenience variable to hold the function.  */
53   char *name;
54 
55   /* The handler.  */
56   internal_function_fn handler;
57 
58   /* User data for the handler.  */
59   void *cookie;
60 };
61 
62 /* Defines an [OFFSET, OFFSET + LENGTH) range.  */
63 
64 struct range
65 {
66   /* Lowest offset in the range.  */
67   LONGEST offset;
68 
69   /* Length of the range.  */
70   LONGEST length;
71 
72   /* Returns true if THIS is strictly less than OTHER, useful for
73      searching.  We keep ranges sorted by offset and coalesce
74      overlapping and contiguous ranges, so this just compares the
75      starting offset.  */
76 
77   bool operator< (const range &other) const
78   {
79     return offset < other.offset;
80   }
81 
82   /* Returns true if THIS is equal to OTHER.  */
83   bool operator== (const range &other) const
84   {
85     return offset == other.offset && length == other.length;
86   }
87 };
88 
89 /* Returns true if the ranges defined by [offset1, offset1+len1) and
90    [offset2, offset2+len2) overlap.  */
91 
92 static int
93 ranges_overlap (LONGEST offset1, LONGEST len1,
94 		LONGEST offset2, LONGEST len2)
95 {
96   ULONGEST h, l;
97 
98   l = std::max (offset1, offset2);
99   h = std::min (offset1 + len1, offset2 + len2);
100   return (l < h);
101 }
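
/* Illustrative sketch, not part of the original source and guarded
   out of the build: the ranges handled here are half-open, so two
   ranges that merely touch do not overlap.  */
#if 0
static void
example_ranges_overlap (void)
{
  gdb_assert (ranges_overlap (0, 8, 4, 8));	/* [0,8) and [4,12) share [4,8).  */
  gdb_assert (!ranges_overlap (0, 8, 8, 8));	/* [0,8) and [8,16) only touch.  */
  gdb_assert (!ranges_overlap (0, 4, 10, 2));	/* Disjoint.  */
}
#endif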
102 
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
104    OFFSET+LENGTH).  */
105 
106 static int
107 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
108 		LONGEST length)
109 {
110   range what;
111 
112   what.offset = offset;
113   what.length = length;
114 
115   /* We keep ranges sorted by offset and coalesce overlapping and
116      contiguous ranges, so to check if a range list contains a given
117      range, we can do a binary search for the position the given range
118      would be inserted if we only considered the starting OFFSET of
119      ranges.  We call that position I.  Since we also have LENGTH to
120    care for (this is a range after all), we need to check if the
121      _previous_ range overlaps the I range.  E.g.,
122 
123          R
124          |---|
125        |---|    |---|  |------| ... |--|
126        0        1      2            N
127 
128        I=1
129 
130      In the case above, the binary search would return `I=1', meaning,
131      this OFFSET should be inserted at position 1, and the current
132      position 1 should be pushed further (and before 2).  But, `0'
133      overlaps with R.
134 
135      Then we need to check if the I range overlaps the I range itself.
136      Then we also need to check whether the range at position I itself overlaps R.
137 
138               R
139               |---|
140        |---|    |---|  |-------| ... |--|
141        0        1      2             N
142 
143        I=1
144   */
145 
146 
147   auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
148 
149   if (i > ranges.begin ())
150     {
151       const struct range &bef = *(i - 1);
152 
153       if (ranges_overlap (bef.offset, bef.length, offset, length))
154 	return 1;
155     }
156 
157   if (i < ranges.end ())
158     {
159       const struct range &r = *i;
160 
161       if (ranges_overlap (r.offset, r.length, offset, length))
162 	return 1;
163     }
164 
165   return 0;
166 }
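
/* Illustrative sketch, not part of the original source and guarded
   out of the build: containment queries against the sorted, coalesced
   ranges [0,8) and [16,24).  */
#if 0
static void
example_ranges_contain (void)
{
  std::vector<range> rs;

  rs.push_back ({0, 8});
  rs.push_back ({16, 8});

  gdb_assert (ranges_contain (rs, 4, 2));	/* Inside [0,8).  */
  gdb_assert (ranges_contain (rs, 6, 12));	/* Straddles the gap, hits [0,8).  */
  gdb_assert (!ranges_contain (rs, 8, 8));	/* Exactly the gap [8,16).  */
}
#endif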
167 
168 static struct cmd_list_element *functionlist;
169 
170 /* Note that the fields in this structure are arranged to save a bit
171    of memory.  */
172 
173 struct value
174 {
175   explicit value (struct type *type_)
176     : modifiable (1),
177       lazy (1),
178       initialized (1),
179       stack (0),
180       type (type_),
181       enclosing_type (type_)
182   {
183   }
184 
185   ~value ()
186   {
187     if (VALUE_LVAL (this) == lval_computed)
188       {
189 	const struct lval_funcs *funcs = location.computed.funcs;
190 
191 	if (funcs->free_closure)
192 	  funcs->free_closure (this);
193       }
194     else if (VALUE_LVAL (this) == lval_xcallable)
195       delete location.xm_worker;
196   }
197 
198   DISABLE_COPY_AND_ASSIGN (value);
199 
200   /* Type of value; either not an lval, or one of the various
201      different possible kinds of lval.  */
202   enum lval_type lval = not_lval;
203 
204   /* Is it modifiable?  Only relevant if lval != not_lval.  */
205   unsigned int modifiable : 1;
206 
207   /* If zero, contents of this value are in the contents field.  If
208      nonzero, contents are in inferior.  If the lval field is lval_memory,
209      the contents are in inferior memory at location.address plus offset.
210      The lval field may also be lval_register.
211 
212      WARNING: This field is used by the code which handles watchpoints
213      (see breakpoint.c) to decide whether a particular value can be
214      watched by hardware watchpoints.  If the lazy flag is set for
215      some member of a value chain, it is assumed that this member of
216      the chain doesn't need to be watched as part of watching the
217      value itself.  This is how GDB avoids watching the entire struct
218      or array when the user wants to watch a single struct member or
219      array element.  If you ever change the way lazy flag is set and
220      reset, be sure to consider this use as well!  */
221   unsigned int lazy : 1;
222 
223   /* If value is a variable, is it initialized or not.  */
224   unsigned int initialized : 1;
225 
226   /* If value is from the stack.  If this is set, read_stack will be
227      used instead of read_memory to enable extra caching.  */
228   unsigned int stack : 1;
229 
230   /* Location of value (if lval).  */
231   union
232   {
233     /* If lval == lval_memory, this is the address in the inferior.  */
234     CORE_ADDR address;
235 
236     /* If lval == lval_register, the value is from a register.  */
237     struct
238     {
239       /* Register number.  */
240       int regnum;
241       /* Frame ID of "next" frame to which a register value is relative.
242 	 If the register value is found relative to frame F, then the
243 	 frame id of F->next will be stored in next_frame_id.  */
244       struct frame_id next_frame_id;
245     } reg;
246 
247     /* Pointer to internal variable.  */
248     struct internalvar *internalvar;
249 
250     /* Pointer to xmethod worker.  */
251     struct xmethod_worker *xm_worker;
252 
253     /* If lval == lval_computed, this is a set of function pointers
254        to use to access and describe the value, and a closure pointer
255        for them to use.  */
256     struct
257     {
258       /* Functions to call.  */
259       const struct lval_funcs *funcs;
260 
261       /* Closure for those functions to use.  */
262       void *closure;
263     } computed;
264   } location {};
265 
266   /* Describes offset of a value within lval of a structure in target
267      addressable memory units.  Note also the member embedded_offset
268      below.  */
269   LONGEST offset = 0;
270 
271   /* Only used for bitfields; number of bits contained in them.  */
272   LONGEST bitsize = 0;
273 
274   /* Only used for bitfields; position of start of field.  For
275      gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
276      gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
277   LONGEST bitpos = 0;
278 
279   /* The number of references to this value.  When a value is created,
280      the value chain holds a reference, so REFERENCE_COUNT is 1.  If
281      release_value is called, this value is removed from the chain but
282      the caller of release_value now has a reference to this value.
283      The caller must arrange for a call to value_free later.  */
284   int reference_count = 1;
285 
286   /* Only used for bitfields; the containing value.  This allows a
287      single read from the target when displaying multiple
288      bitfields.  */
289   value_ref_ptr parent;
290 
291   /* Type of the value.  */
292   struct type *type;
293 
294   /* If a value represents a C++ object, then the `type' field gives
295      the object's compile-time type.  If the object actually belongs
296      to some class derived from `type', perhaps with other base
297      classes and additional members, then `type' is just a subobject
298      of the real thing, and the full object is probably larger than
299      `type' would suggest.
300 
301      If `type' is a dynamic class (i.e. one with a vtable), then GDB
302      can actually determine the object's run-time type by looking at
303      the run-time type information in the vtable.  When this
304      information is available, we may elect to read in the entire
305      object, for several reasons:
306 
307      - When printing the value, the user would probably rather see the
308      full object, not just the limited portion apparent from the
309      compile-time type.
310 
311      - If `type' has virtual base classes, then even printing `type'
312      alone may require reaching outside the `type' portion of the
313      object to wherever the virtual base class has been stored.
314 
315      When we store the entire object, `enclosing_type' is the run-time
316      type -- the complete object -- and `embedded_offset' is the
317      offset of `type' within that larger type, in target addressable memory
318      units.  The value_contents() macro takes `embedded_offset' into account,
319      so most GDB code continues to see the `type' portion of the value, just
320      as the inferior would.
321 
322      If `type' is a pointer to an object, then `enclosing_type' is a
323      pointer to the object's run-time type, and `pointed_to_offset' is
324      the offset in target addressable memory units from the full object
325      to the pointed-to object -- that is, the value `embedded_offset' would
326      have if we followed the pointer and fetched the complete object.
327      (I don't really see the point.  Why not just determine the
328      run-time type when you indirect, and avoid the special case?  The
329      contents don't matter until you indirect anyway.)
330 
331      If we're not doing anything fancy, `enclosing_type' is equal to
332      `type', and `embedded_offset' is zero, so everything works
333      normally.  */
334   struct type *enclosing_type;
335   LONGEST embedded_offset = 0;
336   LONGEST pointed_to_offset = 0;
337 
338   /* Actual contents of the value.  Target byte-order.  NULL or not
339      valid if lazy is nonzero.  */
340   gdb::unique_xmalloc_ptr<gdb_byte> contents;
341 
342   /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
343      rather than available, since the common and default case is for a
344      value to be available.  This is filled in at value read time.
345      The unavailable ranges are tracked in bits.  Note that a contents
346      bit that has been optimized out doesn't really exist in the
347      program, so it can't be marked unavailable either.  */
348   std::vector<range> unavailable;
349 
350   /* Likewise, but for optimized out contents (a chunk of the value of
351      a variable that does not actually exist in the program).  If LVAL
352      is lval_register, this is a register ($pc, $sp, etc., never a
353      program variable) that has not been saved in the frame.  Not
354      saved registers and optimized-out program variables are
355      treated pretty much the same, except not-saved registers have a
356      different string representation and related error strings.  */
357   std::vector<range> optimized_out;
358 };
359 
360 /* See value.h.  */
361 
362 struct gdbarch *
363 get_value_arch (const struct value *value)
364 {
365   return get_type_arch (value_type (value));
366 }
367 
368 int
369 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
370 {
371   gdb_assert (!value->lazy);
372 
373   return !ranges_contain (value->unavailable, offset, length);
374 }
375 
376 int
377 value_bytes_available (const struct value *value,
378 		       LONGEST offset, LONGEST length)
379 {
380   return value_bits_available (value,
381 			       offset * TARGET_CHAR_BIT,
382 			       length * TARGET_CHAR_BIT);
383 }
384 
385 int
386 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
387 {
388   gdb_assert (!value->lazy);
389 
390   return ranges_contain (value->optimized_out, bit_offset, bit_length);
391 }
392 
393 int
394 value_entirely_available (struct value *value)
395 {
396   /* We can only tell whether the whole value is available when we try
397      to read it.  */
398   if (value->lazy)
399     value_fetch_lazy (value);
400 
401   if (value->unavailable.empty ())
402     return 1;
403   return 0;
404 }
405 
406 /* Returns true if VALUE is entirely covered by RANGES.  If the value
407    is lazy, it'll be read now.  Note that RANGES refers to one of
408    VALUE's own range vectors, so reading the value may modify it.  */
409 
410 static int
411 value_entirely_covered_by_range_vector (struct value *value,
412 					const std::vector<range> &ranges)
413 {
414   /* We can only tell whether the whole value is optimized out /
415      unavailable when we try to read it.  */
416   if (value->lazy)
417     value_fetch_lazy (value);
418 
419   if (ranges.size () == 1)
420     {
421       const struct range &t = ranges[0];
422 
423       if (t.offset == 0
424 	  && t.length == (TARGET_CHAR_BIT
425 			  * TYPE_LENGTH (value_enclosing_type (value))))
426 	return 1;
427     }
428 
429   return 0;
430 }
431 
432 int
433 value_entirely_unavailable (struct value *value)
434 {
435   return value_entirely_covered_by_range_vector (value, value->unavailable);
436 }
437 
438 int
439 value_entirely_optimized_out (struct value *value)
440 {
441   return value_entirely_covered_by_range_vector (value, value->optimized_out);
442 }
443 
444 /* Insert into the vector pointed to by VECTORP the bit range starting at
445    OFFSET bits, and extending for the next LENGTH bits.  */
446 
447 static void
448 insert_into_bit_range_vector (std::vector<range> *vectorp,
449 			      LONGEST offset, LONGEST length)
450 {
451   range newr;
452 
453   /* Insert the range sorted.  If there's overlap or the new range
454      would be contiguous with an existing range, merge.  */
455 
456   newr.offset = offset;
457   newr.length = length;
458 
459   /* Do a binary search for the position the given range would be
460      inserted if we only considered the starting OFFSET of ranges.
461      Call that position I.  Since we also have LENGTH to care for
462      (this is a range after all), we need to check if the _previous_
463      range overlaps the I range.  E.g., calling R the new range:
464 
465        #1 - overlaps with previous
466 
467 	   R
468 	   |-...-|
469 	 |---|     |---|  |------| ... |--|
470 	 0         1      2            N
471 
472 	 I=1
473 
474      In the case #1 above, the binary search would return `I=1',
475      meaning, this OFFSET should be inserted at position 1, and the
476      current position 1 should be pushed further (and become 2).  But,
477      note that `0' overlaps with R, so we want to merge them.
478 
479      A similar consideration needs to be taken if the new range would
480      be contiguous with the previous range:
481 
482        #2 - contiguous with previous
483 
484 	    R
485 	    |-...-|
486 	 |--|       |---|  |------| ... |--|
487 	 0          1      2            N
488 
489 	 I=1
490 
491      If there's no overlap with the previous range, as in:
492 
493        #3 - not overlapping and not contiguous
494 
495 	       R
496 	       |-...-|
497 	  |--|         |---|  |------| ... |--|
498 	  0            1      2            N
499 
500 	 I=1
501 
502      or if I is 0:
503 
504        #4 - R is the range with lowest offset
505 
506 	  R
507 	 |-...-|
508 	         |--|       |---|  |------| ... |--|
509 	         0          1      2            N
510 
511 	 I=0
512 
513      ... we just push the new range to I.
514 
515      All the 4 cases above need to consider that the new range may
516      also overlap several of the ranges that follow, or that R may be
517      contiguous with the following range, and merge.  E.g.,
518 
519        #5 - overlapping following ranges
520 
521 	  R
522 	 |------------------------|
523 	         |--|       |---|  |------| ... |--|
524 	         0          1      2            N
525 
526 	 I=0
527 
528        or:
529 
530 	    R
531 	    |-------|
532 	 |--|       |---|  |------| ... |--|
533 	 0          1      2            N
534 
535 	 I=1
536 
537   */
538 
539   auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
540   if (i > vectorp->begin ())
541     {
542       struct range &bef = *(i - 1);
543 
544       if (ranges_overlap (bef.offset, bef.length, offset, length))
545 	{
546 	  /* #1 */
547 	  ULONGEST l = std::min (bef.offset, offset);
548 	  ULONGEST h = std::max (bef.offset + bef.length, offset + length);
549 
550 	  bef.offset = l;
551 	  bef.length = h - l;
552 	  i--;
553 	}
554       else if (offset == bef.offset + bef.length)
555 	{
556 	  /* #2 */
557 	  bef.length += length;
558 	  i--;
559 	}
560       else
561 	{
562 	  /* #3 */
563 	  i = vectorp->insert (i, newr);
564 	}
565     }
566   else
567     {
568       /* #4 */
569       i = vectorp->insert (i, newr);
570     }
571 
572   /* Check whether the ranges following the one we've just added or
573      touched can be folded in (#5 above).  */
574   if (i != vectorp->end () && i + 1 < vectorp->end ())
575     {
576       int removed = 0;
577       auto next = i + 1;
578 
579       /* Get the range we just touched.  */
580       struct range &t = *i;
581       removed = 0;
582 
583       i = next;
584       for (; i < vectorp->end (); i++)
585 	{
586 	  struct range &r = *i;
587 	  if (r.offset <= t.offset + t.length)
588 	    {
589 	      ULONGEST l, h;
590 
591 	      l = std::min (t.offset, r.offset);
592 	      h = std::max (t.offset + t.length, r.offset + r.length);
593 
594 	      t.offset = l;
595 	      t.length = h - l;
596 
597 	      removed++;
598 	    }
599 	  else
600 	    {
601 	      /* If we couldn't merge this one, we won't be able to
602 		 merge following ones either, since the ranges are
603 		 always sorted by OFFSET.  */
604 	      break;
605 	    }
606 	}
607 
608       if (removed != 0)
609 	vectorp->erase (next, next + removed);
610     }
611 }
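
/* Illustrative sketch, not part of the original source and guarded
   out of the build: how successive insertions coalesce overlapping
   and contiguous ranges into a single entry.  */
#if 0
static void
example_insert_into_bit_range_vector (void)
{
  std::vector<range> v;

  insert_into_bit_range_vector (&v, 10, 5);	/* {[10,15)}  (case #4)  */
  insert_into_bit_range_vector (&v, 30, 5);	/* {[10,15), [30,35)}  (case #3)  */
  insert_into_bit_range_vector (&v, 15, 5);	/* {[10,20), [30,35)}  (case #2)  */
  insert_into_bit_range_vector (&v, 18, 14);	/* {[10,35)}  (cases #1 and #5)  */

  gdb_assert (v.size () == 1);
  gdb_assert (v[0].offset == 10 && v[0].length == 25);
}
#endif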
612 
613 void
614 mark_value_bits_unavailable (struct value *value,
615 			     LONGEST offset, LONGEST length)
616 {
617   insert_into_bit_range_vector (&value->unavailable, offset, length);
618 }
619 
620 void
621 mark_value_bytes_unavailable (struct value *value,
622 			      LONGEST offset, LONGEST length)
623 {
624   mark_value_bits_unavailable (value,
625 			       offset * TARGET_CHAR_BIT,
626 			       length * TARGET_CHAR_BIT);
627 }
628 
629 /* Find the first range in RANGES that overlaps the range defined by
630    OFFSET and LENGTH, starting at element POS in the RANGES vector.
631    Returns the index into RANGES where such an overlapping range was
632    found, or -1 if none was found.  */
633 
634 static int
635 find_first_range_overlap (const std::vector<range> *ranges, int pos,
636 			  LONGEST offset, LONGEST length)
637 {
638   int i;
639 
640   for (i = pos; i < ranges->size (); i++)
641     {
642       const range &r = (*ranges)[i];
643       if (ranges_overlap (r.offset, r.length, offset, length))
644 	return i;
645     }
646 
647   return -1;
648 }
649 
650 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
651    PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
652    return non-zero.
653 
654    It must always be the case that:
655      OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
656 
657    It is assumed that memory can be accessed from:
658      PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
659    to:
660      PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
661             / TARGET_CHAR_BIT)  */
662 static int
663 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
664 			 const gdb_byte *ptr2, size_t offset2_bits,
665 			 size_t length_bits)
666 {
667   gdb_assert (offset1_bits % TARGET_CHAR_BIT
668 	      == offset2_bits % TARGET_CHAR_BIT);
669 
670   if (offset1_bits % TARGET_CHAR_BIT != 0)
671     {
672       size_t bits;
673       gdb_byte mask, b1, b2;
674 
675       /* The offset from the base pointers PTR1 and PTR2 is not a complete
676 	 number of bytes.  A number of bits up to either the next exact
677 	 byte boundary, or LENGTH_BITS (whichever is sooner) will be
678 	 compared.  */
679       bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
680       gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
681       mask = (1 << bits) - 1;
682 
683       if (length_bits < bits)
684 	{
685 	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
686 	  bits = length_bits;
687 	}
688 
689       /* Now load the two bytes and mask off the bits we care about.  */
690       b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
691       b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
692 
693       if (b1 != b2)
694 	return 1;
695 
696       /* Now update the length and offsets to take account of the bits
697 	 we've just compared.  */
698       length_bits -= bits;
699       offset1_bits += bits;
700       offset2_bits += bits;
701     }
702 
703   if (length_bits % TARGET_CHAR_BIT != 0)
704     {
705       size_t bits;
706       size_t o1, o2;
707       gdb_byte mask, b1, b2;
708 
709       /* The length is not an exact number of bytes.  After the previous
710 	 if-block either the offsets are byte aligned, or the
711 	 length is zero (in which case this code is not reached).  Compare
712 	 a number of bits at the end of the region, starting from an exact
713 	 byte boundary.  */
714       bits = length_bits % TARGET_CHAR_BIT;
715       o1 = offset1_bits + length_bits - bits;
716       o2 = offset2_bits + length_bits - bits;
717 
718       gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
719       mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
720 
721       gdb_assert (o1 % TARGET_CHAR_BIT == 0);
722       gdb_assert (o2 % TARGET_CHAR_BIT == 0);
723 
724       b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
725       b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
726 
727       if (b1 != b2)
728 	return 1;
729 
730       length_bits -= bits;
731     }
732 
733   if (length_bits > 0)
734     {
735       /* We've now taken care of any stray "bits" at the start or end of
736 	 the region to compare; the remainder can be covered with a simple
737 	 memcmp.  */
738       gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
739       gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
740       gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
741 
742       return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
743 		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
744 		     length_bits / TARGET_CHAR_BIT);
745     }
746 
747   /* Length is zero, regions match.  */
748   return 0;
749 }
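
/* Illustrative sketch, not part of the original source and guarded
   out of the build: bit offsets count from the most significant bit
   of each byte, so the two nibbles of a byte can be compared
   independently.  */
#if 0
static void
example_memcmp_with_bit_offsets (void)
{
  const gdb_byte a[] = { 0x12 };	/* 0001 0010 */
  const gdb_byte b[] = { 0x1f };	/* 0001 1111 */

  /* High nibbles (bits [0,4)) are both 0x1: equal.  */
  gdb_assert (memcmp_with_bit_offsets (a, 0, b, 0, 4) == 0);

  /* Low nibbles (bits [4,8)) differ: 0x2 vs 0xf.  */
  gdb_assert (memcmp_with_bit_offsets (a, 4, b, 4, 4) != 0);
}
#endif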
750 
751 /* Helper struct for find_first_range_overlap_and_match and
752    value_contents_bits_eq.  Keep track of which slot of a given ranges
753    vector we last looked at.  */
754 
755 struct ranges_and_idx
756 {
757   /* The ranges.  */
758   const std::vector<range> *ranges;
759 
760   /* The range we've last found in RANGES.  Given ranges are sorted,
761      we can start the next lookup here.  */
762   int idx;
763 };
764 
765 /* Helper function for value_contents_bits_eq.  Compare LENGTH bits of
766    RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
767    ranges starting at OFFSET2 bits.  Return true if the ranges match
768    and fill in *L and *H with the overlapping window relative to
769    (both) OFFSET1 or OFFSET2.  */
770 
771 static int
772 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
773 				    struct ranges_and_idx *rp2,
774 				    LONGEST offset1, LONGEST offset2,
775 				    LONGEST length, ULONGEST *l, ULONGEST *h)
776 {
777   rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
778 				       offset1, length);
779   rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
780 				       offset2, length);
781 
782   if (rp1->idx == -1 && rp2->idx == -1)
783     {
784       *l = length;
785       *h = length;
786       return 1;
787     }
788   else if (rp1->idx == -1 || rp2->idx == -1)
789     return 0;
790   else
791     {
792       const range *r1, *r2;
793       ULONGEST l1, h1;
794       ULONGEST l2, h2;
795 
796       r1 = &(*rp1->ranges)[rp1->idx];
797       r2 = &(*rp2->ranges)[rp2->idx];
798 
799       /* Get the unavailable windows intersected by the incoming
800 	 ranges.  The first and last ranges that overlap the argument
801 	 range may be wider than said incoming argument ranges.  */
802       l1 = std::max (offset1, r1->offset);
803       h1 = std::min (offset1 + length, r1->offset + r1->length);
804 
805       l2 = std::max (offset2, r2->offset);
806       h2 = std::min (offset2 + length, r2->offset + r2->length);
807 
808       /* Make them relative to the respective start offsets, so we can
809 	 compare them for equality.  */
810       l1 -= offset1;
811       h1 -= offset1;
812 
813       l2 -= offset2;
814       h2 -= offset2;
815 
816       /* Different ranges, no match.  */
817       if (l1 != l2 || h1 != h2)
818 	return 0;
819 
820       *h = h1;
821       *l = l1;
822       return 1;
823     }
824 }
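
/* Illustrative sketch, not part of the original source and guarded
   out of the build: when both sides have the unavailable window
   [8,16) and we compare bits [8,24), the windows match and [L,H) is
   reported relative to the start of the compared region.  */
#if 0
static void
example_find_first_range_overlap_and_match (void)
{
  std::vector<range> ra, rb;

  ra.push_back ({8, 8});
  rb.push_back ({8, 8});

  struct ranges_and_idx rpa = { &ra, 0 };
  struct ranges_and_idx rpb = { &rb, 0 };
  ULONGEST l, h;

  gdb_assert (find_first_range_overlap_and_match (&rpa, &rpb,
						  8, 8, 16, &l, &h));
  gdb_assert (l == 0 && h == 8);
}
#endif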
825 
826 /* Helper function for value_contents_eq.  The only difference is that
827    this function is bit rather than byte based.
828 
829    Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
830    with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
831    Return true if the available bits match.  */
832 
833 static bool
834 value_contents_bits_eq (const struct value *val1, int offset1,
835 			const struct value *val2, int offset2,
836 			int length)
837 {
838   /* Each array element corresponds to a ranges source (unavailable,
839      optimized out).  '1' is for VAL1, '2' for VAL2.  */
840   struct ranges_and_idx rp1[2], rp2[2];
841 
842   /* See function description in value.h.  */
843   gdb_assert (!val1->lazy && !val2->lazy);
844 
845   /* We shouldn't be trying to compare past the end of the values.  */
846   gdb_assert (offset1 + length
847 	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
848   gdb_assert (offset2 + length
849 	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
850 
851   memset (&rp1, 0, sizeof (rp1));
852   memset (&rp2, 0, sizeof (rp2));
853   rp1[0].ranges = &val1->unavailable;
854   rp2[0].ranges = &val2->unavailable;
855   rp1[1].ranges = &val1->optimized_out;
856   rp2[1].ranges = &val2->optimized_out;
857 
858   while (length > 0)
859     {
860       ULONGEST l = 0, h = 0; /* init for gcc -Wall */
861       int i;
862 
863       for (i = 0; i < 2; i++)
864 	{
865 	  ULONGEST l_tmp, h_tmp;
866 
867 	  /* The contents only compare equal if the invalid/unavailable
868 	     contents ranges match as well.  */
869 	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
870 						   offset1, offset2, length,
871 						   &l_tmp, &h_tmp))
872 	    return false;
873 
874 	  /* We're interested in the lowest/first range found.  */
875 	  if (i == 0 || l_tmp < l)
876 	    {
877 	      l = l_tmp;
878 	      h = h_tmp;
879 	    }
880 	}
881 
882       /* Compare the available/valid contents.  */
883       if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
884 				   val2->contents.get (), offset2, l) != 0)
885 	return false;
886 
887       length -= h;
888       offset1 += h;
889       offset2 += h;
890     }
891 
892   return true;
893 }
894 
895 bool
896 value_contents_eq (const struct value *val1, LONGEST offset1,
897 		   const struct value *val2, LONGEST offset2,
898 		   LONGEST length)
899 {
900   return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
901 				 val2, offset2 * TARGET_CHAR_BIT,
902 				 length * TARGET_CHAR_BIT);
903 }
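
/* Illustrative sketch, not part of the original source and guarded
   out of the build: values compare equal only if both their valid
   contents and their unavailable/optimized-out ranges line up.
   INT_TYPE stands for any integer type the caller already has.  */
#if 0
static void
example_value_contents_eq (struct type *int_type)
{
  struct value *v1 = allocate_value (int_type);
  struct value *v2 = allocate_value (int_type);

  /* Freshly allocated contents are zero-filled, so they compare equal.  */
  gdb_assert (value_contents_eq (v1, 0, v2, 0, TYPE_LENGTH (int_type)));

  /* Mark one byte of V1 unavailable; the range metadata no longer
     matches, so the values are no longer considered equal.  */
  mark_value_bytes_unavailable (v1, 0, 1);
  gdb_assert (!value_contents_eq (v1, 0, v2, 0, TYPE_LENGTH (int_type)));
}
#endif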
904 
905 
906 /* The value-history records all the values printed by print commands
907    during this session.  */
908 
909 static std::vector<value_ref_ptr> value_history;
910 
911 
912 /* List of all value objects currently allocated
913    (except for those released by calls to release_value).
914    This is so they can be freed after each command.  */
915 
916 static std::vector<value_ref_ptr> all_values;
917 
918 /* Allocate a lazy value for type TYPE.  Its actual content is
919    "lazily" allocated too: the content field of the return value is
920    NULL; it will be allocated when it is fetched from the target.  */
921 
922 struct value *
923 allocate_value_lazy (struct type *type)
924 {
925   struct value *val;
926 
927   /* Call check_typedef on our type to make sure that, if TYPE
928      is a TYPE_CODE_TYPEDEF, its length is set to the length
929      of the target type instead of zero.  However, we do not
930      replace the typedef type by the target type, because we want
931      to keep the typedef in order to be able to set the VAL's type
932      description correctly.  */
933   check_typedef (type);
934 
935   val = new struct value (type);
936 
937   /* Values start out on the all_values chain.  */
938   all_values.emplace_back (val);
939 
940   return val;
941 }
942 
943 /* The maximum size, in bytes, that GDB will try to allocate for a value.
944    The initial value of 64k was not selected for any specific reason; it is
945    just a reasonable starting point.  */
946 
947 static int max_value_size = 65536; /* 64k bytes */
948 
949 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
950    LONGEST, otherwise GDB will not be able to parse integer values from the
951    CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
952    be unable to parse "set max-value-size 2".
953 
954    As we want a consistent GDB experience across hosts with different sizes
955    of LONGEST, this arbitrary minimum value was selected; so long as it
956    is bigger than LONGEST on all GDB-supported hosts, we're fine.  */
957 
958 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
959 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
960 
961 /* Implement the "set max-value-size" command.  */
962 
963 static void
964 set_max_value_size (const char *args, int from_tty,
965 		    struct cmd_list_element *c)
966 {
967   gdb_assert (max_value_size == -1 || max_value_size >= 0);
968 
969   if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
970     {
971       max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
972       error (_("max-value-size set too low, increasing to %d bytes"),
973 	     max_value_size);
974     }
975 }
976 
977 /* Implement the "show max-value-size" command.  */
978 
979 static void
980 show_max_value_size (struct ui_file *file, int from_tty,
981 		     struct cmd_list_element *c, const char *value)
982 {
983   if (max_value_size == -1)
984     fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
985   else
986     fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
987 		      max_value_size);
988 }
989 
990 /* Called before we attempt to allocate or reallocate a buffer for the
991    contents of a value.  TYPE is the type of the value for which we are
992    allocating the buffer.  If the buffer is too large (based on the user
993    controllable setting) then throw an error.  If this function returns
994    then we should attempt to allocate the buffer.  */
995 
996 static void
997 check_type_length_before_alloc (const struct type *type)
998 {
999   unsigned int length = TYPE_LENGTH (type);
1000 
1001   if (max_value_size > -1 && length > max_value_size)
1002     {
1003       if (TYPE_NAME (type) != NULL)
1004 	error (_("value of type `%s' requires %u bytes, which is more "
1005 		 "than max-value-size"), TYPE_NAME (type), length);
1006       else
1007 	error (_("value requires %u bytes, which is more than "
1008 		 "max-value-size"), length);
1009     }
1010 }
1011 
1012 /* Allocate the contents of VAL if it has not been allocated yet.  */
1013 
1014 static void
1015 allocate_value_contents (struct value *val)
1016 {
1017   if (!val->contents)
1018     {
1019       check_type_length_before_alloc (val->enclosing_type);
1020       val->contents.reset
1021 	((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)));
1022     }
1023 }
1024 
1025 /* Allocate a value and its contents for type TYPE.  */
1026 
1027 struct value *
1028 allocate_value (struct type *type)
1029 {
1030   struct value *val = allocate_value_lazy (type);
1031 
1032   allocate_value_contents (val);
1033   val->lazy = 0;
1034   return val;
1035 }
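
/* Illustrative sketch, not part of the original source and guarded
   out of the build: the difference between the lazy and non-lazy
   allocators.  TYPE stands for any type the caller already has.  */
#if 0
static void
example_allocate_value (struct type *type)
{
  struct value *lazy_val = allocate_value_lazy (type);	/* No buffer yet.  */
  struct value *eager_val = allocate_value (type);	/* Zero-filled buffer.  */

  gdb_assert (value_lazy (lazy_val));
  gdb_assert (!value_lazy (eager_val));
}
#endif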
1036 
1037 /* Allocate a value that has the correct length
1038    for COUNT repetitions of type TYPE.  */
1039 
1040 struct value *
1041 allocate_repeat_value (struct type *type, int count)
1042 {
1043   int low_bound = current_language->string_lower_bound;		/* ??? */
1044   /* FIXME-type-allocation: need a way to free this type when we are
1045      done with it.  */
1046   struct type *array_type
1047     = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1048 
1049   return allocate_value (array_type);
1050 }
1051 
1052 struct value *
1053 allocate_computed_value (struct type *type,
1054                          const struct lval_funcs *funcs,
1055                          void *closure)
1056 {
1057   struct value *v = allocate_value_lazy (type);
1058 
1059   VALUE_LVAL (v) = lval_computed;
1060   v->location.computed.funcs = funcs;
1061   v->location.computed.closure = closure;
1062 
1063   return v;
1064 }
1065 
1066 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  */
1067 
1068 struct value *
1069 allocate_optimized_out_value (struct type *type)
1070 {
1071   struct value *retval = allocate_value_lazy (type);
1072 
1073   mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1074   set_value_lazy (retval, 0);
1075   return retval;
1076 }
1077 
1078 /* Accessor methods.  */
1079 
1080 struct type *
1081 value_type (const struct value *value)
1082 {
1083   return value->type;
1084 }
1085 void
1086 deprecated_set_value_type (struct value *value, struct type *type)
1087 {
1088   value->type = type;
1089 }
1090 
1091 LONGEST
1092 value_offset (const struct value *value)
1093 {
1094   return value->offset;
1095 }
1096 void
1097 set_value_offset (struct value *value, LONGEST offset)
1098 {
1099   value->offset = offset;
1100 }
1101 
1102 LONGEST
1103 value_bitpos (const struct value *value)
1104 {
1105   return value->bitpos;
1106 }
1107 void
1108 set_value_bitpos (struct value *value, LONGEST bit)
1109 {
1110   value->bitpos = bit;
1111 }
1112 
1113 LONGEST
1114 value_bitsize (const struct value *value)
1115 {
1116   return value->bitsize;
1117 }
1118 void
1119 set_value_bitsize (struct value *value, LONGEST bit)
1120 {
1121   value->bitsize = bit;
1122 }
1123 
1124 struct value *
1125 value_parent (const struct value *value)
1126 {
1127   return value->parent.get ();
1128 }
1129 
1130 /* See value.h.  */
1131 
1132 void
1133 set_value_parent (struct value *value, struct value *parent)
1134 {
1135   value->parent = value_ref_ptr::new_reference (parent);
1136 }
1137 
1138 gdb_byte *
1139 value_contents_raw (struct value *value)
1140 {
1141   struct gdbarch *arch = get_value_arch (value);
1142   int unit_size = gdbarch_addressable_memory_unit_size (arch);
1143 
1144   allocate_value_contents (value);
1145   return value->contents.get () + value->embedded_offset * unit_size;
1146 }
1147 
1148 gdb_byte *
1149 value_contents_all_raw (struct value *value)
1150 {
1151   allocate_value_contents (value);
1152   return value->contents.get ();
1153 }
1154 
1155 struct type *
1156 value_enclosing_type (const struct value *value)
1157 {
1158   return value->enclosing_type;
1159 }
1160 
1161 /* Look at value.h for description.  */
1162 
1163 struct type *
1164 value_actual_type (struct value *value, int resolve_simple_types,
1165 		   int *real_type_found)
1166 {
1167   struct value_print_options opts;
1168   struct type *result;
1169 
1170   get_user_print_options (&opts);
1171 
1172   if (real_type_found)
1173     *real_type_found = 0;
1174   result = value_type (value);
1175   if (opts.objectprint)
1176     {
1177       /* If result's target type is TYPE_CODE_STRUCT, proceed to
1178 	 fetch its rtti type.  */
1179       if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
1180 	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1181 	     == TYPE_CODE_STRUCT
1182 	  && !value_optimized_out (value))
1183         {
1184           struct type *real_type;
1185 
1186           real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1187           if (real_type)
1188             {
1189               if (real_type_found)
1190                 *real_type_found = 1;
1191               result = real_type;
1192             }
1193         }
1194       else if (resolve_simple_types)
1195         {
1196           if (real_type_found)
1197             *real_type_found = 1;
1198           result = value_enclosing_type (value);
1199         }
1200     }
1201 
1202   return result;
1203 }
1204 
1205 void
1206 error_value_optimized_out (void)
1207 {
1208   error (_("value has been optimized out"));
1209 }
1210 
1211 static void
1212 require_not_optimized_out (const struct value *value)
1213 {
1214   if (!value->optimized_out.empty ())
1215     {
1216       if (value->lval == lval_register)
1217 	error (_("register has not been saved in frame"));
1218       else
1219 	error_value_optimized_out ();
1220     }
1221 }
1222 
1223 static void
1224 require_available (const struct value *value)
1225 {
1226   if (!value->unavailable.empty ())
1227     throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1228 }
1229 
1230 const gdb_byte *
1231 value_contents_for_printing (struct value *value)
1232 {
1233   if (value->lazy)
1234     value_fetch_lazy (value);
1235   return value->contents.get ();
1236 }
1237 
1238 const gdb_byte *
1239 value_contents_for_printing_const (const struct value *value)
1240 {
1241   gdb_assert (!value->lazy);
1242   return value->contents.get ();
1243 }
1244 
1245 const gdb_byte *
1246 value_contents_all (struct value *value)
1247 {
1248   const gdb_byte *result = value_contents_for_printing (value);
1249   require_not_optimized_out (value);
1250   require_available (value);
1251   return result;
1252 }
1253 
1254 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1255    SRC_BIT_OFFSET+BIT_LENGTH) into *DST_RANGE, adjusted.  */
1256 
1257 static void
1258 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1259 		      const std::vector<range> &src_range, int src_bit_offset,
1260 		      int bit_length)
1261 {
1262   for (const range &r : src_range)
1263     {
1264       ULONGEST h, l;
1265 
1266       l = std::max (r.offset, (LONGEST) src_bit_offset);
1267       h = std::min (r.offset + r.length,
1268 		    (LONGEST) src_bit_offset + bit_length);
1269 
1270       if (l < h)
1271 	insert_into_bit_range_vector (dst_range,
1272 				      dst_bit_offset + (l - src_bit_offset),
1273 				      h - l);
1274     }
1275 }
1276 
1277 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1278    SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  */
1279 
1280 static void
1281 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1282 			    const struct value *src, int src_bit_offset,
1283 			    int bit_length)
1284 {
1285   ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1286 			src->unavailable, src_bit_offset,
1287 			bit_length);
1288   ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1289 			src->optimized_out, src_bit_offset,
1290 			bit_length);
1291 }
1292 
1293 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1294    (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1295    contents, starting at DST_OFFSET.  If unavailable contents are
1296    being copied from SRC, the corresponding DST contents are marked
1297    unavailable accordingly.  Neither DST nor SRC may be lazy
1298    values.
1299 
1300    It is assumed the contents of DST in the [DST_OFFSET,
1301    DST_OFFSET+LENGTH) range are wholly available.  */
1302 
1303 void
1304 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1305 			 struct value *src, LONGEST src_offset, LONGEST length)
1306 {
1307   LONGEST src_bit_offset, dst_bit_offset, bit_length;
1308   struct gdbarch *arch = get_value_arch (src);
1309   int unit_size = gdbarch_addressable_memory_unit_size (arch);
1310 
1311   /* A lazy DST would make this copy operation useless, since as
1312      soon as DST's contents were un-lazied (by a later value_contents
1313      call, say), the contents would be overwritten.  A lazy SRC would
1314      mean we'd be copying garbage.  */
1315   gdb_assert (!dst->lazy && !src->lazy);
1316 
1317   /* The overwritten DST range gets unavailability ORed in, not
1318      replaced.  Remember to implement replacing if that ever turns
1319      out to be necessary.  */
1320   gdb_assert (value_bytes_available (dst, dst_offset, length));
1321   gdb_assert (!value_bits_any_optimized_out (dst,
1322 					     TARGET_CHAR_BIT * dst_offset,
1323 					     TARGET_CHAR_BIT * length));
1324 
1325   /* Copy the data.  */
1326   memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1327 	  value_contents_all_raw (src) + src_offset * unit_size,
1328 	  length * unit_size);
1329 
1330   /* Copy the meta-data, adjusted.  */
1331   src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1332   dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1333   bit_length = length * unit_size * HOST_CHAR_BIT;
1334 
1335   value_ranges_copy_adjusted (dst, dst_bit_offset,
1336 			      src, src_bit_offset,
1337 			      bit_length);
1338 }
1339 
1340 /* Copy LENGTH bytes of SRC value's (all) contents
1341    (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1342    (all) contents, starting at DST_OFFSET.  If unavailable contents
1343    are being copied from SRC, the corresponding DST contents are
1344    marked unavailable accordingly.  DST must not be lazy.  If SRC is
1345    lazy, it will be fetched now.
1346 
1347    It is assumed the contents of DST in the [DST_OFFSET,
1348    DST_OFFSET+LENGTH) range are wholly available.  */
1349 
1350 void
1351 value_contents_copy (struct value *dst, LONGEST dst_offset,
1352 		     struct value *src, LONGEST src_offset, LONGEST length)
1353 {
1354   if (src->lazy)
1355     value_fetch_lazy (src);
1356 
1357   value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1358 }
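
/* Illustrative sketch, not part of the original source and guarded
   out of the build: copying contents also copies the unavailability
   metadata, shifted to the destination offset.  TYPE stands for any
   type of at least two bytes that the caller already has.  */
#if 0
static void
example_value_contents_copy (struct type *type)
{
  struct value *src = allocate_value (type);
  struct value *dst = allocate_value (type);

  mark_value_bytes_unavailable (src, 0, 1);
  value_contents_copy (dst, 1, src, 0, 1);

  /* DST's byte 1 is now marked unavailable, mirroring SRC's byte 0.  */
  gdb_assert (!value_bytes_available (dst, 1, 1));
  gdb_assert (value_bytes_available (dst, 0, 1));
}
#endif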
1359 
1360 int
1361 value_lazy (const struct value *value)
1362 {
1363   return value->lazy;
1364 }
1365 
1366 void
1367 set_value_lazy (struct value *value, int val)
1368 {
1369   value->lazy = val;
1370 }
1371 
1372 int
1373 value_stack (const struct value *value)
1374 {
1375   return value->stack;
1376 }
1377 
1378 void
1379 set_value_stack (struct value *value, int val)
1380 {
1381   value->stack = val;
1382 }
1383 
1384 const gdb_byte *
1385 value_contents (struct value *value)
1386 {
1387   const gdb_byte *result = value_contents_writeable (value);
1388   require_not_optimized_out (value);
1389   require_available (value);
1390   return result;
1391 }
1392 
1393 gdb_byte *
1394 value_contents_writeable (struct value *value)
1395 {
1396   if (value->lazy)
1397     value_fetch_lazy (value);
1398   return value_contents_raw (value);
1399 }
1400 
1401 int
1402 value_optimized_out (struct value *value)
1403 {
1404   /* We can only know if a value is optimized out once we have tried to
1405      fetch it.  */
1406   if (value->optimized_out.empty () && value->lazy)
1407     {
1408       TRY
1409 	{
1410 	  value_fetch_lazy (value);
1411 	}
1412       CATCH (ex, RETURN_MASK_ERROR)
1413 	{
1414 	  /* Fall back to checking value->optimized_out.  */
1415 	}
1416       END_CATCH
1417     }
1418 
1419   return !value->optimized_out.empty ();
1420 }
1421 
1422 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1423    the following LENGTH bytes.  */
1424 
1425 void
1426 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1427 {
1428   mark_value_bits_optimized_out (value,
1429 				 offset * TARGET_CHAR_BIT,
1430 				 length * TARGET_CHAR_BIT);
1431 }
1432 
1433 /* See value.h.  */
1434 
1435 void
1436 mark_value_bits_optimized_out (struct value *value,
1437 			       LONGEST offset, LONGEST length)
1438 {
1439   insert_into_bit_range_vector (&value->optimized_out, offset, length);
1440 }
1441 
1442 int
1443 value_bits_synthetic_pointer (const struct value *value,
1444 			      LONGEST offset, LONGEST length)
1445 {
1446   if (value->lval != lval_computed
1447       || !value->location.computed.funcs->check_synthetic_pointer)
1448     return 0;
1449   return value->location.computed.funcs->check_synthetic_pointer (value,
1450 								  offset,
1451 								  length);
1452 }
1453 
1454 LONGEST
1455 value_embedded_offset (const struct value *value)
1456 {
1457   return value->embedded_offset;
1458 }
1459 
1460 void
1461 set_value_embedded_offset (struct value *value, LONGEST val)
1462 {
1463   value->embedded_offset = val;
1464 }
1465 
1466 LONGEST
1467 value_pointed_to_offset (const struct value *value)
1468 {
1469   return value->pointed_to_offset;
1470 }
1471 
1472 void
1473 set_value_pointed_to_offset (struct value *value, LONGEST val)
1474 {
1475   value->pointed_to_offset = val;
1476 }
1477 
1478 const struct lval_funcs *
1479 value_computed_funcs (const struct value *v)
1480 {
1481   gdb_assert (value_lval_const (v) == lval_computed);
1482 
1483   return v->location.computed.funcs;
1484 }
1485 
1486 void *
1487 value_computed_closure (const struct value *v)
1488 {
1489   gdb_assert (v->lval == lval_computed);
1490 
1491   return v->location.computed.closure;
1492 }
1493 
1494 enum lval_type *
1495 deprecated_value_lval_hack (struct value *value)
1496 {
1497   return &value->lval;
1498 }
1499 
1500 enum lval_type
1501 value_lval_const (const struct value *value)
1502 {
1503   return value->lval;
1504 }
1505 
1506 CORE_ADDR
1507 value_address (const struct value *value)
1508 {
1509   if (value->lval != lval_memory)
1510     return 0;
1511   if (value->parent != NULL)
1512     return value_address (value->parent.get ()) + value->offset;
1513   if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1514     {
1515       gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1516       return TYPE_DATA_LOCATION_ADDR (value_type (value));
1517     }
1518 
1519   return value->location.address + value->offset;
1520 }
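
/* Illustrative sketch, not part of the original source and guarded
   out of the build: an lval_memory value made with value_at_lazy
   (declared in value.h) reports the address it was created with,
   assuming TYPE has no dynamic data location.  */
#if 0
static void
example_value_address (struct type *type)
{
  struct value *v = value_at_lazy (type, 0x1000);

  gdb_assert (VALUE_LVAL (v) == lval_memory);
  gdb_assert (value_address (v) == 0x1000);
}
#endif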
1521 
1522 CORE_ADDR
1523 value_raw_address (const struct value *value)
1524 {
1525   if (value->lval != lval_memory)
1526     return 0;
1527   return value->location.address;
1528 }
1529 
1530 void
1531 set_value_address (struct value *value, CORE_ADDR addr)
1532 {
1533   gdb_assert (value->lval == lval_memory);
1534   value->location.address = addr;
1535 }
1536 
1537 struct internalvar **
1538 deprecated_value_internalvar_hack (struct value *value)
1539 {
1540   return &value->location.internalvar;
1541 }
1542 
1543 struct frame_id *
1544 deprecated_value_next_frame_id_hack (struct value *value)
1545 {
1546   gdb_assert (value->lval == lval_register);
1547   return &value->location.reg.next_frame_id;
1548 }
1549 
1550 int *
1551 deprecated_value_regnum_hack (struct value *value)
1552 {
1553   gdb_assert (value->lval == lval_register);
1554   return &value->location.reg.regnum;
1555 }
1556 
1557 int
1558 deprecated_value_modifiable (const struct value *value)
1559 {
1560   return value->modifiable;
1561 }
1562 
1563 /* Return a mark in the value chain.  All values allocated after the
1564    mark is obtained (except for those released) are subject to being freed
1565    if a subsequent value_free_to_mark is passed the mark.  */
1566 struct value *
1567 value_mark (void)
1568 {
1569   if (all_values.empty ())
1570     return nullptr;
1571   return all_values.back ().get ();
1572 }
1573 
1574 /* See value.h.  */
1575 
1576 void
1577 value_incref (struct value *val)
1578 {
1579   val->reference_count++;
1580 }
1581 
1582 /* Release a reference to VAL, which was acquired with value_incref.
1583    This function is also called to deallocate values from the value
1584    chain.  */
1585 
1586 void
1587 value_decref (struct value *val)
1588 {
1589   if (val != nullptr)
1590     {
1591       gdb_assert (val->reference_count > 0);
1592       val->reference_count--;
1593       if (val->reference_count == 0)
1594 	delete val;
1595     }
1596 }
1597 
1598 /* Free all values allocated since MARK was obtained by value_mark
1599    (except for those released).  */
1600 void
1601 value_free_to_mark (const struct value *mark)
1602 {
1603   auto iter = std::find (all_values.begin (), all_values.end (), mark);
1604   if (iter == all_values.end ())
1605     all_values.clear ();
1606   else
1607     all_values.erase (iter + 1, all_values.end ());
1608 }
1609 
1610 /* Remove VAL from the chain all_values
1611    so it will not be freed automatically.  */
1612 
1613 value_ref_ptr
1614 release_value (struct value *val)
1615 {
1616   if (val == nullptr)
1617     return value_ref_ptr ();
1618 
1619   std::vector<value_ref_ptr>::reverse_iterator iter;
1620   for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1621     {
1622       if (*iter == val)
1623 	{
1624 	  value_ref_ptr result = *iter;
1625 	  all_values.erase (iter.base () - 1);
1626 	  return result;
1627 	}
1628     }
1629 
1630   /* We must always return an owned reference.  Normally this happens
1631      because we transfer the reference from the value chain, but in
1632      this case the value was not on the chain.  */
1633   return value_ref_ptr::new_reference (val);
1634 }
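
/* Illustrative sketch, not part of the original source and guarded
   out of the build: the usual lifecycle of a value that must outlive
   the current command.  TYPE stands for any type the caller already
   has.  */
#if 0
static void
example_release_value (struct type *type)
{
  struct value *mark = value_mark ();
  struct value *val = allocate_value (type);

  /* Take over the chain's reference so VAL survives the cleanup.  */
  value_ref_ptr ref = release_value (val);

  /* Frees everything still on the chain past MARK; VAL is unaffected
     because it is no longer on the chain.  */
  value_free_to_mark (mark);

  /* VAL is destroyed when REF goes out of scope.  */
}
#endif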
1635 
1636 /* See value.h.  */
1637 
1638 std::vector<value_ref_ptr>
1639 value_release_to_mark (const struct value *mark)
1640 {
1641   std::vector<value_ref_ptr> result;
1642 
1643   auto iter = std::find (all_values.begin (), all_values.end (), mark);
1644   if (iter == all_values.end ())
1645     std::swap (result, all_values);
1646   else
1647     {
1648       std::move (iter + 1, all_values.end (), std::back_inserter (result));
1649       all_values.erase (iter + 1, all_values.end ());
1650     }
1651   std::reverse (result.begin (), result.end ());
1652   return result;
1653 }
1654 
1655 /* Return a copy of the value ARG.
1656    It contains the same contents, for same memory address,
1657    but it's a different block of storage.  */
1658 
1659 struct value *
1660 value_copy (struct value *arg)
1661 {
1662   struct type *encl_type = value_enclosing_type (arg);
1663   struct value *val;
1664 
1665   if (value_lazy (arg))
1666     val = allocate_value_lazy (encl_type);
1667   else
1668     val = allocate_value (encl_type);
1669   val->type = arg->type;
1670   VALUE_LVAL (val) = VALUE_LVAL (arg);
1671   val->location = arg->location;
1672   val->offset = arg->offset;
1673   val->bitpos = arg->bitpos;
1674   val->bitsize = arg->bitsize;
1675   val->lazy = arg->lazy;
1676   val->embedded_offset = value_embedded_offset (arg);
1677   val->pointed_to_offset = arg->pointed_to_offset;
1678   val->modifiable = arg->modifiable;
1679   if (!value_lazy (val))
1680     {
1681       memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1682 	      TYPE_LENGTH (value_enclosing_type (arg)));
1683 
1684     }
1685   val->unavailable = arg->unavailable;
1686   val->optimized_out = arg->optimized_out;
1687   val->parent = arg->parent;
1688   if (VALUE_LVAL (val) == lval_computed)
1689     {
1690       const struct lval_funcs *funcs = val->location.computed.funcs;
1691 
1692       if (funcs->copy_closure)
1693         val->location.computed.closure = funcs->copy_closure (val);
1694     }
1695   return val;
1696 }
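
/* Illustrative sketch, not part of the original source and guarded
   out of the build: a copy owns its own storage, so writing to it
   leaves the original untouched.  TYPE stands for any type the
   caller already has.  */
#if 0
static void
example_value_copy (struct type *type)
{
  struct value *orig = allocate_value (type);
  struct value *copy = value_copy (orig);

  value_contents_writeable (copy)[0] = 0xff;
  gdb_assert (value_contents (orig)[0] == 0);
}
#endif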
1697 
1698 /* Return a "const" and/or "volatile" qualified version of the value V.
1699    If CNST is true, then the returned value will be qualified with
1700    "const".
1701    if VOLTL is true, then the returned value will be qualified with
1702    "volatile".  */
1703 
1704 struct value *
1705 make_cv_value (int cnst, int voltl, struct value *v)
1706 {
1707   struct type *val_type = value_type (v);
1708   struct type *enclosing_type = value_enclosing_type (v);
1709   struct value *cv_val = value_copy (v);
1710 
1711   deprecated_set_value_type (cv_val,
1712 			     make_cv_type (cnst, voltl, val_type, NULL));
1713   set_value_enclosing_type (cv_val,
1714 			    make_cv_type (cnst, voltl, enclosing_type, NULL));
1715 
1716   return cv_val;
1717 }
1718 
1719 /* Return a version of ARG that is non-lvalue.  */
1720 
1721 struct value *
1722 value_non_lval (struct value *arg)
1723 {
1724   if (VALUE_LVAL (arg) != not_lval)
1725     {
1726       struct type *enc_type = value_enclosing_type (arg);
1727       struct value *val = allocate_value (enc_type);
1728 
1729       memcpy (value_contents_all_raw (val), value_contents_all (arg),
1730 	      TYPE_LENGTH (enc_type));
1731       val->type = arg->type;
1732       set_value_embedded_offset (val, value_embedded_offset (arg));
1733       set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1734       return val;
1735     }
1736    return arg;
1737 }
1738 
1739 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.  */
1740 
1741 void
1742 value_force_lval (struct value *v, CORE_ADDR addr)
1743 {
1744   gdb_assert (VALUE_LVAL (v) == not_lval);
1745 
1746   write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1747   v->lval = lval_memory;
1748   v->location.address = addr;
1749 }
1750 
1751 void
1752 set_value_component_location (struct value *component,
1753 			      const struct value *whole)
1754 {
1755   struct type *type;
1756 
1757   gdb_assert (whole->lval != lval_xcallable);
1758 
1759   if (whole->lval == lval_internalvar)
1760     VALUE_LVAL (component) = lval_internalvar_component;
1761   else
1762     VALUE_LVAL (component) = whole->lval;
1763 
1764   component->location = whole->location;
1765   if (whole->lval == lval_computed)
1766     {
1767       const struct lval_funcs *funcs = whole->location.computed.funcs;
1768 
1769       if (funcs->copy_closure)
1770         component->location.computed.closure = funcs->copy_closure (whole);
1771     }
1772 
1773   /* If the type has a dynamic resolved location property,
1774      update its value address.  */
1775   type = value_type (whole);
1776   if (NULL != TYPE_DATA_LOCATION (type)
1777       && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1778     set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1779 }
1780 
1781 /* Access to the value history.  */
1782 
1783 /* Record a new value in the value history.
1784    Returns the absolute history index of the entry.  */
1785 
1786 int
1787 record_latest_value (struct value *val)
1788 {
1789   /* We don't want this value to have anything to do with the inferior anymore.
1790      In particular, "set $1 = 50" should not affect the variable from which
1791      the value was taken, and fast watchpoints should be able to assume that
1792      a value on the value history never changes.  */
1793   if (value_lazy (val))
1794     value_fetch_lazy (val);
1795   /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1796      from.  This is a bit dubious, because then *&$1 does not just return $1
1797      but the current contents of that location.  c'est la vie...  */
1798   val->modifiable = 0;
1799 
1800   value_history.push_back (release_value (val));
1801 
1802   return value_history.size ();
1803 }
1804 
1805 /* Return a copy of the value in the history with sequence number NUM.  */
1806 
1807 struct value *
1808 access_value_history (int num)
1809 {
1810   int absnum = num;
1811 
1812   if (absnum <= 0)
1813     absnum += value_history.size ();
1814 
1815   if (absnum <= 0)
1816     {
1817       if (num == 0)
1818 	error (_("The history is empty."));
1819       else if (num == 1)
1820 	error (_("There is only one value in the history."));
1821       else
1822 	error (_("History does not go back to $$%d."), -num);
1823     }
1824   if (absnum > value_history.size ())
1825     error (_("History has not yet reached $%d."), absnum);
1826 
1827   absnum--;
1828 
1829   return value_copy (value_history[absnum].get ());
1830 }
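
/* Worked example (illustrative): with three values recorded, NUM == 3 and
   NUM == 0 both yield a copy of $3 (the latest value), while NUM == -1
   yields a copy of $2, matching the user-level $$1 notation:

     struct value *latest = access_value_history (0);
     struct value *one_back = access_value_history (-1);

   Both calls return copies, so modifying them never alters the history.  */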
1831 
1832 static void
1833 show_values (const char *num_exp, int from_tty)
1834 {
1835   int i;
1836   struct value *val;
1837   static int num = 1;
1838 
1839   if (num_exp)
1840     {
1841       /* "show values +" should print from the stored position.
1842          "show values <exp>" should print around value number <exp>.  */
1843       if (num_exp[0] != '+' || num_exp[1] != '\0')
1844 	num = parse_and_eval_long (num_exp) - 5;
1845     }
1846   else
1847     {
1848       /* "show values" means print the last 10 values.  */
1849       num = value_history.size () - 9;
1850     }
1851 
1852   if (num <= 0)
1853     num = 1;
1854 
1855   for (i = num; i < num + 10 && i <= value_history.size (); i++)
1856     {
1857       struct value_print_options opts;
1858 
1859       val = access_value_history (i);
1860       printf_filtered (("$%d = "), i);
1861       get_user_print_options (&opts);
1862       value_print (val, gdb_stdout, &opts);
1863       printf_filtered (("\n"));
1864     }
1865 
1866   /* The next "show values +" should start after what we just printed.  */
1867   num += 10;
1868 
1869   /* Hitting just return after this command should do the same thing as
1870      "show values +".  If num_exp is null, this is unnecessary, since
1871      "show values +" is not useful after "show values".  */
1872   if (from_tty && num_exp)
1873     set_repeat_arguments ("+");
1874 }
1875 
1876 enum internalvar_kind
1877 {
1878   /* The internal variable is empty.  */
1879   INTERNALVAR_VOID,
1880 
1881   /* The value of the internal variable is provided directly as
1882      a GDB value object.  */
1883   INTERNALVAR_VALUE,
1884 
1885   /* A fresh value is computed via a call-back routine on every
1886      access to the internal variable.  */
1887   INTERNALVAR_MAKE_VALUE,
1888 
1889   /* The internal variable holds a GDB internal convenience function.  */
1890   INTERNALVAR_FUNCTION,
1891 
1892   /* The variable holds an integer value.  */
1893   INTERNALVAR_INTEGER,
1894 
1895   /* The variable holds a GDB-provided string.  */
1896   INTERNALVAR_STRING,
1897 };
1898 
1899 union internalvar_data
1900 {
1901   /* A value object used with INTERNALVAR_VALUE.  */
1902   struct value *value;
1903 
1904   /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
1905   struct
1906   {
1907     /* The functions to call.  */
1908     const struct internalvar_funcs *functions;
1909 
1910     /* The function's user-data.  */
1911     void *data;
1912   } make_value;
1913 
1914   /* The internal function used with INTERNALVAR_FUNCTION.  */
1915   struct
1916   {
1917     struct internal_function *function;
1918     /* True if this is the canonical name for the function.  */
1919     int canonical;
1920   } fn;
1921 
1922   /* An integer value used with INTERNALVAR_INTEGER.  */
1923   struct
1924   {
1925     /* If type is non-NULL, it will be used as the type to generate
1926        a value for this internal variable.  If type is NULL, a default
1927        integer type for the architecture is used.  */
1928     struct type *type;
1929     LONGEST val;
1930   } integer;
1931 
1932   /* A string value used with INTERNALVAR_STRING.  */
1933   char *string;
1934 };
1935 
1936 /* Internal variables.  These are variables within the debugger
1937    that hold values assigned by debugger commands.
1938    The user refers to them with a '$' prefix
1939    that does not appear in the variable names stored internally.  */
1940 
1941 struct internalvar
1942 {
1943   struct internalvar *next;
1944   char *name;
1945 
1946   /* We support various different kinds of content of an internal variable.
1947      enum internalvar_kind specifies the kind, and union internalvar_data
1948      provides the data associated with this particular kind.  */
1949 
1950   enum internalvar_kind kind;
1951 
1952   union internalvar_data u;
1953 };
1954 
1955 static struct internalvar *internalvars;
1956 
1957 /* If the variable does not already exist, create it and give it the
1958    value given.  If no value is given then the default is zero.  */
1959 static void
1960 init_if_undefined_command (const char* args, int from_tty)
1961 {
1962   struct internalvar* intvar;
1963 
1964   /* Parse the expression - this is taken from set_command().  */
1965   expression_up expr = parse_expression (args);
1966 
1967   /* Validate the expression.
1968      Was the expression an assignment?
1969      Or even an expression at all?  */
1970   if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1971     error (_("Init-if-undefined requires an assignment expression."));
1972 
1973   /* Extract the variable from the parsed expression.
1974      In the case of an assign the lvalue will be in elts[1] and elts[2].  */
1975   if (expr->elts[1].opcode != OP_INTERNALVAR)
1976     error (_("The first parameter to init-if-undefined "
1977 	     "should be a GDB variable."));
1978   intvar = expr->elts[2].internalvar;
1979 
1980   /* Only evaluate the expression if the lvalue is void.
1981      This may still fail if the expression is invalid.  */
1982   if (intvar->kind == INTERNALVAR_VOID)
1983     evaluate_expression (expr.get ());
1984 }
1985 
1986 
1987 /* Look up an internal variable with name NAME.  NAME should not
1988    normally include a dollar sign.
1989 
1990    If the specified internal variable does not exist,
1991    the return value is NULL.  */
1992 
1993 struct internalvar *
1994 lookup_only_internalvar (const char *name)
1995 {
1996   struct internalvar *var;
1997 
1998   for (var = internalvars; var; var = var->next)
1999     if (strcmp (var->name, name) == 0)
2000       return var;
2001 
2002   return NULL;
2003 }
2004 
2005 /* Complete NAME by comparing it to the names of internal
2006    variables.  */
2007 
2008 void
2009 complete_internalvar (completion_tracker &tracker, const char *name)
2010 {
2011   struct internalvar *var;
2012   int len;
2013 
2014   len = strlen (name);
2015 
2016   for (var = internalvars; var; var = var->next)
2017     if (strncmp (var->name, name, len) == 0)
2018       {
2019 	gdb::unique_xmalloc_ptr<char> copy (xstrdup (var->name));
2020 
2021 	tracker.add_completion (std::move (copy));
2022       }
2023 }
2024 
2025 /* Create an internal variable with name NAME and with a void value.
2026    NAME should not normally include a dollar sign.  */
2027 
2028 struct internalvar *
2029 create_internalvar (const char *name)
2030 {
2031   struct internalvar *var = XNEW (struct internalvar);
2032 
2033   var->name = concat (name, (char *)NULL);
2034   var->kind = INTERNALVAR_VOID;
2035   var->next = internalvars;
2036   internalvars = var;
2037   return var;
2038 }
2039 
2040 /* Create an internal variable with name NAME and register FUN as the
2041    function that value_of_internalvar uses to create a value whenever
2042    this variable is referenced.  NAME should not normally include a
2043    dollar sign.  DATA is passed uninterpreted to FUN when it is
2044    called.  CLEANUP, if not NULL, is called when the internal variable
2045    is destroyed.  It is passed DATA as its only argument.  */
2046 
2047 struct internalvar *
2048 create_internalvar_type_lazy (const char *name,
2049 			      const struct internalvar_funcs *funcs,
2050 			      void *data)
2051 {
2052   struct internalvar *var = create_internalvar (name);
2053 
2054   var->kind = INTERNALVAR_MAKE_VALUE;
2055   var->u.make_value.functions = funcs;
2056   var->u.make_value.data = data;
2057   return var;
2058 }
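
/* Illustrative sketch (MY_FUNCS is a hypothetical object): FUNCS normally
   points at a statically allocated struct internalvar_funcs whose
   make_value callback builds a fresh value on every access; compile_to_ax
   and destroy may be left NULL.

     create_internalvar_type_lazy ("myvar", &my_funcs, NULL);

   Every later reference to $myvar is then routed through the make_value
   callback (see value_of_internalvar below).  */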
2059 
2060 /* See documentation in value.h.  */
2061 
2062 int
2063 compile_internalvar_to_ax (struct internalvar *var,
2064 			   struct agent_expr *expr,
2065 			   struct axs_value *value)
2066 {
2067   if (var->kind != INTERNALVAR_MAKE_VALUE
2068       || var->u.make_value.functions->compile_to_ax == NULL)
2069     return 0;
2070 
2071   var->u.make_value.functions->compile_to_ax (var, expr, value,
2072 					      var->u.make_value.data);
2073   return 1;
2074 }
2075 
2076 /* Look up an internal variable with name NAME.  NAME should not
2077    normally include a dollar sign.
2078 
2079    If the specified internal variable does not exist,
2080    one is created, with a void value.  */
2081 
2082 struct internalvar *
2083 lookup_internalvar (const char *name)
2084 {
2085   struct internalvar *var;
2086 
2087   var = lookup_only_internalvar (name);
2088   if (var)
2089     return var;
2090 
2091   return create_internalvar (name);
2092 }
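
/* Usage note (illustrative): unlike lookup_only_internalvar, this never
   returns NULL.  For example,

     struct internalvar *var = lookup_internalvar ("answer");

   always yields a variable, creating a void-valued "$answer" if the name
   has not been seen before.  */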
2093 
2094 /* Return current value of internal variable VAR.  For variables that
2095    are not inherently typed, use a value type appropriate for GDBARCH.  */
2096 
2097 struct value *
2098 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2099 {
2100   struct value *val;
2101   struct trace_state_variable *tsv;
2102 
2103   /* If there is a trace state variable of the same name, assume that
2104      is what we really want to see.  */
2105   tsv = find_trace_state_variable (var->name);
2106   if (tsv)
2107     {
2108       tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2109 								&(tsv->value));
2110       if (tsv->value_known)
2111 	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2112 				  tsv->value);
2113       else
2114 	val = allocate_value (builtin_type (gdbarch)->builtin_void);
2115       return val;
2116     }
2117 
2118   switch (var->kind)
2119     {
2120     case INTERNALVAR_VOID:
2121       val = allocate_value (builtin_type (gdbarch)->builtin_void);
2122       break;
2123 
2124     case INTERNALVAR_FUNCTION:
2125       val = allocate_value (builtin_type (gdbarch)->internal_fn);
2126       break;
2127 
2128     case INTERNALVAR_INTEGER:
2129       if (!var->u.integer.type)
2130 	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2131 				  var->u.integer.val);
2132       else
2133 	val = value_from_longest (var->u.integer.type, var->u.integer.val);
2134       break;
2135 
2136     case INTERNALVAR_STRING:
2137       val = value_cstring (var->u.string, strlen (var->u.string),
2138 			   builtin_type (gdbarch)->builtin_char);
2139       break;
2140 
2141     case INTERNALVAR_VALUE:
2142       val = value_copy (var->u.value);
2143       if (value_lazy (val))
2144 	value_fetch_lazy (val);
2145       break;
2146 
2147     case INTERNALVAR_MAKE_VALUE:
2148       val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2149 							var->u.make_value.data);
2150       break;
2151 
2152     default:
2153       internal_error (__FILE__, __LINE__, _("bad kind"));
2154     }
2155 
2156   /* Change the VALUE_LVAL to lval_internalvar so that future operations
2157      on this value go back to affect the original internal variable.
2158 
2159      Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2160      no underlying modifiable state in the internal variable.
2161 
2162      Likewise, if the variable's value is a computed lvalue, we want
2163      references to it to produce another computed lvalue, where
2164      references and assignments actually operate through the
2165      computed value's functions.
2166 
2167      This means that internal variables with computed values
2168      behave a little differently from other internal variables:
2169      assignments to them don't just replace the previous value
2170      altogether.  At the moment, this seems like the behavior we
2171      want.  */
2172 
2173   if (var->kind != INTERNALVAR_MAKE_VALUE
2174       && val->lval != lval_computed)
2175     {
2176       VALUE_LVAL (val) = lval_internalvar;
2177       VALUE_INTERNALVAR (val) = var;
2178     }
2179 
2180   return val;
2181 }
2182 
2183 int
2184 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2185 {
2186   if (var->kind == INTERNALVAR_INTEGER)
2187     {
2188       *result = var->u.integer.val;
2189       return 1;
2190     }
2191 
2192   if (var->kind == INTERNALVAR_VALUE)
2193     {
2194       struct type *type = check_typedef (value_type (var->u.value));
2195 
2196       if (TYPE_CODE (type) == TYPE_CODE_INT)
2197 	{
2198 	  *result = value_as_long (var->u.value);
2199 	  return 1;
2200 	}
2201     }
2202 
2203   return 0;
2204 }
2205 
2206 static int
2207 get_internalvar_function (struct internalvar *var,
2208 			  struct internal_function **result)
2209 {
2210   switch (var->kind)
2211     {
2212     case INTERNALVAR_FUNCTION:
2213       *result = var->u.fn.function;
2214       return 1;
2215 
2216     default:
2217       return 0;
2218     }
2219 }
2220 
2221 void
2222 set_internalvar_component (struct internalvar *var,
2223 			   LONGEST offset, LONGEST bitpos,
2224 			   LONGEST bitsize, struct value *newval)
2225 {
2226   gdb_byte *addr;
2227   struct gdbarch *arch;
2228   int unit_size;
2229 
2230   switch (var->kind)
2231     {
2232     case INTERNALVAR_VALUE:
2233       addr = value_contents_writeable (var->u.value);
2234       arch = get_value_arch (var->u.value);
2235       unit_size = gdbarch_addressable_memory_unit_size (arch);
2236 
2237       if (bitsize)
2238 	modify_field (value_type (var->u.value), addr + offset,
2239 		      value_as_long (newval), bitpos, bitsize);
2240       else
2241 	memcpy (addr + offset * unit_size, value_contents (newval),
2242 		TYPE_LENGTH (value_type (newval)));
2243       break;
2244 
2245     default:
2246       /* We can never get a component of any other kind.  */
2247       internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2248     }
2249 }
2250 
2251 void
2252 set_internalvar (struct internalvar *var, struct value *val)
2253 {
2254   enum internalvar_kind new_kind;
2255   union internalvar_data new_data = { 0 };
2256 
2257   if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2258     error (_("Cannot overwrite convenience function %s"), var->name);
2259 
2260   /* Prepare new contents.  */
2261   switch (TYPE_CODE (check_typedef (value_type (val))))
2262     {
2263     case TYPE_CODE_VOID:
2264       new_kind = INTERNALVAR_VOID;
2265       break;
2266 
2267     case TYPE_CODE_INTERNAL_FUNCTION:
2268       gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2269       new_kind = INTERNALVAR_FUNCTION;
2270       get_internalvar_function (VALUE_INTERNALVAR (val),
2271 				&new_data.fn.function);
2272       /* Copies created here are never canonical.  */
2273       break;
2274 
2275     default:
2276       new_kind = INTERNALVAR_VALUE;
2277       new_data.value = value_copy (val);
2278       new_data.value->modifiable = 1;
2279 
2280       /* Force the value to be fetched from the target now, to avoid problems
2281 	 later when this internalvar is referenced and the target is gone or
2282 	 has changed.  */
2283       if (value_lazy (new_data.value))
2284        value_fetch_lazy (new_data.value);
2285 
2286       /* Release the value from the value chain to prevent it from being
2287 	 deleted by free_all_values.  From here on this function should not
2288 	 call error () until new_data is installed into the var->u to avoid
2289 	 leaking memory.  */
2290       release_value (new_data.value).release ();
2291 
2292       /* Internal variables which are created from values with a dynamic
2293          location don't need the location property of the origin anymore.
2294          The resolved dynamic location is used prior to any other address
2295          when accessing the value.
2296          If we keep it, we would still refer to the origin value.
2297          Remove the location property in case it exists.  */
2298       remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));
2299 
2300       break;
2301     }
2302 
2303   /* Clean up old contents.  */
2304   clear_internalvar (var);
2305 
2306   /* Switch over.  */
2307   var->kind = new_kind;
2308   var->u = new_data;
2309   /* End code which must not call error().  */
2310 }
2311 
2312 void
2313 set_internalvar_integer (struct internalvar *var, LONGEST l)
2314 {
2315   /* Clean up old contents.  */
2316   clear_internalvar (var);
2317 
2318   var->kind = INTERNALVAR_INTEGER;
2319   var->u.integer.type = NULL;
2320   var->u.integer.val = l;
2321 }
2322 
2323 void
2324 set_internalvar_string (struct internalvar *var, const char *string)
2325 {
2326   /* Clean up old contents.  */
2327   clear_internalvar (var);
2328 
2329   var->kind = INTERNALVAR_STRING;
2330   var->u.string = xstrdup (string);
2331 }
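
/* Illustrative sketch: the typed setters above replace whatever the
   variable held before (clear_internalvar releases the old contents):

     struct internalvar *var = lookup_internalvar ("answer");
     set_internalvar_integer (var, 42);
     set_internalvar_string (var, "forty-two");

   After the first setter the user sees $answer as the integer 42; the
   second setter replaces it with a string.  */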
2332 
2333 static void
2334 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2335 {
2336   /* Clean up old contents.  */
2337   clear_internalvar (var);
2338 
2339   var->kind = INTERNALVAR_FUNCTION;
2340   var->u.fn.function = f;
2341   var->u.fn.canonical = 1;
2342   /* Variables installed here are always the canonical version.  */
2343 }
2344 
2345 void
2346 clear_internalvar (struct internalvar *var)
2347 {
2348   /* Clean up old contents.  */
2349   switch (var->kind)
2350     {
2351     case INTERNALVAR_VALUE:
2352       value_decref (var->u.value);
2353       break;
2354 
2355     case INTERNALVAR_STRING:
2356       xfree (var->u.string);
2357       break;
2358 
2359     case INTERNALVAR_MAKE_VALUE:
2360       if (var->u.make_value.functions->destroy != NULL)
2361 	var->u.make_value.functions->destroy (var->u.make_value.data);
2362       break;
2363 
2364     default:
2365       break;
2366     }
2367 
2368   /* Reset to void kind.  */
2369   var->kind = INTERNALVAR_VOID;
2370 }
2371 
2372 char *
2373 internalvar_name (const struct internalvar *var)
2374 {
2375   return var->name;
2376 }
2377 
2378 static struct internal_function *
2379 create_internal_function (const char *name,
2380 			  internal_function_fn handler, void *cookie)
2381 {
2382   struct internal_function *ifn = XNEW (struct internal_function);
2383 
2384   ifn->name = xstrdup (name);
2385   ifn->handler = handler;
2386   ifn->cookie = cookie;
2387   return ifn;
2388 }
2389 
2390 char *
2391 value_internal_function_name (struct value *val)
2392 {
2393   struct internal_function *ifn;
2394   int result;
2395 
2396   gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2397   result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2398   gdb_assert (result);
2399 
2400   return ifn->name;
2401 }
2402 
2403 struct value *
2404 call_internal_function (struct gdbarch *gdbarch,
2405 			const struct language_defn *language,
2406 			struct value *func, int argc, struct value **argv)
2407 {
2408   struct internal_function *ifn;
2409   int result;
2410 
2411   gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2412   result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2413   gdb_assert (result);
2414 
2415   return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2416 }
2417 
2418 /* The 'function' command.  This does nothing -- it is just a
2419    placeholder to let "help function NAME" work.  This is also used as
2420    the implementation of the sub-command that is created when
2421    registering an internal function.  */
2422 static void
2423 function_command (const char *command, int from_tty)
2424 {
2425   /* Do nothing.  */
2426 }
2427 
2428 /* Clean up if an internal function's command is destroyed.  */
2429 static void
2430 function_destroyer (struct cmd_list_element *self, void *ignore)
2431 {
2432   xfree ((char *) self->name);
2433   xfree ((char *) self->doc);
2434 }
2435 
2436 /* Add a new internal function.  NAME is the name of the function; DOC
2437    is a documentation string describing the function.  HANDLER is
2438    called when the function is invoked.  COOKIE is an arbitrary
2439    pointer which is passed to HANDLER and is intended for "user
2440    data".  */
2441 void
2442 add_internal_function (const char *name, const char *doc,
2443 		       internal_function_fn handler, void *cookie)
2444 {
2445   struct cmd_list_element *cmd;
2446   struct internal_function *ifn;
2447   struct internalvar *var = lookup_internalvar (name);
2448 
2449   ifn = create_internal_function (name, handler, cookie);
2450   set_internalvar_function (var, ifn);
2451 
2452   cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2453 		 &functionlist);
2454   cmd->destroyer = function_destroyer;
2455 }
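
/* Illustrative sketch (ARGC_HANDLER and the "$_argc" name are hypothetical):
   registering a convenience function that the user can call as $_argc (...):

     static struct value *
     argc_handler (struct gdbarch *gdbarch,
                   const struct language_defn *language,
                   void *cookie, int argc, struct value **argv)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, argc);
     }

     add_internal_function ("_argc", _("Return the number of arguments."),
                            argc_handler, NULL);

   The handler receives the already-evaluated arguments and must return a
   value.  */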
2456 
2457 /* Update VALUE before discarding OBJFILE.  COPIED_TYPES is used to
2458    prevent cycles / duplicates.  */
2459 
2460 void
2461 preserve_one_value (struct value *value, struct objfile *objfile,
2462 		    htab_t copied_types)
2463 {
2464   if (TYPE_OBJFILE (value->type) == objfile)
2465     value->type = copy_type_recursive (objfile, value->type, copied_types);
2466 
2467   if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2468     value->enclosing_type = copy_type_recursive (objfile,
2469 						 value->enclosing_type,
2470 						 copied_types);
2471 }
2472 
2473 /* Likewise for internal variable VAR.  */
2474 
2475 static void
2476 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2477 			  htab_t copied_types)
2478 {
2479   switch (var->kind)
2480     {
2481     case INTERNALVAR_INTEGER:
2482       if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2483 	var->u.integer.type
2484 	  = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2485       break;
2486 
2487     case INTERNALVAR_VALUE:
2488       preserve_one_value (var->u.value, objfile, copied_types);
2489       break;
2490     }
2491 }
2492 
2493 /* Update the internal variables and value history when OBJFILE is
2494    discarded; we must copy the types out of the objfile.  New global types
2495    will be created for every convenience variable which currently points to
2496    this objfile's types, and the convenience variables will be adjusted to
2497    use the new global types.  */
2498 
2499 void
2500 preserve_values (struct objfile *objfile)
2501 {
2502   htab_t copied_types;
2503   struct internalvar *var;
2504 
2505   /* Create the hash table.  We allocate on the objfile's obstack, since
2506      it is soon to be deleted.  */
2507   copied_types = create_copied_types_hash (objfile);
2508 
2509   for (const value_ref_ptr &item : value_history)
2510     preserve_one_value (item.get (), objfile, copied_types);
2511 
2512   for (var = internalvars; var; var = var->next)
2513     preserve_one_internalvar (var, objfile, copied_types);
2514 
2515   preserve_ext_lang_values (objfile, copied_types);
2516 
2517   htab_delete (copied_types);
2518 }
2519 
2520 static void
2521 show_convenience (const char *ignore, int from_tty)
2522 {
2523   struct gdbarch *gdbarch = get_current_arch ();
2524   struct internalvar *var;
2525   int varseen = 0;
2526   struct value_print_options opts;
2527 
2528   get_user_print_options (&opts);
2529   for (var = internalvars; var; var = var->next)
2530     {
2531 
2532       if (!varseen)
2533 	{
2534 	  varseen = 1;
2535 	}
2536       printf_filtered (("$%s = "), var->name);
2537 
2538       TRY
2539 	{
2540 	  struct value *val;
2541 
2542 	  val = value_of_internalvar (gdbarch, var);
2543 	  value_print (val, gdb_stdout, &opts);
2544 	}
2545       CATCH (ex, RETURN_MASK_ERROR)
2546 	{
2547 	  fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2548 	}
2549       END_CATCH
2550 
2551       printf_filtered (("\n"));
2552     }
2553   if (!varseen)
2554     {
2555       /* This text does not mention convenience functions on purpose.
2556 	 The user can't create them except via Python, and if Python support
2557 	 is installed this message will never be printed ($_streq will
2558 	 exist).  */
2559       printf_unfiltered (_("No debugger convenience variables now defined.\n"
2560 			   "Convenience variables have "
2561 			   "names starting with \"$\";\n"
2562 			   "use \"set\" as in \"set "
2563 			   "$foo = 5\" to define them.\n"));
2564     }
2565 }
2566 
2567 
2568 /* See value.h.  */
2569 
2570 struct value *
2571 value_from_xmethod (xmethod_worker_up &&worker)
2572 {
2573   struct value *v;
2574 
2575   v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2576   v->lval = lval_xcallable;
2577   v->location.xm_worker = worker.release ();
2578   v->modifiable = 0;
2579 
2580   return v;
2581 }
2582 
2583 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD.  */
2584 
2585 struct type *
2586 result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
2587 {
2588   gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2589 	      && method->lval == lval_xcallable && !argv.empty ());
2590 
2591   return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
2592 }
2593 
2594 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD.  */
2595 
2596 struct value *
2597 call_xmethod (struct value *method, gdb::array_view<value *> argv)
2598 {
2599   gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2600 	      && method->lval == lval_xcallable && !argv.empty ());
2601 
2602   return method->location.xm_worker->invoke (argv[0], argv.slice (1));
2603 }
2604 
2605 /* Extract a value as a C number (either long or double).
2606    Knows how to convert fixed values to double, or
2607    floating values to long.
2608    Does not deallocate the value.  */
2609 
2610 LONGEST
2611 value_as_long (struct value *val)
2612 {
2613   /* This coerces arrays and functions, which is necessary (e.g.
2614      in disassemble_command).  It also dereferences references, which
2615      I suspect is the most logical thing to do.  */
2616   val = coerce_array (val);
2617   return unpack_long (value_type (val), value_contents (val));
2618 }
2619 
2620 /* Extract a value as a C pointer.  Does not deallocate the value.
2621    Note that val's type may not actually be a pointer; value_as_long
2622    handles all the cases.  */
2623 CORE_ADDR
2624 value_as_address (struct value *val)
2625 {
2626   struct gdbarch *gdbarch = get_type_arch (value_type (val));
2627 
2628   /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2629      whether we want this to be true eventually.  */
2630 #if 0
2631   /* gdbarch_addr_bits_remove is wrong if we are being called for a
2632      non-address (e.g. argument to "signal", "info break", etc.), or
2633      for pointers to char, in which the low bits *are* significant.  */
2634   return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2635 #else
2636 
2637   /* There are several targets (IA-64, PowerPC, and others) which
2638      don't represent pointers to functions as simply the address of
2639      the function's entry point.  For example, on the IA-64, a
2640      function pointer points to a two-word descriptor, generated by
2641      the linker, which contains the function's entry point, and the
2642      value the IA-64 "global pointer" register should have --- to
2643      support position-independent code.  The linker generates
2644      descriptors only for those functions whose addresses are taken.
2645 
2646      On such targets, it's difficult for GDB to convert an arbitrary
2647      function address into a function pointer; it has to either find
2648      an existing descriptor for that function, or call malloc and
2649      build its own.  On some targets, it is impossible for GDB to
2650      build a descriptor at all: the descriptor must contain a jump
2651      instruction; data memory cannot be executed; and code memory
2652      cannot be modified.
2653 
2654      Upon entry to this function, if VAL is a value of type `function'
2655      (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2656      value_address (val) is the address of the function.  This is what
2657      you'll get if you evaluate an expression like `main'.  The call
2658      to COERCE_ARRAY below actually does all the usual unary
2659      conversions, which includes converting values of type `function'
2660      to `pointer to function'.  This is the challenging conversion
2661      discussed above.  Then, `unpack_long' will convert that pointer
2662      back into an address.
2663 
2664      So, suppose the user types `disassemble foo' on an architecture
2665      with a strange function pointer representation, on which GDB
2666      cannot build its own descriptors, and suppose further that `foo'
2667      has no linker-built descriptor.  The address->pointer conversion
2668      will signal an error and prevent the command from running, even
2669      though the next step would have been to convert the pointer
2670      directly back into the same address.
2671 
2672      The following shortcut avoids this whole mess.  If VAL is a
2673      function, just return its address directly.  */
2674   if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2675       || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2676     return value_address (val);
2677 
2678   val = coerce_array (val);
2679 
2680   /* Some architectures (e.g. Harvard), map instruction and data
2681   /* Some architectures (e.g. Harvard) map instruction and data
2682      addresses onto a single large unified address space.  For
2683      instance, an architecture may consider a large integer in the
2684      range 0x10000000 .. 0x1000ffff to already represent a data
2685      address (hence no pointer-to-address conversion is needed) while
2686      a small integer would still need the integer-to-pointer-to-address
2687      conversion.  Just assume such architectures handle all
2688 
2689   /* JimB writes:
2690 
2691      I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2692      must admonish GDB hackers to make sure its behavior matches the
2693      compiler's, whenever possible.
2694 
2695      In general, I think GDB should evaluate expressions the same way
2696      the compiler does.  When the user copies an expression out of
2697      their source code and hands it to a `print' command, they should
2698      get the same value the compiler would have computed.  Any
2699      deviation from this rule can cause major confusion and annoyance,
2700      and needs to be justified carefully.  In other words, GDB doesn't
2701      really have the freedom to do these conversions in clever and
2702      useful ways.
2703 
2704      AndrewC pointed out that users aren't complaining about how GDB
2705      casts integers to pointers; they are complaining that they can't
2706      take an address from a disassembly listing and give it to `x/i'.
2707      This is certainly important.
2708 
2709      Adding an architecture method like integer_to_address() certainly
2710      makes it possible for GDB to "get it right" in all circumstances
2711      --- the target has complete control over how things get done, so
2712      people can Do The Right Thing for their target without breaking
2713      anyone else.  The standard doesn't specify how integers get
2714      converted to pointers; usually, the ABI doesn't either, but
2715      ABI-specific code is a more reasonable place to handle it.  */
2716 
2717   if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2718       && !TYPE_IS_REFERENCE (value_type (val))
2719       && gdbarch_integer_to_address_p (gdbarch))
2720     return gdbarch_integer_to_address (gdbarch, value_type (val),
2721 				       value_contents (val));
2722 
2723   return unpack_long (value_type (val), value_contents (val));
2724 #endif
2725 }
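
/* Usage note (illustrative): callers that need a target address from a
   value should prefer

     CORE_ADDR addr = value_as_address (val);

   over going through value_as_long, so that function values and
   architectures with an integer_to_address hook are handled as described
   above.  */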
2726 
2727 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2728    as a long, or as a double, assuming the raw data is described
2729    by type TYPE.  Knows how to convert different sizes of values
2730    and can convert between fixed and floating point.  We don't assume
2731    any alignment for the raw data.  Return value is in host byte order.
2732 
2733    If you want functions and arrays to be coerced to pointers, and
2734    references to be dereferenced, call value_as_long() instead.
2735 
2736    C++: It is assumed that the front-end has taken care of
2737    all matters concerning pointers to members.  A pointer
2738    to member which reaches here is considered to be equivalent
2739    to an INT (or some size).  After all, it is only an offset.  */
2740 
2741 LONGEST
2742 unpack_long (struct type *type, const gdb_byte *valaddr)
2743 {
2744   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2745   enum type_code code = TYPE_CODE (type);
2746   int len = TYPE_LENGTH (type);
2747   int nosign = TYPE_UNSIGNED (type);
2748 
2749   switch (code)
2750     {
2751     case TYPE_CODE_TYPEDEF:
2752       return unpack_long (check_typedef (type), valaddr);
2753     case TYPE_CODE_ENUM:
2754     case TYPE_CODE_FLAGS:
2755     case TYPE_CODE_BOOL:
2756     case TYPE_CODE_INT:
2757     case TYPE_CODE_CHAR:
2758     case TYPE_CODE_RANGE:
2759     case TYPE_CODE_MEMBERPTR:
2760       if (nosign)
2761 	return extract_unsigned_integer (valaddr, len, byte_order);
2762       else
2763 	return extract_signed_integer (valaddr, len, byte_order);
2764 
2765     case TYPE_CODE_FLT:
2766     case TYPE_CODE_DECFLOAT:
2767       return target_float_to_longest (valaddr, type);
2768 
2769     case TYPE_CODE_PTR:
2770     case TYPE_CODE_REF:
2771     case TYPE_CODE_RVALUE_REF:
2772       /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2773          whether we want this to be true eventually.  */
2774       return extract_typed_address (valaddr, type);
2775 
2776     default:
2777       error (_("Value can't be converted to integer."));
2778     }
2779 }
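
/* Worked example (little-endian target, illustrative): for a 2-byte
   unsigned integer type whose raw bytes are { 0x34, 0x12 }, unpack_long
   returns 0x1234; for the corresponding signed type with raw bytes
   { 0xff, 0xff } it returns -1, since extract_signed_integer sign-extends.  */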
2780 
2781 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2782    as a CORE_ADDR, assuming the raw data is described by type TYPE.
2783    We don't assume any alignment for the raw data.  Return value is in
2784    host byte order.
2785 
2786    If you want functions and arrays to be coerced to pointers, and
2787    references to be dereferenced, call value_as_address() instead.
2788 
2789    C++: It is assumed that the front-end has taken care of
2790    all matters concerning pointers to members.  A pointer
2791    to member which reaches here is considered to be equivalent
2792    to an INT (or some size).  After all, it is only an offset.  */
2793 
2794 CORE_ADDR
2795 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2796 {
2797   /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2798      whether we want this to be true eventually.  */
2799   return unpack_long (type, valaddr);
2800 }
2801 
2802 bool
2803 is_floating_value (struct value *val)
2804 {
2805   struct type *type = check_typedef (value_type (val));
2806 
2807   if (is_floating_type (type))
2808     {
2809       if (!target_float_is_valid (value_contents (val), type))
2810 	error (_("Invalid floating value found in program."));
2811       return true;
2812     }
2813 
2814   return false;
2815 }
2816 
2817 
2818 /* Get the value of the FIELDNO'th field (which must be static) of
2819    TYPE.  */
2820 
2821 struct value *
2822 value_static_field (struct type *type, int fieldno)
2823 {
2824   struct value *retval;
2825 
2826   switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2827     {
2828     case FIELD_LOC_KIND_PHYSADDR:
2829       retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2830 			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2831       break;
2832     case FIELD_LOC_KIND_PHYSNAME:
2833     {
2834       const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2835       /* TYPE_FIELD_NAME (type, fieldno); */
2836       struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2837 
2838       if (sym.symbol == NULL)
2839 	{
2840 	  /* With some compilers, e.g. HP aCC, static data members are
2841 	     reported as non-debuggable symbols.  */
2842 	  struct bound_minimal_symbol msym
2843 	    = lookup_minimal_symbol (phys_name, NULL, NULL);
2844 	  struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
2845 
2846 	  if (!msym.minsym)
2847 	    retval = allocate_optimized_out_value (field_type);
2848 	  else
2849 	    retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
2850 	}
2851       else
2852 	retval = value_of_variable (sym.symbol, sym.block);
2853       break;
2854     }
2855     default:
2856       gdb_assert_not_reached ("unexpected field location kind");
2857     }
2858 
2859   return retval;
2860 }
2861 
2862 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2863    You have to be careful here, since the size of the data area for the value
2864    is set by the length of the enclosing type.  So if NEW_ENCL_TYPE is bigger
2865    than the old enclosing type, you have to allocate more space for the
2866    data.  */
2867 
2868 void
2869 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2870 {
2871   if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2872     {
2873       check_type_length_before_alloc (new_encl_type);
2874       val->contents
2875 	.reset ((gdb_byte *) xrealloc (val->contents.release (),
2876 				       TYPE_LENGTH (new_encl_type)));
2877     }
2878 
2879   val->enclosing_type = new_encl_type;
2880 }
2881 
2882 /* Given a value ARG1 (offset by OFFSET bytes)
2883    of a struct or union type ARG_TYPE,
2884    extract and return the value of one of its (non-static) fields.
2885    FIELDNO says which field.  */
2886 
2887 struct value *
2888 value_primitive_field (struct value *arg1, LONGEST offset,
2889 		       int fieldno, struct type *arg_type)
2890 {
2891   struct value *v;
2892   struct type *type;
2893   struct gdbarch *arch = get_value_arch (arg1);
2894   int unit_size = gdbarch_addressable_memory_unit_size (arch);
2895 
2896   arg_type = check_typedef (arg_type);
2897   type = TYPE_FIELD_TYPE (arg_type, fieldno);
2898 
2899   /* Call check_typedef on our type to make sure that, if TYPE
2900      is a TYPE_CODE_TYPEDEF, its length is set to the length
2901      of the target type instead of zero.  However, we do not
2902      replace the typedef type by the target type, because we want
2903      to keep the typedef in order to be able to print the type
2904      description correctly.  */
2905   check_typedef (type);
2906 
2907   if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2908     {
2909       /* Handle packed fields.
2910 
2911 	 Create a new value for the bitfield, with bitpos and bitsize
2912 	 set.  If possible, arrange offset and bitpos so that we can
2913 	 do a single aligned read of the size of the containing type.
2914 	 Otherwise, adjust offset to the byte containing the first
2915 	 bit.  Assume that the address, offset, and embedded offset
2916 	 are sufficiently aligned.  */
2917 
2918       LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2919       LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
2920 
2921       v = allocate_value_lazy (type);
2922       v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2923       if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2924 	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2925 	v->bitpos = bitpos % container_bitsize;
2926       else
2927 	v->bitpos = bitpos % 8;
2928       v->offset = (value_embedded_offset (arg1)
2929 		   + offset
2930 		   + (bitpos - v->bitpos) / 8);
2931       set_value_parent (v, arg1);
2932       if (!value_lazy (arg1))
2933 	value_fetch_lazy (v);
2934     }
2935   else if (fieldno < TYPE_N_BASECLASSES (arg_type))
2936     {
2937       /* This field is actually a base subobject, so preserve the
2938 	 entire object's contents for later references to virtual
2939 	 bases, etc.  */
2940       LONGEST boffset;
2941 
2942       /* Lazy register values with offsets are not supported.  */
2943       if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2944 	value_fetch_lazy (arg1);
2945 
2946       /* We special case virtual inheritance here because this
2947 	 requires access to the contents, which we would rather avoid
2948 	 for references to ordinary fields of unavailable values.  */
2949       if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
2950 	boffset = baseclass_offset (arg_type, fieldno,
2951 				    value_contents (arg1),
2952 				    value_embedded_offset (arg1),
2953 				    value_address (arg1),
2954 				    arg1);
2955       else
2956 	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2957 
2958       if (value_lazy (arg1))
2959 	v = allocate_value_lazy (value_enclosing_type (arg1));
2960       else
2961 	{
2962 	  v = allocate_value (value_enclosing_type (arg1));
2963 	  value_contents_copy_raw (v, 0, arg1, 0,
2964 				   TYPE_LENGTH (value_enclosing_type (arg1)));
2965 	}
2966       v->type = type;
2967       v->offset = value_offset (arg1);
2968       v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
2969     }
2970   else if (NULL != TYPE_DATA_LOCATION (type))
2971     {
2972       /* Field is a dynamic data member.  */
2973 
2974       gdb_assert (0 == offset);
2975       /* We expect an already resolved data location.  */
2976       gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
2977       /* For dynamic data types, defer memory allocation
2978          until we actually access the value.  */
2979       v = allocate_value_lazy (type);
2980     }
2981   else
2982     {
2983       /* Plain old data member.  */
2984       offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
2985 	         / (HOST_CHAR_BIT * unit_size));
2986 
2987       /* Lazy register values with offsets are not supported.  */
2988       if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2989 	value_fetch_lazy (arg1);
2990 
2991       if (value_lazy (arg1))
2992 	v = allocate_value_lazy (type);
2993       else
2994 	{
2995 	  v = allocate_value (type);
2996 	  value_contents_copy_raw (v, value_embedded_offset (v),
2997 				   arg1, value_embedded_offset (arg1) + offset,
2998 				   type_length_units (type));
2999 	}
3000       v->offset = (value_offset (arg1) + offset
3001 		   + value_embedded_offset (arg1));
3002     }
3003   set_value_component_location (v, arg1);
3004   return v;
3005 }
3006 
3007 /* Given a value ARG1 of a struct or union type,
3008    extract and return the value of one of its (non-static) fields.
3009    FIELDNO says which field.  */
3010 
3011 struct value *
3012 value_field (struct value *arg1, int fieldno)
3013 {
3014   return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3015 }
3016 
3017 /* Return a non-virtual function as a value.
3018    F is the list of member functions which contains the desired method.
3019    J is an index into F which provides the desired method.
3020 
3021    We only use the symbol for its address, so be happy with either a
3022    full symbol or a minimal symbol.  */
3023 
3024 struct value *
3025 value_fn_field (struct value **arg1p, struct fn_field *f,
3026 		int j, struct type *type,
3027 		LONGEST offset)
3028 {
3029   struct value *v;
3030   struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3031   const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3032   struct symbol *sym;
3033   struct bound_minimal_symbol msym;
3034 
3035   sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3036   if (sym != NULL)
3037     {
3038       memset (&msym, 0, sizeof (msym));
3039     }
3040   else
3041     {
3042       gdb_assert (sym == NULL);
3043       msym = lookup_bound_minimal_symbol (physname);
3044       if (msym.minsym == NULL)
3045 	return NULL;
3046     }
3047 
3048   v = allocate_value (ftype);
3049   VALUE_LVAL (v) = lval_memory;
3050   if (sym)
3051     {
3052       set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym)));
3053     }
3054   else
3055     {
3056       /* The minimal symbol might point to a function descriptor;
3057 	 resolve it to the actual code address instead.  */
3058       struct objfile *objfile = msym.objfile;
3059       struct gdbarch *gdbarch = get_objfile_arch (objfile);
3060 
3061       set_value_address (v,
3062 	gdbarch_convert_from_func_ptr_addr
3063 	   (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), current_top_target ()));
3064     }
3065 
3066   if (arg1p)
3067     {
3068       if (type != value_type (*arg1p))
3069 	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
3070 					value_addr (*arg1p)));
3071 
3072       /* Move the `this' pointer according to the offset.
3073          VALUE_OFFSET (*arg1p) += offset; */
3074     }
3075 
3076   return v;
3077 }
3078 
3079 
3080 
3081 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3082    VALADDR, and store the result in *RESULT.
3083    The bitfield starts at BITPOS bits and contains BITSIZE bits; if
3084    BITSIZE is zero, then the length is taken from FIELD_TYPE.
3085 
3086    Extracting bits depends on endianness of the machine.  Compute the
3087    number of least significant bits to discard.  For big endian machines,
3088    we compute the total number of bits in the anonymous object, subtract
3089    off the bit count from the MSB of the object to the MSB of the
3090    bitfield, then the size of the bitfield, which leaves the LSB discard
3091    count.  For little endian machines, the discard count is simply the
3092    number of bits from the LSB of the anonymous object to the LSB of the
3093    bitfield.
3094 
3095    If the field is signed, we also do sign extension.  */
3096 
3097 static LONGEST
3098 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3099 		     LONGEST bitpos, LONGEST bitsize)
3100 {
3101   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3102   ULONGEST val;
3103   ULONGEST valmask;
3104   int lsbcount;
3105   LONGEST bytes_read;
3106   LONGEST read_offset;
3107 
3108   /* Read the minimum number of bytes required; there may not be
3109      enough bytes to read an entire ULONGEST.  */
3110   field_type = check_typedef (field_type);
3111   if (bitsize)
3112     bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3113   else
3114     {
3115       bytes_read = TYPE_LENGTH (field_type);
3116       bitsize = 8 * bytes_read;
3117     }
3118 
3119   read_offset = bitpos / 8;
3120 
3121   val = extract_unsigned_integer (valaddr + read_offset,
3122 				  bytes_read, byte_order);
3123 
3124   /* Extract bits.  See comment above.  */
3125 
3126   if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3127     lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3128   else
3129     lsbcount = (bitpos % 8);
3130   val >>= lsbcount;
3131 
3132   /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3133      If the field is signed, and is negative, then sign extend.  */
3134 
3135   if (bitsize < 8 * (int) sizeof (val))
3136     {
3137       valmask = (((ULONGEST) 1) << bitsize) - 1;
3138       val &= valmask;
3139       if (!TYPE_UNSIGNED (field_type))
3140 	{
3141 	  if (val & (valmask ^ (valmask >> 1)))
3142 	    {
3143 	      val |= ~valmask;
3144 	    }
3145 	}
3146     }
3147 
3148   return val;
3149 }
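
/* Worked example (bits-little-endian target, illustrative): for BITPOS == 10
   and BITSIZE == 3, bytes_read is ((10 % 8) + 3 + 7) / 8 == 1 and
   read_offset is 10 / 8 == 1, so a single byte is read at VALADDR + 1 and
   shifted right by lsbcount == 2.  If that byte is 0x1c, the three
   extracted bits are 0b111: 7 for an unsigned field, -1 after sign
   extension for a signed one.  */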
3150 
3151 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3152    VALADDR + EMBEDDED_OFFSET.  VALADDR points to the contents of
3153    ORIGINAL_VALUE, which must not be NULL.  See
3154    unpack_value_bits_as_long for more details.  */
3155 
3156 int
3157 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3158 			    LONGEST embedded_offset, int fieldno,
3159 			    const struct value *val, LONGEST *result)
3160 {
3161   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3162   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3163   struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3164   int bit_offset;
3165 
3166   gdb_assert (val != NULL);
3167 
3168   bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3169   if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3170       || !value_bits_available (val, bit_offset, bitsize))
3171     return 0;
3172 
3173   *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3174 				 bitpos, bitsize);
3175   return 1;
3176 }
3177 
3178 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3179    object at VALADDR.  See unpack_bits_as_long for more details.  */
3180 
3181 LONGEST
3182 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3183 {
3184   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3185   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3186   struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3187 
3188   return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3189 }
3190 
3191 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3192    VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3193    the contents in DEST_VAL, zero or sign extending if the type of
3194    DEST_VAL is wider than BITSIZE.  VALADDR points to the contents of
3195    VAL.  If the VAL's contents required to extract the bitfield from
3196    are unavailable/optimized out, DEST_VAL is correspondingly
3197    marked unavailable/optimized out.  */
3198 
3199 void
3200 unpack_value_bitfield (struct value *dest_val,
3201 		       LONGEST bitpos, LONGEST bitsize,
3202 		       const gdb_byte *valaddr, LONGEST embedded_offset,
3203 		       const struct value *val)
3204 {
3205   enum bfd_endian byte_order;
3206   int src_bit_offset;
3207   int dst_bit_offset;
3208   struct type *field_type = value_type (dest_val);
3209 
3210   byte_order = gdbarch_byte_order (get_type_arch (field_type));
3211 
3212   /* First, unpack and sign extend the bitfield as if it was wholly
3213      valid.  Optimized out/unavailable bits are read as zero, but
3214      that's OK, as they'll end up marked below.  If the VAL is
3215      wholly-invalid we may have skipped allocating its contents,
3216      though.  See allocate_optimized_out_value.  */
3217   if (valaddr != NULL)
3218     {
3219       LONGEST num;
3220 
3221       num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3222 				 bitpos, bitsize);
3223       store_signed_integer (value_contents_raw (dest_val),
3224 			    TYPE_LENGTH (field_type), byte_order, num);
3225     }
3226 
3227   /* Now copy the optimized out / unavailability ranges to the right
3228      bits.  */
3229   src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3230   if (byte_order == BFD_ENDIAN_BIG)
3231     dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3232   else
3233     dst_bit_offset = 0;
3234   value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3235 			      val, src_bit_offset, bitsize);
3236 }
3237 
3238 /* Return a new value with type TYPE, which is FIELDNO field of the
3239    object at VALADDR + EMBEDDEDOFFSET.  VALADDR points to the contents
3240    of VAL.  If the VAL's contents required to extract the bitfield
3241    from are unavailable/optimized out, the new value is
3242    correspondingly marked unavailable/optimized out.  */
3243 
3244 struct value *
3245 value_field_bitfield (struct type *type, int fieldno,
3246 		      const gdb_byte *valaddr,
3247 		      LONGEST embedded_offset, const struct value *val)
3248 {
3249   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3250   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3251   struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3252 
3253   unpack_value_bitfield (res_val, bitpos, bitsize,
3254 			 valaddr, embedded_offset, val);
3255 
3256   return res_val;
3257 }
3258 
3259 /* Modify the value of a bitfield.  ADDR points to a block of memory in
3260    target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
3261    is the desired value of the field, in host byte order.  BITPOS and BITSIZE
3262    indicate which bits (in target bit order) comprise the bitfield.
3263    Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3264    0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */
3265 
3266 void
3267 modify_field (struct type *type, gdb_byte *addr,
3268 	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3269 {
3270   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3271   ULONGEST oword;
3272   ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3273   LONGEST bytesize;
3274 
3275   /* Normalize BITPOS.  */
3276   addr += bitpos / 8;
3277   bitpos %= 8;
3278 
3279   /* If a negative fieldval fits in the field in question, chop
3280      off the sign extension bits.  */
3281   if ((~fieldval & ~(mask >> 1)) == 0)
3282     fieldval &= mask;
3283 
3284   /* Warn if value is too big to fit in the field in question.  */
3285   if (0 != (fieldval & ~mask))
3286     {
3287       /* FIXME: would like to include fieldval in the message, but
3288          we don't have a sprintf_longest.  */
3289       warning (_("Value does not fit in %s bits."), plongest (bitsize));
3290 
3291       /* Truncate it, otherwise adjoining fields may be corrupted.  */
3292       fieldval &= mask;
3293     }
3294 
3295   /* Ensure no bytes outside of the modified ones get accessed as it may cause
3296      false valgrind reports.  */
3297 
3298   bytesize = (bitpos + bitsize + 7) / 8;
3299   oword = extract_unsigned_integer (addr, bytesize, byte_order);
3300 
3301   /* Shifting for bit field depends on endianness of the target machine.  */
3302   if (gdbarch_bits_big_endian (get_type_arch (type)))
3303     bitpos = bytesize * 8 - bitpos - bitsize;
3304 
3305   oword &= ~(mask << bitpos);
3306   oword |= fieldval << bitpos;
3307 
3308   store_unsigned_integer (addr, bytesize, byte_order, oword);
3309 }
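
/* Worked example (little-endian target, illustrative): writing FIELDVAL == 5
   into a field with BITPOS == 12 and BITSIZE == 4 first normalizes to
   ADDR + 1 with BITPOS == 4, computes mask == 0xf and bytesize == 1, and
   then rewrites only that one byte, leaving its low four bits and all
   neighbouring bytes untouched.  */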
3310 
3311 /* Pack NUM into BUF using a target format of TYPE.  */
3312 
3313 void
3314 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3315 {
3316   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3317   LONGEST len;
3318 
3319   type = check_typedef (type);
3320   len = TYPE_LENGTH (type);
3321 
3322   switch (TYPE_CODE (type))
3323     {
3324     case TYPE_CODE_INT:
3325     case TYPE_CODE_CHAR:
3326     case TYPE_CODE_ENUM:
3327     case TYPE_CODE_FLAGS:
3328     case TYPE_CODE_BOOL:
3329     case TYPE_CODE_RANGE:
3330     case TYPE_CODE_MEMBERPTR:
3331       store_signed_integer (buf, len, byte_order, num);
3332       break;
3333 
3334     case TYPE_CODE_REF:
3335     case TYPE_CODE_RVALUE_REF:
3336     case TYPE_CODE_PTR:
3337       store_typed_address (buf, type, (CORE_ADDR) num);
3338       break;
3339 
3340     case TYPE_CODE_FLT:
3341     case TYPE_CODE_DECFLOAT:
3342       target_float_from_longest (buf, type, num);
3343       break;
3344 
3345     default:
3346       error (_("Unexpected type (%d) encountered for integer constant."),
3347 	     TYPE_CODE (type));
3348     }
3349 }
3350 
3351 
3352 /* Pack NUM into BUF using a target format of TYPE.  */
3353 
3354 static void
3355 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3356 {
3357   LONGEST len;
3358   enum bfd_endian byte_order;
3359 
3360   type = check_typedef (type);
3361   len = TYPE_LENGTH (type);
3362   byte_order = gdbarch_byte_order (get_type_arch (type));
3363 
3364   switch (TYPE_CODE (type))
3365     {
3366     case TYPE_CODE_INT:
3367     case TYPE_CODE_CHAR:
3368     case TYPE_CODE_ENUM:
3369     case TYPE_CODE_FLAGS:
3370     case TYPE_CODE_BOOL:
3371     case TYPE_CODE_RANGE:
3372     case TYPE_CODE_MEMBERPTR:
3373       store_unsigned_integer (buf, len, byte_order, num);
3374       break;
3375 
3376     case TYPE_CODE_REF:
3377     case TYPE_CODE_RVALUE_REF:
3378     case TYPE_CODE_PTR:
3379       store_typed_address (buf, type, (CORE_ADDR) num);
3380       break;
3381 
3382     case TYPE_CODE_FLT:
3383     case TYPE_CODE_DECFLOAT:
3384       target_float_from_ulongest (buf, type, num);
3385       break;
3386 
3387     default:
3388       error (_("Unexpected type (%d) encountered "
3389 	       "for unsigned integer constant."),
3390 	     TYPE_CODE (type));
3391     }
3392 }
3393 
3394 
3395 /* Convert C numbers into newly allocated values.  */
3396 
3397 struct value *
3398 value_from_longest (struct type *type, LONGEST num)
3399 {
3400   struct value *val = allocate_value (type);
3401 
3402   pack_long (value_contents_raw (val), type, num);
3403   return val;
3404 }
3405 
3406 
3407 /* Convert C unsigned numbers into newly allocated values.  */
3408 
3409 struct value *
3410 value_from_ulongest (struct type *type, ULONGEST num)
3411 {
3412   struct value *val = allocate_value (type);
3413 
3414   pack_unsigned_long (value_contents_raw (val), type, num);
3415 
3416   return val;
3417 }
3418 
3419 
3420 /* Create a value representing a pointer of type TYPE to the address
3421    ADDR.  */
3422 
3423 struct value *
3424 value_from_pointer (struct type *type, CORE_ADDR addr)
3425 {
3426   struct value *val = allocate_value (type);
3427 
3428   store_typed_address (value_contents_raw (val),
3429 		       check_typedef (type), addr);
3430   return val;
3431 }
3432 
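/* Typical usage sketch for the three constructors above (GDBARCH is
   assumed to come from the current context):

     struct value *forty_two
       = value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     struct value *ptr
       = value_from_pointer (builtin_type (gdbarch)->builtin_data_ptr,
                             0x1000);

   Each result is a freshly allocated not_lval value whose contents
   are already packed in the target's format.  */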
3433 
3434 /* Create a value of type TYPE whose contents come from VALADDR, if it
3435    is non-null, and whose memory address (in the inferior) is
3436    ADDRESS.  The type of the created value may differ from the passed
3437    type TYPE.  Make sure to retrieve the value's new type after this call.
3438    Note that TYPE is not passed through resolve_dynamic_type; this is
3439    a special API intended for use only by Ada.  */
3440 
3441 struct value *
3442 value_from_contents_and_address_unresolved (struct type *type,
3443 					    const gdb_byte *valaddr,
3444 					    CORE_ADDR address)
3445 {
3446   struct value *v;
3447 
3448   if (valaddr == NULL)
3449     v = allocate_value_lazy (type);
3450   else
3451     v = value_from_contents (type, valaddr);
3452   VALUE_LVAL (v) = lval_memory;
3453   set_value_address (v, address);
3454   return v;
3455 }
3456 
3457 /* Create a value of type TYPE whose contents come from VALADDR, if it
3458    is non-null, and whose memory address (in the inferior) is
3459    ADDRESS.  The type of the created value may differ from the passed
3460    type TYPE.  Make sure to retrieve the value's new type after this call.  */
3461 
3462 struct value *
3463 value_from_contents_and_address (struct type *type,
3464 				 const gdb_byte *valaddr,
3465 				 CORE_ADDR address)
3466 {
3467   struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3468   struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3469   struct value *v;
3470 
3471   if (valaddr == NULL)
3472     v = allocate_value_lazy (resolved_type);
3473   else
3474     v = value_from_contents (resolved_type, valaddr);
3475   if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3476       && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3477     address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3478   VALUE_LVAL (v) = lval_memory;
3479   set_value_address (v, address);
3480   return v;
3481 }
3482 
3483 /* Create a value of type TYPE holding the contents CONTENTS.
3484    The new value is `not_lval'.  */
3485 
3486 struct value *
3487 value_from_contents (struct type *type, const gdb_byte *contents)
3488 {
3489   struct value *result;
3490 
3491   result = allocate_value (type);
3492   memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3493   return result;
3494 }
3495 
3496 /* Extract a value from the value history.  Input will be of the form
3497    $digits or $$digits.  See block comment above 'write_dollar_variable'
3498    for details.  */
3499 
3500 struct value *
3501 value_from_history_ref (const char *h, const char **endp)
3502 {
3503   int index, len;
3504 
3505   if (h[0] == '$')
3506     len = 1;
3507   else
3508     return NULL;
3509 
3510   if (h[1] == '$')
3511     len = 2;
3512 
3513   /* Find length of numeral string.  */
3514   for (; isdigit (h[len]); len++)
3515     ;
3516 
3517   /* Make sure numeral string is not part of an identifier.  */
3518   if (h[len] == '_' || isalpha (h[len]))
3519     return NULL;
3520 
3521   /* Now collect the index value.  */
3522   if (h[1] == '$')
3523     {
3524       if (len == 2)
3525 	{
3526 	  /* For some bizarre reason, "$$" is equivalent to "$$1",
3527 	     rather than to "$$0" as it ought to be!  */
3528 	  index = -1;
3529 	  *endp += len;
3530 	}
3531       else
3532 	{
3533 	  char *local_end;
3534 
3535 	  index = -strtol (&h[2], &local_end, 10);
3536 	  *endp = local_end;
3537 	}
3538     }
3539   else
3540     {
3541       if (len == 1)
3542 	{
3543 	  /* "$" is equivalent to "$0".  */
3544 	  index = 0;
3545 	  *endp += len;
3546 	}
3547       else
3548 	{
3549 	  char *local_end;
3550 
3551 	  index = strtol (&h[1], &local_end, 10);
3552 	  *endp = local_end;
3553 	}
3554     }
3555 
3556   return access_value_history (index);
3557 }
3558 
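/* Worked examples for the parser above (illustrative only):

     "$"    ->  index 0    (the last history element)
     "$7"   ->  index 7    (absolute element number 7)
     "$$"   ->  index -1   (one back from the last; see the "$$1"
                            comment above)
     "$$3"  ->  index -3   (three back from the last)

   In each case *ENDP is advanced past the characters consumed and the
   index is handed to access_value_history.  */
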
3559 /* Get the component value (offset by OFFSET bytes) of a struct or
3560    union WHOLE.  Component's type is TYPE.  */
3561 
3562 struct value *
3563 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3564 {
3565   struct value *v;
3566 
3567   if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3568     v = allocate_value_lazy (type);
3569   else
3570     {
3571       v = allocate_value (type);
3572       value_contents_copy (v, value_embedded_offset (v),
3573 			   whole, value_embedded_offset (whole) + offset,
3574 			   type_length_units (type));
3575     }
3576   v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3577   set_value_component_location (v, whole);
3578 
3579   return v;
3580 }
3581 
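/* Worked example for the offset arithmetic above (illustrative): if
   WHOLE is an lval_memory struct with value_offset (whole) == 4 and
   value_embedded_offset (whole) == 0, then a component at byte
   OFFSET 8 gets v->offset == 4 + 8 + 0 == 12, i.e. it reads 12 bytes
   past the address recorded in WHOLE's location.  */
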
3582 struct value *
3583 coerce_ref_if_computed (const struct value *arg)
3584 {
3585   const struct lval_funcs *funcs;
3586 
3587   if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3588     return NULL;
3589 
3590   if (value_lval_const (arg) != lval_computed)
3591     return NULL;
3592 
3593   funcs = value_computed_funcs (arg);
3594   if (funcs->coerce_ref == NULL)
3595     return NULL;
3596 
3597   return funcs->coerce_ref (arg);
3598 }
3599 
3600 /* See value.h for a description.  */
3601 
3602 struct value *
3603 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3604 			      const struct type *original_type,
3605 			      const struct value *original_value)
3606 {
3607   /* Re-adjust type.  */
3608   deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3609 
3610   /* Add embedding info.  */
3611   set_value_enclosing_type (value, enc_type);
3612   set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3613 
3614   /* We may be pointing to an object of some derived type.  */
3615   return value_full_object (value, NULL, 0, 0, 0);
3616 }
3617 
3618 struct value *
3619 coerce_ref (struct value *arg)
3620 {
3621   struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3622   struct value *retval;
3623   struct type *enc_type;
3624 
3625   retval = coerce_ref_if_computed (arg);
3626   if (retval)
3627     return retval;
3628 
3629   if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3630     return arg;
3631 
3632   enc_type = check_typedef (value_enclosing_type (arg));
3633   enc_type = TYPE_TARGET_TYPE (enc_type);
3634 
3635   retval = value_at_lazy (enc_type,
3636                           unpack_pointer (value_type (arg),
3637                                           value_contents (arg)));
3638   enc_type = value_type (retval);
3639   return readjust_indirect_value_type (retval, enc_type,
3640                                        value_type_arg_tmp, arg);
3641 }
3642 
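/* Sketch of coerce_ref applied to a C++ "int &" value (illustrative):
   the reference is represented as a target address, so unpack_pointer
   extracts the referent's address, value_at_lazy builds a lazy value
   of the reference's target type at that address, and
   readjust_indirect_value_type restores the enclosing type and
   embedded offset carried by the original reference, letting
   value_full_object report a more-derived run-time type when one is
   known.  */
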
3643 struct value *
3644 coerce_array (struct value *arg)
3645 {
3646   struct type *type;
3647 
3648   arg = coerce_ref (arg);
3649   type = check_typedef (value_type (arg));
3650 
3651   switch (TYPE_CODE (type))
3652     {
3653     case TYPE_CODE_ARRAY:
3654       if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3655 	arg = value_coerce_array (arg);
3656       break;
3657     case TYPE_CODE_FUNC:
3658       arg = value_coerce_function (arg);
3659       break;
3660     }
3661   return arg;
3662 }
3663 
3664 
3665 /* Return the return value convention that will be used for the
3666    specified type.  */
3667 
3668 enum return_value_convention
3669 struct_return_convention (struct gdbarch *gdbarch,
3670 			  struct value *function, struct type *value_type)
3671 {
3672   enum type_code code = TYPE_CODE (value_type);
3673 
3674   if (code == TYPE_CODE_ERROR)
3675     error (_("Function return type unknown."));
3676 
3677   /* Probe the architecture for the return-value convention.  */
3678   return gdbarch_return_value (gdbarch, function, value_type,
3679 			       NULL, NULL, NULL);
3680 }
3681 
3682 /* Return true if the function returning the specified type is using
3683    the convention of returning structures in memory (passing in the
3684    address as a hidden first parameter).  */
3685 
3686 int
3687 using_struct_return (struct gdbarch *gdbarch,
3688 		     struct value *function, struct type *value_type)
3689 {
3690   if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3691     /* A void return value is never in memory.  See also corresponding
3692        code in "print_return_value".  */
3693     return 0;
3694 
3695   return (struct_return_convention (gdbarch, function, value_type)
3696 	  != RETURN_VALUE_REGISTER_CONVENTION);
3697 }
3698 
3699 /* Set the initialized field in a value struct.  */
3700 
3701 void
3702 set_value_initialized (struct value *val, int status)
3703 {
3704   val->initialized = status;
3705 }
3706 
3707 /* Return the initialized field in a value struct.  */
3708 
3709 int
3710 value_initialized (const struct value *val)
3711 {
3712   return val->initialized;
3713 }
3714 
3715 /* Helper for value_fetch_lazy when the value is a bitfield.  */
3716 
3717 static void
3718 value_fetch_lazy_bitfield (struct value *val)
3719 {
3720   gdb_assert (value_bitsize (val) != 0);
3721 
3722   /* To read a lazy bitfield, read the entire enclosing value.  This
3723      prevents reading the same block of (possibly volatile) memory once
3724      per bitfield.  It would be even better to read only the containing
3725      word, but we have no way to record that just specific bits of a
3726      value have been fetched.  */
3727   struct value *parent = value_parent (val);
3728 
3729   if (value_lazy (parent))
3730     value_fetch_lazy (parent);
3731 
3732   unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
3733 			 value_contents_for_printing (parent),
3734 			 value_offset (val), parent);
3735 }
3736 
3737 /* Helper for value_fetch_lazy when the value is in memory.  */
3738 
3739 static void
3740 value_fetch_lazy_memory (struct value *val)
3741 {
3742   gdb_assert (VALUE_LVAL (val) == lval_memory);
3743 
3744   CORE_ADDR addr = value_address (val);
3745   struct type *type = check_typedef (value_enclosing_type (val));
3746 
3747   if (TYPE_LENGTH (type))
3748     read_value_memory (val, 0, value_stack (val),
3749 		       addr, value_contents_all_raw (val),
3750 		       type_length_units (type));
3751 }
3752 
3753 /* Helper for value_fetch_lazy when the value is in a register.  */
3754 
3755 static void
3756 value_fetch_lazy_register (struct value *val)
3757 {
3758   struct frame_info *next_frame;
3759   int regnum;
3760   struct type *type = check_typedef (value_type (val));
3761   struct value *new_val = val, *mark = value_mark ();
3762 
3763   /* Offsets are not supported here; lazy register values must
3764      refer to the entire register.  */
3765   gdb_assert (value_offset (val) == 0);
3766 
3767   while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3768     {
3769       struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3770 
3771       next_frame = frame_find_by_id (next_frame_id);
3772       regnum = VALUE_REGNUM (new_val);
3773 
3774       gdb_assert (next_frame != NULL);
3775 
3776       /* Convertible register routines are used for multi-register
3777 	 values and for interpretation in different types
3778 	 (e.g. float or int from a double register).  Lazy
3779 	 register values should have the register's natural type,
3780 	 so they do not apply.  */
3781       gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3782 					       regnum, type));
3783 
3784       /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3785 	 Since a "->next" operation was performed when setting
3786 	 this field, we do not need to perform a "next" operation
3787 	 again when unwinding the register.  That's why
3788 	 frame_unwind_register_value() is called here instead of
3789 	 get_frame_register_value().  */
3790       new_val = frame_unwind_register_value (next_frame, regnum);
3791 
3792       /* If we get another lazy lval_register value, it means the
3793 	 register is found by reading it from NEXT_FRAME's next frame.
3794 	 frame_unwind_register_value should never return a value with
3795 	 the frame id pointing to NEXT_FRAME.  If it does, it means we
3796 	 either have two consecutive frames with the same frame id
3797 	 in the frame chain, or some code is trying to unwind
3798 	 behind get_prev_frame's back (e.g., a frame unwind
3799 	 sniffer trying to unwind), bypassing its validations.  In
3800 	 any case, it should always be an internal error to end up
3801 	 in this situation.  */
3802       if (VALUE_LVAL (new_val) == lval_register
3803 	  && value_lazy (new_val)
3804 	  && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3805 	internal_error (__FILE__, __LINE__,
3806 			_("infinite loop while fetching a register"));
3807     }
3808 
3809   /* If it's still lazy (for instance, a saved register on the
3810      stack), fetch it.  */
3811   if (value_lazy (new_val))
3812     value_fetch_lazy (new_val);
3813 
3814   /* Copy the contents and the unavailability/optimized-out
3815      meta-data from NEW_VAL to VAL.  */
3816   set_value_lazy (val, 0);
3817   value_contents_copy (val, value_embedded_offset (val),
3818 		       new_val, value_embedded_offset (new_val),
3819 		       type_length_units (type));
3820 
3821   if (frame_debug)
3822     {
3823       struct gdbarch *gdbarch;
3824       struct frame_info *frame;
3825       /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
3826 	 so that the frame level will be shown correctly.  */
3827       frame = frame_find_by_id (VALUE_FRAME_ID (val));
3828       regnum = VALUE_REGNUM (val);
3829       gdbarch = get_frame_arch (frame);
3830 
3831       fprintf_unfiltered (gdb_stdlog,
3832 			  "{ value_fetch_lazy "
3833 			  "(frame=%d,regnum=%d(%s),...) ",
3834 			  frame_relative_level (frame), regnum,
3835 			  user_reg_map_regnum_to_name (gdbarch, regnum));
3836 
3837       fprintf_unfiltered (gdb_stdlog, "->");
3838       if (value_optimized_out (new_val))
3839 	{
3840 	  fprintf_unfiltered (gdb_stdlog, " ");
3841 	  val_print_optimized_out (new_val, gdb_stdlog);
3842 	}
3843       else
3844 	{
3845 	  int i;
3846 	  const gdb_byte *buf = value_contents (new_val);
3847 
3848 	  if (VALUE_LVAL (new_val) == lval_register)
3849 	    fprintf_unfiltered (gdb_stdlog, " register=%d",
3850 				VALUE_REGNUM (new_val));
3851 	  else if (VALUE_LVAL (new_val) == lval_memory)
3852 	    fprintf_unfiltered (gdb_stdlog, " address=%s",
3853 				paddress (gdbarch,
3854 					  value_address (new_val)));
3855 	  else
3856 	    fprintf_unfiltered (gdb_stdlog, " computed");
3857 
3858 	  fprintf_unfiltered (gdb_stdlog, " bytes=");
3859 	  fprintf_unfiltered (gdb_stdlog, "[");
3860 	  for (i = 0; i < register_size (gdbarch, regnum); i++)
3861 	    fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3862 	  fprintf_unfiltered (gdb_stdlog, "]");
3863 	}
3864 
3865       fprintf_unfiltered (gdb_stdlog, " }\n");
3866     }
3867 
3868   /* Dispose of the intermediate values.  This prevents
3869      watchpoints from trying to watch the saved frame pointer.  */
3870   value_free_to_mark (mark);
3871 }
3872 
3873 /* Load the actual content of a lazy value.  Fetch the data from the
3874    user's process and clear the lazy flag to indicate that the data in
3875    the buffer is valid.
3876 
3877    If the value is zero-length, we avoid calling read_memory, which
3878    would abort.  We mark the value as fetched anyway -- all 0 bytes of
3879    it.  */
3880 
3881 void
3882 value_fetch_lazy (struct value *val)
3883 {
3884   gdb_assert (value_lazy (val));
3885   allocate_value_contents (val);
3886   /* A value is either lazy, or fully fetched.  The
3887      availability/validity is only established as we try to fetch a
3888      value.  */
3889   gdb_assert (val->optimized_out.empty ());
3890   gdb_assert (val->unavailable.empty ());
3891   if (value_bitsize (val))
3892     value_fetch_lazy_bitfield (val);
3893   else if (VALUE_LVAL (val) == lval_memory)
3894     value_fetch_lazy_memory (val);
3895   else if (VALUE_LVAL (val) == lval_register)
3896     value_fetch_lazy_register (val);
3897   else if (VALUE_LVAL (val) == lval_computed
3898 	   && value_computed_funcs (val)->read != NULL)
3899     value_computed_funcs (val)->read (val);
3900   else
3901     internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3902 
3903   set_value_lazy (val, 0);
3904 }
3905 
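/* Typical flow (illustrative sketch): value_at_lazy records only a
   type and an address; the first caller that needs the bytes, for
   instance value_contents, notices value_lazy and ends up here, which
   dispatches to the bitfield / memory / register / computed helper and
   clears the lazy flag:

     struct value *v = value_at_lazy (int32_type, addr);
     const gdb_byte *bytes = value_contents (v);

   No target access happens until the second line, where
   value_fetch_lazy_memory reads the object.  INT32_TYPE and ADDR stand
   for a type and an inferior address obtained elsewhere.  */
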
3906 /* Implementation of the convenience function $_isvoid.  */
3907 
3908 static struct value *
3909 isvoid_internal_fn (struct gdbarch *gdbarch,
3910 		    const struct language_defn *language,
3911 		    void *cookie, int argc, struct value **argv)
3912 {
3913   int ret;
3914 
3915   if (argc != 1)
3916     error (_("You must provide one argument for $_isvoid."));
3917 
3918   ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3919 
3920   return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3921 }
3922 
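/* Illustrative session for the function above (an unset convenience
   variable has void type, a numeric expression does not):

     (gdb) print $_isvoid ($unset_variable)
     $1 = 1
     (gdb) print $_isvoid (3 + 4)
     $2 = 0

   The result is packed into a plain int via value_from_longest.  */
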
3923 #if GDB_SELF_TEST
3924 namespace selftests
3925 {
3926 
3927 /* Test the ranges_contain function.  */
3928 
3929 static void
3930 test_ranges_contain ()
3931 {
3932   std::vector<range> ranges;
3933   range r;
3934 
3935   /* [10, 14] */
3936   r.offset = 10;
3937   r.length = 5;
3938   ranges.push_back (r);
3939 
3940   /* [20, 24] */
3941   r.offset = 20;
3942   r.length = 5;
3943   ranges.push_back (r);
3944 
3945   /* [2, 6] */
3946   SELF_CHECK (!ranges_contain (ranges, 2, 5));
3947   /* [9, 13] */
3948   SELF_CHECK (ranges_contain (ranges, 9, 5));
3949   /* [10, 11] */
3950   SELF_CHECK (ranges_contain (ranges, 10, 2));
3951   /* [10, 14] */
3952   SELF_CHECK (ranges_contain (ranges, 10, 5));
3953   /* [13, 18] */
3954   SELF_CHECK (ranges_contain (ranges, 13, 6));
3955   /* [14, 18] */
3956   SELF_CHECK (ranges_contain (ranges, 14, 5));
3957   /* [15, 18] */
3958   SELF_CHECK (!ranges_contain (ranges, 15, 4));
3959   /* [16, 19] */
3960   SELF_CHECK (!ranges_contain (ranges, 16, 4));
3961   /* [16, 21] */
3962   SELF_CHECK (ranges_contain (ranges, 16, 6));
3963   /* [21, 21] */
3964   SELF_CHECK (ranges_contain (ranges, 21, 1));
3965   /* [21, 25] */
3966   SELF_CHECK (ranges_contain (ranges, 21, 5));
3967   /* [26, 28] */
3968   SELF_CHECK (!ranges_contain (ranges, 26, 3));
3969 }
3970 
3971 /* Check that RANGES contains the same ranges as EXPECTED.  */
3972 
3973 static bool
3974 check_ranges_vector (gdb::array_view<const range> ranges,
3975 		     gdb::array_view<const range> expected)
3976 {
3977   return ranges == expected;
3978 }
3979 
3980 /* Test the insert_into_bit_range_vector function.  */
3981 
3982 static void
3983 test_insert_into_bit_range_vector ()
3984 {
3985   std::vector<range> ranges;
3986 
3987   /* [10, 14] */
3988   {
3989     insert_into_bit_range_vector (&ranges, 10, 5);
3990     static const range expected[] = {
3991       {10, 5}
3992     };
3993     SELF_CHECK (check_ranges_vector (ranges, expected));
3994   }
3995 
3996   /* [10, 14] */
3997   {
3998     insert_into_bit_range_vector (&ranges, 11, 4);
3999     static const range expected = {10, 5};
4000     SELF_CHECK (check_ranges_vector (ranges, expected));
4001   }
4002 
4003   /* [10, 14] [20, 24] */
4004   {
4005     insert_into_bit_range_vector (&ranges, 20, 5);
4006     static const range expected[] = {
4007       {10, 5},
4008       {20, 5},
4009     };
4010     SELF_CHECK (check_ranges_vector (ranges, expected));
4011   }
4012 
4013   /* [10, 14] [17, 24] */
4014   {
4015     insert_into_bit_range_vector (&ranges, 17, 5);
4016     static const range expected[] = {
4017       {10, 5},
4018       {17, 8},
4019     };
4020     SELF_CHECK (check_ranges_vector (ranges, expected));
4021   }
4022 
4023   /* [2, 8] [10, 14] [17, 24] */
4024   {
4025     insert_into_bit_range_vector (&ranges, 2, 7);
4026     static const range expected[] = {
4027       {2, 7},
4028       {10, 5},
4029       {17, 8},
4030     };
4031     SELF_CHECK (check_ranges_vector (ranges, expected));
4032   }
4033 
4034   /* [2, 14] [17, 24] */
4035   {
4036     insert_into_bit_range_vector (&ranges, 9, 1);
4037     static const range expected[] = {
4038       {2, 13},
4039       {17, 8},
4040     };
4041     SELF_CHECK (check_ranges_vector (ranges, expected));
4042   }
4043 
4044   /* [2, 14] [17, 24] */
4045   {
4046     insert_into_bit_range_vector (&ranges, 9, 1);
4047     static const range expected[] = {
4048       {2, 13},
4049       {17, 8},
4050     };
4051     SELF_CHECK (check_ranges_vector (ranges, expected));
4052   }
4053 
4054   /* [2, 33] */
4055   {
4056     insert_into_bit_range_vector (&ranges, 4, 30);
4057     static const range expected = {2, 32};
4058     SELF_CHECK (check_ranges_vector (ranges, expected));
4059   }
4060 }
4061 
4062 } /* namespace selftests */
4063 #endif /* GDB_SELF_TEST */
4064 
4065 void
4066 _initialize_values (void)
4067 {
4068   add_cmd ("convenience", no_class, show_convenience, _("\
4069 Debugger convenience (\"$foo\") variables and functions.\n\
4070 Convenience variables are created when you assign them values;\n\
4071 thus, \"set $foo=1\" gives \"$foo\" the value 1.  Values may be any type.\n\
4072 \n\
4073 A few convenience variables are given values automatically:\n\
4074 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4075 \"$__\" holds the contents of the last address examined with \"x\"."
4076 #ifdef HAVE_PYTHON
4077 "\n\n\
4078 Convenience functions are defined via the Python API."
4079 #endif
4080 	   ), &showlist);
4081   add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4082 
4083   add_cmd ("values", no_set_class, show_values, _("\
4084 Elements of value history around item number IDX (or last ten)."),
4085 	   &showlist);
4086 
4087   add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4088 Initialize a convenience variable if necessary.\n\
4089 init-if-undefined VARIABLE = EXPRESSION\n\
4090 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4091 exist or does not contain a value.  The EXPRESSION is not evaluated if the\n\
4092 VARIABLE is already initialized."));
4093 
4094   add_prefix_cmd ("function", no_class, function_command, _("\
4095 Placeholder command for showing help on convenience functions."),
4096 		  &functionlist, "function ", 0, &cmdlist);
4097 
4098   add_internal_function ("_isvoid", _("\
4099 Check whether an expression is void.\n\
4100 Usage: $_isvoid (expression)\n\
4101 Return 1 if the expression is void, 0 otherwise."),
4102 			 isvoid_internal_fn, NULL);
4103 
4104   add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4105 				       class_support, &max_value_size, _("\
4106 Set the maximum size of a value gdb will load from the inferior."), _("\
4107 Show the maximum size of a value gdb will load from the inferior."), _("\
4108 Use this to control the maximum size, in bytes, of a value that gdb\n\
4109 will load from the inferior.  Setting this value to 'unlimited'\n\
4110 disables checking.\n\
4111 Setting this does not invalidate already allocated values; it only\n\
4112 prevents future values larger than this size from being allocated."),
4113 			    set_max_value_size,
4114 			    show_max_value_size,
4115 			    &setlist, &showlist);
4116 #if GDB_SELF_TEST
4117   selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4118   selftests::register_test ("insert_into_bit_range_vector",
4119 			    selftests::test_insert_into_bit_range_vector);
4120 #endif
4121 }
4122