1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2 
3    Copyright (C) 1986-2024 Free Software Foundation, Inc.
4 
5    This file is part of GDB.
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3 of the License, or
10    (at your option) any later version.
11 
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 
20 #include "arch-utils.h"
21 #include "extract-store-integer.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "cli/cli-cmds.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include <iterator>
44 #include <map>
45 #include <utility>
46 #include <vector>
47 #include "completer.h"
48 #include "gdbsupport/selftest.h"
49 #include "gdbsupport/array-view.h"
50 #include "cli/cli-style.h"
51 #include "expop.h"
52 #include "inferior.h"
53 #include "varobj.h"
54 
55 /* Definition of a user function.  */
56 struct internal_function
57 {
58   /* The name of the function.  It is a bit odd to have this in the
59      function itself -- the user might use a differently-named
60      convenience variable to hold the function.  */
61   char *name;
62 
63   /* The handler.  */
64   internal_function_fn handler;
65 
66   /* User data for the handler.  */
67   void *cookie;
68 };
69 
70 /* Returns true if the ranges defined by [offset1, offset1+len1) and
71    [offset2, offset2+len2) overlap.  */
72 
73 static bool
74 ranges_overlap (LONGEST offset1, ULONGEST len1,
75 		LONGEST offset2, ULONGEST len2)
76 {
77   LONGEST h, l;
78 
79   l = std::max (offset1, offset2);
80   h = std::min (offset1 + len1, offset2 + len2);
81   return (l < h);
82 }
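
/* Illustrative example (editorial addition, not part of the upstream
   source): because the ranges are half-open, ranges_overlap (0, 8, 4, 8)
   is true, since [0,8) and [4,12) share [4,8), while
   ranges_overlap (0, 8, 8, 8) is false, since [0,8) and [8,16) merely
   touch at 8.  */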
83 
84 /* Returns true if RANGES contains any range that overlaps [OFFSET,
85    OFFSET+LENGTH).  */
86 
87 static bool
88 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
89 		ULONGEST length)
90 {
91   range what;
92 
93   what.offset = offset;
94   what.length = length;
95 
96   /* We keep ranges sorted by offset and coalesce overlapping and
97      contiguous ranges, so to check if a range list contains a given
98      range, we can do a binary search for the position the given range
99      would be inserted if we only considered the starting OFFSET of
100      ranges.  We call that position I.  Since we also have LENGTH to
101      care for (this is a range after all), we need to check if the
102      _previous_ range overlaps the I range.  E.g.,
103 
104 	 R
105 	 |---|
106        |---|    |---|  |------| ... |--|
107        0        1      2            N
108 
109        I=1
110 
111      In the case above, the binary search would return `I=1', meaning,
112      this OFFSET should be inserted at position 1, and the current
113      position 1 should be pushed further (and become 2).  But, `0'
114      overlaps with R.
115 
116      Then we also need to check whether the given range overlaps the
117      range at position I itself.  E.g.,
118 
119 	      R
120 	      |---|
121        |---|    |---|  |-------| ... |--|
122        0        1      2             N
123 
124        I=1
125   */
126 
127 
128   auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
129 
130   if (i > ranges.begin ())
131     {
132       const struct range &bef = *(i - 1);
133 
134       if (ranges_overlap (bef.offset, bef.length, offset, length))
135 	return true;
136     }
137 
138   if (i < ranges.end ())
139     {
140       const struct range &r = *i;
141 
142       if (ranges_overlap (r.offset, r.length, offset, length))
143 	return true;
144     }
145 
146   return false;
147 }
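
/* Worked example (editorial; offsets and lengths in bits, vector kept
   sorted and coalesced as described above): with
   RANGES = { {0, 8}, {16, 8} }, ranges_contain (ranges, 4, 8) is true
   because [4,12) overlaps [0,8), whereas ranges_contain (ranges, 8, 8)
   is false because [8,16) only touches the two stored ranges.  */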
148 
149 static struct cmd_list_element *functionlist;
150 
151 value::~value ()
152 {
153   if (this->lval () == lval_computed)
154     {
155       const struct lval_funcs *funcs = m_location.computed.funcs;
156 
157       if (funcs->free_closure)
158 	funcs->free_closure (this);
159     }
160   else if (this->lval () == lval_xcallable)
161     delete m_location.xm_worker;
162 }
163 
164 /* See value.h.  */
165 
166 struct gdbarch *
167 value::arch () const
168 {
169   return type ()->arch ();
170 }
171 
172 bool
173 value::bits_available (LONGEST offset, ULONGEST length) const
174 {
175   gdb_assert (!m_lazy);
176 
177   /* Don't pretend we have anything available there in the history beyond
178      the boundaries of the value recorded.  It's not like inferior memory
179      where there is actual stuff underneath.  */
180   ULONGEST val_len = TARGET_CHAR_BIT * enclosing_type ()->length ();
181   return !((m_in_history
182 	    && (offset < 0 || offset + length > val_len))
183 	   || ranges_contain (m_unavailable, offset, length));
184 }
185 
186 bool
187 value::bytes_available (LONGEST offset, ULONGEST length) const
188 {
189   ULONGEST sign = (1ULL << (sizeof (ULONGEST) * 8 - 1)) / TARGET_CHAR_BIT;
190   ULONGEST mask = (sign << 1) - 1;
191 
192   if (offset != ((offset & mask) ^ sign) - sign
193       || length != ((length & mask) ^ sign) - sign
194       || (length > 0 && (~offset & (offset + length - 1) & sign) != 0))
195     error (_("Integer overflow in data location calculation"));
196 
197   return bits_available (offset * TARGET_CHAR_BIT, length * TARGET_CHAR_BIT);
198 }
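
/* Editorial note (illustrative, not from the upstream source): with a
   64-bit ULONGEST and TARGET_CHAR_BIT == 8, SIGN is 2**60 and MASK is
   2**61 - 1 above, so the first two tests require OFFSET and LENGTH to
   fit in a signed 61-bit value, guaranteeing that the multiplications
   by TARGET_CHAR_BIT below cannot overflow a LONGEST, and the third
   test rejects a non-negative OFFSET whose end, OFFSET + LENGTH, would
   cross that limit.  */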
199 
200 bool
201 value::bits_any_optimized_out (int bit_offset, int bit_length) const
202 {
203   gdb_assert (!m_lazy);
204 
205   return ranges_contain (m_optimized_out, bit_offset, bit_length);
206 }
207 
208 bool
209 value::entirely_available ()
210 {
211   /* We can only tell whether the whole value is available when we try
212      to read it.  */
213   if (m_lazy)
214     fetch_lazy ();
215 
216   if (m_unavailable.empty ())
217     return true;
218   return false;
219 }
220 
221 /* See value.h.  */
222 
223 bool
224 value::entirely_covered_by_range_vector (const std::vector<range> &ranges)
225 {
226   /* We can only tell whether the whole value is optimized out /
227      unavailable when we try to read it.  */
228   if (m_lazy)
229     fetch_lazy ();
230 
231   if (ranges.size () == 1)
232     {
233       const struct range &t = ranges[0];
234 
235       if (t.offset == 0
236 	  && t.length == TARGET_CHAR_BIT * enclosing_type ()->length ())
237 	return true;
238     }
239 
240   return false;
241 }
242 
243 /* Insert into the vector pointed to by VECTORP the bit range starting at
244    OFFSET bits, and extending for the next LENGTH bits.  */
245 
246 static void
247 insert_into_bit_range_vector (std::vector<range> *vectorp,
248 			      LONGEST offset, ULONGEST length)
249 {
250   range newr;
251 
252   /* Insert the range sorted.  If there's overlap or the new range
253      would be contiguous with an existing range, merge.  */
254 
255   newr.offset = offset;
256   newr.length = length;
257 
258   /* Do a binary search for the position the given range would be
259      inserted if we only considered the starting OFFSET of ranges.
260      Call that position I.  Since we also have LENGTH to care for
261      (this is a range after all), we need to check if the _previous_
262      range overlaps the I range.  E.g., calling R the new range:
263 
264        #1 - overlaps with previous
265 
266 	   R
267 	   |-...-|
268 	 |---|     |---|  |------| ... |--|
269 	 0         1      2            N
270 
271 	 I=1
272 
273      In the case #1 above, the binary search would return `I=1',
274      meaning, this OFFSET should be inserted at position 1, and the
275      current position 1 should be pushed further (and become 2).  But,
276      note that `0' overlaps with R, so we want to merge them.
277 
278      A similar consideration needs to be taken if the new range would
279      be contiguous with the previous range:
280 
281        #2 - contiguous with previous
282 
283 	    R
284 	    |-...-|
285 	 |--|       |---|  |------| ... |--|
286 	 0          1      2            N
287 
288 	 I=1
289 
290      If there's no overlap with the previous range, as in:
291 
292        #3 - not overlapping and not contiguous
293 
294 	       R
295 	       |-...-|
296 	  |--|         |---|  |------| ... |--|
297 	  0            1      2            N
298 
299 	 I=1
300 
301      or if I is 0:
302 
303        #4 - R is the range with lowest offset
304 
305 	  R
306 	 |-...-|
307 		 |--|       |---|  |------| ... |--|
308 		 0          1      2            N
309 
310 	 I=0
311 
312      ... we just push the new range to I.
313 
314      All the 4 cases above need to consider that the new range may
315      also overlap several of the ranges that follow, or that R may be
316      contiguous with the following range, and merge.  E.g.,
317 
318        #5 - overlapping following ranges
319 
320 	  R
321 	 |------------------------|
322 		 |--|       |---|  |------| ... |--|
323 		 0          1      2            N
324 
325 	 I=0
326 
327        or:
328 
329 	    R
330 	    |-------|
331 	 |--|       |---|  |------| ... |--|
332 	 0          1      2            N
333 
334 	 I=1
335 
336   */
337 
338   auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
339   if (i > vectorp->begin ())
340     {
341       struct range &bef = *(i - 1);
342 
343       if (ranges_overlap (bef.offset, bef.length, offset, length))
344 	{
345 	  /* #1 */
346 	  LONGEST l = std::min (bef.offset, offset);
347 	  LONGEST h = std::max (bef.offset + bef.length, offset + length);
348 
349 	  bef.offset = l;
350 	  bef.length = h - l;
351 	  i--;
352 	}
353       else if (offset == bef.offset + bef.length)
354 	{
355 	  /* #2 */
356 	  bef.length += length;
357 	  i--;
358 	}
359       else
360 	{
361 	  /* #3 */
362 	  i = vectorp->insert (i, newr);
363 	}
364     }
365   else
366     {
367       /* #4 */
368       i = vectorp->insert (i, newr);
369     }
370 
371   /* Check whether the ranges following the one we've just added or
372      touched can be folded in (#5 above).  */
373   if (i != vectorp->end () && i + 1 < vectorp->end ())
374     {
375       int removed = 0;
376       auto next = i + 1;
377 
378       /* Get the range we just touched.  */
379       struct range &t = *i;
381 
382       i = next;
383       for (; i < vectorp->end (); i++)
384 	{
385 	  struct range &r = *i;
386 	  if (r.offset <= t.offset + t.length)
387 	    {
388 	      LONGEST l, h;
389 
390 	      l = std::min (t.offset, r.offset);
391 	      h = std::max (t.offset + t.length, r.offset + r.length);
392 
393 	      t.offset = l;
394 	      t.length = h - l;
395 
396 	      removed++;
397 	    }
398 	  else
399 	    {
400 	      /* If we couldn't merge this one, we won't be able to
401 		 merge following ones either, since the ranges are
402 		 always sorted by OFFSET.  */
403 	      break;
404 	    }
405 	}
406 
407       if (removed != 0)
408 	vectorp->erase (next, next + removed);
409     }
410 }
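
/* Worked example (editorial; offsets and lengths in bits): starting
   from an empty vector, inserting {4, 4} gives { {4, 4} }; inserting
   {8, 8} is contiguous with it (case #2) and coalesces to { {4, 12} };
   inserting {32, 8} is disjoint (case #3), giving { {4, 12}, {32, 8} };
   and finally inserting {0, 40} overlaps both entries (cases #1/#5)
   and collapses the vector to { {0, 40} }.  */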
411 
412 void
413 value::mark_bits_unavailable (LONGEST offset, ULONGEST length)
414 {
415   insert_into_bit_range_vector (&m_unavailable, offset, length);
416 }
417 
418 void
419 value::mark_bytes_unavailable (LONGEST offset, ULONGEST length)
420 {
421   mark_bits_unavailable (offset * TARGET_CHAR_BIT,
422 			 length * TARGET_CHAR_BIT);
423 }
424 
425 /* Find the first range in RANGES that overlaps the range defined by
426    OFFSET and LENGTH, starting at element POS in the RANGES vector.
427    Return the index into RANGES where such an overlapping range was
428    found, or -1 if none was found.  */
429 
430 static int
431 find_first_range_overlap (const std::vector<range> *ranges, int pos,
432 			  LONGEST offset, LONGEST length)
433 {
434   int i;
435 
436   for (i = pos; i < ranges->size (); i++)
437     {
438       const range &r = (*ranges)[i];
439       if (ranges_overlap (r.offset, r.length, offset, length))
440 	return i;
441     }
442 
443   return -1;
444 }
445 
446 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
447    PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
448    return non-zero.
449 
450    It must always be the case that:
451      OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
452 
453    It is assumed that memory can be accessed from:
454      PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
455    to:
456      PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
457 	    / TARGET_CHAR_BIT)  */
458 static int
459 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
460 			 const gdb_byte *ptr2, size_t offset2_bits,
461 			 size_t length_bits)
462 {
463   gdb_assert (offset1_bits % TARGET_CHAR_BIT
464 	      == offset2_bits % TARGET_CHAR_BIT);
465 
466   if (offset1_bits % TARGET_CHAR_BIT != 0)
467     {
468       size_t bits;
469       gdb_byte mask, b1, b2;
470 
471       /* The offset from the base pointers PTR1 and PTR2 is not a complete
472 	 number of bytes.  A number of bits up to either the next exact
473 	 byte boundary, or LENGTH_BITS (whichever is sooner) will be
474 	 compared.  */
475       bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
476       gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
477       mask = (1 << bits) - 1;
478 
479       if (length_bits < bits)
480 	{
481 	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
482 	  bits = length_bits;
483 	}
484 
485       /* Now load the two bytes and mask off the bits we care about.  */
486       b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
487       b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
488 
489       if (b1 != b2)
490 	return 1;
491 
492       /* Now update the length and offsets to take account of the bits
493 	 we've just compared.  */
494       length_bits -= bits;
495       offset1_bits += bits;
496       offset2_bits += bits;
497     }
498 
499   if (length_bits % TARGET_CHAR_BIT != 0)
500     {
501       size_t bits;
502       size_t o1, o2;
503       gdb_byte mask, b1, b2;
504 
505       /* The length is not an exact number of bytes.  After the previous
506 	 IF.. block, the offsets are byte aligned, or the
507 	 length is zero (in which case this code is not reached).  Compare
508 	 a number of bits at the end of the region, starting from an exact
509 	 byte boundary.  */
510       bits = length_bits % TARGET_CHAR_BIT;
511       o1 = offset1_bits + length_bits - bits;
512       o2 = offset2_bits + length_bits - bits;
513 
514       gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
515       mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
516 
517       gdb_assert (o1 % TARGET_CHAR_BIT == 0);
518       gdb_assert (o2 % TARGET_CHAR_BIT == 0);
519 
520       b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
521       b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
522 
523       if (b1 != b2)
524 	return 1;
525 
526       length_bits -= bits;
527     }
528 
529   if (length_bits > 0)
530     {
531       /* We've now taken care of any stray "bits" at the start, or end of
532 	 the region to compare, the remainder can be covered with a simple
533 	 memcmp.  */
534       gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
535       gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
536       gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
537 
538       return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
539 		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
540 		     length_bits / TARGET_CHAR_BIT);
541     }
542 
543   /* Length is zero, regions match.  */
544   return 0;
545 }
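
/* Worked example (editorial, assuming TARGET_CHAR_BIT == 8): a call
   with OFFSET1_BITS == 4, OFFSET2_BITS == 20 and LENGTH_BITS == 12
   first compares the four bits selected by mask 0x0f in ptr1[0] and
   ptr2[2] (the partial leading byte), which leaves 8 byte-aligned
   bits handled by the final memcmp of ptr1[1] against ptr2[3].  Both
   offsets must share the same remainder modulo TARGET_CHAR_BIT, as
   asserted above.  */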
546 
547 /* Helper struct for find_first_range_overlap_and_match and
548    value_contents_bits_eq.  Keep track of which slot of a given ranges
549    vector we have last looked at.  */
550 
551 struct ranges_and_idx
552 {
553   /* The ranges.  */
554   const std::vector<range> *ranges;
555 
556   /* The range we've last found in RANGES.  Given ranges are sorted,
557      we can start the next lookup here.  */
558   int idx;
559 };
560 
561 /* Helper function for value_contents_bits_eq.  Compare LENGTH bits of
562    RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
563    ranges starting at OFFSET2 bits.  Return true if the ranges match
564    and fill in *L and *H with the overlapping window relative to
565    (both) OFFSET1 or OFFSET2.  */
566 
567 static int
568 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
569 				    struct ranges_and_idx *rp2,
570 				    LONGEST offset1, LONGEST offset2,
571 				    ULONGEST length, ULONGEST *l, ULONGEST *h)
572 {
573   rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
574 				       offset1, length);
575   rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
576 				       offset2, length);
577 
578   if (rp1->idx == -1 && rp2->idx == -1)
579     {
580       *l = length;
581       *h = length;
582       return 1;
583     }
584   else if (rp1->idx == -1 || rp2->idx == -1)
585     return 0;
586   else
587     {
588       const range *r1, *r2;
589       ULONGEST l1, h1;
590       ULONGEST l2, h2;
591 
592       r1 = &(*rp1->ranges)[rp1->idx];
593       r2 = &(*rp2->ranges)[rp2->idx];
594 
595       /* Get the unavailable windows intersected by the incoming
596 	 ranges.  The first and last ranges that overlap the argument
597 	 range may be wider than the incoming argument ranges.  */
598       l1 = std::max (offset1, r1->offset);
599       h1 = std::min (offset1 + length, r1->offset + r1->length);
600 
601       l2 = std::max (offset2, r2->offset);
602       h2 = std::min (offset2 + length, offset2 + r2->length);
603 
604       /* Make them relative to the respective start offsets, so we can
605 	 compare them for equality.  */
606       l1 -= offset1;
607       h1 -= offset1;
608 
609       l2 -= offset2;
610       h2 -= offset2;
611 
612       /* Different ranges, no match.  */
613       if (l1 != l2 || h1 != h2)
614 	return 0;
615 
616       *h = h1;
617       *l = l1;
618       return 1;
619     }
620 }
621 
622 /* Helper function for value_contents_eq.  The only difference is that
623    this function is bit rather than byte based.
624 
625    Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
626    with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
627    Return true if the available bits match.  */
628 
629 bool
630 value::contents_bits_eq (int offset1, const struct value *val2, int offset2,
631 			 int length) const
632 {
633   /* Each array element corresponds to a ranges source (unavailable,
634      optimized out).  '1' is for VAL1, '2' for VAL2.  */
635   struct ranges_and_idx rp1[2], rp2[2];
636 
637   /* See function description in value.h.  */
638   gdb_assert (!m_lazy && !val2->m_lazy);
639 
640   /* We shouldn't be trying to compare past the end of the values.  */
641   gdb_assert (offset1 + length
642 	      <= m_enclosing_type->length () * TARGET_CHAR_BIT);
643   gdb_assert (offset2 + length
644 	      <= val2->m_enclosing_type->length () * TARGET_CHAR_BIT);
645 
646   memset (&rp1, 0, sizeof (rp1));
647   memset (&rp2, 0, sizeof (rp2));
648   rp1[0].ranges = &m_unavailable;
649   rp2[0].ranges = &val2->m_unavailable;
650   rp1[1].ranges = &m_optimized_out;
651   rp2[1].ranges = &val2->m_optimized_out;
652 
653   while (length > 0)
654     {
655       ULONGEST l = 0, h = 0; /* init for gcc -Wall */
656       int i;
657 
658       for (i = 0; i < 2; i++)
659 	{
660 	  ULONGEST l_tmp, h_tmp;
661 
662 	  /* The contents only compare equal if the invalid/unavailable
663 	     contents ranges match as well.  */
664 	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
665 						   offset1, offset2, length,
666 						   &l_tmp, &h_tmp))
667 	    return false;
668 
669 	  /* We're interested in the lowest/first range found.  */
670 	  if (i == 0 || l_tmp < l)
671 	    {
672 	      l = l_tmp;
673 	      h = h_tmp;
674 	    }
675 	}
676 
677       /* Compare the available/valid contents.  */
678       if (memcmp_with_bit_offsets (m_contents.get (), offset1,
679 				   val2->m_contents.get (), offset2, l) != 0)
680 	return false;
681 
682       length -= h;
683       offset1 += h;
684       offset2 += h;
685     }
686 
687   return true;
688 }
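
/* Illustrative example (editorial): if VAL1 and VAL2 each have only
   their first byte unavailable and identical remaining contents, the
   loop above reports equality, because the unavailable windows match
   once made relative to OFFSET1/OFFSET2 and only the available bits
   reach memcmp_with_bit_offsets.  If just one of the two values had
   an unavailable byte, find_first_range_overlap_and_match would
   return 0 and the comparison would fail.  */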
689 
690 /* See value.h.  */
691 
692 bool
693 value::contents_eq (LONGEST offset1,
694 		    const struct value *val2, LONGEST offset2,
695 		    LONGEST length) const
696 {
697   return contents_bits_eq (offset1 * TARGET_CHAR_BIT,
698 			   val2, offset2 * TARGET_CHAR_BIT,
699 			   length * TARGET_CHAR_BIT);
700 }
701 
702 /* See value.h.  */
703 
704 bool
705 value::contents_eq (const struct value *val2) const
706 {
707   ULONGEST len1 = check_typedef (enclosing_type ())->length ();
708   ULONGEST len2 = check_typedef (val2->enclosing_type ())->length ();
709   if (len1 != len2)
710     return false;
711   return contents_eq (0, val2, 0, len1);
712 }
713 
714 /* The value-history records all the values printed by print commands
715    during this session.  */
716 
717 static std::vector<value_ref_ptr> value_history;
718 
719 
720 /* List of all value objects currently allocated
721    (except for those released by calls to release_value).
722    This is so they can be freed after each command.  */
723 
724 static std::vector<value_ref_ptr> all_values;
725 
726 /* See value.h.  */
727 
728 struct value *
729 value::allocate_lazy (struct type *type)
730 {
731   struct value *val;
732 
733   /* Call check_typedef on our type to make sure that, if TYPE
734      is a TYPE_CODE_TYPEDEF, its length is set to the length
735      of the target type instead of zero.  However, we do not
736      replace the typedef type by the target type, because we want
737      to keep the typedef in order to be able to set the VAL's type
738      description correctly.  */
739   check_typedef (type);
740 
741   val = new struct value (type);
742 
743   /* Values start out on the all_values chain.  */
744   all_values.emplace_back (val);
745 
746   return val;
747 }
748 
749 /* The maximum size, in bytes, that GDB will try to allocate for a value.
750    The initial value of 64k was not selected for any specific reason, it is
751    just a reasonable starting point.  */
752 
753 static int max_value_size = 65536; /* 64k bytes */
754 
755 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
756    LONGEST, otherwise GDB will not be able to parse integer values from the
757    CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
758    be unable to parse "set max-value-size 2".
759 
760    As we want a consistent GDB experience across hosts with different sizes
761    of LONGEST, this arbitrary minimum value was selected; as long as it
762    is bigger than LONGEST on all GDB-supported hosts, we're fine.  */
763 
764 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
765 static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
766 
767 /* Implement the "set max-value-size" command.  */
768 
769 static void
770 set_max_value_size (const char *args, int from_tty,
771 		    struct cmd_list_element *c)
772 {
773   gdb_assert (max_value_size == -1 || max_value_size >= 0);
774 
775   if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
776     {
777       max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
778       error (_("max-value-size set too low, increasing to %d bytes"),
779 	     max_value_size);
780     }
781 }
782 
783 /* Implement the "show max-value-size" command.  */
784 
785 static void
786 show_max_value_size (struct ui_file *file, int from_tty,
787 		     struct cmd_list_element *c, const char *value)
788 {
789   if (max_value_size == -1)
790     gdb_printf (file, _("Maximum value size is unlimited.\n"));
791   else
792     gdb_printf (file, _("Maximum value size is %d bytes.\n"),
793 		max_value_size);
794 }
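
/* Usage example (editorial): "set max-value-size 1048576" raises the
   limit to 1 MiB, "set max-value-size unlimited" stores -1, and "show
   max-value-size" reports the current limit via the function above.
   A request below MIN_VALUE_FOR_MAX_VALUE_SIZE is bumped back up and
   reported as an error by set_max_value_size.  */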
795 
796 /* Called before we attempt to allocate or reallocate a buffer for the
797    contents of a value.  TYPE is the type of the value for which we are
798    allocating the buffer.  If the buffer is too large (based on the user
799    controllable setting) then throw an error.  If this function returns
800    then we should attempt to allocate the buffer.  */
801 
802 static void
803 check_type_length_before_alloc (const struct type *type)
804 {
805   ULONGEST length = type->length ();
806 
807   if (exceeds_max_value_size (length))
808     {
809       if (type->name () != NULL)
810 	error (_("value of type `%s' requires %s bytes, which is more "
811 		 "than max-value-size"), type->name (), pulongest (length));
812       else
813 	error (_("value requires %s bytes, which is more than "
814 		 "max-value-size"), pulongest (length));
815     }
816 }
817 
818 /* See value.h.  */
819 
820 bool
821 exceeds_max_value_size (ULONGEST length)
822 {
823   return max_value_size > -1 && length > max_value_size;
824 }
825 
826 /* When this has a value, it is used to limit the number of array elements
827    of an array that are loaded into memory when an array value is made
828    non-lazy.  */
829 static std::optional<int> array_length_limiting_element_count;
830 
831 /* See value.h.  */
832 scoped_array_length_limiting::scoped_array_length_limiting (int elements)
833 {
834   m_old_value = array_length_limiting_element_count;
835   array_length_limiting_element_count.emplace (elements);
836 }
837 
838 /* See value.h.  */
839 scoped_array_length_limiting::~scoped_array_length_limiting ()
840 {
841   array_length_limiting_element_count = m_old_value;
842 }
843 
844 /* Find the inner element type for ARRAY_TYPE.  */
845 
846 static struct type *
847 find_array_element_type (struct type *array_type)
848 {
849   array_type = check_typedef (array_type);
850   gdb_assert (array_type->code () == TYPE_CODE_ARRAY);
851 
852   if (current_language->la_language == language_fortran)
853     while (array_type->code () == TYPE_CODE_ARRAY)
854       {
855 	array_type = array_type->target_type ();
856 	array_type = check_typedef (array_type);
857       }
858   else
859     {
860       array_type = array_type->target_type ();
861       array_type = check_typedef (array_type);
862     }
863 
864   return array_type;
865 }
866 
867 /* Return the limited length of ARRAY_TYPE, which must be of
868    TYPE_CODE_ARRAY.  This function can only be called when the global
869    ARRAY_LENGTH_LIMITING_ELEMENT_COUNT has a value.
870 
871    The limited length of an array is the smaller of (1) the total
872    size of the array type, and (2) the length of the array element
873    type multiplied by array_length_limiting_element_count.  */
874 
875 static ULONGEST
876 calculate_limited_array_length (struct type *array_type)
877 {
878   gdb_assert (array_length_limiting_element_count.has_value ());
879 
880   array_type = check_typedef (array_type);
881   gdb_assert (array_type->code () == TYPE_CODE_ARRAY);
882 
883   struct type *elm_type = find_array_element_type (array_type);
884   ULONGEST len = (elm_type->length ()
885 		  * (*array_length_limiting_element_count));
886   len = std::min (len, array_type->length ());
887 
888   return len;
889 }
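
/* Worked example (editorial, hypothetical types): for an
   "int buf[100000]" with 4-byte ints and an element limit of 200 in
   effect, the element type length is 4, so LEN starts as 4 * 200 = 800
   bytes, which is already smaller than the 400000-byte array type,
   giving a limited length of 800 bytes.  */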
890 
891 /* See value.h.  */
892 
893 bool
894 value::set_limited_array_length ()
895 {
896   ULONGEST limit = m_limited_length;
897   ULONGEST len = type ()->length ();
898 
899   if (array_length_limiting_element_count.has_value ())
900     len = calculate_limited_array_length (type ());
901 
902   if (limit != 0 && len > limit)
903     len = limit;
904   if (len > max_value_size)
905     return false;
906 
907   m_limited_length = max_value_size;
908   return true;
909 }
910 
911 /* See value.h.  */
912 
913 void
914 value::allocate_contents (bool check_size)
915 {
916   if (!m_contents)
917     {
918       struct type *enc_type = enclosing_type ();
919       ULONGEST len = enc_type->length ();
920 
921       if (check_size)
922 	{
923 	  /* If we are allocating the contents of an array, which
924 	     is greater in size than max_value_size, and there is
925 	     an element limit in effect, then we can possibly try
926 	     to load only a sub-set of the array contents into
927 	     GDB's memory.  */
928 	  if (type () == enc_type
929 	      && type ()->code () == TYPE_CODE_ARRAY
930 	      && len > max_value_size
931 	      && set_limited_array_length ())
932 	    len = m_limited_length;
933 	  else
934 	    check_type_length_before_alloc (enc_type);
935 	}
936 
937       m_contents.reset ((gdb_byte *) xzalloc (len));
938     }
939 }
940 
941 /* Allocate a value and its contents for type TYPE.  If CHECK_SIZE is true,
942    then apply the usual max-value-size checks.  */
943 
944 struct value *
945 value::allocate (struct type *type, bool check_size)
946 {
947   struct value *val = value::allocate_lazy (type);
948 
949   val->allocate_contents (check_size);
950   val->m_lazy = false;
951   return val;
952 }
953 
954 /* Allocate a value and its contents for type TYPE.  */
955 
956 struct value *
957 value::allocate (struct type *type)
958 {
959   return allocate (type, true);
960 }
961 
962 /* See value.h  */
963 
964 value *
965 value::allocate_register_lazy (const frame_info_ptr &initial_next_frame,
966 			       int regnum, struct type *type)
967 {
968   if (type == nullptr)
969     type = register_type (frame_unwind_arch (initial_next_frame), regnum);
970 
971   value *result = value::allocate_lazy (type);
972 
973   result->set_lval (lval_register);
974   result->m_location.reg.regnum = regnum;
975 
976   /* If this register value is created during unwind (while computing a frame
977      id), and NEXT_FRAME is a frame inlined in the frame being unwound, then
978      NEXT_FRAME will not have a valid frame id yet.  Find the next non-inline
979      frame (possibly the sentinel frame).  This is where registers are unwound
980      from anyway.  */
981   frame_info_ptr next_frame = initial_next_frame;
982   while (get_frame_type (next_frame) == INLINE_FRAME)
983     next_frame = get_next_frame_sentinel_okay (next_frame);
984 
985   result->m_location.reg.next_frame_id = get_frame_id (next_frame);
986 
987   /* We should have a next frame with a valid id.  */
988   gdb_assert (frame_id_p (result->m_location.reg.next_frame_id));
989 
990   return result;
991 }
992 
993 /* See value.h  */
994 
995 value *
996 value::allocate_register (const frame_info_ptr &next_frame, int regnum,
997 			  struct type *type)
998 {
999   value *result = value::allocate_register_lazy (next_frame, regnum, type);
1000   result->set_lazy (false);
1001   return result;
1002 }
1003 
1004 /* Allocate a value that has the correct length
1005    for COUNT repetitions of type TYPE.  */
1006 
1007 struct value *
1008 allocate_repeat_value (struct type *type, int count)
1009 {
1010   /* Despite the fact that we are really creating an array of TYPE here, we
1011      use the string lower bound as the array lower bound.  This seems to
1012      work fine for now.  */
1013   int low_bound = current_language->string_lower_bound ();
1014   /* FIXME-type-allocation: need a way to free this type when we are
1015      done with it.  */
1016   struct type *array_type
1017     = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1018 
1019   return value::allocate (array_type);
1020 }
1021 
1022 struct value *
1023 value::allocate_computed (struct type *type,
1024 			  const struct lval_funcs *funcs,
1025 			  void *closure)
1026 {
1027   struct value *v = value::allocate_lazy (type);
1028 
1029   v->set_lval (lval_computed);
1030   v->m_location.computed.funcs = funcs;
1031   v->m_location.computed.closure = closure;
1032 
1033   return v;
1034 }
1035 
1036 /* See value.h.  */
1037 
1038 struct value *
1039 value::allocate_optimized_out (struct type *type)
1040 {
1041   struct value *retval = value::allocate_lazy (type);
1042 
1043   retval->mark_bytes_optimized_out (0, type->length ());
1044   retval->set_lazy (false);
1045   return retval;
1046 }
1047 
1048 /* Accessor methods.  */
1049 
1050 gdb::array_view<gdb_byte>
1051 value::contents_raw ()
1052 {
1053   int unit_size = gdbarch_addressable_memory_unit_size (arch ());
1054 
1055   allocate_contents (true);
1056 
1057   ULONGEST length = type ()->length ();
1058   return gdb::make_array_view
1059     (m_contents.get () + m_embedded_offset * unit_size, length);
1060 }
1061 
1062 gdb::array_view<gdb_byte>
1063 value::contents_all_raw ()
1064 {
1065   allocate_contents (true);
1066 
1067   ULONGEST length = enclosing_type ()->length ();
1068   return gdb::make_array_view (m_contents.get (), length);
1069 }
1070 
1071 /* Look at value.h for description.  */
1072 
1073 struct type *
1074 value_actual_type (struct value *value, int resolve_simple_types,
1075 		   int *real_type_found)
1076 {
1077   struct value_print_options opts;
1078   struct type *result;
1079 
1080   get_user_print_options (&opts);
1081 
1082   if (real_type_found)
1083     *real_type_found = 0;
1084   result = value->type ();
1085   if (opts.objectprint)
1086     {
1087       /* If result's target type is TYPE_CODE_STRUCT, proceed to
1088 	 fetch its rtti type.  */
1089       if (result->is_pointer_or_reference ()
1090 	  && (check_typedef (result->target_type ())->code ()
1091 	      == TYPE_CODE_STRUCT)
1092 	  && !value->optimized_out ())
1093 	{
1094 	  struct type *real_type;
1095 
1096 	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1097 	  if (real_type)
1098 	    {
1099 	      if (real_type_found)
1100 		*real_type_found = 1;
1101 	      result = real_type;
1102 	    }
1103 	}
1104       else if (resolve_simple_types)
1105 	{
1106 	  if (real_type_found)
1107 	    *real_type_found = 1;
1108 	  result = value->enclosing_type ();
1109 	}
1110     }
1111 
1112   return result;
1113 }
1114 
1115 void
1116 error_value_optimized_out (void)
1117 {
1118   throw_error (OPTIMIZED_OUT_ERROR, _("value has been optimized out"));
1119 }
1120 
1121 void
1122 value::require_not_optimized_out () const
1123 {
1124   if (!m_optimized_out.empty ())
1125     {
1126       if (m_lval == lval_register)
1127 	throw_error (OPTIMIZED_OUT_ERROR,
1128 		     _("register has not been saved in frame"));
1129       else
1130 	error_value_optimized_out ();
1131     }
1132 }
1133 
1134 void
1135 value::require_available () const
1136 {
1137   if (!m_unavailable.empty ())
1138     throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1139 }
1140 
1141 gdb::array_view<const gdb_byte>
1142 value::contents_for_printing ()
1143 {
1144   if (m_lazy)
1145     fetch_lazy ();
1146 
1147   ULONGEST length = enclosing_type ()->length ();
1148   return gdb::make_array_view (m_contents.get (), length);
1149 }
1150 
1151 gdb::array_view<const gdb_byte>
1152 value::contents_for_printing () const
1153 {
1154   gdb_assert (!m_lazy);
1155 
1156   ULONGEST length = enclosing_type ()->length ();
1157   return gdb::make_array_view (m_contents.get (), length);
1158 }
1159 
1160 gdb::array_view<const gdb_byte>
1161 value::contents_all ()
1162 {
1163   gdb::array_view<const gdb_byte> result = contents_for_printing ();
1164   require_not_optimized_out ();
1165   require_available ();
1166   return result;
1167 }
1168 
1169 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1170    SRC_BIT_OFFSET+BIT_LENGTH) into *DST_RANGE, adjusted.  */
1171 
1172 static void
1173 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1174 		      const std::vector<range> &src_range, int src_bit_offset,
1175 		      unsigned int bit_length)
1176 {
1177   for (const range &r : src_range)
1178     {
1179       LONGEST h, l;
1180 
1181       l = std::max (r.offset, (LONGEST) src_bit_offset);
1182       h = std::min ((LONGEST) (r.offset + r.length),
1183 		    (LONGEST) src_bit_offset + bit_length);
1184 
1185       if (l < h)
1186 	insert_into_bit_range_vector (dst_range,
1187 				      dst_bit_offset + (l - src_bit_offset),
1188 				      h - l);
1189     }
1190 }
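
/* Worked example (editorial; offsets and lengths in bits): copying
   with SRC_BIT_OFFSET == 8, BIT_LENGTH == 16 and DST_BIT_OFFSET == 0
   from SRC_RANGE = { {12, 8} } intersects [12,20) with [8,24), giving
   [12,20); shifted by the two offsets, this marks [4,12) in
   *DST_RANGE.  */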
1191 
1192 /* See value.h.  */
1193 
1194 void
1195 value::ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1196 			     int src_bit_offset, int bit_length) const
1197 {
1198   ::ranges_copy_adjusted (&dst->m_unavailable, dst_bit_offset,
1199 			  m_unavailable, src_bit_offset,
1200 			  bit_length);
1201   ::ranges_copy_adjusted (&dst->m_optimized_out, dst_bit_offset,
1202 			  m_optimized_out, src_bit_offset,
1203 			  bit_length);
1204 }
1205 
1206 /* See value.h.  */
1207 
1208 void
1209 value::contents_copy_raw (struct value *dst, LONGEST dst_offset,
1210 			  LONGEST src_offset, LONGEST length)
1211 {
1212   LONGEST src_bit_offset, dst_bit_offset, bit_length;
1213   int unit_size = gdbarch_addressable_memory_unit_size (arch ());
1214 
1215   /* A lazy DST would make this copy operation useless, since as
1216      soon as DST's contents were un-lazied (by a later value_contents
1217      call, say), the contents would be overwritten.  A lazy SRC would
1218      mean we'd be copying garbage.  */
1219   gdb_assert (!dst->m_lazy && !m_lazy);
1220 
1221   ULONGEST copy_length = length;
1222   ULONGEST limit = m_limited_length;
1223   if (limit > 0 && src_offset + length > limit)
1224     copy_length = src_offset > limit ? 0 : limit - src_offset;
1225 
1226   /* The overwritten DST range gets unavailability ORed in, not
1227      replaced.  Remember to implement replacing if that ever
1228      turns out to be necessary.  */
1229   gdb_assert (dst->bytes_available (dst_offset, length));
1230   gdb_assert (!dst->bits_any_optimized_out (TARGET_CHAR_BIT * dst_offset,
1231 					    TARGET_CHAR_BIT * length));
1232 
1233   if ((src_offset + copy_length) * unit_size > enclosing_type ()->length ())
1234     error (_("access outside bounds of object"));
1235 
1236   /* Copy the data.  */
1237   gdb::array_view<gdb_byte> dst_contents
1238     = dst->contents_all_raw ().slice (dst_offset * unit_size,
1239 				      copy_length * unit_size);
1240   gdb::array_view<const gdb_byte> src_contents
1241     = contents_all_raw ().slice (src_offset * unit_size,
1242 				 copy_length * unit_size);
1243   gdb::copy (src_contents, dst_contents);
1244 
1245   /* Copy the meta-data, adjusted.  */
1246   src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1247   dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1248   bit_length = length * unit_size * HOST_CHAR_BIT;
1249 
1250   ranges_copy_adjusted (dst, dst_bit_offset,
1251 			src_bit_offset, bit_length);
1252 }
1253 
1254 /* See value.h.  */
1255 
1256 void
1257 value::contents_copy_raw_bitwise (struct value *dst, LONGEST dst_bit_offset,
1258 				  LONGEST src_bit_offset,
1259 				  LONGEST bit_length)
1260 {
1261   /* A lazy DST would make this copy operation useless, since as
1262      soon as DST's contents were un-lazied (by a later value_contents
1263      call, say), the contents would be overwritten.  A lazy SRC would
1264      mean we'd be copying garbage.  */
1265   gdb_assert (!dst->m_lazy && !m_lazy);
1266 
1267   ULONGEST copy_bit_length = bit_length;
1268   ULONGEST bit_limit = m_limited_length * TARGET_CHAR_BIT;
1269   if (bit_limit > 0 && src_bit_offset + bit_length > bit_limit)
1270     copy_bit_length = (src_bit_offset > bit_limit ? 0
1271 		       : bit_limit - src_bit_offset);
1272 
1273   /* The overwritten DST range gets unavailability ORed in, not
1274      replaced.  Remember to implement replacing if that ever
1275      turns out to be necessary.  */
1276   LONGEST dst_offset = dst_bit_offset / TARGET_CHAR_BIT;
1277   LONGEST length = bit_length / TARGET_CHAR_BIT;
1278   gdb_assert (dst->bytes_available (dst_offset, length));
1279   gdb_assert (!dst->bits_any_optimized_out (dst_bit_offset,
1280 					    bit_length));
1281 
1282   /* Copy the data.  */
1283   gdb::array_view<gdb_byte> dst_contents = dst->contents_all_raw ();
1284   gdb::array_view<const gdb_byte> src_contents = contents_all_raw ();
1285   copy_bitwise (dst_contents.data (), dst_bit_offset,
1286 		src_contents.data (), src_bit_offset,
1287 		copy_bit_length,
1288 		type_byte_order (type ()) == BFD_ENDIAN_BIG);
1289 
1290   /* Copy the meta-data.  */
1291   ranges_copy_adjusted (dst, dst_bit_offset, src_bit_offset, bit_length);
1292 }
1293 
1294 /* See value.h.  */
1295 
1296 void
1297 value::contents_copy (struct value *dst, LONGEST dst_offset,
1298 		      LONGEST src_offset, LONGEST length)
1299 {
1300   if (m_lazy)
1301     fetch_lazy ();
1302 
1303   contents_copy_raw (dst, dst_offset, src_offset, length);
1304 }
1305 
1306 gdb::array_view<const gdb_byte>
1307 value::contents ()
1308 {
1309   gdb::array_view<const gdb_byte> result = contents_writeable ();
1310   require_not_optimized_out ();
1311   require_available ();
1312   return result;
1313 }
1314 
1315 gdb::array_view<gdb_byte>
1316 value::contents_writeable ()
1317 {
1318   if (m_lazy)
1319     fetch_lazy ();
1320   return contents_raw ();
1321 }
1322 
1323 bool
1324 value::optimized_out ()
1325 {
1326   if (m_lazy)
1327     {
1328       /* See if we can compute the result without fetching the
1329 	 value.  */
1330       if (this->lval () == lval_memory)
1331 	return false;
1332       else if (this->lval () == lval_computed)
1333 	{
1334 	  const struct lval_funcs *funcs = m_location.computed.funcs;
1335 
1336 	  if (funcs->is_optimized_out != nullptr)
1337 	    return funcs->is_optimized_out (this);
1338 	}
1339 
1340       /* Fall back to fetching.  */
1341       try
1342 	{
1343 	  fetch_lazy ();
1344 	}
1345       catch (const gdb_exception_error &ex)
1346 	{
1347 	  switch (ex.error)
1348 	    {
1349 	    case MEMORY_ERROR:
1350 	    case OPTIMIZED_OUT_ERROR:
1351 	    case NOT_AVAILABLE_ERROR:
1352 	      /* These can normally happen when we try to access an
1353 		 optimized out or unavailable register, either in a
1354 		 physical register or spilled to memory.  */
1355 	      break;
1356 	    default:
1357 	      throw;
1358 	    }
1359 	}
1360     }
1361 
1362   return !m_optimized_out.empty ();
1363 }
1364 
1365 /* Mark the contents of VALUE as optimized out, starting at OFFSET bytes
1366    and extending for the following LENGTH bytes.  */
1367 
1368 void
1369 value::mark_bytes_optimized_out (int offset, int length)
1370 {
1371   mark_bits_optimized_out (offset * TARGET_CHAR_BIT,
1372 			   length * TARGET_CHAR_BIT);
1373 }
1374 
1375 /* See value.h.  */
1376 
1377 void
1378 value::mark_bits_optimized_out (LONGEST offset, LONGEST length)
1379 {
1380   insert_into_bit_range_vector (&m_optimized_out, offset, length);
1381 }
1382 
1383 bool
1384 value::bits_synthetic_pointer (LONGEST offset, LONGEST length) const
1385 {
1386   if (m_lval != lval_computed
1387       || !m_location.computed.funcs->check_synthetic_pointer)
1388     return false;
1389   return m_location.computed.funcs->check_synthetic_pointer (this, offset,
1390 							     length);
1391 }
1392 
1393 const struct lval_funcs *
1394 value::computed_funcs () const
1395 {
1396   gdb_assert (m_lval == lval_computed);
1397 
1398   return m_location.computed.funcs;
1399 }
1400 
1401 void *
1402 value::computed_closure () const
1403 {
1404   gdb_assert (m_lval == lval_computed);
1405 
1406   return m_location.computed.closure;
1407 }
1408 
1409 CORE_ADDR
1410 value::address () const
1411 {
1412   if (m_lval != lval_memory)
1413     return 0;
1414   if (m_parent != NULL)
1415     return m_parent->address () + m_offset;
1416   if (NULL != TYPE_DATA_LOCATION (type ()))
1417     {
1418       gdb_assert (TYPE_DATA_LOCATION (type ())->is_constant ());
1419       return TYPE_DATA_LOCATION_ADDR (type ());
1420     }
1421 
1422   return m_location.address + m_offset;
1423 }
1424 
1425 CORE_ADDR
1426 value::raw_address () const
1427 {
1428   if (m_lval != lval_memory)
1429     return 0;
1430   return m_location.address;
1431 }
1432 
1433 void
1434 value::set_address (CORE_ADDR addr)
1435 {
1436   gdb_assert (m_lval == lval_memory);
1437   m_location.address = addr;
1438 }
1439 
1440 /* Return a mark in the value chain.  All values allocated after the
1441    mark is obtained (except for those released) are subject to being freed
1442    if a subsequent value_free_to_mark is passed the mark.  */
1443 struct value *
1444 value_mark (void)
1445 {
1446   if (all_values.empty ())
1447     return nullptr;
1448   return all_values.back ().get ();
1449 }
1450 
1451 /* Release a reference to VAL, which was acquired with value_incref.
1452    This function is also called to deallocate values from the value
1453    chain.  */
1454 
1455 void
1456 value::decref ()
1457 {
1458   gdb_assert (m_reference_count > 0);
1459   m_reference_count--;
1460   if (m_reference_count == 0)
1461     delete this;
1462 }
1463 
1464 /* Free all values allocated since MARK was obtained by value_mark
1465    (except for those released).  */
1466 void
1467 value_free_to_mark (const struct value *mark)
1468 {
1469   auto iter = std::find (all_values.begin (), all_values.end (), mark);
1470   if (iter == all_values.end ())
1471     all_values.clear ();
1472   else
1473     all_values.erase (iter + 1, all_values.end ());
1474 }
1475 
1476 /* Remove VAL from the chain all_values
1477    so it will not be freed automatically.  */
1478 
1479 value_ref_ptr
1480 release_value (struct value *val)
1481 {
1482   if (val == nullptr)
1483     return value_ref_ptr ();
1484 
1485   std::vector<value_ref_ptr>::reverse_iterator iter;
1486   for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1487     {
1488       if (*iter == val)
1489 	{
1490 	  value_ref_ptr result = *iter;
1491 	  all_values.erase (iter.base () - 1);
1492 	  return result;
1493 	}
1494     }
1495 
1496   /* We must always return an owned reference.  Normally this happens
1497      because we transfer the reference from the value chain, but in
1498      this case the value was not on the chain.  */
1499   return value_ref_ptr::new_reference (val);
1500 }
1501 
1502 /* See value.h.  */
1503 
1504 std::vector<value_ref_ptr>
1505 value_release_to_mark (const struct value *mark)
1506 {
1507   std::vector<value_ref_ptr> result;
1508 
1509   auto iter = std::find (all_values.begin (), all_values.end (), mark);
1510   if (iter == all_values.end ())
1511     std::swap (result, all_values);
1512   else
1513     {
1514       std::move (iter + 1, all_values.end (), std::back_inserter (result));
1515       all_values.erase (iter + 1, all_values.end ());
1516     }
1517   std::reverse (result.begin (), result.end ());
1518   return result;
1519 }
1520 
1521 /* See value.h.  */
1522 
1523 struct value *
1524 value::copy () const
1525 {
1526   struct type *encl_type = enclosing_type ();
1527   struct value *val;
1528 
1529   val = value::allocate_lazy (encl_type);
1530   val->m_type = m_type;
1531   val->set_lval (m_lval);
1532   val->m_location = m_location;
1533   val->m_offset = m_offset;
1534   val->m_bitpos = m_bitpos;
1535   val->m_bitsize = m_bitsize;
1536   val->m_lazy = m_lazy;
1537   val->m_embedded_offset = embedded_offset ();
1538   val->m_pointed_to_offset = m_pointed_to_offset;
1539   val->m_modifiable = m_modifiable;
1540   val->m_stack = m_stack;
1541   val->m_is_zero = m_is_zero;
1542   val->m_in_history = m_in_history;
1543   val->m_initialized = m_initialized;
1544   val->m_unavailable = m_unavailable;
1545   val->m_optimized_out = m_optimized_out;
1546   val->m_parent = m_parent;
1547   val->m_limited_length = m_limited_length;
1548 
1549   if (!val->lazy ()
1550       && !(val->entirely_optimized_out ()
1551 	   || val->entirely_unavailable ()))
1552     {
1553       ULONGEST length = val->m_limited_length;
1554       if (length == 0)
1555 	length = val->enclosing_type ()->length ();
1556 
1557       gdb_assert (m_contents != nullptr);
1558       const auto &arg_view
1559 	= gdb::make_array_view (m_contents.get (), length);
1560 
1561       val->allocate_contents (false);
1562       gdb::array_view<gdb_byte> val_contents
1563 	= val->contents_all_raw ().slice (0, length);
1564 
1565       gdb::copy (arg_view, val_contents);
1566     }
1567 
1568   if (val->lval () == lval_computed)
1569     {
1570       const struct lval_funcs *funcs = val->m_location.computed.funcs;
1571 
1572       if (funcs->copy_closure)
1573 	val->m_location.computed.closure = funcs->copy_closure (val);
1574     }
1575   return val;
1576 }
1577 
1578 /* Return a "const" and/or "volatile" qualified version of the value V.
1579    If CNST is true, then the returned value will be qualified with
1580    "const".
1581    If VOLTL is true, then the returned value will be qualified with
1582    "volatile".  */
1583 
1584 struct value *
1585 make_cv_value (int cnst, int voltl, struct value *v)
1586 {
1587   struct type *val_type = v->type ();
1588   struct type *m_enclosing_type = v->enclosing_type ();
1589   struct value *cv_val = v->copy ();
1590 
1591   cv_val->deprecated_set_type (make_cv_type (cnst, voltl, val_type, NULL));
1592   cv_val->set_enclosing_type (make_cv_type (cnst, voltl, m_enclosing_type, NULL));
1593 
1594   return cv_val;
1595 }
1596 
1597 /* See value.h.  */
1598 
1599 struct value *
1600 value::non_lval ()
1601 {
1602   if (this->lval () != not_lval)
1603     {
1604       struct type *enc_type = enclosing_type ();
1605       struct value *val = value::allocate (enc_type);
1606 
1607       gdb::copy (contents_all (), val->contents_all_raw ());
1608       val->m_type = m_type;
1609       val->set_embedded_offset (embedded_offset ());
1610       val->set_pointed_to_offset (pointed_to_offset ());
1611       return val;
1612     }
1613   return this;
1614 }
1615 
1616 /* See value.h.  */
1617 
1618 void
1619 value::force_lval (CORE_ADDR addr)
1620 {
1621   gdb_assert (this->lval () == not_lval);
1622 
1623   write_memory (addr, contents_raw ().data (), type ()->length ());
1624   m_lval = lval_memory;
1625   m_location.address = addr;
1626 }
1627 
1628 void
1629 value::set_component_location (const struct value *whole)
1630 {
1631   struct type *type;
1632 
1633   gdb_assert (whole->m_lval != lval_xcallable);
1634 
1635   if (whole->m_lval == lval_internalvar)
1636     m_lval = lval_internalvar_component;
1637   else
1638     m_lval = whole->m_lval;
1639 
1640   m_location = whole->m_location;
1641   if (whole->m_lval == lval_computed)
1642     {
1643       const struct lval_funcs *funcs = whole->m_location.computed.funcs;
1644 
1645       if (funcs->copy_closure)
1646 	m_location.computed.closure = funcs->copy_closure (whole);
1647     }
1648 
1649   /* If the WHOLE value has a dynamically resolved location property then
1650      update the address of the COMPONENT.  */
1651   type = whole->type ();
1652   if (NULL != TYPE_DATA_LOCATION (type)
1653       && TYPE_DATA_LOCATION (type)->is_constant ())
1654     set_address (TYPE_DATA_LOCATION_ADDR (type));
1655 
1656   /* Similarly, if the COMPONENT value has a dynamically resolved location
1657      property then update its address.  */
1658   type = this->type ();
1659   if (NULL != TYPE_DATA_LOCATION (type)
1660       && TYPE_DATA_LOCATION (type)->is_constant ())
1661     {
1662       /* If the COMPONENT has a dynamic location, and is an
1663 	 lval_internalvar_component, then we change it to a lval_memory.
1664 
1665 	 Usually a component of an internalvar is created non-lazy, and has
1666 	 its content immediately copied from the parent internalvar.
1667 	 However, for components with a dynamic location, the content of
1668 	 the component is not contained within the parent, but is instead
1669 	 accessed indirectly.  Further, the component will be created as a
1670 	 lazy value.
1671 
1672 	 By changing the type of the component to lval_memory we ensure
1673 	 that value_fetch_lazy can successfully load the component.
1674 
1675 	 This solution isn't ideal, but a real fix would require values to
1676 	 carry around both the parent value contents, and the contents of
1677 	 any dynamic fields within the parent.  This is a substantial
1678 	 change to how values work in GDB.  */
1679       if (this->lval () == lval_internalvar_component)
1680 	{
1681 	  gdb_assert (lazy ());
1682 	  m_lval = lval_memory;
1683 	}
1684       else
1685 	gdb_assert (this->lval () == lval_memory);
1686       set_address (TYPE_DATA_LOCATION_ADDR (type));
1687     }
1688 }
1689 
1690 /* Access to the value history.  */
1691 
1692 /* Record a new value in the value history.
1693    Returns the absolute history index of the entry.  */
1694 
1695 int
1696 value::record_latest ()
1697 {
1698   /* We don't want this value to have anything to do with the inferior anymore.
1699      In particular, "set $1 = 50" should not affect the variable from which
1700      the value was taken, and fast watchpoints should be able to assume that
1701      a value on the value history never changes.  */
1702   if (lazy ())
1703     {
1704       /* If this is a _huge_ array, any attempt to fetch it is going
1705 	 to cause GDB to throw an error.  However, to allow
1706 	 the array to still be displayed we fetch its contents up to
1707 	 `max_value_size' and mark anything beyond "unavailable" in
1708 	 the history.  */
1709       if (m_type->code () == TYPE_CODE_ARRAY
1710 	  && m_type->length () > max_value_size
1711 	  && array_length_limiting_element_count.has_value ()
1712 	  && m_enclosing_type == m_type
1713 	  && calculate_limited_array_length (m_type) <= max_value_size)
1714 	m_limited_length = max_value_size;
1715 
1716       fetch_lazy ();
1717     }
1718 
1719   ULONGEST limit = m_limited_length;
1720   if (limit != 0)
1721     mark_bytes_unavailable (limit, m_enclosing_type->length () - limit);
1722 
1723   /* Mark the value as recorded in the history for the availability check.  */
1724   m_in_history = true;
1725 
1726   /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1727      from.  This is a bit dubious, because then *&$1 does not just return $1
1728      but the current contents of that location.  c'est la vie...  */
1729   set_modifiable (false);
1730 
1731   value_history.push_back (release_value (this));
1732 
1733   return value_history.size ();
1734 }
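
/* Editorial note: the value returned above is the new, 1-based history
   index, so the first value ever recorded is later accessible as "$1"
   and a "print" that records the tenth value returns 10, matching
   "$10".  */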
1735 
1736 /* Return a copy of the value in the history with sequence number NUM.  */
1737 
1738 struct value *
1739 access_value_history (int num)
1740 {
1741   int absnum = num;
1742 
1743   if (absnum <= 0)
1744     absnum += value_history.size ();
1745 
1746   if (absnum <= 0)
1747     {
1748       if (num == 0)
1749 	error (_("The history is empty."));
1750       else if (num == 1)
1751 	error (_("There is only one value in the history."));
1752       else
1753 	error (_("History does not go back to $$%d."), -num);
1754     }
1755   if (absnum > value_history.size ())
1756     error (_("History has not yet reached $%d."), absnum);
1757 
1758   absnum--;
1759 
1760   return value_history[absnum]->copy ();
1761 }
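
/* Worked example (editorial): with five values recorded, NUM == 5
   returns a copy of $5, NUM == 0 is biased by the history size and
   also yields $5 (matching how "$" behaves), and NUM == -1 yields $4
   (matching "$$").  Out-of-range requests produce the errors above.  */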
1762 
1763 /* See value.h.  */
1764 
1765 ULONGEST
1766 value_history_count ()
1767 {
1768   return value_history.size ();
1769 }
1770 
1771 static void
1772 show_values (const char *num_exp, int from_tty)
1773 {
1774   int i;
1775   struct value *val;
1776   static int num = 1;
1777 
1778   if (num_exp)
1779     {
1780       /* "show values +" should print from the stored position.
1781 	 "show values <exp>" should print around value number <exp>.  */
1782       if (num_exp[0] != '+' || num_exp[1] != '\0')
1783 	num = parse_and_eval_long (num_exp) - 5;
1784     }
1785   else
1786     {
1787       /* "show values" means print the last 10 values.  */
1788       num = value_history.size () - 9;
1789     }
1790 
1791   if (num <= 0)
1792     num = 1;
1793 
1794   for (i = num; i < num + 10 && i <= value_history.size (); i++)
1795     {
1796       struct value_print_options opts;
1797 
1798       val = access_value_history (i);
1799       gdb_printf (("$%d = "), i);
1800       get_user_print_options (&opts);
1801       value_print (val, gdb_stdout, &opts);
1802       gdb_printf (("\n"));
1803     }
1804 
1805   /* The next "show values +" should start after what we just printed.  */
1806   num += 10;
1807 
1808   /* Hitting just return after this command should do the same thing as
1809      "show values +".  If num_exp is null, this is unnecessary, since
1810      "show values +" is not useful after "show values".  */
1811   if (from_tty && num_exp)
1812     set_repeat_arguments ("+");
1813 }
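
/* Example session (illustrative, assuming the history is long enough):

     (gdb) show values        -- prints the last ten history values
     (gdb) show values 37     -- prints $32 through $41
     (gdb) show values +      -- continues where the last listing stopped

   The "- 5" adjustment in the code above is what places the requested
   value number roughly in the middle of the ten-value window.  */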
1814 
1815 enum internalvar_kind
1816 {
1817   /* The internal variable is empty.  */
1818   INTERNALVAR_VOID,
1819 
1820   /* The value of the internal variable is provided directly as
1821      a GDB value object.  */
1822   INTERNALVAR_VALUE,
1823 
1824   /* A fresh value is computed via a call-back routine on every
1825      access to the internal variable.  */
1826   INTERNALVAR_MAKE_VALUE,
1827 
1828   /* The internal variable holds a GDB internal convenience function.  */
1829   INTERNALVAR_FUNCTION,
1830 
1831   /* The variable holds an integer value.  */
1832   INTERNALVAR_INTEGER,
1833 
1834   /* The variable holds a GDB-provided string.  */
1835   INTERNALVAR_STRING,
1836 };
1837 
1838 union internalvar_data
1839 {
1840   /* A value object used with INTERNALVAR_VALUE.  */
1841   struct value *value;
1842 
1843   /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
1844   struct
1845   {
1846     /* The functions to call.  */
1847     const struct internalvar_funcs *functions;
1848 
1849     /* The function's user-data.  */
1850     void *data;
1851   } make_value;
1852 
1853   /* The internal function used with INTERNALVAR_FUNCTION.  */
1854   struct
1855   {
1856     struct internal_function *function;
1857     /* True if this is the canonical name for the function.  */
1858     int canonical;
1859   } fn;
1860 
1861   /* An integer value used with INTERNALVAR_INTEGER.  */
1862   struct
1863   {
1864     /* If type is non-NULL, it will be used as the type to generate
1865        a value for this internal variable.  If type is NULL, a default
1866        integer type for the architecture is used.  */
1867     struct type *type;
1868     LONGEST val;
1869   } integer;
1870 
1871   /* A string value used with INTERNALVAR_STRING.  */
1872   char *string;
1873 };
1874 
1875 /* Internal variables.  These are variables within the debugger
1876    that hold values assigned by debugger commands.
1877    The user refers to them with a '$' prefix
1878    that does not appear in the variable names stored internally.  */
1879 
1880 struct internalvar
1881 {
1882   internalvar (std::string name)
1883     : name (std::move (name))
1884   {}
1885 
1886   std::string name;
1887 
1888   /* We support various different kinds of content of an internal variable.
1889      enum internalvar_kind specifies the kind, and union internalvar_data
1890      provides the data associated with this particular kind.  */
1891 
1892   enum internalvar_kind kind = INTERNALVAR_VOID;
1893 
1894   union internalvar_data u {};
1895 };
1896 
1897 /* Use std::map, a sorted container, to make the order of iteration (and
1898    therefore the output of "show convenience") stable.  */
1899 
1900 static std::map<std::string, internalvar> internalvars;
1901 
1902 /* The "init-if-undefined" command: if the convenience variable does not
1903    yet have a value, evaluate the assignment to give it one; otherwise do nothing.  */
1904 static void
1905 init_if_undefined_command (const char* args, int from_tty)
1906 {
1907   struct internalvar *intvar = nullptr;
1908 
1909   /* Parse the expression - this is taken from set_command().  */
1910   expression_up expr = parse_expression (args);
1911 
1912   /* Validate the expression.
1913      Was the expression an assignment?
1914      Or even an expression at all?  */
1915   if (expr->first_opcode () != BINOP_ASSIGN)
1916     error (_("Init-if-undefined requires an assignment expression."));
1917 
1918   /* Extract the variable from the parsed expression.  */
1919   expr::assign_operation *assign
1920     = dynamic_cast<expr::assign_operation *> (expr->op.get ());
1921   if (assign != nullptr)
1922     {
1923       expr::operation *lhs = assign->get_lhs ();
1924       expr::internalvar_operation *ivarop
1925 	= dynamic_cast<expr::internalvar_operation *> (lhs);
1926       if (ivarop != nullptr)
1927 	intvar = ivarop->get_internalvar ();
1928     }
1929 
1930   if (intvar == nullptr)
1931     error (_("The first parameter to init-if-undefined "
1932 	     "should be a GDB variable."));
1933 
1934   /* Only evaluate the expression if the lvalue is void.
1935      This may still fail if the expression is invalid.  */
1936   if (intvar->kind == INTERNALVAR_VOID)
1937     expr->evaluate ();
1938 }
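
/* Example usage (illustrative), e.g. from a user script that may be
   sourced more than once:

     (gdb) init-if-undefined $break_count = 0

   The assignment is evaluated only while $break_count is still void;
   once the variable has a value, re-sourcing the script leaves it
   untouched.  */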
1939 
1940 
1941 /* Look up an internal variable with name NAME.  NAME should not
1942    normally include a dollar sign.
1943 
1944    If the specified internal variable does not exist,
1945    the return value is NULL.  */
1946 
1947 struct internalvar *
1948 lookup_only_internalvar (const char *name)
1949 {
1950   auto it = internalvars.find (name);
1951   if (it == internalvars.end ())
1952     return nullptr;
1953 
1954   return &it->second;
1955 }
1956 
1957 /* Complete NAME by comparing it to the names of internal
1958    variables.  */
1959 
1960 void
1961 complete_internalvar (completion_tracker &tracker, const char *name)
1962 {
1963   int len = strlen (name);
1964 
1965   for (auto &pair : internalvars)
1966     {
1967       const internalvar &var = pair.second;
1968 
1969       if (var.name.compare (0, len, name) == 0)
1970 	tracker.add_completion (make_unique_xstrdup (var.name.c_str ()));
1971     }
1972 }
1973 
1974 /* Create an internal variable with name NAME and with a void value.
1975    NAME should not normally include a dollar sign.
1976 
1977    An internal variable with that name must not exist already.  */
1978 
1979 struct internalvar *
1980 create_internalvar (const char *name)
1981 {
1982   auto pair = internalvars.emplace (std::make_pair (name, internalvar (name)));
1983   gdb_assert (pair.second);
1984 
1985   return &pair.first->second;
1986 }
1987 
1988 /* Create an internal variable with name NAME and register the
1989    FUNCS->make_value callback as the function that value_of_internalvar
1990    uses to create a value whenever this variable is referenced.  NAME
1991    should not normally include a dollar sign.  DATA is passed
1992    uninterpreted to the callbacks in FUNCS whenever they are
1993    called.  */
1994 
1995 struct internalvar *
1996 create_internalvar_type_lazy (const char *name,
1997 			      const struct internalvar_funcs *funcs,
1998 			      void *data)
1999 {
2000   struct internalvar *var = create_internalvar (name);
2001 
2002   var->kind = INTERNALVAR_MAKE_VALUE;
2003   var->u.make_value.functions = funcs;
2004   var->u.make_value.data = data;
2005   return var;
2006 }
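
/* Minimal sketch of a caller (comment only; the names below are made up
   for illustration).  A client that wants a "$_answer" variable to be
   recomputed on every access could register it like this:

     static struct value *
     answer_make_value (struct gdbarch *gdbarch, struct internalvar *var,
			void *data)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     }

     static const struct internalvar_funcs answer_funcs =
     {
       answer_make_value,
       NULL,		// no agent-expression (compile_to_ax) support
     };

     create_internalvar_type_lazy ("_answer", &answer_funcs, NULL);  */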
2007 
2008 /* See documentation in value.h.  */
2009 
2010 int
2011 compile_internalvar_to_ax (struct internalvar *var,
2012 			   struct agent_expr *expr,
2013 			   struct axs_value *value)
2014 {
2015   if (var->kind != INTERNALVAR_MAKE_VALUE
2016       || var->u.make_value.functions->compile_to_ax == NULL)
2017     return 0;
2018 
2019   var->u.make_value.functions->compile_to_ax (var, expr, value,
2020 					      var->u.make_value.data);
2021   return 1;
2022 }
2023 
2024 /* Look up an internal variable with name NAME.  NAME should not
2025    normally include a dollar sign.
2026 
2027    If the specified internal variable does not exist,
2028    one is created, with a void value.  */
2029 
2030 struct internalvar *
2031 lookup_internalvar (const char *name)
2032 {
2033   struct internalvar *var;
2034 
2035   var = lookup_only_internalvar (name);
2036   if (var)
2037     return var;
2038 
2039   return create_internalvar (name);
2040 }
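
/* For example (illustrative only), both calls below refer to the
   variable the user sees as "$foo"; the first creates it with a void
   value if needed, the second returns NULL instead of creating it:

     struct internalvar *v1 = lookup_internalvar ("foo");
     struct internalvar *v2 = lookup_only_internalvar ("foo");  */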
2041 
2042 /* Return current value of internal variable VAR.  For variables that
2043    are not inherently typed, use a value type appropriate for GDBARCH.  */
2044 
2045 struct value *
2046 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2047 {
2048   struct value *val;
2049   struct trace_state_variable *tsv;
2050 
2051   /* If there is a trace state variable of the same name, assume that
2052      is what we really want to see.  */
2053   tsv = find_trace_state_variable (var->name.c_str ());
2054   if (tsv)
2055     {
2056       tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2057 								&(tsv->value));
2058       if (tsv->value_known)
2059 	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2060 				  tsv->value);
2061       else
2062 	val = value::allocate (builtin_type (gdbarch)->builtin_void);
2063       return val;
2064     }
2065 
2066   switch (var->kind)
2067     {
2068     case INTERNALVAR_VOID:
2069       val = value::allocate (builtin_type (gdbarch)->builtin_void);
2070       break;
2071 
2072     case INTERNALVAR_FUNCTION:
2073       val = value::allocate (builtin_type (gdbarch)->internal_fn);
2074       break;
2075 
2076     case INTERNALVAR_INTEGER:
2077       if (!var->u.integer.type)
2078 	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2079 				  var->u.integer.val);
2080       else
2081 	val = value_from_longest (var->u.integer.type, var->u.integer.val);
2082       break;
2083 
2084     case INTERNALVAR_STRING:
2085       val = current_language->value_string (gdbarch,
2086 					    var->u.string,
2087 					    strlen (var->u.string));
2088       break;
2089 
2090     case INTERNALVAR_VALUE:
2091       val = var->u.value->copy ();
2092       if (val->lazy ())
2093 	val->fetch_lazy ();
2094       break;
2095 
2096     case INTERNALVAR_MAKE_VALUE:
2097       val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2098 							var->u.make_value.data);
2099       break;
2100 
2101     default:
2102       internal_error (_("bad kind"));
2103     }
2104 
2105   /* Change the VALUE_LVAL to lval_internalvar so that future operations
2106      on this value go back to affect the original internal variable.
2107 
2108      Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2109      no underlying modifiable state in the internal variable.
2110 
2111      Likewise, if the variable's value is a computed lvalue, we want
2112      references to it to produce another computed lvalue, where
2113      references and assignments actually operate through the
2114      computed value's functions.
2115 
2116      This means that internal variables with computed values
2117      behave a little differently from other internal variables:
2118      assignments to them don't just replace the previous value
2119      altogether.  At the moment, this seems like the behavior we
2120      want.  */
2121 
2122   if (var->kind != INTERNALVAR_MAKE_VALUE
2123       && val->lval () != lval_computed)
2124     {
2125       val->set_lval (lval_internalvar);
2126       VALUE_INTERNALVAR (val) = var;
2127     }
2128 
2129   return val;
2130 }
2131 
2132 int
2133 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2134 {
2135   if (var->kind == INTERNALVAR_INTEGER)
2136     {
2137       *result = var->u.integer.val;
2138       return 1;
2139     }
2140 
2141   if (var->kind == INTERNALVAR_VALUE)
2142     {
2143       struct type *type = check_typedef (var->u.value->type ());
2144 
2145       if (type->code () == TYPE_CODE_INT)
2146 	{
2147 	  *result = value_as_long (var->u.value);
2148 	  return 1;
2149 	}
2150     }
2151 
2152   if (var->kind == INTERNALVAR_MAKE_VALUE)
2153     {
2154       struct gdbarch *gdbarch = get_current_arch ();
2155       struct value *val
2156 	= (*var->u.make_value.functions->make_value) (gdbarch, var,
2157 						      var->u.make_value.data);
2158       struct type *type = check_typedef (val->type ());
2159 
2160       if (type->code () == TYPE_CODE_INT)
2161 	{
2162 	  *result = value_as_long (val);
2163 	  return 1;
2164 	}
2165     }
2166 
2167   return 0;
2168 }
2169 
2170 static int
2171 get_internalvar_function (struct internalvar *var,
2172 			  struct internal_function **result)
2173 {
2174   switch (var->kind)
2175     {
2176     case INTERNALVAR_FUNCTION:
2177       *result = var->u.fn.function;
2178       return 1;
2179 
2180     default:
2181       return 0;
2182     }
2183 }
2184 
2185 void
2186 set_internalvar_component (struct internalvar *var,
2187 			   LONGEST offset, LONGEST bitpos,
2188 			   LONGEST bitsize, struct value *newval)
2189 {
2190   gdb_byte *addr;
2191   struct gdbarch *gdbarch;
2192   int unit_size;
2193 
2194   switch (var->kind)
2195     {
2196     case INTERNALVAR_VALUE:
2197       addr = var->u.value->contents_writeable ().data ();
2198       gdbarch = var->u.value->arch ();
2199       unit_size = gdbarch_addressable_memory_unit_size (gdbarch);
2200 
2201       if (bitsize)
2202 	modify_field (var->u.value->type (), addr + offset,
2203 		      value_as_long (newval), bitpos, bitsize);
2204       else
2205 	memcpy (addr + offset * unit_size, newval->contents ().data (),
2206 		newval->type ()->length ());
2207       break;
2208 
2209     default:
2210       /* We can never get a component of any other kind.  */
2211       internal_error (_("set_internalvar_component"));
2212     }
2213 }
2214 
2215 void
2216 set_internalvar (struct internalvar *var, struct value *val)
2217 {
2218   enum internalvar_kind new_kind;
2219   union internalvar_data new_data = { 0 };
2220 
2221   if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2222     error (_("Cannot overwrite convenience function %s"), var->name.c_str ());
2223 
2224   /* Prepare new contents.  */
2225   switch (check_typedef (val->type ())->code ())
2226     {
2227     case TYPE_CODE_VOID:
2228       new_kind = INTERNALVAR_VOID;
2229       break;
2230 
2231     case TYPE_CODE_INTERNAL_FUNCTION:
2232       gdb_assert (val->lval () == lval_internalvar);
2233       new_kind = INTERNALVAR_FUNCTION;
2234       get_internalvar_function (VALUE_INTERNALVAR (val),
2235 				&new_data.fn.function);
2236       /* Copies created here are never canonical.  */
2237       break;
2238 
2239     default:
2240       new_kind = INTERNALVAR_VALUE;
2241       struct value *copy = val->copy ();
2242       copy->set_modifiable (true);
2243 
2244       /* Force the value to be fetched from the target now, to avoid problems
2245 	 later when this internalvar is referenced and the target is gone or
2246 	 has changed.  */
2247       if (copy->lazy ())
2248 	copy->fetch_lazy ();
2249 
2250       /* Release the value from the value chain to prevent it from being
2251 	 deleted by free_all_values.  From here on this function should not
2252 	 call error () until new_data is installed into var->u, to avoid
2253 	 leaking memory.  */
2254       new_data.value = release_value (copy).release ();
2255 
2256       /* Internal variables which are created from values with a dynamic
2257 	 location don't need the location property of the origin anymore.
2258 	 The resolved dynamic location is used prior to any other address
2259 	 when accessing the value.
2260 	 If we kept it, we would still refer to the origin value.
2261 	 Remove the location property in case it exists.  */
2262       new_data.value->type ()->remove_dyn_prop (DYN_PROP_DATA_LOCATION);
2263 
2264       break;
2265     }
2266 
2267   /* Clean up old contents.  */
2268   clear_internalvar (var);
2269 
2270   /* Switch over.  */
2271   var->kind = new_kind;
2272   var->u = new_data;
2273   /* End code which must not call error().  */
2274 }
2275 
2276 void
2277 set_internalvar_integer (struct internalvar *var, LONGEST l)
2278 {
2279   /* Clean up old contents.  */
2280   clear_internalvar (var);
2281 
2282   var->kind = INTERNALVAR_INTEGER;
2283   var->u.integer.type = NULL;
2284   var->u.integer.val = l;
2285 }
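
/* Illustrative round trip (comment only), for some internalvar VAR:

     set_internalvar_integer (var, 7);

     LONGEST l;
     int ok = get_internalvar_integer (var, &l);   // ok == 1, l == 7

   Since no explicit type is recorded, value_of_internalvar would later
   present this value using the architecture's default int type.  */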
2286 
2287 void
2288 set_internalvar_string (struct internalvar *var, const char *string)
2289 {
2290   /* Clean up old contents.  */
2291   clear_internalvar (var);
2292 
2293   var->kind = INTERNALVAR_STRING;
2294   var->u.string = xstrdup (string);
2295 }
2296 
2297 static void
2298 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2299 {
2300   /* Clean up old contents.  */
2301   clear_internalvar (var);
2302 
2303   var->kind = INTERNALVAR_FUNCTION;
2304   var->u.fn.function = f;
2305   var->u.fn.canonical = 1;
2306   /* Variables installed here are always the canonical version.  */
2307 }
2308 
2309 void
2310 clear_internalvar (struct internalvar *var)
2311 {
2312   /* Clean up old contents.  */
2313   switch (var->kind)
2314     {
2315     case INTERNALVAR_VALUE:
2316       var->u.value->decref ();
2317       break;
2318 
2319     case INTERNALVAR_STRING:
2320       xfree (var->u.string);
2321       break;
2322 
2323     default:
2324       break;
2325     }
2326 
2327   /* Reset to void kind.  */
2328   var->kind = INTERNALVAR_VOID;
2329 }
2330 
2331 const char *
2332 internalvar_name (const struct internalvar *var)
2333 {
2334   return var->name.c_str ();
2335 }
2336 
2337 static struct internal_function *
2338 create_internal_function (const char *name,
2339 			  internal_function_fn handler, void *cookie)
2340 {
2341   struct internal_function *ifn = XNEW (struct internal_function);
2342 
2343   ifn->name = xstrdup (name);
2344   ifn->handler = handler;
2345   ifn->cookie = cookie;
2346   return ifn;
2347 }
2348 
2349 const char *
2350 value_internal_function_name (struct value *val)
2351 {
2352   struct internal_function *ifn;
2353   int result;
2354 
2355   gdb_assert (val->lval () == lval_internalvar);
2356   result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2357   gdb_assert (result);
2358 
2359   return ifn->name;
2360 }
2361 
2362 struct value *
2363 call_internal_function (struct gdbarch *gdbarch,
2364 			const struct language_defn *language,
2365 			struct value *func, int argc, struct value **argv)
2366 {
2367   struct internal_function *ifn;
2368   int result;
2369 
2370   gdb_assert (func->lval () == lval_internalvar);
2371   result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2372   gdb_assert (result);
2373 
2374   return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2375 }
2376 
2377 /* The 'function' command.  This does nothing -- it is just a
2378    placeholder to let "help function NAME" work.  This is also used as
2379    the implementation of the sub-command that is created when
2380    registering an internal function.  */
2381 static void
2382 function_command (const char *command, int from_tty)
2383 {
2384   /* Do nothing.  */
2385 }
2386 
2387 /* Helper function that does the work for add_internal_function.  */
2388 
2389 static struct cmd_list_element *
2390 do_add_internal_function (const char *name, const char *doc,
2391 			  internal_function_fn handler, void *cookie)
2392 {
2393   struct internal_function *ifn;
2394   struct internalvar *var = lookup_internalvar (name);
2395 
2396   ifn = create_internal_function (name, handler, cookie);
2397   set_internalvar_function (var, ifn);
2398 
2399   return add_cmd (name, no_class, function_command, doc, &functionlist);
2400 }
2401 
2402 /* See value.h.  */
2403 
2404 void
2405 add_internal_function (const char *name, const char *doc,
2406 		       internal_function_fn handler, void *cookie)
2407 {
2408   do_add_internal_function (name, doc, handler, cookie);
2409 }
2410 
2411 /* See value.h.  */
2412 
2413 void
2414 add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
2415 		       gdb::unique_xmalloc_ptr<char> &&doc,
2416 		       internal_function_fn handler, void *cookie)
2417 {
2418   struct cmd_list_element *cmd
2419     = do_add_internal_function (name.get (), doc.get (), handler, cookie);
2420 
2421   /* Manually transfer the ownership of the doc and name strings to CMD by
2422      setting the appropriate flags.  */
2423   (void) doc.release ();
2424   cmd->doc_allocated = 1;
2425   (void) name.release ();
2426   cmd->name_allocated = 1;
2427 }
2428 
2429 void
2430 value::preserve (struct objfile *objfile, htab_t copied_types)
2431 {
2432   if (m_type->objfile_owner () == objfile)
2433     m_type = copy_type_recursive (m_type, copied_types);
2434 
2435   if (m_enclosing_type->objfile_owner () == objfile)
2436     m_enclosing_type = copy_type_recursive (m_enclosing_type, copied_types);
2437 }
2438 
2439 /* Likewise for internal variable VAR.  */
2440 
2441 static void
2442 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2443 			  htab_t copied_types)
2444 {
2445   switch (var->kind)
2446     {
2447     case INTERNALVAR_INTEGER:
2448       if (var->u.integer.type
2449 	  && var->u.integer.type->objfile_owner () == objfile)
2450 	var->u.integer.type
2451 	  = copy_type_recursive (var->u.integer.type, copied_types);
2452       break;
2453 
2454     case INTERNALVAR_VALUE:
2455       var->u.value->preserve (objfile, copied_types);
2456       break;
2457     }
2458 }
2459 
2460 /* Make sure that all types and values referenced by VAROBJ are updated before
2461    OBJFILE is discarded.  COPIED_TYPES is used to prevent cycles and
2462    duplicates.  */
2463 
2464 static void
2465 preserve_one_varobj (struct varobj *varobj, struct objfile *objfile,
2466 		     htab_t copied_types)
2467 {
2468   if (varobj->type->is_objfile_owned ()
2469       && varobj->type->objfile_owner () == objfile)
2470     {
2471       varobj->type
2472 	= copy_type_recursive (varobj->type, copied_types);
2473     }
2474 
2475   if (varobj->value != nullptr)
2476     varobj->value->preserve (objfile, copied_types);
2477 }
2478 
2479 /* Update the internal variables and value history when OBJFILE is
2480    discarded; we must copy the types out of the objfile.  New global types
2481    will be created for every convenience variable which currently points to
2482    this objfile's types, and the convenience variables will be adjusted to
2483    use the new global types.  */
2484 
2485 void
2486 preserve_values (struct objfile *objfile)
2487 {
2488   /* Create the hash table used to track the types that have already
2489      been copied out of OBJFILE.  */
2490   htab_up copied_types = create_copied_types_hash ();
2491 
2492   for (const value_ref_ptr &item : value_history)
2493     item->preserve (objfile, copied_types.get ());
2494 
2495   for (auto &pair : internalvars)
2496     preserve_one_internalvar (&pair.second, objfile, copied_types.get ());
2497 
2498   /* For the remaining varobj, check that none has type owned by OBJFILE.  */
2499   all_root_varobjs ([&copied_types, objfile] (struct varobj *varobj)
2500     {
2501       preserve_one_varobj (varobj, objfile,
2502 			   copied_types.get ());
2503     });
2504 
2505   preserve_ext_lang_values (objfile, copied_types.get ());
2506 }
2507 
2508 static void
2509 show_convenience (const char *ignore, int from_tty)
2510 {
2511   struct gdbarch *gdbarch = get_current_arch ();
2512   int varseen = 0;
2513   struct value_print_options opts;
2514 
2515   get_user_print_options (&opts);
2516   for (auto &pair : internalvars)
2517     {
2518       internalvar &var = pair.second;
2519 
2520       if (!varseen)
2521 	{
2522 	  varseen = 1;
2523 	}
2524       gdb_printf (("$%s = "), var.name.c_str ());
2525 
2526       try
2527 	{
2528 	  struct value *val;
2529 
2530 	  val = value_of_internalvar (gdbarch, &var);
2531 	  value_print (val, gdb_stdout, &opts);
2532 	}
2533       catch (const gdb_exception_error &ex)
2534 	{
2535 	  fprintf_styled (gdb_stdout, metadata_style.style (),
2536 			  _("<error: %s>"), ex.what ());
2537 	}
2538 
2539       gdb_printf (("\n"));
2540     }
2541   if (!varseen)
2542     {
2543       /* This text does not mention convenience functions on purpose.
2544 	 The user can't create them except via Python, and if Python support
2545 	 is installed this message will never be printed ($_streq will
2546 	 exist).  */
2547       gdb_printf (_("No debugger convenience variables now defined.\n"
2548 		    "Convenience variables have "
2549 		    "names starting with \"$\";\n"
2550 		    "use \"set\" as in \"set "
2551 		    "$foo = 5\" to define them.\n"));
2552     }
2553 }
2554 
2555 
2556 /* See value.h.  */
2557 
2558 struct value *
2559 value::from_xmethod (xmethod_worker_up &&worker)
2560 {
2561   struct value *v;
2562 
2563   v = value::allocate (builtin_type (current_inferior ()->arch ())->xmethod);
2564   v->m_lval = lval_xcallable;
2565   v->m_location.xm_worker = worker.release ();
2566   v->m_modifiable = false;
2567 
2568   return v;
2569 }
2570 
2571 /* See value.h.  */
2572 
2573 struct type *
2574 value::result_type_of_xmethod (gdb::array_view<value *> argv)
2575 {
2576   gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2577 	      && m_lval == lval_xcallable && !argv.empty ());
2578 
2579   return m_location.xm_worker->get_result_type (argv[0], argv.slice (1));
2580 }
2581 
2582 /* See value.h.  */
2583 
2584 struct value *
2585 value::call_xmethod (gdb::array_view<value *> argv)
2586 {
2587   gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2588 	      && m_lval == lval_xcallable && !argv.empty ());
2589 
2590   return m_location.xm_worker->invoke (argv[0], argv.slice (1));
2591 }
2592 
2593 /* Extract a value as a C number (either long or double).
2594    Knows how to convert fixed values to double, or
2595    floating values to long.
2596    Does not deallocate the value.  */
2597 
2598 LONGEST
2599 value_as_long (struct value *val)
2600 {
2601   /* This coerces arrays and functions, which is necessary (e.g.
2602      in disassemble_command).  It also dereferences references, which
2603      I suspect is the most logical thing to do.  */
2604   val = coerce_array (val);
2605   return unpack_long (val->type (), val->contents ().data ());
2606 }
2607 
2608 /* See value.h.  */
2609 
2610 gdb_mpz
2611 value_as_mpz (struct value *val)
2612 {
2613   val = coerce_array (val);
2614   struct type *type = check_typedef (val->type ());
2615 
2616   switch (type->code ())
2617     {
2618     case TYPE_CODE_ENUM:
2619     case TYPE_CODE_BOOL:
2620     case TYPE_CODE_INT:
2621     case TYPE_CODE_CHAR:
2622     case TYPE_CODE_RANGE:
2623       break;
2624 
2625     default:
2626       return gdb_mpz (value_as_long (val));
2627     }
2628 
2629   gdb_mpz result;
2630 
2631   gdb::array_view<const gdb_byte> valbytes = val->contents ();
2632   enum bfd_endian byte_order = type_byte_order (type);
2633 
2634   /* Handle integers that are either not a multiple of the word size,
2635      or that are stored at some bit offset.  */
2636   unsigned bit_off = 0, bit_size = 0;
2637   if (type->bit_size_differs_p ())
2638     {
2639       bit_size = type->bit_size ();
2640       if (bit_size == 0)
2641 	{
2642 	  /* We can just handle this immediately.  */
2643 	  return result;
2644 	}
2645 
2646       bit_off = type->bit_offset ();
2647 
2648       unsigned n_bytes = ((bit_off % 8) + bit_size + 7) / 8;
2649       valbytes = valbytes.slice (bit_off / 8, n_bytes);
2650 
2651       if (byte_order == BFD_ENDIAN_BIG)
2652 	bit_off = (n_bytes * 8 - bit_off % 8 - bit_size);
2653       else
2654 	bit_off %= 8;
2655     }
2656 
2657   result.read (val->contents (), byte_order, type->is_unsigned ());
2658 
2659   /* Shift off any low bits, if needed.  */
2660   if (bit_off != 0)
2661     result >>= bit_off;
2662 
2663   /* Mask off any high bits, if needed.  */
2664   if (bit_size)
2665     result.mask (bit_size);
2666 
2667   /* Now handle any range bias.  */
2668   if (type->code () == TYPE_CODE_RANGE && type->bounds ()->bias != 0)
2669     {
2670       /* Unfortunately we have to box here, because LONGEST is
2671 	 probably wider than long.  */
2672       result += gdb_mpz (type->bounds ()->bias);
2673     }
2674 
2675   return result;
2676 }
2677 
2678 /* Extract a value as a C pointer.  */
2679 
2680 CORE_ADDR
2681 value_as_address (struct value *val)
2682 {
2683   struct gdbarch *gdbarch = val->type ()->arch ();
2684 
2685   /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2686      whether we want this to be true eventually.  */
2687 #if 0
2688   /* gdbarch_addr_bits_remove is wrong if we are being called for a
2689      non-address (e.g. argument to "signal", "info break", etc.), or
2690      for pointers to char, in which the low bits *are* significant.  */
2691   return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2692 #else
2693 
2694   /* There are several targets (IA-64, PowerPC, and others) which
2695      don't represent pointers to functions as simply the address of
2696      the function's entry point.  For example, on the IA-64, a
2697      function pointer points to a two-word descriptor, generated by
2698      the linker, which contains the function's entry point, and the
2699      value the IA-64 "global pointer" register should have --- to
2700      support position-independent code.  The linker generates
2701      descriptors only for those functions whose addresses are taken.
2702 
2703      On such targets, it's difficult for GDB to convert an arbitrary
2704      function address into a function pointer; it has to either find
2705      an existing descriptor for that function, or call malloc and
2706      build its own.  On some targets, it is impossible for GDB to
2707      build a descriptor at all: the descriptor must contain a jump
2708      instruction; data memory cannot be executed; and code memory
2709      cannot be modified.
2710 
2711      Upon entry to this function, if VAL is a value of type `function'
2712      (that is, TYPE_CODE (val->type ()) == TYPE_CODE_FUNC), then
2713      val->address () is the address of the function.  This is what
2714      you'll get if you evaluate an expression like `main'.  The call
2715      to COERCE_ARRAY below actually does all the usual unary
2716      conversions, which includes converting values of type `function'
2717      to `pointer to function'.  This is the challenging conversion
2718      discussed above.  Then, `unpack_pointer' will convert that pointer
2719      back into an address.
2720 
2721      So, suppose the user types `disassemble foo' on an architecture
2722      with a strange function pointer representation, on which GDB
2723      cannot build its own descriptors, and suppose further that `foo'
2724      has no linker-built descriptor.  The address->pointer conversion
2725      will signal an error and prevent the command from running, even
2726      though the next step would have been to convert the pointer
2727      directly back into the same address.
2728 
2729      The following shortcut avoids this whole mess.  If VAL is a
2730      function, just return its address directly.  */
2731   if (val->type ()->code () == TYPE_CODE_FUNC
2732       || val->type ()->code () == TYPE_CODE_METHOD)
2733     return val->address ();
2734 
2735   val = coerce_array (val);
2736 
2737   /* Some architectures (e.g. Harvard) map instruction and data
2738      addresses onto a single large unified address space.  For
2739      instance: an architecture may consider a large integer in the
2740      range 0x10000000 .. 0x1000ffff to already represent a data
2741      address (hence not need a pointer-to-address conversion) while
2742      a small integer would still need the full integer-to-pointer-
2743      to-address conversion.  Just assume such architectures handle
2744      all integer conversions in a single function.  */
2745 
2746   /* JimB writes:
2747 
2748      I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2749      must admonish GDB hackers to make sure its behavior matches the
2750      compiler's, whenever possible.
2751 
2752      In general, I think GDB should evaluate expressions the same way
2753      the compiler does.  When the user copies an expression out of
2754      their source code and hands it to a `print' command, they should
2755      get the same value the compiler would have computed.  Any
2756      deviation from this rule can cause major confusion and annoyance,
2757      and needs to be justified carefully.  In other words, GDB doesn't
2758      really have the freedom to do these conversions in clever and
2759      useful ways.
2760 
2761      AndrewC pointed out that users aren't complaining about how GDB
2762      casts integers to pointers; they are complaining that they can't
2763      take an address from a disassembly listing and give it to `x/i'.
2764      This is certainly important.
2765 
2766      Adding an architecture method like integer_to_address() certainly
2767      makes it possible for GDB to "get it right" in all circumstances
2768      --- the target has complete control over how things get done, so
2769      people can Do The Right Thing for their target without breaking
2770      anyone else.  The standard doesn't specify how integers get
2771      converted to pointers; usually, the ABI doesn't either, but
2772      ABI-specific code is a more reasonable place to handle it.  */
2773 
2774   if (!val->type ()->is_pointer_or_reference ()
2775       && gdbarch_integer_to_address_p (gdbarch))
2776     return gdbarch_integer_to_address (gdbarch, val->type (),
2777 				       val->contents ().data ());
2778 
2779   return unpack_pointer (val->type (), val->contents ().data ());
2780 #endif
2781 }
2782 
2783 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
2784    as a long, or as a double, assuming the raw data is described
2785    by type TYPE.  Knows how to convert different sizes of values
2786    and can convert between fixed and floating point.  We don't assume
2787    any alignment for the raw data.  Return value is in host byte order.
2788 
2789    If you want functions and arrays to be coerced to pointers, and
2790    references to be dereferenced, call value_as_long() instead.
2791 
2792    C++: It is assumed that the front-end has taken care of
2793    all matters concerning pointers to members.  A pointer
2794    to member which reaches here is considered to be equivalent
2795    to an INT (or some size).  After all, it is only an offset.  */
2796 
2797 LONGEST
2798 unpack_long (struct type *type, const gdb_byte *valaddr)
2799 {
2800   if (is_fixed_point_type (type))
2801     type = type->fixed_point_type_base_type ();
2802 
2803   enum bfd_endian byte_order = type_byte_order (type);
2804   enum type_code code = type->code ();
2805   int len = type->length ();
2806   int nosign = type->is_unsigned ();
2807 
2808   switch (code)
2809     {
2810     case TYPE_CODE_TYPEDEF:
2811       return unpack_long (check_typedef (type), valaddr);
2812     case TYPE_CODE_ENUM:
2813     case TYPE_CODE_FLAGS:
2814     case TYPE_CODE_BOOL:
2815     case TYPE_CODE_INT:
2816     case TYPE_CODE_CHAR:
2817     case TYPE_CODE_RANGE:
2818     case TYPE_CODE_MEMBERPTR:
2819       {
2820 	LONGEST result;
2821 
2822 	if (type->bit_size_differs_p ())
2823 	  {
2824 	    unsigned bit_off = type->bit_offset ();
2825 	    unsigned bit_size = type->bit_size ();
2826 	    if (bit_size == 0)
2827 	      {
2828 		/* unpack_bits_as_long doesn't handle this case the
2829 		   way we'd like, so handle it here.  */
2830 		result = 0;
2831 	      }
2832 	    else
2833 	      result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
2834 	  }
2835 	else
2836 	  {
2837 	    if (nosign)
2838 	      result = extract_unsigned_integer (valaddr, len, byte_order);
2839 	    else
2840 	      result = extract_signed_integer (valaddr, len, byte_order);
2841 	  }
2842 	if (code == TYPE_CODE_RANGE)
2843 	  result += type->bounds ()->bias;
2844 	return result;
2845       }
2846 
2847     case TYPE_CODE_FLT:
2848     case TYPE_CODE_DECFLOAT:
2849       return target_float_to_longest (valaddr, type);
2850 
2851     case TYPE_CODE_FIXED_POINT:
2852       {
2853 	gdb_mpq vq;
2854 	vq.read_fixed_point (gdb::make_array_view (valaddr, len),
2855 			     byte_order, nosign,
2856 			     type->fixed_point_scaling_factor ());
2857 
2858 	gdb_mpz vz = vq.as_integer ();
2859 	return vz.as_integer<LONGEST> ();
2860       }
2861 
2862     case TYPE_CODE_PTR:
2863     case TYPE_CODE_REF:
2864     case TYPE_CODE_RVALUE_REF:
2865       /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2866 	 whether we want this to be true eventually.  */
2867       return extract_typed_address (valaddr, type);
2868 
2869     default:
2870       error (_("Value can't be converted to integer."));
2871     }
2872 }
2873 
2874 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
2875    as a CORE_ADDR, assuming the raw data is described by type TYPE.
2876    We don't assume any alignment for the raw data.  Return value is in
2877    host byte order.
2878 
2879    If you want functions and arrays to be coerced to pointers, and
2880    references to be dereferenced, call value_as_address() instead.
2881 
2882    C++: It is assumed that the front-end has taken care of
2883    all matters concerning pointers to members.  A pointer
2884    to member which reaches here is considered to be equivalent
2885    to an INT (or some size).  After all, it is only an offset.  */
2886 
2887 CORE_ADDR
2888 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2889 {
2890   /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2891      whether we want this to be true eventually.  */
2892   return unpack_long (type, valaddr);
2893 }
2894 
2895 bool
2896 is_floating_value (struct value *val)
2897 {
2898   struct type *type = check_typedef (val->type ());
2899 
2900   if (is_floating_type (type))
2901     {
2902       if (!target_float_is_valid (val->contents ().data (), type))
2903 	error (_("Invalid floating value found in program."));
2904       return true;
2905     }
2906 
2907   return false;
2908 }
2909 
2910 
2911 /* Get the value of the FIELDNO'th field (which must be static) of
2912    TYPE.  */
2913 
2914 struct value *
2915 value_static_field (struct type *type, int fieldno)
2916 {
2917   struct value *retval;
2918 
2919   switch (type->field (fieldno).loc_kind ())
2920     {
2921     case FIELD_LOC_KIND_PHYSADDR:
2922       retval = value_at_lazy (type->field (fieldno).type (),
2923 			      type->field (fieldno).loc_physaddr ());
2924       break;
2925     case FIELD_LOC_KIND_PHYSNAME:
2926     {
2927       const char *phys_name = type->field (fieldno).loc_physname ();
2928       /* type->field (fieldno).name (); */
2929       struct block_symbol sym = lookup_symbol (phys_name, nullptr,
2930 					       SEARCH_VAR_DOMAIN, nullptr);
2931 
2932       if (sym.symbol == NULL)
2933 	{
2934 	  /* With some compilers, e.g. HP aCC, static data members are
2935 	     reported as non-debuggable symbols.  */
2936 	  struct bound_minimal_symbol msym
2937 	    = lookup_minimal_symbol (phys_name, NULL, NULL);
2938 	  struct type *field_type = type->field (fieldno).type ();
2939 
2940 	  if (!msym.minsym)
2941 	    retval = value::allocate_optimized_out (field_type);
2942 	  else
2943 	    retval = value_at_lazy (field_type, msym.value_address ());
2944 	}
2945       else
2946 	retval = value_of_variable (sym.symbol, sym.block);
2947       break;
2948     }
2949     default:
2950       gdb_assert_not_reached ("unexpected field location kind");
2951     }
2952 
2953   return retval;
2954 }
2955 
2956 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2957    You have to be careful here, since the size of the data area for the value
2958    is set by the length of the enclosing type.  So if NEW_ENCL_TYPE is bigger
2959    than the old enclosing type, you have to allocate more space for the
2960    data.  */
2961 
2962 void
2963 value::set_enclosing_type (struct type *new_encl_type)
2964 {
2965   if (new_encl_type->length () > enclosing_type ()->length ())
2966     {
2967       check_type_length_before_alloc (new_encl_type);
2968       m_contents.reset ((gdb_byte *) xrealloc (m_contents.release (),
2969 					       new_encl_type->length ()));
2970     }
2971 
2972   m_enclosing_type = new_encl_type;
2973 }
2974 
2975 /* See value.h.  */
2976 
2977 struct value *
2978 value::primitive_field (LONGEST offset, int fieldno, struct type *arg_type)
2979 {
2980   struct value *v;
2981   struct type *type;
2982   int unit_size = gdbarch_addressable_memory_unit_size (arch ());
2983 
2984   arg_type = check_typedef (arg_type);
2985   type = arg_type->field (fieldno).type ();
2986 
2987   /* Call check_typedef on our type to make sure that, if TYPE
2988      is a TYPE_CODE_TYPEDEF, its length is set to the length
2989      of the target type instead of zero.  However, we do not
2990      replace the typedef type by the target type, because we want
2991      to keep the typedef in order to be able to print the type
2992      description correctly.  */
2993   check_typedef (type);
2994 
2995   if (arg_type->field (fieldno).bitsize ())
2996     {
2997       /* Handle packed fields.
2998 
2999 	 Create a new value for the bitfield, with bitpos and bitsize
3000 	 set.  If possible, arrange offset and bitpos so that we can
3001 	 do a single aligned read of the size of the containing type.
3002 	 Otherwise, adjust offset to the byte containing the first
3003 	 bit.  Assume that the address, offset, and embedded offset
3004 	 are sufficiently aligned.  */
3005 
3006       LONGEST bitpos = arg_type->field (fieldno).loc_bitpos ();
3007       LONGEST container_bitsize = type->length () * 8;
3008 
3009       v = value::allocate_lazy (type);
3010       v->set_bitsize (arg_type->field (fieldno).bitsize ());
3011       if ((bitpos % container_bitsize) + v->bitsize () <= container_bitsize
3012 	  && type->length () <= (int) sizeof (LONGEST))
3013 	v->set_bitpos (bitpos % container_bitsize);
3014       else
3015 	v->set_bitpos (bitpos % 8);
3016       v->set_offset ((embedded_offset ()
3017 		      + offset
3018 		      + (bitpos - v->bitpos ()) / 8));
3019       v->set_parent (this);
3020       if (!lazy ())
3021 	v->fetch_lazy ();
3022     }
3023   else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3024     {
3025       /* This field is actually a base subobject, so preserve the
3026 	 entire object's contents for later references to virtual
3027 	 bases, etc.  */
3028       LONGEST boffset;
3029 
3030       /* Lazy register values with offsets are not supported.  */
3031       if (this->lval () == lval_register && lazy ())
3032 	fetch_lazy ();
3033 
3034       /* We special case virtual inheritance here because this
3035 	 requires access to the contents, which we would rather avoid
3036 	 for references to ordinary fields of unavailable values.  */
3037       if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3038 	boffset = baseclass_offset (arg_type, fieldno,
3039 				    contents ().data (),
3040 				    embedded_offset (),
3041 				    address (),
3042 				    this);
3043       else
3044 	boffset = arg_type->field (fieldno).loc_bitpos () / 8;
3045 
3046       if (lazy ())
3047 	v = value::allocate_lazy (enclosing_type ());
3048       else
3049 	{
3050 	  v = value::allocate (enclosing_type ());
3051 	  contents_copy_raw (v, 0, 0, enclosing_type ()->length ());
3052 	}
3053       v->deprecated_set_type (type);
3054       v->set_offset (this->offset ());
3055       v->set_embedded_offset (offset + embedded_offset () + boffset);
3056     }
3057   else if (NULL != TYPE_DATA_LOCATION (type))
3058     {
3059       /* Field is a dynamic data member.  */
3060 
3061       gdb_assert (0 == offset);
3062       /* We expect an already resolved data location.  */
3063       gdb_assert (TYPE_DATA_LOCATION (type)->is_constant ());
3064       /* For dynamic data types, defer memory allocation
3065 	 until we actually access the value.  */
3066       v = value::allocate_lazy (type);
3067     }
3068   else
3069     {
3070       /* Plain old data member */
3071       offset += (arg_type->field (fieldno).loc_bitpos ()
3072 		 / (HOST_CHAR_BIT * unit_size));
3073 
3074       /* Lazy register values with offsets are not supported.  */
3075       if (this->lval () == lval_register && lazy ())
3076 	fetch_lazy ();
3077 
3078       if (lazy ())
3079 	v = value::allocate_lazy (type);
3080       else
3081 	{
3082 	  v = value::allocate (type);
3083 	  contents_copy_raw (v, v->embedded_offset (),
3084 			     embedded_offset () + offset,
3085 			     type_length_units (type));
3086 	}
3087       v->set_offset (this->offset () + offset + embedded_offset ());
3088     }
3089   v->set_component_location (this);
3090   return v;
3091 }
3092 
3093 /* Given a value ARG1 of a struct or union type,
3094    extract and return the value of one of its (non-static) fields.
3095    FIELDNO says which field.  */
3096 
3097 struct value *
3098 value_field (struct value *arg1, int fieldno)
3099 {
3100   return arg1->primitive_field (0, fieldno, arg1->type ());
3101 }
3102 
3103 /* Return a non-virtual function as a value.
3104    F is the list of member functions which contains the desired method.
3105    J is an index into F which provides the desired method.
3106 
3107    We only use the symbol for its address, so be happy with either a
3108    full symbol or a minimal symbol.  */
3109 
3110 struct value *
3111 value_fn_field (struct value **arg1p, struct fn_field *f,
3112 		int j, struct type *type,
3113 		LONGEST offset)
3114 {
3115   struct value *v;
3116   struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3117   const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3118   struct symbol *sym;
3119   struct bound_minimal_symbol msym;
3120 
3121   sym = lookup_symbol (physname, nullptr, SEARCH_FUNCTION_DOMAIN,
3122 		       nullptr).symbol;
3123   if (sym == nullptr)
3124     {
3125       msym = lookup_bound_minimal_symbol (physname);
3126       if (msym.minsym == NULL)
3127 	return NULL;
3128     }
3129 
3130   v = value::allocate (ftype);
3131   v->set_lval (lval_memory);
3132   if (sym)
3133     {
3134       v->set_address (sym->value_block ()->entry_pc ());
3135     }
3136   else
3137     {
3138       /* The minimal symbol might point to a function descriptor;
3139 	 resolve it to the actual code address instead.  */
3140       struct objfile *objfile = msym.objfile;
3141       struct gdbarch *gdbarch = objfile->arch ();
3142 
3143       v->set_address (gdbarch_convert_from_func_ptr_addr
3144 		      (gdbarch, msym.value_address (),
3145 		       current_inferior ()->top_target ()));
3146     }
3147 
3148   if (arg1p)
3149     {
3150       if (type != (*arg1p)->type ())
3151 	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
3152 					value_addr (*arg1p)));
3153 
3154       /* Move the `this' pointer according to the offset.
3155 	 (*arg1p)->offset () += offset; */
3156     }
3157 
3158   return v;
3159 }
3160 
3161 
3162 
3163 /* See value.h.  */
3164 
3165 LONGEST
3166 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3167 		     LONGEST bitpos, LONGEST bitsize)
3168 {
3169   enum bfd_endian byte_order = type_byte_order (field_type);
3170   ULONGEST val;
3171   ULONGEST valmask;
3172   int lsbcount;
3173   LONGEST bytes_read;
3174   LONGEST read_offset;
3175 
3176   /* Read the minimum number of bytes required; there may not be
3177      enough bytes to read an entire ULONGEST.  */
3178   field_type = check_typedef (field_type);
3179   if (bitsize)
3180     bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3181   else
3182     {
3183       bytes_read = field_type->length ();
3184       bitsize = 8 * bytes_read;
3185     }
3186 
3187   read_offset = bitpos / 8;
3188 
3189   val = extract_unsigned_integer (valaddr + read_offset,
3190 				  bytes_read, byte_order);
3191 
3192   /* Extract bits.  See comment above.  */
3193 
3194   if (byte_order == BFD_ENDIAN_BIG)
3195     lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3196   else
3197     lsbcount = (bitpos % 8);
3198   val >>= lsbcount;
3199 
3200   /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3201      If the field is signed, and is negative, then sign extend.  */
3202 
3203   if (bitsize < 8 * (int) sizeof (val))
3204     {
3205       valmask = (((ULONGEST) 1) << bitsize) - 1;
3206       val &= valmask;
3207       if (!field_type->is_unsigned ())
3208 	{
3209 	  if (val & (valmask ^ (valmask >> 1)))
3210 	    {
3211 	      val |= ~valmask;
3212 	    }
3213 	}
3214     }
3215 
3216   return val;
3217 }
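
/* Worked example (illustrative): extracting an unsigned field with
   BITPOS = 3 and BITSIZE = 4 from the single little-endian byte 0xb6
   (binary 10110110) reads one byte, shifts right by 3 to get 10110,
   and masks with 0xf, yielding 6 (binary 0110).  For a signed field
   the top extracted bit (0 in this case) would be sign-extended.  */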
3218 
3219 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3220    VALADDR + EMBEDDED_OFFSET.  VALADDR points to the contents of
3221    VAL, which must not be NULL.  See
3222    unpack_bits_as_long for more details.  */
3223 
3224 int
3225 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3226 			    LONGEST embedded_offset, int fieldno,
3227 			    const struct value *val, LONGEST *result)
3228 {
3229   int bitpos = type->field (fieldno).loc_bitpos ();
3230   int bitsize = type->field (fieldno).bitsize ();
3231   struct type *field_type = type->field (fieldno).type ();
3232   int bit_offset;
3233 
3234   gdb_assert (val != NULL);
3235 
3236   bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3237   if (val->bits_any_optimized_out (bit_offset, bitsize)
3238       || !val->bits_available (bit_offset, bitsize))
3239     return 0;
3240 
3241   *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3242 				 bitpos, bitsize);
3243   return 1;
3244 }
3245 
3246 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3247    object at VALADDR.  See unpack_bits_as_long for more details.  */
3248 
3249 LONGEST
3250 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3251 {
3252   int bitpos = type->field (fieldno).loc_bitpos ();
3253   int bitsize = type->field (fieldno).bitsize ();
3254   struct type *field_type = type->field (fieldno).type ();
3255 
3256   return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3257 }
3258 
3259 /* See value.h.  */
3260 
3261 void
3262 value::unpack_bitfield (struct value *dest_val,
3263 			LONGEST bitpos, LONGEST bitsize,
3264 			const gdb_byte *valaddr, LONGEST embedded_offset)
3265   const
3266 {
3267   enum bfd_endian byte_order;
3268   int src_bit_offset;
3269   int dst_bit_offset;
3270   struct type *field_type = dest_val->type ();
3271 
3272   byte_order = type_byte_order (field_type);
3273 
3274   /* First, unpack and sign extend the bitfield as if it was wholly
3275      valid.  Optimized out/unavailable bits are read as zero, but
3276      that's OK, as they'll end up marked below.  If the VAL is
3277      wholly-invalid we may have skipped allocating its contents,
3278      though.  See value::allocate_optimized_out.  */
3279   if (valaddr != NULL)
3280     {
3281       LONGEST num;
3282 
3283       num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3284 				 bitpos, bitsize);
3285       store_signed_integer (dest_val->contents_raw ().data (),
3286 			    field_type->length (), byte_order, num);
3287     }
3288 
3289   /* Now copy the optimized out / unavailability ranges to the right
3290      bits.  */
3291   src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3292   if (byte_order == BFD_ENDIAN_BIG)
3293     dst_bit_offset = field_type->length () * TARGET_CHAR_BIT - bitsize;
3294   else
3295     dst_bit_offset = 0;
3296   ranges_copy_adjusted (dest_val, dst_bit_offset, src_bit_offset, bitsize);
3297 }
3298 
3299 /* Return a new value with type TYPE, which is the FIELDNO'th field of
3300    the object at VALADDR + EMBEDDED_OFFSET.  VALADDR points to the
3301    contents of VAL.  If the bits of VAL's contents needed to extract
3302    the bitfield are unavailable or optimized out, the new value is
3303    correspondingly marked unavailable/optimized out.  */
3304 
3305 struct value *
3306 value_field_bitfield (struct type *type, int fieldno,
3307 		      const gdb_byte *valaddr,
3308 		      LONGEST embedded_offset, const struct value *val)
3309 {
3310   int bitpos = type->field (fieldno).loc_bitpos ();
3311   int bitsize = type->field (fieldno).bitsize ();
3312   struct value *res_val = value::allocate (type->field (fieldno).type ());
3313 
3314   val->unpack_bitfield (res_val, bitpos, bitsize, valaddr, embedded_offset);
3315 
3316   return res_val;
3317 }
3318 
3319 /* Modify the value of a bitfield.  ADDR points to a block of memory in
3320    target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
3321    is the desired value of the field, in host byte order.  BITPOS and BITSIZE
3322    indicate which bits (in target bit order) comprise the bitfield.
3323    Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3324    0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */
3325 
3326 void
3327 modify_field (struct type *type, gdb_byte *addr,
3328 	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3329 {
3330   enum bfd_endian byte_order = type_byte_order (type);
3331   ULONGEST oword;
3332   ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3333   LONGEST bytesize;
3334 
3335   /* Normalize BITPOS.  */
3336   addr += bitpos / 8;
3337   bitpos %= 8;
3338 
3339   /* If a negative fieldval fits in the field in question, chop
3340      off the sign extension bits.  */
3341   if ((~fieldval & ~(mask >> 1)) == 0)
3342     fieldval &= mask;
3343 
3344   /* Warn if value is too big to fit in the field in question.  */
3345   if (0 != (fieldval & ~mask))
3346     {
3347       /* FIXME: would like to include fieldval in the message, but
3348 	 we don't have a sprintf_longest.  */
3349       warning (_("Value does not fit in %s bits."), plongest (bitsize));
3350 
3351       /* Truncate it, otherwise adjoining fields may be corrupted.  */
3352       fieldval &= mask;
3353     }
3354 
3355   /* Ensure no bytes outside of the modified ones get accessed, as that
3356      may cause false valgrind reports.  */
3357 
3358   bytesize = (bitpos + bitsize + 7) / 8;
3359   oword = extract_unsigned_integer (addr, bytesize, byte_order);
3360 
3361   /* Shifting for bit field depends on endianness of the target machine.  */
3362   if (byte_order == BFD_ENDIAN_BIG)
3363     bitpos = bytesize * 8 - bitpos - bitsize;
3364 
3365   oword &= ~(mask << bitpos);
3366   oword |= fieldval << bitpos;
3367 
3368   store_unsigned_integer (addr, bytesize, byte_order, oword);
3369 }
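
/* Worked example (illustrative): on a little-endian target, calling
   modify_field with *ADDR = 0xff, BITPOS = 2, BITSIZE = 3 and
   FIELDVAL = 5 (binary 101) reads the one affected byte, clears bits
   2..4, ORs in 101 shifted left by 2, and stores 0xf7 (11110111).  */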
3370 
3371 /* Pack NUM into BUF using a target format of TYPE.  */
3372 
3373 void
3374 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3375 {
3376   enum bfd_endian byte_order = type_byte_order (type);
3377   LONGEST len;
3378 
3379   type = check_typedef (type);
3380   len = type->length ();
3381 
3382   switch (type->code ())
3383     {
3384     case TYPE_CODE_RANGE:
3385       num -= type->bounds ()->bias;
3386       [[fallthrough]];
3387     case TYPE_CODE_INT:
3388     case TYPE_CODE_CHAR:
3389     case TYPE_CODE_ENUM:
3390     case TYPE_CODE_FLAGS:
3391     case TYPE_CODE_BOOL:
3392     case TYPE_CODE_MEMBERPTR:
3393       if (type->bit_size_differs_p ())
3394 	{
3395 	  unsigned bit_off = type->bit_offset ();
3396 	  unsigned bit_size = type->bit_size ();
3397 	  num &= ((ULONGEST) 1 << bit_size) - 1;
3398 	  num <<= bit_off;
3399 	}
3400       store_signed_integer (buf, len, byte_order, num);
3401       break;
3402 
3403     case TYPE_CODE_REF:
3404     case TYPE_CODE_RVALUE_REF:
3405     case TYPE_CODE_PTR:
3406       store_typed_address (buf, type, (CORE_ADDR) num);
3407       break;
3408 
3409     case TYPE_CODE_FLT:
3410     case TYPE_CODE_DECFLOAT:
3411       target_float_from_longest (buf, type, num);
3412       break;
3413 
3414     default:
3415       error (_("Unexpected type (%d) encountered for integer constant."),
3416 	     type->code ());
3417     }
3418 }
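
/* Illustrative round trip (comment only; GDBARCH stands for whatever
   architecture is in hand): for ordinary integer types, pack_long
   followed by unpack_long recovers the original number, modulo
   truncation to the type's length:

     gdb_byte buf[8];
     struct type *int32 = builtin_type (gdbarch)->builtin_int32;
     pack_long (buf, int32, -42);
     LONGEST back = unpack_long (int32, buf);   // back == -42

   For TYPE_CODE_RANGE types the bias is subtracted here and added back
   by unpack_long, so the round trip still holds.  */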
3419 
3420 
3421 /* Pack unsigned NUM into BUF using a target format of TYPE.  */
3422 
3423 static void
3424 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3425 {
3426   LONGEST len;
3427   enum bfd_endian byte_order;
3428 
3429   type = check_typedef (type);
3430   len = type->length ();
3431   byte_order = type_byte_order (type);
3432 
3433   switch (type->code ())
3434     {
3435     case TYPE_CODE_INT:
3436     case TYPE_CODE_CHAR:
3437     case TYPE_CODE_ENUM:
3438     case TYPE_CODE_FLAGS:
3439     case TYPE_CODE_BOOL:
3440     case TYPE_CODE_RANGE:
3441     case TYPE_CODE_MEMBERPTR:
3442       if (type->bit_size_differs_p ())
3443 	{
3444 	  unsigned bit_off = type->bit_offset ();
3445 	  unsigned bit_size = type->bit_size ();
3446 	  num &= ((ULONGEST) 1 << bit_size) - 1;
3447 	  num <<= bit_off;
3448 	}
3449       store_unsigned_integer (buf, len, byte_order, num);
3450       break;
3451 
3452     case TYPE_CODE_REF:
3453     case TYPE_CODE_RVALUE_REF:
3454     case TYPE_CODE_PTR:
3455       store_typed_address (buf, type, (CORE_ADDR) num);
3456       break;
3457 
3458     case TYPE_CODE_FLT:
3459     case TYPE_CODE_DECFLOAT:
3460       target_float_from_ulongest (buf, type, num);
3461       break;
3462 
3463     default:
3464       error (_("Unexpected type (%d) encountered "
3465 	       "for unsigned integer constant."),
3466 	     type->code ());
3467     }
3468 }
3469 
3470 /* See value.h.  */
3471 
3472 struct value *
3473 value::zero (struct type *type, enum lval_type lv)
3474 {
3475   struct value *val = value::allocate_lazy (type);
3476 
3477   val->set_lval (lv == lval_computed ? not_lval : lv);
3478   val->m_is_zero = true;
3479   return val;
3480 }
3481 
3482 /* Convert C numbers into newly allocated values.  */
3483 
3484 struct value *
3485 value_from_longest (struct type *type, LONGEST num)
3486 {
3487   struct value *val = value::allocate (type);
3488 
3489   pack_long (val->contents_raw ().data (), type, num);
3490   return val;
3491 }
3492 
3493 
3494 /* Convert C unsigned numbers into newly allocated values.  */
3495 
3496 struct value *
3497 value_from_ulongest (struct type *type, ULONGEST num)
3498 {
3499   struct value *val = value::allocate (type);
3500 
3501   pack_unsigned_long (val->contents_raw ().data (), type, num);
3502 
3503   return val;
3504 }
3505 
3506 /* See value.h.  */
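/* Example (illustrative; GDBARCH stands for some architecture handle
   available to the caller):

     struct value *v
       = value_from_longest (builtin_type (gdbarch)->builtin_int, 42);

   builds a value of the target's int type holding 42.
   value_from_ulongest is the unsigned counterpart; use it when the
   number must not be sign-extended into the target representation.  */
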
3507 
3508 struct value *
3509 value_from_mpz (struct type *type, const gdb_mpz &v)
3510 {
3511   struct type *real_type = check_typedef (type);
3512 
3513   const gdb_mpz *val = &v;
3514   gdb_mpz storage;
3515   if (real_type->code () == TYPE_CODE_RANGE && type->bounds ()->bias != 0)
3516     {
3517       storage = *val;
3518       val = &storage;
3519       storage -= type->bounds ()->bias;
3520     }
3521 
3522   if (type->bit_size_differs_p ())
3523     {
3524       unsigned bit_off = type->bit_offset ();
3525       unsigned bit_size = type->bit_size ();
3526 
3527       if (val != &storage)
3528 	{
3529 	  storage = *val;
3530 	  val = &storage;
3531 	}
3532 
3533       storage.mask (bit_size);
3534       storage <<= bit_off;
3535     }
3536 
3537   struct value *result = value::allocate (type);
3538   val->truncate (result->contents_raw (), type_byte_order (type),
3539 		 type->is_unsigned ());
3540   return result;
3541 }
3542 
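/* Example (illustrative): for a TYPE_CODE_RANGE type with bias 100
   and one byte of storage, converting the mathematical value 105
   stores the raw byte 0x05, because the bias is subtracted before
   the number is truncated into the buffer.  */
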
3543 /* Create a value representing a pointer of type TYPE to the address
3544    ADDR.  */
3545 
3546 struct value *
3547 value_from_pointer (struct type *type, CORE_ADDR addr)
3548 {
3549   struct value *val = value::allocate (type);
3550 
3551   store_typed_address (val->contents_raw ().data (),
3552 		       check_typedef (type), addr);
3553   return val;
3554 }
3555 
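/* Example (illustrative; GDBARCH and ADDR are assumed to be
   available to the caller):

     struct type *char_ptr
       = lookup_pointer_type (builtin_type (gdbarch)->builtin_char);
     struct value *p = value_from_pointer (char_ptr, addr);

   builds a "char *" value that points at inferior address ADDR.  */
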
3556 /* Create and return a value object of TYPE containing the value D.  The
3557    TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3558    it is converted to target format.  */
3559 
3560 struct value *
3561 value_from_host_double (struct type *type, double d)
3562 {
3563   struct value *value = value::allocate (type);
3564   gdb_assert (type->code () == TYPE_CODE_FLT);
3565   target_float_from_host_double (value->contents_raw ().data (),
3566 				 value->type (), d);
3567   return value;
3568 }
3569 
3570 /* Create a value of type TYPE whose contents come from VALADDR, if it
3571    is non-null, and whose memory address (in the inferior) is
3572    ADDRESS.  The type of the created value may differ from the passed
3573    type TYPE.  Make sure to retrieve the value's new type after this call.
3574    Note that TYPE is not passed through resolve_dynamic_type; this is
3575    a special API intended for use only by Ada.  */
3576 
3577 struct value *
3578 value_from_contents_and_address_unresolved (struct type *type,
3579 					    const gdb_byte *valaddr,
3580 					    CORE_ADDR address)
3581 {
3582   struct value *v;
3583 
3584   if (valaddr == NULL)
3585     v = value::allocate_lazy (type);
3586   else
3587     v = value_from_contents (type, valaddr);
3588   v->set_lval (lval_memory);
3589   v->set_address (address);
3590   return v;
3591 }
3592 
3593 /* Create a value of type TYPE whose contents come from VALADDR, if it
3594    is non-null, and whose memory address (in the inferior) is
3595    ADDRESS.  The type of the created value may differ from the passed
3596    type TYPE.  Make sure to retrieve the value's new type after this call.  */
3597 
3598 struct value *
3599 value_from_contents_and_address (struct type *type,
3600 				 const gdb_byte *valaddr,
3601 				 CORE_ADDR address,
3602 				 const frame_info_ptr &frame)
3603 {
3604   gdb::array_view<const gdb_byte> view;
3605   if (valaddr != nullptr)
3606     view = gdb::make_array_view (valaddr, type->length ());
3607   struct type *resolved_type = resolve_dynamic_type (type, view, address,
3608 						     &frame);
3609   struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3610 
3611   struct value *v;
3612   if (resolved_type_no_typedef->code () == TYPE_CODE_ARRAY
3613       && resolved_type_no_typedef->bound_optimized_out ())
3614     {
3615       /* Resolution found that the bounds are optimized out.  In this
3616 	 case, mark the array itself as optimized-out.  */
3617       v = value::allocate_optimized_out (resolved_type);
3618     }
3619   else if (valaddr == nullptr)
3620     v = value::allocate_lazy (resolved_type);
3621   else
3622     v = value_from_contents (resolved_type, valaddr);
3623   if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3624       && TYPE_DATA_LOCATION (resolved_type_no_typedef)->is_constant ())
3625     address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3626   v->set_lval (lval_memory);
3627   v->set_address (address);
3628   return v;
3629 }
3630 
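/* Example (illustrative): when TYPE is an array whose bounds are
   computed at run time, resolve_dynamic_type above replaces it with a
   type carrying the concrete bounds read from the inferior, so the
   returned value's type can differ from TYPE; callers should use
   v->type () rather than the type they passed in, as the comment
   above warns.  */
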
3631 /* Create a value of type TYPE holding the contents CONTENTS.
3632    The new value is `not_lval'.  */
3633 
3634 struct value *
3635 value_from_contents (struct type *type, const gdb_byte *contents)
3636 {
3637   struct value *result;
3638 
3639   result = value::allocate (type);
3640   memcpy (result->contents_raw ().data (), contents, type->length ());
3641   return result;
3642 }
3643 
3644 /* Extract a value from the value history.  Input will be of the form
3645    $digits or $$digits.  See block comment above 'write_dollar_variable'
3646    for details.  */
3647 
3648 struct value *
3649 value_from_history_ref (const char *h, const char **endp)
3650 {
3651   int index, len;
3652 
3653   if (h[0] == '$')
3654     len = 1;
3655   else
3656     return NULL;
3657 
3658   if (h[1] == '$')
3659     len = 2;
3660 
3661   /* Find length of numeral string.  */
3662   for (; isdigit (h[len]); len++)
3663     ;
3664 
3665   /* Make sure numeral string is not part of an identifier.  */
3666   if (h[len] == '_' || isalpha (h[len]))
3667     return NULL;
3668 
3669   /* Now collect the index value.  */
3670   if (h[1] == '$')
3671     {
3672       if (len == 2)
3673 	{
3674 	  /* For some bizarre reason, "$$" is equivalent to "$$1",
3675 	     rather than to "$$0" as it ought to be!  */
3676 	  index = -1;
3677 	  *endp += len;
3678 	}
3679       else
3680 	{
3681 	  char *local_end;
3682 
3683 	  index = -strtol (&h[2], &local_end, 10);
3684 	  *endp = local_end;
3685 	}
3686     }
3687   else
3688     {
3689       if (len == 1)
3690 	{
3691 	  /* "$" is equivalent to "$0".  */
3692 	  index = 0;
3693 	  *endp += len;
3694 	}
3695       else
3696 	{
3697 	  char *local_end;
3698 
3699 	  index = strtol (&h[1], &local_end, 10);
3700 	  *endp = local_end;
3701 	}
3702     }
3703 
3704   return access_value_history (index);
3705 }
3706 
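/* Worked examples for the parser above (editorial illustration):
   "$" yields index 0, "$7" yields 7, "$$" yields -1 and "$$3"
   yields -3; the resulting index is then handed to
   access_value_history.  */
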
3707 /* Get the component value (offset by OFFSET bytes) of a struct or
3708    union WHOLE.  Component's type is TYPE.  */
3709 
3710 struct value *
3711 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3712 {
3713   struct value *v;
3714 
3715   if (whole->lval () == lval_memory && whole->lazy ())
3716     v = value::allocate_lazy (type);
3717   else
3718     {
3719       v = value::allocate (type);
3720       whole->contents_copy (v, v->embedded_offset (),
3721 			    whole->embedded_offset () + offset,
3722 			    type_length_units (type));
3723     }
3724   v->set_offset (whole->offset () + offset + whole->embedded_offset ());
3725   v->set_component_location (whole);
3726 
3727   return v;
3728 }
3729 
3730 /* See value.h.  */
3731 
3732 struct value *
3733 value::from_component_bitsize (struct type *type,
3734 			       LONGEST bit_offset, LONGEST bit_length)
3735 {
3736   gdb_assert (!lazy ());
3737 
3738   /* Preserve lvalue-ness if possible.  This is needed to avoid
3739      array-printing failures (including crashes) when printing Ada
3740      arrays in programs compiled with -fgnat-encodings=all.  */
3741   if ((bit_offset % TARGET_CHAR_BIT) == 0
3742       && (bit_length % TARGET_CHAR_BIT) == 0
3743       && bit_length == TARGET_CHAR_BIT * type->length ())
3744     return value_from_component (this, type, bit_offset / TARGET_CHAR_BIT);
3745 
3746   struct value *v = value::allocate (type);
3747 
3748   LONGEST dst_offset = TARGET_CHAR_BIT * v->embedded_offset ();
3749   if (is_scalar_type (type) && type_byte_order (type) == BFD_ENDIAN_BIG)
3750     dst_offset += TARGET_CHAR_BIT * type->length () - bit_length;
3751 
3752   contents_copy_raw_bitwise (v, dst_offset,
3753 			     TARGET_CHAR_BIT
3754 			     * embedded_offset ()
3755 			     + bit_offset,
3756 			     bit_length);
3757   return v;
3758 }
3759 
3760 struct value *
3761 coerce_ref_if_computed (const struct value *arg)
3762 {
3763   const struct lval_funcs *funcs;
3764 
3765   if (!TYPE_IS_REFERENCE (check_typedef (arg->type ())))
3766     return NULL;
3767 
3768   if (arg->lval () != lval_computed)
3769     return NULL;
3770 
3771   funcs = arg->computed_funcs ();
3772   if (funcs->coerce_ref == NULL)
3773     return NULL;
3774 
3775   return funcs->coerce_ref (arg);
3776 }
3777 
3778 /* See value.h for a description.  */
3779 
3780 struct value *
3781 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3782 			      const struct type *original_type,
3783 			      struct value *original_value,
3784 			      CORE_ADDR original_value_address)
3785 {
3786   gdb_assert (original_type->is_pointer_or_reference ());
3787 
3788   struct type *original_target_type = original_type->target_type ();
3789   gdb::array_view<const gdb_byte> view;
3790   struct type *resolved_original_target_type
3791     = resolve_dynamic_type (original_target_type, view,
3792 			    original_value_address);
3793 
3794   /* Re-adjust type.  */
3795   value->deprecated_set_type (resolved_original_target_type);
3796 
3797   /* Add embedding info.  */
3798   value->set_enclosing_type (enc_type);
3799   value->set_embedded_offset (original_value->pointed_to_offset ());
3800 
3801   /* We may be pointing to an object of some derived type.  */
3802   return value_full_object (value, NULL, 0, 0, 0);
3803 }
3804 
3805 struct value *
3806 coerce_ref (struct value *arg)
3807 {
3808   struct type *value_type_arg_tmp = check_typedef (arg->type ());
3809   struct value *retval;
3810   struct type *enc_type;
3811 
3812   retval = coerce_ref_if_computed (arg);
3813   if (retval)
3814     return retval;
3815 
3816   if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3817     return arg;
3818 
3819   enc_type = check_typedef (arg->enclosing_type ());
3820   enc_type = enc_type->target_type ();
3821 
3822   CORE_ADDR addr = unpack_pointer (arg->type (), arg->contents ().data ());
3823   retval = value_at_lazy (enc_type, addr);
3824   enc_type = retval->type ();
3825   return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
3826 				       arg, addr);
3827 }
3828 
3829 struct value *
3830 coerce_array (struct value *arg)
3831 {
3832   struct type *type;
3833 
3834   arg = coerce_ref (arg);
3835   type = check_typedef (arg->type ());
3836 
3837   switch (type->code ())
3838     {
3839     case TYPE_CODE_ARRAY:
3840       if (!type->is_vector () && current_language->c_style_arrays_p ())
3841 	arg = value_coerce_array (arg);
3842       break;
3843     case TYPE_CODE_FUNC:
3844       arg = value_coerce_function (arg);
3845       break;
3846     }
3847   return arg;
3848 }
3849 
3850 
3851 /* Return the return value convention that will be used for the
3852    specified type.  */
3853 
3854 enum return_value_convention
3855 struct_return_convention (struct gdbarch *gdbarch,
3856 			  struct value *function, struct type *value_type)
3857 {
3858   enum type_code code = value_type->code ();
3859 
3860   if (code == TYPE_CODE_ERROR)
3861     error (_("Function return type unknown."));
3862 
3863   /* Probe the architecture for the return-value convention.  */
3864   return gdbarch_return_value_as_value (gdbarch, function, value_type,
3865 					NULL, NULL, NULL);
3866 }
3867 
3868 /* Return true if the function returning the specified type is using
3869    the convention of returning structures in memory (passing in the
3870    address as a hidden first parameter).  */
3871 
3872 int
3873 using_struct_return (struct gdbarch *gdbarch,
3874 		     struct value *function, struct type *value_type)
3875 {
3876   if (value_type->code () == TYPE_CODE_VOID)
3877     /* A void return value is never in memory.  See also corresponding
3878        code in "print_return_value".  */
3879     return 0;
3880 
3881   return (struct_return_convention (gdbarch, function, value_type)
3882 	  != RETURN_VALUE_REGISTER_CONVENTION);
3883 }
3884 
3885 /* See value.h.  */
3886 
3887 void
3888 value::fetch_lazy_bitfield ()
3889 {
3890   gdb_assert (bitsize () != 0);
3891 
3892   /* To read a lazy bitfield, read the entire enclosing value.  This
3893      prevents reading the same block of (possibly volatile) memory once
3894      per bitfield.  It would be even better to read only the containing
3895      word, but we have no way to record that just specific bits of a
3896      value have been fetched.  */
3897   struct value *parent = this->parent ();
3898 
3899   if (parent->lazy ())
3900     parent->fetch_lazy ();
3901 
3902   parent->unpack_bitfield (this, bitpos (), bitsize (),
3903 			   parent->contents_for_printing ().data (),
3904 			   offset ());
3905 }
3906 
3907 /* See value.h.  */
3908 
3909 void
3910 value::fetch_lazy_memory ()
3911 {
3912   gdb_assert (m_lval == lval_memory);
3913 
3914   CORE_ADDR addr = address ();
3915   struct type *type = check_typedef (enclosing_type ());
3916 
3917   /* Figure out how much we should copy from memory.  Usually, this is just
3918      the size of the type, but, for arrays, we might only be loading a
3919      small part of the array (this is only done for very large arrays).  */
3920   int len = 0;
3921   if (m_limited_length > 0)
3922     {
3923       gdb_assert (this->type ()->code () == TYPE_CODE_ARRAY);
3924       len = m_limited_length;
3925     }
3926   else if (type->length () > 0)
3927     len = type_length_units (type);
3928 
3929   gdb_assert (len >= 0);
3930 
3931   if (len > 0)
3932     read_value_memory (this, 0, stack (), addr,
3933 		       contents_all_raw ().data (), len);
3934 }
3935 
3936 /* See value.h.  */
3937 
3938 void
3939 value::fetch_lazy_register ()
3940 {
3941   struct type *type = check_typedef (this->type ());
3942   struct value *new_val = this;
3943 
3944   scoped_value_mark mark;
3945 
3946   /* Offsets are not supported here; lazy register values must
3947      refer to the entire register.  */
3948   gdb_assert (offset () == 0);
3949 
3950   while (new_val->lval () == lval_register && new_val->lazy ())
3951     {
3952       frame_id next_frame_id = new_val->next_frame_id ();
3953       frame_info_ptr next_frame = frame_find_by_id (next_frame_id);
3954       gdb_assert (next_frame != NULL);
3955 
3956       int regnum = new_val->regnum ();
3957 
3958       /* Convertible register routines are used for multi-register
3959 	 values and for interpretation in different types
3960 	 (e.g. float or int from a double register).  Lazy
3961 	 register values should have the register's natural type,
3962 	 so they do not apply.  */
3963       gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3964 					       regnum, type));
3965 
3966       new_val = frame_unwind_register_value (next_frame, regnum);
3967 
3968       /* If we get another lazy lval_register value, it means the
3969 	 register is found by reading it from NEXT_FRAME's next frame.
3970 	 frame_unwind_register_value should never return a value with
3971 	 the frame id pointing to NEXT_FRAME.  If it does, it means we
3972 	 either have two consecutive frames with the same frame id
3973 	 in the frame chain, or some code is trying to unwind
3974 	 behind get_prev_frame's back (e.g., a frame unwind
3975 	 sniffer trying to unwind), bypassing its validations.  In
3976 	 any case, it should always be an internal error to end up
3977 	 in this situation.  */
3978       if (new_val->lval () == lval_register
3979 	  && new_val->lazy ()
3980 	  && new_val->next_frame_id () == next_frame_id)
3981 	internal_error (_("infinite loop while fetching a register"));
3982     }
3983 
3984   /* If it's still lazy (for instance, a saved register on the
3985      stack), fetch it.  */
3986   if (new_val->lazy ())
3987     new_val->fetch_lazy ();
3988 
3989   /* Copy the contents and the unavailability/optimized-out
3990      meta-data from NEW_VAL to VAL.  */
3991   set_lazy (false);
3992   new_val->contents_copy (this, embedded_offset (),
3993 			  new_val->embedded_offset (),
3994 			  type_length_units (type));
3995 
3996   if (frame_debug)
3997     {
3998       frame_info_ptr frame = frame_find_by_id (this->next_frame_id ());
3999       frame = get_prev_frame_always (frame);
4000       int regnum = this->regnum ();
4001       gdbarch *gdbarch = get_frame_arch (frame);
4002 
4003       string_file debug_file;
4004       gdb_printf (&debug_file,
4005 		  "(frame=%d, regnum=%d(%s), ...) ",
4006 		  frame_relative_level (frame), regnum,
4007 		  user_reg_map_regnum_to_name (gdbarch, regnum));
4008 
4009       gdb_printf (&debug_file, "->");
4010       if (new_val->optimized_out ())
4011 	{
4012 	  gdb_printf (&debug_file, " ");
4013 	  val_print_optimized_out (new_val, &debug_file);
4014 	}
4015       else
4016 	{
4017 	  if (new_val->lval () == lval_register)
4018 	    gdb_printf (&debug_file, " register=%d", new_val->regnum ());
4019 	  else if (new_val->lval () == lval_memory)
4020 	    gdb_printf (&debug_file, " address=%s",
4021 			paddress (gdbarch,
4022 				  new_val->address ()));
4023 	  else
4024 	    gdb_printf (&debug_file, " computed");
4025 
4026 	  if (new_val->entirely_available ())
4027 	    {
4028 	      int i;
4029 	      gdb::array_view<const gdb_byte> buf = new_val->contents ();
4030 
4031 	      gdb_printf (&debug_file, " bytes=");
4032 	      gdb_printf (&debug_file, "[");
4033 	      for (i = 0; i < register_size (gdbarch, regnum); i++)
4034 		gdb_printf (&debug_file, "%02x", buf[i]);
4035 	      gdb_printf (&debug_file, "]");
4036 	    }
4037 	  else if (new_val->entirely_unavailable ())
4038 	    gdb_printf (&debug_file, " unavailable");
4039 	  else
4040 	    gdb_printf (&debug_file, " partly unavailable");
4041 	}
4042 
4043       frame_debug_printf ("%s", debug_file.c_str ());
4044     }
4045 }
4046 
4047 /* See value.h.  */
4048 
4049 void
4050 value::fetch_lazy ()
4051 {
4052   gdb_assert (lazy ());
4053   allocate_contents (true);
4054   /* A value is either lazy, or fully fetched.  The
4055      availability/validity is only established as we try to fetch a
4056      value.  */
4057   gdb_assert (m_optimized_out.empty ());
4058   gdb_assert (m_unavailable.empty ());
4059   if (m_is_zero)
4060     {
4061       /* Nothing.  */
4062     }
4063   else if (bitsize ())
4064     fetch_lazy_bitfield ();
4065   else if (this->lval () == lval_memory)
4066     fetch_lazy_memory ();
4067   else if (this->lval () == lval_register)
4068     fetch_lazy_register ();
4069   else if (this->lval () == lval_computed
4070 	   && computed_funcs ()->read != NULL)
4071     computed_funcs ()->read (this);
4072   else
4073     internal_error (_("Unexpected lazy value type."));
4074 
4075   set_lazy (false);
4076 }
4077 
4078 /* See value.h.  */
4079 
4080 value *
4081 pseudo_from_raw_part (const frame_info_ptr &next_frame, int pseudo_reg_num,
4082 		      int raw_reg_num, int raw_offset)
4083 {
4084   value *pseudo_reg_val
4085     = value::allocate_register (next_frame, pseudo_reg_num);
4086   value *raw_reg_val = value_of_register (raw_reg_num, next_frame);
4087   raw_reg_val->contents_copy (pseudo_reg_val, 0, raw_offset,
4088 			      pseudo_reg_val->type ()->length ());
4089   return pseudo_reg_val;
4090 }
4091 
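/* Sketch (hypothetical register numbers, not taken from any real
   port): a 16-bit pseudo register aliasing the low half of a 32-bit
   little-endian raw register could be read as

     return pseudo_from_raw_part (next_frame, PSEUDO_AX_NUM,
                                  RAW_EAX_NUM, 0);

   where offset 0 selects the least significant bytes on a
   little-endian target.  */
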
4092 /* See value.h.  */
4093 
4094 void
4095 pseudo_to_raw_part (const frame_info_ptr &next_frame,
4096 		    gdb::array_view<const gdb_byte> pseudo_buf,
4097 		    int raw_reg_num, int raw_offset)
4098 {
4099   int raw_reg_size
4100     = register_size (frame_unwind_arch (next_frame), raw_reg_num);
4101 
4102   /* When overflowing a register, put_frame_register_bytes writes to the
4103      subsequent registers.  We don't want that behavior here, so make sure
4104      the write is wholly within register RAW_REG_NUM.  */
4105   gdb_assert (raw_offset + pseudo_buf.size () <= raw_reg_size);
4106   put_frame_register_bytes (next_frame, raw_reg_num, raw_offset, pseudo_buf);
4107 }
4108 
4109 /* See value.h.  */
4110 
4111 value *
4112 pseudo_from_concat_raw (const frame_info_ptr &next_frame, int pseudo_reg_num,
4113 			int raw_reg_1_num, int raw_reg_2_num)
4114 {
4115   value *pseudo_reg_val
4116     = value::allocate_register (next_frame, pseudo_reg_num);
4117   int dst_offset = 0;
4118 
4119   value *raw_reg_1_val = value_of_register (raw_reg_1_num, next_frame);
4120   raw_reg_1_val->contents_copy (pseudo_reg_val, dst_offset, 0,
4121 				raw_reg_1_val->type ()->length ());
4122   dst_offset += raw_reg_1_val->type ()->length ();
4123 
4124   value *raw_reg_2_val = value_of_register (raw_reg_2_num, next_frame);
4125   raw_reg_2_val->contents_copy (pseudo_reg_val, dst_offset, 0,
4126 				raw_reg_2_val->type ()->length ());
4127   dst_offset += raw_reg_2_val->type ()->length ();
4128 
4129   gdb_assert (dst_offset == pseudo_reg_val->type ()->length ());
4130 
4131   return pseudo_reg_val;
4132 }
4133 
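/* Sketch (hypothetical register numbers): an architecture whose
   64-bit pseudo register overlays two 32-bit raw registers could
   implement the read side of its pseudo-register hook roughly as

     return pseudo_from_concat_raw (next_frame, PSEUDO_D0_NUM,
                                    RAW_S0_NUM, RAW_S1_NUM);

   with pseudo_to_concat_raw handling the corresponding write.  */
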
4134 /* See value.h. */
4135 
4136 void
4137 pseudo_to_concat_raw (const frame_info_ptr &next_frame,
4138 		      gdb::array_view<const gdb_byte> pseudo_buf,
4139 		      int raw_reg_1_num, int raw_reg_2_num)
4140 {
4141   int src_offset = 0;
4142   gdbarch *arch = frame_unwind_arch (next_frame);
4143 
4144   int raw_reg_1_size = register_size (arch, raw_reg_1_num);
4145   put_frame_register (next_frame, raw_reg_1_num,
4146 		      pseudo_buf.slice (src_offset, raw_reg_1_size));
4147   src_offset += raw_reg_1_size;
4148 
4149   int raw_reg_2_size = register_size (arch, raw_reg_2_num);
4150   put_frame_register (next_frame, raw_reg_2_num,
4151 		      pseudo_buf.slice (src_offset, raw_reg_2_size));
4152   src_offset += raw_reg_2_size;
4153 
4154   gdb_assert (src_offset == pseudo_buf.size ());
4155 }
4156 
4157 /* See value.h.  */
4158 
4159 value *
4160 pseudo_from_concat_raw (const frame_info_ptr &next_frame, int pseudo_reg_num,
4161 			int raw_reg_1_num, int raw_reg_2_num,
4162 			int raw_reg_3_num)
4163 {
4164   value *pseudo_reg_val
4165     = value::allocate_register (next_frame, pseudo_reg_num);
4166   int dst_offset = 0;
4167 
4168   value *raw_reg_1_val = value_of_register (raw_reg_1_num, next_frame);
4169   raw_reg_1_val->contents_copy (pseudo_reg_val, dst_offset, 0,
4170 				raw_reg_1_val->type ()->length ());
4171   dst_offset += raw_reg_1_val->type ()->length ();
4172 
4173   value *raw_reg_2_val = value_of_register (raw_reg_2_num, next_frame);
4174   raw_reg_2_val->contents_copy (pseudo_reg_val, dst_offset, 0,
4175 				raw_reg_2_val->type ()->length ());
4176   dst_offset += raw_reg_2_val->type ()->length ();
4177 
4178   value *raw_reg_3_val = value_of_register (raw_reg_3_num, next_frame);
4179   raw_reg_3_val->contents_copy (pseudo_reg_val, dst_offset, 0,
4180 				raw_reg_3_val->type ()->length ());
4181   dst_offset += raw_reg_3_val->type ()->length ();
4182 
4183   gdb_assert (dst_offset == pseudo_reg_val->type ()->length ());
4184 
4185   return pseudo_reg_val;
4186 }
4187 
4188 /* See value.h. */
4189 
4190 void
4191 pseudo_to_concat_raw (const frame_info_ptr &next_frame,
4192 		      gdb::array_view<const gdb_byte> pseudo_buf,
4193 		      int raw_reg_1_num, int raw_reg_2_num, int raw_reg_3_num)
4194 {
4195   int src_offset = 0;
4196   gdbarch *arch = frame_unwind_arch (next_frame);
4197 
4198   int raw_reg_1_size = register_size (arch, raw_reg_1_num);
4199   put_frame_register (next_frame, raw_reg_1_num,
4200 		      pseudo_buf.slice (src_offset, raw_reg_1_size));
4201   src_offset += raw_reg_1_size;
4202 
4203   int raw_reg_2_size = register_size (arch, raw_reg_2_num);
4204   put_frame_register (next_frame, raw_reg_2_num,
4205 		      pseudo_buf.slice (src_offset, raw_reg_2_size));
4206   src_offset += raw_reg_2_size;
4207 
4208   int raw_reg_3_size = register_size (arch, raw_reg_3_num);
4209   put_frame_register (next_frame, raw_reg_3_num,
4210 		      pseudo_buf.slice (src_offset, raw_reg_3_size));
4211   src_offset += raw_reg_3_size;
4212 
4213   gdb_assert (src_offset == pseudo_buf.size ());
4214 }
4215 
4216 /* Implementation of the convenience function $_isvoid.  */
4217 
4218 static struct value *
4219 isvoid_internal_fn (struct gdbarch *gdbarch,
4220 		    const struct language_defn *language,
4221 		    void *cookie, int argc, struct value **argv)
4222 {
4223   int ret;
4224 
4225   if (argc != 1)
4226     error (_("You must provide one argument for $_isvoid."));
4227 
4228   ret = argv[0]->type ()->code () == TYPE_CODE_VOID;
4229 
4230   return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4231 }
4232 
4233 /* Implementation of the convenience function $_creal.  Extracts the
4234    real part from a complex number.  */
4235 
4236 static struct value *
4237 creal_internal_fn (struct gdbarch *gdbarch,
4238 		   const struct language_defn *language,
4239 		   void *cookie, int argc, struct value **argv)
4240 {
4241   if (argc != 1)
4242     error (_("You must provide one argument for $_creal."));
4243 
4244   value *cval = argv[0];
4245   type *ctype = check_typedef (cval->type ());
4246   if (ctype->code () != TYPE_CODE_COMPLEX)
4247     error (_("expected a complex number"));
4248   return value_real_part (cval);
4249 }
4250 
4251 /* Implementation of the convenience function $_cimag.  Extracts the
4252    imaginary part from a complex number.  */
4253 
4254 static struct value *
4255 cimag_internal_fn (struct gdbarch *gdbarch,
4256 		   const struct language_defn *language,
4257 		   void *cookie, int argc,
4258 		   struct value **argv)
4259 {
4260   if (argc != 1)
4261     error (_("You must provide one argument for $_cimag."));
4262 
4263   value *cval = argv[0];
4264   type *ctype = check_typedef (cval->type ());
4265   if (ctype->code () != TYPE_CODE_COMPLEX)
4266     error (_("expected a complex number"));
4267   return value_imaginary_part (cval);
4268 }
4269 
4270 #if GDB_SELF_TEST
4271 namespace selftests
4272 {
4273 
4274 /* Test the ranges_contain function.  */
4275 
4276 static void
4277 test_ranges_contain ()
4278 {
4279   std::vector<range> ranges;
4280   range r;
4281 
4282   /* [10, 14] */
4283   r.offset = 10;
4284   r.length = 5;
4285   ranges.push_back (r);
4286 
4287   /* [20, 24] */
4288   r.offset = 20;
4289   r.length = 5;
4290   ranges.push_back (r);
4291 
4292   /* [2, 6] */
4293   SELF_CHECK (!ranges_contain (ranges, 2, 5));
4294   /* [9, 13] */
4295   SELF_CHECK (ranges_contain (ranges, 9, 5));
4296   /* [10, 11] */
4297   SELF_CHECK (ranges_contain (ranges, 10, 2));
4298   /* [10, 14] */
4299   SELF_CHECK (ranges_contain (ranges, 10, 5));
4300   /* [13, 18] */
4301   SELF_CHECK (ranges_contain (ranges, 13, 6));
4302   /* [14, 18] */
4303   SELF_CHECK (ranges_contain (ranges, 14, 5));
4304   /* [15, 18] */
4305   SELF_CHECK (!ranges_contain (ranges, 15, 4));
4306   /* [16, 19] */
4307   SELF_CHECK (!ranges_contain (ranges, 16, 4));
4308   /* [16, 21] */
4309   SELF_CHECK (ranges_contain (ranges, 16, 6));
4310   /* [21, 21] */
4311   SELF_CHECK (ranges_contain (ranges, 21, 1));
4312   /* [21, 25] */
4313   SELF_CHECK (ranges_contain (ranges, 21, 5));
4314   /* [26, 28] */
4315   SELF_CHECK (!ranges_contain (ranges, 26, 3));
4316 }
4317 
4318 /* Check that RANGES contains the same ranges as EXPECTED.  */
4319 
4320 static bool
4321 check_ranges_vector (gdb::array_view<const range> ranges,
4322 		     gdb::array_view<const range> expected)
4323 {
4324   return ranges == expected;
4325 }
4326 
4327 /* Test the insert_into_bit_range_vector function.  */
4328 
4329 static void
4330 test_insert_into_bit_range_vector ()
4331 {
4332   std::vector<range> ranges;
4333 
4334   /* [10, 14] */
4335   {
4336     insert_into_bit_range_vector (&ranges, 10, 5);
4337     static const range expected[] = {
4338       {10, 5}
4339     };
4340     SELF_CHECK (check_ranges_vector (ranges, expected));
4341   }
4342 
4343   /* [10, 14] */
4344   {
4345     insert_into_bit_range_vector (&ranges, 11, 4);
4346     static const range expected = {10, 5};
4347     SELF_CHECK (check_ranges_vector (ranges, expected));
4348   }
4349 
4350   /* [10, 14] [20, 24] */
4351   {
4352     insert_into_bit_range_vector (&ranges, 20, 5);
4353     static const range expected[] = {
4354       {10, 5},
4355       {20, 5},
4356     };
4357     SELF_CHECK (check_ranges_vector (ranges, expected));
4358   }
4359 
4360   /* [10, 14] [17, 24] */
4361   {
4362     insert_into_bit_range_vector (&ranges, 17, 5);
4363     static const range expected[] = {
4364       {10, 5},
4365       {17, 8},
4366     };
4367     SELF_CHECK (check_ranges_vector (ranges, expected));
4368   }
4369 
4370   /* [2, 8] [10, 14] [17, 24] */
4371   {
4372     insert_into_bit_range_vector (&ranges, 2, 7);
4373     static const range expected[] = {
4374       {2, 7},
4375       {10, 5},
4376       {17, 8},
4377     };
4378     SELF_CHECK (check_ranges_vector (ranges, expected));
4379   }
4380 
4381   /* [2, 14] [17, 24] */
4382   {
4383     insert_into_bit_range_vector (&ranges, 9, 1);
4384     static const range expected[] = {
4385       {2, 13},
4386       {17, 8},
4387     };
4388     SELF_CHECK (check_ranges_vector (ranges, expected));
4389   }
4390 
4391   /* [2, 14] [17, 24] -- inserting an already-covered range is a no-op.  */
4392   {
4393     insert_into_bit_range_vector (&ranges, 9, 1);
4394     static const range expected[] = {
4395       {2, 13},
4396       {17, 8},
4397     };
4398     SELF_CHECK (check_ranges_vector (ranges, expected));
4399   }
4400 
4401   /* [2, 33] */
4402   {
4403     insert_into_bit_range_vector (&ranges, 4, 30);
4404     static const range expected = {2, 32};
4405     SELF_CHECK (check_ranges_vector (ranges, expected));
4406   }
4407 }
4408 
4409 static void
4410 test_value_copy ()
4411 {
4412   type *type = builtin_type (current_inferior ()->arch ())->builtin_int;
4413 
4414   /* Verify that we can copy an entirely optimized-out value, which may not have
4415      its contents allocated.  */
4416   value_ref_ptr val = release_value (value::allocate_optimized_out (type));
4417   value_ref_ptr copy = release_value (val->copy ());
4418 
4419   SELF_CHECK (val->entirely_optimized_out ());
4420   SELF_CHECK (copy->entirely_optimized_out ());
4421 }
4422 
4423 } /* namespace selftests */
4424 #endif /* GDB_SELF_TEST */
4425 
4426 void _initialize_values ();
4427 void
4428 _initialize_values ()
4429 {
4430   cmd_list_element *show_convenience_cmd
4431     = add_cmd ("convenience", no_class, show_convenience, _("\
4432 Debugger convenience (\"$foo\") variables and functions.\n\
4433 Convenience variables are created when you assign them values;\n\
4434 thus, \"set $foo=1\" gives \"$foo\" the value 1.  Values may be any type.\n\
4435 \n\
4436 A few convenience variables are given values automatically:\n\
4437 \"$_\" holds the last address examined with \"x\" or \"info lines\",\n\
4438 \"$__\" holds the contents of the last address examined with \"x\"."
4439 #ifdef HAVE_PYTHON
4440 "\n\n\
4441 Convenience functions are defined via the Python API."
4442 #endif
4443 	   ), &showlist);
4444   add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
4445 
4446   add_cmd ("values", no_set_class, show_values, _("\
4447 Elements of value history around item number IDX (or last ten)."),
4448 	   &showlist);
4449 
4450   add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4451 Initialize a convenience variable if necessary.\n\
4452 init-if-undefined VARIABLE = EXPRESSION\n\
4453 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4454 exist or does not contain a value.  The EXPRESSION is not evaluated if the\n\
4455 VARIABLE is already initialized."));
4456 
4457   add_prefix_cmd ("function", no_class, function_command, _("\
4458 Placeholder command for showing help on convenience functions."),
4459 		  &functionlist, 0, &cmdlist);
4460 
4461   add_internal_function ("_isvoid", _("\
4462 Check whether an expression is void.\n\
4463 Usage: $_isvoid (expression)\n\
4464 Return 1 if the expression is void, zero otherwise."),
4465 			 isvoid_internal_fn, NULL);
4466 
4467   add_internal_function ("_creal", _("\
4468 Extract the real part of a complex number.\n\
4469 Usage: $_creal (expression)\n\
4470 Return the real part of a complex number; the result type depends on\n\
4471 the type of the complex number."),
4472 			 creal_internal_fn, NULL);
4473 
4474   add_internal_function ("_cimag", _("\
4475 Extract the imaginary part of a complex number.\n\
4476 Usage: $_cimag (expression)\n\
4477 Return the imaginary part of a complex number; the result type depends on\n\
4478 the type of the complex number."),
4479 			 cimag_internal_fn, NULL);
4480 
4481   add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4482 				       class_support, &max_value_size, _("\
4483 Set the maximum size of a value gdb will load from the inferior."), _("\
4484 Show the maximum size of a value gdb will load from the inferior."), _("\
4485 Use this to control the maximum size, in bytes, of a value that gdb\n\
4486 will load from the inferior.  Setting this value to 'unlimited'\n\
4487 disables checking.\n\
4488 Setting this does not invalidate already allocated values; it only\n\
4489 prevents future values larger than this size from being allocated."),
4490 			    set_max_value_size,
4491 			    show_max_value_size,
4492 			    &setlist, &showlist);
4493   set_show_commands vsize_limit
4494     = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support,
4495 					   &max_value_size, _("\
4496 Set the maximum number of bytes allowed in a variable-size object."), _("\
4497 Show the maximum number of bytes allowed in a variable-size object."), _("\
4498 Attempts to access an object whose size is not a compile-time constant\n\
4499 and exceeds this limit will cause an error."),
4500 					   NULL, NULL, &setlist, &showlist);
4501   deprecate_cmd (vsize_limit.set, "set max-value-size");
4502 
4503 #if GDB_SELF_TEST
4504   selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4505   selftests::register_test ("insert_into_bit_range_vector",
4506 			    selftests::test_insert_into_bit_range_vector);
4507   selftests::register_test ("value_copy", selftests::test_value_copy);
4508 #endif
4509 
4510   /* Destroy any values currently allocated in a final cleanup instead
4511      of leaving it to global destructors, because that may be too
4512      late.  For example, the destructors of xmethod values call into
4513      the Python runtime.  */
4514   add_final_cleanup ([] ()
4515     {
4516       all_values.clear ();
4517     });
4518 }
4519