1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2 
3    Copyright (C) 1986-2017 Free Software Foundation, Inc.
4 
5    This file is part of GDB.
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3 of the License, or
10    (at your option) any later version.
11 
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43 #include <algorithm>
44 
45 /* Prototypes for exported functions.  */
46 
47 void _initialize_values (void);
48 
49 /* Definition of a user function.  */
50 struct internal_function
51 {
52   /* The name of the function.  It is a bit odd to have this in the
53      function itself -- the user might use a differently-named
54      convenience variable to hold the function.  */
55   char *name;
56 
57   /* The handler.  */
58   internal_function_fn handler;
59 
60   /* User data for the handler.  */
61   void *cookie;
62 };
63 
64 /* Defines an [OFFSET, OFFSET + LENGTH) range.  */
65 
66 struct range
67 {
68   /* Lowest offset in the range.  */
69   LONGEST offset;
70 
71   /* Length of the range.  */
72   LONGEST length;
73 };
74 
75 typedef struct range range_s;
76 
77 DEF_VEC_O(range_s);
78 
79 /* Returns true if the ranges defined by [offset1, offset1+len1) and
80    [offset2, offset2+len2) overlap.  */
81 
82 static int
83 ranges_overlap (LONGEST offset1, LONGEST len1,
84 		LONGEST offset2, LONGEST len2)
85 {
86   ULONGEST h, l;
87 
88   l = std::max (offset1, offset2);
89   h = std::min (offset1 + len1, offset2 + len2);
90   return (l < h);
91 }
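
/* For example, ranges_overlap (0, 4, 2, 4) computes l = 2 and h = 4,
   so the ranges [0, 4) and [2, 6) overlap; ranges_overlap (0, 4, 4, 4)
   computes l = h = 4, so the back-to-back ranges [0, 4) and [4, 8) do
   not overlap.  */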
92 
93 /* Returns true if the first argument is strictly less than the
94    second, useful for VEC_lower_bound.  We keep ranges sorted by
95    offset and coalesce overlapping and contiguous ranges, so this just
96    compares the starting offset.  */
97 
98 static int
99 range_lessthan (const range_s *r1, const range_s *r2)
100 {
101   return r1->offset < r2->offset;
102 }
103 
104 /* Returns true if RANGES contains any range that overlaps [OFFSET,
105    OFFSET+LENGTH).  */
106 
107 static int
108 ranges_contain (VEC(range_s) *ranges, LONGEST offset, LONGEST length)
109 {
110   range_s what;
111   LONGEST i;
112 
113   what.offset = offset;
114   what.length = length;
115 
116   /* We keep ranges sorted by offset and coalesce overlapping and
117      contiguous ranges, so to check if a range list contains a given
118      range, we can do a binary search for the position the given range
119      would be inserted if we only considered the starting OFFSET of
120      ranges.  We call that position I.  Since we also have LENGTH to
121      care for (this is a range after all), we need to check if the
122      _previous_ range overlaps the I range.  E.g.,
123 
124          R
125          |---|
126        |---|    |---|  |------| ... |--|
127        0        1      2            N
128 
129        I=1
130 
131      In the case above, the binary search would return `I=1', meaning,
132      this OFFSET should be inserted at position 1, and the current
133      position 1 should be pushed further (and become 2).  But, `0'
134      overlaps with R.
135 
136      Then we also need to check whether R overlaps the range at
137      position I itself.  E.g.,
138 
139               R
140               |---|
141        |---|    |---|  |-------| ... |--|
142        0        1      2             N
143 
144        I=1
145   */
146 
147   i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
148 
149   if (i > 0)
150     {
151       struct range *bef = VEC_index (range_s, ranges, i - 1);
152 
153       if (ranges_overlap (bef->offset, bef->length, offset, length))
154 	return 1;
155     }
156 
157   if (i < VEC_length (range_s, ranges))
158     {
159       struct range *r = VEC_index (range_s, ranges, i);
160 
161       if (ranges_overlap (r->offset, r->length, offset, length))
162 	return 1;
163     }
164 
165   return 0;
166 }
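
/* For example, if RANGES holds [0, 4) and [8, 12), then
   ranges_contain (ranges, 2, 4) returns 1 because [2, 6) overlaps the
   range before the insertion point, while ranges_contain (ranges, 4, 4)
   returns 0 because the gap [4, 8) overlaps neither neighbor.  */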
167 
168 static struct cmd_list_element *functionlist;
169 
170 /* Note that the fields in this structure are arranged to save a bit
171    of memory.  */
172 
173 struct value
174 {
175   /* Type of value; either not an lval, or one of the various
176      different possible kinds of lval.  */
177   enum lval_type lval;
178 
179   /* Is it modifiable?  Only relevant if lval != not_lval.  */
180   unsigned int modifiable : 1;
181 
182   /* If zero, contents of this value are in the contents field.  If
183      nonzero, contents are in inferior.  If the lval field is lval_memory,
184      the contents are in inferior memory at location.address plus offset.
185      The lval field may also be lval_register.
186 
187      WARNING: This field is used by the code which handles watchpoints
188      (see breakpoint.c) to decide whether a particular value can be
189      watched by hardware watchpoints.  If the lazy flag is set for
190      some member of a value chain, it is assumed that this member of
191      the chain doesn't need to be watched as part of watching the
192      value itself.  This is how GDB avoids watching the entire struct
193      or array when the user wants to watch a single struct member or
194      array element.  If you ever change the way lazy flag is set and
195      reset, be sure to consider this use as well!  */
196   unsigned int lazy : 1;
197 
198   /* If value is a variable, whether it is initialized or not.  */
199   unsigned int initialized : 1;
200 
201   /* If value is from the stack.  If this is set, read_stack will be
202      used instead of read_memory to enable extra caching.  */
203   unsigned int stack : 1;
204 
205   /* If the value has been released.  */
206   unsigned int released : 1;
207 
208   /* Location of value (if lval).  */
209   union
210   {
211     /* If lval == lval_memory, this is the address in the inferior.  */
212     CORE_ADDR address;
213 
214     /* If lval == lval_register, the value is from a register.  */
215     struct
216     {
217       /* Register number.  */
218       int regnum;
219       /* Frame ID of "next" frame to which a register value is relative.
220 	 If the register value is found relative to frame F, then the
221 	 frame id of F->next will be stored in next_frame_id.  */
222       struct frame_id next_frame_id;
223     } reg;
224 
225     /* Pointer to internal variable.  */
226     struct internalvar *internalvar;
227 
228     /* Pointer to xmethod worker.  */
229     struct xmethod_worker *xm_worker;
230 
231     /* If lval == lval_computed, this is a set of function pointers
232        to use to access and describe the value, and a closure pointer
233        for them to use.  */
234     struct
235     {
236       /* Functions to call.  */
237       const struct lval_funcs *funcs;
238 
239       /* Closure for those functions to use.  */
240       void *closure;
241     } computed;
242   } location;
243 
244   /* Describes the offset of a value within the lval of a structure, in
245      target addressable memory units.  Note also the member embedded_offset
246      below.  */
247   LONGEST offset;
248 
249   /* Only used for bitfields; number of bits contained in them.  */
250   LONGEST bitsize;
251 
252   /* Only used for bitfields; position of start of field.  For
253      gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
254      gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
255   LONGEST bitpos;
256 
257   /* The number of references to this value.  When a value is created,
258      the value chain holds a reference, so REFERENCE_COUNT is 1.  If
259      release_value is called, this value is removed from the chain but
260      the caller of release_value now has a reference to this value.
261      The caller must arrange for a call to value_free later.  */
262   int reference_count;
263 
264   /* Only used for bitfields; the containing value.  This allows a
265      single read from the target when displaying multiple
266      bitfields.  */
267   struct value *parent;
268 
269   /* Type of the value.  */
270   struct type *type;
271 
272   /* If a value represents a C++ object, then the `type' field gives
273      the object's compile-time type.  If the object actually belongs
274      to some class derived from `type', perhaps with other base
275      classes and additional members, then `type' is just a subobject
276      of the real thing, and the full object is probably larger than
277      `type' would suggest.
278 
279      If `type' is a dynamic class (i.e. one with a vtable), then GDB
280      can actually determine the object's run-time type by looking at
281      the run-time type information in the vtable.  When this
282      information is available, we may elect to read in the entire
283      object, for several reasons:
284 
285      - When printing the value, the user would probably rather see the
286      full object, not just the limited portion apparent from the
287      compile-time type.
288 
289      - If `type' has virtual base classes, then even printing `type'
290      alone may require reaching outside the `type' portion of the
291      object to wherever the virtual base class has been stored.
292 
293      When we store the entire object, `enclosing_type' is the run-time
294      type -- the complete object -- and `embedded_offset' is the
295      offset of `type' within that larger type, in target addressable memory
296      units.  The value_contents() macro takes `embedded_offset' into account,
297      so most GDB code continues to see the `type' portion of the value, just
298      as the inferior would.
299 
300      If `type' is a pointer to an object, then `enclosing_type' is a
301      pointer to the object's run-time type, and `pointed_to_offset' is
302      the offset in target addressable memory units from the full object
303      to the pointed-to object -- that is, the value `embedded_offset' would
304      have if we followed the pointer and fetched the complete object.
305      (I don't really see the point.  Why not just determine the
306      run-time type when you indirect, and avoid the special case?  The
307      contents don't matter until you indirect anyway.)
308 
309      If we're not doing anything fancy, `enclosing_type' is equal to
310      `type', and `embedded_offset' is zero, so everything works
311      normally.  */
312   struct type *enclosing_type;
313   LONGEST embedded_offset;
314   LONGEST pointed_to_offset;
315 
316   /* Values are stored in a chain, so that they can be deleted easily
317      over calls to the inferior.  Values assigned to internal
318      variables, put into the value history or exposed to Python are
319      taken off this list.  */
320   struct value *next;
321 
322   /* Actual contents of the value.  Target byte-order.  NULL or not
323      valid if lazy is nonzero.  */
324   gdb_byte *contents;
325 
326   /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
327      rather than available, since the common and default case is for a
328      value to be available.  This is filled in at value read time.
329      The unavailable ranges are tracked in bits.  Note that a contents
330      bit that has been optimized out doesn't really exist in the
331      program, so it can't be marked unavailable either.  */
332   VEC(range_s) *unavailable;
333 
334   /* Likewise, but for optimized out contents (a chunk of the value of
335      a variable that does not actually exist in the program).  If LVAL
336      is lval_register, this is a register ($pc, $sp, etc., never a
337      program variable) that has not been saved in the frame.  Not-saved
338      registers and optimized-out program variables are treated pretty
339      much the same, except that not-saved registers have a different
340      string representation and related error strings.  */
341   VEC(range_s) *optimized_out;
342 };
343 
344 /* See value.h.  */
345 
346 struct gdbarch *
347 get_value_arch (const struct value *value)
348 {
349   return get_type_arch (value_type (value));
350 }
351 
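/* Return non-zero if no part of the bit range [OFFSET, OFFSET + LENGTH)
   of VALUE's contents has been marked unavailable.  VALUE must not be
   lazy.  */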
352 int
353 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
354 {
355   gdb_assert (!value->lazy);
356 
357   return !ranges_contain (value->unavailable, offset, length);
358 }
359 
360 int
361 value_bytes_available (const struct value *value,
362 		       LONGEST offset, LONGEST length)
363 {
364   return value_bits_available (value,
365 			       offset * TARGET_CHAR_BIT,
366 			       length * TARGET_CHAR_BIT);
367 }
368 
369 int
370 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
371 {
372   gdb_assert (!value->lazy);
373 
374   return ranges_contain (value->optimized_out, bit_offset, bit_length);
375 }
376 
377 int
378 value_entirely_available (struct value *value)
379 {
380   /* We can only tell whether the whole value is available when we try
381      to read it.  */
382   if (value->lazy)
383     value_fetch_lazy (value);
384 
385   if (VEC_empty (range_s, value->unavailable))
386     return 1;
387   return 0;
388 }
389 
390 /* Returns true if VALUE is entirely covered by RANGES.  If the value
391    is lazy, it'll be read now.  Note that RANGES is a pointer to a
392    pointer because reading the value might change *RANGES.  */
393 
394 static int
395 value_entirely_covered_by_range_vector (struct value *value,
396 					VEC(range_s) **ranges)
397 {
398   /* We can only tell whether the whole value is optimized out /
399      unavailable when we try to read it.  */
400   if (value->lazy)
401     value_fetch_lazy (value);
402 
403   if (VEC_length (range_s, *ranges) == 1)
404     {
405       struct range *t = VEC_index (range_s, *ranges, 0);
406 
407       if (t->offset == 0
408 	  && t->length == (TARGET_CHAR_BIT
409 			   * TYPE_LENGTH (value_enclosing_type (value))))
410 	return 1;
411     }
412 
413   return 0;
414 }
415 
416 int
417 value_entirely_unavailable (struct value *value)
418 {
419   return value_entirely_covered_by_range_vector (value, &value->unavailable);
420 }
421 
422 int
423 value_entirely_optimized_out (struct value *value)
424 {
425   return value_entirely_covered_by_range_vector (value, &value->optimized_out);
426 }
427 
428 /* Insert into the vector pointed to by VECTORP the bit range starting at
429    OFFSET bits, and extending for the next LENGTH bits.  */
430 
431 static void
432 insert_into_bit_range_vector (VEC(range_s) **vectorp,
433 			      LONGEST offset, LONGEST length)
434 {
435   range_s newr;
436   int i;
437 
438   /* Insert the range sorted.  If there's overlap or the new range
439      would be contiguous with an existing range, merge.  */
440 
441   newr.offset = offset;
442   newr.length = length;
443 
444   /* Do a binary search for the position the given range would be
445      inserted if we only considered the starting OFFSET of ranges.
446      Call that position I.  Since we also have LENGTH to care for
447      (this is a range after all), we need to check if the _previous_
448      range overlaps the I range.  E.g., calling R the new range:
449 
450        #1 - overlaps with previous
451 
452 	   R
453 	   |-...-|
454 	 |---|     |---|  |------| ... |--|
455 	 0         1      2            N
456 
457 	 I=1
458 
459      In the case #1 above, the binary search would return `I=1',
460      meaning, this OFFSET should be inserted at position 1, and the
461      current position 1 should be pushed further (and become 2).  But,
462      note that `0' overlaps with R, so we want to merge them.
463 
464      A similar consideration needs to be taken if the new range would
465      be contiguous with the previous range:
466 
467        #2 - contiguous with previous
468 
469 	    R
470 	    |-...-|
471 	 |--|       |---|  |------| ... |--|
472 	 0          1      2            N
473 
474 	 I=1
475 
476      If there's no overlap with the previous range, as in:
477 
478        #3 - not overlapping and not contiguous
479 
480 	       R
481 	       |-...-|
482 	  |--|         |---|  |------| ... |--|
483 	  0            1      2            N
484 
485 	 I=1
486 
487      or if I is 0:
488 
489        #4 - R is the range with lowest offset
490 
491 	  R
492 	 |-...-|
493 	         |--|       |---|  |------| ... |--|
494 	         0          1      2            N
495 
496 	 I=0
497 
498      ... we just push the new range to I.
499 
500      All the 4 cases above need to consider that the new range may
501      also overlap several of the ranges that follow, or that R may be
502      contiguous with the following range, and merge.  E.g.,
503 
504        #5 - overlapping following ranges
505 
506 	  R
507 	 |------------------------|
508 	         |--|       |---|  |------| ... |--|
509 	         0          1      2            N
510 
511 	 I=0
512 
513        or:
514 
515 	    R
516 	    |-------|
517 	 |--|       |---|  |------| ... |--|
518 	 0          1      2            N
519 
520 	 I=1
521 
522   */
523 
524   i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
525   if (i > 0)
526     {
527       struct range *bef = VEC_index (range_s, *vectorp, i - 1);
528 
529       if (ranges_overlap (bef->offset, bef->length, offset, length))
530 	{
531 	  /* #1 */
532 	  ULONGEST l = std::min (bef->offset, offset);
533 	  ULONGEST h = std::max (bef->offset + bef->length, offset + length);
534 
535 	  bef->offset = l;
536 	  bef->length = h - l;
537 	  i--;
538 	}
539       else if (offset == bef->offset + bef->length)
540 	{
541 	  /* #2 */
542 	  bef->length += length;
543 	  i--;
544 	}
545       else
546 	{
547 	  /* #3 */
548 	  VEC_safe_insert (range_s, *vectorp, i, &newr);
549 	}
550     }
551   else
552     {
553       /* #4 */
554       VEC_safe_insert (range_s, *vectorp, i, &newr);
555     }
556 
557   /* Check whether the ranges following the one we've just added or
558      touched can be folded in (#5 above).  */
559   if (i + 1 < VEC_length (range_s, *vectorp))
560     {
561       struct range *t;
562       struct range *r;
563       int removed = 0;
564       int next = i + 1;
565 
566       /* Get the range we just touched.  */
567       t = VEC_index (range_s, *vectorp, i);
568       removed = 0;
569 
570       i = next;
571       for (; VEC_iterate (range_s, *vectorp, i, r); i++)
572 	if (r->offset <= t->offset + t->length)
573 	  {
574 	    ULONGEST l, h;
575 
576 	    l = std::min (t->offset, r->offset);
577 	    h = std::max (t->offset + t->length, r->offset + r->length);
578 
579 	    t->offset = l;
580 	    t->length = h - l;
581 
582 	    removed++;
583 	  }
584 	else
585 	  {
586 	    /* If we couldn't merge this one, we won't be able to
587 	       merge following ones either, since the ranges are
588 	       always sorted by OFFSET.  */
589 	    break;
590 	  }
591 
592       if (removed != 0)
593 	VEC_block_remove (range_s, *vectorp, next, removed);
594     }
595 }
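
/* For example, starting from an empty vector, inserting (offset 4,
   length 4) and then (offset 10, length 2) yields the ranges [4, 8)
   and [10, 12); further inserting (offset 6, length 4) first merges
   with the previous range (case #1) and then folds in the following
   one (case #5), leaving the single range [4, 12).  */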
596 
597 void
598 mark_value_bits_unavailable (struct value *value,
599 			     LONGEST offset, LONGEST length)
600 {
601   insert_into_bit_range_vector (&value->unavailable, offset, length);
602 }
603 
604 void
605 mark_value_bytes_unavailable (struct value *value,
606 			      LONGEST offset, LONGEST length)
607 {
608   mark_value_bits_unavailable (value,
609 			       offset * TARGET_CHAR_BIT,
610 			       length * TARGET_CHAR_BIT);
611 }
612 
613 /* Find the first range in RANGES that overlaps the range defined by
614    OFFSET and LENGTH, starting at element POS in the RANGES vector.
615    Returns the index into RANGES where such an overlapping range was
616    found, or -1 if none was found.  */
617 
618 static int
619 find_first_range_overlap (VEC(range_s) *ranges, int pos,
620 			  LONGEST offset, LONGEST length)
621 {
622   range_s *r;
623   int i;
624 
625   for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
626     if (ranges_overlap (r->offset, r->length, offset, length))
627       return i;
628 
629   return -1;
630 }
631 
632 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
633    PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
634    return non-zero.
635 
636    It must always be the case that:
637      OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
638 
639    It is assumed that memory can be accessed from:
640      PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
641    to:
642      PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
643             / TARGET_CHAR_BIT)  */
644 static int
645 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
646 			 const gdb_byte *ptr2, size_t offset2_bits,
647 			 size_t length_bits)
648 {
649   gdb_assert (offset1_bits % TARGET_CHAR_BIT
650 	      == offset2_bits % TARGET_CHAR_BIT);
651 
652   if (offset1_bits % TARGET_CHAR_BIT != 0)
653     {
654       size_t bits;
655       gdb_byte mask, b1, b2;
656 
657       /* The offset from the base pointers PTR1 and PTR2 is not a complete
658 	 number of bytes.  A number of bits up to either the next exact
659 	 byte boundary, or LENGTH_BITS (whichever comes first) will be
660 	 compared.  */
661       bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
662       gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
663       mask = (1 << bits) - 1;
664 
665       if (length_bits < bits)
666 	{
667 	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
668 	  bits = length_bits;
669 	}
670 
671       /* Now load the two bytes and mask off the bits we care about.  */
672       b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
673       b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
674 
675       if (b1 != b2)
676 	return 1;
677 
678       /* Now update the length and offsets to take account of the bits
679 	 we've just compared.  */
680       length_bits -= bits;
681       offset1_bits += bits;
682       offset2_bits += bits;
683     }
684 
685   if (length_bits % TARGET_CHAR_BIT != 0)
686     {
687       size_t bits;
688       size_t o1, o2;
689       gdb_byte mask, b1, b2;
690 
691       /* The length is not an exact number of bytes.  After the previous
692 	 IF block, either the offsets are byte aligned or the length
693 	 is zero (in which case this code is not reached).  Compare
694 	 a number of bits at the end of the region, starting from an exact
695 	 byte boundary.  */
696       bits = length_bits % TARGET_CHAR_BIT;
697       o1 = offset1_bits + length_bits - bits;
698       o2 = offset2_bits + length_bits - bits;
699 
700       gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
701       mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
702 
703       gdb_assert (o1 % TARGET_CHAR_BIT == 0);
704       gdb_assert (o2 % TARGET_CHAR_BIT == 0);
705 
706       b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
707       b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
708 
709       if (b1 != b2)
710 	return 1;
711 
712       length_bits -= bits;
713     }
714 
715   if (length_bits > 0)
716     {
717       /* We've now taken care of any stray "bits" at the start or end of
718 	 the region to compare; the remainder can be covered with a simple
719 	 memcmp.  */
720       gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
721       gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
722       gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
723 
724       return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
725 		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
726 		     length_bits / TARGET_CHAR_BIT);
727     }
728 
729   /* Length is zero, regions match.  */
730   return 0;
731 }
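
/* For example, a call with OFFSET1_BITS == 3, OFFSET2_BITS == 11 and
   LENGTH_BITS == 10 compares the 5 bits that complete the first
   (partial) byte of each region, then the remaining 5 bits taken from
   the following byte, and never reaches the whole-byte memcmp.  */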
732 
733 /* Helper struct for find_first_range_overlap_and_match and
734    value_contents_bits_eq.  Keep track of which slot of a given ranges
735    vector we last looked at.  */
736 
737 struct ranges_and_idx
738 {
739   /* The ranges.  */
740   VEC(range_s) *ranges;
741 
742   /* The range we've last found in RANGES.  Given ranges are sorted,
743      we can start the next lookup here.  */
744   int idx;
745 };
746 
747 /* Helper function for value_contents_bits_eq.  Compare LENGTH bits of
748    RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
749    ranges starting at OFFSET2 bits.  Return true if the ranges match
750    and fill in *L and *H with the overlapping window relative to
751    (both) OFFSET1 or OFFSET2.  */
752 
753 static int
754 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
755 				    struct ranges_and_idx *rp2,
756 				    LONGEST offset1, LONGEST offset2,
757 				    LONGEST length, ULONGEST *l, ULONGEST *h)
758 {
759   rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
760 				       offset1, length);
761   rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
762 				       offset2, length);
763 
764   if (rp1->idx == -1 && rp2->idx == -1)
765     {
766       *l = length;
767       *h = length;
768       return 1;
769     }
770   else if (rp1->idx == -1 || rp2->idx == -1)
771     return 0;
772   else
773     {
774       range_s *r1, *r2;
775       ULONGEST l1, h1;
776       ULONGEST l2, h2;
777 
778       r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
779       r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
780 
781       /* Get the unavailable windows intersected by the incoming
782 	 ranges.  The first and last ranges that overlap the argument
783 	 range may be wider than the incoming argument ranges.  */
784       l1 = std::max (offset1, r1->offset);
785       h1 = std::min (offset1 + length, r1->offset + r1->length);
786 
787       l2 = std::max (offset2, r2->offset);
788       h2 = std::min (offset2 + length, r2->offset + r2->length);
789 
790       /* Make them relative to the respective start offsets, so we can
791 	 compare them for equality.  */
792       l1 -= offset1;
793       h1 -= offset1;
794 
795       l2 -= offset2;
796       h2 -= offset2;
797 
798       /* Different ranges, no match.  */
799       if (l1 != l2 || h1 != h2)
800 	return 0;
801 
802       *h = h1;
803       *l = l1;
804       return 1;
805     }
806 }
807 
808 /* Helper function for value_contents_eq.  The only difference is that
809    this function is bit rather than byte based.
810 
811    Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
812    with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
813    Return true if the available bits match.  */
814 
815 static int
816 value_contents_bits_eq (const struct value *val1, int offset1,
817 			const struct value *val2, int offset2,
818 			int length)
819 {
820   /* Each array element corresponds to a ranges source (unavailable,
821      optimized out).  '1' is for VAL1, '2' for VAL2.  */
822   struct ranges_and_idx rp1[2], rp2[2];
823 
824   /* See function description in value.h.  */
825   gdb_assert (!val1->lazy && !val2->lazy);
826 
827   /* We shouldn't be trying to compare past the end of the values.  */
828   gdb_assert (offset1 + length
829 	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
830   gdb_assert (offset2 + length
831 	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
832 
833   memset (&rp1, 0, sizeof (rp1));
834   memset (&rp2, 0, sizeof (rp2));
835   rp1[0].ranges = val1->unavailable;
836   rp2[0].ranges = val2->unavailable;
837   rp1[1].ranges = val1->optimized_out;
838   rp2[1].ranges = val2->optimized_out;
839 
840   while (length > 0)
841     {
842       ULONGEST l = 0, h = 0; /* init for gcc -Wall */
843       int i;
844 
845       for (i = 0; i < 2; i++)
846 	{
847 	  ULONGEST l_tmp, h_tmp;
848 
849 	  /* The contents only compare equal if the invalid/unavailable
850 	     contents ranges match as well.  */
851 	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
852 						   offset1, offset2, length,
853 						   &l_tmp, &h_tmp))
854 	    return 0;
855 
856 	  /* We're interested in the lowest/first range found.  */
857 	  if (i == 0 || l_tmp < l)
858 	    {
859 	      l = l_tmp;
860 	      h = h_tmp;
861 	    }
862 	}
863 
864       /* Compare the available/valid contents.  */
865       if (memcmp_with_bit_offsets (val1->contents, offset1,
866 				   val2->contents, offset2, l) != 0)
867 	return 0;
868 
869       length -= h;
870       offset1 += h;
871       offset2 += h;
872     }
873 
874   return 1;
875 }
876 
877 int
878 value_contents_eq (const struct value *val1, LONGEST offset1,
879 		   const struct value *val2, LONGEST offset2,
880 		   LONGEST length)
881 {
882   return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
883 				 val2, offset2 * TARGET_CHAR_BIT,
884 				 length * TARGET_CHAR_BIT);
885 }
886 
887 /* Prototypes for local functions.  */
888 
889 static void show_values (char *, int);
890 
891 static void show_convenience (char *, int);
892 
893 
894 /* The value-history records all the values printed
895    by print commands during this session.  Each chunk
896    records 60 consecutive values.  The first chunk on
897    the chain records the most recent values.
898    The total number of values is in value_history_count.  */
899 
900 #define VALUE_HISTORY_CHUNK 60
901 
902 struct value_history_chunk
903   {
904     struct value_history_chunk *next;
905     struct value *values[VALUE_HISTORY_CHUNK];
906   };
907 
908 /* Chain of chunks now in use.  */
909 
910 static struct value_history_chunk *value_history_chain;
911 
912 static int value_history_count;	/* Abs number of last entry stored.  */
913 
914 
915 /* List of all value objects currently allocated
916    (except for those released by calls to release_value).
917    This is so they can be freed after each command.  */
918 
919 static struct value *all_values;
920 
921 /* Allocate a lazy value for type TYPE.  Its actual content is
922    "lazily" allocated too: the content field of the return value is
923    NULL; it will be allocated when it is fetched from the target.  */
924 
925 struct value *
926 allocate_value_lazy (struct type *type)
927 {
928   struct value *val;
929 
930   /* Call check_typedef on our type to make sure that, if TYPE
931      is a TYPE_CODE_TYPEDEF, its length is set to the length
932      of the target type instead of zero.  However, we do not
933      replace the typedef type by the target type, because we want
934      to keep the typedef in order to be able to set the VAL's type
935      description correctly.  */
936   check_typedef (type);
937 
938   val = XCNEW (struct value);
939   val->contents = NULL;
940   val->next = all_values;
941   all_values = val;
942   val->type = type;
943   val->enclosing_type = type;
944   VALUE_LVAL (val) = not_lval;
945   val->location.address = 0;
946   val->offset = 0;
947   val->bitpos = 0;
948   val->bitsize = 0;
949   val->lazy = 1;
950   val->embedded_offset = 0;
951   val->pointed_to_offset = 0;
952   val->modifiable = 1;
953   val->initialized = 1;  /* Default to initialized.  */
954 
955   /* Values start out on the all_values chain.  */
956   val->reference_count = 1;
957 
958   return val;
959 }
960 
961 /* The maximum size, in bytes, that GDB will try to allocate for a value.
962    The initial value of 64k was not selected for any specific reason; it is
963    just a reasonable starting point.  */
964 
965 static int max_value_size = 65536; /* 64k bytes */
966 
967 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
968    LONGEST; otherwise GDB will not be able to parse integer values from the
969    CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
970    be unable to parse "set max-value-size 2".
971 
972    As we want a consistent GDB experience across hosts with different sizes
973    of LONGEST, this arbitrary minimum value was selected; so long as it
974    is bigger than LONGEST on all GDB-supported hosts, we're fine.  */
975 
976 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
977 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
978 
979 /* Implement the "set max-value-size" command.  */
980 
981 static void
982 set_max_value_size (char *args, int from_tty,
983 		    struct cmd_list_element *c)
984 {
985   gdb_assert (max_value_size == -1 || max_value_size >= 0);
986 
987   if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
988     {
989       max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
990       error (_("max-value-size set too low, increasing to %d bytes"),
991 	     max_value_size);
992     }
993 }
994 
995 /* Implement the "show max-value-size" command.  */
996 
997 static void
998 show_max_value_size (struct ui_file *file, int from_tty,
999 		     struct cmd_list_element *c, const char *value)
1000 {
1001   if (max_value_size == -1)
1002     fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
1003   else
1004     fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
1005 		      max_value_size);
1006 }
1007 
1008 /* Called before we attempt to allocate or reallocate a buffer for the
1009    contents of a value.  TYPE is the type of the value for which we are
1010    allocating the buffer.  If the buffer is too large (based on the
1011    user-controllable setting) then throw an error.  If this function returns
1012    then we should attempt to allocate the buffer.  */
1013 
1014 static void
1015 check_type_length_before_alloc (const struct type *type)
1016 {
1017   unsigned int length = TYPE_LENGTH (type);
1018 
1019   if (max_value_size > -1 && length > max_value_size)
1020     {
1021       if (TYPE_NAME (type) != NULL)
1022 	error (_("value of type `%s' requires %u bytes, which is more "
1023 		 "than max-value-size"), TYPE_NAME (type), length);
1024       else
1025 	error (_("value requires %u bytes, which is more than "
1026 		 "max-value-size"), length);
1027     }
1028 }
1029 
1030 /* Allocate the contents of VAL if it has not been allocated yet.  */
1031 
1032 static void
1033 allocate_value_contents (struct value *val)
1034 {
1035   if (!val->contents)
1036     {
1037       check_type_length_before_alloc (val->enclosing_type);
1038       val->contents
1039 	= (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
1040     }
1041 }
1042 
1043 /* Allocate a value and its contents for type TYPE.  */
1044 
1045 struct value *
1046 allocate_value (struct type *type)
1047 {
1048   struct value *val = allocate_value_lazy (type);
1049 
1050   allocate_value_contents (val);
1051   val->lazy = 0;
1052   return val;
1053 }
1054 
1055 /* Allocate a value that has the correct length
1056    for COUNT repetitions of type TYPE.  */
1057 
1058 struct value *
1059 allocate_repeat_value (struct type *type, int count)
1060 {
1061   int low_bound = current_language->string_lower_bound;		/* ??? */
1062   /* FIXME-type-allocation: need a way to free this type when we are
1063      done with it.  */
1064   struct type *array_type
1065     = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1066 
1067   return allocate_value (array_type);
1068 }
1069 
1070 struct value *
1071 allocate_computed_value (struct type *type,
1072                          const struct lval_funcs *funcs,
1073                          void *closure)
1074 {
1075   struct value *v = allocate_value_lazy (type);
1076 
1077   VALUE_LVAL (v) = lval_computed;
1078   v->location.computed.funcs = funcs;
1079   v->location.computed.closure = closure;
1080 
1081   return v;
1082 }
1083 
1084 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  */
1085 
1086 struct value *
1087 allocate_optimized_out_value (struct type *type)
1088 {
1089   struct value *retval = allocate_value_lazy (type);
1090 
1091   mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1092   set_value_lazy (retval, 0);
1093   return retval;
1094 }
1095 
1096 /* Accessor methods.  */
1097 
1098 struct value *
1099 value_next (const struct value *value)
1100 {
1101   return value->next;
1102 }
1103 
1104 struct type *
1105 value_type (const struct value *value)
1106 {
1107   return value->type;
1108 }
1109 void
1110 deprecated_set_value_type (struct value *value, struct type *type)
1111 {
1112   value->type = type;
1113 }
1114 
1115 LONGEST
1116 value_offset (const struct value *value)
1117 {
1118   return value->offset;
1119 }
1120 void
1121 set_value_offset (struct value *value, LONGEST offset)
1122 {
1123   value->offset = offset;
1124 }
1125 
1126 LONGEST
1127 value_bitpos (const struct value *value)
1128 {
1129   return value->bitpos;
1130 }
1131 void
1132 set_value_bitpos (struct value *value, LONGEST bit)
1133 {
1134   value->bitpos = bit;
1135 }
1136 
1137 LONGEST
1138 value_bitsize (const struct value *value)
1139 {
1140   return value->bitsize;
1141 }
1142 void
1143 set_value_bitsize (struct value *value, LONGEST bit)
1144 {
1145   value->bitsize = bit;
1146 }
1147 
1148 struct value *
1149 value_parent (const struct value *value)
1150 {
1151   return value->parent;
1152 }
1153 
1154 /* See value.h.  */
1155 
1156 void
1157 set_value_parent (struct value *value, struct value *parent)
1158 {
1159   struct value *old = value->parent;
1160 
1161   value->parent = parent;
1162   if (parent != NULL)
1163     value_incref (parent);
1164   value_free (old);
1165 }
1166 
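/* Return a writable pointer into VALUE's contents buffer, advanced by
   VALUE's embedded offset (converted from addressable memory units to
   bytes), allocating the buffer first if necessary.  */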
1167 gdb_byte *
1168 value_contents_raw (struct value *value)
1169 {
1170   struct gdbarch *arch = get_value_arch (value);
1171   int unit_size = gdbarch_addressable_memory_unit_size (arch);
1172 
1173   allocate_value_contents (value);
1174   return value->contents + value->embedded_offset * unit_size;
1175 }
1176 
1177 gdb_byte *
1178 value_contents_all_raw (struct value *value)
1179 {
1180   allocate_value_contents (value);
1181   return value->contents;
1182 }
1183 
1184 struct type *
1185 value_enclosing_type (const struct value *value)
1186 {
1187   return value->enclosing_type;
1188 }
1189 
1190 /* Look at value.h for description.  */
1191 
1192 struct type *
1193 value_actual_type (struct value *value, int resolve_simple_types,
1194 		   int *real_type_found)
1195 {
1196   struct value_print_options opts;
1197   struct type *result;
1198 
1199   get_user_print_options (&opts);
1200 
1201   if (real_type_found)
1202     *real_type_found = 0;
1203   result = value_type (value);
1204   if (opts.objectprint)
1205     {
1206       /* If result's target type is TYPE_CODE_STRUCT, proceed to
1207 	 fetch its rtti type.  */
1208       if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
1209 	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1210 	     == TYPE_CODE_STRUCT
1211 	  && !value_optimized_out (value))
1212         {
1213           struct type *real_type;
1214 
1215           real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1216           if (real_type)
1217             {
1218               if (real_type_found)
1219                 *real_type_found = 1;
1220               result = real_type;
1221             }
1222         }
1223       else if (resolve_simple_types)
1224         {
1225           if (real_type_found)
1226             *real_type_found = 1;
1227           result = value_enclosing_type (value);
1228         }
1229     }
1230 
1231   return result;
1232 }
1233 
1234 void
1235 error_value_optimized_out (void)
1236 {
1237   error (_("value has been optimized out"));
1238 }
1239 
1240 static void
1241 require_not_optimized_out (const struct value *value)
1242 {
1243   if (!VEC_empty (range_s, value->optimized_out))
1244     {
1245       if (value->lval == lval_register)
1246 	error (_("register has not been saved in frame"));
1247       else
1248 	error_value_optimized_out ();
1249     }
1250 }
1251 
1252 static void
1253 require_available (const struct value *value)
1254 {
1255   if (!VEC_empty (range_s, value->unavailable))
1256     throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1257 }
1258 
1259 const gdb_byte *
1260 value_contents_for_printing (struct value *value)
1261 {
1262   if (value->lazy)
1263     value_fetch_lazy (value);
1264   return value->contents;
1265 }
1266 
1267 const gdb_byte *
1268 value_contents_for_printing_const (const struct value *value)
1269 {
1270   gdb_assert (!value->lazy);
1271   return value->contents;
1272 }
1273 
1274 const gdb_byte *
1275 value_contents_all (struct value *value)
1276 {
1277   const gdb_byte *result = value_contents_for_printing (value);
1278   require_not_optimized_out (value);
1279   require_available (value);
1280   return result;
1281 }
1282 
1283 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1284    SRC_BIT_OFFSET+BIT_LENGTH) into *DST_RANGE, adjusted.  */
1285 
1286 static void
1287 ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
1288 		      VEC (range_s) *src_range, int src_bit_offset,
1289 		      int bit_length)
1290 {
1291   range_s *r;
1292   int i;
1293 
1294   for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
1295     {
1296       ULONGEST h, l;
1297 
1298       l = std::max (r->offset, (LONGEST) src_bit_offset);
1299       h = std::min (r->offset + r->length,
1300 		    (LONGEST) src_bit_offset + bit_length);
1301 
1302       if (l < h)
1303 	insert_into_bit_range_vector (dst_range,
1304 				      dst_bit_offset + (l - src_bit_offset),
1305 				      h - l);
1306     }
1307 }
1308 
1309 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1310    SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  */
1311 
1312 static void
1313 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1314 			    const struct value *src, int src_bit_offset,
1315 			    int bit_length)
1316 {
1317   ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1318 			src->unavailable, src_bit_offset,
1319 			bit_length);
1320   ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1321 			src->optimized_out, src_bit_offset,
1322 			bit_length);
1323 }
1324 
1325 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1326    (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1327    contents, starting at DST_OFFSET.  If unavailable contents are
1328    being copied from SRC, the corresponding DST contents are marked
1329    unavailable accordingly.  Neither DST nor SRC may be lazy
1330    values.
1331 
1332    It is assumed the contents of DST in the [DST_OFFSET,
1333    DST_OFFSET+LENGTH) range are wholly available.  */
1334 
1335 void
1336 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1337 			 struct value *src, LONGEST src_offset, LONGEST length)
1338 {
1339   LONGEST src_bit_offset, dst_bit_offset, bit_length;
1340   struct gdbarch *arch = get_value_arch (src);
1341   int unit_size = gdbarch_addressable_memory_unit_size (arch);
1342 
1343   /* A lazy DST would make this copy operation useless, since as
1344      soon as DST's contents were un-lazied (by a later value_contents
1345      call, say), the contents would be overwritten.  A lazy SRC would
1346      mean we'd be copying garbage.  */
1347   gdb_assert (!dst->lazy && !src->lazy);
1348 
1349   /* The overwritten DST range gets unavailability ORed in, not
1350      replaced.  Make sure to remember to implement replacing if it
1351      turns out actually necessary.  */
1352   gdb_assert (value_bytes_available (dst, dst_offset, length));
1353   gdb_assert (!value_bits_any_optimized_out (dst,
1354 					     TARGET_CHAR_BIT * dst_offset,
1355 					     TARGET_CHAR_BIT * length));
1356 
1357   /* Copy the data.  */
1358   memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1359 	  value_contents_all_raw (src) + src_offset * unit_size,
1360 	  length * unit_size);
1361 
1362   /* Copy the meta-data, adjusted.  */
1363   src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1364   dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1365   bit_length = length * unit_size * HOST_CHAR_BIT;
1366 
1367   value_ranges_copy_adjusted (dst, dst_bit_offset,
1368 			      src, src_bit_offset,
1369 			      bit_length);
1370 }
1371 
1372 /* Copy LENGTH bytes of SRC value's (all) contents
1373    (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1374    (all) contents, starting at DST_OFFSET.  If unavailable contents
1375    are being copied from SRC, the corresponding DST contents are
1376    marked unavailable accordingly.  DST must not be lazy.  If SRC is
1377    lazy, it will be fetched now.
1378 
1379    It is assumed the contents of DST in the [DST_OFFSET,
1380    DST_OFFSET+LENGTH) range are wholly available.  */
1381 
1382 void
1383 value_contents_copy (struct value *dst, LONGEST dst_offset,
1384 		     struct value *src, LONGEST src_offset, LONGEST length)
1385 {
1386   if (src->lazy)
1387     value_fetch_lazy (src);
1388 
1389   value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1390 }
1391 
1392 int
1393 value_lazy (const struct value *value)
1394 {
1395   return value->lazy;
1396 }
1397 
1398 void
1399 set_value_lazy (struct value *value, int val)
1400 {
1401   value->lazy = val;
1402 }
1403 
1404 int
1405 value_stack (const struct value *value)
1406 {
1407   return value->stack;
1408 }
1409 
1410 void
1411 set_value_stack (struct value *value, int val)
1412 {
1413   value->stack = val;
1414 }
1415 
1416 const gdb_byte *
1417 value_contents (struct value *value)
1418 {
1419   const gdb_byte *result = value_contents_writeable (value);
1420   require_not_optimized_out (value);
1421   require_available (value);
1422   return result;
1423 }
1424 
1425 gdb_byte *
1426 value_contents_writeable (struct value *value)
1427 {
1428   if (value->lazy)
1429     value_fetch_lazy (value);
1430   return value_contents_raw (value);
1431 }
1432 
1433 int
1434 value_optimized_out (struct value *value)
1435 {
1436   /* We can only know if a value is optimized out once we have tried to
1437      fetch it.  */
1438   if (VEC_empty (range_s, value->optimized_out) && value->lazy)
1439     {
1440       TRY
1441 	{
1442 	  value_fetch_lazy (value);
1443 	}
1444       CATCH (ex, RETURN_MASK_ERROR)
1445 	{
1446 	  /* Fall back to checking value->optimized_out.  */
1447 	}
1448       END_CATCH
1449     }
1450 
1451   return !VEC_empty (range_s, value->optimized_out);
1452 }
1453 
1454 /* Mark the contents of VALUE as optimized out, starting at OFFSET bytes
1455    and extending for the following LENGTH bytes.  */
1456 
1457 void
1458 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1459 {
1460   mark_value_bits_optimized_out (value,
1461 				 offset * TARGET_CHAR_BIT,
1462 				 length * TARGET_CHAR_BIT);
1463 }
1464 
1465 /* See value.h.  */
1466 
1467 void
1468 mark_value_bits_optimized_out (struct value *value,
1469 			       LONGEST offset, LONGEST length)
1470 {
1471   insert_into_bit_range_vector (&value->optimized_out, offset, length);
1472 }
1473 
1474 int
1475 value_bits_synthetic_pointer (const struct value *value,
1476 			      LONGEST offset, LONGEST length)
1477 {
1478   if (value->lval != lval_computed
1479       || !value->location.computed.funcs->check_synthetic_pointer)
1480     return 0;
1481   return value->location.computed.funcs->check_synthetic_pointer (value,
1482 								  offset,
1483 								  length);
1484 }
1485 
1486 LONGEST
1487 value_embedded_offset (const struct value *value)
1488 {
1489   return value->embedded_offset;
1490 }
1491 
1492 void
1493 set_value_embedded_offset (struct value *value, LONGEST val)
1494 {
1495   value->embedded_offset = val;
1496 }
1497 
1498 LONGEST
1499 value_pointed_to_offset (const struct value *value)
1500 {
1501   return value->pointed_to_offset;
1502 }
1503 
1504 void
1505 set_value_pointed_to_offset (struct value *value, LONGEST val)
1506 {
1507   value->pointed_to_offset = val;
1508 }
1509 
1510 const struct lval_funcs *
1511 value_computed_funcs (const struct value *v)
1512 {
1513   gdb_assert (value_lval_const (v) == lval_computed);
1514 
1515   return v->location.computed.funcs;
1516 }
1517 
1518 void *
1519 value_computed_closure (const struct value *v)
1520 {
1521   gdb_assert (v->lval == lval_computed);
1522 
1523   return v->location.computed.closure;
1524 }
1525 
1526 enum lval_type *
1527 deprecated_value_lval_hack (struct value *value)
1528 {
1529   return &value->lval;
1530 }
1531 
1532 enum lval_type
1533 value_lval_const (const struct value *value)
1534 {
1535   return value->lval;
1536 }
1537 
1538 CORE_ADDR
1539 value_address (const struct value *value)
1540 {
1541   if (value->lval != lval_memory)
1542     return 0;
1543   if (value->parent != NULL)
1544     return value_address (value->parent) + value->offset;
1545   if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1546     {
1547       gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1548       return TYPE_DATA_LOCATION_ADDR (value_type (value));
1549     }
1550 
1551   return value->location.address + value->offset;
1552 }
1553 
1554 CORE_ADDR
1555 value_raw_address (const struct value *value)
1556 {
1557   if (value->lval != lval_memory)
1558     return 0;
1559   return value->location.address;
1560 }
1561 
1562 void
1563 set_value_address (struct value *value, CORE_ADDR addr)
1564 {
1565   gdb_assert (value->lval == lval_memory);
1566   value->location.address = addr;
1567 }
1568 
1569 struct internalvar **
1570 deprecated_value_internalvar_hack (struct value *value)
1571 {
1572   return &value->location.internalvar;
1573 }
1574 
1575 struct frame_id *
1576 deprecated_value_next_frame_id_hack (struct value *value)
1577 {
1578   gdb_assert (value->lval == lval_register);
1579   return &value->location.reg.next_frame_id;
1580 }
1581 
1582 int *
1583 deprecated_value_regnum_hack (struct value *value)
1584 {
1585   gdb_assert (value->lval == lval_register);
1586   return &value->location.reg.regnum;
1587 }
1588 
1589 int
1590 deprecated_value_modifiable (const struct value *value)
1591 {
1592   return value->modifiable;
1593 }
1594 
1595 /* Return a mark in the value chain.  All values allocated after the
1596    mark is obtained (except for those released) are subject to being freed
1597    if a subsequent value_free_to_mark is passed the mark.  */
1598 struct value *
1599 value_mark (void)
1600 {
1601   return all_values;
1602 }
1603 
1604 /* Take a reference to VAL.  VAL will not be deallocated until all
1605    references are released.  */
1606 
1607 void
1608 value_incref (struct value *val)
1609 {
1610   val->reference_count++;
1611 }
1612 
1613 /* Release a reference to VAL, which was acquired with value_incref.
1614    This function is also called to deallocate values from the value
1615    chain.  */
1616 
1617 void
1618 value_free (struct value *val)
1619 {
1620   if (val)
1621     {
1622       gdb_assert (val->reference_count > 0);
1623       val->reference_count--;
1624       if (val->reference_count > 0)
1625 	return;
1626 
1627       /* If there's an associated parent value, drop our reference to
1628 	 it.  */
1629       if (val->parent != NULL)
1630 	value_free (val->parent);
1631 
1632       if (VALUE_LVAL (val) == lval_computed)
1633 	{
1634 	  const struct lval_funcs *funcs = val->location.computed.funcs;
1635 
1636 	  if (funcs->free_closure)
1637 	    funcs->free_closure (val);
1638 	}
1639       else if (VALUE_LVAL (val) == lval_xcallable)
1640 	  free_xmethod_worker (val->location.xm_worker);
1641 
1642       xfree (val->contents);
1643       VEC_free (range_s, val->unavailable);
1644     }
1645   xfree (val);
1646 }
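
/* A typical lifetime, sketched: a value returned by allocate_value
   starts on the all_values chain with a reference count of one and
   would be freed by the next free_all_values; a caller that wants to
   keep it calls release_value (or release_value_or_incref) to take it
   off the chain, and later value_free to drop its reference.  */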
1647 
1648 /* Free all values allocated since MARK was obtained by value_mark
1649    (except for those released).  */
1650 void
1651 value_free_to_mark (const struct value *mark)
1652 {
1653   struct value *val;
1654   struct value *next;
1655 
1656   for (val = all_values; val && val != mark; val = next)
1657     {
1658       next = val->next;
1659       val->released = 1;
1660       value_free (val);
1661     }
1662   all_values = val;
1663 }
1664 
1665 /* Free all the values that have been allocated (except for those released).
1666    Call after each command, successful or not.
1667    In practice this is called before each command, which is sufficient.  */
1668 
1669 void
1670 free_all_values (void)
1671 {
1672   struct value *val;
1673   struct value *next;
1674 
1675   for (val = all_values; val; val = next)
1676     {
1677       next = val->next;
1678       val->released = 1;
1679       value_free (val);
1680     }
1681 
1682   all_values = 0;
1683 }
1684 
1685 /* Frees all the elements in a chain of values.  */
1686 
1687 void
1688 free_value_chain (struct value *v)
1689 {
1690   struct value *next;
1691 
1692   for (; v; v = next)
1693     {
1694       next = value_next (v);
1695       value_free (v);
1696     }
1697 }
1698 
1699 /* Remove VAL from the chain all_values
1700    so it will not be freed automatically.  */
1701 
1702 void
1703 release_value (struct value *val)
1704 {
1705   struct value *v;
1706 
1707   if (all_values == val)
1708     {
1709       all_values = val->next;
1710       val->next = NULL;
1711       val->released = 1;
1712       return;
1713     }
1714 
1715   for (v = all_values; v; v = v->next)
1716     {
1717       if (v->next == val)
1718 	{
1719 	  v->next = val->next;
1720 	  val->next = NULL;
1721 	  val->released = 1;
1722 	  break;
1723 	}
1724     }
1725 }
1726 
1727 /* If the value is not already released, release it.
1728    If the value is already released, increment its reference count.
1729    That is, this function ensures that the value is released from the
1730    value chain and that the caller owns a reference to it.  */
1731 
1732 void
1733 release_value_or_incref (struct value *val)
1734 {
1735   if (val->released)
1736     value_incref (val);
1737   else
1738     release_value (val);
1739 }
1740 
1741 /* Release all values up to MARK.  */
1742 struct value *
1743 value_release_to_mark (const struct value *mark)
1744 {
1745   struct value *val;
1746   struct value *next;
1747 
1748   for (val = next = all_values; next; next = next->next)
1749     {
1750       if (next->next == mark)
1751 	{
1752 	  all_values = next->next;
1753 	  next->next = NULL;
1754 	  return val;
1755 	}
1756       next->released = 1;
1757     }
1758   all_values = 0;
1759   return val;
1760 }
1761 
1762 /* Return a copy of the value ARG.
1763    It contains the same contents, for the same memory address,
1764    but it's a different block of storage.  */
1765 
1766 struct value *
1767 value_copy (struct value *arg)
1768 {
1769   struct type *encl_type = value_enclosing_type (arg);
1770   struct value *val;
1771 
1772   if (value_lazy (arg))
1773     val = allocate_value_lazy (encl_type);
1774   else
1775     val = allocate_value (encl_type);
1776   val->type = arg->type;
1777   VALUE_LVAL (val) = VALUE_LVAL (arg);
1778   val->location = arg->location;
1779   val->offset = arg->offset;
1780   val->bitpos = arg->bitpos;
1781   val->bitsize = arg->bitsize;
1782   val->lazy = arg->lazy;
1783   val->embedded_offset = value_embedded_offset (arg);
1784   val->pointed_to_offset = arg->pointed_to_offset;
1785   val->modifiable = arg->modifiable;
1786   if (!value_lazy (val))
1787     {
1788       memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1789 	      TYPE_LENGTH (value_enclosing_type (arg)));
1790 
1791     }
1792   val->unavailable = VEC_copy (range_s, arg->unavailable);
1793   val->optimized_out = VEC_copy (range_s, arg->optimized_out);
1794   set_value_parent (val, arg->parent);
1795   if (VALUE_LVAL (val) == lval_computed)
1796     {
1797       const struct lval_funcs *funcs = val->location.computed.funcs;
1798 
1799       if (funcs->copy_closure)
1800         val->location.computed.closure = funcs->copy_closure (val);
1801     }
1802   return val;
1803 }
1804 
1805 /* Return a "const" and/or "volatile" qualified version of the value V.
1806    If CNST is true, then the returned value will be qualified with
1807    "const".
1808    If VOLTL is true, then the returned value will be qualified with
1809    "volatile".  */
1810 
1811 struct value *
1812 make_cv_value (int cnst, int voltl, struct value *v)
1813 {
1814   struct type *val_type = value_type (v);
1815   struct type *enclosing_type = value_enclosing_type (v);
1816   struct value *cv_val = value_copy (v);
1817 
1818   deprecated_set_value_type (cv_val,
1819 			     make_cv_type (cnst, voltl, val_type, NULL));
1820   set_value_enclosing_type (cv_val,
1821 			    make_cv_type (cnst, voltl, enclosing_type, NULL));
1822 
1823   return cv_val;
1824 }
1825 
1826 /* Return a version of ARG that is non-lvalue.  */
1827 
1828 struct value *
1829 value_non_lval (struct value *arg)
1830 {
1831   if (VALUE_LVAL (arg) != not_lval)
1832     {
1833       struct type *enc_type = value_enclosing_type (arg);
1834       struct value *val = allocate_value (enc_type);
1835 
1836       memcpy (value_contents_all_raw (val), value_contents_all (arg),
1837 	      TYPE_LENGTH (enc_type));
1838       val->type = arg->type;
1839       set_value_embedded_offset (val, value_embedded_offset (arg));
1840       set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1841       return val;
1842     }
1843    return arg;
1844 }
1845 
1846 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.  */
1847 
1848 void
1849 value_force_lval (struct value *v, CORE_ADDR addr)
1850 {
1851   gdb_assert (VALUE_LVAL (v) == not_lval);
1852 
1853   write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1854   v->lval = lval_memory;
1855   v->location.address = addr;
1856 }
1857 
1858 void
1859 set_value_component_location (struct value *component,
1860 			      const struct value *whole)
1861 {
1862   struct type *type;
1863 
1864   gdb_assert (whole->lval != lval_xcallable);
1865 
1866   if (whole->lval == lval_internalvar)
1867     VALUE_LVAL (component) = lval_internalvar_component;
1868   else
1869     VALUE_LVAL (component) = whole->lval;
1870 
1871   component->location = whole->location;
1872   if (whole->lval == lval_computed)
1873     {
1874       const struct lval_funcs *funcs = whole->location.computed.funcs;
1875 
1876       if (funcs->copy_closure)
1877         component->location.computed.closure = funcs->copy_closure (whole);
1878     }
1879 
1880   /* If the type has a resolved dynamic location property,
1881      update its value address.  */
1882   type = value_type (whole);
1883   if (NULL != TYPE_DATA_LOCATION (type)
1884       && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1885     set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1886 }
1887 
1888 /* Access to the value history.  */
1889 
1890 /* Record a new value in the value history.
1891    Returns the absolute history index of the entry.  */
1892 
1893 int
1894 record_latest_value (struct value *val)
1895 {
1896   int i;
1897 
1898   /* We don't want this value to have anything to do with the inferior anymore.
1899      In particular, "set $1 = 50" should not affect the variable from which
1900      the value was taken, and fast watchpoints should be able to assume that
1901      a value on the value history never changes.  */
1902   if (value_lazy (val))
1903     value_fetch_lazy (val);
1904   /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1905      from.  This is a bit dubious, because then *&$1 does not just return $1
1906      but the current contents of that location.  Such is life...  */
1907   val->modifiable = 0;
1908 
1909   /* The value may have already been released, in which case we're adding a
1910      new reference for its entry in the history.  That is why we call
1911      release_value_or_incref here instead of release_value.  */
1912   release_value_or_incref (val);
1913 
1914   /* Here we treat value_history_count as origin-zero,
1915      applying to the value being stored now.  */
1916 
1917   i = value_history_count % VALUE_HISTORY_CHUNK;
1918   if (i == 0)
1919     {
1920       struct value_history_chunk *newobj = XCNEW (struct value_history_chunk);
1921 
1922       newobj->next = value_history_chain;
1923       value_history_chain = newobj;
1924     }
1925 
1926   value_history_chain->values[i] = val;
1927 
1928   /* Now we regard value_history_count as origin-one,
1929      applying to the value just stored.  */
1930 
1931   return ++value_history_count;
1932 }
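
/* Worked example (illustrative only): suppose VALUE_HISTORY_CHUNK is 60
   and value_history_count is currently 60, i.e. $1 through $60 have
   already been recorded.  For the next value, i = 60 % 60 == 0, so a
   fresh chunk is pushed onto the front of value_history_chain and the
   value is stored in values[0] of that new chunk; the function then
   returns ++value_history_count == 61, the origin-one number the user
   sees as $61.  */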
1933 
1934 /* Return a copy of the value in the history with sequence number NUM.  */
1935 
1936 struct value *
1937 access_value_history (int num)
1938 {
1939   struct value_history_chunk *chunk;
1940   int i;
1941   int absnum = num;
1942 
1943   if (absnum <= 0)
1944     absnum += value_history_count;
1945 
1946   if (absnum <= 0)
1947     {
1948       if (num == 0)
1949 	error (_("The history is empty."));
1950       else if (num == 1)
1951 	error (_("There is only one value in the history."));
1952       else
1953 	error (_("History does not go back to $$%d."), -num);
1954     }
1955   if (absnum > value_history_count)
1956     error (_("History has not yet reached $%d."), absnum);
1957 
1958   absnum--;
1959 
1960   /* Now absnum is always absolute and origin zero.  */
1961 
1962   chunk = value_history_chain;
1963   for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
1964 	 - absnum / VALUE_HISTORY_CHUNK;
1965        i > 0; i--)
1966     chunk = chunk->next;
1967 
1968   return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
1969 }
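
/* Worked example (illustrative only): again supposing VALUE_HISTORY_CHUNK
   is 60, with value_history_count == 125 a request for $100 gives
   absnum == 99 after the decrement.  The loop then walks
   (125 - 1) / 60 - 99 / 60 == 2 - 1 == 1 link from the newest chunk,
   landing on the chunk holding $61..$120, and a copy of
   values[99 % 60] == values[39] of that chunk is returned, which is
   indeed $100.  */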
1970 
1971 static void
1972 show_values (char *num_exp, int from_tty)
1973 {
1974   int i;
1975   struct value *val;
1976   static int num = 1;
1977 
1978   if (num_exp)
1979     {
1980       /* "show values +" should print from the stored position.
1981          "show values <exp>" should print around value number <exp>.  */
1982       if (num_exp[0] != '+' || num_exp[1] != '\0')
1983 	num = parse_and_eval_long (num_exp) - 5;
1984     }
1985   else
1986     {
1987       /* "show values" means print the last 10 values.  */
1988       num = value_history_count - 9;
1989     }
1990 
1991   if (num <= 0)
1992     num = 1;
1993 
1994   for (i = num; i < num + 10 && i <= value_history_count; i++)
1995     {
1996       struct value_print_options opts;
1997 
1998       val = access_value_history (i);
1999       printf_filtered (("$%d = "), i);
2000       get_user_print_options (&opts);
2001       value_print (val, gdb_stdout, &opts);
2002       printf_filtered (("\n"));
2003     }
2004 
2005   /* The next "show values +" should start after what we just printed.  */
2006   num += 10;
2007 
2008   /* Hitting just return after this command should do the same thing as
2009      "show values +".  If num_exp is null, this is unnecessary, since
2010      "show values +" is not useful after "show values".  */
2011   if (from_tty && num_exp)
2012     {
2013       num_exp[0] = '+';
2014       num_exp[1] = '\0';
2015     }
2016 }
2017 
2018 enum internalvar_kind
2019 {
2020   /* The internal variable is empty.  */
2021   INTERNALVAR_VOID,
2022 
2023   /* The value of the internal variable is provided directly as
2024      a GDB value object.  */
2025   INTERNALVAR_VALUE,
2026 
2027   /* A fresh value is computed via a call-back routine on every
2028      access to the internal variable.  */
2029   INTERNALVAR_MAKE_VALUE,
2030 
2031   /* The internal variable holds a GDB internal convenience function.  */
2032   INTERNALVAR_FUNCTION,
2033 
2034   /* The variable holds an integer value.  */
2035   INTERNALVAR_INTEGER,
2036 
2037   /* The variable holds a GDB-provided string.  */
2038   INTERNALVAR_STRING,
2039 };
2040 
2041 union internalvar_data
2042 {
2043   /* A value object used with INTERNALVAR_VALUE.  */
2044   struct value *value;
2045 
2046   /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
2047   struct
2048   {
2049     /* The functions to call.  */
2050     const struct internalvar_funcs *functions;
2051 
2052     /* The function's user-data.  */
2053     void *data;
2054   } make_value;
2055 
2056   /* The internal function used with INTERNALVAR_FUNCTION.  */
2057   struct
2058   {
2059     struct internal_function *function;
2060     /* True if this is the canonical name for the function.  */
2061     int canonical;
2062   } fn;
2063 
2064   /* An integer value used with INTERNALVAR_INTEGER.  */
2065   struct
2066   {
2067     /* If type is non-NULL, it will be used as the type to generate
2068        a value for this internal variable.  If type is NULL, a default
2069        integer type for the architecture is used.  */
2070     struct type *type;
2071     LONGEST val;
2072   } integer;
2073 
2074   /* A string value used with INTERNALVAR_STRING.  */
2075   char *string;
2076 };
2077 
2078 /* Internal variables.  These are variables within the debugger
2079    that hold values assigned by debugger commands.
2080    The user refers to them with a '$' prefix
2081    that does not appear in the variable names stored internally.  */
2082 
2083 struct internalvar
2084 {
2085   struct internalvar *next;
2086   char *name;
2087 
2088   /* We support various different kinds of content of an internal variable.
2089      enum internalvar_kind specifies the kind, and union internalvar_data
2090      provides the data associated with this particular kind.  */
2091 
2092   enum internalvar_kind kind;
2093 
2094   union internalvar_data u;
2095 };
2096 
2097 static struct internalvar *internalvars;
2098 
2099 /* If the variable does not already exist, create it and give it the
2100    value given.  If no value is given, the default is zero.  */
2101 static void
2102 init_if_undefined_command (char* args, int from_tty)
2103 {
2104   struct internalvar* intvar;
2105 
2106   /* Parse the expression - this is taken from set_command().  */
2107   expression_up expr = parse_expression (args);
2108 
2109   /* Validate the expression.
2110      Was the expression an assignment?
2111      Or even an expression at all?  */
2112   if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
2113     error (_("Init-if-undefined requires an assignment expression."));
2114 
2115   /* Extract the variable from the parsed expression.
2116      In the case of an assign the lvalue will be in elts[1] and elts[2].  */
2117   if (expr->elts[1].opcode != OP_INTERNALVAR)
2118     error (_("The first parameter to init-if-undefined "
2119 	     "should be a GDB variable."));
2120   intvar = expr->elts[2].internalvar;
2121 
2122   /* Only evaluate the expression if the lvalue is void.
2123      This may still fail if the expression is invalid.  */
2124   if (intvar->kind == INTERNALVAR_VOID)
2125     evaluate_expression (expr.get ());
2126 }
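
/* Example (illustrative only): at the GDB prompt,

       (gdb) init-if-undefined $answer = 42

   parses "$answer = 42" as a BINOP_ASSIGN expression and evaluates it
   only while $answer is still INTERNALVAR_VOID, so sourcing a script
   containing that line a second time will not clobber a value the user
   has meanwhile assigned to $answer.  */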
2127 
2128 
2129 /* Look up an internal variable with name NAME.  NAME should not
2130    normally include a dollar sign.
2131 
2132    If the specified internal variable does not exist,
2133    the return value is NULL.  */
2134 
2135 struct internalvar *
2136 lookup_only_internalvar (const char *name)
2137 {
2138   struct internalvar *var;
2139 
2140   for (var = internalvars; var; var = var->next)
2141     if (strcmp (var->name, name) == 0)
2142       return var;
2143 
2144   return NULL;
2145 }
2146 
2147 /* Complete NAME by comparing it to the names of internal variables.
2148    Returns a vector of newly allocated strings, or NULL if no matches
2149    were found.  */
2150 
2151 VEC (char_ptr) *
2152 complete_internalvar (const char *name)
2153 {
2154   VEC (char_ptr) *result = NULL;
2155   struct internalvar *var;
2156   int len;
2157 
2158   len = strlen (name);
2159 
2160   for (var = internalvars; var; var = var->next)
2161     if (strncmp (var->name, name, len) == 0)
2162       {
2163 	char *r = xstrdup (var->name);
2164 
2165 	VEC_safe_push (char_ptr, result, r);
2166       }
2167 
2168   return result;
2169 }
2170 
2171 /* Create an internal variable with name NAME and with a void value.
2172    NAME should not normally include a dollar sign.  */
2173 
2174 struct internalvar *
2175 create_internalvar (const char *name)
2176 {
2177   struct internalvar *var = XNEW (struct internalvar);
2178 
2179   var->name = concat (name, (char *)NULL);
2180   var->kind = INTERNALVAR_VOID;
2181   var->next = internalvars;
2182   internalvars = var;
2183   return var;
2184 }
2185 
2186 /* Create an internal variable with name NAME and register the
2187    callbacks in FUNCS; value_of_internalvar calls FUNCS->make_value to
2188    create a value whenever this variable is referenced.  NAME should
2189    not normally include a dollar sign.  DATA is passed uninterpreted
2190    to the callbacks.  FUNCS->destroy, if not NULL, is called with DATA
2191    when the internal variable is destroyed.  */
2192 
2193 struct internalvar *
2194 create_internalvar_type_lazy (const char *name,
2195 			      const struct internalvar_funcs *funcs,
2196 			      void *data)
2197 {
2198   struct internalvar *var = create_internalvar (name);
2199 
2200   var->kind = INTERNALVAR_MAKE_VALUE;
2201   var->u.make_value.functions = funcs;
2202   var->u.make_value.data = data;
2203   return var;
2204 }
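
/* Illustrative sketch only (not part of the original sources): a client
   wanting a convenience variable that is recomputed on every access
   could register it as below.  The callback shape and the field order
   of internalvar_funcs (make_value, compile_to_ax, destroy) are assumed
   from the way they are used elsewhere in this file:

       static struct value *
       my_make_value (struct gdbarch *gdbarch, struct internalvar *var,
                      void *data)
       {
         return value_from_longest (builtin_type (gdbarch)->builtin_int, 7);
       }

       static const struct internalvar_funcs my_funcs =
       {
         my_make_value,
         NULL,
         NULL
       };

       create_internalvar_type_lazy ("myvar", &my_funcs, NULL);

   The two NULLs stand for compile_to_ax and destroy respectively.
   value_of_internalvar below will then call my_make_value each time
   "$myvar" is evaluated.  */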
2205 
2206 /* See documentation in value.h.  */
2207 
2208 int
2209 compile_internalvar_to_ax (struct internalvar *var,
2210 			   struct agent_expr *expr,
2211 			   struct axs_value *value)
2212 {
2213   if (var->kind != INTERNALVAR_MAKE_VALUE
2214       || var->u.make_value.functions->compile_to_ax == NULL)
2215     return 0;
2216 
2217   var->u.make_value.functions->compile_to_ax (var, expr, value,
2218 					      var->u.make_value.data);
2219   return 1;
2220 }
2221 
2222 /* Look up an internal variable with name NAME.  NAME should not
2223    normally include a dollar sign.
2224 
2225    If the specified internal variable does not exist,
2226    one is created, with a void value.  */
2227 
2228 struct internalvar *
2229 lookup_internalvar (const char *name)
2230 {
2231   struct internalvar *var;
2232 
2233   var = lookup_only_internalvar (name);
2234   if (var)
2235     return var;
2236 
2237   return create_internalvar (name);
2238 }
2239 
2240 /* Return current value of internal variable VAR.  For variables that
2241    are not inherently typed, use a value type appropriate for GDBARCH.  */
2242 
2243 struct value *
2244 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2245 {
2246   struct value *val;
2247   struct trace_state_variable *tsv;
2248 
2249   /* If there is a trace state variable of the same name, assume that
2250      is what we really want to see.  */
2251   tsv = find_trace_state_variable (var->name);
2252   if (tsv)
2253     {
2254       tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2255 								&(tsv->value));
2256       if (tsv->value_known)
2257 	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2258 				  tsv->value);
2259       else
2260 	val = allocate_value (builtin_type (gdbarch)->builtin_void);
2261       return val;
2262     }
2263 
2264   switch (var->kind)
2265     {
2266     case INTERNALVAR_VOID:
2267       val = allocate_value (builtin_type (gdbarch)->builtin_void);
2268       break;
2269 
2270     case INTERNALVAR_FUNCTION:
2271       val = allocate_value (builtin_type (gdbarch)->internal_fn);
2272       break;
2273 
2274     case INTERNALVAR_INTEGER:
2275       if (!var->u.integer.type)
2276 	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2277 				  var->u.integer.val);
2278       else
2279 	val = value_from_longest (var->u.integer.type, var->u.integer.val);
2280       break;
2281 
2282     case INTERNALVAR_STRING:
2283       val = value_cstring (var->u.string, strlen (var->u.string),
2284 			   builtin_type (gdbarch)->builtin_char);
2285       break;
2286 
2287     case INTERNALVAR_VALUE:
2288       val = value_copy (var->u.value);
2289       if (value_lazy (val))
2290 	value_fetch_lazy (val);
2291       break;
2292 
2293     case INTERNALVAR_MAKE_VALUE:
2294       val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2295 							var->u.make_value.data);
2296       break;
2297 
2298     default:
2299       internal_error (__FILE__, __LINE__, _("bad kind"));
2300     }
2301 
2302   /* Change the VALUE_LVAL to lval_internalvar so that future operations
2303      on this value go back to affect the original internal variable.
2304 
2305      Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2306      no underlying modifiable state in the internal variable.
2307 
2308      Likewise, if the variable's value is a computed lvalue, we want
2309      references to it to produce another computed lvalue, where
2310      references and assignments actually operate through the
2311      computed value's functions.
2312 
2313      This means that internal variables with computed values
2314      behave a little differently from other internal variables:
2315      assignments to them don't just replace the previous value
2316      altogether.  At the moment, this seems like the behavior we
2317      want.  */
2318 
2319   if (var->kind != INTERNALVAR_MAKE_VALUE
2320       && val->lval != lval_computed)
2321     {
2322       VALUE_LVAL (val) = lval_internalvar;
2323       VALUE_INTERNALVAR (val) = var;
2324     }
2325 
2326   return val;
2327 }
2328 
2329 int
2330 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2331 {
2332   if (var->kind == INTERNALVAR_INTEGER)
2333     {
2334       *result = var->u.integer.val;
2335       return 1;
2336     }
2337 
2338   if (var->kind == INTERNALVAR_VALUE)
2339     {
2340       struct type *type = check_typedef (value_type (var->u.value));
2341 
2342       if (TYPE_CODE (type) == TYPE_CODE_INT)
2343 	{
2344 	  *result = value_as_long (var->u.value);
2345 	  return 1;
2346 	}
2347     }
2348 
2349   return 0;
2350 }
2351 
2352 static int
2353 get_internalvar_function (struct internalvar *var,
2354 			  struct internal_function **result)
2355 {
2356   switch (var->kind)
2357     {
2358     case INTERNALVAR_FUNCTION:
2359       *result = var->u.fn.function;
2360       return 1;
2361 
2362     default:
2363       return 0;
2364     }
2365 }
2366 
2367 void
2368 set_internalvar_component (struct internalvar *var,
2369 			   LONGEST offset, LONGEST bitpos,
2370 			   LONGEST bitsize, struct value *newval)
2371 {
2372   gdb_byte *addr;
2373   struct gdbarch *arch;
2374   int unit_size;
2375 
2376   switch (var->kind)
2377     {
2378     case INTERNALVAR_VALUE:
2379       addr = value_contents_writeable (var->u.value);
2380       arch = get_value_arch (var->u.value);
2381       unit_size = gdbarch_addressable_memory_unit_size (arch);
2382 
2383       if (bitsize)
2384 	modify_field (value_type (var->u.value), addr + offset,
2385 		      value_as_long (newval), bitpos, bitsize);
2386       else
2387 	memcpy (addr + offset * unit_size, value_contents (newval),
2388 		TYPE_LENGTH (value_type (newval)));
2389       break;
2390 
2391     default:
2392       /* We can never get a component of any other kind.  */
2393       internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2394     }
2395 }
2396 
2397 void
2398 set_internalvar (struct internalvar *var, struct value *val)
2399 {
2400   enum internalvar_kind new_kind;
2401   union internalvar_data new_data = { 0 };
2402 
2403   if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2404     error (_("Cannot overwrite convenience function %s"), var->name);
2405 
2406   /* Prepare new contents.  */
2407   switch (TYPE_CODE (check_typedef (value_type (val))))
2408     {
2409     case TYPE_CODE_VOID:
2410       new_kind = INTERNALVAR_VOID;
2411       break;
2412 
2413     case TYPE_CODE_INTERNAL_FUNCTION:
2414       gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2415       new_kind = INTERNALVAR_FUNCTION;
2416       get_internalvar_function (VALUE_INTERNALVAR (val),
2417 				&new_data.fn.function);
2418       /* Copies created here are never canonical.  */
2419       break;
2420 
2421     default:
2422       new_kind = INTERNALVAR_VALUE;
2423       new_data.value = value_copy (val);
2424       new_data.value->modifiable = 1;
2425 
2426       /* Force the value to be fetched from the target now, to avoid problems
2427 	 later when this internalvar is referenced and the target is gone or
2428 	 has changed.  */
2429       if (value_lazy (new_data.value))
2430        value_fetch_lazy (new_data.value);
2431 
2432       /* Release the value from the value chain to prevent it from being
2433 	 deleted by free_all_values.  From here on this function should not
2434 	 call error () until new_data is installed into the var->u to avoid
2435 	 leaking memory.  */
2436       release_value (new_data.value);
2437 
2438       /* Internal variables which are created from values with a dynamic
2439          location don't need the location property of the origin anymore.
2440          The resolved dynamic location is used before any other address
2441          when accessing the value.
2442          If we kept it, we would still refer to the origin value.
2443          Remove the location property in case it exists.  */
2444       remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));
2445 
2446       break;
2447     }
2448 
2449   /* Clean up old contents.  */
2450   clear_internalvar (var);
2451 
2452   /* Switch over.  */
2453   var->kind = new_kind;
2454   var->u = new_data;
2455   /* End code which must not call error().  */
2456 }
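
/* Illustrative sketch only (not part of the original sources): the
   setters here and just below cover the common kinds.  With SOMEVAL
   standing for any previously obtained value:

       struct internalvar *var = lookup_internalvar ("foo");
       set_internalvar_integer (var, 42);
       set_internalvar_string (var, "hello");
       set_internalvar (var, someval);

   These install kinds INTERNALVAR_INTEGER, INTERNALVAR_STRING and
   INTERNALVAR_VALUE respectively.  Each setter clears the previous
   contents via clear_internalvar before installing the new kind, so
   switching between kinds is safe; and set_internalvar stores a
   released copy of SOMEVAL, leaving the caller's value untouched.  */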
2457 
2458 void
2459 set_internalvar_integer (struct internalvar *var, LONGEST l)
2460 {
2461   /* Clean up old contents.  */
2462   clear_internalvar (var);
2463 
2464   var->kind = INTERNALVAR_INTEGER;
2465   var->u.integer.type = NULL;
2466   var->u.integer.val = l;
2467 }
2468 
2469 void
2470 set_internalvar_string (struct internalvar *var, const char *string)
2471 {
2472   /* Clean up old contents.  */
2473   clear_internalvar (var);
2474 
2475   var->kind = INTERNALVAR_STRING;
2476   var->u.string = xstrdup (string);
2477 }
2478 
2479 static void
2480 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2481 {
2482   /* Clean up old contents.  */
2483   clear_internalvar (var);
2484 
2485   var->kind = INTERNALVAR_FUNCTION;
2486   var->u.fn.function = f;
2487   var->u.fn.canonical = 1;
2488   /* Variables installed here are always the canonical version.  */
2489 }
2490 
2491 void
2492 clear_internalvar (struct internalvar *var)
2493 {
2494   /* Clean up old contents.  */
2495   switch (var->kind)
2496     {
2497     case INTERNALVAR_VALUE:
2498       value_free (var->u.value);
2499       break;
2500 
2501     case INTERNALVAR_STRING:
2502       xfree (var->u.string);
2503       break;
2504 
2505     case INTERNALVAR_MAKE_VALUE:
2506       if (var->u.make_value.functions->destroy != NULL)
2507 	var->u.make_value.functions->destroy (var->u.make_value.data);
2508       break;
2509 
2510     default:
2511       break;
2512     }
2513 
2514   /* Reset to void kind.  */
2515   var->kind = INTERNALVAR_VOID;
2516 }
2517 
2518 char *
2519 internalvar_name (const struct internalvar *var)
2520 {
2521   return var->name;
2522 }
2523 
2524 static struct internal_function *
2525 create_internal_function (const char *name,
2526 			  internal_function_fn handler, void *cookie)
2527 {
2528   struct internal_function *ifn = XNEW (struct internal_function);
2529 
2530   ifn->name = xstrdup (name);
2531   ifn->handler = handler;
2532   ifn->cookie = cookie;
2533   return ifn;
2534 }
2535 
2536 char *
2537 value_internal_function_name (struct value *val)
2538 {
2539   struct internal_function *ifn;
2540   int result;
2541 
2542   gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2543   result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2544   gdb_assert (result);
2545 
2546   return ifn->name;
2547 }
2548 
2549 struct value *
2550 call_internal_function (struct gdbarch *gdbarch,
2551 			const struct language_defn *language,
2552 			struct value *func, int argc, struct value **argv)
2553 {
2554   struct internal_function *ifn;
2555   int result;
2556 
2557   gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2558   result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2559   gdb_assert (result);
2560 
2561   return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2562 }
2563 
2564 /* The 'function' command.  This does nothing -- it is just a
2565    placeholder to let "help function NAME" work.  This is also used as
2566    the implementation of the sub-command that is created when
2567    registering an internal function.  */
2568 static void
2569 function_command (char *command, int from_tty)
2570 {
2571   /* Do nothing.  */
2572 }
2573 
2574 /* Clean up if an internal function's command is destroyed.  */
2575 static void
2576 function_destroyer (struct cmd_list_element *self, void *ignore)
2577 {
2578   xfree ((char *) self->name);
2579   xfree ((char *) self->doc);
2580 }
2581 
2582 /* Add a new internal function.  NAME is the name of the function; DOC
2583    is a documentation string describing the function.  HANDLER is
2584    called when the function is invoked.  COOKIE is an arbitrary
2585    pointer which is passed to HANDLER and is intended for "user
2586    data".  */
2587 void
2588 add_internal_function (const char *name, const char *doc,
2589 		       internal_function_fn handler, void *cookie)
2590 {
2591   struct cmd_list_element *cmd;
2592   struct internal_function *ifn;
2593   struct internalvar *var = lookup_internalvar (name);
2594 
2595   ifn = create_internal_function (name, handler, cookie);
2596   set_internalvar_function (var, ifn);
2597 
2598   cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2599 		 &functionlist);
2600   cmd->destroyer = function_destroyer;
2601 }
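
/* Illustrative sketch only (not part of the original sources):
   registering a trivial convenience function.  The handler signature
   mirrors the dispatch done in call_internal_function above:

       static struct value *
       nargs_handler (struct gdbarch *gdbarch,
                      const struct language_defn *language,
                      void *cookie, int argc, struct value **argv)
       {
         return value_from_longest (builtin_type (gdbarch)->builtin_int,
                                    argc);
       }

       add_internal_function ("nargs", _("Return the argument count."),
                              nargs_handler, NULL);

   Afterwards an expression such as "$nargs (1, 2, 3)" should evaluate
   to 3, with COOKIE arriving as the NULL passed at registration.  */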
2602 
2603 /* Update VALUE before discarding OBJFILE.  COPIED_TYPES is used to
2604    prevent cycles / duplicates.  */
2605 
2606 void
2607 preserve_one_value (struct value *value, struct objfile *objfile,
2608 		    htab_t copied_types)
2609 {
2610   if (TYPE_OBJFILE (value->type) == objfile)
2611     value->type = copy_type_recursive (objfile, value->type, copied_types);
2612 
2613   if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2614     value->enclosing_type = copy_type_recursive (objfile,
2615 						 value->enclosing_type,
2616 						 copied_types);
2617 }
2618 
2619 /* Likewise for internal variable VAR.  */
2620 
2621 static void
2622 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2623 			  htab_t copied_types)
2624 {
2625   switch (var->kind)
2626     {
2627     case INTERNALVAR_INTEGER:
2628       if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2629 	var->u.integer.type
2630 	  = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2631       break;
2632 
2633     case INTERNALVAR_VALUE:
2634       preserve_one_value (var->u.value, objfile, copied_types);
2635       break;
2636     }
2637 }
2638 
2639 /* Update the internal variables and value history when OBJFILE is
2640    discarded; we must copy the types out of the objfile.  New global types
2641    will be created for every convenience variable which currently points to
2642    this objfile's types, and the convenience variables will be adjusted to
2643    use the new global types.  */
2644 
2645 void
2646 preserve_values (struct objfile *objfile)
2647 {
2648   htab_t copied_types;
2649   struct value_history_chunk *cur;
2650   struct internalvar *var;
2651   int i;
2652 
2653   /* Create the hash table.  We allocate on the objfile's obstack, since
2654      it is soon to be deleted.  */
2655   copied_types = create_copied_types_hash (objfile);
2656 
2657   for (cur = value_history_chain; cur; cur = cur->next)
2658     for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2659       if (cur->values[i])
2660 	preserve_one_value (cur->values[i], objfile, copied_types);
2661 
2662   for (var = internalvars; var; var = var->next)
2663     preserve_one_internalvar (var, objfile, copied_types);
2664 
2665   preserve_ext_lang_values (objfile, copied_types);
2666 
2667   htab_delete (copied_types);
2668 }
2669 
2670 static void
2671 show_convenience (char *ignore, int from_tty)
2672 {
2673   struct gdbarch *gdbarch = get_current_arch ();
2674   struct internalvar *var;
2675   int varseen = 0;
2676   struct value_print_options opts;
2677 
2678   get_user_print_options (&opts);
2679   for (var = internalvars; var; var = var->next)
2680     {
2681 
2682       if (!varseen)
2683 	{
2684 	  varseen = 1;
2685 	}
2686       printf_filtered (("$%s = "), var->name);
2687 
2688       TRY
2689 	{
2690 	  struct value *val;
2691 
2692 	  val = value_of_internalvar (gdbarch, var);
2693 	  value_print (val, gdb_stdout, &opts);
2694 	}
2695       CATCH (ex, RETURN_MASK_ERROR)
2696 	{
2697 	  fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2698 	}
2699       END_CATCH
2700 
2701       printf_filtered (("\n"));
2702     }
2703   if (!varseen)
2704     {
2705       /* This text does not mention convenience functions on purpose.
2706 	 The user can't create them except via Python, and if Python support
2707 	 is installed this message will never be printed ($_streq will
2708 	 exist).  */
2709       printf_unfiltered (_("No debugger convenience variables now defined.\n"
2710 			   "Convenience variables have "
2711 			   "names starting with \"$\";\n"
2712 			   "use \"set\" as in \"set "
2713 			   "$foo = 5\" to define them.\n"));
2714     }
2715 }
2716 
2717 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER.  */
2718 
2719 struct value *
2720 value_of_xmethod (struct xmethod_worker *worker)
2721 {
2722   if (worker->value == NULL)
2723     {
2724       struct value *v;
2725 
2726       v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2727       v->lval = lval_xcallable;
2728       v->location.xm_worker = worker;
2729       v->modifiable = 0;
2730       worker->value = v;
2731     }
2732 
2733   return worker->value;
2734 }
2735 
2736 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD.  */
2737 
2738 struct type *
2739 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2740 {
2741   gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2742 	      && method->lval == lval_xcallable && argc > 0);
2743 
2744   return get_xmethod_result_type (method->location.xm_worker,
2745 				  argv[0], argv + 1, argc - 1);
2746 }
2747 
2748 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD.  */
2749 
2750 struct value *
2751 call_xmethod (struct value *method, int argc, struct value **argv)
2752 {
2753   gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2754 	      && method->lval == lval_xcallable && argc > 0);
2755 
2756   return invoke_xmethod (method->location.xm_worker,
2757 			 argv[0], argv + 1, argc - 1);
2758 }
2759 
2760 /* Extract a value as a C number (either long or double).
2761    Knows how to convert fixed values to double, or
2762    floating values to long.
2763    Does not deallocate the value.  */
2764 
2765 LONGEST
2766 value_as_long (struct value *val)
2767 {
2768   /* This coerces arrays and functions, which is necessary (e.g.
2769      in disassemble_command).  It also dereferences references, which
2770      I suspect is the most logical thing to do.  */
2771   val = coerce_array (val);
2772   return unpack_long (value_type (val), value_contents (val));
2773 }
2774 
2775 DOUBLEST
2776 value_as_double (struct value *val)
2777 {
2778   DOUBLEST foo;
2779   int inv;
2780 
2781   foo = unpack_double (value_type (val), value_contents (val), &inv);
2782   if (inv)
2783     error (_("Invalid floating value found in program."));
2784   return foo;
2785 }
2786 
2787 /* Extract a value as a C pointer.  Does not deallocate the value.
2788    Note that val's type may not actually be a pointer; value_as_long
2789    handles all the cases.  */
2790 CORE_ADDR
2791 value_as_address (struct value *val)
2792 {
2793   struct gdbarch *gdbarch = get_type_arch (value_type (val));
2794 
2795   /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2796      whether we want this to be true eventually.  */
2797 #if 0
2798   /* gdbarch_addr_bits_remove is wrong if we are being called for a
2799      non-address (e.g. argument to "signal", "info break", etc.), or
2800      for pointers to char, in which the low bits *are* significant.  */
2801   return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2802 #else
2803 
2804   /* There are several targets (IA-64, PowerPC, and others) which
2805      don't represent pointers to functions as simply the address of
2806      the function's entry point.  For example, on the IA-64, a
2807      function pointer points to a two-word descriptor, generated by
2808      the linker, which contains the function's entry point, and the
2809      value the IA-64 "global pointer" register should have --- to
2810      support position-independent code.  The linker generates
2811      descriptors only for those functions whose addresses are taken.
2812 
2813      On such targets, it's difficult for GDB to convert an arbitrary
2814      function address into a function pointer; it has to either find
2815      an existing descriptor for that function, or call malloc and
2816      build its own.  On some targets, it is impossible for GDB to
2817      build a descriptor at all: the descriptor must contain a jump
2818      instruction; data memory cannot be executed; and code memory
2819      cannot be modified.
2820 
2821      Upon entry to this function, if VAL is a value of type `function'
2822      (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2823      value_address (val) is the address of the function.  This is what
2824      you'll get if you evaluate an expression like `main'.  The call
2825      to COERCE_ARRAY below actually does all the usual unary
2826      conversions, which includes converting values of type `function'
2827      to `pointer to function'.  This is the challenging conversion
2828      discussed above.  Then, `unpack_long' will convert that pointer
2829      back into an address.
2830 
2831      So, suppose the user types `disassemble foo' on an architecture
2832      with a strange function pointer representation, on which GDB
2833      cannot build its own descriptors, and suppose further that `foo'
2834      has no linker-built descriptor.  The address->pointer conversion
2835      will signal an error and prevent the command from running, even
2836      though the next step would have been to convert the pointer
2837      directly back into the same address.
2838 
2839      The following shortcut avoids this whole mess.  If VAL is a
2840      function, just return its address directly.  */
2841   if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2842       || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2843     return value_address (val);
2844 
2845   val = coerce_array (val);
2846 
2847   /* Some architectures (e.g. Harvard) map instruction and data
2848      addresses onto a single large unified address space.  For
2849      instance, an architecture may consider a large integer in the
2850      range 0x10000000 .. 0x1000ffff to already represent a data
2851      address (and hence not need a pointer-to-address conversion),
2852      while a small integer would still need to be converted from
2853      integer to pointer to address.  Just assume such architectures
2854      handle all integer conversions in a single function.  */
2855 
2856   /* JimB writes:
2857 
2858      I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2859      must admonish GDB hackers to make sure its behavior matches the
2860      compiler's, whenever possible.
2861 
2862      In general, I think GDB should evaluate expressions the same way
2863      the compiler does.  When the user copies an expression out of
2864      their source code and hands it to a `print' command, they should
2865      get the same value the compiler would have computed.  Any
2866      deviation from this rule can cause major confusion and annoyance,
2867      and needs to be justified carefully.  In other words, GDB doesn't
2868      really have the freedom to do these conversions in clever and
2869      useful ways.
2870 
2871      AndrewC pointed out that users aren't complaining about how GDB
2872      casts integers to pointers; they are complaining that they can't
2873      take an address from a disassembly listing and give it to `x/i'.
2874      This is certainly important.
2875 
2876      Adding an architecture method like integer_to_address() certainly
2877      makes it possible for GDB to "get it right" in all circumstances
2878      --- the target has complete control over how things get done, so
2879      people can Do The Right Thing for their target without breaking
2880      anyone else.  The standard doesn't specify how integers get
2881      converted to pointers; usually, the ABI doesn't either, but
2882      ABI-specific code is a more reasonable place to handle it.  */
2883 
2884   if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2885       && !TYPE_IS_REFERENCE (value_type (val))
2886       && gdbarch_integer_to_address_p (gdbarch))
2887     return gdbarch_integer_to_address (gdbarch, value_type (val),
2888 				       value_contents (val));
2889 
2890   return unpack_long (value_type (val), value_contents (val));
2891 #endif
2892 }
2893 
2894 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
2895    as a long, or as a double, assuming the raw data is described
2896    by type TYPE.  Knows how to convert different sizes of values
2897    and can convert between fixed and floating point.  We don't assume
2898    any alignment for the raw data.  Return value is in host byte order.
2899 
2900    If you want functions and arrays to be coerced to pointers, and
2901    references to be dereferenced, call value_as_long() instead.
2902 
2903    C++: It is assumed that the front-end has taken care of
2904    all matters concerning pointers to members.  A pointer
2905    to member which reaches here is considered to be equivalent
2906    to an INT (or some size).  After all, it is only an offset.  */
2907 
2908 LONGEST
2909 unpack_long (struct type *type, const gdb_byte *valaddr)
2910 {
2911   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2912   enum type_code code = TYPE_CODE (type);
2913   int len = TYPE_LENGTH (type);
2914   int nosign = TYPE_UNSIGNED (type);
2915 
2916   switch (code)
2917     {
2918     case TYPE_CODE_TYPEDEF:
2919       return unpack_long (check_typedef (type), valaddr);
2920     case TYPE_CODE_ENUM:
2921     case TYPE_CODE_FLAGS:
2922     case TYPE_CODE_BOOL:
2923     case TYPE_CODE_INT:
2924     case TYPE_CODE_CHAR:
2925     case TYPE_CODE_RANGE:
2926     case TYPE_CODE_MEMBERPTR:
2927       if (nosign)
2928 	return extract_unsigned_integer (valaddr, len, byte_order);
2929       else
2930 	return extract_signed_integer (valaddr, len, byte_order);
2931 
2932     case TYPE_CODE_FLT:
2933       return (LONGEST) extract_typed_floating (valaddr, type);
2934 
2935     case TYPE_CODE_DECFLOAT:
2936       /* libdecnumber has a function to convert from decimal to integer, but
2937 	 it doesn't work when the decimal number has a fractional part.  */
2938       return (LONGEST) decimal_to_doublest (valaddr, len, byte_order);
2939 
2940     case TYPE_CODE_PTR:
2941     case TYPE_CODE_REF:
2942     case TYPE_CODE_RVALUE_REF:
2943       /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2944          whether we want this to be true eventually.  */
2945       return extract_typed_address (valaddr, type);
2946 
2947     default:
2948       error (_("Value can't be converted to integer."));
2949     }
2950   return 0;			/* Placate lint.  */
2951 }
2952 
2953 /* Return a double value from the specified type and address.
2954    INVP points to an int which is set to 0 for valid value,
2955    1 for invalid value (bad float format).  In either case,
2956    the returned double is OK to use.  Argument is in target
2957    format, result is in host format.  */
2958 
2959 DOUBLEST
2960 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2961 {
2962   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2963   enum type_code code;
2964   int len;
2965   int nosign;
2966 
2967   *invp = 0;			/* Assume valid.  */
2968   type = check_typedef (type);
2969   code = TYPE_CODE (type);
2970   len = TYPE_LENGTH (type);
2971   nosign = TYPE_UNSIGNED (type);
2972   if (code == TYPE_CODE_FLT)
2973     {
2974       /* NOTE: cagney/2002-02-19: There was a test here to see if the
2975 	 floating-point value was valid (using the macro
2976 	 INVALID_FLOAT).  That test/macro have been removed.
2977 
2978 	 It turns out that only the VAX defined this macro and then
2979 	 only in a non-portable way.  Fixing the portability problem
2980 	 wouldn't help since the VAX floating-point code is also badly
2981 	 bit-rotten.  The target needs to add definitions for the
2982 	 methods gdbarch_float_format and gdbarch_double_format - these
2983 	 exactly describe the target floating-point format.  The
2984 	 problem here is that the corresponding floatformat_vax_f and
2985 	 floatformat_vax_d values these methods should be set to are
2986 	 also not defined either.  Oops!
2987 
2988          Hopefully someone will add both the missing floatformat
2989          definitions and the new cases for floatformat_is_valid ().  */
2990 
2991       if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2992 	{
2993 	  *invp = 1;
2994 	  return 0.0;
2995 	}
2996 
2997       return extract_typed_floating (valaddr, type);
2998     }
2999   else if (code == TYPE_CODE_DECFLOAT)
3000     return decimal_to_doublest (valaddr, len, byte_order);
3001   else if (nosign)
3002     {
3003       /* Unsigned -- be sure we compensate for signed LONGEST.  */
3004       return (ULONGEST) unpack_long (type, valaddr);
3005     }
3006   else
3007     {
3008       /* Signed -- we are OK with unpack_long.  */
3009       return unpack_long (type, valaddr);
3010     }
3011 }
3012 
3013 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
3014    as a CORE_ADDR, assuming the raw data is described by type TYPE.
3015    We don't assume any alignment for the raw data.  Return value is in
3016    host byte order.
3017 
3018    If you want functions and arrays to be coerced to pointers, and
3019    references to be dereferenced, call value_as_address() instead.
3020 
3021    C++: It is assumed that the front-end has taken care of
3022    all matters concerning pointers to members.  A pointer
3023    to member which reaches here is considered to be equivalent
3024    to an INT (or some size).  After all, it is only an offset.  */
3025 
3026 CORE_ADDR
3027 unpack_pointer (struct type *type, const gdb_byte *valaddr)
3028 {
3029   /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
3030      whether we want this to be true eventually.  */
3031   return unpack_long (type, valaddr);
3032 }
3033 
3034 
3035 /* Get the value of the FIELDNO'th field (which must be static) of
3036    TYPE.  */
3037 
3038 struct value *
3039 value_static_field (struct type *type, int fieldno)
3040 {
3041   struct value *retval;
3042 
3043   switch (TYPE_FIELD_LOC_KIND (type, fieldno))
3044     {
3045     case FIELD_LOC_KIND_PHYSADDR:
3046       retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
3047 			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
3048       break;
3049     case FIELD_LOC_KIND_PHYSNAME:
3050     {
3051       const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
3052       /* TYPE_FIELD_NAME (type, fieldno); */
3053       struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
3054 
3055       if (sym.symbol == NULL)
3056 	{
3057 	  /* With some compilers, e.g. HP aCC, static data members are
3058 	     reported as non-debuggable symbols.  */
3059 	  struct bound_minimal_symbol msym
3060 	    = lookup_minimal_symbol (phys_name, NULL, NULL);
3061 
3062 	  if (!msym.minsym)
3063 	    return allocate_optimized_out_value (type);
3064 	  else
3065 	    {
3066 	      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
3067 				      BMSYMBOL_VALUE_ADDRESS (msym));
3068 	    }
3069 	}
3070       else
3071 	retval = value_of_variable (sym.symbol, sym.block);
3072       break;
3073     }
3074     default:
3075       gdb_assert_not_reached ("unexpected field location kind");
3076     }
3077 
3078   return retval;
3079 }
3080 
3081 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3082    You have to be careful here, since the size of the data area for the value
3083    is set by the length of the enclosing type.  So if NEW_ENCL_TYPE is bigger
3084    than the old enclosing type, you have to allocate more space for the
3085    data.  */
3086 
3087 void
3088 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3089 {
3090   if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3091     {
3092       check_type_length_before_alloc (new_encl_type);
3093       val->contents
3094 	= (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
3095     }
3096 
3097   val->enclosing_type = new_encl_type;
3098 }
3099 
3100 /* Given a value ARG1 (offset by OFFSET bytes)
3101    of a struct or union type ARG_TYPE,
3102    extract and return the value of one of its (non-static) fields.
3103    FIELDNO says which field.  */
3104 
3105 struct value *
3106 value_primitive_field (struct value *arg1, LONGEST offset,
3107 		       int fieldno, struct type *arg_type)
3108 {
3109   struct value *v;
3110   struct type *type;
3111   struct gdbarch *arch = get_value_arch (arg1);
3112   int unit_size = gdbarch_addressable_memory_unit_size (arch);
3113 
3114   arg_type = check_typedef (arg_type);
3115   type = TYPE_FIELD_TYPE (arg_type, fieldno);
3116 
3117   /* Call check_typedef on our type to make sure that, if TYPE
3118      is a TYPE_CODE_TYPEDEF, its length is set to the length
3119      of the target type instead of zero.  However, we do not
3120      replace the typedef type by the target type, because we want
3121      to keep the typedef in order to be able to print the type
3122      description correctly.  */
3123   check_typedef (type);
3124 
3125   if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
3126     {
3127       /* Handle packed fields.
3128 
3129 	 Create a new value for the bitfield, with bitpos and bitsize
3130 	 set.  If possible, arrange offset and bitpos so that we can
3131 	 do a single aligned read of the size of the containing type.
3132 	 Otherwise, adjust offset to the byte containing the first
3133 	 bit.  Assume that the address, offset, and embedded offset
3134 	 are sufficiently aligned.  */
3135 
3136       LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
3137       LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
3138 
3139       v = allocate_value_lazy (type);
3140       v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
3141       if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
3142 	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
3143 	v->bitpos = bitpos % container_bitsize;
3144       else
3145 	v->bitpos = bitpos % 8;
3146       v->offset = (value_embedded_offset (arg1)
3147 		   + offset
3148 		   + (bitpos - v->bitpos) / 8);
3149       set_value_parent (v, arg1);
3150       if (!value_lazy (arg1))
3151 	value_fetch_lazy (v);
3152     }
3153   else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3154     {
3155       /* This field is actually a base subobject, so preserve the
3156 	 entire object's contents for later references to virtual
3157 	 bases, etc.  */
3158       LONGEST boffset;
3159 
3160       /* Lazy register values with offsets are not supported.  */
3161       if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3162 	value_fetch_lazy (arg1);
3163 
3164       /* We special case virtual inheritance here because this
3165 	 requires access to the contents, which we would rather avoid
3166 	 for references to ordinary fields of unavailable values.  */
3167       if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3168 	boffset = baseclass_offset (arg_type, fieldno,
3169 				    value_contents (arg1),
3170 				    value_embedded_offset (arg1),
3171 				    value_address (arg1),
3172 				    arg1);
3173       else
3174 	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
3175 
3176       if (value_lazy (arg1))
3177 	v = allocate_value_lazy (value_enclosing_type (arg1));
3178       else
3179 	{
3180 	  v = allocate_value (value_enclosing_type (arg1));
3181 	  value_contents_copy_raw (v, 0, arg1, 0,
3182 				   TYPE_LENGTH (value_enclosing_type (arg1)));
3183 	}
3184       v->type = type;
3185       v->offset = value_offset (arg1);
3186       v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
3187     }
3188   else if (NULL != TYPE_DATA_LOCATION (type))
3189     {
3190       /* Field is a dynamic data member.  */
3191 
3192       gdb_assert (0 == offset);
3193       /* We expect an already resolved data location.  */
3194       gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
3195       /* For dynamic data types, defer memory allocation
3196          until we actually access the value.  */
3197       v = allocate_value_lazy (type);
3198     }
3199   else
3200     {
3201       /* Plain old data member.  */
3202       offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
3203 	         / (HOST_CHAR_BIT * unit_size));
3204 
3205       /* Lazy register values with offsets are not supported.  */
3206       if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3207 	value_fetch_lazy (arg1);
3208 
3209       if (value_lazy (arg1))
3210 	v = allocate_value_lazy (type);
3211       else
3212 	{
3213 	  v = allocate_value (type);
3214 	  value_contents_copy_raw (v, value_embedded_offset (v),
3215 				   arg1, value_embedded_offset (arg1) + offset,
3216 				   type_length_units (type));
3217 	}
3218       v->offset = (value_offset (arg1) + offset
3219 		   + value_embedded_offset (arg1));
3220     }
3221   set_value_component_location (v, arg1);
3222   return v;
3223 }
3224 
3225 /* Given a value ARG1 of a struct or union type,
3226    extract and return the value of one of its (non-static) fields.
3227    FIELDNO says which field.  */
3228 
3229 struct value *
3230 value_field (struct value *arg1, int fieldno)
3231 {
3232   return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3233 }
3234 
3235 /* Return a non-virtual function as a value.
3236    F is the list of member functions which contains the desired method.
3237    J is an index into F which provides the desired method.
3238 
3239    We only use the symbol for its address, so be happy with either a
3240    full symbol or a minimal symbol.  */
3241 
3242 struct value *
3243 value_fn_field (struct value **arg1p, struct fn_field *f,
3244 		int j, struct type *type,
3245 		LONGEST offset)
3246 {
3247   struct value *v;
3248   struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3249   const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3250   struct symbol *sym;
3251   struct bound_minimal_symbol msym;
3252 
3253   sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3254   if (sym != NULL)
3255     {
3256       memset (&msym, 0, sizeof (msym));
3257     }
3258   else
3259     {
3260       gdb_assert (sym == NULL);
3261       msym = lookup_bound_minimal_symbol (physname);
3262       if (msym.minsym == NULL)
3263 	return NULL;
3264     }
3265 
3266   v = allocate_value (ftype);
3267   VALUE_LVAL (v) = lval_memory;
3268   if (sym)
3269     {
3270       set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3271     }
3272   else
3273     {
3274       /* The minimal symbol might point to a function descriptor;
3275 	 resolve it to the actual code address instead.  */
3276       struct objfile *objfile = msym.objfile;
3277       struct gdbarch *gdbarch = get_objfile_arch (objfile);
3278 
3279       set_value_address (v,
3280 	gdbarch_convert_from_func_ptr_addr
3281 	   (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3282     }
3283 
3284   if (arg1p)
3285     {
3286       if (type != value_type (*arg1p))
3287 	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
3288 					value_addr (*arg1p)));
3289 
3290       /* Move the `this' pointer according to the offset.
3291          VALUE_OFFSET (*arg1p) += offset; */
3292     }
3293 
3294   return v;
3295 }
3296 
3297 
3298 
3299 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3300    VALADDR, and return the result.
3301    The bitfield starts at BITPOS bits and contains BITSIZE bits.
3302 
3303    Extracting bits depends on endianness of the machine.  Compute the
3304    number of least significant bits to discard.  For big endian machines,
3305    we compute the total number of bits in the anonymous object, subtract
3306    off the bit count from the MSB of the object to the MSB of the
3307    bitfield, then the size of the bitfield, which leaves the LSB discard
3308    count.  For little endian machines, the discard count is simply the
3309    number of bits from the LSB of the anonymous object to the LSB of the
3310    bitfield.
3311 
3312    If the field is signed, we also do sign extension.  */
3313 
3314 static LONGEST
3315 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3316 		     LONGEST bitpos, LONGEST bitsize)
3317 {
3318   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3319   ULONGEST val;
3320   ULONGEST valmask;
3321   int lsbcount;
3322   LONGEST bytes_read;
3323   LONGEST read_offset;
3324 
3325   /* Read the minimum number of bytes required; there may not be
3326      enough bytes to read an entire ULONGEST.  */
3327   field_type = check_typedef (field_type);
3328   if (bitsize)
3329     bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3330   else
3331     bytes_read = TYPE_LENGTH (field_type);
3332 
3333   read_offset = bitpos / 8;
3334 
3335   val = extract_unsigned_integer (valaddr + read_offset,
3336 				  bytes_read, byte_order);
3337 
3338   /* Extract bits.  See comment above.  */
3339 
3340   if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3341     lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3342   else
3343     lsbcount = (bitpos % 8);
3344   val >>= lsbcount;
3345 
3346   /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3347      If the field is signed, and is negative, then sign extend.  */
3348 
3349   if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
3350     {
3351       valmask = (((ULONGEST) 1) << bitsize) - 1;
3352       val &= valmask;
3353       if (!TYPE_UNSIGNED (field_type))
3354 	{
3355 	  if (val & (valmask ^ (valmask >> 1)))
3356 	    {
3357 	      val |= ~valmask;
3358 	    }
3359 	}
3360     }
3361 
3362   return val;
3363 }
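
/* Worked example (illustrative only): extracting a signed 5-bit field
   at BITPOS == 10 from a little-endian object:

       bytes_read  = ((10 % 8) + 5 + 7) / 8  ==  1
       read_offset = 10 / 8                  ==  1
       lsbcount    = 10 % 8                  ==  2
       valmask     = ((ULONGEST) 1 << 5) - 1 ==  0x1f

   One byte is read at VALADDR + 1, shifted right by two and masked with
   0x1f; because the field type is signed, the result is OR'ed with
   ~valmask whenever bit 4 (valmask ^ (valmask >> 1)) is set.  */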
3364 
3365 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3366    VALADDR + EMBEDDED_OFFSET.  VALADDR points to the contents of
3367    ORIGINAL_VALUE, which must not be NULL.  See
3368    unpack_value_bits_as_long for more details.  */
3369 
3370 int
3371 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3372 			    LONGEST embedded_offset, int fieldno,
3373 			    const struct value *val, LONGEST *result)
3374 {
3375   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3376   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3377   struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3378   int bit_offset;
3379 
3380   gdb_assert (val != NULL);
3381 
3382   bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3383   if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3384       || !value_bits_available (val, bit_offset, bitsize))
3385     return 0;
3386 
3387   *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3388 				 bitpos, bitsize);
3389   return 1;
3390 }
3391 
3392 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3393    object at VALADDR.  See unpack_bits_as_long for more details.  */
3394 
3395 LONGEST
3396 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3397 {
3398   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3399   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3400   struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3401 
3402   return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3403 }
3404 
3405 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3406    VALADDR + EMBEDDED_OFFSET that has the type of DEST_VAL and store
3407    the contents in DEST_VAL, zero or sign extending if the type of
3408    DEST_VAL is wider than BITSIZE.  VALADDR points to the contents of
3409    VAL.  If the contents of VAL required to extract the bitfield
3410    are unavailable or optimized out, DEST_VAL is correspondingly
3411    marked unavailable/optimized out.  */
3412 
3413 void
3414 unpack_value_bitfield (struct value *dest_val,
3415 		       LONGEST bitpos, LONGEST bitsize,
3416 		       const gdb_byte *valaddr, LONGEST embedded_offset,
3417 		       const struct value *val)
3418 {
3419   enum bfd_endian byte_order;
3420   int src_bit_offset;
3421   int dst_bit_offset;
3422   struct type *field_type = value_type (dest_val);
3423 
3424   byte_order = gdbarch_byte_order (get_type_arch (field_type));
3425 
3426   /* First, unpack and sign extend the bitfield as if it were wholly
3427      valid.  Optimized out/unavailable bits are read as zero, but
3428      that's OK, as they'll end up marked below.  If VAL is
3429      wholly invalid, we may have skipped allocating its contents,
3430      though.  See allocate_optimized_out_value.  */
3431   if (valaddr != NULL)
3432     {
3433       LONGEST num;
3434 
3435       num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3436 				 bitpos, bitsize);
3437       store_signed_integer (value_contents_raw (dest_val),
3438 			    TYPE_LENGTH (field_type), byte_order, num);
3439     }
3440 
3441   /* Now copy the optimized out / unavailability ranges to the right
3442      bits.  */
3443   src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3444   if (byte_order == BFD_ENDIAN_BIG)
3445     dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3446   else
3447     dst_bit_offset = 0;
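  /* For example (illustrative): a 5-bit field unpacked into a 32-bit
     DEST_VAL lands in bits [0, 5) of DEST_VAL's buffer on a
     little-endian target, but in bits [27, 32) on a big-endian
     target, so the copied ranges must be offset accordingly.  */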
3448   value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3449 			      val, src_bit_offset, bitsize);
3450 }
3451 
3452 /* Return a new value representing field FIELDNO of an object of type
3453    TYPE located at VALADDR + EMBEDDED_OFFSET.  VALADDR points to the
3454    contents of VAL.  If the contents of VAL required to extract the
3455    bitfield are unavailable or optimized out, the new value is
3456    correspondingly marked unavailable/optimized out.  */
3457 
3458 struct value *
3459 value_field_bitfield (struct type *type, int fieldno,
3460 		      const gdb_byte *valaddr,
3461 		      LONGEST embedded_offset, const struct value *val)
3462 {
3463   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3464   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3465   struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3466 
3467   unpack_value_bitfield (res_val, bitpos, bitsize,
3468 			 valaddr, embedded_offset, val);
3469 
3470   return res_val;
3471 }
3472 
3473 /* Modify the value of a bitfield.  ADDR points to a block of memory in
3474    target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
3475    is the desired value of the field, in host byte order.  BITPOS and BITSIZE
3476    indicate which bits (in target bit order) comprise the bitfield.
3477    Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3478    0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */
3479 
3480 void
3481 modify_field (struct type *type, gdb_byte *addr,
3482 	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3483 {
3484   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3485   ULONGEST oword;
3486   ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3487   LONGEST bytesize;
3488 
3489   /* Normalize BITPOS.  */
3490   addr += bitpos / 8;
3491   bitpos %= 8;
3492 
3493   /* If a negative fieldval fits in the field in question, chop
3494      off the sign extension bits.  */
3495   if ((~fieldval & ~(mask >> 1)) == 0)
3496     fieldval &= mask;
3497 
3498   /* Warn if value is too big to fit in the field in question.  */
3499   if (0 != (fieldval & ~mask))
3500     {
3501       /* FIXME: would like to include fieldval in the message, but
3502          we don't have a sprintf_longest.  */
3503       warning (_("Value does not fit in %s bits."), plongest (bitsize));
3504 
3505       /* Truncate it, otherwise adjoining fields may be corrupted.  */
3506       fieldval &= mask;
3507     }
3508 
3509   /* Ensure no bytes outside of the modified ones get accessed, as that
3510      may cause false valgrind reports.  */
3511 
3512   bytesize = (bitpos + bitsize + 7) / 8;
3513   oword = extract_unsigned_integer (addr, bytesize, byte_order);
3514 
3515   /* Shifting for bit field depends on endianness of the target machine.  */
3516   if (gdbarch_bits_big_endian (get_type_arch (type)))
3517     bitpos = bytesize * 8 - bitpos - bitsize;
3518 
3519   oword &= ~(mask << bitpos);
3520   oword |= fieldval << bitpos;
3521 
3522   store_unsigned_integer (addr, bytesize, byte_order, oword);
3523 }
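
/* Illustrative sketch (not part of GDB): a standalone rendition of the
   read-modify-write above, for a hypothetical 3-bit field at bit
   position 4 of a single byte on a target using little-endian bit
   numbering.  Deliberately excluded from the build.  E.g. with
   *ADDR == 0xff and FIELDVAL == 2, *ADDR becomes 0xaf.  */
#if 0
static void
example_modify_3bit_field (unsigned char *addr, unsigned long long fieldval)
{
  unsigned long long mask = (1ULL << 3) - 1;	/* 0x7 */
  unsigned long long oword = *addr;

  fieldval &= mask;		/* Truncate FIELDVAL to the field width.  */
  oword &= ~(mask << 4);	/* Clear the old field bits.  */
  oword |= fieldval << 4;	/* Insert the new value.  */
  *addr = (unsigned char) oword;
}
#endif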
3524 
3525 /* Pack NUM into BUF using a target format of TYPE.  */
3526 
3527 void
3528 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3529 {
3530   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3531   LONGEST len;
3532 
3533   type = check_typedef (type);
3534   len = TYPE_LENGTH (type);
3535 
3536   switch (TYPE_CODE (type))
3537     {
3538     case TYPE_CODE_INT:
3539     case TYPE_CODE_CHAR:
3540     case TYPE_CODE_ENUM:
3541     case TYPE_CODE_FLAGS:
3542     case TYPE_CODE_BOOL:
3543     case TYPE_CODE_RANGE:
3544     case TYPE_CODE_MEMBERPTR:
3545       store_signed_integer (buf, len, byte_order, num);
3546       break;
3547 
3548     case TYPE_CODE_REF:
3549     case TYPE_CODE_RVALUE_REF:
3550     case TYPE_CODE_PTR:
3551       store_typed_address (buf, type, (CORE_ADDR) num);
3552       break;
3553 
3554     default:
3555       error (_("Unexpected type (%d) encountered for integer constant."),
3556 	     TYPE_CODE (type));
3557     }
3558 }
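
/* For example (illustrative): packing 0x12345678 into a 4-byte
   TYPE_CODE_INT on a big-endian target leaves BUF containing
   { 0x12, 0x34, 0x56, 0x78 }; a little-endian target stores the
   bytes in the reverse order.  */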
3559 
3560 
3561 /* Pack unsigned NUM into BUF using a target format of TYPE.  */
3562 
3563 static void
3564 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3565 {
3566   LONGEST len;
3567   enum bfd_endian byte_order;
3568 
3569   type = check_typedef (type);
3570   len = TYPE_LENGTH (type);
3571   byte_order = gdbarch_byte_order (get_type_arch (type));
3572 
3573   switch (TYPE_CODE (type))
3574     {
3575     case TYPE_CODE_INT:
3576     case TYPE_CODE_CHAR:
3577     case TYPE_CODE_ENUM:
3578     case TYPE_CODE_FLAGS:
3579     case TYPE_CODE_BOOL:
3580     case TYPE_CODE_RANGE:
3581     case TYPE_CODE_MEMBERPTR:
3582       store_unsigned_integer (buf, len, byte_order, num);
3583       break;
3584 
3585     case TYPE_CODE_REF:
3586     case TYPE_CODE_RVALUE_REF:
3587     case TYPE_CODE_PTR:
3588       store_typed_address (buf, type, (CORE_ADDR) num);
3589       break;
3590 
3591     default:
3592       error (_("Unexpected type (%d) encountered "
3593 	       "for unsigned integer constant."),
3594 	     TYPE_CODE (type));
3595     }
3596 }
3597 
3598 
3599 /* Convert C numbers into newly allocated values.  */
3600 
3601 struct value *
3602 value_from_longest (struct type *type, LONGEST num)
3603 {
3604   struct value *val = allocate_value (type);
3605 
3606   pack_long (value_contents_raw (val), type, num);
3607   return val;
3608 }
3609 
3610 
3611 /* Convert C unsigned numbers into newly allocated values.  */
3612 
3613 struct value *
3614 value_from_ulongest (struct type *type, ULONGEST num)
3615 {
3616   struct value *val = allocate_value (type);
3617 
3618   pack_unsigned_long (value_contents_raw (val), type, num);
3619 
3620   return val;
3621 }
3622 
3623 
3624 /* Create a value representing a pointer of type TYPE to the address
3625    ADDR.  */
3626 
3627 struct value *
3628 value_from_pointer (struct type *type, CORE_ADDR addr)
3629 {
3630   struct value *val = allocate_value (type);
3631 
3632   store_typed_address (value_contents_raw (val),
3633 		       check_typedef (type), addr);
3634   return val;
3635 }
3636 
3637 
3638 /* Create a value of type TYPE whose contents come from VALADDR, if it
3639    is non-null, and whose memory address (in the inferior) is
3640    ADDRESS.  The type of the created value may differ from the passed
3641    type TYPE.  Make sure to retrieve the value's new type after this call.
3642    Note that TYPE is not passed through resolve_dynamic_type; this is
3643    a special API intended for use only by Ada.  */
3644 
3645 struct value *
3646 value_from_contents_and_address_unresolved (struct type *type,
3647 					    const gdb_byte *valaddr,
3648 					    CORE_ADDR address)
3649 {
3650   struct value *v;
3651 
3652   if (valaddr == NULL)
3653     v = allocate_value_lazy (type);
3654   else
3655     v = value_from_contents (type, valaddr);
3656   VALUE_LVAL (v) = lval_memory;
3657   set_value_address (v, address);
3658   return v;
3659 }
3660 
3661 /* Create a value of type TYPE whose contents come from VALADDR, if it
3662    is non-null, and whose memory address (in the inferior) is
3663    ADDRESS.  The type of the created value may differ from the passed
3664    type TYPE.  Make sure to retrieve the value's new type after this call.  */
3665 
3666 struct value *
3667 value_from_contents_and_address (struct type *type,
3668 				 const gdb_byte *valaddr,
3669 				 CORE_ADDR address)
3670 {
3671   struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3672   struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3673   struct value *v;
3674 
3675   if (valaddr == NULL)
3676     v = allocate_value_lazy (resolved_type);
3677   else
3678     v = value_from_contents (resolved_type, valaddr);
3679   if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3680       && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3681     address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3682   VALUE_LVAL (v) = lval_memory;
3683   set_value_address (v, address);
3684   return v;
3685 }
3686 
3687 /* Create a value of type TYPE holding the contents CONTENTS.
3688    The new value is `not_lval'.  */
3689 
3690 struct value *
3691 value_from_contents (struct type *type, const gdb_byte *contents)
3692 {
3693   struct value *result;
3694 
3695   result = allocate_value (type);
3696   memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3697   return result;
3698 }
3699 
3700 struct value *
3701 value_from_double (struct type *type, DOUBLEST num)
3702 {
3703   struct value *val = allocate_value (type);
3704   struct type *base_type = check_typedef (type);
3705   enum type_code code = TYPE_CODE (base_type);
3706 
3707   if (code == TYPE_CODE_FLT)
3708     {
3709       store_typed_floating (value_contents_raw (val), base_type, num);
3710     }
3711   else
3712     error (_("Unexpected type encountered for floating constant."));
3713 
3714   return val;
3715 }
3716 
3717 struct value *
3718 value_from_decfloat (struct type *type, const gdb_byte *dec)
3719 {
3720   struct value *val = allocate_value (type);
3721 
3722   memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3723   return val;
3724 }
3725 
3726 /* Extract a value from the value history.  Input will be of the form
3727    $digits or $$digits.  See block comment above 'write_dollar_variable'
3728    for details.  */
3729 
3730 struct value *
3731 value_from_history_ref (const char *h, const char **endp)
3732 {
3733   int index, len;
3734 
3735   if (h[0] == '$')
3736     len = 1;
3737   else
3738     return NULL;
3739 
3740   if (h[1] == '$')
3741     len = 2;
3742 
3743   /* Find length of numeral string.  */
3744   for (; isdigit (h[len]); len++)
3745     ;
3746 
3747   /* Make sure numeral string is not part of an identifier.  */
3748   if (h[len] == '_' || isalpha (h[len]))
3749     return NULL;
3750 
3751   /* Now collect the index value.  */
3752   if (h[1] == '$')
3753     {
3754       if (len == 2)
3755 	{
3756 	  /* For some bizarre reason, "$$" is equivalent to "$$1",
3757 	     rather than to "$$0" as it ought to be!  */
3758 	  index = -1;
3759 	  *endp += len;
3760 	}
3761       else
3762 	{
3763 	  char *local_end;
3764 
3765 	  index = -strtol (&h[2], &local_end, 10);
3766 	  *endp = local_end;
3767 	}
3768     }
3769   else
3770     {
3771       if (len == 1)
3772 	{
3773 	  /* "$" is equivalent to "$0".  */
3774 	  index = 0;
3775 	  *endp += len;
3776 	}
3777       else
3778 	{
3779 	  char *local_end;
3780 
3781 	  index = strtol (&h[1], &local_end, 10);
3782 	  *endp = local_end;
3783 	}
3784     }
3785 
3786   return access_value_history (index);
3787 }
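
/* For example (illustrative): "$5" yields value-history item 5,
   "$$3" yields the item three entries back from the most recent one
   (index -3 above), "$" yields the most recent item (index 0), and
   "$$" behaves like "$$1".  */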
3788 
3789 /* Get the component value (offset by OFFSET bytes) of a struct or
3790    union WHOLE.  Component's type is TYPE.  */
3791 
3792 struct value *
3793 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3794 {
3795   struct value *v;
3796 
3797   if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3798     v = allocate_value_lazy (type);
3799   else
3800     {
3801       v = allocate_value (type);
3802       value_contents_copy (v, value_embedded_offset (v),
3803 			   whole, value_embedded_offset (whole) + offset,
3804 			   type_length_units (type));
3805     }
3806   v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3807   set_value_component_location (v, whole);
3808 
3809   return v;
3810 }
3811 
3812 struct value *
3813 coerce_ref_if_computed (const struct value *arg)
3814 {
3815   const struct lval_funcs *funcs;
3816 
3817   if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3818     return NULL;
3819 
3820   if (value_lval_const (arg) != lval_computed)
3821     return NULL;
3822 
3823   funcs = value_computed_funcs (arg);
3824   if (funcs->coerce_ref == NULL)
3825     return NULL;
3826 
3827   return funcs->coerce_ref (arg);
3828 }
3829 
3830 /* See value.h for a description.  */
3831 
3832 struct value *
3833 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3834 			      const struct type *original_type,
3835 			      const struct value *original_value)
3836 {
3837   /* Re-adjust type.  */
3838   deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3839 
3840   /* Add embedding info.  */
3841   set_value_enclosing_type (value, enc_type);
3842   set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3843 
3844   /* We may be pointing to an object of some derived type.  */
3845   return value_full_object (value, NULL, 0, 0, 0);
3846 }
3847 
3848 struct value *
3849 coerce_ref (struct value *arg)
3850 {
3851   struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3852   struct value *retval;
3853   struct type *enc_type;
3854 
3855   retval = coerce_ref_if_computed (arg);
3856   if (retval)
3857     return retval;
3858 
3859   if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3860     return arg;
3861 
3862   enc_type = check_typedef (value_enclosing_type (arg));
3863   enc_type = TYPE_TARGET_TYPE (enc_type);
3864 
3865   retval = value_at_lazy (enc_type,
3866                           unpack_pointer (value_type (arg),
3867                                           value_contents (arg)));
3868   enc_type = value_type (retval);
3869   return readjust_indirect_value_type (retval, enc_type,
3870                                        value_type_arg_tmp, arg);
3871 }
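
/* For example (illustrative): given a value of C++ type "int &" whose
   contents hold the address 0x1000, coerce_ref returns a lazy "int"
   value located at 0x1000, possibly readjusted to the full object's
   dynamic type by readjust_indirect_value_type.  */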
3872 
3873 struct value *
3874 coerce_array (struct value *arg)
3875 {
3876   struct type *type;
3877 
3878   arg = coerce_ref (arg);
3879   type = check_typedef (value_type (arg));
3880 
3881   switch (TYPE_CODE (type))
3882     {
3883     case TYPE_CODE_ARRAY:
3884       if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3885 	arg = value_coerce_array (arg);
3886       break;
3887     case TYPE_CODE_FUNC:
3888       arg = value_coerce_function (arg);
3889       break;
3890     }
3891   return arg;
3892 }
3893 
3894 
3895 /* Return the return value convention that will be used for the
3896    specified type.  */
3897 
3898 enum return_value_convention
3899 struct_return_convention (struct gdbarch *gdbarch,
3900 			  struct value *function, struct type *value_type)
3901 {
3902   enum type_code code = TYPE_CODE (value_type);
3903 
3904   if (code == TYPE_CODE_ERROR)
3905     error (_("Function return type unknown."));
3906 
3907   /* Probe the architecture for the return-value convention.  */
3908   return gdbarch_return_value (gdbarch, function, value_type,
3909 			       NULL, NULL, NULL);
3910 }
3911 
3912 /* Return true if the function returning the specified type is using
3913    the convention of returning structures in memory (passing in the
3914    address as a hidden first parameter).  */
3915 
3916 int
3917 using_struct_return (struct gdbarch *gdbarch,
3918 		     struct value *function, struct type *value_type)
3919 {
3920   if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3921     /* A void return value is never in memory.  See also corresponding
3922        code in "print_return_value".  */
3923     return 0;
3924 
3925   return (struct_return_convention (gdbarch, function, value_type)
3926 	  != RETURN_VALUE_REGISTER_CONVENTION);
3927 }
3928 
3929 /* Set the initialized field in a value struct.  */
3930 
3931 void
3932 set_value_initialized (struct value *val, int status)
3933 {
3934   val->initialized = status;
3935 }
3936 
3937 /* Return the initialized field in a value struct.  */
3938 
3939 int
3940 value_initialized (const struct value *val)
3941 {
3942   return val->initialized;
3943 }
3944 
3945 /* Load the actual content of a lazy value.  Fetch the data from the
3946    user's process and clear the lazy flag to indicate that the data in
3947    the buffer is valid.
3948 
3949    If the value is zero-length, we avoid calling read_memory, which
3950    would abort.  We mark the value as fetched anyway -- all 0 bytes of
3951    it.  */
3952 
3953 void
3954 value_fetch_lazy (struct value *val)
3955 {
3956   gdb_assert (value_lazy (val));
3957   allocate_value_contents (val);
3958   /* A value is either lazy, or fully fetched.  The
3959      availability/validity is only established as we try to fetch a
3960      value.  */
3961   gdb_assert (VEC_empty (range_s, val->optimized_out));
3962   gdb_assert (VEC_empty (range_s, val->unavailable));
3963   if (value_bitsize (val))
3964     {
3965       /* To read a lazy bitfield, read the entire enclosing value.  This
3966 	 prevents reading the same block of (possibly volatile) memory once
3967          per bitfield.  It would be even better to read only the containing
3968          word, but we have no way to record that just specific bits of a
3969          value have been fetched.  */
3970       struct type *type = check_typedef (value_type (val));
3971       struct value *parent = value_parent (val);
3972 
3973       if (value_lazy (parent))
3974 	value_fetch_lazy (parent);
3975 
3976       unpack_value_bitfield (val,
3977 			     value_bitpos (val), value_bitsize (val),
3978 			     value_contents_for_printing (parent),
3979 			     value_offset (val), parent);
3980     }
3981   else if (VALUE_LVAL (val) == lval_memory)
3982     {
3983       CORE_ADDR addr = value_address (val);
3984       struct type *type = check_typedef (value_enclosing_type (val));
3985 
3986       if (TYPE_LENGTH (type))
3987 	read_value_memory (val, 0, value_stack (val),
3988 			   addr, value_contents_all_raw (val),
3989 			   type_length_units (type));
3990     }
3991   else if (VALUE_LVAL (val) == lval_register)
3992     {
3993       struct frame_info *next_frame;
3994       int regnum;
3995       struct type *type = check_typedef (value_type (val));
3996       struct value *new_val = val, *mark = value_mark ();
3997 
3998       /* Offsets are not supported here; lazy register values must
3999 	 refer to the entire register.  */
4000       gdb_assert (value_offset (val) == 0);
4001 
4002       while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
4003 	{
4004 	  struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
4005 
4006 	  next_frame = frame_find_by_id (next_frame_id);
4007 	  regnum = VALUE_REGNUM (new_val);
4008 
4009 	  gdb_assert (next_frame != NULL);
4010 
4011 	  /* Convertible register routines are used for multi-register
4012 	     values and for interpretation in different types
4013 	     (e.g. float or int from a double register).  Lazy
4014 	     register values should have the register's natural type,
4015 	     so they do not apply.  */
4016 	  gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
4017 						   regnum, type));
4018 
4019 	  /* NEXT_FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
4020 	     Since a "->next" operation was performed when setting
4021 	     this field, we do not need to perform a "next" operation
4022 	     again when unwinding the register.  That's why
4023 	     frame_unwind_register_value() is called here instead of
4024 	     get_frame_register_value().  */
4025 	  new_val = frame_unwind_register_value (next_frame, regnum);
4026 
4027 	  /* If we get another lazy lval_register value, it means the
4028 	     register is found by reading it from NEXT_FRAME's next frame.
4029 	     frame_unwind_register_value should never return a value with
4030 	     the frame id pointing to NEXT_FRAME.  If it does, it means we
4031 	     either have two consecutive frames with the same frame id
4032 	     in the frame chain, or some code is trying to unwind
4033 	     behind get_prev_frame's back (e.g., a frame unwind
4034 	     sniffer trying to unwind), bypassing its validations.  In
4035 	     any case, it should always be an internal error to end up
4036 	     in this situation.  */
4037 	  if (VALUE_LVAL (new_val) == lval_register
4038 	      && value_lazy (new_val)
4039 	      && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
4040 	    internal_error (__FILE__, __LINE__,
4041 			    _("infinite loop while fetching a register"));
4042 	}
4043 
4044       /* If it's still lazy (for instance, a saved register on the
4045 	 stack), fetch it.  */
4046       if (value_lazy (new_val))
4047 	value_fetch_lazy (new_val);
4048 
4049       /* Copy the contents and the unavailability/optimized-out
4050 	 meta-data from NEW_VAL to VAL.  */
4051       set_value_lazy (val, 0);
4052       value_contents_copy (val, value_embedded_offset (val),
4053 			   new_val, value_embedded_offset (new_val),
4054 			   type_length_units (type));
4055 
4056       if (frame_debug)
4057 	{
4058 	  struct gdbarch *gdbarch;
4059 	  struct frame_info *frame;
4060 	  /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
4061 	     so that the frame level will be shown correctly.  */
4062 	  frame = frame_find_by_id (VALUE_FRAME_ID (val));
4063 	  regnum = VALUE_REGNUM (val);
4064 	  gdbarch = get_frame_arch (frame);
4065 
4066 	  fprintf_unfiltered (gdb_stdlog,
4067 			      "{ value_fetch_lazy "
4068 			      "(frame=%d,regnum=%d(%s),...) ",
4069 			      frame_relative_level (frame), regnum,
4070 			      user_reg_map_regnum_to_name (gdbarch, regnum));
4071 
4072 	  fprintf_unfiltered (gdb_stdlog, "->");
4073 	  if (value_optimized_out (new_val))
4074 	    {
4075 	      fprintf_unfiltered (gdb_stdlog, " ");
4076 	      val_print_optimized_out (new_val, gdb_stdlog);
4077 	    }
4078 	  else
4079 	    {
4080 	      int i;
4081 	      const gdb_byte *buf = value_contents (new_val);
4082 
4083 	      if (VALUE_LVAL (new_val) == lval_register)
4084 		fprintf_unfiltered (gdb_stdlog, " register=%d",
4085 				    VALUE_REGNUM (new_val));
4086 	      else if (VALUE_LVAL (new_val) == lval_memory)
4087 		fprintf_unfiltered (gdb_stdlog, " address=%s",
4088 				    paddress (gdbarch,
4089 					      value_address (new_val)));
4090 	      else
4091 		fprintf_unfiltered (gdb_stdlog, " computed");
4092 
4093 	      fprintf_unfiltered (gdb_stdlog, " bytes=");
4094 	      fprintf_unfiltered (gdb_stdlog, "[");
4095 	      for (i = 0; i < register_size (gdbarch, regnum); i++)
4096 		fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
4097 	      fprintf_unfiltered (gdb_stdlog, "]");
4098 	    }
4099 
4100 	  fprintf_unfiltered (gdb_stdlog, " }\n");
4101 	}
4102 
4103       /* Dispose of the intermediate values.  This prevents
4104 	 watchpoints from trying to watch the saved frame pointer.  */
4105       value_free_to_mark (mark);
4106     }
4107   else if (VALUE_LVAL (val) == lval_computed
4108 	   && value_computed_funcs (val)->read != NULL)
4109     value_computed_funcs (val)->read (val);
4110   else
4111     internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
4112 
4113   set_value_lazy (val, 0);
4114 }
4115 
4116 /* Implementation of the convenience function $_isvoid.  */
4117 
4118 static struct value *
4119 isvoid_internal_fn (struct gdbarch *gdbarch,
4120 		    const struct language_defn *language,
4121 		    void *cookie, int argc, struct value **argv)
4122 {
4123   int ret;
4124 
4125   if (argc != 1)
4126     error (_("You must provide one argument for $_isvoid."));
4127 
4128   ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
4129 
4130   return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4131 }
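
/* Example use from the GDB prompt (illustrative, assuming a fresh
   value history):

     (gdb) print $_isvoid ($unset_convenience_var)
     $1 = 1
     (gdb) print $_isvoid (3 + 4)
     $2 = 0  */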
4132 
4133 void
4134 _initialize_values (void)
4135 {
4136   add_cmd ("convenience", no_class, show_convenience, _("\
4137 Debugger convenience (\"$foo\") variables and functions.\n\
4138 Convenience variables are created when you assign them values;\n\
4139 thus, \"set $foo=1\" gives \"$foo\" the value 1.  Values may be any type.\n\
4140 \n\
4141 A few convenience variables are given values automatically:\n\
4142 \"$_\" holds the last address examined with \"x\" or \"info lines\",\n\
4143 \"$__\" holds the contents of the last address examined with \"x\"."
4144 #ifdef HAVE_PYTHON
4145 "\n\n\
4146 Convenience functions are defined via the Python API."
4147 #endif
4148 	   ), &showlist);
4149   add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4150 
4151   add_cmd ("values", no_set_class, show_values, _("\
4152 Elements of value history around item number IDX (or last ten)."),
4153 	   &showlist);
4154 
4155   add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4156 Initialize a convenience variable if necessary.\n\
4157 init-if-undefined VARIABLE = EXPRESSION\n\
4158 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4159 exist or does not contain a value.  The EXPRESSION is not evaluated if the\n\
4160 VARIABLE is already initialized."));
4161 
4162   add_prefix_cmd ("function", no_class, function_command, _("\
4163 Placeholder command for showing help on convenience functions."),
4164 		  &functionlist, "function ", 0, &cmdlist);
4165 
4166   add_internal_function ("_isvoid", _("\
4167 Check whether an expression is void.\n\
4168 Usage: $_isvoid (expression)\n\
4169 Return 1 if the expression is void, zero otherwise."),
4170 			 isvoid_internal_fn, NULL);
4171 
4172   add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4173 				       class_support, &max_value_size, _("\
4174 Set the maximum size of a value gdb will load from the inferior."), _("\
4175 Show the maximum size of a value gdb will load from the inferior."), _("\
4176 Use this to control the maximum size, in bytes, of a value that gdb\n\
4177 will load from the inferior.  Setting this value to 'unlimited'\n\
4178 disables checking.\n\
4179 Setting this does not invalidate already allocated values; it only\n\
4180 prevents future values larger than this size from being allocated."),
4181 			    set_max_value_size,
4182 			    show_max_value_size,
4183 			    &setlist, &showlist);
4184 }
4185