xref: /netbsd-src/external/gpl3/gdb/dist/gdb/value.c (revision d909946ca08dceb44d7d0f22ec9488679695d976)
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2 
3    Copyright (C) 1986-2015 Free Software Foundation, Inc.
4 
5    This file is part of GDB.
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3 of the License, or
10    (at your option) any later version.
11 
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43 
44 /* Prototypes for exported functions.  */
45 
46 void _initialize_values (void);
47 
/* Definition of a user function (an "internal function" callable
   from expressions, held in a convenience variable).  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler invoked when the function is called; see
     internal_function_fn in value.h for the signature.  */
  internal_function_fn handler;

  /* User data passed through to HANDLER on each call.  */
  void *cookie;
};
62 
/* Defines an [OFFSET, OFFSET + LENGTH) range.  Used below to track
   unavailable and optimized-out chunks of a value's contents, in
   bits (see struct value).  */

struct range
{
  /* Lowest offset in the range.  */
  int offset;

  /* Length of the range.  */
  int length;
};

typedef struct range range_s;

/* Instantiate a VEC of struct range, stored by value (DEF_VEC_O).  */
DEF_VEC_O(range_s);
77 
78 /* Returns true if the ranges defined by [offset1, offset1+len1) and
79    [offset2, offset2+len2) overlap.  */
80 
81 static int
82 ranges_overlap (int offset1, int len1,
83 		int offset2, int len2)
84 {
85   ULONGEST h, l;
86 
87   l = max (offset1, offset2);
88   h = min (offset1 + len1, offset2 + len2);
89   return (l < h);
90 }
91 
92 /* Returns true if the first argument is strictly less than the
93    second, useful for VEC_lower_bound.  We keep ranges sorted by
94    offset and coalesce overlapping and contiguous ranges, so this just
95    compares the starting offset.  */
96 
97 static int
98 range_lessthan (const range_s *r1, const range_s *r2)
99 {
100   return r1->offset < r2->offset;
101 }
102 
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
104    OFFSET+LENGTH).  */
105 
106 static int
107 ranges_contain (VEC(range_s) *ranges, int offset, int length)
108 {
109   range_s what;
110   int i;
111 
112   what.offset = offset;
113   what.length = length;
114 
115   /* We keep ranges sorted by offset and coalesce overlapping and
116      contiguous ranges, so to check if a range list contains a given
117      range, we can do a binary search for the position the given range
118      would be inserted if we only considered the starting OFFSET of
119      ranges.  We call that position I.  Since we also have LENGTH to
120      care for (this is a range afterall), we need to check if the
121      _previous_ range overlaps the I range.  E.g.,
122 
123          R
124          |---|
125        |---|    |---|  |------| ... |--|
126        0        1      2            N
127 
128        I=1
129 
130      In the case above, the binary search would return `I=1', meaning,
131      this OFFSET should be inserted at position 1, and the current
132      position 1 should be pushed further (and before 2).  But, `0'
133      overlaps with R.
134 
135      Then we need to check if the I range overlaps the I range itself.
136      E.g.,
137 
138               R
139               |---|
140        |---|    |---|  |-------| ... |--|
141        0        1      2             N
142 
143        I=1
144   */
145 
146   i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
147 
148   if (i > 0)
149     {
150       struct range *bef = VEC_index (range_s, ranges, i - 1);
151 
152       if (ranges_overlap (bef->offset, bef->length, offset, length))
153 	return 1;
154     }
155 
156   if (i < VEC_length (range_s, ranges))
157     {
158       struct range *r = VEC_index (range_s, ranges, i);
159 
160       if (ranges_overlap (r->offset, r->length, offset, length))
161 	return 1;
162     }
163 
164   return 0;
165 }
166 
167 static struct cmd_list_element *functionlist;
168 
/* Note that the fields in this structure are arranged to save a bit
   of memory.  */

struct value
{
  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* If the value has been released.  */
  unsigned int released : 1;

  /* Register number if the value is from a register.  -1 when the
     value does not come from a register (see allocate_value_lazy).  */
  short regnum;

  /* Location of value (if lval).  Which union member is meaningful
     depends on LVAL above.  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.
       If lval == lval_register, this is the byte offset into the
       registers structure.  */
    CORE_ADDR address;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location;

  /* Describes offset of a value within lval of a structure in bytes.
     If lval == lval_memory, this is an offset to the address.  If
     lval == lval_register, this is a further offset from
     location.address within the registers structure.  Note also the
     member embedded_offset below.  */
  int offset;

  /* Only used for bitfields; number of bits contained in them.  */
  int bitsize;

  /* Only used for bitfields; position of start of field.  For
     gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
     gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
  int bitpos;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  struct value *parent;

  /* Frame register value is relative to.  This will be described in
     the lval enum above as "lval_register".  */
  struct frame_id frame_id;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
     full object, not just the limited portion apparent from the
     compile-time type.

     - If `type' has virtual base classes, then even printing `type'
     alone may require reaching outside the `type' portion of the
     object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in bytes.  The
     value_contents() macro takes `embedded_offset' into account, so
     most GDB code continues to see the `type' portion of the value,
     just as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in bytes from the full object to the pointed-to object
     -- that is, the value `embedded_offset' would have if we followed
     the pointer and fetched the complete object.  (I don't really see
     the point.  Why not just determine the run-time type when you
     indirect, and avoid the special case?  The contents don't matter
     until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  int embedded_offset;
  int pointed_to_offset;

  /* Values are stored in a chain, so that they can be deleted easily
     over calls to the inferior.  Values assigned to internal
     variables, put into the value history or exposed to Python are
     taken off this list.  See all_values below.  */
  struct value *next;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  Sized by ENCLOSING_TYPE (see
     allocate_value_contents).  */
  gdb_byte *contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  VEC(range_s) *unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  VEC(range_s) *optimized_out;
};
342 
343 int
344 value_bits_available (const struct value *value, int offset, int length)
345 {
346   gdb_assert (!value->lazy);
347 
348   return !ranges_contain (value->unavailable, offset, length);
349 }
350 
351 int
352 value_bytes_available (const struct value *value, int offset, int length)
353 {
354   return value_bits_available (value,
355 			       offset * TARGET_CHAR_BIT,
356 			       length * TARGET_CHAR_BIT);
357 }
358 
359 int
360 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
361 {
362   gdb_assert (!value->lazy);
363 
364   return ranges_contain (value->optimized_out, bit_offset, bit_length);
365 }
366 
367 int
368 value_entirely_available (struct value *value)
369 {
370   /* We can only tell whether the whole value is available when we try
371      to read it.  */
372   if (value->lazy)
373     value_fetch_lazy (value);
374 
375   if (VEC_empty (range_s, value->unavailable))
376     return 1;
377   return 0;
378 }
379 
380 /* Returns true if VALUE is entirely covered by RANGES.  If the value
381    is lazy, it'll be read now.  Note that RANGE is a pointer to
382    pointer because reading the value might change *RANGE.  */
383 
384 static int
385 value_entirely_covered_by_range_vector (struct value *value,
386 					VEC(range_s) **ranges)
387 {
388   /* We can only tell whether the whole value is optimized out /
389      unavailable when we try to read it.  */
390   if (value->lazy)
391     value_fetch_lazy (value);
392 
393   if (VEC_length (range_s, *ranges) == 1)
394     {
395       struct range *t = VEC_index (range_s, *ranges, 0);
396 
397       if (t->offset == 0
398 	  && t->length == (TARGET_CHAR_BIT
399 			   * TYPE_LENGTH (value_enclosing_type (value))))
400 	return 1;
401     }
402 
403   return 0;
404 }
405 
406 int
407 value_entirely_unavailable (struct value *value)
408 {
409   return value_entirely_covered_by_range_vector (value, &value->unavailable);
410 }
411 
412 int
413 value_entirely_optimized_out (struct value *value)
414 {
415   return value_entirely_covered_by_range_vector (value, &value->optimized_out);
416 }
417 
418 /* Insert into the vector pointed to by VECTORP the bit range starting of
419    OFFSET bits, and extending for the next LENGTH bits.  */
420 
421 static void
422 insert_into_bit_range_vector (VEC(range_s) **vectorp, int offset, int length)
423 {
424   range_s newr;
425   int i;
426 
427   /* Insert the range sorted.  If there's overlap or the new range
428      would be contiguous with an existing range, merge.  */
429 
430   newr.offset = offset;
431   newr.length = length;
432 
433   /* Do a binary search for the position the given range would be
434      inserted if we only considered the starting OFFSET of ranges.
435      Call that position I.  Since we also have LENGTH to care for
436      (this is a range afterall), we need to check if the _previous_
437      range overlaps the I range.  E.g., calling R the new range:
438 
439        #1 - overlaps with previous
440 
441 	   R
442 	   |-...-|
443 	 |---|     |---|  |------| ... |--|
444 	 0         1      2            N
445 
446 	 I=1
447 
448      In the case #1 above, the binary search would return `I=1',
449      meaning, this OFFSET should be inserted at position 1, and the
450      current position 1 should be pushed further (and become 2).  But,
451      note that `0' overlaps with R, so we want to merge them.
452 
453      A similar consideration needs to be taken if the new range would
454      be contiguous with the previous range:
455 
456        #2 - contiguous with previous
457 
458 	    R
459 	    |-...-|
460 	 |--|       |---|  |------| ... |--|
461 	 0          1      2            N
462 
463 	 I=1
464 
465      If there's no overlap with the previous range, as in:
466 
467        #3 - not overlapping and not contiguous
468 
469 	       R
470 	       |-...-|
471 	  |--|         |---|  |------| ... |--|
472 	  0            1      2            N
473 
474 	 I=1
475 
476      or if I is 0:
477 
478        #4 - R is the range with lowest offset
479 
480 	  R
481 	 |-...-|
482 	         |--|       |---|  |------| ... |--|
483 	         0          1      2            N
484 
485 	 I=0
486 
487      ... we just push the new range to I.
488 
489      All the 4 cases above need to consider that the new range may
490      also overlap several of the ranges that follow, or that R may be
491      contiguous with the following range, and merge.  E.g.,
492 
493        #5 - overlapping following ranges
494 
495 	  R
496 	 |------------------------|
497 	         |--|       |---|  |------| ... |--|
498 	         0          1      2            N
499 
500 	 I=0
501 
502        or:
503 
504 	    R
505 	    |-------|
506 	 |--|       |---|  |------| ... |--|
507 	 0          1      2            N
508 
509 	 I=1
510 
511   */
512 
513   i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
514   if (i > 0)
515     {
516       struct range *bef = VEC_index (range_s, *vectorp, i - 1);
517 
518       if (ranges_overlap (bef->offset, bef->length, offset, length))
519 	{
520 	  /* #1 */
521 	  ULONGEST l = min (bef->offset, offset);
522 	  ULONGEST h = max (bef->offset + bef->length, offset + length);
523 
524 	  bef->offset = l;
525 	  bef->length = h - l;
526 	  i--;
527 	}
528       else if (offset == bef->offset + bef->length)
529 	{
530 	  /* #2 */
531 	  bef->length += length;
532 	  i--;
533 	}
534       else
535 	{
536 	  /* #3 */
537 	  VEC_safe_insert (range_s, *vectorp, i, &newr);
538 	}
539     }
540   else
541     {
542       /* #4 */
543       VEC_safe_insert (range_s, *vectorp, i, &newr);
544     }
545 
546   /* Check whether the ranges following the one we've just added or
547      touched can be folded in (#5 above).  */
548   if (i + 1 < VEC_length (range_s, *vectorp))
549     {
550       struct range *t;
551       struct range *r;
552       int removed = 0;
553       int next = i + 1;
554 
555       /* Get the range we just touched.  */
556       t = VEC_index (range_s, *vectorp, i);
557       removed = 0;
558 
559       i = next;
560       for (; VEC_iterate (range_s, *vectorp, i, r); i++)
561 	if (r->offset <= t->offset + t->length)
562 	  {
563 	    ULONGEST l, h;
564 
565 	    l = min (t->offset, r->offset);
566 	    h = max (t->offset + t->length, r->offset + r->length);
567 
568 	    t->offset = l;
569 	    t->length = h - l;
570 
571 	    removed++;
572 	  }
573 	else
574 	  {
575 	    /* If we couldn't merge this one, we won't be able to
576 	       merge following ones either, since the ranges are
577 	       always sorted by OFFSET.  */
578 	    break;
579 	  }
580 
581       if (removed != 0)
582 	VEC_block_remove (range_s, *vectorp, next, removed);
583     }
584 }
585 
586 void
587 mark_value_bits_unavailable (struct value *value, int offset, int length)
588 {
589   insert_into_bit_range_vector (&value->unavailable, offset, length);
590 }
591 
592 void
593 mark_value_bytes_unavailable (struct value *value, int offset, int length)
594 {
595   mark_value_bits_unavailable (value,
596 			       offset * TARGET_CHAR_BIT,
597 			       length * TARGET_CHAR_BIT);
598 }
599 
600 /* Find the first range in RANGES that overlaps the range defined by
601    OFFSET and LENGTH, starting at element POS in the RANGES vector,
602    Returns the index into RANGES where such overlapping range was
603    found, or -1 if none was found.  */
604 
605 static int
606 find_first_range_overlap (VEC(range_s) *ranges, int pos,
607 			  int offset, int length)
608 {
609   range_s *r;
610   int i;
611 
612   for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
613     if (ranges_overlap (r->offset, r->length, offset, length))
614       return i;
615 
616   return -1;
617 }
618 
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
            / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* MASK selects the BITS low-order bits of a byte.  */
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The whole comparison fits in this partial byte; trim the
	     mask down to just LENGTH_BITS bits.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* MASK selects the BITS high-order bits of the final byte.  */
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
719 
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  VEC(range_s) *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  Starts at 0 (callers memset
     the whole struct; see value_contents_bits_eq).  */
  int idx;
};
733 
734 /* Helper function for value_contents_bits_eq.  Compare LENGTH bits of
735    RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
736    ranges starting at OFFSET2 bits.  Return true if the ranges match
737    and fill in *L and *H with the overlapping window relative to
738    (both) OFFSET1 or OFFSET2.  */
739 
740 static int
741 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
742 				    struct ranges_and_idx *rp2,
743 				    int offset1, int offset2,
744 				    int length, ULONGEST *l, ULONGEST *h)
745 {
746   rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
747 				       offset1, length);
748   rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
749 				       offset2, length);
750 
751   if (rp1->idx == -1 && rp2->idx == -1)
752     {
753       *l = length;
754       *h = length;
755       return 1;
756     }
757   else if (rp1->idx == -1 || rp2->idx == -1)
758     return 0;
759   else
760     {
761       range_s *r1, *r2;
762       ULONGEST l1, h1;
763       ULONGEST l2, h2;
764 
765       r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
766       r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
767 
768       /* Get the unavailable windows intersected by the incoming
769 	 ranges.  The first and last ranges that overlap the argument
770 	 range may be wider than said incoming arguments ranges.  */
771       l1 = max (offset1, r1->offset);
772       h1 = min (offset1 + length, r1->offset + r1->length);
773 
774       l2 = max (offset2, r2->offset);
775       h2 = min (offset2 + length, offset2 + r2->length);
776 
777       /* Make them relative to the respective start offsets, so we can
778 	 compare them for equality.  */
779       l1 -= offset1;
780       h1 -= offset1;
781 
782       l2 -= offset2;
783       h2 -= offset2;
784 
785       /* Different ranges, no match.  */
786       if (l1 != l2 || h1 != h2)
787 	return 0;
788 
789       *h = h1;
790       *l = l1;
791       return 1;
792     }
793 }
794 
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

static int
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);

  /* Zero the cursors; each ranges_and_idx walks its vector from slot
     0 as the comparison advances.  */
  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = val1->unavailable;
  rp2[0].ranges = val2->unavailable;
  rp1[1].ranges = val1->optimized_out;
  rp2[1].ranges = val2->optimized_out;

  while (length > 0)
    {
      /* [L, H) is the window, relative to the current offsets, of the
	 first marked (unavailable/optimized-out) region across both
	 sources; L is where plain contents end, H is where the next
	 comparable chunk begins.  */
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return 0;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents (the first L bits; the
	 bits from L to H are marked identically on both sides and are
	 accepted without comparing their raw contents).  */
      if (memcmp_with_bit_offsets (val1->contents, offset1,
				   val2->contents, offset2, l) != 0)
	return 0;

      /* Advance past everything handled this iteration.  */
      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return 1;
}
863 
864 int
865 value_contents_eq (const struct value *val1, int offset1,
866 		   const struct value *val2, int offset2,
867 		   int length)
868 {
869   return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
870 				 val2, offset2 * TARGET_CHAR_BIT,
871 				 length * TARGET_CHAR_BIT);
872 }
873 
874 /* Prototypes for local functions.  */
875 
876 static void show_values (char *, int);
877 
878 static void show_convenience (char *, int);
879 
880 
/* The value-history records all the values printed
   by print commands during this session.  Each chunk
   records 60 consecutive values.  The first chunk on
   the chain records the most recent values.
   The total number of values is in value_history_count.  */

#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
  {
    /* Next (older) chunk on the chain.  */
    struct value_history_chunk *next;

    /* Storage for VALUE_HISTORY_CHUNK history values.  */
    struct value *values[VALUE_HISTORY_CHUNK];
  };

/* Chain of chunks now in use.  */

static struct value_history_chunk *value_history_chain;

static int value_history_count;	/* Abs number of last entry stored.  */
900 
901 
/* List of all value objects currently allocated
   (except for those released by calls to release_value),
   linked through the NEXT field of struct value.
   This is so they can be freed after each command.  */

static struct value *all_values;
907 
908 /* Allocate a lazy value for type TYPE.  Its actual content is
909    "lazily" allocated too: the content field of the return value is
910    NULL; it will be allocated when it is fetched from the target.  */
911 
912 struct value *
913 allocate_value_lazy (struct type *type)
914 {
915   struct value *val;
916 
917   /* Call check_typedef on our type to make sure that, if TYPE
918      is a TYPE_CODE_TYPEDEF, its length is set to the length
919      of the target type instead of zero.  However, we do not
920      replace the typedef type by the target type, because we want
921      to keep the typedef in order to be able to set the VAL's type
922      description correctly.  */
923   check_typedef (type);
924 
925   val = (struct value *) xzalloc (sizeof (struct value));
926   val->contents = NULL;
927   val->next = all_values;
928   all_values = val;
929   val->type = type;
930   val->enclosing_type = type;
931   VALUE_LVAL (val) = not_lval;
932   val->location.address = 0;
933   VALUE_FRAME_ID (val) = null_frame_id;
934   val->offset = 0;
935   val->bitpos = 0;
936   val->bitsize = 0;
937   VALUE_REGNUM (val) = -1;
938   val->lazy = 1;
939   val->embedded_offset = 0;
940   val->pointed_to_offset = 0;
941   val->modifiable = 1;
942   val->initialized = 1;  /* Default to initialized.  */
943 
944   /* Values start out on the all_values chain.  */
945   val->reference_count = 1;
946 
947   return val;
948 }
949 
950 /* Allocate the contents of VAL if it has not been allocated yet.  */
951 
952 static void
953 allocate_value_contents (struct value *val)
954 {
955   if (!val->contents)
956     val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
957 }
958 
959 /* Allocate a  value  and its contents for type TYPE.  */
960 
961 struct value *
962 allocate_value (struct type *type)
963 {
964   struct value *val = allocate_value_lazy (type);
965 
966   allocate_value_contents (val);
967   val->lazy = 0;
968   return val;
969 }
970 
971 /* Allocate a  value  that has the correct length
972    for COUNT repetitions of type TYPE.  */
973 
974 struct value *
975 allocate_repeat_value (struct type *type, int count)
976 {
977   int low_bound = current_language->string_lower_bound;		/* ??? */
978   /* FIXME-type-allocation: need a way to free this type when we are
979      done with it.  */
980   struct type *array_type
981     = lookup_array_range_type (type, low_bound, count + low_bound - 1);
982 
983   return allocate_value (array_type);
984 }
985 
986 struct value *
987 allocate_computed_value (struct type *type,
988                          const struct lval_funcs *funcs,
989                          void *closure)
990 {
991   struct value *v = allocate_value_lazy (type);
992 
993   VALUE_LVAL (v) = lval_computed;
994   v->location.computed.funcs = funcs;
995   v->location.computed.closure = closure;
996 
997   return v;
998 }
999 
1000 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  */
1001 
struct value *
allocate_optimized_out_value (struct type *type)
{
  struct value *retval = allocate_value_lazy (type);

  /* Mark the whole value optimized out, then clear the lazy flag so
     nothing ever tries to fetch its (nonexistent) contents.  */
  mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
  set_value_lazy (retval, 0);
  return retval;
}
1011 
1012 /* Accessor methods.  */
1013 
/* Return the value following VALUE on the all_values chain.  */

struct value *
value_next (struct value *value)
{
  return value->next;
}

/* Return VALUE's (possibly typedef'ed) type.  */

struct type *
value_type (const struct value *value)
{
  return value->type;
}
/* Overwrite VALUE's type in place.  The "deprecated_" prefix signals
   that callers should avoid mutating a value's type after creation.  */
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}

/* Byte offset of VALUE within its parent or its lval location.  */

int
value_offset (const struct value *value)
{
  return value->offset;
}
void
set_value_offset (struct value *value, int offset)
{
  value->offset = offset;
}

/* Bit position of a bitfield value within the containing word.  */

int
value_bitpos (const struct value *value)
{
  return value->bitpos;
}
void
set_value_bitpos (struct value *value, int bit)
{
  value->bitpos = bit;
}

/* Bit width of a bitfield value; 0 means "not a bitfield".  */

int
value_bitsize (const struct value *value)
{
  return value->bitsize;
}
void
set_value_bitsize (struct value *value, int bit)
{
  value->bitsize = bit;
}

/* Parent value of a component value (e.g. a bitfield's container),
   or NULL.  */

struct value *
value_parent (struct value *value)
{
  return value->parent;
}
1069 
1070 /* See value.h.  */
1071 
void
set_value_parent (struct value *value, struct value *parent)
{
  struct value *old = value->parent;

  value->parent = parent;
  /* Take the new reference before dropping the old one, so that the
     sequence is safe even when PARENT == OLD.  */
  if (parent != NULL)
    value_incref (parent);
  value_free (old);
}
1082 
/* Writable contents of VALUE's declared type, i.e. the buffer offset
   by the embedded offset within the enclosing type.  Allocates the
   buffer if needed; does NOT fetch a lazy value.  */

gdb_byte *
value_contents_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents + value->embedded_offset;
}

/* Writable contents of VALUE's whole enclosing type (offset 0).
   Allocates the buffer if needed; does NOT fetch a lazy value.  */

gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents;
}

struct type *
value_enclosing_type (struct value *value)
{
  return value->enclosing_type;
}
1102 
1103 /* Look at value.h for description.  */
1104 
struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  result = value_type (value);
  /* RTTI resolution is only attempted when "set print object on".  */
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if ((TYPE_CODE (result) == TYPE_CODE_PTR
	  || TYPE_CODE (result) == TYPE_CODE_REF)
	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
	     == TYPE_CODE_STRUCT)
        {
          struct type *real_type;

          real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
          if (real_type)
            {
              if (real_type_found)
                *real_type_found = 1;
              result = real_type;
            }
        }
      else if (resolve_simple_types)
        {
	  /* For non-pointer/reference types, the enclosing type is the
	     most derived type we know about.  */
          if (real_type_found)
            *real_type_found = 1;
          result = value_enclosing_type (value);
        }
    }

  return result;
}
1146 
/* Throw the standard "optimized out" error.  */

void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}

/* Throw an error if any part of VALUE is optimized out.  A register
   lval gets a more specific message.  */

static void
require_not_optimized_out (const struct value *value)
{
  if (!VEC_empty (range_s, value->optimized_out))
    {
      if (value->lval == lval_register)
	error (_("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}

/* Throw NOT_AVAILABLE_ERROR if any part of VALUE is unavailable.  */

static void
require_available (const struct value *value)
{
  if (!VEC_empty (range_s, value->unavailable))
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
1171 
/* Contents of the whole enclosing type, fetched if lazy; unlike
   value_contents_all, does not error on optimized-out/unavailable
   bytes (printing code shows those specially).  */

const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents;
}

/* Const variant of the above; the value must already be non-lazy.  */

const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents;
}

/* Contents of the whole enclosing type; errors out if anything is
   optimized out or unavailable.  */

const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1195 
1196 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1197    SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted.  */
1198 
static void
ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
		      VEC (range_s) *src_range, int src_bit_offset,
		      int bit_length)
{
  range_s *r;
  int i;

  for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
    {
      ULONGEST h, l;

      /* Intersect this source range with the window
	 [SRC_BIT_OFFSET, SRC_BIT_OFFSET + BIT_LENGTH).  */
      l = max (r->offset, src_bit_offset);
      h = min (r->offset + r->length, src_bit_offset + bit_length);

      /* Non-empty intersection: re-base it into DST coordinates.  */
      if (l < h)
	insert_into_bit_range_vector (dst_range,
				      dst_bit_offset + (l - src_bit_offset),
				      h - l);
    }
}
1220 
1221 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1222    SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  */
1223 
static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  /* Both kinds of metadata ranges travel with the copied bits.  */
  ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
			src->unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
			src->optimized_out, src_bit_offset,
			bit_length);
}
1236 
1237 /* Copy LENGTH bytes of SRC value's (all) contents
1238    (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1239    contents, starting at DST_OFFSET.  If unavailable contents are
1240    being copied from SRC, the corresponding DST contents are marked
1241    unavailable accordingly.  Neither DST nor SRC may be lazy
1242    values.
1243 
1244    It is assumed the contents of DST in the [DST_OFFSET,
1245    DST_OFFSET+LENGTH) range are wholly available.  */
1246 
1247 void
1248 value_contents_copy_raw (struct value *dst, int dst_offset,
1249 			 struct value *src, int src_offset, int length)
1250 {
1251   range_s *r;
1252   int i;
1253   int src_bit_offset, dst_bit_offset, bit_length;
1254 
1255   /* A lazy DST would make that this copy operation useless, since as
1256      soon as DST's contents were un-lazied (by a later value_contents
1257      call, say), the contents would be overwritten.  A lazy SRC would
1258      mean we'd be copying garbage.  */
1259   gdb_assert (!dst->lazy && !src->lazy);
1260 
1261   /* The overwritten DST range gets unavailability ORed in, not
1262      replaced.  Make sure to remember to implement replacing if it
1263      turns out actually necessary.  */
1264   gdb_assert (value_bytes_available (dst, dst_offset, length));
1265   gdb_assert (!value_bits_any_optimized_out (dst,
1266 					     TARGET_CHAR_BIT * dst_offset,
1267 					     TARGET_CHAR_BIT * length));
1268 
1269   /* Copy the data.  */
1270   memcpy (value_contents_all_raw (dst) + dst_offset,
1271 	  value_contents_all_raw (src) + src_offset,
1272 	  length);
1273 
1274   /* Copy the meta-data, adjusted.  */
1275   src_bit_offset = src_offset * TARGET_CHAR_BIT;
1276   dst_bit_offset = dst_offset * TARGET_CHAR_BIT;
1277   bit_length = length * TARGET_CHAR_BIT;
1278 
1279   value_ranges_copy_adjusted (dst, dst_bit_offset,
1280 			      src, src_bit_offset,
1281 			      bit_length);
1282 }
1283 
1284 /* Copy LENGTH bytes of SRC value's (all) contents
1285    (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1286    (all) contents, starting at DST_OFFSET.  If unavailable contents
1287    are being copied from SRC, the corresponding DST contents are
1288    marked unavailable accordingly.  DST must not be lazy.  If SRC is
1289    lazy, it will be fetched now.
1290 
1291    It is assumed the contents of DST in the [DST_OFFSET,
1292    DST_OFFSET+LENGTH) range are wholly available.  */
1293 
void
value_contents_copy (struct value *dst, int dst_offset,
		     struct value *src, int src_offset, int length)
{
  /* Un-lazy SRC first; DST must already be non-lazy (asserted by
     value_contents_copy_raw).  */
  if (value_lazy (src))
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1303 
/* Non-zero if VALUE's contents have not yet been fetched from the
   target.  */

int
value_lazy (struct value *value)
{
  return value->lazy;
}

void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}

/* Non-zero if a memory lval is known to live on the stack.  */

int
value_stack (struct value *value)
{
  return value->stack;
}

void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}
1327 
/* Contents of VALUE's declared type (fetching if lazy); errors out if
   anything in the value is optimized out or unavailable.  */

const gdb_byte *
value_contents (struct value *value)
{
  const gdb_byte *result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}

/* Writable contents of VALUE's declared type, fetching first if
   lazy.  */

gdb_byte *
value_contents_writeable (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}
1344 
int
value_optimized_out (struct value *value)
{
  /* We can only know if a value is optimized out once we have tried to
     fetch it.  */
  if (VEC_empty (range_s, value->optimized_out) && value->lazy)
    value_fetch_lazy (value);

  /* Any non-empty range means at least part of the value is
     optimized out.  */
  return !VEC_empty (range_s, value->optimized_out);
}
1355 
1356 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1357    the following LENGTH bytes.  */
1358 
void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  /* Byte-granular wrapper over the bit-granular primitive.  */
  mark_value_bits_optimized_out (value,
				 offset * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}

/* See value.h.  */

void
mark_value_bits_optimized_out (struct value *value, int offset, int length)
{
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}
1374 
1375 int
1376 value_bits_synthetic_pointer (const struct value *value,
1377 			      int offset, int length)
1378 {
1379   if (value->lval != lval_computed
1380       || !value->location.computed.funcs->check_synthetic_pointer)
1381     return 0;
1382   return value->location.computed.funcs->check_synthetic_pointer (value,
1383 								  offset,
1384 								  length);
1385 }
1386 
/* Offset of the declared type's data within the enclosing type's
   contents buffer.  */

int
value_embedded_offset (struct value *value)
{
  return value->embedded_offset;
}

void
set_value_embedded_offset (struct value *value, int val)
{
  value->embedded_offset = val;
}

/* For pointers: offset of the pointed-to object within its enclosing
   object.  */

int
value_pointed_to_offset (struct value *value)
{
  return value->pointed_to_offset;
}

void
set_value_pointed_to_offset (struct value *value, int val)
{
  value->pointed_to_offset = val;
}
1410 
/* Callback table of a computed lvalue (asserts lval_computed).  */

const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}

/* Private data of a computed lvalue (asserts lval_computed).  */

void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->lval == lval_computed);

  return v->location.computed.closure;
}

/* Writable access to the lval kind; used by the VALUE_LVAL macro.  */

enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}

enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
1438 
/* Address of VALUE in the inferior, or 0 for lval kinds that have no
   inferior address.  A component value's address is computed relative
   to its parent.  */

CORE_ADDR
value_address (const struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  /* Recurse through the parent chain, accumulating offsets.  */
  if (value->parent != NULL)
    return value_address (value->parent) + value->offset;
  else
    return value->location.address + value->offset;
}
1451 
1452 CORE_ADDR
1453 value_raw_address (struct value *value)
1454 {
1455   if (value->lval == lval_internalvar
1456       || value->lval == lval_internalvar_component
1457       || value->lval == lval_xcallable)
1458     return 0;
1459   return value->location.address;
1460 }
1461 
/* Set VALUE's inferior address; invalid for lval kinds that have no
   address (mirrors the checks in value_address).  */

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval != lval_internalvar
	      && value->lval != lval_internalvar_component
	      && value->lval != lval_xcallable);
  value->location.address = addr;
}
1470 
/* Writable access to location/frame/regnum fields; these back the
   VALUE_* lvalue macros in value.h.  */

struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}

struct frame_id *
deprecated_value_frame_id_hack (struct value *value)
{
  return &value->frame_id;
}

short *
deprecated_value_regnum_hack (struct value *value)
{
  return &value->regnum;
}

/* Non-zero if the user may assign to this value (cleared for history
   entries by record_latest_value).  */

int
deprecated_value_modifiable (struct value *value)
{
  return value->modifiable;
}
1494 
1495 /* Return a mark in the value chain.  All values allocated after the
1496    mark is obtained (except for those released) are subject to being freed
1497    if a subsequent value_free_to_mark is passed the mark.  */
struct value *
value_mark (void)
{
  /* The mark is simply the current head of the all_values chain.  */
  return all_values;
}

/* Take a reference to VAL.  VAL will not be deallocated until all
   references are released.  */

void
value_incref (struct value *val)
{
  val->reference_count++;
}
1512 
1513 /* Release a reference to VAL, which was acquired with value_incref.
1514    This function is also called to deallocate values from the value
1515    chain.  */
1516 
1517 void
1518 value_free (struct value *val)
1519 {
1520   if (val)
1521     {
1522       gdb_assert (val->reference_count > 0);
1523       val->reference_count--;
1524       if (val->reference_count > 0)
1525 	return;
1526 
1527       /* If there's an associated parent value, drop our reference to
1528 	 it.  */
1529       if (val->parent != NULL)
1530 	value_free (val->parent);
1531 
1532       if (VALUE_LVAL (val) == lval_computed)
1533 	{
1534 	  const struct lval_funcs *funcs = val->location.computed.funcs;
1535 
1536 	  if (funcs->free_closure)
1537 	    funcs->free_closure (val);
1538 	}
1539       else if (VALUE_LVAL (val) == lval_xcallable)
1540 	  free_xmethod_worker (val->location.xm_worker);
1541 
1542       xfree (val->contents);
1543       VEC_free (range_s, val->unavailable);
1544     }
1545   xfree (val);
1546 }
1547 
1548 /* Free all values allocated since MARK was obtained by value_mark
1549    (except for those released).  */
void
value_free_to_mark (struct value *mark)
{
  struct value *val;
  struct value *next;

  /* Walk from the chain head down to (but not including) MARK,
     dropping the chain's reference on each value.  */
  for (val = all_values; val && val != mark; val = next)
    {
      next = val->next;
      val->released = 1;
      value_free (val);
    }
  /* VAL is now MARK (or NULL if MARK was not found).  */
  all_values = val;
}
1564 
1565 /* Free all the values that have been allocated (except for those released).
1566    Call after each command, successful or not.
1567    In practice this is called before each command, which is sufficient.  */
1568 
void
free_all_values (void)
{
  struct value *val;
  struct value *next;

  /* Drop the chain's reference on every value; values with extra
     references (e.g. history entries) survive value_free.  */
  for (val = all_values; val; val = next)
    {
      next = val->next;
      val->released = 1;
      value_free (val);
    }

  all_values = 0;
}
1584 
1585 /* Frees all the elements in a chain of values.  */
1586 
1587 void
1588 free_value_chain (struct value *v)
1589 {
1590   struct value *next;
1591 
1592   for (; v; v = next)
1593     {
1594       next = value_next (v);
1595       value_free (v);
1596     }
1597 }
1598 
1599 /* Remove VAL from the chain all_values
1600    so it will not be freed automatically.  */
1601 
1602 void
1603 release_value (struct value *val)
1604 {
1605   struct value *v;
1606 
1607   if (all_values == val)
1608     {
1609       all_values = val->next;
1610       val->next = NULL;
1611       val->released = 1;
1612       return;
1613     }
1614 
1615   for (v = all_values; v; v = v->next)
1616     {
1617       if (v->next == val)
1618 	{
1619 	  v->next = val->next;
1620 	  val->next = NULL;
1621 	  val->released = 1;
1622 	  break;
1623 	}
1624     }
1625 }
1626 
1627 /* If the value is not already released, release it.
1628    If the value is already released, increment its reference count.
1629    That is, this function ensures that the value is released from the
1630    value chain and that the caller owns a reference to it.  */
1631 
void
release_value_or_incref (struct value *val)
{
  /* An already-released value is no longer on all_values, so the
     caller's new ownership is expressed as an extra reference.  */
  if (val->released)
    value_incref (val);
  else
    release_value (val);
}
1640 
1641 /* Release all values up to mark  */
1642 struct value *
1643 value_release_to_mark (struct value *mark)
1644 {
1645   struct value *val;
1646   struct value *next;
1647 
1648   for (val = next = all_values; next; next = next->next)
1649     {
1650       if (next->next == mark)
1651 	{
1652 	  all_values = next->next;
1653 	  next->next = NULL;
1654 	  return val;
1655 	}
1656       next->released = 1;
1657     }
1658   all_values = 0;
1659   return val;
1660 }
1661 
1662 /* Return a copy of the value ARG.
1663    It contains the same contents, for same memory address,
1664    but it's a different block of storage.  */
1665 
struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
  VALUE_REGNUM (val) = VALUE_REGNUM (arg);
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  /* A lazy value has no contents to copy; they will be fetched on
     first access.  */
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  /* Duplicate the availability/optimized-out metadata.  */
  val->unavailable = VEC_copy (range_s, arg->unavailable);
  val->optimized_out = VEC_copy (range_s, arg->optimized_out);
  /* Takes a new reference on ARG's parent, if any.  */
  set_value_parent (val, arg->parent);
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      /* Give the computed lvalue its own closure, if supported.  */
      if (funcs->copy_closure)
        val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1706 
1707 /* Return a "const" and/or "volatile" qualified version of the value V.
1708    If CNST is true, then the returned value will be qualified with
1709    "const".
1710    if VOLTL is true, then the returned value will be qualified with
1711    "volatile".  */
1712 
1713 struct value *
1714 make_cv_value (int cnst, int voltl, struct value *v)
1715 {
1716   struct type *val_type = value_type (v);
1717   struct type *enclosing_type = value_enclosing_type (v);
1718   struct value *cv_val = value_copy (v);
1719 
1720   deprecated_set_value_type (cv_val,
1721 			     make_cv_type (cnst, voltl, val_type, NULL));
1722   set_value_enclosing_type (cv_val,
1723 			    make_cv_type (cnst, voltl, enclosing_type, NULL));
1724 
1725   return cv_val;
1726 }
1727 
1728 /* Return a version of ARG that is non-lvalue.  */
1729 
struct value *
value_non_lval (struct value *arg)
{
  if (VALUE_LVAL (arg) != not_lval)
    {
      /* Snapshot the full enclosing contents into a fresh not_lval
	 value (allocate_value produces not_lval by default).  */
      struct type *enc_type = value_enclosing_type (arg);
      struct value *val = allocate_value (enc_type);

      memcpy (value_contents_all_raw (val), value_contents_all (arg),
	      TYPE_LENGTH (enc_type));
      val->type = arg->type;
      set_value_embedded_offset (val, value_embedded_offset (arg));
      set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
      return val;
    }
   /* Already a non-lvalue; return it unchanged.  */
   return arg;
}
1747 
1748 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.  */
1749 
void
value_force_lval (struct value *v, CORE_ADDR addr)
{
  gdb_assert (VALUE_LVAL (v) == not_lval);

  /* Materialize the value in inferior memory, then point V at it.  */
  write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
  v->lval = lval_memory;
  v->location.address = addr;
}
1759 
/* Make COMPONENT's location describe a piece of WHOLE (same lval kind
   and location, with internalvar mapped to internalvar_component).  */

void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  gdb_assert (whole->lval != lval_xcallable);

  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      /* The component needs its own closure, if the lval supports
	 copying one.  */
      if (funcs->copy_closure)
        component->location.computed.closure = funcs->copy_closure (whole);
    }
}
1780 
1781 
1782 /* Access to the value history.  */
1783 
1784 /* Record a new value in the value history.
1785    Returns the absolute history index of the entry.  */
1786 
int
record_latest_value (struct value *val)
{
  int i;

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  /* The value may have already been released, in which case we're adding a
     new reference for its entry in the history.  That is why we call
     release_value_or_incref here instead of release_value.  */
  release_value_or_incref (val);

  /* Here we treat value_history_count as origin-zero
     and applying to the value being stored now.  */

  i = value_history_count % VALUE_HISTORY_CHUNK;
  /* Index 0 means the current chunk is full; start a new one.  */
  if (i == 0)
    {
      struct value_history_chunk *newobj
	= (struct value_history_chunk *)

      xmalloc (sizeof (struct value_history_chunk));
      memset (newobj->values, 0, sizeof newobj->values);
      newobj->next = value_history_chain;
      value_history_chain = newobj;
    }

  value_history_chain->values[i] = val;

  /* Now we regard value_history_count as origin-one
     and applying to the value just stored.  */

  return ++value_history_count;
}
1830 
1831 /* Return a copy of the value in the history with sequence number NUM.  */
1832 
struct value *
access_value_history (int num)
{
  struct value_history_chunk *chunk;
  int i;
  int absnum = num;

  /* NUM <= 0 is relative to the most recent entry ($$N syntax).  */
  if (absnum <= 0)
    absnum += value_history_count;

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history_count)
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  /* Now absnum is always absolute and origin zero.  */

  /* The chain is newest-first, so walk back from the newest chunk to
     the one holding entry ABSNUM.  */
  chunk = value_history_chain;
  for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
	 - absnum / VALUE_HISTORY_CHUNK;
       i > 0; i--)
    chunk = chunk->next;

  /* Return a copy so callers can't mutate the stored entry.  */
  return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
}
1867 
/* Implement "show values": print ten consecutive history entries.
   NUM_EXP of "+" continues from the last position; a number centers
   the window around that entry (starting five before it).  */

static void
show_values (char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  /* Persist the window position across calls for "show values +".  */
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
         "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history_count - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history_count; i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    {
      num_exp[0] = '+';
      num_exp[1] = '\0';
    }
}
1914 
/* The kinds of content an internal variable ($-variable) can hold;
   selects the active member of union internalvar_data below.  */
enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};
1937 
/* Per-kind payload of an internal variable; which member is valid is
   determined by the enclosing internalvar's KIND field.  */
union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};
1974 
1975 /* Internal variables.  These are variables within the debugger
1976    that hold values assigned by debugger commands.
1977    The user refers to them with a '$' prefix
1978    that does not appear in the variable names stored internally.  */
1979 
struct internalvar
{
  /* Next variable on the singly linked internalvars list.  */
  struct internalvar *next;
  /* Heap-allocated name, without the leading '$'.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};

/* Head of the list of all internal variables.  */
static struct internalvar *internalvars;
1995 
1996 /* If the variable does not already exist create it and give it the
1997    value given.  If no value is given then the default is zero.  */
1998 static void
1999 init_if_undefined_command (char* args, int from_tty)
2000 {
2001   struct internalvar* intvar;
2002 
2003   /* Parse the expression - this is taken from set_command().  */
2004   struct expression *expr = parse_expression (args);
2005   register struct cleanup *old_chain =
2006     make_cleanup (free_current_contents, &expr);
2007 
2008   /* Validate the expression.
2009      Was the expression an assignment?
2010      Or even an expression at all?  */
2011   if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
2012     error (_("Init-if-undefined requires an assignment expression."));
2013 
2014   /* Extract the variable from the parsed expression.
2015      In the case of an assign the lvalue will be in elts[1] and elts[2].  */
2016   if (expr->elts[1].opcode != OP_INTERNALVAR)
2017     error (_("The first parameter to init-if-undefined "
2018 	     "should be a GDB variable."));
2019   intvar = expr->elts[2].internalvar;
2020 
2021   /* Only evaluate the expression if the lvalue is void.
2022      This may still fail if the expresssion is invalid.  */
2023   if (intvar->kind == INTERNALVAR_VOID)
2024     evaluate_expression (expr);
2025 
2026   do_cleanups (old_chain);
2027 }
2028 
2029 
2030 /* Look up an internal variable with name NAME.  NAME should not
2031    normally include a dollar sign.
2032 
2033    If the specified internal variable does not exist,
2034    the return value is NULL.  */
2035 
2036 struct internalvar *
2037 lookup_only_internalvar (const char *name)
2038 {
2039   struct internalvar *var;
2040 
2041   for (var = internalvars; var; var = var->next)
2042     if (strcmp (var->name, name) == 0)
2043       return var;
2044 
2045   return NULL;
2046 }
2047 
2048 /* Complete NAME by comparing it to the names of internal variables.
2049    Returns a vector of newly allocated strings, or NULL if no matches
2050    were found.  */
2051 
2052 VEC (char_ptr) *
2053 complete_internalvar (const char *name)
2054 {
2055   VEC (char_ptr) *result = NULL;
2056   struct internalvar *var;
2057   int len;
2058 
2059   len = strlen (name);
2060 
2061   for (var = internalvars; var; var = var->next)
2062     if (strncmp (var->name, name, len) == 0)
2063       {
2064 	char *r = xstrdup (var->name);
2065 
2066 	VEC_safe_push (char_ptr, result, r);
2067       }
2068 
2069   return result;
2070 }
2071 
2072 /* Create an internal variable with name NAME and with a void value.
2073    NAME should not normally include a dollar sign.  */
2074 
2075 struct internalvar *
2076 create_internalvar (const char *name)
2077 {
2078   struct internalvar *var;
2079 
2080   var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
2081   var->name = concat (name, (char *)NULL);
2082   var->kind = INTERNALVAR_VOID;
2083   var->next = internalvars;
2084   internalvars = var;
2085   return var;
2086 }
2087 
2088 /* Create an internal variable with name NAME and register FUN as the
2089    function that value_of_internalvar uses to create a value whenever
2090    this variable is referenced.  NAME should not normally include a
2091    dollar sign.  DATA is passed uninterpreted to FUN when it is
2092    called.  CLEANUP, if not NULL, is called when the internal variable
2093    is destroyed.  It is passed DATA as its only argument.  */
2094 
2095 struct internalvar *
2096 create_internalvar_type_lazy (const char *name,
2097 			      const struct internalvar_funcs *funcs,
2098 			      void *data)
2099 {
2100   struct internalvar *var = create_internalvar (name);
2101 
2102   var->kind = INTERNALVAR_MAKE_VALUE;
2103   var->u.make_value.functions = funcs;
2104   var->u.make_value.data = data;
2105   return var;
2106 }
2107 
2108 /* See documentation in value.h.  */
2109 
2110 int
2111 compile_internalvar_to_ax (struct internalvar *var,
2112 			   struct agent_expr *expr,
2113 			   struct axs_value *value)
2114 {
2115   if (var->kind != INTERNALVAR_MAKE_VALUE
2116       || var->u.make_value.functions->compile_to_ax == NULL)
2117     return 0;
2118 
2119   var->u.make_value.functions->compile_to_ax (var, expr, value,
2120 					      var->u.make_value.data);
2121   return 1;
2122 }
2123 
2124 /* Look up an internal variable with name NAME.  NAME should not
2125    normally include a dollar sign.
2126 
2127    If the specified internal variable does not exist,
2128    one is created, with a void value.  */
2129 
2130 struct internalvar *
2131 lookup_internalvar (const char *name)
2132 {
2133   struct internalvar *var;
2134 
2135   var = lookup_only_internalvar (name);
2136   if (var)
2137     return var;
2138 
2139   return create_internalvar (name);
2140 }
2141 
/* Return current value of internal variable VAR.  For variables that
   are not inherently typed, use a value type appropriate for GDBARCH.  */

struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      /* A known value is presented as int64; an unknown one as void,
	 just like an undefined convenience variable.  */
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Build a value appropriate for the variable's kind.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      /* Use the stored type when the integer carries one; otherwise
	 default to GDBARCH's plain int.  */
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      /* Hand out a copy of the stored value, fetched eagerly so the
	 caller never sees a lazy value backed by a possibly-changed
	 target.  */
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifyable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2230 
2231 int
2232 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2233 {
2234   if (var->kind == INTERNALVAR_INTEGER)
2235     {
2236       *result = var->u.integer.val;
2237       return 1;
2238     }
2239 
2240   if (var->kind == INTERNALVAR_VALUE)
2241     {
2242       struct type *type = check_typedef (value_type (var->u.value));
2243 
2244       if (TYPE_CODE (type) == TYPE_CODE_INT)
2245 	{
2246 	  *result = value_as_long (var->u.value);
2247 	  return 1;
2248 	}
2249     }
2250 
2251   return 0;
2252 }
2253 
2254 static int
2255 get_internalvar_function (struct internalvar *var,
2256 			  struct internal_function **result)
2257 {
2258   switch (var->kind)
2259     {
2260     case INTERNALVAR_FUNCTION:
2261       *result = var->u.fn.function;
2262       return 1;
2263 
2264     default:
2265       return 0;
2266     }
2267 }
2268 
2269 void
2270 set_internalvar_component (struct internalvar *var, int offset, int bitpos,
2271 			   int bitsize, struct value *newval)
2272 {
2273   gdb_byte *addr;
2274 
2275   switch (var->kind)
2276     {
2277     case INTERNALVAR_VALUE:
2278       addr = value_contents_writeable (var->u.value);
2279 
2280       if (bitsize)
2281 	modify_field (value_type (var->u.value), addr + offset,
2282 		      value_as_long (newval), bitpos, bitsize);
2283       else
2284 	memcpy (addr + offset, value_contents (newval),
2285 		TYPE_LENGTH (value_type (newval)));
2286       break;
2287 
2288     default:
2289       /* We can never get a component of any other kind.  */
2290       internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2291     }
2292 }
2293 
/* Set internal variable VAR to the value VAL.  Errors out if VAR is
   a canonical convenience function, which may not be overwritten.
   The new contents are fully constructed before the old ones are
   released, so a failure while building them leaves VAR unchanged.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (TYPE_CODE (check_typedef (value_type (val))))
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      new_data.value = value_copy (val);
      new_data.value->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (new_data.value))
       value_fetch_lazy (new_data.value);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      release_value (new_data.value);
      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2345 
2346 void
2347 set_internalvar_integer (struct internalvar *var, LONGEST l)
2348 {
2349   /* Clean up old contents.  */
2350   clear_internalvar (var);
2351 
2352   var->kind = INTERNALVAR_INTEGER;
2353   var->u.integer.type = NULL;
2354   var->u.integer.val = l;
2355 }
2356 
2357 void
2358 set_internalvar_string (struct internalvar *var, const char *string)
2359 {
2360   /* Clean up old contents.  */
2361   clear_internalvar (var);
2362 
2363   var->kind = INTERNALVAR_STRING;
2364   var->u.string = xstrdup (string);
2365 }
2366 
2367 static void
2368 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2369 {
2370   /* Clean up old contents.  */
2371   clear_internalvar (var);
2372 
2373   var->kind = INTERNALVAR_FUNCTION;
2374   var->u.fn.function = f;
2375   var->u.fn.canonical = 1;
2376   /* Variables installed here are always the canonical version.  */
2377 }
2378 
2379 void
2380 clear_internalvar (struct internalvar *var)
2381 {
2382   /* Clean up old contents.  */
2383   switch (var->kind)
2384     {
2385     case INTERNALVAR_VALUE:
2386       value_free (var->u.value);
2387       break;
2388 
2389     case INTERNALVAR_STRING:
2390       xfree (var->u.string);
2391       break;
2392 
2393     case INTERNALVAR_MAKE_VALUE:
2394       if (var->u.make_value.functions->destroy != NULL)
2395 	var->u.make_value.functions->destroy (var->u.make_value.data);
2396       break;
2397 
2398     default:
2399       break;
2400     }
2401 
2402   /* Reset to void kind.  */
2403   var->kind = INTERNALVAR_VOID;
2404 }
2405 
2406 char *
2407 internalvar_name (struct internalvar *var)
2408 {
2409   return var->name;
2410 }
2411 
2412 static struct internal_function *
2413 create_internal_function (const char *name,
2414 			  internal_function_fn handler, void *cookie)
2415 {
2416   struct internal_function *ifn = XNEW (struct internal_function);
2417 
2418   ifn->name = xstrdup (name);
2419   ifn->handler = handler;
2420   ifn->cookie = cookie;
2421   return ifn;
2422 }
2423 
2424 char *
2425 value_internal_function_name (struct value *val)
2426 {
2427   struct internal_function *ifn;
2428   int result;
2429 
2430   gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2431   result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2432   gdb_assert (result);
2433 
2434   return ifn->name;
2435 }
2436 
2437 struct value *
2438 call_internal_function (struct gdbarch *gdbarch,
2439 			const struct language_defn *language,
2440 			struct value *func, int argc, struct value **argv)
2441 {
2442   struct internal_function *ifn;
2443   int result;
2444 
2445   gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2446   result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2447   gdb_assert (result);
2448 
2449   return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2450 }
2451 
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */
static void
function_command (char *command, int from_tty)
{
  /* Intentionally empty.  */
}
2461 
/* Destroyer hook for an internal function's command: free the name
   and doc strings whose ownership was transferred to the command by
   add_internal_function.  */
static void
function_destroyer (struct cmd_list_element *self, void *ignore)
{
  xfree ((char *) self->name);
  xfree ((char *) self->doc);
}
2469 
2470 /* Add a new internal function.  NAME is the name of the function; DOC
2471    is a documentation string describing the function.  HANDLER is
2472    called when the function is invoked.  COOKIE is an arbitrary
2473    pointer which is passed to HANDLER and is intended for "user
2474    data".  */
2475 void
2476 add_internal_function (const char *name, const char *doc,
2477 		       internal_function_fn handler, void *cookie)
2478 {
2479   struct cmd_list_element *cmd;
2480   struct internal_function *ifn;
2481   struct internalvar *var = lookup_internalvar (name);
2482 
2483   ifn = create_internal_function (name, handler, cookie);
2484   set_internalvar_function (var, ifn);
2485 
2486   cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2487 		 &functionlist);
2488   cmd->destroyer = function_destroyer;
2489 }
2490 
/* Update VALUE before discarding OBJFILE.  COPIED_TYPES is used to
   prevent cycles / duplicates.  Both the value's type and its
   enclosing type are replaced with objfile-independent copies when
   they currently belong to OBJFILE.  */

void
preserve_one_value (struct value *value, struct objfile *objfile,
		    htab_t copied_types)
{
  if (TYPE_OBJFILE (value->type) == objfile)
    value->type = copy_type_recursive (objfile, value->type, copied_types);

  if (TYPE_OBJFILE (value->enclosing_type) == objfile)
    value->enclosing_type = copy_type_recursive (objfile,
						 value->enclosing_type,
						 copied_types);
}
2506 
2507 /* Likewise for internal variable VAR.  */
2508 
2509 static void
2510 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2511 			  htab_t copied_types)
2512 {
2513   switch (var->kind)
2514     {
2515     case INTERNALVAR_INTEGER:
2516       if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2517 	var->u.integer.type
2518 	  = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2519       break;
2520 
2521     case INTERNALVAR_VALUE:
2522       preserve_one_value (var->u.value, objfile, copied_types);
2523       break;
2524     }
2525 }
2526 
2527 /* Update the internal variables and value history when OBJFILE is
2528    discarded; we must copy the types out of the objfile.  New global types
2529    will be created for every convenience variable which currently points to
2530    this objfile's types, and the convenience variables will be adjusted to
2531    use the new global types.  */
2532 
2533 void
2534 preserve_values (struct objfile *objfile)
2535 {
2536   htab_t copied_types;
2537   struct value_history_chunk *cur;
2538   struct internalvar *var;
2539   int i;
2540 
2541   /* Create the hash table.  We allocate on the objfile's obstack, since
2542      it is soon to be deleted.  */
2543   copied_types = create_copied_types_hash (objfile);
2544 
2545   for (cur = value_history_chain; cur; cur = cur->next)
2546     for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2547       if (cur->values[i])
2548 	preserve_one_value (cur->values[i], objfile, copied_types);
2549 
2550   for (var = internalvars; var; var = var->next)
2551     preserve_one_internalvar (var, objfile, copied_types);
2552 
2553   preserve_ext_lang_values (objfile, copied_types);
2554 
2555   htab_delete (copied_types);
2556 }
2557 
2558 static void
2559 show_convenience (char *ignore, int from_tty)
2560 {
2561   struct gdbarch *gdbarch = get_current_arch ();
2562   struct internalvar *var;
2563   int varseen = 0;
2564   struct value_print_options opts;
2565 
2566   get_user_print_options (&opts);
2567   for (var = internalvars; var; var = var->next)
2568     {
2569 
2570       if (!varseen)
2571 	{
2572 	  varseen = 1;
2573 	}
2574       printf_filtered (("$%s = "), var->name);
2575 
2576       TRY
2577 	{
2578 	  struct value *val;
2579 
2580 	  val = value_of_internalvar (gdbarch, var);
2581 	  value_print (val, gdb_stdout, &opts);
2582 	}
2583       CATCH (ex, RETURN_MASK_ERROR)
2584 	{
2585 	  fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2586 	}
2587       END_CATCH
2588 
2589       printf_filtered (("\n"));
2590     }
2591   if (!varseen)
2592     {
2593       /* This text does not mention convenience functions on purpose.
2594 	 The user can't create them except via Python, and if Python support
2595 	 is installed this message will never be printed ($_streq will
2596 	 exist).  */
2597       printf_unfiltered (_("No debugger convenience variables now defined.\n"
2598 			   "Convenience variables have "
2599 			   "names starting with \"$\";\n"
2600 			   "use \"set\" as in \"set "
2601 			   "$foo = 5\" to define them.\n"));
2602     }
2603 }
2604 
2605 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER.  */
2606 
2607 struct value *
2608 value_of_xmethod (struct xmethod_worker *worker)
2609 {
2610   if (worker->value == NULL)
2611     {
2612       struct value *v;
2613 
2614       v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2615       v->lval = lval_xcallable;
2616       v->location.xm_worker = worker;
2617       v->modifiable = 0;
2618       worker->value = v;
2619     }
2620 
2621   return worker->value;
2622 }
2623 
2624 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD.  */
2625 
2626 struct type *
2627 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2628 {
2629   gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2630 	      && method->lval == lval_xcallable && argc > 0);
2631 
2632   return get_xmethod_result_type (method->location.xm_worker,
2633 				  argv[0], argv + 1, argc - 1);
2634 }
2635 
2636 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD.  */
2637 
2638 struct value *
2639 call_xmethod (struct value *method, int argc, struct value **argv)
2640 {
2641   gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2642 	      && method->lval == lval_xcallable && argc > 0);
2643 
2644   return invoke_xmethod (method->location.xm_worker,
2645 			 argv[0], argv + 1, argc - 1);
2646 }
2647 
2648 /* Extract a value as a C number (either long or double).
2649    Knows how to convert fixed values to double, or
2650    floating values to long.
2651    Does not deallocate the value.  */
2652 
2653 LONGEST
2654 value_as_long (struct value *val)
2655 {
2656   /* This coerces arrays and functions, which is necessary (e.g.
2657      in disassemble_command).  It also dereferences references, which
2658      I suspect is the most logical thing to do.  */
2659   val = coerce_array (val);
2660   return unpack_long (value_type (val), value_contents (val));
2661 }
2662 
2663 DOUBLEST
2664 value_as_double (struct value *val)
2665 {
2666   DOUBLEST foo;
2667   int inv;
2668 
2669   foo = unpack_double (value_type (val), value_contents (val), &inv);
2670   if (inv)
2671     error (_("Invalid floating value found in program."));
2672   return foo;
2673 }
2674 
/* Extract a value as a C pointer.  Does not deallocate the value.
   Note that val's type may not actually be a pointer; value_as_long
   handles all the cases.  */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  /* Non-pointer, non-reference values go through the gdbarch hook
     when the architecture provides one.  */
  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  /* Otherwise the address is simply the unpacked value.  */
  return unpack_long (value_type (val), value_contents (val));
#endif
}
2781 
2782 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2783    as a long, or as a double, assuming the raw data is described
2784    by type TYPE.  Knows how to convert different sizes of values
2785    and can convert between fixed and floating point.  We don't assume
2786    any alignment for the raw data.  Return value is in host byte order.
2787 
2788    If you want functions and arrays to be coerced to pointers, and
2789    references to be dereferenced, call value_as_long() instead.
2790 
2791    C++: It is assumed that the front-end has taken care of
2792    all matters concerning pointers to members.  A pointer
2793    to member which reaches here is considered to be equivalent
2794    to an INT (or some size).  After all, it is only an offset.  */
2795 
2796 LONGEST
2797 unpack_long (struct type *type, const gdb_byte *valaddr)
2798 {
2799   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2800   enum type_code code = TYPE_CODE (type);
2801   int len = TYPE_LENGTH (type);
2802   int nosign = TYPE_UNSIGNED (type);
2803 
2804   switch (code)
2805     {
2806     case TYPE_CODE_TYPEDEF:
2807       return unpack_long (check_typedef (type), valaddr);
2808     case TYPE_CODE_ENUM:
2809     case TYPE_CODE_FLAGS:
2810     case TYPE_CODE_BOOL:
2811     case TYPE_CODE_INT:
2812     case TYPE_CODE_CHAR:
2813     case TYPE_CODE_RANGE:
2814     case TYPE_CODE_MEMBERPTR:
2815       if (nosign)
2816 	return extract_unsigned_integer (valaddr, len, byte_order);
2817       else
2818 	return extract_signed_integer (valaddr, len, byte_order);
2819 
2820     case TYPE_CODE_FLT:
2821       return extract_typed_floating (valaddr, type);
2822 
2823     case TYPE_CODE_DECFLOAT:
2824       /* libdecnumber has a function to convert from decimal to integer, but
2825 	 it doesn't work when the decimal number has a fractional part.  */
2826       return decimal_to_doublest (valaddr, len, byte_order);
2827 
2828     case TYPE_CODE_PTR:
2829     case TYPE_CODE_REF:
2830       /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2831          whether we want this to be true eventually.  */
2832       return extract_typed_address (valaddr, type);
2833 
2834     default:
2835       error (_("Value can't be converted to integer."));
2836     }
2837   return 0;			/* Placate lint.  */
2838 }
2839 
/* Return a double value from the specified type and address.
   INVP points to an int which is set to 0 for valid value,
   1 for invalid value (bad float format).  In either case,
   the returned double is OK to use.  Argument is in target
   format, result is in host format.  */

DOUBLEST
unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code;
  int len;
  int nosign;

  *invp = 0;			/* Assume valid.  */
  /* Strip typedefs so the checks below see the underlying type
     (CHECK_TYPEDEF updates TYPE in place).  */
  CHECK_TYPEDEF (type);
  code = TYPE_CODE (type);
  len = TYPE_LENGTH (type);
  nosign = TYPE_UNSIGNED (type);
  if (code == TYPE_CODE_FLT)
    {
      /* NOTE: cagney/2002-02-19: There was a test here to see if the
	 floating-point value was valid (using the macro
	 INVALID_FLOAT).  That test/macro have been removed.

	 It turns out that only the VAX defined this macro and then
	 only in a non-portable way.  Fixing the portability problem
	 wouldn't help since the VAX floating-point code is also badly
	 bit-rotten.  The target needs to add definitions for the
	 methods gdbarch_float_format and gdbarch_double_format - these
	 exactly describe the target floating-point format.  The
	 problem here is that the corresponding floatformat_vax_f and
	 floatformat_vax_d values these methods should be set to are
	 also not defined either.  Oops!

         Hopefully someone will add both the missing floatformat
         definitions and the new cases for floatformat_is_valid ().  */

      if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
	{
	  *invp = 1;
	  return 0.0;
	}

      return extract_typed_floating (valaddr, type);
    }
  else if (code == TYPE_CODE_DECFLOAT)
    return decimal_to_doublest (valaddr, len, byte_order);
  else if (nosign)
    {
      /* Unsigned -- be sure we compensate for signed LONGEST.  */
      return (ULONGEST) unpack_long (type, valaddr);
    }
  else
    {
      /* Signed -- we are OK with unpack_long.  */
      return unpack_long (type, valaddr);
    }
}
2899 
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a CORE_ADDR, assuming the raw data is described by type TYPE.
   We don't assume any alignment for the raw data.  Return value is in
   host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_address() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

CORE_ADDR
unpack_pointer (struct type *type, const gdb_byte *valaddr)
{
  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  unpack_long does all
     the conversion work.  */
  return unpack_long (type, valaddr);
}
2920 
2921 
2922 /* Get the value of the FIELDNO'th field (which must be static) of
2923    TYPE.  */
2924 
2925 struct value *
2926 value_static_field (struct type *type, int fieldno)
2927 {
2928   struct value *retval;
2929 
2930   switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2931     {
2932     case FIELD_LOC_KIND_PHYSADDR:
2933       retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2934 			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2935       break;
2936     case FIELD_LOC_KIND_PHYSNAME:
2937     {
2938       const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2939       /* TYPE_FIELD_NAME (type, fieldno); */
2940       struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2941 
2942       if (sym == NULL)
2943 	{
2944 	  /* With some compilers, e.g. HP aCC, static data members are
2945 	     reported as non-debuggable symbols.  */
2946 	  struct bound_minimal_symbol msym
2947 	    = lookup_minimal_symbol (phys_name, NULL, NULL);
2948 
2949 	  if (!msym.minsym)
2950 	    return allocate_optimized_out_value (type);
2951 	  else
2952 	    {
2953 	      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2954 				      BMSYMBOL_VALUE_ADDRESS (msym));
2955 	    }
2956 	}
2957       else
2958 	retval = value_of_variable (sym, NULL);
2959       break;
2960     }
2961     default:
2962       gdb_assert_not_reached ("unexpected field location kind");
2963     }
2964 
2965   return retval;
2966 }
2967 
2968 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2969    You have to be careful here, since the size of the data area for the value
2970    is set by the length of the enclosing type.  So if NEW_ENCL_TYPE is bigger
2971    than the old enclosing type, you have to allocate more space for the
2972    data.  */
2973 
2974 void
2975 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2976 {
2977   if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2978     val->contents =
2979       (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2980 
2981   val->enclosing_type = new_encl_type;
2982 }
2983 
2984 /* Given a value ARG1 (offset by OFFSET bytes)
2985    of a struct or union type ARG_TYPE,
2986    extract and return the value of one of its (non-static) fields.
2987    FIELDNO says which field.  */
2988 
struct value *
value_primitive_field (struct value *arg1, int offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;

  CHECK_TYPEDEF (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      int container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      /* Prefer keeping the whole container in view (one aligned read)
	 when the bitfield fits inside it and the container fits in a
	 LONGEST; otherwise fall back to byte granularity.  */
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      /* (bitpos - v->bitpos) is a whole number of bytes by
	 construction of v->bitpos above.  */
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      set_value_parent (v, arg1);
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      int boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      /* Mirror ARG1's laziness: only copy contents when ARG1 already
	 has them.  */
      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else
    {
      /* Plain old data member */
      offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   TYPE_LENGTH (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  /* The component value inherits ARG1's location information.  */
  set_value_component_location (v, arg1);
  VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
  VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
  return v;
}
3096 
3097 /* Given a value ARG1 of a struct or union type,
3098    extract and return the value of one of its (non-static) fields.
3099    FIELDNO says which field.  */
3100 
struct value *
value_field (struct value *arg1, int fieldno)
{
  /* A zero offset selects the field within ARG1 itself.  */
  struct type *arg_type = value_type (arg1);

  return value_primitive_field (arg1, 0, fieldno, arg_type);
}
3106 
3107 /* Return a non-virtual function as a value.
3108    F is the list of member functions which contains the desired method.
3109    J is an index into F which provides the desired method.
3110 
3111    We only use the symbol for its address, so be happy with either a
3112    full symbol or a minimal symbol.  */
3113 
3114 struct value *
3115 value_fn_field (struct value **arg1p, struct fn_field *f,
3116 		int j, struct type *type,
3117 		int offset)
3118 {
3119   struct value *v;
3120   struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3121   const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3122   struct symbol *sym;
3123   struct bound_minimal_symbol msym;
3124 
3125   sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0);
3126   if (sym != NULL)
3127     {
3128       memset (&msym, 0, sizeof (msym));
3129     }
3130   else
3131     {
3132       gdb_assert (sym == NULL);
3133       msym = lookup_bound_minimal_symbol (physname);
3134       if (msym.minsym == NULL)
3135 	return NULL;
3136     }
3137 
3138   v = allocate_value (ftype);
3139   if (sym)
3140     {
3141       set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3142     }
3143   else
3144     {
3145       /* The minimal symbol might point to a function descriptor;
3146 	 resolve it to the actual code address instead.  */
3147       struct objfile *objfile = msym.objfile;
3148       struct gdbarch *gdbarch = get_objfile_arch (objfile);
3149 
3150       set_value_address (v,
3151 	gdbarch_convert_from_func_ptr_addr
3152 	   (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3153     }
3154 
3155   if (arg1p)
3156     {
3157       if (type != value_type (*arg1p))
3158 	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
3159 					value_addr (*arg1p)));
3160 
3161       /* Move the `this' pointer according to the offset.
3162          VALUE_OFFSET (*arg1p) += offset; */
3163     }
3164 
3165   return v;
3166 }
3167 
3168 
3169 
3170 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3171    VALADDR, and store the result in *RESULT.
3172    The bitfield starts at BITPOS bits and contains BITSIZE bits.
3173 
3174    Extracting bits depends on endianness of the machine.  Compute the
3175    number of least significant bits to discard.  For big endian machines,
3176    we compute the total number of bits in the anonymous object, subtract
3177    off the bit count from the MSB of the object to the MSB of the
3178    bitfield, then the size of the bitfield, which leaves the LSB discard
3179    count.  For little endian machines, the discard count is simply the
3180    number of bits from the LSB of the anonymous object to the LSB of the
3181    bitfield.
3182 
3183    If the field is signed, we also do sign extension.  */
3184 
static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     int bitpos, int bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  int bytes_read;
  int read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  CHECK_TYPEDEF (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    /* BITSIZE of zero means "not a bitfield": read the entire field.  */
    bytes_read = TYPE_LENGTH (field_type);

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  if (gdbarch_bits_big_endian (get_type_arch (field_type)))
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!TYPE_UNSIGNED (field_type))
	{
	  /* Test the field's own sign bit (the highest bit of VALMASK);
	     when set, fill in all bits above it.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
3235 
3236 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3237    VALADDR + EMBEDDED_OFFSET.  VALADDR points to the contents of
3238    ORIGINAL_VALUE, which must not be NULL.  See
3239    unpack_value_bits_as_long for more details.  */
3240 
3241 int
3242 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3243 			    int embedded_offset, int fieldno,
3244 			    const struct value *val, LONGEST *result)
3245 {
3246   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3247   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3248   struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3249   int bit_offset;
3250 
3251   gdb_assert (val != NULL);
3252 
3253   bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3254   if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3255       || !value_bits_available (val, bit_offset, bitsize))
3256     return 0;
3257 
3258   *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3259 				 bitpos, bitsize);
3260   return 1;
3261 }
3262 
3263 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3264    object at VALADDR.  See unpack_bits_as_long for more details.  */
3265 
3266 LONGEST
3267 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3268 {
3269   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3270   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3271   struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3272 
3273   return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3274 }
3275 
3276 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3277    VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3278    the contents in DEST_VAL, zero or sign extending if the type of
3279    DEST_VAL is wider than BITSIZE.  VALADDR points to the contents of
3280    VAL.  If the VAL's contents required to extract the bitfield from
3281    are unavailable/optimized out, DEST_VAL is correspondingly
3282    marked unavailable/optimized out.  */
3283 
3284 void
3285 unpack_value_bitfield (struct value *dest_val,
3286 		       int bitpos, int bitsize,
3287 		       const gdb_byte *valaddr, int embedded_offset,
3288 		       const struct value *val)
3289 {
3290   enum bfd_endian byte_order;
3291   int src_bit_offset;
3292   int dst_bit_offset;
3293   LONGEST num;
3294   struct type *field_type = value_type (dest_val);
3295 
3296   /* First, unpack and sign extend the bitfield as if it was wholly
3297      available.  Invalid/unavailable bits are read as zero, but that's
3298      OK, as they'll end up marked below.  */
3299   byte_order = gdbarch_byte_order (get_type_arch (field_type));
3300   num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3301 			     bitpos, bitsize);
3302   store_signed_integer (value_contents_raw (dest_val),
3303 			TYPE_LENGTH (field_type), byte_order, num);
3304 
3305   /* Now copy the optimized out / unavailability ranges to the right
3306      bits.  */
3307   src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3308   if (byte_order == BFD_ENDIAN_BIG)
3309     dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3310   else
3311     dst_bit_offset = 0;
3312   value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3313 			      val, src_bit_offset, bitsize);
3314 }
3315 
3316 /* Return a new value with type TYPE, which is FIELDNO field of the
3317    object at VALADDR + EMBEDDEDOFFSET.  VALADDR points to the contents
3318    of VAL.  If the VAL's contents required to extract the bitfield
3319    from are unavailable/optimized out, the new value is
3320    correspondingly marked unavailable/optimized out.  */
3321 
3322 struct value *
3323 value_field_bitfield (struct type *type, int fieldno,
3324 		      const gdb_byte *valaddr,
3325 		      int embedded_offset, const struct value *val)
3326 {
3327   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3328   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3329   struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3330 
3331   unpack_value_bitfield (res_val, bitpos, bitsize,
3332 			 valaddr, embedded_offset, val);
3333 
3334   return res_val;
3335 }
3336 
3337 /* Modify the value of a bitfield.  ADDR points to a block of memory in
3338    target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
3339    is the desired value of the field, in host byte order.  BITPOS and BITSIZE
3340    indicate which bits (in target bit order) comprise the bitfield.
3341    Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3342    0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */
3343 
3344 void
3345 modify_field (struct type *type, gdb_byte *addr,
3346 	      LONGEST fieldval, int bitpos, int bitsize)
3347 {
3348   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3349   ULONGEST oword;
3350   ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3351   int bytesize;
3352 
3353   /* Normalize BITPOS.  */
3354   addr += bitpos / 8;
3355   bitpos %= 8;
3356 
3357   /* If a negative fieldval fits in the field in question, chop
3358      off the sign extension bits.  */
3359   if ((~fieldval & ~(mask >> 1)) == 0)
3360     fieldval &= mask;
3361 
3362   /* Warn if value is too big to fit in the field in question.  */
3363   if (0 != (fieldval & ~mask))
3364     {
3365       /* FIXME: would like to include fieldval in the message, but
3366          we don't have a sprintf_longest.  */
3367       warning (_("Value does not fit in %d bits."), bitsize);
3368 
3369       /* Truncate it, otherwise adjoining fields may be corrupted.  */
3370       fieldval &= mask;
3371     }
3372 
3373   /* Ensure no bytes outside of the modified ones get accessed as it may cause
3374      false valgrind reports.  */
3375 
3376   bytesize = (bitpos + bitsize + 7) / 8;
3377   oword = extract_unsigned_integer (addr, bytesize, byte_order);
3378 
3379   /* Shifting for bit field depends on endianness of the target machine.  */
3380   if (gdbarch_bits_big_endian (get_type_arch (type)))
3381     bitpos = bytesize * 8 - bitpos - bitsize;
3382 
3383   oword &= ~(mask << bitpos);
3384   oword |= fieldval << bitpos;
3385 
3386   store_unsigned_integer (addr, bytesize, byte_order, oword);
3387 }
3388 
3389 /* Pack NUM into BUF using a target format of TYPE.  */
3390 
3391 void
3392 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3393 {
3394   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3395   int len;
3396 
3397   type = check_typedef (type);
3398   len = TYPE_LENGTH (type);
3399 
3400   switch (TYPE_CODE (type))
3401     {
3402     case TYPE_CODE_INT:
3403     case TYPE_CODE_CHAR:
3404     case TYPE_CODE_ENUM:
3405     case TYPE_CODE_FLAGS:
3406     case TYPE_CODE_BOOL:
3407     case TYPE_CODE_RANGE:
3408     case TYPE_CODE_MEMBERPTR:
3409       store_signed_integer (buf, len, byte_order, num);
3410       break;
3411 
3412     case TYPE_CODE_REF:
3413     case TYPE_CODE_PTR:
3414       store_typed_address (buf, type, (CORE_ADDR) num);
3415       break;
3416 
3417     default:
3418       error (_("Unexpected type (%d) encountered for integer constant."),
3419 	     TYPE_CODE (type));
3420     }
3421 }
3422 
3423 
3424 /* Pack NUM into BUF using a target format of TYPE.  */
3425 
3426 static void
3427 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3428 {
3429   int len;
3430   enum bfd_endian byte_order;
3431 
3432   type = check_typedef (type);
3433   len = TYPE_LENGTH (type);
3434   byte_order = gdbarch_byte_order (get_type_arch (type));
3435 
3436   switch (TYPE_CODE (type))
3437     {
3438     case TYPE_CODE_INT:
3439     case TYPE_CODE_CHAR:
3440     case TYPE_CODE_ENUM:
3441     case TYPE_CODE_FLAGS:
3442     case TYPE_CODE_BOOL:
3443     case TYPE_CODE_RANGE:
3444     case TYPE_CODE_MEMBERPTR:
3445       store_unsigned_integer (buf, len, byte_order, num);
3446       break;
3447 
3448     case TYPE_CODE_REF:
3449     case TYPE_CODE_PTR:
3450       store_typed_address (buf, type, (CORE_ADDR) num);
3451       break;
3452 
3453     default:
3454       error (_("Unexpected type (%d) encountered "
3455 	       "for unsigned integer constant."),
3456 	     TYPE_CODE (type));
3457     }
3458 }
3459 
3460 
3461 /* Convert C numbers into newly allocated values.  */
3462 
3463 struct value *
3464 value_from_longest (struct type *type, LONGEST num)
3465 {
3466   struct value *val = allocate_value (type);
3467 
3468   pack_long (value_contents_raw (val), type, num);
3469   return val;
3470 }
3471 
3472 
3473 /* Convert C unsigned numbers into newly allocated values.  */
3474 
3475 struct value *
3476 value_from_ulongest (struct type *type, ULONGEST num)
3477 {
3478   struct value *val = allocate_value (type);
3479 
3480   pack_unsigned_long (value_contents_raw (val), type, num);
3481 
3482   return val;
3483 }
3484 
3485 
3486 /* Create a value representing a pointer of type TYPE to the address
3487    ADDR.  */
3488 
3489 struct value *
3490 value_from_pointer (struct type *type, CORE_ADDR addr)
3491 {
3492   struct value *val = allocate_value (type);
3493 
3494   store_typed_address (value_contents_raw (val),
3495 		       check_typedef (type), addr);
3496   return val;
3497 }
3498 
3499 
3500 /* Create a value of type TYPE whose contents come from VALADDR, if it
3501    is non-null, and whose memory address (in the inferior) is
3502    ADDRESS.  The type of the created value may differ from the passed
3503    type TYPE.  Make sure to retrieve values new type after this call.
3504    Note that TYPE is not passed through resolve_dynamic_type; this is
3505    a special API intended for use only by Ada.  */
3506 
3507 struct value *
3508 value_from_contents_and_address_unresolved (struct type *type,
3509 					    const gdb_byte *valaddr,
3510 					    CORE_ADDR address)
3511 {
3512   struct value *v;
3513 
3514   if (valaddr == NULL)
3515     v = allocate_value_lazy (type);
3516   else
3517     v = value_from_contents (type, valaddr);
3518   set_value_address (v, address);
3519   VALUE_LVAL (v) = lval_memory;
3520   return v;
3521 }
3522 
3523 /* Create a value of type TYPE whose contents come from VALADDR, if it
3524    is non-null, and whose memory address (in the inferior) is
3525    ADDRESS.  The type of the created value may differ from the passed
3526    type TYPE.  Make sure to retrieve values new type after this call.  */
3527 
3528 struct value *
3529 value_from_contents_and_address (struct type *type,
3530 				 const gdb_byte *valaddr,
3531 				 CORE_ADDR address)
3532 {
3533   struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3534   struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3535   struct value *v;
3536 
3537   if (valaddr == NULL)
3538     v = allocate_value_lazy (resolved_type);
3539   else
3540     v = value_from_contents (resolved_type, valaddr);
3541   if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3542       && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3543     address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3544   set_value_address (v, address);
3545   VALUE_LVAL (v) = lval_memory;
3546   return v;
3547 }
3548 
3549 /* Create a value of type TYPE holding the contents CONTENTS.
3550    The new value is `not_lval'.  */
3551 
3552 struct value *
3553 value_from_contents (struct type *type, const gdb_byte *contents)
3554 {
3555   struct value *result;
3556 
3557   result = allocate_value (type);
3558   memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3559   return result;
3560 }
3561 
3562 struct value *
3563 value_from_double (struct type *type, DOUBLEST num)
3564 {
3565   struct value *val = allocate_value (type);
3566   struct type *base_type = check_typedef (type);
3567   enum type_code code = TYPE_CODE (base_type);
3568 
3569   if (code == TYPE_CODE_FLT)
3570     {
3571       store_typed_floating (value_contents_raw (val), base_type, num);
3572     }
3573   else
3574     error (_("Unexpected type encountered for floating constant."));
3575 
3576   return val;
3577 }
3578 
3579 struct value *
3580 value_from_decfloat (struct type *type, const gdb_byte *dec)
3581 {
3582   struct value *val = allocate_value (type);
3583 
3584   memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3585   return val;
3586 }
3587 
3588 /* Extract a value from the history file.  Input will be of the form
3589    $digits or $$digits.  See block comment above 'write_dollar_variable'
3590    for details.  */
3591 
struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index;
  int len;

  if (h[0] != '$')
    return NULL;

  /* "$$..." counts backwards from the end of the history.  */
  len = (h[1] == '$') ? 2 : 1;

  /* Find length of numeral string.  */
  for (; isdigit (h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha (h[len]))
    return NULL;

  /* Now collect the index value.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  *endp += len;
	}
      else
	{
	  char *numeral_end;

	  index = -strtol (&h[2], &numeral_end, 10);
	  *endp = numeral_end;
	}
    }
  else if (len == 1)
    {
      /* "$" is equivalent to "$0".  */
      index = 0;
      *endp += len;
    }
  else
    {
      char *numeral_end;

      index = strtol (&h[1], &numeral_end, 10);
      *endp = numeral_end;
    }

  return access_value_history (index);
}
3650 
3651 struct value *
3652 coerce_ref_if_computed (const struct value *arg)
3653 {
3654   const struct lval_funcs *funcs;
3655 
3656   if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3657     return NULL;
3658 
3659   if (value_lval_const (arg) != lval_computed)
3660     return NULL;
3661 
3662   funcs = value_computed_funcs (arg);
3663   if (funcs->coerce_ref == NULL)
3664     return NULL;
3665 
3666   return funcs->coerce_ref (arg);
3667 }
3668 
3669 /* Look at value.h for description.  */
3670 
3671 struct value *
3672 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3673 			      struct type *original_type,
3674 			      struct value *original_value)
3675 {
3676   /* Re-adjust type.  */
3677   deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3678 
3679   /* Add embedding info.  */
3680   set_value_enclosing_type (value, enc_type);
3681   set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3682 
3683   /* We may be pointing to an object of some derived type.  */
3684   return value_full_object (value, NULL, 0, 0, 0);
3685 }
3686 
3687 struct value *
3688 coerce_ref (struct value *arg)
3689 {
3690   struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3691   struct value *retval;
3692   struct type *enc_type;
3693 
3694   retval = coerce_ref_if_computed (arg);
3695   if (retval)
3696     return retval;
3697 
3698   if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3699     return arg;
3700 
3701   enc_type = check_typedef (value_enclosing_type (arg));
3702   enc_type = TYPE_TARGET_TYPE (enc_type);
3703 
3704   retval = value_at_lazy (enc_type,
3705                           unpack_pointer (value_type (arg),
3706                                           value_contents (arg)));
3707   enc_type = value_type (retval);
3708   return readjust_indirect_value_type (retval, enc_type,
3709                                        value_type_arg_tmp, arg);
3710 }
3711 
3712 struct value *
3713 coerce_array (struct value *arg)
3714 {
3715   struct type *type;
3716 
3717   arg = coerce_ref (arg);
3718   type = check_typedef (value_type (arg));
3719 
3720   switch (TYPE_CODE (type))
3721     {
3722     case TYPE_CODE_ARRAY:
3723       if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3724 	arg = value_coerce_array (arg);
3725       break;
3726     case TYPE_CODE_FUNC:
3727       arg = value_coerce_function (arg);
3728       break;
3729     }
3730   return arg;
3731 }
3732 
3733 
3734 /* Return the return value convention that will be used for the
3735    specified type.  */
3736 
3737 enum return_value_convention
3738 struct_return_convention (struct gdbarch *gdbarch,
3739 			  struct value *function, struct type *value_type)
3740 {
3741   enum type_code code = TYPE_CODE (value_type);
3742 
3743   if (code == TYPE_CODE_ERROR)
3744     error (_("Function return type unknown."));
3745 
3746   /* Probe the architecture for the return-value convention.  */
3747   return gdbarch_return_value (gdbarch, function, value_type,
3748 			       NULL, NULL, NULL);
3749 }
3750 
3751 /* Return true if the function returning the specified type is using
3752    the convention of returning structures in memory (passing in the
3753    address as a hidden first parameter).  */
3754 
3755 int
3756 using_struct_return (struct gdbarch *gdbarch,
3757 		     struct value *function, struct type *value_type)
3758 {
3759   if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3760     /* A void return value is never in memory.  See also corresponding
3761        code in "print_return_value".  */
3762     return 0;
3763 
3764   return (struct_return_convention (gdbarch, function, value_type)
3765 	  != RETURN_VALUE_REGISTER_CONVENTION);
3766 }
3767 
3768 /* Set the initialized field in a value struct.  */
3769 
3770 void
3771 set_value_initialized (struct value *val, int status)
3772 {
3773   val->initialized = status;
3774 }
3775 
3776 /* Return the initialized field in a value struct.  */
3777 
3778 int
3779 value_initialized (struct value *val)
3780 {
3781   return val->initialized;
3782 }
3783 
3784 /* Load the actual content of a lazy value.  Fetch the data from the
3785    user's process and clear the lazy flag to indicate that the data in
3786    the buffer is valid.
3787 
3788    If the value is zero-length, we avoid calling read_memory, which
3789    would abort.  We mark the value as fetched anyway -- all 0 bytes of
3790    it.  */
3791 
3792 void
3793 value_fetch_lazy (struct value *val)
3794 {
3795   gdb_assert (value_lazy (val));
3796   allocate_value_contents (val);
3797   /* A value is either lazy, or fully fetched.  The
3798      availability/validity is only established as we try to fetch a
3799      value.  */
3800   gdb_assert (VEC_empty (range_s, val->optimized_out));
3801   gdb_assert (VEC_empty (range_s, val->unavailable));
3802   if (value_bitsize (val))
3803     {
3804       /* To read a lazy bitfield, read the entire enclosing value.  This
3805 	 prevents reading the same block of (possibly volatile) memory once
3806          per bitfield.  It would be even better to read only the containing
3807          word, but we have no way to record that just specific bits of a
3808          value have been fetched.  */
3809       struct type *type = check_typedef (value_type (val));
3810       struct value *parent = value_parent (val);
3811 
3812       if (value_lazy (parent))
3813 	value_fetch_lazy (parent);
3814 
3815       unpack_value_bitfield (val,
3816 			     value_bitpos (val), value_bitsize (val),
3817 			     value_contents_for_printing (parent),
3818 			     value_offset (val), parent);
3819     }
3820   else if (VALUE_LVAL (val) == lval_memory)
3821     {
3822       CORE_ADDR addr = value_address (val);
3823       struct type *type = check_typedef (value_enclosing_type (val));
3824 
3825       if (TYPE_LENGTH (type))
3826 	read_value_memory (val, 0, value_stack (val),
3827 			   addr, value_contents_all_raw (val),
3828 			   TYPE_LENGTH (type));
3829     }
3830   else if (VALUE_LVAL (val) == lval_register)
3831     {
3832       struct frame_info *frame;
3833       int regnum;
3834       struct type *type = check_typedef (value_type (val));
3835       struct value *new_val = val, *mark = value_mark ();
3836 
3837       /* Offsets are not supported here; lazy register values must
3838 	 refer to the entire register.  */
3839       gdb_assert (value_offset (val) == 0);
3840 
3841       while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3842 	{
3843 	  struct frame_id frame_id = VALUE_FRAME_ID (new_val);
3844 
3845 	  frame = frame_find_by_id (frame_id);
3846 	  regnum = VALUE_REGNUM (new_val);
3847 
3848 	  gdb_assert (frame != NULL);
3849 
3850 	  /* Convertible register routines are used for multi-register
3851 	     values and for interpretation in different types
3852 	     (e.g. float or int from a double register).  Lazy
3853 	     register values should have the register's natural type,
3854 	     so they do not apply.  */
3855 	  gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
3856 						   regnum, type));
3857 
3858 	  new_val = get_frame_register_value (frame, regnum);
3859 
3860 	  /* If we get another lazy lval_register value, it means the
3861 	     register is found by reading it from the next frame.
3862 	     get_frame_register_value should never return a value with
3863 	     the frame id pointing to FRAME.  If it does, it means we
3864 	     either have two consecutive frames with the same frame id
3865 	     in the frame chain, or some code is trying to unwind
3866 	     behind get_prev_frame's back (e.g., a frame unwind
3867 	     sniffer trying to unwind), bypassing its validations.  In
3868 	     any case, it should always be an internal error to end up
3869 	     in this situation.  */
3870 	  if (VALUE_LVAL (new_val) == lval_register
3871 	      && value_lazy (new_val)
3872 	      && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
3873 	    internal_error (__FILE__, __LINE__,
3874 			    _("infinite loop while fetching a register"));
3875 	}
3876 
3877       /* If it's still lazy (for instance, a saved register on the
3878 	 stack), fetch it.  */
3879       if (value_lazy (new_val))
3880 	value_fetch_lazy (new_val);
3881 
3882       /* Copy the contents and the unavailability/optimized-out
3883 	 meta-data from NEW_VAL to VAL.  */
3884       set_value_lazy (val, 0);
3885       value_contents_copy (val, value_embedded_offset (val),
3886 			   new_val, value_embedded_offset (new_val),
3887 			   TYPE_LENGTH (type));
3888 
3889       if (frame_debug)
3890 	{
3891 	  struct gdbarch *gdbarch;
3892 	  frame = frame_find_by_id (VALUE_FRAME_ID (val));
3893 	  regnum = VALUE_REGNUM (val);
3894 	  gdbarch = get_frame_arch (frame);
3895 
3896 	  fprintf_unfiltered (gdb_stdlog,
3897 			      "{ value_fetch_lazy "
3898 			      "(frame=%d,regnum=%d(%s),...) ",
3899 			      frame_relative_level (frame), regnum,
3900 			      user_reg_map_regnum_to_name (gdbarch, regnum));
3901 
3902 	  fprintf_unfiltered (gdb_stdlog, "->");
3903 	  if (value_optimized_out (new_val))
3904 	    {
3905 	      fprintf_unfiltered (gdb_stdlog, " ");
3906 	      val_print_optimized_out (new_val, gdb_stdlog);
3907 	    }
3908 	  else
3909 	    {
3910 	      int i;
3911 	      const gdb_byte *buf = value_contents (new_val);
3912 
3913 	      if (VALUE_LVAL (new_val) == lval_register)
3914 		fprintf_unfiltered (gdb_stdlog, " register=%d",
3915 				    VALUE_REGNUM (new_val));
3916 	      else if (VALUE_LVAL (new_val) == lval_memory)
3917 		fprintf_unfiltered (gdb_stdlog, " address=%s",
3918 				    paddress (gdbarch,
3919 					      value_address (new_val)));
3920 	      else
3921 		fprintf_unfiltered (gdb_stdlog, " computed");
3922 
3923 	      fprintf_unfiltered (gdb_stdlog, " bytes=");
3924 	      fprintf_unfiltered (gdb_stdlog, "[");
3925 	      for (i = 0; i < register_size (gdbarch, regnum); i++)
3926 		fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3927 	      fprintf_unfiltered (gdb_stdlog, "]");
3928 	    }
3929 
3930 	  fprintf_unfiltered (gdb_stdlog, " }\n");
3931 	}
3932 
3933       /* Dispose of the intermediate values.  This prevents
3934 	 watchpoints from trying to watch the saved frame pointer.  */
3935       value_free_to_mark (mark);
3936     }
3937   else if (VALUE_LVAL (val) == lval_computed
3938 	   && value_computed_funcs (val)->read != NULL)
3939     value_computed_funcs (val)->read (val);
3940   else
3941     internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3942 
3943   set_value_lazy (val, 0);
3944 }
3945 
3946 /* Implementation of the convenience function $_isvoid.  */
3947 
3948 static struct value *
3949 isvoid_internal_fn (struct gdbarch *gdbarch,
3950 		    const struct language_defn *language,
3951 		    void *cookie, int argc, struct value **argv)
3952 {
3953   int ret;
3954 
3955   if (argc != 1)
3956     error (_("You must provide one argument for $_isvoid."));
3957 
3958   ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3959 
3960   return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3961 }
3962 
3963 void
3964 _initialize_values (void)
3965 {
3966   add_cmd ("convenience", no_class, show_convenience, _("\
3967 Debugger convenience (\"$foo\") variables and functions.\n\
3968 Convenience variables are created when you assign them values;\n\
3969 thus, \"set $foo=1\" gives \"$foo\" the value 1.  Values may be any type.\n\
3970 \n\
3971 A few convenience variables are given values automatically:\n\
3972 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3973 \"$__\" holds the contents of the last address examined with \"x\"."
3974 #ifdef HAVE_PYTHON
3975 "\n\n\
3976 Convenience functions are defined via the Python API."
3977 #endif
3978 	   ), &showlist);
3979   add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
3980 
3981   add_cmd ("values", no_set_class, show_values, _("\
3982 Elements of value history around item number IDX (or last ten)."),
3983 	   &showlist);
3984 
3985   add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
3986 Initialize a convenience variable if necessary.\n\
3987 init-if-undefined VARIABLE = EXPRESSION\n\
3988 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3989 exist or does not contain a value.  The EXPRESSION is not evaluated if the\n\
3990 VARIABLE is already initialized."));
3991 
3992   add_prefix_cmd ("function", no_class, function_command, _("\
3993 Placeholder command for showing help on convenience functions."),
3994 		  &functionlist, "function ", 0, &cmdlist);
3995 
3996   add_internal_function ("_isvoid", _("\
3997 Check whether an expression is void.\n\
3998 Usage: $_isvoid (expression)\n\
3999 Return 1 if the expression is void, zero otherwise."),
4000 			 isvoid_internal_fn, NULL);
4001 }
4002