xref: /netbsd-src/external/gpl3/gdb/dist/gdb/value.c (revision 63aea4bd5b445e491ff0389fe27ec78b3099dba3)
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2 
3    Copyright (C) 1986-2015 Free Software Foundation, Inc.
4 
5    This file is part of GDB.
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3 of the License, or
10    (at your option) any later version.
11 
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43 
44 /* Prototypes for exported functions.  */
45 
46 void _initialize_values (void);
47 
48 /* Definition of a user function.  */
49 struct internal_function
50 {
51   /* The name of the function.  It is a bit odd to have this in the
52      function itself -- the user might use a differently-named
53      convenience variable to hold the function.  */
54   char *name;
55 
56   /* The handler.  */
57   internal_function_fn handler;
58 
59   /* User data for the handler.  */
60   void *cookie;
61 };
62 
63 /* Defines an [OFFSET, OFFSET + LENGTH) range.  */
64 
65 struct range
66 {
67   /* Lowest offset in the range.  */
68   int offset;
69 
70   /* Length of the range.  */
71   int length;
72 };
73 
74 typedef struct range range_s;
75 
76 DEF_VEC_O(range_s);
77 
78 /* Returns true if the ranges defined by [offset1, offset1+len1) and
79    [offset2, offset2+len2) overlap.  */
80 
81 static int
82 ranges_overlap (int offset1, int len1,
83 		int offset2, int len2)
84 {
85   ULONGEST h, l;
86 
87   l = max (offset1, offset2);
88   h = min (offset1 + len1, offset2 + len2);
89   return (l < h);
90 }
91 
92 /* Returns true if the first argument is strictly less than the
93    second, useful for VEC_lower_bound.  We keep ranges sorted by
94    offset and coalesce overlapping and contiguous ranges, so this just
95    compares the starting offset.  */
96 
97 static int
98 range_lessthan (const range_s *r1, const range_s *r2)
99 {
100   return r1->offset < r2->offset;
101 }
102 
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
104    OFFSET+LENGTH).  */
105 
106 static int
107 ranges_contain (VEC(range_s) *ranges, int offset, int length)
108 {
109   range_s what;
110   int i;
111 
112   what.offset = offset;
113   what.length = length;
114 
115   /* We keep ranges sorted by offset and coalesce overlapping and
116      contiguous ranges, so to check if a range list contains a given
117      range, we can do a binary search for the position the given range
118      would be inserted if we only considered the starting OFFSET of
119      ranges.  We call that position I.  Since we also have LENGTH to
120      care for (this is a range afterall), we need to check if the
121      _previous_ range overlaps the I range.  E.g.,
122 
123          R
124          |---|
125        |---|    |---|  |------| ... |--|
126        0        1      2            N
127 
128        I=1
129 
130      In the case above, the binary search would return `I=1', meaning,
131      this OFFSET should be inserted at position 1, and the current
132      position 1 should be pushed further (and before 2).  But, `0'
133      overlaps with R.
134 
135      Then we need to check if the I range overlaps the I range itself.
136      E.g.,
137 
138               R
139               |---|
140        |---|    |---|  |-------| ... |--|
141        0        1      2             N
142 
143        I=1
144   */
145 
146   i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
147 
148   if (i > 0)
149     {
150       struct range *bef = VEC_index (range_s, ranges, i - 1);
151 
152       if (ranges_overlap (bef->offset, bef->length, offset, length))
153 	return 1;
154     }
155 
156   if (i < VEC_length (range_s, ranges))
157     {
158       struct range *r = VEC_index (range_s, ranges, i);
159 
160       if (ranges_overlap (r->offset, r->length, offset, length))
161 	return 1;
162     }
163 
164   return 0;
165 }
166 
167 static struct cmd_list_element *functionlist;
168 
169 /* Note that the fields in this structure are arranged to save a bit
170    of memory.  */
171 
172 struct value
173 {
174   /* Type of value; either not an lval, or one of the various
175      different possible kinds of lval.  */
176   enum lval_type lval;
177 
178   /* Is it modifiable?  Only relevant if lval != not_lval.  */
179   unsigned int modifiable : 1;
180 
181   /* If zero, contents of this value are in the contents field.  If
182      nonzero, contents are in inferior.  If the lval field is lval_memory,
183      the contents are in inferior memory at location.address plus offset.
184      The lval field may also be lval_register.
185 
186      WARNING: This field is used by the code which handles watchpoints
187      (see breakpoint.c) to decide whether a particular value can be
188      watched by hardware watchpoints.  If the lazy flag is set for
189      some member of a value chain, it is assumed that this member of
190      the chain doesn't need to be watched as part of watching the
191      value itself.  This is how GDB avoids watching the entire struct
192      or array when the user wants to watch a single struct member or
193      array element.  If you ever change the way lazy flag is set and
194      reset, be sure to consider this use as well!  */
195   unsigned int lazy : 1;
196 
197   /* If value is a variable, is it initialized or not.  */
198   unsigned int initialized : 1;
199 
200   /* If value is from the stack.  If this is set, read_stack will be
201      used instead of read_memory to enable extra caching.  */
202   unsigned int stack : 1;
203 
204   /* If the value has been released.  */
205   unsigned int released : 1;
206 
207   /* Register number if the value is from a register.  */
208   short regnum;
209 
210   /* Location of value (if lval).  */
211   union
212   {
213     /* If lval == lval_memory, this is the address in the inferior.
214        If lval == lval_register, this is the byte offset into the
215        registers structure.  */
216     CORE_ADDR address;
217 
218     /* Pointer to internal variable.  */
219     struct internalvar *internalvar;
220 
221     /* Pointer to xmethod worker.  */
222     struct xmethod_worker *xm_worker;
223 
224     /* If lval == lval_computed, this is a set of function pointers
225        to use to access and describe the value, and a closure pointer
226        for them to use.  */
227     struct
228     {
229       /* Functions to call.  */
230       const struct lval_funcs *funcs;
231 
232       /* Closure for those functions to use.  */
233       void *closure;
234     } computed;
235   } location;
236 
237   /* Describes offset of a value within lval of a structure in bytes.
238      If lval == lval_memory, this is an offset to the address.  If
239      lval == lval_register, this is a further offset from
240      location.address within the registers structure.  Note also the
241      member embedded_offset below.  */
242   int offset;
243 
244   /* Only used for bitfields; number of bits contained in them.  */
245   int bitsize;
246 
247   /* Only used for bitfields; position of start of field.  For
248      gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
249      gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
250   int bitpos;
251 
252   /* The number of references to this value.  When a value is created,
253      the value chain holds a reference, so REFERENCE_COUNT is 1.  If
254      release_value is called, this value is removed from the chain but
255      the caller of release_value now has a reference to this value.
256      The caller must arrange for a call to value_free later.  */
257   int reference_count;
258 
259   /* Only used for bitfields; the containing value.  This allows a
260      single read from the target when displaying multiple
261      bitfields.  */
262   struct value *parent;
263 
264   /* Frame register value is relative to.  This will be described in
265      the lval enum above as "lval_register".  */
266   struct frame_id frame_id;
267 
268   /* Type of the value.  */
269   struct type *type;
270 
271   /* If a value represents a C++ object, then the `type' field gives
272      the object's compile-time type.  If the object actually belongs
273      to some class derived from `type', perhaps with other base
274      classes and additional members, then `type' is just a subobject
275      of the real thing, and the full object is probably larger than
276      `type' would suggest.
277 
278      If `type' is a dynamic class (i.e. one with a vtable), then GDB
279      can actually determine the object's run-time type by looking at
280      the run-time type information in the vtable.  When this
281      information is available, we may elect to read in the entire
282      object, for several reasons:
283 
284      - When printing the value, the user would probably rather see the
285      full object, not just the limited portion apparent from the
286      compile-time type.
287 
288      - If `type' has virtual base classes, then even printing `type'
289      alone may require reaching outside the `type' portion of the
290      object to wherever the virtual base class has been stored.
291 
292      When we store the entire object, `enclosing_type' is the run-time
293      type -- the complete object -- and `embedded_offset' is the
294      offset of `type' within that larger type, in bytes.  The
295      value_contents() macro takes `embedded_offset' into account, so
296      most GDB code continues to see the `type' portion of the value,
297      just as the inferior would.
298 
299      If `type' is a pointer to an object, then `enclosing_type' is a
300      pointer to the object's run-time type, and `pointed_to_offset' is
301      the offset in bytes from the full object to the pointed-to object
302      -- that is, the value `embedded_offset' would have if we followed
303      the pointer and fetched the complete object.  (I don't really see
304      the point.  Why not just determine the run-time type when you
305      indirect, and avoid the special case?  The contents don't matter
306      until you indirect anyway.)
307 
308      If we're not doing anything fancy, `enclosing_type' is equal to
309      `type', and `embedded_offset' is zero, so everything works
310      normally.  */
311   struct type *enclosing_type;
312   int embedded_offset;
313   int pointed_to_offset;
314 
315   /* Values are stored in a chain, so that they can be deleted easily
316      over calls to the inferior.  Values assigned to internal
317      variables, put into the value history or exposed to Python are
318      taken off this list.  */
319   struct value *next;
320 
321   /* Actual contents of the value.  Target byte-order.  NULL or not
322      valid if lazy is nonzero.  */
323   gdb_byte *contents;
324 
325   /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
326      rather than available, since the common and default case is for a
327      value to be available.  This is filled in at value read time.
328      The unavailable ranges are tracked in bits.  Note that a contents
329      bit that has been optimized out doesn't really exist in the
330      program, so it can't be marked unavailable either.  */
331   VEC(range_s) *unavailable;
332 
333   /* Likewise, but for optimized out contents (a chunk of the value of
334      a variable that does not actually exist in the program).  If LVAL
335      is lval_register, this is a register ($pc, $sp, etc., never a
336      program variable) that has not been saved in the frame.  Not
337      saved registers and optimized-out program variables values are
338      treated pretty much the same, except not-saved registers have a
339      different string representation and related error strings.  */
340   VEC(range_s) *optimized_out;
341 };
342 
343 int
344 value_bits_available (const struct value *value, int offset, int length)
345 {
346   gdb_assert (!value->lazy);
347 
348   return !ranges_contain (value->unavailable, offset, length);
349 }
350 
351 int
352 value_bytes_available (const struct value *value, int offset, int length)
353 {
354   return value_bits_available (value,
355 			       offset * TARGET_CHAR_BIT,
356 			       length * TARGET_CHAR_BIT);
357 }
358 
359 int
360 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
361 {
362   gdb_assert (!value->lazy);
363 
364   return ranges_contain (value->optimized_out, bit_offset, bit_length);
365 }
366 
367 int
368 value_entirely_available (struct value *value)
369 {
370   /* We can only tell whether the whole value is available when we try
371      to read it.  */
372   if (value->lazy)
373     value_fetch_lazy (value);
374 
375   if (VEC_empty (range_s, value->unavailable))
376     return 1;
377   return 0;
378 }
379 
380 /* Returns true if VALUE is entirely covered by RANGES.  If the value
381    is lazy, it'll be read now.  Note that RANGE is a pointer to
382    pointer because reading the value might change *RANGE.  */
383 
384 static int
385 value_entirely_covered_by_range_vector (struct value *value,
386 					VEC(range_s) **ranges)
387 {
388   /* We can only tell whether the whole value is optimized out /
389      unavailable when we try to read it.  */
390   if (value->lazy)
391     value_fetch_lazy (value);
392 
393   if (VEC_length (range_s, *ranges) == 1)
394     {
395       struct range *t = VEC_index (range_s, *ranges, 0);
396 
397       if (t->offset == 0
398 	  && t->length == (TARGET_CHAR_BIT
399 			   * TYPE_LENGTH (value_enclosing_type (value))))
400 	return 1;
401     }
402 
403   return 0;
404 }
405 
406 int
407 value_entirely_unavailable (struct value *value)
408 {
409   return value_entirely_covered_by_range_vector (value, &value->unavailable);
410 }
411 
412 int
413 value_entirely_optimized_out (struct value *value)
414 {
415   return value_entirely_covered_by_range_vector (value, &value->optimized_out);
416 }
417 
418 /* Insert into the vector pointed to by VECTORP the bit range starting of
419    OFFSET bits, and extending for the next LENGTH bits.  */
420 
421 static void
422 insert_into_bit_range_vector (VEC(range_s) **vectorp, int offset, int length)
423 {
424   range_s newr;
425   int i;
426 
427   /* Insert the range sorted.  If there's overlap or the new range
428      would be contiguous with an existing range, merge.  */
429 
430   newr.offset = offset;
431   newr.length = length;
432 
433   /* Do a binary search for the position the given range would be
434      inserted if we only considered the starting OFFSET of ranges.
435      Call that position I.  Since we also have LENGTH to care for
436      (this is a range afterall), we need to check if the _previous_
437      range overlaps the I range.  E.g., calling R the new range:
438 
439        #1 - overlaps with previous
440 
441 	   R
442 	   |-...-|
443 	 |---|     |---|  |------| ... |--|
444 	 0         1      2            N
445 
446 	 I=1
447 
448      In the case #1 above, the binary search would return `I=1',
449      meaning, this OFFSET should be inserted at position 1, and the
450      current position 1 should be pushed further (and become 2).  But,
451      note that `0' overlaps with R, so we want to merge them.
452 
453      A similar consideration needs to be taken if the new range would
454      be contiguous with the previous range:
455 
456        #2 - contiguous with previous
457 
458 	    R
459 	    |-...-|
460 	 |--|       |---|  |------| ... |--|
461 	 0          1      2            N
462 
463 	 I=1
464 
465      If there's no overlap with the previous range, as in:
466 
467        #3 - not overlapping and not contiguous
468 
469 	       R
470 	       |-...-|
471 	  |--|         |---|  |------| ... |--|
472 	  0            1      2            N
473 
474 	 I=1
475 
476      or if I is 0:
477 
478        #4 - R is the range with lowest offset
479 
480 	  R
481 	 |-...-|
482 	         |--|       |---|  |------| ... |--|
483 	         0          1      2            N
484 
485 	 I=0
486 
487      ... we just push the new range to I.
488 
489      All the 4 cases above need to consider that the new range may
490      also overlap several of the ranges that follow, or that R may be
491      contiguous with the following range, and merge.  E.g.,
492 
493        #5 - overlapping following ranges
494 
495 	  R
496 	 |------------------------|
497 	         |--|       |---|  |------| ... |--|
498 	         0          1      2            N
499 
500 	 I=0
501 
502        or:
503 
504 	    R
505 	    |-------|
506 	 |--|       |---|  |------| ... |--|
507 	 0          1      2            N
508 
509 	 I=1
510 
511   */
512 
513   i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
514   if (i > 0)
515     {
516       struct range *bef = VEC_index (range_s, *vectorp, i - 1);
517 
518       if (ranges_overlap (bef->offset, bef->length, offset, length))
519 	{
520 	  /* #1 */
521 	  ULONGEST l = min (bef->offset, offset);
522 	  ULONGEST h = max (bef->offset + bef->length, offset + length);
523 
524 	  bef->offset = l;
525 	  bef->length = h - l;
526 	  i--;
527 	}
528       else if (offset == bef->offset + bef->length)
529 	{
530 	  /* #2 */
531 	  bef->length += length;
532 	  i--;
533 	}
534       else
535 	{
536 	  /* #3 */
537 	  VEC_safe_insert (range_s, *vectorp, i, &newr);
538 	}
539     }
540   else
541     {
542       /* #4 */
543       VEC_safe_insert (range_s, *vectorp, i, &newr);
544     }
545 
546   /* Check whether the ranges following the one we've just added or
547      touched can be folded in (#5 above).  */
548   if (i + 1 < VEC_length (range_s, *vectorp))
549     {
550       struct range *t;
551       struct range *r;
552       int removed = 0;
553       int next = i + 1;
554 
555       /* Get the range we just touched.  */
556       t = VEC_index (range_s, *vectorp, i);
557       removed = 0;
558 
559       i = next;
560       for (; VEC_iterate (range_s, *vectorp, i, r); i++)
561 	if (r->offset <= t->offset + t->length)
562 	  {
563 	    ULONGEST l, h;
564 
565 	    l = min (t->offset, r->offset);
566 	    h = max (t->offset + t->length, r->offset + r->length);
567 
568 	    t->offset = l;
569 	    t->length = h - l;
570 
571 	    removed++;
572 	  }
573 	else
574 	  {
575 	    /* If we couldn't merge this one, we won't be able to
576 	       merge following ones either, since the ranges are
577 	       always sorted by OFFSET.  */
578 	    break;
579 	  }
580 
581       if (removed != 0)
582 	VEC_block_remove (range_s, *vectorp, next, removed);
583     }
584 }
585 
586 void
587 mark_value_bits_unavailable (struct value *value, int offset, int length)
588 {
589   insert_into_bit_range_vector (&value->unavailable, offset, length);
590 }
591 
592 void
593 mark_value_bytes_unavailable (struct value *value, int offset, int length)
594 {
595   mark_value_bits_unavailable (value,
596 			       offset * TARGET_CHAR_BIT,
597 			       length * TARGET_CHAR_BIT);
598 }
599 
600 /* Find the first range in RANGES that overlaps the range defined by
601    OFFSET and LENGTH, starting at element POS in the RANGES vector,
602    Returns the index into RANGES where such overlapping range was
603    found, or -1 if none was found.  */
604 
605 static int
606 find_first_range_overlap (VEC(range_s) *ranges, int pos,
607 			  int offset, int length)
608 {
609   range_s *r;
610   int i;
611 
612   for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
613     if (ranges_overlap (r->offset, r->length, offset, length))
614       return i;
615 
616   return -1;
617 }
618 
619 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
620    PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
621    return non-zero.
622 
623    It must always be the case that:
624      OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
625 
626    It is assumed that memory can be accessed from:
627      PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
628    to:
629      PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
630             / TARGET_CHAR_BIT)  */
631 static int
632 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
633 			 const gdb_byte *ptr2, size_t offset2_bits,
634 			 size_t length_bits)
635 {
636   gdb_assert (offset1_bits % TARGET_CHAR_BIT
637 	      == offset2_bits % TARGET_CHAR_BIT);
638 
639   if (offset1_bits % TARGET_CHAR_BIT != 0)
640     {
641       size_t bits;
642       gdb_byte mask, b1, b2;
643 
644       /* The offset from the base pointers PTR1 and PTR2 is not a complete
645 	 number of bytes.  A number of bits up to either the next exact
646 	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
647 	 compared.  */
648       bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
649       gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
650       mask = (1 << bits) - 1;
651 
652       if (length_bits < bits)
653 	{
654 	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
655 	  bits = length_bits;
656 	}
657 
658       /* Now load the two bytes and mask off the bits we care about.  */
659       b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
660       b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
661 
662       if (b1 != b2)
663 	return 1;
664 
665       /* Now update the length and offsets to take account of the bits
666 	 we've just compared.  */
667       length_bits -= bits;
668       offset1_bits += bits;
669       offset2_bits += bits;
670     }
671 
672   if (length_bits % TARGET_CHAR_BIT != 0)
673     {
674       size_t bits;
675       size_t o1, o2;
676       gdb_byte mask, b1, b2;
677 
678       /* The length is not an exact number of bytes.  After the previous
679 	 IF.. block then the offsets are byte aligned, or the
680 	 length is zero (in which case this code is not reached).  Compare
681 	 a number of bits at the end of the region, starting from an exact
682 	 byte boundary.  */
683       bits = length_bits % TARGET_CHAR_BIT;
684       o1 = offset1_bits + length_bits - bits;
685       o2 = offset2_bits + length_bits - bits;
686 
687       gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
688       mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
689 
690       gdb_assert (o1 % TARGET_CHAR_BIT == 0);
691       gdb_assert (o2 % TARGET_CHAR_BIT == 0);
692 
693       b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
694       b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
695 
696       if (b1 != b2)
697 	return 1;
698 
699       length_bits -= bits;
700     }
701 
702   if (length_bits > 0)
703     {
704       /* We've now taken care of any stray "bits" at the start, or end of
705 	 the region to compare, the remainder can be covered with a simple
706 	 memcmp.  */
707       gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
708       gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
709       gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
710 
711       return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
712 		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
713 		     length_bits / TARGET_CHAR_BIT);
714     }
715 
716   /* Length is zero, regions match.  */
717   return 0;
718 }
719 
720 /* Helper struct for find_first_range_overlap_and_match and
721    value_contents_bits_eq.  Keep track of which slot of a given ranges
722    vector have we last looked at.  */
723 
724 struct ranges_and_idx
725 {
726   /* The ranges.  */
727   VEC(range_s) *ranges;
728 
729   /* The range we've last found in RANGES.  Given ranges are sorted,
730      we can start the next lookup here.  */
731   int idx;
732 };
733 
734 /* Helper function for value_contents_bits_eq.  Compare LENGTH bits of
735    RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
736    ranges starting at OFFSET2 bits.  Return true if the ranges match
737    and fill in *L and *H with the overlapping window relative to
738    (both) OFFSET1 or OFFSET2.  */
739 
740 static int
741 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
742 				    struct ranges_and_idx *rp2,
743 				    int offset1, int offset2,
744 				    int length, ULONGEST *l, ULONGEST *h)
745 {
746   rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
747 				       offset1, length);
748   rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
749 				       offset2, length);
750 
751   if (rp1->idx == -1 && rp2->idx == -1)
752     {
753       *l = length;
754       *h = length;
755       return 1;
756     }
757   else if (rp1->idx == -1 || rp2->idx == -1)
758     return 0;
759   else
760     {
761       range_s *r1, *r2;
762       ULONGEST l1, h1;
763       ULONGEST l2, h2;
764 
765       r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
766       r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
767 
768       /* Get the unavailable windows intersected by the incoming
769 	 ranges.  The first and last ranges that overlap the argument
770 	 range may be wider than said incoming arguments ranges.  */
771       l1 = max (offset1, r1->offset);
772       h1 = min (offset1 + length, r1->offset + r1->length);
773 
774       l2 = max (offset2, r2->offset);
775       h2 = min (offset2 + length, offset2 + r2->length);
776 
777       /* Make them relative to the respective start offsets, so we can
778 	 compare them for equality.  */
779       l1 -= offset1;
780       h1 -= offset1;
781 
782       l2 -= offset2;
783       h2 -= offset2;
784 
785       /* Different ranges, no match.  */
786       if (l1 != l2 || h1 != h2)
787 	return 0;
788 
789       *h = h1;
790       *l = l1;
791       return 1;
792     }
793 }
794 
795 /* Helper function for value_contents_eq.  The only difference is that
796    this function is bit rather than byte based.
797 
798    Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
799    with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
800    Return true if the available bits match.  */
801 
802 static int
803 value_contents_bits_eq (const struct value *val1, int offset1,
804 			const struct value *val2, int offset2,
805 			int length)
806 {
807   /* Each array element corresponds to a ranges source (unavailable,
808      optimized out).  '1' is for VAL1, '2' for VAL2.  */
809   struct ranges_and_idx rp1[2], rp2[2];
810 
811   /* See function description in value.h.  */
812   gdb_assert (!val1->lazy && !val2->lazy);
813 
814   /* We shouldn't be trying to compare past the end of the values.  */
815   gdb_assert (offset1 + length
816 	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
817   gdb_assert (offset2 + length
818 	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
819 
820   memset (&rp1, 0, sizeof (rp1));
821   memset (&rp2, 0, sizeof (rp2));
822   rp1[0].ranges = val1->unavailable;
823   rp2[0].ranges = val2->unavailable;
824   rp1[1].ranges = val1->optimized_out;
825   rp2[1].ranges = val2->optimized_out;
826 
827   while (length > 0)
828     {
829       ULONGEST l = 0, h = 0; /* init for gcc -Wall */
830       int i;
831 
832       for (i = 0; i < 2; i++)
833 	{
834 	  ULONGEST l_tmp, h_tmp;
835 
836 	  /* The contents only match equal if the invalid/unavailable
837 	     contents ranges match as well.  */
838 	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
839 						   offset1, offset2, length,
840 						   &l_tmp, &h_tmp))
841 	    return 0;
842 
843 	  /* We're interested in the lowest/first range found.  */
844 	  if (i == 0 || l_tmp < l)
845 	    {
846 	      l = l_tmp;
847 	      h = h_tmp;
848 	    }
849 	}
850 
851       /* Compare the available/valid contents.  */
852       if (memcmp_with_bit_offsets (val1->contents, offset1,
853 				   val2->contents, offset2, l) != 0)
854 	return 0;
855 
856       length -= h;
857       offset1 += h;
858       offset2 += h;
859     }
860 
861   return 1;
862 }
863 
864 int
865 value_contents_eq (const struct value *val1, int offset1,
866 		   const struct value *val2, int offset2,
867 		   int length)
868 {
869   return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
870 				 val2, offset2 * TARGET_CHAR_BIT,
871 				 length * TARGET_CHAR_BIT);
872 }
873 
874 /* Prototypes for local functions.  */
875 
876 static void show_values (char *, int);
877 
878 static void show_convenience (char *, int);
879 
880 
881 /* The value-history records all the values printed
882    by print commands during this session.  Each chunk
883    records 60 consecutive values.  The first chunk on
884    the chain records the most recent values.
885    The total number of values is in value_history_count.  */
886 
887 #define VALUE_HISTORY_CHUNK 60
888 
889 struct value_history_chunk
890   {
891     struct value_history_chunk *next;
892     struct value *values[VALUE_HISTORY_CHUNK];
893   };
894 
895 /* Chain of chunks now in use.  */
896 
897 static struct value_history_chunk *value_history_chain;
898 
899 static int value_history_count;	/* Abs number of last entry stored.  */
900 
901 
902 /* List of all value objects currently allocated
903    (except for those released by calls to release_value)
904    This is so they can be freed after each command.  */
905 
906 static struct value *all_values;
907 
908 /* Allocate a lazy value for type TYPE.  Its actual content is
909    "lazily" allocated too: the content field of the return value is
910    NULL; it will be allocated when it is fetched from the target.  */
911 
912 struct value *
913 allocate_value_lazy (struct type *type)
914 {
915   struct value *val;
916 
917   /* Call check_typedef on our type to make sure that, if TYPE
918      is a TYPE_CODE_TYPEDEF, its length is set to the length
919      of the target type instead of zero.  However, we do not
920      replace the typedef type by the target type, because we want
921      to keep the typedef in order to be able to set the VAL's type
922      description correctly.  */
923   check_typedef (type);
924 
925   val = (struct value *) xzalloc (sizeof (struct value));
926   val->contents = NULL;
927   val->next = all_values;
928   all_values = val;
929   val->type = type;
930   val->enclosing_type = type;
931   VALUE_LVAL (val) = not_lval;
932   val->location.address = 0;
933   VALUE_FRAME_ID (val) = null_frame_id;
934   val->offset = 0;
935   val->bitpos = 0;
936   val->bitsize = 0;
937   VALUE_REGNUM (val) = -1;
938   val->lazy = 1;
939   val->embedded_offset = 0;
940   val->pointed_to_offset = 0;
941   val->modifiable = 1;
942   val->initialized = 1;  /* Default to initialized.  */
943 
944   /* Values start out on the all_values chain.  */
945   val->reference_count = 1;
946 
947   return val;
948 }
949 
950 /* Allocate the contents of VAL if it has not been allocated yet.  */
951 
952 static void
953 allocate_value_contents (struct value *val)
954 {
955   if (!val->contents)
956     val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
957 }
958 
959 /* Allocate a  value  and its contents for type TYPE.  */
960 
961 struct value *
962 allocate_value (struct type *type)
963 {
964   struct value *val = allocate_value_lazy (type);
965 
966   allocate_value_contents (val);
967   val->lazy = 0;
968   return val;
969 }
970 
971 /* Allocate a  value  that has the correct length
972    for COUNT repetitions of type TYPE.  */
973 
974 struct value *
975 allocate_repeat_value (struct type *type, int count)
976 {
977   int low_bound = current_language->string_lower_bound;		/* ??? */
978   /* FIXME-type-allocation: need a way to free this type when we are
979      done with it.  */
980   struct type *array_type
981     = lookup_array_range_type (type, low_bound, count + low_bound - 1);
982 
983   return allocate_value (array_type);
984 }
985 
986 struct value *
987 allocate_computed_value (struct type *type,
988                          const struct lval_funcs *funcs,
989                          void *closure)
990 {
991   struct value *v = allocate_value_lazy (type);
992 
993   VALUE_LVAL (v) = lval_computed;
994   v->location.computed.funcs = funcs;
995   v->location.computed.closure = closure;
996 
997   return v;
998 }
999 
1000 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  */
1001 
1002 struct value *
1003 allocate_optimized_out_value (struct type *type)
1004 {
1005   struct value *retval = allocate_value_lazy (type);
1006 
1007   mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1008   set_value_lazy (retval, 0);
1009   return retval;
1010 }
1011 
1012 /* Accessor methods.  */
1013 
1014 struct value *
1015 value_next (struct value *value)
1016 {
1017   return value->next;
1018 }
1019 
1020 struct type *
1021 value_type (const struct value *value)
1022 {
1023   return value->type;
1024 }
1025 void
1026 deprecated_set_value_type (struct value *value, struct type *type)
1027 {
1028   value->type = type;
1029 }
1030 
1031 int
1032 value_offset (const struct value *value)
1033 {
1034   return value->offset;
1035 }
1036 void
1037 set_value_offset (struct value *value, int offset)
1038 {
1039   value->offset = offset;
1040 }
1041 
1042 int
1043 value_bitpos (const struct value *value)
1044 {
1045   return value->bitpos;
1046 }
1047 void
1048 set_value_bitpos (struct value *value, int bit)
1049 {
1050   value->bitpos = bit;
1051 }
1052 
1053 int
1054 value_bitsize (const struct value *value)
1055 {
1056   return value->bitsize;
1057 }
1058 void
1059 set_value_bitsize (struct value *value, int bit)
1060 {
1061   value->bitsize = bit;
1062 }
1063 
1064 struct value *
1065 value_parent (struct value *value)
1066 {
1067   return value->parent;
1068 }
1069 
1070 /* See value.h.  */
1071 
1072 void
1073 set_value_parent (struct value *value, struct value *parent)
1074 {
1075   struct value *old = value->parent;
1076 
1077   value->parent = parent;
1078   if (parent != NULL)
1079     value_incref (parent);
1080   value_free (old);
1081 }
1082 
1083 gdb_byte *
1084 value_contents_raw (struct value *value)
1085 {
1086   allocate_value_contents (value);
1087   return value->contents + value->embedded_offset;
1088 }
1089 
1090 gdb_byte *
1091 value_contents_all_raw (struct value *value)
1092 {
1093   allocate_value_contents (value);
1094   return value->contents;
1095 }
1096 
1097 struct type *
1098 value_enclosing_type (struct value *value)
1099 {
1100   return value->enclosing_type;
1101 }
1102 
1103 /* Look at value.h for description.  */
1104 
1105 struct type *
1106 value_actual_type (struct value *value, int resolve_simple_types,
1107 		   int *real_type_found)
1108 {
1109   struct value_print_options opts;
1110   struct type *result;
1111 
1112   get_user_print_options (&opts);
1113 
1114   if (real_type_found)
1115     *real_type_found = 0;
1116   result = value_type (value);
1117   if (opts.objectprint)
1118     {
1119       /* If result's target type is TYPE_CODE_STRUCT, proceed to
1120 	 fetch its rtti type.  */
1121       if ((TYPE_CODE (result) == TYPE_CODE_PTR
1122 	  || TYPE_CODE (result) == TYPE_CODE_REF)
1123 	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1124 	     == TYPE_CODE_STRUCT)
1125         {
1126           struct type *real_type;
1127 
1128           real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1129           if (real_type)
1130             {
1131               if (real_type_found)
1132                 *real_type_found = 1;
1133               result = real_type;
1134             }
1135         }
1136       else if (resolve_simple_types)
1137         {
1138           if (real_type_found)
1139             *real_type_found = 1;
1140           result = value_enclosing_type (value);
1141         }
1142     }
1143 
1144   return result;
1145 }
1146 
1147 void
1148 error_value_optimized_out (void)
1149 {
1150   error (_("value has been optimized out"));
1151 }
1152 
1153 static void
1154 require_not_optimized_out (const struct value *value)
1155 {
1156   if (!VEC_empty (range_s, value->optimized_out))
1157     {
1158       if (value->lval == lval_register)
1159 	error (_("register has not been saved in frame"));
1160       else
1161 	error_value_optimized_out ();
1162     }
1163 }
1164 
1165 static void
1166 require_available (const struct value *value)
1167 {
1168   if (!VEC_empty (range_s, value->unavailable))
1169     throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1170 }
1171 
1172 const gdb_byte *
1173 value_contents_for_printing (struct value *value)
1174 {
1175   if (value->lazy)
1176     value_fetch_lazy (value);
1177   return value->contents;
1178 }
1179 
1180 const gdb_byte *
1181 value_contents_for_printing_const (const struct value *value)
1182 {
1183   gdb_assert (!value->lazy);
1184   return value->contents;
1185 }
1186 
1187 const gdb_byte *
1188 value_contents_all (struct value *value)
1189 {
1190   const gdb_byte *result = value_contents_for_printing (value);
1191   require_not_optimized_out (value);
1192   require_available (value);
1193   return result;
1194 }
1195 
1196 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1197    SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted.  */
1198 
1199 static void
1200 ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
1201 		      VEC (range_s) *src_range, int src_bit_offset,
1202 		      int bit_length)
1203 {
1204   range_s *r;
1205   int i;
1206 
1207   for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
1208     {
1209       ULONGEST h, l;
1210 
1211       l = max (r->offset, src_bit_offset);
1212       h = min (r->offset + r->length, src_bit_offset + bit_length);
1213 
1214       if (l < h)
1215 	insert_into_bit_range_vector (dst_range,
1216 				      dst_bit_offset + (l - src_bit_offset),
1217 				      h - l);
1218     }
1219 }
1220 
1221 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1222    SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  */
1223 
1224 static void
1225 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1226 			    const struct value *src, int src_bit_offset,
1227 			    int bit_length)
1228 {
1229   ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1230 			src->unavailable, src_bit_offset,
1231 			bit_length);
1232   ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1233 			src->optimized_out, src_bit_offset,
1234 			bit_length);
1235 }
1236 
1237 /* Copy LENGTH bytes of SRC value's (all) contents
1238    (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1239    contents, starting at DST_OFFSET.  If unavailable contents are
1240    being copied from SRC, the corresponding DST contents are marked
1241    unavailable accordingly.  Neither DST nor SRC may be lazy
1242    values.
1243 
1244    It is assumed the contents of DST in the [DST_OFFSET,
1245    DST_OFFSET+LENGTH) range are wholly available.  */
1246 
1247 void
1248 value_contents_copy_raw (struct value *dst, int dst_offset,
1249 			 struct value *src, int src_offset, int length)
1250 {
1251   range_s *r;
1252   int i;
1253   int src_bit_offset, dst_bit_offset, bit_length;
1254 
1255   /* A lazy DST would make that this copy operation useless, since as
1256      soon as DST's contents were un-lazied (by a later value_contents
1257      call, say), the contents would be overwritten.  A lazy SRC would
1258      mean we'd be copying garbage.  */
1259   gdb_assert (!dst->lazy && !src->lazy);
1260 
1261   /* The overwritten DST range gets unavailability ORed in, not
1262      replaced.  Make sure to remember to implement replacing if it
1263      turns out actually necessary.  */
1264   gdb_assert (value_bytes_available (dst, dst_offset, length));
1265   gdb_assert (!value_bits_any_optimized_out (dst,
1266 					     TARGET_CHAR_BIT * dst_offset,
1267 					     TARGET_CHAR_BIT * length));
1268 
1269   /* Copy the data.  */
1270   memcpy (value_contents_all_raw (dst) + dst_offset,
1271 	  value_contents_all_raw (src) + src_offset,
1272 	  length);
1273 
1274   /* Copy the meta-data, adjusted.  */
1275   src_bit_offset = src_offset * TARGET_CHAR_BIT;
1276   dst_bit_offset = dst_offset * TARGET_CHAR_BIT;
1277   bit_length = length * TARGET_CHAR_BIT;
1278 
1279   value_ranges_copy_adjusted (dst, dst_bit_offset,
1280 			      src, src_bit_offset,
1281 			      bit_length);
1282 }
1283 
1284 /* Copy LENGTH bytes of SRC value's (all) contents
1285    (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1286    (all) contents, starting at DST_OFFSET.  If unavailable contents
1287    are being copied from SRC, the corresponding DST contents are
1288    marked unavailable accordingly.  DST must not be lazy.  If SRC is
1289    lazy, it will be fetched now.
1290 
1291    It is assumed the contents of DST in the [DST_OFFSET,
1292    DST_OFFSET+LENGTH) range are wholly available.  */
1293 
1294 void
1295 value_contents_copy (struct value *dst, int dst_offset,
1296 		     struct value *src, int src_offset, int length)
1297 {
1298   if (src->lazy)
1299     value_fetch_lazy (src);
1300 
1301   value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1302 }
1303 
1304 int
1305 value_lazy (struct value *value)
1306 {
1307   return value->lazy;
1308 }
1309 
1310 void
1311 set_value_lazy (struct value *value, int val)
1312 {
1313   value->lazy = val;
1314 }
1315 
1316 int
1317 value_stack (struct value *value)
1318 {
1319   return value->stack;
1320 }
1321 
1322 void
1323 set_value_stack (struct value *value, int val)
1324 {
1325   value->stack = val;
1326 }
1327 
1328 const gdb_byte *
1329 value_contents (struct value *value)
1330 {
1331   const gdb_byte *result = value_contents_writeable (value);
1332   require_not_optimized_out (value);
1333   require_available (value);
1334   return result;
1335 }
1336 
1337 gdb_byte *
1338 value_contents_writeable (struct value *value)
1339 {
1340   if (value->lazy)
1341     value_fetch_lazy (value);
1342   return value_contents_raw (value);
1343 }
1344 
1345 int
1346 value_optimized_out (struct value *value)
1347 {
1348   /* We can only know if a value is optimized out once we have tried to
1349      fetch it.  */
1350   if (VEC_empty (range_s, value->optimized_out) && value->lazy)
1351     value_fetch_lazy (value);
1352 
1353   return !VEC_empty (range_s, value->optimized_out);
1354 }
1355 
1356 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1357    the following LENGTH bytes.  */
1358 
1359 void
1360 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1361 {
1362   mark_value_bits_optimized_out (value,
1363 				 offset * TARGET_CHAR_BIT,
1364 				 length * TARGET_CHAR_BIT);
1365 }
1366 
1367 /* See value.h.  */
1368 
1369 void
1370 mark_value_bits_optimized_out (struct value *value, int offset, int length)
1371 {
1372   insert_into_bit_range_vector (&value->optimized_out, offset, length);
1373 }
1374 
1375 int
1376 value_bits_synthetic_pointer (const struct value *value,
1377 			      int offset, int length)
1378 {
1379   if (value->lval != lval_computed
1380       || !value->location.computed.funcs->check_synthetic_pointer)
1381     return 0;
1382   return value->location.computed.funcs->check_synthetic_pointer (value,
1383 								  offset,
1384 								  length);
1385 }
1386 
1387 int
1388 value_embedded_offset (struct value *value)
1389 {
1390   return value->embedded_offset;
1391 }
1392 
1393 void
1394 set_value_embedded_offset (struct value *value, int val)
1395 {
1396   value->embedded_offset = val;
1397 }
1398 
1399 int
1400 value_pointed_to_offset (struct value *value)
1401 {
1402   return value->pointed_to_offset;
1403 }
1404 
1405 void
1406 set_value_pointed_to_offset (struct value *value, int val)
1407 {
1408   value->pointed_to_offset = val;
1409 }
1410 
1411 const struct lval_funcs *
1412 value_computed_funcs (const struct value *v)
1413 {
1414   gdb_assert (value_lval_const (v) == lval_computed);
1415 
1416   return v->location.computed.funcs;
1417 }
1418 
1419 void *
1420 value_computed_closure (const struct value *v)
1421 {
1422   gdb_assert (v->lval == lval_computed);
1423 
1424   return v->location.computed.closure;
1425 }
1426 
1427 enum lval_type *
1428 deprecated_value_lval_hack (struct value *value)
1429 {
1430   return &value->lval;
1431 }
1432 
1433 enum lval_type
1434 value_lval_const (const struct value *value)
1435 {
1436   return value->lval;
1437 }
1438 
1439 CORE_ADDR
1440 value_address (const struct value *value)
1441 {
1442   if (value->lval == lval_internalvar
1443       || value->lval == lval_internalvar_component
1444       || value->lval == lval_xcallable)
1445     return 0;
1446   if (value->parent != NULL)
1447     return value_address (value->parent) + value->offset;
1448   else
1449     return value->location.address + value->offset;
1450 }
1451 
1452 CORE_ADDR
1453 value_raw_address (struct value *value)
1454 {
1455   if (value->lval == lval_internalvar
1456       || value->lval == lval_internalvar_component
1457       || value->lval == lval_xcallable)
1458     return 0;
1459   return value->location.address;
1460 }
1461 
1462 void
1463 set_value_address (struct value *value, CORE_ADDR addr)
1464 {
1465   gdb_assert (value->lval != lval_internalvar
1466 	      && value->lval != lval_internalvar_component
1467 	      && value->lval != lval_xcallable);
1468   value->location.address = addr;
1469 }
1470 
1471 struct internalvar **
1472 deprecated_value_internalvar_hack (struct value *value)
1473 {
1474   return &value->location.internalvar;
1475 }
1476 
1477 struct frame_id *
1478 deprecated_value_frame_id_hack (struct value *value)
1479 {
1480   return &value->frame_id;
1481 }
1482 
1483 short *
1484 deprecated_value_regnum_hack (struct value *value)
1485 {
1486   return &value->regnum;
1487 }
1488 
1489 int
1490 deprecated_value_modifiable (struct value *value)
1491 {
1492   return value->modifiable;
1493 }
1494 
1495 /* Return a mark in the value chain.  All values allocated after the
1496    mark is obtained (except for those released) are subject to being freed
1497    if a subsequent value_free_to_mark is passed the mark.  */
1498 struct value *
1499 value_mark (void)
1500 {
1501   return all_values;
1502 }
1503 
1504 /* Take a reference to VAL.  VAL will not be deallocated until all
1505    references are released.  */
1506 
1507 void
1508 value_incref (struct value *val)
1509 {
1510   val->reference_count++;
1511 }
1512 
1513 /* Release a reference to VAL, which was acquired with value_incref.
1514    This function is also called to deallocate values from the value
1515    chain.  */
1516 
1517 void
1518 value_free (struct value *val)
1519 {
1520   if (val)
1521     {
1522       gdb_assert (val->reference_count > 0);
1523       val->reference_count--;
1524       if (val->reference_count > 0)
1525 	return;
1526 
1527       /* If there's an associated parent value, drop our reference to
1528 	 it.  */
1529       if (val->parent != NULL)
1530 	value_free (val->parent);
1531 
1532       if (VALUE_LVAL (val) == lval_computed)
1533 	{
1534 	  const struct lval_funcs *funcs = val->location.computed.funcs;
1535 
1536 	  if (funcs->free_closure)
1537 	    funcs->free_closure (val);
1538 	}
1539       else if (VALUE_LVAL (val) == lval_xcallable)
1540 	  free_xmethod_worker (val->location.xm_worker);
1541 
1542       xfree (val->contents);
1543       VEC_free (range_s, val->unavailable);
1544     }
1545   xfree (val);
1546 }
1547 
1548 /* Free all values allocated since MARK was obtained by value_mark
1549    (except for those released).  */
1550 void
1551 value_free_to_mark (struct value *mark)
1552 {
1553   struct value *val;
1554   struct value *next;
1555 
1556   for (val = all_values; val && val != mark; val = next)
1557     {
1558       next = val->next;
1559       val->released = 1;
1560       value_free (val);
1561     }
1562   all_values = val;
1563 }
1564 
1565 /* Free all the values that have been allocated (except for those released).
1566    Call after each command, successful or not.
1567    In practice this is called before each command, which is sufficient.  */
1568 
1569 void
1570 free_all_values (void)
1571 {
1572   struct value *val;
1573   struct value *next;
1574 
1575   for (val = all_values; val; val = next)
1576     {
1577       next = val->next;
1578       val->released = 1;
1579       value_free (val);
1580     }
1581 
1582   all_values = 0;
1583 }
1584 
1585 /* Frees all the elements in a chain of values.  */
1586 
1587 void
1588 free_value_chain (struct value *v)
1589 {
1590   struct value *next;
1591 
1592   for (; v; v = next)
1593     {
1594       next = value_next (v);
1595       value_free (v);
1596     }
1597 }
1598 
1599 /* Remove VAL from the chain all_values
1600    so it will not be freed automatically.  */
1601 
1602 void
1603 release_value (struct value *val)
1604 {
1605   struct value *v;
1606 
1607   if (all_values == val)
1608     {
1609       all_values = val->next;
1610       val->next = NULL;
1611       val->released = 1;
1612       return;
1613     }
1614 
1615   for (v = all_values; v; v = v->next)
1616     {
1617       if (v->next == val)
1618 	{
1619 	  v->next = val->next;
1620 	  val->next = NULL;
1621 	  val->released = 1;
1622 	  break;
1623 	}
1624     }
1625 }
1626 
1627 /* If the value is not already released, release it.
1628    If the value is already released, increment its reference count.
1629    That is, this function ensures that the value is released from the
1630    value chain and that the caller owns a reference to it.  */
1631 
1632 void
1633 release_value_or_incref (struct value *val)
1634 {
1635   if (val->released)
1636     value_incref (val);
1637   else
1638     release_value (val);
1639 }
1640 
1641 /* Release all values up to mark  */
1642 struct value *
1643 value_release_to_mark (struct value *mark)
1644 {
1645   struct value *val;
1646   struct value *next;
1647 
1648   for (val = next = all_values; next; next = next->next)
1649     {
1650       if (next->next == mark)
1651 	{
1652 	  all_values = next->next;
1653 	  next->next = NULL;
1654 	  return val;
1655 	}
1656       next->released = 1;
1657     }
1658   all_values = 0;
1659   return val;
1660 }
1661 
1662 /* Return a copy of the value ARG.
1663    It contains the same contents, for same memory address,
1664    but it's a different block of storage.  */
1665 
1666 struct value *
1667 value_copy (struct value *arg)
1668 {
1669   struct type *encl_type = value_enclosing_type (arg);
1670   struct value *val;
1671 
1672   if (value_lazy (arg))
1673     val = allocate_value_lazy (encl_type);
1674   else
1675     val = allocate_value (encl_type);
1676   val->type = arg->type;
1677   VALUE_LVAL (val) = VALUE_LVAL (arg);
1678   val->location = arg->location;
1679   val->offset = arg->offset;
1680   val->bitpos = arg->bitpos;
1681   val->bitsize = arg->bitsize;
1682   VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
1683   VALUE_REGNUM (val) = VALUE_REGNUM (arg);
1684   val->lazy = arg->lazy;
1685   val->embedded_offset = value_embedded_offset (arg);
1686   val->pointed_to_offset = arg->pointed_to_offset;
1687   val->modifiable = arg->modifiable;
1688   if (!value_lazy (val))
1689     {
1690       memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1691 	      TYPE_LENGTH (value_enclosing_type (arg)));
1692 
1693     }
1694   val->unavailable = VEC_copy (range_s, arg->unavailable);
1695   val->optimized_out = VEC_copy (range_s, arg->optimized_out);
1696   set_value_parent (val, arg->parent);
1697   if (VALUE_LVAL (val) == lval_computed)
1698     {
1699       const struct lval_funcs *funcs = val->location.computed.funcs;
1700 
1701       if (funcs->copy_closure)
1702         val->location.computed.closure = funcs->copy_closure (val);
1703     }
1704   return val;
1705 }
1706 
1707 /* Return a version of ARG that is non-lvalue.  */
1708 
1709 struct value *
1710 value_non_lval (struct value *arg)
1711 {
1712   if (VALUE_LVAL (arg) != not_lval)
1713     {
1714       struct type *enc_type = value_enclosing_type (arg);
1715       struct value *val = allocate_value (enc_type);
1716 
1717       memcpy (value_contents_all_raw (val), value_contents_all (arg),
1718 	      TYPE_LENGTH (enc_type));
1719       val->type = arg->type;
1720       set_value_embedded_offset (val, value_embedded_offset (arg));
1721       set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1722       return val;
1723     }
1724    return arg;
1725 }
1726 
1727 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.  */
1728 
1729 void
1730 value_force_lval (struct value *v, CORE_ADDR addr)
1731 {
1732   gdb_assert (VALUE_LVAL (v) == not_lval);
1733 
1734   write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1735   v->lval = lval_memory;
1736   v->location.address = addr;
1737 }
1738 
1739 void
1740 set_value_component_location (struct value *component,
1741 			      const struct value *whole)
1742 {
1743   gdb_assert (whole->lval != lval_xcallable);
1744 
1745   if (whole->lval == lval_internalvar)
1746     VALUE_LVAL (component) = lval_internalvar_component;
1747   else
1748     VALUE_LVAL (component) = whole->lval;
1749 
1750   component->location = whole->location;
1751   if (whole->lval == lval_computed)
1752     {
1753       const struct lval_funcs *funcs = whole->location.computed.funcs;
1754 
1755       if (funcs->copy_closure)
1756         component->location.computed.closure = funcs->copy_closure (whole);
1757     }
1758 }
1759 
1760 
1761 /* Access to the value history.  */
1762 
1763 /* Record a new value in the value history.
1764    Returns the absolute history index of the entry.  */
1765 
1766 int
1767 record_latest_value (struct value *val)
1768 {
1769   int i;
1770 
1771   /* We don't want this value to have anything to do with the inferior anymore.
1772      In particular, "set $1 = 50" should not affect the variable from which
1773      the value was taken, and fast watchpoints should be able to assume that
1774      a value on the value history never changes.  */
1775   if (value_lazy (val))
1776     value_fetch_lazy (val);
1777   /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1778      from.  This is a bit dubious, because then *&$1 does not just return $1
1779      but the current contents of that location.  c'est la vie...  */
1780   val->modifiable = 0;
1781 
1782   /* The value may have already been released, in which case we're adding a
1783      new reference for its entry in the history.  That is why we call
1784      release_value_or_incref here instead of release_value.  */
1785   release_value_or_incref (val);
1786 
1787   /* Here we treat value_history_count as origin-zero
1788      and applying to the value being stored now.  */
1789 
1790   i = value_history_count % VALUE_HISTORY_CHUNK;
1791   if (i == 0)
1792     {
1793       struct value_history_chunk *new
1794 	= (struct value_history_chunk *)
1795 
1796       xmalloc (sizeof (struct value_history_chunk));
1797       memset (new->values, 0, sizeof new->values);
1798       new->next = value_history_chain;
1799       value_history_chain = new;
1800     }
1801 
1802   value_history_chain->values[i] = val;
1803 
1804   /* Now we regard value_history_count as origin-one
1805      and applying to the value just stored.  */
1806 
1807   return ++value_history_count;
1808 }
1809 
1810 /* Return a copy of the value in the history with sequence number NUM.  */
1811 
1812 struct value *
1813 access_value_history (int num)
1814 {
1815   struct value_history_chunk *chunk;
1816   int i;
1817   int absnum = num;
1818 
1819   if (absnum <= 0)
1820     absnum += value_history_count;
1821 
1822   if (absnum <= 0)
1823     {
1824       if (num == 0)
1825 	error (_("The history is empty."));
1826       else if (num == 1)
1827 	error (_("There is only one value in the history."));
1828       else
1829 	error (_("History does not go back to $$%d."), -num);
1830     }
1831   if (absnum > value_history_count)
1832     error (_("History has not yet reached $%d."), absnum);
1833 
1834   absnum--;
1835 
1836   /* Now absnum is always absolute and origin zero.  */
1837 
1838   chunk = value_history_chain;
1839   for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
1840 	 - absnum / VALUE_HISTORY_CHUNK;
1841        i > 0; i--)
1842     chunk = chunk->next;
1843 
1844   return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
1845 }
1846 
1847 static void
1848 show_values (char *num_exp, int from_tty)
1849 {
1850   int i;
1851   struct value *val;
1852   static int num = 1;
1853 
1854   if (num_exp)
1855     {
1856       /* "show values +" should print from the stored position.
1857          "show values <exp>" should print around value number <exp>.  */
1858       if (num_exp[0] != '+' || num_exp[1] != '\0')
1859 	num = parse_and_eval_long (num_exp) - 5;
1860     }
1861   else
1862     {
1863       /* "show values" means print the last 10 values.  */
1864       num = value_history_count - 9;
1865     }
1866 
1867   if (num <= 0)
1868     num = 1;
1869 
1870   for (i = num; i < num + 10 && i <= value_history_count; i++)
1871     {
1872       struct value_print_options opts;
1873 
1874       val = access_value_history (i);
1875       printf_filtered (("$%d = "), i);
1876       get_user_print_options (&opts);
1877       value_print (val, gdb_stdout, &opts);
1878       printf_filtered (("\n"));
1879     }
1880 
1881   /* The next "show values +" should start after what we just printed.  */
1882   num += 10;
1883 
1884   /* Hitting just return after this command should do the same thing as
1885      "show values +".  If num_exp is null, this is unnecessary, since
1886      "show values +" is not useful after "show values".  */
1887   if (from_tty && num_exp)
1888     {
1889       num_exp[0] = '+';
1890       num_exp[1] = '\0';
1891     }
1892 }
1893 
1894 /* Internal variables.  These are variables within the debugger
1895    that hold values assigned by debugger commands.
1896    The user refers to them with a '$' prefix
1897    that does not appear in the variable names stored internally.  */
1898 
1899 struct internalvar
1900 {
1901   struct internalvar *next;
1902   char *name;
1903 
1904   /* We support various different kinds of content of an internal variable.
1905      enum internalvar_kind specifies the kind, and union internalvar_data
1906      provides the data associated with this particular kind.  */
1907 
1908   enum internalvar_kind
1909     {
1910       /* The internal variable is empty.  */
1911       INTERNALVAR_VOID,
1912 
1913       /* The value of the internal variable is provided directly as
1914 	 a GDB value object.  */
1915       INTERNALVAR_VALUE,
1916 
1917       /* A fresh value is computed via a call-back routine on every
1918 	 access to the internal variable.  */
1919       INTERNALVAR_MAKE_VALUE,
1920 
1921       /* The internal variable holds a GDB internal convenience function.  */
1922       INTERNALVAR_FUNCTION,
1923 
1924       /* The variable holds an integer value.  */
1925       INTERNALVAR_INTEGER,
1926 
1927       /* The variable holds a GDB-provided string.  */
1928       INTERNALVAR_STRING,
1929 
1930     } kind;
1931 
1932   union internalvar_data
1933     {
1934       /* A value object used with INTERNALVAR_VALUE.  */
1935       struct value *value;
1936 
1937       /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
1938       struct
1939         {
1940 	  /* The functions to call.  */
1941 	  const struct internalvar_funcs *functions;
1942 
1943 	  /* The function's user-data.  */
1944 	  void *data;
1945         } make_value;
1946 
1947       /* The internal function used with INTERNALVAR_FUNCTION.  */
1948       struct
1949 	{
1950 	  struct internal_function *function;
1951 	  /* True if this is the canonical name for the function.  */
1952 	  int canonical;
1953 	} fn;
1954 
1955       /* An integer value used with INTERNALVAR_INTEGER.  */
1956       struct
1957         {
1958 	  /* If type is non-NULL, it will be used as the type to generate
1959 	     a value for this internal variable.  If type is NULL, a default
1960 	     integer type for the architecture is used.  */
1961 	  struct type *type;
1962 	  LONGEST val;
1963         } integer;
1964 
1965       /* A string value used with INTERNALVAR_STRING.  */
1966       char *string;
1967     } u;
1968 };
1969 
1970 static struct internalvar *internalvars;
1971 
1972 /* If the variable does not already exist create it and give it the
1973    value given.  If no value is given then the default is zero.  */
1974 static void
1975 init_if_undefined_command (char* args, int from_tty)
1976 {
1977   struct internalvar* intvar;
1978 
1979   /* Parse the expression - this is taken from set_command().  */
1980   struct expression *expr = parse_expression (args);
1981   register struct cleanup *old_chain =
1982     make_cleanup (free_current_contents, &expr);
1983 
1984   /* Validate the expression.
1985      Was the expression an assignment?
1986      Or even an expression at all?  */
1987   if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1988     error (_("Init-if-undefined requires an assignment expression."));
1989 
1990   /* Extract the variable from the parsed expression.
1991      In the case of an assign the lvalue will be in elts[1] and elts[2].  */
1992   if (expr->elts[1].opcode != OP_INTERNALVAR)
1993     error (_("The first parameter to init-if-undefined "
1994 	     "should be a GDB variable."));
1995   intvar = expr->elts[2].internalvar;
1996 
1997   /* Only evaluate the expression if the lvalue is void.
1998      This may still fail if the expresssion is invalid.  */
1999   if (intvar->kind == INTERNALVAR_VOID)
2000     evaluate_expression (expr);
2001 
2002   do_cleanups (old_chain);
2003 }
2004 
2005 
2006 /* Look up an internal variable with name NAME.  NAME should not
2007    normally include a dollar sign.
2008 
2009    If the specified internal variable does not exist,
2010    the return value is NULL.  */
2011 
2012 struct internalvar *
2013 lookup_only_internalvar (const char *name)
2014 {
2015   struct internalvar *var;
2016 
2017   for (var = internalvars; var; var = var->next)
2018     if (strcmp (var->name, name) == 0)
2019       return var;
2020 
2021   return NULL;
2022 }
2023 
2024 /* Complete NAME by comparing it to the names of internal variables.
2025    Returns a vector of newly allocated strings, or NULL if no matches
2026    were found.  */
2027 
2028 VEC (char_ptr) *
2029 complete_internalvar (const char *name)
2030 {
2031   VEC (char_ptr) *result = NULL;
2032   struct internalvar *var;
2033   int len;
2034 
2035   len = strlen (name);
2036 
2037   for (var = internalvars; var; var = var->next)
2038     if (strncmp (var->name, name, len) == 0)
2039       {
2040 	char *r = xstrdup (var->name);
2041 
2042 	VEC_safe_push (char_ptr, result, r);
2043       }
2044 
2045   return result;
2046 }
2047 
2048 /* Create an internal variable with name NAME and with a void value.
2049    NAME should not normally include a dollar sign.  */
2050 
2051 struct internalvar *
2052 create_internalvar (const char *name)
2053 {
2054   struct internalvar *var;
2055 
2056   var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
2057   var->name = concat (name, (char *)NULL);
2058   var->kind = INTERNALVAR_VOID;
2059   var->next = internalvars;
2060   internalvars = var;
2061   return var;
2062 }
2063 
2064 /* Create an internal variable with name NAME and register FUN as the
2065    function that value_of_internalvar uses to create a value whenever
2066    this variable is referenced.  NAME should not normally include a
2067    dollar sign.  DATA is passed uninterpreted to FUN when it is
2068    called.  CLEANUP, if not NULL, is called when the internal variable
2069    is destroyed.  It is passed DATA as its only argument.  */
2070 
2071 struct internalvar *
2072 create_internalvar_type_lazy (const char *name,
2073 			      const struct internalvar_funcs *funcs,
2074 			      void *data)
2075 {
2076   struct internalvar *var = create_internalvar (name);
2077 
2078   var->kind = INTERNALVAR_MAKE_VALUE;
2079   var->u.make_value.functions = funcs;
2080   var->u.make_value.data = data;
2081   return var;
2082 }
2083 
2084 /* See documentation in value.h.  */
2085 
2086 int
2087 compile_internalvar_to_ax (struct internalvar *var,
2088 			   struct agent_expr *expr,
2089 			   struct axs_value *value)
2090 {
2091   if (var->kind != INTERNALVAR_MAKE_VALUE
2092       || var->u.make_value.functions->compile_to_ax == NULL)
2093     return 0;
2094 
2095   var->u.make_value.functions->compile_to_ax (var, expr, value,
2096 					      var->u.make_value.data);
2097   return 1;
2098 }
2099 
2100 /* Look up an internal variable with name NAME.  NAME should not
2101    normally include a dollar sign.
2102 
2103    If the specified internal variable does not exist,
2104    one is created, with a void value.  */
2105 
2106 struct internalvar *
2107 lookup_internalvar (const char *name)
2108 {
2109   struct internalvar *var;
2110 
2111   var = lookup_only_internalvar (name);
2112   if (var)
2113     return var;
2114 
2115   return create_internalvar (name);
2116 }
2117 
2118 /* Return current value of internal variable VAR.  For variables that
2119    are not inherently typed, use a value type appropriate for GDBARCH.  */
2120 
2121 struct value *
2122 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2123 {
2124   struct value *val;
2125   struct trace_state_variable *tsv;
2126 
2127   /* If there is a trace state variable of the same name, assume that
2128      is what we really want to see.  */
2129   tsv = find_trace_state_variable (var->name);
2130   if (tsv)
2131     {
2132       tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2133 								&(tsv->value));
2134       if (tsv->value_known)
2135 	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2136 				  tsv->value);
2137       else
2138 	val = allocate_value (builtin_type (gdbarch)->builtin_void);
2139       return val;
2140     }
2141 
2142   switch (var->kind)
2143     {
2144     case INTERNALVAR_VOID:
2145       val = allocate_value (builtin_type (gdbarch)->builtin_void);
2146       break;
2147 
2148     case INTERNALVAR_FUNCTION:
2149       val = allocate_value (builtin_type (gdbarch)->internal_fn);
2150       break;
2151 
2152     case INTERNALVAR_INTEGER:
2153       if (!var->u.integer.type)
2154 	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2155 				  var->u.integer.val);
2156       else
2157 	val = value_from_longest (var->u.integer.type, var->u.integer.val);
2158       break;
2159 
2160     case INTERNALVAR_STRING:
2161       val = value_cstring (var->u.string, strlen (var->u.string),
2162 			   builtin_type (gdbarch)->builtin_char);
2163       break;
2164 
2165     case INTERNALVAR_VALUE:
2166       val = value_copy (var->u.value);
2167       if (value_lazy (val))
2168 	value_fetch_lazy (val);
2169       break;
2170 
2171     case INTERNALVAR_MAKE_VALUE:
2172       val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2173 							var->u.make_value.data);
2174       break;
2175 
2176     default:
2177       internal_error (__FILE__, __LINE__, _("bad kind"));
2178     }
2179 
2180   /* Change the VALUE_LVAL to lval_internalvar so that future operations
2181      on this value go back to affect the original internal variable.
2182 
2183      Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2184      no underlying modifyable state in the internal variable.
2185 
2186      Likewise, if the variable's value is a computed lvalue, we want
2187      references to it to produce another computed lvalue, where
2188      references and assignments actually operate through the
2189      computed value's functions.
2190 
2191      This means that internal variables with computed values
2192      behave a little differently from other internal variables:
2193      assignments to them don't just replace the previous value
2194      altogether.  At the moment, this seems like the behavior we
2195      want.  */
2196 
2197   if (var->kind != INTERNALVAR_MAKE_VALUE
2198       && val->lval != lval_computed)
2199     {
2200       VALUE_LVAL (val) = lval_internalvar;
2201       VALUE_INTERNALVAR (val) = var;
2202     }
2203 
2204   return val;
2205 }
2206 
2207 int
2208 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2209 {
2210   if (var->kind == INTERNALVAR_INTEGER)
2211     {
2212       *result = var->u.integer.val;
2213       return 1;
2214     }
2215 
2216   if (var->kind == INTERNALVAR_VALUE)
2217     {
2218       struct type *type = check_typedef (value_type (var->u.value));
2219 
2220       if (TYPE_CODE (type) == TYPE_CODE_INT)
2221 	{
2222 	  *result = value_as_long (var->u.value);
2223 	  return 1;
2224 	}
2225     }
2226 
2227   return 0;
2228 }
2229 
2230 static int
2231 get_internalvar_function (struct internalvar *var,
2232 			  struct internal_function **result)
2233 {
2234   switch (var->kind)
2235     {
2236     case INTERNALVAR_FUNCTION:
2237       *result = var->u.fn.function;
2238       return 1;
2239 
2240     default:
2241       return 0;
2242     }
2243 }
2244 
2245 void
2246 set_internalvar_component (struct internalvar *var, int offset, int bitpos,
2247 			   int bitsize, struct value *newval)
2248 {
2249   gdb_byte *addr;
2250 
2251   switch (var->kind)
2252     {
2253     case INTERNALVAR_VALUE:
2254       addr = value_contents_writeable (var->u.value);
2255 
2256       if (bitsize)
2257 	modify_field (value_type (var->u.value), addr + offset,
2258 		      value_as_long (newval), bitpos, bitsize);
2259       else
2260 	memcpy (addr + offset, value_contents (newval),
2261 		TYPE_LENGTH (value_type (newval)));
2262       break;
2263 
2264     default:
2265       /* We can never get a component of any other kind.  */
2266       internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2267     }
2268 }
2269 
2270 void
2271 set_internalvar (struct internalvar *var, struct value *val)
2272 {
2273   enum internalvar_kind new_kind;
2274   union internalvar_data new_data = { 0 };
2275 
2276   if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2277     error (_("Cannot overwrite convenience function %s"), var->name);
2278 
2279   /* Prepare new contents.  */
2280   switch (TYPE_CODE (check_typedef (value_type (val))))
2281     {
2282     case TYPE_CODE_VOID:
2283       new_kind = INTERNALVAR_VOID;
2284       break;
2285 
2286     case TYPE_CODE_INTERNAL_FUNCTION:
2287       gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2288       new_kind = INTERNALVAR_FUNCTION;
2289       get_internalvar_function (VALUE_INTERNALVAR (val),
2290 				&new_data.fn.function);
2291       /* Copies created here are never canonical.  */
2292       break;
2293 
2294     default:
2295       new_kind = INTERNALVAR_VALUE;
2296       new_data.value = value_copy (val);
2297       new_data.value->modifiable = 1;
2298 
2299       /* Force the value to be fetched from the target now, to avoid problems
2300 	 later when this internalvar is referenced and the target is gone or
2301 	 has changed.  */
2302       if (value_lazy (new_data.value))
2303        value_fetch_lazy (new_data.value);
2304 
2305       /* Release the value from the value chain to prevent it from being
2306 	 deleted by free_all_values.  From here on this function should not
2307 	 call error () until new_data is installed into the var->u to avoid
2308 	 leaking memory.  */
2309       release_value (new_data.value);
2310       break;
2311     }
2312 
2313   /* Clean up old contents.  */
2314   clear_internalvar (var);
2315 
2316   /* Switch over.  */
2317   var->kind = new_kind;
2318   var->u = new_data;
2319   /* End code which must not call error().  */
2320 }
2321 
2322 void
2323 set_internalvar_integer (struct internalvar *var, LONGEST l)
2324 {
2325   /* Clean up old contents.  */
2326   clear_internalvar (var);
2327 
2328   var->kind = INTERNALVAR_INTEGER;
2329   var->u.integer.type = NULL;
2330   var->u.integer.val = l;
2331 }
2332 
2333 void
2334 set_internalvar_string (struct internalvar *var, const char *string)
2335 {
2336   /* Clean up old contents.  */
2337   clear_internalvar (var);
2338 
2339   var->kind = INTERNALVAR_STRING;
2340   var->u.string = xstrdup (string);
2341 }
2342 
2343 static void
2344 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2345 {
2346   /* Clean up old contents.  */
2347   clear_internalvar (var);
2348 
2349   var->kind = INTERNALVAR_FUNCTION;
2350   var->u.fn.function = f;
2351   var->u.fn.canonical = 1;
2352   /* Variables installed here are always the canonical version.  */
2353 }
2354 
2355 void
2356 clear_internalvar (struct internalvar *var)
2357 {
2358   /* Clean up old contents.  */
2359   switch (var->kind)
2360     {
2361     case INTERNALVAR_VALUE:
2362       value_free (var->u.value);
2363       break;
2364 
2365     case INTERNALVAR_STRING:
2366       xfree (var->u.string);
2367       break;
2368 
2369     case INTERNALVAR_MAKE_VALUE:
2370       if (var->u.make_value.functions->destroy != NULL)
2371 	var->u.make_value.functions->destroy (var->u.make_value.data);
2372       break;
2373 
2374     default:
2375       break;
2376     }
2377 
2378   /* Reset to void kind.  */
2379   var->kind = INTERNALVAR_VOID;
2380 }
2381 
2382 char *
2383 internalvar_name (struct internalvar *var)
2384 {
2385   return var->name;
2386 }
2387 
2388 static struct internal_function *
2389 create_internal_function (const char *name,
2390 			  internal_function_fn handler, void *cookie)
2391 {
2392   struct internal_function *ifn = XNEW (struct internal_function);
2393 
2394   ifn->name = xstrdup (name);
2395   ifn->handler = handler;
2396   ifn->cookie = cookie;
2397   return ifn;
2398 }
2399 
2400 char *
2401 value_internal_function_name (struct value *val)
2402 {
2403   struct internal_function *ifn;
2404   int result;
2405 
2406   gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2407   result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2408   gdb_assert (result);
2409 
2410   return ifn->name;
2411 }
2412 
2413 struct value *
2414 call_internal_function (struct gdbarch *gdbarch,
2415 			const struct language_defn *language,
2416 			struct value *func, int argc, struct value **argv)
2417 {
2418   struct internal_function *ifn;
2419   int result;
2420 
2421   gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2422   result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2423   gdb_assert (result);
2424 
2425   return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2426 }
2427 
2428 /* The 'function' command.  This does nothing -- it is just a
2429    placeholder to let "help function NAME" work.  This is also used as
2430    the implementation of the sub-command that is created when
2431    registering an internal function.  */
2432 static void
2433 function_command (char *command, int from_tty)
2434 {
2435   /* Do nothing.  */
2436 }
2437 
2438 /* Clean up if an internal function's command is destroyed.  */
2439 static void
2440 function_destroyer (struct cmd_list_element *self, void *ignore)
2441 {
2442   xfree ((char *) self->name);
2443   xfree ((char *) self->doc);
2444 }
2445 
2446 /* Add a new internal function.  NAME is the name of the function; DOC
2447    is a documentation string describing the function.  HANDLER is
2448    called when the function is invoked.  COOKIE is an arbitrary
2449    pointer which is passed to HANDLER and is intended for "user
2450    data".  */
2451 void
2452 add_internal_function (const char *name, const char *doc,
2453 		       internal_function_fn handler, void *cookie)
2454 {
2455   struct cmd_list_element *cmd;
2456   struct internal_function *ifn;
2457   struct internalvar *var = lookup_internalvar (name);
2458 
2459   ifn = create_internal_function (name, handler, cookie);
2460   set_internalvar_function (var, ifn);
2461 
2462   cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2463 		 &functionlist);
2464   cmd->destroyer = function_destroyer;
2465 }
2466 
2467 /* Update VALUE before discarding OBJFILE.  COPIED_TYPES is used to
2468    prevent cycles / duplicates.  */
2469 
2470 void
2471 preserve_one_value (struct value *value, struct objfile *objfile,
2472 		    htab_t copied_types)
2473 {
2474   if (TYPE_OBJFILE (value->type) == objfile)
2475     value->type = copy_type_recursive (objfile, value->type, copied_types);
2476 
2477   if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2478     value->enclosing_type = copy_type_recursive (objfile,
2479 						 value->enclosing_type,
2480 						 copied_types);
2481 }
2482 
2483 /* Likewise for internal variable VAR.  */
2484 
2485 static void
2486 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2487 			  htab_t copied_types)
2488 {
2489   switch (var->kind)
2490     {
2491     case INTERNALVAR_INTEGER:
2492       if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2493 	var->u.integer.type
2494 	  = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2495       break;
2496 
2497     case INTERNALVAR_VALUE:
2498       preserve_one_value (var->u.value, objfile, copied_types);
2499       break;
2500     }
2501 }
2502 
2503 /* Update the internal variables and value history when OBJFILE is
2504    discarded; we must copy the types out of the objfile.  New global types
2505    will be created for every convenience variable which currently points to
2506    this objfile's types, and the convenience variables will be adjusted to
2507    use the new global types.  */
2508 
2509 void
2510 preserve_values (struct objfile *objfile)
2511 {
2512   htab_t copied_types;
2513   struct value_history_chunk *cur;
2514   struct internalvar *var;
2515   int i;
2516 
2517   /* Create the hash table.  We allocate on the objfile's obstack, since
2518      it is soon to be deleted.  */
2519   copied_types = create_copied_types_hash (objfile);
2520 
2521   for (cur = value_history_chain; cur; cur = cur->next)
2522     for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2523       if (cur->values[i])
2524 	preserve_one_value (cur->values[i], objfile, copied_types);
2525 
2526   for (var = internalvars; var; var = var->next)
2527     preserve_one_internalvar (var, objfile, copied_types);
2528 
2529   preserve_ext_lang_values (objfile, copied_types);
2530 
2531   htab_delete (copied_types);
2532 }
2533 
2534 static void
2535 show_convenience (char *ignore, int from_tty)
2536 {
2537   struct gdbarch *gdbarch = get_current_arch ();
2538   struct internalvar *var;
2539   int varseen = 0;
2540   struct value_print_options opts;
2541 
2542   get_user_print_options (&opts);
2543   for (var = internalvars; var; var = var->next)
2544     {
2545       volatile struct gdb_exception ex;
2546 
2547       if (!varseen)
2548 	{
2549 	  varseen = 1;
2550 	}
2551       printf_filtered (("$%s = "), var->name);
2552 
2553       TRY_CATCH (ex, RETURN_MASK_ERROR)
2554 	{
2555 	  struct value *val;
2556 
2557 	  val = value_of_internalvar (gdbarch, var);
2558 	  value_print (val, gdb_stdout, &opts);
2559 	}
2560       if (ex.reason < 0)
2561 	fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2562       printf_filtered (("\n"));
2563     }
2564   if (!varseen)
2565     {
2566       /* This text does not mention convenience functions on purpose.
2567 	 The user can't create them except via Python, and if Python support
2568 	 is installed this message will never be printed ($_streq will
2569 	 exist).  */
2570       printf_unfiltered (_("No debugger convenience variables now defined.\n"
2571 			   "Convenience variables have "
2572 			   "names starting with \"$\";\n"
2573 			   "use \"set\" as in \"set "
2574 			   "$foo = 5\" to define them.\n"));
2575     }
2576 }
2577 
2578 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER.  */
2579 
2580 struct value *
2581 value_of_xmethod (struct xmethod_worker *worker)
2582 {
2583   if (worker->value == NULL)
2584     {
2585       struct value *v;
2586 
2587       v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2588       v->lval = lval_xcallable;
2589       v->location.xm_worker = worker;
2590       v->modifiable = 0;
2591       worker->value = v;
2592     }
2593 
2594   return worker->value;
2595 }
2596 
2597 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD.  */
2598 
2599 struct type *
2600 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2601 {
2602   gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2603 	      && method->lval == lval_xcallable && argc > 0);
2604 
2605   return get_xmethod_result_type (method->location.xm_worker,
2606 				  argv[0], argv + 1, argc - 1);
2607 }
2608 
2609 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD.  */
2610 
2611 struct value *
2612 call_xmethod (struct value *method, int argc, struct value **argv)
2613 {
2614   gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2615 	      && method->lval == lval_xcallable && argc > 0);
2616 
2617   return invoke_xmethod (method->location.xm_worker,
2618 			 argv[0], argv + 1, argc - 1);
2619 }
2620 
2621 /* Extract a value as a C number (either long or double).
2622    Knows how to convert fixed values to double, or
2623    floating values to long.
2624    Does not deallocate the value.  */
2625 
2626 LONGEST
2627 value_as_long (struct value *val)
2628 {
2629   /* This coerces arrays and functions, which is necessary (e.g.
2630      in disassemble_command).  It also dereferences references, which
2631      I suspect is the most logical thing to do.  */
2632   val = coerce_array (val);
2633   return unpack_long (value_type (val), value_contents (val));
2634 }
2635 
2636 DOUBLEST
2637 value_as_double (struct value *val)
2638 {
2639   DOUBLEST foo;
2640   int inv;
2641 
2642   foo = unpack_double (value_type (val), value_contents (val), &inv);
2643   if (inv)
2644     error (_("Invalid floating value found in program."));
2645   return foo;
2646 }
2647 
2648 /* Extract a value as a C pointer.  Does not deallocate the value.
2649    Note that val's type may not actually be a pointer; value_as_long
2650    handles all the cases.  */
2651 CORE_ADDR
2652 value_as_address (struct value *val)
2653 {
2654   struct gdbarch *gdbarch = get_type_arch (value_type (val));
2655 
2656   /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2657      whether we want this to be true eventually.  */
2658 #if 0
2659   /* gdbarch_addr_bits_remove is wrong if we are being called for a
2660      non-address (e.g. argument to "signal", "info break", etc.), or
2661      for pointers to char, in which the low bits *are* significant.  */
2662   return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2663 #else
2664 
2665   /* There are several targets (IA-64, PowerPC, and others) which
2666      don't represent pointers to functions as simply the address of
2667      the function's entry point.  For example, on the IA-64, a
2668      function pointer points to a two-word descriptor, generated by
2669      the linker, which contains the function's entry point, and the
2670      value the IA-64 "global pointer" register should have --- to
2671      support position-independent code.  The linker generates
2672      descriptors only for those functions whose addresses are taken.
2673 
2674      On such targets, it's difficult for GDB to convert an arbitrary
2675      function address into a function pointer; it has to either find
2676      an existing descriptor for that function, or call malloc and
2677      build its own.  On some targets, it is impossible for GDB to
2678      build a descriptor at all: the descriptor must contain a jump
2679      instruction; data memory cannot be executed; and code memory
2680      cannot be modified.
2681 
2682      Upon entry to this function, if VAL is a value of type `function'
2683      (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2684      value_address (val) is the address of the function.  This is what
2685      you'll get if you evaluate an expression like `main'.  The call
2686      to COERCE_ARRAY below actually does all the usual unary
2687      conversions, which includes converting values of type `function'
2688      to `pointer to function'.  This is the challenging conversion
2689      discussed above.  Then, `unpack_long' will convert that pointer
2690      back into an address.
2691 
2692      So, suppose the user types `disassemble foo' on an architecture
2693      with a strange function pointer representation, on which GDB
2694      cannot build its own descriptors, and suppose further that `foo'
2695      has no linker-built descriptor.  The address->pointer conversion
2696      will signal an error and prevent the command from running, even
2697      though the next step would have been to convert the pointer
2698      directly back into the same address.
2699 
2700      The following shortcut avoids this whole mess.  If VAL is a
2701      function, just return its address directly.  */
2702   if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2703       || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2704     return value_address (val);
2705 
2706   val = coerce_array (val);
2707 
2708   /* Some architectures (e.g. Harvard), map instruction and data
2709      addresses onto a single large unified address space.  For
2710      instance: An architecture may consider a large integer in the
2711      range 0x10000000 .. 0x1000ffff to already represent a data
2712      addresses (hence not need a pointer to address conversion) while
2713      a small integer would still need to be converted integer to
2714      pointer to address.  Just assume such architectures handle all
2715      integer conversions in a single function.  */
2716 
2717   /* JimB writes:
2718 
2719      I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2720      must admonish GDB hackers to make sure its behavior matches the
2721      compiler's, whenever possible.
2722 
2723      In general, I think GDB should evaluate expressions the same way
2724      the compiler does.  When the user copies an expression out of
2725      their source code and hands it to a `print' command, they should
2726      get the same value the compiler would have computed.  Any
2727      deviation from this rule can cause major confusion and annoyance,
2728      and needs to be justified carefully.  In other words, GDB doesn't
2729      really have the freedom to do these conversions in clever and
2730      useful ways.
2731 
2732      AndrewC pointed out that users aren't complaining about how GDB
2733      casts integers to pointers; they are complaining that they can't
2734      take an address from a disassembly listing and give it to `x/i'.
2735      This is certainly important.
2736 
2737      Adding an architecture method like integer_to_address() certainly
2738      makes it possible for GDB to "get it right" in all circumstances
2739      --- the target has complete control over how things get done, so
2740      people can Do The Right Thing for their target without breaking
2741      anyone else.  The standard doesn't specify how integers get
2742      converted to pointers; usually, the ABI doesn't either, but
2743      ABI-specific code is a more reasonable place to handle it.  */
2744 
2745   if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2746       && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
2747       && gdbarch_integer_to_address_p (gdbarch))
2748     return gdbarch_integer_to_address (gdbarch, value_type (val),
2749 				       value_contents (val));
2750 
2751   return unpack_long (value_type (val), value_contents (val));
2752 #endif
2753 }
2754 
2755 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2756    as a long, or as a double, assuming the raw data is described
2757    by type TYPE.  Knows how to convert different sizes of values
2758    and can convert between fixed and floating point.  We don't assume
2759    any alignment for the raw data.  Return value is in host byte order.
2760 
2761    If you want functions and arrays to be coerced to pointers, and
2762    references to be dereferenced, call value_as_long() instead.
2763 
2764    C++: It is assumed that the front-end has taken care of
2765    all matters concerning pointers to members.  A pointer
2766    to member which reaches here is considered to be equivalent
2767    to an INT (or some size).  After all, it is only an offset.  */
2768 
2769 LONGEST
2770 unpack_long (struct type *type, const gdb_byte *valaddr)
2771 {
2772   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2773   enum type_code code = TYPE_CODE (type);
2774   int len = TYPE_LENGTH (type);
2775   int nosign = TYPE_UNSIGNED (type);
2776 
2777   switch (code)
2778     {
2779     case TYPE_CODE_TYPEDEF:
2780       return unpack_long (check_typedef (type), valaddr);
2781     case TYPE_CODE_ENUM:
2782     case TYPE_CODE_FLAGS:
2783     case TYPE_CODE_BOOL:
2784     case TYPE_CODE_INT:
2785     case TYPE_CODE_CHAR:
2786     case TYPE_CODE_RANGE:
2787     case TYPE_CODE_MEMBERPTR:
2788       if (nosign)
2789 	return extract_unsigned_integer (valaddr, len, byte_order);
2790       else
2791 	return extract_signed_integer (valaddr, len, byte_order);
2792 
2793     case TYPE_CODE_FLT:
2794       return extract_typed_floating (valaddr, type);
2795 
2796     case TYPE_CODE_DECFLOAT:
2797       /* libdecnumber has a function to convert from decimal to integer, but
2798 	 it doesn't work when the decimal number has a fractional part.  */
2799       return decimal_to_doublest (valaddr, len, byte_order);
2800 
2801     case TYPE_CODE_PTR:
2802     case TYPE_CODE_REF:
2803       /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2804          whether we want this to be true eventually.  */
2805       return extract_typed_address (valaddr, type);
2806 
2807     default:
2808       error (_("Value can't be converted to integer."));
2809     }
2810   return 0;			/* Placate lint.  */
2811 }
2812 
2813 /* Return a double value from the specified type and address.
2814    INVP points to an int which is set to 0 for valid value,
2815    1 for invalid value (bad float format).  In either case,
2816    the returned double is OK to use.  Argument is in target
2817    format, result is in host format.  */
2818 
2819 DOUBLEST
2820 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2821 {
2822   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2823   enum type_code code;
2824   int len;
2825   int nosign;
2826 
2827   *invp = 0;			/* Assume valid.  */
2828   CHECK_TYPEDEF (type);
2829   code = TYPE_CODE (type);
2830   len = TYPE_LENGTH (type);
2831   nosign = TYPE_UNSIGNED (type);
2832   if (code == TYPE_CODE_FLT)
2833     {
2834       /* NOTE: cagney/2002-02-19: There was a test here to see if the
2835 	 floating-point value was valid (using the macro
2836 	 INVALID_FLOAT).  That test/macro have been removed.
2837 
2838 	 It turns out that only the VAX defined this macro and then
2839 	 only in a non-portable way.  Fixing the portability problem
2840 	 wouldn't help since the VAX floating-point code is also badly
2841 	 bit-rotten.  The target needs to add definitions for the
2842 	 methods gdbarch_float_format and gdbarch_double_format - these
2843 	 exactly describe the target floating-point format.  The
2844 	 problem here is that the corresponding floatformat_vax_f and
2845 	 floatformat_vax_d values these methods should be set to are
2846 	 also not defined either.  Oops!
2847 
2848          Hopefully someone will add both the missing floatformat
2849          definitions and the new cases for floatformat_is_valid ().  */
2850 
2851       if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2852 	{
2853 	  *invp = 1;
2854 	  return 0.0;
2855 	}
2856 
2857       return extract_typed_floating (valaddr, type);
2858     }
2859   else if (code == TYPE_CODE_DECFLOAT)
2860     return decimal_to_doublest (valaddr, len, byte_order);
2861   else if (nosign)
2862     {
2863       /* Unsigned -- be sure we compensate for signed LONGEST.  */
2864       return (ULONGEST) unpack_long (type, valaddr);
2865     }
2866   else
2867     {
2868       /* Signed -- we are OK with unpack_long.  */
2869       return unpack_long (type, valaddr);
2870     }
2871 }
2872 
2873 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2874    as a CORE_ADDR, assuming the raw data is described by type TYPE.
2875    We don't assume any alignment for the raw data.  Return value is in
2876    host byte order.
2877 
2878    If you want functions and arrays to be coerced to pointers, and
2879    references to be dereferenced, call value_as_address() instead.
2880 
2881    C++: It is assumed that the front-end has taken care of
2882    all matters concerning pointers to members.  A pointer
2883    to member which reaches here is considered to be equivalent
2884    to an INT (or some size).  After all, it is only an offset.  */
2885 
2886 CORE_ADDR
2887 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2888 {
2889   /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
2890      whether we want this to be true eventually.  */
2891   return unpack_long (type, valaddr);
2892 }
2893 
2894 
2895 /* Get the value of the FIELDNO'th field (which must be static) of
2896    TYPE.  */
2897 
2898 struct value *
2899 value_static_field (struct type *type, int fieldno)
2900 {
2901   struct value *retval;
2902 
2903   switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2904     {
2905     case FIELD_LOC_KIND_PHYSADDR:
2906       retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2907 			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2908       break;
2909     case FIELD_LOC_KIND_PHYSNAME:
2910     {
2911       const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2912       /* TYPE_FIELD_NAME (type, fieldno); */
2913       struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2914 
2915       if (sym == NULL)
2916 	{
2917 	  /* With some compilers, e.g. HP aCC, static data members are
2918 	     reported as non-debuggable symbols.  */
2919 	  struct bound_minimal_symbol msym
2920 	    = lookup_minimal_symbol (phys_name, NULL, NULL);
2921 
2922 	  if (!msym.minsym)
2923 	    return allocate_optimized_out_value (type);
2924 	  else
2925 	    {
2926 	      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2927 				      BMSYMBOL_VALUE_ADDRESS (msym));
2928 	    }
2929 	}
2930       else
2931 	retval = value_of_variable (sym, NULL);
2932       break;
2933     }
2934     default:
2935       gdb_assert_not_reached ("unexpected field location kind");
2936     }
2937 
2938   return retval;
2939 }
2940 
2941 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2942    You have to be careful here, since the size of the data area for the value
2943    is set by the length of the enclosing type.  So if NEW_ENCL_TYPE is bigger
2944    than the old enclosing type, you have to allocate more space for the
2945    data.  */
2946 
2947 void
2948 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2949 {
2950   if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2951     val->contents =
2952       (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2953 
2954   val->enclosing_type = new_encl_type;
2955 }
2956 
2957 /* Given a value ARG1 (offset by OFFSET bytes)
2958    of a struct or union type ARG_TYPE,
2959    extract and return the value of one of its (non-static) fields.
2960    FIELDNO says which field.  */
2961 
2962 struct value *
2963 value_primitive_field (struct value *arg1, int offset,
2964 		       int fieldno, struct type *arg_type)
2965 {
2966   struct value *v;
2967   struct type *type;
2968 
2969   CHECK_TYPEDEF (arg_type);
2970   type = TYPE_FIELD_TYPE (arg_type, fieldno);
2971 
2972   /* Call check_typedef on our type to make sure that, if TYPE
2973      is a TYPE_CODE_TYPEDEF, its length is set to the length
2974      of the target type instead of zero.  However, we do not
2975      replace the typedef type by the target type, because we want
2976      to keep the typedef in order to be able to print the type
2977      description correctly.  */
2978   check_typedef (type);
2979 
2980   if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2981     {
2982       /* Handle packed fields.
2983 
2984 	 Create a new value for the bitfield, with bitpos and bitsize
2985 	 set.  If possible, arrange offset and bitpos so that we can
2986 	 do a single aligned read of the size of the containing type.
2987 	 Otherwise, adjust offset to the byte containing the first
2988 	 bit.  Assume that the address, offset, and embedded offset
2989 	 are sufficiently aligned.  */
2990 
2991       int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2992       int container_bitsize = TYPE_LENGTH (type) * 8;
2993 
2994       v = allocate_value_lazy (type);
2995       v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2996       if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2997 	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2998 	v->bitpos = bitpos % container_bitsize;
2999       else
3000 	v->bitpos = bitpos % 8;
3001       v->offset = (value_embedded_offset (arg1)
3002 		   + offset
3003 		   + (bitpos - v->bitpos) / 8);
3004       set_value_parent (v, arg1);
3005       if (!value_lazy (arg1))
3006 	value_fetch_lazy (v);
3007     }
3008   else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3009     {
3010       /* This field is actually a base subobject, so preserve the
3011 	 entire object's contents for later references to virtual
3012 	 bases, etc.  */
3013       int boffset;
3014 
3015       /* Lazy register values with offsets are not supported.  */
3016       if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3017 	value_fetch_lazy (arg1);
3018 
3019       /* We special case virtual inheritance here because this
3020 	 requires access to the contents, which we would rather avoid
3021 	 for references to ordinary fields of unavailable values.  */
3022       if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3023 	boffset = baseclass_offset (arg_type, fieldno,
3024 				    value_contents (arg1),
3025 				    value_embedded_offset (arg1),
3026 				    value_address (arg1),
3027 				    arg1);
3028       else
3029 	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
3030 
3031       if (value_lazy (arg1))
3032 	v = allocate_value_lazy (value_enclosing_type (arg1));
3033       else
3034 	{
3035 	  v = allocate_value (value_enclosing_type (arg1));
3036 	  value_contents_copy_raw (v, 0, arg1, 0,
3037 				   TYPE_LENGTH (value_enclosing_type (arg1)));
3038 	}
3039       v->type = type;
3040       v->offset = value_offset (arg1);
3041       v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
3042     }
3043   else
3044     {
3045       /* Plain old data member */
3046       offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
3047 
3048       /* Lazy register values with offsets are not supported.  */
3049       if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3050 	value_fetch_lazy (arg1);
3051 
3052       if (value_lazy (arg1))
3053 	v = allocate_value_lazy (type);
3054       else
3055 	{
3056 	  v = allocate_value (type);
3057 	  value_contents_copy_raw (v, value_embedded_offset (v),
3058 				   arg1, value_embedded_offset (arg1) + offset,
3059 				   TYPE_LENGTH (type));
3060 	}
3061       v->offset = (value_offset (arg1) + offset
3062 		   + value_embedded_offset (arg1));
3063     }
3064   set_value_component_location (v, arg1);
3065   VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
3066   VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
3067   return v;
3068 }
3069 
3070 /* Given a value ARG1 of a struct or union type,
3071    extract and return the value of one of its (non-static) fields.
3072    FIELDNO says which field.  */
3073 
3074 struct value *
3075 value_field (struct value *arg1, int fieldno)
3076 {
3077   return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3078 }
3079 
3080 /* Return a non-virtual function as a value.
3081    F is the list of member functions which contains the desired method.
3082    J is an index into F which provides the desired method.
3083 
3084    We only use the symbol for its address, so be happy with either a
3085    full symbol or a minimal symbol.  */
3086 
3087 struct value *
3088 value_fn_field (struct value **arg1p, struct fn_field *f,
3089 		int j, struct type *type,
3090 		int offset)
3091 {
3092   struct value *v;
3093   struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3094   const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3095   struct symbol *sym;
3096   struct bound_minimal_symbol msym;
3097 
3098   sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0);
3099   if (sym != NULL)
3100     {
3101       memset (&msym, 0, sizeof (msym));
3102     }
3103   else
3104     {
3105       gdb_assert (sym == NULL);
3106       msym = lookup_bound_minimal_symbol (physname);
3107       if (msym.minsym == NULL)
3108 	return NULL;
3109     }
3110 
3111   v = allocate_value (ftype);
3112   if (sym)
3113     {
3114       set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3115     }
3116   else
3117     {
3118       /* The minimal symbol might point to a function descriptor;
3119 	 resolve it to the actual code address instead.  */
3120       struct objfile *objfile = msym.objfile;
3121       struct gdbarch *gdbarch = get_objfile_arch (objfile);
3122 
3123       set_value_address (v,
3124 	gdbarch_convert_from_func_ptr_addr
3125 	   (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3126     }
3127 
3128   if (arg1p)
3129     {
3130       if (type != value_type (*arg1p))
3131 	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
3132 					value_addr (*arg1p)));
3133 
3134       /* Move the `this' pointer according to the offset.
3135          VALUE_OFFSET (*arg1p) += offset; */
3136     }
3137 
3138   return v;
3139 }
3140 
3141 
3142 
3143 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3144    VALADDR, and store the result in *RESULT.
3145    The bitfield starts at BITPOS bits and contains BITSIZE bits.
3146 
3147    Extracting bits depends on endianness of the machine.  Compute the
3148    number of least significant bits to discard.  For big endian machines,
3149    we compute the total number of bits in the anonymous object, subtract
3150    off the bit count from the MSB of the object to the MSB of the
3151    bitfield, then the size of the bitfield, which leaves the LSB discard
3152    count.  For little endian machines, the discard count is simply the
3153    number of bits from the LSB of the anonymous object to the LSB of the
3154    bitfield.
3155 
3156    If the field is signed, we also do sign extension.  */
3157 
3158 static LONGEST
3159 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3160 		     int bitpos, int bitsize)
3161 {
3162   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3163   ULONGEST val;
3164   ULONGEST valmask;
3165   int lsbcount;
3166   int bytes_read;
3167   int read_offset;
3168 
3169   /* Read the minimum number of bytes required; there may not be
3170      enough bytes to read an entire ULONGEST.  */
3171   CHECK_TYPEDEF (field_type);
3172   if (bitsize)
3173     bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3174   else
3175     bytes_read = TYPE_LENGTH (field_type);
3176 
3177   read_offset = bitpos / 8;
3178 
3179   val = extract_unsigned_integer (valaddr + read_offset,
3180 				  bytes_read, byte_order);
3181 
3182   /* Extract bits.  See comment above.  */
3183 
3184   if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3185     lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3186   else
3187     lsbcount = (bitpos % 8);
3188   val >>= lsbcount;
3189 
3190   /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3191      If the field is signed, and is negative, then sign extend.  */
3192 
3193   if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
3194     {
3195       valmask = (((ULONGEST) 1) << bitsize) - 1;
3196       val &= valmask;
3197       if (!TYPE_UNSIGNED (field_type))
3198 	{
3199 	  if (val & (valmask ^ (valmask >> 1)))
3200 	    {
3201 	      val |= ~valmask;
3202 	    }
3203 	}
3204     }
3205 
3206   return val;
3207 }
3208 
3209 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3210    VALADDR + EMBEDDED_OFFSET.  VALADDR points to the contents of
3211    ORIGINAL_VALUE, which must not be NULL.  See
3212    unpack_value_bits_as_long for more details.  */
3213 
3214 int
3215 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3216 			    int embedded_offset, int fieldno,
3217 			    const struct value *val, LONGEST *result)
3218 {
3219   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3220   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3221   struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3222   int bit_offset;
3223 
3224   gdb_assert (val != NULL);
3225 
3226   bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3227   if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3228       || !value_bits_available (val, bit_offset, bitsize))
3229     return 0;
3230 
3231   *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3232 				 bitpos, bitsize);
3233   return 1;
3234 }
3235 
3236 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3237    object at VALADDR.  See unpack_bits_as_long for more details.  */
3238 
3239 LONGEST
3240 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3241 {
3242   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3243   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3244   struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3245 
3246   return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3247 }
3248 
3249 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3250    VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3251    the contents in DEST_VAL, zero or sign extending if the type of
3252    DEST_VAL is wider than BITSIZE.  VALADDR points to the contents of
3253    VAL.  If the VAL's contents required to extract the bitfield from
3254    are unavailable/optimized out, DEST_VAL is correspondingly
3255    marked unavailable/optimized out.  */
3256 
3257 void
3258 unpack_value_bitfield (struct value *dest_val,
3259 		       int bitpos, int bitsize,
3260 		       const gdb_byte *valaddr, int embedded_offset,
3261 		       const struct value *val)
3262 {
3263   enum bfd_endian byte_order;
3264   int src_bit_offset;
3265   int dst_bit_offset;
3266   LONGEST num;
3267   struct type *field_type = value_type (dest_val);
3268 
3269   /* First, unpack and sign extend the bitfield as if it was wholly
3270      available.  Invalid/unavailable bits are read as zero, but that's
3271      OK, as they'll end up marked below.  */
3272   byte_order = gdbarch_byte_order (get_type_arch (field_type));
3273   num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3274 			     bitpos, bitsize);
3275   store_signed_integer (value_contents_raw (dest_val),
3276 			TYPE_LENGTH (field_type), byte_order, num);
3277 
3278   /* Now copy the optimized out / unavailability ranges to the right
3279      bits.  */
3280   src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3281   if (byte_order == BFD_ENDIAN_BIG)
3282     dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3283   else
3284     dst_bit_offset = 0;
3285   value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3286 			      val, src_bit_offset, bitsize);
3287 }
3288 
3289 /* Return a new value with type TYPE, which is FIELDNO field of the
3290    object at VALADDR + EMBEDDEDOFFSET.  VALADDR points to the contents
3291    of VAL.  If the VAL's contents required to extract the bitfield
3292    from are unavailable/optimized out, the new value is
3293    correspondingly marked unavailable/optimized out.  */
3294 
3295 struct value *
3296 value_field_bitfield (struct type *type, int fieldno,
3297 		      const gdb_byte *valaddr,
3298 		      int embedded_offset, const struct value *val)
3299 {
3300   int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3301   int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3302   struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3303 
3304   unpack_value_bitfield (res_val, bitpos, bitsize,
3305 			 valaddr, embedded_offset, val);
3306 
3307   return res_val;
3308 }
3309 
3310 /* Modify the value of a bitfield.  ADDR points to a block of memory in
3311    target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
3312    is the desired value of the field, in host byte order.  BITPOS and BITSIZE
3313    indicate which bits (in target bit order) comprise the bitfield.
3314    Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3315    0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */
3316 
3317 void
3318 modify_field (struct type *type, gdb_byte *addr,
3319 	      LONGEST fieldval, int bitpos, int bitsize)
3320 {
3321   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3322   ULONGEST oword;
3323   ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3324   int bytesize;
3325 
3326   /* Normalize BITPOS.  */
3327   addr += bitpos / 8;
3328   bitpos %= 8;
3329 
3330   /* If a negative fieldval fits in the field in question, chop
3331      off the sign extension bits.  */
3332   if ((~fieldval & ~(mask >> 1)) == 0)
3333     fieldval &= mask;
3334 
3335   /* Warn if value is too big to fit in the field in question.  */
3336   if (0 != (fieldval & ~mask))
3337     {
3338       /* FIXME: would like to include fieldval in the message, but
3339          we don't have a sprintf_longest.  */
3340       warning (_("Value does not fit in %d bits."), bitsize);
3341 
3342       /* Truncate it, otherwise adjoining fields may be corrupted.  */
3343       fieldval &= mask;
3344     }
3345 
3346   /* Ensure no bytes outside of the modified ones get accessed as it may cause
3347      false valgrind reports.  */
3348 
3349   bytesize = (bitpos + bitsize + 7) / 8;
3350   oword = extract_unsigned_integer (addr, bytesize, byte_order);
3351 
3352   /* Shifting for bit field depends on endianness of the target machine.  */
3353   if (gdbarch_bits_big_endian (get_type_arch (type)))
3354     bitpos = bytesize * 8 - bitpos - bitsize;
3355 
3356   oword &= ~(mask << bitpos);
3357   oword |= fieldval << bitpos;
3358 
3359   store_unsigned_integer (addr, bytesize, byte_order, oword);
3360 }
3361 
3362 /* Pack NUM into BUF using a target format of TYPE.  */
3363 
3364 void
3365 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3366 {
3367   enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3368   int len;
3369 
3370   type = check_typedef (type);
3371   len = TYPE_LENGTH (type);
3372 
3373   switch (TYPE_CODE (type))
3374     {
3375     case TYPE_CODE_INT:
3376     case TYPE_CODE_CHAR:
3377     case TYPE_CODE_ENUM:
3378     case TYPE_CODE_FLAGS:
3379     case TYPE_CODE_BOOL:
3380     case TYPE_CODE_RANGE:
3381     case TYPE_CODE_MEMBERPTR:
3382       store_signed_integer (buf, len, byte_order, num);
3383       break;
3384 
3385     case TYPE_CODE_REF:
3386     case TYPE_CODE_PTR:
3387       store_typed_address (buf, type, (CORE_ADDR) num);
3388       break;
3389 
3390     default:
3391       error (_("Unexpected type (%d) encountered for integer constant."),
3392 	     TYPE_CODE (type));
3393     }
3394 }
3395 
3396 
3397 /* Pack NUM into BUF using a target format of TYPE.  */
3398 
3399 static void
3400 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3401 {
3402   int len;
3403   enum bfd_endian byte_order;
3404 
3405   type = check_typedef (type);
3406   len = TYPE_LENGTH (type);
3407   byte_order = gdbarch_byte_order (get_type_arch (type));
3408 
3409   switch (TYPE_CODE (type))
3410     {
3411     case TYPE_CODE_INT:
3412     case TYPE_CODE_CHAR:
3413     case TYPE_CODE_ENUM:
3414     case TYPE_CODE_FLAGS:
3415     case TYPE_CODE_BOOL:
3416     case TYPE_CODE_RANGE:
3417     case TYPE_CODE_MEMBERPTR:
3418       store_unsigned_integer (buf, len, byte_order, num);
3419       break;
3420 
3421     case TYPE_CODE_REF:
3422     case TYPE_CODE_PTR:
3423       store_typed_address (buf, type, (CORE_ADDR) num);
3424       break;
3425 
3426     default:
3427       error (_("Unexpected type (%d) encountered "
3428 	       "for unsigned integer constant."),
3429 	     TYPE_CODE (type));
3430     }
3431 }
3432 
3433 
3434 /* Convert C numbers into newly allocated values.  */
3435 
3436 struct value *
3437 value_from_longest (struct type *type, LONGEST num)
3438 {
3439   struct value *val = allocate_value (type);
3440 
3441   pack_long (value_contents_raw (val), type, num);
3442   return val;
3443 }
3444 
3445 
3446 /* Convert C unsigned numbers into newly allocated values.  */
3447 
3448 struct value *
3449 value_from_ulongest (struct type *type, ULONGEST num)
3450 {
3451   struct value *val = allocate_value (type);
3452 
3453   pack_unsigned_long (value_contents_raw (val), type, num);
3454 
3455   return val;
3456 }
3457 
3458 
3459 /* Create a value representing a pointer of type TYPE to the address
3460    ADDR.  */
3461 
3462 struct value *
3463 value_from_pointer (struct type *type, CORE_ADDR addr)
3464 {
3465   struct value *val = allocate_value (type);
3466 
3467   store_typed_address (value_contents_raw (val),
3468 		       check_typedef (type), addr);
3469   return val;
3470 }
3471 
3472 
3473 /* Create a value of type TYPE whose contents come from VALADDR, if it
3474    is non-null, and whose memory address (in the inferior) is
3475    ADDRESS.  The type of the created value may differ from the passed
3476    type TYPE.  Make sure to retrieve values new type after this call.
3477    Note that TYPE is not passed through resolve_dynamic_type; this is
3478    a special API intended for use only by Ada.  */
3479 
3480 struct value *
3481 value_from_contents_and_address_unresolved (struct type *type,
3482 					    const gdb_byte *valaddr,
3483 					    CORE_ADDR address)
3484 {
3485   struct value *v;
3486 
3487   if (valaddr == NULL)
3488     v = allocate_value_lazy (type);
3489   else
3490     v = value_from_contents (type, valaddr);
3491   set_value_address (v, address);
3492   VALUE_LVAL (v) = lval_memory;
3493   return v;
3494 }
3495 
3496 /* Create a value of type TYPE whose contents come from VALADDR, if it
3497    is non-null, and whose memory address (in the inferior) is
3498    ADDRESS.  The type of the created value may differ from the passed
3499    type TYPE.  Make sure to retrieve values new type after this call.  */
3500 
3501 struct value *
3502 value_from_contents_and_address (struct type *type,
3503 				 const gdb_byte *valaddr,
3504 				 CORE_ADDR address)
3505 {
3506   struct type *resolved_type = resolve_dynamic_type (type, address);
3507   struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3508   struct value *v;
3509 
3510   if (valaddr == NULL)
3511     v = allocate_value_lazy (resolved_type);
3512   else
3513     v = value_from_contents (resolved_type, valaddr);
3514   if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3515       && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3516     address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3517   set_value_address (v, address);
3518   VALUE_LVAL (v) = lval_memory;
3519   return v;
3520 }
3521 
3522 /* Create a value of type TYPE holding the contents CONTENTS.
3523    The new value is `not_lval'.  */
3524 
3525 struct value *
3526 value_from_contents (struct type *type, const gdb_byte *contents)
3527 {
3528   struct value *result;
3529 
3530   result = allocate_value (type);
3531   memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3532   return result;
3533 }
3534 
3535 struct value *
3536 value_from_double (struct type *type, DOUBLEST num)
3537 {
3538   struct value *val = allocate_value (type);
3539   struct type *base_type = check_typedef (type);
3540   enum type_code code = TYPE_CODE (base_type);
3541 
3542   if (code == TYPE_CODE_FLT)
3543     {
3544       store_typed_floating (value_contents_raw (val), base_type, num);
3545     }
3546   else
3547     error (_("Unexpected type encountered for floating constant."));
3548 
3549   return val;
3550 }
3551 
3552 struct value *
3553 value_from_decfloat (struct type *type, const gdb_byte *dec)
3554 {
3555   struct value *val = allocate_value (type);
3556 
3557   memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3558   return val;
3559 }
3560 
3561 /* Extract a value from the history file.  Input will be of the form
3562    $digits or $$digits.  See block comment above 'write_dollar_variable'
3563    for details.  */
3564 
3565 struct value *
3566 value_from_history_ref (const char *h, const char **endp)
3567 {
3568   int index, len;
3569 
3570   if (h[0] == '$')
3571     len = 1;
3572   else
3573     return NULL;
3574 
3575   if (h[1] == '$')
3576     len = 2;
3577 
3578   /* Find length of numeral string.  */
3579   for (; isdigit (h[len]); len++)
3580     ;
3581 
3582   /* Make sure numeral string is not part of an identifier.  */
3583   if (h[len] == '_' || isalpha (h[len]))
3584     return NULL;
3585 
3586   /* Now collect the index value.  */
3587   if (h[1] == '$')
3588     {
3589       if (len == 2)
3590 	{
3591 	  /* For some bizarre reason, "$$" is equivalent to "$$1",
3592 	     rather than to "$$0" as it ought to be!  */
3593 	  index = -1;
3594 	  *endp += len;
3595 	}
3596       else
3597 	{
3598 	  char *local_end;
3599 
3600 	  index = -strtol (&h[2], &local_end, 10);
3601 	  *endp = local_end;
3602 	}
3603     }
3604   else
3605     {
3606       if (len == 1)
3607 	{
3608 	  /* "$" is equivalent to "$0".  */
3609 	  index = 0;
3610 	  *endp += len;
3611 	}
3612       else
3613 	{
3614 	  char *local_end;
3615 
3616 	  index = strtol (&h[1], &local_end, 10);
3617 	  *endp = local_end;
3618 	}
3619     }
3620 
3621   return access_value_history (index);
3622 }
3623 
3624 struct value *
3625 coerce_ref_if_computed (const struct value *arg)
3626 {
3627   const struct lval_funcs *funcs;
3628 
3629   if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3630     return NULL;
3631 
3632   if (value_lval_const (arg) != lval_computed)
3633     return NULL;
3634 
3635   funcs = value_computed_funcs (arg);
3636   if (funcs->coerce_ref == NULL)
3637     return NULL;
3638 
3639   return funcs->coerce_ref (arg);
3640 }
3641 
3642 /* Look at value.h for description.  */
3643 
3644 struct value *
3645 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3646 			      struct type *original_type,
3647 			      struct value *original_value)
3648 {
3649   /* Re-adjust type.  */
3650   deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3651 
3652   /* Add embedding info.  */
3653   set_value_enclosing_type (value, enc_type);
3654   set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3655 
3656   /* We may be pointing to an object of some derived type.  */
3657   return value_full_object (value, NULL, 0, 0, 0);
3658 }
3659 
3660 struct value *
3661 coerce_ref (struct value *arg)
3662 {
3663   struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3664   struct value *retval;
3665   struct type *enc_type;
3666 
3667   retval = coerce_ref_if_computed (arg);
3668   if (retval)
3669     return retval;
3670 
3671   if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3672     return arg;
3673 
3674   enc_type = check_typedef (value_enclosing_type (arg));
3675   enc_type = TYPE_TARGET_TYPE (enc_type);
3676 
3677   retval = value_at_lazy (enc_type,
3678                           unpack_pointer (value_type (arg),
3679                                           value_contents (arg)));
3680   enc_type = value_type (retval);
3681   return readjust_indirect_value_type (retval, enc_type,
3682                                        value_type_arg_tmp, arg);
3683 }
3684 
3685 struct value *
3686 coerce_array (struct value *arg)
3687 {
3688   struct type *type;
3689 
3690   arg = coerce_ref (arg);
3691   type = check_typedef (value_type (arg));
3692 
3693   switch (TYPE_CODE (type))
3694     {
3695     case TYPE_CODE_ARRAY:
3696       if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3697 	arg = value_coerce_array (arg);
3698       break;
3699     case TYPE_CODE_FUNC:
3700       arg = value_coerce_function (arg);
3701       break;
3702     }
3703   return arg;
3704 }
3705 
3706 
3707 /* Return the return value convention that will be used for the
3708    specified type.  */
3709 
3710 enum return_value_convention
3711 struct_return_convention (struct gdbarch *gdbarch,
3712 			  struct value *function, struct type *value_type)
3713 {
3714   enum type_code code = TYPE_CODE (value_type);
3715 
3716   if (code == TYPE_CODE_ERROR)
3717     error (_("Function return type unknown."));
3718 
3719   /* Probe the architecture for the return-value convention.  */
3720   return gdbarch_return_value (gdbarch, function, value_type,
3721 			       NULL, NULL, NULL);
3722 }
3723 
3724 /* Return true if the function returning the specified type is using
3725    the convention of returning structures in memory (passing in the
3726    address as a hidden first parameter).  */
3727 
3728 int
3729 using_struct_return (struct gdbarch *gdbarch,
3730 		     struct value *function, struct type *value_type)
3731 {
3732   if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3733     /* A void return value is never in memory.  See also corresponding
3734        code in "print_return_value".  */
3735     return 0;
3736 
3737   return (struct_return_convention (gdbarch, function, value_type)
3738 	  != RETURN_VALUE_REGISTER_CONVENTION);
3739 }
3740 
3741 /* Set the initialized field in a value struct.  */
3742 
3743 void
3744 set_value_initialized (struct value *val, int status)
3745 {
3746   val->initialized = status;
3747 }
3748 
3749 /* Return the initialized field in a value struct.  */
3750 
3751 int
3752 value_initialized (struct value *val)
3753 {
3754   return val->initialized;
3755 }
3756 
3757 /* Called only from the value_contents and value_contents_all()
3758    macros, if the current data for a variable needs to be loaded into
3759    value_contents(VAL).  Fetches the data from the user's process, and
3760    clears the lazy flag to indicate that the data in the buffer is
3761    valid.
3762 
3763    If the value is zero-length, we avoid calling read_memory, which
3764    would abort.  We mark the value as fetched anyway -- all 0 bytes of
3765    it.
3766 
3767    This function returns a value because it is used in the
3768    value_contents macro as part of an expression, where a void would
3769    not work.  The value is ignored.  */
3770 
3771 int
3772 value_fetch_lazy (struct value *val)
3773 {
3774   gdb_assert (value_lazy (val));
3775   allocate_value_contents (val);
3776   /* A value is either lazy, or fully fetched.  The
3777      availability/validity is only established as we try to fetch a
3778      value.  */
3779   gdb_assert (VEC_empty (range_s, val->optimized_out));
3780   gdb_assert (VEC_empty (range_s, val->unavailable));
3781   if (value_bitsize (val))
3782     {
3783       /* To read a lazy bitfield, read the entire enclosing value.  This
3784 	 prevents reading the same block of (possibly volatile) memory once
3785          per bitfield.  It would be even better to read only the containing
3786          word, but we have no way to record that just specific bits of a
3787          value have been fetched.  */
3788       struct type *type = check_typedef (value_type (val));
3789       struct value *parent = value_parent (val);
3790 
3791       if (value_lazy (parent))
3792 	value_fetch_lazy (parent);
3793 
3794       unpack_value_bitfield (val,
3795 			     value_bitpos (val), value_bitsize (val),
3796 			     value_contents_for_printing (parent),
3797 			     value_offset (val), parent);
3798     }
3799   else if (VALUE_LVAL (val) == lval_memory)
3800     {
3801       CORE_ADDR addr = value_address (val);
3802       struct type *type = check_typedef (value_enclosing_type (val));
3803 
3804       if (TYPE_LENGTH (type))
3805 	read_value_memory (val, 0, value_stack (val),
3806 			   addr, value_contents_all_raw (val),
3807 			   TYPE_LENGTH (type));
3808     }
3809   else if (VALUE_LVAL (val) == lval_register)
3810     {
3811       struct frame_info *frame;
3812       int regnum;
3813       struct type *type = check_typedef (value_type (val));
3814       struct value *new_val = val, *mark = value_mark ();
3815 
3816       /* Offsets are not supported here; lazy register values must
3817 	 refer to the entire register.  */
3818       gdb_assert (value_offset (val) == 0);
3819 
3820       while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3821 	{
3822 	  struct frame_id frame_id = VALUE_FRAME_ID (new_val);
3823 
3824 	  frame = frame_find_by_id (frame_id);
3825 	  regnum = VALUE_REGNUM (new_val);
3826 
3827 	  gdb_assert (frame != NULL);
3828 
3829 	  /* Convertible register routines are used for multi-register
3830 	     values and for interpretation in different types
3831 	     (e.g. float or int from a double register).  Lazy
3832 	     register values should have the register's natural type,
3833 	     so they do not apply.  */
3834 	  gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
3835 						   regnum, type));
3836 
3837 	  new_val = get_frame_register_value (frame, regnum);
3838 
3839 	  /* If we get another lazy lval_register value, it means the
3840 	     register is found by reading it from the next frame.
3841 	     get_frame_register_value should never return a value with
3842 	     the frame id pointing to FRAME.  If it does, it means we
3843 	     either have two consecutive frames with the same frame id
3844 	     in the frame chain, or some code is trying to unwind
3845 	     behind get_prev_frame's back (e.g., a frame unwind
3846 	     sniffer trying to unwind), bypassing its validations.  In
3847 	     any case, it should always be an internal error to end up
3848 	     in this situation.  */
3849 	  if (VALUE_LVAL (new_val) == lval_register
3850 	      && value_lazy (new_val)
3851 	      && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
3852 	    internal_error (__FILE__, __LINE__,
3853 			    _("infinite loop while fetching a register"));
3854 	}
3855 
3856       /* If it's still lazy (for instance, a saved register on the
3857 	 stack), fetch it.  */
3858       if (value_lazy (new_val))
3859 	value_fetch_lazy (new_val);
3860 
3861       /* Copy the contents and the unavailability/optimized-out
3862 	 meta-data from NEW_VAL to VAL.  */
3863       set_value_lazy (val, 0);
3864       value_contents_copy (val, value_embedded_offset (val),
3865 			   new_val, value_embedded_offset (new_val),
3866 			   TYPE_LENGTH (type));
3867 
3868       if (frame_debug)
3869 	{
3870 	  struct gdbarch *gdbarch;
3871 	  frame = frame_find_by_id (VALUE_FRAME_ID (val));
3872 	  regnum = VALUE_REGNUM (val);
3873 	  gdbarch = get_frame_arch (frame);
3874 
3875 	  fprintf_unfiltered (gdb_stdlog,
3876 			      "{ value_fetch_lazy "
3877 			      "(frame=%d,regnum=%d(%s),...) ",
3878 			      frame_relative_level (frame), regnum,
3879 			      user_reg_map_regnum_to_name (gdbarch, regnum));
3880 
3881 	  fprintf_unfiltered (gdb_stdlog, "->");
3882 	  if (value_optimized_out (new_val))
3883 	    {
3884 	      fprintf_unfiltered (gdb_stdlog, " ");
3885 	      val_print_optimized_out (new_val, gdb_stdlog);
3886 	    }
3887 	  else
3888 	    {
3889 	      int i;
3890 	      const gdb_byte *buf = value_contents (new_val);
3891 
3892 	      if (VALUE_LVAL (new_val) == lval_register)
3893 		fprintf_unfiltered (gdb_stdlog, " register=%d",
3894 				    VALUE_REGNUM (new_val));
3895 	      else if (VALUE_LVAL (new_val) == lval_memory)
3896 		fprintf_unfiltered (gdb_stdlog, " address=%s",
3897 				    paddress (gdbarch,
3898 					      value_address (new_val)));
3899 	      else
3900 		fprintf_unfiltered (gdb_stdlog, " computed");
3901 
3902 	      fprintf_unfiltered (gdb_stdlog, " bytes=");
3903 	      fprintf_unfiltered (gdb_stdlog, "[");
3904 	      for (i = 0; i < register_size (gdbarch, regnum); i++)
3905 		fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3906 	      fprintf_unfiltered (gdb_stdlog, "]");
3907 	    }
3908 
3909 	  fprintf_unfiltered (gdb_stdlog, " }\n");
3910 	}
3911 
3912       /* Dispose of the intermediate values.  This prevents
3913 	 watchpoints from trying to watch the saved frame pointer.  */
3914       value_free_to_mark (mark);
3915     }
3916   else if (VALUE_LVAL (val) == lval_computed
3917 	   && value_computed_funcs (val)->read != NULL)
3918     value_computed_funcs (val)->read (val);
3919   else
3920     internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3921 
3922   set_value_lazy (val, 0);
3923   return 0;
3924 }
3925 
3926 /* Implementation of the convenience function $_isvoid.  */
3927 
3928 static struct value *
3929 isvoid_internal_fn (struct gdbarch *gdbarch,
3930 		    const struct language_defn *language,
3931 		    void *cookie, int argc, struct value **argv)
3932 {
3933   int ret;
3934 
3935   if (argc != 1)
3936     error (_("You must provide one argument for $_isvoid."));
3937 
3938   ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3939 
3940   return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3941 }
3942 
3943 void
3944 _initialize_values (void)
3945 {
3946   add_cmd ("convenience", no_class, show_convenience, _("\
3947 Debugger convenience (\"$foo\") variables and functions.\n\
3948 Convenience variables are created when you assign them values;\n\
3949 thus, \"set $foo=1\" gives \"$foo\" the value 1.  Values may be any type.\n\
3950 \n\
3951 A few convenience variables are given values automatically:\n\
3952 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3953 \"$__\" holds the contents of the last address examined with \"x\"."
3954 #ifdef HAVE_PYTHON
3955 "\n\n\
3956 Convenience functions are defined via the Python API."
3957 #endif
3958 	   ), &showlist);
3959   add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
3960 
3961   add_cmd ("values", no_set_class, show_values, _("\
3962 Elements of value history around item number IDX (or last ten)."),
3963 	   &showlist);
3964 
3965   add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
3966 Initialize a convenience variable if necessary.\n\
3967 init-if-undefined VARIABLE = EXPRESSION\n\
3968 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3969 exist or does not contain a value.  The EXPRESSION is not evaluated if the\n\
3970 VARIABLE is already initialized."));
3971 
3972   add_prefix_cmd ("function", no_class, function_command, _("\
3973 Placeholder command for showing help on convenience functions."),
3974 		  &functionlist, "function ", 0, &cmdlist);
3975 
3976   add_internal_function ("_isvoid", _("\
3977 Check whether an expression is void.\n\
3978 Usage: $_isvoid (expression)\n\
3979 Return 1 if the expression is void, zero otherwise."),
3980 			 isvoid_internal_fn, NULL);
3981 }
3982