1 /* Low level packing and unpacking of values for GDB, the GNU Debugger. 2 3 Copyright (C) 1986-2014 Free Software Foundation, Inc. 4 5 This file is part of GDB. 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License as published by 9 the Free Software Foundation; either version 3 of the License, or 10 (at your option) any later version. 11 12 This program is distributed in the hope that it will be useful, 13 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 GNU General Public License for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */ 19 20 #include "defs.h" 21 #include "arch-utils.h" 22 #include <string.h> 23 #include "symtab.h" 24 #include "gdbtypes.h" 25 #include "value.h" 26 #include "gdbcore.h" 27 #include "command.h" 28 #include "gdbcmd.h" 29 #include "target.h" 30 #include "language.h" 31 #include "demangle.h" 32 #include "doublest.h" 33 #include "gdb_assert.h" 34 #include "regcache.h" 35 #include "block.h" 36 #include "dfp.h" 37 #include "objfiles.h" 38 #include "valprint.h" 39 #include "cli/cli-decode.h" 40 #include "exceptions.h" 41 #include "python/python.h" 42 #include <ctype.h> 43 #include "tracepoint.h" 44 #include "cp-abi.h" 45 #include "user-regs.h" 46 47 /* Prototypes for exported functions. */ 48 49 void _initialize_values (void); 50 51 /* Definition of a user function. */ 52 struct internal_function 53 { 54 /* The name of the function. It is a bit odd to have this in the 55 function itself -- the user might use a differently-named 56 convenience variable to hold the function. */ 57 char *name; 58 59 /* The handler. */ 60 internal_function_fn handler; 61 62 /* User data for the handler. */ 63 void *cookie; 64 }; 65 66 /* Defines an [OFFSET, OFFSET + LENGTH) range. */ 67 68 struct range 69 { 70 /* Lowest offset in the range. */ 71 int offset; 72 73 /* Length of the range. */ 74 int length; 75 }; 76 77 typedef struct range range_s; 78 79 DEF_VEC_O(range_s); 80 81 /* Returns true if the ranges defined by [offset1, offset1+len1) and 82 [offset2, offset2+len2) overlap. */ 83 84 static int 85 ranges_overlap (int offset1, int len1, 86 int offset2, int len2) 87 { 88 ULONGEST h, l; 89 90 l = max (offset1, offset2); 91 h = min (offset1 + len1, offset2 + len2); 92 return (l < h); 93 } 94 95 /* Returns true if the first argument is strictly less than the 96 second, useful for VEC_lower_bound. We keep ranges sorted by 97 offset and coalesce overlapping and contiguous ranges, so this just 98 compares the starting offset. */ 99 100 static int 101 range_lessthan (const range_s *r1, const range_s *r2) 102 { 103 return r1->offset < r2->offset; 104 } 105 106 /* Returns true if RANGES contains any range that overlaps [OFFSET, 107 OFFSET+LENGTH). */ 108 109 static int 110 ranges_contain (VEC(range_s) *ranges, int offset, int length) 111 { 112 range_s what; 113 int i; 114 115 what.offset = offset; 116 what.length = length; 117 118 /* We keep ranges sorted by offset and coalesce overlapping and 119 contiguous ranges, so to check if a range list contains a given 120 range, we can do a binary search for the position the given range 121 would be inserted if we only considered the starting OFFSET of 122 ranges. We call that position I. 
     Since we also have LENGTH to
     care for (this is a range after all), we need to check if the
     _previous_ range overlaps the I range.  E.g.,

          R
          |---|
       |---|    |---|  |------| ... |--|
       0        1      2            N

       I=1

     In the case above, the binary search would return `I=1', meaning,
     this OFFSET should be inserted at position 1, and the current
     position 1 should be pushed further (and before 2).  But, `0'
     overlaps with R.

     We also need to check whether the range at position I itself
     overlaps the given range.  E.g.,

                R
                |---|
       |---|    |---|  |-------| ... |--|
       0        1      2             N

       I=1
  */

  i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);

  if (i > 0)
    {
      struct range *bef = VEC_index (range_s, ranges, i - 1);

      if (ranges_overlap (bef->offset, bef->length, offset, length))
        return 1;
    }

  if (i < VEC_length (range_s, ranges))
    {
      struct range *r = VEC_index (range_s, ranges, i);

      if (ranges_overlap (r->offset, r->length, offset, length))
        return 1;
    }

  return 0;
}

static struct cmd_list_element *functionlist;

/* Note that the fields in this structure are arranged to save a bit
   of memory.  */

struct value
{
  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way the lazy flag is set
     and reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If nonzero, this is the value of a variable that does not
     actually exist in the program.  If nonzero, and LVAL is
     lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  All
     optimized-out values are treated pretty much the same, except
     registers have a different string representation and related
     error strings.  */
  unsigned int optimized_out : 1;

  /* If value is a variable, whether it is initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* If the value has been released.  */
  unsigned int released : 1;

  /* Location of value (if lval).  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.
       If lval == lval_register, this is the byte offset into the
       registers structure.  */
    CORE_ADDR address;

    /* Pointer to internal variable.
*/ 228 struct internalvar *internalvar; 229 230 /* If lval == lval_computed, this is a set of function pointers 231 to use to access and describe the value, and a closure pointer 232 for them to use. */ 233 struct 234 { 235 /* Functions to call. */ 236 const struct lval_funcs *funcs; 237 238 /* Closure for those functions to use. */ 239 void *closure; 240 } computed; 241 } location; 242 243 /* Describes offset of a value within lval of a structure in bytes. 244 If lval == lval_memory, this is an offset to the address. If 245 lval == lval_register, this is a further offset from 246 location.address within the registers structure. Note also the 247 member embedded_offset below. */ 248 int offset; 249 250 /* Only used for bitfields; number of bits contained in them. */ 251 int bitsize; 252 253 /* Only used for bitfields; position of start of field. For 254 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For 255 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */ 256 int bitpos; 257 258 /* The number of references to this value. When a value is created, 259 the value chain holds a reference, so REFERENCE_COUNT is 1. If 260 release_value is called, this value is removed from the chain but 261 the caller of release_value now has a reference to this value. 262 The caller must arrange for a call to value_free later. */ 263 int reference_count; 264 265 /* Only used for bitfields; the containing value. This allows a 266 single read from the target when displaying multiple 267 bitfields. */ 268 struct value *parent; 269 270 /* Frame register value is relative to. This will be described in 271 the lval enum above as "lval_register". */ 272 struct frame_id frame_id; 273 274 /* Type of the value. */ 275 struct type *type; 276 277 /* If a value represents a C++ object, then the `type' field gives 278 the object's compile-time type. If the object actually belongs 279 to some class derived from `type', perhaps with other base 280 classes and additional members, then `type' is just a subobject 281 of the real thing, and the full object is probably larger than 282 `type' would suggest. 283 284 If `type' is a dynamic class (i.e. one with a vtable), then GDB 285 can actually determine the object's run-time type by looking at 286 the run-time type information in the vtable. When this 287 information is available, we may elect to read in the entire 288 object, for several reasons: 289 290 - When printing the value, the user would probably rather see the 291 full object, not just the limited portion apparent from the 292 compile-time type. 293 294 - If `type' has virtual base classes, then even printing `type' 295 alone may require reaching outside the `type' portion of the 296 object to wherever the virtual base class has been stored. 297 298 When we store the entire object, `enclosing_type' is the run-time 299 type -- the complete object -- and `embedded_offset' is the 300 offset of `type' within that larger type, in bytes. The 301 value_contents() macro takes `embedded_offset' into account, so 302 most GDB code continues to see the `type' portion of the value, 303 just as the inferior would. 304 305 If `type' is a pointer to an object, then `enclosing_type' is a 306 pointer to the object's run-time type, and `pointed_to_offset' is 307 the offset in bytes from the full object to the pointed-to object 308 -- that is, the value `embedded_offset' would have if we followed 309 the pointer and fetched the complete object. (I don't really see 310 the point. 
     Why not just determine the run-time type when you
     indirect, and avoid the special case?  The contents don't matter
     until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  int embedded_offset;
  int pointed_to_offset;

  /* Values are stored in a chain, so that they can be deleted easily
     over calls to the inferior.  Values assigned to internal
     variables, put into the value history or exposed to Python are
     taken off this list.  */
  struct value *next;

  /* Register number if the value is from a register.  */
  short regnum;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb_byte *contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  */
  VEC(range_s) *unavailable;
};

int
value_bits_available (const struct value *value, int offset, int length)
{
  gdb_assert (!value->lazy);

  return !ranges_contain (value->unavailable, offset, length);
}

int
value_bytes_available (const struct value *value, int offset, int length)
{
  return value_bits_available (value,
                               offset * TARGET_CHAR_BIT,
                               length * TARGET_CHAR_BIT);
}

int
value_entirely_available (struct value *value)
{
  /* We can only tell whether the whole value is available when we try
     to read it.  */
  if (value->lazy)
    value_fetch_lazy (value);

  if (VEC_empty (range_s, value->unavailable))
    return 1;
  return 0;
}

int
value_entirely_unavailable (struct value *value)
{
  /* We can only tell whether the whole value is available when we try
     to read it.  */
  if (value->lazy)
    value_fetch_lazy (value);

  if (VEC_length (range_s, value->unavailable) == 1)
    {
      struct range *t = VEC_index (range_s, value->unavailable, 0);

      if (t->offset == 0
          && t->length == (TARGET_CHAR_BIT
                           * TYPE_LENGTH (value_enclosing_type (value))))
        return 1;
    }

  return 0;
}

void
mark_value_bits_unavailable (struct value *value, int offset, int length)
{
  range_s newr;
  int i;

  /* Insert the range sorted.  If there's overlap or the new range
     would be contiguous with an existing range, merge.  */

  newr.offset = offset;
  newr.length = length;

  /* Do a binary search for the position the given range would be
     inserted if we only considered the starting OFFSET of ranges.
     Call that position I.  Since we also have LENGTH to care for
     (this is a range after all), we need to check if the _previous_
     range overlaps the I range.  E.g., calling R the new range:

       #1 - overlaps with previous

             R
             |-...-|
          |---|     |---|  |------| ... |--|
          0         1      2            N

          I=1

     In the case #1 above, the binary search would return `I=1',
     meaning, this OFFSET should be inserted at position 1, and the
     current position 1 should be pushed further (and become 2).  But,
     note that `0' overlaps with R, so we want to merge them.

     A similar consideration needs to be taken if the new range would
     be contiguous with the previous range:

       #2 - contiguous with previous

              R
              |-...-|
          |--|         |---|  |------| ... |--|
          0            1      2            N

          I=1

     If there's no overlap with the previous range, as in:

       #3 - not overlapping and not contiguous

                 R
                 |-...-|
          |--|         |---|  |------| ... |--|
          0            1      2            N

          I=1

     or if I is 0:

       #4 - R is the range with lowest offset

          R
          |-...-|
                  |--|   |---|  |------| ... |--|
                  0      1      2            N

          I=0

     ... we just push the new range to I.

     All four cases above need to consider that the new range may
     also overlap several of the ranges that follow, or that R may be
     contiguous with the following range, and merge.  E.g.,

       #5 - overlapping following ranges

          R
          |------------------------------|
             |--|   |---|  |------| ... |--|
             0      1      2            N

          I=0

     or:

                 R
                 |-------|
          |--|   |---|  |------| ... |--|
          0      1      2            N

          I=1

  */

  i = VEC_lower_bound (range_s, value->unavailable, &newr, range_lessthan);
  if (i > 0)
    {
      struct range *bef = VEC_index (range_s, value->unavailable, i - 1);

      if (ranges_overlap (bef->offset, bef->length, offset, length))
        {
          /* #1 */
          ULONGEST l = min (bef->offset, offset);
          ULONGEST h = max (bef->offset + bef->length, offset + length);

          bef->offset = l;
          bef->length = h - l;
          i--;
        }
      else if (offset == bef->offset + bef->length)
        {
          /* #2 */
          bef->length += length;
          i--;
        }
      else
        {
          /* #3 */
          VEC_safe_insert (range_s, value->unavailable, i, &newr);
        }
    }
  else
    {
      /* #4 */
      VEC_safe_insert (range_s, value->unavailable, i, &newr);
    }

  /* Check whether the ranges following the one we've just added or
     touched can be folded in (#5 above).  */
  if (i + 1 < VEC_length (range_s, value->unavailable))
    {
      struct range *t;
      struct range *r;
      int removed = 0;
      int next = i + 1;

      /* Get the range we just touched.  */
      t = VEC_index (range_s, value->unavailable, i);
      removed = 0;

      i = next;
      for (; VEC_iterate (range_s, value->unavailable, i, r); i++)
        if (r->offset <= t->offset + t->length)
          {
            ULONGEST l, h;

            l = min (t->offset, r->offset);
            h = max (t->offset + t->length, r->offset + r->length);

            t->offset = l;
            t->length = h - l;

            removed++;
          }
        else
          {
            /* If we couldn't merge this one, we won't be able to
               merge following ones either, since the ranges are
               always sorted by OFFSET.  */
            break;
          }

      if (removed != 0)
        VEC_block_remove (range_s, value->unavailable, next, removed);
    }
}

void
mark_value_bytes_unavailable (struct value *value, int offset, int length)
{
  mark_value_bits_unavailable (value,
                               offset * TARGET_CHAR_BIT,
                               length * TARGET_CHAR_BIT);
}

/* Find the first range in RANGES that overlaps the range defined by
   OFFSET and LENGTH, starting at element POS in the RANGES vector.
   Returns the index into RANGES where such overlapping range was
   found, or -1 if none was found.
   */

static int
find_first_range_overlap (VEC(range_s) *ranges, int pos,
                          int offset, int length)
{
  range_s *r;
  int i;

  for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
    if (ranges_overlap (r->offset, r->length, offset, length))
      return i;

  return -1;
}

/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
            / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
                         const gdb_byte *ptr2, size_t offset2_bits,
                         size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
              == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
         number of bytes.  A number of bits up to either the next exact
         byte boundary, or LENGTH_BITS (whichever comes first) will be
         compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
        {
          mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
          bits = length_bits;
        }

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
        return 1;

      /* Now update the length and offsets to take account of the bits
         we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
         if block, the offsets are byte aligned, or the length is zero
         (in which case this code is not reached).  Compare a number of
         bits at the end of the region, starting from an exact byte
         boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
        return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray bits at the start or end of
         the region to compare; the remainder can be covered with a
         simple memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
                     ptr2 + offset2_bits / TARGET_CHAR_BIT,
                     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.
     */
  return 0;
}

/* Helper function for value_available_contents_eq.  The only difference is
   that this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits with
   LENGTH bits of VAL2's contents starting at OFFSET2 bits.  Return true
   if the available bits match.  */

static int
value_available_contents_bits_eq (const struct value *val1, int offset1,
                                  const struct value *val2, int offset2,
                                  int length)
{
  int idx1 = 0, idx2 = 0;

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  while (length > 0)
    {
      range_s *r1, *r2;
      ULONGEST l1, h1;
      ULONGEST l2, h2;

      idx1 = find_first_range_overlap (val1->unavailable, idx1,
                                       offset1, length);
      idx2 = find_first_range_overlap (val2->unavailable, idx2,
                                       offset2, length);

      /* The usual case is for both values to be completely available.  */
      if (idx1 == -1 && idx2 == -1)
        return (memcmp_with_bit_offsets (val1->contents, offset1,
                                         val2->contents, offset2,
                                         length) == 0);
      /* The contents can only match if the available set matches as
         well.  */
      else if (idx1 == -1 || idx2 == -1)
        return 0;

      gdb_assert (idx1 != -1 && idx2 != -1);

      r1 = VEC_index (range_s, val1->unavailable, idx1);
      r2 = VEC_index (range_s, val2->unavailable, idx2);

      /* Get the unavailable windows intersected by the incoming
         ranges.  The first and last ranges that overlap the argument
         range may be wider than the incoming argument ranges.  */
      l1 = max (offset1, r1->offset);
      h1 = min (offset1 + length, r1->offset + r1->length);

      l2 = max (offset2, r2->offset);
      h2 = min (offset2 + length, r2->offset + r2->length);

      /* Make them relative to the respective start offsets, so we can
         compare them for equality.  */
      l1 -= offset1;
      h1 -= offset1;

      l2 -= offset2;
      h2 -= offset2;

      /* Different availability, no match.  */
      if (l1 != l2 || h1 != h2)
        return 0;

      /* Compare the _available_ contents.  */
      if (memcmp_with_bit_offsets (val1->contents, offset1,
                                   val2->contents, offset2, l1) != 0)
        return 0;

      length -= h1;
      offset1 += h1;
      offset2 += h1;
    }

  return 1;
}

int
value_available_contents_eq (const struct value *val1, int offset1,
                             const struct value *val2, int offset2,
                             int length)
{
  return value_available_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
                                           val2, offset2 * TARGET_CHAR_BIT,
                                           length * TARGET_CHAR_BIT);
}

/* Prototypes for local functions.  */

static void show_values (char *, int);

static void show_convenience (char *, int);


/* The value-history records all the values printed
   by print commands during this session.  Each chunk
   records 60 consecutive values.  The first chunk on
   the chain records the most recent values.
   The total number of values is in value_history_count.  */

#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
  {
    struct value_history_chunk *next;
    struct value *values[VALUE_HISTORY_CHUNK];
  };

/* Chain of chunks now in use.  */

static struct value_history_chunk *value_history_chain;

static int value_history_count;  /* Abs number of last entry stored.
*/ 797 798 799 /* List of all value objects currently allocated 800 (except for those released by calls to release_value) 801 This is so they can be freed after each command. */ 802 803 static struct value *all_values; 804 805 /* Allocate a lazy value for type TYPE. Its actual content is 806 "lazily" allocated too: the content field of the return value is 807 NULL; it will be allocated when it is fetched from the target. */ 808 809 struct value * 810 allocate_value_lazy (struct type *type) 811 { 812 struct value *val; 813 814 /* Call check_typedef on our type to make sure that, if TYPE 815 is a TYPE_CODE_TYPEDEF, its length is set to the length 816 of the target type instead of zero. However, we do not 817 replace the typedef type by the target type, because we want 818 to keep the typedef in order to be able to set the VAL's type 819 description correctly. */ 820 check_typedef (type); 821 822 val = (struct value *) xzalloc (sizeof (struct value)); 823 val->contents = NULL; 824 val->next = all_values; 825 all_values = val; 826 val->type = type; 827 val->enclosing_type = type; 828 VALUE_LVAL (val) = not_lval; 829 val->location.address = 0; 830 VALUE_FRAME_ID (val) = null_frame_id; 831 val->offset = 0; 832 val->bitpos = 0; 833 val->bitsize = 0; 834 VALUE_REGNUM (val) = -1; 835 val->lazy = 1; 836 val->optimized_out = 0; 837 val->embedded_offset = 0; 838 val->pointed_to_offset = 0; 839 val->modifiable = 1; 840 val->initialized = 1; /* Default to initialized. */ 841 842 /* Values start out on the all_values chain. */ 843 val->reference_count = 1; 844 845 return val; 846 } 847 848 /* Allocate the contents of VAL if it has not been allocated yet. */ 849 850 static void 851 allocate_value_contents (struct value *val) 852 { 853 if (!val->contents) 854 val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)); 855 } 856 857 /* Allocate a value and its contents for type TYPE. */ 858 859 struct value * 860 allocate_value (struct type *type) 861 { 862 struct value *val = allocate_value_lazy (type); 863 864 allocate_value_contents (val); 865 val->lazy = 0; 866 return val; 867 } 868 869 /* Allocate a value that has the correct length 870 for COUNT repetitions of type TYPE. */ 871 872 struct value * 873 allocate_repeat_value (struct type *type, int count) 874 { 875 int low_bound = current_language->string_lower_bound; /* ??? */ 876 /* FIXME-type-allocation: need a way to free this type when we are 877 done with it. */ 878 struct type *array_type 879 = lookup_array_range_type (type, low_bound, count + low_bound - 1); 880 881 return allocate_value (array_type); 882 } 883 884 struct value * 885 allocate_computed_value (struct type *type, 886 const struct lval_funcs *funcs, 887 void *closure) 888 { 889 struct value *v = allocate_value_lazy (type); 890 891 VALUE_LVAL (v) = lval_computed; 892 v->location.computed.funcs = funcs; 893 v->location.computed.closure = closure; 894 895 return v; 896 } 897 898 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */ 899 900 struct value * 901 allocate_optimized_out_value (struct type *type) 902 { 903 struct value *retval = allocate_value_lazy (type); 904 905 set_value_optimized_out (retval, 1); 906 set_value_lazy (retval, 0); 907 return retval; 908 } 909 910 /* Accessor methods. 
*/ 911 912 struct value * 913 value_next (struct value *value) 914 { 915 return value->next; 916 } 917 918 struct type * 919 value_type (const struct value *value) 920 { 921 return value->type; 922 } 923 void 924 deprecated_set_value_type (struct value *value, struct type *type) 925 { 926 value->type = type; 927 } 928 929 int 930 value_offset (const struct value *value) 931 { 932 return value->offset; 933 } 934 void 935 set_value_offset (struct value *value, int offset) 936 { 937 value->offset = offset; 938 } 939 940 int 941 value_bitpos (const struct value *value) 942 { 943 return value->bitpos; 944 } 945 void 946 set_value_bitpos (struct value *value, int bit) 947 { 948 value->bitpos = bit; 949 } 950 951 int 952 value_bitsize (const struct value *value) 953 { 954 return value->bitsize; 955 } 956 void 957 set_value_bitsize (struct value *value, int bit) 958 { 959 value->bitsize = bit; 960 } 961 962 struct value * 963 value_parent (struct value *value) 964 { 965 return value->parent; 966 } 967 968 /* See value.h. */ 969 970 void 971 set_value_parent (struct value *value, struct value *parent) 972 { 973 struct value *old = value->parent; 974 975 value->parent = parent; 976 if (parent != NULL) 977 value_incref (parent); 978 value_free (old); 979 } 980 981 gdb_byte * 982 value_contents_raw (struct value *value) 983 { 984 allocate_value_contents (value); 985 return value->contents + value->embedded_offset; 986 } 987 988 gdb_byte * 989 value_contents_all_raw (struct value *value) 990 { 991 allocate_value_contents (value); 992 return value->contents; 993 } 994 995 struct type * 996 value_enclosing_type (struct value *value) 997 { 998 return value->enclosing_type; 999 } 1000 1001 /* Look at value.h for description. */ 1002 1003 struct type * 1004 value_actual_type (struct value *value, int resolve_simple_types, 1005 int *real_type_found) 1006 { 1007 struct value_print_options opts; 1008 struct type *result; 1009 1010 get_user_print_options (&opts); 1011 1012 if (real_type_found) 1013 *real_type_found = 0; 1014 result = value_type (value); 1015 if (opts.objectprint) 1016 { 1017 /* If result's target type is TYPE_CODE_STRUCT, proceed to 1018 fetch its rtti type. 
*/ 1019 if ((TYPE_CODE (result) == TYPE_CODE_PTR 1020 || TYPE_CODE (result) == TYPE_CODE_REF) 1021 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result))) 1022 == TYPE_CODE_STRUCT) 1023 { 1024 struct type *real_type; 1025 1026 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL); 1027 if (real_type) 1028 { 1029 if (real_type_found) 1030 *real_type_found = 1; 1031 result = real_type; 1032 } 1033 } 1034 else if (resolve_simple_types) 1035 { 1036 if (real_type_found) 1037 *real_type_found = 1; 1038 result = value_enclosing_type (value); 1039 } 1040 } 1041 1042 return result; 1043 } 1044 1045 void 1046 error_value_optimized_out (void) 1047 { 1048 error (_("value has been optimized out")); 1049 } 1050 1051 static void 1052 require_not_optimized_out (const struct value *value) 1053 { 1054 if (value->optimized_out) 1055 { 1056 if (value->lval == lval_register) 1057 error (_("register has not been saved in frame")); 1058 else 1059 error_value_optimized_out (); 1060 } 1061 } 1062 1063 static void 1064 require_available (const struct value *value) 1065 { 1066 if (!VEC_empty (range_s, value->unavailable)) 1067 throw_error (NOT_AVAILABLE_ERROR, _("value is not available")); 1068 } 1069 1070 const gdb_byte * 1071 value_contents_for_printing (struct value *value) 1072 { 1073 if (value->lazy) 1074 value_fetch_lazy (value); 1075 return value->contents; 1076 } 1077 1078 const gdb_byte * 1079 value_contents_for_printing_const (const struct value *value) 1080 { 1081 gdb_assert (!value->lazy); 1082 return value->contents; 1083 } 1084 1085 const gdb_byte * 1086 value_contents_all (struct value *value) 1087 { 1088 const gdb_byte *result = value_contents_for_printing (value); 1089 require_not_optimized_out (value); 1090 require_available (value); 1091 return result; 1092 } 1093 1094 /* Copy LENGTH bytes of SRC value's (all) contents 1095 (value_contents_all) starting at SRC_OFFSET, into DST value's (all) 1096 contents, starting at DST_OFFSET. If unavailable contents are 1097 being copied from SRC, the corresponding DST contents are marked 1098 unavailable accordingly. Neither DST nor SRC may be lazy 1099 values. 1100 1101 It is assumed the contents of DST in the [DST_OFFSET, 1102 DST_OFFSET+LENGTH) range are wholly available. */ 1103 1104 void 1105 value_contents_copy_raw (struct value *dst, int dst_offset, 1106 struct value *src, int src_offset, int length) 1107 { 1108 range_s *r; 1109 int i; 1110 int src_bit_offset, dst_bit_offset, bit_length; 1111 1112 /* A lazy DST would make that this copy operation useless, since as 1113 soon as DST's contents were un-lazied (by a later value_contents 1114 call, say), the contents would be overwritten. A lazy SRC would 1115 mean we'd be copying garbage. */ 1116 gdb_assert (!dst->lazy && !src->lazy); 1117 1118 /* The overwritten DST range gets unavailability ORed in, not 1119 replaced. Make sure to remember to implement replacing if it 1120 turns out actually necessary. */ 1121 gdb_assert (value_bytes_available (dst, dst_offset, length)); 1122 1123 /* Copy the data. */ 1124 memcpy (value_contents_all_raw (dst) + dst_offset, 1125 value_contents_all_raw (src) + src_offset, 1126 length); 1127 1128 /* Copy the meta-data, adjusted. 
*/ 1129 src_bit_offset = src_offset * TARGET_CHAR_BIT; 1130 dst_bit_offset = dst_offset * TARGET_CHAR_BIT; 1131 bit_length = length * TARGET_CHAR_BIT; 1132 for (i = 0; VEC_iterate (range_s, src->unavailable, i, r); i++) 1133 { 1134 ULONGEST h, l; 1135 1136 l = max (r->offset, src_bit_offset); 1137 h = min (r->offset + r->length, src_bit_offset + bit_length); 1138 1139 if (l < h) 1140 mark_value_bits_unavailable (dst, 1141 dst_bit_offset + (l - src_bit_offset), 1142 h - l); 1143 } 1144 } 1145 1146 /* Copy LENGTH bytes of SRC value's (all) contents 1147 (value_contents_all) starting at SRC_OFFSET byte, into DST value's 1148 (all) contents, starting at DST_OFFSET. If unavailable contents 1149 are being copied from SRC, the corresponding DST contents are 1150 marked unavailable accordingly. DST must not be lazy. If SRC is 1151 lazy, it will be fetched now. If SRC is not valid (is optimized 1152 out), an error is thrown. 1153 1154 It is assumed the contents of DST in the [DST_OFFSET, 1155 DST_OFFSET+LENGTH) range are wholly available. */ 1156 1157 void 1158 value_contents_copy (struct value *dst, int dst_offset, 1159 struct value *src, int src_offset, int length) 1160 { 1161 require_not_optimized_out (src); 1162 1163 if (src->lazy) 1164 value_fetch_lazy (src); 1165 1166 value_contents_copy_raw (dst, dst_offset, src, src_offset, length); 1167 } 1168 1169 int 1170 value_lazy (struct value *value) 1171 { 1172 return value->lazy; 1173 } 1174 1175 void 1176 set_value_lazy (struct value *value, int val) 1177 { 1178 value->lazy = val; 1179 } 1180 1181 int 1182 value_stack (struct value *value) 1183 { 1184 return value->stack; 1185 } 1186 1187 void 1188 set_value_stack (struct value *value, int val) 1189 { 1190 value->stack = val; 1191 } 1192 1193 const gdb_byte * 1194 value_contents (struct value *value) 1195 { 1196 const gdb_byte *result = value_contents_writeable (value); 1197 require_not_optimized_out (value); 1198 require_available (value); 1199 return result; 1200 } 1201 1202 gdb_byte * 1203 value_contents_writeable (struct value *value) 1204 { 1205 if (value->lazy) 1206 value_fetch_lazy (value); 1207 return value_contents_raw (value); 1208 } 1209 1210 /* Return non-zero if VAL1 and VAL2 have the same contents. Note that 1211 this function is different from value_equal; in C the operator == 1212 can return 0 even if the two values being compared are equal. */ 1213 1214 int 1215 value_contents_equal (struct value *val1, struct value *val2) 1216 { 1217 struct type *type1; 1218 struct type *type2; 1219 1220 type1 = check_typedef (value_type (val1)); 1221 type2 = check_typedef (value_type (val2)); 1222 if (TYPE_LENGTH (type1) != TYPE_LENGTH (type2)) 1223 return 0; 1224 1225 return (memcmp (value_contents (val1), value_contents (val2), 1226 TYPE_LENGTH (type1)) == 0); 1227 } 1228 1229 int 1230 value_optimized_out (struct value *value) 1231 { 1232 /* We can only know if a value is optimized out once we have tried to 1233 fetch it. 
*/ 1234 if (!value->optimized_out && value->lazy) 1235 value_fetch_lazy (value); 1236 1237 return value->optimized_out; 1238 } 1239 1240 int 1241 value_optimized_out_const (const struct value *value) 1242 { 1243 return value->optimized_out; 1244 } 1245 1246 void 1247 set_value_optimized_out (struct value *value, int val) 1248 { 1249 value->optimized_out = val; 1250 } 1251 1252 int 1253 value_entirely_optimized_out (const struct value *value) 1254 { 1255 if (!value->optimized_out) 1256 return 0; 1257 if (value->lval != lval_computed 1258 || !value->location.computed.funcs->check_any_valid) 1259 return 1; 1260 return !value->location.computed.funcs->check_any_valid (value); 1261 } 1262 1263 int 1264 value_bits_valid (const struct value *value, int offset, int length) 1265 { 1266 if (!value->optimized_out) 1267 return 1; 1268 if (value->lval != lval_computed 1269 || !value->location.computed.funcs->check_validity) 1270 return 0; 1271 return value->location.computed.funcs->check_validity (value, offset, 1272 length); 1273 } 1274 1275 int 1276 value_bits_synthetic_pointer (const struct value *value, 1277 int offset, int length) 1278 { 1279 if (value->lval != lval_computed 1280 || !value->location.computed.funcs->check_synthetic_pointer) 1281 return 0; 1282 return value->location.computed.funcs->check_synthetic_pointer (value, 1283 offset, 1284 length); 1285 } 1286 1287 int 1288 value_embedded_offset (struct value *value) 1289 { 1290 return value->embedded_offset; 1291 } 1292 1293 void 1294 set_value_embedded_offset (struct value *value, int val) 1295 { 1296 value->embedded_offset = val; 1297 } 1298 1299 int 1300 value_pointed_to_offset (struct value *value) 1301 { 1302 return value->pointed_to_offset; 1303 } 1304 1305 void 1306 set_value_pointed_to_offset (struct value *value, int val) 1307 { 1308 value->pointed_to_offset = val; 1309 } 1310 1311 const struct lval_funcs * 1312 value_computed_funcs (const struct value *v) 1313 { 1314 gdb_assert (value_lval_const (v) == lval_computed); 1315 1316 return v->location.computed.funcs; 1317 } 1318 1319 void * 1320 value_computed_closure (const struct value *v) 1321 { 1322 gdb_assert (v->lval == lval_computed); 1323 1324 return v->location.computed.closure; 1325 } 1326 1327 enum lval_type * 1328 deprecated_value_lval_hack (struct value *value) 1329 { 1330 return &value->lval; 1331 } 1332 1333 enum lval_type 1334 value_lval_const (const struct value *value) 1335 { 1336 return value->lval; 1337 } 1338 1339 CORE_ADDR 1340 value_address (const struct value *value) 1341 { 1342 if (value->lval == lval_internalvar 1343 || value->lval == lval_internalvar_component) 1344 return 0; 1345 if (value->parent != NULL) 1346 return value_address (value->parent) + value->offset; 1347 else 1348 return value->location.address + value->offset; 1349 } 1350 1351 CORE_ADDR 1352 value_raw_address (struct value *value) 1353 { 1354 if (value->lval == lval_internalvar 1355 || value->lval == lval_internalvar_component) 1356 return 0; 1357 return value->location.address; 1358 } 1359 1360 void 1361 set_value_address (struct value *value, CORE_ADDR addr) 1362 { 1363 gdb_assert (value->lval != lval_internalvar 1364 && value->lval != lval_internalvar_component); 1365 value->location.address = addr; 1366 } 1367 1368 struct internalvar ** 1369 deprecated_value_internalvar_hack (struct value *value) 1370 { 1371 return &value->location.internalvar; 1372 } 1373 1374 struct frame_id * 1375 deprecated_value_frame_id_hack (struct value *value) 1376 { 1377 return &value->frame_id; 1378 } 1379 
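/* Illustrative sketch, not part of GDB itself: for a component value
   carved out of a larger parent (e.g. a bitfield whose containing
   value was recorded with set_value_parent), value_address composes
   the parent's address with the component's byte offset.  A
   hypothetical caller could therefore write

       CORE_ADDR member_addr = value_address (member_val);

   and obtain the same address as

       value_address (value_parent (member_val)) + value_offset (member_val)

   whereas value_raw_address ignores the parent link and returns only
   the address recorded on the value itself, without applying the
   offset.  The names member_val and member_addr are placeholders for
   this example only.  */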
1380 short * 1381 deprecated_value_regnum_hack (struct value *value) 1382 { 1383 return &value->regnum; 1384 } 1385 1386 int 1387 deprecated_value_modifiable (struct value *value) 1388 { 1389 return value->modifiable; 1390 } 1391 1392 /* Return a mark in the value chain. All values allocated after the 1393 mark is obtained (except for those released) are subject to being freed 1394 if a subsequent value_free_to_mark is passed the mark. */ 1395 struct value * 1396 value_mark (void) 1397 { 1398 return all_values; 1399 } 1400 1401 /* Take a reference to VAL. VAL will not be deallocated until all 1402 references are released. */ 1403 1404 void 1405 value_incref (struct value *val) 1406 { 1407 val->reference_count++; 1408 } 1409 1410 /* Release a reference to VAL, which was acquired with value_incref. 1411 This function is also called to deallocate values from the value 1412 chain. */ 1413 1414 void 1415 value_free (struct value *val) 1416 { 1417 if (val) 1418 { 1419 gdb_assert (val->reference_count > 0); 1420 val->reference_count--; 1421 if (val->reference_count > 0) 1422 return; 1423 1424 /* If there's an associated parent value, drop our reference to 1425 it. */ 1426 if (val->parent != NULL) 1427 value_free (val->parent); 1428 1429 if (VALUE_LVAL (val) == lval_computed) 1430 { 1431 const struct lval_funcs *funcs = val->location.computed.funcs; 1432 1433 if (funcs->free_closure) 1434 funcs->free_closure (val); 1435 } 1436 1437 xfree (val->contents); 1438 VEC_free (range_s, val->unavailable); 1439 } 1440 xfree (val); 1441 } 1442 1443 /* Free all values allocated since MARK was obtained by value_mark 1444 (except for those released). */ 1445 void 1446 value_free_to_mark (struct value *mark) 1447 { 1448 struct value *val; 1449 struct value *next; 1450 1451 for (val = all_values; val && val != mark; val = next) 1452 { 1453 next = val->next; 1454 val->released = 1; 1455 value_free (val); 1456 } 1457 all_values = val; 1458 } 1459 1460 /* Free all the values that have been allocated (except for those released). 1461 Call after each command, successful or not. 1462 In practice this is called before each command, which is sufficient. */ 1463 1464 void 1465 free_all_values (void) 1466 { 1467 struct value *val; 1468 struct value *next; 1469 1470 for (val = all_values; val; val = next) 1471 { 1472 next = val->next; 1473 val->released = 1; 1474 value_free (val); 1475 } 1476 1477 all_values = 0; 1478 } 1479 1480 /* Frees all the elements in a chain of values. */ 1481 1482 void 1483 free_value_chain (struct value *v) 1484 { 1485 struct value *next; 1486 1487 for (; v; v = next) 1488 { 1489 next = value_next (v); 1490 value_free (v); 1491 } 1492 } 1493 1494 /* Remove VAL from the chain all_values 1495 so it will not be freed automatically. */ 1496 1497 void 1498 release_value (struct value *val) 1499 { 1500 struct value *v; 1501 1502 if (all_values == val) 1503 { 1504 all_values = val->next; 1505 val->next = NULL; 1506 val->released = 1; 1507 return; 1508 } 1509 1510 for (v = all_values; v; v = v->next) 1511 { 1512 if (v->next == val) 1513 { 1514 v->next = val->next; 1515 val->next = NULL; 1516 val->released = 1; 1517 break; 1518 } 1519 } 1520 } 1521 1522 /* If the value is not already released, release it. 1523 If the value is already released, increment its reference count. 1524 That is, this function ensures that the value is released from the 1525 value chain and that the caller owns a reference to it. 
*/ 1526 1527 void 1528 release_value_or_incref (struct value *val) 1529 { 1530 if (val->released) 1531 value_incref (val); 1532 else 1533 release_value (val); 1534 } 1535 1536 /* Release all values up to mark */ 1537 struct value * 1538 value_release_to_mark (struct value *mark) 1539 { 1540 struct value *val; 1541 struct value *next; 1542 1543 for (val = next = all_values; next; next = next->next) 1544 { 1545 if (next->next == mark) 1546 { 1547 all_values = next->next; 1548 next->next = NULL; 1549 return val; 1550 } 1551 next->released = 1; 1552 } 1553 all_values = 0; 1554 return val; 1555 } 1556 1557 /* Return a copy of the value ARG. 1558 It contains the same contents, for same memory address, 1559 but it's a different block of storage. */ 1560 1561 struct value * 1562 value_copy (struct value *arg) 1563 { 1564 struct type *encl_type = value_enclosing_type (arg); 1565 struct value *val; 1566 1567 if (value_lazy (arg)) 1568 val = allocate_value_lazy (encl_type); 1569 else 1570 val = allocate_value (encl_type); 1571 val->type = arg->type; 1572 VALUE_LVAL (val) = VALUE_LVAL (arg); 1573 val->location = arg->location; 1574 val->offset = arg->offset; 1575 val->bitpos = arg->bitpos; 1576 val->bitsize = arg->bitsize; 1577 VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg); 1578 VALUE_REGNUM (val) = VALUE_REGNUM (arg); 1579 val->lazy = arg->lazy; 1580 val->optimized_out = arg->optimized_out; 1581 val->embedded_offset = value_embedded_offset (arg); 1582 val->pointed_to_offset = arg->pointed_to_offset; 1583 val->modifiable = arg->modifiable; 1584 if (!value_lazy (val)) 1585 { 1586 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg), 1587 TYPE_LENGTH (value_enclosing_type (arg))); 1588 1589 } 1590 val->unavailable = VEC_copy (range_s, arg->unavailable); 1591 set_value_parent (val, arg->parent); 1592 if (VALUE_LVAL (val) == lval_computed) 1593 { 1594 const struct lval_funcs *funcs = val->location.computed.funcs; 1595 1596 if (funcs->copy_closure) 1597 val->location.computed.closure = funcs->copy_closure (val); 1598 } 1599 return val; 1600 } 1601 1602 /* Return a version of ARG that is non-lvalue. */ 1603 1604 struct value * 1605 value_non_lval (struct value *arg) 1606 { 1607 if (VALUE_LVAL (arg) != not_lval) 1608 { 1609 struct type *enc_type = value_enclosing_type (arg); 1610 struct value *val = allocate_value (enc_type); 1611 1612 memcpy (value_contents_all_raw (val), value_contents_all (arg), 1613 TYPE_LENGTH (enc_type)); 1614 val->type = arg->type; 1615 set_value_embedded_offset (val, value_embedded_offset (arg)); 1616 set_value_pointed_to_offset (val, value_pointed_to_offset (arg)); 1617 return val; 1618 } 1619 return arg; 1620 } 1621 1622 void 1623 set_value_component_location (struct value *component, 1624 const struct value *whole) 1625 { 1626 if (whole->lval == lval_internalvar) 1627 VALUE_LVAL (component) = lval_internalvar_component; 1628 else 1629 VALUE_LVAL (component) = whole->lval; 1630 1631 component->location = whole->location; 1632 if (whole->lval == lval_computed) 1633 { 1634 const struct lval_funcs *funcs = whole->location.computed.funcs; 1635 1636 if (funcs->copy_closure) 1637 component->location.computed.closure = funcs->copy_closure (whole); 1638 } 1639 } 1640 1641 1642 /* Access to the value history. */ 1643 1644 /* Record a new value in the value history. 1645 Returns the absolute history index of the entry. 1646 Result of -1 indicates the value was not saved; otherwise it is the 1647 value history index of this new item. 
*/ 1648 1649 int 1650 record_latest_value (struct value *val) 1651 { 1652 int i; 1653 1654 /* We don't want this value to have anything to do with the inferior anymore. 1655 In particular, "set $1 = 50" should not affect the variable from which 1656 the value was taken, and fast watchpoints should be able to assume that 1657 a value on the value history never changes. */ 1658 if (value_lazy (val)) 1659 value_fetch_lazy (val); 1660 /* We preserve VALUE_LVAL so that the user can find out where it was fetched 1661 from. This is a bit dubious, because then *&$1 does not just return $1 1662 but the current contents of that location. c'est la vie... */ 1663 val->modifiable = 0; 1664 release_value (val); 1665 1666 /* Here we treat value_history_count as origin-zero 1667 and applying to the value being stored now. */ 1668 1669 i = value_history_count % VALUE_HISTORY_CHUNK; 1670 if (i == 0) 1671 { 1672 struct value_history_chunk *new 1673 = (struct value_history_chunk *) 1674 1675 xmalloc (sizeof (struct value_history_chunk)); 1676 memset (new->values, 0, sizeof new->values); 1677 new->next = value_history_chain; 1678 value_history_chain = new; 1679 } 1680 1681 value_history_chain->values[i] = val; 1682 1683 /* Now we regard value_history_count as origin-one 1684 and applying to the value just stored. */ 1685 1686 return ++value_history_count; 1687 } 1688 1689 /* Return a copy of the value in the history with sequence number NUM. */ 1690 1691 struct value * 1692 access_value_history (int num) 1693 { 1694 struct value_history_chunk *chunk; 1695 int i; 1696 int absnum = num; 1697 1698 if (absnum <= 0) 1699 absnum += value_history_count; 1700 1701 if (absnum <= 0) 1702 { 1703 if (num == 0) 1704 error (_("The history is empty.")); 1705 else if (num == 1) 1706 error (_("There is only one value in the history.")); 1707 else 1708 error (_("History does not go back to $$%d."), -num); 1709 } 1710 if (absnum > value_history_count) 1711 error (_("History has not yet reached $%d."), absnum); 1712 1713 absnum--; 1714 1715 /* Now absnum is always absolute and origin zero. */ 1716 1717 chunk = value_history_chain; 1718 for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK 1719 - absnum / VALUE_HISTORY_CHUNK; 1720 i > 0; i--) 1721 chunk = chunk->next; 1722 1723 return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]); 1724 } 1725 1726 static void 1727 show_values (char *num_exp, int from_tty) 1728 { 1729 int i; 1730 struct value *val; 1731 static int num = 1; 1732 1733 if (num_exp) 1734 { 1735 /* "show values +" should print from the stored position. 1736 "show values <exp>" should print around value number <exp>. */ 1737 if (num_exp[0] != '+' || num_exp[1] != '\0') 1738 num = parse_and_eval_long (num_exp) - 5; 1739 } 1740 else 1741 { 1742 /* "show values" means print the last 10 values. */ 1743 num = value_history_count - 9; 1744 } 1745 1746 if (num <= 0) 1747 num = 1; 1748 1749 for (i = num; i < num + 10 && i <= value_history_count; i++) 1750 { 1751 struct value_print_options opts; 1752 1753 val = access_value_history (i); 1754 printf_filtered (("$%d = "), i); 1755 get_user_print_options (&opts); 1756 value_print (val, gdb_stdout, &opts); 1757 printf_filtered (("\n")); 1758 } 1759 1760 /* The next "show values +" should start after what we just printed. */ 1761 num += 10; 1762 1763 /* Hitting just return after this command should do the same thing as 1764 "show values +". If num_exp is null, this is unnecessary, since 1765 "show values +" is not useful after "show values". 
*/ 1766 if (from_tty && num_exp) 1767 { 1768 num_exp[0] = '+'; 1769 num_exp[1] = '\0'; 1770 } 1771 } 1772 1773 /* Internal variables. These are variables within the debugger 1774 that hold values assigned by debugger commands. 1775 The user refers to them with a '$' prefix 1776 that does not appear in the variable names stored internally. */ 1777 1778 struct internalvar 1779 { 1780 struct internalvar *next; 1781 char *name; 1782 1783 /* We support various different kinds of content of an internal variable. 1784 enum internalvar_kind specifies the kind, and union internalvar_data 1785 provides the data associated with this particular kind. */ 1786 1787 enum internalvar_kind 1788 { 1789 /* The internal variable is empty. */ 1790 INTERNALVAR_VOID, 1791 1792 /* The value of the internal variable is provided directly as 1793 a GDB value object. */ 1794 INTERNALVAR_VALUE, 1795 1796 /* A fresh value is computed via a call-back routine on every 1797 access to the internal variable. */ 1798 INTERNALVAR_MAKE_VALUE, 1799 1800 /* The internal variable holds a GDB internal convenience function. */ 1801 INTERNALVAR_FUNCTION, 1802 1803 /* The variable holds an integer value. */ 1804 INTERNALVAR_INTEGER, 1805 1806 /* The variable holds a GDB-provided string. */ 1807 INTERNALVAR_STRING, 1808 1809 } kind; 1810 1811 union internalvar_data 1812 { 1813 /* A value object used with INTERNALVAR_VALUE. */ 1814 struct value *value; 1815 1816 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */ 1817 struct 1818 { 1819 /* The functions to call. */ 1820 const struct internalvar_funcs *functions; 1821 1822 /* The function's user-data. */ 1823 void *data; 1824 } make_value; 1825 1826 /* The internal function used with INTERNALVAR_FUNCTION. */ 1827 struct 1828 { 1829 struct internal_function *function; 1830 /* True if this is the canonical name for the function. */ 1831 int canonical; 1832 } fn; 1833 1834 /* An integer value used with INTERNALVAR_INTEGER. */ 1835 struct 1836 { 1837 /* If type is non-NULL, it will be used as the type to generate 1838 a value for this internal variable. If type is NULL, a default 1839 integer type for the architecture is used. */ 1840 struct type *type; 1841 LONGEST val; 1842 } integer; 1843 1844 /* A string value used with INTERNALVAR_STRING. */ 1845 char *string; 1846 } u; 1847 }; 1848 1849 static struct internalvar *internalvars; 1850 1851 /* If the variable does not already exist create it and give it the 1852 value given. If no value is given then the default is zero. */ 1853 static void 1854 init_if_undefined_command (char* args, int from_tty) 1855 { 1856 struct internalvar* intvar; 1857 1858 /* Parse the expression - this is taken from set_command(). */ 1859 struct expression *expr = parse_expression (args); 1860 register struct cleanup *old_chain = 1861 make_cleanup (free_current_contents, &expr); 1862 1863 /* Validate the expression. 1864 Was the expression an assignment? 1865 Or even an expression at all? */ 1866 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN) 1867 error (_("Init-if-undefined requires an assignment expression.")); 1868 1869 /* Extract the variable from the parsed expression. 1870 In the case of an assign the lvalue will be in elts[1] and elts[2]. */ 1871 if (expr->elts[1].opcode != OP_INTERNALVAR) 1872 error (_("The first parameter to init-if-undefined " 1873 "should be a GDB variable.")); 1874 intvar = expr->elts[2].internalvar; 1875 1876 /* Only evaluate the expression if the lvalue is void. 
1877 This may still fail if the expresssion is invalid. */ 1878 if (intvar->kind == INTERNALVAR_VOID) 1879 evaluate_expression (expr); 1880 1881 do_cleanups (old_chain); 1882 } 1883 1884 1885 /* Look up an internal variable with name NAME. NAME should not 1886 normally include a dollar sign. 1887 1888 If the specified internal variable does not exist, 1889 the return value is NULL. */ 1890 1891 struct internalvar * 1892 lookup_only_internalvar (const char *name) 1893 { 1894 struct internalvar *var; 1895 1896 for (var = internalvars; var; var = var->next) 1897 if (strcmp (var->name, name) == 0) 1898 return var; 1899 1900 return NULL; 1901 } 1902 1903 /* Complete NAME by comparing it to the names of internal variables. 1904 Returns a vector of newly allocated strings, or NULL if no matches 1905 were found. */ 1906 1907 VEC (char_ptr) * 1908 complete_internalvar (const char *name) 1909 { 1910 VEC (char_ptr) *result = NULL; 1911 struct internalvar *var; 1912 int len; 1913 1914 len = strlen (name); 1915 1916 for (var = internalvars; var; var = var->next) 1917 if (strncmp (var->name, name, len) == 0) 1918 { 1919 char *r = xstrdup (var->name); 1920 1921 VEC_safe_push (char_ptr, result, r); 1922 } 1923 1924 return result; 1925 } 1926 1927 /* Create an internal variable with name NAME and with a void value. 1928 NAME should not normally include a dollar sign. */ 1929 1930 struct internalvar * 1931 create_internalvar (const char *name) 1932 { 1933 struct internalvar *var; 1934 1935 var = (struct internalvar *) xmalloc (sizeof (struct internalvar)); 1936 var->name = concat (name, (char *)NULL); 1937 var->kind = INTERNALVAR_VOID; 1938 var->next = internalvars; 1939 internalvars = var; 1940 return var; 1941 } 1942 1943 /* Create an internal variable with name NAME and register FUN as the 1944 function that value_of_internalvar uses to create a value whenever 1945 this variable is referenced. NAME should not normally include a 1946 dollar sign. DATA is passed uninterpreted to FUN when it is 1947 called. CLEANUP, if not NULL, is called when the internal variable 1948 is destroyed. It is passed DATA as its only argument. */ 1949 1950 struct internalvar * 1951 create_internalvar_type_lazy (const char *name, 1952 const struct internalvar_funcs *funcs, 1953 void *data) 1954 { 1955 struct internalvar *var = create_internalvar (name); 1956 1957 var->kind = INTERNALVAR_MAKE_VALUE; 1958 var->u.make_value.functions = funcs; 1959 var->u.make_value.data = data; 1960 return var; 1961 } 1962 1963 /* See documentation in value.h. */ 1964 1965 int 1966 compile_internalvar_to_ax (struct internalvar *var, 1967 struct agent_expr *expr, 1968 struct axs_value *value) 1969 { 1970 if (var->kind != INTERNALVAR_MAKE_VALUE 1971 || var->u.make_value.functions->compile_to_ax == NULL) 1972 return 0; 1973 1974 var->u.make_value.functions->compile_to_ax (var, expr, value, 1975 var->u.make_value.data); 1976 return 1; 1977 } 1978 1979 /* Look up an internal variable with name NAME. NAME should not 1980 normally include a dollar sign. 1981 1982 If the specified internal variable does not exist, 1983 one is created, with a void value. */ 1984 1985 struct internalvar * 1986 lookup_internalvar (const char *name) 1987 { 1988 struct internalvar *var; 1989 1990 var = lookup_only_internalvar (name); 1991 if (var) 1992 return var; 1993 1994 return create_internalvar (name); 1995 } 1996 1997 /* Return current value of internal variable VAR. For variables that 1998 are not inherently typed, use a value type appropriate for GDBARCH. 
   */

struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
                                                                &(tsv->value));
      if (tsv->value_known)
        val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
                                  tsv->value);
      else
        val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      if (!var->u.integer.type)
        val = value_from_longest (builtin_type (gdbarch)->builtin_int,
                                  var->u.integer.val);
      else
        val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
                           builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      val = value_copy (var->u.value);
      if (value_lazy (val))
        value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
                                                        var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifiable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.
*/ 2075 2076 if (var->kind != INTERNALVAR_MAKE_VALUE 2077 && val->lval != lval_computed) 2078 { 2079 VALUE_LVAL (val) = lval_internalvar; 2080 VALUE_INTERNALVAR (val) = var; 2081 } 2082 2083 return val; 2084 } 2085 2086 int 2087 get_internalvar_integer (struct internalvar *var, LONGEST *result) 2088 { 2089 if (var->kind == INTERNALVAR_INTEGER) 2090 { 2091 *result = var->u.integer.val; 2092 return 1; 2093 } 2094 2095 if (var->kind == INTERNALVAR_VALUE) 2096 { 2097 struct type *type = check_typedef (value_type (var->u.value)); 2098 2099 if (TYPE_CODE (type) == TYPE_CODE_INT) 2100 { 2101 *result = value_as_long (var->u.value); 2102 return 1; 2103 } 2104 } 2105 2106 return 0; 2107 } 2108 2109 static int 2110 get_internalvar_function (struct internalvar *var, 2111 struct internal_function **result) 2112 { 2113 switch (var->kind) 2114 { 2115 case INTERNALVAR_FUNCTION: 2116 *result = var->u.fn.function; 2117 return 1; 2118 2119 default: 2120 return 0; 2121 } 2122 } 2123 2124 void 2125 set_internalvar_component (struct internalvar *var, int offset, int bitpos, 2126 int bitsize, struct value *newval) 2127 { 2128 gdb_byte *addr; 2129 2130 switch (var->kind) 2131 { 2132 case INTERNALVAR_VALUE: 2133 addr = value_contents_writeable (var->u.value); 2134 2135 if (bitsize) 2136 modify_field (value_type (var->u.value), addr + offset, 2137 value_as_long (newval), bitpos, bitsize); 2138 else 2139 memcpy (addr + offset, value_contents (newval), 2140 TYPE_LENGTH (value_type (newval))); 2141 break; 2142 2143 default: 2144 /* We can never get a component of any other kind. */ 2145 internal_error (__FILE__, __LINE__, _("set_internalvar_component")); 2146 } 2147 } 2148 2149 void 2150 set_internalvar (struct internalvar *var, struct value *val) 2151 { 2152 enum internalvar_kind new_kind; 2153 union internalvar_data new_data = { 0 }; 2154 2155 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical) 2156 error (_("Cannot overwrite convenience function %s"), var->name); 2157 2158 /* Prepare new contents. */ 2159 switch (TYPE_CODE (check_typedef (value_type (val)))) 2160 { 2161 case TYPE_CODE_VOID: 2162 new_kind = INTERNALVAR_VOID; 2163 break; 2164 2165 case TYPE_CODE_INTERNAL_FUNCTION: 2166 gdb_assert (VALUE_LVAL (val) == lval_internalvar); 2167 new_kind = INTERNALVAR_FUNCTION; 2168 get_internalvar_function (VALUE_INTERNALVAR (val), 2169 &new_data.fn.function); 2170 /* Copies created here are never canonical. */ 2171 break; 2172 2173 default: 2174 new_kind = INTERNALVAR_VALUE; 2175 new_data.value = value_copy (val); 2176 new_data.value->modifiable = 1; 2177 2178 /* Force the value to be fetched from the target now, to avoid problems 2179 later when this internalvar is referenced and the target is gone or 2180 has changed. */ 2181 if (value_lazy (new_data.value)) 2182 value_fetch_lazy (new_data.value); 2183 2184 /* Release the value from the value chain to prevent it from being 2185 deleted by free_all_values. From here on this function should not 2186 call error () until new_data is installed into the var->u to avoid 2187 leaking memory. */ 2188 release_value (new_data.value); 2189 break; 2190 } 2191 2192 /* Clean up old contents. */ 2193 clear_internalvar (var); 2194 2195 /* Switch over. */ 2196 var->kind = new_kind; 2197 var->u = new_data; 2198 /* End code which must not call error(). */ 2199 } 2200 2201 void 2202 set_internalvar_integer (struct internalvar *var, LONGEST l) 2203 { 2204 /* Clean up old contents. 
*/ 2205 clear_internalvar (var); 2206 2207 var->kind = INTERNALVAR_INTEGER; 2208 var->u.integer.type = NULL; 2209 var->u.integer.val = l; 2210 } 2211 2212 void 2213 set_internalvar_string (struct internalvar *var, const char *string) 2214 { 2215 /* Clean up old contents. */ 2216 clear_internalvar (var); 2217 2218 var->kind = INTERNALVAR_STRING; 2219 var->u.string = xstrdup (string); 2220 } 2221 2222 static void 2223 set_internalvar_function (struct internalvar *var, struct internal_function *f) 2224 { 2225 /* Clean up old contents. */ 2226 clear_internalvar (var); 2227 2228 var->kind = INTERNALVAR_FUNCTION; 2229 var->u.fn.function = f; 2230 var->u.fn.canonical = 1; 2231 /* Variables installed here are always the canonical version. */ 2232 } 2233 2234 void 2235 clear_internalvar (struct internalvar *var) 2236 { 2237 /* Clean up old contents. */ 2238 switch (var->kind) 2239 { 2240 case INTERNALVAR_VALUE: 2241 value_free (var->u.value); 2242 break; 2243 2244 case INTERNALVAR_STRING: 2245 xfree (var->u.string); 2246 break; 2247 2248 case INTERNALVAR_MAKE_VALUE: 2249 if (var->u.make_value.functions->destroy != NULL) 2250 var->u.make_value.functions->destroy (var->u.make_value.data); 2251 break; 2252 2253 default: 2254 break; 2255 } 2256 2257 /* Reset to void kind. */ 2258 var->kind = INTERNALVAR_VOID; 2259 } 2260 2261 char * 2262 internalvar_name (struct internalvar *var) 2263 { 2264 return var->name; 2265 } 2266 2267 static struct internal_function * 2268 create_internal_function (const char *name, 2269 internal_function_fn handler, void *cookie) 2270 { 2271 struct internal_function *ifn = XNEW (struct internal_function); 2272 2273 ifn->name = xstrdup (name); 2274 ifn->handler = handler; 2275 ifn->cookie = cookie; 2276 return ifn; 2277 } 2278 2279 char * 2280 value_internal_function_name (struct value *val) 2281 { 2282 struct internal_function *ifn; 2283 int result; 2284 2285 gdb_assert (VALUE_LVAL (val) == lval_internalvar); 2286 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn); 2287 gdb_assert (result); 2288 2289 return ifn->name; 2290 } 2291 2292 struct value * 2293 call_internal_function (struct gdbarch *gdbarch, 2294 const struct language_defn *language, 2295 struct value *func, int argc, struct value **argv) 2296 { 2297 struct internal_function *ifn; 2298 int result; 2299 2300 gdb_assert (VALUE_LVAL (func) == lval_internalvar); 2301 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn); 2302 gdb_assert (result); 2303 2304 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv); 2305 } 2306 2307 /* The 'function' command. This does nothing -- it is just a 2308 placeholder to let "help function NAME" work. This is also used as 2309 the implementation of the sub-command that is created when 2310 registering an internal function. */ 2311 static void 2312 function_command (char *command, int from_tty) 2313 { 2314 /* Do nothing. */ 2315 } 2316 2317 /* Clean up if an internal function's command is destroyed. */ 2318 static void 2319 function_destroyer (struct cmd_list_element *self, void *ignore) 2320 { 2321 xfree ((char *) self->name); 2322 xfree (self->doc); 2323 } 2324 2325 /* Add a new internal function. NAME is the name of the function; DOC 2326 is a documentation string describing the function. HANDLER is 2327 called when the function is invoked. COOKIE is an arbitrary 2328 pointer which is passed to HANDLER and is intended for "user 2329 data". 
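For example, _initialize_values at the end of this file registers the $_isvoid convenience function this way:

       add_internal_function ("_isvoid", _("..."), isvoid_internal_fn, NULL);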
*/ 2330 void 2331 add_internal_function (const char *name, const char *doc, 2332 internal_function_fn handler, void *cookie) 2333 { 2334 struct cmd_list_element *cmd; 2335 struct internal_function *ifn; 2336 struct internalvar *var = lookup_internalvar (name); 2337 2338 ifn = create_internal_function (name, handler, cookie); 2339 set_internalvar_function (var, ifn); 2340 2341 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc, 2342 &functionlist); 2343 cmd->destroyer = function_destroyer; 2344 } 2345 2346 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to 2347 prevent cycles / duplicates. */ 2348 2349 void 2350 preserve_one_value (struct value *value, struct objfile *objfile, 2351 htab_t copied_types) 2352 { 2353 if (TYPE_OBJFILE (value->type) == objfile) 2354 value->type = copy_type_recursive (objfile, value->type, copied_types); 2355 2356 if (TYPE_OBJFILE (value->enclosing_type) == objfile) 2357 value->enclosing_type = copy_type_recursive (objfile, 2358 value->enclosing_type, 2359 copied_types); 2360 } 2361 2362 /* Likewise for internal variable VAR. */ 2363 2364 static void 2365 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile, 2366 htab_t copied_types) 2367 { 2368 switch (var->kind) 2369 { 2370 case INTERNALVAR_INTEGER: 2371 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile) 2372 var->u.integer.type 2373 = copy_type_recursive (objfile, var->u.integer.type, copied_types); 2374 break; 2375 2376 case INTERNALVAR_VALUE: 2377 preserve_one_value (var->u.value, objfile, copied_types); 2378 break; 2379 } 2380 } 2381 2382 /* Update the internal variables and value history when OBJFILE is 2383 discarded; we must copy the types out of the objfile. New global types 2384 will be created for every convenience variable which currently points to 2385 this objfile's types, and the convenience variables will be adjusted to 2386 use the new global types. */ 2387 2388 void 2389 preserve_values (struct objfile *objfile) 2390 { 2391 htab_t copied_types; 2392 struct value_history_chunk *cur; 2393 struct internalvar *var; 2394 int i; 2395 2396 /* Create the hash table. We allocate on the objfile's obstack, since 2397 it is soon to be deleted. 
*/ 2398 copied_types = create_copied_types_hash (objfile); 2399 2400 for (cur = value_history_chain; cur; cur = cur->next) 2401 for (i = 0; i < VALUE_HISTORY_CHUNK; i++) 2402 if (cur->values[i]) 2403 preserve_one_value (cur->values[i], objfile, copied_types); 2404 2405 for (var = internalvars; var; var = var->next) 2406 preserve_one_internalvar (var, objfile, copied_types); 2407 2408 preserve_python_values (objfile, copied_types); 2409 2410 htab_delete (copied_types); 2411 } 2412 2413 static void 2414 show_convenience (char *ignore, int from_tty) 2415 { 2416 struct gdbarch *gdbarch = get_current_arch (); 2417 struct internalvar *var; 2418 int varseen = 0; 2419 struct value_print_options opts; 2420 2421 get_user_print_options (&opts); 2422 for (var = internalvars; var; var = var->next) 2423 { 2424 volatile struct gdb_exception ex; 2425 2426 if (!varseen) 2427 { 2428 varseen = 1; 2429 } 2430 printf_filtered (("$%s = "), var->name); 2431 2432 TRY_CATCH (ex, RETURN_MASK_ERROR) 2433 { 2434 struct value *val; 2435 2436 val = value_of_internalvar (gdbarch, var); 2437 value_print (val, gdb_stdout, &opts); 2438 } 2439 if (ex.reason < 0) 2440 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message); 2441 printf_filtered (("\n")); 2442 } 2443 if (!varseen) 2444 { 2445 /* This text does not mention convenience functions on purpose. 2446 The user can't create them except via Python, and if Python support 2447 is installed this message will never be printed ($_streq will 2448 exist). */ 2449 printf_unfiltered (_("No debugger convenience variables now defined.\n" 2450 "Convenience variables have " 2451 "names starting with \"$\";\n" 2452 "use \"set\" as in \"set " 2453 "$foo = 5\" to define them.\n")); 2454 } 2455 } 2456 2457 /* Extract a value as a C number (either long or double). 2458 Knows how to convert fixed values to double, or 2459 floating values to long. 2460 Does not deallocate the value. */ 2461 2462 LONGEST 2463 value_as_long (struct value *val) 2464 { 2465 /* This coerces arrays and functions, which is necessary (e.g. 2466 in disassemble_command). It also dereferences references, which 2467 I suspect is the most logical thing to do. */ 2468 val = coerce_array (val); 2469 return unpack_long (value_type (val), value_contents (val)); 2470 } 2471 2472 DOUBLEST 2473 value_as_double (struct value *val) 2474 { 2475 DOUBLEST foo; 2476 int inv; 2477 2478 foo = unpack_double (value_type (val), value_contents (val), &inv); 2479 if (inv) 2480 error (_("Invalid floating value found in program.")); 2481 return foo; 2482 } 2483 2484 /* Extract a value as a C pointer. Does not deallocate the value. 2485 Note that val's type may not actually be a pointer; value_as_long 2486 handles all the cases. */ 2487 CORE_ADDR 2488 value_as_address (struct value *val) 2489 { 2490 struct gdbarch *gdbarch = get_type_arch (value_type (val)); 2491 2492 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure 2493 whether we want this to be true eventually. */ 2494 #if 0 2495 /* gdbarch_addr_bits_remove is wrong if we are being called for a 2496 non-address (e.g. argument to "signal", "info break", etc.), or 2497 for pointers to char, in which the low bits *are* significant. */ 2498 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val)); 2499 #else 2500 2501 /* There are several targets (IA-64, PowerPC, and others) which 2502 don't represent pointers to functions as simply the address of 2503 the function's entry point. 
For example, on the IA-64, a 2504 function pointer points to a two-word descriptor, generated by 2505 the linker, which contains the function's entry point, and the 2506 value the IA-64 "global pointer" register should have --- to 2507 support position-independent code. The linker generates 2508 descriptors only for those functions whose addresses are taken. 2509 2510 On such targets, it's difficult for GDB to convert an arbitrary 2511 function address into a function pointer; it has to either find 2512 an existing descriptor for that function, or call malloc and 2513 build its own. On some targets, it is impossible for GDB to 2514 build a descriptor at all: the descriptor must contain a jump 2515 instruction; data memory cannot be executed; and code memory 2516 cannot be modified. 2517 2518 Upon entry to this function, if VAL is a value of type `function' 2519 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then 2520 value_address (val) is the address of the function. This is what 2521 you'll get if you evaluate an expression like `main'. The call 2522 to COERCE_ARRAY below actually does all the usual unary 2523 conversions, which includes converting values of type `function' 2524 to `pointer to function'. This is the challenging conversion 2525 discussed above. Then, `unpack_long' will convert that pointer 2526 back into an address. 2527 2528 So, suppose the user types `disassemble foo' on an architecture 2529 with a strange function pointer representation, on which GDB 2530 cannot build its own descriptors, and suppose further that `foo' 2531 has no linker-built descriptor. The address->pointer conversion 2532 will signal an error and prevent the command from running, even 2533 though the next step would have been to convert the pointer 2534 directly back into the same address. 2535 2536 The following shortcut avoids this whole mess. If VAL is a 2537 function, just return its address directly. */ 2538 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC 2539 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD) 2540 return value_address (val); 2541 2542 val = coerce_array (val); 2543 2544 /* Some architectures (e.g. Harvard), map instruction and data 2545 addresses onto a single large unified address space. For 2546 instance: An architecture may consider a large integer in the 2547 range 0x10000000 .. 0x1000ffff to already represent a data 2548 addresses (hence not need a pointer to address conversion) while 2549 a small integer would still need to be converted integer to 2550 pointer to address. Just assume such architectures handle all 2551 integer conversions in a single function. */ 2552 2553 /* JimB writes: 2554 2555 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we 2556 must admonish GDB hackers to make sure its behavior matches the 2557 compiler's, whenever possible. 2558 2559 In general, I think GDB should evaluate expressions the same way 2560 the compiler does. When the user copies an expression out of 2561 their source code and hands it to a `print' command, they should 2562 get the same value the compiler would have computed. Any 2563 deviation from this rule can cause major confusion and annoyance, 2564 and needs to be justified carefully. In other words, GDB doesn't 2565 really have the freedom to do these conversions in clever and 2566 useful ways. 
2567 2568 AndrewC pointed out that users aren't complaining about how GDB 2569 casts integers to pointers; they are complaining that they can't 2570 take an address from a disassembly listing and give it to `x/i'. 2571 This is certainly important. 2572 2573 Adding an architecture method like integer_to_address() certainly 2574 makes it possible for GDB to "get it right" in all circumstances 2575 --- the target has complete control over how things get done, so 2576 people can Do The Right Thing for their target without breaking 2577 anyone else. The standard doesn't specify how integers get 2578 converted to pointers; usually, the ABI doesn't either, but 2579 ABI-specific code is a more reasonable place to handle it. */ 2580 2581 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR 2582 && TYPE_CODE (value_type (val)) != TYPE_CODE_REF 2583 && gdbarch_integer_to_address_p (gdbarch)) 2584 return gdbarch_integer_to_address (gdbarch, value_type (val), 2585 value_contents (val)); 2586 2587 return unpack_long (value_type (val), value_contents (val)); 2588 #endif 2589 } 2590 2591 /* Unpack raw data (copied from debugee, target byte order) at VALADDR 2592 as a long, or as a double, assuming the raw data is described 2593 by type TYPE. Knows how to convert different sizes of values 2594 and can convert between fixed and floating point. We don't assume 2595 any alignment for the raw data. Return value is in host byte order. 2596 2597 If you want functions and arrays to be coerced to pointers, and 2598 references to be dereferenced, call value_as_long() instead. 2599 2600 C++: It is assumed that the front-end has taken care of 2601 all matters concerning pointers to members. A pointer 2602 to member which reaches here is considered to be equivalent 2603 to an INT (or some size). After all, it is only an offset. */ 2604 2605 LONGEST 2606 unpack_long (struct type *type, const gdb_byte *valaddr) 2607 { 2608 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); 2609 enum type_code code = TYPE_CODE (type); 2610 int len = TYPE_LENGTH (type); 2611 int nosign = TYPE_UNSIGNED (type); 2612 2613 switch (code) 2614 { 2615 case TYPE_CODE_TYPEDEF: 2616 return unpack_long (check_typedef (type), valaddr); 2617 case TYPE_CODE_ENUM: 2618 case TYPE_CODE_FLAGS: 2619 case TYPE_CODE_BOOL: 2620 case TYPE_CODE_INT: 2621 case TYPE_CODE_CHAR: 2622 case TYPE_CODE_RANGE: 2623 case TYPE_CODE_MEMBERPTR: 2624 if (nosign) 2625 return extract_unsigned_integer (valaddr, len, byte_order); 2626 else 2627 return extract_signed_integer (valaddr, len, byte_order); 2628 2629 case TYPE_CODE_FLT: 2630 return extract_typed_floating (valaddr, type); 2631 2632 case TYPE_CODE_DECFLOAT: 2633 /* libdecnumber has a function to convert from decimal to integer, but 2634 it doesn't work when the decimal number has a fractional part. */ 2635 return decimal_to_doublest (valaddr, len, byte_order); 2636 2637 case TYPE_CODE_PTR: 2638 case TYPE_CODE_REF: 2639 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure 2640 whether we want this to be true eventually. */ 2641 return extract_typed_address (valaddr, type); 2642 2643 default: 2644 error (_("Value can't be converted to integer.")); 2645 } 2646 return 0; /* Placate lint. */ 2647 } 2648 2649 /* Return a double value from the specified type and address. 2650 INVP points to an int which is set to 0 for valid value, 2651 1 for invalid value (bad float format). In either case, 2652 the returned double is OK to use. Argument is in target 2653 format, result is in host format. 
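For example, value_as_double above calls this routine and reports "Invalid floating value found in program." when *INVP comes back nonzero.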
*/ 2654 2655 DOUBLEST 2656 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp) 2657 { 2658 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); 2659 enum type_code code; 2660 int len; 2661 int nosign; 2662 2663 *invp = 0; /* Assume valid. */ 2664 CHECK_TYPEDEF (type); 2665 code = TYPE_CODE (type); 2666 len = TYPE_LENGTH (type); 2667 nosign = TYPE_UNSIGNED (type); 2668 if (code == TYPE_CODE_FLT) 2669 { 2670 /* NOTE: cagney/2002-02-19: There was a test here to see if the 2671 floating-point value was valid (using the macro 2672 INVALID_FLOAT). That test/macro have been removed. 2673 2674 It turns out that only the VAX defined this macro and then 2675 only in a non-portable way. Fixing the portability problem 2676 wouldn't help since the VAX floating-point code is also badly 2677 bit-rotten. The target needs to add definitions for the 2678 methods gdbarch_float_format and gdbarch_double_format - these 2679 exactly describe the target floating-point format. The 2680 problem here is that the corresponding floatformat_vax_f and 2681 floatformat_vax_d values these methods should be set to are 2682 also not defined either. Oops! 2683 2684 Hopefully someone will add both the missing floatformat 2685 definitions and the new cases for floatformat_is_valid (). */ 2686 2687 if (!floatformat_is_valid (floatformat_from_type (type), valaddr)) 2688 { 2689 *invp = 1; 2690 return 0.0; 2691 } 2692 2693 return extract_typed_floating (valaddr, type); 2694 } 2695 else if (code == TYPE_CODE_DECFLOAT) 2696 return decimal_to_doublest (valaddr, len, byte_order); 2697 else if (nosign) 2698 { 2699 /* Unsigned -- be sure we compensate for signed LONGEST. */ 2700 return (ULONGEST) unpack_long (type, valaddr); 2701 } 2702 else 2703 { 2704 /* Signed -- we are OK with unpack_long. */ 2705 return unpack_long (type, valaddr); 2706 } 2707 } 2708 2709 /* Unpack raw data (copied from debugee, target byte order) at VALADDR 2710 as a CORE_ADDR, assuming the raw data is described by type TYPE. 2711 We don't assume any alignment for the raw data. Return value is in 2712 host byte order. 2713 2714 If you want functions and arrays to be coerced to pointers, and 2715 references to be dereferenced, call value_as_address() instead. 2716 2717 C++: It is assumed that the front-end has taken care of 2718 all matters concerning pointers to members. A pointer 2719 to member which reaches here is considered to be equivalent 2720 to an INT (or some size). After all, it is only an offset. */ 2721 2722 CORE_ADDR 2723 unpack_pointer (struct type *type, const gdb_byte *valaddr) 2724 { 2725 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure 2726 whether we want this to be true eventually. */ 2727 return unpack_long (type, valaddr); 2728 } 2729 2730 2731 /* Get the value of the FIELDNO'th field (which must be static) of 2732 TYPE. */ 2733 2734 struct value * 2735 value_static_field (struct type *type, int fieldno) 2736 { 2737 struct value *retval; 2738 2739 switch (TYPE_FIELD_LOC_KIND (type, fieldno)) 2740 { 2741 case FIELD_LOC_KIND_PHYSADDR: 2742 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno), 2743 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno)); 2744 break; 2745 case FIELD_LOC_KIND_PHYSNAME: 2746 { 2747 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno); 2748 /* TYPE_FIELD_NAME (type, fieldno); */ 2749 struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0); 2750 2751 if (sym == NULL) 2752 { 2753 /* With some compilers, e.g. 
HP aCC, static data members are 2754 reported as non-debuggable symbols. */ 2755 struct minimal_symbol *msym = lookup_minimal_symbol (phys_name, 2756 NULL, NULL); 2757 2758 if (!msym) 2759 return allocate_optimized_out_value (type); 2760 else 2761 { 2762 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno), 2763 SYMBOL_VALUE_ADDRESS (msym)); 2764 } 2765 } 2766 else 2767 retval = value_of_variable (sym, NULL); 2768 break; 2769 } 2770 default: 2771 gdb_assert_not_reached ("unexpected field location kind"); 2772 } 2773 2774 return retval; 2775 } 2776 2777 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE. 2778 You have to be careful here, since the size of the data area for the value 2779 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger 2780 than the old enclosing type, you have to allocate more space for the 2781 data. */ 2782 2783 void 2784 set_value_enclosing_type (struct value *val, struct type *new_encl_type) 2785 { 2786 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val))) 2787 val->contents = 2788 (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type)); 2789 2790 val->enclosing_type = new_encl_type; 2791 } 2792 2793 /* Given a value ARG1 (offset by OFFSET bytes) 2794 of a struct or union type ARG_TYPE, 2795 extract and return the value of one of its (non-static) fields. 2796 FIELDNO says which field. */ 2797 2798 struct value * 2799 value_primitive_field (struct value *arg1, int offset, 2800 int fieldno, struct type *arg_type) 2801 { 2802 struct value *v; 2803 struct type *type; 2804 2805 CHECK_TYPEDEF (arg_type); 2806 type = TYPE_FIELD_TYPE (arg_type, fieldno); 2807 2808 /* Call check_typedef on our type to make sure that, if TYPE 2809 is a TYPE_CODE_TYPEDEF, its length is set to the length 2810 of the target type instead of zero. However, we do not 2811 replace the typedef type by the target type, because we want 2812 to keep the typedef in order to be able to print the type 2813 description correctly. */ 2814 check_typedef (type); 2815 2816 if (TYPE_FIELD_BITSIZE (arg_type, fieldno)) 2817 { 2818 /* Handle packed fields. 2819 2820 Create a new value for the bitfield, with bitpos and bitsize 2821 set. If possible, arrange offset and bitpos so that we can 2822 do a single aligned read of the size of the containing type. 2823 Otherwise, adjust offset to the byte containing the first 2824 bit. Assume that the address, offset, and embedded offset 2825 are sufficiently aligned. */ 2826 2827 int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno); 2828 int container_bitsize = TYPE_LENGTH (type) * 8; 2829 2830 if (arg1->optimized_out) 2831 v = allocate_optimized_out_value (type); 2832 else 2833 { 2834 v = allocate_value_lazy (type); 2835 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno); 2836 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize 2837 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST)) 2838 v->bitpos = bitpos % container_bitsize; 2839 else 2840 v->bitpos = bitpos % 8; 2841 v->offset = (value_embedded_offset (arg1) 2842 + offset 2843 + (bitpos - v->bitpos) / 8); 2844 set_value_parent (v, arg1); 2845 if (!value_lazy (arg1)) 2846 value_fetch_lazy (v); 2847 } 2848 } 2849 else if (fieldno < TYPE_N_BASECLASSES (arg_type)) 2850 { 2851 /* This field is actually a base subobject, so preserve the 2852 entire object's contents for later references to virtual 2853 bases, etc. */ 2854 int boffset; 2855 2856 /* Lazy register values with offsets are not supported. 
*/ 2857 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1)) 2858 value_fetch_lazy (arg1); 2859 2860 /* The optimized_out flag is only set correctly once a lazy value is 2861 loaded, having just loaded some lazy values we should check the 2862 optimized out case now. */ 2863 if (arg1->optimized_out) 2864 v = allocate_optimized_out_value (type); 2865 else 2866 { 2867 /* We special case virtual inheritance here because this 2868 requires access to the contents, which we would rather avoid 2869 for references to ordinary fields of unavailable values. */ 2870 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno)) 2871 boffset = baseclass_offset (arg_type, fieldno, 2872 value_contents (arg1), 2873 value_embedded_offset (arg1), 2874 value_address (arg1), 2875 arg1); 2876 else 2877 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8; 2878 2879 if (value_lazy (arg1)) 2880 v = allocate_value_lazy (value_enclosing_type (arg1)); 2881 else 2882 { 2883 v = allocate_value (value_enclosing_type (arg1)); 2884 value_contents_copy_raw (v, 0, arg1, 0, 2885 TYPE_LENGTH (value_enclosing_type (arg1))); 2886 } 2887 v->type = type; 2888 v->offset = value_offset (arg1); 2889 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset; 2890 } 2891 } 2892 else 2893 { 2894 /* Plain old data member */ 2895 offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8; 2896 2897 /* Lazy register values with offsets are not supported. */ 2898 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1)) 2899 value_fetch_lazy (arg1); 2900 2901 /* The optimized_out flag is only set correctly once a lazy value is 2902 loaded, having just loaded some lazy values we should check for 2903 the optimized out case now. */ 2904 if (arg1->optimized_out) 2905 v = allocate_optimized_out_value (type); 2906 else if (value_lazy (arg1)) 2907 v = allocate_value_lazy (type); 2908 else 2909 { 2910 v = allocate_value (type); 2911 value_contents_copy_raw (v, value_embedded_offset (v), 2912 arg1, value_embedded_offset (arg1) + offset, 2913 TYPE_LENGTH (type)); 2914 } 2915 v->offset = (value_offset (arg1) + offset 2916 + value_embedded_offset (arg1)); 2917 } 2918 set_value_component_location (v, arg1); 2919 VALUE_REGNUM (v) = VALUE_REGNUM (arg1); 2920 VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1); 2921 return v; 2922 } 2923 2924 /* Given a value ARG1 of a struct or union type, 2925 extract and return the value of one of its (non-static) fields. 2926 FIELDNO says which field. */ 2927 2928 struct value * 2929 value_field (struct value *arg1, int fieldno) 2930 { 2931 return value_primitive_field (arg1, 0, fieldno, value_type (arg1)); 2932 } 2933 2934 /* Return a non-virtual function as a value. 2935 F is the list of member functions which contains the desired method. 2936 J is an index into F which provides the desired method. 2937 2938 We only use the symbol for its address, so be happy with either a 2939 full symbol or a minimal symbol. 
*/ 2940 2941 struct value * 2942 value_fn_field (struct value **arg1p, struct fn_field *f, 2943 int j, struct type *type, 2944 int offset) 2945 { 2946 struct value *v; 2947 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j); 2948 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j); 2949 struct symbol *sym; 2950 struct bound_minimal_symbol msym; 2951 2952 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0); 2953 if (sym != NULL) 2954 { 2955 memset (&msym, 0, sizeof (msym)); 2956 } 2957 else 2958 { 2959 gdb_assert (sym == NULL); 2960 msym = lookup_bound_minimal_symbol (physname); 2961 if (msym.minsym == NULL) 2962 return NULL; 2963 } 2964 2965 v = allocate_value (ftype); 2966 if (sym) 2967 { 2968 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym))); 2969 } 2970 else 2971 { 2972 /* The minimal symbol might point to a function descriptor; 2973 resolve it to the actual code address instead. */ 2974 struct objfile *objfile = msym.objfile; 2975 struct gdbarch *gdbarch = get_objfile_arch (objfile); 2976 2977 set_value_address (v, 2978 gdbarch_convert_from_func_ptr_addr 2979 (gdbarch, SYMBOL_VALUE_ADDRESS (msym.minsym), &current_target)); 2980 } 2981 2982 if (arg1p) 2983 { 2984 if (type != value_type (*arg1p)) 2985 *arg1p = value_ind (value_cast (lookup_pointer_type (type), 2986 value_addr (*arg1p))); 2987 2988 /* Move the `this' pointer according to the offset. 2989 VALUE_OFFSET (*arg1p) += offset; */ 2990 } 2991 2992 return v; 2993 } 2994 2995 2996 2997 /* Helper function for both unpack_value_bits_as_long and 2998 unpack_bits_as_long. See those functions for more details on the 2999 interface; the only difference is that this function accepts either 3000 a NULL or a non-NULL ORIGINAL_VALUE. */ 3001 3002 static int 3003 unpack_value_bits_as_long_1 (struct type *field_type, const gdb_byte *valaddr, 3004 int embedded_offset, int bitpos, int bitsize, 3005 const struct value *original_value, 3006 LONGEST *result) 3007 { 3008 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type)); 3009 ULONGEST val; 3010 ULONGEST valmask; 3011 int lsbcount; 3012 int bytes_read; 3013 int read_offset; 3014 3015 /* Read the minimum number of bytes required; there may not be 3016 enough bytes to read an entire ULONGEST. */ 3017 CHECK_TYPEDEF (field_type); 3018 if (bitsize) 3019 bytes_read = ((bitpos % 8) + bitsize + 7) / 8; 3020 else 3021 bytes_read = TYPE_LENGTH (field_type); 3022 3023 read_offset = bitpos / 8; 3024 3025 if (original_value != NULL 3026 && !value_bits_available (original_value, embedded_offset + bitpos, 3027 bitsize)) 3028 return 0; 3029 3030 val = extract_unsigned_integer (valaddr + embedded_offset + read_offset, 3031 bytes_read, byte_order); 3032 3033 /* Extract bits. See comment above. */ 3034 3035 if (gdbarch_bits_big_endian (get_type_arch (field_type))) 3036 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize); 3037 else 3038 lsbcount = (bitpos % 8); 3039 val >>= lsbcount; 3040 3041 /* If the field does not entirely fill a LONGEST, then zero the sign bits. 3042 If the field is signed, and is negative, then sign extend.
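As an illustrative example: with BITSIZE == 3 and extracted bits 0b101, valmask below is 0b111 and the sign bit (valmask ^ (valmask >> 1)) is 0b100; that bit is set in val, so val is OR'ed with ~valmask, which sign-extends the result to -3.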
*/ 3043 3044 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val))) 3045 { 3046 valmask = (((ULONGEST) 1) << bitsize) - 1; 3047 val &= valmask; 3048 if (!TYPE_UNSIGNED (field_type)) 3049 { 3050 if (val & (valmask ^ (valmask >> 1))) 3051 { 3052 val |= ~valmask; 3053 } 3054 } 3055 } 3056 3057 *result = val; 3058 return 1; 3059 } 3060 3061 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at 3062 VALADDR + EMBEDDED_OFFSET, and store the result in *RESULT. 3063 VALADDR points to the contents of ORIGINAL_VALUE, which must not be 3064 NULL. The bitfield starts at BITPOS bits and contains BITSIZE 3065 bits. 3066 3067 Returns false if the value contents are unavailable, otherwise 3068 returns true, indicating a valid value has been stored in *RESULT. 3069 3070 Extracting bits depends on endianness of the machine. Compute the 3071 number of least significant bits to discard. For big endian machines, 3072 we compute the total number of bits in the anonymous object, subtract 3073 off the bit count from the MSB of the object to the MSB of the 3074 bitfield, then the size of the bitfield, which leaves the LSB discard 3075 count. For little endian machines, the discard count is simply the 3076 number of bits from the LSB of the anonymous object to the LSB of the 3077 bitfield. 3078 3079 If the field is signed, we also do sign extension. */ 3080 3081 int 3082 unpack_value_bits_as_long (struct type *field_type, const gdb_byte *valaddr, 3083 int embedded_offset, int bitpos, int bitsize, 3084 const struct value *original_value, 3085 LONGEST *result) 3086 { 3087 gdb_assert (original_value != NULL); 3088 3089 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset, 3090 bitpos, bitsize, original_value, result); 3091 3092 } 3093 3094 /* Unpack a field FIELDNO of the specified TYPE, from the object at 3095 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of 3096 ORIGINAL_VALUE. See unpack_value_bits_as_long for more 3097 details. */ 3098 3099 static int 3100 unpack_value_field_as_long_1 (struct type *type, const gdb_byte *valaddr, 3101 int embedded_offset, int fieldno, 3102 const struct value *val, LONGEST *result) 3103 { 3104 int bitpos = TYPE_FIELD_BITPOS (type, fieldno); 3105 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno); 3106 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno); 3107 3108 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset, 3109 bitpos, bitsize, val, 3110 result); 3111 } 3112 3113 /* Unpack a field FIELDNO of the specified TYPE, from the object at 3114 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of 3115 ORIGINAL_VALUE, which must not be NULL. See 3116 unpack_value_bits_as_long for more details. */ 3117 3118 int 3119 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr, 3120 int embedded_offset, int fieldno, 3121 const struct value *val, LONGEST *result) 3122 { 3123 gdb_assert (val != NULL); 3124 3125 return unpack_value_field_as_long_1 (type, valaddr, embedded_offset, 3126 fieldno, val, result); 3127 } 3128 3129 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous 3130 object at VALADDR. See unpack_value_bits_as_long for more details. 3131 This function differs from unpack_value_field_as_long in that it 3132 operates without a struct value object. 
*/ 3133 3134 LONGEST 3135 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno) 3136 { 3137 LONGEST result; 3138 3139 unpack_value_field_as_long_1 (type, valaddr, 0, fieldno, NULL, &result); 3140 return result; 3141 } 3142 3143 /* Return a new value with type TYPE, which is FIELDNO field of the 3144 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents 3145 of VAL. If the VAL's contents required to extract the bitfield 3146 from are unavailable, the new value is correspondingly marked as 3147 unavailable. */ 3148 3149 struct value * 3150 value_field_bitfield (struct type *type, int fieldno, 3151 const gdb_byte *valaddr, 3152 int embedded_offset, const struct value *val) 3153 { 3154 LONGEST l; 3155 3156 if (!unpack_value_field_as_long (type, valaddr, embedded_offset, fieldno, 3157 val, &l)) 3158 { 3159 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno); 3160 struct value *retval = allocate_value (field_type); 3161 mark_value_bytes_unavailable (retval, 0, TYPE_LENGTH (field_type)); 3162 return retval; 3163 } 3164 else 3165 { 3166 return value_from_longest (TYPE_FIELD_TYPE (type, fieldno), l); 3167 } 3168 } 3169 3170 /* Modify the value of a bitfield. ADDR points to a block of memory in 3171 target byte order; the bitfield starts in the byte pointed to. FIELDVAL 3172 is the desired value of the field, in host byte order. BITPOS and BITSIZE 3173 indicate which bits (in target bit order) comprise the bitfield. 3174 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and 3175 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */ 3176 3177 void 3178 modify_field (struct type *type, gdb_byte *addr, 3179 LONGEST fieldval, int bitpos, int bitsize) 3180 { 3181 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); 3182 ULONGEST oword; 3183 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize); 3184 int bytesize; 3185 3186 /* Normalize BITPOS. */ 3187 addr += bitpos / 8; 3188 bitpos %= 8; 3189 3190 /* If a negative fieldval fits in the field in question, chop 3191 off the sign extension bits. */ 3192 if ((~fieldval & ~(mask >> 1)) == 0) 3193 fieldval &= mask; 3194 3195 /* Warn if value is too big to fit in the field in question. */ 3196 if (0 != (fieldval & ~mask)) 3197 { 3198 /* FIXME: would like to include fieldval in the message, but 3199 we don't have a sprintf_longest. */ 3200 warning (_("Value does not fit in %d bits."), bitsize); 3201 3202 /* Truncate it, otherwise adjoining fields may be corrupted. */ 3203 fieldval &= mask; 3204 } 3205 3206 /* Ensure no bytes outside of the modified ones get accessed as it may cause 3207 false valgrind reports. */ 3208 3209 bytesize = (bitpos + bitsize + 7) / 8; 3210 oword = extract_unsigned_integer (addr, bytesize, byte_order); 3211 3212 /* Shifting for bit field depends on endianness of the target machine. */ 3213 if (gdbarch_bits_big_endian (get_type_arch (type))) 3214 bitpos = bytesize * 8 - bitpos - bitsize; 3215 3216 oword &= ~(mask << bitpos); 3217 oword |= fieldval << bitpos; 3218 3219 store_unsigned_integer (addr, bytesize, byte_order, oword); 3220 } 3221 3222 /* Pack NUM into BUF using a target format of TYPE. 
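For example, value_from_longest below simply allocates a value of TYPE and calls this routine on the value's raw contents buffer.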
*/ 3223 3224 void 3225 pack_long (gdb_byte *buf, struct type *type, LONGEST num) 3226 { 3227 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); 3228 int len; 3229 3230 type = check_typedef (type); 3231 len = TYPE_LENGTH (type); 3232 3233 switch (TYPE_CODE (type)) 3234 { 3235 case TYPE_CODE_INT: 3236 case TYPE_CODE_CHAR: 3237 case TYPE_CODE_ENUM: 3238 case TYPE_CODE_FLAGS: 3239 case TYPE_CODE_BOOL: 3240 case TYPE_CODE_RANGE: 3241 case TYPE_CODE_MEMBERPTR: 3242 store_signed_integer (buf, len, byte_order, num); 3243 break; 3244 3245 case TYPE_CODE_REF: 3246 case TYPE_CODE_PTR: 3247 store_typed_address (buf, type, (CORE_ADDR) num); 3248 break; 3249 3250 default: 3251 error (_("Unexpected type (%d) encountered for integer constant."), 3252 TYPE_CODE (type)); 3253 } 3254 } 3255 3256 3257 /* Pack NUM into BUF using a target format of TYPE. */ 3258 3259 static void 3260 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num) 3261 { 3262 int len; 3263 enum bfd_endian byte_order; 3264 3265 type = check_typedef (type); 3266 len = TYPE_LENGTH (type); 3267 byte_order = gdbarch_byte_order (get_type_arch (type)); 3268 3269 switch (TYPE_CODE (type)) 3270 { 3271 case TYPE_CODE_INT: 3272 case TYPE_CODE_CHAR: 3273 case TYPE_CODE_ENUM: 3274 case TYPE_CODE_FLAGS: 3275 case TYPE_CODE_BOOL: 3276 case TYPE_CODE_RANGE: 3277 case TYPE_CODE_MEMBERPTR: 3278 store_unsigned_integer (buf, len, byte_order, num); 3279 break; 3280 3281 case TYPE_CODE_REF: 3282 case TYPE_CODE_PTR: 3283 store_typed_address (buf, type, (CORE_ADDR) num); 3284 break; 3285 3286 default: 3287 error (_("Unexpected type (%d) encountered " 3288 "for unsigned integer constant."), 3289 TYPE_CODE (type)); 3290 } 3291 } 3292 3293 3294 /* Convert C numbers into newly allocated values. */ 3295 3296 struct value * 3297 value_from_longest (struct type *type, LONGEST num) 3298 { 3299 struct value *val = allocate_value (type); 3300 3301 pack_long (value_contents_raw (val), type, num); 3302 return val; 3303 } 3304 3305 3306 /* Convert C unsigned numbers into newly allocated values. */ 3307 3308 struct value * 3309 value_from_ulongest (struct type *type, ULONGEST num) 3310 { 3311 struct value *val = allocate_value (type); 3312 3313 pack_unsigned_long (value_contents_raw (val), type, num); 3314 3315 return val; 3316 } 3317 3318 3319 /* Create a value representing a pointer of type TYPE to the address 3320 ADDR. */ 3321 struct value * 3322 value_from_pointer (struct type *type, CORE_ADDR addr) 3323 { 3324 struct value *val = allocate_value (type); 3325 3326 store_typed_address (value_contents_raw (val), check_typedef (type), addr); 3327 return val; 3328 } 3329 3330 3331 /* Create a value of type TYPE whose contents come from VALADDR, if it 3332 is non-null, and whose memory address (in the inferior) is 3333 ADDRESS. */ 3334 3335 struct value * 3336 value_from_contents_and_address (struct type *type, 3337 const gdb_byte *valaddr, 3338 CORE_ADDR address) 3339 { 3340 struct value *v; 3341 3342 if (valaddr == NULL) 3343 v = allocate_value_lazy (type); 3344 else 3345 v = value_from_contents (type, valaddr); 3346 set_value_address (v, address); 3347 VALUE_LVAL (v) = lval_memory; 3348 return v; 3349 } 3350 3351 /* Create a value of type TYPE holding the contents CONTENTS. 3352 The new value is `not_lval'. 
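For example, value_from_contents_and_address above uses this routine when VALADDR is non-NULL, and then marks the result as an lval_memory value located at ADDRESS.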
*/ 3353 3354 struct value * 3355 value_from_contents (struct type *type, const gdb_byte *contents) 3356 { 3357 struct value *result; 3358 3359 result = allocate_value (type); 3360 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type)); 3361 return result; 3362 } 3363 3364 struct value * 3365 value_from_double (struct type *type, DOUBLEST num) 3366 { 3367 struct value *val = allocate_value (type); 3368 struct type *base_type = check_typedef (type); 3369 enum type_code code = TYPE_CODE (base_type); 3370 3371 if (code == TYPE_CODE_FLT) 3372 { 3373 store_typed_floating (value_contents_raw (val), base_type, num); 3374 } 3375 else 3376 error (_("Unexpected type encountered for floating constant.")); 3377 3378 return val; 3379 } 3380 3381 struct value * 3382 value_from_decfloat (struct type *type, const gdb_byte *dec) 3383 { 3384 struct value *val = allocate_value (type); 3385 3386 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type)); 3387 return val; 3388 } 3389 3390 /* Extract a value from the history file. Input will be of the form 3391 $digits or $$digits. See block comment above 'write_dollar_variable' 3392 for details. */ 3393 3394 struct value * 3395 value_from_history_ref (char *h, char **endp) 3396 { 3397 int index, len; 3398 3399 if (h[0] == '$') 3400 len = 1; 3401 else 3402 return NULL; 3403 3404 if (h[1] == '$') 3405 len = 2; 3406 3407 /* Find length of numeral string. */ 3408 for (; isdigit (h[len]); len++) 3409 ; 3410 3411 /* Make sure numeral string is not part of an identifier. */ 3412 if (h[len] == '_' || isalpha (h[len])) 3413 return NULL; 3414 3415 /* Now collect the index value. */ 3416 if (h[1] == '$') 3417 { 3418 if (len == 2) 3419 { 3420 /* For some bizarre reason, "$$" is equivalent to "$$1", 3421 rather than to "$$0" as it ought to be! */ 3422 index = -1; 3423 *endp += len; 3424 } 3425 else 3426 index = -strtol (&h[2], endp, 10); 3427 } 3428 else 3429 { 3430 if (len == 1) 3431 { 3432 /* "$" is equivalent to "$0". */ 3433 index = 0; 3434 *endp += len; 3435 } 3436 else 3437 index = strtol (&h[1], endp, 10); 3438 } 3439 3440 return access_value_history (index); 3441 } 3442 3443 struct value * 3444 coerce_ref_if_computed (const struct value *arg) 3445 { 3446 const struct lval_funcs *funcs; 3447 3448 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF) 3449 return NULL; 3450 3451 if (value_lval_const (arg) != lval_computed) 3452 return NULL; 3453 3454 funcs = value_computed_funcs (arg); 3455 if (funcs->coerce_ref == NULL) 3456 return NULL; 3457 3458 return funcs->coerce_ref (arg); 3459 } 3460 3461 /* Look at value.h for description. */ 3462 3463 struct value * 3464 readjust_indirect_value_type (struct value *value, struct type *enc_type, 3465 struct type *original_type, 3466 struct value *original_value) 3467 { 3468 /* Re-adjust type. */ 3469 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type)); 3470 3471 /* Add embedding info. */ 3472 set_value_enclosing_type (value, enc_type); 3473 set_value_embedded_offset (value, value_pointed_to_offset (original_value)); 3474 3475 /* We may be pointing to an object of some derived type. 
*/ 3476 return value_full_object (value, NULL, 0, 0, 0); 3477 } 3478 3479 struct value * 3480 coerce_ref (struct value *arg) 3481 { 3482 struct type *value_type_arg_tmp = check_typedef (value_type (arg)); 3483 struct value *retval; 3484 struct type *enc_type; 3485 3486 retval = coerce_ref_if_computed (arg); 3487 if (retval) 3488 return retval; 3489 3490 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF) 3491 return arg; 3492 3493 enc_type = check_typedef (value_enclosing_type (arg)); 3494 enc_type = TYPE_TARGET_TYPE (enc_type); 3495 3496 retval = value_at_lazy (enc_type, 3497 unpack_pointer (value_type (arg), 3498 value_contents (arg))); 3499 return readjust_indirect_value_type (retval, enc_type, 3500 value_type_arg_tmp, arg); 3501 } 3502 3503 struct value * 3504 coerce_array (struct value *arg) 3505 { 3506 struct type *type; 3507 3508 arg = coerce_ref (arg); 3509 type = check_typedef (value_type (arg)); 3510 3511 switch (TYPE_CODE (type)) 3512 { 3513 case TYPE_CODE_ARRAY: 3514 if (!TYPE_VECTOR (type) && current_language->c_style_arrays) 3515 arg = value_coerce_array (arg); 3516 break; 3517 case TYPE_CODE_FUNC: 3518 arg = value_coerce_function (arg); 3519 break; 3520 } 3521 return arg; 3522 } 3523 3524 3525 /* Return the return value convention that will be used for the 3526 specified type. */ 3527 3528 enum return_value_convention 3529 struct_return_convention (struct gdbarch *gdbarch, 3530 struct value *function, struct type *value_type) 3531 { 3532 enum type_code code = TYPE_CODE (value_type); 3533 3534 if (code == TYPE_CODE_ERROR) 3535 error (_("Function return type unknown.")); 3536 3537 /* Probe the architecture for the return-value convention. */ 3538 return gdbarch_return_value (gdbarch, function, value_type, 3539 NULL, NULL, NULL); 3540 } 3541 3542 /* Return true if the function returning the specified type is using 3543 the convention of returning structures in memory (passing in the 3544 address as a hidden first parameter). */ 3545 3546 int 3547 using_struct_return (struct gdbarch *gdbarch, 3548 struct value *function, struct type *value_type) 3549 { 3550 if (TYPE_CODE (value_type) == TYPE_CODE_VOID) 3551 /* A void return value is never in memory. See also corresponding 3552 code in "print_return_value". */ 3553 return 0; 3554 3555 return (struct_return_convention (gdbarch, function, value_type) 3556 != RETURN_VALUE_REGISTER_CONVENTION); 3557 } 3558 3559 /* Set the initialized field in a value struct. */ 3560 3561 void 3562 set_value_initialized (struct value *val, int status) 3563 { 3564 val->initialized = status; 3565 } 3566 3567 /* Return the initialized field in a value struct. */ 3568 3569 int 3570 value_initialized (struct value *val) 3571 { 3572 return val->initialized; 3573 } 3574 3575 /* Called only from the value_contents and value_contents_all() 3576 macros, if the current data for a variable needs to be loaded into 3577 value_contents(VAL). Fetches the data from the user's process, and 3578 clears the lazy flag to indicate that the data in the buffer is 3579 valid. 3580 3581 If the value is zero-length, we avoid calling read_memory, which 3582 would abort. We mark the value as fetched anyway -- all 0 bytes of 3583 it. 3584 3585 This function returns a value because it is used in the 3586 value_contents macro as part of an expression, where a void would 3587 not work. The value is ignored. 
*/ 3588 3589 int 3590 value_fetch_lazy (struct value *val) 3591 { 3592 gdb_assert (value_lazy (val)); 3593 allocate_value_contents (val); 3594 if (value_bitsize (val)) 3595 { 3596 /* To read a lazy bitfield, read the entire enclosing value. This 3597 prevents reading the same block of (possibly volatile) memory once 3598 per bitfield. It would be even better to read only the containing 3599 word, but we have no way to record that just specific bits of a 3600 value have been fetched. */ 3601 struct type *type = check_typedef (value_type (val)); 3602 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); 3603 struct value *parent = value_parent (val); 3604 LONGEST offset = value_offset (val); 3605 LONGEST num; 3606 3607 if (value_lazy (parent)) 3608 value_fetch_lazy (parent); 3609 3610 if (!value_bits_valid (parent, 3611 TARGET_CHAR_BIT * offset + value_bitpos (val), 3612 value_bitsize (val))) 3613 set_value_optimized_out (val, 1); 3614 else if (!unpack_value_bits_as_long (value_type (val), 3615 value_contents_for_printing (parent), 3616 offset, 3617 value_bitpos (val), 3618 value_bitsize (val), parent, &num)) 3619 mark_value_bytes_unavailable (val, 3620 value_embedded_offset (val), 3621 TYPE_LENGTH (type)); 3622 else 3623 store_signed_integer (value_contents_raw (val), TYPE_LENGTH (type), 3624 byte_order, num); 3625 } 3626 else if (VALUE_LVAL (val) == lval_memory) 3627 { 3628 CORE_ADDR addr = value_address (val); 3629 struct type *type = check_typedef (value_enclosing_type (val)); 3630 3631 if (TYPE_LENGTH (type)) 3632 read_value_memory (val, 0, value_stack (val), 3633 addr, value_contents_all_raw (val), 3634 TYPE_LENGTH (type)); 3635 } 3636 else if (VALUE_LVAL (val) == lval_register) 3637 { 3638 struct frame_info *frame; 3639 int regnum; 3640 struct type *type = check_typedef (value_type (val)); 3641 struct value *new_val = val, *mark = value_mark (); 3642 3643 /* Offsets are not supported here; lazy register values must 3644 refer to the entire register. */ 3645 gdb_assert (value_offset (val) == 0); 3646 3647 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val)) 3648 { 3649 struct frame_id frame_id = VALUE_FRAME_ID (new_val); 3650 3651 frame = frame_find_by_id (frame_id); 3652 regnum = VALUE_REGNUM (new_val); 3653 3654 gdb_assert (frame != NULL); 3655 3656 /* Convertible register routines are used for multi-register 3657 values and for interpretation in different types 3658 (e.g. float or int from a double register). Lazy 3659 register values should have the register's natural type, 3660 so they do not apply. */ 3661 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame), 3662 regnum, type)); 3663 3664 new_val = get_frame_register_value (frame, regnum); 3665 3666 /* If we get another lazy lval_register value, it means the 3667 register is found by reading it from the next frame. 3668 get_frame_register_value should never return a value with 3669 the frame id pointing to FRAME. If it does, it means we 3670 either have two consecutive frames with the same frame id 3671 in the frame chain, or some code is trying to unwind 3672 behind get_prev_frame's back (e.g., a frame unwind 3673 sniffer trying to unwind), bypassing its validations. In 3674 any case, it should always be an internal error to end up 3675 in this situation. 
*/ 3676 if (VALUE_LVAL (new_val) == lval_register 3677 && value_lazy (new_val) 3678 && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id)) 3679 internal_error (__FILE__, __LINE__, 3680 _("infinite loop while fetching a register")); 3681 } 3682 3683 /* If it's still lazy (for instance, a saved register on the 3684 stack), fetch it. */ 3685 if (value_lazy (new_val)) 3686 value_fetch_lazy (new_val); 3687 3688 /* If the register was not saved, mark it optimized out. */ 3689 if (value_optimized_out (new_val)) 3690 set_value_optimized_out (val, 1); 3691 else 3692 { 3693 set_value_lazy (val, 0); 3694 value_contents_copy (val, value_embedded_offset (val), 3695 new_val, value_embedded_offset (new_val), 3696 TYPE_LENGTH (type)); 3697 } 3698 3699 if (frame_debug) 3700 { 3701 struct gdbarch *gdbarch; 3702 frame = frame_find_by_id (VALUE_FRAME_ID (val)); 3703 regnum = VALUE_REGNUM (val); 3704 gdbarch = get_frame_arch (frame); 3705 3706 fprintf_unfiltered (gdb_stdlog, 3707 "{ value_fetch_lazy " 3708 "(frame=%d,regnum=%d(%s),...) ", 3709 frame_relative_level (frame), regnum, 3710 user_reg_map_regnum_to_name (gdbarch, regnum)); 3711 3712 fprintf_unfiltered (gdb_stdlog, "->"); 3713 if (value_optimized_out (new_val)) 3714 { 3715 fprintf_unfiltered (gdb_stdlog, " "); 3716 val_print_optimized_out (new_val, gdb_stdlog); 3717 } 3718 else 3719 { 3720 int i; 3721 const gdb_byte *buf = value_contents (new_val); 3722 3723 if (VALUE_LVAL (new_val) == lval_register) 3724 fprintf_unfiltered (gdb_stdlog, " register=%d", 3725 VALUE_REGNUM (new_val)); 3726 else if (VALUE_LVAL (new_val) == lval_memory) 3727 fprintf_unfiltered (gdb_stdlog, " address=%s", 3728 paddress (gdbarch, 3729 value_address (new_val))); 3730 else 3731 fprintf_unfiltered (gdb_stdlog, " computed"); 3732 3733 fprintf_unfiltered (gdb_stdlog, " bytes="); 3734 fprintf_unfiltered (gdb_stdlog, "["); 3735 for (i = 0; i < register_size (gdbarch, regnum); i++) 3736 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]); 3737 fprintf_unfiltered (gdb_stdlog, "]"); 3738 } 3739 3740 fprintf_unfiltered (gdb_stdlog, " }\n"); 3741 } 3742 3743 /* Dispose of the intermediate values. This prevents 3744 watchpoints from trying to watch the saved frame pointer. */ 3745 value_free_to_mark (mark); 3746 } 3747 else if (VALUE_LVAL (val) == lval_computed 3748 && value_computed_funcs (val)->read != NULL) 3749 value_computed_funcs (val)->read (val); 3750 /* Don't call value_optimized_out on val, doing so would result in a 3751 recursive call back to value_fetch_lazy, instead check the 3752 optimized_out flag directly. */ 3753 else if (val->optimized_out) 3754 /* Keep it optimized out. */; 3755 else 3756 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type.")); 3757 3758 set_value_lazy (val, 0); 3759 return 0; 3760 } 3761 3762 /* Implementation of the convenience function $_isvoid. 
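An illustrative session (assuming $foo has never been assigned, so its value is void):

       (gdb) print $_isvoid ($foo)
       $1 = 1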
*/ 3763 3764 static struct value * 3765 isvoid_internal_fn (struct gdbarch *gdbarch, 3766 const struct language_defn *language, 3767 void *cookie, int argc, struct value **argv) 3768 { 3769 int ret; 3770 3771 if (argc != 1) 3772 error (_("You must provide one argument for $_isvoid.")); 3773 3774 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID; 3775 3776 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret); 3777 } 3778 3779 void 3780 _initialize_values (void) 3781 { 3782 add_cmd ("convenience", no_class, show_convenience, _("\ 3783 Debugger convenience (\"$foo\") variables and functions.\n\ 3784 Convenience variables are created when you assign them values;\n\ 3785 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\ 3786 \n\ 3787 A few convenience variables are given values automatically:\n\ 3788 \"$_\" holds the last address examined with \"x\" or \"info lines\",\n\ 3789 \"$__\" holds the contents of the last address examined with \"x\"." 3790 #ifdef HAVE_PYTHON 3791 "\n\n\ 3792 Convenience functions are defined via the Python API." 3793 #endif 3794 ), &showlist); 3795 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist); 3796 3797 add_cmd ("values", no_set_class, show_values, _("\ 3798 Elements of value history around item number IDX (or last ten)."), 3799 &showlist); 3800 3801 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\ 3802 Initialize a convenience variable if necessary.\n\ 3803 init-if-undefined VARIABLE = EXPRESSION\n\ 3804 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\ 3805 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\ 3806 VARIABLE is already initialized.")); 3807 3808 add_prefix_cmd ("function", no_class, function_command, _("\ 3809 Placeholder command for showing help on convenience functions."), 3810 &functionlist, "function ", 0, &cmdlist); 3811 3812 add_internal_function ("_isvoid", _("\ 3813 Check whether an expression is void.\n\ 3814 Usage: $_isvoid (expression)\n\ 3815 Return 1 if the expression is void, zero otherwise."), 3816 isvoid_internal_fn, NULL); 3817 } 3818