/* Caching code for GDB, the GNU debugger.

   Copyright (C) 1992-2019 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "dcache.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "target-dcache.h"
#include "inferior.h"
#include "splay-tree.h"

/* Commands with a prefix of `{set,show} dcache'.  */
static struct cmd_list_element *dcache_set_list = NULL;
static struct cmd_list_element *dcache_show_list = NULL;

/* The data cache could lead to incorrect results because it doesn't
   know about volatile variables, thus making it impossible to debug
   functions which use memory mapped I/O devices.  Set the nocache
   memory region attribute in those cases.

   In general the dcache speeds up performance.  Some speed improvement
   comes from the actual caching mechanism, but the major gain is in
   the reduction of the remote protocol overhead; instead of reading
   or writing a large area of memory in 4 byte requests, the cache
   bundles up the requests into LINE_SIZE chunks, reducing overhead
   significantly.  This is most useful when accessing a large amount
   of data, such as when performing a backtrace.

   The cache is a splay tree along with a linked list for replacement.
   Each block caches a LINE_SIZE area of memory.  Within each line we
   remember the address of the line (which must be a multiple of
   LINE_SIZE) and the actual data block.

   Lines are only allocated as needed, so DCACHE_SIZE really specifies the
   *maximum* number of lines in the cache.

   At present, the cache is write-through rather than writeback: as soon
   as data is written to the cache, it is also immediately written to
   the target.  Therefore, cache lines are never "dirty".  Whether a given
   line is valid or not depends on where it is stored in the dcache_struct;
   there is no per-block valid flag.  */

/* NOTE: Interaction of dcache and memory region attributes

   As there is no requirement that memory region attributes be aligned
   to or be a multiple of the dcache page size, dcache_read_line() and
   dcache_write_line() must break up the page by memory region.  If a
   chunk does not have the cache attribute set, an invalid memory type
   is set, etc., then the chunk is skipped.  Those chunks are handled
   in target_xfer_memory() (or target_xfer_memory_partial()).

   This doesn't occur very often.  The most common occurrence is when
   the last bit of the .text segment and the first bit of the .data
   segment fall within the same dcache page with a ro/cacheable memory
   region defined for the .text segment and a rw/non-cacheable memory
   region defined for the .data segment.  */

/* The maximum number of lines stored.  The total size of the cache is
   equal to DCACHE_SIZE times LINE_SIZE.  */
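/* For illustration (not a requirement of the code): with the default
   values below, a fully populated cache holds 4096 lines of 64 bytes
   each, i.e. 256 KiB of target memory.  */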
#define DCACHE_DEFAULT_SIZE 4096
static unsigned dcache_size = DCACHE_DEFAULT_SIZE;

/* The default size of a cache line.  Smaller values reduce the time taken to
   read a single byte and make the cache more granular, but increase
   overhead and reduce the effectiveness of the cache as a prefetcher.  */
#define DCACHE_DEFAULT_LINE_SIZE 64
static unsigned dcache_line_size = DCACHE_DEFAULT_LINE_SIZE;

/* Each cache block holds LINE_SIZE bytes of data
   starting at a multiple-of-LINE_SIZE address.  */

#define LINE_SIZE_MASK(dcache)  ((dcache->line_size - 1))
#define XFORM(dcache, x)        ((x) & LINE_SIZE_MASK (dcache))
#define MASK(dcache, x)         ((x) & ~LINE_SIZE_MASK (dcache))

struct dcache_block
{
  /* For least-recently-allocated and free lists.  */
  struct dcache_block *prev;
  struct dcache_block *next;

  CORE_ADDR addr;		/* address of data */
  int refs;			/* # hits */
  gdb_byte data[1];		/* line_size bytes at given address */
};

struct dcache_struct
{
  splay_tree tree;
  struct dcache_block *oldest;	/* least-recently-allocated list.  */

  /* The free list is maintained identically to OLDEST to simplify
     the code: we only need one set of accessors.  */
  struct dcache_block *freelist;

  /* The number of in-use lines in the cache.  */
  int size;
  CORE_ADDR line_size;		/* current line_size.  */

  /* The ptid of last inferior to use cache or null_ptid.  */
  ptid_t ptid;
};

typedef void (block_func) (struct dcache_block *block, void *param);

static struct dcache_block *dcache_hit (DCACHE *dcache, CORE_ADDR addr);

static int dcache_read_line (DCACHE *dcache, struct dcache_block *db);

static struct dcache_block *dcache_alloc (DCACHE *dcache, CORE_ADDR addr);

static int dcache_enabled_p = 0; /* OBSOLETE */

static void
show_dcache_enabled_p (struct ui_file *file, int from_tty,
		       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Deprecated remotecache flag is %s.\n"), value);
}

/* Add BLOCK to circular block list BLIST, behind the block at *BLIST.
   *BLIST is not updated (unless it was previously NULL of course).
   This is for the least-recently-allocated list's sake:
   BLIST points to the oldest block.
   ??? This makes for poor cache usage of the free list,
   but is it measurable?  */

static void
append_block (struct dcache_block **blist, struct dcache_block *block)
{
  if (*blist)
    {
      block->next = *blist;
      block->prev = (*blist)->prev;
      block->prev->next = block;
      (*blist)->prev = block;
      /* We don't update *BLIST here to maintain the invariant that for the
	 least-recently-allocated list *BLIST points to the oldest block.  */
    }
  else
    {
      block->next = block;
      block->prev = block;
      *blist = block;
    }
}

/* Remove BLOCK from circular block list BLIST.  */

static void
remove_block (struct dcache_block **blist, struct dcache_block *block)
{
  if (block->next == block)
    {
      *blist = NULL;
    }
  else
    {
      block->next->prev = block->prev;
      block->prev->next = block->next;
      /* If we removed the block *BLIST points to, shift it to the next block
	 to maintain the invariant that for the least-recently-allocated list
	 *BLIST points to the oldest block.  */
      if (*blist == block)
	*blist = block->next;
    }
}
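/* A worked example of the two helpers above (illustrative only, not part
   of the implementation): starting from an empty list and appending
   blocks A, B and C in that order yields the circular list
   A <-> B <-> C <-> A, with *BLIST still pointing at A, the oldest
   block.  remove_block on A then advances *BLIST to B, preserving the
   "BLIST points to the oldest block" invariant.  */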
/* Iterate over all elements in BLIST, calling FUNC.
   PARAM is passed to FUNC.
   FUNC may remove the block it's passed, but only that block.  */

static void
for_each_block (struct dcache_block **blist, block_func *func, void *param)
{
  struct dcache_block *db;

  if (*blist == NULL)
    return;

  db = *blist;
  do
    {
      struct dcache_block *next = db->next;

      func (db, param);
      db = next;
    }
  while (*blist && db != *blist);
}

/* BLOCK_FUNC routine for dcache_free.  */

static void
free_block (struct dcache_block *block, void *param)
{
  xfree (block);
}

/* Free a data cache.  */

void
dcache_free (DCACHE *dcache)
{
  splay_tree_delete (dcache->tree);
  for_each_block (&dcache->oldest, free_block, NULL);
  for_each_block (&dcache->freelist, free_block, NULL);
  xfree (dcache);
}


/* BLOCK_FUNC function for dcache_invalidate.
   This doesn't remove the block from the oldest list on purpose.
   dcache_invalidate will do it later.  */

static void
invalidate_block (struct dcache_block *block, void *param)
{
  DCACHE *dcache = (DCACHE *) param;

  splay_tree_remove (dcache->tree, (splay_tree_key) block->addr);
  append_block (&dcache->freelist, block);
}

/* Free all the data cache blocks, thus discarding all cached data.  */

void
dcache_invalidate (DCACHE *dcache)
{
  for_each_block (&dcache->oldest, invalidate_block, dcache);

  dcache->oldest = NULL;
  dcache->size = 0;
  dcache->ptid = null_ptid;

  if (dcache->line_size != dcache_line_size)
    {
      /* We've been asked to use a different line size.
	 All of our freelist blocks are now the wrong size, so free them.  */

      for_each_block (&dcache->freelist, free_block, dcache);
      dcache->freelist = NULL;
      dcache->line_size = dcache_line_size;
    }
}

/* Invalidate the line associated with ADDR.  */

static void
dcache_invalidate_line (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (db)
    {
      splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
      remove_block (&dcache->oldest, db);
      append_block (&dcache->freelist, db);
      --dcache->size;
    }
}

/* If addr is present in the dcache, return the address of the block
   containing it.  Otherwise return NULL.  */

static struct dcache_block *
dcache_hit (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db;

  splay_tree_node node = splay_tree_lookup (dcache->tree,
					    (splay_tree_key) MASK (dcache, addr));

  if (!node)
    return NULL;

  db = (struct dcache_block *) node->value;
  db->refs++;
  return db;
}
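/* A worked example of the line addressing used above, assuming the
   default 64-byte line size: for ADDR 0x1003, MASK yields the line key
   0x1000 and XFORM yields the offset 3, so every address in
   0x1000..0x103f resolves to the same splay-tree node and hence the
   same cache block.  */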
/* Fill a cache line from target memory.
   The result is 1 for success, 0 if the (entire) cache line
   wasn't readable.  */

static int
dcache_read_line (DCACHE *dcache, struct dcache_block *db)
{
  CORE_ADDR memaddr;
  gdb_byte *myaddr;
  int len;
  int res;
  int reg_len;
  struct mem_region *region;

  len = dcache->line_size;
  memaddr = db->addr;
  myaddr = db->data;

  while (len > 0)
    {
      /* Don't overrun if this block is right at the end of the region.  */
      region = lookup_mem_region (memaddr);
      if (region->hi == 0 || memaddr + len < region->hi)
	reg_len = len;
      else
	reg_len = region->hi - memaddr;

      /* Skip non-readable regions.  The cache attribute can be ignored,
	 since we may be loading this for a stack access.  */
      if (region->attrib.mode == MEM_WO)
	{
	  memaddr += reg_len;
	  myaddr += reg_len;
	  len -= reg_len;
	  continue;
	}

      res = target_read_raw_memory (memaddr, myaddr, reg_len);
      if (res != 0)
	return 0;

      memaddr += reg_len;
      myaddr += reg_len;
      len -= reg_len;
    }

  return 1;
}

/* Get a free cache block, put or keep it on the valid list,
   and return its address.  */

static struct dcache_block *
dcache_alloc (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db;

  if (dcache->size >= dcache_size)
    {
      /* Evict the least recently allocated line.  */
      db = dcache->oldest;
      remove_block (&dcache->oldest, db);

      splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
    }
  else
    {
      db = dcache->freelist;
      if (db)
	remove_block (&dcache->freelist, db);
      else
	db = ((struct dcache_block *)
	      xmalloc (offsetof (struct dcache_block, data)
		       + dcache->line_size));

      dcache->size++;
    }

  db->addr = MASK (dcache, addr);
  db->refs = 0;

  /* Put DB at the end of the list, it's the newest.  */
  append_block (&dcache->oldest, db);

  splay_tree_insert (dcache->tree, (splay_tree_key) db->addr,
		     (splay_tree_value) db);

  return db;
}

/* Using the data cache DCACHE, store in *PTR the contents of the byte at
   address ADDR in the remote machine.

   Returns 1 for success, 0 for error.  */

static int
dcache_peek_byte (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (!db)
    {
      db = dcache_alloc (dcache, addr);

      if (!dcache_read_line (dcache, db))
	return 0;
    }

  *ptr = db->data[XFORM (dcache, addr)];
  return 1;
}

/* Write the byte at PTR into ADDR in the data cache.

   The caller should have written the data through to target memory
   already.

   If ADDR is not in cache, this function does nothing; writing to an
   area of memory which wasn't present in the cache doesn't cause it
   to be loaded in.  */

static void
dcache_poke_byte (DCACHE *dcache, CORE_ADDR addr, const gdb_byte *ptr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (db)
    db->data[XFORM (dcache, addr)] = *ptr;
}

static int
dcache_splay_tree_compare (splay_tree_key a, splay_tree_key b)
{
  if (a > b)
    return 1;
  else if (a == b)
    return 0;
  else
    return -1;
}

/* Allocate and initialize a data cache.  */

DCACHE *
dcache_init (void)
{
  DCACHE *dcache = XNEW (DCACHE);

  dcache->tree = splay_tree_new (dcache_splay_tree_compare,
				 NULL,
				 NULL);

  dcache->oldest = NULL;
  dcache->freelist = NULL;
  dcache->size = 0;
  dcache->line_size = dcache_line_size;
  dcache->ptid = null_ptid;

  return dcache;
}

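/* A minimal usage sketch of the public entry points (hypothetical
   caller, for illustration only; in GDB the cache instance is owned by
   target-dcache.c rather than created ad hoc like this):

     DCACHE *c = dcache_init ();
     gdb_byte buf[128];
     ULONGEST got;

     if (dcache_read_memory_partial (ops, c, addr, buf, sizeof buf, &got)
	 == TARGET_XFER_OK)
       ... the first GOT bytes of BUF are now valid, and the lines
	   covering them stay cached for later accesses ...

     dcache_free (c);  */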
/* Read LEN bytes from dcache memory at MEMADDR, transferring to
   debugger address MYADDR.  If the data is not presently cached, this
   fills the cache on the way through.  Arguments/return are like the
   target_xfer_partial interface.  */

enum target_xfer_status
dcache_read_memory_partial (struct target_ops *ops, DCACHE *dcache,
			    CORE_ADDR memaddr, gdb_byte *myaddr,
			    ULONGEST len, ULONGEST *xfered_len)
{
  ULONGEST i;

  /* If this is a different inferior from what we've recorded,
     flush the cache.  */

  if (inferior_ptid != dcache->ptid)
    {
      dcache_invalidate (dcache);
      dcache->ptid = inferior_ptid;
    }

  for (i = 0; i < len; i++)
    {
      if (!dcache_peek_byte (dcache, memaddr + i, myaddr + i))
	{
	  /* That failed.  Discard its cache line so we don't have a
	     partially read line.  */
	  dcache_invalidate_line (dcache, memaddr + i);
	  break;
	}
    }

  if (i == 0)
    {
      /* Even though reading the whole line failed, we may be able to
	 read a piece starting where the caller wanted.  */
      return raw_memory_xfer_partial (ops, myaddr, NULL, memaddr, len,
				      xfered_len);
    }
  else
    {
      *xfered_len = i;
      return TARGET_XFER_OK;
    }
}

/* FIXME: There would be some benefit to making the cache write-back and
   moving the writeback operation to a higher layer, as it could occur
   after a sequence of smaller writes have been completed (as when a stack
   frame is constructed for an inferior function call).  Note that only
   moving it up one level to target_xfer_memory[_partial]() is not
   sufficient since we want to coalesce memory transfers that are
   "logically" connected but not actually a single call to one of the
   memory transfer functions.  */

/* Just update any cache lines which are already present.  This is
   called by the target_xfer_partial machinery when writing raw
   memory.  */

void
dcache_update (DCACHE *dcache, enum target_xfer_status status,
	       CORE_ADDR memaddr, const gdb_byte *myaddr,
	       ULONGEST len)
{
  ULONGEST i;

  for (i = 0; i < len; i++)
    if (status == TARGET_XFER_OK)
      dcache_poke_byte (dcache, memaddr + i, myaddr + i);
    else
      {
	/* Discard the whole cache line so we don't have a partially
	   valid line.  */
	dcache_invalidate_line (dcache, memaddr + i);
      }
}

/* Print DCACHE line INDEX.  */

static void
dcache_print_line (DCACHE *dcache, int index)
{
  splay_tree_node n;
  struct dcache_block *db;
  int i, j;

  if (dcache == NULL)
    {
      printf_filtered (_("No data cache available.\n"));
      return;
    }

  n = splay_tree_min (dcache->tree);

  for (i = index; i > 0; --i)
    {
      if (!n)
	break;
      n = splay_tree_successor (dcache->tree, n->key);
    }

  if (!n)
    {
      printf_filtered (_("No such cache line exists.\n"));
      return;
    }

  db = (struct dcache_block *) n->value;

  printf_filtered (_("Line %d: address %s [%d hits]\n"),
		   index, paddress (target_gdbarch (), db->addr), db->refs);

  for (j = 0; j < dcache->line_size; j++)
    {
      printf_filtered ("%02x ", db->data[j]);

      /* Print a newline every 16 bytes (48 characters).  */
      if ((j % 16 == 15) && (j != dcache->line_size - 1))
	printf_filtered ("\n");
    }
  printf_filtered ("\n");
}
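/* With the default 64-byte line size, the dump above comes out as four
   rows of sixteen byte values; each byte is printed as "%02x " (three
   characters), which is where the "48 characters" in the comment above
   comes from.  */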
/* Parse EXP and show the info about DCACHE.  */

static void
dcache_info_1 (DCACHE *dcache, const char *exp)
{
  splay_tree_node n;
  int i, refcount;

  if (exp)
    {
      char *linestart;

      i = strtol (exp, &linestart, 10);
      if (linestart == exp || i < 0)
	{
	  printf_filtered (_("Usage: info dcache [LINENUMBER]\n"));
	  return;
	}

      dcache_print_line (dcache, i);
      return;
    }

  printf_filtered (_("Dcache %u lines of %u bytes each.\n"),
		   dcache_size,
		   dcache ? (unsigned) dcache->line_size
		   : dcache_line_size);

  if (dcache == NULL || dcache->ptid == null_ptid)
    {
      printf_filtered (_("No data cache available.\n"));
      return;
    }

  printf_filtered (_("Contains data for %s\n"),
		   target_pid_to_str (dcache->ptid));

  refcount = 0;

  n = splay_tree_min (dcache->tree);
  i = 0;

  while (n)
    {
      struct dcache_block *db = (struct dcache_block *) n->value;

      printf_filtered (_("Line %d: address %s [%d hits]\n"),
		       i, paddress (target_gdbarch (), db->addr), db->refs);
      i++;
      refcount += db->refs;

      n = splay_tree_successor (dcache->tree, n->key);
    }

  printf_filtered (_("Cache state: %d active lines, %d hits\n"), i, refcount);
}

static void
info_dcache_command (const char *exp, int tty)
{
  dcache_info_1 (target_dcache_get (), exp);
}

static void
set_dcache_size (const char *args, int from_tty,
		 struct cmd_list_element *c)
{
  if (dcache_size == 0)
    {
      dcache_size = DCACHE_DEFAULT_SIZE;
      error (_("Dcache size must be greater than 0."));
    }
  target_dcache_invalidate ();
}

static void
set_dcache_line_size (const char *args, int from_tty,
		      struct cmd_list_element *c)
{
  if (dcache_line_size < 2
      || (dcache_line_size & (dcache_line_size - 1)) != 0)
    {
      unsigned d = dcache_line_size;
      dcache_line_size = DCACHE_DEFAULT_LINE_SIZE;
      error (_("Invalid dcache line size: %u (must be power of 2)."), d);
    }
  target_dcache_invalidate ();
}

static void
set_dcache_command (const char *arg, int from_tty)
{
  printf_unfiltered (
     "\"set dcache\" must be followed by the name of a subcommand.\n");
  help_list (dcache_set_list, "set dcache ", all_commands, gdb_stdout);
}

static void
show_dcache_command (const char *args, int from_tty)
{
  cmd_show_list (dcache_show_list, from_tty, "");
}
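/* The commands registered below give, for example:

     (gdb) set dcache line-size 128
     (gdb) set dcache size 2048
     (gdb) info dcache
     (gdb) info dcache 0

   (Illustrative invocations only.)  Changing either the line size or
   the number of lines invalidates the target's dcache via
   target_dcache_invalidate, so the new geometry takes effect on the
   next cached access.  */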
void
_initialize_dcache (void)
{
  add_setshow_boolean_cmd ("remotecache", class_support,
			   &dcache_enabled_p, _("\
Set cache use for remote targets."), _("\
Show cache use for remote targets."), _("\
This used to enable the data cache for remote targets.  The cache\n\
functionality is now controlled by the memory region system and the\n\
\"stack-cache\" flag; \"remotecache\" now does nothing and\n\
exists only for compatibility reasons."),
			   NULL,
			   show_dcache_enabled_p,
			   &setlist, &showlist);

  add_info ("dcache", info_dcache_command,
	    _("\
Print information on the dcache performance.\n\
Usage: info dcache [LINENUMBER]\n\
With no arguments, this command prints the cache configuration and a\n\
summary of each line in the cache.\n\
With an argument, dump the contents of the given line."));

  add_prefix_cmd ("dcache", class_obscure, set_dcache_command, _("\
Use this command to set number of lines in dcache and line-size."),
		  &dcache_set_list, "set dcache ", /*allow_unknown*/0, &setlist);
  add_prefix_cmd ("dcache", class_obscure, show_dcache_command, _("\
Show dcache settings."),
		  &dcache_show_list, "show dcache ", /*allow_unknown*/0, &showlist);

  add_setshow_zuinteger_cmd ("line-size", class_obscure,
			     &dcache_line_size, _("\
Set dcache line size in bytes (must be power of 2)."), _("\
Show dcache line size."),
			     NULL,
			     set_dcache_line_size,
			     NULL,
			     &dcache_set_list, &dcache_show_list);
  add_setshow_zuinteger_cmd ("size", class_obscure,
			     &dcache_size, _("\
Set number of dcache lines."), _("\
Show number of dcache lines."),
			     NULL,
			     set_dcache_size,
			     NULL,
			     &dcache_set_list, &dcache_show_list);
}