/* Parts of target interface that deal with accessing memory and memory-like
   objects.

   Copyright (C) 2006-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "vec.h"
#include "target.h"
#include "memory-map.h"

#include <sys/time.h>

/* Comparison function for qsort: order two memory write requests by
   ascending starting address.  */

static int
compare_block_starting_address (const void *a, const void *b)
{
  const struct memory_write_request *a_req = a;
  const struct memory_write_request *b_req = b;

  if (a_req->begin < b_req->begin)
    return -1;
  else if (a_req->begin == b_req->begin)
    return 0;
  else
    return 1;
}

/* Adds to RESULT all memory write requests from BLOCKS that are
   in the [BEGIN, END) range.

   If any memory request is only partially in the specified range,
   that part of the memory request will be added.  */

static void
claim_memory (VEC(memory_write_request_s) *blocks,
              VEC(memory_write_request_s) **result,
              ULONGEST begin,
              ULONGEST end)
{
  int i;
  ULONGEST claimed_begin;
  ULONGEST claimed_end;
  struct memory_write_request *r;

  for (i = 0; VEC_iterate (memory_write_request_s, blocks, i, r); ++i)
    {
      /* If the request doesn't overlap [BEGIN, END), skip it.  We
         must handle END == 0 meaning the top of memory; we don't yet
         check for R->end == 0, which would also mean the top of
         memory, but there's an assertion in
         target_write_memory_blocks which checks for that.  */

      if (begin >= r->end)
        continue;
      if (end != 0 && end <= r->begin)
        continue;

      claimed_begin = max (begin, r->begin);
      if (end == 0)
        claimed_end = r->end;
      else
        claimed_end = min (end, r->end);

      if (claimed_begin == r->begin && claimed_end == r->end)
        VEC_safe_push (memory_write_request_s, *result, r);
      else
        {
          struct memory_write_request *n =
            VEC_safe_push (memory_write_request_s, *result, NULL);

          *n = *r;
          n->begin = claimed_begin;
          n->end = claimed_end;
          n->data += claimed_begin - r->begin;
        }
    }
}
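/* Example (addresses illustrative only): if BLOCKS contains a single
   request covering [0x1000, 0x1010), then

       claim_memory (blocks, &result, 0x1008, 0x1020);

   finds a partial overlap and pushes a trimmed copy covering
   [0x1008, 0x1010) onto *RESULT, with the copy's data pointer
   advanced by 0x8 bytes so that it still refers to the right part of
   the original buffer.  */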
/* Given a vector of struct memory_write_request objects in BLOCKS,
   add memory requests for flash memory into FLASH_BLOCKS, and for
   regular memory to REGULAR_BLOCKS.  */

static void
split_regular_and_flash_blocks (VEC(memory_write_request_s) *blocks,
                                VEC(memory_write_request_s) **regular_blocks,
                                VEC(memory_write_request_s) **flash_blocks)
{
  struct mem_region *region;
  CORE_ADDR cur_address;

  /* This implementation runs in O(length(regions)*length(blocks)) time.
     However, in most cases the number of blocks will be small, so this does
     not matter.

     Note also that it's extremely unlikely that a memory write request
     will span more than one memory region; however, for safety, we handle
     such situations.  */

  cur_address = 0;
  while (1)
    {
      VEC(memory_write_request_s) **r;

      region = lookup_mem_region (cur_address);
      r = region->attrib.mode == MEM_FLASH ? flash_blocks : regular_blocks;
      cur_address = region->hi;
      claim_memory (blocks, r, region->lo, region->hi);

      /* A region with HI == 0 extends to the top of the address
         space, so stop after processing it.  */
      if (cur_address == 0)
        break;
    }
}

/* Given an ADDRESS, if BEGIN is non-NULL this function sets *BEGIN
   to the start of the flash block containing the address.  Similarly,
   if END is non-NULL *END will be set to the address one past the end
   of the block containing the address.  */

static void
block_boundaries (CORE_ADDR address, CORE_ADDR *begin, CORE_ADDR *end)
{
  struct mem_region *region;
  unsigned blocksize;

  region = lookup_mem_region (address);
  gdb_assert (region->attrib.mode == MEM_FLASH);
  blocksize = region->attrib.blocksize;
  if (begin)
    *begin = address / blocksize * blocksize;
  if (end)
    /* One past the end of the block containing ADDRESS, even when
       ADDRESS is exactly block-aligned.  */
    *end = address / blocksize * blocksize + blocksize;
}

/* Given the list of memory requests to be WRITTEN, this function
   returns write requests covering each group of flash blocks which must
   be erased.  */

static VEC(memory_write_request_s) *
blocks_to_erase (VEC(memory_write_request_s) *written)
{
  unsigned i;
  struct memory_write_request *ptr;

  VEC(memory_write_request_s) *result = NULL;

  for (i = 0; VEC_iterate (memory_write_request_s, written, i, ptr); ++i)
    {
      CORE_ADDR begin, end;

      block_boundaries (ptr->begin, &begin, 0);
      block_boundaries (ptr->end - 1, 0, &end);

      /* Merge with the previous erase request if the new blocks are
         adjacent to or overlap it.  */
      if (!VEC_empty (memory_write_request_s, result)
          && VEC_last (memory_write_request_s, result)->end >= begin)
        {
          VEC_last (memory_write_request_s, result)->end = end;
        }
      else
        {
          struct memory_write_request *n =
            VEC_safe_push (memory_write_request_s, result, NULL);

          memset (n, 0, sizeof (struct memory_write_request));
          n->begin = begin;
          n->end = end;
        }
    }

  return result;
}
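/* Example (numbers illustrative only): with a flash blocksize of
   0x100, a single write request covering [0x1004, 0x1108) is rounded
   out to one erase request covering [0x1000, 0x1200).  A following
   request starting at 0x1200 would then be merged into that erase
   request rather than pushed as a separate entry, since WRITTEN is
   sorted by start address by the caller.  */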
/* Given ERASED_BLOCKS, a list of blocks that will be erased with
   flash erase commands, and WRITTEN_BLOCKS, the list of memory
   addresses that will be written, compute the set of memory addresses
   that will be erased but not rewritten (e.g. padding within a block
   which is only partially filled by "load").  */

static VEC(memory_write_request_s) *
compute_garbled_blocks (VEC(memory_write_request_s) *erased_blocks,
                        VEC(memory_write_request_s) *written_blocks)
{
  VEC(memory_write_request_s) *result = NULL;

  unsigned i, j;
  unsigned je = VEC_length (memory_write_request_s, written_blocks);
  struct memory_write_request *erased_p;

  /* Look at each erased memory_write_request in turn, and
     see what part of it is subsequently written to.

     This implementation is O(length(erased) * length(written)).  If
     the lists are sorted at this point it could be rewritten more
     efficiently, but the complexity is not generally worthwhile.  */

  for (i = 0;
       VEC_iterate (memory_write_request_s, erased_blocks, i, erased_p);
       ++i)
    {
      /* Make a deep copy -- it will be modified inside the loop, but
         we don't want to modify the original vector.  */
      struct memory_write_request erased = *erased_p;

      for (j = 0; j != je;)
        {
          struct memory_write_request *written
            = VEC_index (memory_write_request_s,
                         written_blocks, j);

          /* Now try various cases.  */

          /* If WRITTEN is fully to the left of ERASED, check the next
             written memory_write_request.  */
          if (written->end <= erased.begin)
            {
              ++j;
              continue;
            }

          /* If WRITTEN is fully to the right of ERASED, then ERASED
             is not written at all.  WRITTEN might affect other
             blocks.  */
          if (written->begin >= erased.end)
            {
              VEC_safe_push (memory_write_request_s, result, &erased);
              goto next_erased;
            }

          /* If all of ERASED is completely written, we can move on to
             the next erased region.  */
          if (written->begin <= erased.begin
              && written->end >= erased.end)
            {
              goto next_erased;
            }

          /* If there is an unwritten part at the beginning of ERASED,
             then we should record that part and try this inner loop
             again for the remainder.  */
          if (written->begin > erased.begin)
            {
              struct memory_write_request *n =
                VEC_safe_push (memory_write_request_s, result, NULL);

              memset (n, 0, sizeof (struct memory_write_request));
              n->begin = erased.begin;
              n->end = written->begin;
              erased.begin = written->begin;
              continue;
            }

          /* If there is an unwritten part at the end of ERASED, we
             forget about the part that was written to and wait to see
             if the next write request writes more of ERASED.  We can't
             push it yet.  */
          if (written->end < erased.end)
            {
              erased.begin = written->end;
              ++j;
              continue;
            }
        }

      /* If we ran out of write requests without doing anything about
         ERASED, then that means it's really erased.  */
      VEC_safe_push (memory_write_request_s, result, &erased);

    next_erased:
      ;
    }

  return result;
}
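/* Example (numbers illustrative only): if ERASED_BLOCKS contains
   [0x1000, 0x1200) and WRITTEN_BLOCKS contains [0x1004, 0x1108), the
   result holds the two unwritten fragments [0x1000, 0x1004) and
   [0x1108, 0x1200).  When flash contents are being preserved, those
   fragments are read back from the target below, before the erase
   happens.  */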
/* Cleanup function which frees the data buffers of all requests in
   the vector pointed to by P.  */

static void
cleanup_request_data (void *p)
{
  VEC(memory_write_request_s) **v = p;
  struct memory_write_request *r;
  int i;

  for (i = 0; VEC_iterate (memory_write_request_s, *v, i, r); ++i)
    xfree (r->data);
}

/* Cleanup function which frees the vector pointed to by P, but not
   the data buffers of its elements.  */

static void
cleanup_write_requests_vector (void *p)
{
  VEC(memory_write_request_s) **v = p;

  VEC_free (memory_write_request_s, *v);
}

/* Write the blocks described by REQUESTS to the target, erasing and
   rewriting any flash regions they touch.  PRESERVE_FLASH_P says
   whether flash memory that is erased but not covered by any request
   should have its old contents written back.  PROGRESS_CB, if
   non-NULL, is called to report progress.  Returns zero on success
   and nonzero on failure.  */

int
target_write_memory_blocks (VEC(memory_write_request_s) *requests,
                            enum flash_preserve_mode preserve_flash_p,
                            void (*progress_cb) (ULONGEST, void *))
{
  struct cleanup *back_to = make_cleanup (null_cleanup, NULL);
  VEC(memory_write_request_s) *blocks = VEC_copy (memory_write_request_s,
                                                  requests);
  unsigned i;
  int err = 0;
  struct memory_write_request *r;
  VEC(memory_write_request_s) *regular = NULL;
  VEC(memory_write_request_s) *flash = NULL;
  VEC(memory_write_request_s) *erased, *garbled;

  /* END == 0 would represent wraparound: a write to the very last
     byte of the address space.  This file was not written with that
     possibility in mind.  This is fixable, but a lot of work for a
     rare problem; so for now, fail noisily here instead of obscurely
     later.  */
  for (i = 0; VEC_iterate (memory_write_request_s, requests, i, r); ++i)
    gdb_assert (r->end != 0);

  make_cleanup (cleanup_write_requests_vector, &blocks);

  /* Sort the blocks by their start address.  */
  qsort (VEC_address (memory_write_request_s, blocks),
         VEC_length (memory_write_request_s, blocks),
         sizeof (struct memory_write_request), compare_block_starting_address);

  /* Split the blocks into a list of regular memory blocks and a list
     of flash memory blocks.  */
  make_cleanup (cleanup_write_requests_vector, &regular);
  make_cleanup (cleanup_write_requests_vector, &flash);
  split_regular_and_flash_blocks (blocks, &regular, &flash);

  /* If a variable is added to forbid flash write, even during "load",
     it should be checked here.  Similarly, if this function is used
     for other situations besides "load" in which writing to flash
     is undesirable, that should be checked here.  */

  /* Find flash blocks to erase.  */
  erased = blocks_to_erase (flash);
  make_cleanup (cleanup_write_requests_vector, &erased);

  /* Find what flash regions will be erased, and not overwritten; then
     either preserve or discard the old contents.  */
  garbled = compute_garbled_blocks (erased, flash);
  make_cleanup (cleanup_write_requests_vector, &garbled);
  /* Cleanups run in LIFO order, so register the data cleanup second:
     it must free the data buffers while the vector is still alive.  */
  make_cleanup (cleanup_request_data, &garbled);

  if (!VEC_empty (memory_write_request_s, garbled))
    {
      if (preserve_flash_p == flash_preserve)
        {
          struct memory_write_request *r;

          /* Read in the regions that must be preserved and add them to
             the list of blocks to be written back.  */
          for (i = 0; VEC_iterate (memory_write_request_s, garbled, i, r); ++i)
            {
              gdb_assert (r->data == NULL);
              r->data = xmalloc (r->end - r->begin);
              err = target_read_memory (r->begin, r->data, r->end - r->begin);
              if (err != 0)
                goto out;

              VEC_safe_push (memory_write_request_s, flash, r);
            }

          qsort (VEC_address (memory_write_request_s, flash),
                 VEC_length (memory_write_request_s, flash),
                 sizeof (struct memory_write_request),
                 compare_block_starting_address);
        }
    }

  /* We could coalesce adjacent memory blocks here, to reduce the
     number of write requests for small sections.  However, we would
     have to reallocate and copy the data pointers, which could be
     large; large sections are more common in loadable objects than
     large numbers of small sections (although the reverse can be true
     in object files).  So, we issue at least one write request per
     passed struct memory_write_request.  The remote stub will still
     have the opportunity to batch flash requests.  */

  /* Write regular blocks.  */
  for (i = 0; VEC_iterate (memory_write_request_s, regular, i, r); ++i)
    {
      LONGEST len;

      len = target_write_with_progress (current_target.beneath,
                                        TARGET_OBJECT_MEMORY, NULL,
                                        r->data, r->begin, r->end - r->begin,
                                        progress_cb, r->baton);
      if (len < (LONGEST) (r->end - r->begin))
        {
          /* Call error?  */
          err = -1;
          goto out;
        }
    }

  if (!VEC_empty (memory_write_request_s, erased))
    {
      /* Erase all pages.  */
      for (i = 0; VEC_iterate (memory_write_request_s, erased, i, r); ++i)
        target_flash_erase (r->begin, r->end - r->begin);

      /* Write flash data.  */
      for (i = 0; VEC_iterate (memory_write_request_s, flash, i, r); ++i)
        {
          LONGEST len;

          len = target_write_with_progress (&current_target,
                                            TARGET_OBJECT_FLASH, NULL,
                                            r->data, r->begin,
                                            r->end - r->begin,
                                            progress_cb, r->baton);
          if (len < (LONGEST) (r->end - r->begin))
            error (_("Error writing data to flash"));
        }

      target_flash_done ();
    }

 out:
  do_cleanups (back_to);

  return err;
}
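/* A typical caller, such as generic_load, builds up a vector of
   memory_write_request entries -- one per downloadable section, with
   BEGIN, END and DATA filled in and BATON pointing at per-section
   progress state -- and then makes a single call along the lines of

       err = target_write_memory_blocks (requests, flash_preserve,
                                         progress_cb);

   so that regular and flash memory are handled in one pass.  */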