/* xref: /dflybsd-src/contrib/gdb-7/gdb/target-memory.c
   (revision de8e141f24382815c10a4012d209bbbf7abf1112) */
/* Parts of target interface that deal with accessing memory and memory-like
   objects.

   Copyright (C) 2006-2013 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "vec.h"
#include "target.h"
#include "memory-map.h"

#include "gdb_assert.h"

#include <stdio.h>
#include <sys/time.h>

315796c8dcSSimon Schubert static int
compare_block_starting_address(const void * a,const void * b)325796c8dcSSimon Schubert compare_block_starting_address (const void *a, const void *b)
335796c8dcSSimon Schubert {
345796c8dcSSimon Schubert   const struct memory_write_request *a_req = a;
355796c8dcSSimon Schubert   const struct memory_write_request *b_req = b;
365796c8dcSSimon Schubert 
375796c8dcSSimon Schubert   if (a_req->begin < b_req->begin)
385796c8dcSSimon Schubert     return -1;
395796c8dcSSimon Schubert   else if (a_req->begin == b_req->begin)
405796c8dcSSimon Schubert     return 0;
415796c8dcSSimon Schubert   else
425796c8dcSSimon Schubert     return 1;
435796c8dcSSimon Schubert }
445796c8dcSSimon Schubert 
455796c8dcSSimon Schubert /* Adds to RESULT all memory write requests from BLOCK that are
465796c8dcSSimon Schubert    in [BEGIN, END) range.
475796c8dcSSimon Schubert 
485796c8dcSSimon Schubert    If any memory request is only partially in the specified range,
495796c8dcSSimon Schubert    that part of the memory request will be added.  */
505796c8dcSSimon Schubert 
515796c8dcSSimon Schubert static void
claim_memory(VEC (memory_write_request_s)* blocks,VEC (memory_write_request_s)** result,ULONGEST begin,ULONGEST end)525796c8dcSSimon Schubert claim_memory (VEC(memory_write_request_s) *blocks,
535796c8dcSSimon Schubert 	      VEC(memory_write_request_s) **result,
545796c8dcSSimon Schubert 	      ULONGEST begin,
555796c8dcSSimon Schubert 	      ULONGEST end)
565796c8dcSSimon Schubert {
575796c8dcSSimon Schubert   int i;
585796c8dcSSimon Schubert   ULONGEST claimed_begin;
595796c8dcSSimon Schubert   ULONGEST claimed_end;
605796c8dcSSimon Schubert   struct memory_write_request *r;
615796c8dcSSimon Schubert 
625796c8dcSSimon Schubert   for (i = 0; VEC_iterate (memory_write_request_s, blocks, i, r); ++i)
635796c8dcSSimon Schubert     {
645796c8dcSSimon Schubert       /* If the request doesn't overlap [BEGIN, END), skip it.  We
655796c8dcSSimon Schubert 	 must handle END == 0 meaning the top of memory; we don't yet
665796c8dcSSimon Schubert 	 check for R->end == 0, which would also mean the top of
675796c8dcSSimon Schubert 	 memory, but there's an assertion in
685796c8dcSSimon Schubert 	 target_write_memory_blocks which checks for that.  */
695796c8dcSSimon Schubert 
705796c8dcSSimon Schubert       if (begin >= r->end)
715796c8dcSSimon Schubert 	continue;
725796c8dcSSimon Schubert       if (end != 0 && end <= r->begin)
735796c8dcSSimon Schubert 	continue;
745796c8dcSSimon Schubert 
755796c8dcSSimon Schubert       claimed_begin = max (begin, r->begin);
765796c8dcSSimon Schubert       if (end == 0)
775796c8dcSSimon Schubert 	claimed_end = r->end;
785796c8dcSSimon Schubert       else
795796c8dcSSimon Schubert 	claimed_end = min (end, r->end);
805796c8dcSSimon Schubert 
815796c8dcSSimon Schubert       if (claimed_begin == r->begin && claimed_end == r->end)
825796c8dcSSimon Schubert 	VEC_safe_push (memory_write_request_s, *result, r);
835796c8dcSSimon Schubert       else
845796c8dcSSimon Schubert 	{
855796c8dcSSimon Schubert 	  struct memory_write_request *n =
865796c8dcSSimon Schubert 	    VEC_safe_push (memory_write_request_s, *result, NULL);
87cf7f2e2dSJohn Marino 
885796c8dcSSimon Schubert 	  *n = *r;
895796c8dcSSimon Schubert 	  n->begin = claimed_begin;
905796c8dcSSimon Schubert 	  n->end = claimed_end;
915796c8dcSSimon Schubert 	  n->data += claimed_begin - r->begin;
925796c8dcSSimon Schubert 	}
935796c8dcSSimon Schubert     }
945796c8dcSSimon Schubert }
955796c8dcSSimon Schubert 
965796c8dcSSimon Schubert /* Given a vector of struct memory_write_request objects in BLOCKS,
975796c8dcSSimon Schubert    add memory requests for flash memory into FLASH_BLOCKS, and for
985796c8dcSSimon Schubert    regular memory to REGULAR_BLOCKS.  */
995796c8dcSSimon Schubert 
1005796c8dcSSimon Schubert static void
split_regular_and_flash_blocks(VEC (memory_write_request_s)* blocks,VEC (memory_write_request_s)** regular_blocks,VEC (memory_write_request_s)** flash_blocks)1015796c8dcSSimon Schubert split_regular_and_flash_blocks (VEC(memory_write_request_s) *blocks,
1025796c8dcSSimon Schubert 				VEC(memory_write_request_s) **regular_blocks,
1035796c8dcSSimon Schubert 				VEC(memory_write_request_s) **flash_blocks)
1045796c8dcSSimon Schubert {
1055796c8dcSSimon Schubert   struct mem_region *region;
1065796c8dcSSimon Schubert   CORE_ADDR cur_address;
1075796c8dcSSimon Schubert 
1085796c8dcSSimon Schubert   /* This implementation runs in O(length(regions)*length(blocks)) time.
1095796c8dcSSimon Schubert      However, in most cases the number of blocks will be small, so this does
1105796c8dcSSimon Schubert      not matter.
1115796c8dcSSimon Schubert 
1125796c8dcSSimon Schubert      Note also that it's extremely unlikely that a memory write request
1135796c8dcSSimon Schubert      will span more than one memory region, however for safety we handle
1145796c8dcSSimon Schubert      such situations.  */
1155796c8dcSSimon Schubert 
1165796c8dcSSimon Schubert   cur_address = 0;
1175796c8dcSSimon Schubert   while (1)
1185796c8dcSSimon Schubert     {
1195796c8dcSSimon Schubert       VEC(memory_write_request_s) **r;
1205796c8dcSSimon Schubert 
121cf7f2e2dSJohn Marino       region = lookup_mem_region (cur_address);
1225796c8dcSSimon Schubert       r = region->attrib.mode == MEM_FLASH ? flash_blocks : regular_blocks;
1235796c8dcSSimon Schubert       cur_address = region->hi;
1245796c8dcSSimon Schubert       claim_memory (blocks, r, region->lo, region->hi);
1255796c8dcSSimon Schubert 
1265796c8dcSSimon Schubert       if (cur_address == 0)
1275796c8dcSSimon Schubert 	break;
1285796c8dcSSimon Schubert     }
1295796c8dcSSimon Schubert }
1305796c8dcSSimon Schubert 
1315796c8dcSSimon Schubert /* Given an ADDRESS, if BEGIN is non-NULL this function sets *BEGIN
1325796c8dcSSimon Schubert    to the start of the flash block containing the address.  Similarly,
1335796c8dcSSimon Schubert    if END is non-NULL *END will be set to the address one past the end
1345796c8dcSSimon Schubert    of the block containing the address.  */
1355796c8dcSSimon Schubert 
1365796c8dcSSimon Schubert static void
block_boundaries(CORE_ADDR address,CORE_ADDR * begin,CORE_ADDR * end)1375796c8dcSSimon Schubert block_boundaries (CORE_ADDR address, CORE_ADDR *begin, CORE_ADDR *end)
1385796c8dcSSimon Schubert {
1395796c8dcSSimon Schubert   struct mem_region *region;
1405796c8dcSSimon Schubert   unsigned blocksize;
1415796c8dcSSimon Schubert 
1425796c8dcSSimon Schubert   region = lookup_mem_region (address);
1435796c8dcSSimon Schubert   gdb_assert (region->attrib.mode == MEM_FLASH);
1445796c8dcSSimon Schubert   blocksize = region->attrib.blocksize;
1455796c8dcSSimon Schubert   if (begin)
1465796c8dcSSimon Schubert     *begin = address / blocksize * blocksize;
1475796c8dcSSimon Schubert   if (end)
1485796c8dcSSimon Schubert     *end = (address + blocksize - 1) / blocksize * blocksize;
1495796c8dcSSimon Schubert }
1505796c8dcSSimon Schubert 
1515796c8dcSSimon Schubert /* Given the list of memory requests to be WRITTEN, this function
1525796c8dcSSimon Schubert    returns write requests covering each group of flash blocks which must
1535796c8dcSSimon Schubert    be erased.  */
1545796c8dcSSimon Schubert 
VEC(memory_write_request_s)1555796c8dcSSimon Schubert static VEC(memory_write_request_s) *
1565796c8dcSSimon Schubert blocks_to_erase (VEC(memory_write_request_s) *written)
1575796c8dcSSimon Schubert {
1585796c8dcSSimon Schubert   unsigned i;
1595796c8dcSSimon Schubert   struct memory_write_request *ptr;
1605796c8dcSSimon Schubert 
1615796c8dcSSimon Schubert   VEC(memory_write_request_s) *result = NULL;
1625796c8dcSSimon Schubert 
1635796c8dcSSimon Schubert   for (i = 0; VEC_iterate (memory_write_request_s, written, i, ptr); ++i)
1645796c8dcSSimon Schubert     {
1655796c8dcSSimon Schubert       CORE_ADDR begin, end;
1665796c8dcSSimon Schubert 
1675796c8dcSSimon Schubert       block_boundaries (ptr->begin, &begin, 0);
1685796c8dcSSimon Schubert       block_boundaries (ptr->end - 1, 0, &end);
1695796c8dcSSimon Schubert 
1705796c8dcSSimon Schubert       if (!VEC_empty (memory_write_request_s, result)
1715796c8dcSSimon Schubert 	  && VEC_last (memory_write_request_s, result)->end >= begin)
1725796c8dcSSimon Schubert 	{
1735796c8dcSSimon Schubert 	  VEC_last (memory_write_request_s, result)->end = end;
1745796c8dcSSimon Schubert 	}
1755796c8dcSSimon Schubert       else
1765796c8dcSSimon Schubert 	{
1775796c8dcSSimon Schubert 	  struct memory_write_request *n =
1785796c8dcSSimon Schubert 	    VEC_safe_push (memory_write_request_s, result, NULL);
179cf7f2e2dSJohn Marino 
1805796c8dcSSimon Schubert 	  memset (n, 0, sizeof (struct memory_write_request));
1815796c8dcSSimon Schubert 	  n->begin = begin;
1825796c8dcSSimon Schubert 	  n->end = end;
1835796c8dcSSimon Schubert 	}
1845796c8dcSSimon Schubert     }
1855796c8dcSSimon Schubert 
1865796c8dcSSimon Schubert   return result;
1875796c8dcSSimon Schubert }
1885796c8dcSSimon Schubert 
1895796c8dcSSimon Schubert /* Given ERASED_BLOCKS, a list of blocks that will be erased with
1905796c8dcSSimon Schubert    flash erase commands, and WRITTEN_BLOCKS, the list of memory
1915796c8dcSSimon Schubert    addresses that will be written, compute the set of memory addresses
1925796c8dcSSimon Schubert    that will be erased but not rewritten (e.g. padding within a block
1935796c8dcSSimon Schubert    which is only partially filled by "load").  */
1945796c8dcSSimon Schubert 
VEC(memory_write_request_s)1955796c8dcSSimon Schubert static VEC(memory_write_request_s) *
1965796c8dcSSimon Schubert compute_garbled_blocks (VEC(memory_write_request_s) *erased_blocks,
1975796c8dcSSimon Schubert 			VEC(memory_write_request_s) *written_blocks)
1985796c8dcSSimon Schubert {
1995796c8dcSSimon Schubert   VEC(memory_write_request_s) *result = NULL;
2005796c8dcSSimon Schubert 
2015796c8dcSSimon Schubert   unsigned i, j;
2025796c8dcSSimon Schubert   unsigned je = VEC_length (memory_write_request_s, written_blocks);
2035796c8dcSSimon Schubert   struct memory_write_request *erased_p;
2045796c8dcSSimon Schubert 
2055796c8dcSSimon Schubert   /* Look at each erased memory_write_request in turn, and
2065796c8dcSSimon Schubert      see what part of it is subsequently written to.
2075796c8dcSSimon Schubert 
2085796c8dcSSimon Schubert      This implementation is O(length(erased) * length(written)).  If
2095796c8dcSSimon Schubert      the lists are sorted at this point it could be rewritten more
2105796c8dcSSimon Schubert      efficiently, but the complexity is not generally worthwhile.  */
2115796c8dcSSimon Schubert 
2125796c8dcSSimon Schubert   for (i = 0;
2135796c8dcSSimon Schubert        VEC_iterate (memory_write_request_s, erased_blocks, i, erased_p);
2145796c8dcSSimon Schubert        ++i)
2155796c8dcSSimon Schubert     {
2165796c8dcSSimon Schubert       /* Make a deep copy -- it will be modified inside the loop, but
2175796c8dcSSimon Schubert 	 we don't want to modify original vector.  */
2185796c8dcSSimon Schubert       struct memory_write_request erased = *erased_p;
2195796c8dcSSimon Schubert 
2205796c8dcSSimon Schubert       for (j = 0; j != je;)
2215796c8dcSSimon Schubert 	{
2225796c8dcSSimon Schubert 	  struct memory_write_request *written
2235796c8dcSSimon Schubert 	    = VEC_index (memory_write_request_s,
2245796c8dcSSimon Schubert 			 written_blocks, j);
2255796c8dcSSimon Schubert 
2265796c8dcSSimon Schubert 	  /* Now try various cases.  */
2275796c8dcSSimon Schubert 
2285796c8dcSSimon Schubert 	  /* If WRITTEN is fully to the left of ERASED, check the next
2295796c8dcSSimon Schubert 	     written memory_write_request.  */
2305796c8dcSSimon Schubert 	  if (written->end <= erased.begin)
2315796c8dcSSimon Schubert 	    {
2325796c8dcSSimon Schubert 	      ++j;
2335796c8dcSSimon Schubert 	      continue;
2345796c8dcSSimon Schubert 	    }
2355796c8dcSSimon Schubert 
2365796c8dcSSimon Schubert 	  /* If WRITTEN is fully to the right of ERASED, then ERASED
2375796c8dcSSimon Schubert 	     is not written at all.  WRITTEN might affect other
2385796c8dcSSimon Schubert 	     blocks.  */
2395796c8dcSSimon Schubert 	  if (written->begin >= erased.end)
2405796c8dcSSimon Schubert 	    {
2415796c8dcSSimon Schubert 	      VEC_safe_push (memory_write_request_s, result, &erased);
2425796c8dcSSimon Schubert 	      goto next_erased;
2435796c8dcSSimon Schubert 	    }
2445796c8dcSSimon Schubert 
2455796c8dcSSimon Schubert 	  /* If all of ERASED is completely written, we can move on to
2465796c8dcSSimon Schubert 	     the next erased region.  */
2475796c8dcSSimon Schubert 	  if (written->begin <= erased.begin
2485796c8dcSSimon Schubert 	      && written->end >= erased.end)
2495796c8dcSSimon Schubert 	    {
2505796c8dcSSimon Schubert 	      goto next_erased;
2515796c8dcSSimon Schubert 	    }
2525796c8dcSSimon Schubert 
2535796c8dcSSimon Schubert 	  /* If there is an unwritten part at the beginning of ERASED,
2545796c8dcSSimon Schubert 	     then we should record that part and try this inner loop
2555796c8dcSSimon Schubert 	     again for the remainder.  */
2565796c8dcSSimon Schubert 	  if (written->begin > erased.begin)
2575796c8dcSSimon Schubert 	    {
2585796c8dcSSimon Schubert 	      struct memory_write_request *n =
2595796c8dcSSimon Schubert 		VEC_safe_push (memory_write_request_s, result, NULL);
260cf7f2e2dSJohn Marino 
2615796c8dcSSimon Schubert 	      memset (n, 0, sizeof (struct memory_write_request));
2625796c8dcSSimon Schubert 	      n->begin = erased.begin;
2635796c8dcSSimon Schubert 	      n->end = written->begin;
2645796c8dcSSimon Schubert 	      erased.begin = written->begin;
2655796c8dcSSimon Schubert 	      continue;
2665796c8dcSSimon Schubert 	    }
2675796c8dcSSimon Schubert 
2685796c8dcSSimon Schubert 	  /* If there is an unwritten part at the end of ERASED, we
2695796c8dcSSimon Schubert 	     forget about the part that was written to and wait to see
2705796c8dcSSimon Schubert 	     if the next write request writes more of ERASED.  We can't
2715796c8dcSSimon Schubert 	     push it yet.  */
2725796c8dcSSimon Schubert 	  if (written->end < erased.end)
2735796c8dcSSimon Schubert 	    {
2745796c8dcSSimon Schubert 	      erased.begin = written->end;
2755796c8dcSSimon Schubert 	      ++j;
2765796c8dcSSimon Schubert 	      continue;
2775796c8dcSSimon Schubert 	    }
2785796c8dcSSimon Schubert 	}
2795796c8dcSSimon Schubert 
2805796c8dcSSimon Schubert       /* If we ran out of write requests without doing anything about
2815796c8dcSSimon Schubert 	 ERASED, then that means it's really erased.  */
2825796c8dcSSimon Schubert       VEC_safe_push (memory_write_request_s, result, &erased);
2835796c8dcSSimon Schubert 
2845796c8dcSSimon Schubert     next_erased:
2855796c8dcSSimon Schubert       ;
2865796c8dcSSimon Schubert     }
2875796c8dcSSimon Schubert 
2885796c8dcSSimon Schubert   return result;
2895796c8dcSSimon Schubert }
2905796c8dcSSimon Schubert 
2915796c8dcSSimon Schubert static void
cleanup_request_data(void * p)2925796c8dcSSimon Schubert cleanup_request_data (void *p)
2935796c8dcSSimon Schubert {
2945796c8dcSSimon Schubert   VEC(memory_write_request_s) **v = p;
2955796c8dcSSimon Schubert   struct memory_write_request *r;
2965796c8dcSSimon Schubert   int i;
2975796c8dcSSimon Schubert 
2985796c8dcSSimon Schubert   for (i = 0; VEC_iterate (memory_write_request_s, *v, i, r); ++i)
2995796c8dcSSimon Schubert     xfree (r->data);
3005796c8dcSSimon Schubert }
3015796c8dcSSimon Schubert 
3025796c8dcSSimon Schubert static void
cleanup_write_requests_vector(void * p)3035796c8dcSSimon Schubert cleanup_write_requests_vector (void *p)
3045796c8dcSSimon Schubert {
3055796c8dcSSimon Schubert   VEC(memory_write_request_s) **v = p;
306cf7f2e2dSJohn Marino 
3075796c8dcSSimon Schubert   VEC_free (memory_write_request_s, *v);
3085796c8dcSSimon Schubert }
3095796c8dcSSimon Schubert 
3105796c8dcSSimon Schubert int
target_write_memory_blocks(VEC (memory_write_request_s)* requests,enum flash_preserve_mode preserve_flash_p,void (* progress_cb)(ULONGEST,void *))3115796c8dcSSimon Schubert target_write_memory_blocks (VEC(memory_write_request_s) *requests,
3125796c8dcSSimon Schubert 			    enum flash_preserve_mode preserve_flash_p,
3135796c8dcSSimon Schubert 			    void (*progress_cb) (ULONGEST, void *))
3145796c8dcSSimon Schubert {
3155796c8dcSSimon Schubert   struct cleanup *back_to = make_cleanup (null_cleanup, NULL);
3165796c8dcSSimon Schubert   VEC(memory_write_request_s) *blocks = VEC_copy (memory_write_request_s,
3175796c8dcSSimon Schubert 						  requests);
3185796c8dcSSimon Schubert   unsigned i;
3195796c8dcSSimon Schubert   int err = 0;
3205796c8dcSSimon Schubert   struct memory_write_request *r;
3215796c8dcSSimon Schubert   VEC(memory_write_request_s) *regular = NULL;
3225796c8dcSSimon Schubert   VEC(memory_write_request_s) *flash = NULL;
3235796c8dcSSimon Schubert   VEC(memory_write_request_s) *erased, *garbled;
3245796c8dcSSimon Schubert 
3255796c8dcSSimon Schubert   /* END == 0 would represent wraparound: a write to the very last
3265796c8dcSSimon Schubert      byte of the address space.  This file was not written with that
3275796c8dcSSimon Schubert      possibility in mind.  This is fixable, but a lot of work for a
3285796c8dcSSimon Schubert      rare problem; so for now, fail noisily here instead of obscurely
3295796c8dcSSimon Schubert      later.  */
3305796c8dcSSimon Schubert   for (i = 0; VEC_iterate (memory_write_request_s, requests, i, r); ++i)
3315796c8dcSSimon Schubert     gdb_assert (r->end != 0);
3325796c8dcSSimon Schubert 
3335796c8dcSSimon Schubert   make_cleanup (cleanup_write_requests_vector, &blocks);
3345796c8dcSSimon Schubert 
3355796c8dcSSimon Schubert   /* Sort the blocks by their start address.  */
3365796c8dcSSimon Schubert   qsort (VEC_address (memory_write_request_s, blocks),
3375796c8dcSSimon Schubert 	 VEC_length (memory_write_request_s, blocks),
3385796c8dcSSimon Schubert 	 sizeof (struct memory_write_request), compare_block_starting_address);
3395796c8dcSSimon Schubert 
3405796c8dcSSimon Schubert   /* Split blocks into list of regular memory blocks,
3415796c8dcSSimon Schubert      and list of flash memory blocks.  */
3425796c8dcSSimon Schubert   make_cleanup (cleanup_write_requests_vector, &regular);
3435796c8dcSSimon Schubert   make_cleanup (cleanup_write_requests_vector, &flash);
3445796c8dcSSimon Schubert   split_regular_and_flash_blocks (blocks, &regular, &flash);
3455796c8dcSSimon Schubert 
3465796c8dcSSimon Schubert   /* If a variable is added to forbid flash write, even during "load",
3475796c8dcSSimon Schubert      it should be checked here.  Similarly, if this function is used
3485796c8dcSSimon Schubert      for other situations besides "load" in which writing to flash
3495796c8dcSSimon Schubert      is undesirable, that should be checked here.  */
3505796c8dcSSimon Schubert 
3515796c8dcSSimon Schubert   /* Find flash blocks to erase.  */
3525796c8dcSSimon Schubert   erased = blocks_to_erase (flash);
3535796c8dcSSimon Schubert   make_cleanup (cleanup_write_requests_vector, &erased);
3545796c8dcSSimon Schubert 
3555796c8dcSSimon Schubert   /* Find what flash regions will be erased, and not overwritten; then
3565796c8dcSSimon Schubert      either preserve or discard the old contents.  */
3575796c8dcSSimon Schubert   garbled = compute_garbled_blocks (erased, flash);
3585796c8dcSSimon Schubert   make_cleanup (cleanup_request_data, &garbled);
3595796c8dcSSimon Schubert   make_cleanup (cleanup_write_requests_vector, &garbled);
3605796c8dcSSimon Schubert 
3615796c8dcSSimon Schubert   if (!VEC_empty (memory_write_request_s, garbled))
3625796c8dcSSimon Schubert     {
3635796c8dcSSimon Schubert       if (preserve_flash_p == flash_preserve)
3645796c8dcSSimon Schubert 	{
3655796c8dcSSimon Schubert 	  struct memory_write_request *r;
3665796c8dcSSimon Schubert 
3675796c8dcSSimon Schubert 	  /* Read in regions that must be preserved and add them to
3685796c8dcSSimon Schubert 	     the list of blocks we read.  */
3695796c8dcSSimon Schubert 	  for (i = 0; VEC_iterate (memory_write_request_s, garbled, i, r); ++i)
3705796c8dcSSimon Schubert 	    {
3715796c8dcSSimon Schubert 	      gdb_assert (r->data == NULL);
3725796c8dcSSimon Schubert 	      r->data = xmalloc (r->end - r->begin);
3735796c8dcSSimon Schubert 	      err = target_read_memory (r->begin, r->data, r->end - r->begin);
3745796c8dcSSimon Schubert 	      if (err != 0)
3755796c8dcSSimon Schubert 		goto out;
3765796c8dcSSimon Schubert 
3775796c8dcSSimon Schubert 	      VEC_safe_push (memory_write_request_s, flash, r);
3785796c8dcSSimon Schubert 	    }
3795796c8dcSSimon Schubert 
3805796c8dcSSimon Schubert 	  qsort (VEC_address (memory_write_request_s, flash),
3815796c8dcSSimon Schubert 		 VEC_length (memory_write_request_s, flash),
382c50c785cSJohn Marino 		 sizeof (struct memory_write_request),
383c50c785cSJohn Marino 		 compare_block_starting_address);
3845796c8dcSSimon Schubert 	}
3855796c8dcSSimon Schubert     }
3865796c8dcSSimon Schubert 
3875796c8dcSSimon Schubert   /* We could coalesce adjacent memory blocks here, to reduce the
3885796c8dcSSimon Schubert      number of write requests for small sections.  However, we would
3895796c8dcSSimon Schubert      have to reallocate and copy the data pointers, which could be
3905796c8dcSSimon Schubert      large; large sections are more common in loadable objects than
3915796c8dcSSimon Schubert      large numbers of small sections (although the reverse can be true
3925796c8dcSSimon Schubert      in object files).  So, we issue at least one write request per
3935796c8dcSSimon Schubert      passed struct memory_write_request.  The remote stub will still
3945796c8dcSSimon Schubert      have the opportunity to batch flash requests.  */
3955796c8dcSSimon Schubert 
3965796c8dcSSimon Schubert   /* Write regular blocks.  */
3975796c8dcSSimon Schubert   for (i = 0; VEC_iterate (memory_write_request_s, regular, i, r); ++i)
3985796c8dcSSimon Schubert     {
3995796c8dcSSimon Schubert       LONGEST len;
4005796c8dcSSimon Schubert 
4015796c8dcSSimon Schubert       len = target_write_with_progress (current_target.beneath,
4025796c8dcSSimon Schubert 					TARGET_OBJECT_MEMORY, NULL,
4035796c8dcSSimon Schubert 					r->data, r->begin, r->end - r->begin,
4045796c8dcSSimon Schubert 					progress_cb, r->baton);
4055796c8dcSSimon Schubert       if (len < (LONGEST) (r->end - r->begin))
4065796c8dcSSimon Schubert 	{
4075796c8dcSSimon Schubert 	  /* Call error?  */
4085796c8dcSSimon Schubert 	  err = -1;
4095796c8dcSSimon Schubert 	  goto out;
4105796c8dcSSimon Schubert 	}
4115796c8dcSSimon Schubert     }
4125796c8dcSSimon Schubert 
4135796c8dcSSimon Schubert   if (!VEC_empty (memory_write_request_s, erased))
4145796c8dcSSimon Schubert     {
4155796c8dcSSimon Schubert       /* Erase all pages.  */
4165796c8dcSSimon Schubert       for (i = 0; VEC_iterate (memory_write_request_s, erased, i, r); ++i)
4175796c8dcSSimon Schubert 	target_flash_erase (r->begin, r->end - r->begin);
4185796c8dcSSimon Schubert 
4195796c8dcSSimon Schubert       /* Write flash data.  */
4205796c8dcSSimon Schubert       for (i = 0; VEC_iterate (memory_write_request_s, flash, i, r); ++i)
4215796c8dcSSimon Schubert 	{
4225796c8dcSSimon Schubert 	  LONGEST len;
4235796c8dcSSimon Schubert 
4245796c8dcSSimon Schubert 	  len = target_write_with_progress (&current_target,
4255796c8dcSSimon Schubert 					    TARGET_OBJECT_FLASH, NULL,
426c50c785cSJohn Marino 					    r->data, r->begin,
427c50c785cSJohn Marino 					    r->end - r->begin,
4285796c8dcSSimon Schubert 					    progress_cb, r->baton);
4295796c8dcSSimon Schubert 	  if (len < (LONGEST) (r->end - r->begin))
4305796c8dcSSimon Schubert 	    error (_("Error writing data to flash"));
4315796c8dcSSimon Schubert 	}
4325796c8dcSSimon Schubert 
4335796c8dcSSimon Schubert       target_flash_done ();
4345796c8dcSSimon Schubert     }
4355796c8dcSSimon Schubert 
4365796c8dcSSimon Schubert  out:
4375796c8dcSSimon Schubert   do_cleanups (back_to);
4385796c8dcSSimon Schubert 
4395796c8dcSSimon Schubert   return err;
4405796c8dcSSimon Schubert }
441