xref: /netbsd-src/external/gpl3/gdb.old/dist/gdb/nat/aarch64-linux-hw-point.c (revision 4c3eb207d36f67d31994830c0a694161fc1ca39b)
1 /* Copyright (C) 2009-2019 Free Software Foundation, Inc.
2    Contributed by ARM Ltd.
3 
4    This file is part of GDB.
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License as published by
8    the Free Software Foundation; either version 3 of the License, or
9    (at your option) any later version.
10 
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15 
16    You should have received a copy of the GNU General Public License
17    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
18 
19 #include "common/common-defs.h"
20 #include "common/break-common.h"
21 #include "common/common-regcache.h"
22 #include "nat/linux-nat.h"
23 #include "aarch64-linux-hw-point.h"
24 
25 #include <sys/uio.h>
26 #include <asm/ptrace.h>
27 #include <sys/ptrace.h>
28 #include <elf.h>
29 
/* Number of hardware breakpoints/watchpoints the target supports.
   They are initialized with values obtained via the ptrace calls
   with NT_ARM_HW_BREAK and NT_ARM_HW_WATCH respectively; see
   aarch64_linux_get_debug_reg_capacity below.  */

int aarch64_num_bp_regs;
int aarch64_num_wp_regs;

/* True if this kernel does not have the bug described by PR
   external/20207 (Linux >= 4.10).  A fixed kernel supports any
   contiguous range of bits in 8-bit byte DR_CONTROL_MASK.  A buggy
   kernel supports only 0x01, 0x03, 0x0f and 0xff.  We start by
   assuming the bug is fixed, and then detect the bug at
   PTRACE_SETREGSET time (see aarch64_linux_set_debug_regs).  */
static bool kernel_supports_any_contiguous_range = true;
44 
45 /* Return starting byte 0..7 incl. of a watchpoint encoded by CTRL.  */
46 
47 unsigned int
48 aarch64_watchpoint_offset (unsigned int ctrl)
49 {
50   uint8_t mask = DR_CONTROL_MASK (ctrl);
51   unsigned retval;
52 
53   /* Shift out bottom zeros.  */
54   for (retval = 0; mask && (mask & 1) == 0; ++retval)
55     mask >>= 1;
56 
57   return retval;
58 }
59 
60 /* Utility function that returns the length in bytes of a watchpoint
61    according to the content of a hardware debug control register CTRL.
62    Any contiguous range of bytes in CTRL is supported.  The returned
63    value can be between 0..8 (inclusive).  */
64 
65 unsigned int
66 aarch64_watchpoint_length (unsigned int ctrl)
67 {
68   uint8_t mask = DR_CONTROL_MASK (ctrl);
69   unsigned retval;
70 
71   /* Shift out bottom zeros.  */
72   mask >>= aarch64_watchpoint_offset (ctrl);
73 
74   /* Count bottom ones.  */
75   for (retval = 0; (mask & 1) != 0; ++retval)
76     mask >>= 1;
77 
78   if (mask != 0)
79     error (_("Unexpected hardware watchpoint length register value 0x%x"),
80 	   DR_CONTROL_MASK (ctrl));
81 
82   return retval;
83 }
84 
85 /* Given the hardware breakpoint or watchpoint type TYPE and its
86    length LEN, return the expected encoding for a hardware
87    breakpoint/watchpoint control register.  */
88 
89 static unsigned int
90 aarch64_point_encode_ctrl_reg (enum target_hw_bp_type type, int offset, int len)
91 {
92   unsigned int ctrl, ttype;
93 
94   gdb_assert (offset == 0 || kernel_supports_any_contiguous_range);
95   gdb_assert (offset + len <= AARCH64_HWP_MAX_LEN_PER_REG);
96 
97   /* type */
98   switch (type)
99     {
100     case hw_write:
101       ttype = 2;
102       break;
103     case hw_read:
104       ttype = 1;
105       break;
106     case hw_access:
107       ttype = 3;
108       break;
109     case hw_execute:
110       ttype = 0;
111       break;
112     default:
113       perror_with_name (_("Unrecognized breakpoint/watchpoint type"));
114     }
115 
116   ctrl = ttype << 3;
117 
118   /* offset and length bitmask */
119   ctrl |= ((1 << len) - 1) << (5 + offset);
120   /* enabled at el0 */
121   ctrl |= (2 << 1) | 1;
122 
123   return ctrl;
124 }
125 
126 /* Addresses to be written to the hardware breakpoint and watchpoint
127    value registers need to be aligned; the alignment is 4-byte and
128    8-type respectively.  Linux kernel rejects any non-aligned address
129    it receives from the related ptrace call.  Furthermore, the kernel
130    currently only supports the following Byte Address Select (BAS)
131    values: 0x1, 0x3, 0xf and 0xff, which means that for a hardware
132    watchpoint to be accepted by the kernel (via ptrace call), its
133    valid length can only be 1 byte, 2 bytes, 4 bytes or 8 bytes.
134    Despite these limitations, the unaligned watchpoint is supported in
135    this port.
136 
137    Return 0 for any non-compliant ADDR and/or LEN; return 1 otherwise.  */
138 
139 static int
140 aarch64_point_is_aligned (int is_watchpoint, CORE_ADDR addr, int len)
141 {
142   unsigned int alignment = 0;
143 
144   if (is_watchpoint)
145     alignment = AARCH64_HWP_ALIGNMENT;
146   else
147     {
148       struct regcache *regcache
149 	= get_thread_regcache_for_ptid (current_lwp_ptid ());
150 
151       /* Set alignment to 2 only if the current process is 32-bit,
152 	 since thumb instruction can be 2-byte aligned.  Otherwise, set
153 	 alignment to AARCH64_HBP_ALIGNMENT.  */
154       if (regcache_register_size (regcache, 0) == 8)
155 	alignment = AARCH64_HBP_ALIGNMENT;
156       else
157 	alignment = 2;
158     }
159 
160   if (addr & (alignment - 1))
161     return 0;
162 
163   if ((!kernel_supports_any_contiguous_range
164        && len != 8 && len != 4 && len != 2 && len != 1)
165       || (kernel_supports_any_contiguous_range
166 	  && (len < 1 || len > 8)))
167     return 0;
168 
169   return 1;
170 }
171 
172 /* Given the (potentially unaligned) watchpoint address in ADDR and
173    length in LEN, return the aligned address, offset from that base
174    address, and aligned length in *ALIGNED_ADDR_P, *ALIGNED_OFFSET_P
175    and *ALIGNED_LEN_P, respectively.  The returned values will be
176    valid values to write to the hardware watchpoint value and control
177    registers.
178 
179    The given watchpoint may get truncated if more than one hardware
180    register is needed to cover the watched region.  *NEXT_ADDR_P
181    and *NEXT_LEN_P, if non-NULL, will return the address and length
182    of the remaining part of the watchpoint (which can be processed
183    by calling this routine again to generate another aligned address,
184    offset and length tuple.
185 
186    Essentially, unaligned watchpoint is achieved by minimally
187    enlarging the watched area to meet the alignment requirement, and
188    if necessary, splitting the watchpoint over several hardware
189    watchpoint registers.
190 
191    On kernels that predate the support for Byte Address Select (BAS)
192    in the hardware watchpoint control register, the offset from the
193    base address is always zero, and so in that case the trade-off is
194    that there will be false-positive hits for the read-type or the
195    access-type hardware watchpoints; for the write type, which is more
196    commonly used, there will be no such issues, as the higher-level
197    breakpoint management in gdb always examines the exact watched
198    region for any content change, and transparently resumes a thread
199    from a watchpoint trap if there is no change to the watched region.
200 
201    Another limitation is that because the watched region is enlarged,
202    the watchpoint fault address discovered by
203    aarch64_stopped_data_address may be outside of the original watched
204    region, especially when the triggering instruction is accessing a
205    larger region.  When the fault address is not within any known
206    range, watchpoints_triggered in gdb will get confused, as the
207    higher-level watchpoint management is only aware of original
208    watched regions, and will think that some unknown watchpoint has
209    been triggered.  To prevent such a case,
210    aarch64_stopped_data_address implementations in gdb and gdbserver
211    try to match the trapped address with a watched region, and return
212    an address within the latter. */
213 
/* See the long comment above for the overall strategy.  Splits one
   (possibly unaligned) chunk off the region ADDR/LEN; any output
   pointer may be NULL when the caller does not need that value.  */

static void
aarch64_align_watchpoint (CORE_ADDR addr, int len, CORE_ADDR *aligned_addr_p,
			  int *aligned_offset_p, int *aligned_len_p,
			  CORE_ADDR *next_addr_p, int *next_len_p,
			  CORE_ADDR *next_addr_orig_p)
{
  int aligned_len;
  unsigned int offset, aligned_offset;
  CORE_ADDR aligned_addr;
  const unsigned int alignment = AARCH64_HWP_ALIGNMENT;
  const unsigned int max_wp_len = AARCH64_HWP_MAX_LEN_PER_REG;

  /* As assumed by the algorithm.  */
  gdb_assert (alignment == max_wp_len);

  if (len <= 0)
    return;

  /* The address put into the hardware watchpoint value register must
     be aligned.  */
  offset = addr & (alignment - 1);
  aligned_addr = addr - offset;
  /* Only fixed kernels accept a non-zero BAS offset; on buggy kernels
     (PR external/20207) we watch from the aligned base instead.  */
  aligned_offset
    = kernel_supports_any_contiguous_range ? addr & (alignment - 1) : 0;

  gdb_assert (offset >= 0 && offset < alignment);
  gdb_assert (aligned_addr >= 0 && aligned_addr <= addr);
  gdb_assert (offset + len > 0);

  if (offset + len >= max_wp_len)
    {
      /* Need more than one watchpoint register; truncate at the
	 alignment boundary.  */
      aligned_len
	= max_wp_len - (kernel_supports_any_contiguous_range ? offset : 0);
      len -= (max_wp_len - offset);
      addr += (max_wp_len - offset);
      gdb_assert ((addr & (alignment - 1)) == 0);
    }
  else
    {
      /* Find the smallest valid length that is large enough to
	 accommodate this watchpoint.  */
      static const unsigned char
	aligned_len_array[AARCH64_HWP_MAX_LEN_PER_REG] =
	{ 1, 2, 4, 4, 8, 8, 8, 8 };

      aligned_len = (kernel_supports_any_contiguous_range
		     ? len : aligned_len_array[offset + len - 1]);
      addr += len;
      len = 0;
    }

  /* Report the results; each output is optional.  */
  if (aligned_addr_p)
    *aligned_addr_p = aligned_addr;
  if (aligned_offset_p)
    *aligned_offset_p = aligned_offset;
  if (aligned_len_p)
    *aligned_len_p = aligned_len;
  if (next_addr_p)
    *next_addr_p = addr;
  if (next_len_p)
    *next_len_p = len;
  /* Advance the original-address cursor to the next alignment
     boundary (read-modify-write through the pointer).  */
  if (next_addr_orig_p)
    *next_addr_orig_p = align_down (*next_addr_orig_p + alignment, alignment);
}
280 
/* Parameter block handed to debug_reg_change_callback via
   iterate_over_lwps.  */

struct aarch64_dr_update_callback_param
{
  /* Non-zero if the change is to a watchpoint register pair, zero
     for a breakpoint register pair.  */
  int is_watchpoint;

  /* Index of the debug register pair that changed.  */
  unsigned int idx;
};
286 
287 /* Callback for iterate_over_lwps.  Records the
288    information about the change of one hardware breakpoint/watchpoint
289    setting for the thread LWP.
290    The information is passed in via PTR.
291    N.B.  The actual updating of hardware debug registers is not
292    carried out until the moment the thread is resumed.  */
293 
294 static int
295 debug_reg_change_callback (struct lwp_info *lwp, void *ptr)
296 {
297   struct aarch64_dr_update_callback_param *param_p
298     = (struct aarch64_dr_update_callback_param *) ptr;
299   int tid = ptid_of_lwp (lwp).lwp ();
300   int idx = param_p->idx;
301   int is_watchpoint = param_p->is_watchpoint;
302   struct arch_lwp_info *info = lwp_arch_private_info (lwp);
303   dr_changed_t *dr_changed_ptr;
304   dr_changed_t dr_changed;
305 
306   if (info == NULL)
307     {
308       info = XCNEW (struct arch_lwp_info);
309       lwp_set_arch_private_info (lwp, info);
310     }
311 
312   if (show_debug_regs)
313     {
314       debug_printf ("debug_reg_change_callback: \n\tOn entry:\n");
315       debug_printf ("\ttid%d, dr_changed_bp=0x%s, "
316 		    "dr_changed_wp=0x%s\n", tid,
317 		    phex (info->dr_changed_bp, 8),
318 		    phex (info->dr_changed_wp, 8));
319     }
320 
321   dr_changed_ptr = is_watchpoint ? &info->dr_changed_wp
322     : &info->dr_changed_bp;
323   dr_changed = *dr_changed_ptr;
324 
325   gdb_assert (idx >= 0
326 	      && (idx <= (is_watchpoint ? aarch64_num_wp_regs
327 			  : aarch64_num_bp_regs)));
328 
329   /* The actual update is done later just before resuming the lwp,
330      we just mark that one register pair needs updating.  */
331   DR_MARK_N_CHANGED (dr_changed, idx);
332   *dr_changed_ptr = dr_changed;
333 
334   /* If the lwp isn't stopped, force it to momentarily pause, so
335      we can update its debug registers.  */
336   if (!lwp_is_stopped (lwp))
337     linux_stop_lwp (lwp);
338 
339   if (show_debug_regs)
340     {
341       debug_printf ("\tOn exit:\n\ttid%d, dr_changed_bp=0x%s, "
342 		    "dr_changed_wp=0x%s\n", tid,
343 		    phex (info->dr_changed_bp, 8),
344 		    phex (info->dr_changed_wp, 8));
345     }
346 
347   return 0;
348 }
349 
350 /* Notify each thread that their IDXth breakpoint/watchpoint register
351    pair needs to be updated.  The message will be recorded in each
352    thread's arch-specific data area, the actual updating will be done
353    when the thread is resumed.  */
354 
355 static void
356 aarch64_notify_debug_reg_change (const struct aarch64_debug_reg_state *state,
357 				 int is_watchpoint, unsigned int idx)
358 {
359   struct aarch64_dr_update_callback_param param;
360   ptid_t pid_ptid = ptid_t (current_lwp_ptid ().pid ());
361 
362   param.is_watchpoint = is_watchpoint;
363   param.idx = idx;
364 
365   iterate_over_lwps (pid_ptid, debug_reg_change_callback, (void *) &param);
366 }
367 
/* Reconfigure STATE to be compatible with Linux kernels with the PR
   external/20207 bug.  This is called when
   KERNEL_SUPPORTS_ANY_CONTIGUOUS_RANGE transitions to false.  Note we
   don't try to support combining watchpoints with matching (and thus
   shared) masks, as it's too late when we get here.  On buggy
   kernels, GDB will try to first setup the perfect matching ranges,
   which will run out of registers before this function can merge
   them.  It doesn't look like worth the effort to improve that, given
   eventually buggy kernels will be phased out.  */

static void
aarch64_downgrade_regs (struct aarch64_debug_reg_state *state)
{
  /* Walk every enabled watchpoint register (bit 0 of ctrl is the
     enable bit).  */
  for (int i = 0; i < aarch64_num_wp_regs; ++i)
    if ((state->dr_ctrl_wp[i] & 1) != 0)
      {
	gdb_assert (state->dr_ref_count_wp[i] != 0);
	/* Byte address select field lives in ctrl bits [12:5].  */
	uint8_t mask_orig = (state->dr_ctrl_wp[i] >> 5) & 0xff;
	gdb_assert (mask_orig != 0);
	/* The only BAS masks a buggy kernel accepts: 1, 2, 4 and 8
	   bytes, all starting at offset 0.  */
	static const uint8_t old_valid[] = { 0x01, 0x03, 0x0f, 0xff };
	uint8_t mask = 0;
	/* Pick the smallest legacy mask covering MASK_ORIG.  Since
	   each legacy mask is all-ones from bit 0, being numerically
	   <= it implies being a bitwise subset of it.  */
	for (const uint8_t old_mask : old_valid)
	  if (mask_orig <= old_mask)
	    {
	      mask = old_mask;
	      break;
	    }
	gdb_assert (mask != 0);

	/* No update needed for this watchpoint?  */
	if (mask == mask_orig)
	  continue;
	/* OR is sufficient: MASK is a superset of MASK_ORIG.  */
	state->dr_ctrl_wp[i] |= mask << 5;
	state->dr_addr_wp[i]
	  = align_down (state->dr_addr_wp[i], AARCH64_HWP_ALIGNMENT);

	/* Try to match duplicate entries.  */
	for (int j = 0; j < i; ++j)
	  if ((state->dr_ctrl_wp[j] & 1) != 0
	      && state->dr_addr_wp[j] == state->dr_addr_wp[i]
	      && state->dr_addr_orig_wp[j] == state->dr_addr_orig_wp[i]
	      && state->dr_ctrl_wp[j] == state->dr_ctrl_wp[i])
	    {
	      /* Fold entry I into the identical earlier entry J and
		 free register I.  */
	      state->dr_ref_count_wp[j] += state->dr_ref_count_wp[i];
	      state->dr_ref_count_wp[i] = 0;
	      state->dr_addr_wp[i] = 0;
	      state->dr_addr_orig_wp[i] = 0;
	      state->dr_ctrl_wp[i] &= ~1;
	      break;
	    }

	aarch64_notify_debug_reg_change (state, 1 /* is_watchpoint */, i);
      }
}
422 
/* Record the insertion of one breakpoint/watchpoint, as represented
   by ADDR and CTRL, in the process' arch-specific data area *STATE.
   TYPE selects breakpoint (hw_execute) vs. watchpoint handling;
   OFFSET and LEN describe the watched byte range within the aligned
   double-word; ADDR_ORIG is the original (pre-alignment) address,
   tracked for watchpoints only.  Return 0 on success, -1 when no
   hardware register is free.  */

static int
aarch64_dr_state_insert_one_point (struct aarch64_debug_reg_state *state,
				   enum target_hw_bp_type type,
				   CORE_ADDR addr, int offset, int len,
				   CORE_ADDR addr_orig)
{
  int i, idx, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  gdb_assert (aarch64_point_is_aligned (is_watchpoint, addr, len));
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      /* Breakpoints do not track an original address.  */
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find an existing or free register in our cache.  */
  idx = -1;
  for (i = 0; i < num_regs; ++i)
    {
      if ((dr_ctrl_p[i] & 1) == 0)
	{
	  gdb_assert (dr_ref_count[i] == 0);
	  idx = i;
	  /* no break; continue hunting for an existing one.  */
	}
      else if (dr_addr_p[i] == addr
	       && (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
	       && dr_ctrl_p[i] == ctrl)
	{
	  gdb_assert (dr_ref_count[i] != 0);
	  idx = i;
	  break;
	}
    }

  /* No space.  */
  if (idx == -1)
    return -1;

  /* Update our cache.  */
  if ((dr_ctrl_p[idx] & 1) == 0)
    {
      /* New entry: record it and notify; the enable bit (bit 0) was
	 set by aarch64_point_encode_ctrl_reg.  */
      dr_addr_p[idx] = addr;
      if (dr_addr_orig_p != nullptr)
	dr_addr_orig_p[idx] = addr_orig;
      dr_ctrl_p[idx] = ctrl;
      dr_ref_count[idx] = 1;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (state, is_watchpoint, idx);
    }
  else
    {
      /* Existing entry: just bump the reference count.  */
      dr_ref_count[idx]++;
    }

  return 0;
}
502 
/* Record the removal of one breakpoint/watchpoint, as represented by
   ADDR and CTRL, in the process' arch-specific data area *STATE.
   The arguments must match those of a previous
   aarch64_dr_state_insert_one_point call.  Return 0 on success, -1
   when no matching entry is found.  */

static int
aarch64_dr_state_remove_one_point (struct aarch64_debug_reg_state *state,
				   enum target_hw_bp_type type,
				   CORE_ADDR addr, int offset, int len,
				   CORE_ADDR addr_orig)
{
  int i, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      /* Breakpoints do not track an original address.  */
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find the entry that matches the ADDR and CTRL.  */
  for (i = 0; i < num_regs; ++i)
    if (dr_addr_p[i] == addr
	&& (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
	&& dr_ctrl_p[i] == ctrl)
      {
	gdb_assert (dr_ref_count[i] != 0);
	break;
      }

  /* Not found.  */
  if (i == num_regs)
    return -1;

  /* Clear our cache.  */
  if (--dr_ref_count[i] == 0)
    {
      /* Last reference dropped: disable the point by clearing the
	 enable bit and wipe the cached addresses.  */
      ctrl &= ~1;
      dr_addr_p[i] = 0;
      if (dr_addr_orig_p != nullptr)
	dr_addr_orig_p[i] = 0;
      dr_ctrl_p[i] = ctrl;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (state, is_watchpoint, i);
    }

  return 0;
}
566 
567 int
568 aarch64_handle_breakpoint (enum target_hw_bp_type type, CORE_ADDR addr,
569 			   int len, int is_insert,
570 			   struct aarch64_debug_reg_state *state)
571 {
572   if (is_insert)
573     {
574       /* The hardware breakpoint on AArch64 should always be 4-byte
575 	 aligned, but on AArch32, it can be 2-byte aligned.  Note that
576 	 we only check the alignment on inserting breakpoint because
577 	 aarch64_point_is_aligned needs the inferior_ptid inferior's
578 	 regcache to decide whether the inferior is 32-bit or 64-bit.
579 	 However when GDB follows the parent process and detach breakpoints
580 	 from child process, inferior_ptid is the child ptid, but the
581 	 child inferior doesn't exist in GDB's view yet.  */
582       if (!aarch64_point_is_aligned (0 /* is_watchpoint */ , addr, len))
583 	return -1;
584 
585       return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, -1);
586     }
587   else
588     return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, -1);
589 }
590 
591 /* This is essentially the same as aarch64_handle_breakpoint, apart
592    from that it is an aligned watchpoint to be handled.  */
593 
594 static int
595 aarch64_handle_aligned_watchpoint (enum target_hw_bp_type type,
596 				   CORE_ADDR addr, int len, int is_insert,
597 				   struct aarch64_debug_reg_state *state)
598 {
599   if (is_insert)
600     return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, addr);
601   else
602     return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, addr);
603 }
604 
/* Insert/remove unaligned watchpoint by calling
   aarch64_align_watchpoint repeatedly until the whole watched region,
   as represented by ADDR and LEN, has been properly aligned and ready
   to be written to one or more hardware watchpoint registers.
   IS_INSERT indicates whether this is an insertion or a deletion.
   Return 0 if succeed.  */

static int
aarch64_handle_unaligned_watchpoint (enum target_hw_bp_type type,
				     CORE_ADDR addr, int len, int is_insert,
				     struct aarch64_debug_reg_state *state)
{
  /* Original (user-requested) base address of the chunk processed in
     the current iteration; recorded in *STATE so that traps can be
     mapped back to the watched region.  */
  CORE_ADDR addr_orig = addr;

  while (len > 0)
    {
      CORE_ADDR aligned_addr;
      int aligned_offset, aligned_len, ret;
      CORE_ADDR addr_orig_next = addr_orig;

      /* Split off one aligned chunk; ADDR/LEN are advanced in place
	 to the remaining region for the next iteration.  */
      aarch64_align_watchpoint (addr, len, &aligned_addr, &aligned_offset,
				&aligned_len, &addr, &len, &addr_orig_next);

      if (is_insert)
	ret = aarch64_dr_state_insert_one_point (state, type, aligned_addr,
						 aligned_offset,
						 aligned_len, addr_orig);
      else
	ret = aarch64_dr_state_remove_one_point (state, type, aligned_addr,
						 aligned_offset,
						 aligned_len, addr_orig);

      if (show_debug_regs)
	debug_printf ("handle_unaligned_watchpoint: is_insert: %d\n"
		      "                             "
		      "aligned_addr: %s, aligned_len: %d\n"
		      "                                "
		      "addr_orig: %s\n"
		      "                                "
		      "next_addr: %s,    next_len: %d\n"
		      "                           "
		      "addr_orig_next: %s\n",
		      is_insert, core_addr_to_string_nz (aligned_addr),
		      aligned_len, core_addr_to_string_nz (addr_orig),
		      core_addr_to_string_nz (addr), len,
		      core_addr_to_string_nz (addr_orig_next));

      addr_orig = addr_orig_next;

      /* Bail out on the first chunk that fails; earlier chunks stay
	 as recorded.  */
      if (ret != 0)
	return ret;
    }

  return 0;
}
660 
661 int
662 aarch64_handle_watchpoint (enum target_hw_bp_type type, CORE_ADDR addr,
663 			   int len, int is_insert,
664 			   struct aarch64_debug_reg_state *state)
665 {
666   if (aarch64_point_is_aligned (1 /* is_watchpoint */ , addr, len))
667     return aarch64_handle_aligned_watchpoint (type, addr, len, is_insert,
668 					      state);
669   else
670     return aarch64_handle_unaligned_watchpoint (type, addr, len, is_insert,
671 						state);
672 }
673 
/* Call ptrace to set the thread TID's hardware breakpoint/watchpoint
   registers with data from *STATE.  WATCHPOINT selects between the
   watchpoint (non-zero) and breakpoint (zero) register sets.  On
   ptrace failure this raises an error, except for the first EINVAL
   on a watchpoint set, which triggers the PR external/20207
   downgrade-and-retry path.  */

void
aarch64_linux_set_debug_regs (struct aarch64_debug_reg_state *state,
			      int tid, int watchpoint)
{
  int i, count;
  struct iovec iov;
  struct user_hwdebug_state regs;
  const CORE_ADDR *addr;
  const unsigned int *ctrl;

  memset (&regs, 0, sizeof (regs));
  iov.iov_base = &regs;
  count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
  addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
  ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;
  if (count == 0)
    return;
  /* Transfer only the header plus the COUNT register pairs in use.  */
  iov.iov_len = (offsetof (struct user_hwdebug_state, dbg_regs)
		 + count * sizeof (regs.dbg_regs[0]));

  for (i = 0; i < count; i++)
    {
      regs.dbg_regs[i].addr = addr[i];
      regs.dbg_regs[i].ctrl = ctrl[i];
    }

  if (ptrace (PTRACE_SETREGSET, tid,
	      watchpoint ? NT_ARM_HW_WATCH : NT_ARM_HW_BREAK,
	      (void *) &iov))
    {
      /* Handle Linux kernels with the PR external/20207 bug.  */
      if (watchpoint && errno == EINVAL
	  && kernel_supports_any_contiguous_range)
	{
	  /* First EINVAL: assume the buggy kernel, downgrade the
	     state to the four legacy BAS masks and retry once (the
	     flag flip guarantees no further recursion).  */
	  kernel_supports_any_contiguous_range = false;
	  aarch64_downgrade_regs (state);
	  aarch64_linux_set_debug_regs (state, tid, watchpoint);
	  return;
	}
      error (_("Unexpected error setting hardware debug registers"));
    }
}
719 
720 /* See nat/aarch64-linux-hw-point.h.  */
721 
722 bool
723 aarch64_linux_any_set_debug_regs_state (aarch64_debug_reg_state *state,
724 					bool watchpoint)
725 {
726   int count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
727   if (count == 0)
728     return false;
729 
730   const CORE_ADDR *addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
731   const unsigned int *ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;
732 
733   for (int i = 0; i < count; i++)
734     if (addr[i] != 0 || ctrl[i] != 0)
735       return true;
736 
737   return false;
738 }
739 
/* Print the values of the cached breakpoint/watchpoint registers.
   FUNC is the name of the calling function, printed as a prefix.
   ADDR, LEN and TYPE describe the point being operated on; the
   (addr, len, type) line is suppressed when ADDR and LEN are both
   zero.  */

void
aarch64_show_debug_reg_state (struct aarch64_debug_reg_state *state,
			      const char *func, CORE_ADDR addr,
			      int len, enum target_hw_bp_type type)
{
  int i;

  debug_printf ("%s", func);
  if (addr || len)
    debug_printf (" (addr=0x%08lx, len=%d, type=%s)",
		  (unsigned long) addr, len,
		  type == hw_write ? "hw-write-watchpoint"
		  : (type == hw_read ? "hw-read-watchpoint"
		     : (type == hw_access ? "hw-access-watchpoint"
			: (type == hw_execute ? "hw-breakpoint"
			   : "??unknown??"))));
  debug_printf (":\n");

  /* Dump every breakpoint register slot, used or not.  */
  debug_printf ("\tBREAKPOINTs:\n");
  for (i = 0; i < aarch64_num_bp_regs; i++)
    debug_printf ("\tBP%d: addr=%s, ctrl=0x%08x, ref.count=%d\n",
		  i, core_addr_to_string_nz (state->dr_addr_bp[i]),
		  state->dr_ctrl_bp[i], state->dr_ref_count_bp[i]);

  /* Likewise for the watchpoint slots, including the original
     (pre-alignment) addresses.  */
  debug_printf ("\tWATCHPOINTs:\n");
  for (i = 0; i < aarch64_num_wp_regs; i++)
    debug_printf ("\tWP%d: addr=%s (orig=%s), ctrl=0x%08x, ref.count=%d\n",
		  i, core_addr_to_string_nz (state->dr_addr_wp[i]),
		  core_addr_to_string_nz (state->dr_addr_orig_wp[i]),
		  state->dr_ctrl_wp[i], state->dr_ref_count_wp[i]);
}
773 
/* Get the hardware debug register capacity information from the
   process represented by TID.  The results are stored in the globals
   aarch64_num_wp_regs and aarch64_num_bp_regs; on any failure the
   affected count is set to 0 after a warning.  */

void
aarch64_linux_get_debug_reg_capacity (int tid)
{
  struct iovec iov;
  struct user_hwdebug_state dreg_state;

  iov.iov_base = &dreg_state;
  iov.iov_len = sizeof (dreg_state);

  /* Get hardware watchpoint register info.  The debug architecture
     version must be one we recognize (v8, v8.1 or v8.2).  */
  if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_WATCH, &iov) == 0
      && (AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8
	  || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_1
	  || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_2))
    {
      aarch64_num_wp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
      /* Clamp to the size of our register cache arrays.  */
      if (aarch64_num_wp_regs > AARCH64_HWP_MAX_NUM)
	{
	  warning (_("Unexpected number of hardware watchpoint registers"
		     " reported by ptrace, got %d, expected %d."),
		   aarch64_num_wp_regs, AARCH64_HWP_MAX_NUM);
	  aarch64_num_wp_regs = AARCH64_HWP_MAX_NUM;
	}
    }
  else
    {
      warning (_("Unable to determine the number of hardware watchpoints"
		 " available."));
      aarch64_num_wp_regs = 0;
    }

  /* Get hardware breakpoint register info, same checks as above.  */
  if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_BREAK, &iov) == 0
      && (AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8
	  || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_1
	  || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_2))
    {
      aarch64_num_bp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
      /* Clamp to the size of our register cache arrays.  */
      if (aarch64_num_bp_regs > AARCH64_HBP_MAX_NUM)
	{
	  warning (_("Unexpected number of hardware breakpoint registers"
		     " reported by ptrace, got %d, expected %d."),
		   aarch64_num_bp_regs, AARCH64_HBP_MAX_NUM);
	  aarch64_num_bp_regs = AARCH64_HBP_MAX_NUM;
	}
    }
  else
    {
      warning (_("Unable to determine the number of hardware breakpoints"
		 " available."));
      aarch64_num_bp_regs = 0;
    }
}
830 
831 /* Return true if we can watch a memory region that starts address
832    ADDR and whose length is LEN in bytes.  */
833 
834 int
835 aarch64_linux_region_ok_for_watchpoint (CORE_ADDR addr, int len)
836 {
837   CORE_ADDR aligned_addr;
838 
839   /* Can not set watchpoints for zero or negative lengths.  */
840   if (len <= 0)
841     return 0;
842 
843   /* Must have hardware watchpoint debug register(s).  */
844   if (aarch64_num_wp_regs == 0)
845     return 0;
846 
847   /* We support unaligned watchpoint address and arbitrary length,
848      as long as the size of the whole watched area after alignment
849      doesn't exceed size of the total area that all watchpoint debug
850      registers can watch cooperatively.
851 
852      This is a very relaxed rule, but unfortunately there are
853      limitations, e.g. false-positive hits, due to limited support of
854      hardware debug registers in the kernel.  See comment above
855      aarch64_align_watchpoint for more information.  */
856 
857   aligned_addr = addr & ~(AARCH64_HWP_MAX_LEN_PER_REG - 1);
858   if (aligned_addr + aarch64_num_wp_regs * AARCH64_HWP_MAX_LEN_PER_REG
859       < addr + len)
860     return 0;
861 
862   /* All tests passed so we are likely to be able to set the watchpoint.
863      The reason that it is 'likely' rather than 'must' is because
864      we don't check the current usage of the watchpoint registers, and
865      there may not be enough registers available for this watchpoint.
866      Ideally we should check the cached debug register state, however
867      the checking is costly.  */
868   return 1;
869 }
870