1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2    for GDB.
3    Copyright (C) 2002-2023 Free Software Foundation, Inc.
4 
5    This file is part of GDB.
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3 of the License, or
10    (at your option) any later version.
11 
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29 
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33 
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36    gdb_proc_service.h.  */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40 
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49 
50 #ifdef __x86_64__
51 static target_desc_up tdesc_amd64_linux_no_xml;
52 #endif
53 static target_desc_up tdesc_i386_linux_no_xml;
54 
55 
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };	/* jmp rel32; displacement patched in later.  */
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };	/* jmp rel16 (0x66 operand-size prefix); ditto.  */
58 
59 /* Backward compatibility for gdb without XML support.  */
60 
61 static const char xmltarget_i386_linux_no_xml[] = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65 
66 #ifdef __x86_64__
67 static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72 
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76 
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80 
81 /* This definition comes from prctl.h, but some kernels may not have it.  */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL      30
84 #endif
85 
86 /* The following definitions come from prctl.h, but may be absent
87    for certain configurations.  */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
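/* A usage sketch (this is how ps_get_thread_area and
   low_get_thread_area below use these): with PTRACE_ARCH_PRCTL the
   third ptrace argument receives the address and the fourth selects
   the sub-command:

     void *base;
     ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS);  */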
94 
95 /* Linux target op definitions for the x86 architecture.
96    This is initialized assuming an amd64 target.
97    'low_arch_setup' will correct it for i386 or amd64 targets.  */
98 
99 class x86_target : public linux_process_target
100 {
101 public:
102 
103   const regs_info *get_regs_info () override;
104 
105   const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106 
107   bool supports_z_point_type (char z_type) override;
108 
109   void process_qsupported (gdb::array_view<const char * const> features) override;
110 
111   bool supports_tracepoints () override;
112 
113   bool supports_fast_tracepoints () override;
114 
115   int install_fast_tracepoint_jump_pad
116     (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
117      CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
118      CORE_ADDR *trampoline, ULONGEST *trampoline_size,
119      unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
120      CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
121      char *err) override;
122 
123   int get_min_fast_tracepoint_insn_len () override;
124 
125   struct emit_ops *emit_ops () override;
126 
127   int get_ipa_tdesc_idx () override;
128 
129 protected:
130 
131   void low_arch_setup () override;
132 
133   bool low_cannot_fetch_register (int regno) override;
134 
135   bool low_cannot_store_register (int regno) override;
136 
137   bool low_supports_breakpoints () override;
138 
139   CORE_ADDR low_get_pc (regcache *regcache) override;
140 
141   void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
142 
143   int low_decr_pc_after_break () override;
144 
145   bool low_breakpoint_at (CORE_ADDR pc) override;
146 
147   int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
148 			int size, raw_breakpoint *bp) override;
149 
150   int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
151 			int size, raw_breakpoint *bp) override;
152 
153   bool low_stopped_by_watchpoint () override;
154 
155   CORE_ADDR low_stopped_data_address () override;
156 
157   /* collect_ptrace_register/supply_ptrace_register are not needed in the
158      native i386 case (no registers smaller than an xfer unit), and are not
159      used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
160 
161   /* Need to fix up i386 siginfo if host is amd64.  */
162   bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
163 			  int direction) override;
164 
165   arch_process_info *low_new_process () override;
166 
167   void low_delete_process (arch_process_info *info) override;
168 
169   void low_new_thread (lwp_info *) override;
170 
171   void low_delete_thread (arch_lwp_info *) override;
172 
173   void low_new_fork (process_info *parent, process_info *child) override;
174 
175   void low_prepare_to_resume (lwp_info *lwp) override;
176 
177   int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
178 
179   bool low_supports_range_stepping () override;
180 
181   bool low_supports_catch_syscall () override;
182 
183   void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
184 
185 private:
186 
187   /* Update the target description of all processes; a new GDB has
188      connected, and it may or may not support xml target descriptions.  */
189   void update_xmltarget ();
190 };
191 
192 /* The singleton target ops object.  */
193 
194 static x86_target the_x86_target;
195 
196 /* Per-process arch-specific data we want to keep.  */
197 
198 struct arch_process_info
199 {
200   struct x86_debug_reg_state debug_reg_state;
201 };
202 
203 #ifdef __x86_64__
204 
205 /* Mapping between the general-purpose registers in `struct user'
206    format and GDB's register array layout.
207    Note that the transfer layout uses 64-bit regs.  */
208 static /*const*/ int i386_regmap[] =
209 {
210   RAX * 8, RCX * 8, RDX * 8, RBX * 8,
211   RSP * 8, RBP * 8, RSI * 8, RDI * 8,
212   RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
213   DS * 8, ES * 8, FS * 8, GS * 8
214 };
215 
216 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
217 
218 /* So the code below doesn't have to care whether it's i386 or amd64.  */
219 #define ORIG_EAX ORIG_RAX
220 #define REGSIZE 8
221 
222 static const int x86_64_regmap[] =
223 {
224   RAX * 8, RBX * 8, RCX * 8, RDX * 8,
225   RSI * 8, RDI * 8, RBP * 8, RSP * 8,
226   R8 * 8, R9 * 8, R10 * 8, R11 * 8,
227   R12 * 8, R13 * 8, R14 * 8, R15 * 8,
228   RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
229   DS * 8, ES * 8, FS * 8, GS * 8,
230   -1, -1, -1, -1, -1, -1, -1, -1,
231   -1, -1, -1, -1, -1, -1, -1, -1,
232   -1, -1, -1, -1, -1, -1, -1, -1,
233   -1,
234   -1, -1, -1, -1, -1, -1, -1, -1,
235   ORIG_RAX * 8,
236   21 * 8,  22 * 8,			/* fs_base, gs_base.  */
237   -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
238   -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
239   -1, -1, -1, -1, -1, -1, -1, -1,       /* xmm16 ... xmm31 (AVX512)  */
240   -1, -1, -1, -1, -1, -1, -1, -1,
241   -1, -1, -1, -1, -1, -1, -1, -1,       /* ymm16 ... ymm31 (AVX512)  */
242   -1, -1, -1, -1, -1, -1, -1, -1,
243   -1, -1, -1, -1, -1, -1, -1, -1,       /* k0 ... k7 (AVX512)  */
244   -1, -1, -1, -1, -1, -1, -1, -1,       /* zmm0 ... zmm31 (AVX512)  */
245   -1, -1, -1, -1, -1, -1, -1, -1,
246   -1, -1, -1, -1, -1, -1, -1, -1,
247   -1, -1, -1, -1, -1, -1, -1, -1,
248   -1					/* pkru  */
249 };
250 
251 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
252 #define X86_64_USER_REGS (GS + 1)
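/* Note: each regmap entry above is the byte offset of the register
   within the PTRACE_GETREGS block ('struct user_regs_struct'); -1
   marks registers (XMM/YMM/ZMM, MPX, PKRU, ...) that are not in that
   block and travel through the FP/XSTATE regsets instead.  */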
253 
254 #else /* ! __x86_64__ */
255 
256 /* Mapping between the general-purpose registers in `struct user'
257    format and GDB's register array layout.  */
258 static /*const*/ int i386_regmap[] =
259 {
260   EAX * 4, ECX * 4, EDX * 4, EBX * 4,
261   UESP * 4, EBP * 4, ESI * 4, EDI * 4,
262   EIP * 4, EFL * 4, CS * 4, SS * 4,
263   DS * 4, ES * 4, FS * 4, GS * 4
264 };
265 
266 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
267 
268 #define REGSIZE 4
269 
270 #endif
271 
272 #ifdef __x86_64__
273 
274 /* Returns true if THREAD belongs to an x86-64 process, per the tdesc.  */
275 
276 static int
277 is_64bit_tdesc (thread_info *thread)
278 {
279   struct regcache *regcache = get_thread_regcache (thread, 0);
280 
281   return register_size (regcache->tdesc, 0) == 8;
282 }
283 
284 #endif
285 
286 
287 /* Called by libthread_db.  */
288 
289 ps_err_e
290 ps_get_thread_area (struct ps_prochandle *ph,
291 		    lwpid_t lwpid, int idx, void **base)
292 {
293 #ifdef __x86_64__
294   lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
295   gdb_assert (lwp != nullptr);
296   int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));
297 
298   if (use_64bit)
299     {
300       switch (idx)
301 	{
302 	case FS:
303 	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
304 	    return PS_OK;
305 	  break;
306 	case GS:
307 	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
308 	    return PS_OK;
309 	  break;
310 	default:
311 	  return PS_BADADDR;
312 	}
313       return PS_ERR;
314     }
315 #endif
316 
317   {
318     unsigned int desc[4];
319 
320     if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
321 		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
322       return PS_ERR;
323 
324     /* Ensure we properly extend the value to 64-bits for x86_64.  */
325     *base = (void *) (uintptr_t) desc[1];
326     return PS_OK;
327   }
328 }
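/* Note: DESC above mirrors the kernel's 'struct user_desc'
   (entry_number, base_addr, limit, flag bits), which is why the base
   address is taken from desc[1].  */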
329 
330 /* Get the thread area address.  This is used to recognize which
331    thread is which when tracing with the in-process agent library.  We
332    don't read anything from the address, and treat it as opaque; it's
333    the address itself that we assume is unique per-thread.  */
334 
335 int
336 x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
337 {
338   lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
339   gdb_assert (lwp != nullptr);
340 #ifdef __x86_64__
341   int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));
342 
343   if (use_64bit)
344     {
345       void *base;
346       if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
347 	{
348 	  *addr = (CORE_ADDR) (uintptr_t) base;
349 	  return 0;
350 	}
351 
352       return -1;
353     }
354 #endif
355 
356   {
357     struct thread_info *thr = get_lwp_thread (lwp);
358     struct regcache *regcache = get_thread_regcache (thr, 1);
359     unsigned int desc[4];
360     ULONGEST gs = 0;
361     const int reg_thread_area = 3; /* Shift off the selector's RPL/TI bits to get the GDT index.  */
362     int idx;
363 
364     collect_register_by_name (regcache, "gs", &gs);
365 
366     idx = gs >> reg_thread_area;
367 
368     if (ptrace (PTRACE_GET_THREAD_AREA,
369 		lwpid_of (thr),
370 		(void *) (long) idx, (unsigned long) &desc) < 0)
371       return -1;
372 
373     *addr = desc[1];
374     return 0;
375   }
376 }
377 
378 
379 
380 bool
381 x86_target::low_cannot_store_register (int regno)
382 {
383 #ifdef __x86_64__
384   if (is_64bit_tdesc (current_thread))
385     return false;
386 #endif
387 
388   return regno >= I386_NUM_REGS;
389 }
390 
391 bool
392 x86_target::low_cannot_fetch_register (int regno)
393 {
394 #ifdef __x86_64__
395   if (is_64bit_tdesc (current_thread))
396     return false;
397 #endif
398 
399   return regno >= I386_NUM_REGS;
400 }
401 
402 static void
403 collect_register_i386 (struct regcache *regcache, int regno, void *buf)
404 {
405   collect_register (regcache, regno, buf);
406 
407 #ifdef __x86_64__
408   /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
409      space reserved in buf for the register is 8 bytes.  Make sure the entire
410      reserved space is initialized.  */
411 
412   gdb_assert (register_size (regcache->tdesc, regno) == 4);
413 
414   if (regno == RAX)
415     {
416       /* Sign extend EAX value to avoid potential syscall restart
417 	 problems.
418 
419 	 See amd64_linux_collect_native_gregset() in
420 	 gdb/amd64-linux-nat.c for a detailed explanation.  */
421       *(int64_t *) buf = *(int32_t *) buf;
422     }
423   else
424     {
425       /* Zero-extend.  */
426       *(uint64_t *) buf = *(uint32_t *) buf;
427     }
428 #endif
429 }
430 
431 static void
432 x86_fill_gregset (struct regcache *regcache, void *buf)
433 {
434   int i;
435 
436 #ifdef __x86_64__
437   if (register_size (regcache->tdesc, 0) == 8)
438     {
439       for (i = 0; i < X86_64_NUM_REGS; i++)
440 	if (x86_64_regmap[i] != -1)
441 	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
442 
443       return;
444     }
445 #endif
446 
447   for (i = 0; i < I386_NUM_REGS; i++)
448     collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);
449 
450   /* Handle ORIG_EAX, which is not in i386_regmap.  */
451   collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
452 			 ((char *) buf) + ORIG_EAX * REGSIZE);
453 }
454 
455 static void
456 x86_store_gregset (struct regcache *regcache, const void *buf)
457 {
458   int i;
459 
460 #ifdef __x86_64__
461   if (register_size (regcache->tdesc, 0) == 8)
462     {
463       for (i = 0; i < X86_64_NUM_REGS; i++)
464 	if (x86_64_regmap[i] != -1)
465 	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
466 
467       return;
468     }
469 #endif
470 
471   for (i = 0; i < I386_NUM_REGS; i++)
472     supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
473 
474   supply_register_by_name (regcache, "orig_eax",
475 			   ((char *) buf) + ORIG_EAX * REGSIZE);
476 }
477 
478 static void
479 x86_fill_fpregset (struct regcache *regcache, void *buf)
480 {
481 #ifdef __x86_64__
482   i387_cache_to_fxsave (regcache, buf);
483 #else
484   i387_cache_to_fsave (regcache, buf);
485 #endif
486 }
487 
488 static void
489 x86_store_fpregset (struct regcache *regcache, const void *buf)
490 {
491 #ifdef __x86_64__
492   i387_fxsave_to_cache (regcache, buf);
493 #else
494   i387_fsave_to_cache (regcache, buf);
495 #endif
496 }
497 
498 #ifndef __x86_64__
499 
500 static void
501 x86_fill_fpxregset (struct regcache *regcache, void *buf)
502 {
503   i387_cache_to_fxsave (regcache, buf);
504 }
505 
506 static void
507 x86_store_fpxregset (struct regcache *regcache, const void *buf)
508 {
509   i387_fxsave_to_cache (regcache, buf);
510 }
511 
512 #endif
513 
514 static void
515 x86_fill_xstateregset (struct regcache *regcache, void *buf)
516 {
517   i387_cache_to_xsave (regcache, buf);
518 }
519 
520 static void
521 x86_store_xstateregset (struct regcache *regcache, const void *buf)
522 {
523   i387_xsave_to_cache (regcache, buf);
524 }
525 
526 /* ??? The non-biarch i386 case stores all the i387 regs twice.
527    Once in i387_.*fsave.* and once in i387_.*fxsave.*.
528    This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
529    doesn't work.  It would be nice to avoid the duplication in the case
530    where it does work.  Maybe the arch_setup routine could check whether
531    it works and update the supported regsets accordingly.  */
532 
533 static struct regset_info x86_regsets[] =
534 {
535 #ifdef HAVE_PTRACE_GETREGS
536   { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
537     GENERAL_REGS,
538     x86_fill_gregset, x86_store_gregset },
539   { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
540     EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
541 # ifndef __x86_64__
542 #  ifdef HAVE_PTRACE_GETFPXREGS
543   { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
544     EXTENDED_REGS,
545     x86_fill_fpxregset, x86_store_fpxregset },
546 #  endif
547 # endif
548   { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
549     FP_REGS,
550     x86_fill_fpregset, x86_store_fpregset },
551 #endif /* HAVE_PTRACE_GETREGS */
552   NULL_REGSET
553 };
554 
555 bool
556 x86_target::low_supports_breakpoints ()
557 {
558   return true;
559 }
560 
561 CORE_ADDR
562 x86_target::low_get_pc (regcache *regcache)
563 {
564   int use_64bit = register_size (regcache->tdesc, 0) == 8;
565 
566   if (use_64bit)
567     {
568       uint64_t pc;
569 
570       collect_register_by_name (regcache, "rip", &pc);
571       return (CORE_ADDR) pc;
572     }
573   else
574     {
575       uint32_t pc;
576 
577       collect_register_by_name (regcache, "eip", &pc);
578       return (CORE_ADDR) pc;
579     }
580 }
581 
582 void
583 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
584 {
585   int use_64bit = register_size (regcache->tdesc, 0) == 8;
586 
587   if (use_64bit)
588     {
589       uint64_t newpc = pc;
590 
591       supply_register_by_name (regcache, "rip", &newpc);
592     }
593   else
594     {
595       uint32_t newpc = pc;
596 
597       supply_register_by_name (regcache, "eip", &newpc);
598     }
599 }
600 
601 int
602 x86_target::low_decr_pc_after_break ()
603 {
604   return 1;
605 }
606 
607 
608 static const gdb_byte x86_breakpoint[] = { 0xCC };
609 #define x86_breakpoint_len 1
610 
611 bool
612 x86_target::low_breakpoint_at (CORE_ADDR pc)
613 {
614   unsigned char c;
615 
616   read_memory (pc, &c, 1);
617   if (c == 0xCC)
618     return true;
619 
620   return false;
621 }
622 
623 /* Low-level function vector.  */
624 struct x86_dr_low_type x86_dr_low =
625   {
626     x86_linux_dr_set_control,
627     x86_linux_dr_set_addr,
628     x86_linux_dr_get_addr,
629     x86_linux_dr_get_status,
630     x86_linux_dr_get_control,
631     sizeof (void *),
632   };
633 
634 /* Breakpoint/Watchpoint support.  */
635 
636 bool
637 x86_target::supports_z_point_type (char z_type)
638 {
639   switch (z_type)
640     {
641     case Z_PACKET_SW_BP:
642     case Z_PACKET_HW_BP:
643     case Z_PACKET_WRITE_WP:
644     case Z_PACKET_ACCESS_WP:
645       return true;
646     default:
647       return false;
648     }
649 }
650 
651 int
652 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
653 			      int size, raw_breakpoint *bp)
654 {
655   struct process_info *proc = current_process ();
656 
657   switch (type)
658     {
659     case raw_bkpt_type_hw:
660     case raw_bkpt_type_write_wp:
661     case raw_bkpt_type_access_wp:
662       {
663 	enum target_hw_bp_type hw_type
664 	  = raw_bkpt_type_to_target_hw_bp_type (type);
665 	struct x86_debug_reg_state *state
666 	  = &proc->priv->arch_private->debug_reg_state;
667 
668 	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
669       }
670 
671     default:
672       /* Unsupported.  */
673       return 1;
674     }
675 }
676 
677 int
678 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
679 			      int size, raw_breakpoint *bp)
680 {
681   struct process_info *proc = current_process ();
682 
683   switch (type)
684     {
685     case raw_bkpt_type_hw:
686     case raw_bkpt_type_write_wp:
687     case raw_bkpt_type_access_wp:
688       {
689 	enum target_hw_bp_type hw_type
690 	  = raw_bkpt_type_to_target_hw_bp_type (type);
691 	struct x86_debug_reg_state *state
692 	  = &proc->priv->arch_private->debug_reg_state;
693 
694 	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
695       }
696     default:
697       /* Unsupported.  */
698       return 1;
699     }
700 }
701 
702 bool
703 x86_target::low_stopped_by_watchpoint ()
704 {
705   struct process_info *proc = current_process ();
706   return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
707 }
708 
709 CORE_ADDR
710 x86_target::low_stopped_data_address ()
711 {
712   struct process_info *proc = current_process ();
713   CORE_ADDR addr;
714   if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
715 				   &addr))
716     return addr;
717   return 0;
718 }
719 
720 /* Called when a new process is created.  */
721 
722 arch_process_info *
723 x86_target::low_new_process ()
724 {
725   struct arch_process_info *info = XCNEW (struct arch_process_info);
726 
727   x86_low_init_dregs (&info->debug_reg_state);
728 
729   return info;
730 }
731 
732 /* Called when a process is being deleted.  */
733 
734 void
735 x86_target::low_delete_process (arch_process_info *info)
736 {
737   xfree (info);
738 }
739 
740 void
741 x86_target::low_new_thread (lwp_info *lwp)
742 {
743   /* This comes from nat/.  */
744   x86_linux_new_thread (lwp);
745 }
746 
747 void
748 x86_target::low_delete_thread (arch_lwp_info *alwp)
749 {
750   /* This comes from nat/.  */
751   x86_linux_delete_thread (alwp);
752 }
753 
754 /* Target routine for new_fork.  */
755 
756 void
757 x86_target::low_new_fork (process_info *parent, process_info *child)
758 {
759   /* These are allocated by linux_add_process.  */
760   gdb_assert (parent->priv != NULL
761 	      && parent->priv->arch_private != NULL);
762   gdb_assert (child->priv != NULL
763 	      && child->priv->arch_private != NULL);
764 
765   /* Linux kernels before 2.6.33 commit
766      72f674d203cd230426437cdcf7dd6f681dad8b0d
767      inherit hardware debug registers from the parent
768      on fork/vfork/clone.  Newer Linux kernels create such tasks with
769      zeroed debug registers.
770 
771      GDB core assumes the child inherits the watchpoints/hw
772      breakpoints of the parent, and will remove them all from the
773      forked-off process.  Copy the debug register mirrors into the
774      new process so that all breakpoints and watchpoints can be
775      removed together.  The debug register mirror will be zeroed
776      in the end before detaching the forked-off process, thus making
777      this compatible with older Linux kernels too.  */
778 
779   *child->priv->arch_private = *parent->priv->arch_private;
780 }
781 
782 void
783 x86_target::low_prepare_to_resume (lwp_info *lwp)
784 {
785   /* This comes from nat/.  */
786   x86_linux_prepare_to_resume (lwp);
787 }
788 
789 /* See nat/x86-dregs.h.  */
790 
791 struct x86_debug_reg_state *
792 x86_debug_reg_state (pid_t pid)
793 {
794   struct process_info *proc = find_process_pid (pid);
795 
796   return &proc->priv->arch_private->debug_reg_state;
797 }
798 
799 /* When GDBSERVER is built as a 64-bit application on linux, the
800    PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
801    debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
802    as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
803    conversion in-place ourselves.  */
804 
805 /* Convert a ptrace/host siginfo object into/from the siginfo in the
806    layout of the inferior's architecture.  Returns true if any
807    conversion was done; false otherwise.  If DIRECTION is 1, then copy
808    from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
809    INF.  */
810 
811 bool
812 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
813 {
814 #ifdef __x86_64__
815   unsigned int machine;
816   int tid = lwpid_of (current_thread);
817   int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
818 
819   /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
820   if (!is_64bit_tdesc (current_thread))
821       return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
822 					       FIXUP_32);
823   /* No fixup for native x32 GDB.  */
824   else if (!is_elf64 && sizeof (void *) == 8)
825     return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
826 					     FIXUP_X32);
827 #endif
828 
829   return false;
830 }
831 
832 static int use_xml;
833 
834 /* Format of XSAVE extended state is:
835 	struct
836 	{
837 	  fxsave_bytes[0..463]
838 	  sw_usable_bytes[464..511]
839 	  xstate_hdr_bytes[512..575]
840 	  avx_bytes[576..831]
841 	  future_state etc
842 	};
843 
844   The same memory layout will be used for the coredump NT_X86_XSTATE
845   representing the XSAVE extended state registers.
846 
847   The first 8 bytes of sw_usable_bytes[464..471] are the OS enabled
848   extended state mask, which is the same as the extended control register
849   0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
850   together with the mask saved in the xstate_hdr_bytes to determine what
851   states the processor/OS supports and what state, used or initialized,
852   the process/thread is in.  */
853 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
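/* A minimal sketch of how this offset is used (it mirrors what
   x86_linux_read_description does below; XSAVE_BUF is a hypothetical
   pointer to a raw XSAVE block):

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
	     sizeof (xcr0));  */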
854 
855 /* Does the current host support the GETFPXREGS request?  The header
856    file may or may not define it, and even if it is defined, the
857    kernel will return EIO if it's running on a pre-SSE processor.  */
858 int have_ptrace_getfpxregs =
859 #ifdef HAVE_PTRACE_GETFPXREGS
860   -1
861 #else
862   0
863 #endif
864 ;
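/* Note: -1 above means "not determined yet"; the probe in
   x86_linux_read_description below sets this to 0 or 1 the first time
   it actually tries PTRACE_GETFPXREGS.  */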
865 
866 /* Get the Linux/x86 target description from the running target.  */
867 
868 static const struct target_desc *
869 x86_linux_read_description (void)
870 {
871   unsigned int machine;
872   int is_elf64;
873   int xcr0_features;
874   int tid;
875   static uint64_t xcr0;
876   struct regset_info *regset;
877 
878   tid = lwpid_of (current_thread);
879 
880   is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
881 
882   if (sizeof (void *) == 4)
883     {
884       if (is_elf64 > 0)
885        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
886 #ifndef __x86_64__
887       else if (machine == EM_X86_64)
888        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
889 #endif
890     }
891 
892 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
893   if (machine == EM_386 && have_ptrace_getfpxregs == -1)
894     {
895       elf_fpxregset_t fpxregs;
896 
897       if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
898 	{
899 	  have_ptrace_getfpxregs = 0;
900 	  have_ptrace_getregset = 0;
901 	  return i386_linux_read_description (X86_XSTATE_X87);
902 	}
903       else
904 	have_ptrace_getfpxregs = 1;
905     }
906 #endif
907 
908   if (!use_xml)
909     {
910       x86_xcr0 = X86_XSTATE_SSE_MASK;
911 
912       /* Don't use XML.  */
913 #ifdef __x86_64__
914       if (machine == EM_X86_64)
915 	return tdesc_amd64_linux_no_xml.get ();
916       else
917 #endif
918 	return tdesc_i386_linux_no_xml.get ();
919     }
920 
921   if (have_ptrace_getregset == -1)
922     {
923       uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
924       struct iovec iov;
925 
926       iov.iov_base = xstateregs;
927       iov.iov_len = sizeof (xstateregs);
928 
929       /* Check if PTRACE_GETREGSET works.  */
930       if (ptrace (PTRACE_GETREGSET, tid,
931 		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
932 	have_ptrace_getregset = 0;
933       else
934 	{
935 	  have_ptrace_getregset = 1;
936 
937 	  /* Get XCR0 from XSAVE extended state.  */
938 	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
939 			     / sizeof (uint64_t))];
940 
941 	  /* Use PTRACE_GETREGSET if it is available.  */
942 	  for (regset = x86_regsets;
943 	       regset->fill_function != NULL; regset++)
944 	    if (regset->get_request == PTRACE_GETREGSET)
945 	      regset->size = X86_XSTATE_SIZE (xcr0);
946 	    else if (regset->type != GENERAL_REGS)
947 	      regset->size = 0;
948 	}
949     }
950 
951   /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
952   xcr0_features = (have_ptrace_getregset
953 		   && (xcr0 & X86_XSTATE_ALL_MASK));
954 
955   if (xcr0_features)
956     x86_xcr0 = xcr0;
957 
958   if (machine == EM_X86_64)
959     {
960 #ifdef __x86_64__
961       const target_desc *tdesc = NULL;
962 
963       if (xcr0_features)
964 	{
965 	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
966 						!is_elf64);
967 	}
968 
969       if (tdesc == NULL)
970 	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
971       return tdesc;
972 #endif
973     }
974   else
975     {
976       const target_desc *tdesc = NULL;
977 
978       if (xcr0_features)
979 	  tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
980 
981       if (tdesc == NULL)
982 	tdesc = i386_linux_read_description (X86_XSTATE_SSE);
983 
984       return tdesc;
985     }
986 
987   gdb_assert_not_reached ("failed to return tdesc");
988 }
989 
990 /* Update the target description of all processes; a new GDB has
991    connected, and it may or may not support xml target descriptions.  */
992 
993 void
994 x86_target::update_xmltarget ()
995 {
996   scoped_restore_current_thread restore_thread;
997 
998   /* Before changing the register cache's internal layout, flush the
999      contents of the current valid caches back to the threads, and
1000      release the current regcache objects.  */
1001   regcache_release ();
1002 
1003   for_each_process ([this] (process_info *proc) {
1004     int pid = proc->pid;
1005 
1006     /* Look up any thread of this process.  */
1007     switch_to_thread (find_any_thread_of_pid (pid));
1008 
1009     low_arch_setup ();
1010   });
1011 }
1012 
1013 /* Process qSupported query, "xmlRegisters=".  Update the buffer size for
1014    PTRACE_GETREGSET.  */
1015 
1016 void
1017 x86_target::process_qsupported (gdb::array_view<const char * const> features)
1018 {
1019   /* Assume gdb doesn't support XML until told otherwise.  If gdb sends
1020      "xmlRegisters=" with "i386" in its qSupported query, it supports x86
1021      XML target descriptions.  */
1022   use_xml = 0;
1023 
1024   for (const char *feature : features)
1025     {
1026       if (startswith (feature, "xmlRegisters="))
1027 	{
1028 	  char *copy = xstrdup (feature + 13);
1029 
1030 	  char *saveptr;
1031 	  for (char *p = strtok_r (copy, ",", &saveptr);
1032 	       p != NULL;
1033 	       p = strtok_r (NULL, ",", &saveptr))
1034 	    {
1035 	      if (strcmp (p, "i386") == 0)
1036 		{
1037 		  use_xml = 1;
1038 		  break;
1039 		}
1040 	    }
1041 
1042 	  free (copy);
1043 	}
1044     }
1045 
1046   update_xmltarget ();
1047 }
1048 
1049 /* Common for x86/x86-64.  */
1050 
1051 static struct regsets_info x86_regsets_info =
1052   {
1053     x86_regsets, /* regsets */
1054     0, /* num_regsets */
1055     NULL, /* disabled_regsets */
1056   };
1057 
1058 #ifdef __x86_64__
1059 static struct regs_info amd64_linux_regs_info =
1060   {
1061     NULL, /* regset_bitmap */
1062     NULL, /* usrregs_info */
1063     &x86_regsets_info
1064   };
1065 #endif
1066 static struct usrregs_info i386_linux_usrregs_info =
1067   {
1068     I386_NUM_REGS,
1069     i386_regmap,
1070   };
1071 
1072 static struct regs_info i386_linux_regs_info =
1073   {
1074     NULL, /* regset_bitmap */
1075     &i386_linux_usrregs_info,
1076     &x86_regsets_info
1077   };
1078 
1079 const regs_info *
1080 x86_target::get_regs_info ()
1081 {
1082 #ifdef __x86_64__
1083   if (is_64bit_tdesc (current_thread))
1084     return &amd64_linux_regs_info;
1085   else
1086 #endif
1087     return &i386_linux_regs_info;
1088 }
1089 
1090 /* Initialize the target description for the architecture of the
1091    inferior.  */
1092 
1093 void
1094 x86_target::low_arch_setup ()
1095 {
1096   current_process ()->tdesc = x86_linux_read_description ();
1097 }
1098 
1099 bool
1100 x86_target::low_supports_catch_syscall ()
1101 {
1102   return true;
1103 }
1104 
1105 /* Fill *SYSNO with the number of the syscall that was trapped.  This
1106    should only be called if LWP got a SYSCALL_SIGTRAP.  */
1107 
1108 void
1109 x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
1110 {
1111   int use_64bit = register_size (regcache->tdesc, 0) == 8;
1112 
1113   if (use_64bit)
1114     {
1115       long l_sysno;
1116 
1117       collect_register_by_name (regcache, "orig_rax", &l_sysno);
1118       *sysno = (int) l_sysno;
1119     }
1120   else
1121     collect_register_by_name (regcache, "orig_eax", sysno);
1122 }
1123 
1124 bool
1125 x86_target::supports_tracepoints ()
1126 {
1127   return true;
1128 }
1129 
1130 static void
1131 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1132 {
1133   target_write_memory (*to, buf, len);
1134   *to += len;
1135 }
1136 
1137 static int
1138 push_opcode (unsigned char *buf, const char *op)
1139 {
1140   unsigned char *buf_org = buf;
1141 
1142   while (1)
1143     {
1144       char *endptr;
1145       unsigned long ul = strtoul (op, &endptr, 16);
1146 
1147       if (endptr == op)
1148 	break;
1149 
1150       *buf++ = ul;
1151       op = endptr;
1152     }
1153 
1154   return buf - buf_org;
1155 }
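/* A usage sketch: push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 ("mov %rsp,%rsi") into BUF and returns 3.  */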
1156 
1157 #ifdef __x86_64__
1158 
1159 /* Build a jump pad that saves registers and calls a collection
1160    function.  Writes the jump instruction that enters the jump pad
1161    into JJUMPAD_INSN.  The caller is responsible for writing it in at
1162    the tracepoint address.  */
1163 
1164 static int
1165 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1166 					CORE_ADDR collector,
1167 					CORE_ADDR lockaddr,
1168 					ULONGEST orig_size,
1169 					CORE_ADDR *jump_entry,
1170 					CORE_ADDR *trampoline,
1171 					ULONGEST *trampoline_size,
1172 					unsigned char *jjump_pad_insn,
1173 					ULONGEST *jjump_pad_insn_size,
1174 					CORE_ADDR *adjusted_insn_addr,
1175 					CORE_ADDR *adjusted_insn_addr_end,
1176 					char *err)
1177 {
1178   unsigned char buf[40];
1179   int i, offset;
1180   int64_t loffset;
1181 
1182   CORE_ADDR buildaddr = *jump_entry;
1183 
1184   /* Build the jump pad.  */
1185 
1186   /* First, do tracepoint data collection.  Save registers.  */
1187   i = 0;
1188   /* Need to ensure stack pointer saved first.  */
1189   buf[i++] = 0x54; /* push %rsp */
1190   buf[i++] = 0x55; /* push %rbp */
1191   buf[i++] = 0x57; /* push %rdi */
1192   buf[i++] = 0x56; /* push %rsi */
1193   buf[i++] = 0x52; /* push %rdx */
1194   buf[i++] = 0x51; /* push %rcx */
1195   buf[i++] = 0x53; /* push %rbx */
1196   buf[i++] = 0x50; /* push %rax */
1197   buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1198   buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1199   buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1200   buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1201   buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1202   buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1203   buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1204   buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1205   buf[i++] = 0x9c; /* pushfq */
1206   buf[i++] = 0x48; /* movabs <addr>,%rdi */
1207   buf[i++] = 0xbf;
1208   memcpy (buf + i, &tpaddr, 8);
1209   i += 8;
1210   buf[i++] = 0x57; /* push %rdi */
1211   append_insns (&buildaddr, i, buf);
1212 
1213   /* Stack space for the collecting_t object.  */
1214   i = 0;
1215   i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
1216   i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
1217   memcpy (buf + i, &tpoint, 8);
1218   i += 8;
1219   i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
1220   i += push_opcode (&buf[i],
1221 		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1222   i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1223   append_insns (&buildaddr, i, buf);
1224 
1225   /* spin-lock.  */
1226   i = 0;
1227   i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
1228   memcpy (&buf[i], (void *) &lockaddr, 8);
1229   i += 8;
1230   i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
1231   i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
1232   i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1233   i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
1234   i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
1235   append_insns (&buildaddr, i, buf);
1236 
1237   /* Set up the gdb_collect call.  */
1238   /* At this point, (stack pointer + 0x18) is the base of our saved
1239      register block.  */
1240 
1241   i = 0;
1242   i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
1243   i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */
1244 
1245   /* tpoint address may be 64-bit wide.  */
1246   i += push_opcode (&buf[i], "48 bf");		/* movabs <addr>,%rdi */
1247   memcpy (buf + i, &tpoint, 8);
1248   i += 8;
1249   append_insns (&buildaddr, i, buf);
1250 
1251   /* The collector function, being in the shared library, may be more
1252      than 31 bits away from the jump pad.  */
1253   i = 0;
1254   i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
1255   memcpy (buf + i, &collector, 8);
1256   i += 8;
1257   i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
1258   append_insns (&buildaddr, i, buf);
1259 
1260   /* Clear the spin-lock.  */
1261   i = 0;
1262   i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
1263   i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
1264   memcpy (buf + i, &lockaddr, 8);
1265   i += 8;
1266   append_insns (&buildaddr, i, buf);
1267 
1268   /* Remove the stack space that had been used for the collecting_t object.  */
1269   i = 0;
1270   i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
1271   append_insns (&buildaddr, i, buf);
1272 
1273   /* Restore register state.  */
1274   i = 0;
1275   buf[i++] = 0x48; /* add $0x8,%rsp */
1276   buf[i++] = 0x83;
1277   buf[i++] = 0xc4;
1278   buf[i++] = 0x08;
1279   buf[i++] = 0x9d; /* popfq */
1280   buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1281   buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1282   buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1283   buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1284   buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1285   buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1286   buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1287   buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1288   buf[i++] = 0x58; /* pop %rax */
1289   buf[i++] = 0x5b; /* pop %rbx */
1290   buf[i++] = 0x59; /* pop %rcx */
1291   buf[i++] = 0x5a; /* pop %rdx */
1292   buf[i++] = 0x5e; /* pop %rsi */
1293   buf[i++] = 0x5f; /* pop %rdi */
1294   buf[i++] = 0x5d; /* pop %rbp */
1295   buf[i++] = 0x5c; /* pop %rsp */
1296   append_insns (&buildaddr, i, buf);
1297 
1298   /* Now, adjust the original instruction to execute in the jump
1299      pad.  */
1300   *adjusted_insn_addr = buildaddr;
1301   relocate_instruction (&buildaddr, tpaddr);
1302   *adjusted_insn_addr_end = buildaddr;
1303 
1304   /* Finally, write a jump back to the program.  */
1305 
1306   loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1307   if (loffset > INT_MAX || loffset < INT_MIN)
1308     {
1309       sprintf (err,
1310 	       "E.Jump back from jump pad too far from tracepoint "
1311 	       "(offset 0x%" PRIx64 " > int32).", loffset);
1312       return 1;
1313     }
1314 
1315   offset = (int) loffset;
1316   memcpy (buf, jump_insn, sizeof (jump_insn));
1317   memcpy (buf + 1, &offset, 4);
1318   append_insns (&buildaddr, sizeof (jump_insn), buf);
1319 
1320   /* The jump pad is now built.  Wire in a jump to our jump pad.  This
1321      is always done last (by our caller actually), so that we can
1322      install fast tracepoints with threads running.  This relies on
1323      the agent's atomic write support.  */
1324   loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1325   if (loffset > INT_MAX || loffset < INT_MIN)
1326     {
1327       sprintf (err,
1328 	       "E.Jump pad too far from tracepoint "
1329 	       "(offset 0x%" PRIx64 " > int32).", loffset);
1330       return 1;
1331     }
1332 
1333   offset = (int) loffset;
1334 
1335   memcpy (buf, jump_insn, sizeof (jump_insn));
1336   memcpy (buf + 1, &offset, 4);
1337   memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1338   *jjump_pad_insn_size = sizeof (jump_insn);
1339 
1340   /* Return the end address of our pad.  */
1341   *jump_entry = buildaddr;
1342 
1343   return 0;
1344 }
1345 
1346 #endif /* __x86_64__ */
1347 
1348 /* Build a jump pad that saves registers and calls a collection
1349    function.  Writes the jump instruction that enters the jump pad
1350    into JJUMPAD_INSN.  The caller is responsible for writing it in at
1351    the tracepoint address.  */
1352 
1353 static int
1354 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1355 				       CORE_ADDR collector,
1356 				       CORE_ADDR lockaddr,
1357 				       ULONGEST orig_size,
1358 				       CORE_ADDR *jump_entry,
1359 				       CORE_ADDR *trampoline,
1360 				       ULONGEST *trampoline_size,
1361 				       unsigned char *jjump_pad_insn,
1362 				       ULONGEST *jjump_pad_insn_size,
1363 				       CORE_ADDR *adjusted_insn_addr,
1364 				       CORE_ADDR *adjusted_insn_addr_end,
1365 				       char *err)
1366 {
1367   unsigned char buf[0x100];
1368   int i, offset;
1369   CORE_ADDR buildaddr = *jump_entry;
1370 
1371   /* Build the jump pad.  */
1372 
1373   /* First, do tracepoint data collection.  Save registers.  */
1374   i = 0;
1375   buf[i++] = 0x60; /* pushad */
1376   buf[i++] = 0x68; /* push tpaddr aka $pc */
1377   *((int *)(buf + i)) = (int) tpaddr;
1378   i += 4;
1379   buf[i++] = 0x9c; /* pushf */
1380   buf[i++] = 0x1e; /* push %ds */
1381   buf[i++] = 0x06; /* push %es */
1382   buf[i++] = 0x0f; /* push %fs */
1383   buf[i++] = 0xa0;
1384   buf[i++] = 0x0f; /* push %gs */
1385   buf[i++] = 0xa8;
1386   buf[i++] = 0x16; /* push %ss */
1387   buf[i++] = 0x0e; /* push %cs */
1388   append_insns (&buildaddr, i, buf);
1389 
1390   /* Stack space for the collecting_t object.  */
1391   i = 0;
1392   i += push_opcode (&buf[i], "83 ec 08");	/* sub    $0x8,%esp */
1393 
1394   /* Build the object.  */
1395   i += push_opcode (&buf[i], "b8");		/* mov    <tpoint>,%eax */
1396   memcpy (buf + i, &tpoint, 4);
1397   i += 4;
1398   i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */
1399 
1400   i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1401   i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
1402   append_insns (&buildaddr, i, buf);
1403 
1404   /* spin-lock.  Note this uses cmpxchg, which leaves the original i386
1405      behind.  If we cared about that, this could use xchg instead.  */
1406 
1407   i = 0;
1408   i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
1409   i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
1410 						   %esp,<lockaddr> */
1411   memcpy (&buf[i], (void *) &lockaddr, 4);
1412   i += 4;
1413   i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
1414   i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
1415   append_insns (&buildaddr, i, buf);
1416 
1417 
1418   /* Set up arguments to the gdb_collect call.  */
1419   i = 0;
1420   i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
1421   i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
1422   i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
1423   append_insns (&buildaddr, i, buf);
1424 
1425   i = 0;
1426   i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
1427   append_insns (&buildaddr, i, buf);
1428 
1429   i = 0;
1430   i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
1431   memcpy (&buf[i], (void *) &tpoint, 4);
1432   i += 4;
1433   append_insns (&buildaddr, i, buf);
1434 
1435   buf[0] = 0xe8; /* call <reladdr> */
1436   offset = collector - (buildaddr + sizeof (jump_insn));
1437   memcpy (buf + 1, &offset, 4);
1438   append_insns (&buildaddr, 5, buf);
1439   /* Clean up after the call.  */
1440   buf[0] = 0x83; /* add $0x8,%esp */
1441   buf[1] = 0xc4;
1442   buf[2] = 0x08;
1443   append_insns (&buildaddr, 3, buf);
1444 
1445 
1446   /* Clear the spin-lock.  This would need the LOCK prefix on older
1447      broken archs.  */
1448   i = 0;
1449   i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
1450   i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
1451   memcpy (buf + i, &lockaddr, 4);
1452   i += 4;
1453   append_insns (&buildaddr, i, buf);
1454 
1455 
1456   /* Remove the stack space that had been used for the collecting_t object.  */
1457   i = 0;
1458   i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
1459   append_insns (&buildaddr, i, buf);
1460 
1461   i = 0;
1462   buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1463   buf[i++] = 0xc4;
1464   buf[i++] = 0x04;
1465   buf[i++] = 0x17; /* pop %ss */
1466   buf[i++] = 0x0f; /* pop %gs */
1467   buf[i++] = 0xa9;
1468   buf[i++] = 0x0f; /* pop %fs */
1469   buf[i++] = 0xa1;
1470   buf[i++] = 0x07; /* pop %es */
1471   buf[i++] = 0x1f; /* pop %ds */
1472   buf[i++] = 0x9d; /* popf */
1473   buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1474   buf[i++] = 0xc4;
1475   buf[i++] = 0x04;
1476   buf[i++] = 0x61; /* popad */
1477   append_insns (&buildaddr, i, buf);
1478 
1479   /* Now, adjust the original instruction to execute in the jump
1480      pad.  */
1481   *adjusted_insn_addr = buildaddr;
1482   relocate_instruction (&buildaddr, tpaddr);
1483   *adjusted_insn_addr_end = buildaddr;
1484 
1485   /* Write the jump back to the program.  */
1486   offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1487   memcpy (buf, jump_insn, sizeof (jump_insn));
1488   memcpy (buf + 1, &offset, 4);
1489   append_insns (&buildaddr, sizeof (jump_insn), buf);
1490 
1491   /* The jump pad is now built.  Wire in a jump to our jump pad.  This
1492      is always done last (by our caller actually), so that we can
1493      install fast tracepoints with threads running.  This relies on
1494      the agent's atomic write support.  */
1495   if (orig_size == 4)
1496     {
1497       /* Create a trampoline.  */
1498       *trampoline_size = sizeof (jump_insn);
1499       if (!claim_trampoline_space (*trampoline_size, trampoline))
1500 	{
1501 	  /* No trampoline space available.  */
1502 	  strcpy (err,
1503 		  "E.Cannot allocate trampoline space needed for fast "
1504 		  "tracepoints on 4-byte instructions.");
1505 	  return 1;
1506 	}
1507 
1508       offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1509       memcpy (buf, jump_insn, sizeof (jump_insn));
1510       memcpy (buf + 1, &offset, 4);
1511       target_write_memory (*trampoline, buf, sizeof (jump_insn));
1512 
1513       /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
1514       offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1515       memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1516       memcpy (buf + 2, &offset, 2);
1517       memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1518       *jjump_pad_insn_size = sizeof (small_jump_insn);
1519     }
1520   else
1521     {
1522       /* Else use a 32-bit relative jump instruction.  */
1523       offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1524       memcpy (buf, jump_insn, sizeof (jump_insn));
1525       memcpy (buf + 1, &offset, 4);
1526       memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1527       *jjump_pad_insn_size = sizeof (jump_insn);
1528     }
1529 
1530   /* Return the end address of our pad.  */
1531   *jump_entry = buildaddr;
1532 
1533   return 0;
1534 }
1535 
1536 bool
1537 x86_target::supports_fast_tracepoints ()
1538 {
1539   return true;
1540 }
1541 
1542 int
1543 x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1544 					      CORE_ADDR tpaddr,
1545 					      CORE_ADDR collector,
1546 					      CORE_ADDR lockaddr,
1547 					      ULONGEST orig_size,
1548 					      CORE_ADDR *jump_entry,
1549 					      CORE_ADDR *trampoline,
1550 					      ULONGEST *trampoline_size,
1551 					      unsigned char *jjump_pad_insn,
1552 					      ULONGEST *jjump_pad_insn_size,
1553 					      CORE_ADDR *adjusted_insn_addr,
1554 					      CORE_ADDR *adjusted_insn_addr_end,
1555 					      char *err)
1556 {
1557 #ifdef __x86_64__
1558   if (is_64bit_tdesc (current_thread))
1559     return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1560 						   collector, lockaddr,
1561 						   orig_size, jump_entry,
1562 						   trampoline, trampoline_size,
1563 						   jjump_pad_insn,
1564 						   jjump_pad_insn_size,
1565 						   adjusted_insn_addr,
1566 						   adjusted_insn_addr_end,
1567 						   err);
1568 #endif
1569 
1570   return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1571 						collector, lockaddr,
1572 						orig_size, jump_entry,
1573 						trampoline, trampoline_size,
1574 						jjump_pad_insn,
1575 						jjump_pad_insn_size,
1576 						adjusted_insn_addr,
1577 						adjusted_insn_addr_end,
1578 						err);
1579 }
1580 
1581 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1582    architectures.  */
1583 
1584 int
1585 x86_target::get_min_fast_tracepoint_insn_len ()
1586 {
1587   static int warned_about_fast_tracepoints = 0;
1588 
1589 #ifdef __x86_64__
1590   /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
1591       used for fast tracepoints.  */
1592   if (is_64bit_tdesc (current_thread))
1593     return 5;
1594 #endif
1595 
1596   if (agent_loaded_p ())
1597     {
1598       char errbuf[IPA_BUFSIZ];
1599 
1600       errbuf[0] = '\0';
1601 
1602       /* On x86, if trampolines are available, then 4-byte jump instructions
1603 	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1604 	 with a 4-byte offset are used instead.  */
1605       if (have_fast_tracepoint_trampoline_buffer (errbuf))
1606 	return 4;
1607       else
1608 	{
1609	  /* GDB has no channel to explain to the user why a shorter fast
1610 	     tracepoint is not possible, but at least make GDBserver
1611 	     mention that something has gone awry.  */
1612 	  if (!warned_about_fast_tracepoints)
1613 	    {
1614 	      warning ("4-byte fast tracepoints not available; %s", errbuf);
1615 	      warned_about_fast_tracepoints = 1;
1616 	    }
1617 	  return 5;
1618 	}
1619     }
1620   else
1621     {
1622       /* Indicate that the minimum length is currently unknown since the IPA
1623 	 has not loaded yet.  */
1624       return 0;
1625     }
1626 }
1627 
1628 static void
1629 add_insns (unsigned char *start, int len)
1630 {
1631   CORE_ADDR buildaddr = current_insn_ptr;
1632 
1633   threads_debug_printf ("Adding %d bytes of insn at %s",
1634 			len, paddress (buildaddr));
1635 
1636   append_insns (&buildaddr, len, start);
1637   current_insn_ptr = buildaddr;
1638 }
1639 
1640 /* Our general strategy for emitting code is to avoid specifying raw
1641    bytes whenever possible, and instead copy a block of inline asm
1642    that is embedded in the function.  This is a little messy, because
1643    we need to keep the compiler from discarding what looks like dead
1644    code, plus suppress various warnings.  */
1645 
1646 #define EMIT_ASM(NAME, INSNS)						\
1647   do									\
1648     {									\
1649       extern unsigned char start_ ## NAME, end_ ## NAME;		\
1650       add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
1651       __asm__ ("jmp end_" #NAME "\n"					\
1652 	       "\t" "start_" #NAME ":"					\
1653 	       "\t" INSNS "\n"						\
1654 	       "\t" "end_" #NAME ":");					\
1655     } while (0)
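/* A sketch of what the macro achieves, using a definition from further
   down: EMIT_ASM (amd64_pop, "pop %rax") assembles the instruction
   between the start_amd64_pop and end_amd64_pop labels -- the single
   byte 0x58 -- and add_insns copies exactly those bytes into the jump
   pad at current_insn_ptr.  */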
1656 
1657 #ifdef __x86_64__
1658 
1659 #define EMIT_ASM32(NAME,INSNS)						\
1660   do									\
1661     {									\
1662       extern unsigned char start_ ## NAME, end_ ## NAME;		\
1663       add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
1664       __asm__ (".code32\n"						\
1665 	       "\t" "jmp end_" #NAME "\n"				\
1666 	       "\t" "start_" #NAME ":\n"				\
1667 	       "\t" INSNS "\n"						\
1668 	       "\t" "end_" #NAME ":\n"					\
1669 	       ".code64\n");						\
1670     } while (0)
1671 
1672 #else
1673 
1674 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1675 
1676 #endif
1677 
1678 #ifdef __x86_64__
1679 
1680 static void
1681 amd64_emit_prologue (void)
1682 {
1683   EMIT_ASM (amd64_prologue,
1684 	    "pushq %rbp\n\t"
1685 	    "movq %rsp,%rbp\n\t"
1686 	    "sub $0x20,%rsp\n\t"
1687 	    "movq %rdi,-8(%rbp)\n\t"
1688 	    "movq %rsi,-16(%rbp)");
1689 }
1690 
1691 
1692 static void
1693 amd64_emit_epilogue (void)
1694 {
1695   EMIT_ASM (amd64_epilogue,
1696 	    "movq -16(%rbp),%rdi\n\t"
1697 	    "movq %rax,(%rdi)\n\t"
1698 	    "xor %rax,%rax\n\t"
1699 	    "leave\n\t"
1700 	    "ret");
1701 }
1702 
1703 static void
1704 amd64_emit_add (void)
1705 {
1706   EMIT_ASM (amd64_add,
1707 	    "add (%rsp),%rax\n\t"
1708 	    "lea 0x8(%rsp),%rsp");
1709 }
1710 
1711 static void
1712 amd64_emit_sub (void)
1713 {
1714   EMIT_ASM (amd64_sub,
1715 	    "sub %rax,(%rsp)\n\t"
1716 	    "pop %rax");
1717 }
1718 
1719 static void
1720 amd64_emit_mul (void)
1721 {
1722   emit_error = 1;
1723 }
1724 
1725 static void
1726 amd64_emit_lsh (void)
1727 {
1728   emit_error = 1;
1729 }
1730 
1731 static void
1732 amd64_emit_rsh_signed (void)
1733 {
1734   emit_error = 1;
1735 }
1736 
1737 static void
1738 amd64_emit_rsh_unsigned (void)
1739 {
1740   emit_error = 1;
1741 }
1742 
1743 static void
1744 amd64_emit_ext (int arg)
1745 {
1746   switch (arg)
1747     {
1748     case 8:
1749       EMIT_ASM (amd64_ext_8,
1750 		"cbtw\n\t"
1751 		"cwtl\n\t"
1752 		"cltq");
1753       break;
1754     case 16:
1755       EMIT_ASM (amd64_ext_16,
1756 		"cwtl\n\t"
1757 		"cltq");
1758       break;
1759     case 32:
1760       EMIT_ASM (amd64_ext_32,
1761 		"cltq");
1762       break;
1763     default:
1764       emit_error = 1;
1765     }
1766 }
1767 
1768 static void
1769 amd64_emit_log_not (void)
1770 {
1771   EMIT_ASM (amd64_log_not,
1772 	    "test %rax,%rax\n\t"
1773 	    "sete %cl\n\t"
1774 	    "movzbq %cl,%rax");
1775 }
1776 
1777 static void
1778 amd64_emit_bit_and (void)
1779 {
1780   EMIT_ASM (amd64_and,
1781 	    "and (%rsp),%rax\n\t"
1782 	    "lea 0x8(%rsp),%rsp");
1783 }
1784 
1785 static void
1786 amd64_emit_bit_or (void)
1787 {
1788   EMIT_ASM (amd64_or,
1789 	    "or (%rsp),%rax\n\t"
1790 	    "lea 0x8(%rsp),%rsp");
1791 }
1792 
1793 static void
1794 amd64_emit_bit_xor (void)
1795 {
1796   EMIT_ASM (amd64_xor,
1797 	    "xor (%rsp),%rax\n\t"
1798 	    "lea 0x8(%rsp),%rsp");
1799 }
1800 
1801 static void
1802 amd64_emit_bit_not (void)
1803 {
1804   EMIT_ASM (amd64_bit_not,
1805 	    "xorq $0xffffffffffffffff,%rax");
1806 }
1807 
1808 static void
1809 amd64_emit_equal (void)
1810 {
1811   EMIT_ASM (amd64_equal,
1812 	    "cmp %rax,(%rsp)\n\t"
1813 	    "je .Lamd64_equal_true\n\t"
1814 	    "xor %rax,%rax\n\t"
1815 	    "jmp .Lamd64_equal_end\n\t"
1816 	    ".Lamd64_equal_true:\n\t"
1817 	    "mov $0x1,%rax\n\t"
1818 	    ".Lamd64_equal_end:\n\t"
1819 	    "lea 0x8(%rsp),%rsp");
1820 }
1821 
1822 static void
1823 amd64_emit_less_signed (void)
1824 {
1825   EMIT_ASM (amd64_less_signed,
1826 	    "cmp %rax,(%rsp)\n\t"
1827 	    "jl .Lamd64_less_signed_true\n\t"
1828 	    "xor %rax,%rax\n\t"
1829 	    "jmp .Lamd64_less_signed_end\n\t"
1830 	    ".Lamd64_less_signed_true:\n\t"
1831 	    "mov $1,%rax\n\t"
1832 	    ".Lamd64_less_signed_end:\n\t"
1833 	    "lea 0x8(%rsp),%rsp");
1834 }
1835 
1836 static void
1837 amd64_emit_less_unsigned (void)
1838 {
1839   EMIT_ASM (amd64_less_unsigned,
1840 	    "cmp %rax,(%rsp)\n\t"
1841 	    "jb .Lamd64_less_unsigned_true\n\t"
1842 	    "xor %rax,%rax\n\t"
1843 	    "jmp .Lamd64_less_unsigned_end\n\t"
1844 	    ".Lamd64_less_unsigned_true:\n\t"
1845 	    "mov $1,%rax\n\t"
1846 	    ".Lamd64_less_unsigned_end:\n\t"
1847 	    "lea 0x8(%rsp),%rsp");
1848 }
1849 
1850 static void
1851 amd64_emit_ref (int size)
1852 {
1853   switch (size)
1854     {
1855     case 1:
1856       EMIT_ASM (amd64_ref1,
1857 		"movb (%rax),%al");
1858       break;
1859     case 2:
1860       EMIT_ASM (amd64_ref2,
1861 		"movw (%rax),%ax");
1862       break;
1863     case 4:
1864       EMIT_ASM (amd64_ref4,
1865 		"movl (%rax),%eax");
1866       break;
1867     case 8:
1868       EMIT_ASM (amd64_ref8,
1869 		"movq (%rax),%rax");
1870       break;
1871     }
1872 }
1873 
1874 static void
1875 amd64_emit_if_goto (int *offset_p, int *size_p)
1876 {
1877   EMIT_ASM (amd64_if_goto,
1878 	    "mov %rax,%rcx\n\t"
1879 	    "pop %rax\n\t"
1880 	    "cmp $0,%rcx\n\t"
1881 	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1882   if (offset_p)
1883     *offset_p = 10;
1884   if (size_p)
1885     *size_p = 4;
1886 }
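
/* The ".byte 0x0f, 0x85" pair is a "jne rel32" with a zeroed
   placeholder displacement.  The preceding instructions occupy
   3 (mov) + 1 (pop) + 4 (cmp) + 2 (jne opcode) = 10 bytes, hence
   *OFFSET_P = 10: that is where the 4-byte field sits for
   amd64_write_goto_address to patch later.  */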
1887 
1888 static void
1889 amd64_emit_goto (int *offset_p, int *size_p)
1890 {
1891   EMIT_ASM (amd64_goto,
1892 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1893   if (offset_p)
1894     *offset_p = 1;
1895   if (size_p)
1896     *size_p = 4;
1897 }
1898 
1899 static void
1900 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1901 {
1902   int diff = (to - (from + size));
1903   unsigned char buf[sizeof (int)];
1904 
1905   if (size != 4)
1906     {
1907       emit_error = 1;
1908       return;
1909     }
1910 
1911   memcpy (buf, &diff, sizeof (int));
1912   target_write_memory (from, buf, sizeof (int));
1913 }
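
/* Worked example (hypothetical addresses): if the 4-byte field lives
   at FROM = 0x1000 and the jump target is TO = 0x1020, the stored
   displacement is 0x1020 - (0x1000 + 4) = 0x1c, because the processor
   adds the displacement to the address just past the field.  */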
1914 
1915 static void
1916 amd64_emit_const (LONGEST num)
1917 {
1918   unsigned char buf[16];
1919   int i;
1920   CORE_ADDR buildaddr = current_insn_ptr;
1921 
1922   i = 0;
1923   buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
1924   memcpy (&buf[i], &num, sizeof (num));
1925   i += 8;
1926   append_insns (&buildaddr, i, buf);
1927   current_insn_ptr = buildaddr;
1928 }
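
/* The 10-byte "movabs" form carries a full 64-bit immediate, so any
   constant can be loaded here without range checks.  */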
1929 
1930 static void
1931 amd64_emit_call (CORE_ADDR fn)
1932 {
1933   unsigned char buf[16];
1934   int i;
1935   CORE_ADDR buildaddr;
1936   LONGEST offset64;
1937 
1938   /* The destination function, being in a shared library, may be
1939      more than 31 bits away from the compiled code pad.  */
1940 
1941   buildaddr = current_insn_ptr;
1942 
1943   offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1944 
1945   i = 0;
1946 
1947   if (offset64 > INT_MAX || offset64 < INT_MIN)
1948     {
1949       /* The offset is too large for a direct call, so call the
1950 	 function through a register.  %rdx is call-clobbered and not
1951 	 used to pass arguments here, so no push/pop is needed.  */
1952       buf[i++] = 0x48; /* movabs $fn,%rdx */
1953       buf[i++] = 0xba;
1954       memcpy (buf + i, &fn, 8);
1955       i += 8;
1956       buf[i++] = 0xff; /* callq *%rdx */
1957       buf[i++] = 0xd2;
1958     }
1959   else
1960     {
1961       int offset32 = offset64; /* The range check above ensures this fits.  */
1962 
1963       buf[i++] = 0xe8; /* call <reladdr> */
1964       memcpy (buf + i, &offset32, 4);
1965       i += 4;
1966     }
1967 
1968   append_insns (&buildaddr, i, buf);
1969   current_insn_ptr = buildaddr;
1970 }
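
/* The direct form is 5 bytes (0xe8 plus rel32); the indirect form is
   12 bytes (10-byte movabs plus a 2-byte register call).  The
   INT_MIN/INT_MAX test above decides which one is usable.  */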
1971 
1972 static void
1973 amd64_emit_reg (int reg)
1974 {
1975   unsigned char buf[16];
1976   int i;
1977   CORE_ADDR buildaddr;
1978 
1979   /* Assume raw_regs is still in %rdi.  */
1980   buildaddr = current_insn_ptr;
1981   i = 0;
1982   buf[i++] = 0xbe; /* mov $<n>,%esi */
1983   memcpy (&buf[i], &reg, sizeof (reg));
1984   i += 4;
1985   append_insns (&buildaddr, i, buf);
1986   current_insn_ptr = buildaddr;
1987   amd64_emit_call (get_raw_reg_func_addr ());
1988 }
1989 
1990 static void
1991 amd64_emit_pop (void)
1992 {
1993   EMIT_ASM (amd64_pop,
1994 	    "pop %rax");
1995 }
1996 
1997 static void
1998 amd64_emit_stack_flush (void)
1999 {
2000   EMIT_ASM (amd64_stack_flush,
2001 	    "push %rax");
2002 }
2003 
2004 static void
2005 amd64_emit_zero_ext (int arg)
2006 {
2007   switch (arg)
2008     {
2009     case 8:
2010       EMIT_ASM (amd64_zero_ext_8,
2011 		"and $0xff,%rax");
2012       break;
2013     case 16:
2014       EMIT_ASM (amd64_zero_ext_16,
2015 		"and $0xffff,%rax");
2016       break;
2017     case 32:
2018       EMIT_ASM (amd64_zero_ext_32,
2019 		"mov $0xffffffff,%rcx\n\t"
2020 		"and %rcx,%rax");
2021       break;
2022     default:
2023       emit_error = 1;
2024     }
2025 }
2026 
2027 static void
2028 amd64_emit_swap (void)
2029 {
2030   EMIT_ASM (amd64_swap,
2031 	    "mov %rax,%rcx\n\t"
2032 	    "pop %rax\n\t"
2033 	    "push %rcx");
2034 }
2035 
2036 static void
2037 amd64_emit_stack_adjust (int n)
2038 {
2039   unsigned char buf[16];
2040   int i;
2041   CORE_ADDR buildaddr = current_insn_ptr;
2042 
2043   i = 0;
2044   buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2045   buf[i++] = 0x8d;
2046   buf[i++] = 0x64;
2047   buf[i++] = 0x24;
2048   /* This only handles adjustments up to 15 (N * 8 must fit in a signed byte).  */
2049   buf[i++] = n * 8;
2050   append_insns (&buildaddr, i, buf);
2051   current_insn_ptr = buildaddr;
2052 }
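
/* The emitted "lea disp8(%rsp),%rsp" form has a one-byte signed
   displacement, which is what bounds N above.  */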
2053 
2054 /* FN's prototype is `LONGEST(*fn)(int)'.  */
2055 
2056 static void
2057 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2058 {
2059   unsigned char buf[16];
2060   int i;
2061   CORE_ADDR buildaddr;
2062 
2063   buildaddr = current_insn_ptr;
2064   i = 0;
2065   buf[i++] = 0xbf; /* movl $<n>,%edi */
2066   memcpy (&buf[i], &arg1, sizeof (arg1));
2067   i += 4;
2068   append_insns (&buildaddr, i, buf);
2069   current_insn_ptr = buildaddr;
2070   amd64_emit_call (fn);
2071 }
2072 
2073 /* FN's prototype is `void(*fn)(int,LONGEST)'.  */
2074 
2075 static void
2076 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2077 {
2078   unsigned char buf[16];
2079   int i;
2080   CORE_ADDR buildaddr;
2081 
2082   buildaddr = current_insn_ptr;
2083   i = 0;
2084   buf[i++] = 0xbf; /* movl $<n>,%edi */
2085   memcpy (&buf[i], &arg1, sizeof (arg1));
2086   i += 4;
2087   append_insns (&buildaddr, i, buf);
2088   current_insn_ptr = buildaddr;
2089   EMIT_ASM (amd64_void_call_2_a,
2090 	    /* Save away a copy of the stack top.  */
2091 	    "push %rax\n\t"
2092 	    /* Also pass top as the second argument.  */
2093 	    "mov %rax,%rsi");
2094   amd64_emit_call (fn);
2095   EMIT_ASM (amd64_void_call_2_b,
2096 	    /* Restore the stack top; %rax may have been trashed.  */
2097 	    "pop %rax");
2098 }
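
/* Under the SysV convention ARG1 travels in %edi and the stack top in
   %rsi; %rax is saved around the call because the callee may clobber
   it.  */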
2099 
2100 static void
2101 amd64_emit_eq_goto (int *offset_p, int *size_p)
2102 {
2103   EMIT_ASM (amd64_eq,
2104 	    "cmp %rax,(%rsp)\n\t"
2105 	    "jne .Lamd64_eq_fallthru\n\t"
2106 	    "lea 0x8(%rsp),%rsp\n\t"
2107 	    "pop %rax\n\t"
2108 	    /* jmp, but don't trust the assembler to choose the right jump */
2109 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2110 	    ".Lamd64_eq_fallthru:\n\t"
2111 	    "lea 0x8(%rsp),%rsp\n\t"
2112 	    "pop %rax");
2113 
2114   if (offset_p)
2115     *offset_p = 13;
2116   if (size_p)
2117     *size_p = 4;
2118 }
2119 
2120 static void
2121 amd64_emit_ne_goto (int *offset_p, int *size_p)
2122 {
2123   EMIT_ASM (amd64_ne,
2124 	    "cmp %rax,(%rsp)\n\t"
2125 	    "je .Lamd64_ne_fallthru\n\t"
2126 	    "lea 0x8(%rsp),%rsp\n\t"
2127 	    "pop %rax\n\t"
2128 	    /* jmp, but don't trust the assembler to choose the right jump */
2129 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2130 	    ".Lamd64_ne_fallthru:\n\t"
2131 	    "lea 0x8(%rsp),%rsp\n\t"
2132 	    "pop %rax");
2133 
2134   if (offset_p)
2135     *offset_p = 13;
2136   if (size_p)
2137     *size_p = 4;
2138 }
2139 
2140 static void
2141 amd64_emit_lt_goto (int *offset_p, int *size_p)
2142 {
2143   EMIT_ASM (amd64_lt,
2144 	    "cmp %rax,(%rsp)\n\t"
2145 	    "jnl .Lamd64_lt_fallthru\n\t"
2146 	    "lea 0x8(%rsp),%rsp\n\t"
2147 	    "pop %rax\n\t"
2148 	    /* jmp, but don't trust the assembler to choose the right jump */
2149 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2150 	    ".Lamd64_lt_fallthru:\n\t"
2151 	    "lea 0x8(%rsp),%rsp\n\t"
2152 	    "pop %rax");
2153 
2154   if (offset_p)
2155     *offset_p = 13;
2156   if (size_p)
2157     *size_p = 4;
2158 }
2159 
2160 static void
2161 amd64_emit_le_goto (int *offset_p, int *size_p)
2162 {
2163   EMIT_ASM (amd64_le,
2164 	    "cmp %rax,(%rsp)\n\t"
2165 	    "jnle .Lamd64_le_fallthru\n\t"
2166 	    "lea 0x8(%rsp),%rsp\n\t"
2167 	    "pop %rax\n\t"
2168 	    /* jmp, but don't trust the assembler to choose the right jump */
2169 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2170 	    ".Lamd64_le_fallthru:\n\t"
2171 	    "lea 0x8(%rsp),%rsp\n\t"
2172 	    "pop %rax");
2173 
2174   if (offset_p)
2175     *offset_p = 13;
2176   if (size_p)
2177     *size_p = 4;
2178 }
2179 
2180 static void
2181 amd64_emit_gt_goto (int *offset_p, int *size_p)
2182 {
2183   EMIT_ASM (amd64_gt,
2184 	    "cmp %rax,(%rsp)\n\t"
2185 	    "jng .Lamd64_gt_fallthru\n\t"
2186 	    "lea 0x8(%rsp),%rsp\n\t"
2187 	    "pop %rax\n\t"
2188 	    /* jmp, but don't trust the assembler to choose the right jump */
2189 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2190 	    ".Lamd64_gt_fallthru:\n\t"
2191 	    "lea 0x8(%rsp),%rsp\n\t"
2192 	    "pop %rax");
2193 
2194   if (offset_p)
2195     *offset_p = 13;
2196   if (size_p)
2197     *size_p = 4;
2198 }
2199 
2200 static void
2201 amd64_emit_ge_goto (int *offset_p, int *size_p)
2202 {
2203   EMIT_ASM (amd64_ge,
2204 	    "cmp %rax,(%rsp)\n\t"
2205 	    "jnge .Lamd64_ge_fallthru\n\t"
2206 	    ".Lamd64_ge_jump:\n\t"
2207 	    "lea 0x8(%rsp),%rsp\n\t"
2208 	    "pop %rax\n\t"
2209 	    /* jmp, but don't trust the assembler to choose the right jump */
2210 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2211 	    ".Lamd64_ge_fallthru:\n\t"
2212 	    "lea 0x8(%rsp),%rsp\n\t"
2213 	    "pop %rax");
2214 
2215   if (offset_p)
2216     *offset_p = 13;
2217   if (size_p)
2218     *size_p = 4;
2219 }
2220 
2221 static emit_ops amd64_emit_ops =
2222   {
2223     amd64_emit_prologue,
2224     amd64_emit_epilogue,
2225     amd64_emit_add,
2226     amd64_emit_sub,
2227     amd64_emit_mul,
2228     amd64_emit_lsh,
2229     amd64_emit_rsh_signed,
2230     amd64_emit_rsh_unsigned,
2231     amd64_emit_ext,
2232     amd64_emit_log_not,
2233     amd64_emit_bit_and,
2234     amd64_emit_bit_or,
2235     amd64_emit_bit_xor,
2236     amd64_emit_bit_not,
2237     amd64_emit_equal,
2238     amd64_emit_less_signed,
2239     amd64_emit_less_unsigned,
2240     amd64_emit_ref,
2241     amd64_emit_if_goto,
2242     amd64_emit_goto,
2243     amd64_write_goto_address,
2244     amd64_emit_const,
2245     amd64_emit_call,
2246     amd64_emit_reg,
2247     amd64_emit_pop,
2248     amd64_emit_stack_flush,
2249     amd64_emit_zero_ext,
2250     amd64_emit_swap,
2251     amd64_emit_stack_adjust,
2252     amd64_emit_int_call_1,
2253     amd64_emit_void_call_2,
2254     amd64_emit_eq_goto,
2255     amd64_emit_ne_goto,
2256     amd64_emit_lt_goto,
2257     amd64_emit_le_goto,
2258     amd64_emit_gt_goto,
2259     amd64_emit_ge_goto
2260   };
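
/* Positional aggregate initialization: the entries above must stay in
   the same order as the fields of struct emit_ops.  */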
2261 
2262 #endif /* __x86_64__ */
2263 
2264 static void
2265 i386_emit_prologue (void)
2266 {
2267   EMIT_ASM32 (i386_prologue,
2268 	    "push %ebp\n\t"
2269 	    "mov %esp,%ebp\n\t"
2270 	    "push %ebx");
2271   /* At this point, the raw regs base address is at 8(%ebp), and the
2272      value pointer is at 12(%ebp).  */
2273 }
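
/* The i386 emitters differ from the amd64 ones in that each 64-bit
   stack entry is modelled as a register pair: low half in %eax, high
   half in %ebx, spilled to the hardware stack two words at a time
   with the low word at (%esp) (hence the add/adc pairs and double
   pops below).  */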
2274 
2275 static void
2276 i386_emit_epilogue (void)
2277 {
2278   EMIT_ASM32 (i386_epilogue,
2279 	    "mov 12(%ebp),%ecx\n\t"
2280 	    "mov %eax,(%ecx)\n\t"
2281 	    "mov %ebx,0x4(%ecx)\n\t"
2282 	    "xor %eax,%eax\n\t"
2283 	    "pop %ebx\n\t"
2284 	    "pop %ebp\n\t"
2285 	    "ret");
2286 }
2287 
2288 static void
2289 i386_emit_add (void)
2290 {
2291   EMIT_ASM32 (i386_add,
2292 	    "add (%esp),%eax\n\t"
2293 	    "adc 0x4(%esp),%ebx\n\t"
2294 	    "lea 0x8(%esp),%esp");
2295 }
2296 
2297 static void
2298 i386_emit_sub (void)
2299 {
2300   EMIT_ASM32 (i386_sub,
2301 	    "subl %eax,(%esp)\n\t"
2302 	    "sbbl %ebx,4(%esp)\n\t"
2303 	    "pop %eax\n\t"
2304 	    "pop %ebx\n\t");
2305 	    "pop %ebx");
2306 
2307 static void
2308 i386_emit_mul (void)
2309 {
2310   emit_error = 1;
2311 }
2312 
2313 static void
2314 i386_emit_lsh (void)
2315 {
2316   emit_error = 1;
2317 }
2318 
2319 static void
2320 i386_emit_rsh_signed (void)
2321 {
2322   emit_error = 1;
2323 }
2324 
2325 static void
2326 i386_emit_rsh_unsigned (void)
2327 {
2328   emit_error = 1;
2329 }
2330 
2331 static void
2332 i386_emit_ext (int arg)
2333 {
2334   switch (arg)
2335     {
2336     case 8:
2337       EMIT_ASM32 (i386_ext_8,
2338 		"cbtw\n\t"
2339 		"cwtl\n\t"
2340 		"movl %eax,%ebx\n\t"
2341 		"sarl $31,%ebx");
2342       break;
2343     case 16:
2344       EMIT_ASM32 (i386_ext_16,
2345 		"cwtl\n\t"
2346 		"movl %eax,%ebx\n\t"
2347 		"sarl $31,%ebx");
2348       break;
2349     case 32:
2350       EMIT_ASM32 (i386_ext_32,
2351 		"movl %eax,%ebx\n\t"
2352 		"sarl $31,%ebx");
2353       break;
2354     default:
2355       emit_error = 1;
2356     }
2357 }
2358 
2359 static void
2360 i386_emit_log_not (void)
2361 {
2362   EMIT_ASM32 (i386_log_not,
2363 	    "or %ebx,%eax\n\t"
2364 	    "test %eax,%eax\n\t"
2365 	    "sete %cl\n\t"
2366 	    "xor %ebx,%ebx\n\t"
2367 	    "movzbl %cl,%eax");
2368 }
2369 
2370 static void
2371 i386_emit_bit_and (void)
2372 {
2373   EMIT_ASM32 (i386_and,
2374 	    "and (%esp),%eax\n\t"
2375 	    "and 0x4(%esp),%ebx\n\t"
2376 	    "lea 0x8(%esp),%esp");
2377 }
2378 
2379 static void
2380 i386_emit_bit_or (void)
2381 {
2382   EMIT_ASM32 (i386_or,
2383 	    "or (%esp),%eax\n\t"
2384 	    "or 0x4(%esp),%ebx\n\t"
2385 	    "lea 0x8(%esp),%esp");
2386 }
2387 
2388 static void
2389 i386_emit_bit_xor (void)
2390 {
2391   EMIT_ASM32 (i386_xor,
2392 	    "xor (%esp),%eax\n\t"
2393 	    "xor 0x4(%esp),%ebx\n\t"
2394 	    "lea 0x8(%esp),%esp");
2395 }
2396 
2397 static void
2398 i386_emit_bit_not (void)
2399 {
2400   EMIT_ASM32 (i386_bit_not,
2401 	    "xor $0xffffffff,%eax\n\t"
2402 	    "xor $0xffffffff,%ebx");
2403 }
2404 
2405 static void
2406 i386_emit_equal (void)
2407 {
2408   EMIT_ASM32 (i386_equal,
2409 	    "cmpl %ebx,4(%esp)\n\t"
2410 	    "jne .Li386_equal_false\n\t"
2411 	    "cmpl %eax,(%esp)\n\t"
2412 	    "je .Li386_equal_true\n\t"
2413 	    ".Li386_equal_false:\n\t"
2414 	    "xor %eax,%eax\n\t"
2415 	    "jmp .Li386_equal_end\n\t"
2416 	    ".Li386_equal_true:\n\t"
2417 	    "mov $1,%eax\n\t"
2418 	    ".Li386_equal_end:\n\t"
2419 	    "xor %ebx,%ebx\n\t"
2420 	    "lea 0x8(%esp),%esp");
2421 }
2422 
2423 static void
2424 i386_emit_less_signed (void)
2425 {
2426   EMIT_ASM32 (i386_less_signed,
2427 	    "cmpl %ebx,4(%esp)\n\t"
2428 	    "jl .Li386_less_signed_true\n\t"
2429 	    "jne .Li386_less_signed_false\n\t"
2430 	    "cmpl %eax,(%esp)\n\t"
2431 	    "jl .Li386_less_signed_true\n\t"
2432 	    ".Li386_less_signed_false:\n\t"
2433 	    "xor %eax,%eax\n\t"
2434 	    "jmp .Li386_less_signed_end\n\t"
2435 	    ".Li386_less_signed_true:\n\t"
2436 	    "mov $1,%eax\n\t"
2437 	    ".Li386_less_signed_end:\n\t"
2438 	    "xor %ebx,%ebx\n\t"
2439 	    "lea 0x8(%esp),%esp");
2440 }
2441 
2442 static void
2443 i386_emit_less_unsigned (void)
2444 {
2445   EMIT_ASM32 (i386_less_unsigned,
2446 	    "cmpl %ebx,4(%esp)\n\t"
2447 	    "jb .Li386_less_unsigned_true\n\t"
2448 	    "jne .Li386_less_unsigned_false\n\t"
2449 	    "cmpl %eax,(%esp)\n\t"
2450 	    "jb .Li386_less_unsigned_true\n\t"
2451 	    ".Li386_less_unsigned_false:\n\t"
2452 	    "xor %eax,%eax\n\t"
2453 	    "jmp .Li386_less_unsigned_end\n\t"
2454 	    ".Li386_less_unsigned_true:\n\t"
2455 	    "mov $1,%eax\n\t"
2456 	    ".Li386_less_unsigned_end:\n\t"
2457 	    "xor %ebx,%ebx\n\t"
2458 	    "lea 0x8(%esp),%esp");
2459 }
2460 
2461 static void
2462 i386_emit_ref (int size)
2463 {
2464   switch (size)
2465     {
2466     case 1:
2467       EMIT_ASM32 (i386_ref1,
2468 		"movb (%eax),%al");
2469       break;
2470     case 2:
2471       EMIT_ASM32 (i386_ref2,
2472 		"movw (%eax),%ax");
2473       break;
2474     case 4:
2475       EMIT_ASM32 (i386_ref4,
2476 		"movl (%eax),%eax");
2477       break;
2478     case 8:
2479       EMIT_ASM32 (i386_ref8,
2480 		"movl 4(%eax),%ebx\n\t"
2481 		"movl (%eax),%eax");
2482       break;
2483     }
2484 }
2485 
2486 static void
2487 i386_emit_if_goto (int *offset_p, int *size_p)
2488 {
2489   EMIT_ASM32 (i386_if_goto,
2490 	    "mov %eax,%ecx\n\t"
2491 	    "or %ebx,%ecx\n\t"
2492 	    "pop %eax\n\t"
2493 	    "pop %ebx\n\t"
2494 	    "cmpl $0,%ecx\n\t"
2495 	    /* Don't trust the assembler to choose the right jump */
2496 	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2497 
2498   if (offset_p)
2499     *offset_p = 11; /* Be sure that this matches the sequence above.  */
2500   if (size_p)
2501     *size_p = 4;
2502 }
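
/* As on amd64, the trailing ".byte 0x0f, 0x85" is a "jne rel32" with
   a zeroed placeholder; 2 (mov) + 2 (or) + 1 + 1 (pops) + 3 (cmpl)
   + 2 (jne opcode) = 11 bytes precede the field that
   i386_write_goto_address patches.  */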
2503 
2504 static void
2505 i386_emit_goto (int *offset_p, int *size_p)
2506 {
2507   EMIT_ASM32 (i386_goto,
2508 	    /* Don't trust the assembler to choose the right jump */
2509 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2510   if (offset_p)
2511     *offset_p = 1;
2512   if (size_p)
2513     *size_p = 4;
2514 }
2515 
2516 static void
2517 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2518 {
2519   int diff = (to - (from + size));
2520   unsigned char buf[sizeof (int)];
2521 
2522   /* We're only doing 4-byte sizes at the moment.  */
2523   if (size != 4)
2524     {
2525       emit_error = 1;
2526       return;
2527     }
2528 
2529   memcpy (buf, &diff, sizeof (int));
2530   target_write_memory (from, buf, sizeof (int));
2531 }
2532 
2533 static void
2534 i386_emit_const (LONGEST num)
2535 {
2536   unsigned char buf[16];
2537   int i, hi, lo;
2538   CORE_ADDR buildaddr = current_insn_ptr;
2539 
2540   i = 0;
2541   buf[i++] = 0xb8; /* mov $<n>,%eax */
2542   lo = num & 0xffffffff;
2543   memcpy (&buf[i], &lo, sizeof (lo));
2544   i += 4;
2545   hi = ((num >> 32) & 0xffffffff);
2546   if (hi)
2547     {
2548       buf[i++] = 0xbb; /* mov $<n>,%ebx */
2549       memcpy (&buf[i], &hi, sizeof (hi));
2550       i += 4;
2551     }
2552   else
2553     {
2554       buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2555     }
2556   append_insns (&buildaddr, i, buf);
2557   current_insn_ptr = buildaddr;
2558 }
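
/* The constant is split into 32-bit halves; when the high half is
   zero, the 2-byte "xor %ebx,%ebx" replaces a 5-byte mov.  */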
2559 
2560 static void
2561 i386_emit_call (CORE_ADDR fn)
2562 {
2563   unsigned char buf[16];
2564   int i, offset;
2565   CORE_ADDR buildaddr;
2566 
2567   buildaddr = current_insn_ptr;
2568   i = 0;
2569   buf[i++] = 0xe8; /* call <reladdr> */
2570   offset = ((int) fn) - (buildaddr + 5);
2571   memcpy (buf + 1, &offset, 4);
2572   append_insns (&buildaddr, 5, buf);
2573   current_insn_ptr = buildaddr;
2574 }
2575 
2576 static void
2577 i386_emit_reg (int reg)
2578 {
2579   unsigned char buf[16];
2580   int i;
2581   CORE_ADDR buildaddr;
2582 
2583   EMIT_ASM32 (i386_reg_a,
2584 	    "sub $0x8,%esp");
2585   buildaddr = current_insn_ptr;
2586   i = 0;
2587   buf[i++] = 0xb8; /* mov $<n>,%eax */
2588   memcpy (&buf[i], &reg, sizeof (reg));
2589   i += 4;
2590   append_insns (&buildaddr, i, buf);
2591   current_insn_ptr = buildaddr;
2592   EMIT_ASM32 (i386_reg_b,
2593 	    "mov %eax,4(%esp)\n\t"
2594 	    "mov 8(%ebp),%eax\n\t"
2595 	    "mov %eax,(%esp)");
2596   i386_emit_call (get_raw_reg_func_addr ());
2597   EMIT_ASM32 (i386_reg_c,
2598 	    "xor %ebx,%ebx\n\t"
2599 	    "lea 0x8(%esp),%esp");
2600 }
2601 
2602 static void
2603 i386_emit_pop (void)
2604 {
2605   EMIT_ASM32 (i386_pop,
2606 	    "pop %eax\n\t"
2607 	    "pop %ebx");
2608 }
2609 
2610 static void
2611 i386_emit_stack_flush (void)
2612 {
2613   EMIT_ASM32 (i386_stack_flush,
2614 	    "push %ebx\n\t"
2615 	    "push %eax");
2616 }
2617 
2618 static void
2619 i386_emit_zero_ext (int arg)
2620 {
2621   switch (arg)
2622     {
2623     case 8:
2624       EMIT_ASM32 (i386_zero_ext_8,
2625 		"and $0xff,%eax\n\t"
2626 		"xor %ebx,%ebx");
2627       break;
2628     case 16:
2629       EMIT_ASM32 (i386_zero_ext_16,
2630 		"and $0xffff,%eax\n\t"
2631 		"xor %ebx,%ebx");
2632       break;
2633     case 32:
2634       EMIT_ASM32 (i386_zero_ext_32,
2635 		"xor %ebx,%ebx");
2636       break;
2637     default:
2638       emit_error = 1;
2639     }
2640 }
2641 
2642 static void
2643 i386_emit_swap (void)
2644 {
2645   EMIT_ASM32 (i386_swap,
2646 	    "mov %eax,%ecx\n\t"
2647 	    "mov %ebx,%edx\n\t"
2648 	    "pop %eax\n\t"
2649 	    "pop %ebx\n\t"
2650 	    "push %edx\n\t"
2651 	    "push %ecx");
2652 }
2653 
2654 static void
2655 i386_emit_stack_adjust (int n)
2656 {
2657   unsigned char buf[16];
2658   int i;
2659   CORE_ADDR buildaddr = current_insn_ptr;
2660 
2661   i = 0;
2662   buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2663   buf[i++] = 0x64;
2664   buf[i++] = 0x24;
2665   buf[i++] = n * 8;
2666   append_insns (&buildaddr, i, buf);
2667   current_insn_ptr = buildaddr;
2668 }
2669 
2670 /* FN's prototype is `LONGEST(*fn)(int)'.  */
2671 
2672 static void
2673 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2674 {
2675   unsigned char buf[16];
2676   int i;
2677   CORE_ADDR buildaddr;
2678 
2679   EMIT_ASM32 (i386_int_call_1_a,
2680 	    /* Reserve a bit of stack space.  */
2681 	    "sub $0x8,%esp");
2682   /* Put the one argument on the stack.  */
2683   buildaddr = current_insn_ptr;
2684   i = 0;
2685   buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
2686   buf[i++] = 0x04;
2687   buf[i++] = 0x24;
2688   memcpy (&buf[i], &arg1, sizeof (arg1));
2689   i += 4;
2690   append_insns (&buildaddr, i, buf);
2691   current_insn_ptr = buildaddr;
2692   i386_emit_call (fn);
2693   EMIT_ASM32 (i386_int_call_1_c,
2694 	    "mov %edx,%ebx\n\t"
2695 	    "lea 0x8(%esp),%esp");
2696 }
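
/* Under the i386 psABI a LONGEST comes back in the %edx:%eax pair
   (high half in %edx); the trailing "mov %edx,%ebx" moves it into
   this compiler's %eax/%ebx top-of-stack pair.  */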
2697 
2698 /* FN's prototype is `void(*fn)(int,LONGEST)'.  */
2699 
2700 static void
2701 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2702 {
2703   unsigned char buf[16];
2704   int i;
2705   CORE_ADDR buildaddr;
2706 
2707   EMIT_ASM32 (i386_void_call_2_a,
2708 	    /* Preserve %eax only; we don't have to worry about %ebx.  */
2709 	    "push %eax\n\t"
2710 	    /* Reserve a bit of stack space for arguments.  */
2711 	    "sub $0x10,%esp\n\t"
2712 	    /* Copy "top" to the second argument position.  (Note that
2713 	       we can't assume the function won't scribble on its
2714 	       arguments, so don't try to restore from this.)  */
2715 	    "mov %eax,4(%esp)\n\t"
2716 	    "mov %ebx,8(%esp)");
2717   /* Put the first argument on the stack.  */
2718   buildaddr = current_insn_ptr;
2719   i = 0;
2720   buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
2721   buf[i++] = 0x04;
2722   buf[i++] = 0x24;
2723   memcpy (&buf[i], &arg1, sizeof (arg1));
2724   i += 4;
2725   append_insns (&buildaddr, i, buf);
2726   current_insn_ptr = buildaddr;
2727   i386_emit_call (fn);
2728   EMIT_ASM32 (i386_void_call_2_b,
2729 	    "lea 0x10(%esp),%esp\n\t"
2730 	    /* Restore original stack top.  */
2731 	    "pop %eax");
2732 }
2733 
2734 
2735 static void
2736 i386_emit_eq_goto (int *offset_p, int *size_p)
2737 {
2738   EMIT_ASM32 (eq,
2739 	      /* Check the low half first; it is the more likely decider.  */
2740 	      "cmpl %eax,(%esp)\n\t"
2741 	      "jne .Leq_fallthru\n\t"
2742 	      "cmpl %ebx,4(%esp)\n\t"
2743 	      "jne .Leq_fallthru\n\t"
2744 	      "lea 0x8(%esp),%esp\n\t"
2745 	      "pop %eax\n\t"
2746 	      "pop %ebx\n\t"
2747 	      /* jmp, but don't trust the assembler to choose the right jump */
2748 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2749 	      ".Leq_fallthru:\n\t"
2750 	      "lea 0x8(%esp),%esp\n\t"
2751 	      "pop %eax\n\t"
2752 	      "pop %ebx");
2753 
2754   if (offset_p)
2755     *offset_p = 18;
2756   if (size_p)
2757     *size_p = 4;
2758 }
2759 
2760 static void
2761 i386_emit_ne_goto (int *offset_p, int *size_p)
2762 {
2763   EMIT_ASM32 (ne,
2764 	      /* Check the low half first; it is the more likely decider.  */
2765 	      "cmpl %eax,(%esp)\n\t"
2766 	      "jne .Lne_jump\n\t"
2767 	      "cmpl %ebx,4(%esp)\n\t"
2768 	      "je .Lne_fallthru\n\t"
2769 	      ".Lne_jump:\n\t"
2770 	      "lea 0x8(%esp),%esp\n\t"
2771 	      "pop %eax\n\t"
2772 	      "pop %ebx\n\t"
2773 	      /* jmp, but don't trust the assembler to choose the right jump */
2774 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2775 	      ".Lne_fallthru:\n\t"
2776 	      "lea 0x8(%esp),%esp\n\t"
2777 	      "pop %eax\n\t"
2778 	      "pop %ebx");
2779 
2780   if (offset_p)
2781     *offset_p = 18;
2782   if (size_p)
2783     *size_p = 4;
2784 }
2785 
2786 static void
2787 i386_emit_lt_goto (int *offset_p, int *size_p)
2788 {
2789   EMIT_ASM32 (lt,
2790 	      "cmpl %ebx,4(%esp)\n\t"
2791 	      "jl .Llt_jump\n\t"
2792 	      "jne .Llt_fallthru\n\t"
2793 	      "cmpl %eax,(%esp)\n\t"
2794 	      "jnl .Llt_fallthru\n\t"
2795 	      ".Llt_jump:\n\t"
2796 	      "lea 0x8(%esp),%esp\n\t"
2797 	      "pop %eax\n\t"
2798 	      "pop %ebx\n\t"
2799 	      /* jmp, but don't trust the assembler to choose the right jump */
2800 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2801 	      ".Llt_fallthru:\n\t"
2802 	      "lea 0x8(%esp),%esp\n\t"
2803 	      "pop %eax\n\t"
2804 	      "pop %ebx");
2805 
2806   if (offset_p)
2807     *offset_p = 20;
2808   if (size_p)
2809     *size_p = 4;
2810 }
2811 
2812 static void
2813 i386_emit_le_goto (int *offset_p, int *size_p)
2814 {
2815   EMIT_ASM32 (le,
2816 	      "cmpl %ebx,4(%esp)\n\t"
2817 	      "jle .Lle_jump\n\t"
2818 	      "jne .Lle_fallthru\n\t"
2819 	      "cmpl %eax,(%esp)\n\t"
2820 	      "jnle .Lle_fallthru\n\t"
2821 	      ".Lle_jump:\n\t"
2822 	      "lea 0x8(%esp),%esp\n\t"
2823 	      "pop %eax\n\t"
2824 	      "pop %ebx\n\t"
2825 	      /* jmp, but don't trust the assembler to choose the right jump */
2826 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2827 	      ".Lle_fallthru:\n\t"
2828 	      "lea 0x8(%esp),%esp\n\t"
2829 	      "pop %eax\n\t"
2830 	      "pop %ebx");
2831 
2832   if (offset_p)
2833     *offset_p = 20;
2834   if (size_p)
2835     *size_p = 4;
2836 }
2837 
2838 static void
2839 i386_emit_gt_goto (int *offset_p, int *size_p)
2840 {
2841   EMIT_ASM32 (gt,
2842 	      "cmpl %ebx,4(%esp)\n\t"
2843 	      "jg .Lgt_jump\n\t"
2844 	      "jne .Lgt_fallthru\n\t"
2845 	      "cmpl %eax,(%esp)\n\t"
2846 	      "jng .Lgt_fallthru\n\t"
2847 	      ".Lgt_jump:\n\t"
2848 	      "lea 0x8(%esp),%esp\n\t"
2849 	      "pop %eax\n\t"
2850 	      "pop %ebx\n\t"
2851 	      /* jmp, but don't trust the assembler to choose the right jump */
2852 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2853 	      ".Lgt_fallthru:\n\t"
2854 	      "lea 0x8(%esp),%esp\n\t"
2855 	      "pop %eax\n\t"
2856 	      "pop %ebx");
2857 
2858   if (offset_p)
2859     *offset_p = 20;
2860   if (size_p)
2861     *size_p = 4;
2862 }
2863 
2864 static void
2865 i386_emit_ge_goto (int *offset_p, int *size_p)
2866 {
2867   EMIT_ASM32 (ge,
2868 	      "cmpl %ebx,4(%esp)\n\t"
2869 	      "jge .Lge_jump\n\t"
2870 	      "jne .Lge_fallthru\n\t"
2871 	      "cmpl %eax,(%esp)\n\t"
2872 	      "jnge .Lge_fallthru\n\t"
2873 	      ".Lge_jump:\n\t"
2874 	      "lea 0x8(%esp),%esp\n\t"
2875 	      "pop %eax\n\t"
2876 	      "pop %ebx\n\t"
2877 	      /* jmp, but don't trust the assembler to choose the right jump */
2878 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2879 	      ".Lge_fallthru:\n\t"
2880 	      "lea 0x8(%esp),%esp\n\t"
2881 	      "pop %eax\n\t"
2882 	      "pop %ebx");
2883 
2884   if (offset_p)
2885     *offset_p = 20;
2886   if (size_p)
2887     *size_p = 4;
2888 }
2889 
2890 static emit_ops i386_emit_ops =
2891   {
2892     i386_emit_prologue,
2893     i386_emit_epilogue,
2894     i386_emit_add,
2895     i386_emit_sub,
2896     i386_emit_mul,
2897     i386_emit_lsh,
2898     i386_emit_rsh_signed,
2899     i386_emit_rsh_unsigned,
2900     i386_emit_ext,
2901     i386_emit_log_not,
2902     i386_emit_bit_and,
2903     i386_emit_bit_or,
2904     i386_emit_bit_xor,
2905     i386_emit_bit_not,
2906     i386_emit_equal,
2907     i386_emit_less_signed,
2908     i386_emit_less_unsigned,
2909     i386_emit_ref,
2910     i386_emit_if_goto,
2911     i386_emit_goto,
2912     i386_write_goto_address,
2913     i386_emit_const,
2914     i386_emit_call,
2915     i386_emit_reg,
2916     i386_emit_pop,
2917     i386_emit_stack_flush,
2918     i386_emit_zero_ext,
2919     i386_emit_swap,
2920     i386_emit_stack_adjust,
2921     i386_emit_int_call_1,
2922     i386_emit_void_call_2,
2923     i386_emit_eq_goto,
2924     i386_emit_ne_goto,
2925     i386_emit_lt_goto,
2926     i386_emit_le_goto,
2927     i386_emit_gt_goto,
2928     i386_emit_ge_goto
2929   };
2930 
2931 
2932 emit_ops *
2933 x86_target::emit_ops ()
2934 {
2935 #ifdef __x86_64__
2936   if (is_64bit_tdesc (current_thread))
2937     return &amd64_emit_ops;
2938   else
2939 #endif
2940     return &i386_emit_ops;
2941 }
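
/* The choice is made per call rather than at build time because a
   64-bit gdbserver may be controlling a 32-bit inferior; the current
   thread's target description decides.  */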
2942 
2943 /* Implementation of target ops method "sw_breakpoint_from_kind".  */
2944 
2945 const gdb_byte *
2946 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2947 {
2948   *size = x86_breakpoint_len;
2949   return x86_breakpoint;
2950 }
2951 
2952 bool
2953 x86_target::low_supports_range_stepping ()
2954 {
2955   return true;
2956 }
2957 
2958 int
2959 x86_target::get_ipa_tdesc_idx ()
2960 {
2961   struct regcache *regcache = get_thread_regcache (current_thread, 0);
2962   const struct target_desc *tdesc = regcache->tdesc;
2963 
2964 #ifdef __x86_64__
2965   return amd64_get_ipa_tdesc_idx (tdesc);
2966 #endif
2967 
2968   if (tdesc == tdesc_i386_linux_no_xml.get ())
2969     return X86_TDESC_SSE;
2970 
2971   return i386_get_ipa_tdesc_idx (tdesc);
2972 }
2973 
2974 /* The linux target ops object.  */
2975 
2976 linux_process_target *the_linux_target = &the_x86_target;
2977 
2978 void
2979 initialize_low_arch (void)
2980 {
2981   /* Initialize the Linux target descriptions.  */
2982 #ifdef __x86_64__
2983   tdesc_amd64_linux_no_xml = allocate_target_description ();
2984   copy_target_description (tdesc_amd64_linux_no_xml.get (),
2985 			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2986 							 false));
2987   tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2988 #endif
2989 
2990   tdesc_i386_linux_no_xml = allocate_target_description ();
2991   copy_target_description (tdesc_i386_linux_no_xml.get (),
2992 			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
2993   tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2994 
2995   initialize_regsets_info (&x86_regsets_info);
2996 }
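
/* Both fallback descriptions are built from the SSE feature mask
   (X86_XSTATE_SSE_MASK) and carry hand-written xmltarget strings
   rather than generated XML.  */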
2997