/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

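/* Templates for the jumps we patch into inferior code: a 5-byte
   "jmp rel32" (opcode 0xe9) and a 4-byte "jmp rel16" (0xe9 with the
   0x66 operand-size prefix).  The zeroed displacement bytes are
   filled in when a jump pad or trampoline is wired up.  */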
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

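/* The leading '@' below marks the string as a literal XML document
   rather than the name of an XML file to read; this is an assumption
   based on how gdbserver's get_features_xml treats the prefix when
   answering qXfer:features:read.  */
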
static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (gdb::array_view<const char * const> features) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update the target description of all processes; a new GDB has
     connected, and it may or may not support XML target descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  21 * 8,  22 * 8,
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
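
/* For illustration only -- a hypothetical sketch (not code from this
   file) of how a usrregs-style fetch consumes these offsets, assuming
   REGNO is a valid GDB register number for the inferior:

     errno = 0;
     long val = ptrace (PTRACE_PEEKUSER, lwpid,
			(void *) (uintptr_t) i386_regmap[regno], 0);
     if (errno == 0)
       supply_register (regcache, regno, &val);

   i.e. each regmap entry is a byte offset into the inferior's
   `struct user' register area.  */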

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif


/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64 bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
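
/* The four ints filled in by PTRACE_GET_THREAD_AREA above have the
   layout of the kernel's struct user_desc; desc[1] is its base_addr
   field, which is why that is the only word extracted.  */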

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
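
/* The shift by 3 above drops the low bits of the %gs selector: an x86
   segment selector keeps its descriptor-table index in the high 13
   bits, above a table-indicator bit and a 2-bit requested privilege
   level.  */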



bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
                   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice,
   once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the case
   where it does work.  Maybe the arch_setup routine could check whether
   it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
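
/* For illustration only -- a minimal sketch of the PTRACE_GETREGSET
   calling convention that the NT_X86_XSTATE entry above relies on
   (the iteration over this table lives in the generic linux-low
   code, not here):

     struct iovec iov = { xsave_buf, xsave_buf_size };
     if (ptrace (PTRACE_GETREGSET, lwpid, (void *) NT_X86_XSTATE, &iov) == 0)
       ... iov.iov_len now holds the number of bytes actually filled ...
*/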

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}


static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will end up zeroed
     before the forked-off process is detached, which keeps this
     compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
      return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					       FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}

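/* Nonzero if the connected GDB announced, via "xmlRegisters=i386" in
   its qSupported packet, that it understands x86 XML target
   descriptions; see process_qsupported below.  */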
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..471] are the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
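
/* For illustration only -- a hypothetical sketch of pulling the XCR0
   mask out of an XSAVE buffer using the offset above:

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof xcr0);
     if ((xcr0 & X86_XSTATE_AVX) != 0)
       ... the OS has enabled AVX (ymm) state ...

   x86_linux_read_description below does the equivalent through its
   xstateregs array.  */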

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
       error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
       error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	  tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support XML target descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (gdb::array_view<const char * const> features)
{
  /* Assume GDB doesn't support XML unless it sent "xmlRegisters="
     with "i386" in its qSupported query, which indicates that it
     supports x86 XML target descriptions.  */
  use_xml = 0;

  for (const char *feature : features)
    {
      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }

  update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}

/* Fill *SYSNO with the number of the syscall that was trapped.  This
   should only be called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

bool
x86_target::supports_tracepoints ()
{
  return true;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
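
/* For example, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 ("mov %rsp,%rsi") into BUF and returns 3.  */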

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the instruction that jumps to the jump pad into
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movabs <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be
     more than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the instruction that jumps to the jump pad into
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub    $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov    <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which is not available on the
     original i386.  If we cared about that, this could use xchg instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
      used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to the user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
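
/* For instance, EMIT_ASM (ipa_example, "push %rax") assembles the
   instruction between the start_ipa_example/end_ipa_example labels
   emitted inside the enclosing function, and add_insns then copies
   those bytes into the buffer at current_insn_ptr ("ipa_example" is a
   made-up name for illustration).  */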
1646 
1647 #ifdef __x86_64__
1648 
1649 #define EMIT_ASM32(NAME,INSNS)						\
1650   do									\
1651     {									\
1652       extern unsigned char start_ ## NAME, end_ ## NAME;		\
1653       add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
1654       __asm__ (".code32\n"						\
1655 	       "\t" "jmp end_" #NAME "\n"				\
1656 	       "\t" "start_" #NAME ":\n"				\
1657 	       "\t" INSNS "\n"						\
1658 	       "\t" "end_" #NAME ":\n"					\
1659 	       ".code64\n");						\
1660     } while (0)
1661 
1662 #else
1663 
1664 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1665 
1666 #endif
1667 
1668 #ifdef __x86_64__
1669 
1670 static void
1671 amd64_emit_prologue (void)
1672 {
1673   EMIT_ASM (amd64_prologue,
1674 	    "pushq %rbp\n\t"
1675 	    "movq %rsp,%rbp\n\t"
1676 	    "sub $0x20,%rsp\n\t"
1677 	    "movq %rdi,-8(%rbp)\n\t"
1678 	    "movq %rsi,-16(%rbp)");
1679 }
1680 
1681 
1682 static void
1683 amd64_emit_epilogue (void)
1684 {
1685   EMIT_ASM (amd64_epilogue,
1686 	    "movq -16(%rbp),%rdi\n\t"
1687 	    "movq %rax,(%rdi)\n\t"
1688 	    "xor %rax,%rax\n\t"
1689 	    "leave\n\t"
1690 	    "ret");
1691 }
1692 
1693 static void
1694 amd64_emit_add (void)
1695 {
1696   EMIT_ASM (amd64_add,
1697 	    "add (%rsp),%rax\n\t"
1698 	    "lea 0x8(%rsp),%rsp");
1699 }
1700 
1701 static void
1702 amd64_emit_sub (void)
1703 {
1704   EMIT_ASM (amd64_sub,
1705 	    "sub %rax,(%rsp)\n\t"
1706 	    "pop %rax");
1707 }
1708 
1709 static void
1710 amd64_emit_mul (void)
1711 {
1712   emit_error = 1;
1713 }
1714 
1715 static void
1716 amd64_emit_lsh (void)
1717 {
1718   emit_error = 1;
1719 }
1720 
1721 static void
1722 amd64_emit_rsh_signed (void)
1723 {
1724   emit_error = 1;
1725 }
1726 
1727 static void
1728 amd64_emit_rsh_unsigned (void)
1729 {
1730   emit_error = 1;
1731 }
1732 
1733 static void
1734 amd64_emit_ext (int arg)
1735 {
1736   switch (arg)
1737     {
1738     case 8:
1739       EMIT_ASM (amd64_ext_8,
1740 		"cbtw\n\t"
1741 		"cwtl\n\t"
1742 		"cltq");
1743       break;
1744     case 16:
1745       EMIT_ASM (amd64_ext_16,
1746 		"cwtl\n\t"
1747 		"cltq");
1748       break;
1749     case 32:
1750       EMIT_ASM (amd64_ext_32,
1751 		"cltq");
1752       break;
1753     default:
1754       emit_error = 1;
1755     }
1756 }
1757 
1758 static void
1759 amd64_emit_log_not (void)
1760 {
1761   EMIT_ASM (amd64_log_not,
1762 	    "test %rax,%rax\n\t"
1763 	    "sete %cl\n\t"
1764 	    "movzbq %cl,%rax");
1765 }
1766 
1767 static void
1768 amd64_emit_bit_and (void)
1769 {
1770   EMIT_ASM (amd64_and,
1771 	    "and (%rsp),%rax\n\t"
1772 	    "lea 0x8(%rsp),%rsp");
1773 }
1774 
1775 static void
1776 amd64_emit_bit_or (void)
1777 {
1778   EMIT_ASM (amd64_or,
1779 	    "or (%rsp),%rax\n\t"
1780 	    "lea 0x8(%rsp),%rsp");
1781 }
1782 
1783 static void
1784 amd64_emit_bit_xor (void)
1785 {
1786   EMIT_ASM (amd64_xor,
1787 	    "xor (%rsp),%rax\n\t"
1788 	    "lea 0x8(%rsp),%rsp");
1789 }
1790 
1791 static void
1792 amd64_emit_bit_not (void)
1793 {
1794   EMIT_ASM (amd64_bit_not,
1795 	    "xorq $0xffffffffffffffff,%rax");
1796 }
1797 
1798 static void
1799 amd64_emit_equal (void)
1800 {
1801   EMIT_ASM (amd64_equal,
1802 	    "cmp %rax,(%rsp)\n\t"
1803 	    "je .Lamd64_equal_true\n\t"
1804 	    "xor %rax,%rax\n\t"
1805 	    "jmp .Lamd64_equal_end\n\t"
1806 	    ".Lamd64_equal_true:\n\t"
1807 	    "mov $0x1,%rax\n\t"
1808 	    ".Lamd64_equal_end:\n\t"
1809 	    "lea 0x8(%rsp),%rsp");
1810 }
1811 
1812 static void
1813 amd64_emit_less_signed (void)
1814 {
1815   EMIT_ASM (amd64_less_signed,
1816 	    "cmp %rax,(%rsp)\n\t"
1817 	    "jl .Lamd64_less_signed_true\n\t"
1818 	    "xor %rax,%rax\n\t"
1819 	    "jmp .Lamd64_less_signed_end\n\t"
1820 	    ".Lamd64_less_signed_true:\n\t"
1821 	    "mov $1,%rax\n\t"
1822 	    ".Lamd64_less_signed_end:\n\t"
1823 	    "lea 0x8(%rsp),%rsp");
1824 }
1825 
1826 static void
1827 amd64_emit_less_unsigned (void)
1828 {
1829   EMIT_ASM (amd64_less_unsigned,
1830 	    "cmp %rax,(%rsp)\n\t"
1831 	    "jb .Lamd64_less_unsigned_true\n\t"
1832 	    "xor %rax,%rax\n\t"
1833 	    "jmp .Lamd64_less_unsigned_end\n\t"
1834 	    ".Lamd64_less_unsigned_true:\n\t"
1835 	    "mov $1,%rax\n\t"
1836 	    ".Lamd64_less_unsigned_end:\n\t"
1837 	    "lea 0x8(%rsp),%rsp");
1838 }
1839 
1840 static void
1841 amd64_emit_ref (int size)
1842 {
1843   switch (size)
1844     {
1845     case 1:
1846       EMIT_ASM (amd64_ref1,
1847 		"movb (%rax),%al");
1848       break;
1849     case 2:
1850       EMIT_ASM (amd64_ref2,
1851 		"movw (%rax),%ax");
1852       break;
1853     case 4:
1854       EMIT_ASM (amd64_ref4,
1855 		"movl (%rax),%eax");
1856       break;
1857     case 8:
1858       EMIT_ASM (amd64_ref8,
1859 		"movq (%rax),%rax");
1860       break;
1861     }
1862 }
1863 
1864 static void
1865 amd64_emit_if_goto (int *offset_p, int *size_p)
1866 {
1867   EMIT_ASM (amd64_if_goto,
1868 	    "mov %rax,%rcx\n\t"
1869 	    "pop %rax\n\t"
1870 	    "cmp $0,%rcx\n\t"
1871 	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1872   if (offset_p)
1873     *offset_p = 10;
1874   if (size_p)
1875     *size_p = 4;
1876 }
1877 
1878 static void
1879 amd64_emit_goto (int *offset_p, int *size_p)
1880 {
1881   EMIT_ASM (amd64_goto,
1882 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1883   if (offset_p)
1884     *offset_p = 1;
1885   if (size_p)
1886     *size_p = 4;
1887 }
1888 
1889 static void
1890 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1891 {
1892   int diff = (to - (from + size));
1893   unsigned char buf[sizeof (int)];
1894 
1895   if (size != 4)
1896     {
1897       emit_error = 1;
1898       return;
1899     }
1900 
1901   memcpy (buf, &diff, sizeof (int));
1902   target_write_memory (from, buf, sizeof (int));
1903 }
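
/* FROM is the address of the displacement field itself, so the rel32
   is computed against FROM + SIZE, the end of the branch instruction,
   matching the CPU's next-instruction-relative semantics.  With
   illustrative addresses only: a displacement field at 0x1000
   targeting 0x1100 would be patched with 0x1100 - 0x1004 = 0xfc.  */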
1904 
1905 static void
1906 amd64_emit_const (LONGEST num)
1907 {
1908   unsigned char buf[16];
1909   int i;
1910   CORE_ADDR buildaddr = current_insn_ptr;
1911 
1912   i = 0;
1913   buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
1914   memcpy (&buf[i], &num, sizeof (num));
1915   i += 8;
1916   append_insns (&buildaddr, i, buf);
1917   current_insn_ptr = buildaddr;
1918 }
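
/* 48 b8 encodes movabs $imm64,%rax, a single 10-byte instruction;
   the memcpy stores the immediate in host byte order, which is also
   the target's little-endian order since gdbserver runs natively.  */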
1919 
1920 static void
1921 amd64_emit_call (CORE_ADDR fn)
1922 {
1923   unsigned char buf[16];
1924   int i;
1925   CORE_ADDR buildaddr;
1926   LONGEST offset64;
1927 
1928   /* The destination function may live in a shared library, and can
1929      therefore be more than 31 bits away from the compiled code pad.  */
1930 
1931   buildaddr = current_insn_ptr;
1932 
1933   offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1934 
1935   i = 0;
1936 
1937   if (offset64 > INT_MAX || offset64 < INT_MIN)
1938     {
1939       /* The offset is too large for a direct call, so load the address
1940 	 into a register and call through it.  %rdx is used since it is
1941 	 call-clobbered and carries no argument here; no save is needed.  */
1942       buf[i++] = 0x48; /* movabs $fn,%rdx */
1943       buf[i++] = 0xba;
1944       memcpy (buf + i, &fn, 8);
1945       i += 8;
1946       buf[i++] = 0xff; /* callq *%rdx */
1947       buf[i++] = 0xd2;
1948     }
1949   else
1950     {
1951       int offset32 = offset64; /* We know we can't overflow here.  */
1952 
1953       buf[i++] = 0xe8; /* call <reladdr> */
1954       memcpy (buf + i, &offset32, 4);
1955       i += 4;
1956     }
1957 
1958   append_insns (&buildaddr, i, buf);
1959   current_insn_ptr = buildaddr;
1960 }
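
/* The direct form e8 <rel32> is 5 bytes, hence the "buildaddr + 1 + 4"
   bias above; the indirect fallback (10-byte movabs plus 2-byte
   indirect call) costs 12 bytes but reaches any address.  */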
1961 
1962 static void
1963 amd64_emit_reg (int reg)
1964 {
1965   unsigned char buf[16];
1966   int i;
1967   CORE_ADDR buildaddr;
1968 
1969   /* Assume raw_regs is still in %rdi.  */
1970   buildaddr = current_insn_ptr;
1971   i = 0;
1972   buf[i++] = 0xbe; /* mov $<n>,%esi */
1973   memcpy (&buf[i], &reg, sizeof (reg));
1974   i += 4;
1975   append_insns (&buildaddr, i, buf);
1976   current_insn_ptr = buildaddr;
1977   amd64_emit_call (get_raw_reg_func_addr ());
1978 }
1979 
1980 static void
1981 amd64_emit_pop (void)
1982 {
1983   EMIT_ASM (amd64_pop,
1984 	    "pop %rax");
1985 }
1986 
1987 static void
1988 amd64_emit_stack_flush (void)
1989 {
1990   EMIT_ASM (amd64_stack_flush,
1991 	    "push %rax");
1992 }
1993 
1994 static void
1995 amd64_emit_zero_ext (int arg)
1996 {
1997   switch (arg)
1998     {
1999     case 8:
2000       EMIT_ASM (amd64_zero_ext_8,
2001 		"and $0xff,%rax");
2002       break;
2003     case 16:
2004       EMIT_ASM (amd64_zero_ext_16,
2005 		"and $0xffff,%rax");
2006       break;
2007     case 32:
2008       EMIT_ASM (amd64_zero_ext_32,
2009 		"mov $0xffffffff,%rcx\n\t"
2010 		"and %rcx,%rax");
2011       break;
2012     default:
2013       emit_error = 1;
2014     }
2015 }
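
/* The 32-bit case cannot use an and-immediate: the largest immediate
   in the encoding is 32 bits and is sign-extended, so $0xffffffff
   would become all-ones and turn the and into a no-op.  Masking
   through %rcx avoids that; a plain mov %eax,%eax, which zero-extends
   its result, would be an equivalent alternative.  */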
2016 
2017 static void
2018 amd64_emit_swap (void)
2019 {
2020   EMIT_ASM (amd64_swap,
2021 	    "mov %rax,%rcx\n\t"
2022 	    "pop %rax\n\t"
2023 	    "push %rcx");
2024 }
2025 
2026 static void
2027 amd64_emit_stack_adjust (int n)
2028 {
2029   unsigned char buf[16];
2030   int i;
2031   CORE_ADDR buildaddr = current_insn_ptr;
2032 
2033   i = 0;
2034   buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2035   buf[i++] = 0x8d;
2036   buf[i++] = 0x64;
2037   buf[i++] = 0x24;
2038   /* A single signed displacement byte limits this to 15 slots, but we don't expect any more.  */
2039   buf[i++] = n * 8;
2040   append_insns (&buildaddr, i, buf);
2041   current_insn_ptr = buildaddr;
2042 }
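
/* These bytes decode as lea disp8(%rsp),%rsp: 48 is REX.W, 8d is
   lea, ModRM 64 selects an SIB byte plus 8-bit displacement, and SIB
   24 names %rsp as the base with no index.  The single signed
   displacement byte is what limits N, as noted above.  */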
2043 
2044 /* FN's prototype is `LONGEST(*fn)(int)'.  */
2045 
2046 static void
2047 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2048 {
2049   unsigned char buf[16];
2050   int i;
2051   CORE_ADDR buildaddr;
2052 
2053   buildaddr = current_insn_ptr;
2054   i = 0;
2055   buf[i++] = 0xbf; /* movl $<n>,%edi */
2056   memcpy (&buf[i], &arg1, sizeof (arg1));
2057   i += 4;
2058   append_insns (&buildaddr, i, buf);
2059   current_insn_ptr = buildaddr;
2060   amd64_emit_call (fn);
2061 }
2062 
2063 /* FN's prototype is `void(*fn)(int,LONGEST)'.  */
2064 
2065 static void
2066 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2067 {
2068   unsigned char buf[16];
2069   int i;
2070   CORE_ADDR buildaddr;
2071 
2072   buildaddr = current_insn_ptr;
2073   i = 0;
2074   buf[i++] = 0xbf; /* movl $<n>,%edi */
2075   memcpy (&buf[i], &arg1, sizeof (arg1));
2076   i += 4;
2077   append_insns (&buildaddr, i, buf);
2078   current_insn_ptr = buildaddr;
2079   EMIT_ASM (amd64_void_call_2_a,
2080 	    /* Save away a copy of the stack top.  */
2081 	    "push %rax\n\t"
2082 	    /* Also pass top as the second argument.  */
2083 	    "mov %rax,%rsi");
2084   amd64_emit_call (fn);
2085   EMIT_ASM (amd64_void_call_2_b,
2086 	    /* Restore the stack top; %rax may have been trashed by the call.  */
2087 	    "pop %rax");
2088 }
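
/* Per the SysV AMD64 convention the two arguments travel in %edi
   (the movl emitted from BUF above) and %rsi (loaded from the stack
   top); %rax is saved around the call since the callee may clobber
   it.  */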
2089 
2090 static void
2091 amd64_emit_eq_goto (int *offset_p, int *size_p)
2092 {
2093   EMIT_ASM (amd64_eq,
2094 	    "cmp %rax,(%rsp)\n\t"
2095 	    "jne .Lamd64_eq_fallthru\n\t"
2096 	    "lea 0x8(%rsp),%rsp\n\t"
2097 	    "pop %rax\n\t"
2098 	    /* jmp, but don't trust the assembler to choose the right jump */
2099 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2100 	    ".Lamd64_eq_fallthru:\n\t"
2101 	    "lea 0x8(%rsp),%rsp\n\t"
2102 	    "pop %rax");
2103 
2104   if (offset_p)
2105     *offset_p = 13;
2106   if (size_p)
2107     *size_p = 4;
2108 }
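
/* The offset of 13 used by this and the following conditional-goto
   emitters follows from the usual encodings: cmp %rax,(%rsp) is 4
   bytes, the rel8 conditional jump 2, lea 0x8(%rsp),%rsp 5, pop %rax
   1, and the e9 opcode byte 1, placing the 4-byte jump displacement
   13 bytes into the sequence.  */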
2109 
2110 static void
2111 amd64_emit_ne_goto (int *offset_p, int *size_p)
2112 {
2113   EMIT_ASM (amd64_ne,
2114 	    "cmp %rax,(%rsp)\n\t"
2115 	    "je .Lamd64_ne_fallthru\n\t"
2116 	    "lea 0x8(%rsp),%rsp\n\t"
2117 	    "pop %rax\n\t"
2118 	    /* jmp, but don't trust the assembler to choose the right jump */
2119 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2120 	    ".Lamd64_ne_fallthru:\n\t"
2121 	    "lea 0x8(%rsp),%rsp\n\t"
2122 	    "pop %rax");
2123 
2124   if (offset_p)
2125     *offset_p = 13;
2126   if (size_p)
2127     *size_p = 4;
2128 }
2129 
2130 static void
2131 amd64_emit_lt_goto (int *offset_p, int *size_p)
2132 {
2133   EMIT_ASM (amd64_lt,
2134 	    "cmp %rax,(%rsp)\n\t"
2135 	    "jnl .Lamd64_lt_fallthru\n\t"
2136 	    "lea 0x8(%rsp),%rsp\n\t"
2137 	    "pop %rax\n\t"
2138 	    /* jmp, but don't trust the assembler to choose the right jump */
2139 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2140 	    ".Lamd64_lt_fallthru:\n\t"
2141 	    "lea 0x8(%rsp),%rsp\n\t"
2142 	    "pop %rax");
2143 
2144   if (offset_p)
2145     *offset_p = 13;
2146   if (size_p)
2147     *size_p = 4;
2148 }
2149 
2150 static void
2151 amd64_emit_le_goto (int *offset_p, int *size_p)
2152 {
2153   EMIT_ASM (amd64_le,
2154 	    "cmp %rax,(%rsp)\n\t"
2155 	    "jnle .Lamd64_le_fallthru\n\t"
2156 	    "lea 0x8(%rsp),%rsp\n\t"
2157 	    "pop %rax\n\t"
2158 	    /* jmp, but don't trust the assembler to choose the right jump */
2159 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2160 	    ".Lamd64_le_fallthru:\n\t"
2161 	    "lea 0x8(%rsp),%rsp\n\t"
2162 	    "pop %rax");
2163 
2164   if (offset_p)
2165     *offset_p = 13;
2166   if (size_p)
2167     *size_p = 4;
2168 }
2169 
2170 static void
2171 amd64_emit_gt_goto (int *offset_p, int *size_p)
2172 {
2173   EMIT_ASM (amd64_gt,
2174 	    "cmp %rax,(%rsp)\n\t"
2175 	    "jng .Lamd64_gt_fallthru\n\t"
2176 	    "lea 0x8(%rsp),%rsp\n\t"
2177 	    "pop %rax\n\t"
2178 	    /* jmp, but don't trust the assembler to choose the right jump */
2179 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2180 	    ".Lamd64_gt_fallthru:\n\t"
2181 	    "lea 0x8(%rsp),%rsp\n\t"
2182 	    "pop %rax");
2183 
2184   if (offset_p)
2185     *offset_p = 13;
2186   if (size_p)
2187     *size_p = 4;
2188 }
2189 
2190 static void
2191 amd64_emit_ge_goto (int *offset_p, int *size_p)
2192 {
2193   EMIT_ASM (amd64_ge,
2194 	    "cmp %rax,(%rsp)\n\t"
2195 	    "jnge .Lamd64_ge_fallthru\n\t"
2196 	    ".Lamd64_ge_jump:\n\t"
2197 	    "lea 0x8(%rsp),%rsp\n\t"
2198 	    "pop %rax\n\t"
2199 	    /* jmp, but don't trust the assembler to choose the right jump */
2200 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2201 	    ".Lamd64_ge_fallthru:\n\t"
2202 	    "lea 0x8(%rsp),%rsp\n\t"
2203 	    "pop %rax");
2204 
2205   if (offset_p)
2206     *offset_p = 13;
2207   if (size_p)
2208     *size_p = 4;
2209 }
2210 
2211 struct emit_ops amd64_emit_ops =
2212   {
2213     amd64_emit_prologue,
2214     amd64_emit_epilogue,
2215     amd64_emit_add,
2216     amd64_emit_sub,
2217     amd64_emit_mul,
2218     amd64_emit_lsh,
2219     amd64_emit_rsh_signed,
2220     amd64_emit_rsh_unsigned,
2221     amd64_emit_ext,
2222     amd64_emit_log_not,
2223     amd64_emit_bit_and,
2224     amd64_emit_bit_or,
2225     amd64_emit_bit_xor,
2226     amd64_emit_bit_not,
2227     amd64_emit_equal,
2228     amd64_emit_less_signed,
2229     amd64_emit_less_unsigned,
2230     amd64_emit_ref,
2231     amd64_emit_if_goto,
2232     amd64_emit_goto,
2233     amd64_write_goto_address,
2234     amd64_emit_const,
2235     amd64_emit_call,
2236     amd64_emit_reg,
2237     amd64_emit_pop,
2238     amd64_emit_stack_flush,
2239     amd64_emit_zero_ext,
2240     amd64_emit_swap,
2241     amd64_emit_stack_adjust,
2242     amd64_emit_int_call_1,
2243     amd64_emit_void_call_2,
2244     amd64_emit_eq_goto,
2245     amd64_emit_ne_goto,
2246     amd64_emit_lt_goto,
2247     amd64_emit_le_goto,
2248     amd64_emit_gt_goto,
2249     amd64_emit_ge_goto
2250   };
2251 
2252 #endif /* __x86_64__ */
2253 
2254 static void
2255 i386_emit_prologue (void)
2256 {
2257   EMIT_ASM32 (i386_prologue,
2258 	    "push %ebp\n\t"
2259 	    "mov %esp,%ebp\n\t"
2260 	    "push %ebx");
2261   /* At this point, the raw regs base address is at 8(%ebp), and the
2262      value pointer is at 12(%ebp).  */
2263 }
2264 
2265 static void
2266 i386_emit_epilogue (void)
2267 {
2268   EMIT_ASM32 (i386_epilogue,
2269 	    "mov 12(%ebp),%ecx\n\t"
2270 	    "mov %eax,(%ecx)\n\t"
2271 	    "mov %ebx,0x4(%ecx)\n\t"
2272 	    "xor %eax,%eax\n\t"
2273 	    "pop %ebx\n\t"
2274 	    "pop %ebp\n\t"
2275 	    "ret");
2276 }
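
/* Throughout these 32-bit fragments a 64-bit value is kept split
   across a register pair: low half in %eax, high half in %ebx, with
   deeper stack entries occupying two 4-byte slots each.  The
   epilogue above stores that pair through the value pointer at
   12(%ebp) and returns zero for success; the add/adc and sub/sbb
   pairs below implement 64-bit arithmetic on the same layout.  */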
2277 
2278 static void
2279 i386_emit_add (void)
2280 {
2281   EMIT_ASM32 (i386_add,
2282 	    "add (%esp),%eax\n\t"
2283 	    "adc 0x4(%esp),%ebx\n\t"
2284 	    "lea 0x8(%esp),%esp");
2285 }
2286 
2287 static void
2288 i386_emit_sub (void)
2289 {
2290   EMIT_ASM32 (i386_sub,
2291 	    "subl %eax,(%esp)\n\t"
2292 	    "sbbl %ebx,4(%esp)\n\t"
2293 	    "pop %eax\n\t"
2294 	    "pop %ebx");
2295 }
2296 
2297 static void
2298 i386_emit_mul (void)
2299 {
2300   emit_error = 1;
2301 }
2302 
2303 static void
2304 i386_emit_lsh (void)
2305 {
2306   emit_error = 1;
2307 }
2308 
2309 static void
2310 i386_emit_rsh_signed (void)
2311 {
2312   emit_error = 1;
2313 }
2314 
2315 static void
2316 i386_emit_rsh_unsigned (void)
2317 {
2318   emit_error = 1;
2319 }
2320 
2321 static void
2322 i386_emit_ext (int arg)
2323 {
2324   switch (arg)
2325     {
2326     case 8:
2327       EMIT_ASM32 (i386_ext_8,
2328 		"cbtw\n\t"
2329 		"cwtl\n\t"
2330 		"movl %eax,%ebx\n\t"
2331 		"sarl $31,%ebx");
2332       break;
2333     case 16:
2334       EMIT_ASM32 (i386_ext_16,
2335 		"cwtl\n\t"
2336 		"movl %eax,%ebx\n\t"
2337 		"sarl $31,%ebx");
2338       break;
2339     case 32:
2340       EMIT_ASM32 (i386_ext_32,
2341 		"movl %eax,%ebx\n\t"
2342 		"sarl $31,%ebx");
2343       break;
2344     default:
2345       emit_error = 1;
2346     }
2347 }
2348 
2349 static void
2350 i386_emit_log_not (void)
2351 {
2352   EMIT_ASM32 (i386_log_not,
2353 	    "or %ebx,%eax\n\t"
2354 	    "test %eax,%eax\n\t"
2355 	    "sete %cl\n\t"
2356 	    "xor %ebx,%ebx\n\t"
2357 	    "movzbl %cl,%eax");
2358 }
2359 
2360 static void
2361 i386_emit_bit_and (void)
2362 {
2363   EMIT_ASM32 (i386_and,
2364 	    "and (%esp),%eax\n\t"
2365 	    "and 0x4(%esp),%ebx\n\t"
2366 	    "lea 0x8(%esp),%esp");
2367 }
2368 
2369 static void
2370 i386_emit_bit_or (void)
2371 {
2372   EMIT_ASM32 (i386_or,
2373 	    "or (%esp),%eax\n\t"
2374 	    "or 0x4(%esp),%ebx\n\t"
2375 	    "lea 0x8(%esp),%esp");
2376 }
2377 
2378 static void
2379 i386_emit_bit_xor (void)
2380 {
2381   EMIT_ASM32 (i386_xor,
2382 	    "xor (%esp),%eax\n\t"
2383 	    "xor 0x4(%esp),%ebx\n\t"
2384 	    "lea 0x8(%esp),%esp");
2385 }
2386 
2387 static void
2388 i386_emit_bit_not (void)
2389 {
2390   EMIT_ASM32 (i386_bit_not,
2391 	    "xor $0xffffffff,%eax\n\t"
2392 	    "xor $0xffffffff,%ebx");
2393 }
2394 
2395 static void
2396 i386_emit_equal (void)
2397 {
2398   EMIT_ASM32 (i386_equal,
2399 	    "cmpl %ebx,4(%esp)\n\t"
2400 	    "jne .Li386_equal_false\n\t"
2401 	    "cmpl %eax,(%esp)\n\t"
2402 	    "je .Li386_equal_true\n\t"
2403 	    ".Li386_equal_false:\n\t"
2404 	    "xor %eax,%eax\n\t"
2405 	    "jmp .Li386_equal_end\n\t"
2406 	    ".Li386_equal_true:\n\t"
2407 	    "mov $1,%eax\n\t"
2408 	    ".Li386_equal_end:\n\t"
2409 	    "xor %ebx,%ebx\n\t"
2410 	    "lea 0x8(%esp),%esp");
2411 }
2412 
2413 static void
2414 i386_emit_less_signed (void)
2415 {
2416   EMIT_ASM32 (i386_less_signed,
2417 	    "cmpl %ebx,4(%esp)\n\t"
2418 	    "jl .Li386_less_signed_true\n\t"
2419 	    "jne .Li386_less_signed_false\n\t"
2420 	    "cmpl %eax,(%esp)\n\t"
2421 	    "jl .Li386_less_signed_true\n\t"
2422 	    ".Li386_less_signed_false:\n\t"
2423 	    "xor %eax,%eax\n\t"
2424 	    "jmp .Li386_less_signed_end\n\t"
2425 	    ".Li386_less_signed_true:\n\t"
2426 	    "mov $1,%eax\n\t"
2427 	    ".Li386_less_signed_end:\n\t"
2428 	    "xor %ebx,%ebx\n\t"
2429 	    "lea 0x8(%esp),%esp");
2430 }
2431 
2432 static void
2433 i386_emit_less_unsigned (void)
2434 {
2435   EMIT_ASM32 (i386_less_unsigned,
2436 	    "cmpl %ebx,4(%esp)\n\t"
2437 	    "jb .Li386_less_unsigned_true\n\t"
2438 	    "jne .Li386_less_unsigned_false\n\t"
2439 	    "cmpl %eax,(%esp)\n\t"
2440 	    "jb .Li386_less_unsigned_true\n\t"
2441 	    ".Li386_less_unsigned_false:\n\t"
2442 	    "xor %eax,%eax\n\t"
2443 	    "jmp .Li386_less_unsigned_end\n\t"
2444 	    ".Li386_less_unsigned_true:\n\t"
2445 	    "mov $1,%eax\n\t"
2446 	    ".Li386_less_unsigned_end:\n\t"
2447 	    "xor %ebx,%ebx\n\t"
2448 	    "lea 0x8(%esp),%esp");
2449 }
2450 
2451 static void
2452 i386_emit_ref (int size)
2453 {
2454   switch (size)
2455     {
2456     case 1:
2457       EMIT_ASM32 (i386_ref1,
2458 		"movb (%eax),%al");
2459       break;
2460     case 2:
2461       EMIT_ASM32 (i386_ref2,
2462 		"movw (%eax),%ax");
2463       break;
2464     case 4:
2465       EMIT_ASM32 (i386_ref4,
2466 		"movl (%eax),%eax");
2467       break;
2468     case 8:
2469       EMIT_ASM32 (i386_ref8,
2470 		"movl 4(%eax),%ebx\n\t"
2471 		"movl (%eax),%eax");
2472       break;
2473     }
2474 }
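
/* In the 8-byte case the high word must be loaded first: %eax holds
   the address, so loading (%eax) into %eax before 4(%eax) would
   clobber the pointer.  */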
2475 
2476 static void
2477 i386_emit_if_goto (int *offset_p, int *size_p)
2478 {
2479   EMIT_ASM32 (i386_if_goto,
2480 	    "mov %eax,%ecx\n\t"
2481 	    "or %ebx,%ecx\n\t"
2482 	    "pop %eax\n\t"
2483 	    "pop %ebx\n\t"
2484 	    "cmpl $0,%ecx\n\t"
2485 	    /* Don't trust the assembler to choose the right jump */
2486 	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2487 
2488   if (offset_p)
2489     *offset_p = 11; /* Be sure that this matches the sequence above.  */
2490   if (size_p)
2491     *size_p = 4;
2492 }
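
/* A sanity check for the 11 above, assuming the short encodings: the
   two register-to-register instructions are 2 bytes each, the two
   pops 1 byte each, cmpl $0,%ecx is 3 bytes (83 f9 00), and the jne
   opcode bytes (0f 85) complete the 11 bytes before the
   displacement.  */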
2493 
2494 static void
2495 i386_emit_goto (int *offset_p, int *size_p)
2496 {
2497   EMIT_ASM32 (i386_goto,
2498 	    /* Don't trust the assembler to choose the right jump */
2499 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2500   if (offset_p)
2501     *offset_p = 1;
2502   if (size_p)
2503     *size_p = 4;
2504 }
2505 
2506 static void
2507 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2508 {
2509   int diff = (to - (from + size));
2510   unsigned char buf[sizeof (int)];
2511 
2512   /* We're only doing 4-byte sizes at the moment.  */
2513   if (size != 4)
2514     {
2515       emit_error = 1;
2516       return;
2517     }
2518 
2519   memcpy (buf, &diff, sizeof (int));
2520   target_write_memory (from, buf, sizeof (int));
2521 }
2522 
2523 static void
2524 i386_emit_const (LONGEST num)
2525 {
2526   unsigned char buf[16];
2527   int i, hi, lo;
2528   CORE_ADDR buildaddr = current_insn_ptr;
2529 
2530   i = 0;
2531   buf[i++] = 0xb8; /* mov $<n>,%eax */
2532   lo = num & 0xffffffff;
2533   memcpy (&buf[i], &lo, sizeof (lo));
2534   i += 4;
2535   hi = ((num >> 32) & 0xffffffff);
2536   if (hi)
2537     {
2538       buf[i++] = 0xbb; /* mov $<n>,%ebx */
2539       memcpy (&buf[i], &hi, sizeof (hi));
2540       i += 4;
2541     }
2542   else
2543     {
2544       buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2545     }
2546   append_insns (&buildaddr, i, buf);
2547   current_insn_ptr = buildaddr;
2548 }
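
/* The 64-bit constant is split for the %eax/%ebx pair: b8 is mov
   $imm32,%eax and bb is mov $imm32,%ebx.  When the high half is zero,
   the two-byte xor %ebx,%ebx (31 db) is emitted instead, saving three
   bytes.  */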
2549 
2550 static void
2551 i386_emit_call (CORE_ADDR fn)
2552 {
2553   unsigned char buf[16];
2554   int i, offset;
2555   CORE_ADDR buildaddr;
2556 
2557   buildaddr = current_insn_ptr;
2558   i = 0;
2559   buf[i++] = 0xe8; /* call <reladdr> */
2560   offset = ((int) fn) - (buildaddr + 5);
2561   memcpy (buf + 1, &offset, 4);
2562   append_insns (&buildaddr, 5, buf);
2563   current_insn_ptr = buildaddr;
2564 }
2565 
2566 static void
2567 i386_emit_reg (int reg)
2568 {
2569   unsigned char buf[16];
2570   int i;
2571   CORE_ADDR buildaddr;
2572 
2573   EMIT_ASM32 (i386_reg_a,
2574 	    "sub $0x8,%esp");
2575   buildaddr = current_insn_ptr;
2576   i = 0;
2577   buf[i++] = 0xb8; /* mov $<n>,%eax */
2578   memcpy (&buf[i], &reg, sizeof (reg));
2579   i += 4;
2580   append_insns (&buildaddr, i, buf);
2581   current_insn_ptr = buildaddr;
2582   EMIT_ASM32 (i386_reg_b,
2583 	    "mov %eax,4(%esp)\n\t"
2584 	    "mov 8(%ebp),%eax\n\t"
2585 	    "mov %eax,(%esp)");
2586   i386_emit_call (get_raw_reg_func_addr ());
2587   EMIT_ASM32 (i386_reg_c,
2588 	    "xor %ebx,%ebx\n\t"
2589 	    "lea 0x8(%esp),%esp");
2590 }
2591 
2592 static void
2593 i386_emit_pop (void)
2594 {
2595   EMIT_ASM32 (i386_pop,
2596 	    "pop %eax\n\t"
2597 	    "pop %ebx");
2598 }
2599 
2600 static void
2601 i386_emit_stack_flush (void)
2602 {
2603   EMIT_ASM32 (i386_stack_flush,
2604 	    "push %ebx\n\t"
2605 	    "push %eax");
2606 }
2607 
2608 static void
2609 i386_emit_zero_ext (int arg)
2610 {
2611   switch (arg)
2612     {
2613     case 8:
2614       EMIT_ASM32 (i386_zero_ext_8,
2615 		"and $0xff,%eax\n\t"
2616 		"xor %ebx,%ebx");
2617       break;
2618     case 16:
2619       EMIT_ASM32 (i386_zero_ext_16,
2620 		"and $0xffff,%eax\n\t"
2621 		"xor %ebx,%ebx");
2622       break;
2623     case 32:
2624       EMIT_ASM32 (i386_zero_ext_32,
2625 		"xor %ebx,%ebx");
2626       break;
2627     default:
2628       emit_error = 1;
2629     }
2630 }
2631 
2632 static void
2633 i386_emit_swap (void)
2634 {
2635   EMIT_ASM32 (i386_swap,
2636 	    "mov %eax,%ecx\n\t"
2637 	    "mov %ebx,%edx\n\t"
2638 	    "pop %eax\n\t"
2639 	    "pop %ebx\n\t"
2640 	    "push %edx\n\t"
2641 	    "push %ecx");
2642 }
2643 
2644 static void
2645 i386_emit_stack_adjust (int n)
2646 {
2647   unsigned char buf[16];
2648   int i;
2649   CORE_ADDR buildaddr = current_insn_ptr;
2650 
2651   i = 0;
2652   buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2653   buf[i++] = 0x64;
2654   buf[i++] = 0x24;
2655   buf[i++] = n * 8;
2656   append_insns (&buildaddr, i, buf);
2657   current_insn_ptr = buildaddr;
2658 }
2659 
2660 /* FN's prototype is `LONGEST(*fn)(int)'.  */
2661 
2662 static void
2663 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2664 {
2665   unsigned char buf[16];
2666   int i;
2667   CORE_ADDR buildaddr;
2668 
2669   EMIT_ASM32 (i386_int_call_1_a,
2670 	    /* Reserve a bit of stack space.  */
2671 	    "sub $0x8,%esp");
2672   /* Put the one argument on the stack.  */
2673   buildaddr = current_insn_ptr;
2674   i = 0;
2675   buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
2676   buf[i++] = 0x04;
2677   buf[i++] = 0x24;
2678   memcpy (&buf[i], &arg1, sizeof (arg1));
2679   i += 4;
2680   append_insns (&buildaddr, i, buf);
2681   current_insn_ptr = buildaddr;
2682   i386_emit_call (fn);
2683   EMIT_ASM32 (i386_int_call_1_c,
2684 	    "mov %edx,%ebx\n\t"
2685 	    "lea 0x8(%esp),%esp");
2686 }
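
/* FN returns a LONGEST, which the i386 calling convention returns in
   the %edx:%eax pair; the mov %edx,%ebx above moves the high half
   into %ebx to match the %eax/%ebx convention, and the lea releases
   the 8 bytes reserved for the argument.  */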
2687 
2688 /* FN's prototype is `void(*fn)(int,LONGEST)'.  */
2689 
2690 static void
2691 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2692 {
2693   unsigned char buf[16];
2694   int i;
2695   CORE_ADDR buildaddr;
2696 
2697   EMIT_ASM32 (i386_void_call_2_a,
2698 	    /* Preserve %eax only; we don't have to worry about %ebx.  */
2699 	    "push %eax\n\t"
2700 	    /* Reserve a bit of stack space for arguments.  */
2701 	    "sub $0x10,%esp\n\t"
2702 	    /* Copy "top" to the second argument position.  (Note that
2703 	       we can't assume the function won't scribble on its
2704 	       arguments, so don't try to restore from these slots.)  */
2705 	    "mov %eax,4(%esp)\n\t"
2706 	    "mov %ebx,8(%esp)");
2707   /* Put the first argument on the stack.  */
2708   buildaddr = current_insn_ptr;
2709   i = 0;
2710   buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
2711   buf[i++] = 0x04;
2712   buf[i++] = 0x24;
2713   memcpy (&buf[i], &arg1, sizeof (arg1));
2714   i += 4;
2715   append_insns (&buildaddr, i, buf);
2716   current_insn_ptr = buildaddr;
2717   i386_emit_call (fn);
2718   EMIT_ASM32 (i386_void_call_2_b,
2719 	    "lea 0x10(%esp),%esp\n\t"
2720 	    /* Restore original stack top.  */
2721 	    "pop %eax");
2722 }
2723 
2724 
2725 static void
2726 i386_emit_eq_goto (int *offset_p, int *size_p)
2727 {
2728   EMIT_ASM32 (eq,
2729 	      /* Check the low half first; it is more likely to be the decider.  */
2730 	      "cmpl %eax,(%esp)\n\t"
2731 	      "jne .Leq_fallthru\n\t"
2732 	      "cmpl %ebx,4(%esp)\n\t"
2733 	      "jne .Leq_fallthru\n\t"
2734 	      "lea 0x8(%esp),%esp\n\t"
2735 	      "pop %eax\n\t"
2736 	      "pop %ebx\n\t"
2737 	      /* jmp, but don't trust the assembler to choose the right jump */
2738 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2739 	      ".Leq_fallthru:\n\t"
2740 	      "lea 0x8(%esp),%esp\n\t"
2741 	      "pop %eax\n\t"
2742 	      "pop %ebx");
2743 
2744   if (offset_p)
2745     *offset_p = 18;
2746   if (size_p)
2747     *size_p = 4;
2748 }
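
/* The 18 follows from the usual encodings: cmpl %eax,(%esp) is 3
   bytes, cmpl %ebx,4(%esp) is 4, the two rel8 jne's 2 each, lea
   0x8(%esp),%esp 4, the two pops 1 each, and the e9 opcode byte 1,
   for 18 bytes before the jump displacement.  */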
2749 
2750 static void
2751 i386_emit_ne_goto (int *offset_p, int *size_p)
2752 {
2753   EMIT_ASM32 (ne,
2754 	      /* Check the low half first; it is more likely to be the decider.  */
2755 	      "cmpl %eax,(%esp)\n\t"
2756 	      "jne .Lne_jump\n\t"
2757 	      "cmpl %ebx,4(%esp)\n\t"
2758 	      "je .Lne_fallthru\n\t"
2759 	      ".Lne_jump:\n\t"
2760 	      "lea 0x8(%esp),%esp\n\t"
2761 	      "pop %eax\n\t"
2762 	      "pop %ebx\n\t"
2763 	      /* jmp, but don't trust the assembler to choose the right jump */
2764 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2765 	      ".Lne_fallthru:\n\t"
2766 	      "lea 0x8(%esp),%esp\n\t"
2767 	      "pop %eax\n\t"
2768 	      "pop %ebx");
2769 
2770   if (offset_p)
2771     *offset_p = 18;
2772   if (size_p)
2773     *size_p = 4;
2774 }
2775 
2776 static void
2777 i386_emit_lt_goto (int *offset_p, int *size_p)
2778 {
2779   EMIT_ASM32 (lt,
2780 	      "cmpl %ebx,4(%esp)\n\t"
2781 	      "jl .Llt_jump\n\t"
2782 	      "jne .Llt_fallthru\n\t"
2783 	      "cmpl %eax,(%esp)\n\t"
2784 	      "jnl .Llt_fallthru\n\t"
2785 	      ".Llt_jump:\n\t"
2786 	      "lea 0x8(%esp),%esp\n\t"
2787 	      "pop %eax\n\t"
2788 	      "pop %ebx\n\t"
2789 	      /* jmp, but don't trust the assembler to choose the right jump */
2790 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2791 	      ".Llt_fallthru:\n\t"
2792 	      "lea 0x8(%esp),%esp\n\t"
2793 	      "pop %eax\n\t"
2794 	      "pop %ebx");
2795 
2796   if (offset_p)
2797     *offset_p = 20;
2798   if (size_p)
2799     *size_p = 4;
2800 }
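
/* lt/le/gt/ge use 20 rather than 18 because dispatching on the
   high-half compare takes a third 2-byte conditional jump (branch
   taken, fall through, or decide on the low half), adding two bytes
   ahead of the e9 displacement.  */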
2801 
2802 static void
2803 i386_emit_le_goto (int *offset_p, int *size_p)
2804 {
2805   EMIT_ASM32 (le,
2806 	      "cmpl %ebx,4(%esp)\n\t"
2807 	      "jle .Lle_jump\n\t"
2808 	      "jne .Lle_fallthru\n\t"
2809 	      "cmpl %eax,(%esp)\n\t"
2810 	      "jnle .Lle_fallthru\n\t"
2811 	      ".Lle_jump:\n\t"
2812 	      "lea 0x8(%esp),%esp\n\t"
2813 	      "pop %eax\n\t"
2814 	      "pop %ebx\n\t"
2815 	      /* jmp, but don't trust the assembler to choose the right jump */
2816 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2817 	      ".Lle_fallthru:\n\t"
2818 	      "lea 0x8(%esp),%esp\n\t"
2819 	      "pop %eax\n\t"
2820 	      "pop %ebx");
2821 
2822   if (offset_p)
2823     *offset_p = 20;
2824   if (size_p)
2825     *size_p = 4;
2826 }
2827 
2828 static void
2829 i386_emit_gt_goto (int *offset_p, int *size_p)
2830 {
2831   EMIT_ASM32 (gt,
2832 	      "cmpl %ebx,4(%esp)\n\t"
2833 	      "jg .Lgt_jump\n\t"
2834 	      "jne .Lgt_fallthru\n\t"
2835 	      "cmpl %eax,(%esp)\n\t"
2836 	      "jng .Lgt_fallthru\n\t"
2837 	      ".Lgt_jump:\n\t"
2838 	      "lea 0x8(%esp),%esp\n\t"
2839 	      "pop %eax\n\t"
2840 	      "pop %ebx\n\t"
2841 	      /* jmp, but don't trust the assembler to choose the right jump */
2842 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2843 	      ".Lgt_fallthru:\n\t"
2844 	      "lea 0x8(%esp),%esp\n\t"
2845 	      "pop %eax\n\t"
2846 	      "pop %ebx");
2847 
2848   if (offset_p)
2849     *offset_p = 20;
2850   if (size_p)
2851     *size_p = 4;
2852 }
2853 
2854 static void
2855 i386_emit_ge_goto (int *offset_p, int *size_p)
2856 {
2857   EMIT_ASM32 (ge,
2858 	      "cmpl %ebx,4(%esp)\n\t"
2859 	      "jge .Lge_jump\n\t"
2860 	      "jne .Lge_fallthru\n\t"
2861 	      "cmpl %eax,(%esp)\n\t"
2862 	      "jnge .Lge_fallthru\n\t"
2863 	      ".Lge_jump:\n\t"
2864 	      "lea 0x8(%esp),%esp\n\t"
2865 	      "pop %eax\n\t"
2866 	      "pop %ebx\n\t"
2867 	      /* jmp, but don't trust the assembler to choose the right jump */
2868 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2869 	      ".Lge_fallthru:\n\t"
2870 	      "lea 0x8(%esp),%esp\n\t"
2871 	      "pop %eax\n\t"
2872 	      "pop %ebx");
2873 
2874   if (offset_p)
2875     *offset_p = 20;
2876   if (size_p)
2877     *size_p = 4;
2878 }
2879 
2880 struct emit_ops i386_emit_ops =
2881   {
2882     i386_emit_prologue,
2883     i386_emit_epilogue,
2884     i386_emit_add,
2885     i386_emit_sub,
2886     i386_emit_mul,
2887     i386_emit_lsh,
2888     i386_emit_rsh_signed,
2889     i386_emit_rsh_unsigned,
2890     i386_emit_ext,
2891     i386_emit_log_not,
2892     i386_emit_bit_and,
2893     i386_emit_bit_or,
2894     i386_emit_bit_xor,
2895     i386_emit_bit_not,
2896     i386_emit_equal,
2897     i386_emit_less_signed,
2898     i386_emit_less_unsigned,
2899     i386_emit_ref,
2900     i386_emit_if_goto,
2901     i386_emit_goto,
2902     i386_write_goto_address,
2903     i386_emit_const,
2904     i386_emit_call,
2905     i386_emit_reg,
2906     i386_emit_pop,
2907     i386_emit_stack_flush,
2908     i386_emit_zero_ext,
2909     i386_emit_swap,
2910     i386_emit_stack_adjust,
2911     i386_emit_int_call_1,
2912     i386_emit_void_call_2,
2913     i386_emit_eq_goto,
2914     i386_emit_ne_goto,
2915     i386_emit_lt_goto,
2916     i386_emit_le_goto,
2917     i386_emit_gt_goto,
2918     i386_emit_ge_goto
2919   };
2920 
2921 
2922 emit_ops *
2923 x86_target::emit_ops ()
2924 {
2925 #ifdef __x86_64__
2926   if (is_64bit_tdesc ())
2927     return &amd64_emit_ops;
2928   else
2929 #endif
2930     return &i386_emit_ops;
2931 }
2932 
2933 /* Implementation of target ops method "sw_breakpoint_from_kind".  */
2934 
2935 const gdb_byte *
2936 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2937 {
2938   *size = x86_breakpoint_len;
2939   return x86_breakpoint;
2940 }
2941 
2942 bool
2943 x86_target::low_supports_range_stepping ()
2944 {
2945   return true;
2946 }
2947 
2948 int
2949 x86_target::get_ipa_tdesc_idx ()
2950 {
2951   struct regcache *regcache = get_thread_regcache (current_thread, 0);
2952   const struct target_desc *tdesc = regcache->tdesc;
2953 
2954 #ifdef __x86_64__
2955   return amd64_get_ipa_tdesc_idx (tdesc);
2956 #endif
2957 
2958   if (tdesc == tdesc_i386_linux_no_xml)
2959     return X86_TDESC_SSE;
2960 
2961   return i386_get_ipa_tdesc_idx (tdesc);
2962 }
2963 
2964 /* The linux target ops object.  */
2965 
2966 linux_process_target *the_linux_target = &the_x86_target;
2967 
2968 void
2969 initialize_low_arch (void)
2970 {
2971   /* Initialize the Linux target descriptions.  */
2972 #ifdef __x86_64__
2973   tdesc_amd64_linux_no_xml = allocate_target_description ();
2974   copy_target_description (tdesc_amd64_linux_no_xml,
2975 			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2976 							 false));
2977   tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2978 #endif
2979 
2980   tdesc_i386_linux_no_xml = allocate_target_description ();
2981   copy_target_description (tdesc_i386_linux_no_xml,
2982 			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
2983   tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2984 
2985   initialize_regsets_info (&x86_regsets_info);
2986 }
2987