Home | History | Annotate | Line # | Download | only in gdbserver
      1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
      2    for GDB.
      3    Copyright (C) 2002-2024 Free Software Foundation, Inc.
      4 
      5    This file is part of GDB.
      6 
      7    This program is free software; you can redistribute it and/or modify
      8    it under the terms of the GNU General Public License as published by
      9    the Free Software Foundation; either version 3 of the License, or
     10    (at your option) any later version.
     11 
     12    This program is distributed in the hope that it will be useful,
     13    but WITHOUT ANY WARRANTY; without even the implied warranty of
     14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     15    GNU General Public License for more details.
     16 
     17    You should have received a copy of the GNU General Public License
     18    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
     19 
     20 #include <signal.h>
     21 #include <limits.h>
     22 #include <inttypes.h>
     23 #include "linux-low.h"
     24 #include "i387-fp.h"
     25 #include "x86-low.h"
     26 #include "gdbsupport/x86-xstate.h"
     27 #include "nat/x86-xstate.h"
     28 #include "nat/gdb_ptrace.h"
     29 
     30 #ifdef __x86_64__
     31 #include "nat/amd64-linux-siginfo.h"
     32 #include "arch/amd64-linux-tdesc.h"
     33 #else
     34 #include "nat/i386-linux.h"
     35 #endif
     36 
     37 #include "arch/i386-linux-tdesc.h"
     38 #include "arch/x86-linux-tdesc-features.h"
     39 
     40 #include "gdb_proc_service.h"
     41 /* Don't include elf/common.h if linux/elf.h got included by
     42    gdb_proc_service.h.  */
     43 #ifndef ELFMAG0
     44 #include "elf/common.h"
     45 #endif
     46 
     47 #include "gdbsupport/agent.h"
     48 #include "tdesc.h"
     49 #include "tracepoint.h"
     50 #include "ax.h"
     51 #include "nat/linux-nat.h"
     52 #include "nat/x86-linux.h"
     53 #include "nat/x86-linux-dregs.h"
     54 #include "nat/x86-linux-tdesc.h"
     55 
#ifdef __x86_64__
/* Fallback target description used for 64-bit inferiors when the
   connected GDB does not support XML target descriptions.  */
static target_desc_up tdesc_amd64_linux_no_xml;
#endif
/* Fallback target description used for 32-bit inferiors when the
   connected GDB does not support XML target descriptions.  */
static target_desc_up tdesc_i386_linux_no_xml;
     60 
     61 
/* Template for a 5-byte 32-bit relative jump (opcode 0xe9); the zero
   displacement bytes are presumably patched in by the fast tracepoint
   jump-pad code.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
/* Template for a 4-byte 16-bit relative jump (0x66 operand-size
   prefix + 0xe9).  */
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
     64 
     65 /* Backward compatibility for gdb without XML support.  */
     66 
     67 static const char xmltarget_i386_linux_no_xml[] = "@<target>\
     68 <architecture>i386</architecture>\
     69 <osabi>GNU/Linux</osabi>\
     70 </target>";
     71 
     72 #ifdef __x86_64__
     73 static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
     74 <architecture>i386:x86-64</architecture>\
     75 <osabi>GNU/Linux</osabi>\
     76 </target>";
     77 #endif
     78 
     79 #include <sys/reg.h>
     80 #include <sys/procfs.h>
     81 #include <sys/uio.h>
     82 
     83 #ifndef PTRACE_GET_THREAD_AREA
     84 #define PTRACE_GET_THREAD_AREA 25
     85 #endif
     86 
     87 /* This definition comes from prctl.h, but some kernels may not have it.  */
     88 #ifndef PTRACE_ARCH_PRCTL
     89 #define PTRACE_ARCH_PRCTL      30
     90 #endif
     91 
     92 /* The following definitions come from prctl.h, but may be absent
     93    for certain configurations.  */
     94 #ifndef ARCH_GET_FS
     95 #define ARCH_SET_GS 0x1001
     96 #define ARCH_SET_FS 0x1002
     97 #define ARCH_GET_FS 0x1003
     98 #define ARCH_GET_GS 0x1004
     99 #endif
    100 
    101 /* Linux target op definitions for the x86 architecture.
    102    This is initialized assuming an amd64 target.
    103    'low_arch_setup' will correct it for i386 or amd64 targets.  */
    104 
class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (gdb::array_view<const char * const> features) override;

  /* Tracepoint / fast-tracepoint support (in-process agent).  */

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  /* Low-level hooks invoked by the generic linux-low code.  */

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  /* Hardware breakpoint/watchpoint support via the x86 debug
     registers.  */

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  /* Per-process and per-thread arch data lifecycle hooks.  */

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update all the target description of all processes; a new GDB
     connected, and it may or not support xml target descriptions.  */
  void update_xmltarget ();
};
    197 
    198 /* The singleton target ops object.  */
    199 
    200 static x86_target the_x86_target;
    201 
    202 /* Per-process arch-specific data we want to keep.  */
    203 
struct arch_process_info
{
  /* Software mirror of the x86 debug registers for this process;
     copied wholesale to forked children (see low_new_fork).  */
  struct x86_debug_reg_state debug_reg_state;
};
    208 
    209 #ifdef __x86_64__
    210 
    211 /* Mapping between the general-purpose registers in `struct user'
    212    format and GDB's register array layout.
    213    Note that the transfer layout uses 64-bit regs.  */
/* Each entry is the byte offset of the corresponding 32-bit GDB
   register within the 64-bit `struct user' register area (hence the
   `* 8' scaling); the 32-bit value occupies the 8-byte slot — see
   collect_register_i386 for the extension rules.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};
    221 
    222 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
    223 
    224 /* So code below doesn't have to care, i386 or amd64.  */
    225 #define ORIG_EAX ORIG_RAX
    226 #define REGSIZE 8
    227 
/* Offsets into the 64-bit `struct user' register area, indexed by GDB
   register number.  Entries of -1 mark registers that are not
   transferred through this gregset — presumably they are supplied via
   the xstate regset instead (TODO confirm against the tdesc register
   numbering).  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  21 * 8,  22 * 8,		/* fs_base, gs_base — TODO confirm slots.  */
  /* MPX is deprecated.  Yet we keep this to not give the registers below
     a new number.  That could break older gdbs.  */
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};
    258 
    259 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
    260 #define X86_64_USER_REGS (GS + 1)
    261 
    262 #else /* ! __x86_64__ */
    263 
    264 /* Mapping between the general-purpose registers in `struct user'
    265    format and GDB's register array layout.  */
/* Each entry is the byte offset of the corresponding GDB register
   within the 32-bit `struct user' register area (4-byte slots).  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};
    273 
    274 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
    275 
    276 #define REGSIZE 4
    277 
    278 #endif
    279 
    280 #ifdef __x86_64__
    281 
    282 /* Returns true if THREAD belongs to a x86-64 process, per the tdesc.  */
    283 
    284 static int
    285 is_64bit_tdesc (thread_info *thread)
    286 {
    287   return register_size (thread->process ()->tdesc, 0) == 8;
    288 }
    289 
    290 #endif
    291 
    292 
    293 /* Called by libthread_db.  */
    295 
ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
  int use_64bit = is_64bit_tdesc (lwp->thread);

  if (use_64bit)
    {
      /* For a 64-bit inferior, IDX selects a segment register and the
	 thread area is just that segment's base, readable directly
	 with PTRACE_ARCH_PRCTL.  */
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    /* 32-bit inferior: IDX is a GDT entry index; fetch the descriptor
       and return desc[1] — presumably the base_addr field of the
       kernel's struct user_desc.  */
    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
    336 
    337 /* Get the thread area address.  This is used to recognize which
    338    thread is which when tracing with the in-process agent library.  We
    339    don't read anything from the address, and treat it as opaque; it's
    340    the address itself that we assume is unique per-thread.  */
    341 
int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc (lwp->thread);

  if (use_64bit)
    {
      /* 64-bit inferiors keep the thread pointer in the FS base.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    thread_info *thr = lwp->thread;
    regcache *regcache = get_thread_regcache (thr);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    /* 32-bit inferiors use a GS segment selector; shifting away the
       low 3 bits (RPL/TI) yields the GDT entry index.  */
    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		thr->id.lwp (),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] is presumably the base_addr field of the kernel's
       struct user_desc.  */
    *addr = desc[1];
    return 0;
  }
}
    384 
    385 
    386 
    387 bool
    389 x86_target::low_cannot_store_register (int regno)
    390 {
    391 #ifdef __x86_64__
    392   if (is_64bit_tdesc (current_thread))
    393     return false;
    394 #endif
    395 
    396   return regno >= I386_NUM_REGS;
    397 }
    398 
    399 bool
    400 x86_target::low_cannot_fetch_register (int regno)
    401 {
    402 #ifdef __x86_64__
    403   if (is_64bit_tdesc (current_thread))
    404     return false;
    405 #endif
    406 
    407   return regno >= I386_NUM_REGS;
    408 }
    409 
    410 static void
    411 collect_register_i386 (struct regcache *regcache, int regno, void *buf)
    412 {
    413   collect_register (regcache, regno, buf);
    414 
    415 #ifdef __x86_64__
    416   /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
    417      space reserved in buf for the register is 8 bytes.  Make sure the entire
    418      reserved space is initialized.  */
    419 
    420   gdb_assert (register_size (regcache->tdesc, regno) == 4);
    421 
    422   if (regno == RAX)
    423     {
    424       /* Sign extend EAX value to avoid potential syscall restart
    425 	 problems.
    426 
    427 	 See amd64_linux_collect_native_gregset() in
    428 	 gdb/amd64-linux-nat.c for a detailed explanation.  */
    429       *(int64_t *) buf = *(int32_t *) buf;
    430     }
    431   else
    432     {
    433       /* Zero-extend.  */
    434       *(uint64_t *) buf = *(uint32_t *) buf;
    435     }
    436 #endif
    437 }
    438 
    439 static void
    440 x86_fill_gregset (struct regcache *regcache, void *buf)
    441 {
    442   int i;
    443 
    444 #ifdef __x86_64__
    445   if (register_size (regcache->tdesc, 0) == 8)
    446     {
    447       for (i = 0; i < X86_64_NUM_REGS; i++)
    448 	if (x86_64_regmap[i] != -1)
    449 	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
    450 
    451       return;
    452     }
    453 #endif
    454 
    455   for (i = 0; i < I386_NUM_REGS; i++)
    456     collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);
    457 
    458   /* Handle ORIG_EAX, which is not in i386_regmap.  */
    459   collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
    460 			 ((char *) buf) + ORIG_EAX * REGSIZE);
    461 }
    462 
    463 static void
    464 x86_store_gregset (struct regcache *regcache, const void *buf)
    465 {
    466   int i;
    467 
    468 #ifdef __x86_64__
    469   if (register_size (regcache->tdesc, 0) == 8)
    470     {
    471       for (i = 0; i < X86_64_NUM_REGS; i++)
    472 	if (x86_64_regmap[i] != -1)
    473 	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
    474 
    475       return;
    476     }
    477 #endif
    478 
    479   for (i = 0; i < I386_NUM_REGS; i++)
    480     supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
    481 
    482   supply_register_by_name (regcache, "orig_eax",
    483 			   ((char *) buf) + ORIG_EAX * REGSIZE);
    484 }
    485 
/* Fill BUF with REGCACHE's floating-point registers in the classic FP
   regset layout (fxsave format on 64-bit, fsave on 32-bit).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
    495 
/* Store the floating-point registers from BUF (classic FP regset
   layout) into REGCACHE.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
    505 
    506 #ifndef __x86_64__
    507 
/* Fill BUF with REGCACHE's FP/SSE registers in fxsave layout
   (PTRACE_GETFPXREGS; 32-bit build only).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
    513 
/* Store FP/SSE registers from BUF (fxsave layout) into REGCACHE
   (32-bit build only).  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
    519 
    520 #endif
    521 
/* Fill BUF with REGCACHE's extended state in xsave layout
   (PTRACE_GETREGSET / NT_X86_XSTATE).  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
    527 
/* Store extended state from BUF (xsave layout) into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
    533 
    534 /* ??? The non-biarch i386 case stores all the i387 regs twice.
    535    Once in i387_.*fsave.* and once in i387_.*fxsave.*.
    536    This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
    537    doesn't work.  IWBN to avoid the duplication in the case where it
    538    does work.  Maybe the arch_setup routine could check whether it works
    539    and update the supported regsets accordingly.  */
    540 
/* Table of ptrace register sets, tried in order.  The xstate regset's
   size starts at 0 and is patched at runtime once the kernel's xsave
   buffer length is known (see x86_linux_read_description).  */
static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
    562 
/* x86 always supports software breakpoints.  */

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}
    568 
    569 CORE_ADDR
    570 x86_target::low_get_pc (regcache *regcache)
    571 {
    572   int use_64bit = register_size (regcache->tdesc, 0) == 8;
    573 
    574   if (use_64bit)
    575     {
    576       uint64_t pc;
    577 
    578       collect_register_by_name (regcache, "rip", &pc);
    579       return (CORE_ADDR) pc;
    580     }
    581   else
    582     {
    583       uint32_t pc;
    584 
    585       collect_register_by_name (regcache, "eip", &pc);
    586       return (CORE_ADDR) pc;
    587     }
    588 }
    589 
    590 void
    591 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
    592 {
    593   int use_64bit = register_size (regcache->tdesc, 0) == 8;
    594 
    595   if (use_64bit)
    596     {
    597       uint64_t newpc = pc;
    598 
    599       supply_register_by_name (regcache, "rip", &newpc);
    600     }
    601   else
    602     {
    603       uint32_t newpc = pc;
    604 
    605       supply_register_by_name (regcache, "eip", &newpc);
    606     }
    607 }
    608 
/* After hitting an int3 breakpoint the PC points one byte past the
   instruction; report that one-byte adjustment.  */

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}
    614 
    615 
/* The x86 software breakpoint instruction: int3 (0xCC), one byte.  */
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
    619 
    620 bool
    621 x86_target::low_breakpoint_at (CORE_ADDR pc)
    622 {
    623   unsigned char c;
    624 
    625   read_memory (pc, &c, 1);
    626   if (c == 0xCC)
    627     return true;
    628 
    629   return false;
    630 }
    631 
    632 /* Low-level function vector.  */
/* Debug-register accessors handed to the shared x86-dregs code; all
   implemented with ptrace in nat/x86-linux-dregs.c.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),		/* Debug register width, in bytes.  */
  };
    643 
    644 /* Breakpoint/Watchpoint support.  */
    646 
    647 bool
    648 x86_target::supports_z_point_type (char z_type)
    649 {
    650   switch (z_type)
    651     {
    652     case Z_PACKET_SW_BP:
    653     case Z_PACKET_HW_BP:
    654     case Z_PACKET_WRITE_WP:
    655     case Z_PACKET_ACCESS_WP:
    656       return true;
    657     default:
    658       return false;
    659     }
    660 }
    661 
    662 int
    663 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
    664 			      int size, raw_breakpoint *bp)
    665 {
    666   struct process_info *proc = current_process ();
    667 
    668   switch (type)
    669     {
    670     case raw_bkpt_type_hw:
    671     case raw_bkpt_type_write_wp:
    672     case raw_bkpt_type_access_wp:
    673       {
    674 	enum target_hw_bp_type hw_type
    675 	  = raw_bkpt_type_to_target_hw_bp_type (type);
    676 	struct x86_debug_reg_state *state
    677 	  = &proc->priv->arch_private->debug_reg_state;
    678 
    679 	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
    680       }
    681 
    682     default:
    683       /* Unsupported.  */
    684       return 1;
    685     }
    686 }
    687 
    688 int
    689 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
    690 			      int size, raw_breakpoint *bp)
    691 {
    692   struct process_info *proc = current_process ();
    693 
    694   switch (type)
    695     {
    696     case raw_bkpt_type_hw:
    697     case raw_bkpt_type_write_wp:
    698     case raw_bkpt_type_access_wp:
    699       {
    700 	enum target_hw_bp_type hw_type
    701 	  = raw_bkpt_type_to_target_hw_bp_type (type);
    702 	struct x86_debug_reg_state *state
    703 	  = &proc->priv->arch_private->debug_reg_state;
    704 
    705 	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
    706       }
    707     default:
    708       /* Unsupported.  */
    709       return 1;
    710     }
    711 }
    712 
    713 bool
    714 x86_target::low_stopped_by_watchpoint ()
    715 {
    716   struct process_info *proc = current_process ();
    717   return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
    718 }
    719 
    720 CORE_ADDR
    721 x86_target::low_stopped_data_address ()
    722 {
    723   struct process_info *proc = current_process ();
    724   CORE_ADDR addr;
    725   if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
    726 				   &addr))
    727     return addr;
    728   return 0;
    729 }
    730 
    731 /* Called when a new process is created.  */
    733 
    734 arch_process_info *
    735 x86_target::low_new_process ()
    736 {
    737   struct arch_process_info *info = XCNEW (struct arch_process_info);
    738 
    739   x86_low_init_dregs (&info->debug_reg_state);
    740 
    741   return info;
    742 }
    743 
    744 /* Called when a process is being deleted.  */
    745 
/* Release the per-process arch data allocated by low_new_process.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}
    751 
/* Per-thread arch setup; delegates to the shared native-layer code.  */

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}
    758 
/* Per-thread arch teardown; delegates to the shared native-layer code.  */

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}
    765 
    766 /* Target routine for new_fork.  */
    767 
void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  /* Structure copy of the whole debug-register mirror.  */
  *child->priv->arch_private = *parent->priv->arch_private;
}
    793 
/* Called before resuming LWP; delegates to the shared native-layer
   code (which, among other things, syncs debug registers).  */

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}
    800 
    801 /* See nat/x86-dregs.h.  */
    802 
    803 struct x86_debug_reg_state *
    804 x86_debug_reg_state (pid_t pid)
    805 {
    806   struct process_info *proc = find_process_pid (pid);
    807 
    808   return &proc->priv->arch_private->debug_reg_state;
    809 }
    810 
    811 /* When GDBSERVER is built as a 64-bit application on linux, the
    813    PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
    814    debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
    815    as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
    816    conversion in-place ourselves.  */
    817 
    818 /* Convert a ptrace/host siginfo object, into/from the siginfo in the
    819    layout of the inferiors' architecture.  Returns true if any
    820    conversion was done; false otherwise.  If DIRECTION is 1, then copy
    821    from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
    822    INF.  */
    823 
bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = current_thread->id.lwp ();
  /* Nonzero when the inferior's executable is a 64-bit ELF; MACHINE
     receives its e_machine value.  */
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc (current_thread))
      return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					       FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  /* Layouts already match; nothing to convert.  */
  return false;
}
    844 
/* Nonzero once the connected GDB has announced support for x86 XML
   target descriptions ("xmlRegisters=" including "i386" in its
   qSupported query); see process_qsupported.  */
static int use_xml;
    847 
    848 /* Get Linux/x86 target description from running target.  */
    849 
static const struct target_desc *
x86_linux_read_description ()
{
  int tid = current_thread->id.lwp ();

  /* If we are not allowed to send an XML target description then we need
     to use the hard-wired target descriptions.  This corresponds to GDB's
     default machine for x86.

     This check needs to occur before any returns statements that might
     generate some alternative target descriptions.  */
  if (!use_xml)
    {
      x86_linux_arch_size arch_size = x86_linux_ptrace_get_arch_size (tid);
      bool is_64bit = arch_size.is_64bit ();
      bool is_x32 = arch_size.is_x32 ();

      if (sizeof (void *) == 4 && is_64bit && !is_x32)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));

#ifdef __x86_64__
      if (is_64bit && !is_x32)
	return tdesc_amd64_linux_no_xml.get ();
      else
#endif
	return tdesc_i386_linux_no_xml.get ();
    }

  /* If have_ptrace_getregset is changed to true by calling
     x86_linux_tdesc_for_tid then we will perform some additional
     initialisation.  */
  bool have_ptrace_getregset_was_unknown
    = have_ptrace_getregset == TRIBOOL_UNKNOWN;

  /* Get pointers to where we should store the xcr0 and xsave_layout
     values.  These will be filled in by x86_linux_tdesc_for_tid the first
     time that the function is called.  Subsequent calls will not modify
     the stored values.  */
  std::pair<uint64_t *, x86_xsave_layout *> storage
    = i387_get_xsave_storage ();

  const target_desc *tdesc
    = x86_linux_tdesc_for_tid (tid, storage.first, storage.second);

  /* One-time regset table fixup, done only on the transition from
     "unknown" to "PTRACE_GETREGSET works".  */
  if (have_ptrace_getregset_was_unknown
      && have_ptrace_getregset == TRIBOOL_TRUE)
    {
      int xsave_len = x86_xsave_length ();

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset_info *regset = x86_regsets;
	   regset->fill_function != nullptr;
	   regset++)
	{
	  /* Give the xstate regset its real buffer size; zero out the
	     sizes of the other non-general regsets so only GETREGSET
	     and the general regset remain in use.  */
	  if (regset->get_request == PTRACE_GETREGSET)
	    regset->size = xsave_len;
	  else if (regset->type != GENERAL_REGS)
	    regset->size = 0;
	}
    }

  return tdesc;
}
    913 
    914 /* Update all the target description of all processes; a new GDB
    915    connected, and it may or not support xml target descriptions.  */
    916 
void
x86_target::update_xmltarget ()
{
  /* Restores the originally-selected thread on scope exit, since the
     loop below switches threads.  */
  scoped_restore_current_thread restore_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    switch_to_thread (find_any_thread_of_pid (pid));

    /* Re-select the target description under the new use_xml
       setting.  */
    low_arch_setup ();
  });
}
    936 
    937 /* Process qSupported query, "xmlRegisters=".  Update the buffer size for
    938    PTRACE_GETREGSET.  */
    939 
    940 void
    941 x86_target::process_qsupported (gdb::array_view<const char * const> features)
    942 {
    943   /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
    944      with "i386" in qSupported query, it supports x86 XML target
    945      descriptions.  */
    946   use_xml = 0;
    947 
    948   for (const char *feature : features)
    949     {
    950       if (startswith (feature, "xmlRegisters="))
    951 	{
    952 	  char *copy = xstrdup (feature + 13);
    953 
    954 	  char *saveptr;
    955 	  for (char *p = strtok_r (copy, ",", &saveptr);
    956 	       p != NULL;
    957 	       p = strtok_r (NULL, ",", &saveptr))
    958 	    {
    959 	      if (strcmp (p, "i386") == 0)
    960 		{
    961 		  use_xml = 1;
    962 		  break;
    963 		}
    964 	    }
    965 
    966 	  free (copy);
    967 	}
    968     }
    969 
    970   update_xmltarget ();
    971 }
    972 
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
/* 64-bit inferiors are accessed through regsets only; there is no
   usrregs fallback.  */
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
/* u-area register map used as fallback access for 32-bit
   inferiors.  */
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
   1002 
   1003 const regs_info *
   1004 x86_target::get_regs_info ()
   1005 {
   1006 #ifdef __x86_64__
   1007   if (is_64bit_tdesc (current_thread))
   1008     return &amd64_linux_regs_info;
   1009   else
   1010 #endif
   1011     return &i386_linux_regs_info;
   1012 }
   1013 
   1014 /* Initialize the target description for the architecture of the
   1015    inferior.  */
   1016 
   1017 void
   1018 x86_target::low_arch_setup ()
   1019 {
   1020   current_process ()->tdesc = x86_linux_read_description ();
   1021 }
   1022 
/* Syscall catchpoints are supported on this target; see
   low_get_syscall_trapinfo for how the syscall number is read.  */

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}
   1028 
/* Fill *SYSNO with the syscall number the inferior trapped on.  This
   should only be called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* An 8-byte general register (register 0) means a 64-bit
     inferior.  */
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      /* orig_rax is 8 bytes wide, so collect it into a long before
	 narrowing to int.  */
      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
   1047 
/* Tracepoints are supported on this target.  */

bool
x86_target::supports_tracepoints ()
{
  return true;
}
   1053 
   1054 static void
   1055 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
   1056 {
   1057   target_write_memory (*to, buf, len);
   1058   *to += len;
   1059 }
   1060 
/* Decode OP, a string of whitespace-separated hexadecimal byte
   values, into BUF.  Stops at the first non-hex text.  Returns the
   number of bytes stored.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  int count = 0;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* strtoul consumed nothing: no more bytes to decode.  */
      if (end == op)
	break;

      buf[count++] = byte;
      op = end;
    }

  return count;
}
   1080 
   1081 #ifdef __x86_64__
   1082 
/* Build a jump pad at *JUMP_ENTRY that saves registers and calls the
   collection function COLLECTOR for tracepoint TPOINT, which is
   located at inferior address TPADDR.  LOCKADDR is the address of the
   spin lock used to serialize collections.  ORIG_SIZE is the size of
   the instruction being displaced by the jump.  The bounds of the
   relocated copy of that instruction are returned in
   *ADJUSTED_INSN_ADDR and *ADJUSTED_INSN_ADDR_END.  Writes the jump
   instruction that the caller must install at the tracepoint address
   to JJUMP_PAD_INSN, and its size to *JJUMP_PAD_INSN_SIZE.  On
   failure, writes an error message to ERR (a caller-provided buffer
   assumed large enough -- the sprintf calls below are unbounded) and
   returns 1; returns 0 on success.  TRAMPOLINE and TRAMPOLINE_SIZE
   are not used on amd64.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  /* Push the tracepoint address so the collector sees the PC.  */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Loop until we own LOCKADDR: atomically swap in a
     pointer to our collecting_t object while the lock word is
     zero.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movabs <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  The saved PC is discarded rather than
     popped into %rsp.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  jump_insn (defined
     earlier in this file) carries a 32-bit relative displacement at
     byte offset 1.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
   1269 
   1270 #endif /* __x86_64__ */
   1271 
/* Build a jump pad at *JUMP_ENTRY that saves registers and calls the
   collection function COLLECTOR for tracepoint TPOINT at inferior
   address TPADDR.  i386 counterpart of
   amd64_install_fast_tracepoint_jump_pad; writes the jump instruction
   the caller must install at the tracepoint address to JJUMP_PAD_INSN
   (size in *JJUMP_PAD_INSN_SIZE), and the relocated original
   instruction's bounds to *ADJUSTED_INSN_ADDR/-_END.  Unlike amd64,
   when the displaced instruction is only 4 bytes long (too small for
   a 5-byte jump) a trampoline is claimed and reached through a 4-byte
   jump with a 16-bit offset; *TRAMPOLINE/*TRAMPOLINE_SIZE describe
   it.  Writes an error message to ERR and returns 1 on failure;
   returns 0 on success.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  /* NOTE(review): misaligned store through a cast; memcpy would be
     cleaner.  */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub    $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov    <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Relies on sizeof (jump_insn) being 5, the size of the call just
     built.  */
  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state, mirroring the saves above in reverse
     order.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
   1459 
/* Fast tracepoints are supported on both i386 and amd64.  */

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}
   1465 
/* Install a fast tracepoint jump pad, dispatching to the amd64 or
   i386 implementation according to the current thread's target
   description.  All arguments are forwarded unchanged; see the
   per-architecture functions above for their meaning.  */

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
   1504 
   1505 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
   1506    architectures.  */
   1507 
   1508 int
   1509 x86_target::get_min_fast_tracepoint_insn_len ()
   1510 {
   1511   static int warned_about_fast_tracepoints = 0;
   1512 
   1513 #ifdef __x86_64__
   1514   /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
   1515       used for fast tracepoints.  */
   1516   if (is_64bit_tdesc (current_thread))
   1517     return 5;
   1518 #endif
   1519 
   1520   if (agent_loaded_p ())
   1521     {
   1522       char errbuf[IPA_BUFSIZ];
   1523 
   1524       errbuf[0] = '\0';
   1525 
   1526       /* On x86, if trampolines are available, then 4-byte jump instructions
   1527 	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
   1528 	 with a 4-byte offset are used instead.  */
   1529       if (have_fast_tracepoint_trampoline_buffer (errbuf))
   1530 	return 4;
   1531       else
   1532 	{
   1533 	  /* GDB has no channel to explain to user why a shorter fast
   1534 	     tracepoint is not possible, but at least make GDBserver
   1535 	     mention that something has gone awry.  */
   1536 	  if (!warned_about_fast_tracepoints)
   1537 	    {
   1538 	      warning ("4-byte fast tracepoints not available; %s", errbuf);
   1539 	      warned_about_fast_tracepoints = 1;
   1540 	    }
   1541 	  return 5;
   1542 	}
   1543     }
   1544   else
   1545     {
   1546       /* Indicate that the minimum length is currently unknown since the IPA
   1547 	 has not loaded yet.  */
   1548       return 0;
   1549     }
   1550 }
   1551 
   1552 static void
   1553 add_insns (unsigned char *start, int len)
   1554 {
   1555   CORE_ADDR buildaddr = current_insn_ptr;
   1556 
   1557   threads_debug_printf ("Adding %d bytes of insn at %s",
   1558 			len, paddress (buildaddr));
   1559 
   1560   append_insns (&buildaddr, len, start);
   1561   current_insn_ptr = buildaddr;
   1562 }
   1563 
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.

   EMIT_ASM assembles INSNS in place between start_NAME/end_NAME
   labels, jumps over them at run time, and copies the bytes between
   the two labels into the buffer at current_insn_ptr (via
   add_insns).  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
   1580 
#ifdef __x86_64__

/* Like EMIT_ASM, but wraps INSNS in .code32/.code64 so they are
   assembled as 32-bit instructions even in a 64-bit build (used when
   compiling for a 32-bit inferior).  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
   1601 
   1602 #ifdef __x86_64__
   1603 
/* The amd64_emit_* functions below compile bytecode operations to
   native amd64 code at current_insn_ptr.  The expression's value
   stack keeps its top-of-stack element cached in %rax; deeper
   elements live on the machine stack.  */

/* Emit the standard prologue: set up a frame, reserve scratch space,
   and spill the two incoming argument registers (%rdi, %rsi).  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


/* Emit the epilogue: store the result (%rax) through the pointer
   saved from %rsi in the prologue, then return zero.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

/* Emit: pop the next stack element and add it into the top of stack
   (%rax).  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: subtract the top of stack (%rax) from the next element, and
   pop the result into %rax.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiplication is not implemented; flag a compilation error.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

/* Left shift is not implemented; flag a compilation error.  */

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

/* Arithmetic right shift is not implemented; flag a compilation
   error.  */

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* Logical right shift is not implemented; flag a compilation
   error.  */

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
   1666 
/* Emit: sign-extend the low ARG bits of the top of stack (%rax) to
   the full 64 bits.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      /* Only 8/16/32-bit widths can be extended.  */
      emit_error = 1;
    }
}
   1691 
/* Emit: logical NOT of the top of stack -- %rax becomes 1 if it was
   zero, 0 otherwise.  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit: pop the next stack element and AND it into %rax.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: pop the next stack element and OR it into %rax.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: pop the next stack element and XOR it into %rax.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: bitwise complement of the top of stack (%rax).  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
   1731 
/* Emit: compare the next stack element with %rax for equality, pop
   it, and leave 1/0 in %rax.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: signed less-than of the next stack element against %rax, pop
   it, and leave 1/0 in %rax.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: unsigned less-than of the next stack element against %rax,
   pop it, and leave 1/0 in %rax.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
   1773 
/* Emit: replace the address in %rax with the SIZE-byte value it
   points to.  Sizes other than 1/2/4/8 emit no code.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
   1797 
/* Emit a conditional jump taken when the popped top of stack is
   nonzero.  The jump target is patched later (see
   amd64_write_goto_address): *OFFSET_P receives the position of the
   32-bit displacement within the emitted sequence and *SIZE_P its
   width.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jump whose 32-bit displacement (position
   *OFFSET_P, width *SIZE_P) is patched later.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
   1822 
   1823 static void
   1824 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
   1825 {
   1826   int diff = (to - (from + size));
   1827   unsigned char buf[sizeof (int)];
   1828 
   1829   if (size != 4)
   1830     {
   1831       emit_error = 1;
   1832       return;
   1833     }
   1834 
   1835   memcpy (buf, &diff, sizeof (int));
   1836   target_write_memory (from, buf, sizeof (int));
   1837 }
   1838 
/* Emit: load the 64-bit constant NUM into the top-of-stack register
   %rax (movabs).  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
   1853 
/* Emit a call to the function at inferior address FN: a 5-byte rel32
   call when FN is in range, otherwise an indirect call through a
   scratch register.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a rel32 call, so call indirectly
	 through a call-clobbered scratch register (no push/pop
	 needed).  NOTE(review): the bytes 48 ba / ff d2 encode
	 movabs/call via %rdx, not %r10 as the old comments
	 claimed.  */
      buf[i++] = 0x48; /* movabs $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
   1895 
/* Emit code to fetch raw register number REG: load REG into %esi (the
   second argument) and call the in-process agent's get_raw_reg
   helper.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
   1913 
/* Pop the value stack: reload the cached top-of-stack register %rax
   from the in-memory stack.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}
   1920 
/* Flush the cached top-of-stack value (%rax) onto the in-memory
   stack, making room to cache a new top.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}
   1927 
/* Zero-extend the top-of-stack value (%rax) from ARG bits.  Only 8,
   16 and 32 bit widths are supported; anything else sets emit_error.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      /* AND's 32-bit immediate is sign-extended to 64 bits, so the
	 0xffffffff mask has to go through a register.  */
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}
   1950 
/* Exchange the cached top-of-stack value (%rax) with the next entry
   on the in-memory stack.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
   1959 
   1960 static void
   1961 amd64_emit_stack_adjust (int n)
   1962 {
   1963   unsigned char buf[16];
   1964   int i;
   1965   CORE_ADDR buildaddr = current_insn_ptr;
   1966 
   1967   i = 0;
   1968   buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
   1969   buf[i++] = 0x8d;
   1970   buf[i++] = 0x64;
   1971   buf[i++] = 0x24;
   1972   /* This only handles adjustments up to 16, but we don't expect any more.  */
   1973   buf[i++] = n * 8;
   1974   append_insns (&buildaddr, i, buf);
   1975   current_insn_ptr = buildaddr;
   1976 }
   1977 
   1978 /* FN's prototype is `LONGEST(*fn)(int)'.  */
   1979 
   1980 static void
   1981 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
   1982 {
   1983   unsigned char buf[16];
   1984   int i;
   1985   CORE_ADDR buildaddr;
   1986 
   1987   buildaddr = current_insn_ptr;
   1988   i = 0;
   1989   buf[i++] = 0xbf; /* movl $<n>,%edi */
   1990   memcpy (&buf[i], &arg1, sizeof (arg1));
   1991   i += 4;
   1992   append_insns (&buildaddr, i, buf);
   1993   current_insn_ptr = buildaddr;
   1994   amd64_emit_call (fn);
   1995 }
   1996 
   1997 /* FN's prototype is `void(*fn)(int,LONGEST)'.  */
   1998 
static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  /* Emit code to call FN (arg1, top-of-stack).  ARG1 goes in %edi and
     the cached stack top (%rax) in %rsi; %rax is saved around the
     call since FN returns no value and may clobber it.  */
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
   2023 
/* "Equal" comparison-goto: branch when the next stack entry equals
   the cached top (%rax); both operands are dropped either way.  The
   jmp target is patched later; *OFFSET_P/*SIZE_P report where its
   rel32 operand lives within the emitted sequence.  */

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  /* 13 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
   2043 
/* "Not equal" comparison-goto: branch when the next stack entry
   differs from the cached top (%rax); operands are dropped either
   way.  *OFFSET_P/*SIZE_P report the jmp's rel32 location for later
   patching.  */

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  /* 13 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
   2063 
/* Signed "less than" comparison-goto: branch when the next stack
   entry is less than the cached top (%rax); operands are dropped
   either way.  *OFFSET_P/*SIZE_P report the jmp's rel32 location for
   later patching.  */

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  /* 13 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
   2083 
/* Signed "less or equal" comparison-goto: branch when the next stack
   entry is <= the cached top (%rax); operands are dropped either way.
   *OFFSET_P/*SIZE_P report the jmp's rel32 location for later
   patching.  */

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  /* 13 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
   2103 
/* Signed "greater than" comparison-goto: branch when the next stack
   entry is greater than the cached top (%rax); operands are dropped
   either way.  *OFFSET_P/*SIZE_P report the jmp's rel32 location for
   later patching.  */

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  /* 13 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
   2123 
/* Signed "greater or equal" comparison-goto: branch when the next
   stack entry is >= the cached top (%rax); operands are dropped
   either way.  *OFFSET_P/*SIZE_P report the jmp's rel32 location for
   later patching.  NOTE(review): the .Lamd64_ge_jump label below is
   never referenced; it emits no bytes and is harmless.  */

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  /* 13 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
   2144 
/* Bytecode-compilation callback table for 64-bit inferiors.  The
   initializers are positional and must stay in struct emit_ops field
   order.  */

static emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
   2185 
   2186 #endif /* __x86_64__ */
   2187 
/* Emit the i386 JIT function prologue: set up an %ebp frame and save
   %ebx, which is restored by the epilogue and is used throughout as
   the high half of the cached 64-bit top-of-stack value.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	    "push %ebp\n\t"
	    "mov %esp,%ebp\n\t"
	    "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
   2198 
/* Emit the i386 JIT function epilogue: store the 64-bit result
   (%ebx:%eax) through the value pointer at 12(%ebp), return 0 in
   %eax, and tear down the frame built by the prologue.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	    "mov 12(%ebp),%ecx\n\t"
	    "mov %eax,(%ecx)\n\t"
	    "mov %ebx,0x4(%ecx)\n\t"
	    "xor %eax,%eax\n\t"
	    "pop %ebx\n\t"
	    "pop %ebp\n\t"
	    "ret");
}
   2211 
/* 64-bit add: add the next in-memory stack entry into the cached top
   (%ebx:%eax, with carry propagation) and drop the entry.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	    "add (%esp),%eax\n\t"
	    "adc 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
   2220 
/* 64-bit subtract: compute (next stack entry) - (cached top) in
   place with borrow propagation, then pop the result into the
   %ebx:%eax cache.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	    "subl %eax,(%esp)\n\t"
	    "sbbl %ebx,4(%esp)\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t");
}
   2230 
/* 64-bit multiply is not implemented for i386; set emit_error to
   reject compilation of the expression.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
   2236 
/* 64-bit left shift is not implemented for i386; set emit_error to
   reject compilation of the expression.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
   2242 
/* 64-bit arithmetic right shift is not implemented for i386; set
   emit_error to reject compilation of the expression.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
   2248 
/* 64-bit logical right shift is not implemented for i386; set
   emit_error to reject compilation of the expression.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
   2254 
/* Sign-extend the cached top-of-stack value from ARG bits to the full
   64-bit %ebx:%eax pair.  Unsupported widths set emit_error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		"cwtl\n\t"
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
   2282 
/* Logical NOT of the 64-bit top-of-stack: leave 1 in %ebx:%eax if the
   value was zero, 0 otherwise.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	    "or %ebx,%eax\n\t"
	    "test %eax,%eax\n\t"
	    "sete %cl\n\t"
	    "xor %ebx,%ebx\n\t"
	    "movzbl %cl,%eax");
}
   2293 
/* 64-bit bitwise AND of the cached top (%ebx:%eax) with the next
   stack entry, dropping the entry.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	    "and (%esp),%eax\n\t"
	    "and 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
   2302 
/* 64-bit bitwise OR of the cached top (%ebx:%eax) with the next
   stack entry, dropping the entry.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	    "or (%esp),%eax\n\t"
	    "or 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
   2311 
/* 64-bit bitwise XOR of the cached top (%ebx:%eax) with the next
   stack entry, dropping the entry.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	    "xor (%esp),%eax\n\t"
	    "xor 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
   2320 
/* 64-bit bitwise NOT of the cached top: invert both halves of
   %ebx:%eax.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	    "xor $0xffffffff,%eax\n\t"
	    "xor $0xffffffff,%ebx\n\t");
}
   2328 
/* 64-bit equality test: compare the next stack entry with the cached
   top, drop the entry, and leave 1 in %ebx:%eax if equal, else 0.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jne .Li386_equal_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "je .Li386_equal_true\n\t"
	    ".Li386_equal_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_equal_end\n\t"
	    ".Li386_equal_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_equal_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
   2346 
/* 64-bit signed "less than": compare high halves first, then low
   halves; drop the next stack entry and leave 1 in %ebx:%eax if it
   was less than the cached top, else 0.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jl .Li386_less_signed_true\n\t"
	    "jne .Li386_less_signed_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "jl .Li386_less_signed_true\n\t"
	    ".Li386_less_signed_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_less_signed_end\n\t"
	    ".Li386_less_signed_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_less_signed_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
   2365 
/* 64-bit unsigned "less than": like i386_emit_less_signed but using
   unsigned (below) conditions.  Leaves 1/0 in %ebx:%eax.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jb .Li386_less_unsigned_true\n\t"
	    "jne .Li386_less_unsigned_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "jb .Li386_less_unsigned_true\n\t"
	    ".Li386_less_unsigned_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_less_unsigned_end\n\t"
	    ".Li386_less_unsigned_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_less_unsigned_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
   2384 
   2385 static void
   2386 i386_emit_ref (int size)
   2387 {
   2388   switch (size)
   2389     {
   2390     case 1:
   2391       EMIT_ASM32 (i386_ref1,
   2392 		"movb (%eax),%al");
   2393       break;
   2394     case 2:
   2395       EMIT_ASM32 (i386_ref2,
   2396 		"movw (%eax),%ax");
   2397       break;
   2398     case 4:
   2399       EMIT_ASM32 (i386_ref4,
   2400 		"movl (%eax),%eax");
   2401       break;
   2402     case 8:
   2403       EMIT_ASM32 (i386_ref8,
   2404 		"movl 4(%eax),%ebx\n\t"
   2405 		"movl (%eax),%eax");
   2406       break;
   2407     }
   2408 }
   2409 
/* Conditional goto: branch if the 64-bit top-of-stack is non-zero,
   popping a new top either way.  The jne's rel32 operand location and
   size are reported via *OFFSET_P/*SIZE_P for later patching.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	    "mov %eax,%ecx\n\t"
	    "or %ebx,%ecx\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t"
	    "cmpl $0,%ecx\n\t"
	    /* Don't trust the assembler to choose the right jump */
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
   2427 
/* Unconditional goto: emit a jmp whose rel32 operand (at offset 1,
   4 bytes) is patched later via i386_write_goto_address.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	    /* Don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
   2439 
   2440 static void
   2441 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
   2442 {
   2443   int diff = (to - (from + size));
   2444   unsigned char buf[sizeof (int)];
   2445 
   2446   /* We're only doing 4-byte sizes at the moment.  */
   2447   if (size != 4)
   2448     {
   2449       emit_error = 1;
   2450       return;
   2451     }
   2452 
   2453   memcpy (buf, &diff, sizeof (int));
   2454   target_write_memory (from, buf, sizeof (int));
   2455 }
   2456 
   2457 static void
   2458 i386_emit_const (LONGEST num)
   2459 {
   2460   unsigned char buf[16];
   2461   int i, hi, lo;
   2462   CORE_ADDR buildaddr = current_insn_ptr;
   2463 
   2464   i = 0;
   2465   buf[i++] = 0xb8; /* mov $<n>,%eax */
   2466   lo = num & 0xffffffff;
   2467   memcpy (&buf[i], &lo, sizeof (lo));
   2468   i += 4;
   2469   hi = ((num >> 32) & 0xffffffff);
   2470   if (hi)
   2471     {
   2472       buf[i++] = 0xbb; /* mov $<n>,%ebx */
   2473       memcpy (&buf[i], &hi, sizeof (hi));
   2474       i += 4;
   2475     }
   2476   else
   2477     {
   2478       buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
   2479     }
   2480   append_insns (&buildaddr, i, buf);
   2481   current_insn_ptr = buildaddr;
   2482 }
   2483 
   2484 static void
   2485 i386_emit_call (CORE_ADDR fn)
   2486 {
   2487   unsigned char buf[16];
   2488   int i, offset;
   2489   CORE_ADDR buildaddr;
   2490 
   2491   buildaddr = current_insn_ptr;
   2492   i = 0;
   2493   buf[i++] = 0xe8; /* call <reladdr> */
   2494   offset = ((int) fn) - (buildaddr + 5);
   2495   memcpy (buf + 1, &offset, 4);
   2496   append_insns (&buildaddr, 5, buf);
   2497   current_insn_ptr = buildaddr;
   2498 }
   2499 
static void
i386_emit_reg (int reg)
{
  /* Emit code to fetch raw register number REG by calling the
     get_raw_reg helper with (raw_regs base, REG) as stack arguments;
     the 32-bit result is left in %eax with %ebx cleared.  */
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	    "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	    "mov %eax,4(%esp)\n\t"
	    "mov 8(%ebp),%eax\n\t"
	    "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
   2525 
/* Pop the value stack: reload the cached 64-bit top (%ebx:%eax) from
   the in-memory stack.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	    "pop %eax\n\t"
	    "pop %ebx");
}
   2533 
/* Flush the cached 64-bit top (%ebx:%eax) onto the in-memory stack,
   high half first, making room to cache a new top.  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	    "push %ebx\n\t"
	    "push %eax");
}
   2541 
/* Zero-extend the cached top-of-stack value from ARG bits: mask the
   low half and clear the high half (%ebx).  Unsupported widths set
   emit_error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		"and $0xff,%eax\n\t"
		"xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		"and $0xffff,%eax\n\t"
		"xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		"xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
   2565 
/* Exchange the cached 64-bit top (%ebx:%eax) with the next entry on
   the in-memory stack.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	    "mov %eax,%ecx\n\t"
	    "mov %ebx,%edx\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t"
	    "push %edx\n\t"
	    "push %ecx");
}
   2577 
   2578 static void
   2579 i386_emit_stack_adjust (int n)
   2580 {
   2581   unsigned char buf[16];
   2582   int i;
   2583   CORE_ADDR buildaddr = current_insn_ptr;
   2584 
   2585   i = 0;
   2586   buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
   2587   buf[i++] = 0x64;
   2588   buf[i++] = 0x24;
   2589   buf[i++] = n * 8;
   2590   append_insns (&buildaddr, i, buf);
   2591   current_insn_ptr = buildaddr;
   2592 }
   2593 
   2594 /* FN's prototype is `LONGEST(*fn)(int)'.  */
   2595 
static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  /* Emit code to call FN with ARG1 as its single stack argument.
     FN's 64-bit result comes back in %eax:%edx; the high half is
     moved into %ebx so the result becomes the cached stack top.  */
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	    /* Reserve a bit of stack space.  */
	    "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	    "mov %edx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
   2621 
   2622 /* FN's prototype is `void(*fn)(int,LONGEST)'.  */
   2623 
static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  /* Emit code to call FN (arg1, top-of-stack) with all three words
     passed on the stack; the cached top (%ebx:%eax) is preserved
     across the call via a saved copy of %eax (%ebx is not clobbered
     by the emitted sequence).  */
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	    /* Preserve %eax only; we don't have to worry about %ebx.  */
	    "push %eax\n\t"
	    /* Reserve a bit of stack space for arguments.  */
	    "sub $0x10,%esp\n\t"
	    /* Copy "top" to the second argument position.  (Note that
	       we can't assume function won't scribble on its
	       arguments, so don't try to restore from this.)  */
	    "mov %eax,4(%esp)\n\t"
	    "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	    "lea 0x10(%esp),%esp\n\t"
	    /* Restore original stack top.  */
	    "pop %eax");
}
   2657 
   2658 
/* 64-bit "equal" comparison-goto: branch when the next stack entry
   equals the cached top (%ebx:%eax); both operands are dropped either
   way.  *OFFSET_P/*SIZE_P report the jmp's rel32 location for later
   patching.  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* 18 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
   2683 
/* 64-bit "not equal" comparison-goto: branch when the next stack
   entry differs from the cached top (%ebx:%eax); operands are dropped
   either way.  *OFFSET_P/*SIZE_P report the jmp's rel32 location for
   later patching.  */

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* 18 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
   2709 
/* 64-bit signed "less than" comparison-goto: branch when the next
   stack entry is less than the cached top (%ebx:%eax), comparing high
   halves then low halves; operands are dropped either way.
   *OFFSET_P/*SIZE_P report the jmp's rel32 location for later
   patching.  */

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* 20 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
   2735 
/* 64-bit signed "less or equal" comparison-goto: branch when the next
   stack entry is <= the cached top (%ebx:%eax); operands are dropped
   either way.  *OFFSET_P/*SIZE_P report the jmp's rel32 location for
   later patching.  */

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* 20 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
   2761 
/* 64-bit signed "greater than" comparison-goto: branch when the next
   stack entry is greater than the cached top (%ebx:%eax); operands
   are dropped either way.  *OFFSET_P/*SIZE_P report the jmp's rel32
   location for later patching.  */

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* 20 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
   2787 
/* 64-bit signed "greater or equal" comparison-goto: branch when the
   next stack entry is >= the cached top (%ebx:%eax); operands are
   dropped either way.  *OFFSET_P/*SIZE_P report the jmp's rel32
   location for later patching.  */

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* 20 = byte offset of the jmp's rel32 within the sequence above.  */
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
   2813 
/* Bytecode-compilation callback table for 32-bit inferiors.  The
   initializers are positional and must stay in struct emit_ops field
   order.  */

static emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
   2854 
   2855 
   2856 emit_ops *
   2857 x86_target::emit_ops ()
   2858 {
   2859 #ifdef __x86_64__
   2860   if (is_64bit_tdesc (current_thread))
   2861     return &amd64_emit_ops;
   2862   else
   2863 #endif
   2864     return &i386_emit_ops;
   2865 }
   2866 
   2867 /* Implementation of target ops method "sw_breakpoint_from_kind".  */
   2868 
const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  /* x86 has a single software breakpoint sequence, so KIND is
     ignored; *SIZE is set to its length.  */
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
   2875 
/* Implementation of linux target ops method
   "low_supports_range_stepping".  Range stepping is always available
   on x86.  */

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}
   2881 
/* Implementation of target ops method "get_ipa_tdesc_idx".  Map the
   current process's target description to the index the in-process
   agent uses to identify it, keyed off the xcr0 value that generated
   the description.  */

int
x86_target::get_ipa_tdesc_idx ()
{
  const target_desc *tdesc = current_process ()->tdesc;

  if (!use_xml)
    {
      /* If USE_XML is false then we should be using one of these target
	 descriptions, see x86_linux_read_description for where we choose
	 one of these.  Both of these descriptions are created from this
	 fixed xcr0 value X86_XSTATE_SSE_MASK.  */
      gdb_assert (tdesc == tdesc_i386_linux_no_xml.get ()
#ifdef __x86_64__
		  || tdesc == tdesc_amd64_linux_no_xml.get ()
#endif /* __x86_64__ */
		  );
      return x86_linux_xcr0_to_tdesc_idx (X86_XSTATE_SSE_MASK);
    }

  /* The xcr0 value and xsave layout value are cached when the target
     description is read.  Grab their cache location, and use the cached
     value to calculate a tdesc index.  */
  std::pair<uint64_t *, x86_xsave_layout *> storage
    = i387_get_xsave_storage ();
  uint64_t xcr0 = *storage.first;

  return x86_linux_xcr0_to_tdesc_idx (xcr0);
}
   2910 
   2911 /* The linux target ops object.  */
   2912 
   2913 linux_process_target *the_linux_target = &the_x86_target;
   2914 
/* One-time x86 low-level initialization: build the fallback ("no
   XML") target descriptions from the fixed SSE xcr0 mask and set up
   the regset info.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml.get (),
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml.get (),
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
   2934