Home | History | Annotate | Line # | Download | only in gdb
amd64-tdep.c revision 1.3.2.1
      1 /* Target-dependent code for AMD64.
      2 
      3    Copyright (C) 2001-2015 Free Software Foundation, Inc.
      4 
      5    Contributed by Jiri Smid, SuSE Labs.
      6 
      7    This file is part of GDB.
      8 
      9    This program is free software; you can redistribute it and/or modify
     10    it under the terms of the GNU General Public License as published by
     11    the Free Software Foundation; either version 3 of the License, or
     12    (at your option) any later version.
     13 
     14    This program is distributed in the hope that it will be useful,
     15    but WITHOUT ANY WARRANTY; without even the implied warranty of
     16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     17    GNU General Public License for more details.
     18 
     19    You should have received a copy of the GNU General Public License
     20    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
     21 
     22 #include "defs.h"
     23 #include "opcode/i386.h"
     24 #include "dis-asm.h"
     25 #include "arch-utils.h"
     26 #include "block.h"
     27 #include "dummy-frame.h"
     28 #include "frame.h"
     29 #include "frame-base.h"
     30 #include "frame-unwind.h"
     31 #include "inferior.h"
     32 #include "infrun.h"
     33 #include "gdbcmd.h"
     34 #include "gdbcore.h"
     35 #include "objfiles.h"
     36 #include "regcache.h"
     37 #include "regset.h"
     38 #include "symfile.h"
     39 #include "disasm.h"
     40 #include "amd64-tdep.h"
     41 #include "i387-tdep.h"
     42 #include "x86-xstate.h"
     43 
     44 #include "features/i386/amd64.c"
     45 #include "features/i386/amd64-avx.c"
     46 #include "features/i386/amd64-mpx.c"
     47 #include "features/i386/amd64-avx512.c"
     48 
     49 #include "features/i386/x32.c"
     50 #include "features/i386/x32-avx.c"
     51 #include "features/i386/x32-avx512.c"
     52 
     53 #include "ax.h"
     54 #include "ax-gdb.h"
     55 
     56 /* Note that the AMD64 architecture was previously known as x86-64.
     57    The latter is (forever) engraved into the canonical system name as
     58    returned by config.guess, and used as the name for the AMD64 port
     59    of GNU/Linux.  The BSD's have renamed their ports to amd64; they
     60    don't like to shout.  For GDB we prefer the amd64_-prefix over the
     61    x86_64_-prefix since it's so much easier to type.  */
     62 
     63 /* Register information.  */
     64 
/* Names of the raw (non-pseudo) registers, indexed by GDB register
   number.  The layout must stay in sync with the AMD64_*_REGNUM
   constants (see amd64-tdep.h).  */
static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};
     82 
/* Names of the AVX %ymm pseudo-registers 0-15.  */
static const char *amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};
     90 
/* Names of the additional %ymm pseudo-registers 16-31 available with
   AVX-512.  */
static const char *amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};
     98 
/* Names of the raw %ymmNh registers backing %ymm0-%ymm15 (the "h"
   suffix presumably denotes the high half not covered by %xmmN —
   cf. the i386/XSAVE layout).  */
static const char *amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};
    106 
/* Names of the raw %ymmNh registers for the AVX-512 range 16-31;
   parallels amd64_ymmh_names above.  */
static const char *amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};
    114 
/* Names of the Intel MPX registers: the four raw bound registers plus
   the bound configuration and status registers.  */
static const char *amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};
    119 
/* Names of the AVX-512 opmask registers %k0-%k7.  */
static const char *amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};
    125 
/* Names of the raw %zmmNh registers for all 32 AVX-512 %zmm
   registers (the "h" suffix presumably denotes the portion above the
   %ymm view — TODO confirm against the XSAVE layout).  */
static const char *amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};
    137 
/* Names of the full AVX-512 %zmm pseudo-registers 0-31.  */
static const char *amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};
    149 
/* Names of the additional %xmm pseudo-registers 16-31 available with
   AVX-512.  */
static const char *amd64_xmm_avx512_names[] = {
    "xmm16",  "xmm17",  "xmm18",  "xmm19",
    "xmm20",  "xmm21",  "xmm22",  "xmm23",
    "xmm24",  "xmm25",  "xmm26",  "xmm27",
    "xmm28",  "xmm29",  "xmm30",  "xmm31"
};
    156 
    157 /* DWARF Register Number Mapping as defined in the System V psABI,
    158    section 3.6.  */
    159 
/* The index into this array is the DWARF register number; the value
   is the corresponding GDB register number, or -1 when there is no
   fixed mapping.  Entry order follows the psABI table and must not be
   rearranged.  */
static int amd64_dwarf_regmap[] =
{
  /* DWARF regs 0-5: General Purpose Registers RAX, RDX, RCX, RBX,
     RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* DWARF reg 6: Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* DWARF reg 7: Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* DWARF regs 8-15: Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM,		/* %r15 */

  /* DWARF reg 16: Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* DWARF regs 17-24: SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* DWARF regs 25-32: Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* DWARF regs 33-40: Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* DWARF regs 41-48: MMX Registers 0 - 7.
     We have to handle those registers specifically, as their register
     number within GDB depends on the target (or they may even not be
     available at all).  */
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* DWARF reg 49: Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* DWARF regs 50-57: Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* DWARF regs 58-61: Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* DWARF regs 62-63: Special Selector Registers.  */
  -1,
  -1,

  /* DWARF regs 64-66: Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

/* Number of entries in the mapping table above; DWARF register
   numbers at or beyond this are unmapped.  */
static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
    241 
    242 /* Convert DWARF register number REG to the appropriate register
    243    number used by GDB.  */
    244 
static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int ymm0_regnum = tdep->ymm0_regnum;
  int regnum = -1;

  /* Look REG up in the psABI mapping table; out-of-range DWARF
     numbers leave REGNUM at -1.  */
  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (regnum == -1)
    warning (_("Unmapped DWARF Register #%d encountered."), reg);
  else if (ymm0_regnum >= 0
	   && i386_xmm_regnum_p (gdbarch, regnum))
    /* On targets with AVX (ymm0_regnum >= 0), redirect an %xmm
       register number to the corresponding %ymm pseudo-register by
       shifting it into the ymm range.  */
    regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);

  return regnum;
}
    263 
    264 /* Map architectural register numbers to gdb register numbers.  */
    265 
/* Map architectural register numbers (the 4-bit encodings used in
   instruction operands, as in modrm/REX) to GDB register numbers.  */
static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

/* Number of entries in amd64_arch_regmap (always 16).  */
static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
    288 
    289 /* Convert architectural register number REG to the appropriate register
    290    number used by GDB.  */
    291 
    292 static int
    293 amd64_arch_reg_to_regnum (int reg)
    294 {
    295   gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
    296 
    297   return amd64_arch_regmap[reg];
    298 }
    299 
    300 /* Register names for byte pseudo-registers.  */
    301 
/* Register names for byte pseudo-registers.  The first 16 entries are
   the low bytes of the 16 general-purpose registers; the final four
   ("ah".."dh") are the second-lowest bytes of %rax, %rbx, %rcx and
   %rdx (see the AMD64_NUM_LOWER_BYTE_REGS special-casing in the
   pseudo-register read/write code).  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers (the entries before "ah" above).  */
#define AMD64_NUM_LOWER_BYTE_REGS 16
    311 
    312 /* Register names for word pseudo-registers.  */
    313 
/* Register names for word pseudo-registers.

   NOTE(review): the empty string at index 7 (the slot that would be
   "sp") looks deliberate, but verify against i386-tdep before
   changing it.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};
    319 
    320 /* Register names for dword pseudo-registers.  */
    321 
/* Register names for dword pseudo-registers: the low 32 bits of the
   general-purpose registers, plus "eip" as the 17th entry.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};
    328 
    329 /* Return the name of register REGNUM.  */
    330 
static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  /* Each predicate tests whether REGNUM falls inside one of the
     pseudo-register ranges; the matching name table is indexed
     relative to that range's first register number.  */
  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_zmm_regnum_p (gdbarch, regnum))
    return amd64_zmm_names[regnum - tdep->zmm0_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];
  else
    /* Anything else (e.g. mmx) is handled by the generic i386
       code.  */
    return i386_pseudo_register_name (gdbarch, regnum);
}
    350 
/* Read pseudo-register REGNUM from REGCACHE and return its contents
   wrapped in a freshly allocated value.  Bytes whose underlying raw
   register cannot be read are marked unavailable in the result rather
   than signalling an error.  */

static struct value *
amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
				  struct regcache *regcache,
				  int regnum)
{
  gdb_byte raw_buf[MAX_REGISTER_SIZE];
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum register_status status;
  struct value *result_value;
  gdb_byte *buf;

  result_value = allocate_value (register_type (gdbarch, regnum));
  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;
  buf = value_contents_raw (result_value);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Special handling for AH, BH, CH, DH: these live at byte
	     offset 1 of the first four GP registers, hence the
	     "raw_buf + 1" below.  */
	  status = regcache_raw_read (regcache,
				      gpnum - AMD64_NUM_LOWER_BYTE_REGS,
				      raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf + 1, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
      else
	{
	  /* Lower byte registers: the least significant byte of the
	     corresponding GP register.  */
	  status = regcache_raw_read (regcache, gpnum, raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      /* Extract (always little endian): the dword register is the low
	 32 bits of the corresponding 64-bit register.  */
      status = regcache_raw_read (regcache, gpnum, raw_buf);
      if (status == REG_VALID)
	memcpy (buf, raw_buf, 4);
      else
	mark_value_bytes_unavailable (result_value, 0,
				      TYPE_LENGTH (value_type (result_value)));
    }
  else
    /* All other pseudo-registers are handled by the common i386
       code.  */
    i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
					  result_value);

  return result_value;
}
    411 
/* Write BUF into pseudo-register REGNUM of REGCACHE.  Byte and dword
   pseudo-registers are implemented with a read-modify-write of the
   underlying 64-bit general-purpose register so the untouched bytes
   are preserved; everything else is delegated to the common i386
   code.  */

static void
amd64_pseudo_register_write (struct gdbarch *gdbarch,
			     struct regcache *regcache,
			     int regnum, const gdb_byte *buf)
{
  gdb_byte raw_buf[MAX_REGISTER_SIZE];
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Read ... AH, BH, CH, DH.  These sit at byte offset 1 of
	     the first four GP registers.  */
	  regcache_raw_read (regcache,
			     gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf + 1, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache,
			      gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	}
      else
	{
	  /* Read ...  */
	  regcache_raw_read (regcache, gpnum, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache, gpnum, raw_buf);
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      /* Read ...  */
      regcache_raw_read (regcache, gpnum, raw_buf);
      /* ... Modify ... (always little endian).  */
      memcpy (raw_buf, buf, 4);
      /* ... Write.  */
      regcache_raw_write (regcache, gpnum, raw_buf);
    }
  else
    i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
}
    459 
    460 
    461 
    463 /* Register classes as defined in the psABI.  */
    464 
enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Merge CLASS1 and CLASS2 following the rules of the System V psABI
   classification algorithm (rules (a) through (f)); the operation is
   symmetric in its arguments.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  enum amd64_reg_class pair[2];
  int i;

  /* Rule (a): equal classes merge to themselves.  */
  if (class1 == class2)
    return class1;

  pair[0] = class1;
  pair[1] = class2;

  /* Rule (b): NO_CLASS merges to whatever the other class is.  */
  for (i = 0; i < 2; i++)
    if (pair[i] == AMD64_NO_CLASS)
      return pair[1 - i];

  /* Rule (c): MEMORY dominates everything that remains.  */
  for (i = 0; i < 2; i++)
    if (pair[i] == AMD64_MEMORY)
      return AMD64_MEMORY;

  /* Rule (d): INTEGER dominates the remaining non-memory classes.  */
  for (i = 0; i < 2; i++)
    if (pair[i] == AMD64_INTEGER)
      return AMD64_INTEGER;

  /* Rule (e): any x87-family class forces the pair into MEMORY.  */
  for (i = 0; i < 2; i++)
    if (pair[i] == AMD64_X87 || pair[i] == AMD64_X87UP
	|| pair[i] == AMD64_COMPLEX_X87)
      return AMD64_MEMORY;

  /* Rule (f): otherwise the result is SSE.  */
  return AMD64_SSE;
}
    512 
    513 static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);
    514 
    515 /* Return non-zero if TYPE is a non-POD structure or union type.  */
    516 
    517 static int
    518 amd64_non_pod_p (struct type *type)
    519 {
    520   /* ??? A class with a base class certainly isn't POD, but does this
    521      catch all non-POD structure types?  */
    522   if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
    523     return 1;
    524 
    525   return 0;
    526 }
    527 
    528 /* Classify TYPE according to the rules for aggregate (structures and
    529    arrays) and union types, and store the result in CLASS.  */
    530 
static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
{
  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
    {
      theclass[0] = theclass[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered. The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, theclass);
      /* When the array spills into the second eightbyte but the
	 element classification only set the first class, the second
	 eightbyte holds more elements of the same class.  */
      if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS)
	theclass[1] = theclass[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  /* Eightbyte (0 or 1) in which this field starts.  */
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];
	  int bitsize = TYPE_FIELD_BITSIZE (type, i);
	  int endpos;

	  /* A zero TYPE_FIELD_BITSIZE means the field is not a bit
	     field; use the full bit width of its type.  */
	  if (bitsize == 0)
	    bitsize = TYPE_LENGTH (subtype) * 8;
	  /* Eightbyte in which the field's last bit falls.  */
	  endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
	  if (bitsize <= 64 && pos == 0 && endpos == 1)
	    /* This is a bit of an odd case:  We have a field that would
	       normally fit in one of the two eightbytes, except that
	       it is placed in a way that this field straddles them.
	       This has been seen with a structure containing an array.

	       The ABI is a bit unclear in this case, but we assume that
	       this field's class (stored in subclass[0]) must also be merged
	       into class[1].  In other words, our field has a piece stored
	       in the second eight-byte, and thus its class applies to
	       the second eight-byte as well.

	       In the case where the field length exceeds 8 bytes,
	       it should not be necessary to merge the field class
	       into class[1].  As LEN > 8, subclass[1] is necessarily
	       different from AMD64_NO_CLASS.  If subclass[1] is equal
	       to subclass[0], then the normal class[1]/subclass[1]
	       merging will take care of everything.  For subclass[1]
	       to be different from subclass[0], I can only see the case
	       where we have a SSE/SSEUP or X87/X87UP pair, which both
	       use up all 16 bytes of the aggregate, and are already
	       handled just fine (because each portion sits on its own
	       8-byte).  */
	    theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
	  if (pos == 0)
	    theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done:  */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
    theclass[0] = theclass[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (theclass[0] == AMD64_SSEUP)
    theclass[0] = AMD64_SSE;
  if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
    theclass[1] = AMD64_SSE;
}
    631 
    632 /* Classify TYPE, and store the result in CLASS.  */
    633 
static void
amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  /* Anything not matched below stays NO_CLASS in both eightbytes.  */
  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    theclass[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    theclass[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;

  /* Arguments of complex T where T is one of the types float or
     double get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };

  */
  else if (code == TYPE_CODE_COMPLEX && len == 8)
    /* complex float: both 4-byte parts fit in one eightbyte.  */
    theclass[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    /* complex double: one 8-byte part per eightbyte.  */
    theclass[0] = theclass[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    theclass[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, theclass);
}
    698 
/* Handle the return value convention for TYPE following the System V
   psABI classification: read the returned value into READBUF and/or
   write WRITEBUF into the return registers (exactly one of the two
   buffers may be non-NULL).  Returns which return-value convention
   applies.  */

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  enum amd64_reg_class theclass[2];
  int len = TYPE_LENGTH (type);
  /* Successive INTEGER-class eightbytes go to %rax then %rdx.  */
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  /* Successive SSE-class eightbytes go to %xmm0 then %xmm1.  */
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, theclass);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (theclass[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
        returned in %st0 and the imaginary part in %st1.  */
  if (theclass[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
	  regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
	  regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (theclass[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  /* Walk the (at most two) eightbytes; I indexes both THECLASS and the
     8-byte-aligned chunks of READBUF/WRITEBUF.  */
  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (theclass[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
             of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
             stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
             with the previous X87 value in %st0.  Forcing LEN to 2
             makes the transfer below copy exactly the 2-byte exponent
             part at offset 8 and terminates the loop.  */
	  gdb_assert (i > 0 && theclass[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      /* Transfer at most 8 bytes of this eightbyte to/from the chosen
	 register.  */
      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
    831 
    832 
/* Push the NARGS arguments in ARGS into registers and onto the stack
   following the System V AMD64 psABI parameter-passing rules.  SP is
   the current stack pointer; STRUCT_RETURN non-zero means the first
   integer register (%rdi) is reserved for the hidden pointer to the
   caller-allocated return-value buffer.  Also sets %rax to the number
   of SSE registers used, as required for variadic callees.  Returns
   the new, 16-byte aligned, stack pointer.  */

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  /* Integer argument registers, in the psABI's allocation order.  */
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,		/* %rdi */
    AMD64_RSI_REGNUM,		/* %rsi */
    AMD64_RDX_REGNUM,		/* %rdx */
    AMD64_RCX_REGNUM,		/* %rcx */
    AMD64_R8_REGNUM,		/* %r8 */
    AMD64_R9_REGNUM		/* %r9 */
  };
  /* SSE argument registers, in allocation order.  */
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  /* Arguments that spill to the stack, collected in order.  */
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  int num_stack_args = 0;
  /* Total number of stack eightbytes the spilled arguments need.  */
  int num_elements = 0;
  int element = 0;
  /* Index of the next free integer/SSE argument register.  */
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class theclass[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, theclass);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (theclass[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (theclass[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  /* Copy the argument into registers one eightbyte at a
	     time, consuming a register per eightbyte.  */
	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (theclass[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  /* SSEUP extends the previous SSE eightbyte into the
		     upper half of the same register.  */
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      /* Zero-pad the final, possibly partial, eightbyte.  */
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
    965 
    966 static CORE_ADDR
    967 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
    968 		       struct regcache *regcache, CORE_ADDR bp_addr,
    969 		       int nargs, struct value **args,	CORE_ADDR sp,
    970 		       int struct_return, CORE_ADDR struct_addr)
    971 {
    972   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
    973   gdb_byte buf[8];
    974 
    975   /* Pass arguments.  */
    976   sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
    977 
    978   /* Pass "hidden" argument".  */
    979   if (struct_return)
    980     {
    981       store_unsigned_integer (buf, 8, byte_order, struct_addr);
    982       regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
    983     }
    984 
    985   /* Store return address.  */
    986   sp -= 8;
    987   store_unsigned_integer (buf, 8, byte_order, bp_addr);
    988   write_memory (sp, buf, 8);
    989 
    990   /* Finally, update the stack pointer...  */
    991   store_unsigned_integer (buf, 8, byte_order, sp);
    992   regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
    993 
    994   /* ...and fake a frame pointer.  */
    995   regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
    996 
    997   return sp + 16;
    998 }
    999 
   1000 /* Displaced instruction handling.  */
   1002 
/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.
   All offsets below are byte offsets from the start of RAW_INSN.  */

struct amd64_insn
{
  /* The number of opcode bytes (1, 2 or 3).  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};
   1020 
/* State produced by amd64_displaced_step_copy_insn and consumed by
   amd64_displaced_step_fixup.  */

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used;			/* Non-zero if a scratch register was used.  */
  int tmp_regno;		/* GDB register number of the scratch reg.  */
  ULONGEST tmp_save;		/* Its original value, to be restored.  */

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};
   1038 
   1039 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   1040    ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   1041    at which point delete these in favor of libopcodes' versions).  */
   1042 
/* Indexed by the first opcode byte: non-zero if that one-byte opcode
   is followed by a ModRM byte.  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
   1065 
/* Indexed by the opcode byte following the 0x0f escape: non-zero if
   that two-byte opcode is followed by a ModRM byte.  */

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
   1088 
   1089 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
   1090 
   1091 static int
   1092 rex_prefix_p (gdb_byte pfx)
   1093 {
   1094   return REX_PREFIX_P (pfx);
   1095 }
   1096 
   1097 /* Skip the legacy instruction prefixes in INSN.
   1098    We assume INSN is properly sentineled so we don't have to worry
   1099    about falling off the end of the buffer.  */
   1100 
   1101 static gdb_byte *
   1102 amd64_skip_prefixes (gdb_byte *insn)
   1103 {
   1104   while (1)
   1105     {
   1106       switch (*insn)
   1107 	{
   1108 	case DATA_PREFIX_OPCODE:
   1109 	case ADDR_PREFIX_OPCODE:
   1110 	case CS_PREFIX_OPCODE:
   1111 	case DS_PREFIX_OPCODE:
   1112 	case ES_PREFIX_OPCODE:
   1113 	case FS_PREFIX_OPCODE:
   1114 	case GS_PREFIX_OPCODE:
   1115 	case SS_PREFIX_OPCODE:
   1116 	case LOCK_PREFIX_OPCODE:
   1117 	case REPE_PREFIX_OPCODE:
   1118 	case REPNE_PREFIX_OPCODE:
   1119 	  ++insn;
   1120 	  continue;
   1121 	default:
   1122 	  break;
   1123 	}
   1124       break;
   1125     }
   1126 
   1127   return insn;
   1128 }
   1129 
   1130 /* Return an integer register (other than RSP) that is unused as an input
   1131    operand in INSN.
   1132    In order to not require adding a rex prefix if the insn doesn't already
   1133    have one, the result is restricted to RAX ... RDI, sans RSP.
   1134    The register numbering of the result follows architecture ordering,
   1135    e.g. RDI = 7.  */
   1136 
   1137 static int
   1138 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
   1139 {
   1140   /* 1 bit for each reg */
   1141   int used_regs_mask = 0;
   1142 
   1143   /* There can be at most 3 int regs used as inputs in an insn, and we have
   1144      7 to choose from (RAX ... RDI, sans RSP).
   1145      This allows us to take a conservative approach and keep things simple.
   1146      E.g. By avoiding RAX, we don't have to specifically watch for opcodes
   1147      that implicitly specify RAX.  */
   1148 
   1149   /* Avoid RAX.  */
   1150   used_regs_mask |= 1 << EAX_REG_NUM;
   1151   /* Similarily avoid RDX, implicit operand in divides.  */
   1152   used_regs_mask |= 1 << EDX_REG_NUM;
   1153   /* Avoid RSP.  */
   1154   used_regs_mask |= 1 << ESP_REG_NUM;
   1155 
   1156   /* If the opcode is one byte long and there's no ModRM byte,
   1157      assume the opcode specifies a register.  */
   1158   if (details->opcode_len == 1 && details->modrm_offset == -1)
   1159     used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
   1160 
   1161   /* Mark used regs in the modrm/sib bytes.  */
   1162   if (details->modrm_offset != -1)
   1163     {
   1164       int modrm = details->raw_insn[details->modrm_offset];
   1165       int mod = MODRM_MOD_FIELD (modrm);
   1166       int reg = MODRM_REG_FIELD (modrm);
   1167       int rm = MODRM_RM_FIELD (modrm);
   1168       int have_sib = mod != 3 && rm == 4;
   1169 
   1170       /* Assume the reg field of the modrm byte specifies a register.  */
   1171       used_regs_mask |= 1 << reg;
   1172 
   1173       if (have_sib)
   1174 	{
   1175 	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
   1176 	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
   1177 	  used_regs_mask |= 1 << base;
   1178 	  used_regs_mask |= 1 << idx;
   1179 	}
   1180       else
   1181 	{
   1182 	  used_regs_mask |= 1 << rm;
   1183 	}
   1184     }
   1185 
   1186   gdb_assert (used_regs_mask < 256);
   1187   gdb_assert (used_regs_mask != 255);
   1188 
   1189   /* Finally, find a free reg.  */
   1190   {
   1191     int i;
   1192 
   1193     for (i = 0; i < 8; ++i)
   1194       {
   1195 	if (! (used_regs_mask & (1 << i)))
   1196 	  return i;
   1197       }
   1198 
   1199     /* We shouldn't get here.  */
   1200     internal_error (__FILE__, __LINE__, _("unable to find free reg"));
   1201   }
   1202 }
   1203 
   1204 /* Extract the details of INSN that we need.  */
   1205 
   1206 static void
   1207 amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
   1208 {
   1209   gdb_byte *start = insn;
   1210   int need_modrm;
   1211 
   1212   details->raw_insn = insn;
   1213 
   1214   details->opcode_len = -1;
   1215   details->rex_offset = -1;
   1216   details->opcode_offset = -1;
   1217   details->modrm_offset = -1;
   1218 
   1219   /* Skip legacy instruction prefixes.  */
   1220   insn = amd64_skip_prefixes (insn);
   1221 
   1222   /* Skip REX instruction prefix.  */
   1223   if (rex_prefix_p (*insn))
   1224     {
   1225       details->rex_offset = insn - start;
   1226       ++insn;
   1227     }
   1228 
   1229   details->opcode_offset = insn - start;
   1230 
   1231   if (*insn == TWO_BYTE_OPCODE_ESCAPE)
   1232     {
   1233       /* Two or three-byte opcode.  */
   1234       ++insn;
   1235       need_modrm = twobyte_has_modrm[*insn];
   1236 
   1237       /* Check for three-byte opcode.  */
   1238       switch (*insn)
   1239 	{
   1240 	case 0x24:
   1241 	case 0x25:
   1242 	case 0x38:
   1243 	case 0x3a:
   1244 	case 0x7a:
   1245 	case 0x7b:
   1246 	  ++insn;
   1247 	  details->opcode_len = 3;
   1248 	  break;
   1249 	default:
   1250 	  details->opcode_len = 2;
   1251 	  break;
   1252 	}
   1253     }
   1254   else
   1255     {
   1256       /* One-byte opcode.  */
   1257       need_modrm = onebyte_has_modrm[*insn];
   1258       details->opcode_len = 1;
   1259     }
   1260 
   1261   if (need_modrm)
   1262     {
   1263       ++insn;
   1264       details->modrm_offset = insn - start;
   1265     }
   1266 }
   1267 
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  /* Cursor into the raw insn, starting at the ModRM byte.  */
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.	(DISP itself is not used further
     here; the displacement stays in the insn unchanged.)  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
					  dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  /* Save the scratch register's original value so the fixup phase can
     restore it after the single-step.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  mod = 10 with rm = the
     scratch register selects [reg + disp32], so the existing 32-bit
     displacement can stay in place unchanged.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  /* Load the scratch register with the value %rip would have had.  */
  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
   1327 
   1328 static void
   1329 fixup_displaced_copy (struct gdbarch *gdbarch,
   1330 		      struct displaced_step_closure *dsc,
   1331 		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
   1332 {
   1333   const struct amd64_insn *details = &dsc->insn_details;
   1334 
   1335   if (details->modrm_offset != -1)
   1336     {
   1337       gdb_byte modrm = details->raw_insn[details->modrm_offset];
   1338 
   1339       if ((modrm & 0xc7) == 0x05)
   1340 	{
   1341 	  /* The insn uses rip-relative addressing.
   1342 	     Deal with it.  */
   1343 	  fixup_riprel (gdbarch, dsc, from, to, regs);
   1344 	}
   1345     }
   1346 }
   1347 
/* The gdbarch displaced_step_copy_insn method.  Copy the insn at FROM
   into the scratch area at TO, adjusting it as needed (in particular
   rewriting %rip-relative addressing), and return a closure recording
   what was done so amd64_displaced_step_fixup can complete the step.  */

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  /* insn_buf is a variable-length trailing array; allocate room for the
     insn bytes plus the sentinel bytes.  */
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.	 */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}
   1399 
   1400 static int
   1401 amd64_absolute_jmp_p (const struct amd64_insn *details)
   1402 {
   1403   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1404 
   1405   if (insn[0] == 0xff)
   1406     {
   1407       /* jump near, absolute indirect (/4) */
   1408       if ((insn[1] & 0x38) == 0x20)
   1409 	return 1;
   1410 
   1411       /* jump far, absolute indirect (/5) */
   1412       if ((insn[1] & 0x38) == 0x28)
   1413 	return 1;
   1414     }
   1415 
   1416   return 0;
   1417 }
   1418 
   1419 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */
   1420 
   1421 static int
   1422 amd64_jmp_p (const struct amd64_insn *details)
   1423 {
   1424   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1425 
   1426   /* jump short, relative.  */
   1427   if (insn[0] == 0xeb)
   1428     return 1;
   1429 
   1430   /* jump near, relative.  */
   1431   if (insn[0] == 0xe9)
   1432     return 1;
   1433 
   1434   return amd64_absolute_jmp_p (details);
   1435 }
   1436 
   1437 static int
   1438 amd64_absolute_call_p (const struct amd64_insn *details)
   1439 {
   1440   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1441 
   1442   if (insn[0] == 0xff)
   1443     {
   1444       /* Call near, absolute indirect (/2) */
   1445       if ((insn[1] & 0x38) == 0x10)
   1446 	return 1;
   1447 
   1448       /* Call far, absolute indirect (/3) */
   1449       if ((insn[1] & 0x38) == 0x18)
   1450 	return 1;
   1451     }
   1452 
   1453   return 0;
   1454 }
   1455 
   1456 static int
   1457 amd64_ret_p (const struct amd64_insn *details)
   1458 {
   1459   /* NOTE: gcc can emit "repz ; ret".  */
   1460   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1461 
   1462   switch (insn[0])
   1463     {
   1464     case 0xc2: /* ret near, pop N bytes */
   1465     case 0xc3: /* ret near */
   1466     case 0xca: /* ret far, pop N bytes */
   1467     case 0xcb: /* ret far */
   1468     case 0xcf: /* iret */
   1469       return 1;
   1470 
   1471     default:
   1472       return 0;
   1473     }
   1474 }
   1475 
   1476 static int
   1477 amd64_call_p (const struct amd64_insn *details)
   1478 {
   1479   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1480 
   1481   if (amd64_absolute_call_p (details))
   1482     return 1;
   1483 
   1484   /* call near, relative */
   1485   if (insn[0] == 0xe8)
   1486     return 1;
   1487 
   1488   return 0;
   1489 }
   1490 
   1491 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
   1492    length in bytes.  Otherwise, return zero.  */
   1493 
   1494 static int
   1495 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
   1496 {
   1497   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1498 
   1499   if (insn[0] == 0x0f && insn[1] == 0x05)
   1500     {
   1501       *lengthp = 2;
   1502       return 1;
   1503     }
   1504 
   1505   return 0;
   1506 }
   1507 
   1508 /* Classify the instruction at ADDR using PRED.
   1509    Throw an error if the memory can't be read.  */
   1510 
   1511 static int
   1512 amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
   1513 			int (*pred) (const struct amd64_insn *))
   1514 {
   1515   struct amd64_insn details;
   1516   gdb_byte *buf;
   1517   int len, classification;
   1518 
   1519   len = gdbarch_max_insn_length (gdbarch);
   1520   buf = alloca (len);
   1521 
   1522   read_code (addr, buf, len);
   1523   amd64_get_insn_details (buf, &details);
   1524 
   1525   classification = pred (&details);
   1526 
   1527   return classification;
   1528 }
   1529 
/* The gdbarch insn_is_call method.  Return non-zero if the insn at
   ADDR is a call instruction.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}
   1537 
/* The gdbarch insn_is_ret method.  Return non-zero if the insn at
   ADDR is a return instruction.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}
   1545 
/* The gdbarch insn_is_jump method.  Return non-zero if the insn at
   ADDR is a jump instruction.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}
   1553 
/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.

   DSC is the closure built by amd64_displaced_step_copy_insn, FROM the
   original insn address, TO the scratch address it was executed at, and
   REGS the inferior's register cache.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_closure *dsc,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf;
  const struct amd64_insn *insn_details = &dsc->insn_details;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: fixup (%s, %s), "
			"insn = 0x%02x 0x%02x ...\n",
			paddress (gdbarch, from), paddress (gdbarch, to),
			insn[0], insn[1]);

  /* If we used a tmp reg, restore it.	*/

  if (dsc->tmp_used)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
			    dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))
    {
      ULONGEST orig_rip;
      int insn_len;

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, the we relocate --- control almost certainly
	 doesn't belong in the displaced copy.	Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.	*/
      if (amd64_syscall_p (insn_details, &insn_len)
	  && orig_rip != to + insn_len
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.
	     Fixup ensures its a nop, we add one to the length for it.  */
	  && orig_rip != to + insn_len + 1)
	{
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: syscall changed %%rip; "
				"not relocating\n");
	}
      else
	{
	  /* Shift %rip back by the displacement we applied.  */
	  ULONGEST rip = orig_rip - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: "
				"relocated %%rip from %s to %s\n",
				paddress (gdbarch, orig_rip),
				paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.	 */
  if (amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      /* Keep the adjusted return address within 64 bits.  */
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: relocated return addr at %s "
			    "to %s\n",
			    paddress (gdbarch, rsp),
			    paddress (gdbarch, retaddr));
    }
}
   1679 
   1680 /* If the instruction INSN uses RIP-relative addressing, return the
   1681    offset into the raw INSN where the displacement to be adjusted is
   1682    found.  Returns 0 if the instruction doesn't use RIP-relative
   1683    addressing.  */
   1684 
   1685 static int
   1686 rip_relative_offset (struct amd64_insn *insn)
   1687 {
   1688   if (insn->modrm_offset != -1)
   1689     {
   1690       gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
   1691 
   1692       if ((modrm & 0xc7) == 0x05)
   1693 	{
   1694 	  /* The displacement is found right after the ModRM byte.  */
   1695 	  return insn->modrm_offset + 1;
   1696 	}
   1697     }
   1698 
   1699   return 0;
   1700 }
   1701 
/* Write the LEN bytes in BUF to target memory at *TO, then advance
   *TO past them.  NOTE: the result of target_write_memory is ignored
   here.  */

static void
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
   1708 
   1709 static void
   1710 amd64_relocate_instruction (struct gdbarch *gdbarch,
   1711 			    CORE_ADDR *to, CORE_ADDR oldloc)
   1712 {
   1713   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
   1714   int len = gdbarch_max_insn_length (gdbarch);
   1715   /* Extra space for sentinels.  */
   1716   int fixup_sentinel_space = len;
   1717   gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
   1718   struct amd64_insn insn_details;
   1719   int offset = 0;
   1720   LONGEST rel32, newrel;
   1721   gdb_byte *insn;
   1722   int insn_length;
   1723 
   1724   read_memory (oldloc, buf, len);
   1725 
   1726   /* Set up the sentinel space so we don't have to worry about running
   1727      off the end of the buffer.  An excessive number of leading prefixes
   1728      could otherwise cause this.  */
   1729   memset (buf + len, 0, fixup_sentinel_space);
   1730 
   1731   insn = buf;
   1732   amd64_get_insn_details (insn, &insn_details);
   1733 
   1734   insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
   1735 
   1736   /* Skip legacy instruction prefixes.  */
   1737   insn = amd64_skip_prefixes (insn);
   1738 
   1739   /* Adjust calls with 32-bit relative addresses as push/jump, with
   1740      the address pushed being the location where the original call in
   1741      the user program would return to.  */
   1742   if (insn[0] == 0xe8)
   1743     {
   1744       gdb_byte push_buf[16];
   1745       unsigned int ret_addr;
   1746 
   1747       /* Where "ret" in the original code will return to.  */
   1748       ret_addr = oldloc + insn_length;
   1749       push_buf[0] = 0x68; /* pushq $...  */
   1750       store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
   1751       /* Push the push.  */
   1752       append_insns (to, 5, push_buf);
   1753 
   1754       /* Convert the relative call to a relative jump.  */
   1755       insn[0] = 0xe9;
   1756 
   1757       /* Adjust the destination offset.  */
   1758       rel32 = extract_signed_integer (insn + 1, 4, byte_order);
   1759       newrel = (oldloc - *to) + rel32;
   1760       store_signed_integer (insn + 1, 4, byte_order, newrel);
   1761 
   1762       if (debug_displaced)
   1763 	fprintf_unfiltered (gdb_stdlog,
   1764 			    "Adjusted insn rel32=%s at %s to"
   1765 			    " rel32=%s at %s\n",
   1766 			    hex_string (rel32), paddress (gdbarch, oldloc),
   1767 			    hex_string (newrel), paddress (gdbarch, *to));
   1768 
   1769       /* Write the adjusted jump into its displaced location.  */
   1770       append_insns (to, 5, insn);
   1771       return;
   1772     }
   1773 
   1774   offset = rip_relative_offset (&insn_details);
   1775   if (!offset)
   1776     {
   1777       /* Adjust jumps with 32-bit relative addresses.  Calls are
   1778 	 already handled above.  */
   1779       if (insn[0] == 0xe9)
   1780 	offset = 1;
   1781       /* Adjust conditional jumps.  */
   1782       else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
   1783 	offset = 2;
   1784     }
   1785 
   1786   if (offset)
   1787     {
   1788       rel32 = extract_signed_integer (insn + offset, 4, byte_order);
   1789       newrel = (oldloc - *to) + rel32;
   1790       store_signed_integer (insn + offset, 4, byte_order, newrel);
   1791       if (debug_displaced)
   1792 	fprintf_unfiltered (gdb_stdlog,
   1793 			    "Adjusted insn rel32=%s at %s to"
   1794 			    " rel32=%s at %s\n",
   1795 			    hex_string (rel32), paddress (gdbarch, oldloc),
   1796 			    hex_string (newrel), paddress (gdbarch, *to));
   1797     }
   1798 
   1799   /* Write the adjusted instruction into its displaced location.  */
   1800   append_insns (to, insn_length, buf);
   1801 }
   1802 
   1803 
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

/* Unwind cache for a single amd64 frame, filled in lazily by the
   prologue analyzer and the unwinders below.  */

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  /* Non-zero once BASE has been successfully computed.  */
  int base_p;
  /* Amount to add to the current %rsp to reach the frame base while
     the prologue is still executing (starts at -8, bumped by pushes).  */
  CORE_ADDR sp_offset;
  /* Start address of the function this cache describes.  */
  CORE_ADDR pc;

  /* Saved registers.  Entries start out as offsets from BASE and are
     later converted to absolute addresses; -1 means "not saved".  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  /* Value of %rsp in the calling frame.  */
  CORE_ADDR saved_sp;
  /* Register in which stack-realignment code saved the original %rsp,
     or -1 if no such sequence was recognized.  */
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};
   1824 
   1825 /* Initialize a frame cache.  */
   1826 
   1827 static void
   1828 amd64_init_frame_cache (struct amd64_frame_cache *cache)
   1829 {
   1830   int i;
   1831 
   1832   /* Base address.  */
   1833   cache->base = 0;
   1834   cache->base_p = 0;
   1835   cache->sp_offset = -8;
   1836   cache->pc = 0;
   1837 
   1838   /* Saved registers.  We initialize these to -1 since zero is a valid
   1839      offset (that's where %rbp is supposed to be stored).
   1840      The values start out as being offsets, and are later converted to
   1841      addresses (at which point -1 is interpreted as an address, still meaning
   1842      "invalid").  */
   1843   for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
   1844     cache->saved_regs[i] = -1;
   1845   cache->saved_sp = 0;
   1846   cache->saved_sp_reg = -1;
   1847 
   1848   /* Frameless until proven otherwise.  */
   1849   cache->frameless_p = 1;
   1850 }
   1851 
   1852 /* Allocate and initialize a frame cache.  */
   1853 
   1854 static struct amd64_frame_cache *
   1855 amd64_alloc_frame_cache (void)
   1856 {
   1857   struct amd64_frame_cache *cache;
   1858 
   1859   cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
   1860   amd64_init_frame_cache (cache);
   1861   return cache;
   1862 }
   1863 
   1864 /* GCC 4.4 and later, can put code in the prologue to realign the
   1865    stack pointer.  Check whether PC points to such code, and update
   1866    CACHE accordingly.  Return the first instruction after the code
   1867    sequence or CURRENT_PC, whichever is smaller.  If we don't
   1868    recognize the code, return PC.  */
   1869 
static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

     	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
     	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  /* 18 bytes is enough for the longest recognized sequence (REX push
     + 5-byte lea + 7-byte and + REX-prefixed push).  */
  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_code (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the andq is: %rsp is only clobbered once execution
     passes this point (see the CURRENT_PC test below).  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Record the register holding the pre-alignment %rsp only if
     execution has already passed the andq that clobbers %rsp.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  /* OFFSET points at the final pushq's ModRM byte; +2 steps past its
     8-bit displacement to the first instruction after the sequence.  */
  return min (pc + offset + 2, current_pc);
}
   2018 
   2019 /* Similar to amd64_analyze_stack_align for x32.  */
   2020 
static CORE_ADDR
amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			       struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	   or

		[addr32] leal  8(%rsp), %reg
		andl  $-XXX, %esp
		[addr32] pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	   or

		pushq %reg
		[addr32] leal  16(%rsp), %reg
		andl  $-XXX, %esp
		[addr32] pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

     	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
     	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp

     "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:

     	0x83 0xe4 0xf0			andl $-16, %esp
     	0x81 0xe4 0x00 0xff 0xff 0xff	andl $-256, %esp
   */

  /* One byte more than the 64-bit variant, to make room for the
     optional addr32 (0x67) prefixes.  */
  gdb_byte buf[19];
  int reg, r;
  int offset, offset_and;

  /* NOTE(review): the 64-bit variant reads via target_read_code;
     presumably this could too — confirm before changing.  */
  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Skip optional addr32 prefix.  */
  offset = buf[0] == 0x67 ? 1 : 0;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg".  */
  if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
      && buf[offset + 1] == 0x8d
      && buf[offset + 3] == 0x24
      && buf[offset + 4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	reg += 8;

      offset += 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[offset] & 0xf6) == 0x40
	  && (buf[offset + 1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[offset] & 1) != 0)
	    reg = 8;

	  offset += 1;
	}
      else if ((buf[offset] & 0xf8) != 0x50)
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* Skip optional addr32 prefix.  */
      if (buf[offset] == 0x67)
	offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg" or
	 "leal 16(%rsp), %reg".  */
      if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction may be "andq $-XXX, %rsp" or
     "andl $-XXX, %esp".  */
  /* The andl form has no REX.W (0x48) prefix, so back OFFSET up one
     byte so the opcode checks at OFFSET + 1 / OFFSET + 2 line up for
     both encodings.  */
  if (buf[offset] != 0x48)
    offset--;

  if (buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the and is: %rsp/%esp is only clobbered once
     execution passes this point (see the CURRENT_PC test below).  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* Skip optional addr32 prefix.  */
  if (buf[offset] == 0x67)
    offset++;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Record the register holding the pre-alignment stack pointer only
     if execution has already passed the and that clobbers it.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  /* OFFSET points at the final pushq's ModRM byte; +2 steps past its
     8-bit displacement to the first instruction after the sequence.  */
  return min (pc + offset + 2, current_pc);
}
   2200 
   2201 /* Do a limited analysis of the prologue at PC and update CACHE
   2202    accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   2203    address where the analysis stopped.
   2204 
   2205    We will handle only functions beginning with:
   2206 
   2207       pushq %rbp        0x55
   2208       movq %rsp, %rbp   0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
   2209 
   2210    or (for the X32 ABI):
   2211 
   2212       pushq %rbp        0x55
   2213       movl %esp, %ebp   0x89 0xe5 (or 0x8b 0xec)
   2214 
   2215    Any function that doesn't start with one of these sequences will be
   2216    assumed to have no prologue and thus no valid frame pointer in
   2217    %rbp.  */
   2218 
static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
			CORE_ADDR pc, CORE_ADDR current_pc,
			struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* There are two variations of movq %rsp, %rbp.  */
  static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
  static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
  /* Ditto for movl %esp, %ebp.  */
  static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
  static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };

  gdb_byte buf[3];
  gdb_byte op;

  if (current_pc <= pc)
    return current_pc;

  /* First skip any GCC stack-realignment sequence; this may advance
     PC and record the saved-SP register in CACHE.  */
  if (gdbarch_ptr_bit (gdbarch) == 32)
    pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
  else
    pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_code_unsigned_integer (pc, 1, byte_order);

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
         starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
        return current_pc;

      read_code (pc + 1, buf, 3);

      /* Check for `movq %rsp, %rbp'.  */
      if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
	  || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
	{
	  /* OK, we actually have a frame.  */
	  cache->frameless_p = 0;
	  /* Skip the 1-byte push plus the 3-byte movq.  */
	  return pc + 4;
	}

      /* For X32, also check for `movl %esp, %ebp'.  */
      if (gdbarch_ptr_bit (gdbarch) == 32)
	{
	  if (memcmp (buf, mov_esp_ebp_1, 2) == 0
	      || memcmp (buf, mov_esp_ebp_2, 2) == 0)
	    {
	      /* OK, we actually have a frame.  */
	      cache->frameless_p = 0;
	      /* Skip the 1-byte push plus the 2-byte movl.  */
	      return pc + 3;
	    }
	}

      /* Only the push was recognized; stop right after it.  */
      return pc + 1;
    }

  return pc;
}
   2284 
   2285 /* Work around false termination of prologue - GCC PR debug/48827.
   2286 
   2287    START_PC is the first instruction of a function, PC is its minimal already
   2288    determined advanced address.  Function returns PC if it has nothing to do.
   2289 
   2290    84 c0                test   %al,%al
   2291    74 23                je     after
   2292    <-- here is 0 lines advance - the false prologue end marker.
   2293    0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
   2294    0f 29 4d 80          movaps %xmm1,-0x80(%rbp)
   2295    0f 29 55 90          movaps %xmm2,-0x70(%rbp)
   2296    0f 29 5d a0          movaps %xmm3,-0x60(%rbp)
   2297    0f 29 65 b0          movaps %xmm4,-0x50(%rbp)
   2298    0f 29 6d c0          movaps %xmm5,-0x40(%rbp)
   2299    0f 29 75 d0          movaps %xmm6,-0x30(%rbp)
   2300    0f 29 7d e0          movaps %xmm7,-0x20(%rbp)
   2301    after:  */
   2302 
static CORE_ADDR
amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
{
  struct symtab_and_line start_pc_sal, next_sal;
  /* 4 bytes for "test %al,%al; je" plus up to eight movaps stores of
     at most 7 bytes each.  */
  gdb_byte buf[4 + 8 * 7];
  int offset, xmmreg;

  /* Nothing was skipped, so there is nothing to work around.  */
  if (pc == start_pc)
    return pc;

  /* Only apply the workaround when the code is demonstrably from an
     affected compiler: the check below presumably yields the GCC
     minor version, i.e. it requires GCC >= 4.6 — confirm against
     producer_is_gcc_ge_4's contract.  */
  start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
  if (start_pc_sal.symtab == NULL
      || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
	   (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
      || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
    return pc;

  /* The false prologue end shows up as a second SAL on the same
     source line.  */
  next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
  if (next_sal.line != start_pc_sal.line)
    return pc;

  /* START_PC can be from overlayed memory, ignored here.  */
  if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
    return pc;

  /* test %al,%al */
  if (buf[0] != 0x84 || buf[1] != 0xc0)
    return pc;
  /* je AFTER */
  if (buf[2] != 0x74)
    return pc;

  /* Match the eight movaps %xmmN,-0xNN(%rbp) stores in order.  */
  offset = 4;
  for (xmmreg = 0; xmmreg < 8; xmmreg++)
    {
      /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
      if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
          || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
	return pc;

      /* 0b01?????? */
      if ((buf[offset + 2] & 0xc0) == 0x40)
	{
	  /* 8-bit displacement.  */
	  offset += 4;
	}
      /* 0b10?????? */
      else if ((buf[offset + 2] & 0xc0) == 0x80)
	{
	  /* 32-bit displacement.  */
	  offset += 7;
	}
      else
	return pc;
    }

  /* je AFTER — the jump's 8-bit displacement must land exactly after
     the movaps block.  */
  if (offset - 4 != buf[3])
    return pc;

  return next_sal.end;
}
   2365 
   2366 /* Return PC of first real instruction.  */
   2367 
   2368 static CORE_ADDR
   2369 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
   2370 {
   2371   struct amd64_frame_cache cache;
   2372   CORE_ADDR pc;
   2373   CORE_ADDR func_addr;
   2374 
   2375   if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
   2376     {
   2377       CORE_ADDR post_prologue_pc
   2378 	= skip_prologue_using_sal (gdbarch, func_addr);
   2379       struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
   2380 
   2381       /* Clang always emits a line note before the prologue and another
   2382 	 one after.  We trust clang to emit usable line notes.  */
   2383       if (post_prologue_pc
   2384 	  && (cust != NULL
   2385 	      && COMPUNIT_PRODUCER (cust) != NULL
   2386 	      && startswith (COMPUNIT_PRODUCER (cust), "clang ")))
   2387         return max (start_pc, post_prologue_pc);
   2388     }
   2389 
   2390   amd64_init_frame_cache (&cache);
   2391   pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
   2392 			       &cache);
   2393   if (cache.frameless_p)
   2394     return start_pc;
   2395 
   2396   return amd64_skip_xmm_prologue (pc, start_pc);
   2397 }
   2398 
   2399 
   2401 /* Normal frames.  */
   2402 
/* Fill CACHE for THIS_FRAME by locating the function start and
   running the prologue analyzer over it.  Register and memory reads
   here may throw (e.g. when state is unavailable); callers are
   expected to cope with that.  */

static void
amd64_frame_cache_1 (struct frame_info *this_frame,
		     struct amd64_frame_cache *cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];
  int i;

  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
			    cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
	 function, or somewhere half-way its prologue, the function's
	 frame probably hasn't been fully setup yet.  Try to
	 reconstruct the base address for the stack frame by looking
	 at the stack pointer.  For truly "frameless" functions this
	 might work too.  */

      if (cache->saved_sp_reg != -1)
	{
	  /* Stack pointer has been saved.  */
	  get_frame_register (this_frame, cache->saved_sp_reg, buf);
	  cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);

	  /* We're halfway aligning the stack.  */
	  cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
	  cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

	  /* This will be added back below.  */
	  cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
	}
      else
	{
	  /* Reconstruct the base from the current %rsp plus whatever
	     the partially-executed prologue has pushed so far.  */
	  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
	  cache->base = extract_unsigned_integer (buf, 8, byte_order)
			+ cache->sp_offset;
	}
    }
  else
    {
      /* A full frame exists: the base is the current %rbp.  */
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  cache->base_p = 1;
}
   2471 
/* Lazily build (and memoize in *THIS_CACHE) the frame cache for
   THIS_FRAME.  Always returns a cache; if the frame's state was
   unavailable, the cache is left with base_p == 0 so callers report
   the frame as unavailable rather than erroring out.  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  TRY
    {
      amd64_frame_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Swallow NOT_AVAILABLE_ERROR only; anything else is a real
	 error and is re-thrown.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
   2496 
   2497 static enum unwind_stop_reason
   2498 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
   2499 				void **this_cache)
   2500 {
   2501   struct amd64_frame_cache *cache =
   2502     amd64_frame_cache (this_frame, this_cache);
   2503 
   2504   if (!cache->base_p)
   2505     return UNWIND_UNAVAILABLE;
   2506 
   2507   /* This marks the outermost frame.  */
   2508   if (cache->base == 0)
   2509     return UNWIND_OUTERMOST;
   2510 
   2511   return UNWIND_NO_REASON;
   2512 }
   2513 
   2514 static void
   2515 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
   2516 		     struct frame_id *this_id)
   2517 {
   2518   struct amd64_frame_cache *cache =
   2519     amd64_frame_cache (this_frame, this_cache);
   2520 
   2521   if (!cache->base_p)
   2522     (*this_id) = frame_id_build_unavailable_stack (cache->pc);
   2523   else if (cache->base == 0)
   2524     {
   2525       /* This marks the outermost frame.  */
   2526       return;
   2527     }
   2528   else
   2529     (*this_id) = frame_id_build (cache->base + 16, cache->pc);
   2530 }
   2531 
   2532 static struct value *
   2533 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
   2534 			   int regnum)
   2535 {
   2536   struct gdbarch *gdbarch = get_frame_arch (this_frame);
   2537   struct amd64_frame_cache *cache =
   2538     amd64_frame_cache (this_frame, this_cache);
   2539 
   2540   gdb_assert (regnum >= 0);
   2541 
   2542   if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
   2543     return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
   2544 
   2545   if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
   2546     return frame_unwind_got_memory (this_frame, regnum,
   2547 				    cache->saved_regs[regnum]);
   2548 
   2549   return frame_unwind_got_register (this_frame, regnum, regnum);
   2550 }
   2551 
/* Unwind methods for normal amd64 frames, driven by the prologue
   analysis above.  */

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_unwind_stop_reason,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  default_frame_sniffer
};
   2561 
   2562 /* Generate a bytecode expression to get the value of the saved PC.  */
   2564 
static void
amd64_gen_return_address (struct gdbarch *gdbarch,
			  struct agent_expr *ax, struct axs_value *value,
			  CORE_ADDR scope)
{
  /* The following sequence assumes the traditional use of the base
     register.  */
  /* Emit ops computing %rbp + 8, the stack slot holding the saved
     return address in a conventional frame.  */
  ax_reg (ax, AMD64_RBP_REGNUM);
  ax_const_l (ax, 8);
  ax_simple (ax, aop_add);
  /* The result is an lvalue residing in memory, typed like %rip.  */
  value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
  value->kind = axs_lvalue_memory;
}
   2578 
   2579 
   2581 /* Signal trampolines.  */
   2582 
   2583 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   2584    64-bit variants.  This would require using identical frame caches
   2585    on both platforms.  */
   2586 
/* Build (and memoize in *THIS_CACHE) the frame cache for a signal
   trampoline frame, taking the saved registers' locations from the
   OS-specific signal context instead of prologue analysis.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  TRY
    {
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

      /* Map each register to its slot inside the signal context via
	 the tdep-provided offset table; -1 entries are not saved.  */
      addr = tdep->sigcontext_addr (this_frame);
      gdb_assert (tdep->sc_reg_offset);
      gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
      for (i = 0; i < tdep->sc_num_regs; i++)
	if (tdep->sc_reg_offset[i] != -1)
	  cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

      cache->base_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Unavailable state leaves base_p == 0; anything else is a
	 real error and is re-thrown.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  *this_cache = cache;
  return cache;
}
   2627 
   2628 static enum unwind_stop_reason
   2629 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
   2630 					 void **this_cache)
   2631 {
   2632   struct amd64_frame_cache *cache =
   2633     amd64_sigtramp_frame_cache (this_frame, this_cache);
   2634 
   2635   if (!cache->base_p)
   2636     return UNWIND_UNAVAILABLE;
   2637 
   2638   return UNWIND_NO_REASON;
   2639 }
   2640 
   2641 static void
   2642 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
   2643 			      void **this_cache, struct frame_id *this_id)
   2644 {
   2645   struct amd64_frame_cache *cache =
   2646     amd64_sigtramp_frame_cache (this_frame, this_cache);
   2647 
   2648   if (!cache->base_p)
   2649     (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
   2650   else if (cache->base == 0)
   2651     {
   2652       /* This marks the outermost frame.  */
   2653       return;
   2654     }
   2655   else
   2656     (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
   2657 }
   2658 
/* Fetch the value of register REGNUM in the frame previous to a
   sigtramp frame.  Filling the cache is what makes this a sigtramp
   frame; the actual register lookup is shared with the normal
   unwinder.  */

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  amd64_sigtramp_frame_cache (this_frame, this_cache);
  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
   2668 
   2669 static int
   2670 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
   2671 			      struct frame_info *this_frame,
   2672 			      void **this_cache)
   2673 {
   2674   struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
   2675 
   2676   /* We shouldn't even bother if we don't have a sigcontext_addr
   2677      handler.  */
   2678   if (tdep->sigcontext_addr == NULL)
   2679     return 0;
   2680 
   2681   if (tdep->sigtramp_p != NULL)
   2682     {
   2683       if (tdep->sigtramp_p (this_frame))
   2684 	return 1;
   2685     }
   2686 
   2687   if (tdep->sigtramp_start != 0)
   2688     {
   2689       CORE_ADDR pc = get_frame_pc (this_frame);
   2690 
   2691       gdb_assert (tdep->sigtramp_end != 0);
   2692       if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
   2693 	return 1;
   2694     }
   2695 
   2696   return 0;
   2697 }
   2698 
/* Unwinder for AMD64 signal trampoline frames.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_unwind_stop_reason,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,
  amd64_sigtramp_frame_sniffer
};
   2708 
   2709 
   2711 static CORE_ADDR
   2712 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
   2713 {
   2714   struct amd64_frame_cache *cache =
   2715     amd64_frame_cache (this_frame, this_cache);
   2716 
   2717   return cache->base;
   2718 }
   2719 
/* Default frame base: the same address serves as CFA, locals base and
   arguments base.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};
   2727 
   2728 /* Normal frames, but in a function epilogue.  */
   2729 
   2730 /* Implement the stack_frame_destroyed_p gdbarch method.
   2731 
   2732    The epilogue is defined here as the 'ret' instruction, which will
   follow any instruction such as 'leave' or 'pop %rbp' that destroys
   2734    the function's stack frame.  */
   2735 
   2736 static int
   2737 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
   2738 {
   2739   gdb_byte insn;
   2740   struct compunit_symtab *cust;
   2741 
   2742   cust = find_pc_compunit_symtab (pc);
   2743   if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
   2744     return 0;
   2745 
   2746   if (target_read_memory (pc, &insn, 1))
   2747     return 0;   /* Can't read memory at pc.  */
   2748 
   2749   if (insn != 0xc3)     /* 'ret' instruction.  */
   2750     return 0;
   2751 
   2752   return 1;
   2753 }
   2754 
/* Return non-zero if THIS_FRAME should be handled by the epilogue
   unwinder.  Only the innermost frame can be stopped in an
   epilogue.  */

static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) != 0)
    return 0;

  return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
					get_frame_pc (this_frame));
}
   2766 
/* Build (or return the previously built) frame cache for a frame
   stopped in a function epilogue, where the frame has already been
   torn down and only the return address remains on the stack.  */

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  TRY
    {
      /* Cache base will be %rsp plus cache->sp_offset (-8).  */
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8,
					      byte_order) + cache->sp_offset;

      /* Cache pc will be the current PC within the epilogue.  */
      cache->pc = get_frame_pc (this_frame);

      /* The saved %rsp will be at cache->base plus 16.  */
      cache->saved_sp = cache->base + 16;

      /* The saved %rip will be at cache->base plus 8.  */
      cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

      cache->base_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Register contents may be unavailable; keep base_p clear in
	 that case and only propagate other errors.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
   2808 
   2809 static enum unwind_stop_reason
   2810 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
   2811 					 void **this_cache)
   2812 {
   2813   struct amd64_frame_cache *cache
   2814     = amd64_epilogue_frame_cache (this_frame, this_cache);
   2815 
   2816   if (!cache->base_p)
   2817     return UNWIND_UNAVAILABLE;
   2818 
   2819   return UNWIND_NO_REASON;
   2820 }
   2821 
   2822 static void
   2823 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
   2824 			      void **this_cache,
   2825 			      struct frame_id *this_id)
   2826 {
   2827   struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
   2828 							       this_cache);
   2829 
   2830   if (!cache->base_p)
   2831     (*this_id) = frame_id_build_unavailable_stack (cache->pc);
   2832   else
   2833     (*this_id) = frame_id_build (cache->base + 8, cache->pc);
   2834 }
   2835 
/* Unwinder for normal frames caught in a function epilogue.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_unwind_stop_reason,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  amd64_epilogue_frame_sniffer
};
   2845 
   2846 static struct frame_id
   2847 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
   2848 {
   2849   CORE_ADDR fp;
   2850 
   2851   fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
   2852 
   2853   return frame_id_build (fp + 16, get_frame_pc (this_frame));
   2854 }
   2855 
   2856 /* 16 byte align the SP per frame requirements.  */
   2857 
   2858 static CORE_ADDR
   2859 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
   2860 {
   2861   return sp & -(CORE_ADDR)16;
   2862 }
   2863 
   2864 
   2866 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
   2867    in the floating-point register set REGSET to register cache
   2868    REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */
   2869 
   2870 static void
   2871 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
   2872 		       int regnum, const void *fpregs, size_t len)
   2873 {
   2874   struct gdbarch *gdbarch = get_regcache_arch (regcache);
   2875   const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
   2876 
   2877   gdb_assert (len >= tdep->sizeof_fpregset);
   2878   amd64_supply_fxsave (regcache, regnum, fpregs);
   2879 }
   2880 
   2881 /* Collect register REGNUM from the register cache REGCACHE and store
   2882    it in the buffer specified by FPREGS and LEN as described by the
   2883    floating-point register set REGSET.  If REGNUM is -1, do this for
   2884    all registers in REGSET.  */
   2885 
   2886 static void
   2887 amd64_collect_fpregset (const struct regset *regset,
   2888 			const struct regcache *regcache,
   2889 			int regnum, void *fpregs, size_t len)
   2890 {
   2891   struct gdbarch *gdbarch = get_regcache_arch (regcache);
   2892   const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
   2893 
   2894   gdb_assert (len >= tdep->sizeof_fpregset);
   2895   amd64_collect_fxsave (regcache, regnum, fpregs);
   2896 }
   2897 
/* Floating-point register set, backed by the FXSAVE layout via the
   supply/collect routines above.  */
const struct regset amd64_fpregset =
  {
    NULL, amd64_supply_fpregset, amd64_collect_fpregset
  };
   2902 
   2903 
   2905 /* Figure out where the longjmp will land.  Slurp the jmp_buf out of
   2906    %rdi.  We expect its value to be a pointer to the jmp_buf structure
   2907    from which we extract the address that we will land at.  This
   2908    address is copied into PC.  This routine returns non-zero on
   2909    success.  */
   2910 
   2911 static int
   2912 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
   2913 {
   2914   gdb_byte buf[8];
   2915   CORE_ADDR jb_addr;
   2916   struct gdbarch *gdbarch = get_frame_arch (frame);
   2917   int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
   2918   int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
   2919 
   2920   /* If JB_PC_OFFSET is -1, we have no way to find out where the
   2921      longjmp will land.	 */
   2922   if (jb_pc_offset == -1)
   2923     return 0;
   2924 
   2925   get_frame_register (frame, AMD64_RDI_REGNUM, buf);
   2926   jb_addr= extract_typed_address
   2927 	    (buf, builtin_type (gdbarch)->builtin_data_ptr);
   2928   if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
   2929     return 0;
   2930 
   2931   *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
   2932 
   2933   return 1;
   2934 }
   2935 
/* Register mapping used by process record/replay; installed as
   tdep->record_regmap in amd64_init_abi below.  */

static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};
   2945 
/* Initialize the architecture vector with the settings common to all
   AMD64 targets.  OS ABI handlers and amd64_x32_init_abi refine the
   result further.  */

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;
  static const char *const stap_integer_prefixes[] = { "$", NULL };
  static const char *const stap_register_prefixes[] = { "%", NULL };
  static const char *const stap_register_indirection_prefixes[] = { "(",
								    NULL };
  static const char *const stap_register_indirection_suffixes[] = { ")",
								    NULL };

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
  tdep->fpregset = &amd64_fpregset;

  /* Fall back to the default amd64 target description if the target
     didn't supply one with registers.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_amd64;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* Optional register sets, enabled per target-description
     feature.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
    {
      tdep->zmmh_register_names = amd64_zmmh_names;
      tdep->k_register_names = amd64_k_names;
      tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
      tdep->ymm16h_register_names = amd64_ymmh_avx512_names;

      tdep->num_zmm_regs = 32;
      tdep->num_xmm_avx512_regs = 16;
      tdep->num_ymm_avx512_regs = 16;

      tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
      tdep->k0_regnum = AMD64_K0_REGNUM;
      tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
      tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
    {
      tdep->ymmh_register_names = amd64_ymmh_names;
      tdep->num_ymm_regs = 16;
      tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
    {
      tdep->mpx_register_names = amd64_mpx_names;
      tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
      tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
    }

  /* Counts of the byte/word/dword pseudo registers derived from the
     general-purpose registers.  */
  tdep->num_byte_regs = 20;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read_value (gdbarch,
					  amd64_pseudo_register_read_value);
  set_gdbarch_pseudo_register_write (gdbarch,
				     amd64_pseudo_register_write);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);

  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);

  set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);

  /* SystemTap variables and functions.  */
  set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
  set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
  set_gdbarch_stap_register_indirection_prefixes (gdbarch,
					  stap_register_indirection_prefixes);
  set_gdbarch_stap_register_indirection_suffixes (gdbarch,
					  stap_register_indirection_suffixes);
  set_gdbarch_stap_is_single_operand (gdbarch,
				      i386_stap_is_single_operand);
  set_gdbarch_stap_parse_special_token (gdbarch,
					i386_stap_parse_special_token);
  set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
  set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
  set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
}
   3096 
   3097 
   3099 static struct type *
   3100 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
   3101 {
   3102   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
   3103 
   3104   switch (regnum - tdep->eax_regnum)
   3105     {
   3106     case AMD64_RBP_REGNUM:	/* %ebp */
   3107     case AMD64_RSP_REGNUM:	/* %esp */
   3108       return builtin_type (gdbarch)->builtin_data_ptr;
   3109     case AMD64_RIP_REGNUM:	/* %eip */
   3110       return builtin_type (gdbarch)->builtin_func_ptr;
   3111     }
   3112 
   3113   return i386_pseudo_register_type (gdbarch, regnum);
   3114 }
   3115 
/* Initialize the architecture vector for the x32 ABI: the AMD64
   register set, but with 32-bit `long' and pointers.  */

void
amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;

  /* Start from the common AMD64 settings.  */
  amd64_init_abi (info, gdbarch);

  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_x32;
  tdep->tdesc = tdesc;

  /* One more dword pseudo register than plain AMD64; see
     amd64_x32_pseudo_register_type.  */
  tdep->num_dword_regs = 17;
  set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);

  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_ptr_bit (gdbarch, 32);
}
   3134 
   3135 /* Return the target description for a specified XSAVE feature mask.  */
   3136 
   3137 const struct target_desc *
   3138 amd64_target_description (uint64_t xcr0)
   3139 {
   3140   switch (xcr0 & X86_XSTATE_ALL_MASK)
   3141     {
   3142     case X86_XSTATE_MPX_AVX512_MASK:
   3143     case X86_XSTATE_AVX512_MASK:
   3144       return tdesc_amd64_avx512;
   3145     case X86_XSTATE_MPX_MASK:
   3146       return tdesc_amd64_mpx;
   3147     case X86_XSTATE_AVX_MASK:
   3148       return tdesc_amd64_avx;
   3149     default:
   3150       return tdesc_amd64;
   3151     }
   3152 }
   3153 
   3154 /* Provide a prototype to silence -Wmissing-prototypes.  */
   3155 void _initialize_amd64_tdep (void);
   3156 
void
_initialize_amd64_tdep (void)
{
  /* Register the target descriptions generated from
     features/i386/*.c.  */
  initialize_tdesc_amd64 ();
  initialize_tdesc_amd64_avx ();
  initialize_tdesc_amd64_mpx ();
  initialize_tdesc_amd64_avx512 ();

  initialize_tdesc_x32 ();
  initialize_tdesc_x32_avx ();
  initialize_tdesc_x32_avx512 ();
}
   3169 
   3170 
   3172 /* The 64-bit FXSAVE format differs from the 32-bit format in the
   3173    sense that the instruction pointer and data pointer are simply
   3174    64-bit offsets into the code segment and the data segment instead
   3175    of a selector offset pair.  The functions below store the upper 32
   3176    bits of these pointers (instead of just the 16-bits of the segment
   3177    selector).  */
   3178 
   3179 /* Fill register REGNUM in REGCACHE with the appropriate
   3180    floating-point or SSE register value from *FXSAVE.  If REGNUM is
   3181    -1, do this for all registers.  This function masks off any of the
   3182    reserved bits in *FXSAVE.  */
   3183 
   3184 void
   3185 amd64_supply_fxsave (struct regcache *regcache, int regnum,
   3186 		     const void *fxsave)
   3187 {
   3188   struct gdbarch *gdbarch = get_regcache_arch (regcache);
   3189   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
   3190 
   3191   i387_supply_fxsave (regcache, regnum, fxsave);
   3192 
   3193   if (fxsave
   3194       && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
   3195     {
   3196       const gdb_byte *regs = fxsave;
   3197 
   3198       if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
   3199 	regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
   3200       if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
   3201 	regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
   3202     }
   3203 }
   3204 
   3205 /* Similar to amd64_supply_fxsave, but use XSAVE extended state.  */
   3206 
   3207 void
   3208 amd64_supply_xsave (struct regcache *regcache, int regnum,
   3209 		    const void *xsave)
   3210 {
   3211   struct gdbarch *gdbarch = get_regcache_arch (regcache);
   3212   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
   3213 
   3214   i387_supply_xsave (regcache, regnum, xsave);
   3215 
   3216   if (xsave
   3217       && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
   3218     {
   3219       const gdb_byte *regs = xsave;
   3220 
   3221       if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
   3222 	regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
   3223 			     regs + 12);
   3224       if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
   3225 	regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
   3226 			     regs + 20);
   3227     }
   3228 }
   3229 
   3230 /* Fill register REGNUM (if it is a floating-point or SSE register) in
   3231    *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   3232    all registers.  This function doesn't touch any of the reserved
   3233    bits in *FXSAVE.  */
   3234 
   3235 void
   3236 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
   3237 		      void *fxsave)
   3238 {
   3239   struct gdbarch *gdbarch = get_regcache_arch (regcache);
   3240   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
   3241   gdb_byte *regs = fxsave;
   3242 
   3243   i387_collect_fxsave (regcache, regnum, fxsave);
   3244 
   3245   if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
   3246     {
   3247       if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
   3248 	regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
   3249       if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
   3250 	regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
   3251     }
   3252 }
   3253 
   3254 /* Similar to amd64_collect_fxsave, but use XSAVE extended state.  */
   3255 
   3256 void
   3257 amd64_collect_xsave (const struct regcache *regcache, int regnum,
   3258 		     void *xsave, int gcore)
   3259 {
   3260   struct gdbarch *gdbarch = get_regcache_arch (regcache);
   3261   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
   3262   gdb_byte *regs = xsave;
   3263 
   3264   i387_collect_xsave (regcache, regnum, xsave, gcore);
   3265 
   3266   if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
   3267     {
   3268       if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
   3269 	regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
   3270 			      regs + 12);
   3271       if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
   3272 	regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
   3273 			      regs + 20);
   3274     }
   3275 }
   3276