Home | History | Annotate | Line # | Download | only in gdb
amd64-tdep.c revision 1.9
      1 /* Target-dependent code for AMD64.
      2 
      3    Copyright (C) 2001-2020 Free Software Foundation, Inc.
      4 
      5    Contributed by Jiri Smid, SuSE Labs.
      6 
      7    This file is part of GDB.
      8 
      9    This program is free software; you can redistribute it and/or modify
     10    it under the terms of the GNU General Public License as published by
     11    the Free Software Foundation; either version 3 of the License, or
     12    (at your option) any later version.
     13 
     14    This program is distributed in the hope that it will be useful,
     15    but WITHOUT ANY WARRANTY; without even the implied warranty of
     16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     17    GNU General Public License for more details.
     18 
     19    You should have received a copy of the GNU General Public License
     20    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
     21 
     22 #include "defs.h"
     23 #include "opcode/i386.h"
     24 #include "dis-asm.h"
     25 #include "arch-utils.h"
     26 #include "block.h"
     27 #include "dummy-frame.h"
     28 #include "frame.h"
     29 #include "frame-base.h"
     30 #include "frame-unwind.h"
     31 #include "inferior.h"
     32 #include "infrun.h"
     33 #include "gdbcmd.h"
     34 #include "gdbcore.h"
     35 #include "objfiles.h"
     36 #include "regcache.h"
     37 #include "regset.h"
     38 #include "symfile.h"
     39 #include "disasm.h"
     40 #include "amd64-tdep.h"
     41 #include "i387-tdep.h"
     42 #include "gdbsupport/x86-xstate.h"
     43 #include <algorithm>
     44 #include "target-descriptions.h"
     45 #include "arch/amd64.h"
     46 #include "producer.h"
     47 #include "ax.h"
     48 #include "ax-gdb.h"
     49 #include "gdbsupport/byte-vector.h"
     50 #include "osabi.h"
     51 #include "x86-tdep.h"
     52 
     53 /* Note that the AMD64 architecture was previously known as x86-64.
     54    The latter is (forever) engraved into the canonical system name as
     55    returned by config.guess, and used as the name for the AMD64 port
     56    of GNU/Linux.  The BSD's have renamed their ports to amd64; they
     57    don't like to shout.  For GDB we prefer the amd64_-prefix over the
     58    x86_64_-prefix since it's so much easier to type.  */
     59 
     60 /* Register information.  */
     61 
/* Names of the raw (non-pseudo) AMD64 registers, indexed by GDB
   register number.  The layout — GPRs, then x87, then SSE — must
   stay in sync with the AMD64_*_REGNUM constants; do not reorder.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};
     79 
/* Pseudo-register names for the full 256-bit %ymm registers (AVX).  */

static const char *amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

/* Pseudo-register names for %ymm16-%ymm31 (AVX-512 only).  */

static const char *amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

/* Raw-register names for the upper 128-bit halves of %ymm0-%ymm15, as
   stored in the XSAVE area.  */

static const char *amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

/* Raw-register names for the upper halves of %ymm16-%ymm31 (AVX-512).  */

static const char *amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};

/* Raw-register names for the MPX bound registers and configuration
   registers.  */

static const char *amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

/* Raw-register names for the AVX-512 opmask registers %k0-%k7.  */

static const char *amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};

/* Raw-register names for the upper 256-bit halves of the 512-bit
   %zmm registers (AVX-512).  */

static const char *amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

/* Pseudo-register names for the full 512-bit %zmm registers.  */

static const char *amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

/* Raw-register names for %xmm16-%xmm31 (AVX-512 only).  */

static const char *amd64_xmm_avx512_names[] = {
    "xmm16",  "xmm17",  "xmm18",  "xmm19",
    "xmm20",  "xmm21",  "xmm22",  "xmm23",
    "xmm24",  "xmm25",  "xmm26",  "xmm27",
    "xmm28",  "xmm29",  "xmm30",  "xmm31"
};

/* Raw-register name for the protection-keys rights register (PKU).  */

static const char *amd64_pkeys_names[] = {
    "pkru"
};
    157 
    158 /* DWARF Register Number Mapping as defined in the System V psABI,
    159    section 3.6.  */
    160 
/* Map from DWARF register number (the array index) to GDB register
   number, following the System V psABI table.  Entries of -1 mark
   DWARF numbers that have no (fixed) GDB equivalent.  The order of
   this table is dictated by the ABI; do not reorder.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM,		/* %r15 */

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* MMX Registers 0 - 7.
     We have to handle those registers specifically, as their register
     number within GDB depends on the target (or they may even not be
     available at all).  */
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

/* Number of entries in the DWARF register map above.  */
static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
    242 
    243 /* Convert DWARF register number REG to the appropriate register
    244    number used by GDB.  */
    245 
    246 static int
    247 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
    248 {
    249   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
    250   int ymm0_regnum = tdep->ymm0_regnum;
    251   int regnum = -1;
    252 
    253   if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    254     regnum = amd64_dwarf_regmap[reg];
    255 
    256   if (ymm0_regnum >= 0
    257 	   && i386_xmm_regnum_p (gdbarch, regnum))
    258     regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
    259 
    260   return regnum;
    261 }
    262 
    263 /* Map architectural register numbers to gdb register numbers.  */
    264 
/* Map architectural register numbers (the 4-bit GPR numbers used in
   instruction encodings, REX extension included) to gdb register
   numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

/* Number of entries in the architectural register map above.  */
static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
    287 
    288 /* Convert architectural register number REG to the appropriate register
    289    number used by GDB.  */
    290 
    291 static int
    292 amd64_arch_reg_to_regnum (int reg)
    293 {
    294   gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
    295 
    296   return amd64_arch_regmap[reg];
    297 }
    298 
    299 /* Register names for byte pseudo-registers.  */
    300 
static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  /* The high-byte registers %ah..%dh follow the 16 low-byte
     registers; code indexing this table distinguishes the two groups
     via AMD64_NUM_LOWER_BYTE_REGS below.  */
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16
    310 
    311 /* Register names for word pseudo-registers.  */
    312 
static const char *amd64_word_names[] =
{
  /* The slot for the 16-bit stack pointer is deliberately empty —
     presumably so the pseudo does not clash with GDB's generic "sp"
     register name; TODO confirm.  */
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};
    318 
    319 /* Register names for dword pseudo-registers.  */
    320 
static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  /* 32-bit view of %rip.  */
  "eip"
};
    327 
    328 /* Return the name of register REGNUM.  */
    329 
    330 static const char *
    331 amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
    332 {
    333   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
    334   if (i386_byte_regnum_p (gdbarch, regnum))
    335     return amd64_byte_names[regnum - tdep->al_regnum];
    336   else if (i386_zmm_regnum_p (gdbarch, regnum))
    337     return amd64_zmm_names[regnum - tdep->zmm0_regnum];
    338   else if (i386_ymm_regnum_p (gdbarch, regnum))
    339     return amd64_ymm_names[regnum - tdep->ymm0_regnum];
    340   else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    341     return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
    342   else if (i386_word_regnum_p (gdbarch, regnum))
    343     return amd64_word_names[regnum - tdep->ax_regnum];
    344   else if (i386_dword_regnum_p (gdbarch, regnum))
    345     return amd64_dword_names[regnum - tdep->eax_regnum];
    346   else
    347     return i386_pseudo_register_name (gdbarch, regnum);
    348 }
    349 
    350 static struct value *
    351 amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
    352 				  readable_regcache *regcache,
    353 				  int regnum)
    354 {
    355   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
    356 
    357   value *result_value = allocate_value (register_type (gdbarch, regnum));
    358   VALUE_LVAL (result_value) = lval_register;
    359   VALUE_REGNUM (result_value) = regnum;
    360   gdb_byte *buf = value_contents_raw (result_value);
    361 
    362   if (i386_byte_regnum_p (gdbarch, regnum))
    363     {
    364       int gpnum = regnum - tdep->al_regnum;
    365 
    366       /* Extract (always little endian).  */
    367       if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
    368 	{
    369 	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
    370 	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];
    371 
    372 	  /* Special handling for AH, BH, CH, DH.  */
    373 	  register_status status = regcache->raw_read (gpnum, raw_buf);
    374 	  if (status == REG_VALID)
    375 	    memcpy (buf, raw_buf + 1, 1);
    376 	  else
    377 	    mark_value_bytes_unavailable (result_value, 0,
    378 					  TYPE_LENGTH (value_type (result_value)));
    379 	}
    380       else
    381 	{
    382 	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];
    383 	  register_status status = regcache->raw_read (gpnum, raw_buf);
    384 	  if (status == REG_VALID)
    385 	    memcpy (buf, raw_buf, 1);
    386 	  else
    387 	    mark_value_bytes_unavailable (result_value, 0,
    388 					  TYPE_LENGTH (value_type (result_value)));
    389 	}
    390     }
    391   else if (i386_dword_regnum_p (gdbarch, regnum))
    392     {
    393       int gpnum = regnum - tdep->eax_regnum;
    394       gdb_byte raw_buf[register_size (gdbarch, gpnum)];
    395       /* Extract (always little endian).  */
    396       register_status status = regcache->raw_read (gpnum, raw_buf);
    397       if (status == REG_VALID)
    398 	memcpy (buf, raw_buf, 4);
    399       else
    400 	mark_value_bytes_unavailable (result_value, 0,
    401 				      TYPE_LENGTH (value_type (result_value)));
    402     }
    403   else
    404     i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
    405 					  result_value);
    406 
    407   return result_value;
    408 }
    409 
    410 static void
    411 amd64_pseudo_register_write (struct gdbarch *gdbarch,
    412 			     struct regcache *regcache,
    413 			     int regnum, const gdb_byte *buf)
    414 {
    415   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
    416 
    417   if (i386_byte_regnum_p (gdbarch, regnum))
    418     {
    419       int gpnum = regnum - tdep->al_regnum;
    420 
    421       if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
    422 	{
    423 	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
    424 	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];
    425 
    426 	  /* Read ... AH, BH, CH, DH.  */
    427 	  regcache->raw_read (gpnum, raw_buf);
    428 	  /* ... Modify ... (always little endian).  */
    429 	  memcpy (raw_buf + 1, buf, 1);
    430 	  /* ... Write.  */
    431 	  regcache->raw_write (gpnum, raw_buf);
    432 	}
    433       else
    434 	{
    435 	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];
    436 
    437 	  /* Read ...  */
    438 	  regcache->raw_read (gpnum, raw_buf);
    439 	  /* ... Modify ... (always little endian).  */
    440 	  memcpy (raw_buf, buf, 1);
    441 	  /* ... Write.  */
    442 	  regcache->raw_write (gpnum, raw_buf);
    443 	}
    444     }
    445   else if (i386_dword_regnum_p (gdbarch, regnum))
    446     {
    447       int gpnum = regnum - tdep->eax_regnum;
    448       gdb_byte raw_buf[register_size (gdbarch, gpnum)];
    449 
    450       /* Read ...  */
    451       regcache->raw_read (gpnum, raw_buf);
    452       /* ... Modify ... (always little endian).  */
    453       memcpy (raw_buf, buf, 4);
    454       /* ... Write.  */
    455       regcache->raw_write (gpnum, raw_buf);
    456     }
    457   else
    458     i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
    459 }
    460 
    461 /* Implement the 'ax_pseudo_register_collect' gdbarch method.  */
    462 
    463 static int
    464 amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
    465 				  struct agent_expr *ax, int regnum)
    466 {
    467   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
    468 
    469   if (i386_byte_regnum_p (gdbarch, regnum))
    470     {
    471       int gpnum = regnum - tdep->al_regnum;
    472 
    473       if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
    474 	ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
    475       else
    476 	ax_reg_mask (ax, gpnum);
    477       return 0;
    478     }
    479   else if (i386_dword_regnum_p (gdbarch, regnum))
    480     {
    481       int gpnum = regnum - tdep->eax_regnum;
    482 
    483       ax_reg_mask (ax, gpnum);
    484       return 0;
    485     }
    486   else
    487     return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
    488 }
    489 
    490 
    491 
    493 /* Register classes as defined in the psABI.  */
    494 
/* Register classes as defined in the psABI (section 3.2.3,
   "Parameter Passing").  */

enum amd64_reg_class
{
  AMD64_INTEGER,	/* Fits in a general-purpose register.  */
  AMD64_SSE,		/* Passed/returned in an SSE register.  */
  AMD64_SSEUP,		/* Upper eightbyte of the preceding SSE value.  */
  AMD64_X87,		/* Returned on the x87 stack (%st0).  */
  AMD64_X87UP,		/* Upper part of the preceding X87 value.  */
  AMD64_COMPLEX_X87,	/* complex long double: %st0/%st1 pair.  */
  AMD64_NO_CLASS,	/* Padding / empty; merges away.  */
  AMD64_MEMORY		/* Passed/returned through memory.  */
};
    506 
    507 /* Return the union class of CLASS1 and CLASS2.  See the psABI for
    508    details.  */
    509 
    510 static enum amd64_reg_class
    511 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
    512 {
    513   /* Rule (a): If both classes are equal, this is the resulting class.  */
    514   if (class1 == class2)
    515     return class1;
    516 
    517   /* Rule (b): If one of the classes is NO_CLASS, the resulting class
    518      is the other class.  */
    519   if (class1 == AMD64_NO_CLASS)
    520     return class2;
    521   if (class2 == AMD64_NO_CLASS)
    522     return class1;
    523 
    524   /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
    525   if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    526     return AMD64_MEMORY;
    527 
    528   /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
    529   if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    530     return AMD64_INTEGER;
    531 
    532   /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
    533      MEMORY is used as class.  */
    534   if (class1 == AMD64_X87 || class1 == AMD64_X87UP
    535       || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
    536       || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    537     return AMD64_MEMORY;
    538 
    539   /* Rule (f): Otherwise class SSE is used.  */
    540   return AMD64_SSE;
    541 }
    542 
    543 static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);
    544 
    545 /* Return true if TYPE is a structure or union with unaligned fields.  */
    546 
    547 static bool
    548 amd64_has_unaligned_fields (struct type *type)
    549 {
    550   if (type->code () == TYPE_CODE_STRUCT
    551       || type->code () == TYPE_CODE_UNION)
    552     {
    553       for (int i = 0; i < type->num_fields (); i++)
    554 	{
    555 	  struct type *subtype = check_typedef (type->field (i).type ());
    556 	  int bitpos = TYPE_FIELD_BITPOS (type, i);
    557 	  int align = type_align(subtype);
    558 
    559 	  /* Ignore static fields, empty fields (for example nested
    560 	     empty structures), and bitfields (these are handled by
    561 	     the caller).  */
    562 	  if (field_is_static (&type->field (i))
    563 	      || (TYPE_FIELD_BITSIZE (type, i) == 0
    564 		  && TYPE_LENGTH (subtype) == 0)
    565 	      || TYPE_FIELD_PACKED (type, i))
    566 	    continue;
    567 
    568 	  if (bitpos % 8 != 0)
    569 	    return true;
    570 
    571 	  int bytepos = bitpos / 8;
    572 	  if (bytepos % align != 0)
    573 	    return true;
    574 
    575 	  if (amd64_has_unaligned_fields (subtype))
    576 	    return true;
    577 	}
    578     }
    579 
    580   return false;
    581 }
    582 
    583 /* Classify field I of TYPE starting at BITOFFSET according to the rules for
    584    structures and union types, and store the result in THECLASS.  */
    585 
static void
amd64_classify_aggregate_field (struct type *type, int i,
				enum amd64_reg_class theclass[2],
				unsigned int bitoffset)
{
  struct type *subtype = check_typedef (type->field (i).type ());
  /* BITOFFSET is the field's displacement within the outermost
     aggregate, so BITPOS is absolute; POS/ENDPOS are the indices of
     the eightbytes in which the field starts and ends.  */
  int bitpos = bitoffset + TYPE_FIELD_BITPOS (type, i);
  int pos = bitpos / 64;
  enum amd64_reg_class subclass[2];
  int bitsize = TYPE_FIELD_BITSIZE (type, i);
  int endpos;

  /* A zero TYPE_FIELD_BITSIZE means "not a bitfield"; use the full
     byte size of the field's type instead.  */
  if (bitsize == 0)
    bitsize = TYPE_LENGTH (subtype) * 8;
  endpos = (bitpos + bitsize - 1) / 64;

  /* Ignore static fields, or empty fields, for example nested
     empty structures.*/
  if (field_is_static (&type->field (i)) || bitsize == 0)
    return;

  if (subtype->code () == TYPE_CODE_STRUCT
      || subtype->code () == TYPE_CODE_UNION)
    {
      /* Each field of an object is classified recursively.  */
      int j;
      for (j = 0; j < subtype->num_fields (); j++)
	amd64_classify_aggregate_field (subtype, j, theclass, bitpos);
      return;
    }

  /* The caller guarantees the aggregate is at most 16 bytes, so the
     field must start in the first or second eightbyte.  */
  gdb_assert (pos == 0 || pos == 1);

  amd64_classify (subtype, subclass);
  theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
  if (bitsize <= 64 && pos == 0 && endpos == 1)
    /* This is a bit of an odd case:  We have a field that would
       normally fit in one of the two eightbytes, except that
       it is placed in a way that this field straddles them.
       This has been seen with a structure containing an array.

       The ABI is a bit unclear in this case, but we assume that
       this field's class (stored in subclass[0]) must also be merged
       into class[1].  In other words, our field has a piece stored
       in the second eight-byte, and thus its class applies to
       the second eight-byte as well.

       In the case where the field length exceeds 8 bytes,
       it should not be necessary to merge the field class
       into class[1].  As LEN > 8, subclass[1] is necessarily
       different from AMD64_NO_CLASS.  If subclass[1] is equal
       to subclass[0], then the normal class[1]/subclass[1]
       merging will take care of everything.  For subclass[1]
       to be different from subclass[0], I can only see the case
       where we have a SSE/SSEUP or X87/X87UP pair, which both
       use up all 16 bytes of the aggregate, and are already
       handled just fine (because each portion sits on its own
       8-byte).  */
    theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
  if (pos == 0)
    theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
}
    648 
    649 /* Classify TYPE according to the rules for aggregate (structures and
    650    arrays) and union types, and store the result in CLASS.  */
    651 
    652 static void
    653 amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
    654 {
    655   /* 1. If the size of an object is larger than two eightbytes, or it has
    656         unaligned fields, it has class memory.  */
    657   if (TYPE_LENGTH (type) > 16 || amd64_has_unaligned_fields (type))
    658     {
    659       theclass[0] = theclass[1] = AMD64_MEMORY;
    660       return;
    661     }
    662 
    663   /* 2. Both eightbytes get initialized to class NO_CLASS.  */
    664   theclass[0] = theclass[1] = AMD64_NO_CLASS;
    665 
    666   /* 3. Each field of an object is classified recursively so that
    667         always two fields are considered. The resulting class is
    668         calculated according to the classes of the fields in the
    669         eightbyte: */
    670 
    671   if (type->code () == TYPE_CODE_ARRAY)
    672     {
    673       struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
    674 
    675       /* All fields in an array have the same type.  */
    676       amd64_classify (subtype, theclass);
    677       if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS)
    678 	theclass[1] = theclass[0];
    679     }
    680   else
    681     {
    682       int i;
    683 
    684       /* Structure or union.  */
    685       gdb_assert (type->code () == TYPE_CODE_STRUCT
    686 		  || type->code () == TYPE_CODE_UNION);
    687 
    688       for (i = 0; i < type->num_fields (); i++)
    689 	amd64_classify_aggregate_field (type, i, theclass, 0);
    690     }
    691 
    692   /* 4. Then a post merger cleanup is done:  */
    693 
    694   /* Rule (a): If one of the classes is MEMORY, the whole argument is
    695      passed in memory.  */
    696   if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
    697     theclass[0] = theclass[1] = AMD64_MEMORY;
    698 
    699   /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
    700      SSE.  */
    701   if (theclass[0] == AMD64_SSEUP)
    702     theclass[0] = AMD64_SSE;
    703   if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
    704     theclass[1] = AMD64_SSE;
    705 }
    706 
    707 /* Classify TYPE, and store the result in CLASS.  */
    708 
    709 static void
    710 amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
    711 {
    712   enum type_code code = type->code ();
    713   int len = TYPE_LENGTH (type);
    714 
    715   theclass[0] = theclass[1] = AMD64_NO_CLASS;
    716 
    717   /* Arguments of types (signed and unsigned) _Bool, char, short, int,
    718      long, long long, and pointers are in the INTEGER class.  Similarly,
    719      range types, used by languages such as Ada, are also in the INTEGER
    720      class.  */
    721   if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
    722        || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
    723        || code == TYPE_CODE_CHAR
    724        || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
    725       && (len == 1 || len == 2 || len == 4 || len == 8))
    726     theclass[0] = AMD64_INTEGER;
    727 
    728   /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
    729      are in class SSE.  */
    730   else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
    731 	   && (len == 4 || len == 8))
    732     /* FIXME: __m64 .  */
    733     theclass[0] = AMD64_SSE;
    734 
    735   /* Arguments of types __float128, _Decimal128 and __m128 are split into
    736      two halves.  The least significant ones belong to class SSE, the most
    737      significant one to class SSEUP.  */
    738   else if (code == TYPE_CODE_DECFLOAT && len == 16)
    739     /* FIXME: __float128, __m128.  */
    740     theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;
    741 
    742   /* The 64-bit mantissa of arguments of type long double belongs to
    743      class X87, the 16-bit exponent plus 6 bytes of padding belongs to
    744      class X87UP.  */
    745   else if (code == TYPE_CODE_FLT && len == 16)
    746     /* Class X87 and X87UP.  */
    747     theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;
    748 
    749   /* Arguments of complex T where T is one of the types float or
    750      double get treated as if they are implemented as:
    751 
    752      struct complexT {
    753        T real;
    754        T imag;
    755      };
    756 
    757   */
    758   else if (code == TYPE_CODE_COMPLEX && len == 8)
    759     theclass[0] = AMD64_SSE;
    760   else if (code == TYPE_CODE_COMPLEX && len == 16)
    761     theclass[0] = theclass[1] = AMD64_SSE;
    762 
    763   /* A variable of type complex long double is classified as type
    764      COMPLEX_X87.  */
    765   else if (code == TYPE_CODE_COMPLEX && len == 32)
    766     theclass[0] = AMD64_COMPLEX_X87;
    767 
    768   /* Aggregates.  */
    769   else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
    770 	   || code == TYPE_CODE_UNION)
    771     amd64_classify_aggregate (type, theclass);
    772 }
    773 
    774 static enum return_value_convention
    775 amd64_return_value (struct gdbarch *gdbarch, struct value *function,
    776 		    struct type *type, struct regcache *regcache,
    777 		    gdb_byte *readbuf, const gdb_byte *writebuf)
    778 {
    779   enum amd64_reg_class theclass[2];
    780   int len = TYPE_LENGTH (type);
    781   static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
    782   static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
    783   int integer_reg = 0;
    784   int sse_reg = 0;
    785   int i;
    786 
    787   gdb_assert (!(readbuf && writebuf));
    788 
    789   /* 1. Classify the return type with the classification algorithm.  */
    790   amd64_classify (type, theclass);
    791 
    792   /* 2. If the type has class MEMORY, then the caller provides space
    793      for the return value and passes the address of this storage in
    794      %rdi as if it were the first argument to the function.  In effect,
    795      this address becomes a hidden first argument.
    796 
    797      On return %rax will contain the address that has been passed in
    798      by the caller in %rdi.  */
    799   if (theclass[0] == AMD64_MEMORY)
    800     {
    801       /* As indicated by the comment above, the ABI guarantees that we
    802          can always find the return value just after the function has
    803          returned.  */
    804 
    805       if (readbuf)
    806 	{
    807 	  ULONGEST addr;
    808 
    809 	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
    810 	  read_memory (addr, readbuf, TYPE_LENGTH (type));
    811 	}
    812 
    813       return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    814     }
    815 
    816   /* 8. If the class is COMPLEX_X87, the real part of the value is
    817         returned in %st0 and the imaginary part in %st1.  */
    818   if (theclass[0] == AMD64_COMPLEX_X87)
    819     {
    820       if (readbuf)
    821 	{
    822 	  regcache->raw_read (AMD64_ST0_REGNUM, readbuf);
    823 	  regcache->raw_read (AMD64_ST1_REGNUM, readbuf + 16);
    824 	}
    825 
    826       if (writebuf)
    827 	{
    828 	  i387_return_value (gdbarch, regcache);
    829 	  regcache->raw_write (AMD64_ST0_REGNUM, writebuf);
    830 	  regcache->raw_write (AMD64_ST1_REGNUM, writebuf + 16);
    831 
    832 	  /* Fix up the tag word such that both %st(0) and %st(1) are
    833 	     marked as valid.  */
    834 	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
    835 	}
    836 
    837       return RETURN_VALUE_REGISTER_CONVENTION;
    838     }
    839 
    840   gdb_assert (theclass[1] != AMD64_MEMORY);
    841   gdb_assert (len <= 16);
    842 
    843   for (i = 0; len > 0; i++, len -= 8)
    844     {
    845       int regnum = -1;
    846       int offset = 0;
    847 
    848       switch (theclass[i])
    849 	{
    850 	case AMD64_INTEGER:
    851 	  /* 3. If the class is INTEGER, the next available register
    852 	     of the sequence %rax, %rdx is used.  */
    853 	  regnum = integer_regnum[integer_reg++];
    854 	  break;
    855 
    856 	case AMD64_SSE:
    857 	  /* 4. If the class is SSE, the next available SSE register
    858              of the sequence %xmm0, %xmm1 is used.  */
    859 	  regnum = sse_regnum[sse_reg++];
    860 	  break;
    861 
    862 	case AMD64_SSEUP:
    863 	  /* 5. If the class is SSEUP, the eightbyte is passed in the
    864 	     upper half of the last used SSE register.  */
    865 	  gdb_assert (sse_reg > 0);
    866 	  regnum = sse_regnum[sse_reg - 1];
    867 	  offset = 8;
    868 	  break;
    869 
    870 	case AMD64_X87:
    871 	  /* 6. If the class is X87, the value is returned on the X87
    872              stack in %st0 as 80-bit x87 number.  */
    873 	  regnum = AMD64_ST0_REGNUM;
    874 	  if (writebuf)
    875 	    i387_return_value (gdbarch, regcache);
    876 	  break;
    877 
    878 	case AMD64_X87UP:
    879 	  /* 7. If the class is X87UP, the value is returned together
    880              with the previous X87 value in %st0.  */
    881 	  gdb_assert (i > 0 && theclass[0] == AMD64_X87);
    882 	  regnum = AMD64_ST0_REGNUM;
    883 	  offset = 8;
    884 	  len = 2;
    885 	  break;
    886 
    887 	case AMD64_NO_CLASS:
    888 	  continue;
    889 
    890 	default:
    891 	  gdb_assert (!"Unexpected register class.");
    892 	}
    893 
    894       gdb_assert (regnum != -1);
    895 
    896       if (readbuf)
    897 	regcache->raw_read_part (regnum, offset, std::min (len, 8),
    898 				 readbuf + i * 8);
    899       if (writebuf)
    900 	regcache->raw_write_part (regnum, offset, std::min (len, 8),
    901 				  writebuf + i * 8);
    902     }
    903 
    904   return RETURN_VALUE_REGISTER_CONVENTION;
    905 }
    906 
    907 
    909 static CORE_ADDR
    910 amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args,
    911 		      CORE_ADDR sp, function_call_return_method return_method)
    912 {
    913   static int integer_regnum[] =
    914   {
    915     AMD64_RDI_REGNUM,		/* %rdi */
    916     AMD64_RSI_REGNUM,		/* %rsi */
    917     AMD64_RDX_REGNUM,		/* %rdx */
    918     AMD64_RCX_REGNUM,		/* %rcx */
    919     AMD64_R8_REGNUM,		/* %r8 */
    920     AMD64_R9_REGNUM		/* %r9 */
    921   };
    922   static int sse_regnum[] =
    923   {
    924     /* %xmm0 ... %xmm7 */
    925     AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    926     AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    927     AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    928     AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
    929   };
    930   struct value **stack_args = XALLOCAVEC (struct value *, nargs);
    931   int num_stack_args = 0;
    932   int num_elements = 0;
    933   int element = 0;
    934   int integer_reg = 0;
    935   int sse_reg = 0;
    936   int i;
    937 
    938   /* Reserve a register for the "hidden" argument.  */
    939 if (return_method == return_method_struct)
    940     integer_reg++;
    941 
    942   for (i = 0; i < nargs; i++)
    943     {
    944       struct type *type = value_type (args[i]);
    945       int len = TYPE_LENGTH (type);
    946       enum amd64_reg_class theclass[2];
    947       int needed_integer_regs = 0;
    948       int needed_sse_regs = 0;
    949       int j;
    950 
    951       /* Classify argument.  */
    952       amd64_classify (type, theclass);
    953 
    954       /* Calculate the number of integer and SSE registers needed for
    955          this argument.  */
    956       for (j = 0; j < 2; j++)
    957 	{
    958 	  if (theclass[j] == AMD64_INTEGER)
    959 	    needed_integer_regs++;
    960 	  else if (theclass[j] == AMD64_SSE)
    961 	    needed_sse_regs++;
    962 	}
    963 
    964       /* Check whether enough registers are available, and if the
    965          argument should be passed in registers at all.  */
    966       if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
    967 	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
    968 	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
    969 	{
    970 	  /* The argument will be passed on the stack.  */
    971 	  num_elements += ((len + 7) / 8);
    972 	  stack_args[num_stack_args++] = args[i];
    973 	}
    974       else
    975 	{
    976 	  /* The argument will be passed in registers.  */
    977 	  const gdb_byte *valbuf = value_contents (args[i]);
    978 	  gdb_byte buf[8];
    979 
    980 	  gdb_assert (len <= 16);
    981 
    982 	  for (j = 0; len > 0; j++, len -= 8)
    983 	    {
    984 	      int regnum = -1;
    985 	      int offset = 0;
    986 
    987 	      switch (theclass[j])
    988 		{
    989 		case AMD64_INTEGER:
    990 		  regnum = integer_regnum[integer_reg++];
    991 		  break;
    992 
    993 		case AMD64_SSE:
    994 		  regnum = sse_regnum[sse_reg++];
    995 		  break;
    996 
    997 		case AMD64_SSEUP:
    998 		  gdb_assert (sse_reg > 0);
    999 		  regnum = sse_regnum[sse_reg - 1];
   1000 		  offset = 8;
   1001 		  break;
   1002 
   1003 		case AMD64_NO_CLASS:
   1004 		  continue;
   1005 
   1006 		default:
   1007 		  gdb_assert (!"Unexpected register class.");
   1008 		}
   1009 
   1010 	      gdb_assert (regnum != -1);
   1011 	      memset (buf, 0, sizeof buf);
   1012 	      memcpy (buf, valbuf + j * 8, std::min (len, 8));
   1013 	      regcache->raw_write_part (regnum, offset, 8, buf);
   1014 	    }
   1015 	}
   1016     }
   1017 
   1018   /* Allocate space for the arguments on the stack.  */
   1019   sp -= num_elements * 8;
   1020 
   1021   /* The psABI says that "The end of the input argument area shall be
   1022      aligned on a 16 byte boundary."  */
   1023   sp &= ~0xf;
   1024 
   1025   /* Write out the arguments to the stack.  */
   1026   for (i = 0; i < num_stack_args; i++)
   1027     {
   1028       struct type *type = value_type (stack_args[i]);
   1029       const gdb_byte *valbuf = value_contents (stack_args[i]);
   1030       int len = TYPE_LENGTH (type);
   1031 
   1032       write_memory (sp + element * 8, valbuf, len);
   1033       element += ((len + 7) / 8);
   1034     }
   1035 
   1036   /* The psABI says that "For calls that may call functions that use
   1037      varargs or stdargs (prototype-less calls or calls to functions
   1038      containing ellipsis (...) in the declaration) %al is used as
   1039      hidden argument to specify the number of SSE registers used.  */
   1040   regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
   1041   return sp;
   1042 }
   1043 
   1044 static CORE_ADDR
   1045 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
   1046 		       struct regcache *regcache, CORE_ADDR bp_addr,
   1047 		       int nargs, struct value **args,	CORE_ADDR sp,
   1048 		       function_call_return_method return_method,
   1049 		       CORE_ADDR struct_addr)
   1050 {
   1051   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
   1052   gdb_byte buf[8];
   1053 
   1054   /* BND registers can be in arbitrary values at the moment of the
   1055      inferior call.  This can cause boundary violations that are not
   1056      due to a real bug or even desired by the user.  The best to be done
   1057      is set the BND registers to allow access to the whole memory, INIT
   1058      state, before pushing the inferior call.   */
   1059   i387_reset_bnd_regs (gdbarch, regcache);
   1060 
   1061   /* Pass arguments.  */
   1062   sp = amd64_push_arguments (regcache, nargs, args, sp, return_method);
   1063 
   1064   /* Pass "hidden" argument".  */
   1065   if (return_method == return_method_struct)
   1066     {
   1067       store_unsigned_integer (buf, 8, byte_order, struct_addr);
   1068       regcache->cooked_write (AMD64_RDI_REGNUM, buf);
   1069     }
   1070 
   1071   /* Store return address.  */
   1072   sp -= 8;
   1073   store_unsigned_integer (buf, 8, byte_order, bp_addr);
   1074   write_memory (sp, buf, 8);
   1075 
   1076   /* Finally, update the stack pointer...  */
   1077   store_unsigned_integer (buf, 8, byte_order, sp);
   1078   regcache->cooked_write (AMD64_RSP_REGNUM, buf);
   1079 
   1080   /* ...and fake a frame pointer.  */
   1081   regcache->cooked_write (AMD64_RBP_REGNUM, buf);
   1082 
   1083   return sp + 16;
   1084 }
   1085 
/* Displaced instruction handling.  */


/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.
   All offsets are byte offsets into RAW_INSN, filled in by
   amd64_get_insn_details.  */

struct amd64_insn
{
  /* The number of opcode bytes (1, 2 or 3).  */
  int opcode_len;
  /* The offset of the REX/VEX instruction encoding prefix or -1 if
     not present.  */
  int enc_prefix_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  Not owned; points into the caller's buffer.  */
  gdb_byte *raw_insn;
};
   1107 
/* Per-step state for displaced stepping on amd64.  Created by
   amd64_displaced_step_copy_insn and consumed by
   amd64_displaced_step_fixup.  */

struct amd64_displaced_step_closure : public displaced_step_closure
{
  amd64_displaced_step_closure (int insn_buf_len)
  : insn_buf (insn_buf_len, 0)
  {}

  /* For rip-relative insns, saved copy of the reg we use instead of %rip.
     TMP_USED is set by fixup_riprel when TMP_REGNO was repurposed as
     the base register; TMP_SAVE holds its original value so the fixup
     phase can restore it.  */
  int tmp_used = 0;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* The possibly modified insn.  */
  gdb::byte_vector insn_buf;
};
   1125 
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

/* Indexed by the first opcode byte: non-zero if a one-byte-opcode
   instruction has a ModRM byte.  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
   1152 
/* Indexed by the second opcode byte (after the 0x0f escape): non-zero
   if a two-byte-opcode instruction has a ModRM byte.  */

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
   1175 
   1176 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
   1177 
   1178 static int
   1179 rex_prefix_p (gdb_byte pfx)
   1180 {
   1181   return REX_PREFIX_P (pfx);
   1182 }
   1183 
   1184 /* True if PFX is the start of the 2-byte VEX prefix.  */
   1185 
   1186 static bool
   1187 vex2_prefix_p (gdb_byte pfx)
   1188 {
   1189   return pfx == 0xc5;
   1190 }
   1191 
   1192 /* True if PFX is the start of the 3-byte VEX prefix.  */
   1193 
   1194 static bool
   1195 vex3_prefix_p (gdb_byte pfx)
   1196 {
   1197   return pfx == 0xc4;
   1198 }
   1199 
   1200 /* Skip the legacy instruction prefixes in INSN.
   1201    We assume INSN is properly sentineled so we don't have to worry
   1202    about falling off the end of the buffer.  */
   1203 
   1204 static gdb_byte *
   1205 amd64_skip_prefixes (gdb_byte *insn)
   1206 {
   1207   while (1)
   1208     {
   1209       switch (*insn)
   1210 	{
   1211 	case DATA_PREFIX_OPCODE:
   1212 	case ADDR_PREFIX_OPCODE:
   1213 	case CS_PREFIX_OPCODE:
   1214 	case DS_PREFIX_OPCODE:
   1215 	case ES_PREFIX_OPCODE:
   1216 	case FS_PREFIX_OPCODE:
   1217 	case GS_PREFIX_OPCODE:
   1218 	case SS_PREFIX_OPCODE:
   1219 	case LOCK_PREFIX_OPCODE:
   1220 	case REPE_PREFIX_OPCODE:
   1221 	case REPNE_PREFIX_OPCODE:
   1222 	  ++insn;
   1223 	  continue;
   1224 	default:
   1225 	  break;
   1226 	}
   1227       break;
   1228     }
   1229 
   1230   return insn;
   1231 }
   1232 
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      /* mod != 3 with rm == 4 means a SIB byte follows the ModRM byte.  */
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  /* Both the SIB base and index registers are inputs.  */
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  /* At most 8 low-order bits can be set, and at least one must be clear
     (3 regs were pre-reserved, at most 4 more can be marked used).  */
  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
   1306 
/* Extract the details of INSN that we need: the offsets of the
   prefixes, opcode and ModRM byte, and the opcode length.  INSN must
   be sentineled (see amd64_displaced_step_copy_insn) so scanning
   cannot run off the end of the buffer.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  /* Initialize all offsets to "not present".  */
  details->opcode_len = -1;
  details->enc_prefix_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX/VEX instruction encoding prefixes.  */
  if (rex_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      ++insn;
    }
  else if (vex2_prefix_p (*insn))
    {
      /* Don't record the offset in this case because this prefix has
	 no REX.B equivalent.  */
      insn += 2;
    }
  else if (vex3_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      insn += 3;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      /* The ModRM byte immediately follows the last opcode byte.  */
      ++insn;
      details->modrm_offset = insn - start;
    }
}
   1381 
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.

   Note: TO is not referenced here; the rewrite is expressed entirely
   relative to FROM via a scratch base register.  */

static void
fixup_riprel (struct gdbarch *gdbarch, amd64_displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.	*/
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
					  dsc->insn_buf.size (), from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1).  */
  static constexpr gdb_byte VEX3_NOT_B = 0x20;

  /* REX.B should be unset (VEX.!B set) as we were using rip-relative
     addressing, but ensure it's unset (set for VEX) anyway, tmp_regno
     is not r8-r15.  */
  if (insn_details->enc_prefix_offset != -1)
    {
      gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
      if (rex_prefix_p (pfx[0]))
	pfx[0] &= ~REX_B;
      else if (vex3_prefix_p (pfx[0]))
	pfx[1] |= VEX3_NOT_B;
      else
	gdb_assert_not_reached ("unhandled prefix");
    }

  /* Save the scratch register so the fixup phase can restore it.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp: clear mod and rm, then
     set mod = 10 (disp32) with rm = the scratch register.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
   1450 
   1451 static void
   1452 fixup_displaced_copy (struct gdbarch *gdbarch,
   1453 		      amd64_displaced_step_closure *dsc,
   1454 		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
   1455 {
   1456   const struct amd64_insn *details = &dsc->insn_details;
   1457 
   1458   if (details->modrm_offset != -1)
   1459     {
   1460       gdb_byte modrm = details->raw_insn[details->modrm_offset];
   1461 
   1462       if ((modrm & 0xc7) == 0x05)
   1463 	{
   1464 	  /* The insn uses rip-relative addressing.
   1465 	     Deal with it.  */
   1466 	  fixup_riprel (gdbarch, dsc, from, to, regs);
   1467 	}
   1468     }
   1469 }
   1470 
/* The gdbarch displaced_step_copy_insn method: copy the insn at FROM
   to the scratch area at TO, patching it as needed so it can execute
   there, and return the closure that records what was done.  */

displaced_step_closure_up
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  std::unique_ptr<amd64_displaced_step_closure> dsc
    (new amd64_displaced_step_closure (len + fixup_sentinel_space));
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.	 */
  fixup_displaced_copy (gdbarch, dsc.get (), from, to, regs);

  /* Install the (possibly patched) copy in the scratch area.  */
  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_closure_up (dsc.release ());
}
   1520 
   1521 static int
   1522 amd64_absolute_jmp_p (const struct amd64_insn *details)
   1523 {
   1524   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1525 
   1526   if (insn[0] == 0xff)
   1527     {
   1528       /* jump near, absolute indirect (/4) */
   1529       if ((insn[1] & 0x38) == 0x20)
   1530 	return 1;
   1531 
   1532       /* jump far, absolute indirect (/5) */
   1533       if ((insn[1] & 0x38) == 0x28)
   1534 	return 1;
   1535     }
   1536 
   1537   return 0;
   1538 }
   1539 
   1540 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */
   1541 
   1542 static int
   1543 amd64_jmp_p (const struct amd64_insn *details)
   1544 {
   1545   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1546 
   1547   /* jump short, relative.  */
   1548   if (insn[0] == 0xeb)
   1549     return 1;
   1550 
   1551   /* jump near, relative.  */
   1552   if (insn[0] == 0xe9)
   1553     return 1;
   1554 
   1555   return amd64_absolute_jmp_p (details);
   1556 }
   1557 
   1558 static int
   1559 amd64_absolute_call_p (const struct amd64_insn *details)
   1560 {
   1561   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1562 
   1563   if (insn[0] == 0xff)
   1564     {
   1565       /* Call near, absolute indirect (/2) */
   1566       if ((insn[1] & 0x38) == 0x10)
   1567 	return 1;
   1568 
   1569       /* Call far, absolute indirect (/3) */
   1570       if ((insn[1] & 0x38) == 0x18)
   1571 	return 1;
   1572     }
   1573 
   1574   return 0;
   1575 }
   1576 
   1577 static int
   1578 amd64_ret_p (const struct amd64_insn *details)
   1579 {
   1580   /* NOTE: gcc can emit "repz ; ret".  */
   1581   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1582 
   1583   switch (insn[0])
   1584     {
   1585     case 0xc2: /* ret near, pop N bytes */
   1586     case 0xc3: /* ret near */
   1587     case 0xca: /* ret far, pop N bytes */
   1588     case 0xcb: /* ret far */
   1589     case 0xcf: /* iret */
   1590       return 1;
   1591 
   1592     default:
   1593       return 0;
   1594     }
   1595 }
   1596 
   1597 static int
   1598 amd64_call_p (const struct amd64_insn *details)
   1599 {
   1600   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1601 
   1602   if (amd64_absolute_call_p (details))
   1603     return 1;
   1604 
   1605   /* call near, relative */
   1606   if (insn[0] == 0xe8)
   1607     return 1;
   1608 
   1609   return 0;
   1610 }
   1611 
   1612 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
   1613    length in bytes.  Otherwise, return zero.  */
   1614 
   1615 static int
   1616 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
   1617 {
   1618   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1619 
   1620   if (insn[0] == 0x0f && insn[1] == 0x05)
   1621     {
   1622       *lengthp = 2;
   1623       return 1;
   1624     }
   1625 
   1626   return 0;
   1627 }
   1628 
   1629 /* Classify the instruction at ADDR using PRED.
   1630    Throw an error if the memory can't be read.  */
   1631 
   1632 static int
   1633 amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
   1634 			int (*pred) (const struct amd64_insn *))
   1635 {
   1636   struct amd64_insn details;
   1637   gdb_byte *buf;
   1638   int len, classification;
   1639 
   1640   len = gdbarch_max_insn_length (gdbarch);
   1641   buf = (gdb_byte *) alloca (len);
   1642 
   1643   read_code (addr, buf, len);
   1644   amd64_get_insn_details (buf, &details);
   1645 
   1646   classification = pred (&details);
   1647 
   1648   return classification;
   1649 }
   1650 
   1651 /* The gdbarch insn_is_call method.  */
   1652 
   1653 static int
   1654 amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
   1655 {
   1656   return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
   1657 }
   1658 
   1659 /* The gdbarch insn_is_ret method.  */
   1660 
   1661 static int
   1662 amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
   1663 {
   1664   return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
   1665 }
   1666 
   1667 /* The gdbarch insn_is_jump method.  */
   1668 
   1669 static int
   1670 amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
   1671 {
   1672   return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
   1673 }
   1674 
   1675 /* Fix up the state of registers and memory after having single-stepped
   1676    a displaced instruction.  */
   1677 
   1678 void
   1679 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
   1680 			    struct displaced_step_closure *dsc_,
   1681 			    CORE_ADDR from, CORE_ADDR to,
   1682 			    struct regcache *regs)
   1683 {
   1684   amd64_displaced_step_closure *dsc = (amd64_displaced_step_closure *) dsc_;
   1685   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
   1686   /* The offset we applied to the instruction's address.  */
   1687   ULONGEST insn_offset = to - from;
   1688   gdb_byte *insn = dsc->insn_buf.data ();
   1689   const struct amd64_insn *insn_details = &dsc->insn_details;
   1690 
   1691   if (debug_displaced)
   1692     fprintf_unfiltered (gdb_stdlog,
   1693 			"displaced: fixup (%s, %s), "
   1694 			"insn = 0x%02x 0x%02x ...\n",
   1695 			paddress (gdbarch, from), paddress (gdbarch, to),
   1696 			insn[0], insn[1]);
   1697 
   1698   /* If we used a tmp reg, restore it.	*/
   1699 
   1700   if (dsc->tmp_used)
   1701     {
   1702       if (debug_displaced)
   1703 	fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
   1704 			    dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
   1705       regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
   1706     }
   1707 
   1708   /* The list of issues to contend with here is taken from
   1709      resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
   1710      Yay for Free Software!  */
   1711 
   1712   /* Relocate the %rip back to the program's instruction stream,
   1713      if necessary.  */
   1714 
   1715   /* Except in the case of absolute or indirect jump or call
   1716      instructions, or a return instruction, the new rip is relative to
   1717      the displaced instruction; make it relative to the original insn.
   1718      Well, signal handler returns don't need relocation either, but we use the
   1719      value of %rip to recognize those; see below.  */
   1720   if (! amd64_absolute_jmp_p (insn_details)
   1721       && ! amd64_absolute_call_p (insn_details)
   1722       && ! amd64_ret_p (insn_details))
   1723     {
   1724       ULONGEST orig_rip;
   1725       int insn_len;
   1726 
   1727       regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
   1728 
   1729       /* A signal trampoline system call changes the %rip, resuming
   1730 	 execution of the main program after the signal handler has
   1731 	 returned.  That makes them like 'return' instructions; we
   1732 	 shouldn't relocate %rip.
   1733 
   1734 	 But most system calls don't, and we do need to relocate %rip.
   1735 
   1736 	 Our heuristic for distinguishing these cases: if stepping
   1737 	 over the system call instruction left control directly after
   1738 	 the instruction, the we relocate --- control almost certainly
   1739 	 doesn't belong in the displaced copy.	Otherwise, we assume
   1740 	 the instruction has put control where it belongs, and leave
   1741 	 it unrelocated.  Goodness help us if there are PC-relative
   1742 	 system calls.	*/
   1743       if (amd64_syscall_p (insn_details, &insn_len)
   1744 	  && orig_rip != to + insn_len
   1745 	  /* GDB can get control back after the insn after the syscall.
   1746 	     Presumably this is a kernel bug.
   1747 	     Fixup ensures its a nop, we add one to the length for it.  */
   1748 	  && orig_rip != to + insn_len + 1)
   1749 	{
   1750 	  if (debug_displaced)
   1751 	    fprintf_unfiltered (gdb_stdlog,
   1752 				"displaced: syscall changed %%rip; "
   1753 				"not relocating\n");
   1754 	}
   1755       else
   1756 	{
   1757 	  ULONGEST rip = orig_rip - insn_offset;
   1758 
   1759 	  /* If we just stepped over a breakpoint insn, we don't backup
   1760 	     the pc on purpose; this is to match behaviour without
   1761 	     stepping.  */
   1762 
   1763 	  regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
   1764 
   1765 	  if (debug_displaced)
   1766 	    fprintf_unfiltered (gdb_stdlog,
   1767 				"displaced: "
   1768 				"relocated %%rip from %s to %s\n",
   1769 				paddress (gdbarch, orig_rip),
   1770 				paddress (gdbarch, rip));
   1771 	}
   1772     }
   1773 
   1774   /* If the instruction was PUSHFL, then the TF bit will be set in the
   1775      pushed value, and should be cleared.  We'll leave this for later,
   1776      since GDB already messes up the TF flag when stepping over a
   1777      pushfl.  */
   1778 
   1779   /* If the instruction was a call, the return address now atop the
   1780      stack is the address following the copied instruction.  We need
   1781      to make it the address following the original instruction.	 */
   1782   if (amd64_call_p (insn_details))
   1783     {
   1784       ULONGEST rsp;
   1785       ULONGEST retaddr;
   1786       const ULONGEST retaddr_len = 8;
   1787 
   1788       regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
   1789       retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
   1790       retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
   1791       write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
   1792 
   1793       if (debug_displaced)
   1794 	fprintf_unfiltered (gdb_stdlog,
   1795 			    "displaced: relocated return addr at %s "
   1796 			    "to %s\n",
   1797 			    paddress (gdbarch, rsp),
   1798 			    paddress (gdbarch, retaddr));
   1799     }
   1800 }
   1801 
   1802 /* If the instruction INSN uses RIP-relative addressing, return the
   1803    offset into the raw INSN where the displacement to be adjusted is
   1804    found.  Returns 0 if the instruction doesn't use RIP-relative
   1805    addressing.  */
   1806 
   1807 static int
   1808 rip_relative_offset (struct amd64_insn *insn)
   1809 {
   1810   if (insn->modrm_offset != -1)
   1811     {
   1812       gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
   1813 
   1814       if ((modrm & 0xc7) == 0x05)
   1815 	{
   1816 	  /* The displacement is found right after the ModRM byte.  */
   1817 	  return insn->modrm_offset + 1;
   1818 	}
   1819     }
   1820 
   1821   return 0;
   1822 }
   1823 
   1824 static void
   1825 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
   1826 {
   1827   target_write_memory (*to, buf, len);
   1828   *to += len;
   1829 }
   1830 
   1831 static void
   1832 amd64_relocate_instruction (struct gdbarch *gdbarch,
   1833 			    CORE_ADDR *to, CORE_ADDR oldloc)
   1834 {
   1835   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
   1836   int len = gdbarch_max_insn_length (gdbarch);
   1837   /* Extra space for sentinels.  */
   1838   int fixup_sentinel_space = len;
   1839   gdb_byte *buf = (gdb_byte *) xmalloc (len + fixup_sentinel_space);
   1840   struct amd64_insn insn_details;
   1841   int offset = 0;
   1842   LONGEST rel32, newrel;
   1843   gdb_byte *insn;
   1844   int insn_length;
   1845 
   1846   read_memory (oldloc, buf, len);
   1847 
   1848   /* Set up the sentinel space so we don't have to worry about running
   1849      off the end of the buffer.  An excessive number of leading prefixes
   1850      could otherwise cause this.  */
   1851   memset (buf + len, 0, fixup_sentinel_space);
   1852 
   1853   insn = buf;
   1854   amd64_get_insn_details (insn, &insn_details);
   1855 
   1856   insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
   1857 
   1858   /* Skip legacy instruction prefixes.  */
   1859   insn = amd64_skip_prefixes (insn);
   1860 
   1861   /* Adjust calls with 32-bit relative addresses as push/jump, with
   1862      the address pushed being the location where the original call in
   1863      the user program would return to.  */
   1864   if (insn[0] == 0xe8)
   1865     {
   1866       gdb_byte push_buf[32];
   1867       CORE_ADDR ret_addr;
   1868       int i = 0;
   1869 
   1870       /* Where "ret" in the original code will return to.  */
   1871       ret_addr = oldloc + insn_length;
   1872 
   1873       /* If pushing an address higher than or equal to 0x80000000,
   1874 	 avoid 'pushq', as that sign extends its 32-bit operand, which
   1875 	 would be incorrect.  */
   1876       if (ret_addr <= 0x7fffffff)
   1877 	{
   1878 	  push_buf[0] = 0x68; /* pushq $...  */
   1879 	  store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
   1880 	  i = 5;
   1881 	}
   1882       else
   1883 	{
   1884 	  push_buf[i++] = 0x48; /* sub    $0x8,%rsp */
   1885 	  push_buf[i++] = 0x83;
   1886 	  push_buf[i++] = 0xec;
   1887 	  push_buf[i++] = 0x08;
   1888 
   1889 	  push_buf[i++] = 0xc7; /* movl    $imm,(%rsp) */
   1890 	  push_buf[i++] = 0x04;
   1891 	  push_buf[i++] = 0x24;
   1892 	  store_unsigned_integer (&push_buf[i], 4, byte_order,
   1893 				  ret_addr & 0xffffffff);
   1894 	  i += 4;
   1895 
   1896 	  push_buf[i++] = 0xc7; /* movl    $imm,4(%rsp) */
   1897 	  push_buf[i++] = 0x44;
   1898 	  push_buf[i++] = 0x24;
   1899 	  push_buf[i++] = 0x04;
   1900 	  store_unsigned_integer (&push_buf[i], 4, byte_order,
   1901 				  ret_addr >> 32);
   1902 	  i += 4;
   1903 	}
   1904       gdb_assert (i <= sizeof (push_buf));
   1905       /* Push the push.  */
   1906       append_insns (to, i, push_buf);
   1907 
   1908       /* Convert the relative call to a relative jump.  */
   1909       insn[0] = 0xe9;
   1910 
   1911       /* Adjust the destination offset.  */
   1912       rel32 = extract_signed_integer (insn + 1, 4, byte_order);
   1913       newrel = (oldloc - *to) + rel32;
   1914       store_signed_integer (insn + 1, 4, byte_order, newrel);
   1915 
   1916       if (debug_displaced)
   1917 	fprintf_unfiltered (gdb_stdlog,
   1918 			    "Adjusted insn rel32=%s at %s to"
   1919 			    " rel32=%s at %s\n",
   1920 			    hex_string (rel32), paddress (gdbarch, oldloc),
   1921 			    hex_string (newrel), paddress (gdbarch, *to));
   1922 
   1923       /* Write the adjusted jump into its displaced location.  */
   1924       append_insns (to, 5, insn);
   1925       return;
   1926     }
   1927 
   1928   offset = rip_relative_offset (&insn_details);
   1929   if (!offset)
   1930     {
   1931       /* Adjust jumps with 32-bit relative addresses.  Calls are
   1932 	 already handled above.  */
   1933       if (insn[0] == 0xe9)
   1934 	offset = 1;
   1935       /* Adjust conditional jumps.  */
   1936       else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
   1937 	offset = 2;
   1938     }
   1939 
   1940   if (offset)
   1941     {
   1942       rel32 = extract_signed_integer (insn + offset, 4, byte_order);
   1943       newrel = (oldloc - *to) + rel32;
   1944       store_signed_integer (insn + offset, 4, byte_order, newrel);
   1945       if (debug_displaced)
   1946 	fprintf_unfiltered (gdb_stdlog,
   1947 			    "Adjusted insn rel32=%s at %s to"
   1948 			    " rel32=%s at %s\n",
   1949 			    hex_string (rel32), paddress (gdbarch, oldloc),
   1950 			    hex_string (newrel), paddress (gdbarch, *to));
   1951     }
   1952 
   1953   /* Write the adjusted instruction into its displaced location.  */
   1954   append_insns (to, insn_length, buf);
   1955 }
   1956 
   1957 
   1958 /* The maximum number of saved registers.  This should include %rip.  */
   1960 #define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS
   1961 
struct amd64_frame_cache
{
  /* Base address of the frame: the contents of %rbp for a normal
     frame, or a value reconstructed from %rsp when no frame pointer
     was set up (see amd64_frame_cache_1).  */
  CORE_ADDR base;
  /* Non-zero once BASE has been successfully computed.  */
  int base_p;
  /* Tracks how far %rsp has moved while scanning the prologue.  */
  CORE_ADDR sp_offset;
  /* Start address of the function this cache describes.  */
  CORE_ADDR pc;

  /* Saved registers.  Each slot starts out as an offset relative to
     BASE and is later converted to an absolute address; -1 means the
     register was not saved (zero is a valid offset -- it is where
     %rbp is stored).  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  /* Value of %rsp in the calling frame.  */
  CORE_ADDR saved_sp;
  /* Register holding the stack pointer saved by a stack-realignment
     prologue, or -1 if none was detected.  */
  int saved_sp_reg;

  /* Do we have a frame?  Non-zero means no frame pointer has been
     set up; frameless until proven otherwise.  */
  int frameless_p;
};
   1978 
   1979 /* Initialize a frame cache.  */
   1980 
   1981 static void
   1982 amd64_init_frame_cache (struct amd64_frame_cache *cache)
   1983 {
   1984   int i;
   1985 
   1986   /* Base address.  */
   1987   cache->base = 0;
   1988   cache->base_p = 0;
   1989   cache->sp_offset = -8;
   1990   cache->pc = 0;
   1991 
   1992   /* Saved registers.  We initialize these to -1 since zero is a valid
   1993      offset (that's where %rbp is supposed to be stored).
   1994      The values start out as being offsets, and are later converted to
   1995      addresses (at which point -1 is interpreted as an address, still meaning
   1996      "invalid").  */
   1997   for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
   1998     cache->saved_regs[i] = -1;
   1999   cache->saved_sp = 0;
   2000   cache->saved_sp_reg = -1;
   2001 
   2002   /* Frameless until proven otherwise.  */
   2003   cache->frameless_p = 1;
   2004 }
   2005 
   2006 /* Allocate and initialize a frame cache.  */
   2007 
   2008 static struct amd64_frame_cache *
   2009 amd64_alloc_frame_cache (void)
   2010 {
   2011   struct amd64_frame_cache *cache;
   2012 
   2013   cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
   2014   amd64_init_frame_cache (cache);
   2015   return cache;
   2016 }
   2017 
/* GCC 4.4 and later, can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

     	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
     	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  /* 18 bytes covers the longest recognized sequence: 2-byte pushq,
     5-byte leaq, 7-byte andq, and the REX + opcode + ModRM + disp8 of
     the final pushq.  */
  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_code (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the `andq' starts; its length depends on whether
     the immediate is 8-bit (0x83) or 32-bit (0x81).  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Record REG as holding the pre-alignment stack pointer, but only
     if execution has already passed the start of the `andq' -- before
     that, %rsp is still unmodified.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  /* OFFSET points at the final pushq's ModRM byte; +2 skips the ModRM
     and the 8-bit displacement.  */
  return std::min (pc + offset + 2, current_pc);
}
   2172 
/* Similar to amd64_analyze_stack_align for x32.  */

static CORE_ADDR
amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			       struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	   or

		[addr32] leal  8(%rsp), %reg
		andl  $-XXX, %esp
		[addr32] pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	   or

		pushq %reg
		[addr32] leal  16(%rsp), %reg
		andl  $-XXX, %esp
		[addr32] pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

     	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
     	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp

     "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:

     	0x83 0xe4 0xf0			andl $-16, %esp
     	0x81 0xe4 0x00 0xff 0xff 0xff	andl $-256, %esp
   */

  /* One byte larger than the 64-bit variant's buffer, to allow for
     the optional addr32 (0x67) prefixes.  */
  gdb_byte buf[19];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Skip optional addr32 prefix.  */
  offset = buf[0] == 0x67 ? 1 : 0;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg".  */
  if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
      && buf[offset + 1] == 0x8d
      && buf[offset + 3] == 0x24
      && buf[offset + 4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	reg += 8;

      offset += 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[offset] & 0xf6) == 0x40
	  && (buf[offset + 1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[offset] & 1) != 0)
	    reg = 8;

	  offset += 1;
	}
      else if ((buf[offset] & 0xf8) != 0x50)
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* Skip optional addr32 prefix.  */
      if (buf[offset] == 0x67)
	offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg" or
	 "leal 16(%rsp), %reg".  */
      if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction may be "andq $-XXX, %rsp" or
     "andl $-XXX, %esp".  The 32-bit form has no REX.W prefix; back up
     one byte so the opcode checks below line up for both forms.  */
  if (buf[offset] != 0x48)
    offset--;

  if (buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the `and' starts; its length depends on whether
     the immediate is 8-bit (0x83) or 32-bit (0x81).  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* Skip optional addr32 prefix.  */
  if (buf[offset] == 0x67)
    offset++;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Record REG as holding the pre-alignment stack pointer, but only
     if execution has already passed the start of the `and' -- before
     that, the stack pointer is still unmodified.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  /* OFFSET points at the final pushq's ModRM byte; +2 skips the ModRM
     and the 8-bit displacement.  */
  return std::min (pc + offset + 2, current_pc);
}
   2354 
/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5 (or 0x48 0x8b 0xec)

   or (for the X32 ABI):

      pushq %rbp        0x55
      movl %esp, %ebp   0x89 0xe5 (or 0x8b 0xec)

   The `endbr64` instruction can be found before these sequences, and will be
   skipped if found.

   Any function that doesn't start with one of these sequences will be
   assumed to have no prologue and thus no valid frame pointer in
   %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
			CORE_ADDR pc, CORE_ADDR current_pc,
			struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The `endbr64` instruction.  */
  static const gdb_byte endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
  /* There are two variations of movq %rsp, %rbp.  */
  static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
  static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
  /* Ditto for movl %esp, %ebp.  */
  static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
  static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };

  gdb_byte buf[3];
  gdb_byte op;

  if (current_pc <= pc)
    return current_pc;

  /* First recognize (and skip) any stack-realignment sequence; x32
     uses a different encoding (32-bit pointers).  */
  if (gdbarch_ptr_bit (gdbarch) == 32)
    pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
  else
    pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_code_unsigned_integer (pc, 1, byte_order);

  /* Check for the `endbr64` instruction, skip it if found.  */
  if (op == endbr64[0])
    {
      read_code (pc + 1, buf, 3);

      if (memcmp (buf, &endbr64[1], 3) == 0)
	pc += 4;

      op = read_code_unsigned_integer (pc, 1, byte_order);
    }

  if (current_pc <= pc)
    return current_pc;

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
         starts this instruction sequence: %rbp is saved at offset 0
         from the frame base, and %rsp has moved down by 8.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
        return current_pc;

      read_code (pc + 1, buf, 3);

      /* Check for `movq %rsp, %rbp'.  */
      if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
	  || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
	{
	  /* OK, we actually have a frame.  */
	  cache->frameless_p = 0;
	  return pc + 4;
	}

      /* For X32, also check for `movl %esp, %ebp'.  */
      if (gdbarch_ptr_bit (gdbarch) == 32)
	{
	  if (memcmp (buf, mov_esp_ebp_1, 2) == 0
	      || memcmp (buf, mov_esp_ebp_2, 2) == 0)
	    {
	      /* OK, we actually have a frame.  */
	      cache->frameless_p = 0;
	      return pc + 3;
	    }
	}

      /* Only the pushq was recognized; stop right after it.  */
      return pc + 1;
    }

  return pc;
}
   2457 
/* Work around false termination of prologue - GCC PR debug/48827.

   START_PC is the first instruction of a function, PC is its minimal already
   determined advanced address.  Function returns PC if it has nothing to do.

   84 c0                test   %al,%al
   74 23                je     after
   <-- here is 0 lines advance - the false prologue end marker.
   0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
   0f 29 4d 80          movaps %xmm1,-0x80(%rbp)
   0f 29 55 90          movaps %xmm2,-0x70(%rbp)
   0f 29 5d a0          movaps %xmm3,-0x60(%rbp)
   0f 29 65 b0          movaps %xmm4,-0x50(%rbp)
   0f 29 6d c0          movaps %xmm5,-0x40(%rbp)
   0f 29 75 d0          movaps %xmm6,-0x30(%rbp)
   0f 29 7d e0          movaps %xmm7,-0x20(%rbp)
   after:  */

static CORE_ADDR
amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
{
  struct symtab_and_line start_pc_sal, next_sal;
  /* 4 bytes for "test %al,%al; je" plus up to 7 bytes for each of the
     8 movaps stores.  */
  gdb_byte buf[4 + 8 * 7];
  int offset, xmmreg;

  if (pc == start_pc)
    return pc;

  /* Only GCC output is affected; producer_is_gcc_ge_4 is assumed to
     return the GCC 4.x minor version here (confirm in producer.c), so
     this bails out for anything older than GCC 4.6 or non-GCC.  */
  start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
  if (start_pc_sal.symtab == NULL
      || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
	   (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
      || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
    return pc;

  /* The false prologue-end marker shows up as a second SAL on the
     same source line.  */
  next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
  if (next_sal.line != start_pc_sal.line)
    return pc;

  /* START_PC can be from overlayed memory, ignored here.  */
  if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
    return pc;

  /* test %al,%al */
  if (buf[0] != 0x84 || buf[1] != 0xc0)
    return pc;
  /* je AFTER */
  if (buf[2] != 0x74)
    return pc;

  offset = 4;
  for (xmmreg = 0; xmmreg < 8; xmmreg++)
    {
      /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
      if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
          || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
	return pc;

      /* 0b01?????? */
      if ((buf[offset + 2] & 0xc0) == 0x40)
	{
	  /* 8-bit displacement.  */
	  offset += 4;
	}
      /* 0b10?????? */
      else if ((buf[offset + 2] & 0xc0) == 0x80)
	{
	  /* 32-bit displacement.  */
	  offset += 7;
	}
      else
	return pc;
    }

  /* je AFTER: the jump's displacement (BUF[3]) must land exactly past
     the movaps block just scanned.  */
  if (offset - 4 != buf[3])
    return pc;

  return next_sal.end;
}
   2538 
   2539 /* Return PC of first real instruction.  */
   2540 
   2541 static CORE_ADDR
   2542 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
   2543 {
   2544   struct amd64_frame_cache cache;
   2545   CORE_ADDR pc;
   2546   CORE_ADDR func_addr;
   2547 
   2548   if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
   2549     {
   2550       CORE_ADDR post_prologue_pc
   2551 	= skip_prologue_using_sal (gdbarch, func_addr);
   2552       struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
   2553 
   2554       /* LLVM backend (Clang/Flang) always emits a line note before the
   2555          prologue and another one after.  We trust clang to emit usable
   2556          line notes.  */
   2557       if (post_prologue_pc
   2558 	  && (cust != NULL
   2559 	      && COMPUNIT_PRODUCER (cust) != NULL
   2560 	      && producer_is_llvm (COMPUNIT_PRODUCER (cust))))
   2561         return std::max (start_pc, post_prologue_pc);
   2562     }
   2563 
   2564   amd64_init_frame_cache (&cache);
   2565   pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
   2566 			       &cache);
   2567   if (cache.frameless_p)
   2568     return start_pc;
   2569 
   2570   return amd64_skip_xmm_prologue (pc, start_pc);
   2571 }
   2572 
   2573 
   2575 /* Normal frames.  */
   2576 
/* Fill CACHE for THIS_FRAME by analyzing the function's prologue and
   reading the relevant registers.  On success BASE, SAVED_SP and the
   SAVED_REGS addresses are filled in and BASE_P is set.  */

static void
amd64_frame_cache_1 (struct frame_info *this_frame,
		     struct amd64_frame_cache *cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];
  int i;

  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
			    cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
	 function, or somewhere half-way its prologue, the function's
	 frame probably hasn't been fully setup yet.  Try to
	 reconstruct the base address for the stack frame by looking
	 at the stack pointer.  For truly "frameless" functions this
	 might work too.  */

      if (cache->saved_sp_reg != -1)
	{
	  /* Stack pointer has been saved.  */
	  get_frame_register (this_frame, cache->saved_sp_reg, buf);
	  cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);

	  /* We're halfway aligning the stack.  */
	  cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
	  cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

	  /* This will be added back below.  The saved %rip slot must
	     stay an offset here because the conversion loop at the end
	     adds BASE to every valid entry.  */
	  cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
	}
      else
	{
	  /* No saved stack pointer: reconstruct the base from the
	     current %rsp plus whatever the prologue pushed so far.  */
	  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
	  cache->base = extract_unsigned_integer (buf, 8, byte_order)
			+ cache->sp_offset;
	}
    }
  else
    {
      /* A normal frame: the base is simply %rbp.  */
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  cache->base_p = 1;
}
   2645 
/* Return the frame cache for THIS_FRAME, creating and storing it in
   *THIS_CACHE on first use.  If filling the cache fails because
   registers or memory are unavailable, the partially-initialized
   cache is returned with its base_p flag still clear; any other
   error is propagated to the caller.  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  try
    {
      amd64_frame_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Swallow "not available" errors only; the cache stays marked
	 invalid (base_p == 0) so unwinding reports UNAVAILABLE.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}
   2669 
   2670 static enum unwind_stop_reason
   2671 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
   2672 				void **this_cache)
   2673 {
   2674   struct amd64_frame_cache *cache =
   2675     amd64_frame_cache (this_frame, this_cache);
   2676 
   2677   if (!cache->base_p)
   2678     return UNWIND_UNAVAILABLE;
   2679 
   2680   /* This marks the outermost frame.  */
   2681   if (cache->base == 0)
   2682     return UNWIND_OUTERMOST;
   2683 
   2684   return UNWIND_NO_REASON;
   2685 }
   2686 
   2687 static void
   2688 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
   2689 		     struct frame_id *this_id)
   2690 {
   2691   struct amd64_frame_cache *cache =
   2692     amd64_frame_cache (this_frame, this_cache);
   2693 
   2694   if (!cache->base_p)
   2695     (*this_id) = frame_id_build_unavailable_stack (cache->pc);
   2696   else if (cache->base == 0)
   2697     {
   2698       /* This marks the outermost frame.  */
   2699       return;
   2700     }
   2701   else
   2702     (*this_id) = frame_id_build (cache->base + 16, cache->pc);
   2703 }
   2704 
/* Implement the prev_register frame unwind method for normal frames:
   return the value REGNUM had in the frame that called THIS_FRAME.  */

static struct value *
amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
			   int regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct amd64_frame_cache *cache =
    amd64_frame_cache (this_frame, this_cache);

  gdb_assert (regnum >= 0);

  /* The previous %rsp was reconstructed by the cache rather than
     saved in memory, so hand it back as a constant.  */
  if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
    return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);

  /* Registers saved in the prologue live at known stack addresses.  */
  if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
    return frame_unwind_got_memory (this_frame, regnum,
				    cache->saved_regs[regnum]);

  /* Anything else is assumed unchanged across the call.  */
  return frame_unwind_got_register (this_frame, regnum, regnum);
}
   2724 
/* Unwinder for normal, prologue-analyzed frames.  */

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_unwind_stop_reason,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,				/* unwind_data */
  default_frame_sniffer
};
   2734 
/* Generate a bytecode expression to get the value of the saved PC.  */

static void
amd64_gen_return_address (struct gdbarch *gdbarch,
			  struct agent_expr *ax, struct axs_value *value,
			  CORE_ADDR scope)
{
  /* The following sequence assumes the traditional use of the base
     register.  */
  ax_reg (ax, AMD64_RBP_REGNUM);
  ax_const_l (ax, 8);
  ax_simple (ax, aop_add);	/* The saved %rip lives at 8(%rbp).  */
  value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
  value->kind = axs_lvalue_memory;
}
   2751 
   2752 
   2754 /* Signal trampolines.  */
   2755 
   2756 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   2757    64-bit variants.  This would require using identical frame caches
   2758    on both platforms.  */
   2759 
/* Build the frame cache for a signal trampoline frame.  Saved
   register locations are taken from the OS-specific signal context,
   located via tdep->sigcontext_addr and described by
   tdep->sc_reg_offset / tdep->sc_num_regs.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();

  try
    {
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

      /* Each saved register lives at a fixed offset from the signal
	 context; an offset of -1 means that register isn't saved.  */
      addr = tdep->sigcontext_addr (this_frame);
      gdb_assert (tdep->sc_reg_offset);
      gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
      for (i = 0; i < tdep->sc_num_regs; i++)
	if (tdep->sc_reg_offset[i] != -1)
	  cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

      cache->base_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      /* Swallow "not available" errors; base_p stays clear so the
	 unwinder reports UNAVAILABLE.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  *this_cache = cache;
  return cache;
}
   2799 
   2800 static enum unwind_stop_reason
   2801 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
   2802 					 void **this_cache)
   2803 {
   2804   struct amd64_frame_cache *cache =
   2805     amd64_sigtramp_frame_cache (this_frame, this_cache);
   2806 
   2807   if (!cache->base_p)
   2808     return UNWIND_UNAVAILABLE;
   2809 
   2810   return UNWIND_NO_REASON;
   2811 }
   2812 
   2813 static void
   2814 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
   2815 			      void **this_cache, struct frame_id *this_id)
   2816 {
   2817   struct amd64_frame_cache *cache =
   2818     amd64_sigtramp_frame_cache (this_frame, this_cache);
   2819 
   2820   if (!cache->base_p)
   2821     (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
   2822   else if (cache->base == 0)
   2823     {
   2824       /* This marks the outermost frame.  */
   2825       return;
   2826     }
   2827   else
   2828     (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
   2829 }
   2830 
/* Implement the prev_register frame unwind method for signal
   trampoline frames.  The work is delegated to the normal-frame
   method once the sigtramp-specific cache is in place.  */

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
   2840 
   2841 static int
   2842 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
   2843 			      struct frame_info *this_frame,
   2844 			      void **this_cache)
   2845 {
   2846   struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
   2847 
   2848   /* We shouldn't even bother if we don't have a sigcontext_addr
   2849      handler.  */
   2850   if (tdep->sigcontext_addr == NULL)
   2851     return 0;
   2852 
   2853   if (tdep->sigtramp_p != NULL)
   2854     {
   2855       if (tdep->sigtramp_p (this_frame))
   2856 	return 1;
   2857     }
   2858 
   2859   if (tdep->sigtramp_start != 0)
   2860     {
   2861       CORE_ADDR pc = get_frame_pc (this_frame);
   2862 
   2863       gdb_assert (tdep->sigtramp_end != 0);
   2864       if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
   2865 	return 1;
   2866     }
   2867 
   2868   return 0;
   2869 }
   2870 
/* Unwinder for signal trampoline frames.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_unwind_stop_reason,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,				/* unwind_data */
  amd64_sigtramp_frame_sniffer
};
   2880 
   2881 
   2883 static CORE_ADDR
   2884 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
   2885 {
   2886   struct amd64_frame_cache *cache =
   2887     amd64_frame_cache (this_frame, this_cache);
   2888 
   2889   return cache->base;
   2890 }
   2891 
/* Frame base handling for normal frames: base, locals, and
   arguments all use the cached frame base address.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};
   2899 
   2900 /* Normal frames, but in a function epilogue.  */
   2901 
   2902 /* Implement the stack_frame_destroyed_p gdbarch method.
   2903 
   2904    The epilogue is defined here as the 'ret' instruction, which will
   2905    follow any instruction such as 'leave' or 'pop %ebp' that destroys
   2906    the function's stack frame.  */
   2907 
   2908 static int
   2909 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
   2910 {
   2911   gdb_byte insn;
   2912   struct compunit_symtab *cust;
   2913 
   2914   cust = find_pc_compunit_symtab (pc);
   2915   if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
   2916     return 0;
   2917 
   2918   if (target_read_memory (pc, &insn, 1))
   2919     return 0;   /* Can't read memory at pc.  */
   2920 
   2921   if (insn != 0xc3)     /* 'ret' instruction.  */
   2922     return 0;
   2923 
   2924   return 1;
   2925 }
   2926 
   2927 static int
   2928 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
   2929 			      struct frame_info *this_frame,
   2930 			      void **this_prologue_cache)
   2931 {
   2932   if (frame_relative_level (this_frame) == 0)
   2933     return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
   2934 					  get_frame_pc (this_frame));
   2935   else
   2936     return 0;
   2937 }
   2938 
/* Build the frame cache for a frame stopped in its epilogue, where
   the frame has already been torn down and only the return address
   remains on the stack.  */

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  try
    {
      /* Cache base will be %rsp plus cache->sp_offset (-8).  */
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8,
					      byte_order) + cache->sp_offset;

      /* Cache pc will be the frame func.  */
      cache->pc = get_frame_pc (this_frame);

      /* The saved %rsp will be at cache->base plus 16.  */
      cache->saved_sp = cache->base + 16;

      /* The saved %rip will be at cache->base plus 8.  */
      cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

      cache->base_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      /* Swallow "not available" errors; base_p stays clear.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}
   2979 
   2980 static enum unwind_stop_reason
   2981 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
   2982 					 void **this_cache)
   2983 {
   2984   struct amd64_frame_cache *cache
   2985     = amd64_epilogue_frame_cache (this_frame, this_cache);
   2986 
   2987   if (!cache->base_p)
   2988     return UNWIND_UNAVAILABLE;
   2989 
   2990   return UNWIND_NO_REASON;
   2991 }
   2992 
   2993 static void
   2994 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
   2995 			      void **this_cache,
   2996 			      struct frame_id *this_id)
   2997 {
   2998   struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
   2999 							       this_cache);
   3000 
   3001   if (!cache->base_p)
   3002     (*this_id) = frame_id_build_unavailable_stack (cache->pc);
   3003   else
   3004     (*this_id) = frame_id_build (cache->base + 8, cache->pc);
   3005 }
   3006 
/* Unwinder for frames stopped in a function epilogue.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_unwind_stop_reason,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,				/* unwind_data */
  amd64_epilogue_frame_sniffer
};
   3016 
   3017 static struct frame_id
   3018 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
   3019 {
   3020   CORE_ADDR fp;
   3021 
   3022   fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
   3023 
   3024   return frame_id_build (fp + 16, get_frame_pc (this_frame));
   3025 }
   3026 
   3027 /* 16 byte align the SP per frame requirements.  */
   3028 
   3029 static CORE_ADDR
   3030 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
   3031 {
   3032   return sp & -(CORE_ADDR)16;
   3033 }
   3034 
   3035 
/* Supply register REGNUM from the buffer specified by FPREGS and LEN
   in the floating-point register set REGSET to register cache
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */

static void
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
		       int regnum, const void *fpregs, size_t len)
{
  struct gdbarch *gdbarch = regcache->arch ();
  const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* The buffer must hold at least a full fxsave area.  */
  gdb_assert (len >= tdep->sizeof_fpregset);
  amd64_supply_fxsave (regcache, regnum, fpregs);
}
   3051 
/* Collect register REGNUM from the register cache REGCACHE and store
   it in the buffer specified by FPREGS and LEN as described by the
   floating-point register set REGSET.  If REGNUM is -1, do this for
   all registers in REGSET.  */

static void
amd64_collect_fpregset (const struct regset *regset,
			const struct regcache *regcache,
			int regnum, void *fpregs, size_t len)
{
  struct gdbarch *gdbarch = regcache->arch ();
  const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* The buffer must hold at least a full fxsave area.  */
  gdb_assert (len >= tdep->sizeof_fpregset);
  amd64_collect_fxsave (regcache, regnum, fpregs);
}
   3068 
/* Register set definition for the floating-point registers in
   fxsave format.  */

const struct regset amd64_fpregset =
  {
    NULL, amd64_supply_fpregset, amd64_collect_fpregset
  };
   3073 
   3074 
   3076 /* Figure out where the longjmp will land.  Slurp the jmp_buf out of
   3077    %rdi.  We expect its value to be a pointer to the jmp_buf structure
   3078    from which we extract the address that we will land at.  This
   3079    address is copied into PC.  This routine returns non-zero on
   3080    success.  */
   3081 
   3082 static int
   3083 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
   3084 {
   3085   gdb_byte buf[8];
   3086   CORE_ADDR jb_addr;
   3087   struct gdbarch *gdbarch = get_frame_arch (frame);
   3088   int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
   3089   int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
   3090 
   3091   /* If JB_PC_OFFSET is -1, we have no way to find out where the
   3092      longjmp will land.	 */
   3093   if (jb_pc_offset == -1)
   3094     return 0;
   3095 
   3096   get_frame_register (frame, AMD64_RDI_REGNUM, buf);
   3097   jb_addr= extract_typed_address
   3098 	    (buf, builtin_type (gdbarch)->builtin_data_ptr);
   3099   if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
   3100     return 0;
   3101 
   3102   *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
   3103 
   3104   return 1;
   3105 }
   3106 
/* Register map used by process record (installed as
   tdep->record_regmap in amd64_init_abi): maps record-internal
   register indices to GDB's amd64 register numbers.  */

static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};
   3116 
/* Implement the "in_indirect_branch_thunk" gdbarch function.  Return
   true if PC lies inside an indirect-branch thunk for any of the
   registers %rax..%r15 or for %rip.  */

static bool
amd64_in_indirect_branch_thunk (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  return x86_in_indirect_branch_thunk (pc, amd64_register_names,
				       AMD64_RAX_REGNUM,
				       AMD64_RIP_REGNUM);
}
   3126 
/* Common gdbarch initialization for all AMD64 ABIs.  Configures
   register sets and pseudo registers from the target description
   TDESC (falling back to DEFAULT_TDESC), fundamental type sizes,
   DWARF register mapping, calling convention, and the frame
   unwinders defined above.  OS-specific init functions call this
   first and then override as needed.  */

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
		const target_desc *default_tdesc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;
  static const char *const stap_integer_prefixes[] = { "$", NULL };
  static const char *const stap_register_prefixes[] = { "%", NULL };
  static const char *const stap_register_indirection_prefixes[] = { "(",
								    NULL };
  static const char *const stap_register_indirection_suffixes[] = { ")",
								    NULL };

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
  tdep->fpregset = &amd64_fpregset;

  if (! tdesc_has_registers (tdesc))
    tdesc = default_tdesc;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* Optional register sets, enabled per target-description feature.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
    {
      tdep->zmmh_register_names = amd64_zmmh_names;
      tdep->k_register_names = amd64_k_names;
      tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
      tdep->ymm16h_register_names = amd64_ymmh_avx512_names;

      tdep->num_zmm_regs = 32;
      tdep->num_xmm_avx512_regs = 16;
      tdep->num_ymm_avx512_regs = 16;

      tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
      tdep->k0_regnum = AMD64_K0_REGNUM;
      tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
      tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
    {
      tdep->ymmh_register_names = amd64_ymmh_names;
      tdep->num_ymm_regs = 16;
      tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
    {
      tdep->mpx_register_names = amd64_mpx_names;
      tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
      tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
    {
      tdep->fsbase_regnum = AMD64_FSBASE_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
    {
      tdep->pkeys_register_names = amd64_pkeys_names;
      tdep->pkru_regnum = AMD64_PKRU_REGNUM;
      tdep->num_pkeys_regs = 1;
    }

  tdep->num_byte_regs = 20;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read_value (gdbarch,
					  amd64_pseudo_register_read_value);
  set_gdbarch_pseudo_register_write (gdbarch,
				     amd64_pseudo_register_write);
  set_gdbarch_ax_pseudo_register_collect (gdbarch,
					  amd64_ax_pseudo_register_collect);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);

  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);

  set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);

  /* SystemTap variables and functions.  */
  set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
  set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
  set_gdbarch_stap_register_indirection_prefixes (gdbarch,
					  stap_register_indirection_prefixes);
  set_gdbarch_stap_register_indirection_suffixes (gdbarch,
					  stap_register_indirection_suffixes);
  set_gdbarch_stap_is_single_operand (gdbarch,
				      i386_stap_is_single_operand);
  set_gdbarch_stap_parse_special_token (gdbarch,
					i386_stap_parse_special_token);
  set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
  set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
  set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);

  set_gdbarch_in_indirect_branch_thunk (gdbarch,
					amd64_in_indirect_branch_thunk);
}
   3295 
/* Initialize ARCH for x86-64, no osabi.  Uses the default SSE target
   description with segment base registers included.  */

static void
amd64_none_init_abi (gdbarch_info info, gdbarch *arch)
{
  amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK,
							true));
}
   3304 
   3305 static struct type *
   3306 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
   3307 {
   3308   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
   3309 
   3310   switch (regnum - tdep->eax_regnum)
   3311     {
   3312     case AMD64_RBP_REGNUM:	/* %ebp */
   3313     case AMD64_RSP_REGNUM:	/* %esp */
   3314       return builtin_type (gdbarch)->builtin_data_ptr;
   3315     case AMD64_RIP_REGNUM:	/* %eip */
   3316       return builtin_type (gdbarch)->builtin_func_ptr;
   3317     }
   3318 
   3319   return i386_pseudo_register_type (gdbarch, regnum);
   3320 }
   3321 
/* Initialize GDBARCH for the x32 ABI: amd64 registers but 32-bit
   `long' and pointers.  */

void
amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
		    const target_desc *default_tdesc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  amd64_init_abi (info, gdbarch, default_tdesc);

  /* 17 dword pseudo registers instead of 16: the dword view of %rip
     is included (see amd64_x32_pseudo_register_type).  */
  tdep->num_dword_regs = 17;
  set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);

  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_ptr_bit (gdbarch, 32);
}
   3336 
/* Initialize ARCH for x64-32, no osabi.  Uses the default SSE target
   description with segment base registers included.  */

static void
amd64_x32_none_init_abi (gdbarch_info info, gdbarch *arch)
{
  amd64_x32_init_abi (info, arch,
		      amd64_target_description (X86_XSTATE_SSE_MASK, true));
}
   3345 
   3346 /* Return the target description for a specified XSAVE feature mask.  */
   3347 
   3348 const struct target_desc *
   3349 amd64_target_description (uint64_t xcr0, bool segments)
   3350 {
   3351   static target_desc *amd64_tdescs \
   3352     [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
   3353   target_desc **tdesc;
   3354 
   3355   tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 1 : 0]
   3356     [(xcr0 & X86_XSTATE_MPX) ? 1 : 0]
   3357     [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0]
   3358     [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0]
   3359     [segments ? 1 : 0];
   3360 
   3361   if (*tdesc == NULL)
   3362     *tdesc = amd64_create_target_description (xcr0, false, false,
   3363 					      segments);
   3364 
   3365   return *tdesc;
   3366 }
   3367 
/* Register the no-osabi initializers for x86-64 and x64-32.  */

void _initialize_amd64_tdep ();
void
_initialize_amd64_tdep ()
{
  gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_NONE,
 			  amd64_none_init_abi);
  gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x64_32, GDB_OSABI_NONE,
 			  amd64_x32_none_init_abi);
}
   3377 
   3378 
   3380 /* The 64-bit FXSAVE format differs from the 32-bit format in the
   3381    sense that the instruction pointer and data pointer are simply
   3382    64-bit offsets into the code segment and the data segment instead
   3383    of a selector offset pair.  The functions below store the upper 32
   3384    bits of these pointers (instead of just the 16-bits of the segment
   3385    selector).  */
   3386 
/* Fill register REGNUM in REGCACHE with the appropriate
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
   -1, do this for all registers.  This function masks off any of the
   reserved bits in *FXSAVE.  */

void
amd64_supply_fxsave (struct regcache *regcache, int regnum,
		     const void *fxsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  i387_supply_fxsave (regcache, regnum, fxsave);

  if (fxsave
      && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      const gdb_byte *regs = (const gdb_byte *) fxsave;

      /* In 64-bit mode the FISEG/FOSEG slots hold the upper 32 bits
	 of the 64-bit instruction and data pointers, found at bytes
	 12 and 20 of the fxsave area.  */
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}
   3412 
/* Similar to amd64_supply_fxsave, but use XSAVE extended state.  */

void
amd64_supply_xsave (struct regcache *regcache, int regnum,
		    const void *xsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  i387_supply_xsave (regcache, regnum, xsave);

  if (xsave
      && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      const gdb_byte *regs = (const gdb_byte *) xsave;
      ULONGEST clear_bv;

      clear_bv = i387_xsave_get_clear_bv (gdbarch, xsave);

      /* If the FISEG and FOSEG registers have not been initialised yet
	 (their CLEAR_BV bit is set) then their default values of zero will
	 have already been setup by I387_SUPPLY_XSAVE.  */
      if (!(clear_bv & X86_XSTATE_X87))
	{
	  /* Bytes 12 and 20 hold the upper halves of the 64-bit
	     instruction and data pointers (see amd64_supply_fxsave).  */
	  if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	    regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
	  if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	    regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
	}
    }
}
   3444 
/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */

void
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
		      void *fxsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  gdb_byte *regs = (gdb_byte *) fxsave;

  i387_collect_fxsave (regcache, regnum, fxsave);

  if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      /* Store the upper halves of the 64-bit instruction and data
	 pointers at bytes 12 and 20 (see amd64_supply_fxsave).  */
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}
   3468 
/* Similar to amd64_collect_fxsave, but use XSAVE extended state.  */

void
amd64_collect_xsave (const struct regcache *regcache, int regnum,
		     void *xsave, int gcore)
{
  struct gdbarch *gdbarch = regcache->arch ();
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  gdb_byte *regs = (gdb_byte *) xsave;

  i387_collect_xsave (regcache, regnum, xsave, gcore);

  if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      /* Store the upper halves of the 64-bit instruction and data
	 pointers at bytes 12 and 20 (see amd64_supply_fxsave).  */
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache->raw_collect (I387_FISEG_REGNUM (tdep),
			      regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache->raw_collect (I387_FOSEG_REGNUM (tdep),
			      regs + 20);
    }
}
   3491