Home | History | Annotate | Line # | Download | only in gdb
amd64-tdep.c revision 1.9.2.1
      1 /* Target-dependent code for AMD64.
      2 
      3    Copyright (C) 2001-2023 Free Software Foundation, Inc.
      4 
      5    Contributed by Jiri Smid, SuSE Labs.
      6 
      7    This file is part of GDB.
      8 
      9    This program is free software; you can redistribute it and/or modify
     10    it under the terms of the GNU General Public License as published by
     11    the Free Software Foundation; either version 3 of the License, or
     12    (at your option) any later version.
     13 
     14    This program is distributed in the hope that it will be useful,
     15    but WITHOUT ANY WARRANTY; without even the implied warranty of
     16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     17    GNU General Public License for more details.
     18 
     19    You should have received a copy of the GNU General Public License
     20    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
     21 
     22 #include "defs.h"
     23 #include "opcode/i386.h"
     24 #include "dis-asm.h"
     25 #include "arch-utils.h"
     26 #include "block.h"
     27 #include "dummy-frame.h"
     28 #include "frame.h"
     29 #include "frame-base.h"
     30 #include "frame-unwind.h"
     31 #include "inferior.h"
     32 #include "infrun.h"
     33 #include "gdbcmd.h"
     34 #include "gdbcore.h"
     35 #include "objfiles.h"
     36 #include "regcache.h"
     37 #include "regset.h"
     38 #include "symfile.h"
     39 #include "disasm.h"
     40 #include "amd64-tdep.h"
     41 #include "i387-tdep.h"
     42 #include "gdbsupport/x86-xstate.h"
     43 #include <algorithm>
     44 #include "target-descriptions.h"
     45 #include "arch/amd64.h"
     46 #include "producer.h"
     47 #include "ax.h"
     48 #include "ax-gdb.h"
     49 #include "gdbsupport/byte-vector.h"
     50 #include "osabi.h"
     51 #include "x86-tdep.h"
     52 #include "amd64-ravenscar-thread.h"
     53 
     54 /* Note that the AMD64 architecture was previously known as x86-64.
     55    The latter is (forever) engraved into the canonical system name as
     56    returned by config.guess, and used as the name for the AMD64 port
     57    of GNU/Linux.  The BSD's have renamed their ports to amd64; they
     58    don't like to shout.  For GDB we prefer the amd64_-prefix over the
     59    x86_64_-prefix since it's so much easier to type.  */
     60 
     61 /* Register information.  */
     62 
/* Names of the raw registers, indexed by GDB register number.  The
   inline comments below pin the indices; the order corresponds to the
   AMD64_*_REGNUM constants (see amd64-tdep.h).  */

static const char * const amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};
     80 
/* Names of the AVX pseudo-registers %ymm0-%ymm15.  */

static const char * const amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

/* Names of the AVX-512 pseudo-registers %ymm16-%ymm31.  */

static const char * const amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

/* Names of the raw registers holding the upper 128 bits of
   %ymm0-%ymm15.  */

static const char * const amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

/* Names of the raw registers holding the upper 128 bits of
   %ymm16-%ymm31 (AVX-512).  */

static const char * const amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};

/* Names of the MPX bound and configuration registers.  */

static const char * const amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

/* Names of the AVX-512 opmask registers %k0-%k7.  */

static const char * const amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};

/* Names of the raw registers holding the upper 256 bits of
   %zmm0-%zmm31 (AVX-512).  */

static const char * const amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

/* Names of the AVX-512 pseudo-registers %zmm0-%zmm31.  */

static const char * const amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

/* Names of the AVX-512 pseudo-registers %xmm16-%xmm31.  */

static const char * const amd64_xmm_avx512_names[] = {
    "xmm16",  "xmm17",  "xmm18",  "xmm19",
    "xmm20",  "xmm21",  "xmm22",  "xmm23",
    "xmm24",  "xmm25",  "xmm26",  "xmm27",
    "xmm28",  "xmm29",  "xmm30",  "xmm31"
};

/* Name of the PKU protection-keys register.  */

static const char * const amd64_pkeys_names[] = {
    "pkru"
};
    158 
    159 /* DWARF Register Number Mapping as defined in the System V psABI,
    160    section 3.6.  */
    161 
    162 static int amd64_dwarf_regmap[] =
    163 {
    164   /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
    165   AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
    166   AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
    167   AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
    168 
    169   /* Frame Pointer Register RBP.  */
    170   AMD64_RBP_REGNUM,
    171 
    172   /* Stack Pointer Register RSP.  */
    173   AMD64_RSP_REGNUM,
    174 
    175   /* Extended Integer Registers 8 - 15.  */
    176   AMD64_R8_REGNUM,		/* %r8 */
    177   AMD64_R9_REGNUM,		/* %r9 */
    178   AMD64_R10_REGNUM,		/* %r10 */
    179   AMD64_R11_REGNUM,		/* %r11 */
    180   AMD64_R12_REGNUM,		/* %r12 */
    181   AMD64_R13_REGNUM,		/* %r13 */
    182   AMD64_R14_REGNUM,		/* %r14 */
    183   AMD64_R15_REGNUM,		/* %r15 */
    184 
    185   /* Return Address RA.  Mapped to RIP.  */
    186   AMD64_RIP_REGNUM,
    187 
    188   /* SSE Registers 0 - 7.  */
    189   AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    190   AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    191   AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    192   AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
    193 
    194   /* Extended SSE Registers 8 - 15.  */
    195   AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
    196   AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
    197   AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
    198   AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
    199 
    200   /* Floating Point Registers 0-7.  */
    201   AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
    202   AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
    203   AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
    204   AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
    205 
    206   /* MMX Registers 0 - 7.
    207      We have to handle those registers specifically, as their register
    208      number within GDB depends on the target (or they may even not be
    209      available at all).  */
    210   -1, -1, -1, -1, -1, -1, -1, -1,
    211 
    212   /* Control and Status Flags Register.  */
    213   AMD64_EFLAGS_REGNUM,
    214 
    215   /* Selector Registers.  */
    216   AMD64_ES_REGNUM,
    217   AMD64_CS_REGNUM,
    218   AMD64_SS_REGNUM,
    219   AMD64_DS_REGNUM,
    220   AMD64_FS_REGNUM,
    221   AMD64_GS_REGNUM,
    222   -1,
    223   -1,
    224 
    225   /* Segment Base Address Registers.  */
    226   -1,
    227   -1,
    228   -1,
    229   -1,
    230 
    231   /* Special Selector Registers.  */
    232   -1,
    233   -1,
    234 
    235   /* Floating Point Control Registers.  */
    236   AMD64_MXCSR_REGNUM,
    237   AMD64_FCTRL_REGNUM,
    238   AMD64_FSTAT_REGNUM
    239 };
    240 
    241 static const int amd64_dwarf_regmap_len =
    242   (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
    243 
    244 /* Convert DWARF register number REG to the appropriate register
    245    number used by GDB.  */
    246 
    247 static int
    248 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
    249 {
    250   i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
    251   int ymm0_regnum = tdep->ymm0_regnum;
    252   int regnum = -1;
    253 
    254   if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    255     regnum = amd64_dwarf_regmap[reg];
    256 
    257   if (ymm0_regnum >= 0
    258 	   && i386_xmm_regnum_p (gdbarch, regnum))
    259     regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
    260 
    261   return regnum;
    262 }
    263 
/* Map architectural register numbers to gdb register numbers.  The
   index is the 4-bit architectural number in the order %rax, %rcx,
   %rdx, %rbx, %rsp, %rbp, %rsi, %rdi, %r8-%r15 (the order used by the
   instruction encoding).  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

/* Number of entries in amd64_arch_regmap.  */

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
    288 
    289 /* Convert architectural register number REG to the appropriate register
    290    number used by GDB.  */
    291 
    292 static int
    293 amd64_arch_reg_to_regnum (int reg)
    294 {
    295   gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
    296 
    297   return amd64_arch_regmap[reg];
    298 }
    299 
/* Register names for byte pseudo-registers.  The first sixteen
   entries are the low bytes of the sixteen GP registers; the final
   four are the high-byte registers %ah-%dh, which overlay the second
   byte of %rax, %rbx, %rcx and %rdx.  */

static const char * const amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  Entries at or beyond this index in
   amd64_byte_names are the high-byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.  */

static const char * const amd64_word_names[] =
{
  /* NOTE(review): the %sp slot is deliberately the empty string —
     presumably so no "sp" register name is exposed; confirm against
     the history of this file.  */
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers, including %eip for the
   low 32 bits of %rip.  */

static const char * const amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};
    328 
/* Return the name of pseudo-register REGNUM.  The amd64-specific
   pseudo-registers (byte, word, dword, ymm, zmm) are named from the
   tables above, indexed by REGNUM's offset from the first register of
   its group; everything else falls back to the common i386 naming.  */

static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_zmm_regnum_p (gdbarch, regnum))
    return amd64_zmm_names[regnum - tdep->zmm0_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];
  else
    return i386_pseudo_register_name (gdbarch, regnum);
}
    350 
/* Implement the "pseudo_register_read_value" gdbarch method.  Build a
   value for pseudo-register REGNUM by reading the raw GP register
   that backs it.  Handles the byte and dword pseudo-registers here;
   all other pseudo-registers are delegated to the common i386
   implementation.  If the backing raw register is unavailable, the
   returned value is marked unavailable rather than raising.  */

static struct value *
amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
				  readable_regcache *regcache,
				  int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  value *result_value = allocate_value (register_type (gdbarch, regnum));
  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;
  gdb_byte *buf = value_contents_raw (result_value).data ();

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* %ah-%dh are backed by the first four GP registers.  */
	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];

	  /* Special handling for AH, BH, CH, DH.  */
	  register_status status = regcache->raw_read (gpnum, raw_buf);
	  if (status == REG_VALID)
	    /* The high byte is the second byte of the raw register.  */
	    memcpy (buf, raw_buf + 1, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  value_type (result_value)->length ());
	}
      else
	{
	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];
	  register_status status = regcache->raw_read (gpnum, raw_buf);
	  if (status == REG_VALID)
	    /* Low-byte registers are the first byte of the raw
	       register.  */
	    memcpy (buf, raw_buf, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  value_type (result_value)->length ());
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      gdb_byte raw_buf[register_size (gdbarch, gpnum)];
      /* Extract (always little endian).  */
      register_status status = regcache->raw_read (gpnum, raw_buf);
      if (status == REG_VALID)
	/* A dword pseudo-register is the low four bytes of the raw
	   register.  */
	memcpy (buf, raw_buf, 4);
      else
	mark_value_bytes_unavailable (result_value, 0,
				      value_type (result_value)->length ());
    }
  else
    i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
					  result_value);

  return result_value;
}
    410 
/* Implement the "pseudo_register_write" gdbarch method.  Store BUF
   into pseudo-register REGNUM by read-modify-writing the raw GP
   register that backs it, so the untouched bytes of the raw register
   are preserved.  Byte and dword pseudo-registers are handled here;
   anything else is delegated to the common i386 implementation.  */

static void
amd64_pseudo_register_write (struct gdbarch *gdbarch,
			     struct regcache *regcache,
			     int regnum, const gdb_byte *buf)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* %ah-%dh are the second byte of the first four GP
	     registers.  */
	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];

	  /* Read ... AH, BH, CH, DH.  */
	  regcache->raw_read (gpnum, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf + 1, buf, 1);
	  /* ... Write.  */
	  regcache->raw_write (gpnum, raw_buf);
	}
      else
	{
	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];

	  /* Read ...  */
	  regcache->raw_read (gpnum, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf, buf, 1);
	  /* ... Write.  */
	  regcache->raw_write (gpnum, raw_buf);
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      gdb_byte raw_buf[register_size (gdbarch, gpnum)];

      /* Read ...  */
      regcache->raw_read (gpnum, raw_buf);
      /* ... Modify ... (always little endian).  */
      memcpy (raw_buf, buf, 4);
      /* ... Write.  */
      regcache->raw_write (gpnum, raw_buf);
    }
  else
    i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
}
    461 
    462 /* Implement the 'ax_pseudo_register_collect' gdbarch method.  */
    463 
    464 static int
    465 amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
    466 				  struct agent_expr *ax, int regnum)
    467 {
    468   i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
    469 
    470   if (i386_byte_regnum_p (gdbarch, regnum))
    471     {
    472       int gpnum = regnum - tdep->al_regnum;
    473 
    474       if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
    475 	ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
    476       else
    477 	ax_reg_mask (ax, gpnum);
    478       return 0;
    479     }
    480   else if (i386_dword_regnum_p (gdbarch, regnum))
    481     {
    482       int gpnum = regnum - tdep->eax_regnum;
    483 
    484       ax_reg_mask (ax, gpnum);
    485       return 0;
    486     }
    487   else
    488     return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
    489 }
    490 
    491 
    492 
/* Register classes as defined in the psABI (System V AMD64 ABI,
   section 3.2.3 "Parameter Passing").  Each eightbyte of an argument
   or return value is assigned one of these classes, which determines
   where it is passed.  */

enum amd64_reg_class
{
  AMD64_INTEGER,	/* Passed in a general-purpose register.  */
  AMD64_SSE,		/* Passed in an SSE (%xmm) register.  */
  AMD64_SSEUP,		/* Upper half of the preceding SSE register.  */
  AMD64_X87,		/* 64-bit mantissa of a long double (%st).  */
  AMD64_X87UP,		/* Exponent + padding of a long double.  */
  AMD64_COMPLEX_X87,	/* complex long double (%st0/%st1).  */
  AMD64_NO_CLASS,	/* Padding or empty; identity under merging.  */
  AMD64_MEMORY		/* Passed in memory (on the stack).  */
};
    507 
    508 /* Return the union class of CLASS1 and CLASS2.  See the psABI for
    509    details.  */
    510 
    511 static enum amd64_reg_class
    512 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
    513 {
    514   /* Rule (a): If both classes are equal, this is the resulting class.  */
    515   if (class1 == class2)
    516     return class1;
    517 
    518   /* Rule (b): If one of the classes is NO_CLASS, the resulting class
    519      is the other class.  */
    520   if (class1 == AMD64_NO_CLASS)
    521     return class2;
    522   if (class2 == AMD64_NO_CLASS)
    523     return class1;
    524 
    525   /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
    526   if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    527     return AMD64_MEMORY;
    528 
    529   /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
    530   if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    531     return AMD64_INTEGER;
    532 
    533   /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
    534      MEMORY is used as class.  */
    535   if (class1 == AMD64_X87 || class1 == AMD64_X87UP
    536       || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
    537       || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    538     return AMD64_MEMORY;
    539 
    540   /* Rule (f): Otherwise class SSE is used.  */
    541   return AMD64_SSE;
    542 }
    543 
    544 static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);
    545 
/* Return true if TYPE is a structure or union with unaligned fields,
   recursing into nested aggregates.  Used by the classifier: such
   objects are forced into class MEMORY (see
   amd64_classify_aggregate).  Non-aggregate types trivially return
   false.  */

static bool
amd64_has_unaligned_fields (struct type *type)
{
  if (type->code () == TYPE_CODE_STRUCT
      || type->code () == TYPE_CODE_UNION)
    {
      for (int i = 0; i < type->num_fields (); i++)
	{
	  struct type *subtype = check_typedef (type->field (i).type ());

	  /* Ignore static fields, empty fields (for example nested
	     empty structures), and bitfields (these are handled by
	     the caller).  */
	  if (field_is_static (&type->field (i))
	      || (TYPE_FIELD_BITSIZE (type, i) == 0
		  && subtype->length () == 0)
	      || TYPE_FIELD_PACKED (type, i))
	    continue;

	  int bitpos = type->field (i).loc_bitpos ();

	  /* A field that does not start on a byte boundary is
	     unaligned by definition.  */
	  if (bitpos % 8 != 0)
	    return true;

	  int align = type_align (subtype);
	  if (align == 0)
	    error (_("could not determine alignment of type"));

	  /* Byte-aligned, but not aligned to the field type's own
	     alignment requirement.  */
	  int bytepos = bitpos / 8;
	  if (bytepos % align != 0)
	    return true;

	  if (amd64_has_unaligned_fields (subtype))
	    return true;
	}
    }

  return false;
}
    587 
/* Classify field I of TYPE according to the psABI rules for structure
   and union types, merging the result into THECLASS (one class per
   eightbyte).  BITOFFSET is the bit offset of TYPE itself within the
   outermost object being classified, so that nested fields land in
   the correct eightbyte.  */

static void
amd64_classify_aggregate_field (struct type *type, int i,
				enum amd64_reg_class theclass[2],
				unsigned int bitoffset)
{
  struct type *subtype = check_typedef (type->field (i).type ());
  enum amd64_reg_class subclass[2];
  int bitsize = TYPE_FIELD_BITSIZE (type, i);

  /* A zero bitsize means this is not a bitfield; use the full width
     of the field's type.  */
  if (bitsize == 0)
    bitsize = subtype->length () * 8;

  /* Ignore static fields, or empty fields, for example nested
     empty structures.*/
  if (field_is_static (&type->field (i)) || bitsize == 0)
    return;

  /* Which eightbyte(s) of the outermost object this field occupies.  */
  int bitpos = bitoffset + type->field (i).loc_bitpos ();
  int pos = bitpos / 64;
  int endpos = (bitpos + bitsize - 1) / 64;

  if (subtype->code () == TYPE_CODE_STRUCT
      || subtype->code () == TYPE_CODE_UNION)
    {
      /* Each field of an object is classified recursively.  */
      int j;
      for (j = 0; j < subtype->num_fields (); j++)
	amd64_classify_aggregate_field (subtype, j, theclass, bitpos);
      return;
    }

  gdb_assert (pos == 0 || pos == 1);

  amd64_classify (subtype, subclass);
  theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
  if (bitsize <= 64 && pos == 0 && endpos == 1)
    /* This is a bit of an odd case:  We have a field that would
       normally fit in one of the two eightbytes, except that
       it is placed in a way that this field straddles them.
       This has been seen with a structure containing an array.

       The ABI is a bit unclear in this case, but we assume that
       this field's class (stored in subclass[0]) must also be merged
       into class[1].  In other words, our field has a piece stored
       in the second eight-byte, and thus its class applies to
       the second eight-byte as well.

       In the case where the field length exceeds 8 bytes,
       it should not be necessary to merge the field class
       into class[1].  As LEN > 8, subclass[1] is necessarily
       different from AMD64_NO_CLASS.  If subclass[1] is equal
       to subclass[0], then the normal class[1]/subclass[1]
       merging will take care of everything.  For subclass[1]
       to be different from subclass[0], I can only see the case
       where we have a SSE/SSEUP or X87/X87UP pair, which both
       use up all 16 bytes of the aggregate, and are already
       handled just fine (because each portion sits on its own
       8-byte).  */
    theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
  if (pos == 0)
    /* The field's second-eightbyte class (if any) merges into the
       aggregate's second eightbyte.  */
    theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
}
    653 
/* Classify TYPE according to the psABI rules for aggregate
   (structures and arrays) and union types, and store the result (one
   class per eightbyte) in THECLASS.  The numbered comments below
   follow the classification algorithm in the psABI, section 3.2.3.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
{
  /* 1. If the size of an object is larger than two times eight bytes, or
	it is a non-trivial C++ object, or it has unaligned fields, then it
	has class memory.

	It is important that the trivially_copyable check is before the
	unaligned fields check, as C++ classes with virtual base classes
	will have fields (for the virtual base classes) with non-constant
	loc_bitpos attributes, which will cause an assert to trigger within
	the unaligned field check.  As classes with virtual bases are not
	trivially copyable, checking that first avoids this problem.  */
  if (type->length () > 16
      || !language_pass_by_reference (type).trivially_copyable
      || amd64_has_unaligned_fields (type))
    {
      theclass[0] = theclass[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
	always two fields are considered. The resulting class is
	calculated according to the classes of the fields in the
	eightbyte: */

  if (type->code () == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (type->target_type ());

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, theclass);
      /* An array longer than one eightbyte whose element class did not
	 already fill the second eightbyte extends into it.  */
      if (type->length () > 8 && theclass[1] == AMD64_NO_CLASS)
	theclass[1] = theclass[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (type->code () == TYPE_CODE_STRUCT
		  || type->code () == TYPE_CODE_UNION);

      for (i = 0; i < type->num_fields (); i++)
	amd64_classify_aggregate_field (type, i, theclass, 0);
    }

  /* 4. Then a post merger cleanup is done:  */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
    theclass[0] = theclass[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (theclass[0] == AMD64_SSEUP)
    theclass[0] = AMD64_SSE;
  if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
    theclass[1] = AMD64_SSE;
}
    721 
/* Classify TYPE per the psABI, storing one class per eightbyte in
   THECLASS.  Scalar types are classified directly here; aggregates
   are delegated to amd64_classify_aggregate.  Types that match no
   case are left as NO_CLASS.  Note the branch order matters: the
   checks combine type code and length to discriminate, e.g. a
   16-byte FLT is long double (X87), not __float128.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
{
  enum type_code code = type->code ();
  int len = type->length ();

  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
      && (len == 1 || len == 2 || len == 4 || len == 8))
    theclass[0] = AMD64_INTEGER;

  /* Arguments of types _Float16, float, double, _Decimal32, _Decimal64 and
     __m64 are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 2 || len == 4 || len == 8))
    /* FIXME: __m64 .  */
    theclass[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;

  /* Arguments of complex T - where T is one of the types _Float16, float or
     double - get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };

  */
  else if (code == TYPE_CODE_COMPLEX && (len == 8 || len == 4))
    theclass[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    /* complex double: both halves are SSE class.  */
    theclass[0] = theclass[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    theclass[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, theclass);
}
    788 
/* Implement the "return_value" gdbarch method following the System V
   AMD64 psABI.  Classify TYPE and then, depending on the
   classification, read the function's return value from the dictated
   registers into READBUF, or write WRITEBUF into them.  At most one
   of READBUF and WRITEBUF is non-NULL.  */

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  enum amd64_reg_class theclass[2];
  int len = type->length ();
  /* Registers used for successive INTEGER-class eightbytes.  */
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  /* Registers used for successive SSE-class eightbytes.  */
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reading and writing in the same call is not supported.  */
  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, theclass);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (theclass[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
	 can always find the return value just after the function has
	 returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, type->length ());
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
	returned in %st0 and the imaginary part in %st1.  */
  if (theclass[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  regcache->raw_read (AMD64_ST0_REGNUM, readbuf);
	  regcache->raw_read (AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache->raw_write (AMD64_ST0_REGNUM, writebuf);
	  regcache->raw_write (AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (theclass[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  /* Transfer the value one eightbyte at a time, steering each
     eightbyte to the register its class dictates.  */
  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (theclass[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  */
	  gdb_assert (i > 0 && theclass[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  /* Only bytes 8 and 9 of the 80-bit x87 value belong to this
	     eightbyte; forcing LEN to 2 makes the copy below transfer
	     exactly those two bytes (and terminates the loop).  */
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      /* At most one eightbyte is moved per iteration.  */
      if (readbuf)
	regcache->raw_read_part (regnum, offset, std::min (len, 8),
				 readbuf + i * 8);
      if (writebuf)
	regcache->raw_write_part (regnum, offset, std::min (len, 8),
				  writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
    921 
    922 
    924 static CORE_ADDR
    925 amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args,
    926 		      CORE_ADDR sp, function_call_return_method return_method)
    927 {
    928   static int integer_regnum[] =
    929   {
    930     AMD64_RDI_REGNUM,		/* %rdi */
    931     AMD64_RSI_REGNUM,		/* %rsi */
    932     AMD64_RDX_REGNUM,		/* %rdx */
    933     AMD64_RCX_REGNUM,		/* %rcx */
    934     AMD64_R8_REGNUM,		/* %r8 */
    935     AMD64_R9_REGNUM		/* %r9 */
    936   };
    937   static int sse_regnum[] =
    938   {
    939     /* %xmm0 ... %xmm7 */
    940     AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    941     AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    942     AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    943     AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
    944   };
    945   struct value **stack_args = XALLOCAVEC (struct value *, nargs);
    946   int num_stack_args = 0;
    947   int num_elements = 0;
    948   int element = 0;
    949   int integer_reg = 0;
    950   int sse_reg = 0;
    951   int i;
    952 
    953   /* Reserve a register for the "hidden" argument.  */
    954 if (return_method == return_method_struct)
    955     integer_reg++;
    956 
    957   for (i = 0; i < nargs; i++)
    958     {
    959       struct type *type = value_type (args[i]);
    960       int len = type->length ();
    961       enum amd64_reg_class theclass[2];
    962       int needed_integer_regs = 0;
    963       int needed_sse_regs = 0;
    964       int j;
    965 
    966       /* Classify argument.  */
    967       amd64_classify (type, theclass);
    968 
    969       /* Calculate the number of integer and SSE registers needed for
    970 	 this argument.  */
    971       for (j = 0; j < 2; j++)
    972 	{
    973 	  if (theclass[j] == AMD64_INTEGER)
    974 	    needed_integer_regs++;
    975 	  else if (theclass[j] == AMD64_SSE)
    976 	    needed_sse_regs++;
    977 	}
    978 
    979       /* Check whether enough registers are available, and if the
    980 	 argument should be passed in registers at all.  */
    981       if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
    982 	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
    983 	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
    984 	{
    985 	  /* The argument will be passed on the stack.  */
    986 	  num_elements += ((len + 7) / 8);
    987 	  stack_args[num_stack_args++] = args[i];
    988 	}
    989       else
    990 	{
    991 	  /* The argument will be passed in registers.  */
    992 	  const gdb_byte *valbuf = value_contents (args[i]).data ();
    993 	  gdb_byte buf[8];
    994 
    995 	  gdb_assert (len <= 16);
    996 
    997 	  for (j = 0; len > 0; j++, len -= 8)
    998 	    {
    999 	      int regnum = -1;
   1000 	      int offset = 0;
   1001 
   1002 	      switch (theclass[j])
   1003 		{
   1004 		case AMD64_INTEGER:
   1005 		  regnum = integer_regnum[integer_reg++];
   1006 		  break;
   1007 
   1008 		case AMD64_SSE:
   1009 		  regnum = sse_regnum[sse_reg++];
   1010 		  break;
   1011 
   1012 		case AMD64_SSEUP:
   1013 		  gdb_assert (sse_reg > 0);
   1014 		  regnum = sse_regnum[sse_reg - 1];
   1015 		  offset = 8;
   1016 		  break;
   1017 
   1018 		case AMD64_NO_CLASS:
   1019 		  continue;
   1020 
   1021 		default:
   1022 		  gdb_assert (!"Unexpected register class.");
   1023 		}
   1024 
   1025 	      gdb_assert (regnum != -1);
   1026 	      memset (buf, 0, sizeof buf);
   1027 	      memcpy (buf, valbuf + j * 8, std::min (len, 8));
   1028 	      regcache->raw_write_part (regnum, offset, 8, buf);
   1029 	    }
   1030 	}
   1031     }
   1032 
   1033   /* Allocate space for the arguments on the stack.  */
   1034   sp -= num_elements * 8;
   1035 
   1036   /* The psABI says that "The end of the input argument area shall be
   1037      aligned on a 16 byte boundary."  */
   1038   sp &= ~0xf;
   1039 
   1040   /* Write out the arguments to the stack.  */
   1041   for (i = 0; i < num_stack_args; i++)
   1042     {
   1043       struct type *type = value_type (stack_args[i]);
   1044       const gdb_byte *valbuf = value_contents (stack_args[i]).data ();
   1045       int len = type->length ();
   1046 
   1047       write_memory (sp + element * 8, valbuf, len);
   1048       element += ((len + 7) / 8);
   1049     }
   1050 
   1051   /* The psABI says that "For calls that may call functions that use
   1052      varargs or stdargs (prototype-less calls or calls to functions
   1053      containing ellipsis (...) in the declaration) %al is used as
   1054      hidden argument to specify the number of SSE registers used.  */
   1055   regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
   1056   return sp;
   1057 }
   1058 
/* Implement the "push_dummy_call" gdbarch method: set up the
   registers and the stack so the inferior can execute a function call
   with NARGS arguments ARGS, returning to the breakpoint at BP_ADDR.  */

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args,	CORE_ADDR sp,
		       function_call_return_method return_method,
		       CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* BND registers can be in arbitrary values at the moment of the
     inferior call.  This can cause boundary violations that are not
     due to a real bug or even desired by the user.  The best to be done
     is set the BND registers to allow access to the whole memory, INIT
     state, before pushing the inferior call.   */
  i387_reset_bnd_regs (gdbarch, regcache);

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, return_method);

  /* Pass "hidden" argument".  */
  if (return_method == return_method_struct)
    {
      /* The address of the caller-allocated return-value buffer goes
	 in %rdi; amd64_push_arguments reserved that register.  */
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache->cooked_write (AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache->cooked_write (AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache->cooked_write (AMD64_RBP_REGNUM, buf);

  /* NOTE(review): sp + 16 is handed back as the dummy frame's
     identifying address; presumably it corresponds to the dummy
     frame's CFA -- confirm against the dummy-frame unwinder.  */
  return sp + 16;
}
   1100 
   1101 /* Displaced instruction handling.  */
   1103 
/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.
   All offsets are in bytes, relative to the start of RAW_INSN.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the REX/VEX instruction encoding prefix or -1 if
     not present.  */
  int enc_prefix_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  Not owned; points into a buffer managed by
     whoever filled in this struct (e.g. the displaced-stepping
     closure's insn_buf).  */
  gdb_byte *raw_insn;
};
   1122 
/* Per-step state recorded by amd64_displaced_step_copy_insn and
   consumed by amd64_displaced_step_fixup.  */

struct amd64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  amd64_displaced_step_copy_insn_closure (int insn_buf_len)
  : insn_buf (insn_buf_len, 0)
  {}

  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  /* Non-zero if a temp register was commandeered and must be restored.  */
  int tmp_used = 0;
  /* GDB register number of the temp register.  */
  int tmp_regno;
  /* The temp register's original value, to restore after the step.  */
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* The possibly modified insn.  */
  gdb::byte_vector insn_buf;
};
   1141 
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

/* A nonzero entry at index B means the one-byte opcode B is followed
   by a ModRM byte.  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
   1168 
/* A nonzero entry at index B means the two-byte opcode 0x0f B is
   followed by a ModRM byte.  Keep in sync with ../opcodes/i386-dis.c
   (see the warning above onebyte_has_modrm).  */

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
   1191 
   1192 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
   1193 
   1194 static int
   1195 rex_prefix_p (gdb_byte pfx)
   1196 {
   1197   return REX_PREFIX_P (pfx);
   1198 }
   1199 
   1200 /* True if PFX is the start of the 2-byte VEX prefix.  */
   1201 
   1202 static bool
   1203 vex2_prefix_p (gdb_byte pfx)
   1204 {
   1205   return pfx == 0xc5;
   1206 }
   1207 
   1208 /* True if PFX is the start of the 3-byte VEX prefix.  */
   1209 
   1210 static bool
   1211 vex3_prefix_p (gdb_byte pfx)
   1212 {
   1213   return pfx == 0xc4;
   1214 }
   1215 
   1216 /* Skip the legacy instruction prefixes in INSN.
   1217    We assume INSN is properly sentineled so we don't have to worry
   1218    about falling off the end of the buffer.  */
   1219 
   1220 static gdb_byte *
   1221 amd64_skip_prefixes (gdb_byte *insn)
   1222 {
   1223   while (1)
   1224     {
   1225       switch (*insn)
   1226 	{
   1227 	case DATA_PREFIX_OPCODE:
   1228 	case ADDR_PREFIX_OPCODE:
   1229 	case CS_PREFIX_OPCODE:
   1230 	case DS_PREFIX_OPCODE:
   1231 	case ES_PREFIX_OPCODE:
   1232 	case FS_PREFIX_OPCODE:
   1233 	case GS_PREFIX_OPCODE:
   1234 	case SS_PREFIX_OPCODE:
   1235 	case LOCK_PREFIX_OPCODE:
   1236 	case REPE_PREFIX_OPCODE:
   1237 	case REPNE_PREFIX_OPCODE:
   1238 	  ++insn;
   1239 	  continue;
   1240 	default:
   1241 	  break;
   1242 	}
   1243       break;
   1244     }
   1245 
   1246   return insn;
   1247 }
   1248 
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarily avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.
     NOTE(review): only the low 3 bits of each field are considered --
     REX/VEX register-extension bits are ignored, and a VEX.vvvv
     register operand is not accounted for at all.  Confirm callers
     only rely on this for the encodings fixup_riprel handles.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  /* Sanity check: not every one of the 8 low registers may be marked
     used, otherwise no free register would remain.  */
  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (_("unable to find free reg"));
  }
}
   1322 
   1323 /* Extract the details of INSN that we need.  */
   1324 
   1325 static void
   1326 amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
   1327 {
   1328   gdb_byte *start = insn;
   1329   int need_modrm;
   1330 
   1331   details->raw_insn = insn;
   1332 
   1333   details->opcode_len = -1;
   1334   details->enc_prefix_offset = -1;
   1335   details->opcode_offset = -1;
   1336   details->modrm_offset = -1;
   1337 
   1338   /* Skip legacy instruction prefixes.  */
   1339   insn = amd64_skip_prefixes (insn);
   1340 
   1341   /* Skip REX/VEX instruction encoding prefixes.  */
   1342   if (rex_prefix_p (*insn))
   1343     {
   1344       details->enc_prefix_offset = insn - start;
   1345       ++insn;
   1346     }
   1347   else if (vex2_prefix_p (*insn))
   1348     {
   1349       /* Don't record the offset in this case because this prefix has
   1350 	 no REX.B equivalent.  */
   1351       insn += 2;
   1352     }
   1353   else if (vex3_prefix_p (*insn))
   1354     {
   1355       details->enc_prefix_offset = insn - start;
   1356       insn += 3;
   1357     }
   1358 
   1359   details->opcode_offset = insn - start;
   1360 
   1361   if (*insn == TWO_BYTE_OPCODE_ESCAPE)
   1362     {
   1363       /* Two or three-byte opcode.  */
   1364       ++insn;
   1365       need_modrm = twobyte_has_modrm[*insn];
   1366 
   1367       /* Check for three-byte opcode.  */
   1368       switch (*insn)
   1369 	{
   1370 	case 0x24:
   1371 	case 0x25:
   1372 	case 0x38:
   1373 	case 0x3a:
   1374 	case 0x7a:
   1375 	case 0x7b:
   1376 	  ++insn;
   1377 	  details->opcode_len = 3;
   1378 	  break;
   1379 	default:
   1380 	  details->opcode_len = 2;
   1381 	  break;
   1382 	}
   1383     }
   1384   else
   1385     {
   1386       /* One-byte opcode.  */
   1387       need_modrm = onebyte_has_modrm[*insn];
   1388       details->opcode_len = 1;
   1389     }
   1390 
   1391   if (need_modrm)
   1392     {
   1393       ++insn;
   1394       details->modrm_offset = insn - start;
   1395     }
   1396 }
   1397 
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch,
	      amd64_displaced_step_copy_insn_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  /* TO is unreferenced here: the 32-bit displacement is kept as-is
     and only the base register is redirected, so the copy's address
     does not matter.  */
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  CORE_ADDR rip_base;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* Compute the rip-relative address.	*/
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
					  dsc->insn_buf.size (), from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1).  */
  static constexpr gdb_byte VEX3_NOT_B = 0x20;

  /* REX.B should be unset (VEX.!B set) as we were using rip-relative
     addressing, but ensure it's unset (set for VEX) anyway, tmp_regno
     is not r8-r15.  */
  if (insn_details->enc_prefix_offset != -1)
    {
      gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
      if (rex_prefix_p (pfx[0]))
	pfx[0] &= ~REX_B;
      else if (vex3_prefix_p (pfx[0]))
	pfx[1] |= VEX3_NOT_B;
      else
	gdb_assert_not_reached ("unhandled prefix");
    }

  /* Save the temp register's original value so the fixup phase can
     restore it once the step has finished.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp: set mod = 10 (base
     register plus 32-bit displacement) and rm = the temp register,
     leaving the original displacement bytes untouched.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  displaced_debug_printf ("%%rip-relative addressing used.");
  displaced_debug_printf ("using temp reg %d, old value %s, new value %s",
			  dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			  paddress (gdbarch, rip_base));
}
   1462 
   1463 static void
   1464 fixup_displaced_copy (struct gdbarch *gdbarch,
   1465 		      amd64_displaced_step_copy_insn_closure *dsc,
   1466 		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
   1467 {
   1468   const struct amd64_insn *details = &dsc->insn_details;
   1469 
   1470   if (details->modrm_offset != -1)
   1471     {
   1472       gdb_byte modrm = details->raw_insn[details->modrm_offset];
   1473 
   1474       if ((modrm & 0xc7) == 0x05)
   1475 	{
   1476 	  /* The insn uses rip-relative addressing.
   1477 	     Deal with it.  */
   1478 	  fixup_riprel (gdbarch, dsc, from, to, regs);
   1479 	}
   1480     }
   1481 }
   1482 
/* Implement the "displaced_step_copy_insn" gdbarch method: copy the
   instruction at FROM to the scratch area at TO, patching it (and
   recording undo state in the returned closure) so it can be
   single-stepped out of line.  */

displaced_step_copy_insn_closure_up
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  std::unique_ptr<amd64_displaced_step_copy_insn_closure> dsc
    (new amd64_displaced_step_copy_insn_closure (len + fixup_sentinel_space));
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.	 */
  fixup_displaced_copy (gdbarch, dsc.get (), from, to, regs);

  write_memory (to, buf, len);

  displaced_debug_printf ("copy %s->%s: %s",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  displaced_step_dump_bytes (buf, len).c_str ());

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
   1529 
   1530 static int
   1531 amd64_absolute_jmp_p (const struct amd64_insn *details)
   1532 {
   1533   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1534 
   1535   if (insn[0] == 0xff)
   1536     {
   1537       /* jump near, absolute indirect (/4) */
   1538       if ((insn[1] & 0x38) == 0x20)
   1539 	return 1;
   1540 
   1541       /* jump far, absolute indirect (/5) */
   1542       if ((insn[1] & 0x38) == 0x28)
   1543 	return 1;
   1544     }
   1545 
   1546   return 0;
   1547 }
   1548 
   1549 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */
   1550 
   1551 static int
   1552 amd64_jmp_p (const struct amd64_insn *details)
   1553 {
   1554   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1555 
   1556   /* jump short, relative.  */
   1557   if (insn[0] == 0xeb)
   1558     return 1;
   1559 
   1560   /* jump near, relative.  */
   1561   if (insn[0] == 0xe9)
   1562     return 1;
   1563 
   1564   return amd64_absolute_jmp_p (details);
   1565 }
   1566 
   1567 static int
   1568 amd64_absolute_call_p (const struct amd64_insn *details)
   1569 {
   1570   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1571 
   1572   if (insn[0] == 0xff)
   1573     {
   1574       /* Call near, absolute indirect (/2) */
   1575       if ((insn[1] & 0x38) == 0x10)
   1576 	return 1;
   1577 
   1578       /* Call far, absolute indirect (/3) */
   1579       if ((insn[1] & 0x38) == 0x18)
   1580 	return 1;
   1581     }
   1582 
   1583   return 0;
   1584 }
   1585 
   1586 static int
   1587 amd64_ret_p (const struct amd64_insn *details)
   1588 {
   1589   /* NOTE: gcc can emit "repz ; ret".  */
   1590   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1591 
   1592   switch (insn[0])
   1593     {
   1594     case 0xc2: /* ret near, pop N bytes */
   1595     case 0xc3: /* ret near */
   1596     case 0xca: /* ret far, pop N bytes */
   1597     case 0xcb: /* ret far */
   1598     case 0xcf: /* iret */
   1599       return 1;
   1600 
   1601     default:
   1602       return 0;
   1603     }
   1604 }
   1605 
   1606 static int
   1607 amd64_call_p (const struct amd64_insn *details)
   1608 {
   1609   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1610 
   1611   if (amd64_absolute_call_p (details))
   1612     return 1;
   1613 
   1614   /* call near, relative */
   1615   if (insn[0] == 0xe8)
   1616     return 1;
   1617 
   1618   return 0;
   1619 }
   1620 
   1621 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
   1622    length in bytes.  Otherwise, return zero.  */
   1623 
   1624 static int
   1625 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
   1626 {
   1627   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
   1628 
   1629   if (insn[0] == 0x0f && insn[1] == 0x05)
   1630     {
   1631       *lengthp = 2;
   1632       return 1;
   1633     }
   1634 
   1635   return 0;
   1636 }
   1637 
   1638 /* Classify the instruction at ADDR using PRED.
   1639    Throw an error if the memory can't be read.  */
   1640 
   1641 static int
   1642 amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
   1643 			int (*pred) (const struct amd64_insn *))
   1644 {
   1645   struct amd64_insn details;
   1646   gdb_byte *buf;
   1647   int len, classification;
   1648 
   1649   len = gdbarch_max_insn_length (gdbarch);
   1650   buf = (gdb_byte *) alloca (len);
   1651 
   1652   read_code (addr, buf, len);
   1653   amd64_get_insn_details (buf, &details);
   1654 
   1655   classification = pred (&details);
   1656 
   1657   return classification;
   1658 }
   1659 
/* The gdbarch insn_is_call method.  Return non-zero if the
   instruction at ADDR is a call; throws if ADDR can't be read.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}
   1667 
/* The gdbarch insn_is_ret method.  Return non-zero if the instruction
   at ADDR is a return; throws if ADDR can't be read.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}
   1675 
/* The gdbarch insn_is_jump method.  Return non-zero if the
   instruction at ADDR is a jump; throws if ADDR can't be read.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}
   1683 
/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  FROM is the address of the original
   instruction, TO the address of the displaced copy that was just
   stepped, REGS the register cache to adjust, and DSC_ the closure
   built when the copy was prepared.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_copy_insn_closure *dsc_,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs)
{
  amd64_displaced_step_copy_insn_closure *dsc
    = (amd64_displaced_step_copy_insn_closure *) dsc_;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf.data ();
  const struct amd64_insn *insn_details = &dsc->insn_details;

  displaced_debug_printf ("fixup (%s, %s), insn = 0x%02x 0x%02x ...",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  insn[0], insn[1]);

  /* If we used a tmp reg, restore it.	*/

  if (dsc->tmp_used)
    {
      displaced_debug_printf ("restoring reg %d to %s",
			      dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))
    {
      ULONGEST orig_rip;
      int insn_len;

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.	Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.	*/
      if (amd64_syscall_p (insn_details, &insn_len)
	  && orig_rip != to + insn_len
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.
	     Fixup ensures it's a nop, we add one to the length for it.  */
	  && orig_rip != to + insn_len + 1)
	displaced_debug_printf ("syscall changed %%rip; not relocating");
      else
	{
	  /* Undo the displacement the copy introduced.  */
	  ULONGEST rip = orig_rip - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

	  displaced_debug_printf ("relocated %%rip from %s to %s",
				  paddress (gdbarch, orig_rip),
				  paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.	 */
  if (amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      /* Shift the return address back and keep it within 64 bits.  */
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      displaced_debug_printf ("relocated return addr at %s to %s",
			      paddress (gdbarch, rsp),
			      paddress (gdbarch, retaddr));
    }
}
   1796 
   1797 /* If the instruction INSN uses RIP-relative addressing, return the
   1798    offset into the raw INSN where the displacement to be adjusted is
   1799    found.  Returns 0 if the instruction doesn't use RIP-relative
   1800    addressing.  */
   1801 
   1802 static int
   1803 rip_relative_offset (struct amd64_insn *insn)
   1804 {
   1805   if (insn->modrm_offset != -1)
   1806     {
   1807       gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
   1808 
   1809       if ((modrm & 0xc7) == 0x05)
   1810 	{
   1811 	  /* The displacement is found right after the ModRM byte.  */
   1812 	  return insn->modrm_offset + 1;
   1813 	}
   1814     }
   1815 
   1816   return 0;
   1817 }
   1818 
   1819 static void
   1820 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
   1821 {
   1822   target_write_memory (*to, buf, len);
   1823   *to += len;
   1824 }
   1825 
   1826 static void
   1827 amd64_relocate_instruction (struct gdbarch *gdbarch,
   1828 			    CORE_ADDR *to, CORE_ADDR oldloc)
   1829 {
   1830   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
   1831   int len = gdbarch_max_insn_length (gdbarch);
   1832   /* Extra space for sentinels.  */
   1833   int fixup_sentinel_space = len;
   1834   gdb_byte *buf = (gdb_byte *) xmalloc (len + fixup_sentinel_space);
   1835   struct amd64_insn insn_details;
   1836   int offset = 0;
   1837   LONGEST rel32, newrel;
   1838   gdb_byte *insn;
   1839   int insn_length;
   1840 
   1841   read_memory (oldloc, buf, len);
   1842 
   1843   /* Set up the sentinel space so we don't have to worry about running
   1844      off the end of the buffer.  An excessive number of leading prefixes
   1845      could otherwise cause this.  */
   1846   memset (buf + len, 0, fixup_sentinel_space);
   1847 
   1848   insn = buf;
   1849   amd64_get_insn_details (insn, &insn_details);
   1850 
   1851   insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
   1852 
   1853   /* Skip legacy instruction prefixes.  */
   1854   insn = amd64_skip_prefixes (insn);
   1855 
   1856   /* Adjust calls with 32-bit relative addresses as push/jump, with
   1857      the address pushed being the location where the original call in
   1858      the user program would return to.  */
   1859   if (insn[0] == 0xe8)
   1860     {
   1861       gdb_byte push_buf[32];
   1862       CORE_ADDR ret_addr;
   1863       int i = 0;
   1864 
   1865       /* Where "ret" in the original code will return to.  */
   1866       ret_addr = oldloc + insn_length;
   1867 
   1868       /* If pushing an address higher than or equal to 0x80000000,
   1869 	 avoid 'pushq', as that sign extends its 32-bit operand, which
   1870 	 would be incorrect.  */
   1871       if (ret_addr <= 0x7fffffff)
   1872 	{
   1873 	  push_buf[0] = 0x68; /* pushq $...  */
   1874 	  store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
   1875 	  i = 5;
   1876 	}
   1877       else
   1878 	{
   1879 	  push_buf[i++] = 0x48; /* sub    $0x8,%rsp */
   1880 	  push_buf[i++] = 0x83;
   1881 	  push_buf[i++] = 0xec;
   1882 	  push_buf[i++] = 0x08;
   1883 
   1884 	  push_buf[i++] = 0xc7; /* movl    $imm,(%rsp) */
   1885 	  push_buf[i++] = 0x04;
   1886 	  push_buf[i++] = 0x24;
   1887 	  store_unsigned_integer (&push_buf[i], 4, byte_order,
   1888 				  ret_addr & 0xffffffff);
   1889 	  i += 4;
   1890 
   1891 	  push_buf[i++] = 0xc7; /* movl    $imm,4(%rsp) */
   1892 	  push_buf[i++] = 0x44;
   1893 	  push_buf[i++] = 0x24;
   1894 	  push_buf[i++] = 0x04;
   1895 	  store_unsigned_integer (&push_buf[i], 4, byte_order,
   1896 				  ret_addr >> 32);
   1897 	  i += 4;
   1898 	}
   1899       gdb_assert (i <= sizeof (push_buf));
   1900       /* Push the push.  */
   1901       append_insns (to, i, push_buf);
   1902 
   1903       /* Convert the relative call to a relative jump.  */
   1904       insn[0] = 0xe9;
   1905 
   1906       /* Adjust the destination offset.  */
   1907       rel32 = extract_signed_integer (insn + 1, 4, byte_order);
   1908       newrel = (oldloc - *to) + rel32;
   1909       store_signed_integer (insn + 1, 4, byte_order, newrel);
   1910 
   1911       displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
   1912 			      hex_string (rel32), paddress (gdbarch, oldloc),
   1913 			      hex_string (newrel), paddress (gdbarch, *to));
   1914 
   1915       /* Write the adjusted jump into its displaced location.  */
   1916       append_insns (to, 5, insn);
   1917       return;
   1918     }
   1919 
   1920   offset = rip_relative_offset (&insn_details);
   1921   if (!offset)
   1922     {
   1923       /* Adjust jumps with 32-bit relative addresses.  Calls are
   1924 	 already handled above.  */
   1925       if (insn[0] == 0xe9)
   1926 	offset = 1;
   1927       /* Adjust conditional jumps.  */
   1928       else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
   1929 	offset = 2;
   1930     }
   1931 
   1932   if (offset)
   1933     {
   1934       rel32 = extract_signed_integer (insn + offset, 4, byte_order);
   1935       newrel = (oldloc - *to) + rel32;
   1936       store_signed_integer (insn + offset, 4, byte_order, newrel);
   1937       displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
   1938 			      hex_string (rel32), paddress (gdbarch, oldloc),
   1939 			      hex_string (newrel), paddress (gdbarch, *to));
   1940     }
   1941 
   1942   /* Write the adjusted instruction into its displaced location.  */
   1943   append_insns (to, insn_length, buf);
   1944 }
   1945 
   1946 
   1947 /* The maximum number of saved registers.  This should include %rip.  */
   1949 #define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS
   1950 
struct amd64_frame_cache
{
  /* Base address of the frame.  Saved-register entries below are kept
     relative to this until they are converted to absolute addresses.  */
  CORE_ADDR base;
  /* Non-zero once BASE has been computed successfully.  */
  int base_p;
  /* Net adjustment applied to %rsp by the analyzed prologue code.  */
  CORE_ADDR sp_offset;
  /* Start address of the function this cache describes.  */
  CORE_ADDR pc;

  /* Saved registers.  -1 means "not saved"; otherwise an offset from
     BASE (later converted to an absolute address).  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  /* Value of %rsp in the calling frame.  */
  CORE_ADDR saved_sp;
  /* Register holding the stack pointer saved by stack-realignment
     code in the prologue, or -1 if none was detected.  */
  int saved_sp_reg;

  /* Do we have a frame?  Non-zero while no frame setup ("pushq %rbp;
     movq %rsp, %rbp") has been recognized.  */
  int frameless_p;
};
   1967 
   1968 /* Initialize a frame cache.  */
   1969 
   1970 static void
   1971 amd64_init_frame_cache (struct amd64_frame_cache *cache)
   1972 {
   1973   int i;
   1974 
   1975   /* Base address.  */
   1976   cache->base = 0;
   1977   cache->base_p = 0;
   1978   cache->sp_offset = -8;
   1979   cache->pc = 0;
   1980 
   1981   /* Saved registers.  We initialize these to -1 since zero is a valid
   1982      offset (that's where %rbp is supposed to be stored).
   1983      The values start out as being offsets, and are later converted to
   1984      addresses (at which point -1 is interpreted as an address, still meaning
   1985      "invalid").  */
   1986   for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
   1987     cache->saved_regs[i] = -1;
   1988   cache->saved_sp = 0;
   1989   cache->saved_sp_reg = -1;
   1990 
   1991   /* Frameless until proven otherwise.  */
   1992   cache->frameless_p = 1;
   1993 }
   1994 
   1995 /* Allocate and initialize a frame cache.  */
   1996 
   1997 static struct amd64_frame_cache *
   1998 amd64_alloc_frame_cache (void)
   1999 {
   2000   struct amd64_frame_cache *cache;
   2001 
   2002   cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
   2003   amd64_init_frame_cache (cache);
   2004   return cache;
   2005 }
   2006 
   2007 /* GCC 4.4 and later, can put code in the prologue to realign the
   2008    stack pointer.  Check whether PC points to such code, and update
   2009    CACHE accordingly.  Return the first instruction after the code
   2010    sequence or CURRENT_PC, whichever is smaller.  If we don't
   2011    recognize the code, return PC.  */
   2012 
static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  /* If we can't read the code, assume no realignment sequence.  */
  if (target_read_code (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the "andq" is; 0x81 takes a 32-bit immediate,
     0x83 an 8-bit one.  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Record the register holding the saved %rsp only once execution
     has passed the "andq" that clobbers %rsp.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return std::min (pc + offset + 2, current_pc);
}
   2161 
   2162 /* Similar to amd64_analyze_stack_align for x32.  */
   2163 
static CORE_ADDR
amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			       struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	   or

		[addr32] leal  8(%rsp), %reg
		andl  $-XXX, %esp
		[addr32] pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	   or

		pushq %reg
		[addr32] leal  16(%rsp), %reg
		andl  $-XXX, %esp
		[addr32] pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp

     "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:

	0x83 0xe4 0xf0			andl $-16, %esp
	0x81 0xe4 0x00 0xff 0xff 0xff	andl $-256, %esp
   */

  gdb_byte buf[19];
  int reg, r;
  int offset, offset_and;

  /* If we can't read the code, assume no realignment sequence.  */
  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Skip optional addr32 prefix.  */
  offset = buf[0] == 0x67 ? 1 : 0;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg".  */
  if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
      && buf[offset + 1] == 0x8d
      && buf[offset + 3] == 0x24
      && buf[offset + 4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	reg += 8;

      offset += 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[offset] & 0xf6) == 0x40
	  && (buf[offset + 1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[offset] & 1) != 0)
	    reg = 8;

	  offset += 1;
	}
      else if ((buf[offset] & 0xf8) != 0x50)
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* Skip optional addr32 prefix.  */
      if (buf[offset] == 0x67)
	offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg" or
	 "leal 16(%rsp), %reg".  */
      if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction may be "andq $-XXX, %rsp" or
     "andl $-XXX, %esp".  */
  if (buf[offset] != 0x48)
    offset--;

  if (buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the "and" is; 0x81 takes a 32-bit immediate,
     0x83 an 8-bit one.  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* Skip optional addr32 prefix.  */
  if (buf[offset] == 0x67)
    offset++;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Record the register holding the saved %rsp only once execution
     has passed the "and" that clobbers the stack pointer.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return std::min (pc + offset + 2, current_pc);
}
   2343 
   2344 /* Do a limited analysis of the prologue at PC and update CACHE
   2345    accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   2346    address where the analysis stopped.
   2347 
   2348    We will handle only functions beginning with:
   2349 
   2350       pushq %rbp        0x55
   2351       movq %rsp, %rbp   0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
   2352 
   2353    or (for the X32 ABI):
   2354 
   2355       pushq %rbp        0x55
   2356       movl %esp, %ebp   0x89 0xe5 (or 0x8b 0xec)
   2357 
   2358    The `endbr64` instruction can be found before these sequences, and will be
   2359    skipped if found.
   2360 
   2361    Any function that doesn't start with one of these sequences will be
   2362    assumed to have no prologue and thus no valid frame pointer in
   2363    %rbp.  */
   2364 
static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
			CORE_ADDR pc, CORE_ADDR current_pc,
			struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The `endbr64` instruction.  */
  static const gdb_byte endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
  /* There are two variations of movq %rsp, %rbp.  */
  static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
  static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
  /* Ditto for movl %esp, %ebp.  */
  static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
  static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };

  gdb_byte buf[3];
  gdb_byte op;

  /* Nothing to analyze if we haven't advanced past PC.  */
  if (current_pc <= pc)
    return current_pc;

  /* First recognize any stack-realignment code; a 32-bit pointer size
     indicates the x32 ABI.  */
  if (gdbarch_ptr_bit (gdbarch) == 32)
    pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
  else
    pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_code_unsigned_integer (pc, 1, byte_order);

  /* Check for the `endbr64` instruction, skip it if found.  */
  if (op == endbr64[0])
    {
      read_code (pc + 1, buf, 3);

      if (memcmp (buf, &endbr64[1], 3) == 0)
	pc += 4;

      op = read_code_unsigned_integer (pc, 1, byte_order);
    }

  if (current_pc <= pc)
    return current_pc;

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
	 starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
	return current_pc;

      read_code (pc + 1, buf, 3);

      /* Check for `movq %rsp, %rbp'.  */
      if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
	  || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
	{
	  /* OK, we actually have a frame.  */
	  cache->frameless_p = 0;
	  return pc + 4;
	}

      /* For X32, also check for `movl %esp, %ebp'.  */
      if (gdbarch_ptr_bit (gdbarch) == 32)
	{
	  if (memcmp (buf, mov_esp_ebp_1, 2) == 0
	      || memcmp (buf, mov_esp_ebp_2, 2) == 0)
	    {
	      /* OK, we actually have a frame.  */
	      cache->frameless_p = 0;
	      return pc + 3;
	    }
	}

      return pc + 1;
    }

  return pc;
}
   2446 
   2447 /* Work around false termination of prologue - GCC PR debug/48827.
   2448 
   2449    START_PC is the first instruction of a function, PC is its minimal already
   2450    determined advanced address.  Function returns PC if it has nothing to do.
   2451 
   2452    84 c0                test   %al,%al
   2453    74 23                je     after
   2454    <-- here is 0 lines advance - the false prologue end marker.
   2455    0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
   2456    0f 29 4d 80          movaps %xmm1,-0x80(%rbp)
   2457    0f 29 55 90          movaps %xmm2,-0x70(%rbp)
   2458    0f 29 5d a0          movaps %xmm3,-0x60(%rbp)
   2459    0f 29 65 b0          movaps %xmm4,-0x50(%rbp)
   2460    0f 29 6d c0          movaps %xmm5,-0x40(%rbp)
   2461    0f 29 75 d0          movaps %xmm6,-0x30(%rbp)
   2462    0f 29 7d e0          movaps %xmm7,-0x20(%rbp)
   2463    after:  */
   2464 
static CORE_ADDR
amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
{
  struct symtab_and_line start_pc_sal, next_sal;
  gdb_byte buf[4 + 8 * 7];
  int offset, xmmreg;

  /* No prologue was skipped at all; nothing to work around.  */
  if (pc == start_pc)
    return pc;

  /* Only apply the workaround when line info is present, the producer
     check passes (presumably requiring GCC >= 4.6 — the compilers
     affected by PR debug/48827), and PC is still within the first
     line's range.  */
  start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
  if (start_pc_sal.symtab == NULL
      || producer_is_gcc_ge_4 (start_pc_sal.symtab->compunit ()
			       ->producer ()) < 6
      || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
    return pc;

  /* The false prologue-end marker is a second line entry with the
     same line number.  */
  next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
  if (next_sal.line != start_pc_sal.line)
    return pc;

  /* START_PC can be from overlayed memory, ignored here.  */
  if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
    return pc;

  /* test %al,%al */
  if (buf[0] != 0x84 || buf[1] != 0xc0)
    return pc;
  /* je AFTER */
  if (buf[2] != 0x74)
    return pc;

  /* Match the eight "movaps %xmmN,-0x??(%rbp)" stores that spill the
     vararg SSE registers.  */
  offset = 4;
  for (xmmreg = 0; xmmreg < 8; xmmreg++)
    {
      /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
      if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
	  || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
	return pc;

      /* 0b01?????? */
      if ((buf[offset + 2] & 0xc0) == 0x40)
	{
	  /* 8-bit displacement.  */
	  offset += 4;
	}
      /* 0b10?????? */
      else if ((buf[offset + 2] & 0xc0) == 0x80)
	{
	  /* 32-bit displacement.  */
	  offset += 7;
	}
      else
	return pc;
    }

  /* je AFTER — the jump must land exactly after the movaps block.  */
  if (offset - 4 != buf[3])
    return pc;

  return next_sal.end;
}
   2527 
   2528 /* Return PC of first real instruction.  */
   2529 
static CORE_ADDR
amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
{
  struct amd64_frame_cache cache;
  CORE_ADDR pc;
  CORE_ADDR func_addr;

  /* Prefer the compiler's own prologue-end line note when the
     producer is known to emit reliable ones.  */
  if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);
      struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);

      /* LLVM backend (Clang/Flang) always emits a line note before the
	 prologue and another one after.  We trust clang and newer Intel
	 compilers to emit usable line notes.  */
      if (post_prologue_pc
	  && (cust != NULL
	      && cust->producer () != nullptr
	      && (producer_is_llvm (cust->producer ())
	      || producer_is_icc_ge_19 (cust->producer ()))))
        return std::max (start_pc, post_prologue_pc);
    }

  /* Otherwise fall back to analyzing the instructions, scanning the
     whole function (no upper PC bound).  */
  amd64_init_frame_cache (&cache);
  pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
			       &cache);
  /* No recognized frame setup: treat the function as having no
     prologue.  */
  if (cache.frameless_p)
    return start_pc;

  /* Work around GCC PR debug/48827 (see amd64_skip_xmm_prologue).  */
  return amd64_skip_xmm_prologue (pc, start_pc);
}
   2562 
   2563 
   2565 /* Normal frames.  */
   2566 
/* Fill CACHE with the frame base, saved stack pointer and saved
   registers of THIS_FRAME, by analyzing the function's prologue.  May
   throw if registers or memory are unavailable.  */

static void
amd64_frame_cache_1 (frame_info_ptr this_frame,
		     struct amd64_frame_cache *cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];
  int i;

  /* Analyze the prologue between the function start and the current
     PC; this sets frameless_p, sp_offset and saved_regs offsets.  */
  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
			    cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
	 function, or somewhere half-way its prologue, the function's
	 frame probably hasn't been fully setup yet.  Try to
	 reconstruct the base address for the stack frame by looking
	 at the stack pointer.  For truly "frameless" functions this
	 might work too.  */

      if (cache->saved_sp_reg != -1)
	{
	  /* Stack pointer has been saved.  */
	  get_frame_register (this_frame, cache->saved_sp_reg, buf);
	  cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);

	  /* We're halfway aligning the stack.  */
	  cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
	  cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

	  /* This will be added back below.  */
	  cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
	}
      else
	{
	  /* Reconstruct the base from the current %rsp and the net
	     stack adjustment the prologue has performed so far.  */
	  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
	  cache->base = extract_unsigned_integer (buf, 8, byte_order)
			+ cache->sp_offset;
	}
    }
  else
    {
      /* A frame pointer was set up; %rbp is the frame base.  */
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  cache->base_p = 1;
}
   2635 
   2636 static struct amd64_frame_cache *
   2637 amd64_frame_cache (frame_info_ptr this_frame, void **this_cache)
   2638 {
   2639   struct amd64_frame_cache *cache;
   2640 
   2641   if (*this_cache)
   2642     return (struct amd64_frame_cache *) *this_cache;
   2643 
   2644   cache = amd64_alloc_frame_cache ();
   2645   *this_cache = cache;
   2646 
   2647   try
   2648     {
   2649       amd64_frame_cache_1 (this_frame, cache);
   2650     }
   2651   catch (const gdb_exception_error &ex)
   2652     {
   2653       if (ex.error != NOT_AVAILABLE_ERROR)
   2654 	throw;
   2655     }
   2656 
   2657   return cache;
   2658 }
   2659 
   2660 static enum unwind_stop_reason
   2661 amd64_frame_unwind_stop_reason (frame_info_ptr this_frame,
   2662 				void **this_cache)
   2663 {
   2664   struct amd64_frame_cache *cache =
   2665     amd64_frame_cache (this_frame, this_cache);
   2666 
   2667   if (!cache->base_p)
   2668     return UNWIND_UNAVAILABLE;
   2669 
   2670   /* This marks the outermost frame.  */
   2671   if (cache->base == 0)
   2672     return UNWIND_OUTERMOST;
   2673 
   2674   return UNWIND_NO_REASON;
   2675 }
   2676 
   2677 static void
   2678 amd64_frame_this_id (frame_info_ptr this_frame, void **this_cache,
   2679 		     struct frame_id *this_id)
   2680 {
   2681   struct amd64_frame_cache *cache =
   2682     amd64_frame_cache (this_frame, this_cache);
   2683 
   2684   if (!cache->base_p)
   2685     (*this_id) = frame_id_build_unavailable_stack (cache->pc);
   2686   else if (cache->base == 0)
   2687     {
   2688       /* This marks the outermost frame.  */
   2689       return;
   2690     }
   2691   else
   2692     (*this_id) = frame_id_build (cache->base + 16, cache->pc);
   2693 }
   2694 
   2695 static struct value *
   2696 amd64_frame_prev_register (frame_info_ptr this_frame, void **this_cache,
   2697 			   int regnum)
   2698 {
   2699   struct gdbarch *gdbarch = get_frame_arch (this_frame);
   2700   struct amd64_frame_cache *cache =
   2701     amd64_frame_cache (this_frame, this_cache);
   2702 
   2703   gdb_assert (regnum >= 0);
   2704 
   2705   if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
   2706     return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
   2707 
   2708   if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
   2709     return frame_unwind_got_memory (this_frame, regnum,
   2710 				    cache->saved_regs[regnum]);
   2711 
   2712   return frame_unwind_got_register (this_frame, regnum, regnum);
   2713 }
   2714 
/* The prologue-analysis based unwinder for ordinary AMD64 frames.  */

static const struct frame_unwind amd64_frame_unwind =
{
  "amd64 prologue",
  NORMAL_FRAME,
  amd64_frame_unwind_stop_reason,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  default_frame_sniffer
};
   2725 
/* Generate a bytecode expression to get the value of the saved PC.  */

static void
amd64_gen_return_address (struct gdbarch *gdbarch,
			  struct agent_expr *ax, struct axs_value *value,
			  CORE_ADDR scope)
{
  /* The following sequence assumes the traditional use of the base
     register: the return address lives at 8(%rbp).  Emit "%rbp + 8"
     and mark the result as an lvalue in memory of %rip's type.  */
  ax_reg (ax, AMD64_RBP_REGNUM);
  ax_const_l (ax, 8);
  ax_simple (ax, aop_add);
  value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
  value->kind = axs_lvalue_memory;
}
   2742 
   2743 
   2745 /* Signal trampolines.  */
   2746 
   2747 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   2748    64-bit variants.  This would require using identical frame caches
   2749    on both platforms.  */
   2750 
/* Build (or return the already-built) frame cache for a signal
   trampoline frame.  Saved registers are located at fixed offsets
   inside the OS-specific signal context, found via
   tdep->sigcontext_addr and tdep->sc_reg_offset.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (frame_info_ptr this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();

  try
    {
      /* Use %rsp - 8 as the frame's base address.  */
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

      /* Record where each saved register lives inside the OS's signal
	 context; offsets of -1 mean "not saved there".  */
      addr = tdep->sigcontext_addr (this_frame);
      gdb_assert (tdep->sc_reg_offset);
      gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
      for (i = 0; i < tdep->sc_num_regs; i++)
	if (tdep->sc_reg_offset[i] != -1)
	  cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

      cache->base_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      /* Leave base_p clear if registers/memory were unavailable;
	 re-throw anything else.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  *this_cache = cache;
  return cache;
}
   2790 
   2791 static enum unwind_stop_reason
   2792 amd64_sigtramp_frame_unwind_stop_reason (frame_info_ptr this_frame,
   2793 					 void **this_cache)
   2794 {
   2795   struct amd64_frame_cache *cache =
   2796     amd64_sigtramp_frame_cache (this_frame, this_cache);
   2797 
   2798   if (!cache->base_p)
   2799     return UNWIND_UNAVAILABLE;
   2800 
   2801   return UNWIND_NO_REASON;
   2802 }
   2803 
   2804 static void
   2805 amd64_sigtramp_frame_this_id (frame_info_ptr this_frame,
   2806 			      void **this_cache, struct frame_id *this_id)
   2807 {
   2808   struct amd64_frame_cache *cache =
   2809     amd64_sigtramp_frame_cache (this_frame, this_cache);
   2810 
   2811   if (!cache->base_p)
   2812     (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
   2813   else if (cache->base == 0)
   2814     {
   2815       /* This marks the outermost frame.  */
   2816       return;
   2817     }
   2818   else
   2819     (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
   2820 }
   2821 
/* Unwind register REGNUM from a signal trampoline frame.  Populates
   the sigtramp cache, then defers to the normal-frame machinery which
   reads through cache->saved_regs.  */

static struct value *
amd64_sigtramp_frame_prev_register (frame_info_ptr this_frame,
				    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
   2831 
   2832 static int
   2833 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
   2834 			      frame_info_ptr this_frame,
   2835 			      void **this_cache)
   2836 {
   2837   gdbarch *arch = get_frame_arch (this_frame);
   2838   i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (arch);
   2839 
   2840   /* We shouldn't even bother if we don't have a sigcontext_addr
   2841      handler.  */
   2842   if (tdep->sigcontext_addr == NULL)
   2843     return 0;
   2844 
   2845   if (tdep->sigtramp_p != NULL)
   2846     {
   2847       if (tdep->sigtramp_p (this_frame))
   2848 	return 1;
   2849     }
   2850 
   2851   if (tdep->sigtramp_start != 0)
   2852     {
   2853       CORE_ADDR pc = get_frame_pc (this_frame);
   2854 
   2855       gdb_assert (tdep->sigtramp_end != 0);
   2856       if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
   2857 	return 1;
   2858     }
   2859 
   2860   return 0;
   2861 }
   2862 
/* Unwinder for signal-trampoline frames, claimed by
   amd64_sigtramp_frame_sniffer.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  "amd64 sigtramp",
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_unwind_stop_reason,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,
  amd64_sigtramp_frame_sniffer
};
   2873 
   2874 
   2876 static CORE_ADDR
   2877 amd64_frame_base_address (frame_info_ptr this_frame, void **this_cache)
   2878 {
   2879   struct amd64_frame_cache *cache =
   2880     amd64_frame_cache (this_frame, this_cache);
   2881 
   2882   return cache->base;
   2883 }
   2884 
/* Frame base handler paired with amd64_frame_unwind; all three base
   methods return the same address.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};
   2892 
   2893 /* Normal frames, but in a function epilogue.  */
   2894 
   2895 /* Implement the stack_frame_destroyed_p gdbarch method.
   2896 
   2897    The epilogue is defined here as the 'ret' instruction, which will
   2898    follow any instruction such as 'leave' or 'pop %ebp' that destroys
   2899    the function's stack frame.  */
   2900 
   2901 static int
   2902 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
   2903 {
   2904   gdb_byte insn;
   2905   struct compunit_symtab *cust;
   2906 
   2907   cust = find_pc_compunit_symtab (pc);
   2908   if (cust != NULL && cust->epilogue_unwind_valid ())
   2909     return 0;
   2910 
   2911   if (target_read_memory (pc, &insn, 1))
   2912     return 0;   /* Can't read memory at pc.  */
   2913 
   2914   if (insn != 0xc3)     /* 'ret' instruction.  */
   2915     return 0;
   2916 
   2917   return 1;
   2918 }
   2919 
   2920 static int
   2921 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
   2922 			      frame_info_ptr this_frame,
   2923 			      void **this_prologue_cache)
   2924 {
   2925   if (frame_relative_level (this_frame) == 0)
   2926     return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
   2927 					  get_frame_pc (this_frame));
   2928   else
   2929     return 0;
   2930 }
   2931 
/* Build the frame cache for a frame stopped in a function epilogue,
   i.e. sitting on the 'ret' instruction (see
   amd64_stack_frame_destroyed_p), where the frame pointer has already
   been torn down.  */

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (frame_info_ptr this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  try
    {
      /* Cache base will be %rsp plus cache->sp_offset (-8).  */
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8,
					      byte_order) + cache->sp_offset;

      /* Cache pc will be the frame func.  */
      cache->pc = get_frame_func (this_frame);

      /* The previous value of %rsp is cache->base plus 16.  */
      cache->saved_sp = cache->base + 16;

      /* The saved %rip will be at cache->base plus 8.  */
      cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

      cache->base_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      /* Leave base_p clear if registers were unavailable; re-throw
	 anything else.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}
   2972 
   2973 static enum unwind_stop_reason
   2974 amd64_epilogue_frame_unwind_stop_reason (frame_info_ptr this_frame,
   2975 					 void **this_cache)
   2976 {
   2977   struct amd64_frame_cache *cache
   2978     = amd64_epilogue_frame_cache (this_frame, this_cache);
   2979 
   2980   if (!cache->base_p)
   2981     return UNWIND_UNAVAILABLE;
   2982 
   2983   return UNWIND_NO_REASON;
   2984 }
   2985 
   2986 static void
   2987 amd64_epilogue_frame_this_id (frame_info_ptr this_frame,
   2988 			      void **this_cache,
   2989 			      struct frame_id *this_id)
   2990 {
   2991   struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
   2992 							       this_cache);
   2993 
   2994   if (!cache->base_p)
   2995     (*this_id) = frame_id_build_unavailable_stack (cache->pc);
   2996   else
   2997     (*this_id) = frame_id_build (cache->base + 16, cache->pc);
   2998 }
   2999 
/* Unwinder used when stopped in a function epilogue (on the 'ret'
   instruction); prepended so it takes precedence there.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  "amd64 epilogue",
  NORMAL_FRAME,
  amd64_epilogue_frame_unwind_stop_reason,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  amd64_epilogue_frame_sniffer
};
   3010 
   3011 static struct frame_id
   3012 amd64_dummy_id (struct gdbarch *gdbarch, frame_info_ptr this_frame)
   3013 {
   3014   CORE_ADDR fp;
   3015 
   3016   fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
   3017 
   3018   return frame_id_build (fp + 16, get_frame_pc (this_frame));
   3019 }
   3020 
   3021 /* 16 byte align the SP per frame requirements.  */
   3022 
   3023 static CORE_ADDR
   3024 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
   3025 {
   3026   return sp & -(CORE_ADDR)16;
   3027 }
   3028 
   3029 
   3031 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
   3032    in the floating-point register set REGSET to register cache
   3033    REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */
   3034 
   3035 static void
   3036 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
   3037 		       int regnum, const void *fpregs, size_t len)
   3038 {
   3039   struct gdbarch *gdbarch = regcache->arch ();
   3040   const i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
   3041 
   3042   gdb_assert (len >= tdep->sizeof_fpregset);
   3043   amd64_supply_fxsave (regcache, regnum, fpregs);
   3044 }
   3045 
   3046 /* Collect register REGNUM from the register cache REGCACHE and store
   3047    it in the buffer specified by FPREGS and LEN as described by the
   3048    floating-point register set REGSET.  If REGNUM is -1, do this for
   3049    all registers in REGSET.  */
   3050 
   3051 static void
   3052 amd64_collect_fpregset (const struct regset *regset,
   3053 			const struct regcache *regcache,
   3054 			int regnum, void *fpregs, size_t len)
   3055 {
   3056   struct gdbarch *gdbarch = regcache->arch ();
   3057   const i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
   3058 
   3059   gdb_assert (len >= tdep->sizeof_fpregset);
   3060   amd64_collect_fxsave (regcache, regnum, fpregs);
   3061 }
   3062 
/* The AMD64 floating-point register set, based on the FXSAVE
   layout.  */

const struct regset amd64_fpregset =
  {
    NULL, amd64_supply_fpregset, amd64_collect_fpregset
  };
   3067 
   3068 
   3070 /* Figure out where the longjmp will land.  Slurp the jmp_buf out of
   3071    %rdi.  We expect its value to be a pointer to the jmp_buf structure
   3072    from which we extract the address that we will land at.  This
   3073    address is copied into PC.  This routine returns non-zero on
   3074    success.  */
   3075 
   3076 static int
   3077 amd64_get_longjmp_target (frame_info_ptr frame, CORE_ADDR *pc)
   3078 {
   3079   gdb_byte buf[8];
   3080   CORE_ADDR jb_addr;
   3081   struct gdbarch *gdbarch = get_frame_arch (frame);
   3082   i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
   3083   int jb_pc_offset = tdep->jb_pc_offset;
   3084   int len = builtin_type (gdbarch)->builtin_func_ptr->length ();
   3085 
   3086   /* If JB_PC_OFFSET is -1, we have no way to find out where the
   3087      longjmp will land.	 */
   3088   if (jb_pc_offset == -1)
   3089     return 0;
   3090 
   3091   get_frame_register (frame, AMD64_RDI_REGNUM, buf);
   3092   jb_addr= extract_typed_address
   3093 	    (buf, builtin_type (gdbarch)->builtin_data_ptr);
   3094   if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
   3095     return 0;
   3096 
   3097   *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
   3098 
   3099   return 1;
   3100 }
   3101 
/* Register map handed to the process record/replay machinery via
   tdep->record_regmap.  NOTE(review): the ordering appears to follow
   the x86-64 instruction-encoding register order (rax, rcx, rdx, rbx,
   rsp, rbp, rsi, rdi, r8-r15) followed by rip/eflags/segments —
   confirm against the i386 record code.  */

static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};
   3111 
/* Implement the "in_indirect_branch_thunk" gdbarch function.  */

static bool
amd64_in_indirect_branch_thunk (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  /* Delegate to the shared x86 helper, passing the AMD64 register
     name table and the range of general-purpose register numbers the
     thunk names are derived from.  */
  return x86_in_indirect_branch_thunk (pc, amd64_register_names,
				       AMD64_RAX_REGNUM,
				       AMD64_RIP_REGNUM);
}
   3121 
/* Fill in the generic AMD64 parts of GDBARCH: register layout, pseudo
   registers, fundamental type sizes, frame unwinders and assorted
   gdbarch methods.  DEFAULT_TDESC is used when INFO's target
   description carries no registers.  Variants such as
   amd64_x32_init_abi call this first and then adjust the result.  */

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
		const target_desc *default_tdesc)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  const struct target_desc *tdesc = info.target_desc;
  static const char *const stap_integer_prefixes[] = { "$", NULL };
  static const char *const stap_register_prefixes[] = { "%", NULL };
  static const char *const stap_register_indirection_prefixes[] = { "(",
								    NULL };
  static const char *const stap_register_indirection_suffixes[] = { ")",
								    NULL };

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
  tdep->fpregset = &amd64_fpregset;

  if (! tdesc_has_registers (tdesc))
    tdesc = default_tdesc;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* AVX-512: ZMM, K (mask) and the extra XMM16-31/YMM16-31
     registers.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
    {
      tdep->zmmh_register_names = amd64_zmmh_names;
      tdep->k_register_names = amd64_k_names;
      tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
      tdep->ymm16h_register_names = amd64_ymmh_avx512_names;

      tdep->num_zmm_regs = 32;
      tdep->num_xmm_avx512_regs = 16;
      tdep->num_ymm_avx512_regs = 16;

      tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
      tdep->k0_regnum = AMD64_K0_REGNUM;
      tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
      tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
    }

  /* AVX: upper halves of the YMM registers.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
    {
      tdep->ymmh_register_names = amd64_ymmh_names;
      tdep->num_ymm_regs = 16;
      tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
    }

  /* MPX bound registers.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
    {
      tdep->mpx_register_names = amd64_mpx_names;
      tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
      tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
    }

  /* FS/GS base registers.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
    {
      tdep->fsbase_regnum = AMD64_FSBASE_REGNUM;
    }

  /* Protection-keys (PKRU) register.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
    {
      tdep->pkeys_register_names = amd64_pkeys_names;
      tdep->pkru_regnum = AMD64_PKRU_REGNUM;
      tdep->num_pkeys_regs = 1;
    }

  tdep->num_byte_regs = 20;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read_value (gdbarch,
					  amd64_pseudo_register_read_value);
  set_gdbarch_pseudo_register_write (gdbarch,
				     amd64_pseudo_register_write);
  set_gdbarch_ax_pseudo_register_collect (gdbarch,
					  amd64_ax_pseudo_register_collect);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);

  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);

  set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);

  /* SystemTap variables and functions.  */
  set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
  set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
  set_gdbarch_stap_register_indirection_prefixes (gdbarch,
					  stap_register_indirection_prefixes);
  set_gdbarch_stap_register_indirection_suffixes (gdbarch,
					  stap_register_indirection_suffixes);
  set_gdbarch_stap_is_single_operand (gdbarch,
				      i386_stap_is_single_operand);
  set_gdbarch_stap_parse_special_token (gdbarch,
					i386_stap_parse_special_token);
  set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
  set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
  set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);

  set_gdbarch_in_indirect_branch_thunk (gdbarch,
					amd64_in_indirect_branch_thunk);

  register_amd64_ravenscar_ops (gdbarch);
}
   3292 
   3293 /* Initialize ARCH for x86-64, no osabi.  */
   3294 
   3295 static void
   3296 amd64_none_init_abi (gdbarch_info info, gdbarch *arch)
   3297 {
   3298   amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK,
   3299 							true));
   3300 }
   3301 
   3302 static struct type *
   3303 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
   3304 {
   3305   i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
   3306 
   3307   switch (regnum - tdep->eax_regnum)
   3308     {
   3309     case AMD64_RBP_REGNUM:	/* %ebp */
   3310     case AMD64_RSP_REGNUM:	/* %esp */
   3311       return builtin_type (gdbarch)->builtin_data_ptr;
   3312     case AMD64_RIP_REGNUM:	/* %eip */
   3313       return builtin_type (gdbarch)->builtin_func_ptr;
   3314     }
   3315 
   3316   return i386_pseudo_register_type (gdbarch, regnum);
   3317 }
   3318 
/* Initialize GDBARCH for the x32 (ILP32) variant of the AMD64 ABI.
   Starts from the plain 64-bit ABI and then narrows the data
   model.  */

void
amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
		    const target_desc *default_tdesc)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  amd64_init_abi (info, gdbarch, default_tdesc);

  /* One extra dword pseudo register compared to plain AMD64, with
     pointer-typed %ebp/%esp/%eip (see
     amd64_x32_pseudo_register_type).  */
  tdep->num_dword_regs = 17;
  set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);

  /* x32 has 32-bit longs and pointers.  */
  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_ptr_bit (gdbarch, 32);
}
   3333 
   3334 /* Initialize ARCH for x64-32, no osabi.  */
   3335 
   3336 static void
   3337 amd64_x32_none_init_abi (gdbarch_info info, gdbarch *arch)
   3338 {
   3339   amd64_x32_init_abi (info, arch,
   3340 		      amd64_target_description (X86_XSTATE_SSE_MASK, true));
   3341 }
   3342 
   3343 /* Return the target description for a specified XSAVE feature mask.  */
   3344 
   3345 const struct target_desc *
   3346 amd64_target_description (uint64_t xcr0, bool segments)
   3347 {
   3348   static target_desc *amd64_tdescs \
   3349     [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
   3350   target_desc **tdesc;
   3351 
   3352   tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 1 : 0]
   3353     [(xcr0 & X86_XSTATE_MPX) ? 1 : 0]
   3354     [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0]
   3355     [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0]
   3356     [segments ? 1 : 0];
   3357 
   3358   if (*tdesc == NULL)
   3359     *tdesc = amd64_create_target_description (xcr0, false, false,
   3360 					      segments);
   3361 
   3362   return *tdesc;
   3363 }
   3364 
/* Forward declaration (GDB convention for _initialize functions).  */
void _initialize_amd64_tdep ();
void
_initialize_amd64_tdep ()
{
  /* Register the osabi-less variants for 64-bit x86-64 and x32.  */
  gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_NONE,
			  amd64_none_init_abi);
  gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x64_32, GDB_OSABI_NONE,
			  amd64_x32_none_init_abi);
}
   3374 
   3375 
   3377 /* The 64-bit FXSAVE format differs from the 32-bit format in the
   3378    sense that the instruction pointer and data pointer are simply
   3379    64-bit offsets into the code segment and the data segment instead
   3380    of a selector offset pair.  The functions below store the upper 32
   3381    bits of these pointers (instead of just the 16-bits of the segment
   3382    selector).  */
   3383 
/* Fill register REGNUM in REGCACHE with the appropriate
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
   -1, do this for all registers.  This function masks off any of the
   reserved bits in *FXSAVE.  */

void
amd64_supply_fxsave (struct regcache *regcache, int regnum,
		     const void *fxsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  i387_supply_fxsave (regcache, regnum, fxsave);

  /* On 64-bit targets, offsets 12 and 20 in the FXSAVE area hold the
     upper 32 bits of the FP instruction and operand pointers rather
     than segment selectors (see the comment at the start of this
     section); supply them directly.  */
  if (fxsave
      && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      const gdb_byte *regs = (const gdb_byte *) fxsave;

      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}
   3409 
/* Similar to amd64_supply_fxsave, but use XSAVE extended state.  */

void
amd64_supply_xsave (struct regcache *regcache, int regnum,
		    const void *xsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  i387_supply_xsave (regcache, regnum, xsave);

  if (xsave
      && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      const gdb_byte *regs = (const gdb_byte *) xsave;
      ULONGEST clear_bv;

      /* CLEAR_BV has a bit set for each register group whose content
	 is not present in the XSAVE buffer (still in its initial
	 state).  */
      clear_bv = i387_xsave_get_clear_bv (gdbarch, xsave);

      /* If the FISEG and FOSEG registers have not been initialised yet
	 (their CLEAR_BV bit is set) then their default values of zero will
	 have already been setup by I387_SUPPLY_XSAVE.  */
      if (!(clear_bv & X86_XSTATE_X87))
	{
	  /* Offsets 12 and 20 hold the upper 32 bits of the FP
	     instruction and operand pointers (see the comment at the
	     start of this section).  */
	  if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	    regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
	  if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	    regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
	}
    }
}
   3441 
/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */

void
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
		      void *fxsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  gdb_byte *regs = (gdb_byte *) fxsave;

  i387_collect_fxsave (regcache, regnum, fxsave);

  /* On 64-bit targets, store the upper 32 bits of the FP instruction
     and operand pointers at offsets 12 and 20 (see the comment at the
     start of this section).  */
  if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}
   3465 
   3466 /* Similar to amd64_collect_fxsave, but use XSAVE extended state.  */
   3467 
   3468 void
   3469 amd64_collect_xsave (const struct regcache *regcache, int regnum,
   3470 		     void *xsave, int gcore)
   3471 {
   3472   struct gdbarch *gdbarch = regcache->arch ();
   3473   i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
   3474   gdb_byte *regs = (gdb_byte *) xsave;
   3475 
   3476   i387_collect_xsave (regcache, regnum, xsave, gcore);
   3477 
   3478   if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
   3479     {
   3480       if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
   3481 	regcache->raw_collect (I387_FISEG_REGNUM (tdep),
   3482 			      regs + 12);
   3483       if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
   3484 	regcache->raw_collect (I387_FOSEG_REGNUM (tdep),
   3485 			      regs + 20);
   3486     }
   3487 }
   3488