amd64-tdep.c revision 1.1.1.2 1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001-2015 Free Software Foundation, Inc.
4
5 Contributed by Jiri Smid, SuSE Labs.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "opcode/i386.h"
24 #include "dis-asm.h"
25 #include "arch-utils.h"
26 #include "block.h"
27 #include "dummy-frame.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "inferior.h"
32 #include "infrun.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39 #include "disasm.h"
40 #include "amd64-tdep.h"
41 #include "i387-tdep.h"
42
43 #include "features/i386/amd64.c"
44 #include "features/i386/amd64-avx.c"
45 #include "features/i386/amd64-mpx.c"
46 #include "features/i386/amd64-avx512.c"
47
48 #include "features/i386/x32.c"
49 #include "features/i386/x32-avx.c"
50 #include "features/i386/x32-avx512.c"
51
52 #include "ax.h"
53 #include "ax-gdb.h"
54
55 /* Note that the AMD64 architecture was previously known as x86-64.
56 The latter is (forever) engraved into the canonical system name as
57 returned by config.guess, and used as the name for the AMD64 port
58 of GNU/Linux. The BSD's have renamed their ports to amd64; they
59 don't like to shout. For GDB we prefer the amd64_-prefix over the
60 x86_64_-prefix since it's so much easier to type. */
61
62 /* Register information. */
63
/* Names of the raw (non-pseudo) AMD64 registers, indexed by GDB
   register number.  The index of each name here defines that
   register's GDB regnum, as noted in the interior comments.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};
81
/* Names of the %ymm0-%ymm15 pseudo registers (AVX).  */

static const char *amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};
89
/* Names of the additional %ymm16-%ymm31 pseudo registers (AVX512).  */

static const char *amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};
97
/* Names of the raw registers holding the upper halves of
   %ymm0-%ymm15.  */

static const char *amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};
105
/* Names of the raw registers holding the upper halves of
   %ymm16-%ymm31 (AVX512).  */

static const char *amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};
113
/* Names of the MPX (Memory Protection Extensions) bound registers.  */

static const char *amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};
118
/* Names of the AVX512 opmask registers %k0-%k7.  */

static const char *amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};
124
/* Names of the raw registers holding the upper halves of
   %zmm0-%zmm31 (AVX512).  */

static const char *amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};
136
/* Names of the %zmm0-%zmm31 pseudo registers (AVX512).  */

static const char *amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};
148
/* Names of the additional %xmm16-%xmm31 pseudo registers (AVX512).  */

static const char *amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};
155
156 /* DWARF Register Number Mapping as defined in the System V psABI,
157 section 3.6. */
158
/* Map from DWARF register numbers to GDB register numbers.  An entry
   of -1 marks a DWARF register GDB has no (fixed) equivalent for;
   amd64_dwarf_reg_to_regnum warns when one of those is hit.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM,		/* %r15 */

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* MMX Registers 0 - 7.
     We have to handle those registers specifically, as their register
     number within GDB depends on the target (or they may even not be
     available at all).  */
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

/* Number of entries in the DWARF register map above.  */
static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
240
241 /* Convert DWARF register number REG to the appropriate register
242 number used by GDB. */
243
244 static int
245 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
246 {
247 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
248 int ymm0_regnum = tdep->ymm0_regnum;
249 int regnum = -1;
250
251 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
252 regnum = amd64_dwarf_regmap[reg];
253
254 if (regnum == -1)
255 warning (_("Unmapped DWARF Register #%d encountered."), reg);
256 else if (ymm0_regnum >= 0
257 && i386_xmm_regnum_p (gdbarch, regnum))
258 regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
259
260 return regnum;
261 }
262
263 /* Map architectural register numbers to gdb register numbers. */
264
/* Map from architectural (instruction-encoding) register numbers, as
   used in REX/modrm fields, to GDB register numbers.  Note the
   architectural order (rax, rcx, rdx, rbx, ...) differs from GDB's
   raw-register order.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

/* Number of entries in the architectural register map above.  */
static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
287
288 /* Convert architectural register number REG to the appropriate register
289 number used by GDB. */
290
291 static int
292 amd64_arch_reg_to_regnum (int reg)
293 {
294 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
295
296 return amd64_arch_regmap[reg];
297 }
298
299 /* Register names for byte pseudo-registers. */
300
/* Register names for byte pseudo-registers.  The first 16 entries are
   the low bytes of the GP registers; the last four (ah..dh) are the
   high bytes of %rax, %rbx, %rcx and %rdx.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  Entries at or beyond this index in
   amd64_byte_names name high-byte (bits 8-15) registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16
310
311 /* Register names for word pseudo-registers. */
312
/* Register names for word (16-bit) pseudo-registers.  */

static const char *amd64_word_names[] =
{
  /* NOTE(review): the empty string occupies the %sp slot —
     presumably that 16-bit view is deliberately not exposed here;
     confirm against upstream history before changing.  */
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};
318
319 /* Register names for dword pseudo-registers. */
320
/* Register names for dword (32-bit) pseudo-registers.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};
327
328 /* Return the name of register REGNUM. */
329
static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Dispatch on the pseudo-register class REGNUM belongs to and index
     into the corresponding name table; fall back to the generic i386
     naming for anything not handled here.  */
  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_zmm_regnum_p (gdbarch, regnum))
    return amd64_zmm_names[regnum - tdep->zmm0_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];
  else
    return i386_pseudo_register_name (gdbarch, regnum);
}
349
350 static struct value *
351 amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
352 struct regcache *regcache,
353 int regnum)
354 {
355 gdb_byte raw_buf[MAX_REGISTER_SIZE];
356 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
357 enum register_status status;
358 struct value *result_value;
359 gdb_byte *buf;
360
361 result_value = allocate_value (register_type (gdbarch, regnum));
362 VALUE_LVAL (result_value) = lval_register;
363 VALUE_REGNUM (result_value) = regnum;
364 buf = value_contents_raw (result_value);
365
366 if (i386_byte_regnum_p (gdbarch, regnum))
367 {
368 int gpnum = regnum - tdep->al_regnum;
369
370 /* Extract (always little endian). */
371 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
372 {
373 /* Special handling for AH, BH, CH, DH. */
374 status = regcache_raw_read (regcache,
375 gpnum - AMD64_NUM_LOWER_BYTE_REGS,
376 raw_buf);
377 if (status == REG_VALID)
378 memcpy (buf, raw_buf + 1, 1);
379 else
380 mark_value_bytes_unavailable (result_value, 0,
381 TYPE_LENGTH (value_type (result_value)));
382 }
383 else
384 {
385 status = regcache_raw_read (regcache, gpnum, raw_buf);
386 if (status == REG_VALID)
387 memcpy (buf, raw_buf, 1);
388 else
389 mark_value_bytes_unavailable (result_value, 0,
390 TYPE_LENGTH (value_type (result_value)));
391 }
392 }
393 else if (i386_dword_regnum_p (gdbarch, regnum))
394 {
395 int gpnum = regnum - tdep->eax_regnum;
396 /* Extract (always little endian). */
397 status = regcache_raw_read (regcache, gpnum, raw_buf);
398 if (status == REG_VALID)
399 memcpy (buf, raw_buf, 4);
400 else
401 mark_value_bytes_unavailable (result_value, 0,
402 TYPE_LENGTH (value_type (result_value)));
403 }
404 else
405 i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
406 result_value);
407
408 return result_value;
409 }
410
411 static void
412 amd64_pseudo_register_write (struct gdbarch *gdbarch,
413 struct regcache *regcache,
414 int regnum, const gdb_byte *buf)
415 {
416 gdb_byte raw_buf[MAX_REGISTER_SIZE];
417 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
418
419 if (i386_byte_regnum_p (gdbarch, regnum))
420 {
421 int gpnum = regnum - tdep->al_regnum;
422
423 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
424 {
425 /* Read ... AH, BH, CH, DH. */
426 regcache_raw_read (regcache,
427 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
428 /* ... Modify ... (always little endian). */
429 memcpy (raw_buf + 1, buf, 1);
430 /* ... Write. */
431 regcache_raw_write (regcache,
432 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
433 }
434 else
435 {
436 /* Read ... */
437 regcache_raw_read (regcache, gpnum, raw_buf);
438 /* ... Modify ... (always little endian). */
439 memcpy (raw_buf, buf, 1);
440 /* ... Write. */
441 regcache_raw_write (regcache, gpnum, raw_buf);
442 }
443 }
444 else if (i386_dword_regnum_p (gdbarch, regnum))
445 {
446 int gpnum = regnum - tdep->eax_regnum;
447
448 /* Read ... */
449 regcache_raw_read (regcache, gpnum, raw_buf);
450 /* ... Modify ... (always little endian). */
451 memcpy (raw_buf, buf, 4);
452 /* ... Write. */
453 regcache_raw_write (regcache, gpnum, raw_buf);
454 }
455 else
456 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
457 }
458
459
460
462 /* Register classes as defined in the psABI. */
463
/* Argument/return-value classes from the System V AMD64 psABI's
   parameter-passing classification algorithm.  */
enum amd64_reg_class
{
  AMD64_INTEGER,	/* Passed in GP registers.  */
  AMD64_SSE,		/* Passed in SSE (%xmm) registers.  */
  AMD64_SSEUP,		/* Upper half of the preceding SSE eightbyte.  */
  AMD64_X87,		/* x87 mantissa (long double).  */
  AMD64_X87UP,		/* x87 exponent + padding.  */
  AMD64_COMPLEX_X87,	/* complex long double.  */
  AMD64_NO_CLASS,	/* Padding / empty eightbyte.  */
  AMD64_MEMORY		/* Passed on the stack.  */
};
475
476 /* Return the union class of CLASS1 and CLASS2. See the psABI for
477 details. */
478
479 static enum amd64_reg_class
480 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
481 {
482 /* Rule (a): If both classes are equal, this is the resulting class. */
483 if (class1 == class2)
484 return class1;
485
486 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
487 is the other class. */
488 if (class1 == AMD64_NO_CLASS)
489 return class2;
490 if (class2 == AMD64_NO_CLASS)
491 return class1;
492
493 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
494 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
495 return AMD64_MEMORY;
496
497 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
498 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
499 return AMD64_INTEGER;
500
501 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
502 MEMORY is used as class. */
503 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
504 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
505 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
506 return AMD64_MEMORY;
507
508 /* Rule (f): Otherwise class SSE is used. */
509 return AMD64_SSE;
510 }
511
512 static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
513
514 /* Return non-zero if TYPE is a non-POD structure or union type. */
515
516 static int
517 amd64_non_pod_p (struct type *type)
518 {
519 /* ??? A class with a base class certainly isn't POD, but does this
520 catch all non-POD structure types? */
521 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
522 return 1;
523
524 return 0;
525 }
526
527 /* Classify TYPE according to the rules for aggregate (structures and
528 arrays) and union types, and store the result in CLASS. */
529
static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  /* 1. If the size of an object is larger than two eightbytes, or in
     C++, is a non-POD structure or union type, or contains
     unaligned fields, it has class memory.

     NOTE(review): the "unaligned fields" part of the rule is stated
     here but not actually checked by this code — confirm whether
     that case can arise for the types GDB feeds in.  */
  if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
     always two fields are considered.  The resulting class is
     calculated according to the classes of the fields in the
     eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      /* An array longer than one eightbyte fills the second eightbyte
	 with copies of the same element class.  */
      if (TYPE_LENGTH (type) > 8 && class[1] == AMD64_NO_CLASS)
	class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  /* Eightbyte (0 or 1) in which this field starts.  */
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];
	  int bitsize = TYPE_FIELD_BITSIZE (type, i);
	  int endpos;

	  /* A zero bitsize means a non-bitfield member; use the full
	     width of its type.  */
	  if (bitsize == 0)
	    bitsize = TYPE_LENGTH (subtype) * 8;
	  /* Eightbyte in which this field ends.  */
	  endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  class[pos] = amd64_merge_classes (class[pos], subclass[0]);
	  if (bitsize <= 64 && pos == 0 && endpos == 1)
	    /* This is a bit of an odd case:  We have a field that would
	       normally fit in one of the two eightbytes, except that
	       it is placed in a way that this field straddles them.
	       This has been seen with a structure containing an array.

	       The ABI is a bit unclear in this case, but we assume that
	       this field's class (stored in subclass[0]) must also be merged
	       into class[1].  In other words, our field has a piece stored
	       in the second eight-byte, and thus its class applies to
	       the second eight-byte as well.

	       In the case where the field length exceeds 8 bytes,
	       it should not be necessary to merge the field class
	       into class[1].  As LEN > 8, subclass[1] is necessarily
	       different from AMD64_NO_CLASS.  If subclass[1] is equal
	       to subclass[0], then the normal class[1]/subclass[1]
	       merging will take care of everything.  For subclass[1]
	       to be different from subclass[0], I can only see the case
	       where we have a SSE/SSEUP or X87/X87UP pair, which both
	       use up all 16 bytes of the aggregate, and are already
	       handled just fine (because each portion sits on its own
	       8-byte).  */
	    class[1] = amd64_merge_classes (class[1], subclass[0]);
	  if (pos == 0)
	    class[1] = amd64_merge_classes (class[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}
630
631 /* Classify TYPE, and store the result in CLASS. */
632
static void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  /* Default both eightbytes to NO_CLASS; a type that matches none of
     the cases below (e.g. an unexpected size) stays unclassified.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Arguments of complex T where T is one of the types float or
     double get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };

  */
  else if (code == TYPE_CODE_COMPLEX && len == 8)
    class[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    class[0] = class[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    class[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}
697
/* Implement the "return_value" gdbarch method for the System V AMD64
   ABI.  Reads the return value of TYPE into READBUF, or writes
   WRITEBUF into the return registers — exactly one of the two buffers
   is non-NULL.  Returns the convention that applies to TYPE.  */

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
	 can always find the return value just after the function has
	 returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
     returned in %st0 and the imaginary part in %st1.  */
  if (class[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  /* Each x87 register holds a 16-byte (80-bit padded) slot.  */
	  regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
	  regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
	  regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  /* Walk the value one eightbyte at a time, assigning each to the
     register dictated by its class.  */
  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  Only the 2-byte
	     exponent is transferred (hence len = 2) at offset 8.  */
	  gdb_assert (i > 0 && class[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
830
831
/* Push the NARGS arguments in ARGS for an inferior call onto the
   stack/into registers per the System V AMD64 psABI.  SP is the
   current stack pointer; STRUCT_RETURN non-zero means %rdi is
   reserved for the hidden return-value pointer.  Returns the new
   (16-byte aligned) stack pointer.  */

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,		/* %rdi */
    AMD64_RSI_REGNUM,		/* %rsi */
    AMD64_RDX_REGNUM,		/* %rdx */
    AMD64_RCX_REGNUM,		/* %rcx */
    AMD64_R8_REGNUM,		/* %r8 */
    AMD64_R9_REGNUM		/* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  int num_stack_args = 0;	/* Count of arguments passed on the stack.  */
  int num_elements = 0;		/* Total eightbytes of stack space needed.  */
  int element = 0;		/* Current eightbyte slot being written.  */
  int integer_reg = 0;		/* Next free entry in integer_regnum.  */
  int sse_reg = 0;		/* Next free entry in sse_regnum.  */
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  /* First pass: assign each argument either to registers (written
     immediately) or to the stack_args list.  */
  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
	 this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (class[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (class[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
	 argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (class[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  /* Second half of a 16-byte SSE value: upper half of
		     the SSE register just consumed.  */
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      /* Zero-pad a trailing partial eightbyte.  */
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Second pass: write out the stack-resident arguments.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
964
965 static CORE_ADDR
966 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
967 struct regcache *regcache, CORE_ADDR bp_addr,
968 int nargs, struct value **args, CORE_ADDR sp,
969 int struct_return, CORE_ADDR struct_addr)
970 {
971 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
972 gdb_byte buf[8];
973
974 /* Pass arguments. */
975 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
976
977 /* Pass "hidden" argument". */
978 if (struct_return)
979 {
980 store_unsigned_integer (buf, 8, byte_order, struct_addr);
981 regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
982 }
983
984 /* Store return address. */
985 sp -= 8;
986 store_unsigned_integer (buf, 8, byte_order, bp_addr);
987 write_memory (sp, buf, 8);
988
989 /* Finally, update the stack pointer... */
990 store_unsigned_integer (buf, 8, byte_order, sp);
991 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
992
993 /* ...and fake a frame pointer. */
994 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
995
996 return sp + 16;
997 }
998
999 /* Displaced instruction handling. */
1001
1002 /* A partially decoded instruction.
1003 This contains enough details for displaced stepping purposes. */
1004
/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};
1019
/* Per-step state for displaced stepping of one instruction.  */

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used;			/* Non-zero if a scratch register is in use.  */
  int tmp_regno;		/* Which register was commandeered.  */
  ULONGEST tmp_save;		/* Its original value, to restore later.  */

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};
1037
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

/* Indexed by the primary opcode byte: non-zero iff that one-byte
   opcode is followed by a ModRM byte.  Consulted by
   amd64_get_insn_details.  */

static const unsigned char onebyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
1064
/* Indexed by the second opcode byte (the one following the 0x0f
   escape): non-zero iff that two-byte opcode is followed by a ModRM
   byte.  Consulted by amd64_get_insn_details.  */

static const unsigned char twobyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
1087
1088 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
1089
/* Return non-zero if PFX is a REX instruction prefix byte (see
   REX_PREFIX_P in opcode/i386.h).  */

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}
1095
1096 /* Skip the legacy instruction prefixes in INSN.
1097 We assume INSN is properly sentineled so we don't have to worry
1098 about falling off the end of the buffer. */
1099
1100 static gdb_byte *
1101 amd64_skip_prefixes (gdb_byte *insn)
1102 {
1103 while (1)
1104 {
1105 switch (*insn)
1106 {
1107 case DATA_PREFIX_OPCODE:
1108 case ADDR_PREFIX_OPCODE:
1109 case CS_PREFIX_OPCODE:
1110 case DS_PREFIX_OPCODE:
1111 case ES_PREFIX_OPCODE:
1112 case FS_PREFIX_OPCODE:
1113 case GS_PREFIX_OPCODE:
1114 case SS_PREFIX_OPCODE:
1115 case LOCK_PREFIX_OPCODE:
1116 case REPE_PREFIX_OPCODE:
1117 case REPNE_PREFIX_OPCODE:
1118 ++insn;
1119 continue;
1120 default:
1121 break;
1122 }
1123 break;
1124 }
1125
1126 return insn;
1127 }
1128
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  /* Sanity check: only the low eight bits can be set, and at least one
     of them must still be clear.  */
  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
1202
/* Extract the details of INSN that we need into DETAILS: prefix and
   opcode offsets, opcode length, and ModRM offset (if any).  INSN must
   be sentineled (padded with trailing bytes, see
   amd64_displaced_step_copy_insn) so that prefix scanning cannot run
   off the end of the buffer.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
1266
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.

   DSC holds the (already copied) instruction buffer to patch and
   records the scratch register used; FROM is the address of the
   original instruction; REGS is used to read/write the scratch
   register.  TO is currently unused here.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
					  dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  /* Save the scratch register's old value so the fixup pass can
     restore it after the single-step.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  /* Load the scratch register with the address %rip would have had.  */
  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
1326
1327 static void
1328 fixup_displaced_copy (struct gdbarch *gdbarch,
1329 struct displaced_step_closure *dsc,
1330 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1331 {
1332 const struct amd64_insn *details = &dsc->insn_details;
1333
1334 if (details->modrm_offset != -1)
1335 {
1336 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1337
1338 if ((modrm & 0xc7) == 0x05)
1339 {
1340 /* The insn uses rip-relative addressing.
1341 Deal with it. */
1342 fixup_riprel (gdbarch, dsc, from, to, regs);
1343 }
1344 }
1345 }
1346
/* Copy the instruction at FROM to scratch address TO for displaced
   stepping: read it into a sentineled buffer, decode it, force a nop
   after a syscall (see below), rewrite any %rip-relative addressing,
   and write the result to TO.  Returns a heap-allocated closure
   describing what was done (presumably freed by the displaced-stepping
   core — confirm against infrun).  */

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}
1398
1399 static int
1400 amd64_absolute_jmp_p (const struct amd64_insn *details)
1401 {
1402 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1403
1404 if (insn[0] == 0xff)
1405 {
1406 /* jump near, absolute indirect (/4) */
1407 if ((insn[1] & 0x38) == 0x20)
1408 return 1;
1409
1410 /* jump far, absolute indirect (/5) */
1411 if ((insn[1] & 0x38) == 0x28)
1412 return 1;
1413 }
1414
1415 return 0;
1416 }
1417
1418 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1419
1420 static int
1421 amd64_jmp_p (const struct amd64_insn *details)
1422 {
1423 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1424
1425 /* jump short, relative. */
1426 if (insn[0] == 0xeb)
1427 return 1;
1428
1429 /* jump near, relative. */
1430 if (insn[0] == 0xe9)
1431 return 1;
1432
1433 return amd64_absolute_jmp_p (details);
1434 }
1435
1436 static int
1437 amd64_absolute_call_p (const struct amd64_insn *details)
1438 {
1439 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1440
1441 if (insn[0] == 0xff)
1442 {
1443 /* Call near, absolute indirect (/2) */
1444 if ((insn[1] & 0x38) == 0x10)
1445 return 1;
1446
1447 /* Call far, absolute indirect (/3) */
1448 if ((insn[1] & 0x38) == 0x18)
1449 return 1;
1450 }
1451
1452 return 0;
1453 }
1454
1455 static int
1456 amd64_ret_p (const struct amd64_insn *details)
1457 {
1458 /* NOTE: gcc can emit "repz ; ret". */
1459 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1460
1461 switch (insn[0])
1462 {
1463 case 0xc2: /* ret near, pop N bytes */
1464 case 0xc3: /* ret near */
1465 case 0xca: /* ret far, pop N bytes */
1466 case 0xcb: /* ret far */
1467 case 0xcf: /* iret */
1468 return 1;
1469
1470 default:
1471 return 0;
1472 }
1473 }
1474
1475 static int
1476 amd64_call_p (const struct amd64_insn *details)
1477 {
1478 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1479
1480 if (amd64_absolute_call_p (details))
1481 return 1;
1482
1483 /* call near, relative */
1484 if (insn[0] == 0xe8)
1485 return 1;
1486
1487 return 0;
1488 }
1489
1490 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1491 length in bytes. Otherwise, return zero. */
1492
1493 static int
1494 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1495 {
1496 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1497
1498 if (insn[0] == 0x0f && insn[1] == 0x05)
1499 {
1500 *lengthp = 2;
1501 return 1;
1502 }
1503
1504 return 0;
1505 }
1506
1507 /* Classify the instruction at ADDR using PRED.
1508 Throw an error if the memory can't be read. */
1509
1510 static int
1511 amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
1512 int (*pred) (const struct amd64_insn *))
1513 {
1514 struct amd64_insn details;
1515 gdb_byte *buf;
1516 int len, classification;
1517
1518 len = gdbarch_max_insn_length (gdbarch);
1519 buf = alloca (len);
1520
1521 read_code (addr, buf, len);
1522 amd64_get_insn_details (buf, &details);
1523
1524 classification = pred (&details);
1525
1526 return classification;
1527 }
1528
/* The gdbarch insn_is_call method.  Return non-zero if the instruction
   at ADDR is a call; throws an error if ADDR can't be read.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}
1536
/* The gdbarch insn_is_ret method.  Return non-zero if the instruction
   at ADDR is a return; throws an error if ADDR can't be read.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}
1544
/* The gdbarch insn_is_jump method.  Return non-zero if the instruction
   at ADDR is a jump; throws an error if ADDR can't be read.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}
1552
1553 /* Fix up the state of registers and memory after having single-stepped
1554 a displaced instruction. */
1555
1556 void
1557 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1558 struct displaced_step_closure *dsc,
1559 CORE_ADDR from, CORE_ADDR to,
1560 struct regcache *regs)
1561 {
1562 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1563 /* The offset we applied to the instruction's address. */
1564 ULONGEST insn_offset = to - from;
1565 gdb_byte *insn = dsc->insn_buf;
1566 const struct amd64_insn *insn_details = &dsc->insn_details;
1567
1568 if (debug_displaced)
1569 fprintf_unfiltered (gdb_stdlog,
1570 "displaced: fixup (%s, %s), "
1571 "insn = 0x%02x 0x%02x ...\n",
1572 paddress (gdbarch, from), paddress (gdbarch, to),
1573 insn[0], insn[1]);
1574
1575 /* If we used a tmp reg, restore it. */
1576
1577 if (dsc->tmp_used)
1578 {
1579 if (debug_displaced)
1580 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1581 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1582 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1583 }
1584
1585 /* The list of issues to contend with here is taken from
1586 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1587 Yay for Free Software! */
1588
1589 /* Relocate the %rip back to the program's instruction stream,
1590 if necessary. */
1591
1592 /* Except in the case of absolute or indirect jump or call
1593 instructions, or a return instruction, the new rip is relative to
1594 the displaced instruction; make it relative to the original insn.
1595 Well, signal handler returns don't need relocation either, but we use the
1596 value of %rip to recognize those; see below. */
1597 if (! amd64_absolute_jmp_p (insn_details)
1598 && ! amd64_absolute_call_p (insn_details)
1599 && ! amd64_ret_p (insn_details))
1600 {
1601 ULONGEST orig_rip;
1602 int insn_len;
1603
1604 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1605
1606 /* A signal trampoline system call changes the %rip, resuming
1607 execution of the main program after the signal handler has
1608 returned. That makes them like 'return' instructions; we
1609 shouldn't relocate %rip.
1610
1611 But most system calls don't, and we do need to relocate %rip.
1612
1613 Our heuristic for distinguishing these cases: if stepping
1614 over the system call instruction left control directly after
1615 the instruction, the we relocate --- control almost certainly
1616 doesn't belong in the displaced copy. Otherwise, we assume
1617 the instruction has put control where it belongs, and leave
1618 it unrelocated. Goodness help us if there are PC-relative
1619 system calls. */
1620 if (amd64_syscall_p (insn_details, &insn_len)
1621 && orig_rip != to + insn_len
1622 /* GDB can get control back after the insn after the syscall.
1623 Presumably this is a kernel bug.
1624 Fixup ensures its a nop, we add one to the length for it. */
1625 && orig_rip != to + insn_len + 1)
1626 {
1627 if (debug_displaced)
1628 fprintf_unfiltered (gdb_stdlog,
1629 "displaced: syscall changed %%rip; "
1630 "not relocating\n");
1631 }
1632 else
1633 {
1634 ULONGEST rip = orig_rip - insn_offset;
1635
1636 /* If we just stepped over a breakpoint insn, we don't backup
1637 the pc on purpose; this is to match behaviour without
1638 stepping. */
1639
1640 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1641
1642 if (debug_displaced)
1643 fprintf_unfiltered (gdb_stdlog,
1644 "displaced: "
1645 "relocated %%rip from %s to %s\n",
1646 paddress (gdbarch, orig_rip),
1647 paddress (gdbarch, rip));
1648 }
1649 }
1650
1651 /* If the instruction was PUSHFL, then the TF bit will be set in the
1652 pushed value, and should be cleared. We'll leave this for later,
1653 since GDB already messes up the TF flag when stepping over a
1654 pushfl. */
1655
1656 /* If the instruction was a call, the return address now atop the
1657 stack is the address following the copied instruction. We need
1658 to make it the address following the original instruction. */
1659 if (amd64_call_p (insn_details))
1660 {
1661 ULONGEST rsp;
1662 ULONGEST retaddr;
1663 const ULONGEST retaddr_len = 8;
1664
1665 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1666 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1667 retaddr = (retaddr - insn_offset) & 0xffffffffUL;
1668 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1669
1670 if (debug_displaced)
1671 fprintf_unfiltered (gdb_stdlog,
1672 "displaced: relocated return addr at %s "
1673 "to %s\n",
1674 paddress (gdbarch, rsp),
1675 paddress (gdbarch, retaddr));
1676 }
1677 }
1678
1679 /* If the instruction INSN uses RIP-relative addressing, return the
1680 offset into the raw INSN where the displacement to be adjusted is
1681 found. Returns 0 if the instruction doesn't use RIP-relative
1682 addressing. */
1683
1684 static int
1685 rip_relative_offset (struct amd64_insn *insn)
1686 {
1687 if (insn->modrm_offset != -1)
1688 {
1689 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1690
1691 if ((modrm & 0xc7) == 0x05)
1692 {
1693 /* The displacement is found right after the ModRM byte. */
1694 return insn->modrm_offset + 1;
1695 }
1696 }
1697
1698 return 0;
1699 }
1700
/* Write the LEN bytes at BUF to target memory at *TO, then advance *TO
   past the written bytes.
   NOTE(review): the result of target_write_memory is ignored, so a
   failed write is silently dropped — confirm whether callers rely on
   this being best-effort.  */

static void
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1707
1708 static void
1709 amd64_relocate_instruction (struct gdbarch *gdbarch,
1710 CORE_ADDR *to, CORE_ADDR oldloc)
1711 {
1712 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1713 int len = gdbarch_max_insn_length (gdbarch);
1714 /* Extra space for sentinels. */
1715 int fixup_sentinel_space = len;
1716 gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
1717 struct amd64_insn insn_details;
1718 int offset = 0;
1719 LONGEST rel32, newrel;
1720 gdb_byte *insn;
1721 int insn_length;
1722
1723 read_memory (oldloc, buf, len);
1724
1725 /* Set up the sentinel space so we don't have to worry about running
1726 off the end of the buffer. An excessive number of leading prefixes
1727 could otherwise cause this. */
1728 memset (buf + len, 0, fixup_sentinel_space);
1729
1730 insn = buf;
1731 amd64_get_insn_details (insn, &insn_details);
1732
1733 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1734
1735 /* Skip legacy instruction prefixes. */
1736 insn = amd64_skip_prefixes (insn);
1737
1738 /* Adjust calls with 32-bit relative addresses as push/jump, with
1739 the address pushed being the location where the original call in
1740 the user program would return to. */
1741 if (insn[0] == 0xe8)
1742 {
1743 gdb_byte push_buf[16];
1744 unsigned int ret_addr;
1745
1746 /* Where "ret" in the original code will return to. */
1747 ret_addr = oldloc + insn_length;
1748 push_buf[0] = 0x68; /* pushq $... */
1749 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1750 /* Push the push. */
1751 append_insns (to, 5, push_buf);
1752
1753 /* Convert the relative call to a relative jump. */
1754 insn[0] = 0xe9;
1755
1756 /* Adjust the destination offset. */
1757 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1758 newrel = (oldloc - *to) + rel32;
1759 store_signed_integer (insn + 1, 4, byte_order, newrel);
1760
1761 if (debug_displaced)
1762 fprintf_unfiltered (gdb_stdlog,
1763 "Adjusted insn rel32=%s at %s to"
1764 " rel32=%s at %s\n",
1765 hex_string (rel32), paddress (gdbarch, oldloc),
1766 hex_string (newrel), paddress (gdbarch, *to));
1767
1768 /* Write the adjusted jump into its displaced location. */
1769 append_insns (to, 5, insn);
1770 return;
1771 }
1772
1773 offset = rip_relative_offset (&insn_details);
1774 if (!offset)
1775 {
1776 /* Adjust jumps with 32-bit relative addresses. Calls are
1777 already handled above. */
1778 if (insn[0] == 0xe9)
1779 offset = 1;
1780 /* Adjust conditional jumps. */
1781 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1782 offset = 2;
1783 }
1784
1785 if (offset)
1786 {
1787 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1788 newrel = (oldloc - *to) + rel32;
1789 store_signed_integer (insn + offset, 4, byte_order, newrel);
1790 if (debug_displaced)
1791 fprintf_unfiltered (gdb_stdlog,
1792 "Adjusted insn rel32=%s at %s to"
1793 " rel32=%s at %s\n",
1794 hex_string (rel32), paddress (gdbarch, oldloc),
1795 hex_string (newrel), paddress (gdbarch, *to));
1796 }
1797
1798 /* Write the adjusted instruction into its displaced location. */
1799 append_insns (to, insn_length, buf);
1800 }
1801
1802
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS

/* Frame cache shared by the amd64 prologue analysis and unwinders.  */

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  /* Non-zero once BASE is known to be valid.  */
  int base_p;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  Entries start out as offsets and are later
     converted to addresses; -1 means "invalid" in either
     interpretation (see amd64_init_frame_cache).  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  /* Register holding the pre-realignment stack pointer, or -1 (see
     amd64_analyze_stack_align).  */
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};
1823
1824 /* Initialize a frame cache. */
1825
1826 static void
1827 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1828 {
1829 int i;
1830
1831 /* Base address. */
1832 cache->base = 0;
1833 cache->base_p = 0;
1834 cache->sp_offset = -8;
1835 cache->pc = 0;
1836
1837 /* Saved registers. We initialize these to -1 since zero is a valid
1838 offset (that's where %rbp is supposed to be stored).
1839 The values start out as being offsets, and are later converted to
1840 addresses (at which point -1 is interpreted as an address, still meaning
1841 "invalid"). */
1842 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1843 cache->saved_regs[i] = -1;
1844 cache->saved_sp = 0;
1845 cache->saved_sp_reg = -1;
1846
1847 /* Frameless until proven otherwise. */
1848 cache->frameless_p = 1;
1849 }
1850
1851 /* Allocate and initialize a frame cache. */
1852
1853 static struct amd64_frame_cache *
1854 amd64_alloc_frame_cache (void)
1855 {
1856 struct amd64_frame_cache *cache;
1857
1858 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1859 amd64_init_frame_cache (cache);
1860 return cache;
1861 }
1862
/* GCC 4.4 and later can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_code (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Record which register holds the pre-alignment stack pointer, but
     only if CURRENT_PC has passed the "andq" (OFFSET_AND).  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
}
2017
2018 /* Similar to amd64_analyze_stack_align for x32. */
2019
2020 static CORE_ADDR
2021 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2022 struct amd64_frame_cache *cache)
2023 {
2024 /* There are 2 code sequences to re-align stack before the frame
2025 gets set up:
2026
2027 1. Use a caller-saved saved register:
2028
2029 leaq 8(%rsp), %reg
2030 andq $-XXX, %rsp
2031 pushq -8(%reg)
2032
2033 or
2034
2035 [addr32] leal 8(%rsp), %reg
2036 andl $-XXX, %esp
2037 [addr32] pushq -8(%reg)
2038
2039 2. Use a callee-saved saved register:
2040
2041 pushq %reg
2042 leaq 16(%rsp), %reg
2043 andq $-XXX, %rsp
2044 pushq -8(%reg)
2045
2046 or
2047
2048 pushq %reg
2049 [addr32] leal 16(%rsp), %reg
2050 andl $-XXX, %esp
2051 [addr32] pushq -8(%reg)
2052
2053 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2054
2055 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2056 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2057
2058 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2059
2060 0x83 0xe4 0xf0 andl $-16, %esp
2061 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2062 */
2063
2064 gdb_byte buf[19];
2065 int reg, r;
2066 int offset, offset_and;
2067
2068 if (target_read_memory (pc, buf, sizeof buf))
2069 return pc;
2070
2071 /* Skip optional addr32 prefix. */
2072 offset = buf[0] == 0x67 ? 1 : 0;
2073
2074 /* Check caller-saved saved register. The first instruction has
2075 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2076 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2077 && buf[offset + 1] == 0x8d
2078 && buf[offset + 3] == 0x24
2079 && buf[offset + 4] == 0x8)
2080 {
2081 /* MOD must be binary 10 and R/M must be binary 100. */
2082 if ((buf[offset + 2] & 0xc7) != 0x44)
2083 return pc;
2084
2085 /* REG has register number. */
2086 reg = (buf[offset + 2] >> 3) & 7;
2087
2088 /* Check the REX.R bit. */
2089 if ((buf[offset] & 0x4) != 0)
2090 reg += 8;
2091
2092 offset += 5;
2093 }
2094 else
2095 {
2096 /* Check callee-saved saved register. The first instruction
2097 has to be "pushq %reg". */
2098 reg = 0;
2099 if ((buf[offset] & 0xf6) == 0x40
2100 && (buf[offset + 1] & 0xf8) == 0x50)
2101 {
2102 /* Check the REX.B bit. */
2103 if ((buf[offset] & 1) != 0)
2104 reg = 8;
2105
2106 offset += 1;
2107 }
2108 else if ((buf[offset] & 0xf8) != 0x50)
2109 return pc;
2110
2111 /* Get register. */
2112 reg += buf[offset] & 0x7;
2113
2114 offset++;
2115
2116 /* Skip optional addr32 prefix. */
2117 if (buf[offset] == 0x67)
2118 offset++;
2119
2120 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2121 "leal 16(%rsp), %reg". */
2122 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2123 || buf[offset + 1] != 0x8d
2124 || buf[offset + 3] != 0x24
2125 || buf[offset + 4] != 0x10)
2126 return pc;
2127
2128 /* MOD must be binary 10 and R/M must be binary 100. */
2129 if ((buf[offset + 2] & 0xc7) != 0x44)
2130 return pc;
2131
2132 /* REG has register number. */
2133 r = (buf[offset + 2] >> 3) & 7;
2134
2135 /* Check the REX.R bit. */
2136 if ((buf[offset] & 0x4) != 0)
2137 r += 8;
2138
2139 /* Registers in pushq and leaq have to be the same. */
2140 if (reg != r)
2141 return pc;
2142
2143 offset += 5;
2144 }
2145
 2146   /* Register can't be %rsp nor %rbp.  */
2147 if (reg == 4 || reg == 5)
2148 return pc;
2149
2150 /* The next instruction may be "andq $-XXX, %rsp" or
2151 "andl $-XXX, %esp". */
2152 if (buf[offset] != 0x48)
2153 offset--;
2154
2155 if (buf[offset + 2] != 0xe4
2156 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2157 return pc;
2158
2159 offset_and = offset;
2160 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2161
2162 /* Skip optional addr32 prefix. */
2163 if (buf[offset] == 0x67)
2164 offset++;
2165
2166 /* The next instruction has to be "pushq -8(%reg)". */
2167 r = 0;
2168 if (buf[offset] == 0xff)
2169 offset++;
2170 else if ((buf[offset] & 0xf6) == 0x40
2171 && buf[offset + 1] == 0xff)
2172 {
2173 /* Check the REX.B bit. */
2174 if ((buf[offset] & 0x1) != 0)
2175 r = 8;
2176 offset += 2;
2177 }
2178 else
2179 return pc;
2180
2181 /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2182 01. */
2183 if (buf[offset + 1] != 0xf8
2184 || (buf[offset] & 0xf8) != 0x70)
2185 return pc;
2186
2187 /* R/M has register. */
2188 r += buf[offset] & 7;
2189
2190 /* Registers in leaq and pushq have to be the same. */
2191 if (reg != r)
2192 return pc;
2193
2194 if (current_pc > pc + offset_and)
2195 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2196
2197 return min (pc + offset + 2, current_pc);
2198 }
2199
/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5 (or 0x48 0x8b 0xec)

   or (for the X32 ABI):

      pushq %rbp        0x55
      movl %esp, %ebp   0x89 0xe5 (or 0x8b 0xec)

   Any function that doesn't start with one of these sequences will be
   assumed to have no prologue and thus no valid frame pointer in
   %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
			CORE_ADDR pc, CORE_ADDR current_pc,
			struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* There are two variations of movq %rsp, %rbp.  */
  static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
  static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
  /* Ditto for movl %esp, %ebp.  */
  static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
  static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };

  gdb_byte buf[3];
  gdb_byte op;

  /* Nothing to analyze if we have not advanced past PC.  */
  if (current_pc <= pc)
    return current_pc;

  /* First skip over any stack-realignment sequence, updating CACHE's
     saved-SP register if one is found.  X32 uses 32-bit pointers, so
     it has its own recognizer.  */
  if (gdbarch_ptr_bit (gdbarch) == 32)
    pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
  else
    pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_code_unsigned_integer (pc, 1, byte_order);

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
	 starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
	return current_pc;

      read_code (pc + 1, buf, 3);

      /* Check for `movq %rsp, %rbp'.  */
      if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
	  || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
	{
	  /* OK, we actually have a frame.  */
	  cache->frameless_p = 0;
	  return pc + 4;
	}

      /* For X32, also check for `movl %esp, %ebp'.  */
      if (gdbarch_ptr_bit (gdbarch) == 32)
	{
	  if (memcmp (buf, mov_esp_ebp_1, 2) == 0
	      || memcmp (buf, mov_esp_ebp_2, 2) == 0)
	    {
	      /* OK, we actually have a frame.  */
	      cache->frameless_p = 0;
	      return pc + 3;
	    }
	}

      /* Only the `pushq %rbp' was executed; stop right after it.  */
      return pc + 1;
    }

  return pc;
}
2283
/* Work around false termination of prologue - GCC PR debug/48827.

   START_PC is the first instruction of a function, PC is its minimal already
   determined advanced address.  Function returns PC if it has nothing to do.

   84 c0                test   %al,%al
   74 23                je     after
   <-- here is 0 lines advance - the false prologue end marker.
   0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
   0f 29 4d 80          movaps %xmm1,-0x80(%rbp)
   0f 29 55 90          movaps %xmm2,-0x70(%rbp)
   0f 29 5d a0          movaps %xmm3,-0x60(%rbp)
   0f 29 65 b0          movaps %xmm4,-0x50(%rbp)
   0f 29 6d c0          movaps %xmm5,-0x40(%rbp)
   0f 29 75 d0          movaps %xmm6,-0x30(%rbp)
   0f 29 7d e0          movaps %xmm7,-0x20(%rbp)
   after:  */

static CORE_ADDR
amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
{
  struct symtab_and_line start_pc_sal, next_sal;
  /* Worst case: test/je (4 bytes) plus eight movaps with 32-bit
     displacements (7 bytes each).  */
  gdb_byte buf[4 + 8 * 7];
  int offset, xmmreg;

  if (pc == start_pc)
    return pc;

  /* The workaround only applies to code from GCC >= 4.6 with usable
     line info whose first line entry starts exactly at START_PC.  */
  start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
  if (start_pc_sal.symtab == NULL
      || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
			       (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
      || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
    return pc;

  /* The false prologue end is marked by a second line entry with the
     same line number.  */
  next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
  if (next_sal.line != start_pc_sal.line)
    return pc;

  /* START_PC can be from overlayed memory, ignored here.  */
  if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
    return pc;

  /* test %al,%al */
  if (buf[0] != 0x84 || buf[1] != 0xc0)
    return pc;
  /* je AFTER */
  if (buf[2] != 0x74)
    return pc;

  offset = 4;
  for (xmmreg = 0; xmmreg < 8; xmmreg++)
    {
      /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
      if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
	  || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
	return pc;

      /* 0b01?????? */
      if ((buf[offset + 2] & 0xc0) == 0x40)
	{
	  /* 8-bit displacement.  */
	  offset += 4;
	}
      /* 0b10?????? */
      else if ((buf[offset + 2] & 0xc0) == 0x80)
	{
	  /* 32-bit displacement.  */
	  offset += 7;
	}
      else
	return pc;
    }

  /* je AFTER: the branch displacement must land exactly past the
     movaps block we just matched.  */
  if (offset - 4 != buf[3])
    return pc;

  return next_sal.end;
}
2364
2365 /* Return PC of first real instruction. */
2366
2367 static CORE_ADDR
2368 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2369 {
2370 struct amd64_frame_cache cache;
2371 CORE_ADDR pc;
2372 CORE_ADDR func_addr;
2373
2374 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2375 {
2376 CORE_ADDR post_prologue_pc
2377 = skip_prologue_using_sal (gdbarch, func_addr);
2378 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
2379
2380 /* Clang always emits a line note before the prologue and another
2381 one after. We trust clang to emit usable line notes. */
2382 if (post_prologue_pc
2383 && (cust != NULL
2384 && COMPUNIT_PRODUCER (cust) != NULL
2385 && strncmp (COMPUNIT_PRODUCER (cust), "clang ",
2386 sizeof ("clang ") - 1) == 0))
2387 return max (start_pc, post_prologue_pc);
2388 }
2389
2390 amd64_init_frame_cache (&cache);
2391 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2392 &cache);
2393 if (cache.frameless_p)
2394 return start_pc;
2395
2396 return amd64_skip_xmm_prologue (pc, start_pc);
2397 }
2398
2399
2401 /* Normal frames. */
2402
/* Fill in CACHE for the normal frame THIS_FRAME: locate the frame
   base, the caller's %rsp, and the addresses of the saved registers.
   Saved-register entries start out as offsets from the frame base and
   are converted to absolute addresses at the end.  */

static void
amd64_frame_cache_1 (struct frame_info *this_frame,
		     struct amd64_frame_cache *cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];
  int i;

  /* Analyze the prologue up to the current PC to learn whether a
     frame was set up and which registers were saved.  */
  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
			    cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
	 function, or somewhere half-way its prologue, the function's
	 frame probably hasn't been fully setup yet.  Try to
	 reconstruct the base address for the stack frame by looking
	 at the stack pointer.  For truly "frameless" functions this
	 might work too.  */

      if (cache->saved_sp_reg != -1)
	{
	  /* Stack pointer has been saved.  */
	  get_frame_register (this_frame, cache->saved_sp_reg, buf);
	  cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);

	  /* We're halfway aligning the stack.  */
	  cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
	  cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

	  /* This will be added back below.  */
	  cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
	}
      else
	{
	  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
	  cache->base = extract_unsigned_integer (buf, 8, byte_order)
			+ cache->sp_offset;
	}
    }
  else
    {
      /* We have a proper frame; the base is simply %rbp.  */
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  cache->base_p = 1;
}
2471
/* Allocate and fill the frame cache for THIS_FRAME, memoizing it in
   *THIS_CACHE.  Memory/register unavailability during the fill is
   swallowed (the cache is then left with base_p == 0); any other
   error is re-thrown.  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  volatile struct gdb_exception ex;
  struct amd64_frame_cache *cache;

  if (*this_cache)
    return *this_cache;

  /* Publish the cache before filling it, so a partially-filled cache
     is still returned if the fill throws.  */
  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      amd64_frame_cache_1 (this_frame, cache);
    }
  if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
    throw_exception (ex);

  return cache;
}
2493
2494 static enum unwind_stop_reason
2495 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2496 void **this_cache)
2497 {
2498 struct amd64_frame_cache *cache =
2499 amd64_frame_cache (this_frame, this_cache);
2500
2501 if (!cache->base_p)
2502 return UNWIND_UNAVAILABLE;
2503
2504 /* This marks the outermost frame. */
2505 if (cache->base == 0)
2506 return UNWIND_OUTERMOST;
2507
2508 return UNWIND_NO_REASON;
2509 }
2510
2511 static void
2512 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
2513 struct frame_id *this_id)
2514 {
2515 struct amd64_frame_cache *cache =
2516 amd64_frame_cache (this_frame, this_cache);
2517
2518 if (!cache->base_p)
2519 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2520 else if (cache->base == 0)
2521 {
2522 /* This marks the outermost frame. */
2523 return;
2524 }
2525 else
2526 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2527 }
2528
2529 static struct value *
2530 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2531 int regnum)
2532 {
2533 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2534 struct amd64_frame_cache *cache =
2535 amd64_frame_cache (this_frame, this_cache);
2536
2537 gdb_assert (regnum >= 0);
2538
2539 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2540 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2541
2542 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2543 return frame_unwind_got_memory (this_frame, regnum,
2544 cache->saved_regs[regnum]);
2545
2546 return frame_unwind_got_register (this_frame, regnum, regnum);
2547 }
2548
/* The prologue-analysis based unwinder for normal AMD64 frames.  */

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_unwind_stop_reason,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,				/* unwind_data */
  default_frame_sniffer
};
2558
/* Generate a bytecode expression to get the value of the saved PC.  */

static void
amd64_gen_return_address (struct gdbarch *gdbarch,
			  struct agent_expr *ax, struct axs_value *value,
			  CORE_ADDR scope)
{
  /* The following sequence assumes the traditional use of the base
     register: the return address lives at 8(%rbp).  Emit
     "%rbp + 8" and mark the result as an lvalue in memory of type
     %rip, so the agent dereferences it.  */
  ax_reg (ax, AMD64_RBP_REGNUM);
  ax_const_l (ax, 8);
  ax_simple (ax, aop_add);
  value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
  value->kind = axs_lvalue_memory;
}
2575
2576
/* Signal trampolines.  */

/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   64-bit variants.  This would require using identical frame caches
   on both platforms.  */

/* Build the frame cache for a signal-trampoline frame.  Register
   save locations come from the OS-specific sigcontext layout
   described by tdep->sc_reg_offset.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  volatile struct gdb_exception ex;
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

      /* Map each saved register to its address inside the
	 sigcontext.  An offset of -1 means "not present".  */
      addr = tdep->sigcontext_addr (this_frame);
      gdb_assert (tdep->sc_reg_offset);
      gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
      for (i = 0; i < tdep->sc_num_regs; i++)
	if (tdep->sc_reg_offset[i] != -1)
	  cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

      cache->base_p = 1;
    }
  /* Unavailable memory/registers just leave base_p clear; re-throw
     anything else.  */
  if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
    throw_exception (ex);

  *this_cache = cache;
  return cache;
}
2621
2622 static enum unwind_stop_reason
2623 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2624 void **this_cache)
2625 {
2626 struct amd64_frame_cache *cache =
2627 amd64_sigtramp_frame_cache (this_frame, this_cache);
2628
2629 if (!cache->base_p)
2630 return UNWIND_UNAVAILABLE;
2631
2632 return UNWIND_NO_REASON;
2633 }
2634
2635 static void
2636 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2637 void **this_cache, struct frame_id *this_id)
2638 {
2639 struct amd64_frame_cache *cache =
2640 amd64_sigtramp_frame_cache (this_frame, this_cache);
2641
2642 if (!cache->base_p)
2643 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2644 else if (cache->base == 0)
2645 {
2646 /* This marks the outermost frame. */
2647 return;
2648 }
2649 else
2650 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2651 }
2652
/* Implement the "prev_register" frame_unwind method for sigtramp
   frames.  The normal-frame logic works once the sigtramp cache (with
   its sigcontext-based saved_regs) has been filled in.  */

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
2662
2663 static int
2664 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2665 struct frame_info *this_frame,
2666 void **this_cache)
2667 {
2668 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2669
2670 /* We shouldn't even bother if we don't have a sigcontext_addr
2671 handler. */
2672 if (tdep->sigcontext_addr == NULL)
2673 return 0;
2674
2675 if (tdep->sigtramp_p != NULL)
2676 {
2677 if (tdep->sigtramp_p (this_frame))
2678 return 1;
2679 }
2680
2681 if (tdep->sigtramp_start != 0)
2682 {
2683 CORE_ADDR pc = get_frame_pc (this_frame);
2684
2685 gdb_assert (tdep->sigtramp_end != 0);
2686 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2687 return 1;
2688 }
2689
2690 return 0;
2691 }
2692
/* Unwinder for AMD64 signal-trampoline frames.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_unwind_stop_reason,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,				/* unwind_data */
  amd64_sigtramp_frame_sniffer
};
2702
2703
2705 static CORE_ADDR
2706 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2707 {
2708 struct amd64_frame_cache *cache =
2709 amd64_frame_cache (this_frame, this_cache);
2710
2711 return cache->base;
2712 }
2713
/* Default frame base handler: the same address serves as frame base,
   locals base and args base.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,	/* this_base */
  amd64_frame_base_address,	/* this_locals */
  amd64_frame_base_address	/* this_args */
};
2721
2722 /* Normal frames, but in a function epilogue. */
2723
2724 /* The epilogue is defined here as the 'ret' instruction, which will
2725 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2726 the function's stack frame. */
2727
2728 static int
2729 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2730 {
2731 gdb_byte insn;
2732 struct compunit_symtab *cust;
2733
2734 cust = find_pc_compunit_symtab (pc);
2735 if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
2736 return 0;
2737
2738 if (target_read_memory (pc, &insn, 1))
2739 return 0; /* Can't read memory at pc. */
2740
2741 if (insn != 0xc3) /* 'ret' instruction. */
2742 return 0;
2743
2744 return 1;
2745 }
2746
/* Sniffer for the epilogue unwinder: only the innermost frame can be
   in an epilogue.  */

static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) != 0)
    return 0;

  return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
				       get_frame_pc (this_frame));
}
2758
/* Build the frame cache for a frame stopped at a 'ret': the frame has
   already been torn down, so everything is reconstructed from %rsp.  */

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  volatile struct gdb_exception ex;
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Cache base will be %esp plus cache->sp_offset (-8).  */
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8,
					      byte_order) + cache->sp_offset;

      /* Cache pc will be the frame func.  */
      cache->pc = get_frame_pc (this_frame);

      /* The saved %esp will be at cache->base plus 16.  */
      cache->saved_sp = cache->base + 16;

      /* The saved %eip will be at cache->base plus 8.  */
      cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

      cache->base_p = 1;
    }
  /* Unavailable memory/registers just leave base_p clear; re-throw
     anything else.  */
  if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
    throw_exception (ex);

  return cache;
}
2797
2798 static enum unwind_stop_reason
2799 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2800 void **this_cache)
2801 {
2802 struct amd64_frame_cache *cache
2803 = amd64_epilogue_frame_cache (this_frame, this_cache);
2804
2805 if (!cache->base_p)
2806 return UNWIND_UNAVAILABLE;
2807
2808 return UNWIND_NO_REASON;
2809 }
2810
2811 static void
2812 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2813 void **this_cache,
2814 struct frame_id *this_id)
2815 {
2816 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2817 this_cache);
2818
2819 if (!cache->base_p)
2820 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2821 else
2822 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2823 }
2824
/* Unwinder for frames stopped in a function epilogue.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_unwind_stop_reason,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,				/* unwind_data */
  amd64_epilogue_frame_sniffer
};
2834
2835 static struct frame_id
2836 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2837 {
2838 CORE_ADDR fp;
2839
2840 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2841
2842 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2843 }
2844
2845 /* 16 byte align the SP per frame requirements. */
2846
2847 static CORE_ADDR
2848 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2849 {
2850 return sp & -(CORE_ADDR)16;
2851 }
2852
2853
2855 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2856 in the floating-point register set REGSET to register cache
2857 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2858
2859 static void
2860 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2861 int regnum, const void *fpregs, size_t len)
2862 {
2863 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2864 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2865
2866 gdb_assert (len >= tdep->sizeof_fpregset);
2867 amd64_supply_fxsave (regcache, regnum, fpregs);
2868 }
2869
2870 /* Collect register REGNUM from the register cache REGCACHE and store
2871 it in the buffer specified by FPREGS and LEN as described by the
2872 floating-point register set REGSET. If REGNUM is -1, do this for
2873 all registers in REGSET. */
2874
2875 static void
2876 amd64_collect_fpregset (const struct regset *regset,
2877 const struct regcache *regcache,
2878 int regnum, void *fpregs, size_t len)
2879 {
2880 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2881 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2882
2883 gdb_assert (len >= tdep->sizeof_fpregset);
2884 amd64_collect_fxsave (regcache, regnum, fpregs);
2885 }
2886
/* Register set definition for the AMD64 floating-point registers
   (fxsave layout).  */

const struct regset amd64_fpregset =
  {
    NULL, amd64_supply_fpregset, amd64_collect_fpregset
  };
2891
2892
2894 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2895 %rdi. We expect its value to be a pointer to the jmp_buf structure
2896 from which we extract the address that we will land at. This
2897 address is copied into PC. This routine returns non-zero on
2898 success. */
2899
2900 static int
2901 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2902 {
2903 gdb_byte buf[8];
2904 CORE_ADDR jb_addr;
2905 struct gdbarch *gdbarch = get_frame_arch (frame);
2906 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2907 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2908
2909 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2910 longjmp will land. */
2911 if (jb_pc_offset == -1)
2912 return 0;
2913
2914 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2915 jb_addr= extract_typed_address
2916 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2917 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2918 return 0;
2919
2920 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2921
2922 return 1;
2923 }
2924
/* Mapping used by the process-record support (installed below as
   tdep->record_regmap) from record register indices to AMD64 GDB
   register numbers.  */

static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};
2934
/* Initialize GDBARCH for the 64-bit AMD64 ABI: register layout and
   target description, type sizes, frame unwinders, call-dummy
   support, and SystemTap/agent-expression hooks.  Also called from
   amd64_x32_init_abi, which then overrides the pointer/long sizes.  */

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;
  static const char *const stap_integer_prefixes[] = { "$", NULL };
  static const char *const stap_register_prefixes[] = { "%", NULL };
  static const char *const stap_register_indirection_prefixes[] = { "(",
								    NULL };
  static const char *const stap_register_indirection_suffixes[] = { ")",
								    NULL };

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
  tdep->fpregset = &amd64_fpregset;

  /* Fall back on the default AMD64 target description if the target
     didn't provide one.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_amd64;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* Wire up the AVX-512 registers if the target description has
     them.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
    {
      tdep->zmmh_register_names = amd64_zmmh_names;
      tdep->k_register_names = amd64_k_names;
      tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
      tdep->ymm16h_register_names = amd64_ymmh_avx512_names;

      tdep->num_zmm_regs = 32;
      tdep->num_xmm_avx512_regs = 16;
      tdep->num_ymm_avx512_regs = 16;

      tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
      tdep->k0_regnum = AMD64_K0_REGNUM;
      tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
      tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
    }

  /* Ditto for AVX.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
    {
      tdep->ymmh_register_names = amd64_ymmh_names;
      tdep->num_ymm_regs = 16;
      tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
    }

  /* Ditto for MPX bound registers.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
    {
      tdep->mpx_register_names = amd64_mpx_names;
      tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
      tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
    }

  tdep->num_byte_regs = 20;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read_value (gdbarch,
					  amd64_pseudo_register_read_value);
  set_gdbarch_pseudo_register_write (gdbarch,
				     amd64_pseudo_register_write);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);

  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);

  set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);

  /* SystemTap variables and functions.  */
  set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
  set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
  set_gdbarch_stap_register_indirection_prefixes (gdbarch,
					  stap_register_indirection_prefixes);
  set_gdbarch_stap_register_indirection_suffixes (gdbarch,
					  stap_register_indirection_suffixes);
  set_gdbarch_stap_is_single_operand (gdbarch,
				      i386_stap_is_single_operand);
  set_gdbarch_stap_parse_special_token (gdbarch,
					i386_stap_parse_special_token);
  set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
  set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
  set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
}
3085
3086
3088 static struct type *
3089 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3090 {
3091 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3092
3093 switch (regnum - tdep->eax_regnum)
3094 {
3095 case AMD64_RBP_REGNUM: /* %ebp */
3096 case AMD64_RSP_REGNUM: /* %esp */
3097 return builtin_type (gdbarch)->builtin_data_ptr;
3098 case AMD64_RIP_REGNUM: /* %eip */
3099 return builtin_type (gdbarch)->builtin_func_ptr;
3100 }
3101
3102 return i386_pseudo_register_type (gdbarch, regnum);
3103 }
3104
/* Initialize GDBARCH for the X32 ABI: the full AMD64 setup, then
   32-bit longs and pointers and X32-specific pseudo registers.  */

void
amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;

  amd64_init_abi (info, gdbarch);

  /* Fall back on the default X32 target description.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_x32;
  tdep->tdesc = tdesc;

  /* X32 exposes %eip as a dword pseudo register as well.  */
  tdep->num_dword_regs = 17;
  set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);

  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_ptr_bit (gdbarch, 32);
}
3123
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_amd64_tdep (void);

/* Register the target descriptions for all supported variants of the
   architecture.  The individual registrations are independent of one
   another.  */

void
_initialize_amd64_tdep (void)
{
  /* x32 (ILP32) descriptions.  */
  initialize_tdesc_x32 ();
  initialize_tdesc_x32_avx ();
  initialize_tdesc_x32_avx512 ();

  /* Native 64-bit descriptions.  */
  initialize_tdesc_amd64 ();
  initialize_tdesc_amd64_avx ();
  initialize_tdesc_amd64_mpx ();
  initialize_tdesc_amd64_avx512 ();
}
3139
3140
3142 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3143 sense that the instruction pointer and data pointer are simply
3144 64-bit offsets into the code segment and the data segment instead
3145 of a selector offset pair. The functions below store the upper 32
3146 bits of these pointers (instead of just the 16-bits of the segment
3147 selector). */
3148
3149 /* Fill register REGNUM in REGCACHE with the appropriate
3150 floating-point or SSE register value from *FXSAVE. If REGNUM is
3151 -1, do this for all registers. This function masks off any of the
3152 reserved bits in *FXSAVE. */
3153
3154 void
3155 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3156 const void *fxsave)
3157 {
3158 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3159 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3160
3161 i387_supply_fxsave (regcache, regnum, fxsave);
3162
3163 if (fxsave
3164 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3165 {
3166 const gdb_byte *regs = fxsave;
3167
3168 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3169 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3170 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3171 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3172 }
3173 }
3174
3175 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3176
3177 void
3178 amd64_supply_xsave (struct regcache *regcache, int regnum,
3179 const void *xsave)
3180 {
3181 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3182 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3183
3184 i387_supply_xsave (regcache, regnum, xsave);
3185
3186 if (xsave
3187 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3188 {
3189 const gdb_byte *regs = xsave;
3190
3191 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3192 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
3193 regs + 12);
3194 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3195 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
3196 regs + 20);
3197 }
3198 }
3199
3200 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3201 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3202 all registers. This function doesn't touch any of the reserved
3203 bits in *FXSAVE. */
3204
3205 void
3206 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3207 void *fxsave)
3208 {
3209 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3210 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3211 gdb_byte *regs = fxsave;
3212
3213 i387_collect_fxsave (regcache, regnum, fxsave);
3214
3215 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3216 {
3217 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3218 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3219 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3220 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3221 }
3222 }
3223
3224 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3225
3226 void
3227 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3228 void *xsave, int gcore)
3229 {
3230 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3231 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3232 gdb_byte *regs = xsave;
3233
3234 i387_collect_xsave (regcache, regnum, xsave, gcore);
3235
3236 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3237 {
3238 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3239 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
3240 regs + 12);
3241 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3242 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
3243 regs + 20);
3244 }
3245 }
3246