/*	$NetBSD: frameasm.h,v 1.6.2.3 2007/12/27 00:42:55 mjf Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H
#include "opt_xen.h"

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */
#ifdef XEN
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; the hypervisor does it for us */
#define swapgs
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
#endif
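
/*
 * With the redefinition above, every "iretq" below becomes a jump into
 * the iret slot of the hypercall page; as noted further down,
 * HYPERVISOR_iret also performs the swapgs on our behalf. A sketch of
 * the expansion:
 *
 *	pushq	$0
 *	jmp	hypercall_page + (__HYPERVISOR_iret * 32)
 */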

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)	; \
	cld	/* C ABI requires the direction flag to be clear */

#define INTR_RESTORE_GPRS \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RAX(%rsp),%rax

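/*
 * The TF_* offsets are not defined here; they are generated from
 * struct trapframe by genassym, so the save/restore pair above is in
 * effect spilling and reloading the GPR portion of that structure.
 * A rough C view (field names and ordering are illustrative only, not
 * the authoritative definition):
 *
 *	struct trapframe {
 *		int64_t	tf_rdi, tf_rsi, tf_rdx, tf_rcx;
 *		int64_t	tf_r8, tf_r9, tf_r10, tf_r11;
 *		int64_t	tf_r12, tf_r13, tf_r14, tf_r15;
 *		int64_t	tf_rbp, tf_rbx, tf_rax;
 *		...	// segment regs, trap number, error code, iret frame
 *	};
 */
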
#define INTRENTRY \
	subq	$TF_REGSIZE,%rsp	; \
	testq	$SEL_UPL,TF_CS(%rsp)	; \
	je	98f			; \
	swapgs				; \
	movw	%gs,TF_GS(%rsp)		; \
	movw	%fs,TF_FS(%rsp)		; \
	movw	%es,TF_ES(%rsp)		; \
	movw	%ds,TF_DS(%rsp)		; \
98:	INTR_SAVE_GPRS

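/*
 * Typical use, as a sketch only (the real stubs live in vector.S and
 * are generated by macros there): the CPU and the stub together
 * complete the trapframe, then INTRENTRY saves the rest:
 *
 *	IDTVEC(trap03)
 *		pushq	$0		// dummy error code
 *		pushq	$T_BPTFLT	// trap type
 *		INTRENTRY
 *		... handle the trap ...
 *		INTRFASTEXIT
 */
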
#ifndef XEN
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS		; \
	testq	$SEL_UPL,TF_CS(%rsp)	/* Interrupted %cs */ ; \
	je	99f			; \
	cli				; \
	swapgs				; \
	movw	TF_GS(%rsp),%gs		; \
	movw	TF_FS(%rsp),%fs		; \
	movw	TF_ES(%rsp),%es		; \
	movw	TF_DS(%rsp),%ds		; \
99:	addq	$TF_REGSIZE+16,%rsp	/* + T_xxx and error code */ ; \
	iretq

#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	pushq	%r13			;

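/*
 * INTR_RECURSE_HWFRAME fakes the frame the CPU would have pushed for a
 * hardware interrupt: %ss, the pre-push %rsp, %rflags, %cs, and
 * finally a return address taken from %r13, which the caller must have
 * loaded beforehand. A hedged sketch of replaying a deferred interrupt
 * (the entry label is illustrative):
 *
 *	movq	$1f,%r13		// where the eventual iretq resumes
 *	INTR_RECURSE_HWFRAME
 *	jmp	recurse_entry		// stub that expects this frame
 * 1:	...
 */
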
#else	/* !XEN */
/*
 * Disabling events before going back to user mode would be a BAD idea,
 * so we leave them alone here. We do not restore %gs either, since
 * HYPERVISOR_iret will do the swapgs for us.
 */
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS		; \
	testq	$SEL_UPL,TF_CS(%rsp)	; \
	je	99f			; \
	movw	TF_FS(%rsp),%fs		; \
	movw	TF_ES(%rsp),%es		; \
	movw	TF_DS(%rsp),%ds		; \
99:	addq	$TF_REGSIZE+16,%rsp	/* + T_xxx and error code */ ; \
	iretq

/* We must fix up %cs, as even kernel mode runs at CPL 3 */
#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	andb	$0xfc,(%rsp)		/* clear RPL bits of saved %cs */ ; \
	pushq	%r13			;

#endif /* !XEN */

#define DO_DEFERRED_SWITCH \
	cmpq	$0, CPUVAR(WANT_PMAPLOAD)		; \
	jz	1f					; \
	call	_C_LABEL(do_pmap_load)			; \
1:

#define CHECK_DEFERRED_SWITCH \
	cmpq	$0, CPUVAR(WANT_PMAPLOAD)

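/*
 * CHECK_DEFERRED_SWITCH only sets the condition codes; the caller
 * branches on the result itself. A sketch (labels illustrative):
 *
 *	CHECK_DEFERRED_SWITCH
 *	jnz	9f			// pmap load wanted
 *	...
 * 9:	call	_C_LABEL(do_pmap_load)
 */
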
#define CHECK_ASTPENDING(reg)	cmpq	$0, reg		; \
	je	99f					; \
	cmpl	$0, L_MD_ASTPENDING(reg)		; \
99:

#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)

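/*
 * Usage sketch (illustrative, not the verbatim return path): "reg"
 * holds the current lwp, or zero early in boot when there is none; in
 * both the no-lwp and no-AST cases the final flags are "equal":
 *
 *	movq	CPUVAR(CURLWP),%r14
 *	CHECK_ASTPENDING(%r14)
 *	je	1f			// no lwp, or no AST pending
 *	CLEAR_ASTPENDING(%r14)
 *	... deliver the AST ...
 * 1:
 */
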
#ifdef XEN
#define CLI(temp_reg) \
	movl	CPUVAR(CPUID),%e/**/temp_reg			; \
	shlq	$6,%r/**/temp_reg	/* CPUID * 64: vcpu_info offset */ ; \
	addq	_C_LABEL(HYPERVISOR_shared_info),%r/**/temp_reg	; \
	movb	$1,EVTCHN_UPCALL_MASK(%r/**/temp_reg)
#define STI(temp_reg) \
	movl	CPUVAR(CPUID),%e/**/temp_reg			; \
	shlq	$6,%r/**/temp_reg	/* CPUID * 64: vcpu_info offset */ ; \
	addq	_C_LABEL(HYPERVISOR_shared_info),%r/**/temp_reg	; \
	movb	$0,EVTCHN_UPCALL_MASK(%r/**/temp_reg)
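
/*
 * The empty-comment token pasting above glues a register-width prefix
 * onto the caller's argument, which names a temporary register without
 * its prefix; CLI(si), for example, expands roughly to:
 *
 *	movl	CPUVAR(CPUID),%esi
 *	shlq	$6,%rsi
 *	addq	_C_LABEL(HYPERVISOR_shared_info),%rsi
 *	movb	$1,EVTCHN_UPCALL_MASK(%rsi)
 *
 * i.e. events are masked by setting the upcall-mask byte in this CPU's
 * vcpu_info rather than by executing the cli instruction.
 */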
#else /* XEN */
#define CLI(temp_reg) cli
#define STI(temp_reg) sti
#endif /* XEN */

#endif /* _AMD64_MACHINE_FRAMEASM_H */