/*	$NetBSD: frameasm.h,v 1.11 2008/02/29 12:34:23 yamt Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H
#include "opt_xen.h"

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

#ifdef XEN
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/*
 * Xen does not need swapgs; the hypervisor does it for us.
 * Redefine iretq to enter the hypervisor's iret hypercall stub;
 * the pushq $0 supplies the extra word that hypercall expects on
 * the stack.
 */
#define swapgs
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
#endif

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)	; \
	cld

#define	INTR_RESTORE_GPRS \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RAX(%rsp),%rax

#define	INTRENTRY \
	subq	$TF_REGSIZE,%rsp	; \
	testq	$SEL_UPL,TF_CS(%rsp)	/* from kernel? */ ; \
	je	98f			; \
	swapgs				; \
	movw	%gs,TF_GS(%rsp)		; \
	movw	%fs,TF_FS(%rsp)		; \
	movw	%es,TF_ES(%rsp)		; \
	movw	%ds,TF_DS(%rsp)		; \
98: 	INTR_SAVE_GPRS

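/*
 * A minimal sketch of the intended use, assuming a trap that pushes
 * no hardware error code; the vector name, trap type and handler
 * below are illustrative only (the real stubs live elsewhere, e.g.
 * vector.S):
 *
 *	IDTVEC(mytrap)
 *		pushq	$0		(dummy error code)
 *		pushq	$T_MYTRAP	(trap type, fills the T_xxx slot)
 *		INTRENTRY
 *		...			(call the C handler)
 *		INTRFASTEXIT
 */
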
#ifndef XEN
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS		; \
	testq	$SEL_UPL,TF_CS(%rsp)	/* Interrupted %cs */ ; \
	je	99f			; \
	cli				; \
	swapgs				; \
	movw	TF_GS(%rsp),%gs		; \
	movw	TF_FS(%rsp),%fs		; \
	movw	TF_ES(%rsp),%es		; \
	movw	TF_DS(%rsp),%ds		; \
99:	addq	$TF_REGSIZE+16,%rsp	/* + T_xxx and error code */ ; \
	iretq

/*
 * Build a hardware-style interrupt frame so the recursion can be
 * unwound with iretq; the caller supplies the return %rip in %r13.
 */
#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	pushq	%r13			;

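/*
 * For reference, the frame built above matches what the CPU pushes
 * on an interrupt (from high to low addresses):
 *
 *	%ss
 *	%rsp		(value before the pushes, saved in %r10)
 *	%rflags
 *	%cs
 *	%rip		(taken from %r13)
 */
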
#else	/* !XEN */
/*
 * Disabling events before going back to user mode sounds like a BAD
 * idea. Do not restore %gs either; HYPERVISOR_iret will do the
 * swapgs for us.
 */
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS		; \
	testq	$SEL_UPL,TF_CS(%rsp)	; \
	je	99f			; \
	movw	TF_FS(%rsp),%fs		; \
	movw	TF_ES(%rsp),%es		; \
	movw	TF_DS(%rsp),%ds		; \
99:	addq	$TF_REGSIZE+16,%rsp	/* + T_xxx and error code */ ; \
	iretq

/* We must fix up %cs, as even kernel mode runs at CPL 3 */
#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	andb	$0xfc,(%rsp)		/* clear RPL of saved %cs */ ; \
	pushq	%r13			;

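/*
 * Worked example of the fixup: the low two bits of a selector are
 * its RPL, and under Xen the kernel's %cs has RPL 3 (e.g. Xen's
 * FLAT_RING3_CS64, 0xe033). Masking with $0xfc turns 0xe033 into
 * 0xe030, so the saved frame passes the SEL_UPL test above as a
 * kernel-mode one. The selector value is illustrative, not
 * something this header guarantees.
 */
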
#endif	/* !XEN */

/* If a pmap load was deferred, do it now. */
#define	DO_DEFERRED_SWITCH \
	cmpq	$0, CPUVAR(WANT_PMAPLOAD)		; \
	jz	1f					; \
	call	_C_LABEL(do_pmap_load)			; \
	1:

/* Leaves ZF clear if a pmap load is pending. */
#define	CHECK_DEFERRED_SWITCH \
	cmpq	$0, CPUVAR(WANT_PMAPLOAD)

/* reg must hold a pointer to the current lwp */
#define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)
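
/*
 * A minimal sketch of the intended use; the register choice and
 * label are illustrative only, assuming the current lwp is kept in
 * %r14 on the return-to-user path:
 *
 *	CHECK_ASTPENDING(%r14)
 *	je	9f			(no AST pending)
 *	CLEAR_ASTPENDING(%r14)
 *	...				(deliver the AST)
 * 9:
 */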

#ifdef XEN
/*
 * Under Xen, interrupts are masked and unmasked per-CPU by writing
 * the evtchn_upcall_mask byte of this CPU's vcpu_info slot in the
 * shared info page; each slot is 64 bytes, hence the shlq $6. The
 * comment splices paste temp_reg onto the %e and %r register
 * prefixes.
 */
#define CLI(temp_reg) \
	movl CPUVAR(CPUID),%e/**/temp_reg ;			\
	shlq $6,%r/**/temp_reg ;				\
	addq _C_LABEL(HYPERVISOR_shared_info),%r/**/temp_reg ;	\
	movb $1,EVTCHN_UPCALL_MASK(%r/**/temp_reg)
#define STI(temp_reg) \
	movl CPUVAR(CPUID),%e/**/temp_reg ;			\
	shlq $6,%r/**/temp_reg ;				\
	addq _C_LABEL(HYPERVISOR_shared_info),%r/**/temp_reg ;	\
	movb $0,EVTCHN_UPCALL_MASK(%r/**/temp_reg)
#else /* XEN */
#define CLI(temp_reg) cli
#define STI(temp_reg) sti
#endif	/* XEN */
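
/*
 * For illustration, CLI(si) expands (via the comment splices) to
 * roughly:
 *
 *	movl	CPUVAR(CPUID),%esi
 *	shlq	$6,%rsi
 *	addq	_C_LABEL(HYPERVISOR_shared_info),%rsi
 *	movb	$1,EVTCHN_UPCALL_MASK(%rsi)
 *
 * The caller passes a register name without its %e or %r prefix and
 * must treat that register as clobbered.
 */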

#endif /* _AMD64_MACHINE_FRAMEASM_H */