/*	$NetBSD: frameasm.h,v 1.18 2012/05/07 21:04:09 dsl Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

#ifdef XEN
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; the hypervisor takes care of it */
#define swapgs
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
#define	XEN_ONLY2(x,y)	x,y
#define	NOT_XEN(x)

#define CLI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#define STI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#else /* XEN */
#define	XEN_ONLY2(x,y)
#define	NOT_XEN(x)	x
#define CLI(temp_reg) cli
#define STI(temp_reg) sti
#endif	/* XEN */
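
/*
 * CLI/STI hide the difference between native and Xen event masking.
 * Illustrative expansion only: on native hardware CLI(cx) is simply
 * "cli", while under XEN it expands to roughly
 *
 *	movq	CPUVAR(VCPU),%rcx
 *	movb	$1,EVTCHN_UPCALL_MASK(%rcx)
 *
 * i.e. events are masked through the per-vcpu upcall mask instead of
 * the hardware interrupt flag, and STI clears that mask again.
 */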

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)	; \
	cld

#define	INTR_RESTORE_GPRS \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RAX(%rsp),%rax

#define	INTRENTRY_L(kernel_trap) \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	testb	$SEL_UPL,TF_CS(%rsp)	; \
	je	kernel_trap		; \
	swapgs				; \
	movw	%gs,TF_GS(%rsp)		; \
	movw	%fs,TF_FS(%rsp)		; \
	movw	%es,TF_ES(%rsp)		; \
	movw	%ds,TF_DS(%rsp)

#define	INTRENTRY \
	INTRENTRY_L(98f)		; \
98:

#define INTRFASTEXIT \
	INTR_RESTORE_GPRS 		; \
	testq	$SEL_UPL,TF_CS(%rsp)	/* Interrupted %cs */ ; \
	je	99f			; \
/* XEN: Disabling events before going to user mode sounds like a BAD idea */ \
	NOT_XEN(cli;)			  \
	movw	TF_ES(%rsp),%es		; \
	movw	TF_DS(%rsp),%ds		; \
	swapgs				; \
99:	addq	$TF_REGSIZE+16,%rsp	/* + T_xxx and error code */ ; \
	iretq

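/*
 * Sketch of the intended use, for illustration only: the real
 * consumers are the trap and interrupt stubs in the amd64 locore and
 * vector sources, "sometrap" and T_xxx stand in for a real vector
 * name and trap type, and the TF_* offsets come from the generated
 * assym.h.
 *
 *	IDTVEC(sometrap)
 *		pushq	$0		(dummy error code, if the CPU
 *					 did not push one)
 *		pushq	$T_xxx		(trap type)
 *		INTRENTRY		(build the trapframe; swapgs and
 *					 segment saves only when coming
 *					 from user mode)
 *		... handle the trap ...
 *		INTRFASTEXIT		(restore GPRs, pop the trapframe
 *					 plus T_xxx and error code, iretq)
 */
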
/*
 * Build the frame the CPU would push for a hardware interrupt
 * (%ss, %rsp, %rflags, %cs), with the return address taken from %r13,
 * so the recursion can later be dismissed with a normal iretq.
 */
#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
/* XEN: We must fixup CS, as even kernel mode runs at CPL 3 */ \
	XEN_ONLY2(andb	$0xfc,(%rsp);)	  \
	pushq	%r13			;

/*
 * DO_DEFERRED_SWITCH calls do_pmap_load() if a pmap load was deferred
 * (CPUVAR(WANT_PMAPLOAD) != 0); CHECK_DEFERRED_SWITCH only sets the
 * flags for the caller to test (ZF set when nothing is pending).
 */
#define	DO_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)		; \
	jz	1f					; \
	call	_C_LABEL(do_pmap_load)			; \
1:

#define	CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

/* "reg" must hold a pointer to the lwp being checked (normally curlwp). */
#define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)
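
/*
 * Typical return-to-user check, sketched for illustration only (the
 * register and label are arbitrary; the real sequence lives in the
 * amd64 trap/syscall return paths):
 *
 *	movq	CPUVAR(CURLWP),%r14
 *	CHECK_ASTPENDING(%r14)
 *	je	9f
 *	CLEAR_ASTPENDING(%r14)
 *	... process the AST ...
 * 9:
 */
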
#endif /* _AMD64_MACHINE_FRAMEASM_H */