/*	$NetBSD: frameasm.h,v 1.12.12.1 2012/06/12 20:43:47 riz Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H
#include "opt_xen.h"

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

#ifdef XEN
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; it is done by the hypervisor. */
#define swapgs
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
#endif

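/*
 * Editorial note: with the Xen definitions above, a trailing "iretq" in
 * the exit paths below expands to
 *
 *	pushq	$0
 *	jmp	hypercall_page + (__HYPERVISOR_iret * 32)
 *
 * i.e. the return to the interrupted context goes through the 32-byte
 * hypercall_page slot for __HYPERVISOR_iret rather than a real iretq.
 */
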
/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)	; \
	cld

#define	INTR_RESTORE_GPRS \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RAX(%rsp),%rax

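/*
 * The TF_* offsets and TF_REGSIZE are assumed here to be the struct
 * trapframe member offsets/size generated into assym.h; INTR_RESTORE_GPRS
 * must remain an exact mirror of INTR_SAVE_GPRS.
 */
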
#define	INTRENTRY_L(kernel_trap, usertrap) \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	testb	$SEL_UPL,TF_CS(%rsp)	; \
	je	kernel_trap		; \
usertrap				; \
	swapgs				; \
	movw	%gs,TF_GS(%rsp)		; \
	movw	%fs,TF_FS(%rsp)		; \
	movw	%es,TF_ES(%rsp)		; \
	movw	%ds,TF_DS(%rsp)

#define	INTRENTRY \
	INTRENTRY_L(98f,)		; \
98:

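/*
 * Illustrative use only; the real consumers are the trap and interrupt
 * stubs (e.g. vector.S), which are expected to have pushed an error code
 * (or a dummy 0) and the trap number first:
 *
 *	pushq	$0			(dummy error code)
 *	pushq	$T_xxx			(trap number)
 *	INTRENTRY
 *	... handle the trap ...
 *	INTRFASTEXIT
 *
 * INTRENTRY_L(kernel_trap, usertrap) jumps to kernel_trap for traps taken
 * in kernel mode; the usertrap argument is expanded in place (INTRENTRY
 * passes it empty) before the user path does the swapgs and saves the
 * segment registers.  INTRENTRY simply skips that user path for kernel
 * traps via the local 98: label.
 */
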
#ifndef XEN
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS 		; \
	testq	$SEL_UPL,TF_CS(%rsp)	/* Interrupted %cs */ ; \
	je	99f			; \
	cli				; \
	movw	TF_FS(%rsp),%fs		; \
	movw	TF_ES(%rsp),%es		; \
	movw	TF_DS(%rsp),%ds		; \
	swapgs				; \
	movw	TF_GS(%rsp),%gs		; /* can fault */ \
99:	addq	$TF_REGSIZE+16,%rsp	/* + T_xxx and error code */ ; \
	iretq

#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	pushq	%r13			;

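/*
 * INTR_RECURSE_HWFRAME manufactures the hardware-style frame (%ss, %rsp,
 * %rflags, %cs, %rip) that a real interrupt would have pushed, so that a
 * recursing handler can run through the normal entry/exit path.  The
 * final pushq %r13 fills the %rip slot, so the caller is expected to have
 * the desired resume address in %r13 beforehand.
 */
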
#else	/* !XEN */
/*
 * Disabling events before going to user mode sounds like a BAD idea.
 * Do not restore %gs either; HYPERVISOR_iret will do a swapgs.
 */
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS 		; \
	testq	$SEL_UPL,TF_CS(%rsp)	; \
	je	99f			; \
	movw	TF_FS(%rsp),%fs		; \
	movw	TF_ES(%rsp),%es		; \
	movw	TF_DS(%rsp),%ds		; \
99:	addq	$TF_REGSIZE+16,%rsp	/* + T_xxx and error code */ ; \
	iretq

/* We must fix up %cs, as even kernel mode runs at CPL 3. */
#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	andb	$0xfc,(%rsp)		; \
	pushq	%r13			;

#endif	/* !XEN */
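
/*
 * Note on the Xen variants above: the guest kernel runs at CPL 3, so
 * INTRFASTEXIT neither disables event delivery nor reloads %gs (the
 * HYPERVISOR_iret hypercall performs the swapgs), and INTR_RECURSE_HWFRAME
 * clears the RPL bits of the just-pushed %cs (andb $0xfc) so that the
 * fabricated frame is still recognized as a kernel-mode frame by the
 * SEL_UPL tests.
 */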

#define	DO_DEFERRED_SWITCH \
	cmpq	$0, CPUVAR(WANT_PMAPLOAD)		; \
	jz	1f					; \
	call	_C_LABEL(do_pmap_load)			; \
	1:

#define	CHECK_DEFERRED_SWITCH \
	cmpq	$0, CPUVAR(WANT_PMAPLOAD)

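/*
 * DO_DEFERRED_SWITCH calls do_pmap_load() when a pmap switch has been
 * deferred (CPUVAR(WANT_PMAPLOAD) is non-zero); CHECK_DEFERRED_SWITCH
 * only performs the comparison and leaves the branch to the caller,
 * for example (label name illustrative):
 *
 *	CHECK_DEFERRED_SWITCH
 *	jnz	do_the_switch
 */
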
#define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)

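/*
 * CHECK_ASTPENDING(reg) likewise only sets the flags; reg holds a pointer
 * to the lwp being checked (typically curlwp).  The caller branches on
 * the result and uses CLEAR_ASTPENDING(reg) once the AST is handled.
 */
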
#ifdef XEN
#define CLI(temp_reg) \
	movl CPUVAR(CPUID),%e/**/temp_reg ;			\
	shlq $6,%r/**/temp_reg ;				\
	addq CPUVAR(VCPU),%r/**/temp_reg ;			\
	movb $1,EVTCHN_UPCALL_MASK(%r/**/temp_reg)
#define STI(temp_reg) \
	movl CPUVAR(CPUID),%e/**/temp_reg ;			\
	shlq $6,%r/**/temp_reg ;				\
	addq CPUVAR(VCPU),%r/**/temp_reg ;			\
	movb $0,EVTCHN_UPCALL_MASK(%r/**/temp_reg)
#else /* XEN */
#define CLI(temp_reg) cli
#define STI(temp_reg) sti
#endif	/* XEN */

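/*
 * CLI()/STI() take the bare name of a scratch register, e.g. CLI(cx),
 * which the %e/%r pasting above turns into %ecx/%rcx.  Natively they are
 * plain cli/sti; under Xen they instead mask or unmask event delivery by
 * writing this CPU's evtchn_upcall_mask in its vcpu_info slot, located by
 * scaling CPUVAR(CPUID) by 64 (shlq $6) from the CPUVAR(VCPU) base.
 */
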
#endif /* _AMD64_MACHINE_FRAMEASM_H */