Home | History | Annotate | Line # | Download | only in include
frameasm.h revision 1.15.6.1
      1  1.15.6.1  cherry /*	$NetBSD: frameasm.h,v 1.15.6.1 2011/06/03 13:27:38 cherry Exp $	*/
      2       1.1    fvdl 
      3       1.1    fvdl #ifndef _AMD64_MACHINE_FRAMEASM_H
      4       1.1    fvdl #define _AMD64_MACHINE_FRAMEASM_H
      5      1.13      ad 
      6      1.13      ad #ifdef _KERNEL_OPT
      7       1.8  bouyer #include "opt_xen.h"
      8      1.13      ad #endif
      9       1.1    fvdl 
     10       1.1    fvdl /*
     11       1.1    fvdl  * Macros to define pushing/popping frames for interrupts, traps
     12       1.1    fvdl  * and system calls. Currently all the same; will diverge later.
     13       1.1    fvdl  */
     14       1.1    fvdl 
#ifdef XEN
/*
 * Address of the iret hypercall stub: the hypercall page contains one
 * 32-byte stub per hypercall number.
 */
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; it is done by the hypervisor. */
#define swapgs
/*
 * iretq is replaced by a jump to the iret hypercall stub; the pushed
 * zero fills the extra stack slot the hypercall expects.
 * NOTE(review): confirm the expected iret_context layout against the
 * Xen public headers.
 */
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
#endif
     21       1.8  bouyer 
     22       1.1    fvdl /*
     23       1.1    fvdl  * These are used on interrupt or trap entry or exit.
     24       1.1    fvdl  */
/*
 * Save all general-purpose registers into the trap frame that %rsp
 * currently points at.  Ends with "cld" so the kernel can rely on a
 * clear direction flag (forward string operations), whatever state
 * the interrupted context left it in.
 */
#define INTR_SAVE_GPRS \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)	; \
	cld
     42       1.1    fvdl 
/*
 * Restore all general-purpose registers from the trap frame at %rsp.
 * Inverse of INTR_SAVE_GPRS; segment registers are handled separately
 * by the exit macros.
 */
#define	INTR_RESTORE_GPRS \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RAX(%rsp),%rax
     59       1.1    fvdl 
/*
 * Common interrupt/trap entry: allocate the register-save area of the
 * trap frame below the hardware-pushed part, then save state.  If the
 * trap came from user mode (SEL_UPL bits set in the saved %cs), switch
 * to the kernel %gs base with swapgs and save the user segment
 * registers; a trap from kernel mode branches to 98: and skips that.
 * Under XEN, swapgs is defined away above (hypervisor does it).
 */
#define	INTRENTRY \
	subq	$TF_REGSIZE,%rsp	; \
	testq	$SEL_UPL,TF_CS(%rsp)	; \
	je	98f			; \
	swapgs				; \
	movw	%gs,TF_GS(%rsp)		; \
	movw	%fs,TF_FS(%rsp)		; \
	movw	%es,TF_ES(%rsp)		; \
	movw	%ds,TF_DS(%rsp)		; \
98:	INTR_SAVE_GPRS
     70       1.1    fvdl 
     71       1.8  bouyer #ifndef XEN
/*
 * Interrupt/trap exit (native): restore the GPRs; if returning to user
 * mode (SEL_UPL set in the saved %cs), disable interrupts, restore the
 * user %es/%ds and switch back to the user %gs base with swapgs.  A
 * return to kernel mode branches to 99: and skips that.  Finally pop
 * the register area plus the trap number and error code (16 bytes)
 * and return with iretq.
 */
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS 		; \
	testq	$SEL_UPL,TF_CS(%rsp)	/* Interrupted %cs */ ; \
	je	99f			; \
	cli				; \
	movw	TF_ES(%rsp),%es		; \
	movw	TF_DS(%rsp),%ds		; \
	swapgs				; \
99:	addq	$TF_REGSIZE+16,%rsp	/* + T_xxx and error code */ ; \
	iretq
     82       1.1    fvdl 
/*
 * Push a fake hardware interrupt frame (%ss, %rsp, %rflags, %cs, %rip)
 * so interrupt entry code can be re-entered as if a real interrupt had
 * occurred.
 * NOTE(review): the final push uses %r13 as the %rip slot — the caller
 * is apparently expected to have loaded the target address into %r13;
 * confirm against the users of this macro.
 */
#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	pushq	%r13			;
     92       1.1    fvdl 
     93       1.8  bouyer #else	/* !XEN */
/*
 * Disabling events before going to user mode sounds like a BAD idea.
 * Do not restore %gs either; HYPERVISOR_iret will do a swapgs.
 */
/*
 * Xen variant of INTRFASTEXIT: no cli and no swapgs (see the comment
 * above) — the iretq below is redefined to the iret hypercall, and the
 * hypervisor performs the %gs switch.
 */
#define INTRFASTEXIT \
 	INTR_RESTORE_GPRS 		; \
 	testq	$SEL_UPL,TF_CS(%rsp)	; \
 	je	99f			; \
 	movw	TF_ES(%rsp),%es		; \
 	movw	TF_DS(%rsp),%ds		; \
99:	addq	$TF_REGSIZE+16,%rsp	/* + T_xxx and error code */ ; \
 	iretq
    106       1.8  bouyer 
/*
 * Xen variant: we must fix up the pushed %cs, as even kernel mode runs
 * at CPL 3 under Xen.
 */
#define INTR_RECURSE_HWFRAME \
 	movq	%rsp,%r10		; \
 	movl	%ss,%r11d		; \
 	pushq	%r11			; \
 	pushq	%r10			; \
 	pushfq				; \
 	movl	%cs,%r11d		; \
 	pushq	%r11			; \
 	andb	$0xfc,(%rsp)		; /* clear RPL bits of pushed %cs */ \
 	pushq	%r13			;
    118       1.8  bouyer 
    119       1.8  bouyer #endif	/* !XEN */
    120       1.8  bouyer 
/*
 * If a deferred pmap load is pending for this CPU (per-CPU
 * WANT_PMAPLOAD field non-zero), call do_pmap_load() to perform it.
 */
#define	DO_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)		; \
	jz	1f					; \
	call	_C_LABEL(do_pmap_load)			; \
	1:
    126       1.6    yamt 
/*
 * Set the flags (ZF) from the per-CPU WANT_PMAPLOAD field so the
 * caller can branch on whether a deferred pmap load is pending.
 */
#define	CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)
    129       1.1    fvdl 
/*
 * Test (sets ZF for the caller to branch on) / clear the md_astpending
 * field of the lwp whose pointer is in "reg".
 */
#define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)
    132       1.1    fvdl 
#ifdef XEN
/*
 * CLI/STI: under Xen, interrupts are masked by writing the
 * evtchn_upcall_mask byte of this CPU's vcpu_info structure instead of
 * executing cli/sti.  "temp_reg" is the name of a 64-bit register
 * without the "%r" prefix (it is token-pasted onto %r, e.g. CLI(ax)
 * uses %rax); that register is clobbered.
 */
#define CLI(temp_reg) \
 	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#define STI(temp_reg) \
 	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#else /* XEN */
/* Native: plain cli/sti; temp_reg is unused. */
#define CLI(temp_reg) cli
#define STI(temp_reg) sti
#endif	/* XEN */
    146       1.8  bouyer 
    147       1.1    fvdl #endif /* _AMD64_MACHINE_FRAMEASM_H */
    148