/*	$NetBSD: frameasm.h,v 1.36 2026/01/17 10:59:10 bouyer Exp $	*/

#ifndef _I386_FRAMEASM_H_
#define _I386_FRAMEASM_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#endif


#ifdef XEN
/* XXX assym.h */
#define TRAP_INSTR	int	$0x82
#endif /* XEN */

#if defined(XENPV)
/*
 * Accessing EVTCHN_UPCALL_MASK is safe only if preemption is disabled,
 * i.e. at least one of the following holds:
 * l_nopreempt is not 0, or
 * ci_ilevel is not 0, or
 * EVTCHN_UPCALL_MASK is not 0, or
 * ci_idepth is not negative
 */
#ifdef DIAGNOSTIC
#define CLI(reg)	\
			movl	CPUVAR(CURLWP),reg ;  \
			cmpl	$0, L_NOPREEMPT(reg); \
			jne	199f; \
			cmpb	$0, CPUVAR(ILEVEL); \
			jne	199f; \
			movl	CPUVAR(IDEPTH), reg; \
			test	reg, reg; \
			jns	199f; \
			movl	$panicstr,reg ; \
			cmpl	$0, 0(reg); \
			jne	199f; \
			pushl	$199f; \
			movl	$_C_LABEL(cli_panic), reg; \
			pushl	0(reg); \
			call	_C_LABEL(panic); \
			addl	$8,%esp; \
199:			movl	CPUVAR(VCPU),reg ;  \
			movb	$1,EVTCHN_UPCALL_MASK(reg)

#define STI(reg) \
			movl	CPUVAR(VCPU),reg ;  \
			cmpb	$0, EVTCHN_UPCALL_MASK(reg) ; \
			jne	198f ; \
			movl	$panicstr,reg ; \
			cmpl	$0, 0(reg); \
			jne	198f; \
			pushl	$198f; \
			movl	$_C_LABEL(sti_panic), reg; \
			pushl	0(reg); \
			call	_C_LABEL(panic); \
			addl	$8,%esp; \
198:			movb	$0,EVTCHN_UPCALL_MASK(reg)


/*
 * Here we have a window where we could be migrated between enabling
 * interrupts and testing EVTCHN_UPCALL_PENDING. But this is not a big
 * issue: at worst we will call stipending() on the new CPU, which has
 * no pending interrupts, and the pending interrupts on the old CPU
 * have already been processed.
 */
#define STIC(reg) \
			movl	CPUVAR(VCPU),reg ;  \
			cmpb	$0, EVTCHN_UPCALL_MASK(reg) ; \
			jne	197f ; \
			movl	$panicstr,reg ; \
			cmpl	$0, 0(reg); \
			jne	197f; \
			pushl	$197f; \
			movl	$_C_LABEL(sti_panic), reg; \
			pushl	0(reg); \
			call	_C_LABEL(panic); \
			addl	$8,%esp; \
197:			movl	CPUVAR(VCPU),reg ;  \
			movb	$0,EVTCHN_UPCALL_MASK(reg); \
			testb	$0xff,EVTCHN_UPCALL_PENDING(reg)

#else
#define CLI(reg)	\
			movl	CPUVAR(VCPU),reg ;  \
			movb	$1,EVTCHN_UPCALL_MASK(reg)

#define STI(reg) \
			movl	CPUVAR(VCPU),reg ;  \
			movb	$0,EVTCHN_UPCALL_MASK(reg)

#define STIC(reg) \
			movl	CPUVAR(VCPU),reg ;  \
			movb	$0,EVTCHN_UPCALL_MASK(reg); \
			testb	$0xff,EVTCHN_UPCALL_PENDING(reg)

#endif /* DIAGNOSTIC */
#define CLI2(reg, reg2) \
			movl	CPUVAR(CURLWP),reg; \
			incl	L_NOPREEMPT(reg); \
			movl	CPUVAR(VCPU),reg2 ;  \
			movb	$1,EVTCHN_UPCALL_MASK(reg2); \
			decl	L_NOPREEMPT(reg);

#define PUSHFCLI(reg, reg2) \
			movl	CPUVAR(CURLWP),reg; \
			incl	L_NOPREEMPT(reg); \
			movl	CPUVAR(VCPU),reg2 ;  \
			movzbl	EVTCHN_UPCALL_MASK(reg2), reg2; \
			pushl	reg2 ; \
			movl	CPUVAR(VCPU),reg2 ;  \
			movb	$1,EVTCHN_UPCALL_MASK(reg2); \
			decl	L_NOPREEMPT(reg);

#define POPF(reg)	call _C_LABEL(xen_write_psl); \
			addl	$4,%esp
#else
#define CLI(reg)	cli
#define CLI2(reg, reg2)	cli
#define STI(reg)	sti
#define PUSHFCLI(reg, reg2) pushf ; cli
#define POPF(reg)	popf

#endif /* XENPV */
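
/*
 * With the macros above, callers can disable and re-enable interrupts
 * without caring whether they run native or as a XENPV guest; the
 * scratch register is clobbered only on XENPV. A minimal hedged sketch
 * of the intended pattern (the body is illustrative only):
 *
 *		CLI(%eax)
 *		...			# touch per-CPU state atomically
 *		STI(%eax)
 *
 * Note that STIC(reg), which additionally tests EVTCHN_UPCALL_PENDING
 * so the caller can branch to an event-handling path, is only provided
 * in the XENPV case by this header.
 */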

#define HP_NAME_CLAC		1
#define HP_NAME_STAC		2
#define HP_NAME_NOLOCK		3
#define HP_NAME_RETFENCE	4
#define HP_NAME_CAS_64		5
#define HP_NAME_SPLLOWER	6
#define HP_NAME_MUTEX_EXIT	7

#define HOTPATCH(name, size) \
123:						; \
	.pushsection	.rodata.hotpatch, "a"	; \
	.byte		name			; \
	.byte		size			; \
	.long		123b			; \
	.popsection
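
/*
 * Each HOTPATCH() use emits one record into .rodata.hotpatch. A hedged
 * sketch of the record layout, as the kernel's hotpatch pass would read
 * it (the struct name is hypothetical; this header only defines the
 * emitting side):
 *
 *	struct hotpatch_record {
 *		uint8_t  name;		HP_NAME_* selector
 *		uint8_t  size;		bytes available at the site
 *		uint32_t addr;		address of the 123: label
 *	} __packed;
 */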

#define SMAP_ENABLE \
	HOTPATCH(HP_NAME_CLAC, 3)		; \
	.byte 0x90, 0x90, 0x90

#define SMAP_DISABLE \
	HOTPATCH(HP_NAME_STAC, 3)		; \
	.byte 0x90, 0x90, 0x90
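
/*
 * The three NOPs are the patchable site. On CPUs with SMAP support the
 * hotpatch pass presumably rewrites them in place: the HP_NAME_CLAC site
 * with clac (0x0f 0x01 0xca) and the HP_NAME_STAC site with stac
 * (0x0f 0x01 0xcb); both encodings are exactly 3 bytes, matching the
 * size argument passed to HOTPATCH().
 */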

/*
 * These are used on interrupt or trap entry or exit.
 */
#define	INTRENTRY \
	SMAP_ENABLE			; \
	subl	$TF_PUSHSIZE,%esp	; \
	movw	%gs,TF_GS(%esp)		; \
	movw	%fs,TF_FS(%esp) 	; \
	movl	%eax,TF_EAX(%esp)	; \
	movw	%es,TF_ES(%esp) 	; \
	movw	%ds,TF_DS(%esp) 	; \
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax	; \
	movl	%edi,TF_EDI(%esp)	; \
	movl	%esi,TF_ESI(%esp)	; \
	movw	%ax,%ds			; \
	movl	%ebp,TF_EBP(%esp)	; \
	movw	%ax,%es			; \
	movl	%ebx,TF_EBX(%esp)	; \
	movw	%ax,%gs			; \
	movl	%edx,TF_EDX(%esp)	; \
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax	; \
	movl	%ecx,TF_ECX(%esp)	; \
	movl	%eax,%fs		; \
	cld

#define	INTRFASTEXIT \
	jmp	intrfastexit
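
/*
 * Typical use, as a hedged sketch (a hypothetical trap stub; the real
 * vectors live in the i386 locore/vector code and differ in detail):
 *
 *	IDTVEC(trapXX)
 *		pushl	$0		# error code placeholder
 *		pushl	$T_SOMETRAP	# trap number (illustrative)
 *		INTRENTRY
 *		...			# handle the trap
 *		INTRFASTEXIT
 */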

#define INTR_RECURSE_HWFRAME \
	pushfl				; \
	pushl	%cs			; \
	pushl	%esi			;

#define	CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

#define	CHECK_ASTPENDING(reg)	movl	CPUVAR(CURLWP),reg	; \
				cmpl	$0, L_MD_ASTPENDING(reg)
#define	CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)
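
/*
 * Hedged sketch of the usual return-to-user AST check (the real loop is
 * in locore.S; the local labels and trap call here are illustrative):
 *
 *		CLI(%eax)
 *	5:	CHECK_ASTPENDING(%eax)
 *		jz	6f		# no AST pending
 *		CLEAR_ASTPENDING(%eax)
 *		STI(%eax)
 *		...			# deliver the AST, e.g. via trap()
 *		CLI(%eax)
 *		jmp	5b		# re-check before returning
 *	6:	INTRFASTEXIT
 */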

/*
 * If the FPU state is not in the CPU, restore it. Executed with interrupts
 * disabled.
 *
 *     %ebx must not be modified
 */
#define HANDLE_DEFERRED_FPU	\
	movl	CPUVAR(CURLWP),%eax			; \
	testl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%eax)	; \
	jnz	1f					; \
	pushl	%eax					; \
	call	_C_LABEL(fpu_handle_deferred)		; \
	popl	%eax					; \
	orl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%eax)	; \
1:
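
/*
 * A minimal sketch of the assumed placement, modeled on a return-to-user
 * path: after the handler has run and before leaving the kernel, with
 * interrupts still disabled:
 *
 *		CLI(%eax)
 *		HANDLE_DEFERRED_FPU
 *		INTRFASTEXIT
 */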

/*
 * IDEPTH_INCR:
 * Increase ci_idepth and switch to the interrupt stack if necessary.
 * Note that the initial value of ci_idepth is -1.
 *
 * => should be called with interrupts disabled.
 * => saves the old value of %esp in %eax.
 */

#define	IDEPTH_INCR \
	incl	CPUVAR(IDEPTH); \
	movl	%esp, %eax; /* movl does not alter the flags set by incl */ \
	jne	999f; /* nested interrupt: already on the interrupt stack */ \
	movl	CPUVAR(INTRSTACK), %esp; \
999:	pushl	%eax; /* eax == pointer to intrframe */

/*
 * IDEPTH_DECR:
 * Decrement ci_idepth and switch back to
 * the original stack saved by IDEPTH_INCR.
 *
 * => should be called with interrupts disabled.
 */

#define	IDEPTH_DECR \
	popl	%esp; \
	decl	CPUVAR(IDEPTH)
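
/*
 * Hedged sketch of how these bracket an interrupt handler (the real code
 * is in the interrupt entry paths; the handler name is illustrative):
 *
 *		INTRENTRY
 *		IDEPTH_INCR		# %eax <- old %esp, switch stack if needed
 *		call	_C_LABEL(somehandler)	# intrframe pointer is on the stack
 *		IDEPTH_DECR		# back to the original stack
 *		INTRFASTEXIT
 */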

#endif /* _I386_FRAMEASM_H_ */