/*	$NetBSD: frameasm.h,v 1.56 2026/01/17 10:53:19 bouyer Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#include "opt_svs.h"
#include "opt_kcov.h"
#include "opt_kmsan.h"
#endif

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

#ifdef XENPV
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; the hypervisor does it for us. */
#define swapgs
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
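/*
 * Note: the Xen hypercall page is laid out as an array of 32-byte call
 * stubs, one per hypercall number, hence the "* 32" above. Redefining
 * iretq this way makes every "iretq" in the kernel return through the
 * iret hypercall instead of executing a real iretq; the pushed zero is
 * an extra word the hypercall expects on the stack.
 */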
#define	XEN_ONLY2(x,y)	x,y
#define	NOT_XEN(x)

#ifdef DIAGNOSTIC
/*
 * Accessing EVTCHN_UPCALL_MASK is safe only if preemption is disabled,
 * i.e. if at least one of the following holds:
 * l_nopreempt is not 0, or
 * ci_ilevel is not 0, or
 * EVTCHN_UPCALL_MASK is not 0, or
 * ci_idepth is not negative.
 */
#define CLI(temp_reg) \
	movq CPUVAR(CURLWP),%r ## temp_reg ;			\
	cmpl $0, L_NOPREEMPT(%r ## temp_reg);			\
	jne 199f;						\
	cmpb $0, CPUVAR(ILEVEL);				\
	jne 199f;						\
	movl CPUVAR(IDEPTH), %e ## temp_reg;			\
	test %e ## temp_reg, %e ## temp_reg;			\
	jns 199f;						\
	movq $_C_LABEL(panicstr),%r ## temp_reg ;		\
	cmpq $0, 0(%r ## temp_reg);				\
	jne 199f;						\
	movq _C_LABEL(cli_panic), %rdi;				\
	call _C_LABEL(panic);					\
199:	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#define STI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	cmpb $0, EVTCHN_UPCALL_MASK(%r ## temp_reg);		\
	jne 198f;						\
	movq $_C_LABEL(panicstr),%r ## temp_reg ;		\
	cmpq $0, 0(%r ## temp_reg);				\
	jne 197f;						\
	movq _C_LABEL(sti_panic), %rdi;				\
	call _C_LABEL(panic);					\
197:	movq CPUVAR(VCPU),%r ## temp_reg ;			\
198:	movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);
#else

#define CLI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#define STI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#endif /* DIAGNOSTIC */
/* CLI() with preemption disabled */
#define CLI2(temp_reg, temp_reg2) \
	movq CPUVAR(CURLWP),%r ## temp_reg2 ;			\
	incl L_NOPREEMPT(%r ## temp_reg2);			\
	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);		\
	decl L_NOPREEMPT(%r ## temp_reg2);

#else /* XENPV */
#define	XEN_ONLY2(x,y)
#define	NOT_XEN(x)	x
#define CLI(temp_reg) cli
#define CLI2(temp_reg, temp_reg2) cli
#define STI(temp_reg) sti
#endif	/* XENPV */
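/*
 * Usage note: CLI/STI/CLI2 take register names without the "%r"
 * prefix; the token pasting above turns e.g. CLI(ax) into accesses
 * through %rax (and %eax for the 32-bit loads). On native kernels
 * they collapse to plain cli/sti.
 */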

#define HP_NAME_CLAC		1
#define HP_NAME_STAC		2
#define HP_NAME_NOLOCK		3
#define HP_NAME_RETFENCE	4
#define HP_NAME_SVS_ENTER	5
#define HP_NAME_SVS_LEAVE	6
#define HP_NAME_SVS_ENTER_ALT	7
#define HP_NAME_SVS_LEAVE_ALT	8
#define HP_NAME_IBRS_ENTER	9
#define HP_NAME_IBRS_LEAVE	10
#define HP_NAME_SVS_ENTER_NMI	11
#define HP_NAME_SVS_LEAVE_NMI	12
#define HP_NAME_MDS_LEAVE	13

#define HOTPATCH(name, size) \
123:						; \
	.pushsection	.rodata.hotpatch, "a"	; \
	.byte		name			; \
	.byte		size			; \
	.quad		123b			; \
	.popsection
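/*
 * Each HOTPATCH() use emits one record into the .rodata.hotpatch
 * section. Seen from C, a record is roughly (a sketch; field names
 * are illustrative, the authoritative layout lives in the x86
 * hotpatch code):
 *
 *	struct hotpatch {
 *		uint8_t  name;		// HP_NAME_* selector
 *		uint8_t  size;		// bytes available for patching
 *		uint64_t addr;		// 123b: start of patchable code
 *	} __packed;
 *
 * At boot, the kernel walks these records and overwrites the
 * placeholder bytes at addr with the selected replacement sequence.
 */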

#define SMAP_ENABLE \
	HOTPATCH(HP_NAME_CLAC, 3)		; \
	.byte 0x0F, 0x1F, 0x00

#define SMAP_DISABLE \
	HOTPATCH(HP_NAME_STAC, 3)		; \
	.byte 0x0F, 0x1F, 0x00
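/*
 * Illustrative note: 0x0F,0x1F,0x00 is a 3-byte NOP. On CPUs with
 * SMAP, the hotpatch code replaces it with clac (0x0F,0x01,0xCA) or
 * stac (0x0F,0x01,0xCB), which are also 3 bytes long.
 */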
/*
 * IBRS
 */

#define IBRS_ENTER_BYTES	12
#define IBRS_ENTER \
	HOTPATCH(HP_NAME_IBRS_ENTER, IBRS_ENTER_BYTES)		; \
	NOIBRS_ENTER
#define NOIBRS_ENTER \
	.byte 0xEB, (IBRS_ENTER_BYTES-2)	/* jmp */	; \
	.fill	(IBRS_ENTER_BYTES-2),1,0xCC
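/*
 * Illustrative note: 0xEB,n encodes "jmp .+2+n", i.e. a short jump
 * over the n bytes of 0xCC (int3) filler. The default is therefore a
 * no-op; on affected CPUs the hotpatch code overwrites the whole
 * region with the real mitigation sequence. The IBRS_LEAVE, MDS and
 * SVS macros below use the same jump-over-filler pattern.
 */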

#define IBRS_LEAVE_BYTES	12
#define IBRS_LEAVE \
	HOTPATCH(HP_NAME_IBRS_LEAVE, IBRS_LEAVE_BYTES)		; \
	NOIBRS_LEAVE
#define NOIBRS_LEAVE \
	.byte 0xEB, (IBRS_LEAVE_BYTES-2)	/* jmp */	; \
	.fill	(IBRS_LEAVE_BYTES-2),1,0xCC

/*
 * MDS
 */

#define MDS_LEAVE_BYTES	10
#define MDS_LEAVE \
	HOTPATCH(HP_NAME_MDS_LEAVE, MDS_LEAVE_BYTES)		; \
	NOMDS_LEAVE
#define NOMDS_LEAVE \
	.byte 0xEB, (MDS_LEAVE_BYTES-2)	/* jmp */		; \
	.fill	(MDS_LEAVE_BYTES-2),1,0xCC

#define	SWAPGS	NOT_XEN(swapgs)

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)

#define	INTR_RESTORE_GPRS \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RAX(%rsp),%rax
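/*
 * The TF_* constants are byte offsets into struct trapframe; like the
 * other assembly-visible offsets used in this file, they are generated
 * into assym.h at build time.
 */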

#define TEXT_USER_BEGIN	.pushsection	.text.user, "ax"
#define TEXT_USER_END	.popsection

#ifdef SVS

/* XXX: put this somewhere else */
#define SVS_UTLS		0xffffff0000000000 /* PMAP_PCPU_BASE */
#define UTLS_KPDIRPA		0
#define UTLS_SCRATCH		8
#define UTLS_RSP0		16
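/*
 * A C-level view of the per-CPU UTLS area mapped at SVS_UTLS (a sketch
 * only; the struct name is illustrative):
 *
 *	struct utls {
 *		uint64_t kpdirpa;	// UTLS_KPDIRPA: kernel PML4 PA
 *		uint64_t scratch;	// UTLS_SCRATCH: scratch slot
 *		uint64_t rsp0;		// UTLS_RSP0: kernel stack top
 *	};
 */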

#define SVS_ENTER_BYTES	22
#define NOSVS_ENTER \
	.byte 0xEB, (SVS_ENTER_BYTES-2)	/* jmp */	; \
	.fill	(SVS_ENTER_BYTES-2),1,0xCC
#define SVS_ENTER \
	HOTPATCH(HP_NAME_SVS_ENTER, SVS_ENTER_BYTES)	; \
	NOSVS_ENTER

#define SVS_LEAVE_BYTES	21
#define NOSVS_LEAVE \
	.byte 0xEB, (SVS_LEAVE_BYTES-2)	/* jmp */	; \
	.fill	(SVS_LEAVE_BYTES-2),1,0xCC
#define SVS_LEAVE \
	HOTPATCH(HP_NAME_SVS_LEAVE, SVS_LEAVE_BYTES)	; \
	NOSVS_LEAVE

#define SVS_ENTER_ALT_BYTES	23
#define NOSVS_ENTER_ALTSTACK \
	.byte 0xEB, (SVS_ENTER_ALT_BYTES-2)	/* jmp */	; \
	.fill	(SVS_ENTER_ALT_BYTES-2),1,0xCC
#define SVS_ENTER_ALTSTACK \
	HOTPATCH(HP_NAME_SVS_ENTER_ALT, SVS_ENTER_ALT_BYTES)	; \
	NOSVS_ENTER_ALTSTACK

#define SVS_LEAVE_ALT_BYTES	22
#define NOSVS_LEAVE_ALTSTACK \
	.byte 0xEB, (SVS_LEAVE_ALT_BYTES-2)	/* jmp */	; \
	.fill	(SVS_LEAVE_ALT_BYTES-2),1,0xCC
#define SVS_LEAVE_ALTSTACK \
	HOTPATCH(HP_NAME_SVS_LEAVE_ALT, SVS_LEAVE_ALT_BYTES)	; \
	NOSVS_LEAVE_ALTSTACK

#define SVS_ENTER_NMI_BYTES	22
#define NOSVS_ENTER_NMI \
	.byte 0xEB, (SVS_ENTER_NMI_BYTES-2)	/* jmp */	; \
	.fill	(SVS_ENTER_NMI_BYTES-2),1,0xCC
#define SVS_ENTER_NMI \
	HOTPATCH(HP_NAME_SVS_ENTER_NMI, SVS_ENTER_NMI_BYTES)	; \
	NOSVS_ENTER_NMI

#define SVS_LEAVE_NMI_BYTES	11
#define NOSVS_LEAVE_NMI \
	.byte 0xEB, (SVS_LEAVE_NMI_BYTES-2)	/* jmp */	; \
	.fill	(SVS_LEAVE_NMI_BYTES-2),1,0xCC
#define SVS_LEAVE_NMI \
	HOTPATCH(HP_NAME_SVS_LEAVE_NMI, SVS_LEAVE_NMI_BYTES)	; \
	NOSVS_LEAVE_NMI

#else
#define SVS_ENTER	/* nothing */
#define SVS_ENTER_NMI	/* nothing */
#define SVS_LEAVE	/* nothing */
#define SVS_LEAVE_NMI	/* nothing */
#define SVS_ENTER_ALTSTACK	/* nothing */
#define SVS_LEAVE_ALTSTACK	/* nothing */
#endif

#ifdef KMSAN
/* XXX this belongs somewhere else. */
#define KMSAN_ENTER	\
	movq	%rsp,%rdi		; \
	movq	$TF_REGSIZE+16+40,%rsi	; \
	xorq	%rdx,%rdx		; \
	callq	kmsan_mark		; \
	callq	kmsan_intr_enter
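/*
 * Reading of the above (a sketch; see the KMSAN code for the
 * authoritative contract): %rdi = base (%rsp), %rsi = size covering
 * the saved registers (TF_REGSIZE) plus the trap number/error code
 * words (16) and the hardware frame (40), %rdx = 0 marking the range
 * initialized; then KMSAN is told an interrupt context begins.
 */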
#define KMSAN_LEAVE	\
	pushq	%rbp			; \
	movq	%rsp,%rbp		; \
	callq	kmsan_intr_leave	; \
	popq	%rbp
#define KMSAN_INIT_ARG(sz)	\
	pushq	%rax			; \
	pushq	%rcx			; \
	pushq	%rdx			; \
	pushq	%rsi			; \
	pushq	%rdi			; \
	pushq	%r8			; \
	pushq	%r9			; \
	pushq	%r10			; \
	pushq	%r11			; \
	movq	$sz,%rdi		; \
	callq	_C_LABEL(kmsan_init_arg); \
	popq	%r11			; \
	popq	%r10			; \
	popq	%r9			; \
	popq	%r8			; \
	popq	%rdi			; \
	popq	%rsi			; \
	popq	%rdx			; \
	popq	%rcx			; \
	popq	%rax
#define KMSAN_INIT_RET(sz)	\
	pushq	%rax			; \
	pushq	%rcx			; \
	pushq	%rdx			; \
	pushq	%rsi			; \
	pushq	%rdi			; \
	pushq	%r8			; \
	pushq	%r9			; \
	pushq	%r10			; \
	pushq	%r11			; \
	movq	$sz,%rdi		; \
	callq	_C_LABEL(kmsan_init_ret); \
	popq	%r11			; \
	popq	%r10			; \
	popq	%r9			; \
	popq	%r8			; \
	popq	%rdi			; \
	popq	%rsi			; \
	popq	%rdx			; \
	popq	%rcx			; \
	popq	%rax
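/*
 * KMSAN_INIT_ARG/KMSAN_INIT_RET save and restore every caller-saved
 * GPR, so they can be wrapped around an arbitrary call without
 * disturbing its arguments, e.g. (illustrative; somefunc is a
 * placeholder):
 *
 *	KMSAN_INIT_ARG(8)
 *	call	_C_LABEL(somefunc)
 *	KMSAN_INIT_RET(8)
 */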
#else
#define KMSAN_ENTER		/* nothing */
#define KMSAN_LEAVE		/* nothing */
#define KMSAN_INIT_ARG(sz)	/* nothing */
#define KMSAN_INIT_RET(sz)	/* nothing */
#endif

#ifdef KCOV
#define KCOV_DISABLE			\
	incl	CPUVAR(IDEPTH)
#define KCOV_ENABLE			\
	decl	CPUVAR(IDEPTH)
#else
#define KCOV_DISABLE		/* nothing */
#define KCOV_ENABLE		/* nothing */
#endif

#define	INTRENTRY \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	cld				; \
	SMAP_ENABLE			; \
	testb	$SEL_UPL,TF_CS(%rsp)	; \
	je	98f			; \
	SWAPGS				; \
	IBRS_ENTER			; \
	SVS_ENTER			; \
	movw	%gs,TF_GS(%rsp)		; \
	movw	%fs,TF_FS(%rsp)		; \
	movw	%es,TF_ES(%rsp)		; \
	movw	%ds,TF_DS(%rsp)		; \
98:	KMSAN_ENTER
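/*
 * Sketch of a typical trap stub built on these macros (illustrative
 * only; T_SOMETRAP is a placeholder and the real vectors live in the
 * trap/vector sources):
 *
 *	IDTVEC(some_trap)
 *		pushq	$0		// fake error code
 *		pushq	$T_SOMETRAP	// trap number
 *		INTRENTRY
 *		... handle the trap ...
 *		INTRFASTEXIT
 *	IDTVEC_END(some_trap)
 */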

#define INTRFASTEXIT \
	jmp	intrfastexit

#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	pushq	$GSEL(GCODE_SEL,SEL_KPL); \
/* XEN: we must fix up %cs, since even kernel mode runs at CPL 3 */ \
	XEN_ONLY2(andb	$0xfc,(%rsp);)	  \
	pushq	%r13			;
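/*
 * INTR_RECURSE_HWFRAME builds a fake hardware interrupt frame on the
 * current stack: %ss, the saved %rsp, %rflags, a kernel %cs (with the
 * RPL bits masked off on XENPV so the frame tests as kernel mode), and
 * finally a new %rip taken from %r13, which the caller must have
 * loaded beforehand.
 */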

#define INTR_RECURSE_ENTRY \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	cld				; \
	KMSAN_ENTER

#define	CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

#define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)
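/*
 * CHECK_DEFERRED_SWITCH and CHECK_ASTPENDING only set the condition
 * codes; callers follow them with a conditional branch, e.g.
 * (illustrative):
 *
 *	CHECK_ASTPENDING(%r14)
 *	jz	9f
 *	CLEAR_ASTPENDING(%r14)
 *	... deliver the AST ...
 * 9:
 */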

/*
 * If the FPU state is not in the CPU, restore it. Executed with interrupts
 * disabled.
 *
 *     %r14 is curlwp, must not be modified
 *     %rbx must not be modified
 */
#define HANDLE_DEFERRED_FPU	\
	testl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%r14)	; \
	jnz	1f					; \
	call	_C_LABEL(fpu_handle_deferred)		; \
	orl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%r14)	; \
1:

#endif /* _AMD64_MACHINE_FRAMEASM_H */
    387 #endif /* _AMD64_MACHINE_FRAMEASM_H */
    388