/*	$NetBSD: profile.h,v 1.21 2021/11/02 11:26:03 ryo Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)profile.h	8.1 (Berkeley) 6/11/93
 */

#ifdef __x86_64__

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif

#define	_MCOUNT_DECL void _mcount

#define EPROL_EXPORT	__asm(".globl _eprol")

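/*
 * In a PIC object the reference to _mcount must go through the PLT;
 * __MCPLT supplies the "@PLT" suffix in that case.  A sketch of the
 * call instruction the MCOUNT macro below ends up emitting
 * (illustrative only):
 *
 *	call	_mcount@PLT		(__PIC__ defined)
 *	call	_mcount			(otherwise)
 */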
#ifdef __PIC__
#define __MCPLT	"@PLT"
#else
#define __MCPLT
#endif

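/*
 * The compiler's -pg instrumentation calls __mcount on entry to every
 * instrumented function, so the stub below must preserve every register
 * that may still carry an argument: %rdi, %rsi, %rdx, %rcx, %r8, %r9,
 * and %rax (in a varargs call, %al holds the number of vector registers
 * used).  It then digs the two program counters out of the frame chain:
 * frompc, the instrumented function's own return address, reached
 * through the saved frame pointer, and selfpc, the stub's return
 * address inside the instrumented function.
 */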
#define	MCOUNT						\
__weak_alias(mcount, __mcount)				\
__asm(" .globl __mcount		\n"			\
"	.type __mcount,@function\n"			\
"__mcount:			\n"			\
"	pushq	%rbp		\n"			\
"	movq	%rsp,%rbp	\n"			\
"	subq	$56,%rsp	\n"			\
"	movq	%rdi,0(%rsp)	\n"			\
"	movq	%rsi,8(%rsp)	\n"			\
"	movq	%rdx,16(%rsp)	\n"			\
"	movq	%rcx,24(%rsp)	\n"			\
"	movq	%r8,32(%rsp)	\n"			\
"	movq	%r9,40(%rsp)	\n"			\
"	movq	%rax,48(%rsp)	\n"			\
"	movq	0(%rbp),%r11	\n"			\
"	movq	8(%r11),%rdi	\n"			\
"	movq	8(%rbp),%rsi	\n"			\
"	call	_mcount" __MCPLT "\n"			\
"	movq	0(%rsp),%rdi	\n"			\
"	movq	8(%rsp),%rsi	\n"			\
"	movq	16(%rsp),%rdx	\n"			\
"	movq	24(%rsp),%rcx	\n"			\
"	movq	32(%rsp),%r8	\n"			\
"	movq	40(%rsp),%r9	\n"			\
"	movq	48(%rsp),%rax	\n"			\
"	leave			\n"			\
"	ret			\n"			\
"	.size __mcount,.-__mcount");
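/*
 * A sketch of how the machine-independent gmon code is expected to glue
 * these macros together (illustrative; the real consumer is mcount.c):
 *
 *	_MCOUNT_DECL(u_long frompc, u_long selfpc)
 *	{
 *		... record the frompc -> selfpc arc ...
 *	}
 *	MCOUNT
 *
 * _MCOUNT_DECL provides the declarator for the C worker _mcount(), and
 * MCOUNT emits the assembly entry point __mcount that the instrumented
 * code actually calls.
 */
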
#ifdef _KERNEL
#ifdef XENPV
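/*
 * Under Xen PV there is no real interrupt flag to toggle: the
 * equivalent of the PSL is the event-channel upcall mask in the
 * per-CPU shared vcpu info, so the helpers below read and write that
 * field directly instead of using cli/pushfq/popfq.
 */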
static inline __always_inline void
mcount_disable_intr(void)
{
	/* should be __cli(), but that calls x86_lfence(), which calls mcount */
	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
	__asm volatile("lfence" ::: "memory"); /* x86_lfence() */
}

static inline __always_inline u_long
mcount_read_psl(void)
{
	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
}

static inline __always_inline void
mcount_write_psl(u_long psl)
{
	curcpu()->ci_vcpu->evtchn_upcall_mask = psl;
	/* can't call x86_lfence() because it calls mcount() */
	__asm volatile("lfence" ::: "memory"); /* x86_lfence() */
	/* XXX can't call hypervisor_force_callback() because we're in mcount */
}

#else /* XENPV */
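/*
 * On bare metal the PSL really is %rflags: interrupts are masked with
 * cli, and the flags word is read and restored through the stack with
 * pushfq/popfq.
 */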
static inline __always_inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}

static inline __always_inline u_long
mcount_read_psl(void)
{
	u_long	ef;

	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static inline __always_inline void
mcount_write_psl(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}

#endif /* XENPV */

#define MCOUNT_ENTER	\
	do { s = (int)mcount_read_psl(); mcount_disable_intr(); } while (0)
#define MCOUNT_EXIT	do { mcount_write_psl(s); } while (0)
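
/*
 * A sketch of the locking pattern mcount.c is expected to use; note
 * that both macros assume a variable named "s" in the enclosing scope
 * (illustrative only):
 *
 *	int s;
 *
 *	MCOUNT_ENTER;
 *	... update the profiling buffers; must not be reentered ...
 *	MCOUNT_EXIT;
 */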

#endif /* _KERNEL */

#else	/*	__x86_64__	*/

#include <i386/profile.h>

#endif	/*	__x86_64__	*/