/*	$NetBSD: profile.h,v 1.38 2021/11/02 11:26:04 ryo Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)profile.h	8.1 (Berkeley) 6/11/93
 */

#ifdef _KERNEL
#include <machine/cpufunc.h>
#endif

#define	_MCOUNT_DECL static __inline void _mcount

#ifdef __ELF__
#define MCOUNT_ENTRY	"__mcount"
#define MCOUNT_COMPAT	__weak_alias(mcount, __mcount)
#else
#define MCOUNT_ENTRY	"mcount"
#define MCOUNT_COMPAT	/* nothing */
#endif

#if defined(_REENTRANT) && !defined(_KERNEL)
#define MCOUNT_ACTIVE	if (_gmonparam.state != GMON_PROF_ON) return
#else
#define MCOUNT_ACTIVE
#endif

#define	MCOUNT \
MCOUNT_COMPAT								\
extern void mcount(void) __asm(MCOUNT_ENTRY)				\
	__attribute__((__no_instrument_function__));			\
void									\
mcount(void)								\
{									\
	int selfpc, frompcindex;					\
	int eax, ecx, edx;						\
									\
	MCOUNT_ACTIVE;							\
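	/*								\
	 * save eax, ecx and edx: the instrumented caller's values	\
	 * must survive the C profiling code below			\
	 */								\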
	__asm volatile("movl %%eax,%0" : "=g" (eax));			\
	__asm volatile("movl %%ecx,%0" : "=g" (ecx));			\
	__asm volatile("movl %%edx,%0" : "=g" (edx));			\
	/*								\
	 * find the return address for mcount,				\
	 * and the return address for mcount's caller.			\
	 *								\
	 * selfpc = pc pushed by mcount call				\
	 */								\
	selfpc = (int)__builtin_return_address(0);			\
	/*								\
	 * frompcindex = stack frame of caller, assuming frame pointer	\
	 */								\
	frompcindex = ((int *)__builtin_frame_address(1))[1];		\
	_mcount((u_long)frompcindex, (u_long)selfpc);			\
									\
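	/*								\
	 * restore the registers saved above before returning to	\
	 * the instrumented caller					\
	 */								\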
	__asm volatile("movl %0,%%edx" : : "g" (edx));			\
	__asm volatile("movl %0,%%ecx" : : "g" (ecx));			\
	__asm volatile("movl %0,%%eax" : : "g" (eax));			\
}
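
/*
 * Usage sketch (assumed consumer): the machine-independent gmon code
 * is expected to expand the macros above roughly as
 *
 *	_MCOUNT_DECL(u_long frompc, u_long selfpc)
 *	{
 *		... record the frompc -> selfpc arc ...
 *	}
 *	MCOUNT
 *
 * _MCOUNT_DECL supplies the "static __inline void _mcount" declaration
 * and MCOUNT emits the register-preserving mcount() entry stub above,
 * which recovers (frompc, selfpc) and hands them to _mcount().
 */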

#ifdef _KERNEL
static inline __always_inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}

static inline __always_inline u_long
mcount_read_psl(void)
{
	u_long	ef;

	__asm volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

static inline __always_inline void
mcount_write_psl(u_long ef)
{
	__asm volatile("pushl %0; popfl" : : "r" (ef));
}

#define MCOUNT_ENTER	\
	do { s = (int)mcount_read_psl(); mcount_disable_intr(); } while (0)
#define MCOUNT_EXIT	do { mcount_write_psl(s); } while (0)
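
/*
 * Usage sketch (kernel case; "s" is the int flag word expected by
 * MCOUNT_ENTER and MCOUNT_EXIT above): the machine-independent
 * _mcount() is expected to bracket its buffer updates roughly as
 *
 *	int s;
 *
 *	MCOUNT_ENTER;		save the PSL, then cli
 *	... update the profiling buffers ...
 *	MCOUNT_EXIT;		restore the saved PSL
 *
 * so that a profiling clock interrupt cannot run while the buffers
 * are being modified.
 */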

#endif /* _KERNEL */