/* profile.h revision 1.35 (scraped from NetBSD source browser, sys/arch/i386/include) */
      1 /*	$NetBSD: profile.h,v 1.35 2017/05/31 01:50:19 christos Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1992, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  *	@(#)profile.h	8.1 (Berkeley) 6/11/93
     32  */
     33 
#ifdef _KERNEL
/* Kernel builds pull in the cpufunc helpers alongside the PSL accessors below. */
#include <machine/cpufunc.h>
#endif

/*
 * Declaration shape for the MI _mcount() body: it is generated as a
 * static inline so each object file carries its own copy and the call
 * from the asm wrapper below stays cheap.
 */
#define	_MCOUNT_DECL static __inline void _mcount

/*
 * On ELF the real profiling entry point is "__mcount", with a weak
 * alias "mcount" kept for backward compatibility; a.out uses plain
 * "mcount" and needs no alias.
 */
#ifdef __ELF__
#define MCOUNT_ENTRY	"__mcount"
#define MCOUNT_COMPAT	__weak_alias(mcount, __mcount)
#else
#define MCOUNT_ENTRY	"mcount"
#define MCOUNT_COMPAT	/* nothing */
#endif

/*
 * In threaded (non-kernel) programs, return from mcount immediately
 * unless profiling is actually switched on, keeping the common path
 * cheap.  NOTE(review): _gmonparam and GMON_PROF_ON are assumed to be
 * declared by <sys/gmon.h>, included before this macro expands —
 * confirm against the consumer (lib/libc/gmon/mcount.c).
 */
#if defined(_REENTRANT) && !defined(_KERNEL)
#define MCOUNT_ACTIVE	if (_gmonparam.state != GMON_PROF_ON) return
#else
#define MCOUNT_ACTIVE
#endif
     53 
/*
 * MCOUNT expands to the machine-dependent mcount() entry point, which
 * the compiler makes every profiled function call.  It:
 *
 *  1. preserves the caller-saved registers %eax/%ecx/%edx in locals
 *     (the inline asm below may otherwise clobber live argument /
 *     return values of the instrumented caller);
 *  2. fetches selfpc = 4(%ebp), mcount's own return address, i.e. the
 *     pc inside the profiled function;
 *  3. fetches frompcindex = 4(*(%ebp)), the return address one frame
 *     up, i.e. the pc in the profiled function's caller;
 *  4. hands both to the common _mcount(frompc, selfpc);
 *  5. restores %edx/%ecx/%eax before returning.
 *
 * NOTE(review): steps 2-3 rely on code being compiled with frame
 * pointers so the %ebp chain is valid — confirm the build does not use
 * -fomit-frame-pointer for profiled objects.  The function is marked
 * __no_instrument_function__ so it is not itself profiled (which would
 * recurse).
 */
#define	MCOUNT \
MCOUNT_COMPAT								\
extern void mcount(void) __asm(MCOUNT_ENTRY)				\
	__attribute__((__no_instrument_function__));			\
void									\
mcount(void)								\
{									\
	int selfpc, frompcindex;					\
	int eax, ecx, edx;						\
									\
	MCOUNT_ACTIVE;							\
	__asm volatile("movl %%eax,%0" : "=g" (eax));			\
	__asm volatile("movl %%ecx,%0" : "=g" (ecx));			\
	__asm volatile("movl %%edx,%0" : "=g" (edx));			\
	/*								\
	 * find the return address for mcount,				\
	 * and the return address for mcount's caller.			\
	 *								\
	 * selfpc = pc pushed by mcount call				\
	 */								\
	__asm volatile("movl 4(%%ebp),%0" : "=r" (selfpc));		\
	/*								\
	 * frompcindex = pc pushed by call into self.			\
	 */								\
	__asm volatile("movl (%%ebp),%0;movl 4(%0),%0"			\
	    : "=r" (frompcindex));					\
	_mcount((u_long)frompcindex, (u_long)selfpc);			\
									\
	__asm volatile("movl %0,%%edx" : : "g" (edx));			\
	__asm volatile("movl %0,%%ecx" : : "g" (ecx));			\
	__asm volatile("movl %0,%%eax" : : "g" (eax));			\
}
     86 
#ifdef _KERNEL
/* Disable maskable interrupts (i386 CLI instruction). */
static inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}

/*
 * Read the processor status longword (EFLAGS) by pushing it onto the
 * stack and popping it into a register.
 */
static inline u_long
mcount_read_psl(void)
{
	u_long	ef;

	__asm volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

/* Restore a previously saved EFLAGS value (re-enabling interrupts if
 * they were enabled when it was saved). */
static inline void
mcount_write_psl(u_long ef)
{
	__asm volatile("pushl %0; popfl" : : "r" (ef));
}

/*
 * MCOUNT_ENTER / MCOUNT_EXIT bracket the kernel _mcount() body: save
 * the current EFLAGS into the caller-provided variable 's' and disable
 * interrupts, then restore the saved state on exit, so updates to the
 * profiling buffers are atomic with respect to interrupts.
 * NOTE(review): 's' is expected to be an int declared by the caller
 * (the MI mcount code) — confirm against sys/lib/libkern/mcount.c.
 */
#define MCOUNT_ENTER	\
	do { s = (int)mcount_read_psl(); mcount_disable_intr(); } while (0)
#define MCOUNT_EXIT	do { mcount_write_psl(s); } while (0)

#endif /* _KERNEL */
    114