/*	$NetBSD: profile.h,v 1.34 2016/01/10 09:04:32 ryo Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)profile.h	8.1 (Berkeley) 6/11/93
 */

#ifdef _KERNEL
#include <machine/cpufunc.h>
#endif

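/*
 * _MCOUNT_DECL provides the declaration that the machine-independent
 * profiling code (mcount.c) uses to define the MI _mcount() routine;
 * MCOUNT below supplies the machine-dependent entry point that calls it.
 */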
#define	_MCOUNT_DECL static __inline void _mcount

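/*
 * MCOUNT_ENTRY is the assembler-level name of the profiling entry
 * point: on ELF the hook is reached as __mcount and a weak `mcount'
 * alias is kept for compatibility; on a.out the entry is plain mcount.
 */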
#ifdef __ELF__
#define MCOUNT_ENTRY	"__mcount"
#define MCOUNT_COMPAT	__weak_alias(mcount, __mcount)
#else
#define MCOUNT_ENTRY	"mcount"
#define MCOUNT_COMPAT	/* nothing */
#endif

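/*
 * MCOUNT expands to the profiling entry point itself.  It recovers the
 * return address pushed by the mcount call (selfpc) and the return
 * address of the instrumented function's caller (frompcindex) from the
 * frame-pointer chain, passes them to _mcount(), and saves/restores the
 * call-clobbered registers %eax, %ecx and %edx so that live values in
 * the instrumented function are not disturbed.
 */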
#define	MCOUNT \
MCOUNT_COMPAT								\
extern void mcount(void) __asm(MCOUNT_ENTRY)				\
	__attribute__((__no_instrument_function__));			\
void									\
mcount(void)								\
{									\
	int selfpc, frompcindex;					\
	int eax, ecx, edx;						\
									\
	__asm volatile("movl %%eax,%0" : "=g" (eax));			\
	__asm volatile("movl %%ecx,%0" : "=g" (ecx));			\
	__asm volatile("movl %%edx,%0" : "=g" (edx));			\
	/*								\
	 * find the return address for mcount,				\
	 * and the return address for mcount's caller.			\
	 *								\
	 * selfpc = pc pushed by mcount call				\
	 */								\
	__asm volatile("movl 4(%%ebp),%0" : "=r" (selfpc));		\
	/*								\
	 * frompcindex = pc pushed by call into self.			\
	 */								\
	__asm volatile("movl (%%ebp),%0;movl 4(%0),%0"			\
	    : "=r" (frompcindex));					\
	_mcount((u_long)frompcindex, (u_long)selfpc);			\
									\
	__asm volatile("movl %0,%%edx" : : "g" (edx));			\
	__asm volatile("movl %0,%%ecx" : : "g" (ecx));			\
	__asm volatile("movl %0,%%eax" : : "g" (eax));			\
}

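/*
 * In the kernel, the profiling buffers are updated with interrupts
 * disabled: MCOUNT_ENTER saves the current eflags in the caller-provided
 * local `s' and executes cli; MCOUNT_EXIT restores the saved eflags.
 */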
#ifdef _KERNEL
static inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}

static inline u_long
mcount_read_psl(void)
{
	u_long	ef;

	__asm volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

static inline void
mcount_write_psl(u_long ef)
{
	__asm volatile("pushl %0; popfl" : : "r" (ef));
}

#define MCOUNT_ENTER	\
	do { s = (int)mcount_read_psl(); mcount_disable_intr(); } while (0)
#define MCOUNT_EXIT	do { mcount_write_psl(s); } while (0)

#endif /* _KERNEL */