Home | History | Annotate | Line # | Download | only in include
profile.h revision 1.30
      1 /*	$NetBSD: profile.h,v 1.30 2007/09/26 21:05:21 ad Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1992, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  *	@(#)profile.h	8.1 (Berkeley) 6/11/93
     32  */
     33 
     34 #ifdef _KERNEL_OPT
     35 #include "opt_multiprocessor.h"
     36 #endif
     37 
     38 #ifdef _KERNEL
     39 #include <machine/cpufunc.h>
     40 #include <machine/atomic.h>
     41 #endif
     42 
/*
 * _MCOUNT_DECL gives the linkage/signature used to define the common
 * C-level _mcount() routine (presumably expanded by the MI profiling
 * code — confirm against sys/lib/libkern or libc mcount.c).  It is
 * static inline so each expansion is local to its translation unit.
 */
#define	_MCOUNT_DECL static __inline void _mcount

/*
 * Assembler-visible name of the profiling entry point: "__mcount" on
 * ELF (with a weak "mcount" alias for compatibility), plain "mcount"
 * on the older a.out format, which needs no alias.
 */
#ifdef __ELF__
#define MCOUNT_ENTRY	"__mcount"
#define MCOUNT_COMPAT	__weak_alias(mcount, __mcount)
#else
#define MCOUNT_ENTRY	"mcount"
#define MCOUNT_COMPAT	/* nothing */
#endif
     52 
/*
 * MCOUNT expands to the machine-dependent profiling entry point.
 * It digs the two program counters out of the stack frame — its own
 * return address at 4(%ebp) and the caller's return address in the
 * caller's frame — and passes them to _mcount().
 *
 * NOTE(review): %eax/%ecx/%edx are saved and restored by hand,
 * presumably because the mcount call is injected before the caller
 * has finished using its caller-saved registers — confirm against
 * the compiler's -pg calling convention.  This code requires a
 * frame pointer (%ebp chain), so it breaks under
 * -fomit-frame-pointer.
 */
#define	MCOUNT \
MCOUNT_COMPAT								\
extern void mcount(void) __asm(MCOUNT_ENTRY)				\
	__attribute__((__no_instrument_function__));			\
void									\
mcount(void)								\
{									\
	int selfpc, frompcindex;					\
	int eax, ecx, edx;						\
									\
	__asm volatile("movl %%eax,%0" : "=g" (eax));			\
	__asm volatile("movl %%ecx,%0" : "=g" (ecx));			\
	__asm volatile("movl %%edx,%0" : "=g" (edx));			\
	/*								\
	 * find the return address for mcount,				\
	 * and the return address for mcount's caller.			\
	 *								\
	 * selfpc = pc pushed by mcount call				\
	 */								\
	__asm volatile("movl 4(%%ebp),%0" : "=r" (selfpc));		\
	/*								\
	 * frompcindex = pc pushed by call into self.			\
	 */								\
	__asm volatile("movl (%%ebp),%0;movl 4(%0),%0"			\
	    : "=r" (frompcindex));					\
	_mcount((u_long)frompcindex, (u_long)selfpc);			\
									\
	__asm volatile("movl %0,%%edx" : : "g" (edx));			\
	__asm volatile("movl %0,%%ecx" : : "g" (ecx));			\
	__asm volatile("movl %0,%%eax" : : "g" (eax));			\
}
     84 
     85 #ifdef _KERNEL
     86 #ifdef MULTIPROCESSOR
     87 __cpu_simple_lock_t __mcount_lock;
     88 
     89 static inline void
     90 MCOUNT_ENTER_MP(void)
     91 {
     92 	while (x86_atomic_testset_b(&__mcount_lock, __SIMPLELOCK_LOCKED)
     93 	    != __SIMPLELOCK_UNLOCKED) {
     94 		while (__mcount_lock == __SIMPLELOCK_LOCKED)
     95 			;
     96 	}
     97 	__insn_barrier();
     98 }
     99 
    100 static inline void
    101 MCOUNT_EXIT_MP(void)
    102 {
    103 	__insn_barrier();
    104 	__mcount_lock = __SIMPLELOCK_UNLOCKED;
    105 }
    106 #else
    107 #define MCOUNT_ENTER_MP()
    108 #define MCOUNT_EXIT_MP()
    109 #endif
    110 
/*
 * MCOUNT_ENTER/MCOUNT_EXIT bracket the profiling critical section:
 * save the interrupt state, disable interrupts, and (on MP) take the
 * mcount spin lock; EXIT undoes this in reverse order.
 *
 * NOTE(review): both macros splice into the calling function and use
 * a local variable `s`, which the caller must have declared as an
 * int — presumably the MI mcount code does so; confirm there.
 */
#define	MCOUNT_ENTER							\
	s = (int)x86_read_psl();					\
	x86_disable_intr();						\
	MCOUNT_ENTER_MP();

#define	MCOUNT_EXIT							\
	MCOUNT_EXIT_MP();						\
	x86_write_psl(s);
    119 
    120 #endif /* _KERNEL */
    121