/* profile.h revision 1.3.16.3 (NetBSD, arch/amd64/include) */
      1 /*	$NetBSD: profile.h,v 1.3.16.3 2007/10/27 11:25:11 yamt Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1992, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  *	@(#)profile.h	8.1 (Berkeley) 6/11/93
     32  */
     33 
     34 #ifdef _KERNEL_OPT
     35 #include "opt_multiprocessor.h"
     36 #endif
     37 
     38 #include <machine/atomic.h>
     39 
/*
 * _MCOUNT_DECL supplies the declaration prefix used by the
 * machine-independent mcount body; on this port _mcount is a plain
 * (non-static) function so the MCOUNT assembly stub below can call it.
 */
#define	_MCOUNT_DECL void _mcount

/*
 * Make the _eprol symbol globally visible (presumably the start-of-
 * profiled-text marker used by the profiling startup code — confirm
 * against gcrt0/MI mcount).
 */
#define EPROL_EXPORT	__asm(".globl _eprol")

/*
 * When building position-independent code, the stub must call _mcount
 * through the PLT; otherwise it can call it directly.
 */
#ifdef PIC
#define __MCPLT	"@PLT"
#else
#define __MCPLT
#endif
     49 
/*
 * MCOUNT: out-of-line assembly stub that compiler-inserted profiling
 * hooks jump to.  "mcount" is provided as a weak alias for __mcount.
 *
 * The stub saves the six integer argument registers (%rdi, %rsi, %rdx,
 * %rcx, %r8, %r9) plus %rax in a 56-byte stack area so the profiled
 * function's arguments survive the call, then invokes the C helper as
 * _mcount(frompc, selfpc):
 *   - 0(%rbp) is the caller's saved frame pointer; 8 bytes above it
 *     (8(%r11)) sits the caller's return address, i.e. the call site
 *     in the caller's caller (frompc);
 *   - 8(%rbp) is __mcount's own return address, i.e. a pc inside the
 *     profiled function (selfpc).
 * All saved registers are restored before returning, so the profiled
 * function observes no register clobbers.
 */
#define	MCOUNT						\
__weak_alias(mcount, __mcount)				\
__asm(" .globl __mcount		\n"			\
"	.type __mcount,@function\n"			\
"__mcount:			\n"			\
"	pushq	%rbp		\n"			\
"	movq	%rsp,%rbp	\n"			\
"	subq	$56,%rsp	\n"			\
"	movq	%rdi,0(%rsp)	\n"			\
"	movq	%rsi,8(%rsp)	\n"			\
"	movq	%rdx,16(%rsp)	\n"			\
"	movq	%rcx,24(%rsp)	\n"			\
"	movq	%r8,32(%rsp)	\n"			\
"	movq	%r9,40(%rsp)	\n"			\
"	movq	%rax,48(%rsp)	\n"			\
"	movq	0(%rbp),%r11	\n"			\
"	movq	8(%r11),%rdi	\n"			\
"	movq	8(%rbp),%rsi	\n"			\
"	call	_mcount"__MCPLT "	\n"			\
"	movq	0(%rsp),%rdi	\n"			\
"	movq	8(%rsp),%rsi	\n"			\
"	movq	16(%rsp),%rdx	\n"			\
"	movq	24(%rsp),%rcx	\n"			\
"	movq	32(%rsp),%r8	\n"			\
"	movq	40(%rsp),%r9	\n"			\
"	movq	48(%rsp),%rax	\n"			\
"	leave			\n"			\
"	ret			\n"			\
"	.size __mcount,.-__mcount");
     79 
     80 
     81 #ifdef _KERNEL
#ifdef MULTIPROCESSOR
/* Spin lock serializing mcount's profile-buffer updates across CPUs. */
__cpu_simple_lock_t __mcount_lock;

/*
 * Acquire __mcount_lock.  Test-and-test-and-set: the outer loop does
 * the atomic exchange, the inner loop spins on plain reads until the
 * lock looks free, avoiding bus-locked traffic while waiting.  The
 * trailing barrier keeps the locked section from being reordered
 * before the acquire.
 */
static inline void
MCOUNT_ENTER_MP(void)
{
	while (x86_atomic_testset_b(&__mcount_lock, __SIMPLELOCK_LOCKED)
	    != __SIMPLELOCK_UNLOCKED) {
		while (__mcount_lock == __SIMPLELOCK_LOCKED)
			;
	}
	__insn_barrier();
}

/*
 * Release __mcount_lock.  Barrier first so stores inside the locked
 * section are not moved past the unlocking store.
 */
static inline void
MCOUNT_EXIT_MP(void)
{
	__insn_barrier();
	__mcount_lock = __SIMPLELOCK_UNLOCKED;
}
#else
/* Uniprocessor: no lock needed; the macros compile to nothing. */
#define MCOUNT_ENTER_MP()
#define MCOUNT_EXIT_MP()
#endif
    106 
/*
 * Disable maskable interrupts ("cli"; privileged, kernel-only) so the
 * profile-buffer update in _mcount is not re-entered from an interrupt.
 */
static inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}
    112 
    113 static inline u_long
    114 mcount_read_psl(void)
    115 {
    116 	u_long	ef;
    117 
    118 	__asm volatile("pushfl; popl %0" : "=r" (ef));
    119 	return (ef);
    120 }
    121 
    122 static inline void
    123 mcount_write_psl(u_long ef)
    124 {
    125 	__asm volatile("pushl %0; popfl" : : "r" (ef));
    126 }
    127 
/*
 * MCOUNT_ENTER/MCOUNT_EXIT bracket the critical section inside the
 * machine-independent _mcount body in the kernel: save the flags
 * register into the local variable `s` (which the enclosing MI code is
 * expected to declare), disable interrupts, and take the MP lock;
 * MCOUNT_EXIT reverses the steps.  Note `s` is an int, so only the low
 * 32 bits of RFLAGS are kept — IF (bit 9) is within that range.
 */
#define	MCOUNT_ENTER							\
	s = (int)mcount_read_psl();					\
	mcount_disable_intr();						\
	MCOUNT_ENTER_MP();

#define	MCOUNT_EXIT							\
	MCOUNT_EXIT_MP();						\
	mcount_write_psl(s);
    136 
    137 #endif /* _KERNEL */
    138