/*	$NetBSD: profile.h,v 1.11 2007/11/24 18:55:41 bouyer Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)profile.h	8.1 (Berkeley) 6/11/93
 */

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#endif

#include <machine/atomic.h>

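/*
 * _MCOUNT_DECL supplies the declarator for the machine-independent
 * profiling routine; the shared gmon code expands it roughly as
 * _MCOUNT_DECL(frompc, selfpc), so the C routine here ends up as a
 * plain function named _mcount.
 */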
#define	_MCOUNT_DECL void _mcount

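/*
 * EPROL_EXPORT makes the _eprol symbol, which the profiling startup
 * code uses to mark the beginning of profiled text, globally visible.
 */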
#define EPROL_EXPORT	__asm(".globl _eprol")

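/*
 * When building position-independent code, the call to _mcount must go
 * through the PLT; __MCPLT appends the "@PLT" operand suffix below.
 */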
#ifdef PIC
#define __MCPLT	"@PLT"
#else
#define __MCPLT
#endif

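/*
 * __mcount is the glue called at every function entry when compiling
 * with -pg; the compiler-emitted prologue is roughly:
 *
 *	func:	pushq	%rbp
 *		movq	%rsp,%rbp
 *		call	__mcount
 *
 * The glue must preserve the SysV AMD64 argument registers %rdi, %rsi,
 * %rdx, %rcx, %r8 and %r9, plus %rax (%al carries the vector register
 * count into varargs functions), around the call to the C _mcount().
 * The two program counters are recovered through the frame pointers:
 * with %r11 = 0(%rbp), the instrumented function's frame, 8(%r11) is
 * the address it was called from ("frompc"), and 8(%rbp) is the return
 * address back into the instrumented function itself ("selfpc").
 */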
#define	MCOUNT						\
__weak_alias(mcount, __mcount)				\
__asm(" .globl __mcount		\n"			\
"	.type __mcount,@function\n"			\
"__mcount:			\n"			\
"	pushq	%rbp		\n"			\
"	movq	%rsp,%rbp	\n"			\
"	subq	$56,%rsp	\n"			\
"	movq	%rdi,0(%rsp)	\n"			\
"	movq	%rsi,8(%rsp)	\n"			\
"	movq	%rdx,16(%rsp)	\n"			\
"	movq	%rcx,24(%rsp)	\n"			\
"	movq	%r8,32(%rsp)	\n"			\
"	movq	%r9,40(%rsp)	\n"			\
"	movq	%rax,48(%rsp)	\n"			\
"	movq	0(%rbp),%r11	\n"			\
"	movq	8(%r11),%rdi	\n"			\
"	movq	8(%rbp),%rsi	\n"			\
"	call	_mcount"__MCPLT "	\n"			\
"	movq	0(%rsp),%rdi	\n"			\
"	movq	8(%rsp),%rsi	\n"			\
"	movq	16(%rsp),%rdx	\n"			\
"	movq	24(%rsp),%rcx	\n"			\
"	movq	32(%rsp),%r8	\n"			\
"	movq	40(%rsp),%r9	\n"			\
"	movq	48(%rsp),%rax	\n"			\
"	leave			\n"			\
"	ret			\n"			\
"	.size __mcount,.-__mcount");


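/*
 * Kernel-only support: mcount must run with interrupts disabled, and
 * on multiprocessor kernels its entry is additionally serialized with
 * a simple spin lock, since the gmon buffers it updates are shared.
 */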
#ifdef _KERNEL
#ifdef MULTIPROCESSOR
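/* Spin lock serializing all CPUs' entry into the profiler proper. */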
__cpu_simple_lock_t __mcount_lock;

static inline void
MCOUNT_ENTER_MP(void)
{
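	/*
	 * Take the lock with an atomic test-and-set; while it is held
	 * elsewhere, spin on plain reads so we do not hammer the bus
	 * with locked cycles.
	 */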
	while (x86_atomic_testset_b(&__mcount_lock, __SIMPLELOCK_LOCKED)
	    != __SIMPLELOCK_UNLOCKED) {
		while (__mcount_lock == __SIMPLELOCK_LOCKED)
			;
	}
	__insn_barrier();
}

static inline void
MCOUNT_EXIT_MP(void)
{
	__insn_barrier();
	__mcount_lock = __SIMPLELOCK_UNLOCKED;
}
#else
#define MCOUNT_ENTER_MP()
#define MCOUNT_EXIT_MP()
#endif

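/*
 * A Xen paravirtual guest cannot touch the real interrupt flag; its
 * equivalent of the PSL is the per-vcpu event-channel upcall mask in
 * the hypervisor's shared info page.  Note the code below only looks
 * at vcpu 0.
 */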
#ifdef XEN
static inline void
mcount_disable_intr(void)
{
	/* works because __cli is a macro */
	__cli();
}

static inline u_long
mcount_read_psl(void)
{
	return (HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask);
}

static inline void
mcount_write_psl(u_long psl)
{
	HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask = psl;
	x86_lfence();
	/* XXX can't call hypervisor_force_callback() because we're in mcount */
}

#else /* XEN */
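/*
 * Native x86: the PSL is the processor's RFLAGS register, saved and
 * restored with pushfq/popfq around the cli.
 */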
static inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}

static inline u_long
mcount_read_psl(void)
{
	u_long	ef;

	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static inline void
mcount_write_psl(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}

#endif /* XEN */
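/*
 * MCOUNT_ENTER/MCOUNT_EXIT bracket the table update in the machine-
 * independent mcount(); `s' is a local variable there.  A rough
 * sketch of the caller:
 *
 *	int s;
 *	...
 *	MCOUNT_ENTER;
 *	... update the gmon from/to tables ...
 *	MCOUNT_EXIT;
 */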
#define	MCOUNT_ENTER							\
	s = (int)mcount_read_psl();					\
	mcount_disable_intr();						\
	MCOUNT_ENTER_MP();

#define	MCOUNT_EXIT							\
	MCOUNT_EXIT_MP();						\
	mcount_write_psl(s);

#endif /* _KERNEL */