/*	$NetBSD: profile.h,v 1.13 2008/04/21 15:15:33 cegger Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)profile.h	8.1 (Berkeley) 6/11/93
 */

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#endif

#ifdef _KERNEL
#include <machine/lock.h>
#endif

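/*
 * _MCOUNT_DECL supplies the return type and name that the
 * machine-independent mcount code uses when it defines the common
 * _mcount(frompc, selfpc) routine; the assembly stub below calls it.
 */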
#define	_MCOUNT_DECL void _mcount

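/*
 * EPROL_EXPORT makes the "_eprol" label globally visible; the gmon
 * profiling code uses it as a marker for the start of profiled text.
 */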
#define EPROL_EXPORT	__asm(".globl _eprol")

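/* When building position-independent code, route the _mcount call through the PLT. */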
#ifdef PIC
#define __MCPLT	"@PLT"
#else
#define __MCPLT
#endif

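/*
 * MCOUNT emits the __mcount entry stub (with a weak "mcount" alias).
 * The stub builds a frame, saves the six integer argument registers
 * and %rax, recovers the caller's return address (selfpc, at 8(%rbp))
 * and the caller's caller's return address (frompc, at offset 8 of the
 * saved frame pointer), calls _mcount(frompc, selfpc), then restores
 * the registers and returns.
 */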
#define	MCOUNT						\
__weak_alias(mcount, __mcount)				\
__asm(" .globl __mcount		\n"			\
"	.type __mcount,@function\n"			\
"__mcount:			\n"			\
"	pushq	%rbp		\n"			\
"	movq	%rsp,%rbp	\n"			\
"	subq	$56,%rsp	\n"			\
"	movq	%rdi,0(%rsp)	\n"			\
"	movq	%rsi,8(%rsp)	\n"			\
"	movq	%rdx,16(%rsp)	\n"			\
"	movq	%rcx,24(%rsp)	\n"			\
"	movq	%r8,32(%rsp)	\n"			\
"	movq	%r9,40(%rsp)	\n"			\
"	movq	%rax,48(%rsp)	\n"			\
"	movq	0(%rbp),%r11	\n"			\
"	movq	8(%r11),%rdi	\n"			\
"	movq	8(%rbp),%rsi	\n"			\
"	call	_mcount"__MCPLT "	\n"			\
"	movq	0(%rsp),%rdi	\n"			\
"	movq	8(%rsp),%rsi	\n"			\
"	movq	16(%rsp),%rdx	\n"			\
"	movq	24(%rsp),%rcx	\n"			\
"	movq	32(%rsp),%r8	\n"			\
"	movq	40(%rsp),%r9	\n"			\
"	movq	48(%rsp),%rax	\n"			\
"	leave			\n"			\
"	ret			\n"			\
"	.size __mcount,.-__mcount");


#ifdef _KERNEL
#ifdef MULTIPROCESSOR
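/*
 * On MP kernels a simple spin lock serializes updates to the profiling
 * buffers; MCOUNT_ENTER_MP/MCOUNT_EXIT_MP take and release it around
 * the body of mcount().
 */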
__cpu_simple_lock_t __mcount_lock;

static inline void
MCOUNT_ENTER_MP(void)
{
	__cpu_simple_lock(&__mcount_lock);
	__insn_barrier();
}

static inline void
MCOUNT_EXIT_MP(void)
{
	__insn_barrier();
	__mcount_lock = __SIMPLELOCK_UNLOCKED;
}
#else
#define MCOUNT_ENTER_MP()
#define MCOUNT_EXIT_MP()
#endif

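/*
 * Under Xen, interrupt state lives in the virtual CPU's event-channel
 * upcall mask rather than the hardware flags register, so the "PSL"
 * saved and restored here is that mask.
 */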
#ifdef XEN
static inline void
mcount_disable_intr(void)
{
	/* works because __cli is a macro */
	__cli();
}

static inline u_long
mcount_read_psl(void)
{
	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
}

static inline void
mcount_write_psl(u_long psl)
{
	curcpu()->ci_vcpu->evtchn_upcall_mask = psl;
	x86_lfence();
	/* XXX can't call hypervisor_force_callback() because we're in mcount*/
}

#else /* XEN */
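/*
 * On bare metal, disable interrupts with "cli" and save/restore the
 * interrupt state by copying %rflags through the stack with
 * pushfq/popfq.
 */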
static inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}

static inline u_long
mcount_read_psl(void)
{
	u_long	ef;

	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static inline void
mcount_write_psl(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}

#endif /* XEN */
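/*
 * MCOUNT_ENTER/MCOUNT_EXIT bracket the body of mcount() in the kernel:
 * record the current interrupt state in the local variable s, disable
 * interrupts, and take the MP lock; MCOUNT_EXIT releases the lock and
 * restores the saved state.
 */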
#define	MCOUNT_ENTER							\
	s = (int)mcount_read_psl();					\
	mcount_disable_intr();						\
	MCOUNT_ENTER_MP();

#define	MCOUNT_EXIT							\
	MCOUNT_EXIT_MP();						\
	mcount_write_psl(s);

#endif /* _KERNEL */