Home | History | Annotate | Line # | Download | only in include
profile.h revision 1.7.6.2
      1  1.7.6.2     ad /*	$NetBSD: profile.h,v 1.7.6.2 2007/12/03 18:34:42 ad Exp $	*/
      2      1.1   fvdl 
      3      1.1   fvdl /*
      4      1.1   fvdl  * Copyright (c) 1992, 1993
      5      1.1   fvdl  *	The Regents of the University of California.  All rights reserved.
      6      1.1   fvdl  *
      7      1.1   fvdl  * Redistribution and use in source and binary forms, with or without
      8      1.1   fvdl  * modification, are permitted provided that the following conditions
      9      1.1   fvdl  * are met:
     10      1.1   fvdl  * 1. Redistributions of source code must retain the above copyright
     11      1.1   fvdl  *    notice, this list of conditions and the following disclaimer.
     12      1.1   fvdl  * 2. Redistributions in binary form must reproduce the above copyright
     13      1.1   fvdl  *    notice, this list of conditions and the following disclaimer in the
     14      1.1   fvdl  *    documentation and/or other materials provided with the distribution.
     15      1.2    agc  * 3. Neither the name of the University nor the names of its contributors
     16      1.1   fvdl  *    may be used to endorse or promote products derived from this software
     17      1.1   fvdl  *    without specific prior written permission.
     18      1.1   fvdl  *
     19      1.1   fvdl  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20      1.1   fvdl  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21      1.1   fvdl  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22      1.1   fvdl  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23      1.1   fvdl  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24      1.1   fvdl  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25      1.1   fvdl  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26      1.1   fvdl  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27      1.1   fvdl  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28      1.1   fvdl  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29      1.1   fvdl  * SUCH DAMAGE.
     30      1.1   fvdl  *
     31      1.1   fvdl  *	@(#)profile.h	8.1 (Berkeley) 6/11/93
     32      1.1   fvdl  */
     33      1.1   fvdl 
     34      1.4    chs #ifdef _KERNEL_OPT
     35      1.4    chs #include "opt_multiprocessor.h"
     36  1.7.6.2     ad #include "opt_xen.h"
     37      1.4    chs #endif
     38      1.4    chs 
     39      1.7     ad #include <machine/atomic.h>
     40      1.4    chs 
/* Declarator used by common gprof code to define the C-level hook. */
#define	_MCOUNT_DECL void _mcount

/* Emit a global _eprol symbol marking the end of the profiled prologue. */
#define EPROL_EXPORT	__asm(".globl _eprol")

/*
 * In position-independent code the assembly stub must reach _mcount
 * through the PLT; otherwise it can be called directly.
 */
#ifdef PIC
#define __MCPLT	"@PLT"
#else
#define __MCPLT
#endif
     50      1.3   fvdl 
/*
 * MCOUNT: the assembly entry stub called at the start of every profiled
 * function (with the frame pointer already set up by the caller's
 * prologue).  It:
 *
 *   1. builds its own frame and saves the six integer argument
 *      registers (%rdi, %rsi, %rdx, %rcx, %r8, %r9) plus %rax, since
 *      the C call below may clobber them and the profiled function has
 *      not consumed them yet;
 *   2. loads frompc = 8(caller's %rbp), the return address into the
 *      profiled function's caller, and selfpc = 8(%rbp), the return
 *      address back into the profiled function itself;
 *   3. calls _mcount(frompc, selfpc) (via the PLT when PIC — __MCPLT);
 *   4. restores the saved registers and returns.
 *
 * mcount is provided as a weak alias so a program can override it.
 */
#define	MCOUNT						\
__weak_alias(mcount, __mcount)				\
__asm(" .globl __mcount		\n"			\
"	.type __mcount,@function\n"			\
"__mcount:			\n"			\
"	pushq	%rbp		\n"			\
"	movq	%rsp,%rbp	\n"			\
"	subq	$56,%rsp	\n"			\
"	movq	%rdi,0(%rsp)	\n"			\
"	movq	%rsi,8(%rsp)	\n"			\
"	movq	%rdx,16(%rsp)	\n"			\
"	movq	%rcx,24(%rsp)	\n"			\
"	movq	%r8,32(%rsp)	\n"			\
"	movq	%r9,40(%rsp)	\n"			\
"	movq	%rax,48(%rsp)	\n"			\
"	movq	0(%rbp),%r11	\n"			\
"	movq	8(%r11),%rdi	\n"			\
"	movq	8(%rbp),%rsi	\n"			\
"	call	_mcount"__MCPLT "	\n"			\
"	movq	0(%rsp),%rdi	\n"			\
"	movq	8(%rsp),%rsi	\n"			\
"	movq	16(%rsp),%rdx	\n"			\
"	movq	24(%rsp),%rcx	\n"			\
"	movq	32(%rsp),%r8	\n"			\
"	movq	40(%rsp),%r9	\n"			\
"	movq	48(%rsp),%rax	\n"			\
"	leave			\n"			\
"	ret			\n"			\
"	.size __mcount,.-__mcount");
     80      1.1   fvdl 
     81      1.1   fvdl 
#ifdef _KERNEL
/*
 * Kernel profiling support.  _mcount() must run with interrupts
 * disabled and, on MP kernels, under a simple spin lock so the shared
 * gmon buffers are not corrupted.  Note that _mcount() itself must not
 * be profiled, so only trivial inlines/macros may be used here.
 */
#ifdef MULTIPROCESSOR
__cpu_simple_lock_t __mcount_lock;

/*
 * Take the profiling spin lock.  Spin-read the lock word while it is
 * held to avoid hammering the bus with atomic operations; the barrier
 * keeps the profiled stores from moving above the acquisition.
 */
static inline void
MCOUNT_ENTER_MP(void)
{
	while (x86_atomic_testset_b(&__mcount_lock, __SIMPLELOCK_LOCKED)
	    != __SIMPLELOCK_UNLOCKED) {
		while (__mcount_lock == __SIMPLELOCK_LOCKED)
			;
	}
	__insn_barrier();
}

/* Release the profiling spin lock. */
static inline void
MCOUNT_EXIT_MP(void)
{
	__insn_barrier();
	__mcount_lock = __SIMPLELOCK_UNLOCKED;
}
#else
#define MCOUNT_ENTER_MP()
#define MCOUNT_EXIT_MP()
#endif

#ifdef XEN
/*
 * Under Xen, interrupt state is the event-channel upcall mask in the
 * shared info page, not the hardware PSL.
 */
static inline void
mcount_disable_intr(void)
{
	/* works because __cli is a macro */
	__cli();
}

static inline u_long
mcount_read_psl(void)
{
	return (HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask);
}

static inline void
mcount_write_psl(u_long psl)
{
	HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask = psl;
	x86_lfence();
	/* XXX can't call hypervisor_force_callback() because we're in mcount*/
}

#else /* XEN */
/* Native hardware: manipulate the interrupt flag in %rflags directly. */
static inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}

static inline u_long
mcount_read_psl(void)
{
	u_long	ef;

	/* 64-bit forms: pushfl/popfl do not exist in long mode. */
	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static inline void
mcount_write_psl(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}

#endif /* XEN */

/* Save interrupt state in 's', then block interrupts and take the lock. */
#define	MCOUNT_ENTER							\
	s = (int)mcount_read_psl();					\
	mcount_disable_intr();						\
	MCOUNT_ENTER_MP();

/* Drop the lock and restore the interrupt state saved by MCOUNT_ENTER. */
#define	MCOUNT_EXIT							\
	MCOUNT_EXIT_MP();						\
	mcount_write_psl(s);

#endif /* _KERNEL */
    186