/*	$NetBSD: profile.h,v 1.21 2021/11/02 11:26:03 ryo Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
30 1.1 fvdl * 31 1.1 fvdl * @(#)profile.h 8.1 (Berkeley) 6/11/93 32 1.1 fvdl */ 33 1.1 fvdl 34 1.15 mrg #ifdef __x86_64__ 35 1.15 mrg 36 1.4 chs #ifdef _KERNEL_OPT 37 1.11 bouyer #include "opt_xen.h" 38 1.4 chs #endif 39 1.4 chs 40 1.3 fvdl #define _MCOUNT_DECL void _mcount 41 1.1 fvdl 42 1.3 fvdl #define EPROL_EXPORT __asm(".globl _eprol") 43 1.1 fvdl 44 1.16 joerg #ifdef __PIC__ 45 1.3 fvdl #define __MCPLT "@PLT" 46 1.3 fvdl #else 47 1.3 fvdl #define __MCPLT 48 1.3 fvdl #endif 49 1.3 fvdl 50 1.3 fvdl #define MCOUNT \ 51 1.3 fvdl __weak_alias(mcount, __mcount) \ 52 1.3 fvdl __asm(" .globl __mcount \n" \ 53 1.3 fvdl " .type __mcount,@function\n" \ 54 1.3 fvdl "__mcount: \n" \ 55 1.3 fvdl " pushq %rbp \n" \ 56 1.3 fvdl " movq %rsp,%rbp \n" \ 57 1.3 fvdl " subq $56,%rsp \n" \ 58 1.3 fvdl " movq %rdi,0(%rsp) \n" \ 59 1.3 fvdl " movq %rsi,8(%rsp) \n" \ 60 1.3 fvdl " movq %rdx,16(%rsp) \n" \ 61 1.3 fvdl " movq %rcx,24(%rsp) \n" \ 62 1.3 fvdl " movq %r8,32(%rsp) \n" \ 63 1.3 fvdl " movq %r9,40(%rsp) \n" \ 64 1.3 fvdl " movq %rax,48(%rsp) \n" \ 65 1.3 fvdl " movq 0(%rbp),%r11 \n" \ 66 1.3 fvdl " movq 8(%r11),%rdi \n" \ 67 1.3 fvdl " movq 8(%rbp),%rsi \n" \ 68 1.6 skrll " call _mcount"__MCPLT " \n" \ 69 1.3 fvdl " movq 0(%rsp),%rdi \n" \ 70 1.3 fvdl " movq 8(%rsp),%rsi \n" \ 71 1.3 fvdl " movq 16(%rsp),%rdx \n" \ 72 1.3 fvdl " movq 24(%rsp),%rcx \n" \ 73 1.3 fvdl " movq 32(%rsp),%r8 \n" \ 74 1.3 fvdl " movq 40(%rsp),%r9 \n" \ 75 1.3 fvdl " movq 48(%rsp),%rax \n" \ 76 1.3 fvdl " leave \n" \ 77 1.3 fvdl " ret \n" \ 78 1.3 fvdl " .size __mcount,.-__mcount"); 79 1.1 fvdl 80 1.1 fvdl 81 1.1 fvdl #ifdef _KERNEL 82 1.19 cherry #ifdef XENPV 83 1.21 ryo static inline __always_inline void 84 1.11 bouyer mcount_disable_intr(void) 85 1.11 bouyer { 86 1.18 bouyer /* should be __cli() but this calls x86_lfence() which calls mcount */ 87 1.18 bouyer curcpu()->ci_vcpu->evtchn_upcall_mask = 1; 88 1.18 bouyer __asm volatile("lfence" ::: "memory"); /* x86_lfence() */ 89 1.11 bouyer } 90 1.11 
bouyer 91 1.21 ryo static inline __always_inline u_long 92 1.11 bouyer mcount_read_psl(void) 93 1.11 bouyer { 94 1.13 cegger return (curcpu()->ci_vcpu->evtchn_upcall_mask); 95 1.11 bouyer } 96 1.11 bouyer 97 1.21 ryo static inline __always_inline void 98 1.11 bouyer mcount_write_psl(u_long psl) 99 1.11 bouyer { 100 1.13 cegger curcpu()->ci_vcpu->evtchn_upcall_mask = psl; 101 1.18 bouyer /* can't call x86_lfence because it calls mcount() */ 102 1.18 bouyer __asm volatile("lfence" ::: "memory"); /* x86_lfence() */ 103 1.20 rillig /* XXX can't call hypervisor_force_callback() because we're in mcount*/ 104 1.11 bouyer } 105 1.11 bouyer 106 1.19 cherry #else /* XENPV */ 107 1.21 ryo static inline __always_inline void 108 1.9 ad mcount_disable_intr(void) 109 1.9 ad { 110 1.9 ad __asm volatile("cli"); 111 1.9 ad } 112 1.9 ad 113 1.21 ryo static inline __always_inline u_long 114 1.9 ad mcount_read_psl(void) 115 1.9 ad { 116 1.9 ad u_long ef; 117 1.9 ad 118 1.14 chs __asm volatile("pushfq; popq %0" : "=r" (ef)); 119 1.9 ad return (ef); 120 1.9 ad } 121 1.9 ad 122 1.21 ryo static inline __always_inline void 123 1.9 ad mcount_write_psl(u_long ef) 124 1.9 ad { 125 1.14 chs __asm volatile("pushq %0; popfq" : : "r" (ef)); 126 1.9 ad } 127 1.9 ad 128 1.19 cherry #endif /* XENPV */ 129 1.17 ryo 130 1.17 ryo #define MCOUNT_ENTER \ 131 1.17 ryo do { s = (int)mcount_read_psl(); mcount_disable_intr(); } while (0) 132 1.17 ryo #define MCOUNT_EXIT do { mcount_write_psl(s); } while (0) 133 1.4 chs 134 1.1 fvdl #endif /* _KERNEL */ 135 1.15 mrg 136 1.15 mrg #else /* __x86_64__ */ 137 1.15 mrg 138 1.15 mrg #include <i386/profile.h> 139 1.15 mrg 140 1.15 mrg #endif /* __x86_64__ */ 141