/*	$NetBSD: profile.h,v 1.1 2014/09/03 19:34:26 matt Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _OR1K_PROFILE_H_
#define _OR1K_PROFILE_H_

#define	_MCOUNT_DECL void _mcount

/*
 * Cannot implement mcount in C as GCC will trash the ip register when it
 * pushes a trapframe.  Pity we cannot insert assembly before the function
 * prologue.
 */

#define	MCOUNT_ASM_NAME "__mcount"
#define	PLTSYM

#if 0
#define	MCOUNT						\
	__asm(".text");					\
	__asm(".align 0");				\
	__asm(".type " MCOUNT_ASM_NAME ",@function");	\
	__asm(".global " MCOUNT_ASM_NAME);		\
	__asm(MCOUNT_ASM_NAME ":");			\
	/*						\
	 * Preserve registers that are trashed during mcount	\
	 */						\
	__asm("sub sp, sp, #80");			\
	__asm("stp x29, x30, [sp, #64]");		\
	__asm("add x29, sp, #64");			\
	__asm("stp x0, x1, [x29, #0]");			\
	__asm("stp x2, x3, [x29, #16]");		\
	__asm("stp x4, x5, [x29, #32]");		\
	__asm("stp x6, x7, [x29, #48]");		\
	/*						\
	 * find the return address for mcount,		\
	 * and the return address for mcount's caller.	\
	 *						\
	 * frompcindex = pc pushed by call into self.	\
	 */						\
	__asm("mov x0, x19");				\
	/*						\
	 * selfpc = pc pushed by mcount call		\
	 */						\
	__asm("mov x1, x30");				\
	/*						\
	 * Call the real mcount code			\
	 */						\
	__asm("bl " ___STRING(_C_LABEL(_mcount)));	\
	/*						\
	 * Restore registers that were trashed during mcount	\
	 */						\
	__asm("ldp x0, x1, [x29, #0]");			\
	__asm("ldp x2, x3, [x29, #16]");		\
	__asm("ldp x4, x5, [x29, #32]");		\
	__asm("ldp x6, x7, [x29, #48]");		\
	__asm("ldp x29, x30, [x29, #64]");		\
	__asm("add sp, sp, #80");			\
	__asm("ret");					\
	__asm(".size " MCOUNT_ASM_NAME ", .-" MCOUNT_ASM_NAME);
#endif

#endif /* _OR1K_PROFILE_H_ */
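
/*
 * Illustrative sketch only, not part of the original header: the (currently
 * disabled) MCOUNT assembly stub above is expected to hand two program
 * counters to the machine-independent gmon code, which declares its entry
 * point through _MCOUNT_DECL.  The expansion sketched below shows how that
 * pairing would look; the u_long parameter types (from <sys/types.h>) and
 * the empty body are assumptions based on the usual gmon interface, not
 * taken from this file.
 */
#if 0
_MCOUNT_DECL(u_long frompc, u_long selfpc)
	/* expands to: void _mcount(u_long frompc, u_long selfpc) */
{
	/*
	 * frompc: return address of the profiled function's caller
	 *         ("pc pushed by call into self" in the stub above)
	 * selfpc: address inside the profiled function, i.e. the return
	 *         address of the call into mcount
	 * The real routine records a call-graph arc from frompc to selfpc.
	 */
}
#endif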