/*	$NetBSD: idle_machdep.S,v 1.13 2024/12/30 19:13:48 jmcneill Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_arm_intr_impl.h"
#include "opt_ddb.h"
#include "opt_gprof.h"

#include <aarch64/asm.h>
#include <aarch64/locore.h>
#include "assym.h"

RCSID("$NetBSD: idle_machdep.S,v 1.13 2024/12/30 19:13:48 jmcneill Exp $");

/* ARM_INTR_IMPL names the platform interrupt-implementation header. */
#ifdef ARM_INTR_IMPL
#include ARM_INTR_IMPL
#else
#error ARM_INTR_IMPL not defined
#endif

/* That header must define ARM_IRQ_HANDLER, the IRQ dispatch entry point. */
#ifndef ARM_IRQ_HANDLER
#error ARM_IRQ_HANDLER not defined
#endif


/*
 * void aarch64_cpu_idle_wfi(void)
 *
 * Idle the CPU with a WFI (wait-for-interrupt) instruction.
 *
 * Two strategies, selected at build time by LAZY_CPUIDLE:
 *
 *  - LAZY_CPUIDLE: simply WFI with interrupts enabled; the wakeup
 *    interrupt is taken through the normal trap handler, which then
 *    calls the interrupt handler.
 *
 *  - !LAZY_CPUIDLE: build a minimal trapframe on the stack here, mask
 *    interrupts, WFI, and branch-link straight into ARM_IRQ_HANDLER,
 *    skipping the trap-handler entry/exit cost.
 *
 * Clobbers: x0-x3 (plus whatever ARM_IRQ_HANDLER/dosoftints clobber in
 * the !LAZY_CPUIDLE path); x28/x29/x30 are saved in, and restored from,
 * the local trapframe.
 */
ENTRY(aarch64_cpu_idle_wfi)
#ifdef LAZY_CPUIDLE

	/*
	 * hardware interrupt -> trap handler -> interrupt handler
	 */
	dsb	sy			/* complete outstanding memory ops */
	wfi				/* sleep until an interrupt pends */

#else /* LAZY_CPUIDLE */

	/*
	 * hardware interrupt -> interrupt handler (no trap handler)
	 */
	sub	sp, sp, #TF_SIZE	/* allocate trapframe */
	str	x28, [sp, #TF_X28]	/* save x28 (used below for curcpu) */
	stp	x29, x30, [sp, #TF_X29]	/* save x29,x30 */
#ifdef DDB
	add	x29, sp, #TF_X29	/* link frame for backtrace */
	mov	x0, #-1			/* mark ESR invalid so DDB won't */
	str	x0, [sp, #TF_ESR]	/*  decode this synthetic frame */
	str	xzr, [sp, #TF_FAR]
#endif

	/* fill the minimum required trapframe */
	mov	x2, #SPSR_M_EL1H	/* what our spsr should be */
	str	x2, [sp, #TF_SPSR]
	adr	x0, 1f			/* resume point after the handler; */
	str	x0, [sp, #TF_PC]	/* CLKF_PC refer to tf_pc */

	/*
	 * The "idle/N" lwp is allocated on a per-CPU basis; curcpu()
	 * always returns the same cpu_info here, so there is no need to
	 * consider KPREEMPT.  Safe even with interrupts enabled.
	 */
	mrs	x1, tpidr_el1		/* get curlwp */
	ldr	x28, [x1, #L_CPU]	/* get curcpu */

	mov	w2, #1			/* new ci_intr_depth, stored below */
	mov	x0, sp			/* get pointer to trapframe */

	DISABLE_INTERRUPT
	/*
	 * assert(ci->ci_intr_depth == 0),
	 * therefore, ci->ci_intr_depth++ would be definitely 1.
	 */
	str	w2, [x28, #CI_INTR_DEPTH]	/* ci->ci_intr_depth = 1 */

	dsb	sy			/* complete memory ops before sleep */
	wfi				/* wake on pending (masked) intr */
	bl	ARM_IRQ_HANDLER		/* irqhandler(trapframe) */
1:
	/* x28 is curcpu(); preserved (callee-saved) across the handler */
	str	wzr, [x28, #CI_INTR_DEPTH]	/* ci->ci_intr_depth = 0 */

#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
	ldr	w3, [x28, #CI_SOFTINTS]	/* Get pending softint mask */
	/* CPL should be 0 */
	ldr	w2, [x28, #CI_CPL]	/* Get current priority level */
	lsr	w3, w3, w2		/* shift mask by cpl */
	cbz	w3, 1f			/* nothing pending above cpl? */
	bl	_C_LABEL(dosoftints)	/* dosoftints() */
1:
#endif /* __HAVE_FAST_SOFTINTS && !__HAVE_PIC_FAST_SOFTINTS */

	ldr	x28, [sp, #TF_X28]	/* restore x28 */
	ldp	x29, x30, [sp, #TF_X29]	/* restore x29,x30 */
	add	sp, sp, #TF_SIZE	/* pop trapframe */

	ENABLE_INTERRUPT
#endif /* LAZY_CPUIDLE */

	ret
END(aarch64_cpu_idle_wfi)