/*	$NetBSD: cpuswitch.S,v 1.107 2023/03/01 08:17:53 riastradh Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created	: 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_cpuoptions.h"
#include "opt_kasan.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"

#include "assym.h"
#include <arm/asm.h>
#include <arm/locore.h>

RCSID("$NetBSD: cpuswitch.S,v 1.107 2023/03/01 08:17:53 riastradh Exp $")

/* LINTSTUB: include <sys/param.h> */

#ifdef FPU_VFP
	.fpu	vfpv2
#endif

#undef IRQdisable
#undef IRQenable

/*
 * Definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled, since FIQs are special.
 */

#ifdef _ARM_ARCH_6
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif

	.text

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next, bool returning)
 *
 *	Switch to the specified next LWP
 *	Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	'bool' true if returning to an interrupted LWP (not used here)
 */
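/*
 * A minimal caller-side sketch, for orientation only (the real caller is
 * mi_switch(); lwp locking and scheduling details are omitted):
 *
 *	prevlwp = cpu_switchto(curlwp, newlwp, false);
 */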
ENTRY(cpu_switchto)
	mov	ip, sp
	push	{r4-r7, ip, lr}

	/* move lwps into callee saved registers */
	mov	r6, r1
	mov	r4, r0

#ifdef TPIDRPRW_IS_CURCPU
	GET_CURCPU(r5)
#else
	ldr	r5, [r6, #L_CPU]	/* get cpu from new lwp */
#endif

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r7, [r4, #(L_PCB)]

	/* Save all the registers in the old lwp's pcb */
#if defined(_ARM_ARCH_DWORD_OK)
	strd	r8, r9, [r7, #(PCB_R8)]
	strd	r10, r11, [r7, #(PCB_R10)]
	strd	r12, r13, [r7, #(PCB_R12)]
#else
	add	r0, r7, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r7, #(PCB_USER_PID_RW)]
#endif
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* Restore saved context */

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */

	IRQdisable
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4	/* set current lwp */
#endif

	/*
	 * Issue barriers to coordinate mutex_exit on this CPU with
	 * mutex_vector_enter on another CPU.
	 *
	 * 1. Any prior mutex_exit by oldlwp must be visible to other
	 *    CPUs before we set ci_curlwp := newlwp on this one,
	 *    requiring a store-before-store barrier.
	 *
	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
	 *    before any subsequent mutex_exit by newlwp can even test
	 *    whether there might be waiters, requiring a
	 *    store-before-load barrier.
	 *
	 * See kern_mutex.c for details -- this is necessary for
	 * adaptive mutexes to detect whether the lwp is on the CPU in
	 * order to safely block without requiring atomic r/m/w in
	 * mutex_exit.
	 */
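	/*
	 * In other words (a sketch of the required ordering only, not
	 * the kern_mutex.c source):
	 *
	 *	stores by oldlwp (e.g. releasing a mutex's owner field)
	 *	--- DMB: store-before-store ---
	 *	ci->ci_curlwp = newlwp
	 *	--- DMB: store-before-load ---
	 *	loads by newlwp's mutex_exit (e.g. "are there waiters?")
	 */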

	/* We have a new curlwp now so make a note of it */
#ifdef _ARM_ARCH_7
	dmb				/* store-before-store */
#endif
	str	r6, [r5, #(CI_CURLWP)]
#ifdef _ARM_ARCH_7
	dmb				/* store-before-load */
#endif

	/* Get the new pcb */
	ldr	r7, [r6, #(L_PCB)]

	/* make sure we are using the new lwp's stack */
	ldr	sp, [r7, #(PCB_KSP)]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If we are switching to a system lwp, don't bother restoring
	 * thread or vfp registers and skip the ras check.
	 */
	ldr	r0, [r6, #(L_FLAG)]
	tst	r0, #(LW_SYSTEM)
	bne	.Lswitch_do_restore

#ifdef _ARM_ARCH_6
	/*
	 * Restore user thread/process id registers
	 */
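	/*
	 * (c13, c0, 2 is TPIDRURW, the user read/write thread ID register;
	 *  c13, c0, 3 below is TPIDRURO, the user read-only one.)
	 */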
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2
	ldr	r0, [r6, #(L_PRIVATE)]
	mcr	p15, 0, r0, c13, c0, 3
#endif

#ifdef FPU_VFP
	/*
	 * If we have a VFP, we need to load FPEXC.
	 */
	ldr	r0, [r5, #(CI_VFP_ID)]
	cmp	r0, #0
	ldrne	r0, [r7, #(PCB_VFP_FPEXC)]
	vmsrne	fpexc, r0
#endif

	/*
	 * Check for restartable atomic sequences (RAS).
	 */
	ldr	r0, [r6, #(L_PROC)]	/* fetch the proc for ras_lookup */
	ldr	r2, [r0, #(P_RASLIST)]
	cmp	r2, #0			/* p->p_raslist == NULL? */
	beq	.Lswitch_do_restore

	/* we can use r8 since we haven't restored saved registers yet. */
	ldr	r8, [r6, #(L_MD_TF)]	/* r8 = trapframe (used below) */
	ldr	r1, [r8, #(TF_PC)]	/* second ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r8, #(TF_PC)]

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

.Lswitch_do_restore:
	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
#elif defined(_ARM_ARCH_DWORD_OK)
	ldrd	r8, r9, [r7, #(PCB_R8)]
	ldrd	r10, r11, [r7, #(PCB_R10)]
	ldr	r12, [r7, #(PCB_R12)]
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r12}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
#ifndef ARM_MMU_EXTENDED
	str	r4, [r5, #CI_LASTLWP]
#endif

	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

#ifdef _ARM_ARCH_7
	clrex				/* cause any subsequent STREX* to fail */
#endif

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
	 */
	pop	{r4-r7, ip, pc}

END(cpu_switchto)

ENTRY_NP(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *	arg0(r0) = old lwp
	 *	arg1(r1) = new lwp
	 * setup by cpu_lwp_fork:
	 *	r4 = func to call
	 *	r5 = arg to func
	 *	r6 = <unused>
	 *	r7 = spsr mode
	 */
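	/*
	 * Roughly (a sketch, not literal C):
	 *
	 *	lwp_startup(oldlwp, newlwp);
	 *	(*func)(arg);
	 *	... then fall into the AST / return-to-user path below.
	 */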
	bl	_C_LABEL(lwp_startup)

	mov	fp, #0			/* top stack frame */
	mov	r0, r5
	mov	r1, sp
#ifdef _ARM_ARCH_5
	blx	r4
#else
	mov	lr, pc
	mov	pc, r4
#endif

	GET_CPSR(r0)
	CPSID_I(r0, r0)			/* Kill irq's */

	/* for DO_AST */
	GET_CURX(r4, r5)		/* r4 = curcpu, r5 = curlwp */
	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
	PULLFRAME

	movs	pc, lr			/* Exit */
END(lwp_trampoline)

AST_ALIGNMENT_FAULT_LOCALS

#ifdef __HAVE_FAST_SOFTINTS
/*
 * Called at IPL_HIGH
 *	r0 = new lwp
 *	r1 = ipl for softint_dispatch
 */
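/*
 * Rough flow (a sketch): save the pinned (interrupted) lwp's callee-saved
 * registers and a small switch frame, make the softint lwp curlwp, run
 * softint_dispatch(pinned lwp, ipl) on the softint lwp's stack, then switch
 * back.  If the softint lwp blocks, it instead resumes in softint_tramp
 * below.
 */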
ENTRY_NP(softint_switch)
	push	{r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]	/* get curcpu */
#if defined(TPIDRPRW_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4	/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]	/* get old lwp */
#endif
	mrs	r6, cpsr		/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp
	 */
	mov	r2, sp			/* think ip */
	adr	r3, softint_tramp	/* think lr */
	push	{r2-r3}
	push	{r4-r7}

	mov	r5, r0			/* save new lwp */

	ldr	r2, [r4, #(L_PCB)]	/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, r9, [r2, #(PCB_R8)]
	strd	r10, r11, [r2, #(PCB_R10)]
	strd	r12, r13, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register in case it was
	 * set in userland.
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r2, #(PCB_USER_PID_RW)]
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */

	IRQdisable
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4	/* save new lwp */
#endif
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	str	r5, [r7, #(CI_CURLWP)]	/* save new lwp */
	/*
	 * No need for barrier after ci->ci_curlwp = softlwp -- when we
	 * enter a softint lwp, it can't be holding any mutexes, so it
	 * can't release any until after it has acquired them, so we
	 * need not participate in the protocol with mutex_vector_enter
	 * barriers here.
	 */

#ifdef KASAN
	mov	r0, r5
	bl	_C_LABEL(kasan_softint)
#endif

	/*
	 * Normally, we'd get {r8-r13} but since this is a softint lwp
	 * its existing state doesn't matter. We start the stack just
	 * below the trapframe.
	 */
	ldr	sp, [r5, #(L_MD_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQs again. */
	IRQenable
	/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */

	/*
	 * We don't need to restore all the registers since no other lwp ran
	 * in the meantime.  But we do need the SP from the formerly pinned lwp.
	 */

	IRQdisable
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4	/* restore pinned lwp */
#endif
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	str	r4, [r7, #(CI_CURLWP)]	/* restore pinned lwp */
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	ldr	sp, [r2, #(PCB_KSP)]	/* now running on the old stack. */

	/* At this point we can allow IRQs again. */
	msr	cpsr_c, r6

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	pop	{r4-r7, ip, lr}		/* eat switch frame */
	pop	{r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)

/*
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	msr	cpsr_c, r6		/* restore interrupts */
	pop	{r4, r6, r7, pc}	/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */