/*	$NetBSD: cpuswitch.S,v 1.61 2011/01/14 02:06:23 rmind Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <arm/arm32/pte.h>
#include <machine/param.h>
#include <machine/frame.h>
#include <machine/asm.h>
#include <machine/cpu.h>

	RCSID("$NetBSD: cpuswitch.S,v 1.61 2011/01/14 02:06:23 rmind Exp $")

/* LINTSTUB: include <sys/param.h> */

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled, since FIQs are special.
 */

#ifdef _ARM_ARCH_6
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif
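
/*
 * Illustrative sketch (not assembled): the pre-v6 macros above perform
 * a read-modify-write of the PSR.  In C-like pseudocode, with the
 * hypothetical helpers read_cpsr()/write_cpsr_c() standing in for the
 * mrs/msr instructions:
 *
 *	uint32_t psr = read_cpsr();
 *	write_cpsr_c(psr | I32_bit);	// IRQdisable: set the I bit
 *	write_cpsr_c(psr & ~I32_bit);	// IRQenable: clear the I bit
 *
 * On ARMv6 and later, cpsid/cpsie update the mask bits atomically in a
 * single instruction, so no scratch register (r14 above) is needed.
 */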

	.text
.Lpmap_previous_active_lwp:
	.word	_C_LABEL(pmap_previous_active_lwp)

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next, bool returning)
 *
 *	Switch to the specified next LWP
 *	Arguments:
 *
 *	r0	'struct lwp *' of the current LWP (or NULL if exiting)
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	'bool' returning: non-zero when the fast-softint code is
 *		switching back to an interrupted LWP (not examined here)
 */
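
/*
 * Hedged outline of the code below, in C-like pseudocode (field names
 * correspond to the assym.h offsets used in the assembly; this is a
 * summary, not a verbatim implementation):
 *
 *	ci = curcpu();
 *	ci->ci_curlwp = new;
 *	ci->ci_curpcb = new->l_pcb;
 *	if (old != NULL)
 *		save r8-r13 (+ thread ID regs, VFP) into old->l_pcb;
 *	load r8-r13 (+ thread ID regs, VFP/FPE) from new->l_pcb;
 *	pmap_previous_active_lwp = old;		// for pmap_activate()
 *	if ((ra = ras_lookup(proc, tf->tf_pc)) != -1)
 *		tf->tf_pc = ra;			// restart atomic sequence
 *	return old;
 */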
ENTRY(cpu_switchto)
	mov	ip, sp
	stmfd	sp!, {r4-r7, ip, lr}

	/* move lwps into callee-saved registers */
	mov	r6, r1
	mov	r4, r0

#ifdef PROCESS_ID_CURCPU
	GET_CURCPU(r7)
#elif defined(PROCESS_ID_IS_CURLWP)
	mrc	p15, 0, r0, c13, c0, 4	/* get old lwp (r4 maybe 0) */
	ldr	r7, [r0, #(L_CPU)]	/* get cpu from old lwp */
#elif !defined(MULTIPROCESSOR)
	ldr	r7, [r6, #L_CPU]	/* get cpu from new lwp */
#else
#error curcpu() method not defined
#endif
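
	/*
	 * Note: the three strategies above locate curcpu() as follows:
	 * PROCESS_ID_CURCPU reads it directly via GET_CURCPU;
	 * PROCESS_ID_IS_CURLWP keeps curlwp in TPIDRPRW (CP15 c13/c0/4,
	 * the ARMv6+ privileged-only thread ID register) and follows
	 * l_cpu from there; uniprocessor kernels can follow l_cpu of
	 * any lwp, since there is only one struct cpu_info.
	 */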

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = curcpu() */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif

#ifdef MULTIPROCESSOR
	str	r7, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4	/* set current lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	/* We have a new curlwp now so make a note of it */
	str	r6, [r7, #(CI_CURLWP)]
#endif

	/* Hook in a new pcb */
	ldr	r0, [r6, #(L_PCB)]
	str	r0, [r7, #(CI_CURPCB)]
	mov	r7, r0

	/* At this point we can allow IRQs again. */
#ifndef __HAVE_UNNESTED_INTRS
	IRQenable
#endif

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If the old lwp on entry to cpu_switchto was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r4, #0
	beq	.Ldo_switch

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r5, [r4, #(L_PCB)]

	/* Save all the registers in the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, [r5, #(PCB_R8)]
	strd	r10, [r5, #(PCB_R10)]
	strd	r12, [r5, #(PCB_R12)]
#else
	add	r0, r5, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif
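
	/*
	 * Note (hedged): the strd path is used on XScale and ARMv6
	 * because paired 64-bit stores tend to be faster there than a
	 * single stmia; strd also requires the first register of each
	 * pair to be even-numbered (r8, r10, r12), which the pcb
	 * layout satisfies.
	 */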

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r5, #(PCB_USER_PID_RW)]
#endif
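
	/*
	 * Background: CP15 c13/c0/2 is TPIDRURW, the ARMv6+ software
	 * thread ID register readable and writable from user mode; it
	 * is switched per-lwp so userland (e.g. TLS) can use it
	 * freely.  Its read-only sibling TPIDRURO (c13/c0/3) is
	 * reloaded on the restore path below.
	 */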
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* rem: r4 = old lwp */
	/* rem: r5 = old pcb */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

#ifdef FPU_VFP
	/*
	 * Now's a good time to 'save' the VFP context.  Note that we
	 * don't really force a save here, which can save time if we
	 * end up restarting the same context.
	 */
	bl	_C_LABEL(vfp_savecontext)
#endif
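
	/*
	 * Sketch of the lazy-save technique in C-like pseudocode (the
	 * general idea only; not the literal vfp_savecontext(), and
	 * all names below are hypothetical):
	 *
	 *	disable_vfp_access();		// next VFP use will trap
	 *	// leave the VFP registers in place; spill them only if
	 *	// a different lwp faults on a VFP instruction later:
	 *	on_vfp_fault(l) {
	 *		if (vfp_owner != l) {
	 *			spill VFP regs to vfp_owner's pcb;
	 *			load VFP regs from l's pcb;
	 *			vfp_owner = l;
	 *		}
	 *		enable_vfp_access();
	 *	}
	 *
	 * If the same lwp runs next, no VFP state moves at all.
	 */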

	/* Restore saved context */

.Ldo_switch:
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

#ifdef _ARM_ARCH_6
	/*
	 * Restore user thread/process id registers
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2
	ldr	r0, [r7, #(PCB_USER_PID_RO)]
	mcr	p15, 0, r0, c13, c0, 3
#endif

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]
#elif defined(_ARM_ARCH_6)
	ldrd	r8, [r7, #(PCB_R8)]
	ldrd	r10, [r7, #(PCB_R10)]
	ldrd	r12, [r7, #(PCB_R12)]
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r13}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
	ldr	r1, .Lpmap_previous_active_lwp
	str	r4, [r1]

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

#ifdef FPU_VFP
	mov	r0, r6
	bl	_C_LABEL(vfp_loadcontext)
#endif
#ifdef ARMFPE
	add	r0, r7, #(PCB_SIZE) & 0x00ff
	add	r0, r0, #(PCB_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif
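
	/*
	 * Note on the two adds in the ARMFPE block above: an ARM
	 * data-processing immediate is an 8-bit value with an even
	 * rotation, so a wider constant such as PCB_SIZE is built in
	 * two steps (low byte, then high byte), leaving
	 * r0 = pcb + PCB_SIZE.
	 */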

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r5, #(P_RASLIST)]
	ldr	r1, [r7, #(PCB_TF)]	/* r1 = trapframe (used below) */
	teq	r2, #0			/* p->p_raslist == NULL? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:
	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
	 */
	ldmfd	sp, {r4-r7, sp, pc}
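
	/*
	 * Note: there is deliberately no writeback ('!') on the ldmfd
	 * above; sp itself is in the register list and is reloaded
	 * from the frame (the ip slot saved on entry), which unwinds
	 * the frame at the same time.
	 */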

.Lswitch_do_ras:
	ldr	r1, [r1, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	ldrne	r1, [r7, #(PCB_TF)]
	strne	r0, [r1, #(TF_PC)]
	b	.Lswitch_return
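
/*
 * In C terms, the RAS fixup above is roughly (hedged sketch; the MI
 * prototype is void *ras_lookup(struct proc *, void *)):
 *
 *	void *ra = ras_lookup(p, (void *)tf->tf_pc);
 *	if (ra != (void *)-1)
 *		tf->tf_pc = (register_t)ra;	// restart the sequence
 *
 * i.e. if the new lwp was preempted inside a registered restartable
 * atomic sequence, it resumes from the start of that sequence.
 */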

ENTRY(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *
	 * arg0(r0) = old lwp
	 * arg1(r1) = new lwp
	 */
	bl	_C_LABEL(lwp_startup)

	mov	r0, r5
	mov	r1, sp
	mov	lr, pc
	mov	pc, r4
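
	/*
	 * Hedged note: r4/r5 were loaded from the switchframe built by
	 * the MD fork code (cpu_lwp_fork() stores the function in r4
	 * and its argument in r5), so the four instructions above make
	 * the indirect call (*r4)(r5, sp), with sp most likely the
	 * trapframe pointer.
	 */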

	/* Kill IRQs */
	mrs	r0, cpsr
	orr	r0, r0, #(IF32_bits)
	msr	cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

#ifdef __HAVE_FAST_SOFTINTS
/*
 * Called at IPL_HIGH
 * r0 = new lwp
 * r1 = ipl for softint_dispatch
 */
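/*
 * Hedged sketch of the flow implemented below: borrow the CPU from the
 * pinned (interrupted) lwp, run the handlers on the softint lwp's own
 * stack, then switch straight back:
 *
 *	save pinned lwp's r8-r13 into its pcb;
 *	curlwp = softlwp; sp = softlwp's stack (just below its tf);
 *	softint_dispatch(pinned_lwp, ipl);	// run the handlers
 *	curlwp = pinned_lwp; reload its sp;
 *	return;
 *
 * If the softint lwp blocks, the switchframe built below makes it
 * resume in softint_tramp instead.
 */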
ENTRY_NP(softint_switch)
	stmfd	sp!, {r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]	/* get curcpu */
#if defined(PROCESS_ID_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4	/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]	/* get old lwp */
#endif
	mrs	r6, cpsr		/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp
	 */
	mov	r2, sp			/* think ip */
	adr	r3, softint_tramp	/* think lr */
	stmfd	sp!, {r2-r3}
	stmfd	sp!, {r4-r7}

	mov	r5, r0			/* save new lwp */

	ldr	r2, [r4, #(L_PCB)]	/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, [r2, #(PCB_R8)]
	strd	r10, [r2, #(PCB_R10)]
	strd	r12, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4	/* save new lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	str	r5, [r7, #(CI_CURLWP)]	/* save new lwp */
#endif

	/* Hook in a new pcb */
	str	r2, [r7, #(CI_CURPCB)]

	/*
	 * Normally, we'd get {r8-r13} but since this is a softint lwp
	 * its existing state doesn't matter.  We start the stack just
	 * below the trapframe.
	 */
	ldr	sp, [r2, #(PCB_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQs again. */
#ifndef __HAVE_UNNESTED_INTRS
	IRQenable
#endif

	/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif
	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed.  But we do need the SP from the formerly pinned lwp.
	 */

#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4	/* restore pinned lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	str	r4, [r7, #(CI_CURLWP)]	/* restore pinned lwp */
#endif
	str	r2, [r7, #(CI_CURPCB)]	/* restore the curpcb */
	ldr	sp, [r2, #(PCB_SP)]	/* now running on the old stack. */

	/* At this point we can allow IRQs again. */
	msr	cpsr_c, r6

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	ldmfd	sp!, {r4-r7, ip, lr}	/* eat switch frame */
	ldmfd	sp!, {r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)

/*
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	mov	r3, #0				/* tell softint_dispatch */
	str	r3, [r0, #(L_CTXSWTCH)]		/* the soft lwp blocked */

	msr	cpsr_c, r6			/* restore interrupts */
	ldmfd	sp!, {r4, r6, r7, pc}		/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */