/*	$NetBSD: cpuswitch.S,v 1.63 2011/04/07 10:03:47 matt Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created	: 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/frame.h>
#include <machine/asm.h>
#include <machine/cpu.h>

RCSID("$NetBSD: cpuswitch.S,v 1.63 2011/04/07 10:03:47 matt Exp $")

/* LINTSTUB: include <sys/param.h> */

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable
 * These keep FIQ's enabled since FIQ's are special.
 */

#ifdef _ARM_ARCH_6
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif

	.text
.Lpmap_previous_active_lwp:
	.word	_C_LABEL(pmap_previous_active_lwp)

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next, bool returning)
 *
 * Switch to the specified next LWP.
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	'bool' returning flag for fast soft interrupts
 *		(not examined by this implementation)
 */
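/*
 * For orientation, a rough C-level sketch of the caller's side of this
 * contract (hedged: the real caller is the MI mi_switch() path and its
 * details vary by kernel version; this is not project code):
 *
 *	struct lwp *
 *	mi_switch_sketch(struct lwp *cur, struct lwp *next)
 *	{
 *		struct lwp *prev;
 *
 *		// MD switch: saves cur's callee-saved state (unless cur
 *		// is NULL, i.e. exiting), installs next as curlwp, and
 *		// resumes next where it last called cpu_switchto().
 *		prev = cpu_switchto(cur, next, false);
 *
 *		// When 'cur' eventually runs again, it resumes here with
 *		// 'prev' naming the LWP that ran immediately before it.
 *		return prev;
 *	}
 */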
ENTRY(cpu_switchto)
	mov	ip, sp
	stmfd	sp!, {r4-r7, ip, lr}

	/* move lwps into callee-saved registers */
	mov	r6, r1
	mov	r4, r0

#ifdef PROCESS_ID_CURCPU
	GET_CURCPU(r7)
#elif defined(PROCESS_ID_IS_CURLWP)
	mrc	p15, 0, r0, c13, c0, 4		/* get old lwp (r4 may be 0) */
	ldr	r7, [r0, #(L_CPU)]		/* get cpu from old lwp */
#elif !defined(MULTIPROCESSOR)
	ldr	r7, [r6, #L_CPU]		/* get cpu from new lwp */
#else
#error curcpu() method not defined
#endif
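	/*
	 * Note on the three curcpu() strategies above (descriptive only,
	 * hedged where it goes beyond this file):
	 *
	 *	PROCESS_ID_CURCPU    - a cp15 thread register (presumably
	 *			       read by GET_CURCPU) holds cpu_info.
	 *	PROCESS_ID_IS_CURLWP - TPIDRPRW (c13, c0, 4) holds curlwp;
	 *			       we chase l->l_cpu to reach cpu_info.
	 *	uniprocessor	     - any lwp's l_cpu names the only CPU,
	 *			       so the new lwp's pointer suffices.
	 */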

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = curcpu() */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif

#ifdef MULTIPROCESSOR
	str	r7, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4		/* set current lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	/* We have a new curlwp now so make a note of it */
	str	r6, [r7, #(CI_CURLWP)]
#endif

	/* Hook in a new pcb */
	ldr	r0, [r6, #(L_PCB)]
	str	r0, [r7, #(CI_CURPCB)]
	mov	r7, r0

	/* At this point we can allow IRQ's again. */
#ifndef __HAVE_UNNESTED_INTRS
	IRQenable
#endif

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If the old lwp on entry to cpu_switchto was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r4, #0
	beq	.Ldo_switch

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r5, [r4, #(L_PCB)]

	/* Save all the registers in the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, [r5, #(PCB_R8)]
	strd	r10, [r5, #(PCB_R10)]
	strd	r12, [r5, #(PCB_R12)]
#else
	add	r0, r5, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r5, #(PCB_USER_PID_RW)]
#endif
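	/*
	 * For orientation (a hedged sketch, not project code): on ARMv6
	 * the c13 thread registers give each thread its own TLS slots.
	 * TPIDRURW (c13, c0, 2) is user read/write and is saved/restored
	 * per-LWP above; TPIDRURO (c13, c0, 3) is user read-only and is
	 * reloaded from l->l_private on switch-in below.  Userland would
	 * read the latter roughly like:
	 *
	 *	static inline void *
	 *	arm_tls_pointer_sketch(void)
	 *	{
	 *		void *p;
	 *		// read-only from user mode; written only by the
	 *		// kernel (see the mcr of L_PRIVATE below)
	 *		__asm("mrc p15, 0, %0, c13, c0, 3" : "=r"(p));
	 *		return p;
	 *	}
	 */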
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* rem: r4 = old lwp */
	/* rem: r5 = old pcb */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

#ifdef FPU_VFP
	/*
	 * Now's a good time to 'save' the VFP context.  Note that we
	 * don't really force a save here, which can save time if we
	 * end up restarting the same context.
	 */
	bl	_C_LABEL(vfp_savecontext)
#endif
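	/*
	 * Conceptually (a hedged sketch, not the actual vfp_savecontext
	 * implementation), the lazy 'save' noted above works like this:
	 *
	 *	void
	 *	lazy_fp_save_sketch(struct cpu_sketch *ci)
	 *	{
	 *		// Don't spill the FP registers now; just turn the
	 *		// unit off.  If the next FP use comes from the same
	 *		// LWP, re-enabling it is all that's needed; only a
	 *		// *different* LWP's use forces a real spill+reload
	 *		// from the undefined-instruction handler.
	 *		fp_disable(ci);		// hypothetical helper
	 *	}
	 */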

	/* Restore saved context */

.Ldo_switch:
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

#ifdef _ARM_ARCH_6
	/*
	 * Restore user thread/process id registers
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2
	ldr	r0, [r6, #(L_PRIVATE)]
	mcr	p15, 0, r0, c13, c0, 3
#endif

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]
#elif defined(_ARM_ARCH_6)
	ldrd	r8, [r7, #(PCB_R8)]
	ldrd	r10, [r7, #(PCB_R10)]
	ldrd	r12, [r7, #(PCB_R12)]
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r13}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
	ldr	r1, .Lpmap_previous_active_lwp
	str	r4, [r1]

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

#ifdef FPU_VFP
	mov	r0, r6
	bl	_C_LABEL(vfp_loadcontext)
#endif
#ifdef ARMFPE
	add	r0, r7, #(PCB_SIZE) & 0x00ff
	add	r0, r0, #(PCB_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r5, #(P_RASLIST)]
	ldr	r1, [r7, #(PCB_TF)]	/* r1 = trapframe (used below) */
	teq	r2, #0			/* p->p_raslist == NULL? */
	bne	.Lswitch_do_ras		/* no, check for an active RAS */

.Lswitch_return:
	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
	 */
	ldmfd	sp, {r4-r7, sp, pc}

.Lswitch_do_ras:
	ldr	r1, [r1, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	ldrne	r1, [r7, #(PCB_TF)]
	strne	r0, [r1, #(TF_PC)]
	b	.Lswitch_return
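	/*
	 * The RAS repair above, restated as C for orientation (a hedged
	 * sketch; the real ras_lookup() lives in the MI kernel):
	 *
	 *	// If the new lwp was preempted inside a registered
	 *	// restartable atomic sequence, rewind its user PC to the
	 *	// start of that sequence so it reruns atomically.
	 *	void *pc = ras_lookup(p, (void *)tf->tf_pc);
	 *	if (pc != (void *)-1)
	 *		tf->tf_pc = (register_t)pc;
	 */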

ENTRY(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *
	 *	arg0(r0) = old lwp
	 *	arg1(r1) = new lwp
	 */
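	/*
	 * In addition (a descriptive note; these values are planted in
	 * the new lwp's switchframe at creation time, presumably by
	 * cpu_lwp_fork()):
	 *
	 *	r4 = function for this lwp to run
	 *	r5 = argument to that function
	 *
	 * which is why, after lwp_startup(), the code below calls the
	 * function in r4 with r0 = r5 and r1 = sp.
	 */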
	bl	_C_LABEL(lwp_startup)

	mov	r0, r5
	mov	r1, sp
	mov	lr, pc
	mov	pc, r4

	/* Kill irq's */
	mrs	r0, cpsr
	orr	r0, r0, #(IF32_bits)
	msr	cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

#ifdef __HAVE_FAST_SOFTINTS
/*
 * Called at IPL_HIGH
 *	r0 = new lwp
 *	r1 = ipl for softint_dispatch
 */
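/*
 * For orientation (a hedged sketch of the MI fast-softint idea; the real
 * entry points live in the MI kern_softint code): each software interrupt
 * level has a dedicated lwp, and the MD code "borrows" the CPU from the
 * current (pinned) lwp without a full mi_switch():
 *
 *	void
 *	softint_trigger_sketch(struct lwp *softlwp, int ipl)
 *	{
 *		// switch stack and curlwp to softlwp, then run
 *		//   softint_dispatch(pinned_lwp, ipl);
 *		// and switch straight back unless softlwp blocked.
 *		softint_switch(softlwp, ipl);
 *	}
 */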
ENTRY_NP(softint_switch)
	stmfd	sp!, {r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]	/* get curcpu */
#if defined(PROCESS_ID_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4	/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]	/* get old lwp */
#endif
	mrs	r6, cpsr		/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp
	 */
	mov	r2, sp			/* think ip */
	adr	r3, softint_tramp	/* think lr */
	stmfd	sp!, {r2-r3}
	stmfd	sp!, {r4-r7}

	mov	r5, r0			/* save new lwp */

	ldr	r2, [r4, #(L_PCB)]	/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, [r2, #(PCB_R8)]
	strd	r10, [r2, #(PCB_R10)]
	strd	r12, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4	/* save new lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	str	r5, [r7, #(CI_CURLWP)]	/* save new lwp */
#endif

	/* Hook in a new pcb */
	str	r2, [r7, #(CI_CURPCB)]

	/*
	 * Normally, we'd get {r8-r13} but since this is a softint lwp
	 * its existing state doesn't matter.  We start the stack just
	 * below the trapframe.
	 */
	ldr	sp, [r2, #(PCB_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQ's again. */
#ifndef __HAVE_UNNESTED_INTRS
	IRQenable
#endif

	/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif
	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed.  But we do need the SP from the formerly pinned lwp.
	 */

#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4	/* restore pinned lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	str	r4, [r7, #(CI_CURLWP)]	/* restore pinned lwp */
#endif
	str	r2, [r7, #(CI_CURPCB)]	/* restore the curpcb */
	ldr	sp, [r2, #(PCB_SP)]	/* now running on the old stack. */

	/* At this point we can allow IRQ's again. */
	msr	cpsr_c, r6

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	ldmfd	sp!, {r4-r7, ip, lr}	/* eat switch frame */
	ldmfd	sp!, {r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)

/*
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
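/*
 * Descriptive note: control arrives here (rather than back in
 * softint_switch) when the soft lwp blocked.  softint_switch planted this
 * address as the saved "lr" in the switchframe, so the first mi_switch()
 * away from the blocked soft lwp resumes it here.  We fix up ci_mtx_count,
 * record the block in L_CTXSWTCH for softint_dispatch to see, restore the
 * saved CPSR, and return to softint_switch's original caller.
 */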
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	mov	r3, #0			/* tell softint_dispatch */
	str	r3, [r0, #(L_CTXSWTCH)]	/* the soft lwp blocked */

	msr	cpsr_c, r6		/* restore interrupts */
	ldmfd	sp!, {r4, r6, r7, pc}	/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */