/*	$NetBSD: cpuswitch.S,v 1.65 2012/08/14 20:42:33 matt Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/frame.h>
#include <machine/asm.h>
#include <machine/cpu.h>

RCSID("$NetBSD: cpuswitch.S,v 1.65 2012/08/14 20:42:33 matt Exp $")

/* LINTSTUB: include <sys/param.h> */
#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable
 * These keep FIQ's enabled since FIQ's are special.
 */

#ifdef _ARM_ARCH_6
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif
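
/*
 * For illustration only (a sketch, not code in this tree): the pre-v6
 * path above is roughly equivalent to
 *
 *	uint32_t psr = get_cpsr();		// mrs
 *	set_cpsr(psr | I32_bit);		// IRQdisable
 *	set_cpsr(psr & ~I32_bit);		// IRQenable
 *
 * where get_cpsr/set_cpsr are hypothetical helpers standing in for the
 * mrs/msr instructions. F32_bit is never touched, so FIQ's stay enabled.
 */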

	.text
.Lpmap_previous_active_lwp:
	.word	_C_LABEL(pmap_previous_active_lwp)

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next, bool returning)
 *
 * Switch to the specified next LWP.
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP (or NULL if exiting)
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	'bool' returning (not used by this implementation)
 */
ENTRY(cpu_switchto)
	mov	ip, sp
	stmfd	sp!, {r4-r7, ip, lr}

	/* move lwps into callee-saved registers */
	mov	r6, r1
	mov	r4, r0

#ifdef PROCESS_ID_CURCPU
	GET_CURCPU(r7)
#elif defined(PROCESS_ID_IS_CURLWP)
	mrc	p15, 0, r0, c13, c0, 4		/* get old lwp (r4 may be NULL) */
	ldr	r7, [r0, #(L_CPU)]		/* get cpu from old lwp */
#elif !defined(MULTIPROCESSOR)
	ldr	r7, [r6, #L_CPU]		/* get cpu from new lwp */
#else
#error curcpu() method not defined
#endif
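
	/*
	 * (Note: on ARMv6+, p15/c13/c0/4 is TPIDRPRW, the privileged-only
	 * thread ID register; PROCESS_ID_IS_CURLWP keeps curlwp there so
	 * looking it up needs no memory access.)
	 */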

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = curcpu() */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif

#ifdef MULTIPROCESSOR
	str	r7, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4		/* set current lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	/* We have a new curlwp now so make a note of it */
	str	r6, [r7, #(CI_CURLWP)]
#endif

	/* Get the new pcb */
	ldr	r7, [r6, #(L_PCB)]

	/* At this point we can allow IRQ's again. */
#ifndef __HAVE_UNNESTED_INTRS
	IRQenable
#endif

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If the old lwp on entry to cpu_switchto was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r4, #0
	beq	.Ldo_switch

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the pcb (user structure) for the old lwp. */
	ldr	r5, [r4, #(L_PCB)]

	/* Save all the registers in the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, [r5, #(PCB_R8)]
	strd	r10, [r5, #(PCB_R10)]
	strd	r12, [r5, #(PCB_R12)]
#else
	add	r0, r5, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif
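	/*
	 * (strd stores the named even register and the next odd one, so
	 * the three strd's above cover r8-r13 as pairs.)
	 */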

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register (TPIDRURW)
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r5, #(PCB_USER_PID_RW)]
#endif
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* rem: r4 = old lwp */
	/* rem: r5 = old pcb */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/* Restore saved context */

.Ldo_switch:
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

#ifdef _ARM_ARCH_6
	/*
	 * Restore user thread/process id registers
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2
	ldr	r0, [r6, #(L_PRIVATE)]
	mcr	p15, 0, r0, c13, c0, 3
#endif
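	/*
	 * (c13/c0/2 is TPIDRURW and c13/c0/3 is TPIDRURO on ARMv6+; the
	 * lwp's l_private pointer lands in the user read-only slot, where
	 * userland TLS code expects to find it.)
	 */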

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]
#elif defined(_ARM_ARCH_6)
	ldrd	r8, [r7, #(PCB_R8)]
	ldrd	r10, [r7, #(PCB_R10)]
	ldrd	r12, [r7, #(PCB_R12)]
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r13}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
	ldr	r1, .Lpmap_previous_active_lwp
	str	r4, [r1]

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

#ifdef ARMFPE
	add	r0, r7, #(PCB_SIZE) & 0x00ff
	add	r0, r0, #(PCB_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif
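	/*
	 * (PCB_SIZE is added in two steps because an ARM data-processing
	 * immediate is limited to a rotated 8-bit field.)
	 */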

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r5, #(P_RASLIST)]
	ldr	r1, [r7, #(PCB_TF)]	/* r1 = trapframe (used below) */
	teq	r2, #0			/* p->p_raslist == NULL? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:
	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
	 */
	ldmfd	sp, {r4-r7, sp, pc}
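	/* (no '!' writeback above: sp itself is reloaded from the frame) */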

.Lswitch_do_ras:
	ldr	r1, [r1, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	ldrne	r1, [r7, #(PCB_TF)]
	strne	r0, [r1, #(TF_PC)]
	b	.Lswitch_return
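
	/*
	 * The RAS fixup above, as an illustrative C sketch (not code from
	 * this tree):
	 *
	 *	void *rp = ras_lookup(p, (void *)tf->tf_pc);
	 *	if (rp != (void *)-1)		// -1 means "not in a RAS"
	 *		tf->tf_pc = (u_int)rp;
	 *
	 * i.e. if the old PC sat inside a registered restartable atomic
	 * sequence, rewind it to the start of that sequence before resuming.
	 */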

ENTRY(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *
	 *	arg0(r0) = old lwp
	 *	arg1(r1) = new lwp
	 */
	bl	_C_LABEL(lwp_startup)

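	/*
	 * r4 (the function to call) and r5 (its argument) were staged in
	 * the switchframe by cpu_lwp_fork(); this code only consumes them.
	 */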
	mov	r0, r5
	mov	r1, sp
	mov	lr, pc
	mov	pc, r4

	/* Kill irq's */
	mrs	r0, cpsr
	orr	r0, r0, #(IF32_bits)
	msr	cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

#ifdef __HAVE_FAST_SOFTINTS
/*
 * Called at IPL_HIGH
 * r0 = new lwp
 * r1 = ipl for softint_dispatch
 */
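/*
 * Roughly, the flow below in C-like pseudocode (an illustrative sketch,
 * not code from this tree):
 *
 *	save the pinned lwp's callee-saved registers, cpsr, and a
 *	    softint_tramp return frame;
 *	curlwp = softint lwp; sp = just below its trapframe;
 *	softint_dispatch(pinned_lwp, ipl);
 *	curlwp = pinned_lwp; sp = pinned lwp's saved sp;
 *	restore cpsr and return;
 *
 * softint_tramp is reached only if the softint lwp blocks.
 */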
ENTRY_NP(softint_switch)
	stmfd	sp!, {r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]	/* get curcpu */
#if defined(PROCESS_ID_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4	/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]	/* get old lwp */
#endif
	mrs	r6, cpsr		/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp
	 */
	mov	r2, sp			/* think ip */
	adr	r3, softint_tramp	/* think lr */
	stmfd	sp!, {r2-r3}
	stmfd	sp!, {r4-r7}

	mov	r5, r0			/* save new lwp */

	ldr	r2, [r4, #(L_PCB)]	/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, [r2, #(PCB_R8)]
	strd	r10, [r2, #(PCB_R10)]
	strd	r12, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4	/* save new lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	str	r5, [r7, #(CI_CURLWP)]	/* save new lwp */
#endif

	/*
	 * Normally we'd restore {r8-r13}, but since this is a softint
	 * lwp its existing state doesn't matter. We start the stack
	 * just below the trapframe.
	 */
	ldr	sp, [r2, #(PCB_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQ's again. */
#ifndef __HAVE_UNNESTED_INTRS
	IRQenable
#endif

	/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif
	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed. But we do need the SP from the formerly pinned lwp.
	 */

#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4	/* restore pinned lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	str	r4, [r7, #(CI_CURLWP)]	/* restore pinned lwp */
#endif
	ldr	sp, [r2, #(PCB_SP)]	/* now running on the old stack. */

	/* At this point we can allow IRQ's again. */
	msr	cpsr_c, r6

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	ldmfd	sp!, {r4-r7, ip, lr}	/* eat switch frame */
	ldmfd	sp!, {r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)

/*
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	mov	r3, #0			/* tell softint_dispatch */
	str	r3, [r0, #(L_CTXSWTCH)]	/* the soft lwp blocked */

	msr	cpsr_c, r6		/* restore interrupts */
	ldmfd	sp!, {r4, r6, r7, pc}	/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */