/*	$NetBSD: cpuswitch.S,v 1.78 2013/08/18 06:28:18 matt Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/cpu.h>
#include <machine/frame.h>

	RCSID("$NetBSD: cpuswitch.S,v 1.78 2013/08/18 06:28:18 matt Exp $")

/* LINTSTUB: include <sys/param.h> */

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled, since FIQs are special.
 */

#ifdef _ARM_ARCH_6
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif
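
/*
 * Roughly, the pre-v6 IRQdisable above is a read-modify-write of the
 * CPSR I bit; a hedged C-like sketch of the same idea (illustrative
 * pseudocode only, the accessors are stand-ins, not real kernel
 * interfaces):
 *
 *	uint32_t cpsr = read_cpsr();
 *	cpsr |= I32_bit;	// mask IRQs; the F (FIQ) bit is untouched
 *	write_cpsr_c(cpsr);	// write the control field only
 *
 * On ARMv6+ the cpsid/cpsie instructions do the same thing atomically
 * in a single step.
 */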

	.text
.Lpmap_previous_active_lwp:
	.word	_C_LABEL(pmap_previous_active_lwp)

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next)
 *
 * Switch to the specified next LWP
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP (or NULL if exiting)
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	'bool' returning flag (not used by this implementation)
 */
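/*
 * A hedged C-level outline of the switch below (illustrative pseudocode
 * only; the helper names are stand-ins, not real kernel symbols):
 *
 *	struct lwp *
 *	cpu_switchto(struct lwp *old, struct lwp *new, bool returning)
 *	{
 *		struct cpu_info *ci = curcpu();
 *		ci->ci_curlwp = new;			// with IRQs masked
 *		if (old != NULL)
 *			save_callee_saved(pcb_of(old));	// r8-r13
 *		restore_callee_saved(pcb_of(new));
 *		pmap_previous_active_lwp = old;
 *		fixup_pc_if_in_ras(new);		// RAS check, below
 *		return old;				// in r0
 *	}
 */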
ENTRY(cpu_switchto)
	mov	ip, sp
	push	{r4-r7, ip, lr}

	/* move lwps into callee-saved registers */
	mov	r6, r1
	mov	r4, r0

#ifdef TPIDRPRW_IS_CURCPU
	GET_CURCPU(r3)
#elif defined(TPIDRPRW_IS_CURLWP)
	mrc	p15, 0, r0, c13, c0, 4	/* get old lwp (r4 may be 0) */
	ldr	r3, [r0, #(L_CPU)]	/* get cpu from old lwp */
#elif !defined(MULTIPROCESSOR)
	ldr	r3, [r6, #L_CPU]	/* get cpu from new lwp */
#else
#error curcpu() method not defined
#endif

	/* rem: r3 = curcpu() */
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif

#ifdef MULTIPROCESSOR
	str	r3, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4	/* set current lwp */
#endif
	/* We have a new curlwp now, so make a note of it. */
	str	r6, [r3, #(CI_CURLWP)]

	/* Get the new pcb */
	ldr	r7, [r6, #(L_PCB)]

	/* At this point we can allow IRQs again. */
#ifndef __HAVE_UNNESTED_INTRS
	IRQenable
#endif

	/* rem: r3 = curcpu() */
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If the old lwp on entry to cpu_switchto was NULL, then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r4, #0
	beq	.Ldo_switch

	/* rem: r3 = curcpu() */
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the pcb of the old lwp. */
	ldr	r5, [r4, #(L_PCB)]

	/* Save all the registers in the old lwp's pcb */
#if defined(_ARM_ARCH_DWORD_OK)
	strd	r8, [r5, #(PCB_R8)]
	strd	r10, [r5, #(PCB_R10)]
	strd	r12, [r5, #(PCB_R12)]
#else
	add	r0, r5, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save the user read/write thread/process id register
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r5, #(PCB_USER_PID_RW)]
#endif
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* rem: r3 = curcpu() */
	/* rem: r4 = old lwp */
	/* rem: r5 = old pcb */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/* Restore saved context */

.Ldo_switch:
	/* rem: r3 = curcpu() */
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

#ifdef _ARM_ARCH_6
	/*
	 * Restore the user thread/process id registers
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2
	ldr	r0, [r6, #(L_PRIVATE)]
	mcr	p15, 0, r0, c13, c0, 3
#endif
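
/*
 * Background (informational): on ARMv6+, CP15 c13 register 2 is the user
 * read/write thread ID register (TPIDRURW) and register 3 is the user
 * read-only one (TPIDRURO). The former is saved/restored per-lwp; the
 * latter is loaded from l_private, where userland (e.g. the TLS
 * implementation) expects to find its per-thread pointer.
 */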

#ifdef FPU_VFP
	/*
	 * If we have a VFP, we need to load FPEXC.
	 */
	ldr	r0, [r3, #(CI_VFP_ID)]
	cmp	r0, #0
	ldrne	r0, [r7, #(PCB_VFP_FPEXC)]
	mcrne	p10, 7, r0, c8, c0, 0
#endif
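
/*
 * Note (informational): the coprocessor 10 access above is the generic
 * encoding of "vmsr fpexc, r0". Restoring FPEXC per-lwp is what enables
 * or disables the VFP for the incoming lwp, so a later VFP instruction
 * can trap and trigger a lazy FP context load when needed.
 */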

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_KSP)]	/* sp */
#elif defined(_ARM_ARCH_DWORD_OK)
	ldrd	r8, [r7, #(PCB_R8)]
	ldrd	r10, [r7, #(PCB_R10)]
	ldrd	r12, [r7, #(PCB_R12)]	/* restores sp too */
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r13}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
	ldr	r1, .Lpmap_previous_active_lwp	/* XXXSMP */
	str	r4, [r1]

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r5, #(P_RASLIST)]
	ldr	r1, [r6, #(L_MD_TF)]	/* r1 = trapframe (used below) */
	teq	r2, #0			/* p->p_raslist == NULL? */
	bne	.Lswitch_do_ras		/* no, check for a match */
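
/*
 * Background (informational): a restartable atomic sequence is a small
 * region of user code registered via rasctl(2) that must execute without
 * an intervening context switch. If the incoming lwp was preempted inside
 * one, ras_lookup() returns the sequence's start address and we rewind
 * the trapframe PC to it so the sequence reruns from the top.
 */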

.Lswitch_return:
	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects the new lwp as its second argument */
	mov	r1, r6

#ifdef _ARM_ARCH_7
	clrex				/* cause any subsequent STREX* to fail */
#endif

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
	 */
	pop	{r4-r7, ip, pc}

.Lswitch_do_ras:
	ldr	r1, [r1, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	ldrne	r1, [r6, #(L_MD_TF)]
	strne	r0, [r1, #(TF_PC)]
	b	.Lswitch_return
END(cpu_switchto)

ENTRY_NP(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *	arg0(r0) = old lwp
	 *	arg1(r1) = new lwp
	 * setup by cpu_lwp_fork:
	 *	r4 = func to call
	 *	r5 = arg to func
	 *	r6 = <unused>
	 *	r7 = spsr mode
	 */
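	/*
	 * A hedged C-level sketch of this trampoline (illustrative
	 * pseudocode; "func" and "arg" stand for the values cpu_lwp_fork
	 * staged in r4/r5, not real symbols):
	 *
	 *	lwp_startup(prev_lwp, new_lwp);	// finish the switch
	 *	func(arg, tf);			// typically child_return()
	 *	return_to_userland(tf);		// AST check + PULLFRAME
	 */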
	bl	_C_LABEL(lwp_startup)

	mov	fp, #0			/* top stack frame */
	mov	r0, r5
	mov	r1, sp
#ifdef _ARM_ARCH_5
	blx	r4
#else
	mov	lr, pc
	mov	pc, r4
#endif

	GET_CPSR(r0)
	CPSID_I(r0, r0)			/* Kill IRQs */

	GET_CURCPU(r4)			/* for DO_AST */
	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
	PULLFRAME

	movs	pc, lr			/* Exit */
END(lwp_trampoline)

	AST_ALIGNMENT_FAULT_LOCALS

#ifdef __HAVE_FAST_SOFTINTS
/*
 * Called at IPL_HIGH
 *	r0 = new lwp
 *	r1 = ipl for softint_dispatch
 */
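/*
 * Overview (informational): softint_switch() borrows the CPU from the
 * interrupted ("pinned") lwp and runs the softint lwp on its own stack.
 * In the common case softint_dispatch() returns and we switch straight
 * back without a full mi_switch(). Only if the softint lwp blocks does
 * the switchframe faked up below route the eventual resumption through
 * softint_tramp.
 */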
ENTRY_NP(softint_switch)
	push	{r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]	/* get curcpu */
#if defined(TPIDRPRW_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4	/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]	/* get old lwp */
#endif
	mrs	r6, cpsr		/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp
	 */
	mov	r2, sp			/* think ip */
	adr	r3, softint_tramp	/* think lr */
	push	{r2-r3}
	push	{r4-r7}
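
	/*
	 * Note (informational): the two pushes above fake up a switchframe
	 * with the same layout cpu_switchto() saves (r4-r7, ip, lr), except
	 * that the "lr" slot holds softint_tramp. If the softint lwp blocks
	 * and the pinned lwp is later switched back to, the normal switch
	 * path pops this frame and "returns" into softint_tramp instead of
	 * a cpu_switchto() caller.
	 */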

	mov	r5, r0			/* save new lwp */

	ldr	r2, [r4, #(L_PCB)]	/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, [r2, #(PCB_R8)]
	strd	r10, [r2, #(PCB_R10)]
	strd	r12, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4	/* set new lwp */
#endif
	str	r5, [r7, #(CI_CURLWP)]	/* set new lwp */

	/*
	 * Normally we'd restore {r8-r13} here, but since this is a softint
	 * lwp its existing state doesn't matter. We start the stack just
	 * below the trapframe.
	 */
	ldr	sp, [r5, #(L_MD_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQs again. */
#ifndef __HAVE_UNNESTED_INTRS
	IRQenable
#endif

	/* r1 still has the ipl */
	mov	r0, r4			/* r0 has the pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif
	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed. But we do need the SP from the formerly pinned lwp.
	 */

#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4	/* restore pinned lwp */
#endif
	str	r4, [r7, #(CI_CURLWP)]	/* restore pinned lwp */
	ldr	sp, [r2, #(PCB_KSP)]	/* now running on the old stack. */

	/* At this point we can allow IRQs again. */
	msr	cpsr_c, r6

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	pop	{r4-r7, ip, lr}		/* eat switch frame */
	pop	{r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)

/*
 * Reached through the switchframe faked up in softint_switch() after
 * the soft lwp blocked:
 *	r0 = previous LWP (the soft lwp)
 *	r4 = original LWP (the current lwp)
 *	r6 = original CPSR
 *	r7 = curcpu()
 */
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	mov	r3, #0
	str	r3, [r0, #(L_CTXSWTCH)]	/* tell softint_dispatch the soft lwp blocked */

	msr	cpsr_c, r6		/* restore interrupts */
	pop	{r4, r6, r7, pc}	/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */