/* $NetBSD: cpuswitch.S,v 1.87 2015/03/22 23:46:08 matt Exp $ */

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <arm/asm.h>
#include <arm/locore.h>

        RCSID("$NetBSD: cpuswitch.S,v 1.87 2015/03/22 23:46:08 matt Exp $")

/* LINTSTUB: include <sys/param.h> */

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable
 * These keep FIQ's enabled since FIQ's are special.
 */

#ifdef _ARM_ARCH_6
#define IRQdisable      cpsid   i
#define IRQenable       cpsie   i
#else
#define IRQdisable \
        mrs     r14, cpsr ; \
        orr     r14, r14, #(I32_bit) ; \
        msr     cpsr_c, r14

#define IRQenable \
        mrs     r14, cpsr ; \
        bic     r14, r14, #(I32_bit) ; \
        msr     cpsr_c, r14

#endif
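/*
 * On ARMv6 and later, cpsid/cpsie change only the CPSR I bit and need no
 * scratch register; the pre-v6 sequences above read-modify-write the CPSR
 * control field and so clobber r14.
 */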

        .text

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next)
 *
 * Switch to the specified next LWP
 * Arguments:
 *
 *      r0      'struct lwp *' of the current LWP (or NULL if exiting)
 *      r1      'struct lwp *' of the LWP to switch to
 *      r2      returning
 */
ENTRY(cpu_switchto)
        mov     ip, sp
        push    {r4-r7, ip, lr}

        /* move lwps into caller saved registers */
        mov     r6, r1
        mov     r4, r0

#ifdef TPIDRPRW_IS_CURCPU
        GET_CURCPU(r3)
#elif defined(TPIDRPRW_IS_CURLWP)
        mrc     p15, 0, r0, c13, c0, 4  /* get old lwp (r4 may be 0) */
        ldr     r3, [r0, #(L_CPU)]      /* get cpu from old lwp */
#elif !defined(MULTIPROCESSOR)
        ldr     r3, [r6, #L_CPU]        /* get cpu from new lwp */
#else
#error curcpu() method not defined
#endif
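        /*
         * TPIDRPRW (CP15 c13, c0, 4) is the privileged-only thread ID
         * register.  The kernel dedicates it to holding either curcpu()
         * (TPIDRPRW_IS_CURCPU) or curlwp (TPIDRPRW_IS_CURLWP), which is
         * what determined how curcpu() was located above.
         */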

        /* rem: r3 = curcpu() */
        /* rem: r4 = old lwp */
        /* rem: r6 = new lwp */

#ifndef __HAVE_UNNESTED_INTRS
        IRQdisable
#endif

#ifdef MULTIPROCESSOR
        str     r3, [r6, #(L_CPU)]
#else
        /* l->l_cpu initialized in fork1() for single-processor */
#endif

#if defined(TPIDRPRW_IS_CURLWP)
        mcr     p15, 0, r6, c13, c0, 4  /* set current lwp */
#endif
        /* We have a new curlwp now so make a note of it */
        str     r6, [r3, #(CI_CURLWP)]

        /* Get the new pcb */
        ldr     r7, [r6, #(L_PCB)]

        /* At this point we can allow IRQ's again. */
#ifndef __HAVE_UNNESTED_INTRS
        IRQenable
#endif

        /* rem: r3 = curcpu() */
        /* rem: r4 = old lwp */
        /* rem: r6 = new lwp */
        /* rem: r7 = new pcb */
        /* rem: interrupts are enabled */

        /*
         * If the old lwp on entry to cpu_switchto was zero then the
         * process that called it was exiting. This means that we do
         * not need to save the current context. Instead we can jump
         * straight to restoring the context for the new process.
         */
        teq     r4, #0
        beq     .Ldo_switch

        /* rem: r3 = curcpu() */
        /* rem: r4 = old lwp */
        /* rem: r6 = new lwp */
        /* rem: r7 = new pcb */
        /* rem: interrupts are enabled */

        /* Save old context */

        /* Get the user structure for the old lwp. */
        ldr     r5, [r4, #(L_PCB)]

        /* Save all the registers in the old lwp's pcb */
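        /*
         * Only r8-r13 go into the pcb: r4-r7 and the return address were
         * already pushed onto the old lwp's kernel stack by the prologue,
         * so saving sp (r13) here preserves that frame as well.
         */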
#if defined(_ARM_ARCH_DWORD_OK)
        strd    r8, r9, [r5, #(PCB_R8)]
        strd    r10, r11, [r5, #(PCB_R10)]
        strd    r12, r13, [r5, #(PCB_R12)]
#else
        add     r0, r5, #(PCB_R8)
        stmia   r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
        /*
         * Save user read/write thread/process id register
         */
        mrc     p15, 0, r0, c13, c0, 2
        str     r0, [r5, #(PCB_USER_PID_RW)]
#endif
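        /*
         * (CP15 c13, c0, 2 is TPIDRURW, the user read/write thread ID
         * register; user code is free to modify it, so it has to be
         * saved and restored per-lwp.)
         */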
        /*
         * NOTE: We can now use r8-r13 until it is time to restore
         * them for the new process.
         */

        /* rem: r3 = curcpu() */
        /* rem: r4 = old lwp */
        /* rem: r5 = old pcb */
        /* rem: r6 = new lwp */
        /* rem: r7 = new pcb */
        /* rem: interrupts are enabled */

        /* Restore saved context */

.Ldo_switch:
        /* rem: r3 = curcpu() */
        /* rem: r4 = old lwp */
        /* rem: r6 = new lwp */
        /* rem: r7 = new pcb */
        /* rem: interrupts are enabled */

#ifdef _ARM_ARCH_6
        /*
         * Restore user thread/process id registers
         */
        ldr     r0, [r7, #(PCB_USER_PID_RW)]
        mcr     p15, 0, r0, c13, c0, 2
        ldr     r0, [r6, #(L_PRIVATE)]
        mcr     p15, 0, r0, c13, c0, 3
#endif
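        /*
         * c13, c0, 2 is TPIDRURW, refilled from the new lwp's pcb, and
         * c13, c0, 3 is TPIDRURO, the user read-only thread ID register,
         * which is loaded with the new lwp's l_private so userland can
         * read its TLS pointer from it.
         */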

#ifdef FPU_VFP
        /*
         * If we have a VFP, we need to load FPEXC.
         */
        ldr     r0, [r3, #(CI_VFP_ID)]
        cmp     r0, #0
        ldrne   r0, [r7, #(PCB_VFP_FPEXC)]
        vmsrne  fpexc, r0
#endif
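        /*
         * FPEXC gates the VFP: while its EN bit is clear, VFP instructions
         * trap as undefined.  Reloading the value saved in the pcb
         * re-establishes the new lwp's VFP enable state.
         */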

        ldr     r5, [r6, #(L_PROC)]     /* fetch the proc for below */

        /* Restore all the saved registers */
#ifdef __XSCALE__
        ldr     r8, [r7, #(PCB_R8)]
        ldr     r9, [r7, #(PCB_R9)]
        ldr     r10, [r7, #(PCB_R10)]
        ldr     r11, [r7, #(PCB_R11)]
        ldr     r12, [r7, #(PCB_R12)]
        ldr     r13, [r7, #(PCB_KSP)]   /* sp */
#elif defined(_ARM_ARCH_DWORD_OK)
        ldrd    r8, r9, [r7, #(PCB_R8)]
        ldrd    r10, r11, [r7, #(PCB_R10)]
        ldrd    r12, r13, [r7, #(PCB_R12)]      /* sp */
#else
        add     r0, r7, #PCB_R8
        ldmia   r0, {r8-r13}
#endif

        /* Record the old lwp for pmap_activate()'s benefit */
#ifndef ARM_MMU_EXTENDED
        str     r4, [r3, #CI_LASTLWP]
#endif

        /* rem: r4 = old lwp */
        /* rem: r5 = new lwp's proc */
        /* rem: r6 = new lwp */
        /* rem: r7 = new pcb */

        /*
         * Check for restartable atomic sequences (RAS).
         */
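        /*
         * A RAS is a small region of user code, registered with rasctl(2),
         * that must never be seen as partially executed.  If the incoming
         * lwp was interrupted inside one, ras_lookup() returns the start
         * of the sequence and the saved PC is rewound to it below.
         */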
        ldr     r2, [r5, #(P_RASLIST)]
        ldr     r1, [r6, #(L_MD_TF)]    /* r1 = trapframe (used below) */
        teq     r2, #0                  /* p->p_nras == 0? */
        bne     .Lswitch_do_ras         /* no, check for one */

.Lswitch_return:
        /* cpu_switchto returns the old lwp */
        mov     r0, r4
        /* lwp_trampoline expects new lwp as its second argument */
        mov     r1, r6

#ifdef _ARM_ARCH_7
        clrex                           /* cause any subsequent STREX* to fail */
#endif

        /*
         * Pull the registers that got pushed when cpu_switchto() was called,
         * and return.
         */
        pop     {r4-r7, ip, pc}

.Lswitch_do_ras:
        ldr     r1, [r1, #(TF_PC)]      /* second ras_lookup() arg */
        mov     r0, r5                  /* first ras_lookup() arg */
        bl      _C_LABEL(ras_lookup)
        cmn     r0, #1                  /* -1 means "not in a RAS" */
        ldrne   r1, [r6, #(L_MD_TF)]
        strne   r0, [r1, #(TF_PC)]
        b       .Lswitch_return
END(cpu_switchto)

ENTRY_NP(lwp_trampoline)
        /*
         * cpu_switchto gives us:
         *      arg0(r0) = old lwp
         *      arg1(r1) = new lwp
         * setup by cpu_lwp_fork:
         *      r4 = func to call
         *      r5 = arg to func
         *      r6 = <unused>
         *      r7 = spsr mode
         */
        bl      _C_LABEL(lwp_startup)

        mov     fp, #0                  /* top stack frame */
        mov     r0, r5
        mov     r1, sp
#ifdef _ARM_ARCH_5
        blx     r4
#else
        mov     lr, pc
        mov     pc, r4
#endif
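        /*
         * When the function returns we are on our way out to user mode:
         * block IRQs, handle any pending AST, pull the trapframe from the
         * stack and drop back with "movs pc, lr", which also restores the
         * CPSR from the SPSR.
         */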

        GET_CPSR(r0)
        CPSID_I(r0, r0)                 /* Kill irq's */

        GET_CURCPU(r4)                  /* for DO_AST */
        DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
        PULLFRAME

        movs    pc, lr                  /* Exit */
END(lwp_trampoline)

AST_ALIGNMENT_FAULT_LOCALS

#ifdef __HAVE_FAST_SOFTINTS
/*
 * Called at IPL_HIGH
 *      r0 = new lwp
 *      r1 = ipl for softint_dispatch
 */
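/*
 * Fast soft interrupts each run on their own lwp.  softint_switch()
 * hand-rolls a minimal switch onto that lwp: it saves the pinned
 * (interrupted) lwp's callee-saved state, builds a frame that will resume
 * in softint_tramp if the softint lwp ever blocks, makes the softint lwp
 * curlwp and then calls softint_dispatch().
 */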
ENTRY_NP(softint_switch)
        push    {r4, r6, r7, lr}

        ldr     r7, [r0, #L_CPU]        /* get curcpu */
#if defined(TPIDRPRW_IS_CURLWP)
        mrc     p15, 0, r4, c13, c0, 4  /* get old lwp */
#else
        ldr     r4, [r7, #(CI_CURLWP)]  /* get old lwp */
#endif
        mrs     r6, cpsr                /* we need to save this */

        /*
         * If the soft lwp blocks, it needs to return to softint_tramp
         */
        mov     r2, sp                  /* think ip */
        adr     r3, softint_tramp       /* think lr */
        push    {r2-r3}
        push    {r4-r7}
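        /*
         * The two pushes above lay out {r4-r7, ip, lr} exactly as the
         * prologue of cpu_switchto() would, with softint_tramp standing in
         * for the return address.  If the softint lwp blocks, the next
         * switch back to the pinned lwp pops this frame and "returns" into
         * softint_tramp instead of into our caller.
         */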

        mov     r5, r0                  /* save new lwp */

        ldr     r2, [r4, #(L_PCB)]      /* get old lwp's pcb */

        /* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
        strd    r8, r9, [r2, #(PCB_R8)]
        strd    r10, r11, [r2, #(PCB_R10)]
        strd    r12, r13, [r2, #(PCB_R12)]
#else
        add     r3, r2, #(PCB_R8)
        stmia   r3, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
        /*
         * Save user read/write thread/process id register in case it was
         * set in userland.
         */
        mrc     p15, 0, r0, c13, c0, 2
        str     r0, [r2, #(PCB_USER_PID_RW)]
        /*
         * Now restore l_private for the softint thread.
         */
        ldr     r0, [r5, #(L_PRIVATE)]
        mcr     p15, 0, r0, c13, c0, 3
#endif

        /* this is an invariant so load before disabling intrs */
        ldr     r2, [r5, #(L_PCB)]      /* get new lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
        IRQdisable
#endif
        /*
         * We're switching to a bound LWP so its l_cpu is already correct.
         */
#if defined(TPIDRPRW_IS_CURLWP)
        mcr     p15, 0, r5, c13, c0, 4  /* save new lwp */
#endif
        str     r5, [r7, #(CI_CURLWP)]  /* save new lwp */

        /*
         * Normally, we'd get {r8-r13} but since this is a softint lwp
         * its existing state doesn't matter.  We start the stack just
         * below the trapframe.
         */
        ldr     sp, [r5, #(L_MD_TF)]    /* get new lwp's stack ptr */

        /* At this point we can allow IRQ's again. */
#ifndef __HAVE_UNNESTED_INTRS
        IRQenable
#endif

        /* r1 still has ipl */
        mov     r0, r4                  /* r0 has pinned (old) lwp */
        bl      _C_LABEL(softint_dispatch)
        /*
         * If we've returned, we need to change everything back and return.
         */
#ifdef _ARM_ARCH_6
        ldr     r0, [r4, #(L_PRIVATE)]  /* get pinned lwp's l_private */
        mcr     p15, 0, r0, c13, c0, 3  /* and restore it */
#endif
        ldr     r2, [r4, #(L_PCB)]      /* get pinned lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
        IRQdisable
#endif
        /*
         * We don't need to restore all the registers since another lwp was
         * never executed.  But we do need the SP from the formerly pinned lwp.
         */

#if defined(TPIDRPRW_IS_CURLWP)
        mcr     p15, 0, r4, c13, c0, 4  /* restore pinned lwp */
#endif
        str     r4, [r7, #(CI_CURLWP)]  /* restore pinned lwp */
        ldr     sp, [r2, #(PCB_KSP)]    /* now running on the old stack. */

        /* At this point we can allow IRQ's again. */
        msr     cpsr_c, r6

        /*
         * Grab the registers that got pushed at the start and return.
         */
        pop     {r4-r7, ip, lr}         /* eat switch frame */
        pop     {r4, r6, r7, pc}        /* pop stack and return */

END(softint_switch)

/*
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
ENTRY_NP(softint_tramp)
        ldr     r3, [r7, #(CI_MTX_COUNT)]       /* readjust after mi_switch */
        add     r3, r3, #1
        str     r3, [r7, #(CI_MTX_COUNT)]

        mov     r3, #0                  /* tell softint_dispatch */
        str     r3, [r0, #(L_CTXSWTCH)] /* the soft lwp blocked */

        msr     cpsr_c, r6              /* restore interrupts */
        pop     {r4, r6, r7, pc}        /* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */