/*	$NetBSD: cpuswitch.S,v 1.92.6.2 2020/04/08 14:07:28 martin Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <arm/asm.h>
#include <arm/locore.h>

RCSID("$NetBSD: cpuswitch.S,v 1.92.6.2 2020/04/08 14:07:28 martin Exp $")

/* LINTSTUB: include <sys/param.h> */

#undef IRQdisable
#undef IRQenable

/*
 * Definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled, since FIQs are special.
 */

#ifdef _ARM_ARCH_6
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif
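
/* Note: the pre-ARMv6 variants above use r14 (lr) as a scratch register. */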

	.text

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next, bool returning)
 *
 * Switch to the specified next LWP
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	'bool' returning (not referenced by this implementation)
 */
ENTRY(cpu_switchto)
	mov	ip, sp
	push	{r4-r7, ip, lr}

	/* move lwps into caller saved registers */
	mov	r6, r1
	mov	r4, r0

#ifdef TPIDRPRW_IS_CURCPU
	GET_CURCPU(r5)
#else
	ldr	r5, [r6, #L_CPU]	/* get cpu from new lwp */
#endif

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r7, [r4, #(L_PCB)]

	/* Save all the registers in the old lwp's pcb */
#if defined(_ARM_ARCH_DWORD_OK)
	strd	r8, r9, [r7, #(PCB_R8)]
	strd	r10, r11, [r7, #(PCB_R10)]
	strd	r12, r13, [r7, #(PCB_R12)]
#else
	add	r0, r7, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register (TPIDRURW)
	 */
	mrc	p15, 0, r0, c13, c0, 2	/* get TPIDRURW */
	str	r0, [r7, #(PCB_USER_PID_RW)]
#endif
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* Restore saved context */

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */

	IRQdisable
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4	/* set current lwp (TPIDRPRW) */
#endif

	/* We have a new curlwp now so make a note of it */
	str	r6, [r5, #(CI_CURLWP)]
	/* Get the new pcb */
	ldr	r7, [r6, #(L_PCB)]

	/* make sure we are using the new lwp's stack */
	ldr	sp, [r7, #(PCB_KSP)]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If we are switching to a system lwp, don't bother restoring
	 * thread or vfp registers and skip the ras check.
	 */
	ldr	r0, [r6, #(L_FLAG)]
	tst	r0, #(LW_SYSTEM)
	bne	.Lswitch_do_restore

#ifdef _ARM_ARCH_6
	/*
	 * Restore user thread/process id registers
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2	/* set TPIDRURW */
	ldr	r0, [r6, #(L_PRIVATE)]
	mcr	p15, 0, r0, c13, c0, 3	/* set TPIDRURO */
#endif

#ifdef FPU_VFP
	/*
	 * If we have a VFP, we need to load FPEXC; its enable bit
	 * turns the VFP unit on or off per the new lwp's saved state.
	 */
	ldr	r0, [r5, #(CI_VFP_ID)]
	cmp	r0, #0			/* do we have a VFP? */
	ldrne	r0, [r7, #(PCB_VFP_FPEXC)]
	vmsrne	fpexc, r0		/* restore the lwp's FPEXC */
#endif

	/*
	 * Check for restartable atomic sequences (RAS): if this lwp was
	 * preempted inside one, ras_lookup() tells us where to rewind
	 * its PC so the sequence restarts from the top (see ras(9)).
	 */

	ldr	r0, [r6, #(L_PROC)]	/* fetch the proc for ras_lookup */
	ldr	r2, [r0, #(P_RASLIST)]
	cmp	r2, #0			/* p->p_raslist == NULL? */
	beq	.Lswitch_do_restore

	/* we can use r8 since we haven't restored the saved registers yet. */
	ldr	r8, [r6, #(L_MD_TF)]	/* r8 = trapframe (used below) */
	ldr	r1, [r8, #(TF_PC)]	/* second ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r8, #(TF_PC)]	/* in a RAS: restart the sequence */

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

.Lswitch_do_restore:
	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
#elif defined(_ARM_ARCH_DWORD_OK)
	ldrd	r8, r9, [r7, #(PCB_R8)]
	ldrd	r10, r11, [r7, #(PCB_R10)]
	ldr	r12, [r7, #(PCB_R12)]
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r12}
#endif
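	/* (sp itself was already reloaded from PCB_KSP above) */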

	/* Record the old lwp for pmap_activate()'s benefit */
#ifndef ARM_MMU_EXTENDED
	str	r4, [r5, #CI_LASTLWP]
#endif

	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

#ifdef _ARM_ARCH_7
	clrex				/* cause any subsequent STREX* to fail */
#endif

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
	 */
	pop	{r4-r7, ip, pc}

END(cpu_switchto)

ENTRY_NP(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *	arg0(r0) = old lwp
	 *	arg1(r1) = new lwp
	 * set up by cpu_lwp_fork:
	 *	r4 = func to call
	 *	r5 = arg to func
	 *	r6 = <unused>
	 *	r7 = spsr mode
	 */
	bl	_C_LABEL(lwp_startup)

	mov	fp, #0			/* top stack frame */
	mov	r0, r5
	mov	r1, sp
#ifdef _ARM_ARCH_5
	blx	r4
#else
	mov	lr, pc
	mov	pc, r4
#endif

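	/*
	 * The called function has returned: disable IRQs, deliver any
	 * pending AST, then return to user mode via the trapframe that
	 * cpu_lwp_fork() left at the top of the kernel stack.
	 */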
	GET_CPSR(r0)
	CPSID_I(r0, r0)			/* Kill IRQs */

	GET_CURCPU(r4)			/* for DO_AST */
	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
	PULLFRAME

	movs	pc, lr			/* Exit */
END(lwp_trampoline)

AST_ALIGNMENT_FAULT_LOCALS

#ifdef __HAVE_FAST_SOFTINTS
/*
 * Called at IPL_HIGH
 *	r0 = new lwp
 *	r1 = ipl for softint_dispatch
 */
ENTRY_NP(softint_switch)
	push	{r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]	/* get curcpu */
#if defined(TPIDRPRW_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4	/* get old lwp (TPIDRPRW) */
#else
	ldr	r4, [r7, #(CI_CURLWP)]	/* get old lwp */
#endif
	mrs	r6, cpsr		/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp.
	 * Fake the switchframe that cpu_switchto() pushes (r4-r7, ip, lr)
	 * so that when the pinned lwp is later switched back to,
	 * cpu_switchto()'s epilogue pops this frame and "returns" into
	 * softint_tramp.
	 */
	mov	r2, sp			/* frame's 'ip' (old sp) */
	adr	r3, softint_tramp	/* frame's 'lr' (resume point) */
	push	{r2-r3}
	push	{r4-r7}

	mov	r5, r0			/* save new lwp */

	ldr	r2, [r4, #(L_PCB)]	/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, r9, [r2, #(PCB_R8)]
	strd	r10, r11, [r2, #(PCB_R10)]
	strd	r12, r13, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register (TPIDRURW) in
	 * case it was set in userland.
	 */
	mrc	p15, 0, r0, c13, c0, 2	/* get TPIDRURW */
	str	r0, [r2, #(PCB_USER_PID_RW)]
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */

	IRQdisable
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4	/* set new lwp (TPIDRPRW) */
#endif
	str	r5, [r7, #(CI_CURLWP)]	/* set new lwp */

	/*
	 * Normally, we'd get {r8-r13} but since this is a softint lwp
	 * its existing state doesn't matter. We start the stack just
	 * below the trapframe.
	 */
	ldr	sp, [r5, #(L_MD_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQs again. */
	IRQenable
	/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */

	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed. But we do need the SP from the formerly pinned lwp.
	 */

	IRQdisable
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4	/* restore pinned lwp (TPIDRPRW) */
#endif
	str	r4, [r7, #(CI_CURLWP)]	/* restore pinned lwp */
	ldr	sp, [r2, #(PCB_KSP)]	/* now running on the old stack. */

	/* At this point we can allow IRQs again. */
	msr	cpsr_c, r6

	/*
	 * Grab the registers that got pushed at the start and return.
	 * The first pop discards the fake switchframe: softint_tramp is
	 * not needed because softint_dispatch() returned normally.
	 */
	pop	{r4-r7, ip, lr}		/* eat switch frame */
	pop	{r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)

/*
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	msr	cpsr_c, r6		/* restore interrupts */
	pop	{r4, r6, r7, pc}	/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */