/*	$NetBSD: cpuswitch.S,v 1.46.12.2 2007/06/26 18:12:13 garbled Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <arm/arm32/pte.h>
#include <machine/param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/asm.h>

/* LINTSTUB: include <sys/param.h> */

#undef IRQdisable
#undef IRQenable
/*
 * New experimental definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled since FIQs are special.
 */

#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

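/*
 * A minimal C-level sketch of what these macros do (hypothetical
 * read_cpsr()/write_cpsr() helpers, for illustration only):
 *
 *	uint32_t s = read_cpsr();
 *	write_cpsr(s | I32_bit);	<- IRQdisable
 *	write_cpsr(s & ~I32_bit);	<- IRQenable
 *
 * Both macros clobber r14, so they must not be used while the
 * caller's link register is still live.
 */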
/*
 * These are used for switching the translation table/DACR.
 * Since the vector page can be invalid for a short time, we must
 * disable both regular IRQs *and* FIQs.
 *
 * XXX: This is not necessary if the vector table is relocated.
 */
#define IRQdisableALL \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit | F32_bit) ; \
	msr	cpsr_c, r14

#define IRQenableALL \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit | F32_bit) ; \
	msr	cpsr_c, r14

	.text

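/*
 * Literal pool: PC-relative words holding the addresses of kernel
 * globals referenced below, so each can be reached with a single
 * "ldr rN, .Llabel" regardless of where the kernel is linked.
 */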
#ifdef MULTIPROCESSOR
.Lcpu_info_store:
	.word	_C_LABEL(cpu_info_store)
.Lcurlwp:
	/* FIXME: This is bogus in the general case. */
	.word	_C_LABEL(cpu_info_store) + CI_CURLWP

.Lcurpcb:
	.word	_C_LABEL(cpu_info_store) + CI_CURPCB
#else
.Lcurlwp:
	.word	_C_LABEL(curlwp)

.Lcurpcb:
	.word	_C_LABEL(curpcb)
#endif

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#ifndef MULTIPROCESSOR
	.data
	.global	_C_LABEL(curpcb)
_C_LABEL(curpcb):
	.word	0x00000000
	.text
#endif

.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)

.Lpmap_kernel_cstate:
	.word	(kernel_pmap_store + PMAP_CSTATE)

.Llast_cache_state_ptr:
	.word	_C_LABEL(pmap_cache_state)

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next)
 *
 * Switch to the specified next LWP.
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 */
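/*
 * Register usage throughout this function: the old lwp is kept in
 * r4 and the new lwp in r6, both callee-saved registers, so they
 * survive the calls made through the cpufuncs table below.
 */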
ENTRY(cpu_switchto)
	stmfd	sp!, {r4-r7, lr}

	mov	r6, r1		/* save new lwp */
	mov	r4, r0		/* save old lwp, it's the return value */

	IRQdisable

#ifdef MULTIPROCESSOR
	/* XXX use curcpu() */
	ldr	r0, .Lcpu_info_store
	str	r0, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

	/* We have a new curlwp now, so make a note of it */
	ldr	r7, .Lcurlwp
	str	r6, [r7]

	/* Hook in a new pcb */
	ldr	r7, .Lcurpcb
	ldr	r0, [r6, #(L_ADDR)]
	str	r0, [r7]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/*
	 * If the old lwp on entry to cpu_switchto was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r4, #0x00000000
	beq	.Lswitch_exited

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r1, [r4, #(L_ADDR)]

	/* Save all the registers in the old lwp's pcb */
#ifndef __XSCALE__
	add	r7, r1, #(PCB_R8)
	stmia	r7, {r8-r13}
#else
	strd	r8, [r1, #(PCB_R8)]
	strd	r10, [r1, #(PCB_R10)]
	strd	r12, [r1, #(PCB_R12)]
#endif
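	/*
	 * (On XScale, strd writes an even/odd register pair with a
	 * single 64-bit store, which is presumably why it is used in
	 * place of the stmia chosen for other CPUs.)
	 */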

	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* rem: r1 = old lwp PCB */
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Remember the old PCB. */
	mov	r8, r1

	/* r1 now free! */

	/* Get the user structure for the new process in r9 */
	ldr	r9, [r6, #(L_ADDR)]

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
	msr	cpsr_c, r2

	str	sp, [r8, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */
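	/*
	 * (The optimisation alluded to above: since the current mode
	 * is known to be SVC32, the mode switch could be done with
	 * immediates, along these lines (a sketch, untested):
	 *
	 *	msr	cpsr_c, #(PSR_UND32_MODE | I32_bit)
	 *	str	sp, [r8, #(PCB_UND_SP)]
	 *	msr	cpsr_c, #(PSR_SVC32_MODE)
	 *
	 * at the cost of hard-coding the return mode and IRQ state.)
	 */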

	/* What else needs to be saved?  Only FPA stuff when that is supported */

	/* Restore saved context */

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/*
	 * Get the new L1 table pointer into r11.  If we're switching to
	 * an LWP with the same address space as the outgoing one, we can
	 * skip the cache purge and the TTB load.
	 *
	 * To avoid the data-dependency stalls that would happen anyway,
	 * we try to get some useful work done in the meantime.
	 */
	ldr	r10, [r8, #(PCB_PAGEDIR)]	/* r10 = old L1 */
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */

	ldr	r0, [r8, #(PCB_DACR)]		/* r0 = old DACR */
	ldr	r1, [r9, #(PCB_DACR)]		/* r1 = new DACR */
	ldr	r8, [r9, #(PCB_CSTATE)]		/* r8 = &new_pmap->pm_cstate */
	ldr	r5, .Llast_cache_state_ptr	/* Previous thread's cstate */

	teq	r10, r11			/* Same L1? */
	ldr	r5, [r5]
	cmpeq	r0, r1				/* Same DACR? */
	beq	.Lcs_context_switched		/* yes! */

	ldr	r3, .Lblock_userspace_access
	mov	r12, #0
	cmp	r5, #0			/* No last vm? (switch_exit) */
	beq	.Lcs_cache_purge_skipped /* If so, we can skip the cache flush */

	mov	r2, #DOMAIN_CLIENT
	cmp	r1, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Switching to a kernel thread? */
	beq	.Lcs_cache_purge_skipped /* Yup. Don't flush cache */

	cmp	r5, r8			/* Same userland VM space? */
	ldrneb	r12, [r5, #(CS_CACHE_ID)] /* Last VM space cache state */

	/*
	 * We're definitely switching to a new userland VM space,
	 * and the previous userland VM space has yet to be flushed
	 * from the cache/tlb.
	 *
	 * r12 holds the previous VM space's cs_cache_id state
	 */
	tst	r12, #0xff		/* Test cs_cache_id */
	beq	.Lcs_cache_purge_skipped /* VM space is not in cache */

	/*
	 * Definitely need to flush the cache.
	 * Mark the old VM space as NOT being resident in the cache.
	 */

	mov	r2, #0x00000000
	strb	r2, [r5, #(CS_CACHE_ID)]
	strb	r2, [r5, #(CS_CACHE_D)]

.Lcs_cache_purge:
	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	mov	r2, #0x00000001
	str	r2, [r3]

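	/*
	 * Call the cache flush routine through the cpufuncs jump
	 * table.  "mov lr, pc" reads the pc two instructions ahead,
	 * so lr ends up addressing the instruction after the
	 * "ldr pc, [...]": the classic pre-ARMv5 indirect-call idiom.
	 */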
	stmfd	sp!, {r0-r3}
	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
	ldmfd	sp!, {r0-r3}

.Lcs_cache_purge_skipped:
	/* rem: r1 = new DACR */
	/* rem: r3 = &block_userspace_access */
	/* rem: r4 = old lwp */
	/* rem: r5 = &old_pmap->pm_cstate (or NULL) */
	/* rem: r6 = new lwp */
	/* rem: r8 = &new_pmap->pm_cstate */
	/* rem: r9 = new PCB */
	/* rem: r10 = old L1 */
	/* rem: r11 = new L1 */

	mov	r2, #0x00000000
	ldr	r7, [r9, #(PCB_PL1VEC)]

	/*
	 * At this point we need to kill IRQs again.
	 *
	 * XXXSCW: Don't need to block FIQs if vectors have been relocated
	 */
	IRQdisableALL

	/*
	 * Interrupts are disabled so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
	str	r2, [r3]

	/*
	 * Ensure the vector table is accessible by fixing up the L1
	 */
	cmp	r7, #0			/* No need to fixup vector table? */
	ldrne	r2, [r7]		/* But if yes, fetch current value */
	ldrne	r0, [r9, #(PCB_L1VEC)]	/* Fetch new vector_page value */
	mcr	p15, 0, r1, c3, c0, 0	/* Update DACR for new context */
	cmpne	r2, r0			/* Stuffing the same value? */
#ifndef PMAP_INCLUDE_PTE_SYNC
	strne	r0, [r7]		/* Nope, update it */
#else
	beq	.Lcs_same_vector
	str	r0, [r7]		/* Otherwise, update it */

	/*
	 * Need to sync the cache to make sure that last store is
	 * visible to the MMU.
	 */
	ldr	r2, .Lcpufuncs
	mov	r0, r7
	mov	r1, #4
	mov	lr, pc
	ldr	pc, [r2, #CF_DCACHE_WB_RANGE]

.Lcs_same_vector:
#endif	/* PMAP_INCLUDE_PTE_SYNC */

	cmp	r10, r11		/* Switching to the same L1? */
	ldr	r10, .Lcpufuncs
	beq	.Lcs_same_l1		/* Yup. */

	/*
	 * Do a full context switch, including full TLB flush.
	 */
	mov	r0, r11
	mov	lr, pc
	ldr	pc, [r10, #CF_CONTEXT_SWITCH]

	/*
	 * Mark the old VM space as NOT being resident in the TLB
	 */
	mov	r2, #0x00000000
	cmp	r5, #0
	strneh	r2, [r5, #(CS_TLB_ID)]
	b	.Lcs_context_switched

	/*
	 * We're switching to a different process in the same L1.
	 * In this situation, we only need to flush the TLB for the
	 * vector_page mapping, and even then only if r7 is non-NULL.
	 */
.Lcs_same_l1:
	cmp	r7, #0
	movne	r0, #0			/* We *know* vector_page's VA is 0x0 */
	movne	lr, pc
	ldrne	pc, [r10, #CF_TLB_FLUSHID_SE]

.Lcs_context_switched:
	/* rem: r8 = &new_pmap->pm_cstate */

	/* XXXSCW: Safe to re-enable FIQs here */

	/*
	 * The new VM space is live in the cache and TLB.
	 * Update its cache/tlb state, and if it's not the kernel
	 * pmap, update the 'last cache state' pointer.
	 */
	mov	r2, #-1
	ldr	r5, .Lpmap_kernel_cstate
	ldr	r0, .Llast_cache_state_ptr
	str	r2, [r8, #(CS_ALL)]
	cmp	r5, r8
	strne	r8, [r0]
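	/*
	 * (Storing r2 = -1 to CS_ALL marks every cache and TLB state
	 * field of the incoming pmap resident in a single store.)
	 */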

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE)
	msr	cpsr_c, r2

	ldr	sp, [r9, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

	/* Restore all the saved registers */
#ifndef __XSCALE__
	add	r7, r9, #PCB_R8
	ldmia	r7, {r8-r13}

	sub	r7, r7, #PCB_R8		/* restore PCB pointer */
#else
	mov	r7, r9
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]
#endif

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

#ifdef ARMFPE
	add	r0, r7, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* We can enable interrupts again */
	IRQenableALL

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */
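	/*
	 * A RAS is a user-space instruction window registered by the
	 * process that must appear to execute atomically with respect
	 * to preemption.  If the new lwp was interrupted inside one,
	 * ras_lookup() returns the address at which the sequence must
	 * restart and we rewind the trapframe's PC to it; otherwise
	 * it returns -1 and the frame is left untouched.
	 */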

	ldr	r2, [r5, #(P_RASLIST)]
	ldr	r1, [r7, #(PCB_TF)]	/* r1 = trapframe (used below) */
	teq	r2, #0			/* p->p_raslist == NULL? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:
	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switchto() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}

.Lswitch_do_ras:
	ldr	r1, [r1, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	ldrne	r1, [r7, #(PCB_TF)]
	strne	r0, [r1, #(TF_PC)]
	b	.Lswitch_return

.Lswitch_exited:

	/*
	 * We're about to clear both the cache and the TLB.
	 * Make sure to zap the 'last cache state' pointer since the
	 * pmap might be about to go away. Also ensure the outgoing
	 * VM space's cache state is marked as NOT resident in the
	 * cache.
	 */

	/* rem: r4 = old lwp (NULL) */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/*
	 * Load up registers the way .Lcs_cache_purge expects.
	 */

	ldr	r3, .Lblock_userspace_access
	ldr	r9, [r6, #(L_ADDR)]	/* r9 = new PCB */
	mrc	p15, 0, r10, c2, c0, 0	/* r10 = old L1 */
	mov	r5, #0			/* No previous cache state */
	ldr	r1, [r9, #(PCB_DACR)]	/* r1 = new DACR */
	ldr	r8, [r9, #(PCB_CSTATE)]	/* r8 = new cache state */
	ldr	r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */
	b	.Lcs_cache_purge

/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	/*
	 * r0 = pcb
	 */
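	/*
	 * Note that only r8-r13 are written to the pcb; the low
	 * callee-saved registers live in the stack frame pushed
	 * below and are reloaded when savectx() returns.
	 */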

	/* Push registers. */
	stmfd	sp!, {r4-r7, lr}

	/* Store all the registers in the process's pcb */
#ifndef __XSCALE__
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}
#else
	strd	r8, [r0, #(PCB_R8)]
	strd	r10, [r0, #(PCB_R10)]
	strd	r12, [r0, #(PCB_R12)]
#endif

	/* Pull the registers off the stack and return */
	ldmfd	sp!, {r4-r7, pc}
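/*
 * lwp_trampoline()
 *
 * First code executed by a new lwp.  We arrive here from
 * cpu_switchto() with r0 = previous lwp and r1 = the new lwp,
 * which are the arguments lwp_startup() expects.  r4 (entry
 * point) and r5 (its argument) are assumed to have been seeded
 * into the switchframe by cpu_lwp_fork().
 */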
ENTRY(lwp_trampoline)
	bl	_C_LABEL(lwp_startup)

	mov	r0, r5
	mov	r1, sp
	mov	lr, pc
	mov	pc, r4

	/* Kill IRQs */
	mrs	r0, cpsr
	orr	r0, r0, #(I32_bit)
	msr	cpsr_c, r0

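	/*
	 * Unwind the trapframe built at kernel entry and return to
	 * the interrupted context; "movs pc, lr" also copies SPSR
	 * back into CPSR, restoring the pre-trap processor state.
	 */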
	PULLFRAME

	movs	pc, lr			/* Exit */