1 1.46 briggs /* $NetBSD: cpuswitch.S,v 1.46 2007/02/19 01:59:23 briggs Exp $	*/
2 1.1 chris
3 1.1 chris /*
4 1.30 scw * Copyright 2003 Wasabi Systems, Inc.
5 1.30 scw * All rights reserved.
6 1.30 scw *
7 1.30 scw * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 1.30 scw *
9 1.30 scw * Redistribution and use in source and binary forms, with or without
10 1.30 scw * modification, are permitted provided that the following conditions
11 1.30 scw * are met:
12 1.30 scw * 1. Redistributions of source code must retain the above copyright
13 1.30 scw * notice, this list of conditions and the following disclaimer.
14 1.30 scw * 2. Redistributions in binary form must reproduce the above copyright
15 1.30 scw * notice, this list of conditions and the following disclaimer in the
16 1.30 scw * documentation and/or other materials provided with the distribution.
17 1.30 scw * 3. All advertising materials mentioning features or use of this software
18 1.30 scw * must display the following acknowledgement:
19 1.30 scw * This product includes software developed for the NetBSD Project by
20 1.30 scw * Wasabi Systems, Inc.
21 1.30 scw * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 1.30 scw * or promote products derived from this software without specific prior
23 1.30 scw * written permission.
24 1.30 scw *
25 1.30 scw * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 1.30 scw * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 1.30 scw * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 1.30 scw * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 1.30 scw * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 1.30 scw * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 1.30 scw * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 1.30 scw * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 1.30 scw * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 1.30 scw * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 1.30 scw * POSSIBILITY OF SUCH DAMAGE.
36 1.30 scw */
37 1.30 scw /*
38 1.1 chris * Copyright (c) 1994-1998 Mark Brinicombe.
39 1.1 chris * Copyright (c) 1994 Brini.
40 1.1 chris * All rights reserved.
41 1.1 chris *
42 1.1 chris * This code is derived from software written for Brini by Mark Brinicombe
43 1.1 chris *
44 1.1 chris * Redistribution and use in source and binary forms, with or without
45 1.1 chris * modification, are permitted provided that the following conditions
46 1.1 chris * are met:
47 1.1 chris * 1. Redistributions of source code must retain the above copyright
48 1.1 chris * notice, this list of conditions and the following disclaimer.
49 1.1 chris * 2. Redistributions in binary form must reproduce the above copyright
50 1.1 chris * notice, this list of conditions and the following disclaimer in the
51 1.1 chris * documentation and/or other materials provided with the distribution.
52 1.1 chris * 3. All advertising materials mentioning features or use of this software
53 1.1 chris * must display the following acknowledgement:
54 1.1 chris * This product includes software developed by Brini.
55 1.1 chris * 4. The name of the company nor the name of the author may be used to
56 1.1 chris * endorse or promote products derived from this software without specific
57 1.1 chris * prior written permission.
58 1.1 chris *
59 1.1 chris * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
60 1.1 chris * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
61 1.1 chris * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
62 1.1 chris * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
63 1.1 chris * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
64 1.1 chris * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
65 1.1 chris * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 1.1 chris * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 1.1 chris * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 1.1 chris * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 1.1 chris * SUCH DAMAGE.
70 1.1 chris *
71 1.1 chris * RiscBSD kernel project
72 1.1 chris *
73 1.1 chris * cpuswitch.S
74 1.1 chris *
75 1.1 chris * cpu switching functions
76 1.1 chris *
77 1.1 chris * Created : 15/10/94
78 1.1 chris */
79 1.1 chris
80 1.1 chris #include "opt_armfpe.h"
81 1.30 scw #include "opt_arm32_pmap.h"
82 1.19 bjh21 #include "opt_multiprocessor.h"
83 1.36 martin #include "opt_lockdebug.h"
84 1.1 chris
85 1.1 chris #include "assym.h"
86 1.46 briggs #include <arm/arm32/pte.h>
87 1.1 chris #include <machine/param.h>
88 1.1 chris #include <machine/cpu.h>
89 1.1 chris #include <machine/frame.h>
90 1.1 chris #include <machine/asm.h>
91 1.1 chris
92 1.34 kristerw /* LINTSTUB: include <sys/param.h> */
93 1.34 kristerw
94 1.1 chris #undef IRQdisable
95 1.1 chris #undef IRQenable
96 1.1 chris
97 1.1 chris /*
98 1.1 chris * New experimental definitions of IRQdisable and IRQenable
99 1.1 chris  * These keep FIQs enabled, since FIQs are special.
100 1.1 chris */
101 1.1 chris
102 1.1 chris #define IRQdisable \
103 1.13 thorpej mrs r14, cpsr ; \
104 1.1 chris orr r14, r14, #(I32_bit) ; \
105 1.13 thorpej msr cpsr_c, r14 ; \
106 1.1 chris
107 1.1 chris #define IRQenable \
108 1.13 thorpej mrs r14, cpsr ; \
109 1.1 chris bic r14, r14, #(I32_bit) ; \
110 1.13 thorpej msr cpsr_c, r14 ; \
111 1.1 chris
112 1.30 scw /*
113 1.30 scw * These are used for switching the translation table/DACR.
114 1.30 scw * Since the vector page can be invalid for a short time, we must
115 1.30 scw * disable both regular IRQs *and* FIQs.
116 1.30 scw *
117 1.30 scw * XXX: This is not necessary if the vector table is relocated.
118 1.30 scw */
119 1.30 scw #define IRQdisableALL \
120 1.30 scw mrs r14, cpsr ; \
121 1.30 scw orr r14, r14, #(I32_bit | F32_bit) ; \
122 1.30 scw msr cpsr_c, r14
123 1.30 scw
124 1.30 scw #define IRQenableALL \
125 1.30 scw mrs r14, cpsr ; \
126 1.30 scw bic r14, r14, #(I32_bit | F32_bit) ; \
127 1.30 scw msr cpsr_c, r14
128 1.30 scw
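/*
 * Note that "msr cpsr_c, <reg>" in the four macros above writes only the
 * control field of the CPSR (mode and interrupt-mask bits), so the
 * condition flags are preserved across them; r14 is used as scratch and
 * is clobbered by all four.
 */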
129 1.1 chris .text
130 1.1 chris
131 1.17 thorpej .Lwhichqs:
132 1.1 chris .word _C_LABEL(sched_whichqs)
133 1.1 chris
134 1.17 thorpej .Lqs:
135 1.1 chris .word _C_LABEL(sched_qs)
136 1.1 chris
137 1.1 chris /*
138 1.1 chris * cpuswitch()
139 1.1 chris *
140 1.1 chris  * Performs a process context switch.
141 1.1 chris * This function has several entry points
142 1.1 chris */
143 1.1 chris
144 1.19 bjh21 #ifdef MULTIPROCESSOR
145 1.19 bjh21 .Lcpu_info_store:
146 1.19 bjh21 .word _C_LABEL(cpu_info_store)
147 1.29 thorpej .Lcurlwp:
148 1.19 bjh21 /* FIXME: This is bogus in the general case. */
149 1.29 thorpej .word _C_LABEL(cpu_info_store) + CI_CURLWP
150 1.22 bjh21
151 1.22 bjh21 .Lcurpcb:
152 1.22 bjh21 .word _C_LABEL(cpu_info_store) + CI_CURPCB
153 1.19 bjh21 #else
154 1.29 thorpej .Lcurlwp:
155 1.29 thorpej .word _C_LABEL(curlwp)
156 1.1 chris
157 1.17 thorpej .Lcurpcb:
158 1.1 chris .word _C_LABEL(curpcb)
159 1.22 bjh21 #endif
160 1.1 chris
161 1.17 thorpej .Lwant_resched:
162 1.1 chris .word _C_LABEL(want_resched)
163 1.1 chris
164 1.17 thorpej .Lcpufuncs:
165 1.1 chris .word _C_LABEL(cpufuncs)
166 1.1 chris
167 1.22 bjh21 #ifndef MULTIPROCESSOR
168 1.1 chris .data
169 1.1 chris .global _C_LABEL(curpcb)
170 1.1 chris _C_LABEL(curpcb):
171 1.1 chris .word 0x00000000
172 1.1 chris .text
173 1.22 bjh21 #endif
174 1.1 chris
175 1.17 thorpej .Lblock_userspace_access:
176 1.1 chris .word _C_LABEL(block_userspace_access)
177 1.1 chris
178 1.15 thorpej .Lcpu_do_powersave:
179 1.15 thorpej .word _C_LABEL(cpu_do_powersave)
180 1.15 thorpej
181 1.30 scw .Lpmap_kernel_cstate:
182 1.30 scw .word (kernel_pmap_store + PMAP_CSTATE)
183 1.30 scw
184 1.30 scw .Llast_cache_state_ptr:
185 1.30 scw .word _C_LABEL(pmap_cache_state)
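/*
 * The .L* words above form a literal pool: "ldr rN, .Lfoo" is a
 * pc-relative load of the address of the corresponding C symbol, which is
 * then dereferenced to reach the variable itself (ARM has no
 * load-from-absolute-address instruction).
 */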
186 1.30 scw
187 1.1 chris /*
188 1.1 chris * Idle loop, exercised while waiting for a process to wake up.
189 1.16 thorpej *
190 1.16 thorpej * NOTE: When we jump back to .Lswitch_search, we must have a
191 1.16 thorpej * pointer to whichqs in r7, which is what it is when we arrive
192 1.16 thorpej * here.
193 1.1 chris */
194 1.7 chris /* LINTSTUB: Ignore */
195 1.4 chris ASENTRY_NP(idle)
196 1.41 scw ldr r6, .Lcpu_do_powersave
197 1.41 scw IRQenable /* Enable interrupts */
198 1.41 scw ldr r6, [r6] /* r6 = cpu_do_powersave */
199 1.41 scw
200 1.7 chris bl _C_LABEL(sched_unlock_idle)
201 1.16 thorpej
202 1.41 scw /* Drop to spl0 (returns the current spl level in r0). */
203 1.38 scw #ifdef __NEWINTR
204 1.38 scw mov r0, #(IPL_NONE)
205 1.38 scw bl _C_LABEL(_spllower)
206 1.38 scw #else /* ! __NEWINTR */
207 1.38 scw mov r0, #(_SPL_0)
208 1.38 scw bl _C_LABEL(splx)
209 1.38 scw #endif /* __NEWINTR */
210 1.38 scw
211 1.41 scw 	teq	r6, #0			/* cpu_do_powersave non-zero? */
212 1.41 scw ldrne r6, .Lcpufuncs
213 1.41 scw mov r4, r0 /* Old interrupt level to r4 */
214 1.41 scw ldrne r6, [r6, #(CF_SLEEP)]
215 1.41 scw
216 1.41 scw /*
217 1.41 scw * Main idle loop.
218 1.41 scw * r6 points to power-save idle function if required, else NULL.
219 1.41 scw */
220 1.41 scw 1: ldr r3, [r7] /* r3 = sched_whichqs */
221 1.41 scw teq r3, #0
222 1.41 scw bne 2f /* We have work to do */
223 1.41 scw teq r6, #0 /* Powersave idle? */
224 1.41 scw beq 1b /* Nope. Just sit-n-spin. */
225 1.38 scw
226 1.41 scw /*
227 1.41 scw * Before going into powersave idle mode, disable interrupts
228 1.41 scw * and check sched_whichqs one more time.
229 1.41 scw */
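	/*
	 * The "moveq lr, pc; moveq pc, r6" pair below is the classic ARM
	 * idiom for a call through a register on cores without blx; both
	 * instructions are predicated, so the call happens only while the
	 * run queues are still empty.  r0 is zeroed first, presumably as
	 * the argument to the cpufuncs sleep hook.
	 */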
230 1.41 scw IRQdisableALL
231 1.41 scw ldr r3, [r7]
232 1.41 scw mov r0, #0
233 1.41 scw teq r3, #0 /* sched_whichqs still zero? */
234 1.41 scw moveq lr, pc
235 1.41 scw moveq pc, r6 /* If so, do powersave idle */
236 1.41 scw IRQenableALL
237 1.41 scw b 1b /* Back around */
238 1.16 thorpej
239 1.41 scw /*
240 1.41 scw * sched_whichqs indicates that at least one lwp is ready to run.
241 1.41 scw * Restore the original interrupt priority level, grab the
242 1.41 scw * scheduler lock if necessary, and jump back into cpu_switch.
243 1.41 scw */
244 1.41 scw 2: mov r0, r4
245 1.41 scw bl _C_LABEL(splx)
246 1.41 scw adr lr, .Lswitch_search
247 1.41 scw b _C_LABEL(sched_lock_idle)
248 1.15 thorpej
249 1.1 chris
250 1.1 chris /*
251 1.29 thorpej * Find a new lwp to run, save the current context and
252 1.1 chris * load the new context
253 1.29 thorpej *
254 1.29 thorpej * Arguments:
255 1.29 thorpej * r0 'struct lwp *' of the current LWP
256 1.1 chris */
257 1.1 chris
258 1.1 chris ENTRY(cpu_switch)
259 1.1 chris /*
260 1.1 chris * Local register usage. Some of these registers are out of date.
261 1.29 thorpej * r1 = oldlwp
262 1.29 thorpej * r2 = spl level
263 1.1 chris * r3 = whichqs
264 1.1 chris * r4 = queue
265 1.1 chris * r5 = &qs[queue]
266 1.29 thorpej * r6 = newlwp
267 1.28 bjh21 * r7 = scratch
268 1.1 chris */
269 1.28 bjh21 stmfd sp!, {r4-r7, lr}
270 1.1 chris
271 1.1 chris /*
272 1.29 thorpej * Indicate that there is no longer a valid process (curlwp = 0).
273 1.29 thorpej * Zero the current PCB pointer while we're at it.
274 1.1 chris */
275 1.29 thorpej ldr r7, .Lcurlwp
276 1.28 bjh21 ldr r6, .Lcurpcb
277 1.29 thorpej mov r2, #0x00000000
278 1.44 skrll str r2, [r7] /* curlwp = NULL */
279 1.29 thorpej str r2, [r6] /* curpcb = NULL */
280 1.28 bjh21
281 1.44 skrll /* stash the old lwp while we call functions */
282 1.29 thorpej mov r5, r0
283 1.1 chris
284 1.29 thorpej /* First phase : find a new lwp */
285 1.17 thorpej ldr r7, .Lwhichqs
286 1.16 thorpej
287 1.29 thorpej /* rem: r5 = old lwp */
288 1.16 thorpej /* rem: r7 = &whichqs */
289 1.7 chris
290 1.14 briggs .Lswitch_search:
291 1.1 chris IRQdisable
292 1.7 chris
293 1.1 chris 	/* Do we have any active queues? */
294 1.1 chris ldr r3, [r7]
295 1.1 chris
296 1.1 chris /* If not we must idle until we do. */
297 1.1 chris teq r3, #0x00000000
298 1.4 chris beq _ASM_LABEL(idle)
299 1.7 chris
300 1.44 skrll /* put old lwp back in r1 */
301 1.28 bjh21 mov r1, r5
302 1.28 bjh21
303 1.29 thorpej /* rem: r1 = old lwp */
304 1.1 chris /* rem: r3 = whichqs */
305 1.1 chris /* rem: interrupts are disabled */
306 1.1 chris
307 1.37 scw /* used further down, saves SA stall */
308 1.37 scw ldr r6, .Lqs
309 1.37 scw
310 1.1 chris /*
311 1.1 chris * We have found an active queue. Currently we do not know which queue
312 1.1 chris  * is active, just that one of them is.
313 1.1 chris */
314 1.37 scw /* Non-Xscale version of the ffs algorithm devised by d.seal and
315 1.37 scw * posted to comp.sys.arm on 16 Feb 1994.
316 1.1 chris */
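	/*
	 * Roughly, in C (a sketch of the sequence below, not taken from the
	 * original source):
	 *
	 *	bit   = whichqs & -whichqs;                  // isolate lowest set bit
	 *	queue = ffs_table[(bit * 0x0450fbaf) >> 26]; // 64-entry byte table
	 *
	 * Multiplying the isolated bit by 0x0450fbaf leaves a distinct 6-bit
	 * pattern in the top bits for each of the 32 possible inputs, so those
	 * bits index .Lcpu_switch_ffs_table to yield the bit (and hence queue)
	 * number directly.
	 */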
317 1.1 chris rsb r5, r3, #0
318 1.1 chris ands r0, r3, r5
319 1.37 scw
320 1.37 scw #ifndef __XSCALE__
321 1.17 thorpej adr r5, .Lcpu_switch_ffs_table
322 1.37 scw
323 1.3 chris /* X = R0 */
324 1.3 chris orr r4, r0, r0, lsl #4 /* r4 = X * 0x11 */
325 1.3 chris orr r4, r4, r4, lsl #6 /* r4 = X * 0x451 */
326 1.3 chris rsb r4, r4, r4, lsl #16 /* r4 = X * 0x0450fbaf */
327 1.1 chris
328 1.3 chris 	/* now look up in the table indexed by the top 6 bits of r4 */
329 1.1 chris ldrb r4, [ r5, r4, lsr #26 ]
330 1.1 chris
331 1.37 scw #else /* __XSCALE__ */
332 1.37 scw clz r4, r0
333 1.37 scw rsb r4, r4, #31
334 1.37 scw #endif /* __XSCALE__ */
335 1.37 scw
336 1.1 chris /* rem: r0 = bit mask of chosen queue (1 << r4) */
337 1.29 thorpej /* rem: r1 = old lwp */
338 1.1 chris /* rem: r3 = whichqs */
339 1.1 chris /* rem: r4 = queue number */
340 1.1 chris /* rem: interrupts are disabled */
341 1.1 chris
342 1.1 chris /* Get the address of the queue (&qs[queue]) */
343 1.1 chris add r5, r6, r4, lsl #3
344 1.1 chris
345 1.1 chris /*
346 1.29 thorpej 	 * Get the lwp from the queue and place the next lwp in
347 1.29 thorpej * the queue at the head. This basically unlinks the lwp at
348 1.1 chris * the head of the queue.
349 1.1 chris */
350 1.29 thorpej ldr r6, [r5, #(L_FORW)]
351 1.1 chris
352 1.41 scw #ifdef DIAGNOSTIC
353 1.41 scw cmp r6, r5
354 1.41 scw beq .Lswitch_bogons
355 1.41 scw #endif
356 1.41 scw
357 1.29 thorpej /* rem: r6 = new lwp */
358 1.29 thorpej ldr r7, [r6, #(L_FORW)]
359 1.29 thorpej str r7, [r5, #(L_FORW)]
360 1.1 chris
361 1.1 chris /*
362 1.1 chris * Test to see if the queue is now empty. If the head of the queue
363 1.29 thorpej 	 * points to the queue itself, then there are no more lwps in
364 1.1 chris * the queue. We can therefore clear the queue not empty flag held
365 1.1 chris * in r3.
366 1.1 chris */
367 1.1 chris
368 1.1 chris teq r5, r7
369 1.1 chris biceq r3, r3, r0
370 1.1 chris
371 1.28 bjh21 	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED ANY MORE */
372 1.28 bjh21
373 1.29 thorpej /* Fix the back pointer for the lwp now at the head of the queue. */
374 1.29 thorpej ldr r0, [r6, #(L_BACK)]
375 1.29 thorpej str r0, [r7, #(L_BACK)]
376 1.1 chris
377 1.1 chris /* Update the RAM copy of the queue not empty flags word. */
378 1.38 scw ldreq r7, .Lwhichqs
379 1.38 scw streq r3, [r7]
380 1.1 chris
381 1.29 thorpej /* rem: r1 = old lwp */
382 1.1 chris /* rem: r3 = whichqs - NOT NEEDED ANY MORE */
383 1.1 chris /* rem: r4 = queue number - NOT NEEDED ANY MORE */
384 1.29 thorpej /* rem: r6 = new lwp */
385 1.1 chris /* rem: interrupts are disabled */
386 1.1 chris
387 1.1 chris /* Clear the want_resched flag */
388 1.28 bjh21 ldr r7, .Lwant_resched
389 1.1 chris mov r0, #0x00000000
390 1.28 bjh21 str r0, [r7]
391 1.1 chris
392 1.1 chris /*
393 1.29 thorpej * Clear the back pointer of the lwp we have removed from
394 1.29 thorpej * the head of the queue. The new lwp is isolated now.
395 1.1 chris */
396 1.29 thorpej str r0, [r6, #(L_BACK)]
397 1.1 chris
398 1.7 chris /*
399 1.7 chris * unlock the sched_lock, but leave interrupts off, for now.
400 1.7 chris */
401 1.28 bjh21 mov r7, r1
402 1.7 chris bl _C_LABEL(sched_unlock_idle)
403 1.28 bjh21 mov r1, r7
404 1.7 chris
405 1.38 scw
406 1.29 thorpej .Lswitch_resume:
407 1.38 scw /* rem: r1 = old lwp */
408 1.38 scw /* rem: r4 = return value [not used if came from cpu_switchto()] */
409 1.43 skrll /* rem: r6 = new lwp */
410 1.38 scw /* rem: interrupts are disabled */
411 1.38 scw
412 1.19 bjh21 #ifdef MULTIPROCESSOR
413 1.19 bjh21 /* XXX use curcpu() */
414 1.19 bjh21 ldr r0, .Lcpu_info_store
415 1.29 thorpej str r0, [r6, #(L_CPU)]
416 1.19 bjh21 #else
417 1.29 thorpej /* l->l_cpu initialized in fork1() for single-processor */
418 1.19 bjh21 #endif
419 1.1 chris
420 1.1 chris /* Process is now on a processor. */
421 1.29 thorpej mov r0, #LSONPROC /* l->l_stat = LSONPROC */
422 1.29 thorpej str r0, [r6, #(L_STAT)]
423 1.1 chris
424 1.29 thorpej 	/* We have a new curlwp now, so make a note of it */
425 1.29 thorpej ldr r7, .Lcurlwp
426 1.1 chris str r6, [r7]
427 1.1 chris
428 1.1 chris /* Hook in a new pcb */
429 1.17 thorpej ldr r7, .Lcurpcb
430 1.29 thorpej ldr r0, [r6, #(L_ADDR)]
431 1.1 chris str r0, [r7]
432 1.1 chris
433 1.1 chris 	/* At this point we can allow IRQs again. */
434 1.1 chris IRQenable
435 1.1 chris
436 1.29 thorpej /* rem: r1 = old lwp */
437 1.29 thorpej /* rem: r4 = return value */
438 1.43 skrll /* rem: r6 = new lwp */
439 1.4 chris /* rem: interrupts are enabled */
440 1.1 chris
441 1.1 chris /*
442 1.43 skrll * If the new lwp is the same as the lwp that called
443 1.1 chris * cpu_switch() then we do not need to save and restore any
444 1.1 chris * contexts. This means we can make a quick exit.
445 1.29 thorpej 	 * The test is simple: if curlwp on entry (now in r1) is the
446 1.43 skrll 	 * same as the lwp removed from the queue, we can jump to the exit.
447 1.1 chris */
448 1.28 bjh21 teq r1, r6
449 1.29 thorpej moveq r4, #0x00000000 /* default to "didn't switch" */
450 1.14 briggs beq .Lswitch_return
451 1.1 chris
452 1.29 thorpej /*
453 1.29 thorpej * At this point, we are guaranteed to be switching to
454 1.29 thorpej * a new lwp.
455 1.29 thorpej */
456 1.29 thorpej mov r4, #0x00000001
457 1.29 thorpej
458 1.29 thorpej /* Remember the old lwp in r0 */
459 1.28 bjh21 mov r0, r1
460 1.28 bjh21
461 1.1 chris /*
462 1.29 thorpej 	 * If the old lwp on entry to cpu_switch was zero, then the
463 1.1 chris * process that called it was exiting. This means that we do
464 1.1 chris * not need to save the current context. Instead we can jump
465 1.1 chris * straight to restoring the context for the new process.
466 1.1 chris */
467 1.28 bjh21 teq r0, #0x00000000
468 1.14 briggs beq .Lswitch_exited
469 1.1 chris
470 1.29 thorpej /* rem: r0 = old lwp */
471 1.29 thorpej /* rem: r4 = return value */
472 1.43 skrll /* rem: r6 = new lwp */
473 1.4 chris /* rem: interrupts are enabled */
474 1.1 chris
475 1.1 chris /* Stage two : Save old context */
476 1.1 chris
477 1.29 thorpej /* Get the user structure for the old lwp. */
478 1.29 thorpej ldr r1, [r0, #(L_ADDR)]
479 1.1 chris
480 1.29 thorpej /* Save all the registers in the old lwp's pcb */
481 1.37 scw #ifndef __XSCALE__
482 1.28 bjh21 add r7, r1, #(PCB_R8)
483 1.28 bjh21 stmia r7, {r8-r13}
484 1.37 scw #else
485 1.37 scw strd r8, [r1, #(PCB_R8)]
486 1.37 scw strd r10, [r1, #(PCB_R10)]
487 1.37 scw strd r12, [r1, #(PCB_R12)]
488 1.37 scw #endif
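	/*
	 * On XScale (ARMv5TE) strd stores an even/odd register pair with a
	 * single double-word access, so the three strd instructions above
	 * replace the stmia used on other CPUs to save r8-r13 into the PCB.
	 */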
489 1.1 chris
490 1.1 chris /*
491 1.29 thorpej * NOTE: We can now use r8-r13 until it is time to restore
492 1.29 thorpej * them for the new process.
493 1.29 thorpej */
494 1.29 thorpej
495 1.29 thorpej /* Remember the old PCB. */
496 1.29 thorpej mov r8, r1
497 1.29 thorpej
498 1.29 thorpej /* r1 now free! */
499 1.29 thorpej
500 1.29 thorpej /* Get the user structure for the new process in r9 */
501 1.29 thorpej ldr r9, [r6, #(L_ADDR)]
502 1.29 thorpej
503 1.29 thorpej /*
504 1.1 chris * This can be optimised... We know we want to go from SVC32
505 1.1 chris * mode to UND32 mode
506 1.1 chris */
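	/*
	 * (sp is banked per processor mode, so the undefined-mode stack
	 * pointer can only be read while actually in UND32 mode; hence the
	 * brief mode switch, with IRQs masked, just to save it in the PCB.)
	 */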
507 1.13 thorpej mrs r3, cpsr
508 1.1 chris bic r2, r3, #(PSR_MODE)
509 1.1 chris orr r2, r2, #(PSR_UND32_MODE | I32_bit)
510 1.13 thorpej msr cpsr_c, r2
511 1.1 chris
512 1.29 thorpej str sp, [r8, #(PCB_UND_SP)]
513 1.1 chris
514 1.13 thorpej msr cpsr_c, r3 /* Restore the old mode */
515 1.1 chris
516 1.29 thorpej /* rem: r0 = old lwp */
517 1.29 thorpej /* rem: r4 = return value */
518 1.43 skrll /* rem: r6 = new lwp */
519 1.29 thorpej /* rem: r8 = old PCB */
520 1.29 thorpej /* rem: r9 = new PCB */
521 1.4 chris /* rem: interrupts are enabled */
522 1.1 chris
523 1.1 chris 	/* What else needs to be saved?  Only FPA stuff, when that is supported */
524 1.1 chris
525 1.1 chris /* Third phase : restore saved context */
526 1.1 chris
527 1.29 thorpej /* rem: r0 = old lwp */
528 1.29 thorpej /* rem: r4 = return value */
529 1.29 thorpej /* rem: r6 = new lwp */
530 1.29 thorpej /* rem: r8 = old PCB */
531 1.29 thorpej /* rem: r9 = new PCB */
532 1.9 thorpej /* rem: interrupts are enabled */
533 1.9 thorpej
534 1.9 thorpej /*
535 1.29 thorpej * Get the new L1 table pointer into r11. If we're switching to
536 1.29 thorpej * an LWP with the same address space as the outgoing one, we can
537 1.29 thorpej * skip the cache purge and the TTB load.
538 1.29 thorpej *
539 1.29 thorpej * To avoid data dep stalls that would happen anyway, we try
540 1.29 thorpej 	 * to get some useful work done in the meantime.
541 1.29 thorpej */
542 1.29 thorpej ldr r10, [r8, #(PCB_PAGEDIR)] /* r10 = old L1 */
543 1.29 thorpej ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */
544 1.29 thorpej
545 1.30 scw ldr r0, [r8, #(PCB_DACR)] /* r0 = old DACR */
546 1.30 scw ldr r1, [r9, #(PCB_DACR)] /* r1 = new DACR */
547 1.30 scw ldr r8, [r9, #(PCB_CSTATE)] /* r8 = &new_pmap->pm_cstate */
548 1.30 scw ldr r5, .Llast_cache_state_ptr /* Previous thread's cstate */
549 1.30 scw
550 1.30 scw teq r10, r11 /* Same L1? */
551 1.30 scw ldr r5, [r5]
552 1.30 scw cmpeq r0, r1 /* Same DACR? */
553 1.30 scw beq .Lcs_context_switched /* yes! */
554 1.30 scw
555 1.30 scw ldr r3, .Lblock_userspace_access
556 1.30 scw mov r12, #0
557 1.30 scw cmp r5, #0 /* No last vm? (switch_exit) */
558 1.30 scw 	beq	.Lcs_cache_purge_skipped	/* No, we can skip cache flush */
559 1.30 scw
560 1.30 scw mov r2, #DOMAIN_CLIENT
561 1.30 scw cmp r1, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
562 1.30 scw beq .Lcs_cache_purge_skipped /* Yup. Don't flush cache */
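	/*
	 * The comparison above tests whether the new DACR grants client
	 * access to the kernel domain only (DOMAIN_CLIENT shifted to the
	 * PMAP_DOMAIN_KERNEL bit pair), i.e. the DACR of a kernel-only
	 * thread.  Such a thread brings in no new userland mappings, so
	 * whatever is still in the cache cannot conflict with it.
	 */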
563 1.30 scw
564 1.30 scw cmp r5, r8 /* Same userland VM space? */
565 1.30 scw ldrneb r12, [r5, #(CS_CACHE_ID)] /* Last VM space cache state */
566 1.30 scw
567 1.30 scw /*
568 1.30 scw 	 * We're definitely switching to a new userland VM space,
569 1.30 scw * and the previous userland VM space has yet to be flushed
570 1.30 scw * from the cache/tlb.
571 1.30 scw *
572 1.30 scw * r12 holds the previous VM space's cs_cache_id state
573 1.30 scw */
574 1.30 scw tst r12, #0xff /* Test cs_cache_id */
575 1.30 scw beq .Lcs_cache_purge_skipped /* VM space is not in cache */
576 1.30 scw
577 1.30 scw /*
578 1.30 scw 	 * Definitely need to flush the cache.
579 1.30 scw * Mark the old VM space as NOT being resident in the cache.
580 1.30 scw */
581 1.30 scw mov r2, #0x00000000
582 1.32 chris strb r2, [r5, #(CS_CACHE_ID)]
583 1.32 chris strb r2, [r5, #(CS_CACHE_D)]
584 1.30 scw
585 1.30 scw /*
586 1.30 scw * Don't allow user space access between the purge and the switch.
587 1.30 scw */
588 1.30 scw mov r2, #0x00000001
589 1.30 scw str r2, [r3]
590 1.30 scw
591 1.30 scw stmfd sp!, {r0-r3}
592 1.30 scw ldr r1, .Lcpufuncs
593 1.30 scw mov lr, pc
594 1.30 scw ldr pc, [r1, #CF_IDCACHE_WBINV_ALL]
595 1.30 scw ldmfd sp!, {r0-r3}
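	/*
	 * r0-r3 are caller-saved in the ARM calling convention, so the cache
	 * routine may clobber them; they are pushed and popped around the
	 * call above because r1 (new DACR) and r3 (&block_userspace_access)
	 * are still needed below.
	 */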
596 1.30 scw
597 1.30 scw .Lcs_cache_purge_skipped:
598 1.30 scw /* rem: r1 = new DACR */
599 1.30 scw /* rem: r3 = &block_userspace_access */
600 1.30 scw /* rem: r4 = return value */
601 1.30 scw /* rem: r5 = &old_pmap->pm_cstate (or NULL) */
602 1.30 scw /* rem: r6 = new lwp */
603 1.30 scw /* rem: r8 = &new_pmap->pm_cstate */
604 1.30 scw /* rem: r9 = new PCB */
605 1.30 scw /* rem: r10 = old L1 */
606 1.30 scw /* rem: r11 = new L1 */
607 1.30 scw
608 1.30 scw mov r2, #0x00000000
609 1.30 scw ldr r7, [r9, #(PCB_PL1VEC)]
610 1.30 scw
611 1.30 scw /*
612 1.30 scw 	 * At this point we need to kill IRQs again.
613 1.30 scw *
614 1.30 scw * XXXSCW: Don't need to block FIQs if vectors have been relocated
615 1.30 scw */
616 1.30 scw IRQdisableALL
617 1.30 scw
618 1.30 scw /*
619 1.30 scw * Interrupts are disabled so we can allow user space accesses again
620 1.30 scw * as none will occur until interrupts are re-enabled after the
621 1.30 scw * switch.
622 1.30 scw */
623 1.30 scw str r2, [r3]
624 1.30 scw
625 1.30 scw /*
626 1.30 scw * Ensure the vector table is accessible by fixing up the L1
627 1.30 scw */
628 1.30 scw cmp r7, #0 /* No need to fixup vector table? */
629 1.30 scw ldrne r2, [r7] /* But if yes, fetch current value */
630 1.30 scw ldrne r0, [r9, #(PCB_L1VEC)] /* Fetch new vector_page value */
631 1.30 scw mcr p15, 0, r1, c3, c0, 0 /* Update DACR for new context */
632 1.30 scw cmpne r2, r0 /* Stuffing the same value? */
633 1.31 thorpej #ifndef PMAP_INCLUDE_PTE_SYNC
634 1.30 scw strne r0, [r7] /* Nope, update it */
635 1.30 scw #else
636 1.30 scw beq .Lcs_same_vector
637 1.30 scw str r0, [r7] /* Otherwise, update it */
638 1.30 scw
639 1.30 scw /*
640 1.30 scw * Need to sync the cache to make sure that last store is
641 1.30 scw * visible to the MMU.
642 1.30 scw */
643 1.30 scw ldr r2, .Lcpufuncs
644 1.30 scw mov r0, r7
645 1.30 scw mov r1, #4
646 1.30 scw mov lr, pc
647 1.30 scw ldr pc, [r2, #CF_DCACHE_WB_RANGE]
648 1.30 scw
649 1.30 scw .Lcs_same_vector:
650 1.33 thorpej #endif /* PMAP_INCLUDE_PTE_SYNC */
651 1.30 scw
652 1.30 scw cmp r10, r11 /* Switching to the same L1? */
653 1.30 scw ldr r10, .Lcpufuncs
654 1.30 scw beq .Lcs_same_l1 /* Yup. */
655 1.30 scw
656 1.30 scw /*
657 1.30 scw * Do a full context switch, including full TLB flush.
658 1.30 scw */
659 1.30 scw mov r0, r11
660 1.30 scw mov lr, pc
661 1.30 scw ldr pc, [r10, #CF_CONTEXT_SWITCH]
662 1.30 scw
663 1.30 scw /*
664 1.30 scw * Mark the old VM space as NOT being resident in the TLB
665 1.30 scw */
666 1.30 scw mov r2, #0x00000000
667 1.30 scw cmp r5, #0
668 1.30 scw strneh r2, [r5, #(CS_TLB_ID)]
669 1.30 scw b .Lcs_context_switched
670 1.30 scw
671 1.30 scw /*
672 1.30 scw * We're switching to a different process in the same L1.
673 1.30 scw * In this situation, we only need to flush the TLB for the
674 1.30 scw * vector_page mapping, and even then only if r7 is non-NULL.
675 1.30 scw */
676 1.30 scw .Lcs_same_l1:
677 1.30 scw cmp r7, #0
678 1.30 scw movne r0, #0 /* We *know* vector_page's VA is 0x0 */
679 1.30 scw movne lr, pc
680 1.30 scw ldrne pc, [r10, #CF_TLB_FLUSHID_SE]
681 1.30 scw
682 1.30 scw .Lcs_context_switched:
683 1.30 scw /* rem: r8 = &new_pmap->pm_cstate */
684 1.30 scw
685 1.30 scw /* XXXSCW: Safe to re-enable FIQs here */
686 1.30 scw
687 1.30 scw /*
688 1.30 scw * The new VM space is live in the cache and TLB.
689 1.30 scw * Update its cache/tlb state, and if it's not the kernel
690 1.30 scw * pmap, update the 'last cache state' pointer.
691 1.30 scw */
692 1.30 scw mov r2, #-1
693 1.30 scw ldr r5, .Lpmap_kernel_cstate
694 1.30 scw ldr r0, .Llast_cache_state_ptr
695 1.30 scw str r2, [r8, #(CS_ALL)]
696 1.30 scw cmp r5, r8
697 1.30 scw strne r8, [r0]
698 1.30 scw
699 1.29 thorpej /* rem: r4 = return value */
700 1.29 thorpej /* rem: r6 = new lwp */
701 1.29 thorpej /* rem: r9 = new PCB */
702 1.29 thorpej
703 1.1 chris /*
704 1.1 chris * This can be optimised... We know we want to go from SVC32
705 1.1 chris * mode to UND32 mode
706 1.1 chris */
707 1.13 thorpej mrs r3, cpsr
708 1.1 chris bic r2, r3, #(PSR_MODE)
709 1.1 chris orr r2, r2, #(PSR_UND32_MODE)
710 1.13 thorpej msr cpsr_c, r2
711 1.1 chris
712 1.29 thorpej ldr sp, [r9, #(PCB_UND_SP)]
713 1.1 chris
714 1.13 thorpej msr cpsr_c, r3 /* Restore the old mode */
715 1.1 chris
716 1.28 bjh21 	/* Restore all the saved registers */
717 1.37 scw #ifndef __XSCALE__
718 1.29 thorpej add r7, r9, #PCB_R8
719 1.28 bjh21 ldmia r7, {r8-r13}
720 1.28 bjh21
721 1.29 thorpej sub r7, r7, #PCB_R8 /* restore PCB pointer */
722 1.37 scw #else
723 1.37 scw mov r7, r9
724 1.37 scw ldr r8, [r7, #(PCB_R8)]
725 1.37 scw ldr r9, [r7, #(PCB_R9)]
726 1.37 scw ldr r10, [r7, #(PCB_R10)]
727 1.37 scw ldr r11, [r7, #(PCB_R11)]
728 1.37 scw ldr r12, [r7, #(PCB_R12)]
729 1.37 scw ldr r13, [r7, #(PCB_SP)]
730 1.37 scw #endif
731 1.29 thorpej
732 1.29 thorpej ldr r5, [r6, #(L_PROC)] /* fetch the proc for below */
733 1.29 thorpej
734 1.29 thorpej /* rem: r4 = return value */
735 1.29 thorpej /* rem: r5 = new lwp's proc */
736 1.29 thorpej /* rem: r6 = new lwp */
737 1.29 thorpej /* rem: r7 = new pcb */
738 1.18 thorpej
739 1.1 chris #ifdef ARMFPE
740 1.29 thorpej add r0, r7, #(USER_SIZE) & 0x00ff
741 1.1 chris add r0, r0, #(USER_SIZE) & 0xff00
742 1.1 chris bl _C_LABEL(arm_fpe_core_changecontext)
743 1.1 chris #endif
744 1.1 chris
745 1.1 chris /* We can enable interrupts again */
746 1.30 scw IRQenableALL
747 1.1 chris
748 1.29 thorpej /* rem: r4 = return value */
749 1.29 thorpej /* rem: r5 = new lwp's proc */
750 1.29 thorpej /* rem: r6 = new lwp */
751 1.18 thorpej /* rem: r7 = new PCB */
752 1.18 thorpej
753 1.18 thorpej /*
754 1.18 thorpej * Check for restartable atomic sequences (RAS).
755 1.18 thorpej */
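	/*
	 * If the new lwp's process has registered restartable atomic
	 * sequences and the lwp was interrupted inside one of them,
	 * ras_lookup() (called at .Lswitch_do_ras below) returns the restart
	 * address, and the trapframe PC is rewound to it so the sequence
	 * re-runs from the start when the lwp returns to userland.
	 */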
756 1.18 thorpej
757 1.39 dsl ldr r2, [r5, #(P_RASLIST)]
758 1.38 scw ldr r1, [r7, #(PCB_TF)] /* r1 = trapframe (used below) */
759 1.18 thorpej teq r2, #0 /* p->p_nras == 0? */
760 1.18 thorpej bne .Lswitch_do_ras /* no, check for one */
761 1.18 thorpej
762 1.14 briggs .Lswitch_return:
763 1.29 thorpej /* cpu_switch returns 1 == switched, 0 == didn't switch */
764 1.29 thorpej mov r0, r4
765 1.1 chris
766 1.1 chris /*
767 1.1 chris * Pull the registers that got pushed when either savectx() or
768 1.1 chris * cpu_switch() was called and return.
769 1.1 chris */
770 1.28 bjh21 ldmfd sp!, {r4-r7, pc}
771 1.18 thorpej
772 1.18 thorpej .Lswitch_do_ras:
773 1.38 scw ldr r1, [r1, #(TF_PC)] /* second ras_lookup() arg */
774 1.29 thorpej mov r0, r5 /* first ras_lookup() arg */
775 1.18 thorpej bl _C_LABEL(ras_lookup)
776 1.18 thorpej cmn r0, #1 /* -1 means "not in a RAS" */
777 1.38 scw ldrne r1, [r7, #(PCB_TF)]
778 1.38 scw strne r0, [r1, #(TF_PC)]
779 1.18 thorpej b .Lswitch_return
780 1.1 chris
781 1.14 briggs .Lswitch_exited:
782 1.9 thorpej /*
783 1.29 thorpej * We skip the cache purge because switch_exit() already did it.
784 1.29 thorpej * Load up registers the way .Lcs_cache_purge_skipped expects.
785 1.43 skrll * Userspace access already blocked by switch_exit().
786 1.9 thorpej */
787 1.29 thorpej ldr r9, [r6, #(L_ADDR)] /* r9 = new PCB */
788 1.17 thorpej ldr r3, .Lblock_userspace_access
789 1.30 scw mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */
790 1.30 scw mov r5, #0 /* No previous cache state */
791 1.30 scw ldr r1, [r9, #(PCB_DACR)] /* r1 = new DACR */
792 1.30 scw ldr r8, [r9, #(PCB_CSTATE)] /* r8 = new cache state */
793 1.29 thorpej ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */
794 1.14 briggs b .Lcs_cache_purge_skipped
795 1.9 thorpej
796 1.41 scw
797 1.41 scw #ifdef DIAGNOSTIC
798 1.41 scw .Lswitch_bogons:
799 1.41 scw adr r0, .Lswitch_panic_str
800 1.41 scw bl _C_LABEL(panic)
801 1.41 scw 1: nop
802 1.41 scw b 1b
803 1.41 scw
804 1.41 scw .Lswitch_panic_str:
805 1.41 scw .asciz "cpu_switch: sched_qs empty with non-zero sched_whichqs!\n"
806 1.41 scw #endif
807 1.41 scw
808 1.7 chris /*
809 1.29 thorpej * cpu_switchto(struct lwp *current, struct lwp *next)
810 1.29 thorpej * Switch to the specified next LWP
811 1.29 thorpej * Arguments:
812 1.29 thorpej *
813 1.29 thorpej * r0 'struct lwp *' of the current LWP
814 1.29 thorpej * r1 'struct lwp *' of the LWP to switch to
815 1.29 thorpej */
816 1.29 thorpej ENTRY(cpu_switchto)
817 1.29 thorpej stmfd sp!, {r4-r7, lr}
818 1.29 thorpej
819 1.38 scw mov r6, r1 /* save new lwp */
820 1.29 thorpej
821 1.38 scw mov r5, r0 /* save old lwp */
822 1.29 thorpej bl _C_LABEL(sched_unlock_idle)
823 1.38 scw mov r1, r5
824 1.29 thorpej
825 1.29 thorpej IRQdisable
826 1.29 thorpej
827 1.29 thorpej /*
828 1.29 thorpej * Okay, set up registers the way cpu_switch() wants them,
829 1.29 thorpej * and jump into the middle of it (where we bring up the
830 1.29 thorpej * new process).
831 1.38 scw *
832 1.38 scw * r1 = old lwp (r6 = new lwp)
833 1.29 thorpej */
834 1.29 thorpej b .Lswitch_resume
835 1.29 thorpej
836 1.29 thorpej /*
837 1.29 thorpej * void switch_exit(struct lwp *l, struct lwp *l0, void (*exit)(struct lwp *));
838 1.29 thorpej * Switch to lwp0's saved context and deallocate the address space and kernel
839 1.29 thorpej * stack for l. Then jump into cpu_switch(), as if we were in lwp0 all along.
840 1.7 chris */
841 1.1 chris
842 1.34 kristerw /* LINTSTUB: Func: void switch_exit(struct lwp *l, struct lwp *l0, void (*func)(struct lwp *)) */
843 1.1 chris ENTRY(switch_exit)
844 1.1 chris /*
845 1.29 thorpej * The process is going away, so we can use callee-saved
846 1.29 thorpej * registers here without having to save them.
847 1.1 chris */
848 1.1 chris
849 1.29 thorpej mov r4, r0
850 1.29 thorpej ldr r0, .Lcurlwp
851 1.29 thorpej
852 1.29 thorpej mov r5, r1
853 1.29 thorpej ldr r1, .Lblock_userspace_access
854 1.1 chris
855 1.29 thorpej mov r6, r2
856 1.29 thorpej
857 1.29 thorpej /*
858 1.29 thorpej * r4 = lwp
859 1.29 thorpej * r5 = lwp0
860 1.29 thorpej * r6 = exit func
861 1.29 thorpej */
862 1.29 thorpej
863 1.29 thorpej mov r2, #0x00000000 /* curlwp = NULL */
864 1.1 chris str r2, [r0]
865 1.1 chris
866 1.30 scw /*
867 1.30 scw * We're about to clear both the cache and the TLB.
868 1.30 scw * Make sure to zap the 'last cache state' pointer since the
869 1.30 scw * pmap might be about to go away. Also ensure the outgoing
870 1.30 scw * VM space's cache state is marked as NOT resident in the
871 1.30 scw * cache, and that lwp0's cache state IS resident.
872 1.30 scw */
873 1.30 scw ldr r7, [r4, #(L_ADDR)] /* r7 = old lwp's PCB */
874 1.30 scw ldr r0, .Llast_cache_state_ptr /* Last userland cache state */
875 1.30 scw ldr r9, [r7, #(PCB_CSTATE)] /* Fetch cache state pointer */
876 1.30 scw ldr r3, [r5, #(L_ADDR)] /* r3 = lwp0's PCB */
877 1.30 scw str r2, [r0] /* No previous cache state */
878 1.30 scw str r2, [r9, #(CS_ALL)] /* Zap old lwp's cache state */
879 1.30 scw ldr r3, [r3, #(PCB_CSTATE)] /* lwp0's cache state */
880 1.30 scw mov r2, #-1
881 1.30 scw str r2, [r3, #(CS_ALL)] /* lwp0 is in da cache! */
882 1.30 scw
883 1.9 thorpej /*
884 1.9 thorpej * Don't allow user space access between the purge and the switch.
885 1.9 thorpej */
886 1.9 thorpej mov r2, #0x00000001
887 1.29 thorpej str r2, [r1]
888 1.1 chris
889 1.30 scw /* Switch to lwp0 context */
890 1.30 scw
891 1.30 scw ldr r9, .Lcpufuncs
892 1.30 scw mov lr, pc
893 1.30 scw ldr pc, [r9, #CF_IDCACHE_WBINV_ALL]
894 1.30 scw
895 1.30 scw ldr r0, [r7, #(PCB_PL1VEC)]
896 1.30 scw ldr r1, [r7, #(PCB_DACR)]
897 1.30 scw
898 1.30 scw /*
899 1.30 scw * r0 = Pointer to L1 slot for vector_page (or NULL)
900 1.30 scw * r1 = lwp0's DACR
901 1.30 scw * r4 = lwp we're switching from
902 1.30 scw * r5 = lwp0
903 1.30 scw * r6 = exit func
904 1.30 scw * r7 = lwp0's PCB
905 1.30 scw * r9 = cpufuncs
906 1.30 scw */
907 1.30 scw
908 1.30 scw IRQdisableALL
909 1.30 scw
910 1.30 scw /*
911 1.30 scw * Ensure the vector table is accessible by fixing up lwp0's L1
912 1.30 scw */
913 1.30 scw cmp r0, #0 /* No need to fixup vector table? */
914 1.30 scw ldrne r3, [r0] /* But if yes, fetch current value */
915 1.30 scw ldrne r2, [r7, #(PCB_L1VEC)] /* Fetch new vector_page value */
916 1.30 scw mcr p15, 0, r1, c3, c0, 0 /* Update DACR for lwp0's context */
917 1.30 scw cmpne r3, r2 /* Stuffing the same value? */
918 1.30 scw strne r2, [r0] /* Store if not. */
919 1.30 scw
920 1.31 thorpej #ifdef PMAP_INCLUDE_PTE_SYNC
921 1.30 scw /*
922 1.30 scw * Need to sync the cache to make sure that last store is
923 1.30 scw * visible to the MMU.
924 1.30 scw */
925 1.30 scw movne r1, #4
926 1.30 scw movne lr, pc
927 1.30 scw ldrne pc, [r9, #CF_DCACHE_WB_RANGE]
928 1.33 thorpej #endif /* PMAP_INCLUDE_PTE_SYNC */
929 1.30 scw
930 1.30 scw /*
931 1.30 scw * Note: We don't do the same optimisation as cpu_switch() with
932 1.30 scw * respect to avoiding flushing the TLB if we're switching to
933 1.30 scw * the same L1 since this process' VM space may be about to go
934 1.30 scw * away, so we don't want *any* turds left in the TLB.
935 1.30 scw */
936 1.30 scw
937 1.30 scw /* Switch the memory to the new process */
938 1.30 scw ldr r0, [r7, #(PCB_PAGEDIR)]
939 1.30 scw mov lr, pc
940 1.30 scw ldr pc, [r9, #CF_CONTEXT_SWITCH]
941 1.30 scw
942 1.30 scw ldr r0, .Lcurpcb
943 1.30 scw
944 1.30 scw 	/* Restore all the saved registers */
945 1.37 scw #ifndef __XSCALE__
946 1.30 scw add r1, r7, #PCB_R8
947 1.30 scw ldmia r1, {r8-r13}
948 1.37 scw #else
949 1.37 scw ldr r8, [r7, #(PCB_R8)]
950 1.37 scw ldr r9, [r7, #(PCB_R9)]
951 1.37 scw ldr r10, [r7, #(PCB_R10)]
952 1.37 scw ldr r11, [r7, #(PCB_R11)]
953 1.37 scw ldr r12, [r7, #(PCB_R12)]
954 1.37 scw ldr r13, [r7, #(PCB_SP)]
955 1.37 scw #endif
956 1.30 scw str r7, [r0] /* curpcb = lwp0's PCB */
957 1.30 scw
958 1.30 scw IRQenableALL
959 1.1 chris
960 1.1 chris /*
961 1.1 chris * Schedule the vmspace and stack to be freed.
962 1.1 chris */
963 1.29 thorpej mov r0, r4 /* {lwp_}exit2(l) */
964 1.29 thorpej mov lr, pc
965 1.29 thorpej mov pc, r6
966 1.41 scw
967 1.41 scw bl _C_LABEL(sched_lock_idle)
968 1.1 chris
969 1.17 thorpej ldr r7, .Lwhichqs /* r7 = &whichqs */
970 1.29 thorpej mov r5, #0x00000000 /* r5 = old lwp = NULL */
971 1.14 briggs b .Lswitch_search
972 1.1 chris
973 1.7 chris /* LINTSTUB: Func: void savectx(struct pcb *pcb) */
974 1.1 chris ENTRY(savectx)
975 1.1 chris /*
976 1.1 chris * r0 = pcb
977 1.1 chris */
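	/*
	 * savectx() is entered as an ordinary C call, so only the
	 * callee-saved registers matter: r4-r7 and lr go on the stack,
	 * r8-r13 into the supplied pcb; nothing else needs preserving.
	 */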
978 1.1 chris
979 1.1 chris /* Push registers.*/
980 1.28 bjh21 stmfd sp!, {r4-r7, lr}
981 1.1 chris
982 1.1 chris /* Store all the registers in the process's pcb */
983 1.37 scw #ifndef __XSCALE__
984 1.28 bjh21 add r2, r0, #(PCB_R8)
985 1.28 bjh21 stmia r2, {r8-r13}
986 1.37 scw #else
987 1.37 scw strd r8, [r0, #(PCB_R8)]
988 1.37 scw strd r10, [r0, #(PCB_R10)]
989 1.37 scw strd r12, [r0, #(PCB_R12)]
990 1.37 scw #endif
991 1.1 chris
992 1.1 chris 	/* Pull the regs off the stack */
993 1.28 bjh21 ldmfd sp!, {r4-r7, pc}
994 1.1 chris
995 1.1 chris ENTRY(proc_trampoline)
996 1.38 scw #ifdef __NEWINTR
997 1.38 scw mov r0, #(IPL_NONE)
998 1.38 scw bl _C_LABEL(_spllower)
999 1.38 scw #else /* ! __NEWINTR */
1000 1.38 scw mov r0, #(_SPL_0)
1001 1.38 scw bl _C_LABEL(splx)
1002 1.38 scw #endif /* __NEWINTR */
1003 1.38 scw
1004 1.19 bjh21 #ifdef MULTIPROCESSOR
1005 1.19 bjh21 bl _C_LABEL(proc_trampoline_mp)
1006 1.19 bjh21 #endif
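	/*
	 * r4 and r5 were placed in the frame built for this lwp when it was
	 * created (by cpu_fork() and related code): r4 is the function to
	 * call and r5 its argument.  cpu_switch() has already popped that
	 * frame, so sp now points at the lwp's trapframe, which is why it is
	 * passed as the second argument (and pulled below before returning).
	 */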
1007 1.1 chris mov r0, r5
1008 1.1 chris mov r1, sp
1009 1.24 bjh21 mov lr, pc
1010 1.1 chris mov pc, r4
1011 1.1 chris
1012 1.1 chris 	/* Kill IRQs */
1013 1.13 thorpej mrs r0, cpsr
1014 1.1 chris orr r0, r0, #(I32_bit)
1015 1.13 thorpej msr cpsr_c, r0
1016 1.1 chris
1017 1.1 chris PULLFRAME
1018 1.1 chris
1019 1.1 chris movs pc, lr /* Exit */
1020 1.1 chris
1021 1.37 scw #ifndef __XSCALE__
1022 1.17 thorpej .type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
1023 1.17 thorpej .Lcpu_switch_ffs_table:
1024 1.1 chris /* same as the ffs table, but every entry is one less (bit index rather than ffs value) */
1025 1.1 chris /* 0 1 2 3 4 5 6 7 */
1026 1.1 chris .byte 0, 0, 1, 12, 2, 6, 0, 13 /* 0- 7 */
1027 1.1 chris .byte 3, 0, 7, 0, 0, 0, 0, 14 /* 8-15 */
1028 1.1 chris .byte 10, 4, 0, 0, 8, 0, 0, 25 /* 16-23 */
1029 1.1 chris .byte 0, 0, 0, 0, 0, 21, 27, 15 /* 24-31 */
1030 1.1 chris .byte 31, 11, 5, 0, 0, 0, 0, 0 /* 32-39 */
1031 1.1 chris .byte 9, 0, 0, 24, 0, 0, 20, 26 /* 40-47 */
1032 1.1 chris .byte 30, 0, 0, 0, 0, 23, 0, 19 /* 48-55 */
1033 1.1 chris .byte 29, 0, 22, 18, 28, 17, 16, 0 /* 56-63 */
1034 1.37 scw #endif	/* !__XSCALE__ */