/*	$NetBSD: cpuswitch.S,v 1.3.2.26 2002/12/31 01:03:47 thorpej Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_multiprocessor.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/asm.h>

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable
 * These keep FIQ's enabled since FIQ's are special.
 */

#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

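/*
 * In effect, the two macros do this (illustrative pseudo-code only):
 *
 *	IRQdisable:	cpsr |=  I32_bit;	// mask IRQs
 *	IRQenable:	cpsr &= ~I32_bit;	// unmask IRQs
 *
 * Only the I bit is touched; the F bit is left alone, so FIQs stay
 * enabled across both macros.
 */
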
	.text

.Lwhichqs:
	.word	_C_LABEL(sched_whichqs)

.Lqs:
	.word	_C_LABEL(sched_qs)

/*
 * cpuswitch()
 *
 * Performs a process context switch.
 * This function has several entry points.
 */

#ifdef MULTIPROCESSOR
.Lcpu_info_store:
	.word	_C_LABEL(cpu_info_store)
.Lcurlwp:
	/* FIXME: This is bogus in the general case. */
	.word	_C_LABEL(cpu_info_store) + CI_CURLWP

.Lcurpcb:
	.word	_C_LABEL(cpu_info_store) + CI_CURPCB
#else
.Lcurlwp:
	.word	_C_LABEL(curlwp)

.Lcurpcb:
	.word	_C_LABEL(curpcb)
#endif

.Lwant_resched:
	.word	_C_LABEL(want_resched)

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#ifndef MULTIPROCESSOR
	.data
	.global	_C_LABEL(curpcb)
_C_LABEL(curpcb):
	.word	0x00000000
	.text
#endif

.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)

.Lcpu_do_powersave:
	.word	_C_LABEL(cpu_do_powersave)

/*
 * Idle loop, exercised while waiting for a process to wake up.
 *
 * NOTE: When we jump back to .Lswitch_search, we must have a
 * pointer to whichqs in r7, which is what it is when we arrive
 * here.
 */
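
/*
 * Roughly, in C (an illustrative sketch only; cf_sleep is the
 * cpufuncs member reached via the CF_SLEEP offset below):
 *
 *	sched_unlock_idle();
 *	enable_interrupts();
 *	while (sched_whichqs == 0) {
 *		if (cpu_do_powersave)
 *			cpufuncs.cf_sleep(0);	// wait for an interrupt
 *	}
 *	goto switch_search;
 */
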
/* LINTSTUB: Ignore */
ASENTRY_NP(idle)
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_unlock_idle)
#endif
	ldr	r3, .Lcpu_do_powersave

	/* Enable interrupts */
	IRQenable

	/* If we don't want to sleep, use a simpler loop. */
	ldr	r3, [r3]		/* r3 = cpu_do_powersave */
	teq	r3, #0
	bne	2f

	/* Non-powersave idle. */
1:	/* should maybe do uvm pageidlezero stuff here */
	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search
	b	1b

2:	/* Powersave idle. */
	ldr	r4, .Lcpufuncs
3:	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search

	/* if saving power, don't want to pageidlezero */
	mov	r0, #0
	adr	lr, 3b
	ldr	pc, [r4, #(CF_SLEEP)]
	/* loops back around */


/*
 * Find a new lwp to run, save the current context and
 * load the new context
 *
 * Arguments:
 *	r0	'struct lwp *' of the current LWP
 */

ENTRY(cpu_switch)
/*
 * Local register usage. Some of these registers are out of date.
 * r1 = oldlwp
 * r2 = spl level
 * r3 = whichqs
 * r4 = queue
 * r5 = &qs[queue]
 * r6 = newlwp
 * r7 = scratch
 */
	stmfd	sp!, {r4-r7, lr}

	/*
	 * Get the current lwp and indicate that there is no longer
	 * a valid lwp (curlwp = 0). Zero the current PCB pointer
	 * while we're at it.
	 */
	ldr	r7, .Lcurlwp
	ldr	r6, .Lcurpcb
	mov	r0, #0x00000000
	ldr	r1, [r7]		/* r1 = curlwp */
	str	r0, [r7]		/* curlwp = NULL */
	str	r0, [r6]		/* curpcb = NULL */

	/* stash the old proc while we call functions */
	mov	r5, r1

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

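/*
 * In effect (an illustrative C sketch): the sequence above does
 * "s = spl0();" and parks s on the stack; .Lswitch_return pops it
 * again and calls splx(s) once the switch is complete.
 */
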
	/* First phase : find a new lwp */

	ldr	r7, .Lwhichqs

	/* rem: r5 = old lwp */
	/* rem: r7 = &whichqs */

.Lswitch_search:
	IRQdisable
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues? */
	ldr	r3, [r7]

	/* If not we must idle until we do. */
	teq	r3, #0x00000000
	beq	_ASM_LABEL(idle)

	/* put old proc back in r1 */
	mov	r1, r5

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: interrupts are disabled */

	/*
	 * We have found an active queue. Currently we do not know which
	 * queue is active, just that one of them is.
	 */
	/* this is the ffs algorithm devised by d.seal and posted to
	 * comp.sys.arm on 16 Feb 1994.
	 */
	rsb	r5, r3, #0
	ands	r0, r3, r5

	adr	r5, .Lcpu_switch_ffs_table

	/* X = R0 */
	orr	r4, r0, r0, lsl #4	/* r4 = X * 0x11 */
	orr	r4, r4, r4, lsl #6	/* r4 = X * 0x451 */
	rsb	r4, r4, r4, lsl #16	/* r4 = X * 0x0450fbaf */

	/* used further down, saves SA stall */
	ldr	r6, .Lqs

	/* now lookup in table indexed on top 6 bits of r4 */
	ldrb	r4, [ r5, r4, lsr #26 ]

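/*
 * Roughly, in C (an illustrative sketch only):
 *
 *	unsigned lsb  = whichqs & -whichqs;	   // isolate lowest set bit
 *	unsigned hash = (lsb * 0x0450fbaf) >> 26;  // perfect hash, 64 slots
 *	queue = cpu_switch_ffs_table[hash];	   // recover the bit number
 *
 * The three-instruction multiply above builds the product with the
 * 0x0450fbaf constant as X * 0x11 * 0x41 * 0xffff.
 */
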
	/* rem: r0 = bit mask of chosen queue (1 << r4) */
	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: interrupts are disabled */

	/* Get the address of the queue (&qs[queue]) */
	add	r5, r6, r4, lsl #3

	/*
	 * Get the lwp from the queue and place the next process in
	 * the queue at the head. This basically unlinks the lwp at
	 * the head of the queue.
	 */
	ldr	r6, [r5, #(L_FORW)]

	/* rem: r6 = new lwp */
	ldr	r7, [r6, #(L_FORW)]
	str	r7, [r5, #(L_FORW)]

	/*
	 * Test to see if the queue is now empty. If the head of the queue
	 * points to the queue itself then there are no more lwps in
	 * the queue. We can therefore clear the queue not empty flag held
	 * in r3.
	 */

	teq	r5, r7
	biceq	r3, r3, r0

	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED ANY MORE */

	/* Fix the back pointer for the lwp now at the head of the queue. */
	ldr	r0, [r6, #(L_BACK)]
	str	r0, [r7, #(L_BACK)]

	/* Update the RAM copy of the queue not empty flags word. */
	ldr	r7, .Lwhichqs
	str	r3, [r7]

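/*
 * The unlink above, roughly in C (an illustrative sketch; the run
 * queues are circular, with the queue head linked like an lwp):
 *
 *	l = q->l_forw;			// lwp at the head of the queue
 *	q->l_forw = l->l_forw;		// advance the head
 *	if (l->l_forw == q)		// queue drained?
 *		sched_whichqs &= ~bit;
 *	l->l_forw->l_back = l->l_back;	// fix the new head's back pointer
 */
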
	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs - NOT NEEDED ANY MORE */
	/* rem: r4 = queue number - NOT NEEDED ANY MORE */
	/* rem: r6 = new lwp */
	/* rem: interrupts are disabled */

	/* Clear the want_resched flag */
	ldr	r7, .Lwant_resched
	mov	r0, #0x00000000
	str	r0, [r7]

	/*
	 * Clear the back pointer of the lwp we have removed from
	 * the head of the queue. The new lwp is isolated now.
	 */
	str	r0, [r6, #(L_BACK)]

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	/*
	 * unlock the sched_lock, but leave interrupts off, for now.
	 */
	mov	r7, r1
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r7
#endif

.Lswitch_resume:
#ifdef MULTIPROCESSOR
	/* XXX use curcpu() */
	ldr	r0, .Lcpu_info_store
	str	r0, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

	/* Process is now on a processor. */
	mov	r0, #LSONPROC		/* l->l_stat = LSONPROC */
	str	r0, [r6, #(L_STAT)]

	/* We have a new curlwp now so make a note of it */
	ldr	r7, .Lcurlwp
	str	r6, [r7]

	/* Hook in a new pcb */
	ldr	r7, .Lcurpcb
	ldr	r0, [r6, #(L_ADDR)]
	str	r0, [r7]

	/* At this point we can allow IRQ's again. */
	IRQenable

	/* rem: r1 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/*
	 * If the new lwp is the same as the lwp that called
	 * cpu_switch() then we do not need to save and restore any
	 * contexts. This means we can make a quick exit.
	 * The test is simple: if curlwp on entry (now in r1) is the
	 * same as the lwp removed from the queue, we can jump to the exit.
	 */
	teq	r1, r6
	moveq	r4, #0x00000000		/* default to "didn't switch" */
	beq	.Lswitch_return

	/*
	 * At this point, we are guaranteed to be switching to
	 * a new lwp.
	 */
	mov	r4, #0x00000001

	/* Remember the old lwp in r0 */
	mov	r0, r1

	/*
	 * If the old lwp on entry to cpu_switch was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r0, #0x00000000
	beq	.Lswitch_exited

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Stage two : Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r1, [r0, #(L_ADDR)]

	/* Save all the registers in the old lwp's pcb */
	add	r7, r1, #(PCB_R8)
	stmia	r7, {r8-r13}

	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* Remember the old PCB. */
	mov	r8, r1

	/* r1 now free! */

	/* Get the user structure for the new process in r9 */
	ldr	r9, [r6, #(L_ADDR)]

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
	msr	cpsr_c, r2

	str	sp, [r8, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

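/*
 * sp is banked per CPU mode, so the undefined-mode stack pointer can
 * only be read from UND32 mode itself.  The hop above, as illustrative
 * pseudo-code (the pcb slot is named via the PCB_UND_SP assym offset):
 *
 *	old_cpsr = cpsr;
 *	cpsr = (old_cpsr & ~PSR_MODE) | PSR_UND32_MODE | I32_bit;
 *	pcb->und_sp = sp;		// banked UND32 sp
 *	cpsr = old_cpsr;		// back to SVC32
 */
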
	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/* What else needs to be saved? Only FPA stuff, when that is supported. */

	/* Third phase : restore saved context */

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/*
	 * Get the new L1 table pointer into r11.  If we're switching to
	 * an LWP with the same address space as the outgoing one, we can
	 * skip the cache purge and the TTB load.
	 *
	 * To avoid data dep stalls that would happen anyway, we try
	 * and get some useful work done in the mean time.
	 */
	ldr	r10, [r8, #(PCB_PAGEDIR)]	/* r10 = old L1 */
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */

	ldr	r3, .Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000

	teq	r10, r11		/* r10 == r11? */
	beq	.Lcs_context_switched	/* yes! */

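/*
 * The slow path below, roughly in C (an illustrative sketch;
 * cf_idcache_wbinv_all and cf_context_switch are the cpufuncs
 * members reached via the CF_* offsets):
 *
 *	if (old_pcb->pcb_pagedir != new_pcb->pcb_pagedir) {
 *		block_userspace_access = 1;
 *		cpufuncs.cf_idcache_wbinv_all();    // purge I/D caches
 *		disable_interrupts();
 *		block_userspace_access = 0;
 *		cpufuncs.cf_context_switch(new_L1); // load the new TTB
 *	}
 */
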
	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r3, .Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000
	str	r1, [r3]

	stmfd	sp!, {r0-r3}
	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
	ldmfd	sp!, {r0-r3}

.Lcs_cache_purge_skipped:
	/* At this point we need to kill IRQ's again. */
	IRQdisable

	/* rem: r2 = 0 */
	/* rem: r3 = &block_userspace_access */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */
	/* rem: r11 == new L1 */

	/*
	 * Interrupts are disabled so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
	str	r2, [r3]

	/* Switch the memory to the new process */
	ldr	r3, .Lcpufuncs
	mov	r0, r11
	mov	lr, pc
	ldr	pc, [r3, #CF_CONTEXT_SWITCH]

.Lcs_context_switched:
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE)
	msr	cpsr_c, r2

	ldr	sp, [r9, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

	/* Restore all the save registers */
	add	r7, r9, #PCB_R8
	ldmia	r7, {r8-r13}

	sub	r7, r7, #PCB_R8		/* restore PCB pointer */

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

#ifdef ARMFPE
	add	r0, r7, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* We can enable interrupts again */
	IRQenable

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r5, #(P_NRAS)]
	ldr	r4, [r7, #(PCB_TF)]	/* r4 = trapframe (used below) */
	teq	r2, #0			/* p->p_nras == 0? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:

	/* Get the spl level from the stack and update the current spl level */
	ldr	r0, [sp], #0x0004
	bl	_C_LABEL(splx)

	/* cpu_switch returns 1 == switched, 0 == didn't switch */
	mov	r0, r4

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}

.Lswitch_do_ras:
	ldr	r1, [r4, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r4, #(TF_PC)]
	b	.Lswitch_return
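
/*
 * The RAS check, roughly in C (an illustrative sketch only):
 *
 *	if (p->p_nras != 0) {
 *		addr = ras_lookup(p, tf->tf_pc);
 *		if (addr != (caddr_t)-1)
 *			tf->tf_pc = addr;	// restart the sequence
 *	}
 */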

.Lswitch_exited:
	/*
	 * We skip the cache purge because switch_exit()/switch_lwp_exit()
	 * already did it.  Load up registers the way
	 * .Lcs_cache_purge_skipped expects.  Userspace access is already
	 * blocked by switch_exit()/switch_lwp_exit().
	 */
	ldr	r9, [r6, #(L_ADDR)]	/* r9 = new PCB */
	ldr	r3, .Lblock_userspace_access
	mov	r2, #0x00000000
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */
	b	.Lcs_cache_purge_skipped

/*
 * cpu_switchto(struct lwp *current, struct lwp *next)
 * Switch to the specified next LWP
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 */
ENTRY(cpu_switchto)
	stmfd	sp!, {r4-r7, lr}

	/* Lower the spl level to spl0 and get the current spl level. */
	mov	r6, r0			/* save old lwp */
	mov	r5, r1			/* save new lwp */

#if defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	mov	r0, r6			/* restore old lwp */
	mov	r1, r5			/* restore new lwp */

	/* rem: r0 = old lwp */
	/* rem: r1 = new lwp */
	/* rem: interrupts are disabled */

	/*
	 * Okay, set up registers the way cpu_switch() wants them,
	 * and jump into the middle of it (where we bring up the
	 * new process).
	 */
	mov	r6, r1			/* r6 = new lwp */
#if defined(LOCKDEBUG)
	mov	r5, r0			/* preserve old lwp */
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r5			/* r1 = old lwp */
#else
	mov	r1, r0			/* r1 = old lwp */
#endif
	b	.Lswitch_resume

/*
 * void switch_exit(struct lwp *l, struct lwp *l0);
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l.  Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

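/*
 * Roughly (an illustrative C sketch): nothing is saved for l; we
 * purge the caches, adopt lwp0's pcb, stack and page tables, call
 * exit2(l) to schedule l's vmspace and kernel stack for reclamation,
 * and then fall into the cpu_switch() queue search with old lwp = NULL.
 */
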
/* LINTSTUB: Func: void switch_exit(struct lwp *l, struct lwp *l0) */
ENTRY(switch_exit)
	/*
	 * r0 = lwp
	 * r1 = lwp0
	 */

	mov	r3, r0

	/* In case we fault */
	ldr	r0, .Lcurlwp
	mov	r2, #0x00000000
	str	r2, [r0]

/*	ldr	r0, .Lcurpcb
	str	r2, [r0]*/

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, .Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to lwp0 context */

	stmfd	sp!, {r0-r3}

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

	ldmfd	sp!, {r0-r3}

	IRQdisable

	ldr	r2, [r1, #(L_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

	/* Restore all the save registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* This is not really needed ! */
	/* Yes it is for the su and fu routines */
	ldr	r0, .Lcurpcb
	str	r2, [r0]

	IRQenable

/*	str	r3, [sp, #-0x0004]!*/

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* exit2(l) */
	bl	_C_LABEL(exit2)

	/* Paranoia */
	mov	r0, #0x00000000
	ldr	r1, .Lcurlwp
	str	r0, [r1]

	ldr	r7, .Lwhichqs		/* r7 = &whichqs */
	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	.Lswitch_search

/*
 * void switch_lwp_exit(struct lwp *l, struct lwp *l0);
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l.  Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_lwp_exit(struct lwp *l, struct lwp *l0) */
ENTRY(switch_lwp_exit)
	/*
	 * r0 = lwp
	 * r1 = lwp0
	 */

	mov	r3, r0

	/* In case we fault */
	mov	r2, #0x00000000
	ldr	r0, .Lcurlwp
	str	r2, [r0]

/*	ldr	r0, .Lcurpcb
	str	r2, [r0]*/

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, .Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to lwp0 context */

	stmfd	sp!, {r0-r3}

	ldr	r0, .Lcpufuncs
	add	lr, pc, #.Lsle_cache_purged - . - 8
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

.Lsle_cache_purged:
	ldmfd	sp!, {r0-r3}

	IRQdisable

	ldr	r2, [r1, #(L_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, .Lcpufuncs
	add	lr, pc, #.Lsle_context_switched - . - 8
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

.Lsle_context_switched:
	/* Restore all the save registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* This is not really needed ! */
	/* Yes it is for the su and fu routines */
	ldr	r0, .Lcurpcb
	str	r2, [r0]

	IRQenable

/*	str	r3, [sp, #-0x0004]!*/

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* lwp_exit2(l) */
	bl	_C_LABEL(lwp_exit2)

	/* Paranoia */
	ldr	r1, .Lcurlwp
	mov	r0, #0x00000000
	str	r0, [r1]

	ldr	r7, .Lwhichqs		/* r7 = &whichqs */
	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	.Lswitch_search

/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	/*
	 * r0 = pcb
	 */

	/* Push registers.*/
	stmfd	sp!, {r4-r7, lr}

	/* Store all the registers in the process's pcb */
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}

	/* Pull the regs off the stack */
	ldmfd	sp!, {r4-r7, pc}

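/*
 * proc_trampoline()
 *
 * First code run by a newly forked lwp.  A sketch of the convention
 * (set up by the fork path, not restated anywhere in this file):
 * r4 holds the function to call and r5 its argument, so the call
 * below amounts to
 *
 *	(*r4)(r5, trapframe);		// r1 = sp points at the trapframe
 *
 * after which IRQs are masked and PULLFRAME drops back to user mode.
 */
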
ENTRY(proc_trampoline)
#ifdef MULTIPROCESSOR
	bl	_C_LABEL(proc_trampoline_mp)
#endif
	mov	r0, r5
	mov	r1, sp
	mov	lr, pc
	mov	pc, r4

	/* Kill irq's */
	mrs	r0, cpsr
	orr	r0, r0, #(I32_bit)
	msr	cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

	.type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
.Lcpu_switch_ffs_table:
/* same as the ffs table, but each entry is one less (bit number, not ffs) */
/*	0    1    2    3    4    5    6    7		*/
	.byte	 0,  0,  1, 12,  2,  6,  0, 13	/*  0- 7 */
	.byte	 3,  0,  7,  0,  0,  0,  0, 14	/*  8-15 */
	.byte	10,  4,  0,  0,  8,  0,  0, 25	/* 16-23 */
	.byte	 0,  0,  0,  0,  0, 21, 27, 15	/* 24-31 */
	.byte	31, 11,  5,  0,  0,  0,  0,  0	/* 32-39 */
	.byte	 9,  0,  0, 24,  0,  0, 20, 26	/* 40-47 */
	.byte	30,  0,  0,  0,  0, 23,  0, 19	/* 48-55 */
	.byte	29,  0, 22, 18, 28, 17, 16,  0	/* 56-63 */
