/*	$NetBSD: cpuswitch.S,v 1.29 2003/01/17 22:28:49 thorpej Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_multiprocessor.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/asm.h>

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled since FIQs are special.
 */

#define IRQdisable \
	mrs	r14, cpsr		; \
	orr	r14, r14, #(I32_bit)	; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr		; \
	bic	r14, r14, #(I32_bit)	; \
	msr	cpsr_c, r14
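
/*
 * Note: both macros overwrite r14 (lr), so they must only be used
 * where lr is dead or has already been saved.
 */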

	.text

.Lwhichqs:
	.word	_C_LABEL(sched_whichqs)

.Lqs:
	.word	_C_LABEL(sched_qs)

/*
 * cpuswitch()
 *
 * Performs a process context switch.
 * This function has several entry points.
 */

#ifdef MULTIPROCESSOR
.Lcpu_info_store:
	.word	_C_LABEL(cpu_info_store)
.Lcurlwp:
	/* FIXME: This is bogus in the general case. */
	.word	_C_LABEL(cpu_info_store) + CI_CURLWP

.Lcurpcb:
	.word	_C_LABEL(cpu_info_store) + CI_CURPCB
#else
.Lcurlwp:
	.word	_C_LABEL(curlwp)

.Lcurpcb:
	.word	_C_LABEL(curpcb)
#endif

.Lwant_resched:
	.word	_C_LABEL(want_resched)

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#ifndef MULTIPROCESSOR
	.data
	.global	_C_LABEL(curpcb)
_C_LABEL(curpcb):
	.word	0x00000000
	.text
#endif

.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)

.Lcpu_do_powersave:
	.word	_C_LABEL(cpu_do_powersave)

/*
 * Idle loop, exercised while waiting for a process to wake up.
 *
 * NOTE: When we jump back to .Lswitch_search, we must have a
 * pointer to whichqs in r7, which is what it is when we arrive
 * here.
 */
/* LINTSTUB: Ignore */
ASENTRY_NP(idle)
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_unlock_idle)
#endif
	ldr	r3, .Lcpu_do_powersave

	/* Enable interrupts */
	IRQenable

	/* If we don't want to sleep, use a simpler loop. */
	ldr	r3, [r3]		/* r3 = cpu_do_powersave */
	teq	r3, #0
	bne	2f

	/* Non-powersave idle. */
1:	/* should maybe do uvm pageidlezero stuff here */
	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search
	b	1b

2:	/* Powersave idle. */
	ldr	r4, .Lcpufuncs
3:	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search

	/* if saving power, don't want to pageidlezero */
	mov	r0, #0
	adr	lr, 3b
	ldr	pc, [r4, #(CF_SLEEP)]
	/* loops back around */


/*
 * Find a new lwp to run, save the current context and
 * load the new context
 *
 * Arguments:
 *	r0	'struct lwp *' of the current LWP
 */

ENTRY(cpu_switch)
/*
 * Local register usage. Some of these registers are out of date.
 * r1 = oldlwp
 * r2 = spl level
 * r3 = whichqs
 * r4 = queue
 * r5 = &qs[queue]
 * r6 = newlwp
 * r7 = scratch
 */
	stmfd	sp!, {r4-r7, lr}

	/*
	 * Indicate that there is no longer a valid process (curlwp = 0).
	 * Zero the current PCB pointer while we're at it.
	 */
	ldr	r7, .Lcurlwp
	ldr	r6, .Lcurpcb
	mov	r2, #0x00000000
	str	r2, [r7]		/* curlwp = NULL */
	str	r2, [r6]		/* curpcb = NULL */

	/* stash the old lwp while we call functions */
	mov	r5, r0

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else	/* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif	/* spl0 */
#endif	/* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	/* First phase : find a new lwp */

	ldr	r7, .Lwhichqs

	/* rem: r5 = old lwp */
	/* rem: r7 = &whichqs */

.Lswitch_search:
	IRQdisable
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues? */
	ldr	r3, [r7]

	/* If not, we must idle until we do. */
	teq	r3, #0x00000000
	beq	_ASM_LABEL(idle)

	/* put old lwp back in r1 */
	mov	r1, r5

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: interrupts are disabled */

	/*
	 * We have found an active queue. Currently we do not know which
	 * queue is active, just that one of them is.
	 */
	/*
	 * This is the ffs algorithm devised by D. Seal and posted to
	 * comp.sys.arm on 16 Feb 1994.
	 */
	rsb	r5, r3, #0
	ands	r0, r3, r5

	adr	r5, .Lcpu_switch_ffs_table

	/* X = R0 */
	orr	r4, r0, r0, lsl #4	/* r4 = X * 0x11 */
	orr	r4, r4, r4, lsl #6	/* r4 = X * 0x451 */
	rsb	r4, r4, r4, lsl #16	/* r4 = X * 0x0450fbaf */

	/* used further down, saves SA stall */
	ldr	r6, .Lqs

	/* now lookup in table indexed on top 6 bits of r4 */
	ldrb	r4, [ r5, r4, lsr #26 ]
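
	/*
	 * Roughly equivalent C for the ffs computation above (an
	 * illustrative sketch only; "q" and "ffs_table" are hypothetical
	 * names for r3 and .Lcpu_switch_ffs_table):
	 *
	 *	unsigned bit = q & -q;			     // isolate lowest set bit
	 *	queue = ffs_table[(bit * 0x0450fbaf) >> 26]; // = ffs(q) - 1
	 *
	 * Multiplying an isolated bit by 0x0450fbaf leaves a distinct
	 * value in the top six bits for each of the 32 single-bit inputs,
	 * e.g. q = 0x10: 0x10 * 0x0450fbaf = 0x450fbaf0, top six bits
	 * = 17, and the table below holds 4 = ffs(0x10) - 1 at index 17.
	 */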

	/* rem: r0 = bit mask of chosen queue (1 << r4) */
	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: interrupts are disabled */

	/* Get the address of the queue (&qs[queue]) */
	add	r5, r6, r4, lsl #3
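	/* Each queue header is two pointers (8 bytes), hence queue << 3. */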

	/*
	 * Get the lwp from the queue and place the next process in
	 * the queue at the head. This basically unlinks the lwp at
	 * the head of the queue.
	 */
	ldr	r6, [r5, #(L_FORW)]

	/* rem: r6 = new lwp */
	ldr	r7, [r6, #(L_FORW)]
	str	r7, [r5, #(L_FORW)]

	/*
	 * Test to see if the queue is now empty. If the head of the queue
	 * points to the queue itself then there are no more lwps in
	 * the queue. We can therefore clear the queue not empty flag held
	 * in r3.
	 */

	teq	r5, r7
	biceq	r3, r3, r0

	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED ANY MORE */

	/* Fix the back pointer for the lwp now at the head of the queue. */
	ldr	r0, [r6, #(L_BACK)]
	str	r0, [r7, #(L_BACK)]

	/* Update the RAM copy of the queue not empty flags word. */
	ldr	r7, .Lwhichqs
	str	r3, [r7]

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs - NOT NEEDED ANY MORE */
	/* rem: r4 = queue number - NOT NEEDED ANY MORE */
	/* rem: r6 = new lwp */
	/* rem: interrupts are disabled */

	/* Clear the want_resched flag */
	ldr	r7, .Lwant_resched
	mov	r0, #0x00000000
	str	r0, [r7]

	/*
	 * Clear the back pointer of the lwp we have removed from
	 * the head of the queue. The new lwp is isolated now.
	 */
	str	r0, [r6, #(L_BACK)]

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	/*
	 * unlock the sched_lock, but leave interrupts off, for now.
	 */
	mov	r7, r1
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r7
#endif

.Lswitch_resume:
#ifdef MULTIPROCESSOR
	/* XXX use curcpu() */
	ldr	r0, .Lcpu_info_store
	str	r0, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

	/* Process is now on a processor. */
	mov	r0, #LSONPROC		/* l->l_stat = LSONPROC */
	str	r0, [r6, #(L_STAT)]

	/* We have a new curlwp now so make a note of it */
	ldr	r7, .Lcurlwp
	str	r6, [r7]

	/* Hook in a new pcb */
	ldr	r7, .Lcurpcb
	ldr	r0, [r6, #(L_ADDR)]
	str	r0, [r7]

	/* At this point we can allow IRQ's again. */
	IRQenable

	/* rem: r1 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/*
	 * If the new process is the same as the process that called
	 * cpu_switch() then we do not need to save and restore any
	 * contexts. This means we can make a quick exit.
	 * The test is simple: if curlwp on entry (now in r1) is the
	 * same as the lwp removed from the queue, we can jump to the exit.
	 */
	teq	r1, r6
	moveq	r4, #0x00000000		/* default to "didn't switch" */
	beq	.Lswitch_return

	/*
	 * At this point, we are guaranteed to be switching to
	 * a new lwp.
	 */
	mov	r4, #0x00000001

	/* Remember the old lwp in r0 */
	mov	r0, r1

	/*
	 * If the old lwp on entry to cpu_switch was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r0, #0x00000000
	beq	.Lswitch_exited

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/* Stage two : Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r1, [r0, #(L_ADDR)]

	/* Save all the registers in the old lwp's pcb */
	add	r7, r1, #(PCB_R8)
	stmia	r7, {r8-r13}

	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* Remember the old PCB. */
	mov	r8, r1

	/* r1 now free! */

	/* Get the user structure for the new process in r9 */
	ldr	r9, [r6, #(L_ADDR)]

	/*
	 * The undefined-mode stack pointer is banked, so we have to
	 * switch into UND32 mode to save it. This can be optimised:
	 * we know we want to go from SVC32 mode to UND32 mode.
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
	msr	cpsr_c, r2

	str	sp, [r8, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new process */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/* What else needs to be saved? Only FPA stuff when that is supported. */

	/* Third phase : restore saved context */

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/*
	 * Get the new L1 table pointer into r11. If we're switching to
	 * an LWP with the same address space as the outgoing one, we can
	 * skip the cache purge and the TTB load.
	 *
	 * To avoid data dep stalls that would happen anyway, we try
	 * to get some useful work done in the meantime.
	 */
	ldr	r10, [r8, #(PCB_PAGEDIR)]	/* r10 = old L1 */
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */

	ldr	r3, .Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000

	teq	r10, r11		/* r10 == r11? */
	beq	.Lcs_context_switched	/* yes! */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	str	r1, [r3]

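	/*
	 * Call cpufuncs.cf_idcache_wbinv_all to write back and invalidate
	 * the caches. "mov lr, pc" before loading pc is the pre-BLX idiom
	 * for an indirect call: pc reads as the current instruction + 8,
	 * so lr ends up pointing at the instruction after the load.
	 */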
	stmfd	sp!, {r0-r3}
	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
	ldmfd	sp!, {r0-r3}

.Lcs_cache_purge_skipped:
	/* At this point we need to kill IRQ's again. */
	IRQdisable

	/* rem: r2 = 0 */
	/* rem: r3 = &block_userspace_access */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */
	/* rem: r11 = new L1 */

	/*
	 * Interrupts are disabled so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
	str	r2, [r3]

	/* Switch the memory to the new process */
	ldr	r3, .Lcpufuncs
	mov	r0, r11
	mov	lr, pc
	ldr	pc, [r3, #CF_CONTEXT_SWITCH]
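	/*
	 * cf_context_switch is expected to load the new L1 into the
	 * TTB and invalidate the TLBs (see the per-CPU cpufunc
	 * implementations).
	 */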

.Lcs_context_switched:
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */

	/*
	 * The undefined-mode stack pointer is banked, so we have to
	 * switch into UND32 mode to restore it. This can be optimised:
	 * we know we want to go from SVC32 mode to UND32 mode.
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE)
	msr	cpsr_c, r2

	ldr	sp, [r9, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

	/* Restore all the saved registers */
	add	r7, r9, #PCB_R8
	ldmia	r7, {r8-r13}

	sub	r7, r7, #PCB_R8		/* restore PCB pointer */

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

#ifdef ARMFPE
	add	r0, r7, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* We can enable interrupts again */
	IRQenable

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS): if the new lwp
	 * was preempted in the middle of one, its pc must be wound back
	 * to the start of the sequence (see .Lswitch_do_ras below).
	 */
	ldr	r2, [r5, #(P_NRAS)]
	ldr	r4, [r7, #(PCB_TF)]	/* r4 = trapframe (used below) */
	teq	r2, #0			/* p->p_nras == 0? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:

	/* Get the spl level from the stack and update the current spl level */
	ldr	r0, [sp], #0x0004
	bl	_C_LABEL(splx)

	/* cpu_switch returns 1 == switched, 0 == didn't switch */
	mov	r0, r4

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}

.Lswitch_do_ras:
	ldr	r1, [r4, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r4, #(TF_PC)]
	b	.Lswitch_return

.Lswitch_exited:
	/*
	 * We skip the cache purge because switch_exit() already did it.
	 * Load up registers the way .Lcs_cache_purge_skipped expects.
	 * Userspace access is already blocked by switch_exit().
	 */
	ldr	r9, [r6, #(L_ADDR)]		/* r9 = new PCB */
	ldr	r3, .Lblock_userspace_access
	mov	r2, #0x00000000
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */
	b	.Lcs_cache_purge_skipped

/*
 * cpu_switchto(struct lwp *current, struct lwp *next)
 * Switch to the specified next LWP
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 */
ENTRY(cpu_switchto)
	stmfd	sp!, {r4-r7, lr}

	mov	r6, r0		/* save old lwp */
	mov	r5, r1		/* save new lwp */

#if defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else	/* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif	/* spl0 */
#endif	/* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	mov	r0, r6		/* restore old lwp */
	mov	r1, r5		/* restore new lwp */

	/* rem: r0 = old lwp */
	/* rem: r1 = new lwp */
	/* rem: interrupts are disabled */

	/*
	 * Okay, set up registers the way cpu_switch() wants them,
	 * and jump into the middle of it (where we bring up the
	 * new process).
	 */
	mov	r6, r1		/* r6 = new lwp */
#if defined(LOCKDEBUG)
	mov	r5, r0		/* preserve old lwp */
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r5		/* r1 = old lwp */
#else
	mov	r1, r0		/* r1 = old lwp */
#endif
	b	.Lswitch_resume

/*
 * void switch_exit(struct lwp *l, struct lwp *l0, void (*exit)(struct lwp *));
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l. Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_exit(struct lwp *l, struct lwp *l0, void (*)(struct lwp *)) */
ENTRY(switch_exit)
	/*
	 * The process is going away, so we can use callee-saved
	 * registers here without having to save them.
	 */

	mov	r4, r0
	ldr	r0, .Lcurlwp

	mov	r5, r1
	ldr	r1, .Lblock_userspace_access

	mov	r6, r2

	/*
	 * r4 = lwp
	 * r5 = lwp0
	 * r6 = exit func
	 */


	mov	r2, #0x00000000		/* curlwp = NULL */
	str	r2, [r0]

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	mov	r2, #0x00000001
	str	r2, [r1]

	/* Switch to lwp0 context */

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

	ldr	r2, [r5, #(L_ADDR)]

	/*
	 * r2 = lwp0's PCB
	 */

	IRQdisable

	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_CONTEXT_SWITCH]

	ldr	r0, .Lcurpcb

	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	str	r2, [r0]		/* curpcb = lwp0's PCB */

	IRQenable

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r4			/* {lwp_}exit2(l) */
	mov	lr, pc
	mov	pc, r6

	ldr	r7, .Lwhichqs		/* r7 = &whichqs */
	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	.Lswitch_search

/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	/*
	 * r0 = pcb
	 */

	/* Push registers. */
	stmfd	sp!, {r4-r7, lr}

	/* Store all the registers in the process's pcb */
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}

	/* Pull the regs off the stack */
	ldmfd	sp!, {r4-r7, pc}

ENTRY(proc_trampoline)
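	/*
	 * Entered when a newly forked lwp is first scheduled: the
	 * switchframe built at fork time is assumed to leave the
	 * function to call in r4 and its argument in r5 (the
	 * convention implied by the code below).
	 */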
#ifdef MULTIPROCESSOR
	bl	_C_LABEL(proc_trampoline_mp)
#endif
	mov	r0, r5
	mov	r1, sp
	mov	lr, pc
	mov	pc, r4

	/* Kill irq's */
	mrs	r0, cpsr
	orr	r0, r0, #(I32_bit)
	msr	cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

	.type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
.Lcpu_switch_ffs_table:
/* same as ffs table but all nums are -1 from that */
/*		 0   1   2   3   4   5   6   7 */
	.byte	 0,  0,  1, 12,  2,  6,  0, 13	/*  0- 7 */
	.byte	 3,  0,  7,  0,  0,  0,  0, 14	/*  8-15 */
	.byte	10,  4,  0,  0,  8,  0,  0, 25	/* 16-23 */
	.byte	 0,  0,  0,  0,  0, 21, 27, 15	/* 24-31 */
	.byte	31, 11,  5,  0,  0,  0,  0,  0	/* 32-39 */
	.byte	 9,  0,  0, 24,  0,  0, 20, 26	/* 40-47 */
	.byte	30,  0,  0,  0,  0, 23,  0, 19	/* 48-55 */
	.byte	29,  0, 22, 18, 28, 17, 16,  0	/* 56-63 */