/*	$NetBSD: cpuswitch.S,v 1.3.2.24 2002/09/18 23:57:32 thorpej Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/asm.h>

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled since FIQs are special.
 */

#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

/*
 * setrunqueue() and remrunqueue()
 *
 * Functions to add an lwp to and remove an lwp from the run queues.
 */

	.text

.Lwhichqs:
	.word	_C_LABEL(sched_whichqs)

.Lqs:
	.word	_C_LABEL(sched_qs)

/*
 * On entry
 *	r0 = lwp
 */

ENTRY(setrunqueue)
	/*
	 * Local register usage
	 *	r0 = lwp
	 *	r1 = queue
	 *	r2 = &qs[queue] and temp
	 *	r3 = temp
	 *	r12 = whichqs
	 */
#ifdef DIAGNOSTIC
	ldr	r1, [r0, #(L_BACK)]
	teq	r1, #0x00000000
	bne	.Lsetrunqueue_erg

	ldr	r1, [r0, #(L_WCHAN)]
	teq	r1, #0x00000000
	bne	.Lsetrunqueue_erg
#endif

	/* Get the priority of the queue */
	ldrb	r1, [r0, #(L_PRIORITY)]

	/* Indicate that there is a process on this queue */
	ldr	r12, .Lwhichqs
	mov	r1, r1, lsr #2
	ldr	r2, [r12]
	mov	r3, #0x00000001
	mov	r3, r3, lsl r1
	orr	r2, r2, r3
	str	r2, [r12]

	/* Get the address of the queue */
	ldr	r2, .Lqs
	add	r1, r2, r1, lsl #3

	/* Hook the lwp in at the tail of the queue */
	str	r1, [r0, #(L_FORW)]
	ldr	r2, [r1, #(L_BACK)]

	str	r0, [r1, #(L_BACK)]
#ifdef DIAGNOSTIC
	teq	r2, #0x00000000
	beq	.Lsetrunqueue_erg
#endif
	str	r0, [r2, #(L_FORW)]
	str	r2, [r0, #(L_BACK)]

	mov	pc, lr

#ifdef DIAGNOSTIC
.Lsetrunqueue_erg:
	mov	r2, r1
	mov	r1, r0
	add	r0, pc, #.Ltext1 - . - 8
	bl	_C_LABEL(printf)

	ldr	r2, .Lqs
	ldr	r1, [r2]
	add	r0, pc, #.Ltext2 - . - 8
	b	_C_LABEL(panic)

.Ltext1:
	.asciz	"setrunqueue : %08x %08x\n"
.Ltext2:
	.asciz	"setrunqueue : [qs]=%08x qs=%08x\n"
	.align	0
#endif
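
/*
 * In C, setrunqueue() above is roughly (a sketch only; the queue head
 * is treated as a struct lwp so that its two link words line up with
 * l_forw/l_back, which is what the offsets above rely on):
 *
 *	void
 *	setrunqueue(struct lwp *l)
 *	{
 *		int q = l->l_priority >> 2;
 *		struct lwp *hd = (struct lwp *)&sched_qs[q];
 *
 *		sched_whichqs |= (1 << q);
 *		l->l_forw = hd;			// link in at the tail
 *		l->l_back = hd->l_back;
 *		hd->l_back->l_forw = l;
 *		hd->l_back = l;
 *	}
 */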

/*
 * On entry
 *	r0 = lwp
 */

ENTRY(remrunqueue)
	/*
	 * Local register usage
	 *	r0 = lwp
	 *	r1 = queue
	 *	r2 = &qs[queue] and scratch
	 *	r3 = scratch
	 *	r12 = whichqs
	 */

	/* Get the priority of the queue */
	ldrb	r1, [r0, #(L_PRIORITY)]
	mov	r1, r1, lsr #2

	/* Unhook the lwp */
	ldr	r2, [r0, #(L_FORW)]
	ldr	r3, [r0, #(L_BACK)]

	str	r3, [r2, #(L_BACK)]
	str	r2, [r3, #(L_FORW)]

	/* If the queue is now empty, clear the queue-not-empty flag */
	teq	r2, r3

	ldreq	r12, .Lwhichqs
	moveq	r3, #0x00000001
	ldreq	r2, [r12]
	moveq	r3, r3, lsl r1
	biceq	r2, r2, r3
	streq	r2, [r12]

	/* Remove the back pointer for the lwp */
	mov	r1, #0x00000000
	str	r1, [r0, #(L_BACK)]

	mov	pc, lr
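
/*
 * And remrunqueue(), under the same queue-head convention (sketch):
 *
 *	void
 *	remrunqueue(struct lwp *l)
 *	{
 *		int q = l->l_priority >> 2;
 *
 *		l->l_forw->l_back = l->l_back;
 *		l->l_back->l_forw = l->l_forw;
 *		if (l->l_forw == l->l_back)	// only the head is left
 *			sched_whichqs &= ~(1 << q);
 *		l->l_back = NULL;		// firewall
 *	}
 */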


/*
 * cpuswitch()
 *
 * Performs a process context switch.
 * This function has several entry points.
 */

.Lcurlwp:
	.word	_C_LABEL(curlwp)

.Lcurpcb:
	.word	_C_LABEL(curpcb)

.Lwant_resched:
	.word	_C_LABEL(want_resched)

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

	.data
	.global	_C_LABEL(curpcb)
_C_LABEL(curpcb):
	.word	0x00000000
	.text

.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)

.Lcpu_do_powersave:
	.word	_C_LABEL(cpu_do_powersave)

/*
 * Idle loop, exercised while waiting for a process to wake up.
 *
 * NOTE: When we jump back to .Lswitch_search, we must have a
 * pointer to whichqs in r7, which is what it is when we arrive
 * here.
 */
/* LINTSTUB: Ignore */
ASENTRY_NP(idle)
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_unlock_idle)
#endif
	ldr	r3, .Lcpu_do_powersave

	/* Enable interrupts */
	IRQenable

	/* If we don't want to sleep, use a simpler loop. */
	ldr	r3, [r3]		/* r3 = cpu_do_powersave */
	teq	r3, #0
	bne	2f

	/* Non-powersave idle. */
1:	/* should maybe do uvm pageidlezero stuff here */
	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search
	b	1b

2:	/* Powersave idle. */
	ldr	r4, .Lcpufuncs
3:	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search

	/* if saving power, don't want to pageidlezero */
	mov	r0, #0
	add	lr, pc, #3b - . - 8
	ldr	pc, [r4, #(CF_SLEEP)]
	/* loops back around */
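
/*
 * In outline, the idle loop above is (a hedged C sketch; the sleep
 * hook is whatever function sits at the CF_SLEEP offset in cpufuncs,
 * assumed here to be the cf_sleep member):
 *
 *	if (!cpu_do_powersave) {
 *		while (sched_whichqs == 0)
 *			continue;		// simple spin
 *	} else {
 *		while (sched_whichqs == 0)
 *			cpufuncs.cf_sleep(0);	// wake on interrupt
 *	}
 *	goto switch_search;
 */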


/*
 * Find a new lwp to run, save the current context and
 * load the new context.
 *
 * Arguments:
 *	r0	'struct lwp *' of the current LWP
 */

ENTRY(cpu_switch)
	/*
	 * Local register usage. Some of these register assignments go
	 * stale later in the function.
	 *	r1 = oldlwp
	 *	r2 = spl level
	 *	r3 = whichqs
	 *	r4 = queue
	 *	r5 = &qs[queue]
	 *	r6 = newlwp
	 *	r7 = scratch
	 */
	stmfd	sp!, {r4-r7, lr}

	/*
	 * Get the current lwp and indicate that there is no longer
	 * a valid lwp (curlwp = 0). Zero the current PCB pointer
	 * while we're at it.
	 */
	ldr	r7, .Lcurlwp
	ldr	r6, .Lcurpcb
	mov	r0, #0x00000000
	ldr	r1, [r7]		/* r1 = curlwp */
	str	r0, [r7]		/* curlwp = NULL */
	str	r0, [r6]		/* curpcb = NULL */

	/* stash the old lwp while we call functions */
	mov	r5, r1

#if defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	/* First phase : find a new lwp */

	ldr	r7, .Lwhichqs

	/* rem: r5 = old lwp */
	/* rem: r7 = &whichqs */

.Lswitch_search:
	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues? */
	ldr	r3, [r7]

	/* If not we must idle until we do. */
	teq	r3, #0x00000000
	beq	_ASM_LABEL(idle)

	/* put old lwp back in r1 */
	mov	r1, r5

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: interrupts are disabled */

	/*
	 * We have found an active queue. Currently we do not know which
	 * queue is active, just that one of them is.
	 */
	/*
	 * This is the ffs algorithm devised by D. Seal and posted to
	 * comp.sys.arm on 16 Feb 1994.
	 */
	rsb	r5, r3, #0
	ands	r0, r3, r5

	adr	r5, .Lcpu_switch_ffs_table

	/* X = R0 */
	orr	r4, r0, r0, lsl #4	/* r4 = X * 0x11 */
	orr	r4, r4, r4, lsl #6	/* r4 = X * 0x451 */
	rsb	r4, r4, r4, lsl #16	/* r4 = X * 0x0450fbaf */

	/* used further down, saves SA stall */
	ldr	r6, .Lqs

	/* now lookup in table indexed on top 6 bits of r4 */
	ldrb	r4, [r5, r4, lsr #26]
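
	/*
	 * In C, the isolate-multiply-lookup above is (sketch):
	 *
	 *	bit = whichqs & -whichqs;	// isolate lowest set bit
	 *	queue = table[(bit * 0x0450fbaf) >> 26];
	 *
	 * 0x11 * 0x41 * 0xffff == 0x0450fbaf, which is why the product
	 * can be formed with the two orrs and the rsb.
	 */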

	/* rem: r0 = bit mask of chosen queue (1 << r4) */
	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: interrupts are disabled */

	/* Get the address of the queue (&qs[queue]) */
	add	r5, r6, r4, lsl #3

	/*
	 * Get the lwp from the queue and place the next lwp in
	 * the queue at the head. This basically unlinks the lwp at
	 * the head of the queue.
	 */
	ldr	r6, [r5, #(L_FORW)]

	/* rem: r6 = new lwp */
	ldr	r7, [r6, #(L_FORW)]
	str	r7, [r5, #(L_FORW)]

	/*
	 * Test to see if the queue is now empty. If the head of the queue
	 * points to the queue itself then there are no more lwps in
	 * the queue. We can therefore clear the queue-not-empty flag held
	 * in r3.
	 */

	teq	r5, r7
	biceq	r3, r3, r0

	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED ANY MORE */

	/* Fix the back pointer for the lwp now at the head of the queue. */
	ldr	r0, [r6, #(L_BACK)]
	str	r0, [r7, #(L_BACK)]

	/* Update the RAM copy of the queue-not-empty flags word. */
	ldr	r7, .Lwhichqs
	str	r3, [r7]

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs - NOT NEEDED ANY MORE */
	/* rem: r4 = queue number - NOT NEEDED ANY MORE */
	/* rem: r6 = new lwp */
	/* rem: interrupts are disabled */

	/* Clear the want_resched flag */
	ldr	r7, .Lwant_resched
	mov	r0, #0x00000000
	str	r0, [r7]

	/*
	 * Clear the back pointer of the lwp we have removed from
	 * the head of the queue. The new lwp is isolated now.
	 */
	str	r0, [r6, #(L_BACK)]

#if defined(LOCKDEBUG)
	/*
	 * unlock the sched_lock, but leave interrupts off, for now.
	 */
	mov	r7, r1
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r7
#endif

.Lswitch_resume:
	/* l->l_cpu initialized in fork1() for single-processor */

	/* The lwp is now on a processor. */
	mov	r0, #LSONPROC		/* l->l_stat = LSONPROC */
	str	r0, [r6, #(L_STAT)]

	/* We have a new curlwp now so make a note of it */
	ldr	r7, .Lcurlwp
	str	r6, [r7]

	/* Hook in a new pcb */
	ldr	r7, .Lcurpcb
	ldr	r0, [r6, #(L_ADDR)]
	str	r0, [r7]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r1 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/*
	 * If the new lwp is the same as the lwp that called
	 * cpu_switch() then we do not need to save and restore any
	 * contexts. This means we can make a quick exit.
	 * The test is simple: if curlwp on entry (now in r1) is the
	 * same as the lwp removed from the queue we can jump to the exit.
	 */
	teq	r1, r6
	moveq	r4, #0x00000000		/* default to "didn't switch" */
	beq	.Lswitch_return

	/*
	 * At this point, we are guaranteed to be switching to
	 * a new lwp.
	 */
	mov	r4, #0x00000001

	/* Remember the old lwp in r0 */
	mov	r0, r1

	/*
	 * If the old lwp on entry to cpu_switch was zero then the
	 * lwp that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new lwp.
	 */
	teq	r0, #0x00000000
	beq	.Lswitch_exited

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Stage two : Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r1, [r0, #(L_ADDR)]

	/* Save all the registers in the old lwp's pcb */
	add	r7, r1, #(PCB_R8)
	stmia	r7, {r8-r13}

	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new lwp.
	 */

	/* Remember the old PCB. */
	mov	r8, r1

	/* r1 now free! */

	/* Get the user structure for the new lwp in r9 */
	ldr	r9, [r6, #(L_ADDR)]

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode.
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
	msr	cpsr_c, r2

	str	sp, [r8, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */
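
	/*
	 * Note: the undefined-mode banked sp is visible only from
	 * UND32 mode, so the block above briefly switches mode (with
	 * IRQs masked via I32_bit) just to store sp into the pcb,
	 * then drops straight back into SVC32.
	 */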

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/* What else needs to be saved? Only FPA stuff when that is supported. */

	/* Third phase : restore saved context */

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/*
	 * Get the new L1 table pointer into r11. If we're switching to
	 * an LWP with the same address space as the outgoing one, we can
	 * skip the cache purge and the TTB load.
	 *
	 * To avoid data dep stalls that would happen anyway, we try
	 * and get some useful work done in the mean time.
	 */
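	/*
	 * In C, the check below amounts to (sketch):
	 *
	 *	if (opcb->pcb_pagedir == npcb->pcb_pagedir)
	 *		goto cs_context_switched;  // same L1: skip purge/TTB load
	 */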
	ldr	r10, [r8, #(PCB_PAGEDIR)]	/* r10 = old L1 */
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */

	ldr	r3, .Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000

	teq	r10, r11		/* r10 == r11? */
	beq	.Lcs_context_switched	/* yes! */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	str	r1, [r3]

	stmfd	sp!, {r0-r3}
	ldr	r1, .Lcpufuncs
	add	lr, pc, #.Lcs_cache_purged - . - 8
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]

.Lcs_cache_purged:
	ldmfd	sp!, {r0-r3}

.Lcs_cache_purge_skipped:
	/* At this point we need to kill IRQs again. */
	IRQdisable

	/* rem: r2 = 0 */
	/* rem: r3 = &block_userspace_access */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */
	/* rem: r11 = new L1 */

	/*
	 * Interrupts are disabled so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
	str	r2, [r3]

	/* Switch the memory to the new lwp */
	ldr	r3, .Lcpufuncs
	mov	r0, r11
	add	lr, pc, #.Lcs_context_switched - . - 8
	ldr	pc, [r3, #CF_CONTEXT_SWITCH]

.Lcs_context_switched:
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode.
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE)
	msr	cpsr_c, r2

	ldr	sp, [r9, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

	/* Restore all the saved registers */
	add	r7, r9, #PCB_R8
	ldmia	r7, {r8-r13}

	sub	r7, r7, #PCB_R8		/* restore PCB pointer */

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

#ifdef ARMFPE
	add	r0, r7, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* We can enable interrupts again */
	IRQenable

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r5, #(P_NRAS)]
	ldr	r4, [r7, #(PCB_TF)]	/* r4 = trapframe (used below) */
	teq	r2, #0			/* p->p_nras == 0? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:

	/* Get the spl level from the stack and update the current spl level */
	ldr	r0, [sp], #0x0004
	bl	_C_LABEL(splx)

	/* cpu_switch returns 1 == switched, 0 == didn't switch */
	mov	r0, r4

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}

.Lswitch_do_ras:
	ldr	r1, [r4, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r4, #(TF_PC)]
	b	.Lswitch_return
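
/*
 * The RAS check above is, in C (sketch; tf is the lwp's trapframe):
 *
 *	if (p->p_nras != 0) {
 *		void *ra = ras_lookup(p, (caddr_t)tf->tf_pc);
 *		if (ra != (void *)-1)
 *			tf->tf_pc = (u_int)ra;	// restart the sequence
 *	}
 */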

.Lswitch_exited:
	/*
	 * We skip the cache purge because switch_exit()/switch_lwp_exit()
	 * already did it. Load up registers the way .Lcs_cache_purge_skipped
	 * expects. Userspace access is already blocked by switch_exit()/
	 * switch_lwp_exit().
	 */
	ldr	r9, [r6, #(L_ADDR)]	/* r9 = new PCB */
	ldr	r3, .Lblock_userspace_access
	mov	r2, #0x00000000
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */
	b	.Lcs_cache_purge_skipped

/*
 * cpu_preempt(struct lwp *current, struct lwp *next)
 *
 * Switch to the specified next LWP.
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 */
ENTRY(cpu_preempt)
	stmfd	sp!, {r4-r7, lr}

	mov	r6, r0			/* save old lwp */
	mov	r5, r1			/* save new lwp */

#if defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues? */
	ldr	r7, .Lwhichqs
	ldr	r3, [r7]

	/* If none, panic! */
	teq	r3, #0x00000000
	beq	.Lpreempt_noqueues

	mov	r0, r6			/* restore old lwp */
	mov	r1, r5			/* restore new lwp */

	/* rem: r0 = old lwp */
	/* rem: r1 = new lwp */
	/* rem: r3 = whichqs */
	/* rem: r7 = &whichqs */
	/* rem: interrupts are disabled */

	/* Compute the queue bit corresponding to the new lwp. */
	ldrb	r4, [r1, #(L_PRIORITY)]
	mov	r2, #0x00000001
	mov	r4, r4, lsr #2		/* queue number */
	mov	r2, r2, lsl r4		/* queue bit */

	/* rem: r0 = old lwp */
	/* rem: r1 = new lwp */
	/* rem: r2 = queue bit */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: r7 = &whichqs */

	/*
	 * Unlink the lwp from the queue.
	 */
	ldr	r5, [r1, #(L_BACK)]	/* r5 = l->l_back */
	mov	r6, #0x00000000
	str	r6, [r1, #(L_BACK)]	/* firewall: l->l_back = NULL */
	ldr	r6, [r1, #(L_FORW)]	/* r6 = l->l_forw */
	str	r5, [r6, #(L_BACK)]	/* r6->l_back = r5 */
	str	r6, [r5, #(L_FORW)]	/* r5->l_forw = r6 */

	teq	r5, r6			/* see if queue is empty */
	biceq	r3, r3, r2		/* clear bit if so */
	streq	r3, [r7]		/* store it back if so */

	/* rem: r2 (queue bit) now free */
	/* rem: r3 (whichqs) now free */
	/* rem: r7 (&whichqs) now free */

	/*
	 * Okay, set up registers the way cpu_switch() wants them,
	 * and jump into the middle of it (where we bring up the
	 * new lwp).
	 */
	mov	r6, r1			/* r6 = new lwp */
#if defined(LOCKDEBUG)
	mov	r5, r0			/* preserve old lwp */
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r5			/* r1 = old lwp */
#else
	mov	r1, r0			/* r1 = old lwp */
#endif
	b	.Lswitch_resume

.Lpreempt_noqueues:
	add	r0, pc, #.Lpreemptpanic - . - 8
	bl	_C_LABEL(panic)

.Lpreemptpanic:
	.asciz	"cpu_preempt: whichqs empty"
	.align	0
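
/*
 * The unlink in cpu_preempt() above is remrunqueue() inlined for a
 * known lwp (C sketch, same queue-head convention as earlier):
 *
 *	back = l->l_back;
 *	l->l_back = NULL;		// firewall
 *	forw = l->l_forw;
 *	forw->l_back = back;
 *	back->l_forw = forw;
 *	if (back == forw)		// queue is now empty
 *		sched_whichqs &= ~(1 << q);
 */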

/*
 * void switch_exit(struct lwp *l, struct lwp *l0);
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l. Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_exit(struct lwp *l, struct lwp *l0) */
ENTRY(switch_exit)
	/*
	 * r0 = lwp
	 * r1 = lwp0
	 */

	mov	r3, r0

	/* In case we fault */
	ldr	r0, .Lcurlwp
	mov	r2, #0x00000000
	str	r2, [r0]

	/* ldr	r0, .Lcurpcb
	str	r2, [r0] */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, .Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to lwp0 context */

	stmfd	sp!, {r0-r3}

	ldr	r0, .Lcpufuncs
	add	lr, pc, #.Lse_cache_purged - . - 8
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

.Lse_cache_purged:
	ldmfd	sp!, {r0-r3}

	IRQdisable

	ldr	r2, [r1, #(L_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, .Lcpufuncs
	add	lr, pc, #.Lse_context_switched - . - 8
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

.Lse_context_switched:
	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* Needed for the fu* and su* routines */
	ldr	r0, .Lcurpcb
	str	r2, [r0]

	IRQenable

	/* str	r3, [sp, #-0x0004]! */

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* exit2(l) */
	bl	_C_LABEL(exit2)

	/* Paranoia */
	mov	r0, #0x00000000
	ldr	r1, .Lcurlwp
	str	r0, [r1]

	ldr	r7, .Lwhichqs		/* r7 = &whichqs */
	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	.Lswitch_search
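
/*
 * In outline, switch_exit() above does (hedged):
 *
 *	curlwp = NULL;
 *	block_userspace_access = 1;
 *	// purge caches, switch to lwp0's L1 and pcb
 *	exit2(l);		// schedule vmspace + kernel stack free
 *	// re-enter cpu_switch()'s queue scan with no old lwp (r5 = NULL)
 */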

/*
 * void switch_lwp_exit(struct lwp *l, struct lwp *l0);
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l. Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_lwp_exit(struct lwp *l, struct lwp *l0) */
ENTRY(switch_lwp_exit)
	/*
	 * r0 = lwp
	 * r1 = lwp0
	 */

	mov	r3, r0

	/* In case we fault */
	mov	r2, #0x00000000
	ldr	r0, .Lcurlwp
	str	r2, [r0]

	/* ldr	r0, .Lcurpcb
	str	r2, [r0] */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, .Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to lwp0 context */

	stmfd	sp!, {r0-r3}

	ldr	r0, .Lcpufuncs
	add	lr, pc, #.Lsle_cache_purged - . - 8
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

.Lsle_cache_purged:
	ldmfd	sp!, {r0-r3}

	IRQdisable

	ldr	r2, [r1, #(L_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, .Lcpufuncs
	add	lr, pc, #.Lsle_context_switched - . - 8
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

.Lsle_context_switched:
	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* Needed for the fu* and su* routines */
	ldr	r0, .Lcurpcb
	str	r2, [r0]

	IRQenable

	/* str	r3, [sp, #-0x0004]! */

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* lwp_exit2(l) */
	bl	_C_LABEL(lwp_exit2)

	/* Paranoia */
	ldr	r1, .Lcurlwp
	mov	r0, #0x00000000
	str	r0, [r1]

	ldr	r7, .Lwhichqs		/* r7 = &whichqs */
	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	.Lswitch_search

/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	/*
	 * r0 = pcb
	 */

	/* Push registers. */
	stmfd	sp!, {r4-r7, lr}

	/* Store all the registers in the lwp's pcb */
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}

	/* Pull the registers off the stack */
	ldmfd	sp!, {r4-r7, pc}

ENTRY(proc_trampoline)
	add	lr, pc, #(.Ltrampoline_return - . - 8)
	mov	r0, r5
	mov	r1, sp
	mov	pc, r4

.Ltrampoline_return:
	/* Kill IRQs */
	mrs	r0, cpsr
	orr	r0, r0, #(I32_bit)
	msr	cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */
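
/*
 * proc_trampoline above is the first code a newly-created lwp runs.
 * Assuming (as the register moves above suggest) that fork left a
 * function pointer in r4 and its argument in r5, it amounts to
 * (sketch):
 *
 *	(*func)(arg, frame);	// r4(r5, sp)
 *	// then mask IRQs, PULLFRAME, and return to user mode
 */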

	.type	.Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
.Lcpu_switch_ffs_table:
/* same as the generic ffs table, but every entry is one less (bit
   numbers here are 0-based) */
/*		 0   1   2   3   4   5   6   7		   */
	.byte	 0,  0,  1, 12,  2,  6,  0, 13	/*  0- 7 */
	.byte	 3,  0,  7,  0,  0,  0,  0, 14	/*  8-15 */
	.byte	10,  4,  0,  0,  8,  0,  0, 25	/* 16-23 */
	.byte	 0,  0,  0,  0,  0, 21, 27, 15	/* 24-31 */
	.byte	31, 11,  5,  0,  0,  0,  0,  0	/* 32-39 */
	.byte	 9,  0,  0, 24,  0,  0, 20, 26	/* 40-47 */
	.byte	30,  0,  0,  0,  0, 23,  0, 19	/* 48-55 */
	.byte	29,  0, 22, 18, 28, 17, 16,  0	/* 56-63 */
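
/*
 * The table above can be regenerated with (C sketch):
 *
 *	unsigned char table[64];
 *	for (int i = 0; i < 32; i++)
 *		table[((1U << i) * 0x0450fbaf) >> 26] = i;
 *
 * Entries that no single-bit product maps to stay 0.
 */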