/*	$NetBSD: cpuswitch.S,v 1.3.2.18 2002/08/12 20:52:14 thorpej Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/asm.h>

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled, since FIQs are special.
 */

#define IRQdisable \
	mrs	r14, cpsr_all ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_all, r14

#define IRQenable \
	mrs	r14, cpsr_all ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_all, r14
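
/*
 * Both macros do a read-modify-write of the CPSR through r14, so r14 (lr)
 * is clobbered by every IRQdisable/IRQenable.  Only the I bit is touched;
 * the F bit is left alone, which is what keeps FIQs running.
 */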

/*
 * setrunqueue() and remrunqueue()
 *
 * Functions to add and remove a process from the run queue.
 */

	.text

Lwhichqs:
	.word	_C_LABEL(sched_whichqs)

Lqs:
	.word	_C_LABEL(sched_qs)
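
/*
 * sched_whichqs is a 32-bit bitmask with one bit per run queue; a set bit
 * means the corresponding queue is non-empty.  sched_qs[] is an array of
 * 32 circular doubly-linked queue heads, each just a forward and a back
 * pointer (at the same offsets as l_forw/l_back), i.e. 8 bytes per entry,
 * which is why queue addresses below are formed with "lsl #3".
 */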

/*
 * On entry
 *	r0 = lwp
 */

ENTRY(setrunqueue)
	/*
	 * Local register usage
	 *	r0 = lwp
	 *	r1 = queue
	 *	r2 = &qs[queue] and temp
	 *	r3 = temp
	 *	r12 = whichqs
	 */
#ifdef DIAGNOSTIC
	ldr	r1, [r0, #(L_BACK)]
	teq	r1, #0x00000000
	bne	Lsetrunqueue_erg

	ldr	r1, [r0, #(L_WCHAN)]
	teq	r1, #0x00000000
	bne	Lsetrunqueue_erg
#endif

	/* Get the priority of the queue */
	ldrb	r1, [r0, #(L_PRIORITY)]
	mov	r1, r1, lsr #2
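
	/*
	 * l_priority is 0-127; shifting right by two maps four priority
	 * levels onto each of the 32 run queues (PPQ == 4 in the 4.4BSD
	 * scheduler), so r1 is now the queue number 0-31.
	 */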

	/* Indicate that there is a process on this queue */
	ldr	r12, Lwhichqs
	ldr	r2, [r12]
	mov	r3, #0x00000001
	mov	r3, r3, lsl r1
	orr	r2, r2, r3
	str	r2, [r12]

	/* Get the address of the queue */
	ldr	r2, Lqs
	add	r1, r2, r1, lsl #3

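	/*
	 * Insert the lwp at the tail of its queue: l->l_forw points at the
	 * queue head, l->l_back at the old tail; the old tail's l_forw and
	 * the head's l_back are then repointed at the new lwp.
	 */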
	/* Hook the process in */
	str	r1, [r0, #(L_FORW)]
	ldr	r2, [r1, #(L_BACK)]

	str	r0, [r1, #(L_BACK)]
#ifdef DIAGNOSTIC
	teq	r2, #0x00000000
	beq	Lsetrunqueue_erg
#endif
	str	r0, [r2, #(L_FORW)]
	str	r2, [r0, #(L_BACK)]

	mov	pc, lr

#ifdef DIAGNOSTIC
Lsetrunqueue_erg:
	mov	r2, r1
	mov	r1, r0
	add	r0, pc, #Ltext1 - . - 8
	bl	_C_LABEL(printf)

	ldr	r2, Lqs
	ldr	r1, [r2]
	add	r0, pc, #Ltext2 - . - 8
	b	_C_LABEL(panic)

Ltext1:
	.asciz	"setrunqueue : %08x %08x\n"
Ltext2:
	.asciz	"setrunqueue : [qs]=%08x qs=%08x\n"
	.align	0
#endif

/*
 * On entry
 *	r0 = lwp
 */

ENTRY(remrunqueue)
	/*
	 * Local register usage
	 *	r0 = lwp
	 *	r1 = queue
	 *	r2 = &qs[queue] and scratch
	 *	r3 = scratch
	 *	r12 = whichqs
	 */

	/* Get the priority of the queue */
	ldrb	r1, [r0, #(L_PRIORITY)]
	mov	r1, r1, lsr #2

	/* Unhook the process */
	ldr	r2, [r0, #(L_FORW)]
	ldr	r3, [r0, #(L_BACK)]

	str	r3, [r2, #(L_BACK)]
	str	r2, [r3, #(L_FORW)]

	/* If the queue is now empty clear the queue not empty flag */
	teq	r2, r3

	/* (conditional execution here avoids branching around the update) */
	ldreq	r12, Lwhichqs
	ldreq	r2, [r12]
	moveq	r3, #0x00000001
	moveq	r3, r3, lsl r1
	biceq	r2, r2, r3
	streq	r2, [r12]

	/* Remove the back pointer for the process */
	mov	r1, #0x00000000
	str	r1, [r0, #(L_BACK)]

	mov	pc, lr


/*
 * cpuswitch()
 *
 * performs a process context switch.
 * This function has several entry points
 */

Lcurlwp:
	.word	_C_LABEL(curlwp)

Lcurpcb:
	.word	_C_LABEL(curpcb)

Lwant_resched:
	.word	_C_LABEL(want_resched)

Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

	.data
	.global	_C_LABEL(curpcb)
_C_LABEL(curpcb):
	.word	0x00000000
	.text

Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)

/*
 * Idle loop, exercised while waiting for a process to wake up.
 */
/* LINTSTUB: Ignore */
ASENTRY_NP(idle)

#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_unlock_idle)
#endif
	/* Enable interrupts */
	IRQenable

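	/*
	 * The two-instruction call idiom used throughout this file:
	 * reading pc on the ARM yields the address of the current
	 * instruction + 8, so "add lr, pc, #Llabel - . - 8" sets lr to
	 * the address of Llabel, and the "ldr pc, [...]" that follows
	 * behaves as an indirect call that returns to Llabel.
	 */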
	ldr	r3, Lcpufuncs
	mov	r0, #0
	add	lr, pc, #Lidle_slept - . - 8
	ldr	pc, [r3, #CF_SLEEP]

Lidle_slept:

	/* Disable interrupts while we check for an active queue */
	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif
	ldr	r7, Lwhichqs
	ldr	r3, [r7]
	teq	r3, #0x00000000

	beq	_ASM_LABEL(idle)
	b	Lidle_ret

/*
 * Find a new lwp to run, save the current context and
 * load the new context
 *
 * Arguments:
 *	r0	'struct lwp *' of the current LWP
 */
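
/*
 * In outline (an illustrative C sketch of the flow below, not code that
 * exists anywhere):
 *
 *	int cpu_switch(struct lwp *cur)
 *	{
 *		curlwp = NULL;
 *		s = spl0();
 *		while (sched_whichqs == 0)
 *			idle();
 *		new = dequeue_highest_priority();
 *		new->l_stat = LSONPROC;
 *		curlwp = new;
 *		if (new != cur) {
 *			if (cur != NULL)
 *				;	// save old context into cur's pcb
 *			;		// purge cache, switch pmap, load new pcb
 *		}
 *		splx(s);
 *		return (new != cur);
 *	}
 */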

ENTRY(cpu_switch)
	/*
	 * Local register usage.  Some of these register assignments are
	 * out of date.
	 *	r1 = oldlwp
	 *	r2 = spl level
	 *	r3 = whichqs
	 *	r4 = queue
	 *	r5 = &qs[queue]
	 *	r6 = newlwp
	 *	r7 = scratch
	 */
	stmfd	sp!, {r4-r7, lr}

	/*
	 * Get the current lwp and indicate that there is no longer
	 * a valid lwp (curlwp = 0)
	 */
	ldr	r7, Lcurlwp
	ldr	r1, [r7]
	mov	r0, #0x00000000
	str	r0, [r7]

	/* Zero the pcb */
	ldr	r7, Lcurpcb
	str	r0, [r7]

	/* stash the old lwp while we call functions */
	mov	r5, r1

#if defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	/* First phase : find a new lwp */

	/* rem: r5 = old lwp */

Lswitch_search:
	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues? */
	ldr	r7, Lwhichqs
	ldr	r3, [r7]

	/* If not we must idle until we do. */
	teq	r3, #0x00000000
	beq	_ASM_LABEL(idle)
Lidle_ret:

	/* put the old lwp back in r1 */
	mov	r1, r5

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: interrupts are disabled */

	/*
	 * We have found an active queue.  Currently we do not know which
	 * queue is active, just that one of them is.
	 */
	/* This is the ffs algorithm devised by d.seal and posted to
	 * comp.sys.arm on 16 Feb 1994.
	 */
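	/*
	 * How it works: "rsb/ands" isolates the lowest set bit of whichqs
	 * (x & -x), the three shift-and-accumulate instructions multiply
	 * that power of two by the constant 0x0450fbaf
	 * (0x11 * 0x41 * 0xffff), and the top six bits of the product are
	 * then unique for each input bit, so they index the 64-entry byte
	 * table at the end of this file to recover the bit number.  An
	 * equivalent C sketch (illustrative only):
	 *
	 *	qnum = Lcpu_switch_ffs_table[((x & -x) * 0x0450fbaf) >> 26];
	 */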
	rsb	r5, r3, #0
	ands	r0, r3, r5

	adr	r5, Lcpu_switch_ffs_table

	/* X = R0 */
	orr	r4, r0, r0, lsl #4	/* r4 = X * 0x11 */
	orr	r4, r4, r4, lsl #6	/* r4 = X * 0x451 */
	rsb	r4, r4, r4, lsl #16	/* r4 = X * 0x0450fbaf */

	/* used further down; loading it early avoids a StrongARM load stall */
	ldr	r6, Lqs

	/* now look up in the table indexed on the top 6 bits of r4 */
	ldrb	r4, [ r5, r4, lsr #26 ]

	/* rem: r0 = bit mask of chosen queue (1 << r4) */
	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: interrupts are disabled */

	/* Get the address of the queue (&qs[queue]) */
	add	r5, r6, r4, lsl #3

	/*
	 * Take the lwp from the head of the queue and make the next lwp
	 * the new head.  This unlinks the lwp at the head of the queue.
	 */
	ldr	r6, [r5, #(L_FORW)]

	/* rem: r6 = new lwp */
	ldr	r7, [r6, #(L_FORW)]
	str	r7, [r5, #(L_FORW)]

	/*
	 * Test to see if the queue is now empty.  If the head of the queue
	 * points to the queue itself then there are no more lwps in
	 * the queue.  We can therefore clear the queue not empty flag held
	 * in r3.
	 */

	teq	r5, r7
	biceq	r3, r3, r0

	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED ANY MORE */

	/* Fix the back pointer for the lwp now at the head of the queue. */
	ldr	r0, [r6, #(L_BACK)]
	str	r0, [r7, #(L_BACK)]

	/* Update the RAM copy of the queue not empty flags word. */
	ldr	r7, Lwhichqs
	str	r3, [r7]

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs - NOT NEEDED ANY MORE */
	/* rem: r4 = queue number - NOT NEEDED ANY MORE */
	/* rem: r6 = new lwp */
	/* rem: interrupts are disabled */

	/* Clear the want_resched flag */
	ldr	r7, Lwant_resched
	mov	r0, #0x00000000
	str	r0, [r7]

	/*
	 * Clear the back pointer of the lwp we have removed from
	 * the head of the queue.  The new lwp is isolated now.
	 */
	str	r0, [r6, #(L_BACK)]

#if defined(LOCKDEBUG)
	/*
	 * unlock the sched_lock, but leave interrupts off, for now.
	 */
	mov	r7, r1
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r7
#endif

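/*
 * switch_resume is entered both by falling through from the queue search
 * above and by the jump from cpu_preempt() below, in each case with
 * r1 = old lwp, r6 = new lwp and IRQs disabled.
 */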
switch_resume:
	/* l->l_cpu initialized in fork1() for single-processor */

	/* Process is now on a processor. */
	mov	r0, #LSONPROC		/* l->l_stat = LSONPROC */
	str	r0, [r6, #(L_STAT)]

	/* We have a new curlwp now so make a note of it */
	ldr	r7, Lcurlwp
	str	r6, [r7]

	/* Hook in a new pcb */
	ldr	r7, Lcurpcb
	ldr	r0, [r6, #(L_ADDR)]
	str	r0, [r7]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r1 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/*
	 * If the new lwp is the same as the lwp that called
	 * cpu_switch() then we do not need to save and restore any
	 * contexts.  This means we can make a quick exit.
	 * The test is simple: if curlwp on entry (now in r1) is the
	 * same as the lwp removed from the queue, we can jump to the exit.
	 */
	teq	r1, r6
	moveq	r4, #0x00000000		/* default to "didn't switch" */
	beq	switch_return

	/*
	 * At this point, we are guaranteed to be switching to
	 * a new lwp.
	 */
	mov	r4, #0x00000001

	/* Remember the old lwp in r0 */
	mov	r0, r1

	/*
	 * If the old lwp on entry to cpu_switch was zero then the
	 * process that called it was exiting.  This means that we do
	 * not need to save the current context.  Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r0, #0x00000000
	beq	switch_exited

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Stage two : Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r1, [r0, #(L_ADDR)]

	/* Save all the registers in the old lwp's pcb */
	add	r7, r1, #(PCB_R8)
	stmia	r7, {r8-r13}

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
	mrs	r3, cpsr_all
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
	msr	cpsr_all, r2

	str	sp, [r1, #(PCB_UND_SP)]

	msr	cpsr_all, r3		/* Restore the old mode */
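
	/*
	 * r13 (sp) is banked per processor mode: the block store above
	 * captured the SVC32 sp along with r8-r12, and the brief switch
	 * into UND32 mode lets us capture that mode's private sp as well
	 * (the stack used for undefined-instruction handling, e.g. by
	 * the FPE).
	 */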

	/* rem: r0 = old lwp */
	/* rem: r1 = old pcb */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* What else needs to be saved?  Only FPA stuff, when that is supported. */

	/* r1 now free! */

	/* Third phase : restore saved context */

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
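	/*
	 * The full I/D cache clean below is needed because these CPUs
	 * have virtually indexed, virtually tagged caches: lines
	 * belonging to the old address space must be written back and
	 * invalidated before the translation table is switched, or they
	 * would alias the new mappings.
	 */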
	ldr	r3, Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000
	str	r1, [r3]

	stmfd	sp!, {r0-r3}
	ldr	r1, Lcpufuncs
	add	lr, pc, #Lcs_cache_purged - . - 8
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]

Lcs_cache_purged:
	ldmfd	sp!, {r0-r3}

Lcs_cache_purge_skipped:
	/* At this point we need to kill IRQs again. */
	IRQdisable

	/*
	 * Interrupts are disabled so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
	str	r2, [r3]

	/* Get the user structure for the new lwp in r1 */
	ldr	r1, [r6, #(L_ADDR)]

	/* Get the pagedir physical address for the process. */
	ldr	r0, [r1, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r3, Lcpufuncs
	add	lr, pc, #Lcs_context_switched - . - 8
	ldr	pc, [r3, #CF_CONTEXT_SWITCH]

Lcs_context_switched:
	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
	mrs	r3, cpsr_all
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE)
	msr	cpsr_all, r2

	ldr	sp, [r1, #(PCB_UND_SP)]

	msr	cpsr_all, r3		/* Restore the old mode */

	/* Restore all the saved registers */
	add	r7, r1, #PCB_R8
	ldmia	r7, {r8-r13}

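	/*
	 * ARM data-processing immediates are an 8-bit value rotated by an
	 * even amount, so USER_SIZE is added in two instructions below,
	 * one for its low byte and one for its high byte, to keep each
	 * immediate encodable.
	 */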
#ifdef ARMFPE
	add	r0, r1, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* We can enable interrupts again */
	IRQenable

switch_return:

	/* Get the spl level from the stack and update the current spl level */
	ldr	r0, [sp], #0x0004
	bl	_C_LABEL(splx)

	/* cpu_switch returns 1 == switched, 0 == didn't switch */
	mov	r0, r4

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}

switch_exited:
	/*
	 * We skip the cache purge because switch_exit()/switch_lwp_exit()
	 * already did it.  Load up registers the way Lcs_cache_purge_skipped
	 * expects.  Userspace access is already blocked by switch_exit()/
	 * switch_lwp_exit().
	 */
	ldr	r3, Lblock_userspace_access
	mov	r2, #0x00000000
	b	Lcs_cache_purge_skipped

/*
 * cpu_preempt(struct lwp *current, struct lwp *next)
 * Switch to the specified next LWP
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 */
ENTRY(cpu_preempt)
	stmfd	sp!, {r4-r7, lr}

	mov	r6, r0			/* save old lwp */
	mov	r5, r1			/* save new lwp */

#if defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues? */
	ldr	r7, Lwhichqs
	ldr	r3, [r7]

	/* If none, panic! */
	teq	r3, #0x00000000
	beq	preempt_noqueues

	mov	r0, r6			/* restore old lwp */
	mov	r1, r5			/* restore new lwp */

	/* rem: r0 = old lwp */
	/* rem: r1 = new lwp */
	/* rem: r3 = whichqs */
	/* rem: r7 = &whichqs */
	/* rem: interrupts are disabled */

	/* Compute the queue bit corresponding to the new lwp. */
	ldrb	r4, [r1, #(L_PRIORITY)]
	mov	r2, #0x00000001
	mov	r4, r4, lsr #2		/* queue number */
	mov	r2, r2, lsl r4		/* queue bit */

	/* rem: r0 = old lwp */
	/* rem: r1 = new lwp */
	/* rem: r2 = queue bit */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: r7 = &whichqs */

	/*
	 * Unlink the lwp from the queue.
	 */
	ldr	r5, [r1, #(L_BACK)]	/* r5 = l->l_back */
	mov	r6, #0x00000000
	str	r6, [r1, #(L_BACK)]	/* firewall: l->l_back = NULL */
	ldr	r6, [r1, #(L_FORW)]	/* r6 = l->l_forw */
	str	r5, [r6, #(L_BACK)]	/* r6->l_back = r5 */
	str	r6, [r5, #(L_FORW)]	/* r5->l_forw = r6 */

	teq	r5, r6			/* see if queue is empty */
	biceq	r3, r3, r2		/* clear bit if so */
	streq	r3, [r7]		/* store it back if so */

	/* rem: r2 (queue bit) now free */
	/* rem: r3 (whichqs) now free */
	/* rem: r7 (&whichqs) now free */

	/*
	 * Okay, set up registers the way cpu_switch() wants them,
	 * and jump into the middle of it (where we bring up the
	 * new lwp).
	 */
	mov	r6, r1			/* r6 = new lwp */
#if defined(LOCKDEBUG)
	mov	r5, r0			/* preserve old lwp */
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r5			/* r1 = old lwp */
#else
	mov	r1, r0			/* r1 = old lwp */
#endif
	b	switch_resume

preempt_noqueues:
	add	r0, pc, #preemptpanic - . - 8
	bl	_C_LABEL(panic)

preemptpanic:
	.asciz	"cpu_preempt: whichqs empty"
	.align	0

Llwp0:
	.word	_C_LABEL(lwp0)

Lkernel_map:
	.word	_C_LABEL(kernel_map)

/*
 * void switch_exit(struct lwp *l, struct lwp *l0);
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l.  Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_exit(struct lwp *l, struct lwp *l0) */
ENTRY(switch_exit)
	/*
	 * r0 = lwp
	 * r1 = lwp0
	 */

	mov	r3, r0

	/* In case we fault */
	ldr	r0, Lcurlwp
	mov	r2, #0x00000000
	str	r2, [r0]

/*	ldr	r0, Lcurpcb
	str	r2, [r0]*/

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to lwp0 context */

	stmfd	sp!, {r0-r3}

	ldr	r0, Lcpufuncs
	add	lr, pc, #Lse_cache_purged - . - 8
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

Lse_cache_purged:
	ldmfd	sp!, {r0-r3}

	IRQdisable

	ldr	r2, [r1, #(L_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, Lcpufuncs
	add	lr, pc, #Lse_context_switched - . - 8
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

Lse_context_switched:
	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* This is needed for the su and fu routines */
	ldr	r0, Lcurpcb
	str	r2, [r0]

	IRQenable

/*	str	r3, [sp, #-0x0004]!*/

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* exit2(l) */
	bl	_C_LABEL(exit2)

	/* Paranoia */
	mov	r0, #0x00000000
	ldr	r1, Lcurlwp
	str	r0, [r1]

	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	Lswitch_search

/*
 * void switch_lwp_exit(struct lwp *l, struct lwp *l0);
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l.  Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_lwp_exit(struct lwp *l, struct lwp *l0) */
ENTRY(switch_lwp_exit)
	/*
	 * r0 = lwp
	 * r1 = lwp0
	 */

	mov	r3, r0

	/* In case we fault */
	mov	r2, #0x00000000
	ldr	r0, Lcurlwp
	str	r2, [r0]

/*	ldr	r0, Lcurpcb
	str	r2, [r0]*/

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to lwp0 context */

	stmfd	sp!, {r0-r3}

	ldr	r0, Lcpufuncs
	add	lr, pc, #Lsle_cache_purged - . - 8
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

Lsle_cache_purged:
	ldmfd	sp!, {r0-r3}

	IRQdisable

	ldr	r2, [r1, #(L_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, Lcpufuncs
	add	lr, pc, #Lsle_context_switched - . - 8
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

Lsle_context_switched:
	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* This is needed for the su and fu routines */
	ldr	r0, Lcurpcb
	str	r2, [r0]

	IRQenable

/*	str	r3, [sp, #-0x0004]!*/

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* lwp_exit2(l) */
	bl	_C_LABEL(lwp_exit2)

	/* Paranoia */
	ldr	r1, Lcurlwp
	mov	r0, #0x00000000
	str	r0, [r1]

	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	Lswitch_search

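/*
 * savectx() snapshots the callee-saved register set (r8-r13) into the
 * given pcb, mirroring the save phase of cpu_switch() above; it does not
 * switch to any other context.
 */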
/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	/*
	 * r0 = pcb
	 */

	/* Push registers. */
	stmfd	sp!, {r4-r7, lr}

	/* Store all the registers in the process's pcb */
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}

	/* Pull the regs off the stack */
	ldmfd	sp!, {r4-r7, pc}

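/*
 * proc_trampoline is the first code run by a newly created lwp: the
 * switchframe built at fork time (presumably by cpu_fork(); an assumption
 * about where these values come from) leaves the function to call in r4
 * and its argument in r5.  After the call returns, IRQs are disabled, the
 * trapframe on the stack is unwound with PULLFRAME, and "movs pc, lr"
 * also moves the saved SPSR into the CPSR, dropping into user mode.
 */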
ENTRY(proc_trampoline)
	add	lr, pc, #(trampoline_return - . - 8)
	mov	r0, r5
	mov	r1, sp
	mov	pc, r4

trampoline_return:
	/* Kill IRQs */
	mrs	r0, cpsr_all
	orr	r0, r0, #(I32_bit)
	msr	cpsr_all, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

	.type Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
Lcpu_switch_ffs_table:
/* same as the ffs table, but each entry is one less (bit numbers start at 0) */
/*	         0   1   2   3   4   5   6   7           */
	.byte	 0,  0,  1, 12,  2,  6,  0, 13	/*  0- 7 */
	.byte	 3,  0,  7,  0,  0,  0,  0, 14	/*  8-15 */
	.byte	10,  4,  0,  0,  8,  0,  0, 25	/* 16-23 */
	.byte	 0,  0,  0,  0,  0, 21, 27, 15	/* 24-31 */
	.byte	31, 11,  5,  0,  0,  0,  0,  0	/* 32-39 */
	.byte	 9,  0,  0, 24,  0,  0, 20, 26	/* 40-47 */
	.byte	30,  0,  0,  0,  0, 23,  0, 19	/* 48-55 */
	.byte	29,  0, 22, 18, 28, 17, 16,  0	/* 56-63 */

/* End of cpuswitch.S */