/*	$NetBSD: cpuswitch.S,v 1.3.2.19 2002/08/12 21:00:26 thorpej Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/asm.h>

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable
 * These keep FIQ's enabled since FIQ's are special.
 */

#define IRQdisable \
	mrs	r14, cpsr_all ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_all, r14 ; \

#define IRQenable \
	mrs	r14, cpsr_all ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_all, r14 ; \

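/*
 * For reference, a rough C-level sketch of the two macros above,
 * with hypothetical read_cpsr()/write_cpsr() accessors standing in
 * for the mrs/msr instructions:
 *
 *	u_int32_t cpsr = read_cpsr();
 *	cpsr |= I32_bit;
 *	write_cpsr(cpsr);
 *
 * IRQenable instead clears the bit (cpsr &= ~I32_bit).  The FIQ mask
 * bit is never touched, so FIQs remain enabled across both macros.
 */
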
/*
 * setrunqueue() and remrunqueue()
 *
 * Functions to add a process to and remove a process from the run queues.
 */

	.text

Lwhichqs:
	.word	_C_LABEL(sched_whichqs)

Lqs:
	.word	_C_LABEL(sched_qs)

/*
 * On entry
 *	r0 = lwp
 */

ENTRY(setrunqueue)
	/*
	 * Local register usage
	 *	r0 = process
	 *	r1 = queue
	 *	r2 = &qs[queue] and temp
	 *	r3 = temp
	 *	r12 = whichqs
	 */
#ifdef DIAGNOSTIC
	ldr	r1, [r0, #(L_BACK)]
	teq	r1, #0x00000000
	bne	Lsetrunqueue_erg

	ldr	r1, [r0, #(L_WCHAN)]
	teq	r1, #0x00000000
	bne	Lsetrunqueue_erg
#endif

	/* Get the priority of the queue */
	ldrb	r1, [r0, #(L_PRIORITY)]
	mov	r1, r1, lsr #2

	/* Indicate that there is a process on this queue */
	ldr	r12, Lwhichqs
	ldr	r2, [r12]
	mov	r3, #0x00000001
	mov	r3, r3, lsl r1
	orr	r2, r2, r3
	str	r2, [r12]

	/* Get the address of the queue */
	ldr	r2, Lqs
	add	r1, r2, r1, lsl #3

	/* Hook the process in */
	str	r1, [r0, #(L_FORW)]
	ldr	r2, [r1, #(L_BACK)]

	str	r0, [r1, #(L_BACK)]
#ifdef DIAGNOSTIC
	teq	r2, #0x00000000
	beq	Lsetrunqueue_erg
#endif
	str	r0, [r2, #(L_FORW)]
	str	r2, [r0, #(L_BACK)]

	mov	pc, lr
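
/*
 * For reference, a rough C equivalent of the queue insertion above.
 * This is a sketch only; the field names follow the L_* offsets used
 * in this file, and the sched_qs entries double as list heads.  The
 * lwp ends up at the tail of its priority queue:
 *
 *	void
 *	setrunqueue(struct lwp *l)
 *	{
 *		int i = l->l_priority >> 2;
 *		struct lwp *q = (struct lwp *)&sched_qs[i];
 *
 *		sched_whichqs |= (1 << i);
 *		l->l_forw = q;
 *		l->l_back = q->l_back;
 *		q->l_back->l_forw = l;
 *		q->l_back = l;
 *	}
 */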

#ifdef DIAGNOSTIC
Lsetrunqueue_erg:
	mov	r2, r1
	mov	r1, r0
	add	r0, pc, #Ltext1 - . - 8
	bl	_C_LABEL(printf)

	ldr	r2, Lqs
	ldr	r1, [r2]
	add	r0, pc, #Ltext2 - . - 8
	b	_C_LABEL(panic)

Ltext1:
	.asciz	"setrunqueue : %08x %08x\n"
Ltext2:
	.asciz	"setrunqueue : [qs]=%08x qs=%08x\n"
	.align	0
#endif

/*
 * On entry
 *	r0 = lwp
 */

ENTRY(remrunqueue)
	/*
	 * Local register usage
	 *	r0 = oldproc
	 *	r1 = queue
	 *	r2 = &qs[queue] and scratch
	 *	r3 = scratch
	 *	r12 = whichqs
	 */

	/* Get the priority of the queue */
	ldrb	r1, [r0, #(L_PRIORITY)]
	mov	r1, r1, lsr #2

	/* Unhook the process */
	ldr	r2, [r0, #(L_FORW)]
	ldr	r3, [r0, #(L_BACK)]

	str	r3, [r2, #(L_BACK)]
	str	r2, [r3, #(L_FORW)]

	/* If the queue is now empty clear the queue not empty flag */
	teq	r2, r3

	/* This could be reworked to avoid the use of r12 */
	ldreq	r12, Lwhichqs
	ldreq	r2, [r12]
	moveq	r3, #0x00000001
	moveq	r3, r3, lsl r1
	biceq	r2, r2, r3
	streq	r2, [r12]

	/* Remove the back pointer for the process */
	mov	r1, #0x00000000
	str	r1, [r0, #(L_BACK)]

	mov	pc, lr
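
/*
 * Again for reference, a rough C equivalent of the unlink above
 * (a sketch only, using the same assumed field names as the
 * setrunqueue() sketch):
 *
 *	void
 *	remrunqueue(struct lwp *l)
 *	{
 *		int i = l->l_priority >> 2;
 *
 *		l->l_forw->l_back = l->l_back;
 *		l->l_back->l_forw = l->l_forw;
 *		if (l->l_forw == l->l_back)	// only the head remains
 *			sched_whichqs &= ~(1 << i);
 *		l->l_back = NULL;		// firewall
 *	}
 */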


/*
 * cpu_switch()
 *
 * Performs a process context switch.
 * This function has several entry points.
 */

Lcurlwp:
	.word	_C_LABEL(curlwp)

Lcurpcb:
	.word	_C_LABEL(curpcb)

Lwant_resched:
	.word	_C_LABEL(want_resched)

Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

	.data
	.global	_C_LABEL(curpcb)
_C_LABEL(curpcb):
	.word	0x00000000
	.text

Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)

/*
 * Idle loop, exercised while waiting for a process to wake up.
 */
/* LINTSTUB: Ignore */
ASENTRY_NP(idle)

#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_unlock_idle)
#endif
	/* Enable interrupts */
	IRQenable

	ldr	r3, Lcpufuncs
	mov	r0, #0
	add	lr, pc, #Lidle_slept - . - 8
	ldr	pc, [r3, #CF_SLEEP]

Lidle_slept:

	/* Disable interrupts while we check for an active queue */
	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif
	ldr	r7, Lwhichqs
	ldr	r3, [r7]
	teq	r3, #0x00000000

	beq	_ASM_LABEL(idle)
	b	Lidle_ret
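
/*
 * In outline, the loop above is (a sketch; cpu_sleep() stands in for
 * the cpufuncs cf_sleep hook, the enable/disable helpers for the
 * IRQenable/IRQdisable macros, and the LOCKDEBUG lock juggling is
 * omitted):
 *
 *	do {
 *		enable_interrupts();
 *		cpu_sleep(0);
 *		disable_interrupts();
 *	} while (sched_whichqs == 0);
 *
 * after which control rejoins cpu_switch() at Lidle_ret.
 */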

/*
 * Find a new lwp to run, save the current context and
 * load the new context
 *
 * Arguments:
 *	r0	'struct lwp *' of the current LWP
 */

ENTRY(cpu_switch)
	/*
	 * Local register usage. Some of these registers are out of date.
	 *	r1 = oldlwp
	 *	r2 = spl level
	 *	r3 = whichqs
	 *	r4 = queue
	 *	r5 = &qs[queue]
	 *	r6 = newlwp
	 *	r7 = scratch
	 */
	stmfd	sp!, {r4-r7, lr}

	/*
	 * Get the current lwp and indicate that there is no longer
	 * a valid process (curlwp = 0). Zero the current PCB pointer
	 * while we're at it.
	 */
	ldr	r7, Lcurlwp
	ldr	r6, Lcurpcb
	mov	r0, #0x00000000
	ldr	r1, [r7]		/* r1 = curlwp */
	str	r0, [r7]		/* curlwp = NULL */
	str	r0, [r6]		/* curpcb = NULL */

	/* stash the old proc while we call functions */
	mov	r5, r1

#if defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	/* First phase : find a new lwp */

	/* rem: r5 = old lwp */

Lswitch_search:
	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues */
	ldr	r7, Lwhichqs
	ldr	r3, [r7]

	/* If not we must idle until we do. */
	teq	r3, #0x00000000
	beq	_ASM_LABEL(idle)
Lidle_ret:

	/* put old proc back in r1 */
	mov	r1, r5

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: interrupts are disabled */

	/*
	 * We have found an active queue. Currently we do not know which queue
	 * is active just that one of them is.
	 */
	/*
	 * This is the ffs algorithm devised by D. Seal and posted to
	 * comp.sys.arm on 16 Feb 1994.
	 */
	rsb	r5, r3, #0
	ands	r0, r3, r5

	adr	r5, Lcpu_switch_ffs_table

	/* X = R0 */
	orr	r4, r0, r0, lsl #4	/* r4 = X * 0x11 */
	orr	r4, r4, r4, lsl #6	/* r4 = X * 0x451 */
	rsb	r4, r4, r4, lsl #16	/* r4 = X * 0x0450fbaf */

	/* used further down, saves SA stall */
	ldr	r6, Lqs

	/* now look it up in the table, indexed on the top 6 bits of r4 */
	ldrb	r4, [r5, r4, lsr #26]
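
/*
 * In C terms, the sequence above computes (a sketch only):
 *
 *	u_int32_t x = whichqs & -whichqs;
 *	queue = Lcpu_switch_ffs_table[(x * 0x0450fbaf) >> 26];
 *
 * The rsb/ands pair isolates the lowest set bit, and the multiply
 * (built from shifts) hashes each of the 32 possible one-bit values
 * to a distinct 6-bit index; the byte table at the end of this file
 * maps that index back to the bit number.
 */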

	/* rem: r0 = bit mask of chosen queue (1 << r4) */
	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: interrupts are disabled */

	/* Get the address of the queue (&qs[queue]) */
	add	r5, r6, r4, lsl #3

	/*
	 * Get the lwp from the queue and place the next process in
	 * the queue at the head. This basically unlinks the lwp at
	 * the head of the queue.
	 */
	ldr	r6, [r5, #(L_FORW)]

	/* rem: r6 = new lwp */
	ldr	r7, [r6, #(L_FORW)]
	str	r7, [r5, #(L_FORW)]

	/*
	 * Test to see if the queue is now empty. If the head of the queue
	 * points to the queue itself then there are no more lwps in
	 * the queue. We can therefore clear the queue not empty flag held
	 * in r3.
	 */

	teq	r5, r7
	biceq	r3, r3, r0

	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED ANY MORE */

	/* Fix the back pointer for the lwp now at the head of the queue. */
	ldr	r0, [r6, #(L_BACK)]
	str	r0, [r7, #(L_BACK)]

	/* Update the RAM copy of the queue not empty flags word. */
	ldr	r7, Lwhichqs
	str	r3, [r7]

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs - NOT NEEDED ANY MORE */
	/* rem: r4 = queue number - NOT NEEDED ANY MORE */
	/* rem: r6 = new lwp */
	/* rem: interrupts are disabled */

	/* Clear the want_resched flag */
	ldr	r7, Lwant_resched
	mov	r0, #0x00000000
	str	r0, [r7]

	/*
	 * Clear the back pointer of the lwp we have removed from
	 * the head of the queue. The new lwp is isolated now.
	 */
	str	r0, [r6, #(L_BACK)]

#if defined(LOCKDEBUG)
	/*
	 * unlock the sched_lock, but leave interrupts off, for now.
	 */
	mov	r7, r1
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r7
#endif

switch_resume:
	/* l->l_cpu initialized in fork1() for single-processor */

	/* Process is now on a processor. */
	mov	r0, #LSONPROC		/* l->l_stat = LSONPROC */
	str	r0, [r6, #(L_STAT)]

	/* We have a new curlwp now so make a note of it */
	ldr	r7, Lcurlwp
	str	r6, [r7]

	/* Hook in a new pcb */
	ldr	r7, Lcurpcb
	ldr	r0, [r6, #(L_ADDR)]
	str	r0, [r7]

	/* At this point we can allow IRQ's again. */
	IRQenable

	/* rem: r1 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/*
	 * If the new process is the same as the process that called
	 * cpu_switch() then we do not need to save and restore any
	 * contexts. This means we can make a quick exit.
	 * The test is simple: if curlwp on entry (now in r1) is the
	 * same as the lwp removed from the queue, we can jump to the exit.
	 */
	teq	r1, r6
	moveq	r4, #0x00000000		/* default to "didn't switch" */
	beq	switch_return

	/*
	 * At this point, we are guaranteed to be switching to
	 * a new lwp.
	 */
	mov	r4, #0x00000001

	/* Remember the old lwp in r0 */
	mov	r0, r1

	/*
	 * If the old lwp on entry to cpu_switch was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r0, #0x00000000
	beq	switch_exited

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/* Stage two : Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r1, [r0, #(L_ADDR)]

	/* Save all the registers in the old lwp's pcb */
	add	r7, r1, #(PCB_R8)
	stmia	r7, {r8-r13}

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
	mrs	r3, cpsr_all
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
	msr	cpsr_all, r2

	str	sp, [r1, #(PCB_UND_SP)]

	msr	cpsr_all, r3		/* Restore the old mode */
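
/*
 * In outline (a sketch, with hypothetical read_cpsr()/write_cpsr()
 * accessors, and the pcb field name assumed from the PCB_UND_SP
 * offset): sp is banked per processor mode, so the only way to reach
 * the UND32 stack pointer from SVC32 mode is to switch modes around
 * the access:
 *
 *	u_int32_t saved = read_cpsr();
 *	write_cpsr((saved & ~PSR_MODE) | PSR_UND32_MODE | I32_bit);
 *	pcb->pcb_und_sp = sp;
 *	write_cpsr(saved);
 *
 * The restore path below at Lcs_context_switched does the inverse.
 */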

	/* rem: r0 = old lwp */
	/* rem: r1 = old pcb */
	/* rem: r4 = return value */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/* What else needs to be saved? Only FPA stuff, when that is supported. */

	/* r1 now free! */

	/* Third phase : restore saved context */

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r3, Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000
	str	r1, [r3]

	stmfd	sp!, {r0-r3}
	ldr	r1, Lcpufuncs
	add	lr, pc, #Lcs_cache_purged - . - 8
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]

Lcs_cache_purged:
	ldmfd	sp!, {r0-r3}

Lcs_cache_purge_skipped:
	/* At this point we need to kill IRQ's again. */
	IRQdisable

	/*
	 * Interrupts are disabled so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
	str	r2, [r3]

	/* Get the user structure for the new process in r1 */
	ldr	r1, [r6, #(L_ADDR)]

	/* Get the pagedir physical address for the process. */
	ldr	r0, [r1, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r3, Lcpufuncs
	add	lr, pc, #Lcs_context_switched - . - 8
	ldr	pc, [r3, #CF_CONTEXT_SWITCH]

Lcs_context_switched:
	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
	mrs	r3, cpsr_all
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE)
	msr	cpsr_all, r2

	ldr	sp, [r1, #(PCB_UND_SP)]

	msr	cpsr_all, r3		/* Restore the old mode */

	/* Restore all the saved registers */
	add	r7, r1, #PCB_R8
	ldmia	r7, {r8-r13}

#ifdef ARMFPE
	add	r0, r1, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* We can enable interrupts again */
	IRQenable

switch_return:

	/* Get the spl level from the stack and update the current spl level */
	ldr	r0, [sp], #0x0004
	bl	_C_LABEL(splx)

	/* cpu_switch returns 1 == switched, 0 == didn't switch */
	mov	r0, r4

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}

switch_exited:
	/*
	 * We skip the cache purge because switch_exit()/switch_lwp_exit()
	 * already did it. Load up registers the way Lcs_cache_purge_skipped
	 * expects. Userspace access is already blocked by switch_exit()/
	 * switch_lwp_exit().
	 */
	ldr	r3, Lblock_userspace_access
	mov	r2, #0x00000000
	b	Lcs_cache_purge_skipped

/*
 * cpu_preempt(struct lwp *current, struct lwp *next)
 * Switch to the specified next LWP
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 */
ENTRY(cpu_preempt)
	stmfd	sp!, {r4-r7, lr}

	/* Lower the spl level to spl0 and get the current spl level. */
	mov	r6, r0			/* save old lwp */
	mov	r5, r1			/* save new lwp */

#if defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues? */
	ldr	r7, Lwhichqs
	ldr	r3, [r7]

	/* If none, panic! */
	teq	r3, #0x00000000
	beq	preempt_noqueues

	mov	r0, r6			/* restore old lwp */
	mov	r1, r5			/* restore new lwp */

	/* rem: r0 = old lwp */
	/* rem: r1 = new lwp */
	/* rem: r3 = whichqs */
	/* rem: r7 = &whichqs */
	/* rem: interrupts are disabled */

	/* Compute the queue bit corresponding to the new lwp. */
	ldrb	r4, [r1, #(L_PRIORITY)]
	mov	r2, #0x00000001
	mov	r4, r4, lsr #2		/* queue number */
	mov	r2, r2, lsl r4		/* queue bit */

	/* rem: r0 = old lwp */
	/* rem: r1 = new lwp */
	/* rem: r2 = queue bit */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: r7 = &whichqs */

	/*
	 * Unlink the lwp from the queue.
	 */
	ldr	r5, [r1, #(L_BACK)]	/* r5 = l->l_back */
	mov	r6, #0x00000000
	str	r6, [r1, #(L_BACK)]	/* firewall: l->l_back = NULL */
	ldr	r6, [r1, #(L_FORW)]	/* r6 = l->l_forw */
	str	r5, [r6, #(L_BACK)]	/* r6->l_back = r5 */
	str	r6, [r5, #(L_FORW)]	/* r5->l_forw = r6 */

	teq	r5, r6			/* see if queue is empty */
	biceq	r3, r3, r2		/* clear bit if so */
	streq	r3, [r7]		/* store it back if so */

	/* rem: r2 (queue bit) now free */
	/* rem: r3 (whichqs) now free */
	/* rem: r7 (&whichqs) now free */

	/*
	 * Okay, set up registers the way cpu_switch() wants them,
	 * and jump into the middle of it (where we bring up the
	 * new process).
	 */
	mov	r6, r1			/* r6 = new lwp */
#if defined(LOCKDEBUG)
	mov	r5, r0			/* preserve old lwp */
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r5			/* r1 = old lwp */
#else
	mov	r1, r0			/* r1 = old lwp */
#endif
	b	switch_resume

preempt_noqueues:
	add	r0, pc, #preemptpanic - . - 8
	bl	_C_LABEL(panic)

preemptpanic:
	.asciz	"cpu_preempt: whichqs empty"
	.align	0

Llwp0:
	.word	_C_LABEL(lwp0)

Lkernel_map:
	.word	_C_LABEL(kernel_map)

/*
 * void switch_exit(struct lwp *l, struct lwp *l0);
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l. Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_exit(struct lwp *l, struct lwp *l0) */
ENTRY(switch_exit)
	/*
	 * r0 = lwp
	 * r1 = lwp0
	 */

	mov	r3, r0

	/* In case we fault */
	ldr	r0, Lcurlwp
	mov	r2, #0x00000000
	str	r2, [r0]

/*	ldr	r0, Lcurpcb
	str	r2, [r0] */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to lwp0 context */

	stmfd	sp!, {r0-r3}

	ldr	r0, Lcpufuncs
	add	lr, pc, #Lse_cache_purged - . - 8
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

Lse_cache_purged:
	ldmfd	sp!, {r0-r3}

	IRQdisable

	ldr	r2, [r1, #(L_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, Lcpufuncs
	add	lr, pc, #Lse_context_switched - . - 8
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

Lse_context_switched:
	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* This is not really needed ! */
	/* Yes it is for the su and fu routines */
	ldr	r0, Lcurpcb
	str	r2, [r0]

	IRQenable

/*	str	r3, [sp, #-0x0004]! */

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* exit2(l) */
	bl	_C_LABEL(exit2)

	/* Paranoia */
	mov	r0, #0x00000000
	ldr	r1, Lcurlwp
	str	r0, [r1]

	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	Lswitch_search

/*
 * void switch_lwp_exit(struct lwp *l, struct lwp *l0);
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l. Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_lwp_exit(struct lwp *l, struct lwp *l0) */
ENTRY(switch_lwp_exit)
	/*
	 * r0 = lwp
	 * r1 = lwp0
	 */

	mov	r3, r0

	/* In case we fault */
	mov	r2, #0x00000000
	ldr	r0, Lcurlwp
	str	r2, [r0]

/*	ldr	r0, Lcurpcb
	str	r2, [r0] */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to lwp0 context */

	stmfd	sp!, {r0-r3}

	ldr	r0, Lcpufuncs
	add	lr, pc, #Lsle_cache_purged - . - 8
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

Lsle_cache_purged:
	ldmfd	sp!, {r0-r3}

	IRQdisable

	ldr	r2, [r1, #(L_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, Lcpufuncs
	add	lr, pc, #Lsle_context_switched - . - 8
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

Lsle_context_switched:
	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* This is not really needed ! */
	/* Yes it is for the su and fu routines */
	ldr	r0, Lcurpcb
	str	r2, [r0]

	IRQenable

/*	str	r3, [sp, #-0x0004]! */

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* lwp_exit2(l) */
	bl	_C_LABEL(lwp_exit2)

	/* Paranoia */
	ldr	r1, Lcurlwp
	mov	r0, #0x00000000
	str	r0, [r1]

	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	Lswitch_search

/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	/*
	 * r0 = pcb
	 */

	/* Push registers. */
	stmfd	sp!, {r4-r7, lr}

	/* Store all the registers in the process's pcb */
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}

	/* Pull the registers off the stack */
	ldmfd	sp!, {r4-r7, pc}

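/*
 * proc_trampoline() is the first code run by a newly created
 * process/lwp (register setup is assumed to be done at fork time):
 * it calls the function in r4 with r5 and the trapframe pointer (sp)
 * as arguments, then masks IRQs and drops through the trapframe to
 * its final destination.
 */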
ENTRY(proc_trampoline)
	add	lr, pc, #(trampoline_return - . - 8)
	mov	r0, r5
	mov	r1, sp
	mov	pc, r4

trampoline_return:
	/* Kill irq's */
	mrs	r0, cpsr_all
	orr	r0, r0, #(I32_bit)
	msr	cpsr_all, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

	.type Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
Lcpu_switch_ffs_table:
/* Same as the standard ffs table, but with every entry one less. */
/*               0   1   2   3   4   5   6   7           */
	.byte	 0,  0,  1, 12,  2,  6,  0, 13	/*  0- 7 */
	.byte	 3,  0,  7,  0,  0,  0,  0, 14	/*  8-15 */
	.byte	10,  4,  0,  0,  8,  0,  0, 25	/* 16-23 */
	.byte	 0,  0,  0,  0,  0, 21, 27, 15	/* 24-31 */
	.byte	31, 11,  5,  0,  0,  0,  0,  0	/* 32-39 */
	.byte	 9,  0,  0, 24,  0,  0, 20, 26	/* 40-47 */
	.byte	30,  0,  0,  0,  0, 23,  0, 19	/* 48-55 */
	.byte	29,  0, 22, 18, 28, 17, 16,  0	/* 56-63 */
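
/*
 * The table can be regenerated (or checked) with a small C program
 * along these lines; this is a sketch, not part of the build:
 *
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		unsigned char tab[64] = { 0 };
 *		int i, n;
 *
 *		for (n = 0; n < 32; n++)
 *			tab[((1u << n) * 0x0450fbafu) >> 26] = n;
 *		for (i = 0; i < 64; i++)
 *			printf("%2d%s", tab[i],
 *			    (i & 7) == 7 ? "\n" : ", ");
 *		return 0;
 *	}
 *
 * Each single-bit value hashes to a distinct top-6-bit index, so the
 * unused slots stay zero.
 */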

/* End of cpuswitch.S */