/*	$NetBSD: cpuswitch.S,v 1.3.2.26 2002/12/31 01:03:47 thorpej Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_multiprocessor.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/asm.h>

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled, since FIQs are special.
 */

#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

	.text

.Lwhichqs:
	.word	_C_LABEL(sched_whichqs)

.Lqs:
	.word	_C_LABEL(sched_qs)

/*
 * cpuswitch()
 *
 * Performs a process context switch.
 * This function has several entry points.
 */

#ifdef MULTIPROCESSOR
.Lcpu_info_store:
	.word	_C_LABEL(cpu_info_store)
.Lcurlwp:
	/* FIXME: This is bogus in the general case. */
	.word	_C_LABEL(cpu_info_store) + CI_CURLWP

.Lcurpcb:
	.word	_C_LABEL(cpu_info_store) + CI_CURPCB
#else
.Lcurlwp:
	.word	_C_LABEL(curlwp)

.Lcurpcb:
	.word	_C_LABEL(curpcb)
#endif

.Lwant_resched:
	.word	_C_LABEL(want_resched)

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#ifndef MULTIPROCESSOR
	.data
	.global	_C_LABEL(curpcb)
_C_LABEL(curpcb):
	.word	0x00000000
	.text
#endif

.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)

.Lcpu_do_powersave:
	.word	_C_LABEL(cpu_do_powersave)

/*
 * Idle loop, exercised while waiting for a process to wake up.
 *
 * NOTE: When we jump back to .Lswitch_search, we must have a
 * pointer to whichqs in r7, which is what it is when we arrive
 * here.
 */
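/*
 * A rough C-like sketch of the two idle variants below (illustrative
 * only; "whichqs" is sched_whichqs and cpu_sleep() stands for the
 * cpufuncs CF_SLEEP hook):
 *
 *	if (!cpu_do_powersave) {
 *		while (whichqs == 0)
 *			continue;
 *	} else {
 *		while (whichqs == 0)
 *			cpu_sleep(0);
 *	}
 *	goto switch_search;
 */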
/* LINTSTUB: Ignore */
ASENTRY_NP(idle)
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_unlock_idle)
#endif
	ldr	r3, .Lcpu_do_powersave

	/* Enable interrupts */
	IRQenable

	/* If we don't want to sleep, use a simpler loop. */
	ldr	r3, [r3]		/* r3 = cpu_do_powersave */
	teq	r3, #0
	bne	2f

	/* Non-powersave idle. */
1:	/* should maybe do uvm pageidlezero stuff here */
	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search
	b	1b

2:	/* Powersave idle. */
	ldr	r4, .Lcpufuncs
3:	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search

	/* if saving power, don't want to pageidlezero */
	mov	r0, #0
	adr	lr, 3b
	ldr	pc, [r4, #(CF_SLEEP)]
	/* loops back around */


/*
 * Find a new lwp to run, save the current context and
 * load the new context
 *
 * Arguments:
 *	r0	'struct lwp *' of the current LWP
 */

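/*
 * As a hedged, C-like sketch (illustrative only, not the literal code;
 * helper names here are descriptive rather than real functions),
 * cpu_switch() does roughly:
 *
 *	curlwp = NULL; curpcb = NULL;
 *	s = lower spl to IPL_NONE;		(old level kept on the stack)
 *	while (sched_whichqs == 0)
 *		idle();
 *	q = lowest set bit of sched_whichqs;	(highest-priority queue)
 *	new = head of sched_qs[q];		(unlink it, fix whichqs)
 *	want_resched = 0;
 *	new->l_stat = LSONPROC; curlwp = new; curpcb = new->l_addr;
 *	if (new == old)
 *		{ splx(s); return 0; }
 *	save old pcb; switch L1 table if the address space differs;
 *	restore new pcb; handle any restartable atomic sequence;
 *	splx(s); return 1;
 */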
ENTRY(cpu_switch)
/*
 * Local register usage. Some of these registers are out of date.
 * r1 = oldlwp
 * r2 = spl level
 * r3 = whichqs
 * r4 = queue
 * r5 = &qs[queue]
 * r6 = newlwp
 * r7 = scratch
 */
	stmfd	sp!, {r4-r7, lr}

	/*
	 * Get the current lwp and indicate that there is no longer
	 * a valid lwp (curlwp = NULL). Zero the current PCB pointer
	 * while we're at it.
	 */
	ldr	r7, .Lcurlwp
	ldr	r6, .Lcurpcb
	mov	r0, #0x00000000
	ldr	r1, [r7]		/* r1 = curlwp */
	str	r0, [r7]		/* curlwp = NULL */
	str	r0, [r6]		/* curpcb = NULL */

	/* stash the old lwp while we call functions */
	mov	r5, r1

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	/* First phase : find a new lwp */

	ldr	r7, .Lwhichqs

	/* rem: r5 = old lwp */
	/* rem: r7 = &whichqs */

.Lswitch_search:
	IRQdisable
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues? */
	ldr	r3, [r7]

	/* If not, we must idle until we do. */
	teq	r3, #0x00000000
	beq	_ASM_LABEL(idle)

	/* put the old lwp back in r1 */
	mov	r1, r5

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: interrupts are disabled */

	/*
	 * We have found an active queue. Currently we do not know which
	 * queue is active, just that one of them is.
	 */
	/*
	 * This is the ffs algorithm devised by d.seal and posted to
	 * comp.sys.arm on 16 Feb 1994.
	 */
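	/*
	 * In C this is roughly (illustrative only):
	 *
	 *	bit  = whichqs & -whichqs;	(isolate the lowest set bit)
	 *	qnum = cpu_switch_ffs_table[(bit * 0x0450fbaf) >> 26];
	 *
	 * The multiply is decomposed into the shift-and-add steps below
	 * (x0x11, x0x41, x0xffff); the orrs act as adds because the
	 * isolated input has only a single bit set.
	 */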
	rsb	r5, r3, #0
	ands	r0, r3, r5

	adr	r5, .Lcpu_switch_ffs_table

	/* X = R0 */
	orr	r4, r0, r0, lsl #4	/* r4 = X * 0x11 */
	orr	r4, r4, r4, lsl #6	/* r4 = X * 0x451 */
	rsb	r4, r4, r4, lsl #16	/* r4 = X * 0x0450fbaf */

	/* used further down, saves SA stall */
	ldr	r6, .Lqs

	/* now lookup in table indexed on top 6 bits of r4 */
	ldrb	r4, [ r5, r4, lsr #26 ]

	/* rem: r0 = bit mask of chosen queue (1 << r4) */
	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: interrupts are disabled */

	/* Get the address of the queue (&qs[queue]) */
	add	r5, r6, r4, lsl #3

	/*
	 * Get the lwp from the queue and place the next lwp in
	 * the queue at the head. This basically unlinks the lwp at
	 * the head of the queue.
	 */
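	/*
	 * A C-like sketch of the unlink (illustrative only; the queue
	 * head is addressed with the same L_FORW/L_BACK offsets as an
	 * lwp):
	 *
	 *	new = q->l_forw;		(lwp at the head)
	 *	q->l_forw = new->l_forw;	(head now points past it)
	 *	if (new->l_forw == q)		(queue drained?)
	 *		whichqs &= ~bit;
	 *	new->l_forw->l_back = new->l_back;
	 *	new->l_back = NULL;		(isolate the new lwp)
	 */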
	ldr	r6, [r5, #(L_FORW)]

	/* rem: r6 = new lwp */
	ldr	r7, [r6, #(L_FORW)]
	str	r7, [r5, #(L_FORW)]

	/*
	 * Test to see if the queue is now empty. If the head of the queue
	 * points to the queue itself, then there are no more lwps in
	 * the queue. We can therefore clear the queue not empty flag held
	 * in r3.
	 */

	teq	r5, r7
	biceq	r3, r3, r0

	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED ANY MORE */

	/* Fix the back pointer for the lwp now at the head of the queue. */
	ldr	r0, [r6, #(L_BACK)]
	str	r0, [r7, #(L_BACK)]

	/* Update the RAM copy of the queue not empty flags word. */
	ldr	r7, .Lwhichqs
	str	r3, [r7]

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs - NOT NEEDED ANY MORE */
	/* rem: r4 = queue number - NOT NEEDED ANY MORE */
	/* rem: r6 = new lwp */
	/* rem: interrupts are disabled */

	/* Clear the want_resched flag */
	ldr	r7, .Lwant_resched
	mov	r0, #0x00000000
	str	r0, [r7]

	/*
	 * Clear the back pointer of the lwp we have removed from
	 * the head of the queue. The new lwp is isolated now.
	 */
	str	r0, [r6, #(L_BACK)]

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	/*
	 * Unlock the sched_lock, but leave interrupts off, for now.
	 */
	mov	r7, r1
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r7
#endif

.Lswitch_resume:
#ifdef MULTIPROCESSOR
	/* XXX use curcpu() */
	ldr	r0, .Lcpu_info_store
	str	r0, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

	/* The lwp is now on a processor. */
	mov	r0, #LSONPROC		/* l->l_stat = LSONPROC */
	str	r0, [r6, #(L_STAT)]

	/* We have a new curlwp now, so make a note of it */
	ldr	r7, .Lcurlwp
	str	r6, [r7]

	/* Hook in a new pcb */
	ldr	r7, .Lcurpcb
	ldr	r0, [r6, #(L_ADDR)]
	str	r0, [r7]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r1 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/*
	 * If the new lwp is the same as the lwp that called
	 * cpu_switch() then we do not need to save and restore any
	 * contexts. This means we can make a quick exit.
	 * The test is simple: if curlwp on entry (now in r1) is the
	 * same as the lwp removed from the queue, we can jump to the exit.
	 */
	teq	r1, r6
	moveq	r4, #0x00000000		/* default to "didn't switch" */
	beq	.Lswitch_return

	/*
	 * At this point, we are guaranteed to be switching to
	 * a new lwp.
	 */
	mov	r4, #0x00000001

	/* Remember the old lwp in r0 */
	mov	r0, r1

	/*
	 * If the old lwp on entry to cpu_switch was zero, then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r0, #0x00000000
	beq	.Lswitch_exited

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Stage two : Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r1, [r0, #(L_ADDR)]

	/* Save all the registers in the old lwp's pcb */
	add	r7, r1, #(PCB_R8)
	stmia	r7, {r8-r13}

	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* Remember the old PCB. */
	mov	r8, r1

	/* r1 now free! */

	/* Get the user structure for the new lwp in r9 */
	ldr	r9, [r6, #(L_ADDR)]

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
	msr	cpsr_c, r2

	str	sp, [r8, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/* What else needs to be saved? Only FPA stuff, when that is supported. */

	/* Third phase : restore saved context */

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/*
	 * Get the new L1 table pointer into r11. If we're switching to
	 * an LWP with the same address space as the outgoing one, we can
	 * skip the cache purge and the TTB load.
	 *
	 * To avoid data dep stalls that would happen anyway, we try
	 * to get some useful work done in the meantime.
	 */
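	/*
	 * In outline (a C-like sketch, illustrative only; the two calls
	 * stand for the cpufuncs CF_IDCACHE_WBINV_ALL and
	 * CF_CONTEXT_SWITCH hooks used below):
	 *
	 *	if (old_pcb->pcb_pagedir != new_pcb->pcb_pagedir) {
	 *		block_userspace_access = 1;
	 *		idcache_wbinv_all();	(write back + invalidate)
	 *		disable IRQs;
	 *		block_userspace_access = 0;
	 *		context_switch(new_L1);	(load the new TTB)
	 *	}
	 *	then restore the new lwp's pcb state below.
	 */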
	ldr	r10, [r8, #(PCB_PAGEDIR)]	/* r10 = old L1 */
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */

	ldr	r3, .Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000

	teq	r10, r11			/* r10 == r11? */
	beq	.Lcs_context_switched		/* yes! */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r3, .Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000
	str	r1, [r3]

	stmfd	sp!, {r0-r3}
	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
	ldmfd	sp!, {r0-r3}

.Lcs_cache_purge_skipped:
	/* At this point we need to kill IRQs again. */
	IRQdisable

	/* rem: r2 = 0 */
	/* rem: r3 = &block_userspace_access */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */
	/* rem: r11 = new L1 */

	/*
	 * Interrupts are disabled, so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
	str	r2, [r3]

	/* Switch the memory to the new process */
	ldr	r3, .Lcpufuncs
	mov	r0, r11
	mov	lr, pc
	ldr	pc, [r3, #CF_CONTEXT_SWITCH]

.Lcs_context_switched:
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE)
	msr	cpsr_c, r2

	ldr	sp, [r9, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

	/* Restore all the saved registers */
	add	r7, r9, #PCB_R8
	ldmia	r7, {r8-r13}

	sub	r7, r7, #PCB_R8		/* restore PCB pointer */

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

#ifdef ARMFPE
	add	r0, r7, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* We can enable interrupts again */
	IRQenable

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

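	/*
	 * Equivalent C-like logic (illustrative only):
	 *
	 *	if (p->p_nras != 0) {
	 *		pc = ras_lookup(p, tf->tf_pc);
	 *		if (pc != (caddr_t)-1)
	 *			tf->tf_pc = pc;	(restart the sequence)
	 *	}
	 */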
	ldr	r2, [r5, #(P_NRAS)]
	ldr	r4, [r7, #(PCB_TF)]	/* r4 = trapframe (used below) */
	teq	r2, #0			/* p->p_nras == 0? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:

	/* Get the spl level from the stack and update the current spl level */
	ldr	r0, [sp], #0x0004
	bl	_C_LABEL(splx)

	/* cpu_switch returns 1 == switched, 0 == didn't switch */
	mov	r0, r4

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}

.Lswitch_do_ras:
	ldr	r1, [r4, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r4, #(TF_PC)]
	b	.Lswitch_return

.Lswitch_exited:
	/*
	 * We skip the cache purge because switch_exit()/switch_lwp_exit()
	 * already did it. Load up registers the way .Lcs_cache_purge_skipped
	 * expects. Userspace access is already blocked by switch_exit()/
	 * switch_lwp_exit().
	 */
	ldr	r9, [r6, #(L_ADDR)]		/* r9 = new PCB */
	ldr	r3, .Lblock_userspace_access
	mov	r2, #0x00000000
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */
	b	.Lcs_cache_purge_skipped

/*
 * cpu_switchto(struct lwp *current, struct lwp *next)
 * Switch to the specified next LWP.
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 */
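/*
 * In effect (illustrative sketch only):
 *
 *	cpu_switchto(cur, next):
 *		lower spl to IPL_NONE, pushing the old level;
 *		disable IRQs;
 *		r1 = cur, r6 = next;
 *		goto .Lswitch_resume;	(shares cpu_switch()'s tail,
 *					 skipping the run-queue search)
 */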
ENTRY(cpu_switchto)
	stmfd	sp!, {r4-r7, lr}

	mov	r6, r0			/* save old lwp */
	mov	r5, r1			/* save new lwp */

#if defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	mov	r0, r6			/* restore old lwp */
	mov	r1, r5			/* restore new lwp */

	/* rem: r0 = old lwp */
	/* rem: r1 = new lwp */
	/* rem: interrupts are disabled */

	/*
	 * Okay, set up registers the way cpu_switch() wants them,
	 * and jump into the middle of it (where we bring up the
	 * new process).
	 */
	mov	r6, r1			/* r6 = new lwp */
#if defined(LOCKDEBUG)
	mov	r5, r0			/* preserve old lwp */
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r5			/* r1 = old lwp */
#else
	mov	r1, r0			/* r1 = old lwp */
#endif
	b	.Lswitch_resume

/*
 * void switch_exit(struct lwp *l, struct lwp *l0);
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l. Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_exit(struct lwp *l, struct lwp *l0) */
ENTRY(switch_exit)
	/*
	 * r0 = lwp
	 * r1 = lwp0
	 */

	mov	r3, r0

	/* In case we fault */
	ldr	r0, .Lcurlwp
	mov	r2, #0x00000000
	str	r2, [r0]

/*	ldr	r0, .Lcurpcb
	str	r2, [r0]*/

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, .Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to lwp0 context */

	stmfd	sp!, {r0-r3}

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

	ldmfd	sp!, {r0-r3}

	IRQdisable

	ldr	r2, [r1, #(L_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* This is not really needed! */
	/* Yes it is, for the su and fu routines. */
	ldr	r0, .Lcurpcb
	str	r2, [r0]

	IRQenable

/*	str	r3, [sp, #-0x0004]!*/

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* exit2(l) */
	bl	_C_LABEL(exit2)

	/* Paranoia */
	mov	r0, #0x00000000
	ldr	r1, .Lcurlwp
	str	r0, [r1]

	ldr	r7, .Lwhichqs		/* r7 = &whichqs */
	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	.Lswitch_search

/*
 * void switch_lwp_exit(struct lwp *l, struct lwp *l0);
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l. Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_lwp_exit(struct lwp *l, struct lwp *l0) */
ENTRY(switch_lwp_exit)
	/*
	 * r0 = lwp
	 * r1 = lwp0
	 */

	mov	r3, r0

	/* In case we fault */
	mov	r2, #0x00000000
	ldr	r0, .Lcurlwp
	str	r2, [r0]

/*	ldr	r0, .Lcurpcb
	str	r2, [r0]*/

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, .Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to lwp0 context */

	stmfd	sp!, {r0-r3}

	ldr	r0, .Lcpufuncs
	add	lr, pc, #.Lsle_cache_purged - . - 8
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

.Lsle_cache_purged:
	ldmfd	sp!, {r0-r3}

	IRQdisable

	ldr	r2, [r1, #(L_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, .Lcpufuncs
	add	lr, pc, #.Lsle_context_switched - . - 8
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

.Lsle_context_switched:
	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* This is not really needed! */
	/* Yes it is, for the su and fu routines. */
	ldr	r0, .Lcurpcb
	str	r2, [r0]

	IRQenable

/*	str	r3, [sp, #-0x0004]!*/

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* lwp_exit2(l) */
	bl	_C_LABEL(lwp_exit2)

	/* Paranoia */
	ldr	r1, .Lcurlwp
	mov	r0, #0x00000000
	str	r0, [r1]

	ldr	r7, .Lwhichqs		/* r7 = &whichqs */
	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	.Lswitch_search

/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	/*
	 * r0 = pcb
	 */

	/* Push registers. */
	stmfd	sp!, {r4-r7, lr}

	/* Store all the registers in the process's pcb */
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}

	/* Pull the regs off the stack */
	ldmfd	sp!, {r4-r7, pc}

ENTRY(proc_trampoline)
#ifdef MULTIPROCESSOR
	bl	_C_LABEL(proc_trampoline_mp)
#endif
	mov	r0, r5
	mov	r1, sp
	mov	lr, pc
	mov	pc, r4

	/* Kill IRQs */
	mrs	r0, cpsr
	orr	r0, r0, #(I32_bit)
	msr	cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

	.type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
.Lcpu_switch_ffs_table:
/* same as the ffs table, but all numbers are -1 from that */
/*               0   1   2   3   4   5   6   7           */
	.byte	 0,  0,  1, 12,  2,  6,  0, 13	/*  0- 7 */
	.byte	 3,  0,  7,  0,  0,  0,  0, 14	/*  8-15 */
	.byte	10,  4,  0,  0,  8,  0,  0, 25	/* 16-23 */
	.byte	 0,  0,  0,  0,  0, 21, 27, 15	/* 24-31 */
	.byte	31, 11,  5,  0,  0,  0,  0,  0	/* 32-39 */
	.byte	 9,  0,  0, 24,  0,  0, 20, 26	/* 40-47 */
	.byte	30,  0,  0,  0,  0, 23,  0, 19	/* 48-55 */
	.byte	29,  0, 22, 18, 28, 17, 16,  0	/* 56-63 */