cpuswitch.S revision 1.3.2.23 1 1.3.2.23 nathanw /* $NetBSD: cpuswitch.S,v 1.3.2.23 2002/09/17 23:50:18 nathanw Exp $ */
2 1.3.2.2 thorpej
3 1.3.2.2 thorpej /*
4 1.3.2.2 thorpej * Copyright (c) 1994-1998 Mark Brinicombe.
5 1.3.2.2 thorpej * Copyright (c) 1994 Brini.
6 1.3.2.2 thorpej * All rights reserved.
7 1.3.2.2 thorpej *
8 1.3.2.2 thorpej * This code is derived from software written for Brini by Mark Brinicombe
9 1.3.2.2 thorpej *
10 1.3.2.2 thorpej * Redistribution and use in source and binary forms, with or without
11 1.3.2.2 thorpej * modification, are permitted provided that the following conditions
12 1.3.2.2 thorpej * are met:
13 1.3.2.2 thorpej * 1. Redistributions of source code must retain the above copyright
14 1.3.2.2 thorpej * notice, this list of conditions and the following disclaimer.
15 1.3.2.2 thorpej * 2. Redistributions in binary form must reproduce the above copyright
16 1.3.2.2 thorpej * notice, this list of conditions and the following disclaimer in the
17 1.3.2.2 thorpej * documentation and/or other materials provided with the distribution.
18 1.3.2.2 thorpej * 3. All advertising materials mentioning features or use of this software
19 1.3.2.2 thorpej * must display the following acknowledgement:
20 1.3.2.2 thorpej * This product includes software developed by Brini.
21 1.3.2.2 thorpej * 4. The name of the company nor the name of the author may be used to
22 1.3.2.2 thorpej * endorse or promote products derived from this software without specific
23 1.3.2.2 thorpej * prior written permission.
24 1.3.2.2 thorpej *
25 1.3.2.2 thorpej * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 1.3.2.2 thorpej * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 1.3.2.2 thorpej * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28 1.3.2.2 thorpej * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
29 1.3.2.2 thorpej * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
30 1.3.2.2 thorpej * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
31 1.3.2.2 thorpej * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 1.3.2.2 thorpej * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 1.3.2.2 thorpej * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 1.3.2.2 thorpej * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 1.3.2.2 thorpej * SUCH DAMAGE.
36 1.3.2.2 thorpej *
37 1.3.2.2 thorpej * RiscBSD kernel project
38 1.3.2.2 thorpej *
39 1.3.2.2 thorpej * cpuswitch.S
40 1.3.2.2 thorpej *
41 1.3.2.2 thorpej * cpu switching functions
42 1.3.2.2 thorpej *
43 1.3.2.2 thorpej * Created : 15/10/94
44 1.3.2.2 thorpej */
45 1.3.2.2 thorpej
46 1.3.2.2 thorpej #include "opt_armfpe.h"
47 1.3.2.2 thorpej
48 1.3.2.2 thorpej #include "assym.h"
49 1.3.2.2 thorpej #include <machine/param.h>
50 1.3.2.2 thorpej #include <machine/cpu.h>
51 1.3.2.2 thorpej #include <machine/frame.h>
52 1.3.2.2 thorpej #include <machine/asm.h>
53 1.3.2.2 thorpej
54 1.3.2.2 thorpej #undef IRQdisable
55 1.3.2.2 thorpej #undef IRQenable
56 1.3.2.2 thorpej
57 1.3.2.2 thorpej /*
58 1.3.2.2 thorpej * New experimental definitions of IRQdisable and IRQenable
59 1.3.2.2 thorpej * These keep FIQ's enabled since FIQ's are special.
60 1.3.2.2 thorpej */
61 1.3.2.2 thorpej
/*
 * NOTE(review): both macros clobber r14 (lr), so they must not be used
 * where the caller still needs its return address in lr.  Only the
 * I32 (IRQ) bit of the CPSR is modified, so FIQs stay enabled.
 */
62 1.3.2.2 thorpej #define IRQdisable \
63 1.3.2.20 thorpej mrs r14, cpsr ; \
64 1.3.2.2 thorpej orr r14, r14, #(I32_bit) ; \
65 1.3.2.20 thorpej msr cpsr_c, r14 ; \
66 1.3.2.2 thorpej 
67 1.3.2.2 thorpej #define IRQenable \
68 1.3.2.20 thorpej mrs r14, cpsr ; \
69 1.3.2.2 thorpej bic r14, r14, #(I32_bit) ; \
70 1.3.2.20 thorpej msr cpsr_c, r14 ; \
71 1.3.2.2 thorpej
72 1.3.2.2 thorpej /*
73 1.3.2.2 thorpej * setrunqueue() and remrunqueue()
74 1.3.2.2 thorpej *
75 1.3.2.2 thorpej * Functions to add and remove a process for the run queue.
76 1.3.2.2 thorpej */
77 1.3.2.2 thorpej
78 1.3.2.2 thorpej 	.text
79 1.3.2.2 thorpej 
/* Literal pool: addresses of the scheduler's run-queue globals. */
80 1.3.2.20 thorpej .Lwhichqs:
81 1.3.2.2 thorpej 	.word	_C_LABEL(sched_whichqs)
82 1.3.2.2 thorpej 
83 1.3.2.20 thorpej .Lqs:
84 1.3.2.2 thorpej 	.word	_C_LABEL(sched_qs)
85 1.3.2.2 thorpej
86 1.3.2.2 thorpej /*
87 1.3.2.2 thorpej * On entry
88 1.3.2.2 thorpej * r0 = lwp
89 1.3.2.2 thorpej */
90 1.3.2.2 thorpej
91 1.3.2.2 thorpej ENTRY(setrunqueue)
92 1.3.2.2 thorpej /*
93 1.3.2.2 thorpej  * Local register usage
94 1.3.2.2 thorpej  * r0 = process
95 1.3.2.2 thorpej  * r1 = queue
96 1.3.2.2 thorpej  * r2 = &qs[queue] and temp
97 1.3.2.2 thorpej  * r3 = temp
98 1.3.2.2 thorpej  * r12 = whichqs
99 1.3.2.2 thorpej  */
/* DIAGNOSTIC: panic if the lwp is already on a queue (l_back != NULL)
 * or is still waiting on something (l_wchan != NULL). */
100 1.3.2.2 thorpej #ifdef DIAGNOSTIC
101 1.3.2.2 thorpej 	ldr	r1, [r0, #(L_BACK)]
102 1.3.2.2 thorpej 	teq	r1, #0x00000000
103 1.3.2.20 thorpej 	bne	.Lsetrunqueue_erg
104 1.3.2.2 thorpej 
105 1.3.2.2 thorpej 	ldr	r1, [r0, #(L_WCHAN)]
106 1.3.2.2 thorpej 	teq	r1, #0x00000000
107 1.3.2.20 thorpej 	bne	.Lsetrunqueue_erg
108 1.3.2.2 thorpej #endif
109 1.3.2.2 thorpej 
110 1.3.2.2 thorpej 	/* Get the priority of the queue */
111 1.3.2.2 thorpej 	ldrb	r1, [r0, #(L_PRIORITY)]
112 1.3.2.2 thorpej 
113 1.3.2.2 thorpej 	/* Indicate that there is a process on this queue */
114 1.3.2.20 thorpej 	ldr	r12, .Lwhichqs
115 1.3.2.20 thorpej 	mov	r1, r1, lsr #2	/* r1 = queue number (priority / 4) */
116 1.3.2.2 thorpej 	ldr	r2, [r12]
117 1.3.2.2 thorpej 	mov	r3, #0x00000001
118 1.3.2.2 thorpej 	mov	r3, r3, lsl r1
119 1.3.2.2 thorpej 	orr	r2, r2, r3
120 1.3.2.2 thorpej 	str	r2, [r12]
121 1.3.2.2 thorpej 
122 1.3.2.2 thorpej 	/* Get the address of the queue */
123 1.3.2.20 thorpej 	ldr	r2, .Lqs
124 1.3.2.2 thorpej 	add	r1, r2, r1, lsl # 3	/* r1 = &qs[queue]; 8 bytes per queue head */
125 1.3.2.2 thorpej 
/* Insert at the tail: l->l_forw = head; old tail = head->l_back;
 * head->l_back = l; old tail->l_forw = l; l->l_back = old tail. */
126 1.3.2.2 thorpej 	/* Hook the process in */
127 1.3.2.2 thorpej 	str	r1, [r0, #(L_FORW)]
128 1.3.2.2 thorpej 	ldr	r2, [r1, #(L_BACK)]
129 1.3.2.2 thorpej 
130 1.3.2.2 thorpej 	str	r0, [r1, #(L_BACK)]
131 1.3.2.2 thorpej #ifdef DIAGNOSTIC
132 1.3.2.2 thorpej 	teq	r2, #0x00000000
133 1.3.2.20 thorpej 	beq	.Lsetrunqueue_erg
134 1.3.2.2 thorpej #endif
135 1.3.2.2 thorpej 	str	r0, [r2, #(L_FORW)]
136 1.3.2.2 thorpej 	str	r2, [r0, #(L_BACK)]
137 1.3.2.2 thorpej 
138 1.3.2.2 thorpej 	mov	pc, lr
139 1.3.2.2 thorpej 
140 1.3.2.2 thorpej #ifdef DIAGNOSTIC
/* Error path: printf the offending lwp/queue then panic. */
141 1.3.2.20 thorpej .Lsetrunqueue_erg:
142 1.3.2.2 thorpej 	mov	r2, r1
143 1.3.2.2 thorpej 	mov	r1, r0
144 1.3.2.22 nathanw 	add	r0, pc, #.Ltext1 - . - 8
145 1.3.2.2 thorpej 	bl	_C_LABEL(printf)
146 1.3.2.2 thorpej 
147 1.3.2.20 thorpej 	ldr	r2, .Lqs
148 1.3.2.2 thorpej 	ldr	r1, [r2]
149 1.3.2.22 nathanw 	add	r0, pc, #.Ltext2 - . - 8
150 1.3.2.2 thorpej 	b	_C_LABEL(panic)
151 1.3.2.2 thorpej 
152 1.3.2.22 nathanw .Ltext1:
153 1.3.2.2 thorpej 	.asciz	"setrunqueue : %08x %08x\n"
154 1.3.2.22 nathanw .Ltext2:
155 1.3.2.2 thorpej 	.asciz	"setrunqueue : [qs]=%08x qs=%08x\n"
156 1.3.2.2 thorpej 	.align	0
157 1.3.2.2 thorpej #endif
158 1.3.2.2 thorpej
159 1.3.2.2 thorpej /*
160 1.3.2.2 thorpej * On entry
161 1.3.2.2 thorpej * r0 = lwp
162 1.3.2.2 thorpej */
163 1.3.2.2 thorpej
164 1.3.2.2 thorpej ENTRY(remrunqueue)
165 1.3.2.2 thorpej /*
166 1.3.2.2 thorpej  * Local register usage
167 1.3.2.2 thorpej  * r0 = oldproc
168 1.3.2.2 thorpej  * r1 = queue
169 1.3.2.2 thorpej  * r2 = &qs[queue] and scratch
170 1.3.2.2 thorpej  * r3 = scratch
171 1.3.2.2 thorpej  * r12 = whichqs
172 1.3.2.2 thorpej  */
173 1.3.2.2 thorpej 
174 1.3.2.2 thorpej 	/* Get the priority of the queue */
175 1.3.2.2 thorpej 	ldrb	r1, [r0, #(L_PRIORITY)]
176 1.3.2.2 thorpej 	mov	r1, r1, lsr #2
177 1.3.2.2 thorpej 
178 1.3.2.2 thorpej 	/* Unhook the process */
179 1.3.2.2 thorpej 	ldr	r2, [r0, #(L_FORW)]
180 1.3.2.2 thorpej 	ldr	r3, [r0, #(L_BACK)]
181 1.3.2.2 thorpej 
182 1.3.2.2 thorpej 	str	r3, [r2, #(L_BACK)]
183 1.3.2.2 thorpej 	str	r2, [r3, #(L_FORW)]
184 1.3.2.2 thorpej 
185 1.3.2.2 thorpej 	/* If the queue is now empty clear the queue not empty flag */
186 1.3.2.2 thorpej 	teq	r2, r3
187 1.3.2.2 thorpej 
188 1.3.2.2 thorpej 	/* Conditionally clear the whichqs bit; r12/r2/r3 are only live here */
189 1.3.2.20 thorpej 	ldreq	r12, .Lwhichqs
190 1.3.2.2 thorpej 	moveq	r3, #0x00000001
191 1.3.2.20 thorpej 	ldreq	r2, [r12]
192 1.3.2.2 thorpej 	moveq	r3, r3, lsl r1
193 1.3.2.2 thorpej 	biceq	r2, r2, r3
194 1.3.2.2 thorpej 	streq	r2, [r12]
195 1.3.2.2 thorpej 
196 1.3.2.2 thorpej 	/* Remove the back pointer for the process */
197 1.3.2.2 thorpej 	mov	r1, #0x00000000
198 1.3.2.2 thorpej 	str	r1, [r0, #(L_BACK)]
199 1.3.2.2 thorpej 
200 1.3.2.2 thorpej 	mov	pc, lr
201 1.3.2.2 thorpej
202 1.3.2.2 thorpej
203 1.3.2.2 thorpej /*
204 1.3.2.2 thorpej * cpuswitch()
205 1.3.2.2 thorpej *
206 1.3.2.2 thorpej * preforms a process context switch.
207 1.3.2.2 thorpej * This function has several entry points
208 1.3.2.2 thorpej */
209 1.3.2.2 thorpej
/* Literal pool for cpu_switch()/cpu_preempt(), plus the storage for
 * curpcb itself (in .data, exported to C via the .global). */
210 1.3.2.20 thorpej .Lcurlwp:
211 1.3.2.9 nathanw 	.word	_C_LABEL(curlwp)
212 1.3.2.2 thorpej 
213 1.3.2.20 thorpej .Lcurpcb:
214 1.3.2.2 thorpej 	.word	_C_LABEL(curpcb)
215 1.3.2.2 thorpej 
216 1.3.2.20 thorpej .Lwant_resched:
217 1.3.2.2 thorpej 	.word	_C_LABEL(want_resched)
218 1.3.2.2 thorpej 
219 1.3.2.20 thorpej .Lcpufuncs:
220 1.3.2.2 thorpej 	.word	_C_LABEL(cpufuncs)
221 1.3.2.2 thorpej 
222 1.3.2.2 thorpej 	.data
223 1.3.2.2 thorpej 	.global	_C_LABEL(curpcb)
224 1.3.2.2 thorpej _C_LABEL(curpcb):
225 1.3.2.2 thorpej 	.word	0x00000000
226 1.3.2.2 thorpej 	.text
227 1.3.2.2 thorpej 
228 1.3.2.20 thorpej .Lblock_userspace_access:
229 1.3.2.2 thorpej 	.word	_C_LABEL(block_userspace_access)
230 1.3.2.2 thorpej 
231 1.3.2.20 thorpej .Lcpu_do_powersave:
232 1.3.2.20 thorpej 	.word	_C_LABEL(cpu_do_powersave)
233 1.3.2.20 thorpej
234 1.3.2.2 thorpej /*
235 1.3.2.2 thorpej * Idle loop, exercised while waiting for a process to wake up.
236 1.3.2.20 thorpej *
237 1.3.2.20 thorpej * NOTE: When we jump back to .Lswitch_search, we must have a
238 1.3.2.20 thorpej * pointer to whichqs in r7, which is what it is when we arrive
239 1.3.2.20 thorpej * here.
240 1.3.2.2 thorpej */
241 1.3.2.7 nathanw /* LINTSTUB: Ignore */
242 1.3.2.4 nathanw ASENTRY_NP(idle)
243 1.3.2.20 thorpej #if defined(LOCKDEBUG)
244 1.3.2.18 thorpej 	bl	_C_LABEL(sched_unlock_idle)
245 1.3.2.18 thorpej #endif
246 1.3.2.20 thorpej 	ldr	r3, .Lcpu_do_powersave
247 1.3.2.20 thorpej 
248 1.3.2.2 thorpej 	/* Enable interrupts */
249 1.3.2.2 thorpej 	IRQenable
250 1.3.2.2 thorpej 
251 1.3.2.20 thorpej 	/* If we don't want to sleep, use a simpler loop. */
252 1.3.2.20 thorpej 	ldr	r3, [r3]		/* r3 = cpu_do_powersave */
253 1.3.2.20 thorpej 	teq	r3, #0
254 1.3.2.20 thorpej 	bne	2f
255 1.3.2.20 thorpej 
/* Busy-poll whichqs (via r7, per the NOTE above) until non-empty. */
256 1.3.2.20 thorpej 	/* Non-powersave idle. */
257 1.3.2.20 thorpej 1:	/* should maybe do uvm pageidlezero stuff here */
258 1.3.2.20 thorpej 	ldr	r3, [r7]		/* r3 = whichqs */
259 1.3.2.20 thorpej 	teq	r3, #0x00000000
260 1.3.2.20 thorpej 	bne	.Lswitch_search
261 1.3.2.20 thorpej 	b	1b
262 1.3.2.2 thorpej 
263 1.3.2.20 thorpej 2:	/* Powersave idle. */
264 1.3.2.20 thorpej 	ldr	r4, .Lcpufuncs
265 1.3.2.20 thorpej 3:	ldr	r3, [r7]		/* r3 = whichqs */
266 1.3.2.2 thorpej 	teq	r3, #0x00000000
267 1.3.2.20 thorpej 	bne	.Lswitch_search
268 1.3.2.20 thorpej 
269 1.3.2.20 thorpej 	/* if saving power, don't want to pageidlezero */
270 1.3.2.20 thorpej 	mov	r0, #0
271 1.3.2.20 thorpej 	add	lr, pc, #3b - . - 8	/* lr = 3b: sleep routine returns into the poll loop */
272 1.3.2.20 thorpej 	ldr	pc, [r4, #(CF_SLEEP)]
273 1.3.2.20 thorpej 	/* loops back around */
274 1.3.2.2 thorpej
275 1.3.2.2 thorpej
276 1.3.2.2 thorpej /*
277 1.3.2.2 thorpej * Find a new lwp to run, save the current context and
278 1.3.2.2 thorpej * load the new context
279 1.3.2.2 thorpej *
280 1.3.2.2 thorpej * Arguments:
281 1.3.2.2 thorpej * r0 'struct lwp *' of the current LWP
282 1.3.2.2 thorpej */
283 1.3.2.2 thorpej
284 1.3.2.2 thorpej ENTRY(cpu_switch)
285 1.3.2.2 thorpej /*
286 1.3.2.2 thorpej  * Local register usage. Some of these registers are out of date.
287 1.3.2.2 thorpej  * r1 = oldlwp
288 1.3.2.2 thorpej  * r2 = spl level
289 1.3.2.2 thorpej  * r3 = whichqs
290 1.3.2.2 thorpej  * r4 = queue
291 1.3.2.2 thorpej  * r5 = &qs[queue]
292 1.3.2.2 thorpej  * r6 = newlwp
293 1.3.2.2 thorpej  * r7 = scratch
294 1.3.2.2 thorpej  */
295 1.3.2.2 thorpej 	stmfd	sp!, {r4-r7, lr}	/* save callee-saved regs + return address */
296 1.3.2.2 thorpej 
297 1.3.2.2 thorpej 	/*
298 1.3.2.2 thorpej 	 * Get the current lwp and indicate that there is no longer
299 1.3.2.19 thorpej 	 * a valid process (curlwp = 0). Zero the current PCB pointer
300 1.3.2.19 thorpej 	 * while we're at it.
301 1.3.2.2 thorpej 	 */
302 1.3.2.20 thorpej 	ldr	r7, .Lcurlwp
303 1.3.2.20 thorpej 	ldr	r6, .Lcurpcb
304 1.3.2.2 thorpej 	mov	r0, #0x00000000
305 1.3.2.19 thorpej 	ldr	r1, [r7]		/* r1 = curproc */
306 1.3.2.19 thorpej 	str	r0, [r7]		/* curproc = NULL */
307 1.3.2.19 thorpej 	str	r0, [r6]		/* curpcb = NULL */
308 1.3.2.2 thorpej 
/* r5 is callee-saved across the C calls below (splx/spl0/_spllower). */
309 1.3.2.16 thorpej 	/* stash the old proc while we call functions */
310 1.3.2.16 thorpej 	mov	r5, r1
311 1.3.2.2 thorpej 
312 1.3.2.18 thorpej #if defined(LOCKDEBUG)
313 1.3.2.18 thorpej 	/* release the sched_lock before handling interrupts */
314 1.3.2.18 thorpej 	bl	_C_LABEL(sched_unlock_idle)
315 1.3.2.18 thorpej #endif
316 1.3.2.18 thorpej 
317 1.3.2.16 thorpej 	/* Lower the spl level to spl0 and get the current spl level. */
318 1.3.2.4 nathanw #ifdef __NEWINTR
319 1.3.2.4 nathanw 	mov	r0, #(IPL_NONE)
320 1.3.2.4 nathanw 	bl	_C_LABEL(_spllower)
321 1.3.2.4 nathanw #else /* ! __NEWINTR */
322 1.3.2.2 thorpej #ifdef spl0
323 1.3.2.2 thorpej 	mov	r0, #(_SPL_0)
324 1.3.2.2 thorpej 	bl	_C_LABEL(splx)
325 1.3.2.2 thorpej #else
326 1.3.2.2 thorpej 	bl	_C_LABEL(spl0)
327 1.3.2.4 nathanw #endif /* spl0 */
328 1.3.2.4 nathanw #endif /* __NEWINTR */
329 1.3.2.2 thorpej 
/* Popped again at .Lswitch_return. */
330 1.3.2.2 thorpej 	/* Push the old spl level onto the stack */
331 1.3.2.2 thorpej 	str	r0, [sp, #-0x0004]!
332 1.3.2.2 thorpej 
333 1.3.2.2 thorpej 	/* First phase : find a new lwp */
334 1.3.2.2 thorpej 
335 1.3.2.20 thorpej 	ldr	r7, .Lwhichqs
336 1.3.2.20 thorpej 
337 1.3.2.17 thorpej 	/* rem: r5 = old lwp */
338 1.3.2.20 thorpej 	/* rem: r7 = &whichqs */
339 1.3.2.7 nathanw
340 1.3.2.20 thorpej .Lswitch_search:
341 1.3.2.2 thorpej 	IRQdisable
342 1.3.2.18 thorpej #if defined(LOCKDEBUG)
343 1.3.2.18 thorpej 	bl	_C_LABEL(sched_lock_idle)
344 1.3.2.18 thorpej #endif
345 1.3.2.2 thorpej 
346 1.3.2.2 thorpej 	/* Do we have any active queues */
347 1.3.2.2 thorpej 	ldr	r3, [r7]
348 1.3.2.2 thorpej 
349 1.3.2.2 thorpej 	/* If not we must idle until we do. */
350 1.3.2.2 thorpej 	teq	r3, #0x00000000
351 1.3.2.4 nathanw 	beq	_ASM_LABEL(idle)
352 1.3.2.2 thorpej 
353 1.3.2.17 thorpej 	/* put old proc back in r1 */
354 1.3.2.17 thorpej 	mov	r1, r5
355 1.3.2.17 thorpej 
356 1.3.2.2 thorpej 	/* rem: r1 = old lwp */
357 1.3.2.2 thorpej 	/* rem: r3 = whichqs */
358 1.3.2.2 thorpej 	/* rem: interrupts are disabled */
359 1.3.2.2 thorpej 
360 1.3.2.2 thorpej 	/*
361 1.3.2.2 thorpej 	 * We have found an active queue. Currently we do not know which queue
362 1.3.2.2 thorpej 	 * is active just that one of them is.
363 1.3.2.2 thorpej 	 */
364 1.3.2.2 thorpej 	/* this is the ffs algorithm devised by d.seal and posted to
365 1.3.2.2 thorpej 	 * comp.sys.arm on 16 Feb 1994.
366 1.3.2.2 thorpej 	 */
/* r0 = lowest set bit of r3 (r3 & -r3). */
367 1.3.2.2 thorpej 	rsb	r5, r3, #0
368 1.3.2.2 thorpej 	ands	r0, r3, r5
369 1.3.2.2 thorpej 
370 1.3.2.20 thorpej 	adr	r5, .Lcpu_switch_ffs_table
371 1.3.2.2 thorpej 
372 1.3.2.2 thorpej 	/* X = R0 */
373 1.3.2.2 thorpej 	orr	r4, r0, r0, lsl #4	/* r4 = X * 0x11 */
374 1.3.2.2 thorpej 	orr	r4, r4, r4, lsl #6	/* r4 = X * 0x451 */
375 1.3.2.2 thorpej 	rsb	r4, r4, r4, lsl #16	/* r4 = X * 0x0450fbaf */
376 1.3.2.2 thorpej 
377 1.3.2.2 thorpej 	/* used further down, saves SA stall */
378 1.3.2.20 thorpej 	ldr	r6, .Lqs
379 1.3.2.2 thorpej 
380 1.3.2.2 thorpej 	/* now lookup in table indexed on top 6 bits of a4 */
381 1.3.2.2 thorpej 	ldrb	r4, [ r5, r4, lsr #26 ]
382 1.3.2.2 thorpej 
383 1.3.2.2 thorpej 	/* rem: r0 = bit mask of chosen queue (1 << r4) */
384 1.3.2.2 thorpej 	/* rem: r1 = old lwp */
385 1.3.2.2 thorpej 	/* rem: r3 = whichqs */
386 1.3.2.2 thorpej 	/* rem: r4 = queue number */
387 1.3.2.2 thorpej 	/* rem: interrupts are disabled */
388 1.3.2.2 thorpej 
389 1.3.2.2 thorpej 	/* Get the address of the queue (&qs[queue]) */
390 1.3.2.2 thorpej 	add	r5, r6, r4, lsl #3
391 1.3.2.2 thorpej 
392 1.3.2.2 thorpej 	/*
393 1.3.2.2 thorpej 	 * Get the lwp from the queue and place the next process in
394 1.3.2.2 thorpej 	 * the queue at the head. This basically unlinks the lwp at
395 1.3.2.2 thorpej 	 * the head of the queue.
396 1.3.2.2 thorpej 	 */
397 1.3.2.2 thorpej 	ldr	r6, [r5, #(L_FORW)]
398 1.3.2.2 thorpej 
399 1.3.2.2 thorpej 	/* rem: r6 = new lwp */
400 1.3.2.2 thorpej 	ldr	r7, [r6, #(L_FORW)]
401 1.3.2.2 thorpej 	str	r7, [r5, #(L_FORW)]
402 1.3.2.2 thorpej 
403 1.3.2.2 thorpej 	/*
404 1.3.2.2 thorpej 	 * Test to see if the queue is now empty. If the head of the queue
405 1.3.2.2 thorpej 	 * points to the queue itself then there are no more lwps in
406 1.3.2.2 thorpej 	 * the queue. We can therefore clear the queue not empty flag held
407 1.3.2.2 thorpej 	 * in r3.
408 1.3.2.2 thorpej 	 */
409 1.3.2.2 thorpej 
410 1.3.2.2 thorpej 	teq	r5, r7
411 1.3.2.2 thorpej 	biceq	r3, r3, r0
412 1.3.2.2 thorpej 
413 1.3.2.2 thorpej 	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED AN MORE */
414 1.3.2.2 thorpej 
415 1.3.2.2 thorpej 	/* Fix the back pointer for the lwp now at the head of the queue. */
416 1.3.2.2 thorpej 	ldr	r0, [r6, #(L_BACK)]
417 1.3.2.2 thorpej 	str	r0, [r7, #(L_BACK)]
418 1.3.2.2 thorpej 
419 1.3.2.2 thorpej 	/* Update the RAM copy of the queue not empty flags word. */
420 1.3.2.20 thorpej 	ldr	r7, .Lwhichqs
421 1.3.2.2 thorpej 	str	r3, [r7]
422 1.3.2.2 thorpej 
423 1.3.2.2 thorpej 	/* rem: r1 = old lwp */
424 1.3.2.2 thorpej 	/* rem: r3 = whichqs - NOT NEEDED ANY MORE */
425 1.3.2.2 thorpej 	/* rem: r4 = queue number - NOT NEEDED ANY MORE */
426 1.3.2.2 thorpej 	/* rem: r6 = new lwp */
427 1.3.2.2 thorpej 	/* rem: interrupts are disabled */
428 1.3.2.2 thorpej 
429 1.3.2.2 thorpej 	/* Clear the want_resched flag */
430 1.3.2.20 thorpej 	ldr	r7, .Lwant_resched
431 1.3.2.4 nathanw 	mov	r0, #0x00000000
432 1.3.2.2 thorpej 	str	r0, [r7]
433 1.3.2.2 thorpej 
434 1.3.2.2 thorpej 	/*
435 1.3.2.2 thorpej 	 * Clear the back pointer of the lwp we have removed from
436 1.3.2.2 thorpej 	 * the head of the queue. The new lwp is isolated now.
437 1.3.2.2 thorpej 	 */
438 1.3.2.2 thorpej 	str	r0, [r6, #(L_BACK)]
439 1.3.2.2 thorpej 
440 1.3.2.18 thorpej #if defined(LOCKDEBUG)
441 1.3.2.18 thorpej 	/*
442 1.3.2.18 thorpej 	 * unlock the sched_lock, but leave interrupts off, for now.
443 1.3.2.18 thorpej 	 */
/* r7 temporarily preserves the old lwp (r1) across the C call. */
444 1.3.2.18 thorpej 	mov	r7, r1
445 1.3.2.18 thorpej 	bl	_C_LABEL(sched_unlock_idle)
446 1.3.2.18 thorpej 	mov	r1, r7
447 1.3.2.18 thorpej #endif
448 1.3.2.18 thorpej
/* cpu_preempt() branches in here with r1 = old lwp, r6 = new lwp. */
449 1.3.2.20 thorpej .Lswitch_resume:
450 1.3.2.2 thorpej 	/* l->l_cpu initialized in fork1() for single-processor */
451 1.3.2.2 thorpej 
452 1.3.2.2 thorpej 	/* Process is now on a processor. */
453 1.3.2.2 thorpej 	mov	r0, #LSONPROC			/* l->l_stat = LSONPROC */
454 1.3.2.3 thorpej 	str	r0, [r6, #(L_STAT)]
455 1.3.2.2 thorpej 
456 1.3.2.9 nathanw 	/* We have a new curlwp now so make a note it */
457 1.3.2.20 thorpej 	ldr	r7, .Lcurlwp
458 1.3.2.2 thorpej 	str	r6, [r7]
459 1.3.2.2 thorpej 
460 1.3.2.2 thorpej 	/* Hook in a new pcb */
461 1.3.2.20 thorpej 	ldr	r7, .Lcurpcb
462 1.3.2.2 thorpej 	ldr	r0, [r6, #(L_ADDR)]
463 1.3.2.2 thorpej 	str	r0, [r7]
464 1.3.2.2 thorpej 
465 1.3.2.2 thorpej 	/* At this point we can allow IRQ's again. */
466 1.3.2.2 thorpej 	IRQenable
467 1.3.2.2 thorpej 
468 1.3.2.2 thorpej 	/* rem: r1 = old lwp */
469 1.3.2.2 thorpej 	/* rem: r4 = return value */
470 1.3.2.2 thorpej 	/* rem: r6 = new process */
471 1.3.2.4 nathanw 	/* rem: interrupts are enabled */
472 1.3.2.2 thorpej 
473 1.3.2.2 thorpej 	/*
474 1.3.2.2 thorpej 	 * If the new process is the same as the process that called
475 1.3.2.2 thorpej 	 * cpu_switch() then we do not need to save and restore any
476 1.3.2.2 thorpej 	 * contexts. This means we can make a quick exit.
477 1.3.2.9 nathanw 	 * The test is simple if curlwp on entry (now in r1) is the
478 1.3.2.2 thorpej 	 * same as the proc removed from the queue we can jump to the exit.
479 1.3.2.2 thorpej 	 */
480 1.3.2.2 thorpej 	teq	r1, r6
481 1.3.2.2 thorpej 	moveq	r4, #0x00000000		/* default to "didn't switch" */
482 1.3.2.20 thorpej 	beq	.Lswitch_return
483 1.3.2.2 thorpej 
484 1.3.2.2 thorpej 	/*
485 1.3.2.2 thorpej 	 * At this point, we are guaranteed to be switching to
486 1.3.2.2 thorpej 	 * a new lwp.
487 1.3.2.2 thorpej 	 */
488 1.3.2.2 thorpej 	mov	r4, #0x00000001
489 1.3.2.2 thorpej 
490 1.3.2.13 thorpej 	/* Remember the old lwp in r0 */
491 1.3.2.13 thorpej 	mov	r0, r1
492 1.3.2.13 thorpej 
493 1.3.2.2 thorpej 	/*
494 1.3.2.12 thorpej 	 * If the old lwp on entry to cpu_switch was zero then the
495 1.3.2.2 thorpej 	 * process that called it was exiting. This means that we do
496 1.3.2.2 thorpej 	 * not need to save the current context. Instead we can jump
497 1.3.2.2 thorpej 	 * straight to restoring the context for the new process.
498 1.3.2.2 thorpej 	 */
499 1.3.2.13 thorpej 	teq	r0, #0x00000000
500 1.3.2.20 thorpej 	beq	.Lswitch_exited
501 1.3.2.2 thorpej 
502 1.3.2.13 thorpej 	/* rem: r0 = old lwp */
503 1.3.2.2 thorpej 	/* rem: r4 = return value */
504 1.3.2.2 thorpej 	/* rem: r6 = new process */
505 1.3.2.4 nathanw 	/* rem: interrupts are enabled */
506 1.3.2.2 thorpej 
507 1.3.2.2 thorpej 	/* Stage two : Save old context */
508 1.3.2.2 thorpej 
509 1.3.2.12 thorpej 	/* Get the user structure for the old lwp. */
510 1.3.2.13 thorpej 	ldr	r1, [r0, #(L_ADDR)]
511 1.3.2.2 thorpej 
512 1.3.2.12 thorpej 	/* Save all the registers in the old lwp's pcb */
513 1.3.2.2 thorpej 	add	r7, r1, #(PCB_R8)
514 1.3.2.2 thorpej 	stmia	r7, {r8-r13}
515 1.3.2.2 thorpej 
516 1.3.2.2 thorpej 	/*
517 1.3.2.21 thorpej 	 * NOTE: We can now use r8-r13 until it is time to restore
518 1.3.2.21 thorpej 	 * them for the new process.
519 1.3.2.21 thorpej 	 */
520 1.3.2.21 thorpej 
521 1.3.2.21 thorpej 	/* Remember the old PCB. */
522 1.3.2.21 thorpej 	mov	r8, r1
523 1.3.2.21 thorpej 
524 1.3.2.21 thorpej 	/* r1 now free! */
525 1.3.2.21 thorpej 
526 1.3.2.21 thorpej 	/* Get the user structure for the new process in r9 */
527 1.3.2.21 thorpej 	ldr	r9, [r6, #(L_ADDR)]
528 1.3.2.21 thorpej
529 1.3.2.21 thorpej 	/*
530 1.3.2.2 thorpej 	 * This can be optimised... We know we want to go from SVC32
531 1.3.2.2 thorpej 	 * mode to UND32 mode
532 1.3.2.2 thorpej 	 */
/* Briefly switch to UND32 mode (IRQs masked) to capture the
 * undefined-mode banked sp into the old PCB. */
533 1.3.2.20 thorpej 	mrs	r3, cpsr
534 1.3.2.2 thorpej 	bic	r2, r3, #(PSR_MODE)
535 1.3.2.2 thorpej 	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
536 1.3.2.20 thorpej 	msr	cpsr_c, r2
537 1.3.2.2 thorpej 
538 1.3.2.21 thorpej 	str	sp, [r8, #(PCB_UND_SP)]
539 1.3.2.2 thorpej 
540 1.3.2.20 thorpej 	msr	cpsr_c, r3		/* Restore the old mode */
541 1.3.2.2 thorpej 
542 1.3.2.12 thorpej 	/* rem: r0 = old lwp */
543 1.3.2.2 thorpej 	/* rem: r4 = return value */
544 1.3.2.2 thorpej 	/* rem: r6 = new process */
545 1.3.2.21 thorpej 	/* rem: r8 = old PCB */
546 1.3.2.21 thorpej 	/* rem: r9 = new PCB */
547 1.3.2.4 nathanw 	/* rem: interrupts are enabled */
548 1.3.2.2 thorpej 
549 1.3.2.2 thorpej 	/* What else needs to be saved Only FPA stuff when that is supported */
550 1.3.2.2 thorpej 
551 1.3.2.2 thorpej 	/* Third phase : restore saved context */
552 1.3.2.2 thorpej 
553 1.3.2.13 thorpej 	/* rem: r0 = old lwp */
554 1.3.2.13 thorpej 	/* rem: r4 = return value */
555 1.3.2.13 thorpej 	/* rem: r6 = new lwp */
556 1.3.2.21 thorpej 	/* rem: r8 = old PCB */
557 1.3.2.21 thorpej 	/* rem: r9 = new PCB */
558 1.3.2.13 thorpej 	/* rem: interrupts are enabled */
559 1.3.2.13 thorpej 
560 1.3.2.13 thorpej 	/*
561 1.3.2.21 thorpej 	 * Get the new L1 table pointer into r11. If we're switching to
562 1.3.2.21 thorpej 	 * an LWP with the same address space as the outgoing one, we can
563 1.3.2.21 thorpej 	 * skip the cache purge and the TTB load.
564 1.3.2.21 thorpej 	 *
565 1.3.2.21 thorpej 	 * To avoid data dep stalls that would happen anyway, we try
566 1.3.2.21 thorpej 	 * and get some useful work done in the mean time.
567 1.3.2.21 thorpej 	 */
568 1.3.2.21 thorpej 	ldr	r10, [r8, #(PCB_PAGEDIR)]	/* r10 = old L1 */
569 1.3.2.21 thorpej 	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */
570 1.3.2.21 thorpej 
571 1.3.2.21 thorpej 	ldr	r3, .Lblock_userspace_access
572 1.3.2.21 thorpej 	mov	r1, #0x00000001
573 1.3.2.21 thorpej 	mov	r2, #0x00000000
574 1.3.2.21 thorpej 
575 1.3.2.21 thorpej 	teq	r10, r11		/* r10 == r11? */
576 1.3.2.21 thorpej 	beq	.Lcs_context_switched	/* yes! */
577 1.3.2.21 thorpej
578 1.3.2.21 thorpej /*
579 1.3.2.13 thorpej * Don't allow user space access between the purge and the switch.
580 1.3.2.13 thorpej */
581 1.3.2.20 thorpej ldr r3, .Lblock_userspace_access
582 1.3.2.13 thorpej mov r1, #0x00000001
583 1.3.2.13 thorpej mov r2, #0x00000000
584 1.3.2.13 thorpej str r1, [r3]
585 1.3.2.2 thorpej
/* Call cpufuncs.cf_idcache_wbinv_all via a computed lr return;
 * r0-r3 are saved around the call since the cpufunc may clobber them. */
586 1.3.2.2 thorpej 	stmfd	sp!, {r0-r3}
587 1.3.2.20 thorpej 	ldr	r1, .Lcpufuncs
588 1.3.2.20 thorpej 	add	lr, pc, #.Lcs_cache_purged - . - 8
589 1.3.2.13 thorpej 	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
590 1.3.2.2 thorpej 
591 1.3.2.20 thorpej .Lcs_cache_purged:
592 1.3.2.2 thorpej 	ldmfd	sp!, {r0-r3}
593 1.3.2.2 thorpej 
/* Also entered from .Lswitch_exited with r2/r3/r9/r11 preloaded. */
594 1.3.2.20 thorpej .Lcs_cache_purge_skipped:
595 1.3.2.2 thorpej 	/* At this point we need to kill IRQ's again. */
596 1.3.2.2 thorpej 	IRQdisable
597 1.3.2.2 thorpej 
598 1.3.2.21 thorpej 	/* rem: r2 = 0 */
599 1.3.2.21 thorpej 	/* rem: r3 = &block_userspace_access */
600 1.3.2.21 thorpej 	/* rem: r4 = return value */
601 1.3.2.21 thorpej 	/* rem: r6 = new lwp */
602 1.3.2.21 thorpej 	/* rem: r9 = new PCB */
603 1.3.2.21 thorpej 	/* rem: r11 == new L1 */
604 1.3.2.21 thorpej 
605 1.3.2.13 thorpej 	/*
606 1.3.2.13 thorpej 	 * Interrupts are disabled so we can allow user space accesses again
607 1.3.2.2 thorpej 	 * as none will occur until interrupts are re-enabled after the
608 1.3.2.2 thorpej 	 * switch.
609 1.3.2.2 thorpej 	 */
610 1.3.2.2 thorpej 	str	r2, [r3]
611 1.3.2.2 thorpej 
612 1.3.2.2 thorpej 	/* Switch the memory to the new process */
613 1.3.2.20 thorpej 	ldr	r3, .Lcpufuncs
614 1.3.2.21 thorpej 	mov	r0, r11
615 1.3.2.20 thorpej 	add	lr, pc, #.Lcs_context_switched - . - 8
616 1.3.2.2 thorpej 	ldr	pc, [r3, #CF_CONTEXT_SWITCH]
617 1.3.2.21 thorpej 
618 1.3.2.20 thorpej .Lcs_context_switched:
619 1.3.2.21 thorpej 	/* rem: r4 = return value */
620 1.3.2.21 thorpej 	/* rem: r6 = new lwp */
621 1.3.2.21 thorpej 	/* rem: r9 = new PCB */
622 1.3.2.21 thorpej 
623 1.3.2.2 thorpej 	/*
624 1.3.2.2 thorpej 	 * This can be optimised... We know we want to go from SVC32
625 1.3.2.2 thorpej 	 * mode to UND32 mode
626 1.3.2.2 thorpej 	 */
/* Switch to UND32 mode to load the new lwp's undefined-mode banked sp. */
627 1.3.2.20 thorpej 	mrs	r3, cpsr
628 1.3.2.2 thorpej 	bic	r2, r3, #(PSR_MODE)
629 1.3.2.2 thorpej 	orr	r2, r2, #(PSR_UND32_MODE)
630 1.3.2.20 thorpej 	msr	cpsr_c, r2
631 1.3.2.2 thorpej 
632 1.3.2.21 thorpej 	ldr	sp, [r9, #(PCB_UND_SP)]
633 1.3.2.2 thorpej 
634 1.3.2.20 thorpej 	msr	cpsr_c, r3		/* Restore the old mode */
635 1.3.2.2 thorpej
636 1.3.2.22 nathanw /* Restore all the save registers */
637 1.3.2.22 nathanw add r7, r1, #PCB_R8
638 1.3.2.22 nathanw ldmia r7, {r8-r13}
639 1.3.2.22 nathanw
640 1.3.2.22 nathanw mov r7, r1 /* preserve PCB pointer */
641 1.3.2.22 nathanw
642 1.3.2.2 thorpej #ifdef ARMFPE
643 1.3.2.21 thorpej add r0, r9, #(USER_SIZE) & 0x00ff
644 1.3.2.2 thorpej add r0, r0, #(USER_SIZE) & 0xff00
645 1.3.2.2 thorpej bl _C_LABEL(arm_fpe_core_changecontext)
646 1.3.2.2 thorpej #endif
647 1.3.2.2 thorpej
648 1.3.2.22 nathanw /* We can enable interrupts again */
649 1.3.2.22 nathanw IRQenable
650 1.3.2.21 thorpej /*
651 1.3.2.21 thorpej * NOTE: We can now no longer use r8-r13.
652 1.3.2.21 thorpej */
653 1.3.2.21 thorpej
654 1.3.2.21 thorpej /* rem: r4 = return value */
655 1.3.2.21 thorpej /* rem: r6 = new lwp */
656 1.3.2.22 nathanw /* rem: r7 = new PCB */
657 1.3.2.21 thorpej
658 1.3.2.22 nathanw /*
659 1.3.2.22 nathanw * Check for restartable atomic sequences (RAS).
660 1.3.2.22 nathanw */
661 1.3.2.22 nathanw
662 1.3.2.23 nathanw ldr r5, [r6, #(L_PROC)]
663 1.3.2.23 nathanw ldr r2, [r5, #(P_NRAS)]
664 1.3.2.22 nathanw ldr r4, [r7, #(PCB_TF)] /* r4 = trapframe (used below) */
665 1.3.2.22 nathanw teq r2, #0 /* p->p_nras == 0? */
666 1.3.2.22 nathanw bne .Lswitch_do_ras /* no, check for one */
667 1.3.2.2 thorpej
668 1.3.2.20 thorpej .Lswitch_return:
669 1.3.2.2 thorpej 
670 1.3.2.2 thorpej 	/* Get the spl level from the stack and update the current spl level */
671 1.3.2.2 thorpej 	ldr	r0, [sp], #0x0004
672 1.3.2.2 thorpej 	bl	_C_LABEL(splx)
673 1.3.2.2 thorpej 
674 1.3.2.2 thorpej 	/* cpu_switch returns 1 == switched, 0 == didn't switch */
675 1.3.2.2 thorpej 	mov	r0, r4
676 1.3.2.2 thorpej 
677 1.3.2.2 thorpej 	/*
678 1.3.2.2 thorpej 	 * Pull the registers that got pushed when either savectx() or
679 1.3.2.2 thorpej 	 * cpu_switch() was called and return.
680 1.3.2.2 thorpej 	 */
681 1.3.2.2 thorpej 	ldmfd	sp!, {r4-r7, pc}
682 1.3.2.2 thorpej 
/* If the resume PC lies inside a registered RAS, rewind it to the
 * sequence start so the atomic sequence restarts from the beginning. */
683 1.3.2.22 nathanw .Lswitch_do_ras:
684 1.3.2.22 nathanw 	ldr	r1, [r4, #(TF_PC)]	/* second ras_lookup() arg */
685 1.3.2.23 nathanw 	mov	r0, r5			/* first ras_lookup() arg */
686 1.3.2.22 nathanw 	bl	_C_LABEL(ras_lookup)
687 1.3.2.22 nathanw 	cmn	r0, #1			/* -1 means "not in a RAS" */
688 1.3.2.22 nathanw 	strne	r0, [r4, #(TF_PC)]
689 1.3.2.22 nathanw 	b	.Lswitch_return
690 1.3.2.22 nathanw 
691 1.3.2.20 thorpej .Lswitch_exited:
692 1.3.2.13 thorpej 	/*
693 1.3.2.13 thorpej 	 * We skip the cache purge because switch_exit()/switch_lwp_exit()
694 1.3.2.20 thorpej 	 * already did it. Load up registers the way .Lcs_cache_purge_skipped
695 1.3.2.13 thorpej 	 * expects. Userspace access already blocked by switch_exit()/
696 1.3.2.13 thorpej 	 * switch_lwp_exit().
697 1.3.2.13 thorpej 	 */
698 1.3.2.21 thorpej 	ldr	r9, [r6, #(L_ADDR)]		/* r9 = new PCB */
699 1.3.2.20 thorpej 	ldr	r3, .Lblock_userspace_access
700 1.3.2.13 thorpej 	mov	r2, #0x00000000
701 1.3.2.21 thorpej 	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */
702 1.3.2.20 thorpej 	b	.Lcs_cache_purge_skipped
703 1.3.2.13 thorpej
704 1.3.2.2 thorpej /*
705 1.3.2.2 thorpej * cpu_preempt(struct lwp *current, struct lwp *next)
706 1.3.2.2 thorpej * Switch to the specified next LWP
707 1.3.2.2 thorpej * Arguments:
708 1.3.2.2 thorpej *
709 1.3.2.2 thorpej * r0 'struct lwp *' of the current LWP
710 1.3.2.2 thorpej * r1 'struct lwp *' of the LWP to switch to
711 1.3.2.2 thorpej */
712 1.3.2.2 thorpej ENTRY(cpu_preempt)
713 1.3.2.2 thorpej stmfd sp!, {r4-r7, lr}
714 1.3.2.2 thorpej
715 1.3.2.10 thorpej /* Lower the spl level to spl0 and get the current spl level. */
716 1.3.2.2 thorpej mov r6, r0 /* save old lwp */
717 1.3.2.15 thorpej mov r5, r1 /* save new lwp */
718 1.3.2.2 thorpej
719 1.3.2.18 thorpej #if defined(LOCKDEBUG)
720 1.3.2.18 thorpej /* release the sched_lock before handling interrupts */
721 1.3.2.18 thorpej bl _C_LABEL(sched_unlock_idle)
722 1.3.2.18 thorpej #endif
723 1.3.2.18 thorpej
724 1.3.2.11 thorpej #ifdef __NEWINTR
725 1.3.2.11 thorpej mov r0, #(IPL_NONE)
726 1.3.2.11 thorpej bl _C_LABEL(_spllower)
727 1.3.2.11 thorpej #else /* ! __NEWINTR */
728 1.3.2.2 thorpej #ifdef spl0
729 1.3.2.2 thorpej mov r0, #(_SPL_0)
730 1.3.2.2 thorpej bl _C_LABEL(splx)
731 1.3.2.2 thorpej #else
732 1.3.2.2 thorpej bl _C_LABEL(spl0)
733 1.3.2.11 thorpej #endif /* spl0 */
734 1.3.2.11 thorpej #endif /* __NEWINTR */
735 1.3.2.2 thorpej
736 1.3.2.2 thorpej /* Push the old spl level onto the stack */
737 1.3.2.2 thorpej str r0, [sp, #-0x0004]!
738 1.3.2.2 thorpej
739 1.3.2.2 thorpej IRQdisable
740 1.3.2.18 thorpej #if defined(LOCKDEBUG)
741 1.3.2.18 thorpej bl _C_LABEL(sched_lock_idle)
742 1.3.2.18 thorpej #endif
743 1.3.2.2 thorpej
744 1.3.2.2 thorpej /* Do we have any active queues? */
745 1.3.2.20 thorpej ldr r7, .Lwhichqs
746 1.3.2.2 thorpej ldr r3, [r7]
747 1.3.2.2 thorpej
748 1.3.2.2 thorpej /* If none, panic! */
749 1.3.2.2 thorpej teq r3, #0x00000000
750 1.3.2.20 thorpej beq .Lpreempt_noqueues
751 1.3.2.2 thorpej
752 1.3.2.15 thorpej mov r0, r6 /* restore old lwp */
753 1.3.2.15 thorpej mov r1, r5 /* restore new lwp */
754 1.3.2.15 thorpej
755 1.3.2.2 thorpej /* rem: r0 = old lwp */
756 1.3.2.2 thorpej /* rem: r1 = new lwp */
757 1.3.2.2 thorpej /* rem: r3 = whichqs */
758 1.3.2.2 thorpej /* rem: r7 = &whichqs */
759 1.3.2.2 thorpej /* rem: interrupts are disabled */
760 1.3.2.2 thorpej
761 1.3.2.2 thorpej /* Compute the queue bit corresponding to the new lwp. */
762 1.3.2.2 thorpej ldrb r4, [r1, #(L_PRIORITY)]
763 1.3.2.2 thorpej mov r2, #0x00000001
764 1.3.2.2 thorpej mov r4, r4, lsr #2 /* queue number */
765 1.3.2.2 thorpej mov r2, r2, lsl r4 /* queue bit */
766 1.3.2.2 thorpej
767 1.3.2.2 thorpej /* rem: r0 = old lwp */
768 1.3.2.2 thorpej /* rem: r1 = new lwp */
769 1.3.2.2 thorpej /* rem: r2 = queue bit */
770 1.3.2.2 thorpej /* rem: r3 = whichqs */
771 1.3.2.2 thorpej /* rem: r4 = queue number */
772 1.3.2.2 thorpej /* rem: r7 = &whichqs */
773 1.3.2.2 thorpej
774 1.3.2.2 thorpej /*
775 1.3.2.2 thorpej * Unlink the lwp from the queue.
776 1.3.2.2 thorpej */
777 1.3.2.2 thorpej ldr r5, [r1, #(L_BACK)] /* r5 = l->l_back */
778 1.3.2.2 thorpej mov r6, #0x00000000
779 1.3.2.2 thorpej str r6, [r1, #(L_BACK)] /* firewall: l->l_back = NULL */
780 1.3.2.2 thorpej ldr r6, [r1, #(L_FORW)] /* r6 = l->l_forw */
781 1.3.2.2 thorpej str r5, [r6, #(L_BACK)] /* r6->l_back = r5 */
782 1.3.2.2 thorpej str r6, [r5, #(L_FORW)] /* r5->l_forw = r6 */
783 1.3.2.2 thorpej
784 1.3.2.2 thorpej teq r5, r6 /* see if queue is empty */
785 1.3.2.2 thorpej biceq r3, r3, r2 /* clear bit if so */
786 1.3.2.2 thorpej streq r3, [r7] /* store it back if so */
787 1.3.2.2 thorpej
788 1.3.2.2 thorpej /* rem: r2 (queue bit) now free */
789 1.3.2.2 thorpej /* rem: r3 (whichqs) now free */
790 1.3.2.2 thorpej /* rem: r7 (&whichqs) now free */
791 1.3.2.2 thorpej
792 1.3.2.2 thorpej /*
793 1.3.2.2 thorpej * Okay, set up registers the way cpu_switch() wants them,
794 1.3.2.2 thorpej * and jump into the middle of it (where we bring up the
795 1.3.2.2 thorpej * new process).
796 1.3.2.2 thorpej */
797 1.3.2.2 thorpej mov r6, r1 /* r6 = new lwp */
798 1.3.2.18 thorpej #if defined(LOCKDEBUG)
799 1.3.2.18 thorpej mov r5, r0 /* preserve old lwp */
800 1.3.2.18 thorpej bl _C_LABEL(sched_unlock_idle)
801 1.3.2.18 thorpej mov r1, r5 /* r1 = old lwp */
802 1.3.2.18 thorpej #else
803 1.3.2.2 thorpej mov r1, r0 /* r1 = old lwp */
804 1.3.2.18 thorpej #endif
805 1.3.2.20 thorpej b .Lswitch_resume
806 1.3.2.2 thorpej
807 1.3.2.20 thorpej .Lpreempt_noqueues:
808 1.3.2.20 thorpej add r0, pc, #.Lpreemptpanic - . - 8
809 1.3.2.2 thorpej bl _C_LABEL(panic)
810 1.3.2.2 thorpej
811 1.3.2.20 thorpej .Lpreemptpanic:
812 1.3.2.2 thorpej .asciz "cpu_preempt: whichqs empty"
813 1.3.2.2 thorpej .align 0
814 1.3.2.2 thorpej
815 1.3.2.7 nathanw /*
816 1.3.2.10 thorpej * void switch_exit(struct lwp *l, struct lwp *l0);
817 1.3.2.10 thorpej * Switch to lwp0's saved context and deallocate the address space and kernel
818 1.3.2.10 thorpej * stack for l. Then jump into cpu_switch(), as if we were in lwp0 all along.
819 1.3.2.7 nathanw */
820 1.3.2.2 thorpej
821 1.3.2.10 thorpej /* LINTSTUB: Func: void switch_exit(struct lwp *l, struct lwp *l0) */
822 1.3.2.2 thorpej ENTRY(switch_exit)
823 1.3.2.2 thorpej /*
824 1.3.2.2 thorpej * r0 = lwp
825 1.3.2.2 thorpej * r1 = lwp0
826 1.3.2.2 thorpej */
827 1.3.2.2 thorpej
/* Keep the exiting lwp in r3; r0-r2 are used as scratch below and r3 is
 * preserved (via the stm/ldm around the cache purge) until the exit2() call. */
828 1.3.2.2 thorpej mov r3, r0
829 1.3.2.2 thorpej
830 1.3.2.2 thorpej /* In case we fault */
/* curlwp = NULL so a fault from here on is not charged to the dying lwp. */
831 1.3.2.20 thorpej ldr r0, .Lcurlwp
832 1.3.2.4 nathanw mov r2, #0x00000000
833 1.3.2.2 thorpej str r2, [r0]
834 1.3.2.2 thorpej
835 1.3.2.20 thorpej /* ldr r0, .Lcurpcb
836 1.3.2.2 thorpej str r2, [r0]*/
837 1.3.2.2 thorpej
838 1.3.2.13 thorpej /*
839 1.3.2.13 thorpej * Don't allow user space access between the purge and the switch.
840 1.3.2.13 thorpej */
/* block_userspace_access = 1; NOTE(review): nothing visible in this chunk
 * clears it again — presumably cpu_switch does, via .Lswitch_search. */
841 1.3.2.20 thorpej ldr r0, .Lblock_userspace_access
842 1.3.2.13 thorpej mov r2, #0x00000001
843 1.3.2.13 thorpej str r2, [r0]
844 1.3.2.13 thorpej
845 1.3.2.2 thorpej /* Switch to lwp0 context */
846 1.3.2.2 thorpej
847 1.3.2.2 thorpej stmfd sp!, {r0-r3}
848 1.3.2.2 thorpej
/*
 * Indirect call through the cpufuncs vector: the return address in lr is
 * built by hand because the callee address comes from a table load into pc.
 * In ARM state a pc read yields the current instruction's address + 8,
 * hence the "- . - 8" correction.
 */
849 1.3.2.20 thorpej ldr r0, .Lcpufuncs
850 1.3.2.20 thorpej add lr, pc, #.Lse_cache_purged - . - 8
851 1.3.2.5 nathanw ldr pc, [r0, #CF_IDCACHE_WBINV_ALL]
852 1.3.2.2 thorpej
853 1.3.2.20 thorpej .Lse_cache_purged:
854 1.3.2.2 thorpej ldmfd sp!, {r0-r3}
855 1.3.2.2 thorpej
856 1.3.2.2 thorpej IRQdisable
857 1.3.2.2 thorpej
/* r2 = lwp0's pcb (l0->l_addr); r0 = its page directory, which is the
 * argument to the cpufunc context-switch hook below. */
858 1.3.2.2 thorpej ldr r2, [r1, #(L_ADDR)]
859 1.3.2.2 thorpej ldr r0, [r2, #(PCB_PAGEDIR)]
860 1.3.2.2 thorpej
861 1.3.2.2 thorpej /* Switch the memory to the new process */
862 1.3.2.20 thorpej ldr r4, .Lcpufuncs
863 1.3.2.20 thorpej add lr, pc, #.Lse_context_switched - . - 8
864 1.3.2.2 thorpej ldr pc, [r4, #CF_CONTEXT_SWITCH]
865 1.3.2.2 thorpej
866 1.3.2.20 thorpej .Lse_context_switched:
867 1.3.2.2 thorpej /* Restore all the save registers */
/* ldmia {r8-r13} includes r13 (sp): after this we run on lwp0's stack,
 * so the dying lwp's kernel stack is no longer referenced. */
868 1.3.2.2 thorpej add r7, r2, #PCB_R8
869 1.3.2.2 thorpej ldmia r7, {r8-r13}
870 1.3.2.2 thorpej
871 1.3.2.2 thorpej /* This is not really needed ! */
872 1.3.2.2 thorpej /* Yes it is for the su and fu routines */
/* curpcb = lwp0's pcb (r2 still holds it). */
873 1.3.2.20 thorpej ldr r0, .Lcurpcb
874 1.3.2.2 thorpej str r2, [r0]
875 1.3.2.2 thorpej
876 1.3.2.2 thorpej IRQenable
877 1.3.2.2 thorpej
878 1.3.2.2 thorpej /* str r3, [sp, #-0x0004]!*/
879 1.3.2.2 thorpej
880 1.3.2.2 thorpej /*
881 1.3.2.2 thorpej * Schedule the vmspace and stack to be freed.
882 1.3.2.2 thorpej */
883 1.3.2.2 thorpej mov r0, r3 /* exit2(l) */
884 1.3.2.2 thorpej bl _C_LABEL(exit2)
885 1.3.2.2 thorpej
886 1.3.2.2 thorpej /* Paranoia */
887 1.3.2.2 thorpej mov r0, #0x00000000
888 1.3.2.20 thorpej ldr r1, .Lcurlwp
889 1.3.2.2 thorpej str r0, [r1]
890 1.3.2.2 thorpej
/*
 * Enter cpu_switch()'s run-queue search as if lwp0 had been running all
 * along, with no outgoing lwp whose context needs saving.
 */
891 1.3.2.20 thorpej ldr r7, .Lwhichqs /* r7 = &whichqs */
892 1.3.2.17 thorpej mov r5, #0x00000000 /* r5 = old lwp = NULL */
893 1.3.2.20 thorpej b .Lswitch_search
894 1.3.2.2 thorpej
895 1.3.2.10 thorpej /*
896 1.3.2.10 thorpej * void switch_lwp_exit(struct lwp *l, struct lwp *l0);
897 1.3.2.10 thorpej * Switch to lwp0's saved context and deallocate the address space and kernel
898 1.3.2.10 thorpej * stack for l. Then jump into cpu_switch(), as if we were in lwp0 all along.
899 1.3.2.10 thorpej */
900 1.3.2.10 thorpej
901 1.3.2.10 thorpej /* LINTSTUB: Func: void switch_lwp_exit(struct lwp *l, struct lwp *l0) */
902 1.3.2.2 thorpej ENTRY(switch_lwp_exit)
903 1.3.2.2 thorpej /*
904 1.3.2.2 thorpej * r0 = lwp
905 1.3.2.2 thorpej * r1 = lwp0
906 1.3.2.2 thorpej */
907 1.3.2.2 thorpej
/* Near-identical twin of switch_exit above; the only functional difference
 * is the final teardown call, lwp_exit2() instead of exit2(). */
908 1.3.2.2 thorpej mov r3, r0
909 1.3.2.2 thorpej
910 1.3.2.2 thorpej /* In case we fault */
/* curlwp = NULL so a fault from here on is not charged to the dying lwp. */
911 1.3.2.2 thorpej mov r2, #0x00000000
912 1.3.2.20 thorpej ldr r0, .Lcurlwp
913 1.3.2.2 thorpej str r2, [r0]
914 1.3.2.2 thorpej
915 1.3.2.20 thorpej /* ldr r0, .Lcurpcb
916 1.3.2.2 thorpej str r2, [r0]*/
917 1.3.2.2 thorpej
918 1.3.2.13 thorpej /*
919 1.3.2.13 thorpej * Don't allow user space access between the purge and the switch.
920 1.3.2.13 thorpej */
921 1.3.2.20 thorpej ldr r0, .Lblock_userspace_access
922 1.3.2.13 thorpej mov r2, #0x00000001
923 1.3.2.13 thorpej str r2, [r0]
924 1.3.2.13 thorpej
925 1.3.2.2 thorpej /* Switch to lwp0 context */
926 1.3.2.2 thorpej
927 1.3.2.2 thorpej stmfd sp!, {r0-r3}
928 1.3.2.2 thorpej
/* Indirect call through the cpufuncs vector; lr is built by hand because
 * the target is loaded straight into pc (pc reads as current insn + 8). */
929 1.3.2.20 thorpej ldr r0, .Lcpufuncs
930 1.3.2.20 thorpej add lr, pc, #.Lsle_cache_purged - . - 8
931 1.3.2.6 thorpej ldr pc, [r0, #CF_IDCACHE_WBINV_ALL]
932 1.3.2.2 thorpej
933 1.3.2.20 thorpej .Lsle_cache_purged:
934 1.3.2.2 thorpej ldmfd sp!, {r0-r3}
935 1.3.2.2 thorpej
936 1.3.2.2 thorpej IRQdisable
937 1.3.2.2 thorpej
/* r2 = lwp0's pcb (l0->l_addr); r0 = its page directory, argument to
 * the cpufunc context-switch hook. */
938 1.3.2.2 thorpej ldr r2, [r1, #(L_ADDR)]
939 1.3.2.2 thorpej ldr r0, [r2, #(PCB_PAGEDIR)]
940 1.3.2.2 thorpej
941 1.3.2.2 thorpej /* Switch the memory to the new process */
942 1.3.2.20 thorpej ldr r4, .Lcpufuncs
943 1.3.2.20 thorpej add lr, pc, #.Lsle_context_switched - . - 8
944 1.3.2.2 thorpej ldr pc, [r4, #CF_CONTEXT_SWITCH]
945 1.3.2.2 thorpej
946 1.3.2.20 thorpej .Lsle_context_switched:
947 1.3.2.2 thorpej /* Restore all the save registers */
/* ldmia {r8-r13} includes r13 (sp): from here we run on lwp0's stack. */
948 1.3.2.2 thorpej add r7, r2, #PCB_R8
949 1.3.2.2 thorpej ldmia r7, {r8-r13}
950 1.3.2.2 thorpej
951 1.3.2.2 thorpej /* This is not really needed ! */
952 1.3.2.2 thorpej /* Yes it is for the su and fu routines */
/* curpcb = lwp0's pcb (r2 still holds it). */
953 1.3.2.20 thorpej ldr r0, .Lcurpcb
954 1.3.2.2 thorpej str r2, [r0]
955 1.3.2.2 thorpej
956 1.3.2.2 thorpej IRQenable
957 1.3.2.2 thorpej
958 1.3.2.2 thorpej /* str r3, [sp, #-0x0004]!*/
959 1.3.2.2 thorpej
960 1.3.2.2 thorpej /*
961 1.3.2.2 thorpej * Schedule the vmspace and stack to be freed.
962 1.3.2.2 thorpej */
963 1.3.2.2 thorpej mov r0, r3 /* lwp_exit2(l) */
964 1.3.2.2 thorpej bl _C_LABEL(lwp_exit2)
965 1.3.2.2 thorpej
966 1.3.2.2 thorpej /* Paranoia */
967 1.3.2.20 thorpej ldr r1, .Lcurlwp
968 1.3.2.4 nathanw mov r0, #0x00000000
969 1.3.2.2 thorpej str r0, [r1]
970 1.3.2.2 thorpej
/* Enter cpu_switch()'s run-queue search with no outgoing lwp to save. */
971 1.3.2.20 thorpej ldr r7, .Lwhichqs /* r7 = &whichqs */
972 1.3.2.17 thorpej mov r5, #0x00000000 /* r5 = old lwp = NULL */
973 1.3.2.20 thorpej b .Lswitch_search
974 1.3.2.2 thorpej
975 1.3.2.7 nathanw /* LINTSTUB: Func: void savectx(struct pcb *pcb) */
/*
 * Snapshot the current context into a pcb: stores r8-r13 (r13 = sp
 * included) into the six-word area at pcb + PCB_R8, then returns.
 */
976 1.3.2.2 thorpej ENTRY(savectx)
977 1.3.2.2 thorpej /*
978 1.3.2.2 thorpej * r0 = pcb
979 1.3.2.2 thorpej */
980 1.3.2.2 thorpej
981 1.3.2.2 thorpej /* Push registers.*/
/* NOTE(review): r4-r7 are not modified below, so saving them looks
 * redundant; the push of lr pairs with the pop into pc for the return.
 * Presumably kept for symmetry with cpu_switch's frame — confirm. */
982 1.3.2.2 thorpej stmfd sp!, {r4-r7, lr}
983 1.3.2.2 thorpej
984 1.3.2.2 thorpej /* Store all the registers in the process's pcb */
/* pcb->pcb_r8..pcb_sp = r8..r13 (sp saved AFTER the push above). */
985 1.3.2.2 thorpej add r2, r0, #(PCB_R8)
986 1.3.2.2 thorpej stmia r2, {r8-r13}
987 1.3.2.2 thorpej
988 1.3.2.2 thorpej /* Pull the regs of the stack */
/* Pop saved lr directly into pc: this is the function return. */
989 1.3.2.2 thorpej ldmfd sp!, {r4-r7, pc}
990 1.3.2.2 thorpej
/*
 * First-entry trampoline for a newly created lwp: call the function in r4
 * with r0 = r5 and r1 = sp, then unwind the frame on the stack and return
 * to the interrupted/new context.
 * NOTE(review): r4/r5 are presumably loaded from the switchframe by
 * cpu_switch before jumping here — confirm against the setup code.
 */
991 1.3.2.2 thorpej ENTRY(proc_trampoline)
/* Build the return address by hand (pc reads as current insn + 8),
 * then jump to the function pointer in r4. */
992 1.3.2.20 thorpej add lr, pc, #(.Ltrampoline_return - . - 8)
993 1.3.2.2 thorpej mov r0, r5
994 1.3.2.2 thorpej mov r1, sp
995 1.3.2.2 thorpej mov pc, r4
996 1.3.2.2 thorpej
997 1.3.2.20 thorpej .Ltrampoline_return:
998 1.3.2.2 thorpej /* Kill irq's */
/* Set the CPSR I bit so no interrupt can arrive while the frame below
 * is being unwound. */
999 1.3.2.20 thorpej mrs r0, cpsr
1000 1.3.2.2 thorpej orr r0, r0, #(I32_bit)
1001 1.3.2.20 thorpej msr cpsr_c, r0
1002 1.3.2.2 thorpej
1003 1.3.2.2 thorpej PULLFRAME
1004 1.3.2.2 thorpej
/* movs pc, lr: exception-style return — also restores SPSR into CPSR. */
1005 1.3.2.2 thorpej movs pc, lr /* Exit */
1006 1.3.2.2 thorpej
/*
 * 64-entry find-first-set lookup table whose values are the standard
 * ffs() results minus one, i.e. 0-based bit indices. Presumably indexed
 * by cpu_switch's queue search (not visible in this chunk) after the
 * queue bitmask is reduced to a 6-bit hash — confirm at .Lswitch_search.
 */
1007 1.3.2.20 thorpej .type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
1008 1.3.2.20 thorpej .Lcpu_switch_ffs_table:
1009 1.3.2.2 thorpej /* same as ffs table but all nums are -1 from that */
1010 1.3.2.2 thorpej /* 0 1 2 3 4 5 6 7 */
1011 1.3.2.2 thorpej .byte 0, 0, 1, 12, 2, 6, 0, 13 /* 0- 7 */
1012 1.3.2.2 thorpej .byte 3, 0, 7, 0, 0, 0, 0, 14 /* 8-15 */
1013 1.3.2.2 thorpej .byte 10, 4, 0, 0, 8, 0, 0, 25 /* 16-23 */
1014 1.3.2.2 thorpej .byte 0, 0, 0, 0, 0, 21, 27, 15 /* 24-31 */
1015 1.3.2.2 thorpej .byte 31, 11, 5, 0, 0, 0, 0, 0 /* 32-39 */
1016 1.3.2.2 thorpej .byte 9, 0, 0, 24, 0, 0, 20, 26 /* 40-47 */
1017 1.3.2.2 thorpej .byte 30, 0, 0, 0, 0, 23, 0, 19 /* 48-55 */
1018 1.3.2.2 thorpej .byte 29, 0, 22, 18, 28, 17, 16, 0 /* 56-63 */
1019