/*	$NetBSD: subr.S,v 1.6 2003/01/19 22:29:24 ragge Exp $	*/

/*
 * Copyright (c) 1994 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

#include "assym.h"
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_ibcs2.h"
#ifdef COMPAT_IBCS2
#include <compat/ibcs2/ibcs2_syscall.h>
#endif
#include "opt_compat_ultrix.h"
#ifdef COMPAT_ULTRIX
#include <compat/ultrix/ultrix_syscall.h>
#endif

#define JSBENTRY(x)	.globl x ; .align 2 ; x :
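
/*
 * For illustration, JSBENTRY(Swtch) expands to:
 *	.globl Swtch ; .align 2 ; Swtch :
 * i.e. an aligned global label for routines entered via jsb.
 */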

	.text

#ifdef KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky: the kernel is not loaded at its correct
 * address, so a small copy routine must first be relocated out of the
 * way; it then copies the kernel into place and jumps back to the
 * correct address.
 */
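/*
 * A sketch of the dance, read from the code below (hedged, not
 * authoritative): the copy routine cps..cpe is first copied up to
 * physical 0x300000 and entered via rei; it then moves the kernel
 * image down 0x200 bytes (0x80000200 -> 0x80000000), clears bss
 * (_edata.._end) and reis back to label 2 at its relocated address.
 */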
/* Copy routine */
cps:
2:	movb	(%r0)+,(%r1)+
	cmpl	%r0,%r7
	bneq	2b

3:	clrb	(%r1)+
	incl	%r0
	cmpl	%r0,%r6
	bneq	3b
	clrl	-(%sp)
	movl	%sp,%ap
	movl	$_cca,%r7
	movl	%r8,(%r7)
	movpsl	-(%sp)
	pushl	%r2
	rei
cpe:

/* Copy the copy routine */
1:	movab	cps,%r0
	movab	cpe,%r1
	movl	$0x300000,%sp
	movl	%sp,%r3
4:	movb	(%r0)+,(%r3)+
	cmpl	%r0,%r1
	bneq	4b
	movl	%r7,%r8
/* Ok, copy routine copied, set registers and rei */
	movab	_edata,%r7
	movab	_end,%r6
	movl	$0x80000000,%r1
	movl	$0x80000200,%r0
	subl3	$0x200,%r6,%r9
	movab	2f,%r2
	subl2	$0x200,%r2
	movpsl	-(%sp)
	pushab	4(%sp)
	rei

/*
 * First entry routine from boot. This should be in a file called locore.
 */
JSBENTRY(start)
	brb	1b				# Netbooted kernels start here
#else
ASENTRY(start, 0)
#endif
2:	bisl3	$0x80000000,%r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000			# Push a nice PSL
	pushl	$to				# Address to jump to
	rei					# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)		# Save all regs in panic
	cmpb	(%ap),$3			# symbol info present?
	blssu	3f				# nope, skip
	bisl3	$0x80000000,8(%ap),_C_LABEL(symtab_start)
						#   save start of symtab
	movl	12(%ap),_C_LABEL(symtab_nsyms)	#   save number of symbols
	bisl3	$0x80000000,%r9,_C_LABEL(symtab_end)
						#   save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,%r0	# Round symbol table end
	bicl3	$0x3ff,%r0,_C_LABEL(proc0paddr)	# save proc0 uarea pointer
	bicl3	$0x80000000,_C_LABEL(proc0paddr),%r0 # get phys proc0 uarea addr
	mtpr	%r0,$PR_PCBB			# Save in IPR PCBB
	addl3	$USPACE,_C_LABEL(proc0paddr),%r0 # Get kernel stack top
	mtpr	%r0,$PR_KSP			# put in IPR KSP
	movl	%r0,_C_LABEL(Sysmap)		# SPT start addr after KSP
	movl	_C_LABEL(proc0paddr),%r0	# get PCB virtual address
	movab	IFTRAP(%r0),4(%r0)		# Save trap address in ESP
	mtpr	4(%r0),$PR_ESP			# Put it in ESP also

# Set some registers in known state
	movl	_C_LABEL(proc0paddr),%r0
	clrl	P0LR(%r0)
	clrl	P1LR(%r0)
	mtpr	$0,$PR_P0LR
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,%r1
	movl	%r1,P0BR(%r0)
	movl	%r1,P1BR(%r0)
	mtpr	%r1,$PR_P0BR
	mtpr	%r1,$PR_P1BR
	clrl	IFTRAP(%r0)
	mtpr	$0,$PR_SCBB

# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(%ap)				# Any arguments?
	bneq	1f				# Yes, called from new boot
	movl	%r11,_C_LABEL(boothowto)	# Howto boot (single etc...)
#	movl	%r10,_C_LABEL(bootdev)		# uninteresting, will complain
	movl	%r8,_C_LABEL(avail_end)		# Usable memory (from VMB)
	clrl	-(%sp)				# Have no RPB
	brb	2f
#endif

1:	pushl	4(%ap)				# Address of old rpb
2:	calls	$1,_C_LABEL(_start)		# Jump away.
	/* NOTREACHED */


/*
 * Signal handler code.
 */
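/*
 * C-level sketch of the trampoline below (hypothetical names; the
 * real stack frame is built by sendsig()):
 *
 *	void sigcode(int sig, int code, struct sigcontext *scp)
 *	{
 *		(*handler)(sig, code, scp);	-- calls $3,(%r0)
 *		__sigreturn14(scp);		-- chmk $SYS___sigreturn14
 *		_exit(code);			-- reached only on failure
 *	}
 */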

	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(upcallcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS___sigreturn14
	chmk	$SYS_exit
	halt

/*
 * Trampoline for SA upcalls. This would be totally unnecessary if we
 * didn't need to account for the saved registers in the callee.
 */
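/*
 * A hedged note: callg reuses the argument list that the kernel
 * already built on the user stack, so the upcall handler whose
 * address arrives in %r0 is entered as by a normal CALLG, with the
 * callee's register-save mask honoured.
 */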
_C_LABEL(upcallcode):
	callg	(%sp),(%r0)
	halt
_C_LABEL(esigcode):

#ifdef COMPAT_IBCS2
	.align	2
	.globl	_C_LABEL(ibcs2_sigcode),_C_LABEL(ibcs2_esigcode)
_C_LABEL(ibcs2_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(ibcs2_esigcode):
#endif /* COMPAT_IBCS2 */

#ifdef COMPAT_ULTRIX
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$ULTRIX_SYS_sigreturn
	chmk	$SYS_exit
	halt
_C_LABEL(ultrix_esigcode):
#endif

	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
_C_LABEL(idsptch):	pushr	$0x3f
	.word	0x9f16			# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0			# the callback interrupt routine
	.long	0			# its argument
	.long	0			# ptr to correspond evcnt struct
_C_LABEL(eidsptch):
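
/*
 * Seen from C, the three longwords behind the jsb form roughly
 * (a hypothetical struct for illustration; consumers patch the
 * longwords in their private copy directly):
 *
 *	struct idspvec {
 *		void (*func)(void *);	-- callback interrupt routine
 *		void *arg;		-- its argument
 *		struct evcnt *ev;	-- event counter, may be NULL
 *	};
 */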

_C_LABEL(cmn_idsptch):
	movl	(%sp)+,%r0		# get pointer to idspvec
	movl	8(%r0),%r1		# get evcnt pointer
	beql	1f			# no ptr, skip increment
	incl	EV_COUNT(%r1)		# increment low longword
	adwc	$0,EV_COUNT+4(%r1)	# add any carry to hi longword
1:	incl	_C_LABEL(uvmexp)+UVME_INTRS # increment uvmexp.intrs
	pushl	4(%r0)			# push argument
	calls	$1,*(%r0)		# call interrupt routine
	popr	$0x3f			# pop registers
	rei				# return from interrupt

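/*
 * badaddr() probes an address at a given width and reports whether
 * the access machine checks.  C-level contract (a sketch):
 *
 *	int badaddr(caddr_t addr, int size);	-- size is 1, 2 or 4
 *	-- returns 0 if readable, 1 if the probe machine checked
 *
 * The machine check handler resumes at the label stored in memtest.
 */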
ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,%r0		# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(%ap),%r2		# First argument, the address
	movl	8(%ap),%r1		# Sec arg, b,w,l
	pushl	%r0			# Save old IPL
	clrl	%r3
	movab	4f,_C_LABEL(memtest)	# Set the return address

	caseb	%r1,$1,$4		# What is the size
1:	.word	1f-1b
	.word	2f-1b
	.word	3f-1b			# This is unused
	.word	3f-1b

1:	movb	(%r2),%r1		# Test a byte
	brb	5f

2:	movw	(%r2),%r1		# Test a word
	brb	5f

3:	movl	(%r2),%r1		# Test a long
	brb	5f

4:	incl	%r3			# Got machine chk => addr bad
5:	mtpr	(%sp)+,$PR_IPL
	movl	%r3,%r0
	ret

#ifdef DDB
/*
 * DDB is the only routine that uses setjmp/longjmp.
 */
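/*
 * Usage sketch (hypothetical; mirrors how DDB guards a command):
 *
 *	label_t jb;
 *	if (setjmp(&jb) == 0)
 *		run_command();		-- may fault...
 *	-- ...the fault handler longjmp()s back to the setjmp above
 */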
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word	0
	movl	4(%ap), %r0
	movl	8(%fp), (%r0)
	movl	12(%fp), 4(%r0)
	movl	16(%fp), 8(%r0)
	moval	28(%fp),12(%r0)
	clrl	%r0
	ret

_C_LABEL(longjmp):.word	0
	movl	4(%ap), %r1
	movl	8(%ap), %r0
	movl	(%r1), %ap
	movl	4(%r1), %fp
	movl	12(%r1), %sp
	jmp	*8(%r1)
#endif

#
# setrunqueue/remrunqueue fast variants.
#
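#
# C-level sketch of the pair (hedged; the queues are circular and
# manipulated with insque/remque):
#
#	bit = l->l_priority >> 2;	-- queue index
#	Setrq:	insert l at the tail of sched_qs[bit];
#		sched_whichqs |= 1 << bit;
#	Remrq:	remove l from its queue;
#		if (queue now empty) sched_whichqs &= ~(1 << bit);
#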

JSBENTRY(Setrq)
#ifdef DIAGNOSTIC
	tstl	4(%r0)		# Check that the process actually is off the queue
	beql	1f
	pushab	setrq
	calls	$1,_C_LABEL(panic)
setrq:	.asciz	"setrunqueue"
#endif
1:	extzv	$2,$6,L_PRIORITY(%r0),%r1	# get priority
	movaq	_C_LABEL(sched_qs)[%r1],%r2	# get address of queue
	insque	(%r0),*PH_RLINK(%r2)		# put proc last in queue
	bbss	%r1,_C_LABEL(sched_whichqs),1f	# set queue bit.
1:	rsb

JSBENTRY(Remrq)
	extzv	$2,$6,L_PRIORITY(%r0),%r1
#ifdef DIAGNOSTIC
	bbs	%r1,_C_LABEL(sched_whichqs),1f
	pushab	remrq
	calls	$1,_C_LABEL(panic)
remrq:	.asciz	"remrunqueue"
#endif
1:	remque	(%r0),%r2
	bneq	2f			# Not last process on queue
	bbsc	%r1,_C_LABEL(sched_whichqs),2f
2:	clrl	L_BACK(%r0)		# safety belt
	rsb

#
# Idle loop. Here we could do something fun, maybe, like calculating
# pi or something.
#
idle:
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_unlock_idle)
#elif defined(MULTIPROCESSOR)
	clrl	_C_LABEL(sched_lock)	# release sched lock
#endif
	mtpr	$1,$PR_IPL		# IPL cannot be 0 because we are
					# running on the interrupt stack
					# and may get interrupts

1:	tstl	_C_LABEL(sched_whichqs)	# Anything ready to run?
	beql	1b			# no, run the idle loop again.
/* Something is runnable; re-test with interrupts blocked, holding the lock */
	mtpr	$IPL_HIGH,$PR_IPL	# block all types of interrupts
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_lock_idle)
#elif defined(MULTIPROCESSOR)
3:	bbssi	$0,_C_LABEL(sched_lock),3b # acquire sched lock
#endif
	brb	lp			# check sched_whichqs again

#
# cpu_switch, cpu_preempt, cpu_exit and the idle loop are implemented
# in assembler for efficiency. This is called at IPL_HIGH.
#

JSBENTRY(Swtch)
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
	clrl	CI_CURLWP(%r1)		# Stop process accounting
	svpctx				# Save context if another CPU
					# get control first (must be on
					# the interrupt stack when idling)


lp:	ffs	$0,$32,_C_LABEL(sched_whichqs),%r3 # Search for bit set
	beql	idle			# no bit set, go to idle loop

	movaq	_C_LABEL(sched_qs)[%r3],%r1 # get address of queue head
	remque	*(%r1),%r2		# remove lwp pointed to by queue head
					# lwp ptr is now in %r2
#ifdef DIAGNOSTIC
	bvc	1f			# check if something on queue
	pushab	noque
	calls	$1,_C_LABEL(panic)
#endif

1:	bneq	2f			# more processes on queue?
	bbsc	%r3,_C_LABEL(sched_whichqs),2f # no, clear bit in whichqs
2:	clrl	L_BACK(%r2)		# clear proc backpointer
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
/* p->p_cpu initialized in fork1() for single-processor */
#if defined(MULTIPROCESSOR)
	movl	%r1,L_CPU(%r2)		# l->l_cpu = curcpu();
#endif
	movb	$LSONPROC,L_STAT(%r2)	# l->l_stat = LSONPROC;
	movl	%r2,CI_CURLWP(%r1)	# set new process running
	clrl	CI_WANT_RESCHED(%r1)	# we are now changing process
	movl	L_ADDR(%r2),%r0		# Get pointer to new pcb.
	addl3	%r0,$IFTRAP,%r1		# Save for copy* functions.
	mtpr	%r1,$PR_ESP		# Use ESP as CPU-specific pointer
	movl	%r1,ESP(%r0)		# Must save in PCB also.
	mfpr	$PR_SSP,%r1		# New process must inherit cpu_info
	movl	%r1,SSP(%r0)		# Put it in new PCB

#
# Nice routine to get physical from virtual addresses.
#
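# In C terms (a sketch): pa = Sysmap[(va - KERNBASE) >> 9] << 9,
# i.e. index the system page table with the virtual page number
# (512-byte VAX pages) and shift the PTE's PFN back up into a byte
# address; the PCB's physical address is what goes into PR_PCBB.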
	extzv	$9,$21,%r0,%r1		# extract virtual page number
	ashl	$9,*_C_LABEL(Sysmap)[%r1],%r3 # get PTE, shift to phys addr

	clrl	PCB_R0(%r0)		# Assume switch to same lwp
	mfpr	$PR_PCBB,%r1		# Get old PCB address
	cmpl	%r1,%r3			# The same lwp?
	beql	1f			# Branch if it is
	movl	$1,PCB_R0(%r0)		# Otherwise, return 1.

1:	mtpr	%r3,$PR_PCBB
	ldpctx
#if defined(LOCKDEBUG)
	pushl	%r0
	calls	$0,_C_LABEL(sched_unlock_idle)
	movl	(%sp)+,%r0
#elif defined(MULTIPROCESSOR)
	clrl	_C_LABEL(sched_lock)	# clear sched lock
#endif
	rei

#if defined(MULTIPROCESSOR)
	.align	2
	.globl	_C_LABEL(tramp)	# used to kick off multiprocessor systems.
_C_LABEL(tramp):
	ldpctx
	rei
#endif

JSBENTRY(Swtchto)
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
	clrl	CI_CURLWP(%r1)		# Stop process accounting
	svpctx				# Now on interrupt stack

	# New LWP already in %r2
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
#if defined(MULTIPROCESSOR)
	movl	%r1,L_CPU(%r2)		# l->l_cpu = curcpu();
#endif
	movb	$LSONPROC,L_STAT(%r2)	# l->l_stat = LSONPROC;
	movl	%r2,CI_CURLWP(%r1)	# set new process running
	movl	L_ADDR(%r2),%r0		# Get pointer to new pcb.
	addl3	%r0,$IFTRAP,%r3		# Save for copy* functions.
	mtpr	%r3,$PR_ESP		# Use ESP as CPU-specific pointer
	movl	%r3,ESP(%r0)		# Must save in PCB also.
	movl	%r1,SSP(%r0)		# Put it in new PCB

	extzv	$9,$21,%r0,%r1		# extract virtual page number
	ashl	$9,*_C_LABEL(Sysmap)[%r1],%r3 # get PTE, shift to phys addr

	mtpr	%r3,$PR_PCBB
	ldpctx
#if defined(LOCKDEBUG)
	pushl	%r0
	calls	$0,_C_LABEL(sched_unlock_idle)
	movl	(%sp)+,%r0
#elif defined(MULTIPROCESSOR)
	clrl	_C_LABEL(sched_lock)	# clear sched lock
#endif
	rei

#
# cpu_exit: the last routine called by an exiting process.
#
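#
# Roughly, in C (a hedged view):
#
#	void cpu_exit(struct lwp *l) {
#		pmap_deactivate(l);
#		-- move onto this cpu's scratch page (ci_exit)
#		exit2(l);		-- release the last resources
#		splhigh(); Swtch();	-- never returns
#	}
#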

ENTRY(cpu_exit,0)
	movl	4(%ap),%r6		# Process pointer in %r6

	pushl	%r6
	calls	$1,_C_LABEL(pmap_deactivate)

	mtpr	$IPL_CLOCK,$PR_IPL	# Block almost everything
	mfpr	$PR_SSP,%r7		# get cpu_info ptr
	movl	CI_EXIT(%r7),%r8	# scratch page address
	movab	512(%r8),%sp		# change stack
	bicl2	$0xc0000000,%r8		# get physical address
	mtpr	%r8,$PR_PCBB		# new PCB
	mtpr	%r7,$PR_SSP		# In case...
	pushl	%r6
	calls	$1,_C_LABEL(exit2)	# release last resources.
	mtpr	$IPL_HIGH,$PR_IPL	# block all types of interrupts
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_lock_idle)
#elif defined(MULTIPROCESSOR)
1:	bbssi	$0,_C_LABEL(sched_lock),1b # acquire sched lock
#endif
	clrl	%r6
	brw	Swtch

#
# copy/fetch/store routines.
#
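#
# All of these share one fault-recovery protocol: IPR ESP holds a
# per-lwp pointer to pcb.iftrap, and storing a label through it makes
# a fault inside the marked region resume at that label.  In C terms
# (a sketch):
#
#	*(void **)mfpr(PR_ESP) = &&fail;	-- arm recovery
#	-- ...touch user memory...
#	*(void **)mfpr(PR_ESP) = NULL;		-- disarm
#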

ENTRY(copyout, 0)
	movl	8(%ap),%r2
	blss	3f		# kernel space
	movl	4(%ap),%r1
	brb	2f

ENTRY(copyin, 0)
	movl	4(%ap),%r1
	blss	3f		# kernel space
	movl	8(%ap),%r2
2:	mfpr	$PR_ESP,%r3
	movab	1f,(%r3)
	movc3	12(%ap),(%r1),(%r2)
1:	mfpr	$PR_ESP,%r3
	clrl	(%r3)
	ret

3:	mnegl	$1,%r0
	ret

ENTRY(kcopy,0)
	mfpr	$PR_ESP,%r3
	movl	(%r3),-(%sp)
	movab	1f,(%r3)
	movl	4(%ap),%r1
	movl	8(%ap),%r2
	movc3	12(%ap),(%r1),(%r2)
	clrl	%r1
1:	mfpr	$PR_ESP,%r3
	movl	(%sp)+,(%r3)
	movl	%r1,%r0
	ret

/*
 * copy{in,out}str() copies a string between user space and kernel space.
 * Security checks:
 *	1) the user space address must be < KERNBASE
 *	2) the VM system will do the checks while copying
 */
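/*
 * C-level contract (a sketch):
 *
 *	error = copyinstr(uaddr, kaddr, len, &done);
 *
 * returns 0, EFAULT (illegal address) or ENAMETOOLONG, and stores
 * the number of bytes copied (including the NUL) through "done"
 * when that pointer is non-NULL.
 */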
ENTRY(copyinstr, 0)
	tstl	4(%ap)		# kernel address?
	bgeq	8f		# no, continue
6:	movl	$EFAULT,%r0
	movl	16(%ap),%r2
	beql	7f
	clrl	(%r2)
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(%ap)		# kernel address?
	bgeq	8f		# no, continue
	brb	6b		# yes, return EFAULT

ENTRY(copystr,0)
8:	movl	4(%ap),%r5	# from
	movl	8(%ap),%r4	# to
	movl	12(%ap),%r3	# len
	movl	16(%ap),%r2	# copied
	clrl	%r0
	mfpr	$PR_ESP,%r1
	movab	3f,(%r1)

	tstl	%r3		# any chars to copy?
	bneq	1f		# yes, jump for more
0:	tstl	%r2		# save copied len?
	beql	2f		# no
	subl3	4(%ap),%r5,(%r2) # save copied len
2:	ret

1:	movb	(%r5)+,(%r4)+	# copy one char
	beql	0b		# jmp if last char
	sobgtr	%r3,1b		# copy one more
	movl	$ENAMETOOLONG,%r0 # inform about too long string
	brb	0b		# out of chars

3:	mfpr	$PR_ESP,%r1
	clrl	(%r1)
	brb	0b

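#
# su*() store and fu*() fetch one item in user space, returning -1
# on fault or illegal address.  Usage sketch:
#
#	if (suword(uptr, val) == -1)
#		return (EFAULT);
#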
ENTRY(subyte,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movb	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

ENTRY(suword,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movl	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

ENTRY(suswintr,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movw	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

3:	mnegl	$1,%r0
	ret

	.align	2
ALTENTRY(fusword)
ENTRY(fuswintr,0)
	movl	4(%ap),%r0
	blss	3b
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movzwl	(%r0),%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

#if defined(MULTIPROCESSOR)

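#
# Simple spinlocks built on the interlocked bit instructions, with
# the lock address in %r1.  Sketched in C:
#
#	Slock:		while (test_and_set(lock)) continue;
#	Slocktry:	return test_and_set(lock) ? 0 : 1;	-- 1 = got it
#	Sunlock:	clear(lock);
#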
JSBENTRY(Slock)
1:	bbssi	$0,(%r1),1b
	rsb

JSBENTRY(Slocktry)
	clrl	%r0
	bbssi	$0,(%r1),1f
	incl	%r0
1:	rsb

JSBENTRY(Sunlock)
	bbcci	$0,(%r1),1f
1:	rsb

#endif

#
# data department
#
	.data

	.globl	_C_LABEL(memtest)
_C_LABEL(memtest):		# memory test in progress
	.long	0

#ifdef __ELF__
	.section .rodata
#endif
noque:	.asciz	"swtch"