/*	$NetBSD: subr.S,v 1.22 2008/02/21 03:52:47 matt Exp $	*/

/*
 * Copyright (c) 1994 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

#include "assym.h"
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_ibcs2.h"
#ifdef COMPAT_IBCS2
#include <compat/ibcs2/ibcs2_syscall.h>
#endif
#include "opt_compat_ultrix.h"
#ifdef COMPAT_ULTRIX
#include <compat/ultrix/ultrix_syscall.h>
#endif

#define JSBENTRY(x)	.globl x ; .align 2 ; x :
#define SCBENTRY(name) \
	.text ; \
	.align 2 ; \
	.globl __CONCAT(X,name) ; \
__CONCAT(X,name):
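/*
 * JSBENTRY declares a routine entered via jsb (no call frame, returned
 * from with rsb); SCBENTRY declares an interrupt entry point whose
 * "X"-prefixed symbol is installed as a vector in the System Control
 * Block.
 */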

	.text

#ifdef KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky: the kernel is not loaded at its correct
 * address, so a small copy routine is first moved out of the way,
 * then used to copy the kernel into place, and finally execution
 * resumes at the correct address.
 */
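/*
 * Register contract for the copy loops below (inferred from the code):
 * %r0 = copy source, %r1 = copy destination, %r7 = end of the region
 * to copy, %r6 = end of the bss to clear, %r2 = pc to resume at, and
 * %r8 = value that ends up stored at _cca.
 */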
/* Copy routine */
cps:
2:	movb	(%r0)+,(%r1)+
	cmpl	%r0,%r7
	bneq	2b

3:	clrb	(%r1)+
	incl	%r0
	cmpl	%r0,%r6
	bneq	3b
	clrl	-(%sp)
	movl	%sp,%ap
	movl	$_cca,%r7
	movl	%r8,(%r7)
	movpsl	-(%sp)
	pushl	%r2
	rei
cpe:

/* Copy the copy routine */
1:	movab	cps,%r0
	movab	cpe,%r1
	movl	$0x300000,%sp
	movl	%sp,%r3
4:	movb	(%r0)+,(%r3)+
	cmpl	%r0,%r1
	bneq	4b
	movl	%r7,%r8
/* Ok, copy routine copied, set registers and rei */
	movab	_edata,%r7
	movab	_end,%r6
	movl	$0x80000000,%r1
	movl	$0x80000200,%r0
	subl3	$0x200,%r6,%r9
	movab	2f,%r2
	subl2	$0x200,%r2
	movpsl	-(%sp)
	pushab	4(%sp)
	rei

/*
 * First entry routine from boot.  This should be in a file called locore.
 */
JSBENTRY(start)
	brb	1b				# Netbooted starts here
#else
ASENTRY(start, 0)
#endif
2:	bisl3	$0x80000000,%r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000			# Push a nice PSL
	pushl	$to				# Address to jump to
	rei					# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)		# Save all regs in panic
	cmpb	(%ap),$3			# symbols info present?
	blssu	3f				# nope, skip
	bisl3	$0x80000000,8(%ap),_C_LABEL(symtab_start)
						# save start of symtab
	movl	12(%ap),_C_LABEL(symtab_nsyms)	# save number of symtab entries
	bisl3	$0x80000000,%r9,_C_LABEL(symtab_end)
						# save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,%r0	# Round symbol table end
	bicl3	$0x3ff,%r0,_C_LABEL(proc0paddr) # save proc0 uarea pointer
	bicl3	$0x80000000,_C_LABEL(proc0paddr),%r0 # get phys proc0 uarea addr
	mtpr	%r0,$PR_PCBB			# Save in IPR PCBB
	addl3	$USPACE,_C_LABEL(proc0paddr),%r0 # Get kernel stack top
	mtpr	%r0,$PR_KSP			# put in IPR KSP
	movl	%r0,_C_LABEL(Sysmap)		# SPT start addr after KSP
	movl	_C_LABEL(proc0paddr),%r0	# get PCB virtual address
	mfpr	$PR_PCBB,PCB_PADDR(%r0)		# save PCB physical address
	movab	IFTRAP(%r0),ESP(%r0)		# Save trap address in ESP
	mtpr	4(%r0),$PR_ESP			# Put it in ESP also

# Set some registers in known state
	movl	_C_LABEL(proc0paddr),%r0
	clrl	P0LR(%r0)
	clrl	P1LR(%r0)
	mtpr	$0,$PR_P0LR
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,%r1
	movl	%r1,P0BR(%r0)
	movl	%r1,P1BR(%r0)
	mtpr	%r1,$PR_P0BR
	mtpr	%r1,$PR_P1BR
	clrl	IFTRAP(%r0)
	mtpr	$0,$PR_SCBB

# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(%ap)				# Any arguments?
	bneq	1f				# Yes, called from new boot
	movl	%r11,_C_LABEL(boothowto)	# Howto boot (single etc...)
#	movl	%r10,_C_LABEL(bootdev)		# uninteresting, will complain
	movl	%r8,_C_LABEL(avail_end)		# Usable memory (from VMB)
	clrl	-(%sp)				# Have no RPB
	brb	2f
#endif

1:	pushl	4(%ap)				# Address of old rpb
2:	calls	$1,_C_LABEL(_start)		# Jump away.
	/* NOTREACHED */


/*
 * Signal handler code.
 */
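/*
 * The trampolines below all follow the same pattern: save the
 * temporary registers, locate the handler address that the kernel
 * placed on the stack, call the handler via "calls $3", restore the
 * registers and reenter the kernel through the sigreturn system call.
 * The trailing exit/halt should never be reached.
 */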

	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS_compat_16___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(esigcode):

#ifdef COMPAT_IBCS2
	.align	2
	.globl	_C_LABEL(ibcs2_sigcode),_C_LABEL(ibcs2_esigcode)
_C_LABEL(ibcs2_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS_compat_16___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(ibcs2_esigcode):
#endif /* COMPAT_IBCS2 */

#ifdef COMPAT_ULTRIX
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$ULTRIX_SYS_sigreturn
	chmk	$SYS_exit
	halt
_C_LABEL(ultrix_esigcode):
#endif

	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
_C_LABEL(idsptch):
	pushr	$0x3f
	.word	0x9f16			# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0			# the callback interrupt routine
	.long	0			# its argument
	.long	0			# ptr to corresponding evcnt struct
_C_LABEL(eidsptch):
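/*
 * The idsptch/eidsptch pair is a template: it is copied once per
 * device interrupt vector, and the three longwords after the jsb are
 * patched with the handler, its argument and an evcnt pointer before
 * the copy is installed in the SCB.  A C mirror of the layout might
 * look like this (a sketch only, field names hypothetical):
 *
 *	struct intvec {
 *		uint8_t	code[8];	    // pushr $0x3f; jsb @#cmn_idsptch
 *		void	(*handler)(void *); // callback interrupt routine
 *		void	*arg;		    // its argument
 *		struct evcnt *ev;	    // corresponding event counter
 *	};
 */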

_C_LABEL(cmn_idsptch):
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnlock)
#endif
	movl	(%sp)+,%r0	# get pointer to idspvec
	mtpr	$IPL_VM,$PR_IPL	# Make sure we are at IPL_VM
	movl	8(%r0),%r1	# get evcnt pointer
	beql	1f		# no ptr, skip increment
	incl	EV_COUNT(%r1)	# increment low longword
	adwc	$0,EV_COUNT+4(%r1) # add any carry to hi longword
1:	incl	_C_LABEL(uvmexp)+UVME_INTRS # increment uvmexp.intrs
#if 0
	pushl	%r0
	movq	(%r0),-(%sp)
	pushab	2f
	calls	$3,_C_LABEL(printf)
	movl	(%sp)+,%r0
#endif
	pushl	4(%r0)		# push argument
	calls	$1,*(%r0)	# call interrupt routine
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnunlock)
#endif
	popr	$0x3f		# pop registers
	rei			# return from interrupt
#if 0
2:	.asciz	"intr %p(%p)\n"
#endif

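/*
 * badaddr() probes an address at splhigh(); a machine check during the
 * access (caught via the memtest hook below) means the address is bad.
 * A sketch of the C-side contract (names hypothetical):
 *
 *	if (badaddr((void *)csraddr, 4))	// nonzero => no device
 *		return;				// do not attach
 */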
ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,%r0		# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(%ap),%r2		# First argument, the address
	movl	8(%ap),%r1		# Second argument: b,w,l
	pushl	%r0			# Save old IPL
	clrl	%r3
	movab	4f,_C_LABEL(memtest)	# Set the return address

	caseb	%r1,$1,$4		# What is the size
1:	.word	1f-1b
	.word	2f-1b
	.word	3f-1b			# This is unused
	.word	3f-1b

1:	movb	(%r2),%r1		# Test a byte
	brb	5f

2:	movw	(%r2),%r1		# Test a word
	brb	5f

3:	movl	(%r2),%r1		# Test a long
	brb	5f

4:	incl	%r3			# Got machine chk => addr bad
5:	mtpr	(%sp)+,$PR_IPL
	movl	%r3,%r0
	ret

#ifdef DDB
/*
 * DDB is the only user of setjmp/longjmp.
 */
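/*
 * A sketch of the intended use (buffer name hypothetical): DDB saves
 * its state with setjmp(db_jmpbuf) and, if a fault occurs while it is
 * examining memory, the fault handler calls longjmp(db_jmpbuf, 1) to
 * unwind back to the saved frame.  Only the ap, fp, pc and sp are
 * saved below.
 */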
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word	0
	movl	4(%ap), %r0
	movl	8(%fp), (%r0)
	movl	12(%fp), 4(%r0)
	movl	16(%fp), 8(%r0)
	moval	28(%fp), 12(%r0)
	clrl	%r0
	ret

_C_LABEL(longjmp):.word	0
	movl	4(%ap), %r1
	movl	8(%ap), %r0
	movl	(%r1), %ap
	movl	4(%r1), %fp
	movl	12(%r1), %sp
	jmp	*8(%r1)
#endif

#if defined(MULTIPROCESSOR)
	.align	2
	.globl	_C_LABEL(vax_mp_tramp)	# used to kick off multiprocessor systems.
_C_LABEL(vax_mp_tramp):
	ldpctx
	rei
#endif

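/*
 * Software interrupt glue.  Each SOFTINT_* level has a dedicated
 * softint lwp.  softint_common switches to that lwp, stashing the
 * pinned (interrupted) lwp and the dispatch IPL in the softint lwp's
 * pcb (%r6/%r7).  softint_process then calls softint_dispatch() and
 * switches back, while softint_cleanup runs instead when a softint
 * lwp that blocked is eventually resumed.
 */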
	.globl	softint_cleanup,softint_exit,softint_process
	.type	softint_cleanup,@function
	.type	softint_exit,@function
	.type	softint_process,@function
softint_cleanup:
	mfpr	$PR_SSP,%r1		/* get cpu_info */
	incl	CI_MTX_COUNT(%r1)	/* increment mutex count */
	clrl	L_CTXSWTCH(%r0)		/* clear l_ctxswtch of old lwp */
	movl	L_ADDR(%r0),%r1		/* get PCB of softint LWP */
softint_exit:
	/*
	 * Now restore the PCB, since we may have been interrupted or
	 * blocked and have no idea what state it was last in.
	 */
	movab	(USPACE-TRAPFRAMELEN-CALLSFRAMELEN)(%r1),%r0
					/* calculate where KSP should be */
	movl	%r0,KSP(%r1)		/* save it as SP */
	movl	%r0,PCB_FP(%r1)		/* and as the FP too */
	movab	CA_ARGNO(%r0),PCB_AP(%r1) /* update the AP as well */
	movab	softint_process,PCB_PC(%r1) /* and where we will restart */
	popr	$0x3			/* restore r0 and r1 */
	rei				/* return from interrupt */

softint_process:
	/*
	 * R6 contains pinned LWP
	 * R7 contains ipl to dispatch with
	 */
	movq	%r6,-(%sp)		/* push old lwp and ipl onto stack */
	calls	$2,_C_LABEL(softint_dispatch) /* dispatch it */

	/* We can use any register because ldpctx will overwrite them */
	mfpr	$PR_SSP,%r8		/* get cpu_info */
	movl	%r6,CI_CURLWP(%r8)	/* update ci_curlwp */
	movl	L_ADDR(%r6),%r3		/* Get pointer to new pcb. */
	movl	%r3,PCB_R1(%r3)		/* Make %r1 point to the pcb. */

	movab	softint_exit,PCB_PC(%r3) /* do a quick exit */

	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* restore PA of interrupted pcb */
	ldpctx
	rei


softint_common:
	mfpr	$PR_IPL,%r1
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	movpsl	-(%sp)			/* add cleanup hook */
	pushab	softint_cleanup
	svpctx

	/* We can use any register because ldpctx will overwrite them */
	mfpr	$PR_SSP,%r8		/* Get curcpu */
	movl	CI_SOFTLWPS(%r8)[%r0],%r2 /* get softlwp to switch to */
	movl	L_ADDR(%r2),%r3		/* Get pointer to its pcb. */
	movl	CI_CURLWP(%r8),PCB_R6(%r3) /* move old lwp into new pcb */
	movl	%r1,PCB_R7(%r3)		/* move IPL into new pcb */
	movl	%r2,CI_CURLWP(%r8)	/* update ci_curlwp */
	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* set PA of new pcb */
	ldpctx				/* load it */
	rei				/* get off interrupt stack */

SCBENTRY(softclock)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_CLOCK,%r0
	brb	softint_common

SCBENTRY(softbio)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_BIO,%r0
	brb	softint_common

SCBENTRY(softnet)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_NET,%r0
	brb	softint_common

SCBENTRY(softserial)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_SERIAL,%r0
	brb	softint_common

/*
 * Helper routine for cpu_lwp_fork.  It is invoked by Swtchto.
 * It lets the kernel know the lwp is alive and then calls func(arg),
 * possibly returning to sret afterwards.
 */
ENTRY(cpu_lwp_bootstrap, 0)
	movq	%r2,-(%sp)		/* save func & arg */
	movq	%r0,-(%sp)		/* push oldl/newl */
	calls	$2,_C_LABEL(lwp_startup) /* startup the lwp */
	movl	(%sp)+,%r0		/* grab func */
	calls	$1,(%r0)		/* call it with arg */
	ret
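/*
 * The contract assumed here (set up by the MD cpu_lwp_fork()): the new
 * lwp's pcb has its saved pc pointing at cpu_lwp_bootstrap and its
 * saved %r2/%r3 holding func/arg, so the movq above can push them
 * directly.
 */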

/*
 * r1 = newlwp
 * r0 = oldlwp
 */
JSBENTRY(Swtchto)
	/* this pops the pc and psw from the stack and puts them in the pcb. */
	svpctx				# Now on interrupt stack

	/* We can now use any register because ldpctx will overwrite them */
	/* New LWP already in %r1 */
	mfpr	$PR_SSP,%r4		# Get curcpu
	movl	%r1,CI_CURLWP(%r4)	# update ci_curlwp
	movl	L_ADDR(%r1),%r3		# Get pointer to new pcb.
	movl	%r0,PCB_R0(%r3)		# move r0 into new pcb (return value)
#ifdef MULTIPROCESSOR
	movl	%r4,SSP(%r3)		# Put curcpu into new PCB
#endif

	mtpr	PCB_PADDR(%r3),$PR_PCBB	# set PA of new pcb
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	ldpctx				# load it
	/* r0 already has previous lwp */
	/* r1 already has this lwp */
	/* r2/r3 and r4/r5 restored */
	rei				/* get off interrupt stack */

#
# copy/fetch/store routines.
#

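/*
 * These routines share a fault-recovery protocol: the ESP processor
 * register points at the pcb's iftrap slot, and storing a label there
 * ("movab 1f,(%rN)") arms a recovery pc that the trap handler resumes
 * at if the access faults; clearing the slot disarms it again.
 */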
ENTRY(copyout, 0)
	movl	8(%ap),%r2
	blss	3f		# kernel space
	movl	4(%ap),%r1
	brb	2f

ENTRY(copyin, 0)
	movl	4(%ap),%r1
	blss	3f		# kernel space
	movl	8(%ap),%r2
2:	mfpr	$PR_ESP,%r3
	movab	1f,(%r3)
	movc3	12(%ap),(%r1),(%r2)
1:	mfpr	$PR_ESP,%r3
	clrl	(%r3)
	ret

3:	mnegl	$1,%r0
	ret

ENTRY(kcopy,0)
	mfpr	$PR_ESP,%r3
	movl	(%r3),-(%sp)
	movab	1f,(%r3)
	movl	4(%ap),%r1
	movl	8(%ap),%r2
	movc3	12(%ap),(%r1),(%r2)
	clrl	%r1
1:	mfpr	$PR_ESP,%r3
	movl	(%sp)+,(%r3)
	movl	%r1,%r0
	ret

/*
 * copy{in,out}str() copies data from/to user space to/from kernel space.
 * Security checks:
 *	1) user space address must be < KERNBASE
 *	2) the VM system will do the checks while copying
 */
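/*
 * The C-side contracts, as a sketch matching the argument offsets used
 * below:
 *
 *	int copyinstr(const void *uaddr, void *kaddr, size_t len,
 *	    size_t *done);
 *	int copyoutstr(const void *kaddr, void *uaddr, size_t len,
 *	    size_t *done);
 *	int copystr(const void *from, void *to, size_t len, size_t *done);
 *
 * Each returns 0 on success, EFAULT for an illegal address and
 * ENAMETOOLONG if the string does not fit in len bytes; when done is
 * non-NULL it receives the number of bytes copied, including the
 * terminating NUL.
 */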
ENTRY(copyinstr, 0)
	tstl	4(%ap)		# kernel address?
	bgeq	8f		# no, continue
6:	movl	$EFAULT,%r0
	movl	16(%ap),%r2
	beql	7f
	clrl	(%r2)
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(%ap)		# kernel address?
	bgeq	8f		# no, continue
	brb	6b		# yes, return EFAULT

ENTRY(copystr,0)
8:	movl	4(%ap),%r5	# from
	movl	8(%ap),%r4	# to
	movl	12(%ap),%r3	# len
	movl	16(%ap),%r2	# copied
	clrl	%r0
	mfpr	$PR_ESP,%r1
	movab	3f,(%r1)

	tstl	%r3		# any chars to copy?
	bneq	1f		# yes, jump for more
0:	tstl	%r2		# save copied len?
	beql	2f		# no
	subl3	4(%ap),%r5,(%r2) # save copied len
2:	ret

1:	movb	(%r5)+,(%r4)+	# copy one char
	beql	0b		# jmp if last char
	sobgtr	%r3,1b		# copy one more
	movl	$ENAMETOOLONG,%r0 # inform about too long string
	brb	0b		# out of chars

3:	mfpr	$PR_ESP,%r1
	clrl	(%r1)
	brb	0b

ENTRY(subyte,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movb	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

ENTRY(suword,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movl	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

ENTRY(suswintr,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movw	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

3:	mnegl	$1,%r0
	ret

	.align	2
ALTENTRY(fusword)
ENTRY(fuswintr,0)
	movl	4(%ap),%r0
	blss	3b
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movzwl	(%r0),%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

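/*
 * jsb-callable spinlock primitives operating on bit 0 of the longword
 * %r1 points at, built on the interlocked branch instructions.
 * Roughly, in C (a sketch; the helpers stand in for bbssi/bbcci):
 *
 *	void Slock(volatile int *l)    { while (test_and_set_bit0(l)) ; }
 *	int  Slocktry(volatile int *l) { return !test_and_set_bit0(l); }
 *	void Sunlock(volatile int *l)  { clear_bit0(l); }
 */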
JSBENTRY(Slock)
1:	bbssi	$0,(%r1),1b
	rsb

JSBENTRY(Slocktry)
	clrl	%r0
	bbssi	$0,(%r1),1f
	incl	%r0
1:	rsb

JSBENTRY(Sunlock)
	bbcci	$0,(%r1),1f
1:	rsb

#
# data department
#
	.data

	.globl	_C_LABEL(memtest)
_C_LABEL(memtest):		# memory test in progress
	.long	0

#ifdef __ELF__
	.section .rodata
#endif
noque:	.asciz	"swtch"