subr.S revision 1.21 1 /* $NetBSD: subr.S,v 1.21 2008/02/20 16:37:52 matt Exp $ */
2
3 /*
4 * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed at Ludd, University of Lule}.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <machine/asm.h>
34
35 #include "assym.h"
36 #include "opt_ddb.h"
37 #include "opt_multiprocessor.h"
38 #include "opt_lockdebug.h"
39 #include "opt_compat_netbsd.h"
40 #include "opt_compat_ibcs2.h"
41 #ifdef COMPAT_IBCS2
42 #include <compat/ibcs2/ibcs2_syscall.h>
43 #endif
44 #include "opt_compat_ultrix.h"
45 #ifdef COMPAT_ULTRIX
46 #include <compat/ultrix/ultrix_syscall.h>
47 #endif
48
/*
 * JSBENTRY(x): global, longword-aligned label for routines entered via
 * jsb/jmp -- no "calls" entry-mask word is emitted.
 * SCBENTRY(name): global, longword-aligned label "Xname" in .text, used
 * as a System Control Block (interrupt/exception vector) entry point.
 */
49 #define JSBENTRY(x) .globl x ; .align 2 ; x :
50 #define SCBENTRY(name) \
51 .text ; \
52 .align 2 ; \
53 .globl __CONCAT(X,name) ; \
54 __CONCAT(X,name):
55
56 .text
57
58 #ifdef KERNEL_LOADABLE_BY_MOP
59 /*
60 * This is a little tricky. The kernel is not loaded at the correct
61 * address, so the kernel must first be relocated, then copied, then
62 * jump back to the correct address.
63 */
64 /* Copy routine */
/*
 * cps..cpe: the relocatable copy routine.  Copies bytes from (%r0) into
 * (%r1) until %r0 reaches %r7 (_edata, see below), then zero-fills
 * (%r1) while advancing %r0 up to %r6 (_end) -- i.e. clears bss --
 * and finally rei's to the address left in %r2.
 */
65 cps:
66 2: movb (%r0)+,(%r1)+
67 cmpl %r0,%r7
68 bneq 2b
69
70 3: clrb (%r1)+
71 incl %r0
72 cmpl %r0,%r6
73 bneq 3b
74 clrl -(%sp)
75 movl %sp,%ap
76 movl $_cca,%r7
77 movl %r8,(%r7)
78 movpsl -(%sp)
79 pushl %r2
80 rei
81 cpe:
82
83 /* Copy the copy routine */
/* 1: reached from "start" below when the kernel was netbooted via MOP. */
84 1: movab cps,%r0
85 movab cpe,%r1
86 movl $0x300000,%sp
87 movl %sp,%r3
88 4: movb (%r0)+,(%r3)+
89 cmpl %r0,%r1
90 bneq 4b
91 movl %r7,%r8
92 /* Ok, copy routine copied, set registers and rei */
93 movab _edata,%r7
94 movab _end,%r6
95 movl $0x80000000,%r1
96 movl $0x80000200,%r0
97 subl3 $0x200,%r6,%r9
98 movab 2f,%r2
99 subl2 $0x200,%r2
100 movpsl -(%sp)
/* push the address 4 above %sp (= 0x300000, the copied routine) and rei into it */
101 pushab 4(%sp)
102 rei
103
104 /*
105 * First entry routine from boot. This should be in a file called locore.
106 */
107 JSBENTRY(start)
108 brb 1b # Netbooted starts here
109 #else
110 ASENTRY(start, 0)
111 #endif
/*
 * Common startup continuation: switch onto the kernel stack via rei,
 * record the symbol table if the bootloader passed one, carve proc0's
 * uarea/PCB out of memory just past the symbol table, put the page
 * table base/length registers into a known state, and call _start()
 * with the RPB address (or 0 for old boot blocks).
 */
112 2: bisl3 $0x80000000,%r9,_C_LABEL(esym) # End of loaded code
113 pushl $0x1f0000 # Push a nice PSL
114 pushl $to # Address to jump to
115 rei # change to kernel stack
116 to: movw $0xfff,_C_LABEL(panic) # Save all regs in panic
117 cmpb (%ap),$3 # symbols info present?
118 blssu 3f # nope, skip
119 bisl3 $0x80000000,8(%ap),_C_LABEL(symtab_start)
120 # save start of symtab
121 movl 12(%ap),_C_LABEL(symtab_nsyms) # save number of symtab
122 bisl3 $0x80000000,%r9,_C_LABEL(symtab_end)
123 # save end of symtab
124 3: addl3 _C_LABEL(esym),$0x3ff,%r0 # Round symbol table end
125 bicl3 $0x3ff,%r0,_C_LABEL(proc0paddr) # save proc0 uarea pointer
126 bicl3 $0x80000000,_C_LABEL(proc0paddr),%r0 # get phys proc0 uarea addr
127 mtpr %r0,$PR_PCBB # Save in IPR PCBB
128 addl3 $USPACE,_C_LABEL(proc0paddr),%r0 # Get kernel stack top
129 mtpr %r0,$PR_KSP # put in IPR KSP
130 movl %r0,_C_LABEL(Sysmap) # SPT start addr after KSP
131 movl _C_LABEL(proc0paddr),%r0 # get PCB virtual address
132 mfpr $PR_PCBB,PCB_PADDR(%r0) # save PCB physical address
133 movab IFTRAP(%r0),ESP(%r0) # Save trap address in ESP
134 mtpr 4(%r0),$PR_ESP # Put it in ESP also
135
136 # Set some registers in known state
137 movl _C_LABEL(proc0paddr),%r0
138 clrl P0LR(%r0)
139 clrl P1LR(%r0)
140 mtpr $0,$PR_P0LR
141 mtpr $0,$PR_P1LR
142 movl $0x80000000,%r1
143 movl %r1,P0BR(%r0)
144 movl %r1,P1BR(%r0)
145 mtpr %r1,$PR_P0BR
146 mtpr %r1,$PR_P1BR
147 clrl IFTRAP(%r0)
148 mtpr $0,$PR_SCBB
149
150 # Copy the RPB to its new position
151 #if defined(COMPAT_14)
152 tstl (%ap) # Any arguments?
153 bneq 1f # Yes, called from new boot
154 movl %r11,_C_LABEL(boothowto) # Howto boot (single etc...)
155 # movl %r10,_C_LABEL(bootdev) # uninteresting, will complain
156 movl %r8,_C_LABEL(avail_end) # Usable memory (from VMB)
157 clrl -(%sp) # Have no RPB
158 brb 2f
159 #endif
160
161 1: pushl 4(%ap) # Address of old rpb
162 2: calls $1,_C_LABEL(_start) # Jump away.
163 /* NOTREACHED */
164
165
166 /*
167 * Signal handler code.
168 */
169
170 .align 2
171 .globl _C_LABEL(sigcode),_C_LABEL(esigcode)
/*
 * Signal trampoline, copied out to user space (esigcode marks its end
 * so the kernel knows how much to copy).  Saves r0-r5, calls the
 * handler whose address is found at 0x24(%sp) -- placed there by the
 * kernel's signal delivery, presumably sendsig; confirm -- with the
 * three longwords at %sp as arguments, then restores the registers and
 * enters the kernel via the compat-16 sigreturn syscall.  SYS_exit runs
 * only if sigreturn fails; halt should never be reached.
 */
172 _C_LABEL(sigcode):
173 pushr $0x3f # save r0-r5
174 subl2 $0xc,%sp # point %sp at the handler's 3 args
175 movl 0x24(%sp),%r0 # handler address from the sigframe
176 calls $3,(%r0)
177 popr $0x3f
178 chmk $SYS_compat_16___sigreturn14
179 chmk $SYS_exit
180 halt
181 _C_LABEL(esigcode):
182
183 #ifdef COMPAT_IBCS2
/*
 * iBCS2 signal trampoline: same code as the native sigcode, exported
 * under separate labels for the iBCS2 emulation.
 */
184 .align 2
185 .globl _C_LABEL(ibcs2_sigcode),_C_LABEL(ibcs2_esigcode)
186 _C_LABEL(ibcs2_sigcode):
187 pushr $0x3f # save r0-r5
188 subl2 $0xc,%sp # point %sp at the handler's 3 args
189 movl 0x24(%sp),%r0 # handler address from the sigframe
190 calls $3,(%r0)
191 popr $0x3f
192 chmk $SYS_compat_16___sigreturn14
193 chmk $SYS_exit
194 halt
195 _C_LABEL(ibcs2_esigcode):
196 #endif /* COMPAT_IBCS2 */
197
198 #ifdef COMPAT_ULTRIX
/*
 * Ultrix signal trampoline: same code as the native sigcode except it
 * returns to the kernel with the Ultrix sigreturn syscall number.
 */
199 .align 2
200 .globl _C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
201 _C_LABEL(ultrix_sigcode):
202 pushr $0x3f # save r0-r5
203 subl2 $0xc,%sp # point %sp at the handler's 3 args
204 movl 0x24(%sp),%r0 # handler address from the sigframe
205 calls $3,(%r0)
206 popr $0x3f
207 chmk $ULTRIX_SYS_sigreturn
208 chmk $SYS_exit
209 halt
210 _C_LABEL(ultrix_esigcode):
211 #endif
212
213 .align 2
214 .globl _C_LABEL(idsptch), _C_LABEL(eidsptch)
/*
 * Interrupt dispatch stub: a copy of idsptch..eidsptch is made per
 * device vector.  It saves r0-r5 and jsb's to cmn_idsptch; the jsb
 * return address on the stack then points at the three inline
 * longwords (handler, handler argument, optional evcnt pointer)
 * that follow the jsb.
 */
215 _C_LABEL(idsptch):
216 pushr $0x3f
217 .word 0x9f16 # jsb to absolute address
218 .long _C_LABEL(cmn_idsptch) # the absolute address
219 .long 0 # the callback interrupt routine
220 .long 0 # its argument
221 .long 0 # ptr to correspond evcnt struct
222 _C_LABEL(eidsptch):
223
/*
 * Common interrupt dispatcher: bump the event counter (if any) and the
 * global interrupt count, call handler(argument), restore r0-r5 and rei.
 */
224 _C_LABEL(cmn_idsptch):
225 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
226 calls $0,_C_LABEL(krnlock)
227 #endif
228 movl (%sp)+,%r0 # get pointer to idspvec
229 mtpr $IPL_VM,$PR_IPL # Make sure we are at IPL_VM
230 movl 8(%r0),%r1 # get evcnt pointer
231 beql 1f # no ptr, skip increment
232 incl EV_COUNT(%r1) # increment low longword
233 adwc $0,EV_COUNT+4(%r1) # add any carry to hi longword
234 1: incl _C_LABEL(uvmexp)+UVME_INTRS # increment uvmexp.intrs
235 #if 0
236 pushl %r0
237 movq (%r0),-(%sp)
238 pushab 2f
239 calls $3,_C_LABEL(printf)
240 movl (%sp)+,%r0
241 #endif
242 pushl 4(%r0) # push argument
243 calls $1,*(%r0) # call interrupt routine
244 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
245 calls $0,_C_LABEL(krnunlock)
246 #endif
247 popr $0x3f # pop registers
248 rei # return from interrupt
249 #if 0
250 2: .asciz "intr %p(%p)\n"
251 #endif
252
/*
 * badaddr(addr, size): probe addr with an access of the given size
 * (1 = byte, 2 = word, 4 = long) at IPL_HIGH, with the machine-check
 * recovery hook "memtest" pointing at 4f.  A machine check during the
 * probe lands at 4f and sets %r3.  Returns 0 if the access succeeded,
 * non-zero if the address is bad.
 */
253 ENTRY(badaddr,0) # Called with addr,b/w/l
254 mfpr $PR_IPL,%r0 # splhigh()
255 mtpr $IPL_HIGH,$PR_IPL
256 movl 4(%ap),%r2 # First argument, the address
257 movl 8(%ap),%r1 # Sec arg, b,w,l
258 pushl %r0 # Save old IPL
259 clrl %r3
260 movab 4f,_C_LABEL(memtest) # Set the return address
261
262 caseb %r1,$1,$4 # What is the size
263 1: .word 1f-1b
264 .word 2f-1b
265 .word 3f-1b # This is unused
266 .word 3f-1b
267
268 1: movb (%r2),%r1 # Test a byte
269 brb 5f
270
271 2: movw (%r2),%r1 # Test a word
272 brb 5f
273
274 3: movl (%r2),%r1 # Test a long
275 brb 5f
276
277 4: incl %r3 # Got machine chk => addr bad
278 5: mtpr (%sp)+,$PR_IPL # restore the saved IPL
279 movl %r3,%r0
280 ret
281
282 #ifdef DDB
283 /*
284 * DDB is the only routine that uses setjmp/longjmp.
285 */
286 .globl _C_LABEL(setjmp), _C_LABEL(longjmp)
/*
 * setjmp(env): save the caller's state from its call frame -- AP at
 * 8(fp), saved FP at 12(fp), return PC at 16(fp), and the address
 * 28(fp) (stack position above the frame) -- into env[0..3].
 * Returns 0.
 */
287 _C_LABEL(setjmp):.word 0
288 movl 4(%ap), %r0
289 movl 8(%fp), (%r0)
290 movl 12(%fp), 4(%r0)
291 movl 16(%fp), 8(%r0)
292 moval 28(%fp),12(%r0)
293 clrl %r0
294 ret
295
/*
 * longjmp(env, val): restore AP, FP and SP from env, place val in %r0
 * and jump to the PC saved by setjmp.
 */
296 _C_LABEL(longjmp):.word 0
297 movl 4(%ap), %r1
298 movl 8(%ap), %r0
299 movl (%r1), %ap
300 movl 4(%r1), %fp
301 movl 12(%r1), %sp
302 jmp *8(%r1)
303 #endif
304
305 #if defined(MULTIPROCESSOR)
306 .align 2
307 .globl _C_LABEL(vax_mp_tramp) # used to kick off multiprocessor systems.
/*
 * Secondary-CPU startup trampoline: load the process context from the
 * PCB (PR_PCBB is assumed to have been set up by the boot CPU --
 * confirm against the MP startup code) and rei into it.
 */
308 _C_LABEL(vax_mp_tramp):
309 ldpctx
310 rei
311 #endif
312
/*
 * Soft-interrupt return paths.  softint_cleanup is entered when the
 * lwp that was interrupted by a soft interrupt is resumed: its saved
 * context (built in softint_common) has this as the resume PC, and %r0
 * holds the lwp switched away from (Swtchto stores oldlwp in the new
 * pcb's PCB_R0).  softint_exit just pops the r0/r1 pushed by the SCB
 * stubs and returns from the interrupt.
 *
 * Fix: the .globl directive misspelled softint_cleanup as
 * "softint_cleaup", exporting a nonexistent symbol and leaving the
 * real label local.
 */
313 .globl softint_cleanup,softint_exit,softint_process
314 softint_cleanup:
315 mfpr $PR_SSP,%r1 /* get cpu_info */
316 incl CI_MTX_COUNT(%r1) /* increment mutex count */
317 clrl L_CTXSWTCH(%r0) /* clear l_ctxswtch of old lwp */
318 softint_exit:
319 popr $0x3 /* restore r0 and r1 */
320 rei /* return from interrupt */
321
/*
 * softint_process: runs in the context of a softint lwp, entered via
 * the pcb primed by softint_common (PCB_PC); ldpctx restored %r6 and
 * %r7 from PCB_R6/PCB_R7.  Calls softint_dispatch(pinned_lwp, ipl),
 * then switches straight back to the pinned (interrupted) lwp,
 * arranging for it to take the quick softint_exit path.
 */
322 softint_process:
323 /*
324 * R6 contains pinned LWP
325 * R7 contains ipl to dispatch with
326 */
327 movq %r6,-(%sp) /* push old lwp and ipl onto stack */
328 calls $2,_C_LABEL(softint_dispatch) /* dispatch it */
329
330 /* We can use any register because ldpctx will overwrite them */
331 mfpr $PR_SSP,%r8 /* get cpu_info */
332 movl %r6,CI_CURLWP(%r8) /* update ci_curlwp */
333 movl L_ADDR(%r6),%r3 /* Get pointer to new pcb. */
334
335 movab softint_exit,PCB_PC(%r3)/* do a quick exit */
336 #if 0
337 movl KSP(%r3),%r0 /* get pinned lwp's KSP */
338 movq (%r0)+,PCB_PC(%r3) /* return to real PC & PSL */
339 movq (%r0)+,PCB_R0(%r3) /* restore saved r0 and r1 */
340 movl %r0,KSP(%r3) /* restore pinned KSP */
341 #endif
342
343 mtpr PCB_PADDR(%r3),$PR_PCBB /* restore PA of interrupted pcb */
344 ldpctx
345 rei
346
347
/*
 * softint_common: entered from the SCB stubs below with r0/r1 pushed
 * and %r0 = SOFTINT_* level.  Saves the interrupted context with
 * svpctx (resume PC = softint_cleanup, pushed as the cleanup hook),
 * then loads the pcb of this level's softint lwp, primed to start at
 * softint_process with the interrupted lwp in PCB_R6 and the saved
 * IPL in PCB_R7.
 */
348 softint_common:
349 mfpr $PR_IPL,%r1
350 mtpr $IPL_HIGH,$PR_IPL /* we need to be at IPL_HIGH */
351 movpsl -(%sp) /* add cleanup hook */
352 pushab softint_cleanup
353 svpctx
354
355 /* We can use any register because ldpctx will overwrite them */
356 mfpr $PR_SSP,%r8 /* Get curcpu */
357 movl CI_SOFTLWPS(%r8)[%r0],%r2 /* get softlwp to switch to */
358 movl L_ADDR(%r2),%r3 /* Get pointer to its pcb. */
359 movl CI_CURLWP(%r8),PCB_R6(%r3) /* move old lwp into new pcb */
360 movl %r1,PCB_R7(%r3) /* move IPL into new pcb */
361 movab softint_process,PCB_PC(%r3) /* where we want to start running */
362 movl %r2,CI_CURLWP(%r8) /* update ci_curlwp */
363 mtpr PCB_PADDR(%r3),$PR_PCBB /* set PA of new pcb */
364 ldpctx /* load it */
365 rei /* get off interrupt stack */
366
/*
 * SCB (interrupt vector) stubs for the four software-interrupt levels:
 * save r0/r1, load the softint index into %r0 and join softint_common.
 */
367 SCBENTRY(softclock)
368 pushr $0x3 /* save r0 and r1 */
369 movl $SOFTINT_CLOCK,%r0
370 brb softint_common
371
372 SCBENTRY(softbio)
373 pushr $0x3 /* save r0 and r1 */
374 movl $SOFTINT_BIO,%r0
375 brb softint_common
376
377 SCBENTRY(softnet)
378 pushr $0x3 /* save r0 and r1 */
379 movl $SOFTINT_NET,%r0
380 brb softint_common
381
382 SCBENTRY(softserial)
383 pushr $0x3 /* save r0 and r1 */
384 movl $SOFTINT_SERIAL,%r0
385 brb softint_common
386
387 /*
388 * Helper routine for cpu_lwp_fork. It gets invoked by Swtchto.
389 * It lets the kernel know the lwp is alive and then calls func(arg)
390 * and possibly returns to sret.
391 */
392 ENTRY(cpu_lwp_bootstrap, 0)
393 movq %r2,-(%sp) /* save func & arg */
394 movq %r0,-(%sp) /* push oldl/newl */
395 calls $2,_C_LABEL(lwp_startup) /* startup the lwp */
396 movl (%sp)+,%r0 /* grab func */
/* arg (saved from %r3 at 393) is still on the stack: it becomes the argument */
397 calls $1,(%r0) /* call it with arg */
398 ret
399
400 /*
401 * r1 = newlwp
402 * r0 = oldlwp
403 */
/*
 * Swtchto: jsb-linkage context switch.  Saves the current context with
 * svpctx, then loads newlwp's pcb.  oldlwp is stored into the new
 * pcb's PCB_R0 so that the switched-to code finds it in %r0 after
 * ldpctx (this is what softint_cleanup relies on).
 */
404 JSBENTRY(Swtchto)
405 /* this pops the pc and psw from the stack and puts them in the pcb. */
406 svpctx # Now on interrupt stack
407
408 /* We can now use any register because ldpctx will overwrite them */
409 /* New LWP already in %r1 */
410 mfpr $PR_SSP,%r4 # Get curcpu
411 movl %r1,CI_CURLWP(%r4) # update ci_curlwp
412 movl L_ADDR(%r1),%r3 # Get pointer to new pcb.
413 movl %r0,PCB_R0(%r3) # move r0 into new pcb (return value)
414 #ifdef MULTIPROCESSOR
415 movl %r4,SSP(%r3) # Put curcpu into new PCB
416 #endif
417
418 mtpr PCB_PADDR(%r3),$PR_PCBB # set PA of new pcb
419 mtpr $IPL_HIGH,$PR_IPL /* we need to be at IPL_HIGH */
420 ldpctx # load it
421 /* r0 already has previous lwp */
422 /* r1 already has this lwp */
423 /* r2/r3 and r4/r5 restored */
424 rei /* get off interrupt stack */
425
426 #
427 # copy/fetch/store routines.
428 #
429
/*
 * copyout(kaddr, uaddr, len) / copyin(uaddr, kaddr, len).
 * A user address with the sign bit set is really in kernel space:
 * return -1 immediately (3f).  Otherwise plant the recovery label 1f
 * in the fault-catch slot reached through PR_ESP, copy with movc3
 * (which leaves %r0 = 0 on success) and clear the slot again.
 * NOTE(review): on a fault the trap handler resumes at 1f with the
 * error code already in %r0 -- that value is set outside this file;
 * confirm against the trap code.
 */
430 ENTRY(copyout, 0)
431 movl 8(%ap),%r2 # user-space destination
432 blss 3f # kernel space
433 movl 4(%ap),%r1 # kernel-space source
434 brb 2f
435
436 ENTRY(copyin, 0)
437 movl 4(%ap),%r1 # user-space source
438 blss 3f # kernel space
439 movl 8(%ap),%r2 # kernel-space destination
440 2: mfpr $PR_ESP,%r3
441 movab 1f,(%r3) # plant fault-recovery label
442 movc3 12(%ap),(%r1),(%r2)
443 1: mfpr $PR_ESP,%r3
444 clrl (%r3) # clear fault-recovery label
445 ret
446
447 3: mnegl $1,%r0 # bogus address: return -1
448 ret
449
/*
 * kcopy(from, to, len): copy between kernel addresses that may fault.
 * Saves and restores the previous fault-recovery address around the
 * copy, so kcopy nests correctly inside a pending copyin/copyout.
 * Returns 0 on success.  On a fault execution resumes at 1f and the
 * return value comes from %r1 -- NOTE(review): that value is left by
 * the trap handler, not visible in this file; confirm.
 */
450 ENTRY(kcopy,0)
451 mfpr $PR_ESP,%r3
452 movl (%r3),-(%sp) # save previous recovery address
453 movab 1f,(%r3) # plant our recovery label
454 movl 4(%ap),%r1
455 movl 8(%ap),%r2
456 movc3 12(%ap),(%r1), (%r2)
457 clrl %r1 # success
458 1: mfpr $PR_ESP,%r3
459 movl (%sp)+,(%r3) # restore previous recovery address
460 movl %r1,%r0
461 ret
462
463 /*
464 * copy{in,out}str() copies data from/to user space to/from kernel space.
465 * Security checks:
466 * 1) user space address must be < KERNBASE
467 * 2) the VM system will do the checks while copying
468 */
469 ENTRY(copyinstr, 0)
470 tstl 4(%ap) # kernel address?
471 bgeq 8f # no, continue
/* Bad user address: return EFAULT; zero *done if a done pointer was given. */
472 6: movl $EFAULT,%r0
473 movl 16(%ap),%r2
474 beql 7f
475 clrl (%r2)
476 7: ret
477
478 ENTRY(copyoutstr, 0)
479 tstl 8(%ap) # kernel address?
480 bgeq 8f # no, continue
481 brb 6b # yes, return EFAULT
482
/*
 * copystr(from, to, maxlen, &done): copy a NUL-terminated string, at
 * most maxlen bytes.  Returns 0 on success, ENAMETOOLONG if no NUL was
 * seen within maxlen bytes; *done (if non-NULL) receives the number of
 * bytes copied including the NUL.  The fault-recovery label 3f covers
 * the user-space entry points above.
 * NOTE(review): the recovery slot is not cleared on the success path
 * (only when the fault path 3f runs) -- presumably reset elsewhere;
 * confirm against the trap handler.
 */
483 ENTRY(copystr,0)
484 8: movl 4(%ap),%r5 # from
485 movl 8(%ap),%r4 # to
486 movl 12(%ap),%r3 # len
487 movl 16(%ap),%r2 # copied
488 clrl %r0
489 mfpr $PR_ESP,%r1
490 movab 3f,(%r1) # plant fault-recovery label
491
492 tstl %r3 # any chars to copy?
493 bneq 1f # yes, jump for more
494 0: tstl %r2 # save copied len?
495 beql 2f # no
496 subl3 4(%ap),%r5,(%r2) # save copied len
497 2: ret
498
499 1: movb (%r5)+,(%r4)+ # copy one char
500 beql 0b # jmp if last char
501 sobgtr %r3,1b # copy one more
502 movl $ENAMETOOLONG,%r0 # inform about too long string
503 brb 0b # out of chars
504
/* Fault while touching user space: clear the recovery slot and take the
 * common exit.  NOTE(review): %r0 assumed set by the trap code here. */
505 3: mfpr $PR_ESP,%r1
506 clrl (%r1)
507 brb 0b
508
/*
 * subyte/suword/suswintr(uaddr, value): store a byte/longword/word to
 * user space.  Addresses with the sign bit set are kernel space and
 * return -1 via the shared exit at 3f.  Returns 0 on success.  On a
 * fault the trap code resumes at 1f with %r1 never cleared, so the
 * (non-zero) fault-slot pointer is returned as the error indication.
 */
509 ENTRY(subyte,0)
510 movl 4(%ap),%r0
511 blss 3f # illegal space
512 mfpr $PR_ESP,%r1
513 movab 1f,(%r1) # plant fault-recovery label
514 movb 8(%ap),(%r0)
515 clrl %r1 # success
516 1: mfpr $PR_ESP,%r2
517 clrl (%r2) # clear fault-recovery label
518 movl %r1,%r0
519 ret
520
521 ENTRY(suword,0)
522 movl 4(%ap),%r0
523 blss 3f # illegal space
524 mfpr $PR_ESP,%r1
525 movab 1f,(%r1) # plant fault-recovery label
526 movl 8(%ap),(%r0)
527 clrl %r1 # success
528 1: mfpr $PR_ESP,%r2
529 clrl (%r2) # clear fault-recovery label
530 movl %r1,%r0
531 ret
532
533 ENTRY(suswintr,0)
534 movl 4(%ap),%r0
535 blss 3f # illegal space
536 mfpr $PR_ESP,%r1
537 movab 1f,(%r1) # plant fault-recovery label
538 movw 8(%ap),(%r0)
539 clrl %r1 # success
540 1: mfpr $PR_ESP,%r2
541 clrl (%r2) # clear fault-recovery label
542 movl %r1,%r0
543 ret
544
/* shared "kernel address" error exit, also used by fuswintr below */
545 3: mnegl $1,%r0
546 ret
547
/*
 * fusword/fuswintr(uaddr): fetch an unsigned word from user space,
 * zero-extended.  Kernel-space addresses return -1 via the shared exit
 * at 3b above.  On a fault %r1 still holds the (non-zero) fault-slot
 * pointer, which is what gets returned -- same convention as the su*
 * routines above.
 */
548 .align 2
549 ALTENTRY(fusword)
550 ENTRY(fuswintr,0)
551 movl 4(%ap),%r0
552 blss 3b # kernel address: return -1
553 mfpr $PR_ESP,%r1
554 movab 1f,(%r1) # plant fault-recovery label
555 movzwl (%r0),%r1 # fetch, zero-extended
556 1: mfpr $PR_ESP,%r2
557 clrl (%r2) # clear fault-recovery label
558 movl %r1,%r0
559 ret
560
/*
 * Spin-lock primitives (jsb linkage, lock address in %r1) built on the
 * interlocked branch-on-bit instructions.
 * Slock: spin until bit 0 of the lock is clear, setting it (acquire).
 * Slocktry: single attempt; returns %r0 = 1 if acquired, 0 if held.
 * Sunlock: clear bit 0 (release).
 */
561 JSBENTRY(Slock)
562 1: bbssi $0,(%r1),1b
563 rsb
564
565 JSBENTRY(Slocktry)
566 clrl %r0
567 bbssi $0,(%r1),1f
568 incl %r0
569 1: rsb
570
571 JSBENTRY(Sunlock)
572 bbcci $0,(%r1),1f
573 1: rsb
574
575 #
576 # data department
577 #
578 .data
579
/* Fault-recovery hook for badaddr() above: non-zero while a probe is in
 * progress; the machine-check handler is assumed to branch to it. */
580 .globl _C_LABEL(memtest)
581 _C_LABEL(memtest): # memory test in progress
582 .long 0
583
584 #ifdef __ELF__
585 .section .rodata
586 #endif
# NOTE(review): apparently a panic string for the scheduler; referenced
# outside this view -- confirm before removing.
587 noque: .asciz "swtch"
588