/*	$NetBSD: subr.S,v 1.22 2008/02/21 03:52:47 matt Exp $	   */
2
3 /*
4 * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed at Ludd, University of Lule}.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <machine/asm.h>
34
35 #include "assym.h"
36 #include "opt_ddb.h"
37 #include "opt_multiprocessor.h"
38 #include "opt_lockdebug.h"
39 #include "opt_compat_netbsd.h"
40 #include "opt_compat_ibcs2.h"
41 #ifdef COMPAT_IBCS2
42 #include <compat/ibcs2/ibcs2_syscall.h>
43 #endif
44 #include "opt_compat_ultrix.h"
45 #ifdef COMPAT_ULTRIX
46 #include <compat/ultrix/ultrix_syscall.h>
47 #endif
48
/*
 * JSBENTRY(x): declare a global, longword-aligned entry point that is
 * entered with jsb/bsb linkage (no CALLS entry mask word).
 */
#define JSBENTRY(x) .globl x ; .align 2 ; x :
/*
 * SCBENTRY(name): declare the global, longword-aligned label X<name>,
 * used as a System Control Block (interrupt vector) entry point.
 */
#define SCBENTRY(name) \
	.text			; \
	.align 2		; \
	.globl __CONCAT(X,name)	; \
	__CONCAT(X,name):
55
	.text

#ifdef KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky. The kernel is not loaded at the correct
 * address, so the kernel must first be relocated, then copied, then
 * jump back to the correct address.
 */
/* Copy routine */
cps:
2:	movb	(%r0)+,(%r1)+		# copy loaded image byte by byte
	cmpl	%r0,%r7			# ...until source reaches _edata
	bneq	2b

3:	clrb	(%r1)+			# then zero the bss
	incl	%r0
	cmpl	%r0,%r6			# ...until _end
	bneq	3b
	clrl	-(%sp)			# fake a zero-argument frame
	movl	%sp,%ap
	movl	$_cca,%r7
	movl	%r8,(%r7)
	movpsl	-(%sp)			# push PSL + target PC, then
	pushl	%r2
	rei				# "return" into the relocated kernel
cpe:

/* Copy the copy routine */
1:	movab	cps,%r0			# source: start of copy routine
	movab	cpe,%r1			# end of copy routine
	movl	$0x300000,%sp		# temporary stack/scratch area
	movl	%sp,%r3
4:	movb	(%r0)+,(%r3)+		# copy cps..cpe out of the way
	cmpl	%r0,%r1
	bneq	4b
	movl	%r7,%r8
/* Ok, copy routine copied, set registers and rei */
	movab	_edata,%r7		# copy limit for text+data
	movab	_end,%r6		# zero limit for bss
	movl	$0x80000000,%r1		# destination: kernel virtual base
	movl	$0x80000200,%r0
	subl3	$0x200,%r6,%r9
	movab	2f,%r2			# where to continue after the copy
	subl2	$0x200,%r2
	movpsl	-(%sp)
	pushab	4(%sp)			# PC = relocated copy routine
	rei

/*
 * First entry routine from boot. This should be in a file called locore.
 */
JSBENTRY(start)
	brb	1b			# Netbooted starts here
#else
ASENTRY(start, 0)
#endif
	/*
	 * Common early startup: record symbol table info from the boot
	 * arguments, carve out proc0's uarea and kernel stack above the
	 * loaded image, clear the MMU-related PCB fields, then call the
	 * C entry point _start() with the (old-style) RPB address.
	 */
2:	bisl3	$0x80000000,%r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000			# Push a nice PSL
	pushl	$to				# Address to jump to
	rei					# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)		# Save all regs in panic
	cmpb	(%ap),$3			# symbols info present?
	blssu	3f				# nope, skip
	bisl3	$0x80000000,8(%ap),_C_LABEL(symtab_start)
						#   save start of symtab
	movl	12(%ap),_C_LABEL(symtab_nsyms)	#   save number of symtab
	bisl3	$0x80000000,%r9,_C_LABEL(symtab_end)
						#   save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,%r0	# Round symbol table end
	bicl3	$0x3ff,%r0,_C_LABEL(proc0paddr)	# save proc0 uarea pointer
	bicl3	$0x80000000,_C_LABEL(proc0paddr),%r0 # get phys proc0 uarea addr
	mtpr	%r0,$PR_PCBB			# Save in IPR PCBB
	addl3	$USPACE,_C_LABEL(proc0paddr),%r0 # Get kernel stack top
	mtpr	%r0,$PR_KSP			# put in IPR KSP
	movl	%r0,_C_LABEL(Sysmap)		# SPT start addr after KSP
	movl	_C_LABEL(proc0paddr),%r0	# get PCB virtual address
	mfpr	$PR_PCBB,PCB_PADDR(%r0)		# save PCB physical address
	movab	IFTRAP(%r0),ESP(%r0)		# Save trap address in ESP
	mtpr	4(%r0),$PR_ESP			# Put it in ESP also

# Set some registers in known state: empty P0/P1 regions for proc0
	movl	_C_LABEL(proc0paddr),%r0
	clrl	P0LR(%r0)
	clrl	P1LR(%r0)
	mtpr	$0,$PR_P0LR
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,%r1
	movl	%r1,P0BR(%r0)
	movl	%r1,P1BR(%r0)
	mtpr	%r1,$PR_P0BR
	mtpr	%r1,$PR_P1BR
	clrl	IFTRAP(%r0)
	mtpr	$0,$PR_SCBB

# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(%ap)				# Any arguments?
	bneq	1f				# Yes, called from new boot
	movl	%r11,_C_LABEL(boothowto)	# Howto boot (single etc...)
#	movl	%r10,_C_LABEL(bootdev)		# uninteresting, will complain
	movl	%r8,_C_LABEL(avail_end)		# Usable memory (from VMB)
	clrl	-(%sp)				# Have no RPB
	brb	2f
#endif

1:	pushl	4(%ap)				# Address of old rpb
2:	calls	$1,_C_LABEL(_start)		# Jump away.
	/* NOTREACHED */
164
165
/*
 * Signal handler trampoline, copied out to user space by the kernel.
 * Calls the handler with the three arguments placed on the stack by
 * sendsig, then issues sigreturn; if that ever returns, exit.
 */

	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f				# save r0-r5 around the handler
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0			# handler address (from sendsig frame)
	calls	$3,(%r0)			# handler(sig, code, scp)
	popr	$0x3f				# restore r0-r5
	chmk	$SYS_compat_16___sigreturn14	# restore pre-signal context
	chmk	$SYS_exit			# sigreturn failed: give up
	halt					# not reached
_C_LABEL(esigcode):
182
#ifdef COMPAT_IBCS2
/* iBCS2 flavor of the signal trampoline; identical to the native one. */
	.align	2
	.globl	_C_LABEL(ibcs2_sigcode),_C_LABEL(ibcs2_esigcode)
_C_LABEL(ibcs2_sigcode):
	pushr	$0x3f				# save r0-r5 around the handler
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0			# handler address (from sendsig frame)
	calls	$3,(%r0)			# handler(sig, code, scp)
	popr	$0x3f				# restore r0-r5
	chmk	$SYS_compat_16___sigreturn14	# restore pre-signal context
	chmk	$SYS_exit			# sigreturn failed: give up
	halt					# not reached
_C_LABEL(ibcs2_esigcode):
#endif /* COMPAT_IBCS2 */
197
#ifdef COMPAT_ULTRIX
/* Ultrix flavor of the signal trampoline; uses the Ultrix sigreturn. */
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f				# save r0-r5 around the handler
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0			# handler address (from sendsig frame)
	calls	$3,(%r0)			# handler(sig, code, scp)
	popr	$0x3f				# restore r0-r5
	chmk	$ULTRIX_SYS_sigreturn		# restore pre-signal context
	chmk	$SYS_exit			# sigreturn failed: give up
	halt					# not reached
_C_LABEL(ultrix_esigcode):
#endif
212
	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
/*
 * Device interrupt dispatch template.  A copy of idsptch..eidsptch is
 * placed in each device's SCB vector slot; the jsb pushes the address
 * of the three longwords below, which cmn_idsptch pops to locate the
 * handler, its argument, and the event counter for that vector.
 */
_C_LABEL(idsptch):
	pushr	$0x3f			# save r0-r5
	.word	0x9f16			# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0			# the callback interrupt routine
	.long	0			# its argument
	.long	0			# ptr to correspond evcnt struct
_C_LABEL(eidsptch):

_C_LABEL(cmn_idsptch):
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnlock)	# take the big kernel lock
#endif
	movl	(%sp)+,%r0		# get pointer to idspvec
	mtpr	$IPL_VM,$PR_IPL		# Make sure we are at IPL_VM
	movl	8(%r0),%r1		# get evcnt pointer
	beql	1f			# no ptr, skip increment
	incl	EV_COUNT(%r1)		# increment low longword
	adwc	$0,EV_COUNT+4(%r1)	# add any carry to hi longword
1:	incl	_C_LABEL(uvmexp)+UVME_INTRS	# increment uvmexp.intrs
#if 0
	pushl	%r0			# debug: print routine and argument
	movq	(%r0),-(%sp)
	pushab	2f
	calls	$3,_C_LABEL(printf)
	movl	(%sp)+,%r0
#endif
	pushl	4(%r0)			# push argument
	calls	$1,*(%r0)		# call interrupt routine
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnunlock)	# release the big kernel lock
#endif
	popr	$0x3f			# pop registers
	rei				# return from interrupt
#if 0
2:	.asciz "intr %p(%p)\n"
#endif
252
/*
 * int badaddr(void *addr, int size)
 *
 * Probe a physical/virtual address for a machine check at splhigh.
 * size selects a byte, word or longword access; the machine-check
 * handler redirects to label 4 via the memtest hook on a faulting
 * access.  Returns 0 if the access succeeded, non-zero if it faulted.
 */
ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,%r0		# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(%ap),%r2		# First argument, the address
	movl	8(%ap),%r1		# Sec arg, b,w,l
	pushl	%r0			# Save old IPL
	clrl	%r3			# assume address is good
	movab	4f,_C_LABEL(memtest)	# Set the return address

	caseb	%r1,$1,$4		# What is the size
1:	.word	1f-1b
	.word	2f-1b
	.word	3f-1b			# This is unused
	.word	3f-1b

1:	movb	(%r2),%r1		# Test a byte
	brb	5f

2:	movw	(%r2),%r1		# Test a word
	brb	5f

3:	movl	(%r2),%r1		# Test a long
	brb	5f

4:	incl	%r3			# Got machine chk => addr bad
5:	mtpr	(%sp)+,$PR_IPL		# restore entry IPL
	movl	%r3,%r0			# return 0 = ok, 1 = bad
	ret
281
#ifdef DDB
/*
 * DDB is the only routine that uses setjmp/longjmp.
 * The jmp_buf holds four longwords taken from the caller's CALLS
 * frame (per the VAX calling standard): saved AP, saved FP, return
 * PC, and the SP value after return.  longjmp restores them and
 * jumps to the saved PC; the setjmp return value is longjmp's
 * second argument (left in r0).
 */
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word 0
	movl	4(%ap), %r0		# r0 = jmp_buf
	movl	8(%fp), (%r0)		# save caller's AP
	movl	12(%fp), 4(%r0)		# save caller's FP
	movl	16(%fp), 8(%r0)		# save return PC
	moval	28(%fp),12(%r0)		# save post-return SP
	clrl	%r0			# direct call returns 0
	ret

_C_LABEL(longjmp):.word 0
	movl	4(%ap), %r1		# r1 = jmp_buf
	movl	8(%ap), %r0		# r0 = value setjmp will appear to return
	movl	(%r1), %ap		# restore AP
	movl	4(%r1), %fp		# restore FP
	movl	12(%r1), %sp		# restore SP
	jmp	*8(%r1)			# resume at the saved PC
#endif
304
#if defined(MULTIPROCESSOR)
	.align	2
	.globl	_C_LABEL(vax_mp_tramp)	# used to kick off multiprocessor systems.
/*
 * Secondary-CPU entry: load the process context selected by the
 * boot processor (via PR_PCBB) and drop onto it with rei.
 */
_C_LABEL(vax_mp_tramp):
	ldpctx
	rei
#endif
312
	.globl	softint_cleanup,softint_exit,softint_process
	.type	softint_cleanup@function
	.type	softint_exit@function
	.type	softint_process@function
/*
 * Continuation run (via the PC/PSL pushed in softint_common) when the
 * interrupted LWP is resumed: bump the cpu_info mutex count back up,
 * mark the old LWP as no longer context-switching, then reset the
 * soft interrupt LWP's PCB to a pristine state so it can be reused.
 * On entry %r0 = old (pinned) LWP; falls through into softint_exit
 * with %r1 = PCB of the softint LWP.
 */
softint_cleanup:
	mfpr	$PR_SSP,%r1		/* get cpu_info */
	incl	CI_MTX_COUNT(%r1)	/* increment mutex count */
	clrl	L_CTXSWTCH(%r0)		/* clear l_ctxswtch of old lwp */
	movl	L_ADDR(%r0),%r1		/* get PCB of softint LWP */
softint_exit:
	/*
	 * Now restore the PCB since we have been interrupted or blocked so
	 * we have no idea what state it was last in.
	 */
	movab	(USPACE-TRAPFRAMELEN-CALLSFRAMELEN)(%r1),%r0
					/* calculate where KSP should be */
	movl	%r0,KSP(%r1)		/* save it as SP */
	movl	%r0,PCB_FP(%r1)		/* and as the FP too */
	movab	CA_ARGNO(%r0),PCB_AP(%r1) /* update the AP as well */
	movab	softint_process,PCB_PC(%r1) /* and where we will restart */
	popr	$0x3			/* restore r0 and r1 */
	rei				/* return from interrupt */
335
/*
 * Body of a soft interrupt LWP, entered via the PCB PC set up in
 * softint_exit.  Calls softint_dispatch(pinned_lwp, ipl) and then
 * switches straight back to the interrupted (pinned) LWP.
 */
softint_process:
	/*
	 * R6 contains pinned LWP
	 * R7 contains ipl to dispatch with
	 */
	movq	%r6,-(%sp)		/* push old lwp and ipl onto stack */
	calls	$2,_C_LABEL(softint_dispatch)	/* dispatch it */

	/* We can use any register because ldpctx will overwrite them */
	mfpr	$PR_SSP,%r8		/* get cpu_info */
	movl	%r6,CI_CURLWP(%r8)	/* update ci_curlwp */
	movl	L_ADDR(%r6),%r3		/* Get pointer to new pcb. */
	movl	%r3,PCB_R1(%r3)		/* Make %r1 point to the pcb. */

	movab	softint_exit,PCB_PC(%r3)/* do a quick exit */

	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* restore PA of interrupted pcb */
	ldpctx
	rei
355

/*
 * Common tail of the soft interrupt SCB entries below.  On entry
 * %r0 = SOFTINT_* level and r0/r1 are already saved on the stack.
 * Saves the interrupted context (with softint_cleanup as its resume
 * hook), then switches to the per-level soft interrupt LWP, passing
 * the old LWP in PCB r6 and the entry IPL in PCB r7 (consumed by
 * softint_process).
 */
softint_common:
	mfpr	$PR_IPL,%r1		/* remember the interrupted IPL */
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	movpsl	-(%sp)			/* add cleanup hook */
	pushab	softint_cleanup
	svpctx

	/* We can use any register because ldpctx will overwrite them */
	mfpr	$PR_SSP,%r8		/* Get curcpu */
	movl	CI_SOFTLWPS(%r8)[%r0],%r2 /* get softlwp to switch to */
	movl	L_ADDR(%r2),%r3		/* Get pointer to its pcb. */
	movl	CI_CURLWP(%r8),PCB_R6(%r3) /* move old lwp into new pcb */
	movl	%r1,PCB_R7(%r3)		/* move IPL into new pcb */
	movl	%r2,CI_CURLWP(%r8)	/* update ci_curlwp */
	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* set PA of new pcb */
	ldpctx				/* load it */
	rei				/* get off interrupt stack */
374
/*
 * SCB vector entries for the four soft interrupt levels.  Each saves
 * r0/r1, loads its SOFTINT_* level into %r0, and joins softint_common.
 */
SCBENTRY(softclock)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_CLOCK,%r0
	brb	softint_common

SCBENTRY(softbio)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_BIO,%r0
	brb	softint_common

SCBENTRY(softnet)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_NET,%r0
	brb	softint_common

SCBENTRY(softserial)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_SERIAL,%r0
	brb	softint_common
394
/*
 * Helper routine for cpu_lwp_fork. It gets invoked by Swtchto.
 * It lets the kernel know the lwp is alive and then calls func(arg)
 * and possibly returns to sret.
 * On entry: r0/r1 = old/new LWP (from Swtchto), r2/r3 = func/arg
 * (restored from the new PCB).
 */
ENTRY(cpu_lwp_bootstrap, 0)
	movq	%r2,-(%sp)		/* save func & arg */
	movq	%r0,-(%sp)		/* push oldl/newl */
	calls	$2,_C_LABEL(lwp_startup) /* startup the lwp */
	movl	(%sp)+,%r0		/* grab func */
	calls	$1,(%r0)		/* call it with arg (still on stack) */
	ret
407
/*
 * Context switch, jsb linkage:
 *	r1 = newlwp
 *	r0 = oldlwp
 * Saves the current context with svpctx, loads the new LWP's PCB and
 * resumes it via ldpctx/rei.  The old LWP is passed through to the new
 * context in PCB r0 (it becomes cpu_switchto's return value).
 */
JSBENTRY(Swtchto)
	/* this pops the pc and psw from the stack and puts them in the pcb. */
	svpctx				# Now on interrupt stack

	/* We can now use any register because ldpctx will overwrite them */
	/* New LWP already in %r1 */
	mfpr	$PR_SSP,%r4		# Get curcpu
	movl	%r1,CI_CURLWP(%r4)	# update ci_curlwp
	movl	L_ADDR(%r1),%r3		# Get pointer to new pcb.
	movl	%r0,PCB_R0(%r3)		# move r0 into new pcb (return value)
#ifdef MULTIPROCESSOR
	movl	%r4,SSP(%r3)		# Put curcpu into new PCB
#endif

	mtpr	PCB_PADDR(%r3),$PR_PCBB	# set PA of new pcb
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	ldpctx				# load it
	/* r0 already has previous lwp */
	/* r1 already has this lwp */
	/* r2/r3 and r4/r5 restored */
	rei				/* get off interrupt stack */
433
#
# copy/fetch/store routines.
#

/*
 * int copyout(const void *kaddr, void *uaddr, size_t len)
 * Copy len bytes from kernel space to user space.  The movl sets the
 * condition codes, so blss rejects a "user" address with the top bit
 * set (i.e. a kernel address) with -1 (EFAULT conversion is done by
 * the caller's convention for these routines).
 */
ENTRY(copyout, 0)
	movl	8(%ap),%r2		# user (destination) address
	blss	3f			# kernel space
	movl	4(%ap),%r1		# kernel (source) address
	brb	2f

/*
 * int copyin(const void *uaddr, void *kaddr, size_t len)
 * Copy len bytes from user space to kernel space; same fault scheme.
 */
ENTRY(copyin, 0)
	movl	4(%ap),%r1		# user (source) address
	blss	3f			# kernel space
	movl	8(%ap),%r2		# kernel (destination) address
2:	mfpr	$PR_ESP,%r3		# install fault-recovery PC...
	movab	1f,(%r3)		# ...so a page fault resumes at 1f
	movc3	12(%ap),(%r1),(%r2)	# do the copy; leaves %r0 = 0
1:	mfpr	$PR_ESP,%r3		# clear the fault-recovery PC
	clrl	(%r3)
	ret

3:	mnegl	$1,%r0			# address check failed
	ret
457
/*
 * int kcopy(const void *src, void *dst, size_t len)
 * Kernel-to-kernel copy that may fault.  Unlike copyin/copyout the
 * previous fault-recovery PC is saved and restored, so kcopy may be
 * called while another recovery hook is active.  Returns 0 on
 * success; on a fault, execution resumes at 1f with %r1 holding the
 * fault code delivered by the trap handler (non-zero).
 */
ENTRY(kcopy,0)
	mfpr	$PR_ESP,%r3
	movl	(%r3),-(%sp)		# save previous recovery PC
	movab	1f,(%r3)		# install ours
	movl	4(%ap),%r1
	movl	8(%ap),%r2
	movc3	12(%ap),(%r1), (%r2)	# do the copy
	clrl	%r1			# no fault: return 0
1:	mfpr	$PR_ESP,%r3
	movl	(%sp)+,(%r3)		# restore previous recovery PC
	movl	%r1,%r0
	ret
470
/*
 * copy{in,out}str() copies data from/to user space to/from kernel space.
 * Security checks:
 *	1) user space address must be < KERNBASE
 *	2) the VM system will do the checks while copying
 */
ENTRY(copyinstr, 0)
	tstl	4(%ap)			# kernel address?
	bgeq	8f			# no, continue
6:	movl	$EFAULT,%r0		# yes: fail with EFAULT
	movl	16(%ap),%r2		# copied-length out-param supplied?
	beql	7f			# no, just return
	clrl	(%r2)			# report zero bytes copied
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(%ap)			# kernel address?
	bgeq	8f			# no, continue
	brb	6b			# yes, return EFAULT

/*
 * int copystr(const void *from, void *to, size_t len, size_t *copied)
 * Copy a NUL-terminated string of at most len bytes; also the shared
 * body of copyinstr/copyoutstr above (label 8).  Returns 0 on success
 * or ENAMETOOLONG if len was exhausted before the NUL.
 */
ENTRY(copystr,0)
8:	movl	4(%ap),%r5	# from
	movl	8(%ap),%r4	# to
	movl	12(%ap),%r3	# len
	movl	16(%ap),%r2	# copied
	clrl	%r0		# default return value: success
	mfpr	$PR_ESP,%r1	# install fault-recovery PC (label 3)
	movab	3f,(%r1)
	# NOTE(review): the recovery PC is only cleared on the fault path
	# below, not on the success path -- confirm callers reset it.

	tstl	%r3		# any chars to copy?
	bneq	1f		# yes, jump for more
0:	tstl	%r2		# save copied len?
	beql	2f		# no
	subl3	4(%ap),%r5,(%r2)	# save copied len
2:	ret

1:	movb	(%r5)+,(%r4)+	# copy one char
	beql	0b		# jmp if last char (copied NUL)
	sobgtr	%r3,1b		# copy one more
	movl	$ENAMETOOLONG,%r0	# inform about too long string
	brb	0b		# out of chars

3:	mfpr	$PR_ESP,%r1	# fault: clear recovery PC and finish
	clrl	(%r1)
	brb	0b
516
/*
 * int subyte(void *uaddr, int byte)
 * Store one byte in user space.  Returns 0 on success, non-zero on
 * fault: a fault resumes at 1f with %r1 still holding the (non-zero)
 * ESP pointer from the mfpr, while success clears %r1 first.
 */
ENTRY(subyte,0)
	movl	4(%ap),%r0
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)		# install fault-recovery PC
	movb	8(%ap),(%r0)		# the store itself
	clrl	%r1			# success marker
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)			# clear recovery PC
	movl	%r1,%r0
	ret
528
/*
 * int suword(void *uaddr, long word)
 * Store one longword in user space; same success/fault protocol as
 * subyte above.
 */
ENTRY(suword,0)
	movl	4(%ap),%r0
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)		# install fault-recovery PC
	movl	8(%ap),(%r0)		# the store itself
	clrl	%r1			# success marker
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)			# clear recovery PC
	movl	%r1,%r0
	ret
540
/*
 * int suswintr(void *uaddr, short word)
 * Store one word in user space (interrupt-safe variant); same
 * success/fault protocol as subyte above.
 */
ENTRY(suswintr,0)
	movl	4(%ap),%r0
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)		# install fault-recovery PC
	movw	8(%ap),(%r0)		# the store itself
	clrl	%r1			# success marker
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)			# clear recovery PC
	movl	%r1,%r0
	ret

/* Shared kernel-address error return for the su*/fu* routines. */
3:	mnegl	$1,%r0
	ret
555
	.align	2
/*
 * int fusword(const void *uaddr) / int fuswintr(const void *uaddr)
 * Fetch one word from user space, zero-extended; on a fault, resumes
 * at 1f with the non-zero ESP pointer still in %r1 as the error
 * return (same protocol as the su* routines above).
 */
ALTENTRY(fusword)
ENTRY(fuswintr,0)
	movl	4(%ap),%r0
	blss	3b			# illegal (kernel) address
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)		# install fault-recovery PC
	movzwl	(%r0),%r1		# the fetch itself
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)			# clear recovery PC
	movl	%r1,%r0
	ret
568
/*
 * Simple spinlock primitives, jsb linkage with the lock address in %r1.
 * Built on the interlocked branch-on-bit instructions (bbssi/bbcci).
 */
JSBENTRY(Slock)
1:	bbssi	$0,(%r1),1b		# set bit 0 interlocked; spin while held
	rsb

JSBENTRY(Slocktry)
	clrl	%r0			# assume failure
	bbssi	$0,(%r1),1f		# already held?
	incl	%r0			# no: we took it, return 1
1:	rsb

JSBENTRY(Sunlock)
	bbcci	$0,(%r1),1f		# clear bit 0 interlocked
1:	rsb
582
#
# data department
#
	.data

	.globl	_C_LABEL(memtest)
_C_LABEL(memtest):		# memory test in progress; holds the PC the
	.long	0		# machine-check handler resumes at (see badaddr)

#ifdef __ELF__
	.section .rodata
#endif
noque:	.asciz "swtch"		# message string; referenced outside this file
				# (presumably by the switch error path) -- TODO confirm
596