subr.S revision 1.24 1 /* $NetBSD: subr.S,v 1.24 2008/02/23 05:48:14 matt Exp $ */
2
3 /*
4 * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed at Ludd, University of Lule}.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <machine/asm.h>
34
35 #include "assym.h"
36 #include "opt_ddb.h"
37 #include "opt_multiprocessor.h"
38 #include "opt_lockdebug.h"
39 #include "opt_compat_netbsd.h"
40 #include "opt_compat_ibcs2.h"
41 #ifdef COMPAT_IBCS2
42 #include <compat/ibcs2/ibcs2_syscall.h>
43 #endif
44 #include "opt_compat_ultrix.h"
45 #ifdef COMPAT_ULTRIX
46 #include <compat/ultrix/ultrix_syscall.h>
47 #endif
48
/*
 * JSBENTRY(x): a global entry point that is entered via the jsb
 * instruction -- no CALLS frame and no register save mask; it is
 * left with rsb or rei, never ret.
 * SCBENTRY(name): an interrupt entry point; generates the global
 * label Xname that the System Control Block vectors point at.
 */
49 #define JSBENTRY(x) .globl x ; .align 2 ; x :
50 #define SCBENTRY(name) \
51 .text ; \
52 .align 2 ; \
53 .globl __CONCAT(X,name) ; \
54 __CONCAT(X,name):
55
56 .text
57
58 #ifdef KERNEL_LOADABLE_BY_MOP
59 /*
60 * This is a little tricky. The kernel is not loaded at the correct
61 * address, so the kernel must first be relocated, then copied, then
62 * jump back to the correct address.
63 */
64 /* Copy routine */
65 cps:
66 2: movb (%r0)+,(%r1)+	# copy the image byte by byte
67 cmpl %r0,%r7		# ... until the source hits %r7 (end of data)
68 bneq 2b
69
70 3: clrb (%r1)+		# then zero out to %r6 (end of bss)
71 incl %r0
72 cmpl %r0,%r6
73 bneq 3b
74 clrl -(%sp)		# fake an empty argument list
75 movl %sp,%ap
76 movl $_cca,%r7		# NOTE(review): stash boot value %r8 in _cca --
77 movl %r8,(%r7)		# presumably console/boot info; confirm in machdep.c
78 movpsl -(%sp)		# build a PSL/PC pair on the stack and "return"
79 pushl %r2		# into the relocated kernel at %r2
80 rei
81 cpe:
82
83 /* Copy the copy routine */
84 1: movab cps,%r0		# source: the copy routine above
85 movab cpe,%r1		# ... and its end
86 movl $0x300000,%sp	# scratch area (and stack) at 3MB
87 movl %sp,%r3
88 4: movb (%r0)+,(%r3)+	# copy cps..cpe to 0x300000 so it survives
89 cmpl %r0,%r1		# the kernel being moved over itself
90 bneq 4b
91 movl %r7,%r8		# preserve boot argument across the switch
92 /* Ok, copy routine copied, set registers and rei */
93 movab _edata,%r7	# source copy stops at _edata
94 movab _end,%r6		# bss clearing stops at _end
95 movl $0x80000000,%r1	# destination: kernel virtual base
96 movl $0x80000200,%r0	# source: skip the 0x200 byte exec header
97 subl3 $0x200,%r6,%r9	# %r9 = end of loaded code after the shift
98 movab 2f,%r2		# resume point, relocated down by the
99 subl2 $0x200,%r2	# same 0x200 bytes
100 movpsl -(%sp)		# PC pushed below is 0x300000, the start of
101 pushab 4(%sp)		# the copied copy routine; rei jumps there
102 rei
103
104 /*
105 * First entry routine from boot. This should be in a file called locore.
106 */
107 JSBENTRY(start)
108 brb 1b # Netbooted starts here
109 #else
110 ASENTRY(start, 0)
111 #endif
/*
 * Common startup, entered from start with %r9 = physical end of the
 * loaded image and the boot program's argument list in %ap. Sets up
 * proc0's uarea/PCB, the kernel stack and the per-process map base
 * registers, then calls the C entry point _start() and never returns.
 */
112 2: bisl3 $0x80000000,%r9,_C_LABEL(esym) # End of loaded code
113 pushl $0x1f0000 # Push a nice PSL
114 pushl $to # Address to jump to
115 rei # change to kernel stack
116 to: movw $0xfff,_C_LABEL(panic) # Save all regs in panic
117 # (0xfff written over panic()'s entry mask: calls to panic save %r0-%r11)
118 cmpb (%ap),$3 # symbols info present?
119 blssu 3f # nope, skip
120 bisl3 $0x80000000,8(%ap),_C_LABEL(symtab_start)
121 # save start of symtab
122 movl 12(%ap),_C_LABEL(symtab_nsyms) # save number of symtab
123 bisl3 $0x80000000,%r9,_C_LABEL(symtab_end)
124 # save end of symtab
125 3: addl3 _C_LABEL(esym),$0x3ff,%r0 # Round symbol table end
126 bicl3 $0x3ff,%r0,_C_LABEL(proc0paddr) # save proc0 uarea pointer
127 bicl3 $0x80000000,_C_LABEL(proc0paddr),%r0 # get phys proc0 uarea addr
128 mtpr %r0,$PR_PCBB # Save in IPR PCBB
129 addl3 $USPACE,_C_LABEL(proc0paddr),%r0 # Get kernel stack top
130 mtpr %r0,$PR_KSP # put in IPR KSP
131 movl %r0,_C_LABEL(Sysmap) # SPT start addr after KSP
132 movl _C_LABEL(proc0paddr),%r0 # get PCB virtual address
133 mfpr $PR_PCBB,PCB_PADDR(%r0) # save PCB physical address
134 movab IFTRAP(%r0),ESP(%r0) # Save trap address in ESP
135 mtpr 4(%r0),$PR_ESP # Put it in ESP also
136
137 # Set some registers in known state
138 movl _C_LABEL(proc0paddr),%r0
139 clrl P0LR(%r0) # empty P0/P1 page tables for proc0
140 clrl P1LR(%r0)
141 mtpr $0,$PR_P0LR
142 mtpr $0,$PR_P1LR
143 movl $0x80000000,%r1
144 movl %r1,P0BR(%r0)
145 movl %r1,P1BR(%r0)
146 mtpr %r1,$PR_P0BR
147 mtpr %r1,$PR_P1BR
148 clrl IFTRAP(%r0) # no fault catcher armed yet
149 mtpr $0,$PR_SCBB # SCB at physical address 0
150
151 # Copy the RPB to its new position
152 #if defined(COMPAT_14)
153 tstl (%ap) # Any arguments?
154 bneq 1f # Yes, called from new boot
155 movl %r11,_C_LABEL(boothowto) # Howto boot (single etc...)
156 # movl %r10,_C_LABEL(bootdev) # uninteresting, will complain
157 movl %r8,_C_LABEL(avail_end) # Usable memory (from VMB)
158 clrl -(%sp) # Have no RPB
159 brb 2f
160 #endif
161
162 1: pushl 4(%ap) # Address of old rpb
163 2: calls $1,_C_LABEL(_start) # Jump away.
164 /* NOTREACHED */
164
165
166 /*
167 * Signal handler code. This trampoline is copied out to the top of
168 * the user stack by sendsig(); signal delivery resumes user mode here.
 * Offsets below depend on the sigframe layout built by sendsig() --
 * NOTE(review): confirm against the machine-dependent sendsig code.
 */
169
170 .align 2
171 .globl _C_LABEL(sigcode),_C_LABEL(esigcode)
172 _C_LABEL(sigcode):
173 pushr $0x3f		# preserve %r0-%r5 around the handler call
174 subl2 $0xc,%sp	# space for the CALLS argument list
175 movl 0x24(%sp),%r0	# handler address: 0xc hole + 6 saved regs above
176 calls $3,(%r0)	# invoke handler(sig, code, scp)
177 popr $0x3f		# restore %r0-%r5
178 chmk $SYS_compat_16___sigreturn14	# restore the interrupted context
179 chmk $SYS_exit	# sigreturn failed: give up and exit
180 halt		# never reached (privileged, faults in user mode)
181 _C_LABEL(esigcode):
182
183 #ifdef COMPAT_IBCS2
/* Signal trampoline for iBCS2 emulation; identical to the native one. */
184 .align 2
185 .globl _C_LABEL(ibcs2_sigcode),_C_LABEL(ibcs2_esigcode)
186 _C_LABEL(ibcs2_sigcode):
187 pushr $0x3f		# preserve %r0-%r5 around the handler call
188 subl2 $0xc,%sp	# space for the CALLS argument list
189 movl 0x24(%sp),%r0	# fetch handler address from the sigframe
190 calls $3,(%r0)	# invoke handler(sig, code, scp)
191 popr $0x3f		# restore %r0-%r5
192 chmk $SYS_compat_16___sigreturn14	# restore the interrupted context
193 chmk $SYS_exit	# sigreturn failed: give up and exit
194 halt		# never reached (privileged, faults in user mode)
195 _C_LABEL(ibcs2_esigcode):
196 #endif /* COMPAT_IBCS2 */
197
198 #ifdef COMPAT_ULTRIX
/* Signal trampoline for Ultrix emulation; uses the Ultrix sigreturn. */
199 .align 2
200 .globl _C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
201 _C_LABEL(ultrix_sigcode):
202 pushr $0x3f		# preserve %r0-%r5 around the handler call
203 subl2 $0xc,%sp	# space for the CALLS argument list
204 movl 0x24(%sp),%r0	# fetch handler address from the sigframe
205 calls $3,(%r0)	# invoke handler(sig, code, scp)
206 popr $0x3f		# restore %r0-%r5
207 chmk $ULTRIX_SYS_sigreturn	# Ultrix flavour of sigreturn
208 chmk $SYS_exit	# sigreturn failed: give up and exit
209 halt		# never reached (privileged, faults in user mode)
210 _C_LABEL(ultrix_esigcode):
211 #endif
212
/*
 * Device interrupt dispatch. idsptch..eidsptch is a template that is
 * copied into the SCB vector area, one copy per device vector. The
 * "jsb to absolute address" trick pushes the address of the three
 * longwords following it (handler, argument, evcnt pointer), which is
 * how cmn_idsptch finds the per-vector data.
 */
213 .align 2
214 .globl _C_LABEL(idsptch), _C_LABEL(eidsptch)
215 _C_LABEL(idsptch):
216 pushr $0x3f		# save %r0-%r5 (volatile over the handler)
217 .word 0x9f16 # jsb to absolute address
218 .long _C_LABEL(cmn_idsptch) # the absolute address
219 .long 0 # the callback interrupt routine
220 .long 0 # its argument
221 .long 0 # ptr to corresponding evcnt struct
222 _C_LABEL(eidsptch):
223
224 _C_LABEL(cmn_idsptch):
225 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
226 calls $0,_C_LABEL(krnlock)
227 #endif
228 movl (%sp)+,%r0 # get pointer to idspvec (pushed by the jsb above)
229 mtpr $IPL_VM,$PR_IPL # Make sure we are at IPL_VM
230 movl 8(%r0),%r1 # get evcnt pointer
231 beql 1f # no ptr, skip increment
232 incl EV_COUNT(%r1) # increment low longword
233 adwc $0,EV_COUNT+4(%r1) # add any carry to hi longword
234 1: incl _C_LABEL(uvmexp)+UVME_INTRS # increment uvmexp.intrs
235 #if 0
236 pushl %r0
237 movq (%r0),-(%sp)
238 pushab 2f
239 calls $3,_C_LABEL(printf)	# debug: trace "intr handler(arg)"
240 movl (%sp)+,%r0
241 #endif
242 pushl 4(%r0) # push argument
243 calls $1,*(%r0) # call interrupt routine
244 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
245 calls $0,_C_LABEL(krnunlock)
246 #endif
247 popr $0x3f # pop registers
248 rei # return from interrupt
249 #if 0
250 2: .asciz "intr %p(%p)\n"
251 #endif
252
/*
 * int badaddr(void *addr, int size)
 * Probe an address for readability: returns 0 if a size-byte (1, 2
 * or 4) read succeeds, non-zero if it causes a machine check. Runs
 * at IPL_HIGH with the machine check handler redirected via memtest.
 */
253 ENTRY(badaddr,0) # Called with addr,b/w/l
254 mfpr $PR_IPL,%r0 # splhigh()
255 mtpr $IPL_HIGH,$PR_IPL
256 movl 4(%ap),%r2 # First argument, the address
257 movl 8(%ap),%r1 # Sec arg, b,w,l
258 pushl %r0 # Save old IPL
259 clrl %r3 # %r3 = result, 0 = address OK
260 movab 4f,_C_LABEL(memtest) # Set the return address
261
262 caseb %r1,$1,$4 # What is the size
263 1: .word 1f-1b
264 .word 2f-1b
265 .word 3f-1b # This is unused
266 .word 3f-1b
267
268 1: movb (%r2),%r1 # Test a byte
269 brb 5f
270
271 2: movw (%r2),%r1 # Test a word
272 brb 5f
273
274 3: movl (%r2),%r1 # Test a long
275 brb 5f
276
277 4: incl %r3 # Got machine chk => addr bad
278 5: mtpr (%sp)+,$PR_IPL # restore previous IPL
279 movl %r3,%r0 # return the probe result
280 ret
281
282 #ifdef DDB
283 /*
284 * DDB is the only routine that uses setjmp/longjmp.
 *
 * The jmp_buf holds four longwords: saved AP, FP, PC and SP.
 * Offsets into the CALLS frame at %fp: 8 = saved AP, 12 = saved FP,
 * 16 = return PC (entry mask here is 0, so no registers follow).
285 */
286 .globl _C_LABEL(setjmp), _C_LABEL(longjmp)
287 _C_LABEL(setjmp):.word 0
288 movl 4(%ap), %r0	# %r0 = jmp_buf
289 movl 8(%fp), (%r0)	# save caller's AP
290 movl 12(%fp), 4(%r0)	# save caller's FP
291 movl 16(%fp), 8(%r0)	# save return PC
292 moval 28(%fp),12(%r0)	# SP after ret: 20-byte frame + count + 1 arg
293 clrl %r0		# setjmp() returns 0
294 ret
295
296 _C_LABEL(longjmp):.word 0
297 movl 4(%ap), %r1	# %r1 = jmp_buf
298 movl 8(%ap), %r0	# value the resumed setjmp() will return
299 movl (%r1), %ap	# restore AP
300 movl 4(%r1), %fp	# restore FP
301 movl 12(%r1), %sp	# restore SP
302 jmp *8(%r1)		# resume at the saved return PC
303 #endif
304
305 #if defined(MULTIPROCESSOR)
306 .align 2
307 .globl _C_LABEL(vax_mp_tramp) # used to kick off multiprocessor systems.
308 _C_LABEL(vax_mp_tramp):
309 ldpctx	# load the PCB pre-built for this secondary CPU (PR_PCBB
310 rei	# set by the boot code) and rei into its saved PC/PSL
311 #endif
312
/*
 * Soft-interrupt support.
 *
 * softint_cleanup: resume point of a pinned lwp that was interrupted by
 * a soft interrupt (pushed as its saved PC in softint_common below).
 * NOTE(review): entered with %r0 = the lwp just switched away from
 * (loaded from PCB_R0, which Swtchto fills in) -- confirm.
 * softint_exit: fast exit -- pop the %r0/%r1 the SCBENTRY stub pushed
 * and return to the interrupted code.
 * softint_process: runs in the context of the softint lwp, built by
 * softint_common with %r6 = pinned lwp and %r7 = IPL (via PCB_R6/R7).
 */
313 .globl softint_cleanup,softint_exit,softint_process
314 .type softint_cleanup@function
315 .type softint_exit@function
316 .type softint_process@function
317 softint_cleanup:
318 movl L_CPU(%r0),%r1 /* get cpu_info */
319 incl CI_MTX_COUNT(%r1) /* increment mutex count */
320 clrl L_CTXSWTCH(%r0) /* clear l_ctxswtch of old lwp */
321 movl L_ADDR(%r0),%r1 /* get PCB of softint LWP */
322 softint_exit:
323 popr $0x3 /* restore r0 and r1 */
324 rei /* return from interrupt */
325
326 softint_process:
327 /*
328 * R6 contains pinned LWP
329 * R7 contains ipl to dispatch with
330 */
331 movq %r6,-(%sp) /* push old lwp and ipl onto stack */
332 calls $2,_C_LABEL(softint_dispatch) /* dispatch it */
333
334 /* We can use any register because ldpctx will overwrite them */
335 movl L_ADDR(%r6),%r3 /* get pcb */
336 movab softint_exit,PCB_PC(%r3)/* patch pinned lwp for a quick exit */
337 #ifdef MULTIPROCESSOR
338 movl L_CPU(%r6),%r8
339 movl %r6,CI_CURLWP(%r8)
340 #endif
341
342 mtpr PCB_PADDR(%r3),$PR_PCBB /* restore PA of interrupted pcb */
343 ldpctx /* implicitly updates curlwp */
344 rei
345
346
/*
 * Common soft-interrupt entry, reached from the SCBENTRY stubs below
 * with %r0 = softint level and the interrupted lwp's %r0/%r1 already
 * on the stack. Saves the pinned lwp's context (resume PC set to
 * softint_cleanup), then hand-crafts the softint lwp's PCB so that
 * ldpctx/rei starts it at softint_process.
 */
347 softint_common:
348 mfpr $PR_IPL,%r1	/* remember the interrupted IPL */
349 mtpr $IPL_HIGH,$PR_IPL /* we need to be at IPL_HIGH */
350 movpsl -(%sp) /* add cleanup hook */
351 pushab softint_cleanup
352 svpctx	/* save pinned lwp's context; now on interrupt stack */
353
354 /* We can use any register because ldpctx will overwrite them */
355 mfpr $PR_SSP,%r6 /* Get curlwp */
356 movl L_CPU(%r6),%r8 /* get cpu_info */
357 movl CI_SOFTLWPS(%r8)[%r0],%r2 /* get softlwp to switch to */
358 movl L_ADDR(%r2),%r3 /* Get pointer to its pcb. */
359 movl %r6,PCB_R6(%r3) /* move old lwp into new pcb */
360 movl %r1,PCB_R7(%r3) /* move IPL into new pcb */
361 #ifdef MULTIPROCESSOR
362 movl %r2,CI_CURLWP(%r8) /* update ci_curlwp */
363 #endif
364
365 /*
366 * Now reset the PCB since we have no idea what state it was last in
367 */
368 movab (USPACE-TRAPFRAMELEN-CALLSFRAMELEN)(%r3),%r0
369 /* calculate where KSP should be */
370 movl %r0,KSP(%r3) /* save it as SP */
371 movl %r0,PCB_FP(%r3) /* and as the FP too */
372 movab CA_ARGNO(%r0),PCB_AP(%r3) /* update the AP as well */
373 movab softint_process,PCB_PC(%r3) /* and where we will start */
374
375 mtpr PCB_PADDR(%r3),$PR_PCBB /* set PA of new pcb */
376 ldpctx /* load it */
377 rei /* get off interrupt stack */
378
/*
 * SCB entries for the four software interrupt levels. Each saves the
 * interrupted %r0/%r1, loads its level into %r0 and dispatches to
 * softint_common above.
 */
379 SCBENTRY(softclock)
380 pushr $0x3 /* save r0 and r1 */
381 movl $SOFTINT_CLOCK,%r0
382 brb softint_common
383
384 SCBENTRY(softbio)
385 pushr $0x3 /* save r0 and r1 */
386 movl $SOFTINT_BIO,%r0
387 brb softint_common
388
389 SCBENTRY(softnet)
390 pushr $0x3 /* save r0 and r1 */
391 movl $SOFTINT_NET,%r0
392 brb softint_common
393
394 SCBENTRY(softserial)
395 pushr $0x3 /* save r0 and r1 */
396 movl $SOFTINT_SERIAL,%r0
397 brb softint_common
398
399 /*
400 * Helper routine for cpu_lwp_fork. It gets invoked by Swtchto.
401 * It lets the kernel know the lwp is alive and then calls func(arg)
402 * and possibly returns to sret.
403 */
404 ENTRY(cpu_lwp_bootstrap, 0)
405 movq %r2,-(%sp) /* save func & arg */
406 movq %r0,-(%sp) /* push oldl/newl */
407 calls $2,_C_LABEL(lwp_startup) /* startup the lwp (pops oldl/newl) */
408 movl (%sp)+,%r0 /* grab func */
409 calls $1,(%r0) /* call it with arg (still on the stack) */
410 ret
411
412 /*
413 * r1 = newlwp
414 * r0 = oldlwp
 * Context switch, entered via jsb. Saves the current context with
 * svpctx and loads the new lwp's PCB; the old lwp is handed to the
 * new one as a "return value" through PCB_R0.
415 */
416 JSBENTRY(Swtchto)
417 /* this pops the pc and psw from the stack and puts them in the pcb. */
418 svpctx # Now on interrupt stack
419
420 /* We can now use any register because ldpctx will overwrite them */
421 /* New LWP already in %r1 */
422 movl L_ADDR(%r1),%r3 # Get pointer to new pcb.
423 movl %r0,PCB_R0(%r3) # move r0 into new pcb (return value)
424 #ifdef MULTIPROCESSOR
425 movl L_CPU(%r0), %r8 /* get cpu_info of old lwp */
426 movl %r8, L_CPU(%r1) /* update cpu_info of new lwp */
427 movl %r1,CI_CURLWP(%r8) /* update ci_curlwp */
428 #endif
429
430 mtpr PCB_PADDR(%r3),$PR_PCBB # set PA of new pcb
431 mtpr $IPL_HIGH,$PR_IPL /* we need to be at IPL_HIGH */
432 ldpctx # load it
433 /* r0 already has previous lwp */
434 /* r1 already has this lwp */
435 /* r2/r3 and r4/r5 restored */
436 rei /* get off interrupt stack */
437
438 #
439 # copy/fetch/store routines.
440 #
# User addresses live below 0x80000000, so "blss" (negative = bit 31
# set = kernel space) rejects kernel addresses with a -1 return. The
# fault catcher is armed by storing a resume address through PR_ESP,
# which points at the PCB's IFTRAP field (set up in startup above).
441
442 ENTRY(copyout, 0)	# copyout(kaddr, uaddr, len)
443 movl 8(%ap),%r2	# user destination
444 blss 3f # kernel space
445 movl 4(%ap),%r1	# kernel source
446 brb 2f
447
448 ENTRY(copyin, 0)	# copyin(uaddr, kaddr, len)
449 movl 4(%ap),%r1	# user source
450 blss 3f # kernel space
451 movl 8(%ap),%r2	# kernel destination
452 2: mfpr $PR_ESP,%r3	# arm the fault catcher: resume at 1f
453 movab 1f,(%r3)
454 movc3 12(%ap),(%r1),(%r2)	# do the copy; leaves %r0 = 0 on success
455 1: mfpr $PR_ESP,%r3	# disarm catcher. NOTE(review): on a fault the
456 clrl (%r3)		# trap handler resumes here with an error in %r0
457 ret
458
459 3: mnegl $1,%r0	# kernel-space "user" address: return -1
460 ret
461
/*
 * kcopy(src, dst, len): kernel-to-kernel copy that survives faults.
 * Saves and restores the previous fault catcher so kcopy may be used
 * while another catcher is armed (e.g. nested from copyin/copyout).
 * NOTE(review): on a fault the handler resumes at 1f with the error
 * in %r1 (the success path clears %r1 first) -- confirm in trap.c.
 */
462 ENTRY(kcopy,0)
463 mfpr $PR_ESP,%r3
464 movl (%r3),-(%sp)	# save previous fault catcher
465 movab 1f,(%r3)	# ... and arm ours
466 movl 4(%ap),%r1
467 movl 8(%ap),%r2
468 movc3 12(%ap),(%r1), (%r2)
469 clrl %r1		# success: error = 0
470 1: mfpr $PR_ESP,%r3
471 movl (%sp)+,(%r3)	# restore previous catcher
472 movl %r1,%r0		# return 0 or the fault error
473 ret
474
475 /*
476 * copy{in,out}str() copies data from/to user space to/from kernel space.
477 * Security checks:
478 * 1) user space address must be < KERNBASE
479 * 2) the VM system will do the checks while copying
 *
 * All three share the copy loop at 8f in copystr. Returns 0 on
 * success, ENAMETOOLONG if no NUL within len bytes, or EFAULT for a
 * bad user address; *done (if non-NULL) gets the number of bytes
 * copied including the NUL.
480 */
481 ENTRY(copyinstr, 0)
482 tstl 4(%ap) # kernel address?
483 bgeq 8f # no, continue
484 6: movl $EFAULT,%r0
485 movl 16(%ap),%r2	# store 0 in *done, if asked for
486 beql 7f
487 clrl (%r2)
488 7: ret
489
490 ENTRY(copyoutstr, 0)
491 tstl 8(%ap) # kernel address?
492 bgeq 8f # no, continue
493 brb 6b # yes, return EFAULT
494
495 ENTRY(copystr,0)
496 8: movl 4(%ap),%r5 # from
497 movl 8(%ap),%r4 # to
498 movl 12(%ap),%r3 # len
499 movl 16(%ap),%r2 # copied
500 clrl %r0
501 mfpr $PR_ESP,%r1	# arm fault catcher: user-space faults
502 movab 3f,(%r1)	# resume at 3f below
503
504 tstl %r3 # any chars to copy?
505 bneq 1f # yes, jump for more
506 0: tstl %r2 # save copied len?
507 beql 2f # no
508 subl3 4(%ap),%r5,(%r2) # save copied len
509 2: ret
510
511 1: movb (%r5)+,(%r4)+ # copy one char
512 beql 0b # jmp if last char (NUL copied)
513 sobgtr %r3,1b # copy one more
514 movl $ENAMETOOLONG,%r0 # inform about too long string
515 brb 0b # out of chars
516
517 3: mfpr $PR_ESP,%r1	# fault: disarm catcher and finish up;
518 clrl (%r1)		# the trap handler has set %r0 to the error
519 brb 0b
520
/*
 * subyte/suword/suswintr: store a byte/long/word to user space.
 * fusword/fuswintr: fetch a word from user space.
 * All return -1 for an address in kernel space (bit 31 set). On
 * success the su* routines return 0 and the fu* routines the value
 * fetched. NOTE(review): on a fault the trap handler resumes at the
 * local 1f with the error indication in %r1 -- confirm in trap.c.
 */
521 ENTRY(subyte,0)
522 movl 4(%ap),%r0	# user address
523 blss 3f # illegal space
524 mfpr $PR_ESP,%r1	# arm fault catcher at 1f
525 movab 1f,(%r1)
526 movb 8(%ap),(%r0)	# store the byte
527 clrl %r1		# success
528 1: mfpr $PR_ESP,%r2
529 clrl (%r2)		# disarm catcher
530 movl %r1,%r0
531 ret
532
533 ENTRY(suword,0)
534 movl 4(%ap),%r0	# user address
535 blss 3f # illegal space
536 mfpr $PR_ESP,%r1	# arm fault catcher at 1f
537 movab 1f,(%r1)
538 movl 8(%ap),(%r0)	# store the longword
539 clrl %r1		# success
540 1: mfpr $PR_ESP,%r2
541 clrl (%r2)		# disarm catcher
542 movl %r1,%r0
543 ret
544
545 ENTRY(suswintr,0)
546 movl 4(%ap),%r0	# user address
547 blss 3f # illegal space
548 mfpr $PR_ESP,%r1	# arm fault catcher at 1f
549 movab 1f,(%r1)
550 movw 8(%ap),(%r0)	# store the word
551 clrl %r1		# success
552 1: mfpr $PR_ESP,%r2
553 clrl (%r2)		# disarm catcher
554 movl %r1,%r0
555 ret
556
557 3: mnegl $1,%r0	# kernel-space address: return -1
558 ret
559
560 .align 2
561 ALTENTRY(fusword)
562 ENTRY(fuswintr,0)
563 movl 4(%ap),%r0	# user address
564 blss 3b		# kernel space: return -1
565 mfpr $PR_ESP,%r1	# arm fault catcher at 1f
566 movab 1f,(%r1)
567 movzwl (%r0),%r1	# fetch the word, zero-extended
568 1: mfpr $PR_ESP,%r2
569 clrl (%r2)		# disarm catcher
570 movl %r1,%r0		# return the value (or fault error)
571 ret
572
/*
 * Simple spinlock primitives, entered via jsb with %r1 = lock address.
 * All use the interlocked branch-on-bit instructions on bit 0.
 */
573 JSBENTRY(Slock)	# spin until we own bit 0 of (%r1)
574 1: bbssi $0,(%r1),1b	# interlocked test-and-set; retry while set
575 rsb
576
577 JSBENTRY(Slocktry)	# one attempt: %r0 = 1 on success, 0 if held
578 clrl %r0
579 bbssi $0,(%r1),1f
580 incl %r0
581 1: rsb
582
583 JSBENTRY(Sunlock)	# release: interlocked clear of bit 0
584 bbcci $0,(%r1),1f
585 1: rsb
586
587 #
588 # data department
589 #
590 .data
591
592 .globl _C_LABEL(memtest)
593 _C_LABEL(memtest): # memory test in progress
594 .long 0	# resume address for badaddr()'s machine check recovery
595
596 #ifdef __ELF__
597 .section .rodata
598 #endif
599 noque: .asciz "swtch"	# panic message; apparently unreferenced here
600