/*	$NetBSD: subr.S,v 1.27 2009/11/26 00:19:23 matt Exp $	   */

/*
 * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Lule}.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
32
#include <machine/asm.h>

#include "assym.h"
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_ibcs2.h"
#ifdef COMPAT_IBCS2
#include <compat/ibcs2/ibcs2_syscall.h>
#endif
#include "opt_compat_ultrix.h"
#ifdef COMPAT_ULTRIX
#include <compat/ultrix/ultrix_syscall.h>
#endif

/*
 * JSBENTRY: declare a global entry point that is reached via jsb
 * (no CALLS entry mask), aligned on a longword boundary.
 */
#define JSBENTRY(x) .globl x ; .align 2 ; x :
/*
 * SCBENTRY: declare the kernel entry point for a System Control
 * Block (interrupt/exception) vector; the generated label is the
 * given name prefixed with 'X'.
 */
#define SCBENTRY(name) \
	.text ; \
	.align 2 ; \
	.globl __CONCAT(X,name) ; \
__CONCAT(X,name):
55
	.text

#ifdef KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky. The kernel is not loaded at the correct
 * address, so the kernel must first be relocated, then copied, then
 * jump back to the correct address.
 */
/*
 * Copy routine, executed from its relocated position at 0x300000.
 * On entry (set up below): %r0 = copy source, %r1 = copy destination,
 * %r7 = end of initialized data, %r6 = end of bss, %r2 = address to
 * continue execution at after the copy.
 */
cps:
2:	movb	(%r0)+,(%r1)+		# copy the loaded image byte by byte
	cmpl	%r0,%r7			# until end of initialized data
	bneq	2b

3:	clrb	(%r1)+			# then clear memory up to _end
	incl	%r0
	cmpl	%r0,%r6
	bneq	3b
	clrl	-(%sp)			# fake an empty argument list
	movl	%sp,%ap
	movl	$_cca,%r7
	movl	%r8,(%r7)		# stash %r8 in _cca
					# NOTE(review): presumably boot info
					# preserved for later use -- confirm.
	movpsl	-(%sp)			# push PSL and target PC, then
	pushl	%r2			# "rei" into the copied kernel
	rei
cpe:

/* Copy the copy routine */
1:	movab	cps,%r0			# bounds of the routine above
	movab	cpe,%r1
	movl	$0x300000,%sp		# new stack; copy target is here too
	movl	%sp,%r3
4:	movb	(%r0)+,(%r3)+		# copy cps..cpe up to 0x300000
	cmpl	%r0,%r1
	bneq	4b
	movl	%r7,%r8			# preserve %r7 across the switch
/* Ok, copy routine copied, set registers and rei */
	movab	_edata,%r7		# end of initialized data
	movab	_end,%r6		# end of bss
	movl	$0x80000000,%r1		# copy destination: kernel base
	movl	$0x80000200,%r0		# copy source
	subl3	$0x200,%r6,%r9
	movab	2f,%r2			# where to continue after the copy
	subl2	$0x200,%r2
	movpsl	-(%sp)			# push PSL, then PC = 0x300000
	pushab	4(%sp)			# (the copied cps) and "rei" there
	rei

/*
 * First entry routine from boot. This should be in a file called locore.
 */
JSBENTRY(start)
	brb	1b			# Netbooted starts here
#else
ASENTRY(start, 0)
#endif
/*
 * Common boot path: record symbol table info passed by the bootloader,
 * carve lwp0's uarea out of the memory just past the symbol table, set
 * the memory-management registers to a known state and call _start().
 */
2:	bisl3	$0x80000000,%r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000			# Push a nice PSL
	pushl	$to				# Address to jump to
	rei					# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)		# Save all regs in panic
	cmpb	(%ap),$3			# symbols info present?
	blssu	3f				# nope, skip
	bisl3	$0x80000000,8(%ap),_C_LABEL(symtab_start)
						#   save start of symtab
	movl	12(%ap),_C_LABEL(symtab_nsyms)	#   save number of symtab
	bisl3	$0x80000000,%r9,_C_LABEL(symtab_end)
						#   save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,%r0	# Round symbol table end
	bicl3	$0x3ff,%r0,%r1			#   up to a 1K boundary
	movl	%r1,_C_LABEL(lwp0)+L_ADDR	# save lwp0 uarea pointer
	bicl2	$0x80000000,%r1			# get phys lwp0 uarea addr
	mtpr	%r0,$PR_PCBB			# Save in IPR PCBB
						# NOTE(review): %r0 is the
						# un-rounded virtual value; the
						# physical PCB address is in %r1.
						# Confirm against upstream --
						# this looks like it should be
						# mtpr %r1.
	addl3	$USPACE,%r1,%r0			# Get kernel stack top
	mtpr	%r0,$PR_KSP			# put in IPR KSP
	movl	%r0,_C_LABEL(Sysmap)		# SPT start addr after KSP
	movl	_C_LABEL(lwp0)+L_ADDR,%r0	# get PCB virtual address
	mfpr	$PR_PCBB,PCB_PADDR(%r0)		# save PCB physical address
	movab	IFTRAP(%r0),ESP(%r0)		# Save trap address in ESP
	mtpr	4(%r0),$PR_ESP			# Put it in ESP also

# Set some registers in known state
	movl	%r1,%r0				# get lwp0.l_addr
	clrl	P0LR(%r0)			# empty user page tables
	clrl	P1LR(%r0)
	mtpr	$0,$PR_P0LR
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,%r1			# point the user-space base
	movl	%r1,P0BR(%r0)			#   registers at an address
	movl	%r1,P1BR(%r0)			#   with the sign bit set
	mtpr	%r1,$PR_P0BR
	mtpr	%r1,$PR_P1BR
	clrl	IFTRAP(%r0)			# no fault hook installed
	mtpr	$0,$PR_SCBB

# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(%ap)				# Any arguments?
	bneq	1f				# Yes, called from new boot
	movl	%r11,_C_LABEL(boothowto)	# Howto boot (single etc...)
#	movl	%r10,_C_LABEL(bootdev)		# uninteresting, will complain
	movl	%r8,_C_LABEL(avail_end)		# Usable memory (from VMB)
	clrl	-(%sp)				# Have no RPB
	brb	2f
#endif

1:	pushl	4(%ap)				# Address of old rpb
2:	calls	$1,_C_LABEL(_start)		# Jump away.
	/* NOTREACHED */
165
166
/*
 * Signal handler code.
 */

/*
 * Signal trampoline.  The region between sigcode and esigcode is
 * position independent; the pair of labels delimits what is
 * presumably copied out to user space by the signal-delivery code
 * (confirm in machdep).  The handler address is found at 0x24(%sp)
 * after the register save; after the handler returns, re-enter the
 * kernel via sigreturn14, falling back to exit (and finally halt)
 * if that ever returns.
 */
	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f				# save r0-r5
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0			# fetch handler address
	calls	$3,(%r0)			# call handler with 3 args
	popr	$0x3f				# restore r0-r5
	chmk	$SYS_compat_16___sigreturn14	# restore interrupted context
	chmk	$SYS_exit			# sigreturn failed: exit
	halt					# exit failed too: give up
_C_LABEL(esigcode):
183
#ifdef COMPAT_IBCS2
/*
 * iBCS2-compatibility signal trampoline; identical in structure to
 * the native sigcode above (save r0-r5, call the handler found at
 * 0x24(%sp), then sigreturn14 / exit / halt).
 */
	.align	2
	.globl	_C_LABEL(ibcs2_sigcode),_C_LABEL(ibcs2_esigcode)
_C_LABEL(ibcs2_sigcode):
	pushr	$0x3f				# save r0-r5
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0			# fetch handler address
	calls	$3,(%r0)			# call handler with 3 args
	popr	$0x3f				# restore r0-r5
	chmk	$SYS_compat_16___sigreturn14	# restore interrupted context
	chmk	$SYS_exit			# sigreturn failed: exit
	halt					# exit failed too: give up
_C_LABEL(ibcs2_esigcode):
#endif /* COMPAT_IBCS2 */
198
#ifdef COMPAT_ULTRIX
/*
 * Ultrix-compatibility signal trampoline; same structure as the
 * native sigcode but re-enters the kernel via the Ultrix
 * sigreturn system call.
 */
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f				# save r0-r5
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0			# fetch handler address
	calls	$3,(%r0)			# call handler with 3 args
	popr	$0x3f				# restore r0-r5
	chmk	$ULTRIX_SYS_sigreturn		# restore interrupted context
	chmk	$SYS_exit			# sigreturn failed: exit
	halt					# exit failed too: give up
_C_LABEL(ultrix_esigcode):
#endif
213
/*
 * Device-interrupt dispatch stub.  The idsptch..eidsptch pair
 * delimits a template that is presumably copied per interrupt
 * vector, with the three trailing longwords filled in (handler,
 * argument, evcnt pointer).  The stub saves r0-r5 and does an
 * absolute jsb to the common dispatcher, which leaves the address
 * of those three longwords on the stack.
 */
	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
_C_LABEL(idsptch):
	pushr	$0x3f			# save r0-r5
	.word	0x9f16			# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0			# the callback interrupt routine
	.long	0			# its argument
	.long	0			# ptr to correspond evcnt struct
_C_LABEL(eidsptch):

/*
 * Common interrupt dispatcher: bump statistics, call the handler
 * with its argument, then restore registers and rei.
 */
_C_LABEL(cmn_idsptch):
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnlock)	# take the kernel lock
#endif
	movl	(%sp)+,%r0		# get pointer to idspvec
	mtpr	$IPL_VM,$PR_IPL		# Make sure we are at IPL_VM
	movl	8(%r0),%r1		# get evcnt pointer
	beql	1f			# no ptr, skip increment
	incl	EV_COUNT(%r1)		# increment low longword
	adwc	$0,EV_COUNT+4(%r1)	# add any carry to hi longword
1:	incl	_C_LABEL(uvmexp)+UVME_INTRS	# increment uvmexp.intrs
#if 0
	pushl	%r0			# debug: printf("intr %p(%p)\n", ...)
	movq	(%r0),-(%sp)
	pushab	2f
	calls	$3,_C_LABEL(printf)
	movl	(%sp)+,%r0
#endif
	pushl	4(%r0)			# push argument
	calls	$1,*(%r0)		# call interrupt routine
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnunlock)	# release the kernel lock
#endif
	popr	$0x3f			# pop registers
	rei				# return from interrupt
#if 0
2:	.asciz	"intr %p(%p)\n"
#endif
253
/*
 * int badaddr(void *addr, int size)
 *
 * Probe an address by reading it with machine checks redirected
 * through the memtest hook (see the data section).  size selects a
 * byte/word/longword access.  Returns 0 if the access succeeded,
 * non-zero if it caused a machine check.  Runs at IPL_HIGH for the
 * duration of the probe.
 */
ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,%r0		# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(%ap),%r2		# First argument, the address
	movl	8(%ap),%r1		# Sec arg, b,w,l
	pushl	%r0			# Save old IPL
	clrl	%r3			# assume the address is good
	movab	4f,_C_LABEL(memtest)	# Set the return address
					# NOTE(review): memtest is not
					# cleared on return -- confirm the
					# machine-check handler resets it.

	caseb	%r1,$1,$4		# What is the size
1:	.word	1f-1b
	.word	2f-1b
	.word	3f-1b			# This is unused
	.word	3f-1b

1:	movb	(%r2),%r1		# Test a byte
	brb	5f

2:	movw	(%r2),%r1		# Test a word
	brb	5f

3:	movl	(%r2),%r1		# Test a long
	brb	5f

4:	incl	%r3			# Got machine chk => addr bad
5:	mtpr	(%sp)+,$PR_IPL		# restore the saved IPL
	movl	%r3,%r0			# return the probe result
	ret
282
#ifdef DDB
/*
 * DDB is the only routine that uses setjmp/longjmp.
 *
 * jmp_buf layout, as written by setjmp below (offsets into the
 * caller-supplied buffer):
 *	[0]  caller's AP	(saved AP slot of the call frame)
 *	[4]  caller's FP
 *	[8]  caller's PC	(saved return PC)
 *	[12] caller's SP	(address of 28(%fp))
 * NOTE(review): 28(%fp) as the post-return SP assumes the standard
 * VAX call-frame layout with no extra saved registers -- confirm.
 */
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word	0
	movl	4(%ap), %r0		# %r0 = jmp_buf
	movl	8(%fp), (%r0)		# save frame's AP
	movl	12(%fp), 4(%r0)		# save frame's FP
	movl	16(%fp), 8(%r0)		# save frame's return PC
	moval	28(%fp),12(%r0)		# save SP value after return
	clrl	%r0			# setjmp itself returns 0
	ret

_C_LABEL(longjmp):.word	0
	movl	4(%ap), %r1		# %r1 = jmp_buf
	movl	8(%ap), %r0		# value the setjmp caller will see
	movl	(%r1), %ap		# restore AP
	movl	4(%r1), %fp		# restore FP
	movl	12(%r1), %sp		# restore SP
	jmp	*8(%r1)			# jump back to the saved PC
#endif
305
#if defined(MULTIPROCESSOR)
/*
 * Startup trampoline for secondary CPUs: simply load the process
 * context already prepared for this CPU (PR_PCBB must be set by the
 * kick-off code) and rei into it.
 */
	.align	2
	.globl	_C_LABEL(vax_mp_tramp)	# used to kick off multiprocessor systems.
_C_LABEL(vax_mp_tramp):
	ldpctx
	rei
#endif
313
	.globl	softint_cleanup,softint_exit,softint_process
	.type	softint_cleanup@function
	.type	softint_exit@function
	.type	softint_process@function
/*
 * Cleanup hook: softint_common stores this address as the saved PC
 * of the interrupted context, so this runs when that context is
 * reloaded.  NOTE(review): the code expects %r0 to hold the
 * interrupted (old) lwp at this point -- confirm how the saved
 * register image is arranged before relying on this.
 */
softint_cleanup:
	movl	L_CPU(%r0),%r1		/* get cpu_info */
	incl	CI_MTX_COUNT(%r1)	/* increment mutex count */
	clrl	L_CTXSWTCH(%r0)		/* clear l_ctxswtch of old lwp */
	movl	L_ADDR(%r0),%r1		/* get PCB of softint LWP */
softint_exit:
	popr	$0x3			/* restore r0 and r1 */
	rei				/* return from interrupt */
326
/*
 * Body of a softint lwp, entered via ldpctx from softint_common
 * with a freshly-built PCB.  Dispatches the pending soft interrupt
 * and then switches straight back to the interrupted (pinned)
 * lwp's context, arranging a quick exit through softint_exit.
 */
softint_process:
	/*
	 * R6 contains pinned LWP
	 * R7 contains ipl to dispatch with
	 */
	movq	%r6,-(%sp)		/* push old lwp and ipl onto stack */
	calls	$2,_C_LABEL(softint_dispatch)	/* dispatch it */

	/* We can use any register because ldpctx will overwrite them */
	movl	L_ADDR(%r6),%r3		/* get pcb of the pinned lwp */
	movab	softint_exit,PCB_PC(%r3)/* do a quick exit */
#ifdef MULTIPROCESSOR
	movl	L_CPU(%r6),%r8
	movl	%r6,CI_CURLWP(%r8)	/* make the pinned lwp current again */
#endif

	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* restore PA of interrupted pcb */
	ldpctx				/* implicitly updates curlwp */
	rei
346
347
/*
 * Common soft-interrupt entry, reached from the SCBENTRY stubs
 * below with r0/r1 already pushed and %r0 = SOFTINT_* index.
 * Saves the interrupted context (with softint_cleanup as its
 * resume PC), then builds a fresh stack/frame in the per-CPU
 * softint lwp's PCB and switches to it at softint_process.
 */
softint_common:
	mfpr	$PR_IPL,%r1		/* remember the interrupted IPL */
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	movpsl	-(%sp)			/* add cleanup hook */
	pushab	softint_cleanup
	svpctx

	/* We can use any register because ldpctx will overwrite them */
	mfpr	$PR_SSP,%r6		/* Get curlwp */
	movl	L_CPU(%r6),%r8		/* get cpu_info */
	movl	CI_SOFTLWPS(%r8)[%r0],%r2 /* get softlwp to switch to */
	movl	L_ADDR(%r2),%r3		/* Get pointer to its pcb. */
	movl	%r6,PCB_R6(%r3)		/* move old lwp into new pcb */
	movl	%r1,PCB_R7(%r3)		/* move IPL into new pcb */
#ifdef MULTIPROCESSOR
	movl	%r2,CI_CURLWP(%r8)	/* update ci_curlwp */
#endif

	/*
	 * Now reset the PCB since we have no idea what state it was
	 * last in.
	 */
	movab	(USPACE-TRAPFRAMELEN-CALLSFRAMELEN)(%r3),%r0
					/* calculate where KSP should be */
	movl	%r0,KSP(%r3)		/* save it as SP */
	movl	%r0,PCB_FP(%r3)		/* and as the FP too */
	movab	CA_ARGNO(%r0),PCB_AP(%r3) /* update the AP as well */
	movab	softint_process,PCB_PC(%r3) /* and where we will start */

	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* set PA of new pcb */
	ldpctx				/* load it */
	rei				/* get off interrupt stack */
379
/*
 * SCB entry points for the four soft-interrupt levels.  Each saves
 * r0/r1, loads its SOFTINT_* index into %r0 and joins the common
 * code at softint_common.
 */
SCBENTRY(softclock)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_CLOCK,%r0
	brb	softint_common

SCBENTRY(softbio)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_BIO,%r0
	brb	softint_common

SCBENTRY(softnet)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_NET,%r0
	brb	softint_common

SCBENTRY(softserial)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_SERIAL,%r0
	brb	softint_common
399
/*
 * Helper routine for cpu_lwp_fork. It gets invoked by Swtchto.
 * It lets the kernel know the lwp is alive (lwp_startup) and then
 * calls func(arg) and possibly returns to sret.
 * On entry: %r0/%r1 = old/new lwp, %r2/%r3 = func/arg (pushed as a
 * quadword below).
 */
ENTRY(cpu_lwp_bootstrap, 0)
	movq	%r2,-(%sp)		/* save func & arg */
	movq	%r0,-(%sp)		/* push oldl/newl */
	calls	$2,_C_LABEL(lwp_startup) /* startup the lwp */
	movl	(%sp)+,%r0		/* grab func */
	calls	$1,(%r0)		/* call it with arg */
	ret
412
/*
 * Context switch, called via jsb.
 *	r1 = newlwp
 *	r0 = oldlwp
 * Saves the current context into the old pcb, then loads the new
 * lwp's pcb; the old lwp is passed through in %r0 as the "return
 * value" seen by the resumed context.
 */
JSBENTRY(Swtchto)
	/* this pops the pc and psw from the stack and puts them in the pcb. */
	svpctx				# Now on interrupt stack

	/* We can now use any register because ldpctx will overwrite them */
	/* New LWP already in %r1 */
	movl	L_ADDR(%r1),%r3		# Get pointer to new pcb.
	movl	%r0,PCB_R0(%r3)		# move r0 into new pcb (return value)
#ifdef MULTIPROCESSOR
	movl	L_CPU(%r0), %r8		/* get cpu_info of old lwp */
	movl	%r8, L_CPU(%r1)		/* update cpu_info of new lwp */
	movl	%r1,CI_CURLWP(%r8)	/* update ci_curlwp */
#endif

	mtpr	PCB_PADDR(%r3),$PR_PCBB	# set PA of new pcb
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	ldpctx				# load it
	/* r0 already has previous lwp */
	/* r1 already has this lwp */
	/* r2/r3 and r4/r5 restored */
	rei				/* get off interrupt stack */
438
#
# copy/fetch/store routines.
#

/*
 * int copyout(const void *kaddr, void *uaddr, size_t len)
 * int copyin(const void *uaddr, void *kaddr, size_t len)
 *
 * Copy len bytes between kernel and user space.  The user-space
 * address must not have the sign bit set (i.e. must be below the
 * kernel base), otherwise -1 is returned immediately.  A fault
 * hook is installed through PR_ESP so that a fault during the copy
 * resumes at 1f.  Because movc3 takes a 16-bit count, lengths of
 * 64K or more are copied in 0xfffc-byte chunks.
 */
ENTRY(copyout, 0)
	movl	8(%ap),%r3		# user (destination) address
	blss	3f			# kernel space
	movl	4(%ap),%r1		# kernel (source) address
	brb	2f

ENTRY(copyin, 0)
	movl	4(%ap),%r1		# user (source) address
	blss	3f			# kernel space
	movl	8(%ap),%r3		# kernel (destination) address
2:	mfpr	$PR_ESP,%r2		# install fault hook:
	movab	1f,(%r2)		#   resume at 1f on fault
4:	tstw	14(%ap)			# check if >= 64K
	bneq	5f
	movc3	12(%ap),(%r1),(%r3)	# copy the (remaining) bytes
1:	mfpr	$PR_ESP,%r2		# clear fault hook
	clrl	(%r2)
	ret				# %r0 = 0 from movc3 on success
					# NOTE(review): after a fault, %r0
					# is presumably set by the fault
					# handler -- confirm.
5:	movc3	$0xfffc,(%r1),(%r3)	# copy one 0xfffc-byte chunk
	subl2	$0xfffc,12(%ap)		# and loop on the rest
	brb	4b

3:	mnegl	$1,%r0			# bad user address: return -1
	ret
467
/*
 * int kcopy(const void *from, void *to, size_t len)
 *
 * Fault-tolerant kernel-to-kernel copy.  The previous PR_ESP fault
 * hook is saved on the stack and restored afterwards, so kcopy can
 * nest inside other fault-hook users.  Returns 0 on success,
 * non-zero if the copy faulted (the clrl is skipped and %r1 holds
 * whatever movc3 left there -- presumably non-zero; confirm).
 * Note: len is used as a 16-bit movc3 count.
 */
ENTRY(kcopy,0)
	mfpr	$PR_ESP,%r3		# save the current fault hook
	movl	(%r3),-(%sp)
	movab	1f,(%r3)		# and install our own at 1f
	movl	4(%ap),%r1		# from
	movl	8(%ap),%r2		# to
	movc3	12(%ap),(%r1), (%r2)	# do the copy
	clrl	%r1			# success
1:	mfpr	$PR_ESP,%r3		# restore the previous fault hook
	movl	(%sp)+,(%r3)
	movl	%r1,%r0			# 0, or non-zero after a fault
	ret
480
/*
 * copy{in,out}str() copies data from/to user space to/from kernel space.
 * Security checks:
 *	1) user space address must be < KERNBASE
 *	2) the VM system will do the checks while copying
 */
ENTRY(copyinstr, 0)
	tstl	4(%ap)			# kernel address?
	bgeq	8f			# no, continue
6:	movl	$EFAULT,%r0		# bad address: return EFAULT
	movl	16(%ap),%r2		# and, if the caller asked for the
	beql	7f			#   copied length, zero it
	clrl	(%r2)
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(%ap)			# kernel address?
	bgeq	8f			# no, continue
	brb	6b			# yes, return EFAULT

/*
 * copystr(from, to, maxlen, *lencopied): copy a NUL-terminated
 * string of at most maxlen bytes.  Returns 0 on success or
 * ENAMETOOLONG if the string did not fit; *lencopied (if non-NULL)
 * receives the number of bytes copied including the NUL.
 */
ENTRY(copystr,0)
8:	movl	4(%ap),%r5		# from
	movl	8(%ap),%r4		# to
	movl	12(%ap),%r3		# len
	movl	16(%ap),%r2		# copied
	clrl	%r0			# default return value: success
	mfpr	$PR_ESP,%r1		# install fault hook:
	movab	3f,(%r1)		#   resume at 3f on fault

	tstl	%r3			# any chars to copy?
	bneq	1f			# yes, jump for more
0:	tstl	%r2			# save copied len?
	beql	2f			# no
	subl3	4(%ap),%r5,(%r2)	# save copied len
2:	ret
					# NOTE(review): this return path
					# does not clear the fault hook,
					# unlike the other copy routines --
					# confirm against upstream.

1:	movb	(%r5)+,(%r4)+		# copy one char
	beql	0b			# jmp if last char
	sobgtr	%r3,1b			# copy one more
	movl	$ENAMETOOLONG,%r0	# inform about too long string
	brb	0b			# out of chars

3:	mfpr	$PR_ESP,%r1		# fault: clear the hook and return
	clrl	(%r1)			# via the common exit path
	brb	0b			# NOTE(review): %r0 presumably set
					# by the fault handler -- confirm.
526
/*
 * subyte/suword/suswintr: store a byte/longword/word at a
 * user-space address.  Return 0 on success, -1 if the address has
 * the sign bit set (kernel space).  If the store faults, execution
 * presumably resumes at the hook label (1f) with the clrl skipped;
 * %r1 then still holds the (non-zero) ESP pointer, so a non-zero
 * value is returned -- confirm against the fault handler.
 */
ENTRY(subyte,0)
	movl	4(%ap),%r0		# user address
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1		# install fault hook:
	movab	1f,(%r1)		#   resume at 1f on fault
	movb	8(%ap),(%r0)		# store the byte
	clrl	%r1			# success
1:	mfpr	$PR_ESP,%r2		# clear fault hook
	clrl	(%r2)
	movl	%r1,%r0			# 0, or non-zero after a fault
	ret

ENTRY(suword,0)
	movl	4(%ap),%r0		# user address
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1		# install fault hook
	movab	1f,(%r1)
	movl	8(%ap),(%r0)		# store the longword
	clrl	%r1			# success
1:	mfpr	$PR_ESP,%r2		# clear fault hook
	clrl	(%r2)
	movl	%r1,%r0			# 0, or non-zero after a fault
	ret

ENTRY(suswintr,0)
	movl	4(%ap),%r0		# user address
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1		# install fault hook
	movab	1f,(%r1)
	movw	8(%ap),(%r0)		# store the word
	clrl	%r1			# success
1:	mfpr	$PR_ESP,%r2		# clear fault hook
	clrl	(%r2)
	movl	%r1,%r0			# 0, or non-zero after a fault
	ret

3:	mnegl	$1,%r0			# kernel-space address: return -1
	ret
565
	.align	2
/*
 * fusword/fuswintr: fetch an unsigned word from a user-space
 * address.  Returns the zero-extended word, or -1 if the address
 * is in kernel space (via the shared error exit above).  If the
 * fetch faults, the clrl path is skipped and %r1 still holds the
 * (non-zero) ESP pointer, so a non-zero value is returned --
 * presumably; confirm against the fault handler.
 */
ALTENTRY(fusword)
ENTRY(fuswintr,0)
	movl	4(%ap),%r0		# user address
	blss	3b			# kernel space: return -1
	mfpr	$PR_ESP,%r1		# install fault hook:
	movab	1f,(%r1)		#   resume at 1f on fault
	movzwl	(%r0),%r1		# fetch and zero-extend the word
1:	mfpr	$PR_ESP,%r2		# clear fault hook
	clrl	(%r2)
	movl	%r1,%r0			# return the word
	ret
578
/*
 * Low-level spinlock primitives, called via jsb with the lock-word
 * address in %r1.  They operate on bit 0 of the lock word using
 * the interlocked branch-on-bit instructions (bbssi/bbcci).
 */
JSBENTRY(Slock)
1:	bbssi	$0,(%r1),1b		# spin until we set bit 0 ourselves
	rsb

JSBENTRY(Slocktry)
	clrl	%r0			# assume failure
	bbssi	$0,(%r1),1f		# already held?
	incl	%r0			# no: we took it; return 1 in %r0
1:	rsb

JSBENTRY(Sunlock)
	bbcci	$0,(%r1),1f		# clear bit 0 (interlocked)
1:	rsb
592
#
# data department
#
	.data

/*
 * memtest: while non-zero, holds the address the machine-check
 * handler should resume at (used by badaddr() above to probe
 * possibly-nonexistent addresses).
 */
	.globl	_C_LABEL(memtest)
_C_LABEL(memtest):			# memory test in progress
	.long	0