/*	$NetBSD: subr.S,v 1.14 2007/02/09 21:55:13 ad Exp $	   */

/*
 * Copyright (c) 1994 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

#include "assym.h"
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_ibcs2.h"
#ifdef COMPAT_IBCS2
#include <compat/ibcs2/ibcs2_syscall.h>
#endif
#include "opt_compat_ultrix.h"
#ifdef COMPAT_ULTRIX
#include <compat/ultrix/ultrix_syscall.h>
#endif

#define JSBENTRY(x)	.globl x ; .align 2 ; x :
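/*
 * Routines declared with JSBENTRY are entered with jsb rather than calls:
 * there is no argument list or register save mask, and they return with
 * rsb (the context switch routines below instead leave through ldpctx/rei).
 */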

	.text

#ifdef KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky. The kernel is not loaded at the correct
 * address, so it must first be relocated and then copied, after which
 * execution jumps back to the correct address.
 */
/* Copy routine */
cps:
2:	movb	(%r0)+,(%r1)+
	cmpl	%r0,%r7
	bneq	2b

3:	clrb	(%r1)+
	incl	%r0
	cmpl	%r0,%r6
	bneq	3b
	clrl	-(%sp)
	movl	%sp,%ap
	movl	$_cca,%r7
	movl	%r8,(%r7)
	movpsl	-(%sp)
	pushl	%r2
	rei
cpe:

/* Copy the copy routine */
1:	movab	cps,%r0
	movab	cpe,%r1
	movl	$0x300000,%sp
	movl	%sp,%r3
4:	movb	(%r0)+,(%r3)+
	cmpl	%r0,%r1
	bneq	4b
	movl	%r7,%r8
/* Ok, copy routine copied, set registers and rei */
	movab	_edata,%r7
	movab	_end,%r6
	movl	$0x80000000,%r1
	movl	$0x80000200,%r0
	subl3	$0x200,%r6,%r9
	movab	2f,%r2
	subl2	$0x200,%r2
	movpsl	-(%sp)
	pushab	4(%sp)
	rei

/*
 * First entry routine from boot. This should be in a file called locore.
 */
JSBENTRY(start)
	brb	1b			# A netbooted kernel starts here
#else
ASENTRY(start, 0)
#endif
2:	bisl3	$0x80000000,%r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000			# Push a nice PSL
	pushl	$to				# Address to jump to
	rei					# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)		# Save all regs in panic
	cmpb	(%ap),$3			# symbol info present?
	blssu	3f				# nope, skip
	bisl3	$0x80000000,8(%ap),_C_LABEL(symtab_start)
						#   save start of symtab
	movl	12(%ap),_C_LABEL(symtab_nsyms)	#   save number of symbols
	bisl3	$0x80000000,%r9,_C_LABEL(symtab_end)
						#   save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,%r0	# Round symbol table end
	bicl3	$0x3ff,%r0,_C_LABEL(proc0paddr)	# save proc0 uarea pointer
	bicl3	$0x80000000,_C_LABEL(proc0paddr),%r0	# get phys proc0 uarea addr
	mtpr	%r0,$PR_PCBB			# Save in IPR PCBB
	addl3	$USPACE,_C_LABEL(proc0paddr),%r0	# Get kernel stack top
	mtpr	%r0,$PR_KSP			# put in IPR KSP
	movl	%r0,_C_LABEL(Sysmap)		# SPT start addr after KSP
	movl	_C_LABEL(proc0paddr),%r0	# get PCB virtual address
	movab	IFTRAP(%r0),4(%r0)		# Save trap address in ESP
	mtpr	4(%r0),$PR_ESP			# Put it in ESP also

# Set some registers in known state
	movl	_C_LABEL(proc0paddr),%r0
	clrl	P0LR(%r0)
	clrl	P1LR(%r0)
	mtpr	$0,$PR_P0LR
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,%r1
	movl	%r1,P0BR(%r0)
	movl	%r1,P1BR(%r0)
	mtpr	%r1,$PR_P0BR
	mtpr	%r1,$PR_P1BR
	clrl	IFTRAP(%r0)
	mtpr	$0,$PR_SCBB

# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(%ap)				# Any arguments?
	bneq	1f				# Yes, called from new boot
	movl	%r11,_C_LABEL(boothowto)	# Howto boot (single etc...)
#	movl	%r10,_C_LABEL(bootdev)		# uninteresting, will complain
	movl	%r8,_C_LABEL(avail_end)		# Usable memory (from VMB)
	clrl	-(%sp)				# Have no RPB
	brb	2f
#endif

1:	pushl	4(%ap)				# Address of old rpb
2:	calls	$1,_C_LABEL(_start)		# Jump away.
	/* NOTREACHED */


/*
 * Signal handler code.
 */

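/*
 * Signal trampolines.  The kernel copies the code between the start and
 * end labels out to user space.  Each trampoline saves %r0-%r5, calls the
 * handler whose address the kernel placed on the stack, restores the
 * registers and re-enters the kernel through the appropriate sigreturn
 * system call; the exit/halt that follow should never be reached.
 */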
	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS_compat_16___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(esigcode):

#ifdef COMPAT_IBCS2
	.align	2
	.globl	_C_LABEL(ibcs2_sigcode),_C_LABEL(ibcs2_esigcode)
_C_LABEL(ibcs2_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS_compat_16___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(ibcs2_esigcode):
#endif /* COMPAT_IBCS2 */

#ifdef COMPAT_ULTRIX
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$ULTRIX_SYS_sigreturn
	chmk	$SYS_exit
	halt
_C_LABEL(ultrix_esigcode):
#endif

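/*
 * Interrupt dispatch template.  The code between idsptch and eidsptch is
 * meant to be copied once per device vector, and the three zeroed
 * longwords after the jsb are then filled in with the handler, its
 * argument and an (optional) evcnt pointer.  The jsb pushes the address
 * of the data that follows it, which is how cmn_idsptch finds the
 * per-vector block.
 */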
	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
_C_LABEL(idsptch):	pushr	$0x3f
	.word	0x9f16			# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0			# the callback interrupt routine
	.long	0			# its argument
	.long	0			# ptr to the corresponding evcnt struct
_C_LABEL(eidsptch):

_C_LABEL(cmn_idsptch):
	movl	(%sp)+,%r0		# get pointer to idspvec
	movl	8(%r0),%r1		# get evcnt pointer
	beql	1f			# no ptr, skip increment
	incl	EV_COUNT(%r1)		# increment low longword
	adwc	$0,EV_COUNT+4(%r1)	# add any carry to hi longword
1:	incl	_C_LABEL(uvmexp)+UVME_INTRS	# increment uvmexp.intrs
	pushl	4(%r0)			# push argument
	calls	$1,*(%r0)		# call interrupt routine
	popr	$0x3f			# pop registers
	rei				# return from interrupt

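#
# badaddr(addr, size) probes an address at IPL_HIGH.  memtest is pointed
# at label 4f below; if the access triggers a machine check, the machine
# check handler is expected to resume execution there and badaddr returns
# non-zero.  If the access succeeds, 0 is returned.
#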
ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,%r0		# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(%ap),%r2		# First argument, the address
	movl	8(%ap),%r1		# Second argument: b,w,l
	pushl	%r0			# Save old IPL
	clrl	%r3
	movab	4f,_C_LABEL(memtest)	# Set the return address

	caseb	%r1,$1,$4		# What is the size
1:	.word	1f-1b
	.word	2f-1b
	.word	3f-1b			# This is unused
	.word	3f-1b

1:	movb	(%r2),%r1		# Test a byte
	brb	5f

2:	movw	(%r2),%r1		# Test a word
	brb	5f

3:	movl	(%r2),%r1		# Test a long
	brb	5f

4:	incl	%r3			# Got machine chk => addr bad
5:	mtpr	(%sp)+,$PR_IPL
	movl	%r3,%r0
	ret

#ifdef DDB
/*
 * DDB is the only code that uses setjmp/longjmp.
 */
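/*
 * The jmp_buf used here is four longwords: the caller's AP, FP and PC
 * taken from its call frame, plus the stack pointer value to restore.
 * longjmp() reloads them, puts its second argument in %r0 and continues
 * at the saved PC.
 */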
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word	0
	movl	4(%ap), %r0
	movl	8(%fp), (%r0)
	movl	12(%fp), 4(%r0)
	movl	16(%fp), 8(%r0)
	moval	28(%fp), 12(%r0)
	clrl	%r0
	ret

_C_LABEL(longjmp):.word	0
	movl	4(%ap), %r1
	movl	8(%ap), %r0
	movl	(%r1), %ap
	movl	4(%r1), %fp
	movl	12(%r1), %sp
	jmp	*8(%r1)
#endif

#
# setrunqueue/remrunqueue fast variants.
#

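#
# Both are entered with jsb and expect the lwp pointer in %r0.  The run
# queue index is the priority divided by four (extzv picks out bits <7:2>
# of l_priority), matching the 32 queues scanned by Swtch below.
#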
JSBENTRY(Setrq)
#ifdef DIAGNOSTIC
	tstl	4(%r0)		# Check that the process actually is off the queue
	beql	1f
	pushab	setrq
	calls	$1,_C_LABEL(panic)
setrq:	.asciz	"setrunqueue"
#endif
1:	extzv	$2,$6,L_PRIORITY(%r0),%r1	# get priority
	movaq	_C_LABEL(sched_qs)[%r1],%r2	# get address of queue
	insque	(%r0),*PH_RLINK(%r2)		# put proc last in queue
	bbss	%r1,_C_LABEL(sched_whichqs),1f	# set queue bit.
1:	rsb

JSBENTRY(Remrq)
	extzv	$2,$6,L_PRIORITY(%r0),%r1
#ifdef DIAGNOSTIC
	bbs	%r1,_C_LABEL(sched_whichqs),1f
	pushab	remrq
	calls	$1,_C_LABEL(panic)
remrq:	.asciz	"remrunqueue"
#endif
1:	remque	(%r0),%r2
	bneq	2f			# Not last process on queue
	bbsc	%r1,_C_LABEL(sched_whichqs),2f
2:	clrl	L_BACK(%r0)		# safety belt
	rsb

#
# Idle loop. Here we could do something fun, maybe, like calculating
# pi or something.
#
idle:
	calls	$0,_C_LABEL(sched_unlock_idle)
	mtpr	$1,$PR_IPL		# IPL cannot be 0 because we are
					# running on the interrupt stack
					# and may get interrupts

1:	tstl	_C_LABEL(sched_whichqs)	# Anything ready to run?
	beql	1b			# no, run the idle loop again.
/* Something is runnable; retest the queues properly, under the sched lock */
	mtpr	$IPL_HIGH,$PR_IPL	# block all types of interrupts
3:	calls	$0,_C_LABEL(sched_lock_idle)
	brb	lp			# check sched_whichqs again

#
# cpu_switch, cpu_preempt, cpu_exit and the idle loop are implemented in
# assembler for efficiency. This is called at IPL_HIGH.
#

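#
# Swtch: clear curlwp, save the outgoing context with svpctx (which moves
# us onto the interrupt stack), find the highest-priority non-empty run
# queue with ffs, unlink its first lwp and make it curlwp, then resume it
# with ldpctx/rei.  PCB_R0 of the incoming pcb is set so that the caller
# sees 0 if the same lwp was resumed and 1 if a real switch took place.
#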
JSBENTRY(Swtch)
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
	clrl	CI_CURLWP(%r1)		# Stop process accounting
	svpctx				# Save context if another CPU
					# gets control first (must be on
					# the interrupt stack when idling)


lp:	ffs	$0,$32,_C_LABEL(sched_whichqs),%r3	# Search for bit set
	beql	idle			# no bit set, go to idle loop

	movaq	_C_LABEL(sched_qs)[%r3],%r1	# get address of queue head
	remque	*(%r1),%r2		# remove lwp pointed to by queue head
					# lwp ptr is now in %r2
#ifdef DIAGNOSTIC
	bvc	1f			# check if something on queue
	pushab	noque
	calls	$1,_C_LABEL(panic)
#endif

1:	bneq	2f			# more processes on queue?
	bbsc	%r3,_C_LABEL(sched_whichqs),2f	# no, clear bit in whichqs
2:	clrl	L_BACK(%r2)		# clear proc backpointer
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
	/* p->p_cpu initialized in fork1() for single-processor */
#if defined(MULTIPROCESSOR)
	movl	%r1,L_CPU(%r2)		# l->l_cpu = curcpu();
#endif
	movb	$LSONPROC,L_STAT(%r2)	# l->l_stat = LSONPROC;
	movl	%r2,CI_CURLWP(%r1)	# set new process running
	clrl	CI_WANT_RESCHED(%r1)	# we are now changing process
	movl	L_ADDR(%r2),%r0		# Get pointer to new pcb.
	addl3	%r0,$IFTRAP,%r1		# Save for copy* functions.
	mtpr	%r1,$PR_ESP		# Use ESP as CPU-specific pointer
	movl	%r1,ESP(%r0)		# Must save in PCB also.
	mfpr	$PR_SSP,%r1		# New process must inherit cpu_info
	movl	%r1,SSP(%r0)		# Put it in new PCB

#
# Nice routine to get physical from virtual addresses.
#
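# %r0 holds the kernel virtual address of the new pcb: extract its virtual
# page number (bits <29:9>, 512-byte pages), index the system page table
# through Sysmap to fetch the PTE, and shift the page frame number up nine
# bits to form the physical address loaded into PR_PCBB.  The pcb is page
# aligned, so the byte offset need not be added back.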
	extzv	$9,$21,%r0,%r1		# extract offset
	ashl	$9,*_C_LABEL(Sysmap)[%r1],%r3

	clrl	PCB_R0(%r0)		# Assume switch to same lwp
	mfpr	$PR_PCBB,%r1		# Get old PCB address
	cmpl	%r1,%r3			# The same lwp?
	beql	1f			# Branch if it is
	movl	$1,PCB_R0(%r0)		# Otherwise, return 1.

1:	mtpr	%r3,$PR_PCBB
	ldpctx
	pushl	%r0
	calls	$0,_C_LABEL(sched_unlock_idle)
	movl	(%sp)+,%r0
	rei

#if defined(MULTIPROCESSOR)
	.align	2
	.globl	_C_LABEL(vax_mp_tramp)	# used to kick off multiprocessor systems.
_C_LABEL(vax_mp_tramp):
	ldpctx
	rei
#endif

JSBENTRY(Swtchto)
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
	clrl	CI_CURLWP(%r1)		# Stop process accounting
	svpctx				# Now on interrupt stack

	# New LWP already in %r2
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
#if defined(MULTIPROCESSOR)
	movl	%r1,L_CPU(%r2)		# l->l_cpu = curcpu();
#endif
	movb	$LSONPROC,L_STAT(%r2)	# l->l_stat = LSONPROC;
	movl	%r2,CI_CURLWP(%r1)	# set new process running
	movl	L_ADDR(%r2),%r0		# Get pointer to new pcb.
	addl3	%r0,$IFTRAP,%r3		# Save for copy* functions.
	mtpr	%r3,$PR_ESP		# Use ESP as CPU-specific pointer
	movl	%r3,ESP(%r0)		# Must save in PCB also.
	movl	%r1,SSP(%r0)		# Put it in new PCB

	extzv	$9,$21,%r0,%r1		# extract offset
	ashl	$9,*_C_LABEL(Sysmap)[%r1],%r3

	mtpr	%r3,$PR_PCBB
	ldpctx
	pushl	%r0
	calls	$0,_C_LABEL(sched_unlock_idle)
	movl	(%sp)+,%r0
	rei

#
# the last routine called by a process.
#

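#
# cpu_exit() is called with the exiting lwp as its argument.  It moves to
# the per-CPU scratch page (ci_exit) as a temporary stack and PCB so that
# lwp_exit2() can free the lwp's own uarea and kernel stack, then takes
# the scheduler lock and drops into Swtch to pick the next lwp to run.
#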
ENTRY(cpu_exit,0)
	movl	4(%ap),%r6		# Process pointer in %r6

	mtpr	$IPL_CLOCK,$PR_IPL	# Block almost everything
	mfpr	$PR_SSP,%r7		# get cpu_info ptr
	movl	CI_EXIT(%r7),%r8	# scratch page address
	movab	512(%r8),%sp		# change stack
	bicl2	$0xc0000000,%r8		# get physical address
	mtpr	%r8,$PR_PCBB		# new PCB
	mtpr	%r7,$PR_SSP		# In case...
	pushl	%r6
	calls	$1,_C_LABEL(lwp_exit2)	# release last resources.
	mtpr	$IPL_HIGH,$PR_IPL	# block all types of interrupts
	calls	$0,_C_LABEL(sched_lock_idle)
	clrl	%r6
	brw	Swtch

#
# copy/fetch/store routines.
#

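#
# All the user-space copy and store routines below share one fault-recovery
# scheme: PR_ESP points into the current pcb, and storing a label address
# through that pointer arms an on-fault handler.  If a user access faults,
# the page fault code is expected to resume execution at that label (with
# EFAULT in %r0); clearing the longword disarms the handler again.
#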
ENTRY(copyout, 0)
	movl	8(%ap),%r2
	blss	3f		# kernel space
	movl	4(%ap),%r1
	brb	2f

ENTRY(copyin, 0)
	movl	4(%ap),%r1
	blss	3f		# kernel space
	movl	8(%ap),%r2
2:	mfpr	$PR_ESP,%r3
	movab	1f,(%r3)
	movc3	12(%ap),(%r1),(%r2)
1:	mfpr	$PR_ESP,%r3
	clrl	(%r3)
	ret

3:	mnegl	$1,%r0
	ret

ENTRY(kcopy,0)
	mfpr	$PR_ESP,%r3
	movl	(%r3),-(%sp)
	movab	1f,(%r3)
	movl	4(%ap),%r1
	movl	8(%ap),%r2
	movc3	12(%ap),(%r1),(%r2)
	clrl	%r1
1:	mfpr	$PR_ESP,%r3
	movl	(%sp)+,(%r3)
	movl	%r1,%r0
	ret

/*
 * copy{in,out}str() copies data from/to user space to/from kernel space.
 * Security checks:
 *	1) user space address must be < KERNBASE
 *	2) the VM system will do the checks while copying
 */
ENTRY(copyinstr, 0)
	tstl	4(%ap)		# kernel address?
	bgeq	8f		# no, continue
6:	movl	$EFAULT,%r0
	movl	16(%ap),%r2
	beql	7f
	clrl	(%r2)
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(%ap)		# kernel address?
	bgeq	8f		# no, continue
	brb	6b		# yes, return EFAULT

ENTRY(copystr,0)
8:	movl	4(%ap),%r5	# from
	movl	8(%ap),%r4	# to
	movl	12(%ap),%r3	# len
	movl	16(%ap),%r2	# copied
	clrl	%r0
	mfpr	$PR_ESP,%r1
	movab	3f,(%r1)

	tstl	%r3		# any chars to copy?
	bneq	1f		# yes, jump for more
0:	tstl	%r2		# save copied len?
	beql	2f		# no
	subl3	4(%ap),%r5,(%r2)	# save copied len
2:	ret

1:	movb	(%r5)+,(%r4)+	# copy one char
	beql	0b		# jmp if last char
	sobgtr	%r3,1b		# copy one more
	movl	$ENAMETOOLONG,%r0	# report that the string was too long
	brb	0b		# out of chars

3:	mfpr	$PR_ESP,%r1
	clrl	(%r1)
	brb	0b

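#
# subyte(), suword() and suswintr() store a byte, a longword and a 16-bit
# word at a user address; fusword()/fuswintr() fetch a 16-bit word.  Each
# returns 0 (or the fetched value) on success and a non-zero value when
# the address lies in kernel space or the access faults, using the same
# on-fault mechanism as the copy routines above.
#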
ENTRY(subyte,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movb	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

ENTRY(suword,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movl	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

ENTRY(suswintr,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movw	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

3:	mnegl	$1,%r0
	ret

	.align	2
ALTENTRY(fusword)
ENTRY(fuswintr,0)
	movl	4(%ap),%r0
	blss	3b
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movzwl	(%r0),%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

#if defined(MULTIPROCESSOR)

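#
# Spinlock primitives built on the interlocked bit-branch instructions:
# bbssi atomically tests and sets bit 0 of the lock longword addressed by
# %r1, and bbcci atomically tests and clears it.  Slock spins until the
# lock is taken, Slocktry returns 1 in %r0 if it got the lock and 0
# otherwise, and Sunlock releases the lock.
#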
JSBENTRY(Slock)
1:	bbssi	$0,(%r1),1b
	rsb

JSBENTRY(Slocktry)
	clrl	%r0
	bbssi	$0,(%r1),1f
	incl	%r0
1:	rsb

JSBENTRY(Sunlock)
	bbcci	$0,(%r1),1f
1:	rsb

#endif

#
# data department
#
	.data

	.globl	_C_LABEL(memtest)
_C_LABEL(memtest):		# memory test in progress
	.long	0

#ifdef __ELF__
	.section .rodata
#endif
noque:	.asciz	"swtch"